Merge branch 'main' into remove-imutils-1

pull/146/head
cneud
commit eaff9e3537

@@ -14,6 +14,12 @@ jobs:
python-version: ['3.8', '3.9', '3.10', '3.11']
steps:
- name: clean up
run: |
sudo rm -rf /usr/share/dotnet
sudo rm -rf /opt/ghc
sudo rm -rf "/usr/local/share/boost"
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
- uses: actions/checkout@v4
- uses: actions/cache@v4
id: model_cache
@@ -30,7 +36,7 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install .
pip install .[OCR,plotting]
pip install -r requirements-test.txt
- name: Test with pytest
run: make test

@@ -32,9 +32,9 @@ models_eynollah: models_eynollah.tar.gz
models_eynollah.tar.gz:
# wget 'https://qurator-data.de/eynollah/2021-04-25/models_eynollah.tar.gz'
# wget 'https://qurator-data.de/eynollah/2022-04-05/models_eynollah_renamed.tar.gz'
# wget 'https://qurator-data.de/eynollah/2022-04-05/models_eynollah_renamed_savedmodel.tar.gz'
wget 'https://qurator-data.de/eynollah/2022-04-05/models_eynollah.tar.gz'
# wget 'https://github.com/qurator-spk/eynollah/releases/download/v0.3.0/models_eynollah.tar.gz'
wget 'https://github.com/qurator-spk/eynollah/releases/download/v0.3.1/models_eynollah.tar.gz'
# wget 'https://github.com/qurator-spk/eynollah/releases/download/v0.3.1/models_eynollah.tar.gz'
# Install with pip
install:
@@ -45,7 +45,7 @@ install-dev:
pip install -e .
smoke-test:
eynollah -i tests/resources/kant_aufklaerung_1784_0020.tif -o . -m $(PWD)/models_eynollah
eynollah layout -i tests/resources/kant_aufklaerung_1784_0020.tif -o . -m $(PWD)/models_eynollah
# Run unit tests
test:

@@ -25,9 +25,14 @@ classifiers = [
"Topic :: Scientific/Engineering :: Image Processing",
]
[project.optional-dependencies]
OCR = ["torch <= 2.0.1", "transformers <= 4.30.2"]
plotting = ["matplotlib"]
[project.scripts]
eynollah = "eynollah.cli:main"
ocrd-eynollah-segment = "eynollah.ocrd_cli:main"
ocrd-sbb-binarize = "eynollah.ocrd_cli_binarization:cli"
[project.urls]
Homepage = "https://github.com/qurator-spk/eynollah"

@@ -3,5 +3,6 @@ ocrd >= 2.23.3
numpy <1.24.0
scikit-learn >= 0.23.2
tensorflow < 2.13
matplotlib
setuptools >= 50
imutils >= 0.5.3
numba <= 0.58.1
loky

@@ -1,16 +1,95 @@
import sys
import os
from logging import getLevelName
import click
from ocrd_utils import initLogging, setOverrideLogLevel, getLogger
from eynollah.eynollah import Eynollah
from eynollah.eynollah import Eynollah, Eynollah_ocr
from eynollah.sbb_binarize import SbbBinarizer
@click.group()
def main():
pass
@click.command()
@main.command()
@click.option(
"--dir_xml",
"-dx",
help="directory of GT page-xml files",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--dir_out_modal_image",
"-domi",
help="directory where ground truth images would be written",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--dir_out_classes",
"-docl",
help="directory where ground truth classes would be written",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--input_height",
"-ih",
help="input height",
)
@click.option(
"--input_width",
"-iw",
help="input width",
)
@click.option(
"--min_area_size",
"-min",
help="min area size of regions considered for reading order training.",
)
def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, input_height, input_width, min_area_size):
xml_files_ind = os.listdir(dir_xml)
@main.command()
@click.option('--patches/--no-patches', default=True, help='by enabling this parameter you let the model see the image in patches.')
@click.option('--model_dir', '-m', type=click.Path(exists=True, file_okay=False), required=True, help='directory containing models for prediction')
@click.argument('input_image')
@click.argument('output_image')
@click.option(
"--dir_in",
"-di",
help="directory of images",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--dir_out",
"-do",
help="directory where the binarized images will be written",
type=click.Path(exists=True, file_okay=False),
)
def binarization(patches, model_dir, input_image, output_image, dir_in, dir_out):
if not dir_out and (dir_in):
print("Error: You used -di but did not set -do")
sys.exit(1)
elif dir_out and not (dir_in):
print("Error: You used -do to write out binarized images but have not set -di")
sys.exit(1)
SbbBinarizer(model_dir).run(image_path=input_image, use_patches=patches, save=output_image, dir_in=dir_in, dir_out=dir_out)
@main.command()
@click.option(
"--image",
"-i",
help="image filename",
type=click.Path(exists=True, dir_okay=False),
)
@click.option(
"--out",
"-o",
@@ -18,6 +97,12 @@ from eynollah.eynollah import Eynollah
type=click.Path(exists=True, file_okay=False),
required=True,
)
@click.option(
"--overwrite",
"-O",
help="overwrite (instead of skipping) if output xml exists",
is_flag=True,
)
@click.option(
"--dir_in",
"-di",
@@ -140,39 +225,44 @@ from eynollah.eynollah import Eynollah
help="if this parameter set to true, this tool would ignore page extraction",
)
@click.option(
"--log-level",
"--reading_order_machine_based/--heuristic_reading_order",
"-romb/-hro",
is_flag=True,
help="if this parameter set to true, this tool would apply machine based reading order detection",
)
@click.option(
"--do_ocr",
"-ocr/-noocr",
is_flag=True,
help="if this parameter set to true, this tool will try to do ocr",
)
@click.option(
"--num_col_upper",
"-ncu",
help="lower limit of columns in document image",
)
@click.option(
"--num_col_lower",
"-ncl",
help="upper limit of columns in document image",
)
@click.option(
"--skip_layout_and_reading_order",
"-slro/-noslro",
is_flag=True,
help="if this parameter set to true, this tool will ignore layout detection and reading order. It means that textline detection will be done within printspace and contours of textline will be written in xml output file.",
)
@click.option(
"--log_level",
"-l",
type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']),
help="Override log level globally to this",
)
def main(
image,
out,
dir_in,
model,
save_images,
save_layout,
save_deskewed,
save_all,
extract_only_images,
save_page,
enable_plotting,
allow_enhancement,
curved_line,
textline_light,
full_layout,
tables,
right2left,
input_binary,
allow_scaling,
headers_off,
light_version,
ignore_page_extraction,
log_level
):
if log_level:
setOverrideLogLevel(log_level)
def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_deskewed, save_all, extract_only_images, save_page, enable_plotting, allow_enhancement, curved_line, textline_light, full_layout, tables, right2left, input_binary, allow_scaling, headers_off, light_version, reading_order_machine_based, do_ocr, num_col_upper, num_col_lower, skip_layout_and_reading_order, ignore_page_extraction, log_level):
initLogging()
if log_level:
getLogger('eynollah').setLevel(getLevelName(log_level))
if not enable_plotting and (save_layout or save_deskewed or save_all or save_page or save_images or allow_enhancement):
print("Error: You used one of -sl, -sd, -sa, -sp, -si or -ae but did not enable plotting with -ep")
sys.exit(1)
@@ -182,11 +272,14 @@ def main(
if textline_light and not light_version:
print('Error: You used -tll to enable light textline detection but -light is not enabled')
sys.exit(1)
if light_version and not textline_light:
print('Error: You used -light without -tll. The light version needs light textline detection to be enabled.')
if extract_only_images and (allow_enhancement or allow_scaling or light_version or curved_line or textline_light or full_layout or tables or right2left or headers_off):
print('Error: You used -eoi which cannot be enabled alongside light_version -light or allow_scaling -as or allow_enhancement -ae or curved_line -cl or textline_light -tll or full_layout -fl or tables -tab or right2left -r2l or headers_off -ho')
sys.exit(1)
eynollah = Eynollah(
image_filename=image,
overwrite=overwrite,
dir_out=out,
dir_in=dir_in,
dir_models=model,
@@ -208,12 +301,85 @@ def main(
headers_off=headers_off,
light_version=light_version,
ignore_page_extraction=ignore_page_extraction,
reading_order_machine_based=reading_order_machine_based,
do_ocr=do_ocr,
num_col_upper=num_col_upper,
num_col_lower=num_col_lower,
skip_layout_and_reading_order=skip_layout_and_reading_order,
)
if dir_in:
eynollah.run()
else:
pcgts = eynollah.run()
eynollah.writer.write_pagexml(pcgts)
@main.command()
@click.option(
"--dir_in",
"-di",
help="directory of images",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--out",
"-o",
help="directory to write output xml data",
type=click.Path(exists=True, file_okay=False),
required=True,
)
@click.option(
"--dir_xmls",
"-dx",
help="directory of xmls",
type=click.Path(exists=True, file_okay=False),
)
@click.option(
"--model",
"-m",
help="directory of models",
type=click.Path(exists=True, file_okay=False),
required=True,
)
@click.option(
"--tr_ocr",
"-trocr/-notrocr",
is_flag=True,
help="if this parameter set to true, transformer ocr will be applied, otherwise cnn_rnn model.",
)
@click.option(
"--export_textline_images_and_text",
"-etit/-noetit",
is_flag=True,
help="if this parameter set to true, images and text in xml will be exported into output dir. This files can be used for training a OCR engine.",
)
@click.option(
"--do_not_mask_with_textline_contour",
"-nmtc/-mtc",
is_flag=True,
help="if this parameter set to true, cropped textline images will not be masked with textline contour.",
)
@click.option(
"--log_level",
"-l",
type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']),
help="Override log level globally to this",
)
def ocr(dir_in, out, dir_xmls, model, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, log_level):
if log_level:
setOverrideLogLevel(log_level)
initLogging()
eynollah_ocr = Eynollah_ocr(
dir_xmls=dir_xmls,
dir_in=dir_in,
dir_out=out,
dir_models=model,
tr_ocr=tr_ocr,
export_textline_images_and_text=export_textline_images_and_text,
do_not_mask_with_textline_contour=do_not_mask_with_textline_contour,
)
eynollah_ocr.run()
if __name__ == "__main__":
main()
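
Since main is now a click group, the former single entry point splits into subcommands (machine_based_reading_order, binarization, layout, ocr). A minimal sketch of exercising the new layout subcommand from a test, assuming the package is installed and models have been downloaded locally (paths are illustrative):

from click.testing import CliRunner
from eynollah.cli import main

runner = CliRunner()
result = runner.invoke(main, [
    'layout',
    '-i', 'tests/resources/kant_aufklaerung_1784_0020.tif',
    '-o', '.',
    '-m', './models_eynollah',
])
assert result.exit_code == 0, result.output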

File diff suppressed because it is too large

@@ -0,0 +1,47 @@
{
"version": "0.1.0",
"git_url": "https://github.com/qurator-spk/sbb_binarization",
"tools": {
"ocrd-sbb-binarize": {
"executable": "ocrd-sbb-binarize",
"description": "Pixelwise binarization with selectional auto-encoders in Keras",
"categories": ["Image preprocessing"],
"steps": ["preprocessing/optimization/binarization"],
"input_file_grp": [],
"output_file_grp": [],
"parameters": {
"operation_level": {
"type": "string",
"enum": ["page", "region"],
"default": "page",
"description": "PAGE XML hierarchy level to operate on"
},
"model": {
"description": "Directory containing HDF5 or SavedModel/ProtoBuf models. Can be an absolute path or a path relative to the OCR-D resource location, the current working directory or the $SBB_BINARIZE_DATA environment variable (if set)",
"type": "string",
"format": "uri",
"content-type": "text/directory",
"required": true
}
},
"resources": [
{
"url": "https://github.com/qurator-spk/sbb_binarization/releases/download/v0.0.11/saved_model_2020_01_16.zip",
"name": "default",
"type": "archive",
"path_in_archive": "saved_model_2020_01_16",
"size": 563147331,
"description": "default models provided by github.com/qurator-spk (SavedModel format)"
},
{
"url": "https://github.com/qurator-spk/sbb_binarization/releases/download/v0.0.11/saved_model_2021_03_09.zip",
"name": "default-2021-03-09",
"type": "archive",
"path_in_archive": ".",
"size": 133230419,
"description": "updated default models provided by github.com/qurator-spk (SavedModel format)"
}
]
}
}
}
}

@@ -28,6 +28,16 @@
"type": "boolean",
"default": true,
"description": "Try to detect all element subtypes, including drop-caps and headings"
},
"light_version": {
"type": "boolean",
"default": true,
"description": "Try to detect all element subtypes in light version"
},
"textline_light": {
"type": "boolean",
"default": true,
"description": "Light version need textline light"
},
"tables": {
"type": "boolean",

@@ -0,0 +1,158 @@
from os import environ
from os.path import join
from pathlib import Path
from pkg_resources import resource_string
from json import loads
from PIL import Image
import numpy as np
import cv2
from click import command
from ocrd_utils import (
getLogger,
assert_file_grp_cardinality,
make_file_id,
MIMETYPE_PAGE
)
from ocrd import Processor
from ocrd_modelfactory import page_from_file
from ocrd_models.ocrd_page import AlternativeImageType, to_xml
from ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor
from .sbb_binarize import SbbBinarizer
OCRD_TOOL = loads(resource_string(__name__, 'ocrd-tool-binarization.json').decode('utf8'))
TOOL = 'ocrd-sbb-binarize'
def cv2pil(img):
return Image.fromarray(img.astype('uint8'))
def pil2cv(img):
# from ocrd/workspace.py
color_conversion = cv2.COLOR_GRAY2BGR if img.mode in ('1', 'L') else cv2.COLOR_RGB2BGR
pil_as_np_array = np.array(img).astype('uint8') if img.mode == '1' else np.array(img)
return cv2.cvtColor(pil_as_np_array, color_conversion)
class SbbBinarizeProcessor(Processor):
def __init__(self, *args, **kwargs):
kwargs['ocrd_tool'] = OCRD_TOOL['tools'][TOOL]
kwargs['version'] = OCRD_TOOL['version']
super().__init__(*args, **kwargs)
if hasattr(self, 'output_file_grp'):
# processing context
self.setup()
def setup(self):
"""
Set up the model prior to processing.
"""
LOG = getLogger('processor.SbbBinarize.__init__')
if 'model' not in self.parameter:
raise ValueError("'model' parameter is required")
# resolve relative path via environment variable
model_path = Path(self.parameter['model'])
if not model_path.is_absolute():
if 'SBB_BINARIZE_DATA' in environ and environ['SBB_BINARIZE_DATA']:
LOG.info("Environment variable SBB_BINARIZE_DATA is set to '%s'" \
" - prepending to model value '%s'. If you don't want this mechanism," \
" unset the SBB_BINARIZE_DATA environment variable.",
environ['SBB_BINARIZE_DATA'], model_path)
model_path = Path(environ['SBB_BINARIZE_DATA']).joinpath(model_path)
model_path = model_path.resolve()
if not model_path.is_dir():
raise FileNotFoundError("Does not exist or is not a directory: %s" % model_path)
# resolve relative path via OCR-D ResourceManager
model_path = self.resolve_resource(str(model_path))
self.binarizer = SbbBinarizer(model_dir=model_path, logger=LOG)
def process(self):
"""
Binarize images with sbb_binarization (based on selectional auto-encoders).
For each page of the input file group, open and deserialize input PAGE-XML
and its respective images. Then iterate over the element hierarchy down to
the requested ``operation_level``.
For each segment element, retrieve a raw (non-binarized) segment image
according to the layout annotation (from an existing ``AlternativeImage``,
or by cropping into the higher-level images, and deskewing when applicable).
Pass the image to the binarizer (which runs in fixed-size windows/patches
across the image and stitches the results together).
Serialize the resulting bilevel image as PNG file and add it to the output
file group (with file ID suffix ``.IMG-BIN``) along with the output PAGE-XML
(referencing it as new ``AlternativeImage`` for the segment element).
Produce a new PAGE output file by serialising the resulting hierarchy.
"""
LOG = getLogger('processor.SbbBinarize')
assert_file_grp_cardinality(self.input_file_grp, 1)
assert_file_grp_cardinality(self.output_file_grp, 1)
oplevel = self.parameter['operation_level']
for n, input_file in enumerate(self.input_files):
file_id = make_file_id(input_file, self.output_file_grp)
page_id = input_file.pageId or input_file.ID
LOG.info("INPUT FILE %i / %s", n, page_id)
pcgts = page_from_file(self.workspace.download_file(input_file))
self.add_metadata(pcgts)
pcgts.set_pcGtsId(file_id)
page = pcgts.get_Page()
page_image, page_xywh, _ = self.workspace.image_from_page(page, page_id, feature_filter='binarized')
if oplevel == 'page':
LOG.info("Binarizing on 'page' level in page '%s'", page_id)
bin_image = cv2pil(self.binarizer.run(image=pil2cv(page_image), use_patches=True))
# update METS (add the image file):
bin_image_path = self.workspace.save_image_file(bin_image,
file_id + '.IMG-BIN',
page_id=input_file.pageId,
file_grp=self.output_file_grp)
page.add_AlternativeImage(AlternativeImageType(filename=bin_image_path, comments='%s,binarized' % page_xywh['features']))
elif oplevel == 'region':
regions = page.get_AllRegions(['Text', 'Table'], depth=1)
if not regions:
LOG.warning("Page '%s' contains no text/table regions", page_id)
for region in regions:
region_image, region_xywh = self.workspace.image_from_segment(region, page_image, page_xywh, feature_filter='binarized')
region_image_bin = cv2pil(self.binarizer.run(image=pil2cv(region_image), use_patches=True))
region_image_bin_path = self.workspace.save_image_file(
region_image_bin,
"%s_%s.IMG-BIN" % (file_id, region.id),
page_id=input_file.pageId,
file_grp=self.output_file_grp)
region.add_AlternativeImage(
AlternativeImageType(filename=region_image_bin_path, comments='%s,binarized' % region_xywh['features']))
elif oplevel == 'line':
region_line_tuples = [(r.id, r.get_TextLine()) for r in page.get_AllRegions(['Text'], depth=0)]
if not region_line_tuples:
LOG.warning("Page '%s' contains no text lines", page_id)
for region_id, line in region_line_tuples:
line_image, line_xywh = self.workspace.image_from_segment(line, page_image, page_xywh, feature_filter='binarized')
line_image_bin = cv2pil(self.binarizer.run(image=pil2cv(line_image), use_patches=True))
line_image_bin_path = self.workspace.save_image_file(
line_image_bin,
"%s_%s_%s.IMG-BIN" % (file_id, region_id, line.id),
page_id=input_file.pageId,
file_grp=self.output_file_grp)
line.add_AlternativeImage(
AlternativeImageType(filename=line_image_bin_path, comments='%s,binarized' % line_xywh['features']))
self.workspace.add_file(
ID=file_id,
file_grp=self.output_file_grp,
pageId=input_file.pageId,
mimetype=MIMETYPE_PAGE,
local_filename=join(self.output_file_grp, file_id + '.xml'),
content=to_xml(pcgts))
@command()
@ocrd_cli_options
def cli(*args, **kwargs):
return ocrd_cli_wrap_processor(SbbBinarizeProcessor, *args, **kwargs)
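
The cv2pil/pil2cv helpers above bridge the two image representations in play: the OCR-D workspace API yields PIL images, while SbbBinarizer consumes and returns OpenCV-style ndarrays. A hedged sketch of the same round trip outside the processor (file and model paths are assumptions, not from the diff):

from PIL import Image
from eynollah.ocrd_cli_binarization import cv2pil, pil2cv
from eynollah.sbb_binarize import SbbBinarizer

page_image = Image.open('page.png')              # PIL image, as image_from_page would return
binarizer = SbbBinarizer(model_dir='./default')  # directory containing SavedModel subdirectories
bin_image = cv2pil(binarizer.run(image=pil2cv(page_image), use_patches=True))
bin_image.save('page.IMG-BIN.png')               # bilevel result as PNG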

@@ -45,10 +45,13 @@ class EynollahProcessor(Processor):
image_filename = self.workspace.download_file(next(self.workspace.mets.find_files(local_filename=page.imageFilename))).local_filename
eynollah_kwargs = {
'dir_models': self.resolve_resource(self.parameter['models']),
'dir_out': self.output_file_grp,
'allow_enhancement': False,
'curved_line': self.parameter['curved_line'],
'full_layout': self.parameter['full_layout'],
'allow_scaling': self.parameter['allow_scaling'],
'light_version': self.parameter['light_version'],
'textline_light': self.parameter['textline_light'],
'headers_off': self.parameter['headers_off'],
'tables': self.parameter['tables'],
'override_dpi': self.parameter['dpi'],

@@ -0,0 +1,383 @@
"""
Tool to load model and binarize a given image.
"""
import sys
from glob import glob
from os import environ, devnull
from os.path import join
from warnings import catch_warnings, simplefilter
import os
import numpy as np
from PIL import Image
import cv2
environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
stderr = sys.stderr
sys.stderr = open(devnull, 'w')
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.python.keras import backend as tensorflow_backend
sys.stderr = stderr
import logging
def resize_image(img_in, input_height, input_width):
return cv2.resize(img_in, (input_width, input_height), interpolation=cv2.INTER_NEAREST)
class SbbBinarizer:
def __init__(self, model_dir, logger=None):
self.model_dir = model_dir
self.log = logger if logger else logging.getLogger('SbbBinarizer')
self.start_new_session()
self.model_files = glob(self.model_dir+"/*/", recursive = True)
self.models = []
for model_file in self.model_files:
self.models.append(self.load_model(model_file))
def start_new_session(self):
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
self.session = tf.compat.v1.Session(config=config) # tf.InteractiveSession()
tensorflow_backend.set_session(self.session)
def end_session(self):
tensorflow_backend.clear_session()
self.session.close()
del self.session
def load_model(self, model_name):
model = load_model(join(self.model_dir, model_name), compile=False)
model_height = model.layers[len(model.layers)-1].output_shape[1]
model_width = model.layers[len(model.layers)-1].output_shape[2]
n_classes = model.layers[len(model.layers)-1].output_shape[3]
return model, model_height, model_width, n_classes
def predict(self, model_in, img, use_patches, n_batch_inference=5):
tensorflow_backend.set_session(self.session)
model, model_height, model_width, n_classes = model_in
img_org_h = img.shape[0]
img_org_w = img.shape[1]
if img.shape[0] < model_height and img.shape[1] >= model_width:
img_padded = np.zeros(( model_height, img.shape[1], img.shape[2] ))
index_start_h = int( abs( img.shape[0] - model_height) /2.)
index_start_w = 0
img_padded [ index_start_h: index_start_h+img.shape[0], :, : ] = img[:,:,:]
elif img.shape[0] >= model_height and img.shape[1] < model_width:
img_padded = np.zeros(( img.shape[0], model_width, img.shape[2] ))
index_start_h = 0
index_start_w = int( abs( img.shape[1] - model_width) /2.)
img_padded [ :, index_start_w: index_start_w+img.shape[1], : ] = img[:,:,:]
elif img.shape[0] < model_height and img.shape[1] < model_width:
img_padded = np.zeros(( model_height, model_width, img.shape[2] ))
index_start_h = int( abs( img.shape[0] - model_height) /2.)
index_start_w = int( abs( img.shape[1] - model_width) /2.)
img_padded [ index_start_h: index_start_h+img.shape[0], index_start_w: index_start_w+img.shape[1], : ] = img[:,:,:]
else:
index_start_h = 0
index_start_w = 0
img_padded = np.copy(img)
img = np.copy(img_padded)
if use_patches:
margin = int(0.1 * model_width)
width_mid = model_width - 2 * margin
height_mid = model_height - 2 * margin
img = img / float(255.0)
img_h = img.shape[0]
img_w = img.shape[1]
prediction_true = np.zeros((img_h, img_w, 3))
mask_true = np.zeros((img_h, img_w))
nxf = img_w / float(width_mid)
nyf = img_h / float(height_mid)
if nxf > int(nxf):
nxf = int(nxf) + 1
else:
nxf = int(nxf)
if nyf > int(nyf):
nyf = int(nyf) + 1
else:
nyf = int(nyf)
list_i_s = []
list_j_s = []
list_x_u = []
list_x_d = []
list_y_u = []
list_y_d = []
batch_indexer = 0
img_patch = np.zeros((n_batch_inference, model_height, model_width,3))
for i in range(nxf):
for j in range(nyf):
if i == 0:
index_x_d = i * width_mid
index_x_u = index_x_d + model_width
elif i > 0:
index_x_d = i * width_mid
index_x_u = index_x_d + model_width
if j == 0:
index_y_d = j * height_mid
index_y_u = index_y_d + model_height
elif j > 0:
index_y_d = j * height_mid
index_y_u = index_y_d + model_height
if index_x_u > img_w:
index_x_u = img_w
index_x_d = img_w - model_width
if index_y_u > img_h:
index_y_u = img_h
index_y_d = img_h - model_height
list_i_s.append(i)
list_j_s.append(j)
list_x_u.append(index_x_u)
list_x_d.append(index_x_d)
list_y_d.append(index_y_d)
list_y_u.append(index_y_u)
img_patch[batch_indexer,:,:,:] = img[index_y_d:index_y_u, index_x_d:index_x_u, :]
batch_indexer = batch_indexer + 1
if batch_indexer == n_batch_inference:
label_p_pred = model.predict(img_patch,verbose=0)
seg = np.argmax(label_p_pred, axis=3)
#print(seg.shape, len(seg), len(list_i_s))
indexer_inside_batch = 0
for i_batch, j_batch in zip(list_i_s, list_j_s):
seg_in = seg[indexer_inside_batch,:,:]
seg_color = np.repeat(seg_in[:, :, np.newaxis], 3, axis=2)
index_y_u_in = list_y_u[indexer_inside_batch]
index_y_d_in = list_y_d[indexer_inside_batch]
index_x_u_in = list_x_u[indexer_inside_batch]
index_x_d_in = list_x_d[indexer_inside_batch]
if i_batch == 0 and j_batch == 0:
seg_color = seg_color[0 : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :]
prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
elif i_batch == nxf - 1 and j_batch == nyf - 1:
seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - 0, :]
prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
elif i_batch == 0 and j_batch == nyf - 1:
seg_color = seg_color[margin : seg_color.shape[0] - 0, 0 : seg_color.shape[1] - margin, :]
prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
elif i_batch == nxf - 1 and j_batch == 0:
seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :]
prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
elif i_batch == 0 and j_batch != 0 and j_batch != nyf - 1:
seg_color = seg_color[margin : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :]
prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
elif i_batch == nxf - 1 and j_batch != 0 and j_batch != nyf - 1:
seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :]
prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
elif i_batch != 0 and i_batch != nxf - 1 and j_batch == 0:
seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :]
prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color
elif i_batch != 0 and i_batch != nxf - 1 and j_batch == nyf - 1:
seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - margin, :]
prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color
else:
seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :]
prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color
indexer_inside_batch = indexer_inside_batch +1
list_i_s = []
list_j_s = []
list_x_u = []
list_x_d = []
list_y_u = []
list_y_d = []
batch_indexer = 0
img_patch = np.zeros((n_batch_inference, model_height, model_width,3))
elif i==(nxf-1) and j==(nyf-1):
label_p_pred = model.predict(img_patch,verbose=0)
seg = np.argmax(label_p_pred, axis=3)
#print(seg.shape, len(seg), len(list_i_s))
indexer_inside_batch = 0
for i_batch, j_batch in zip(list_i_s, list_j_s):
seg_in = seg[indexer_inside_batch,:,:]
seg_color = np.repeat(seg_in[:, :, np.newaxis], 3, axis=2)
index_y_u_in = list_y_u[indexer_inside_batch]
index_y_d_in = list_y_d[indexer_inside_batch]
index_x_u_in = list_x_u[indexer_inside_batch]
index_x_d_in = list_x_d[indexer_inside_batch]
if i_batch == 0 and j_batch == 0:
seg_color = seg_color[0 : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :]
prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
elif i_batch == nxf - 1 and j_batch == nyf - 1:
seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - 0, :]
prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
elif i_batch == 0 and j_batch == nyf - 1:
seg_color = seg_color[margin : seg_color.shape[0] - 0, 0 : seg_color.shape[1] - margin, :]
prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
elif i_batch == nxf - 1 and j_batch == 0:
seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :]
prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
elif i_batch == 0 and j_batch != 0 and j_batch != nyf - 1:
seg_color = seg_color[margin : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :]
prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
elif i_batch == nxf - 1 and j_batch != 0 and j_batch != nyf - 1:
seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :]
prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
elif i_batch != 0 and i_batch != nxf - 1 and j_batch == 0:
seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :]
prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color
elif i_batch != 0 and i_batch != nxf - 1 and j_batch == nyf - 1:
seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - margin, :]
prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color
else:
seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :]
prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color
indexer_inside_batch = indexer_inside_batch +1
list_i_s = []
list_j_s = []
list_x_u = []
list_x_d = []
list_y_u = []
list_y_d = []
batch_indexer = 0
img_patch = np.zeros((n_batch_inference, model_height, model_width,3))
prediction_true = prediction_true[index_start_h: index_start_h+img_org_h, index_start_w: index_start_w+img_org_w,:]
prediction_true = prediction_true.astype(np.uint8)
else:
img_h_page = img.shape[0]
img_w_page = img.shape[1]
img = img / float(255.0)
img = resize_image(img, model_height, model_width)
label_p_pred = model.predict(img.reshape(1, img.shape[0], img.shape[1], img.shape[2]))
seg = np.argmax(label_p_pred, axis=3)[0]
seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2)
prediction_true = resize_image(seg_color, img_h_page, img_w_page)
prediction_true = prediction_true.astype(np.uint8)
return prediction_true[:,:,0]
def run(self, image=None, image_path=None, save=None, use_patches=False, dir_in=None, dir_out=None):
print(dir_in,'dir_in')
if not dir_in:
if (image is not None and image_path is not None) or \
(image is None and image_path is None):
raise ValueError("Must pass either a opencv2 image or an image_path")
if image_path is not None:
image = cv2.imread(image_path)
img_last = 0
for n, (model, model_file) in enumerate(zip(self.models, self.model_files)):
self.log.info('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.model_files)))
res = self.predict(model, image, use_patches)
img_fin = np.zeros((res.shape[0], res.shape[1], 3))
res[:, :][res[:, :] == 0] = 2
res = res - 1
res = res * 255
img_fin[:, :, 0] = res
img_fin[:, :, 1] = res
img_fin[:, :, 2] = res
img_fin = img_fin.astype(np.uint8)
img_fin = (res[:, :] == 0) * 255
img_last = img_last + img_fin
kernel = np.ones((5, 5), np.uint8)
img_last[:, :][img_last[:, :] > 0] = 255
img_last = (img_last[:, :] == 0) * 255
if save:
cv2.imwrite(save, img_last)
return img_last
else:
ls_imgs = os.listdir(dir_in)
for image_name in ls_imgs:
image_stem = image_name.split('.')[0]
print(image_name,'image_name')
image = cv2.imread(os.path.join(dir_in,image_name) )
img_last = 0
for n, (model, model_file) in enumerate(zip(self.models, self.model_files)):
self.log.info('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.model_files)))
res = self.predict(model, image, use_patches)
img_fin = np.zeros((res.shape[0], res.shape[1], 3))
res[:, :][res[:, :] == 0] = 2
res = res - 1
res = res * 255
img_fin[:, :, 0] = res
img_fin[:, :, 1] = res
img_fin[:, :, 2] = res
img_fin = img_fin.astype(np.uint8)
img_fin = (res[:, :] == 0) * 255
img_last = img_last + img_fin
kernel = np.ones((5, 5), np.uint8)
img_last[:, :][img_last[:, :] > 0] = 255
img_last = (img_last[:, :] == 0) * 255
cv2.imwrite(os.path.join(dir_out,image_stem+'.png'), img_last)
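
predict() above tiles the input into model-sized windows with a 10% overlap margin, runs them in batches, and stitches the argmax maps back so that only each window's inner (non-margin) part is kept. The grid bookkeeping reduces to the following standalone sketch of the same index arithmetic (an illustration, not the library code):

import math

def patch_grid(img_h, img_w, model_h, model_w, margin_frac=0.1):
    # Overlapping tiles whose inner parts cover the image exactly once;
    # the last row/column is clamped to end at the image edge.
    margin = int(margin_frac * model_w)
    width_mid = model_w - 2 * margin
    height_mid = model_h - 2 * margin
    nxf = math.ceil(img_w / width_mid)
    nyf = math.ceil(img_h / height_mid)
    for i in range(nxf):
        for j in range(nyf):
            x_d, y_d = i * width_mid, j * height_mid
            x_u, y_u = x_d + model_w, y_d + model_h
            if x_u > img_w:
                x_u, x_d = img_w, img_w - model_w
            if y_u > img_h:
                y_u, y_d = img_h, img_h - model_h
            yield i, j, x_d, y_d, x_u, y_u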

File diff suppressed because it is too large

@@ -1,10 +1,10 @@
from functools import partial
import cv2
import numpy as np
from shapely import geometry
from .rotate import rotate_image, rotation_image_new
from multiprocessing import Process, Queue, cpu_count
from multiprocessing import Pool
def contours_in_same_horizon(cy_main_hor):
X1 = np.zeros((len(cy_main_hor), len(cy_main_hor)))
X2 = np.zeros((len(cy_main_hor), len(cy_main_hor)))
@@ -27,37 +27,33 @@ def find_contours_mean_y_diff(contours_main):
cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))]
return np.mean(np.diff(np.sort(np.array(cy_main))))
def get_text_region_boxes_by_given_contours(contours):
kernel = np.ones((5, 5), np.uint8)
boxes = []
contours_new = []
for jj in range(len(contours)):
x, y, w, h = cv2.boundingRect(contours[jj])
boxes.append([x, y, w, h])
box = cv2.boundingRect(contours[jj])
boxes.append(box)
contours_new.append(contours[jj])
del contours
return boxes, contours_new
def filter_contours_area_of_image(image, contours, hierarchy, max_area, min_area):
found_polygons_early = list()
found_polygons_early = []
for jv,c in enumerate(contours):
if len(c) < 3: # A polygon cannot have less than 3 points
continue
polygon = geometry.Polygon([point[0] for point in c])
area = polygon.area
if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]) and hierarchy[0][jv][3] == -1: # and hierarchy[0][jv][3]==-1 :
found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.uint))
if (area >= min_area * np.prod(image.shape[:2]) and
area <= max_area * np.prod(image.shape[:2]) and
hierarchy[0][jv][3] == -1):
found_polygons_early.append(np.array([[point]
for point in polygon.exterior.coords], dtype=np.uint))
return found_polygons_early
def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, min_area):
found_polygons_early = list()
found_polygons_early = []
for jv,c in enumerate(contours):
if len(c) < 3: # A polygon cannot have less than 3 points
continue
@@ -68,48 +64,59 @@ def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, min_area):
##print(np.prod(thresh.shape[:2]))
# Check that polygon has area greater than minimal area
# print(hierarchy[0][jv][3],hierarchy )
if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 :
if (area >= min_area * np.prod(image.shape[:2]) and
area <= max_area * np.prod(image.shape[:2]) and
# hierarchy[0][jv][3]==-1
True):
# print(c[0][0][1])
found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.int32))
found_polygons_early.append(np.array([[point]
for point in polygon.exterior.coords], dtype=np.int32))
return found_polygons_early
def find_new_features_of_contours(contours_main):
areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))])
M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))]
cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))]
cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))]
areas_main = np.array([cv2.contourArea(contours_main[j])
for j in range(len(contours_main))])
M_main = [cv2.moments(contours_main[j])
for j in range(len(contours_main))]
cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32))
for j in range(len(M_main))]
cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32))
for j in range(len(M_main))]
try:
x_min_main = np.array([np.min(contours_main[j][:, 0, 0]) for j in range(len(contours_main))])
argmin_x_main = np.array([np.argmin(contours_main[j][:, 0, 0]) for j in range(len(contours_main))])
x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0, 0] for j in range(len(contours_main))])
y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0, 1] for j in range(len(contours_main))])
x_max_main = np.array([np.max(contours_main[j][:, 0, 0]) for j in range(len(contours_main))])
y_min_main = np.array([np.min(contours_main[j][:, 0, 1]) for j in range(len(contours_main))])
y_max_main = np.array([np.max(contours_main[j][:, 0, 1]) for j in range(len(contours_main))])
x_min_main = np.array([np.min(contours_main[j][:, 0, 0])
for j in range(len(contours_main))])
argmin_x_main = np.array([np.argmin(contours_main[j][:, 0, 0])
for j in range(len(contours_main))])
x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0, 0]
for j in range(len(contours_main))])
y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0, 1]
for j in range(len(contours_main))])
x_max_main = np.array([np.max(contours_main[j][:, 0, 0])
for j in range(len(contours_main))])
y_min_main = np.array([np.min(contours_main[j][:, 0, 1])
for j in range(len(contours_main))])
y_max_main = np.array([np.max(contours_main[j][:, 0, 1])
for j in range(len(contours_main))])
except:
x_min_main = np.array([np.min(contours_main[j][:, 0]) for j in range(len(contours_main))])
argmin_x_main = np.array([np.argmin(contours_main[j][:, 0]) for j in range(len(contours_main))])
x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0] for j in range(len(contours_main))])
y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 1] for j in range(len(contours_main))])
x_max_main = np.array([np.max(contours_main[j][:, 0]) for j in range(len(contours_main))])
y_min_main = np.array([np.min(contours_main[j][:, 1]) for j in range(len(contours_main))])
y_max_main = np.array([np.max(contours_main[j][:, 1]) for j in range(len(contours_main))])
x_min_main = np.array([np.min(contours_main[j][:, 0])
for j in range(len(contours_main))])
argmin_x_main = np.array([np.argmin(contours_main[j][:, 0])
for j in range(len(contours_main))])
x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0]
for j in range(len(contours_main))])
y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 1]
for j in range(len(contours_main))])
x_max_main = np.array([np.max(contours_main[j][:, 0])
for j in range(len(contours_main))])
y_min_main = np.array([np.min(contours_main[j][:, 1])
for j in range(len(contours_main))])
y_max_main = np.array([np.max(contours_main[j][:, 1])
for j in range(len(contours_main))])
# dis_x=np.abs(x_max_main-x_min_main)
return cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin
def find_features_of_contours(contours_main):
def find_features_of_contours(contours_main):
areas_main=np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))])
M_main=[cv2.moments(contours_main[j]) for j in range(len(contours_main))]
cx_main=[(M_main[j]['m10']/(M_main[j]['m00']+1e-32)) for j in range(len(M_main))]
@@ -120,14 +127,15 @@ def find_features_of_contours(contours_main):
y_min_main=np.array([np.min(contours_main[j][:,0,1]) for j in range(len(contours_main))])
y_max_main=np.array([np.max(contours_main[j][:,0,1]) for j in range(len(contours_main))])
return y_min_main, y_max_main
def return_parent_contours(contours, hierarchy):
contours_parent = [contours[i] for i in range(len(contours)) if hierarchy[0][i][3] == -1]
contours_parent = [contours[i]
for i in range(len(contours))
if hierarchy[0][i][3] == -1]
return contours_parent
def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002):
# pixels of images are identified by 5
if len(region_pre_p.shape) == 3:
cnts_images = (region_pre_p[:, :, 0] == pixel) * 1
@@ -139,80 +147,16 @@ def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002):
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours_imgs = return_parent_contours(contours_imgs, hierarchy)
contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=min_area)
contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy,
max_area=1, min_area=min_area)
return contours_imgs
def do_work_of_contours_in_image(queue_of_all_params, contours_per_process, indexes_r_con_per_pro, img, slope_first):
cnts_org_per_each_subprocess = []
index_by_text_region_contours = []
for mv in range(len(contours_per_process)):
index_by_text_region_contours.append(indexes_r_con_per_pro[mv])
img_copy = np.zeros(img.shape)
img_copy = cv2.fillPoly(img_copy, pts=[contours_per_process[mv]], color=(1, 1, 1))
img_copy = rotation_image_new(img_copy, -slope_first)
img_copy = img_copy.astype(np.uint8)
imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
cnts_org_per_each_subprocess.append(cont_int[0])
queue_of_all_params.put([ cnts_org_per_each_subprocess, index_by_text_region_contours])
def get_textregion_contours_in_org_image_multi(cnts, img, slope_first):
num_cores = cpu_count()
queue_of_all_params = Queue()
processes = []
nh = np.linspace(0, len(cnts), num_cores + 1)
indexes_by_text_con = np.array(range(len(cnts)))
for i in range(num_cores):
contours_per_process = cnts[int(nh[i]) : int(nh[i + 1])]
indexes_text_con_per_process = indexes_by_text_con[int(nh[i]) : int(nh[i + 1])]
processes.append(Process(target=do_work_of_contours_in_image, args=(queue_of_all_params, contours_per_process, indexes_text_con_per_process, img,slope_first )))
for i in range(num_cores):
processes[i].start()
cnts_org = []
all_index_text_con = []
for i in range(num_cores):
list_all_par = queue_of_all_params.get(True)
contours_for_sub_process = list_all_par[0]
indexes_for_sub_process = list_all_par[1]
for j in range(len(contours_for_sub_process)):
cnts_org.append(contours_for_sub_process[j])
all_index_text_con.append(indexes_for_sub_process[j])
for i in range(num_cores):
processes[i].join()
print(all_index_text_con)
return cnts_org
def loop_contour_image(index_l, cnts,img, slope_first):
def do_work_of_contours_in_image(contour, index_r_con, img, slope_first):
img_copy = np.zeros(img.shape)
img_copy = cv2.fillPoly(img_copy, pts=[cnts[index_l]], color=(1, 1, 1))
# plt.imshow(img_copy)
# plt.show()
img_copy = cv2.fillPoly(img_copy, pts=[contour], color=(1, 1, 1))
# print(img.shape,'img')
img_copy = rotation_image_new(img_copy, -slope_first)
##print(img_copy.shape,'img_copy')
# plt.imshow(img_copy)
# plt.show()
img_copy = img_copy.astype(np.uint8)
imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
@@ -221,20 +165,20 @@ def loop_contour_image(index_l, cnts,img, slope_first):
cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
# print(np.shape(cont_int[0]))
return cont_int[0]
def get_textregion_contours_in_org_image_multi2(cnts, img, slope_first):
return cont_int[0], index_r_con
cnts_org = []
# print(cnts,'cnts')
with Pool(cpu_count()) as p:
cnts_org = p.starmap(loop_contour_image, [(index_l,cnts, img,slope_first) for index_l in range(len(cnts))])
return cnts_org
def get_textregion_contours_in_org_image_multi(cnts, img, slope_first, map=map):
if not len(cnts):
return [], []
results = map(partial(do_work_of_contours_in_image,
img=img,
slope_first=slope_first,
),
cnts, range(len(cnts)))
return tuple(zip(*results))
def get_textregion_contours_in_org_image(cnts, img, slope_first):
cnts_org = []
# print(cnts,'cnts')
for i in range(len(cnts)):
@@ -255,7 +199,6 @@ def get_textregion_contours_in_org_image(cnts, img, slope_first):
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
# print(np.shape(cont_int[0]))
@@ -263,45 +206,57 @@ def get_textregion_contours_in_org_image(cnts, img, slope_first):
return cnts_org
def get_textregion_contours_in_org_image_light(cnts, img, slope_first):
h_o = img.shape[0]
w_o = img.shape[1]
img = cv2.resize(img, (int(img.shape[1]/3.), int(img.shape[0]/3.)), interpolation=cv2.INTER_NEAREST)
##cnts = list( (np.array(cnts)/2).astype(np.int16) )
#cnts = cnts/2
cnts = [(i/ 3).astype(np.int32) for i in cnts]
def get_textregion_contours_in_org_image_light_old(cnts, img, slope_first):
zoom = 3
img = cv2.resize(img, (img.shape[1] // zoom,
img.shape[0] // zoom),
interpolation=cv2.INTER_NEAREST)
cnts_org = []
#print(cnts,'cnts')
for i in range(len(cnts)):
for cnt in cnts:
img_copy = np.zeros(img.shape)
img_copy = cv2.fillPoly(img_copy, pts=[cnts[i]], color=(1, 1, 1))
# plt.imshow(img_copy)
# plt.show()
# print(img.shape,'img')
img_copy = rotation_image_new(img_copy, -slope_first)
##print(img_copy.shape,'img_copy')
# plt.imshow(img_copy)
# plt.show()
img_copy = cv2.fillPoly(img_copy, pts=[(cnt / zoom).astype(int)], color=(1, 1, 1))
img_copy = img_copy.astype(np.uint8)
img_copy = rotation_image_new(img_copy, -slope_first).astype(np.uint8)
imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
# print(np.shape(cont_int[0]))
cnts_org.append(cont_int[0]*3)
cnts_org.append(cont_int[0] * zoom)
return cnts_org
def return_contours_of_interested_textline(region_pre_p, pixel):
def do_back_rotation_and_get_cnt_back(contour_par, index_r_con, img, slope_first):
img_copy = np.zeros(img.shape)
img_copy = cv2.fillPoly(img_copy, pts=[contour_par], color=(1, 1, 1))
img_copy = rotation_image_new(img_copy, -slope_first).astype(np.uint8)
imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
# print(np.shape(cont_int[0]))
return cont_int[0], index_r_con
def get_textregion_contours_in_org_image_light(cnts, img, slope_first, map=map):
if not len(cnts):
return []
img = cv2.resize(img, (int(img.shape[1]/6), int(img.shape[0]/6)), interpolation=cv2.INTER_NEAREST)
##cnts = list( (np.array(cnts)/2).astype(np.int16) )
#cnts = cnts/2
cnts = [(i/6).astype(np.int32) for i in cnts]
results = map(partial(do_back_rotation_and_get_cnt_back,
img=img,
slope_first=slope_first,
),
cnts, range(len(cnts)))
contours, indexes = tuple(zip(*results))
return [i*6 for i in contours]
def return_contours_of_interested_textline(region_pre_p, pixel):
# pixels of images are identified by 5
if len(region_pre_p.shape) == 3:
cnts_images = (region_pre_p[:, :, 0] == pixel) * 1
@@ -314,11 +269,11 @@ def return_contours_of_interested_textline(region_pre_p, pixel):
contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours_imgs = return_parent_contours(contours_imgs, hierarchy)
contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=0.000000003)
contours_imgs = filter_contours_area_of_image_tables(
thresh, contours_imgs, hierarchy, max_area=1, min_area=0.000000003)
return contours_imgs
def return_contours_of_image(image):
if len(image.shape) == 2:
image = np.repeat(image[:, :, np.newaxis], 3, axis=2)
image = image.astype(np.uint8)
@@ -330,7 +285,6 @@ def return_contours_of_image(image):
return contours, hierarchy
def return_contours_of_interested_region_by_min_size(region_pre_p, pixel, min_size=0.00003):
# pixels of images are identified by 5
if len(region_pre_p.shape) == 3:
cnts_images = (region_pre_p[:, :, 0] == pixel) * 1
@@ -342,14 +296,13 @@ def return_contours_of_interested_region_by_min_size(region_pre_p, pixel, min_size=0.00003):
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours_imgs = return_parent_contours(contours_imgs, hierarchy)
contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=min_size)
contours_imgs = filter_contours_area_of_image_tables(
thresh, contours_imgs, hierarchy, max_area=1, min_area=min_size)
return contours_imgs
def return_contours_of_interested_region_by_size(region_pre_p, pixel, min_area, max_area):
# pixels of images are identified by 5
if len(region_pre_p.shape) == 3:
cnts_images = (region_pre_p[:, :, 0] == pixel) * 1
@@ -362,9 +315,11 @@ def return_contours_of_interested_region_by_size(region_pre_p, pixel, min_area, max_area):
contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours_imgs = return_parent_contours(contours_imgs, hierarchy)
contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=max_area, min_area=min_area)
contours_imgs = filter_contours_area_of_image_tables(
thresh, contours_imgs, hierarchy, max_area=max_area, min_area=min_area)
img_ret = np.zeros((region_pre_p.shape[0], region_pre_p.shape[1], 3))
img_ret = cv2.fillPoly(img_ret, pts=contours_imgs, color=(1, 1, 1))
return img_ret[:, :, 0]
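
With the Queue/Process plumbing replaced by a map callable, get_textregion_contours_in_org_image_multi and get_textregion_contours_in_org_image_light let the caller pick the execution backend: the builtin map (the default) runs serially, while an Executor-style map parallelises the per-contour back-rotation. A hedged usage sketch with loky, which requirements.txt now pulls in (cnts, img and slope_first assumed in scope):

from loky import get_reusable_executor

executor = get_reusable_executor(max_workers=4)
contours, indexes = get_textregion_contours_in_org_image_multi(
    cnts, img, slope_first, map=executor.map)

Note that multiprocessing.Pool.map would not fit here: these functions pass two iterables to map, which Executor.map (concurrent.futures or loky) supports but Pool.map does not.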

@@ -4,6 +4,7 @@ from .contour import (
find_new_features_of_contours,
return_contours_of_image,
return_parent_contours,
return_contours_of_interested_region,
)
def adhere_drop_capital_region_into_corresponding_textline(
@@ -17,6 +18,7 @@ def adhere_drop_capital_region_into_corresponding_textline(
all_found_textline_polygons_h,
kernel=None,
curved_line=False,
textline_light=False,
):
# print(np.shape(all_found_textline_polygons),np.shape(all_found_textline_polygons[3]),'all_found_textline_polygonsshape')
# print(all_found_textline_polygons[3])
@@ -76,7 +78,7 @@ def adhere_drop_capital_region_into_corresponding_textline(
# region_with_intersected_drop=region_with_intersected_drop/3
region_with_intersected_drop = region_with_intersected_drop.astype(np.uint8)
# print(np.unique(img_con_all_copy[:,:,0]))
if curved_line:
if curved_line or textline_light:
if len(region_with_intersected_drop) > 1:
sum_pixels_of_intersection = []
@@ -114,12 +116,17 @@ def adhere_drop_capital_region_into_corresponding_textline(
img_textlines = cv2.fillPoly(img_textlines, pts=[polygons_of_drop_capitals[i_drop]], color=(255, 255, 255))
img_textlines = img_textlines.astype(np.uint8)
imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
contours_combined = return_contours_of_interested_region(img_textlines, 255, 0)
#plt.imshow(img_textlines)
#plt.show()
#imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
#ret, thresh = cv2.threshold(imgray, 0, 255, 0)
contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# print(len(contours_combined),'len textlines mixed')
areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))])
contours_biggest = contours_combined[np.argmax(areas_cnt_text)]
@@ -130,8 +137,13 @@ def adhere_drop_capital_region_into_corresponding_textline(
# contours_biggest[:,0,1]=contours_biggest[:,0,1]#-all_box_coord[int(region_final)][0]
# contours_biggest=contours_biggest.reshape(np.shape(contours_biggest)[0],np.shape(contours_biggest)[2])
all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest
if len(contours_combined)==1:
all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest
elif len(contours_combined)==2:
all_found_textline_polygons[int(region_final)].insert(arg_min, polygons_of_drop_capitals[i_drop] )
else:
pass
except:
# print('gordun1')
@@ -167,14 +179,13 @@ def adhere_drop_capital_region_into_corresponding_textline(
img_textlines = img_textlines.astype(np.uint8)
# plt.imshow(img_textlines)
# plt.show()
imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
contours_combined = return_contours_of_interested_region(img_textlines, 255, 0)
##imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
##ret, thresh = cv2.threshold(imgray, 0, 255, 0)
contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
##contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# print(len(contours_combined),'len textlines mixed')
areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))])
contours_biggest = contours_combined[np.argmax(areas_cnt_text)]
@@ -186,7 +197,12 @@ def adhere_drop_capital_region_into_corresponding_textline(
# print(np.shape(contours_biggest),'contours_biggest')
# print(np.shape(all_found_textline_polygons[int(region_final)][arg_min]))
##contours_biggest=contours_biggest.reshape(np.shape(contours_biggest)[0],np.shape(contours_biggest)[2])
all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest
if len(contours_combined)==1:
all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest
elif len(contours_combined)==2:
all_found_textline_polygons[int(region_final)].insert(arg_min, polygons_of_drop_capitals[i_drop] )
else:
pass
except:
pass
@@ -215,10 +231,11 @@ def adhere_drop_capital_region_into_corresponding_textline(
img_textlines = cv2.fillPoly(img_textlines, pts=[polygons_of_drop_capitals[i_drop]], color=(255, 255, 255))
img_textlines = img_textlines.astype(np.uint8)
imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
contours_combined = return_contours_of_interested_region(img_textlines, 255, 0)
#imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
#ret, thresh = cv2.threshold(imgray, 0, 255, 0)
contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# print(len(contours_combined),'len textlines mixed')
areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))])
@@ -231,7 +248,12 @@ def adhere_drop_capital_region_into_corresponding_textline(
contours_biggest[:, 0, 1] = contours_biggest[:, 0, 1] # -all_box_coord[int(region_final)][0]
##contours_biggest=contours_biggest.reshape(np.shape(contours_biggest)[0],np.shape(contours_biggest)[2])
all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest
if len(contours_combined)==1:
all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest
elif len(contours_combined)==2:
all_found_textline_polygons[int(region_final)].insert(arg_min, polygons_of_drop_capitals[i_drop] )
else:
pass
# all_found_textline_polygons[int(region_final)][arg_min]=contours_biggest
except:
@@ -320,10 +342,12 @@ def adhere_drop_capital_region_into_corresponding_textline(
img_textlines = cv2.fillPoly(img_textlines, pts=[polygons_of_drop_capitals[i_drop]], color=(255, 255, 255))
img_textlines = img_textlines.astype(np.uint8)
imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
contours_combined = return_contours_of_interested_region(img_textlines, 255, 0)
#imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
#ret, thresh = cv2.threshold(imgray, 0, 255, 0)
contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# print(len(contours_combined),'len textlines mixed')
areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))])
@@ -336,8 +360,12 @@ def adhere_drop_capital_region_into_corresponding_textline(
contours_biggest[:, 0, 1] = contours_biggest[:, 0, 1] - all_box_coord[int(region_final)][0]
contours_biggest = contours_biggest.reshape(np.shape(contours_biggest)[0], np.shape(contours_biggest)[2])
all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest
if len(contours_combined)==1:
all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest
elif len(contours_combined)==2:
all_found_textline_polygons[int(region_final)].insert(arg_min, polygons_of_drop_capitals[i_drop] )
else:
pass
except:
# print('gordun1')
@@ -375,10 +403,12 @@ def adhere_drop_capital_region_into_corresponding_textline(
img_textlines = cv2.fillPoly(img_textlines, pts=[polygons_of_drop_capitals[i_drop]], color=(255, 255, 255))
img_textlines = img_textlines.astype(np.uint8)
imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours_combined = return_contours_of_interested_region(img_textlines, 255, 0)
#imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
#ret, thresh = cv2.threshold(imgray, 0, 255, 0)
#contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# print(len(contours_combined),'len textlines mixed')
areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))])
@ -391,7 +421,12 @@ def adhere_drop_capital_region_into_corresponding_textline(
contours_biggest[:, 0, 1] = contours_biggest[:, 0, 1] - all_box_coord[int(region_final)][0]
contours_biggest = contours_biggest.reshape(np.shape(contours_biggest)[0], np.shape(contours_biggest)[2])
all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest
if len(contours_combined)==1:
all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest
elif len(contours_combined)==2:
all_found_textline_polygons[int(region_final)].insert(arg_min, polygons_of_drop_capitals[i_drop] )
else:
pass
# all_found_textline_polygons[int(region_final)][arg_min]=contours_biggest
except:
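The three hunks above make the same change: the threshold-plus-findContours pair is replaced by return_contours_of_interested_region on the combined textline/drop-capital mask, and the number of resulting contours decides whether the drop capital fused with its textline. A minimal, self-contained sketch of that decision logic, with generic names that are not eynollah's API:

import cv2
import numpy as np

def merge_drop_capital(textline_poly, drop_poly, canvas_shape):
    # Paint both polygons into one binary mask.
    mask = np.zeros(canvas_shape, dtype=np.uint8)
    cv2.fillPoly(mask, pts=[textline_poly], color=255)
    cv2.fillPoly(mask, pts=[drop_poly], color=255)
    # Contours of the union; this is what the helper computes internally.
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) == 1:
        # The drop capital touches the line: replace the line with the fused contour.
        return [max(contours, key=cv2.contourArea)]
    elif len(contours) == 2:
        # Still disjoint: keep the line and insert the drop capital as its own line.
        return [textline_poly, drop_poly]
    # Anything else is left untouched, mirroring the `else: pass` above.
    return [textline_poly]

# Example: two disjoint squares stay two polygons.
a = np.array([[10, 10], [40, 10], [40, 40], [10, 40]], dtype=np.int32)
b = np.array([[60, 10], [90, 10], [90, 40], [60, 40]], dtype=np.int32)
print(len(merge_drop_capital(a, b, (100, 100))))  # 2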

@ -2,13 +2,11 @@ import numpy as np
import cv2
from scipy.signal import find_peaks
from scipy.ndimage import gaussian_filter1d
from .contour import find_new_features_of_contours, return_contours_of_interested_region
from .resize import resize_image
from .rotate import rotate_image
def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, kernel=None):
def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_version=False, kernel=None):
mask_marginals=np.zeros((text_with_lines.shape[0],text_with_lines.shape[1]))
mask_marginals=mask_marginals.astype(np.uint8)
@ -49,27 +47,14 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, kernel=N
if thickness_along_y_percent>=14:
text_with_lines_y_rev=-1*text_with_lines_y[:]
#print(text_with_lines_y)
#print(text_with_lines_y_rev)
#plt.plot(text_with_lines_y)
#plt.show()
text_with_lines_y_rev=text_with_lines_y_rev-np.min(text_with_lines_y_rev)
#plt.plot(text_with_lines_y_rev)
#plt.show()
sigma_gaus=1
region_sum_0= gaussian_filter1d(text_with_lines_y, sigma_gaus)
region_sum_0_rev=gaussian_filter1d(text_with_lines_y_rev, sigma_gaus)
#plt.plot(region_sum_0_rev)
#plt.show()
region_sum_0_updown=region_sum_0[len(region_sum_0)::-1]
first_nonzero=(next((i for i, x in enumerate(region_sum_0) if x), None))
@ -78,43 +63,17 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, kernel=N
last_nonzero=len(region_sum_0)-last_nonzero
##img_sum_0_smooth_rev=-region_sum_0
mid_point=(last_nonzero+first_nonzero)/2.
one_third_right=(last_nonzero-mid_point)/3.0
one_third_left=(mid_point-first_nonzero)/3.0
#img_sum_0_smooth_rev=img_sum_0_smooth_rev-np.min(img_sum_0_smooth_rev)
peaks, _ = find_peaks(text_with_lines_y_rev, height=0)
peaks=np.array(peaks)
#print(region_sum_0[peaks])
##plt.plot(region_sum_0)
##plt.plot(peaks,region_sum_0[peaks],'*')
##plt.show()
#print(first_nonzero,last_nonzero,peaks)
peaks=peaks[(peaks>first_nonzero) & ((peaks<last_nonzero))]
#print(first_nonzero,last_nonzero,peaks)
#print(region_sum_0[peaks]<10)
####peaks=peaks[region_sum_0[peaks]<25 ]
#print(region_sum_0[peaks])
peaks=peaks[region_sum_0[peaks]<min_textline_thickness ]
#print(peaks)
#print(first_nonzero,last_nonzero,one_third_right,one_third_left)
if num_col==1:
peaks_right=peaks[peaks>mid_point]
@ -137,9 +96,6 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, kernel=N
#print(point_left,point_right)
#print(text_regions.shape)
if point_right>=mask_marginals.shape[1]:
point_right=mask_marginals.shape[1]-1
@ -148,10 +104,8 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, kernel=N
except:
mask_marginals[:,:]=1
#print(mask_marginals.shape,point_left,point_right,'nadosh')
mask_marginals_rotated=rotate_image(mask_marginals,-slope_deskew)
#print(mask_marginals_rotated.shape,'nadosh')
mask_marginals_rotated_sum=mask_marginals_rotated.sum(axis=0)
mask_marginals_rotated_sum[mask_marginals_rotated_sum!=0]=1
@ -167,73 +121,92 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, kernel=N
if max_point_of_right_marginal>=text_regions.shape[1]:
max_point_of_right_marginal=text_regions.shape[1]-1
if light_version:
    text_regions_org = np.copy(text_regions)
    text_regions[text_regions[:,:]==1]=4
    pixel_img=4
    min_area_text=0.00001
    polygon_mask_marginals_rotated = return_contours_of_interested_region(mask_marginals,1,min_area_text)
    polygon_mask_marginals_rotated = polygon_mask_marginals_rotated[0]
    polygons_of_marginals=return_contours_of_interested_region(text_regions,pixel_img,min_area_text)
    cx_text_only,cy_text_only ,x_min_text_only,x_max_text_only, y_min_text_only ,y_max_text_only,y_cor_x_min_main=find_new_features_of_contours(polygons_of_marginals)
    text_regions[(text_regions[:,:]==4)]=1
    marginlas_should_be_main_text=[]
    x_min_marginals_left=[]
    x_min_marginals_right=[]
    for i in range(len(cx_text_only)):
        results = cv2.pointPolygonTest(polygon_mask_marginals_rotated, (cx_text_only[i], cy_text_only[i]), False)
        if results == -1:
            marginlas_should_be_main_text.append(polygons_of_marginals[i])
    text_regions_org=cv2.fillPoly(text_regions_org, pts =marginlas_should_be_main_text, color=(4,4))
    text_regions = np.copy(text_regions_org)
else:
    text_regions[(mask_marginals_rotated[:,:]!=1) & (text_regions[:,:]==1)]=4
    pixel_img=4
    min_area_text=0.00001
    polygons_of_marginals=return_contours_of_interested_region(text_regions,pixel_img,min_area_text)
    cx_text_only,cy_text_only ,x_min_text_only,x_max_text_only, y_min_text_only ,y_max_text_only,y_cor_x_min_main=find_new_features_of_contours(polygons_of_marginals)
    text_regions[(text_regions[:,:]==4)]=1
    marginlas_should_be_main_text=[]
    x_min_marginals_left=[]
    x_min_marginals_right=[]
    for i in range(len(cx_text_only)):
        x_width_mar=abs(x_min_text_only[i]-x_max_text_only[i])
        y_height_mar=abs(y_min_text_only[i]-y_max_text_only[i])
        if x_width_mar>16 and y_height_mar/x_width_mar<18:
            marginlas_should_be_main_text.append(polygons_of_marginals[i])
            if x_min_text_only[i]<(mid_point-one_third_left):
                x_min_marginals_left_new=x_min_text_only[i]
                if len(x_min_marginals_left)==0:
                    x_min_marginals_left.append(x_min_marginals_left_new)
                else:
                    x_min_marginals_left[0]=min(x_min_marginals_left[0],x_min_marginals_left_new)
            else:
                x_min_marginals_right_new=x_min_text_only[i]
                if len(x_min_marginals_right)==0:
                    x_min_marginals_right.append(x_min_marginals_right_new)
                else:
                    x_min_marginals_right[0]=min(x_min_marginals_right[0],x_min_marginals_right_new)
    if len(x_min_marginals_left)==0:
        x_min_marginals_left=[0]
    if len(x_min_marginals_right)==0:
        x_min_marginals_right=[text_regions.shape[1]-1]
    #print(x_min_marginals_left[0],x_min_marginals_right[0],'margo')
    #print(marginlas_should_be_main_text,'marginlas_should_be_main_text')
    text_regions=cv2.fillPoly(text_regions, pts =marginlas_should_be_main_text, color=(4,4))
    #print(np.unique(text_regions))
#text_regions[:,:int(x_min_marginals_left[0])][text_regions[:,:int(x_min_marginals_left[0])]==1]=0
#text_regions[:,int(x_min_marginals_right[0]):][text_regions[:,int(x_min_marginals_right[0]):]==1]=0
text_regions[:,:int(min_point_of_left_marginal)][text_regions[:,:int(min_point_of_left_marginal)]==1]=0
text_regions[:,int(max_point_of_right_marginal):][text_regions[:,int(max_point_of_right_marginal):]==1]=0
###text_regions[:,0:point_left][text_regions[:,0:point_left]==1]=4
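The new light_version branch replaces the old width/height heuristics with a geometric test: the marginal mask is reduced to a single polygon, and any marginal candidate whose centroid falls outside it (pointPolygonTest returning -1) is re-painted as main text on a pristine copy of the region map. A rough standalone sketch of that test, under assumed inputs (a binary mask, candidate polygons, and their centroids):

import cv2
import numpy as np

def candidates_outside_marginal_band(mask_marginals, candidates, centroids):
    # The largest contour of the mask plays the role of
    # polygon_mask_marginals_rotated above.
    contours, _ = cv2.findContours(mask_marginals, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    band = max(contours, key=cv2.contourArea)
    outside = []
    for poly, (cx, cy) in zip(candidates, centroids):
        # pointPolygonTest: +1 inside, 0 on the boundary, -1 outside.
        if cv2.pointPolygonTest(band, (float(cx), float(cy)), False) == -1:
            outside.append(poly)
    return outside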

File diff suppressed because it is too large

@ -72,7 +72,7 @@ def order_and_id_of_texts(found_polygons_text_region, found_polygons_text_region
index_of_types_2 = index_of_types[kind_of_texts == 2]
indexes_sorted_2 = indexes_sorted[kind_of_texts == 2]
counter = EynollahIdCounter(region_idx=ref_point)
for idx_textregion, _ in enumerate(found_polygons_text_region):
id_of_texts.append(counter.next_region_id)
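For orientation: EynollahIdCounter hands out the sequential region/line ids used throughout the writer, and region_idx=ref_point offsets the sequence so numbering continues across groups of regions. A toy stand-in illustrating the behaviour assumed here (the id format is a guess, not the real class):

class ToyIdCounter:
    """Illustrative stand-in for eynollah's EynollahIdCounter."""
    def __init__(self, region_idx=0):
        self._region = region_idx
        self._line = 0

    @property
    def next_region_id(self):
        self._region += 1
        self._line = 0  # line numbering restarts with each region
        return f"region_{self._region:04d}"

    @property
    def next_line_id(self):
        self._line += 1
        return f"region_{self._region:04d}_line_{self._line:04d}"

counter = ToyIdCounter(region_idx=2)
assert counter.next_region_id == "region_0003"
assert counter.next_line_id == "region_0003_line_0001"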

@ -2,7 +2,7 @@
# pylint: disable=import-error
from pathlib import Path
import os.path
import xml.etree.ElementTree as ET
from .utils.xml import create_page_xml, xml_reading_order
from .utils.counter import EynollahIdCounter
@ -12,6 +12,7 @@ from ocrd_models.ocrd_page import (
CoordsType,
PcGtsType,
TextLineType,
TextEquivType,
TextRegionType,
ImageRegionType,
TableRegionType,
@ -27,6 +28,7 @@ class EynollahXmlWriter():
self.counter = EynollahIdCounter()
self.dir_out = dir_out
self.image_filename = image_filename
self.output_filename = os.path.join(self.dir_out, self.image_filename_stem) + ".xml"
self.curved_line = curved_line
self.textline_light = textline_light
self.pcgts = pcgts
@ -59,6 +61,7 @@ class EynollahXmlWriter():
coords = CoordsType()
textline = TextLineType(id=counter.next_line_id, Coords=coords)
marginal_region.add_TextLine(textline)
marginal_region.set_orientation(-slopes_marginals[marginal_idx])
points_co = ''
for l in range(len(all_found_textline_polygons_marginals[marginal_idx][j])):
if not (self.curved_line or self.textline_light):
@ -93,12 +96,15 @@ class EynollahXmlWriter():
points_co += ' '
coords.set_points(points_co[:-1])
def serialize_lines_in_region(self, text_region, all_found_textline_polygons, region_idx, page_coord, all_box_coord, slopes, counter):
def serialize_lines_in_region(self, text_region, all_found_textline_polygons, region_idx, page_coord, all_box_coord, slopes, counter, ocr_all_textlines_textregion):
self.logger.debug('enter serialize_lines_in_region')
for j in range(len(all_found_textline_polygons[region_idx])):
coords = CoordsType()
textline = TextLineType(id=counter.next_line_id, Coords=coords)
if ocr_all_textlines_textregion:
textline.set_TextEquiv( [ TextEquivType(Unicode=ocr_all_textlines_textregion[j]) ] )
text_region.add_TextLine(textline)
text_region.set_orientation(-slopes[region_idx])
region_bboxes = all_box_coord[region_idx]
points_co = ''
for idx_contour_textline, contour_textline in enumerate(all_found_textline_polygons[region_idx][j]):
@ -133,14 +139,36 @@ class EynollahXmlWriter():
points_co += str(int((contour_textline[0][1] + region_bboxes[0]+page_coord[0])/self.scale_y))
points_co += ' '
coords.set_points(points_co[:-1])
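The added ocr_all_textlines_textregion parameter threads per-line OCR strings into the PAGE-XML serialization: when present, each TextLineType gets a TextEquiv child before its coordinates are written. The pattern in isolation, using the same ocrd_models types as the code above:

from ocrd_models.ocrd_page import CoordsType, TextLineType, TextEquivType

def make_textline(line_id, ocr_texts, j):
    # Coordinates are filled in afterwards, exactly as serialize_lines_in_region does.
    textline = TextLineType(id=line_id, Coords=CoordsType())
    if ocr_texts:  # None when OCR was not requested for this region
        textline.set_TextEquiv([TextEquivType(Unicode=ocr_texts[j])])
    return textline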
def serialize_lines_in_dropcapital(self, text_region, all_found_textline_polygons, region_idx, page_coord, all_box_coord, slopes, counter, ocr_all_textlines_textregion):
self.logger.debug('enter serialize_lines_in_dropcapital')
for j in range(1):
coords = CoordsType()
textline = TextLineType(id=counter.next_line_id, Coords=coords)
if ocr_all_textlines_textregion:
textline.set_TextEquiv( [ TextEquivType(Unicode=ocr_all_textlines_textregion[j]) ] )
text_region.add_TextLine(textline)
#region_bboxes = all_box_coord[region_idx]
points_co = ''
for idx_contour_textline, contour_textline in enumerate(all_found_textline_polygons[j]):
if len(contour_textline) == 2:
points_co += str(int((contour_textline[0] + page_coord[2]) / self.scale_x))
points_co += ','
points_co += str(int((contour_textline[1] + page_coord[0]) / self.scale_y))
else:
points_co += str(int((contour_textline[0][0] + page_coord[2]) / self.scale_x))
points_co += ','
points_co += str(int((contour_textline[0][1] + page_coord[0])/self.scale_y))
points_co += ' '
coords.set_points(points_co[:-1])
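serialize_lines_in_dropcapital reuses the writer's point-serialization idiom: each vertex is offset by the page coordinates, rescaled, and joined into the space-separated x,y list PAGE-XML expects. The same idiom as a standalone helper (invented name; the real methods inline it):

def polygon_to_pagexml_points(contour, page_coord, scale_x, scale_y):
    points = []
    for pt in contour:
        # Contours arrive either as flat (x, y) pairs or as OpenCV's (1, 2) rows.
        xy = pt if len(pt) == 2 else pt[0]
        x = int((xy[0] + page_coord[2]) / scale_x)
        y = int((xy[1] + page_coord[0]) / scale_y)
        points.append(f"{x},{y}")
    return " ".join(points)

# e.g. polygon_to_pagexml_points([(0, 0), (10, 0), (10, 5)], (2, 0, 4, 0), 1.0, 1.0)
# -> '4,2 14,2 14,7'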
def write_pagexml(self, pcgts):
out_fname = os.path.join(self.dir_out, self.image_filename_stem) + ".xml"
self.logger.info("output filename: '%s'", out_fname)
with open(out_fname, 'w') as f:
self.logger.info("output filename: '%s'", self.output_filename)
with open(self.output_filename, 'w') as f:
f.write(to_xml(pcgts))
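Moving the .xml path into __init__ (the self.output_filename hunk further up) makes the destination a stable attribute instead of something recomputed at write time, so callers can inspect it before or after writing. A toy illustration of the refactor, not the real writer:

import os

class WriterPathDemo:
    """Toy illustrating the output_filename refactor; not EynollahXmlWriter."""
    def __init__(self, dir_out, image_filename):
        self.dir_out = dir_out
        self.image_filename = image_filename
        stem = os.path.splitext(os.path.basename(image_filename))[0]
        # Derive the target path once, so every later write (and any caller
        # that wants to know the destination) agrees on the same file.
        self.output_filename = os.path.join(dir_out, stem) + ".xml"

    def write(self, xml_text):
        with open(self.output_filename, "w") as f:
            f.write(xml_text)

demo = WriterPathDemo("/tmp", "page0001.tif")
assert demo.output_filename == "/tmp/page0001.xml"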
def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, found_polygons_tables):
def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, found_polygons_tables, ocr_all_textlines):
self.logger.debug('enter build_pagexml_no_full_layout')
# create the file structure
@ -159,7 +187,11 @@ class EynollahXmlWriter():
Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord)),
)
page.add_TextRegion(textregion)
self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, all_box_coord, slopes, counter)
if ocr_all_textlines:
ocr_textlines = ocr_all_textlines[mm]
else:
ocr_textlines = None
self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, all_box_coord, slopes, counter, ocr_textlines)
for mm in range(len(found_polygons_marginals)):
marginal = TextRegionType(id=counter.next_region_id, type_='marginalia',
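The same four-line guard now precedes each serialize_lines_in_region call in the builders: pick the region's OCR lines when OCR ran, otherwise pass None so no TextEquiv is emitted. A conditional expression would compact it (an observation, not part of the commit):

ocr_textlines = ocr_all_textlines[mm] if ocr_all_textlines else None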
@ -209,7 +241,7 @@ class EynollahXmlWriter():
return pcgts
def build_pagexml_full_layout(self, found_polygons_text_region, found_polygons_text_region_h, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml):
def build_pagexml_full_layout(self, found_polygons_text_region, found_polygons_text_region_h, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, ocr_all_textlines):
self.logger.debug('enter build_pagexml_full_layout')
# create the file structure
@ -226,14 +258,24 @@ class EynollahXmlWriter():
textregion = TextRegionType(id=counter.next_region_id, type_='paragraph',
Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord)))
page.add_TextRegion(textregion)
self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, all_box_coord, slopes, counter)
if ocr_all_textlines:
ocr_textlines = ocr_all_textlines[mm]
else:
ocr_textlines = None
self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, all_box_coord, slopes, counter, ocr_textlines)
self.logger.debug('len(found_polygons_text_region_h) %s', len(found_polygons_text_region_h))
for mm in range(len(found_polygons_text_region_h)):
textregion = TextRegionType(id=counter.next_region_id, type_='header',
Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_h[mm], page_coord)))
page.add_TextRegion(textregion)
self.serialize_lines_in_region(textregion, all_found_textline_polygons_h, mm, page_coord, all_box_coord_h, slopes_h, counter)
if ocr_all_textlines:
ocr_textlines = ocr_all_textlines[mm]
else:
ocr_textlines = None
self.serialize_lines_in_region(textregion, all_found_textline_polygons_h, mm, page_coord, all_box_coord_h, slopes_h, counter, ocr_textlines)
for mm in range(len(found_polygons_marginals)):
marginal = TextRegionType(id=counter.next_region_id, type_='marginalia',
@ -242,8 +284,12 @@ class EynollahXmlWriter():
self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals, mm, page_coord, all_box_coord_marginals, slopes_marginals, counter)
for mm in range(len(found_polygons_drop_capitals)):
page.add_TextRegion(TextRegionType(id=counter.next_region_id, type_='drop-capital',
Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_drop_capitals[mm], page_coord))))
dropcapital = TextRegionType(id=counter.next_region_id, type_='drop-capital',
Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_drop_capitals[mm], page_coord)))
page.add_TextRegion(dropcapital)
###all_box_coord_drop = None
###slopes_drop = None
###self.serialize_lines_in_dropcapital(dropcapital, [found_polygons_drop_capitals[mm]], mm, page_coord, all_box_coord_drop, slopes_drop, counter, ocr_all_textlines_textregion=None)
for mm in range(len(found_polygons_text_region_img)):
page.add_ImageRegion(ImageRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_img[mm], page_coord))))
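Binding the drop-capital region to a name (dropcapital) prepares for the commented-out serialize_lines_in_dropcapital call, which would attach the region outline as a single pseudo-textline. Constructing such a region in isolation, with hypothetical id and coordinate values:

from ocrd_models.ocrd_page import PageType, TextRegionType, CoordsType

# Hypothetical page; in the writer this comes from build_pagexml_full_layout.
page = PageType(imageFilename="page0001.tif", imageWidth=100, imageHeight=100)
dropcapital = TextRegionType(id="region_0001", type_='drop-capital',
                             Coords=CoordsType(points="0,0 10,0 10,10 0,10"))
page.add_TextRegion(dropcapital)  # named, so textlines can be attached later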

@ -2,7 +2,7 @@ from os import environ
from pathlib import Path
from ocrd_utils import pushd_popd
from tests.base import CapturingTestCase as TestCase, main
from eynollah.cli import main as eynollah_cli
from eynollah.cli import layout as eynollah_cli
testdir = Path(__file__).parent.resolve()
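Since the CLI entry point is now a click group, the test imports the layout subcommand directly. A stand-alone invocation would look roughly like this (assuming layout keeps -i/-o/-m options; the paths are placeholders):

from click.testing import CliRunner
from eynollah.cli import layout

runner = CliRunner()
result = runner.invoke(layout, ["-i", "page.tif", "-o", "out/", "-m", "models_eynollah/"])
print(result.exit_code, result.output)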
