From 826d38b865557be1711bc2c898ce9f8fdadbf2b7 Mon Sep 17 00:00:00 2001
From: cneud <952378+cneud@users.noreply.github.com>
Date: Fri, 20 Sep 2024 23:10:02 +0200
Subject: [PATCH] pep 8 code style

---
 src/eynollah/cli.py                  |    3 +-
 src/eynollah/eynollah.py             | 2228 ++++++++++++----
 src/eynollah/ocrd_cli.py             |    2 +
 src/eynollah/plot.py                 |   74 +-
 src/eynollah/processor.py            |    1 +
 src/eynollah/utils/__init__.py       | 2570 +++++++++++++-------------
 src/eynollah/utils/contour.py        |  104 +-
 src/eynollah/utils/counter.py        |    1 +
 src/eynollah/utils/drop_capitals.py  |  204 +-
 src/eynollah/utils/marginals.py      |  301 ++-
 src/eynollah/utils/pil_cv2.py        |    5 +-
 src/eynollah/utils/resize.py         |    1 +
 src/eynollah/utils/rotate.py         |   11 +-
 src/eynollah/utils/separate_lines.py |  928 +++++-----
 src/eynollah/utils/xml.py            |    3 +
 src/eynollah/writer.py               |  116 +-
 tests/base.py                        |    5 +-
 tests/test_counter.py                |    4 +
 tests/test_dpi.py                    |    2 +
 tests/test_run.py                    |    2 +
 tests/test_xml.py                    |    2 +
 21 files changed, 3423 insertions(+), 3144 deletions(-)

diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py
index b4d323f..48f5487 100644
--- a/src/eynollah/cli.py
+++ b/src/eynollah/cli.py
@@ -182,7 +182,7 @@ def main(
     if textline_light and not light_version:
         print('Error: You used -tll to enable light textline detection but -light is not enabled')
         sys.exit(1)
-    if extract_only_images and (allow_enhancement or allow_scaling or light_version or curved_line or textline_light or full_layout or tables or right2left or headers_off) :
+    if extract_only_images and (allow_enhancement or allow_scaling or light_version or curved_line or textline_light or full_layout or tables or right2left or headers_off):
         print('Error: You used -eoi which can not be enabled alongside light_version -light or allow_scaling -as or allow_enhancement -ae or curved_line -cl or textline_light -tll or full_layout -fl or tables -tab or right2left -r2l or headers_off -ho')
         sys.exit(1)
     eynollah = Eynollah(
@@ -215,5 +215,6 @@ def main(
     pcgts = eynollah.run()
     eynollah.writer.write_pagexml(pcgts)
 
+
 if __name__ == "__main__":
     main()
diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py
index 511e994..1b956f7 100644
--- a/src/eynollah/eynollah.py
+++ b/src/eynollah/eynollah.py
@@ -17,12 +17,14 @@ import gc
 from ocrd_utils import getLogger
 import cv2
 import numpy as np
+
 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
 stderr = sys.stderr
 sys.stderr = open(os.devnull, "w")
 import tensorflow as tf
 from tensorflow.python.keras import backend as K
 from tensorflow.keras.models import load_model
+
 sys.stderr = stderr
 tf.get_logger().setLevel("ERROR")
 warnings.filterwarnings("ignore")
@@ -80,14 +82,14 @@ from .plot import EynollahPlotter
 from .writer import EynollahXmlWriter
 
 SLOPE_THRESHOLD = 0.13
-RATIO_OF_TWO_MODEL_THRESHOLD = 95.50 #98.45:
+RATIO_OF_TWO_MODEL_THRESHOLD = 95.50  # 98.45:
 DPI_THRESHOLD = 298
 MAX_SLOPE = 999
 KERNEL = np.ones((5, 5), np.uint8)
 
 projection_dim = 64
 patch_size = 1
-num_patches =21*21#14*14#28*28#14*14#28*28
+num_patches = 21 * 21  # 14*14#28*28#14*14#28*28
 
 
 class Patches(layers.Layer):
@@ -107,15 +109,15 @@ class Patches(layers.Layer):
         patch_dims = patches.shape[-1]
         patches = tf.reshape(patches, [batch_size, -1, patch_dims])
         return patches
-    def get_config(self):
 
+    def get_config(self):
         config = super().get_config().copy()
         config.update({
             'patch_size': self.patch_size,
         })
         return config
-
-
+
+
 class PatchEncoder(layers.Layer):
     def __init__(self, **kwargs):
         super(PatchEncoder, self).__init__()
@@ -129,8 +131,8 @@ class PatchEncoder(layers.Layer):
         positions =
tf.range(start=0, limit=self.num_patches, delta=1) encoded = self.projection(patch) + self.position_embedding(positions) return encoded - def get_config(self): + def get_config(self): config = super().get_config().copy() config.update({ 'num_patches': self.num_patches, @@ -139,36 +141,37 @@ class PatchEncoder(layers.Layer): }) return config + class Eynollah: def __init__( - self, - dir_models, - image_filename=None, - image_pil=None, - image_filename_stem=None, - dir_out=None, - dir_in=None, - dir_of_cropped_images=None, - extract_only_images=False, - dir_of_layout=None, - dir_of_deskewed=None, - dir_of_all=None, - dir_save_page=None, - enable_plotting=False, - allow_enhancement=False, - curved_line=False, - textline_light=False, - full_layout=False, - tables=False, - right2left=False, - input_binary=False, - allow_scaling=False, - headers_off=False, - light_version=False, - ignore_page_extraction=False, - override_dpi=None, - logger=None, - pcgts=None, + self, + dir_models, + image_filename=None, + image_pil=None, + image_filename_stem=None, + dir_out=None, + dir_in=None, + dir_of_cropped_images=None, + extract_only_images=False, + dir_of_layout=None, + dir_of_deskewed=None, + dir_of_all=None, + dir_save_page=None, + enable_plotting=False, + allow_enhancement=False, + curved_line=False, + textline_light=False, + full_layout=False, + tables=False, + right2left=False, + input_binary=False, + allow_scaling=False, + headers_off=False, + light_version=False, + ignore_page_extraction=False, + override_dpi=None, + logger=None, + pcgts=None, ): if not dir_in: if image_pil: @@ -183,9 +186,9 @@ class Eynollah: self.dir_of_all = dir_of_all self.dir_save_page = dir_save_page self.dir_of_deskewed = dir_of_deskewed - self.dir_of_deskewed = dir_of_deskewed - self.dir_of_cropped_images=dir_of_cropped_images - self.dir_of_layout=dir_of_layout + self.dir_of_deskewed = dir_of_deskewed + self.dir_of_cropped_images = dir_of_cropped_images + self.dir_of_layout = dir_of_layout self.enable_plotting = enable_plotting self.allow_enhancement = allow_enhancement self.curved_line = curved_line @@ -213,7 +216,7 @@ class Eynollah: dir_out=self.dir_out, image_filename=self.image_filename, curved_line=self.curved_line, - textline_light = self.textline_light, + textline_light=self.textline_light, pcgts=pcgts) self.logger = logger if logger else getLogger('eynollah') self.dir_models = dir_models @@ -234,15 +237,15 @@ class Eynollah: else: self.model_textline_dir = dir_models + "/eynollah-textline_20210425" self.model_tables = dir_models + "/eynollah-tables_20210319" - + self.models = {} - + if dir_in and light_version: config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True session = tf.compat.v1.Session(config=config) set_session(session) - + self.model_page = self.our_load_model(self.model_page_dir) self.model_classifier = self.our_load_model(self.model_dir_of_col_classifier) self.model_bin = self.our_load_model(self.model_dir_of_binarization) @@ -250,9 +253,9 @@ class Eynollah: self.model_region = self.our_load_model(self.model_region_dir_p_ens_light) self.model_region_fl_np = self.our_load_model(self.model_region_dir_fully_np) self.model_region_fl = self.our_load_model(self.model_region_dir_fully) - - self.ls_imgs = os.listdir(self.dir_in) - + + self.ls_imgs = os.listdir(self.dir_in) + if dir_in and self.extract_only_images: config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True @@ -262,19 +265,19 @@ class Eynollah: self.model_page = self.our_load_model(self.model_page_dir) 
self.model_classifier = self.our_load_model(self.model_dir_of_col_classifier) self.model_bin = self.our_load_model(self.model_dir_of_binarization) - #self.model_textline = self.our_load_model(self.model_textline_dir) + # self.model_textline = self.our_load_model(self.model_textline_dir) self.model_region = self.our_load_model(self.model_region_dir_p_ens_light_only_images_extraction) - #self.model_region_fl_np = self.our_load_model(self.model_region_dir_fully_np) - #self.model_region_fl = self.our_load_model(self.model_region_dir_fully) + # self.model_region_fl_np = self.our_load_model(self.model_region_dir_fully_np) + # self.model_region_fl = self.our_load_model(self.model_region_dir_fully) - self.ls_imgs = os.listdir(self.dir_in) + self.ls_imgs = os.listdir(self.dir_in) if dir_in and not (light_version or self.extract_only_images): config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True session = tf.compat.v1.Session(config=config) set_session(session) - + self.model_page = self.our_load_model(self.model_page_dir) self.model_classifier = self.our_load_model(self.model_dir_of_col_classifier) self.model_bin = self.our_load_model(self.model_dir_of_binarization) @@ -284,10 +287,9 @@ class Eynollah: self.model_region_fl_np = self.our_load_model(self.model_region_dir_fully_np) self.model_region_fl = self.our_load_model(self.model_region_dir_fully) self.model_enhancement = self.our_load_model(self.model_dir_of_enhancement) - - self.ls_imgs = os.listdir(self.dir_in) - - + + self.ls_imgs = os.listdir(self.dir_in) + def _cache_images(self, image_filename=None, image_pil=None): ret = {} if image_filename: @@ -297,13 +299,14 @@ class Eynollah: ret['img'] = pil2cv(image_pil) self.dpi = check_dpi(image_pil) ret['img_grayscale'] = cv2.cvtColor(ret['img'], cv2.COLOR_BGR2GRAY) - for prefix in ('', '_grayscale'): + for prefix in ('', '_grayscale'): ret[f'img{prefix}_uint8'] = ret[f'img{prefix}'].astype(np.uint8) return ret + def reset_file_name_dir(self, image_filename): self._imgs = self._cache_images(image_filename=image_filename) self.image_filename = image_filename - + self.plotter = None if not self.enable_plotting else EynollahPlotter( dir_out=self.dir_out, dir_of_all=self.dir_of_all, @@ -312,13 +315,14 @@ class Eynollah: dir_of_cropped_images=self.dir_of_cropped_images, dir_of_layout=self.dir_of_layout, image_filename_stem=Path(Path(image_filename).name).stem) - + self.writer = EynollahXmlWriter( dir_out=self.dir_out, image_filename=self.image_filename, curved_line=self.curved_line, - textline_light = self.textline_light, + textline_light=self.textline_light, pcgts=self.pcgts) + def imread(self, grayscale=False, uint8=True): key = 'img' if grayscale: @@ -326,11 +330,10 @@ class Eynollah: if uint8: key += '_uint8' return self._imgs[key].copy() - + def isNaN(self, num): return num != num - def predict_enhancement(self, img): self.logger.debug("enter predict_enhancement") model_enhancement, session_enhancement = self.start_new_session_and_model(self.model_dir_of_enhancement) @@ -380,39 +383,40 @@ class Eynollah: index_y_d = img_h - img_height_model img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] - label_p_pred = model_enhancement.predict(img_patch.reshape(1, img_patch.shape[0], img_patch.shape[1], img_patch.shape[2]), - verbose=0) + label_p_pred = model_enhancement.predict( + img_patch.reshape(1, img_patch.shape[0], img_patch.shape[1], img_patch.shape[2]), + verbose=0) seg = label_p_pred[0, :, :, :] seg = seg * 255 if i == 0 and j == 0: - seg = seg[0 : seg.shape[0] - 
margin, 0 : seg.shape[1] - margin] - prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg + seg = seg[0: seg.shape[0] - margin, 0: seg.shape[1] - margin] + prediction_true[index_y_d + 0: index_y_u - margin, index_x_d + 0: index_x_u - margin, :] = seg elif i == nxf - 1 and j == nyf - 1: - seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - 0] - prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - 0, :] = seg + seg = seg[margin: seg.shape[0] - 0, margin: seg.shape[1] - 0] + prediction_true[index_y_d + margin: index_y_u - 0, index_x_d + margin: index_x_u - 0, :] = seg elif i == 0 and j == nyf - 1: - seg = seg[margin : seg.shape[0] - 0, 0 : seg.shape[1] - margin] - prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + 0 : index_x_u - margin, :] = seg + seg = seg[margin: seg.shape[0] - 0, 0: seg.shape[1] - margin] + prediction_true[index_y_d + margin: index_y_u - 0, index_x_d + 0: index_x_u - margin, :] = seg elif i == nxf - 1 and j == 0: - seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - 0] - prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg + seg = seg[0: seg.shape[0] - margin, margin: seg.shape[1] - 0] + prediction_true[index_y_d + 0: index_y_u - margin, index_x_d + margin: index_x_u - 0, :] = seg elif i == 0 and j != 0 and j != nyf - 1: - seg = seg[margin : seg.shape[0] - margin, 0 : seg.shape[1] - margin] - prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg + seg = seg[margin: seg.shape[0] - margin, 0: seg.shape[1] - margin] + prediction_true[index_y_d + margin: index_y_u - margin, index_x_d + 0: index_x_u - margin, :] = seg elif i == nxf - 1 and j != 0 and j != nyf - 1: - seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - 0] - prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg + seg = seg[margin: seg.shape[0] - margin, margin: seg.shape[1] - 0] + prediction_true[index_y_d + margin: index_y_u - margin, index_x_d + margin: index_x_u - 0, :] = seg elif i != 0 and i != nxf - 1 and j == 0: - seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - margin] - prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg + seg = seg[0: seg.shape[0] - margin, margin: seg.shape[1] - margin] + prediction_true[index_y_d + 0: index_y_u - margin, index_x_d + margin: index_x_u - margin, :] = seg elif i != 0 and i != nxf - 1 and j == nyf - 1: - seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - margin] - prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - margin, :] = seg + seg = seg[margin: seg.shape[0] - 0, margin: seg.shape[1] - margin] + prediction_true[index_y_d + margin: index_y_u - 0, index_x_d + margin: index_x_u - margin, :] = seg else: - seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - margin] - prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg + seg = seg[margin: seg.shape[0] - margin, margin: seg.shape[1] - margin] + prediction_true[index_y_d + margin: index_y_u - margin, index_x_d + margin: index_x_u - margin, :] = seg prediction_true = prediction_true.astype(int) return prediction_true @@ -513,7 +517,8 @@ class Eynollah: _, page_coord = self.early_page_for_num_of_column_classification(img) if not self.dir_in: - model_num_classifier, session_col_classifier = 
self.start_new_session_and_model(self.model_dir_of_col_classifier) + model_num_classifier, session_col_classifier = self.start_new_session_and_model( + self.model_dir_of_col_classifier) if self.input_binary: img_in = np.copy(img) img_in = img_in / 255.0 @@ -523,7 +528,7 @@ class Eynollah: else: img_1ch = self.imread(grayscale=True, uint8=False) width_early = img_1ch.shape[1] - img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + img_1ch = img_1ch[page_coord[0]: page_coord[1], page_coord[2]: page_coord[3]] # plt.imshow(img_1ch) # plt.show() @@ -553,7 +558,7 @@ class Eynollah: return img, img_new, is_image_enhanced - def resize_and_enhance_image_with_column_classifier(self,light_version): + def resize_and_enhance_image_with_column_classifier(self, light_version): self.logger.debug("enter resize_and_enhance_image_with_column_classifier") dpi = self.dpi self.logger.info("Detected %s DPI", dpi) @@ -562,18 +567,18 @@ class Eynollah: if self.dir_in: prediction_bin = self.do_prediction(True, img, self.model_bin) else: - + model_bin, session_bin = self.start_new_session_and_model(self.model_dir_of_binarization) prediction_bin = self.do_prediction(True, img, model_bin) - - prediction_bin=prediction_bin[:,:,0] - prediction_bin = (prediction_bin[:,:]==0)*1 - prediction_bin = prediction_bin*255 - - prediction_bin =np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) + + prediction_bin = prediction_bin[:, :, 0] + prediction_bin = (prediction_bin[:, :] == 0) * 1 + prediction_bin = prediction_bin * 255 + + prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) prediction_bin = prediction_bin.astype(np.uint8) - img= np.copy(prediction_bin) + img = np.copy(prediction_bin) img_bin = np.copy(prediction_bin) else: img = self.imread() @@ -582,8 +587,9 @@ class Eynollah: t1 = time.time() _, page_coord = self.early_page_for_num_of_column_classification(img_bin) if not self.dir_in: - model_num_classifier, session_col_classifier = self.start_new_session_and_model(self.model_dir_of_col_classifier) - + model_num_classifier, session_col_classifier = self.start_new_session_and_model( + self.model_dir_of_col_classifier) + if self.input_binary: img_in = np.copy(img) width_early = img_in.shape[1] @@ -593,7 +599,7 @@ class Eynollah: else: img_1ch = self.imread(grayscale=True) width_early = img_1ch.shape[1] - img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + img_1ch = img_1ch[page_coord[0]: page_coord[1], page_coord[2]: page_coord[3]] img_1ch = img_1ch / 255.0 img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST) @@ -602,18 +608,18 @@ class Eynollah: img_in[0, :, :, 1] = img_1ch[:, :] img_in[0, :, :, 2] = img_1ch[:, :] - if self.dir_in: label_p_pred = self.model_classifier.predict(img_in, verbose=0) else: label_p_pred = model_num_classifier.predict(img_in, verbose=0) num_col = np.argmax(label_p_pred[0]) + 1 - + self.logger.info("Found %d columns (%s)", num_col, np.around(label_p_pred, decimals=5)) if not self.extract_only_images: if dpi < DPI_THRESHOLD: - img_new, num_column_is_classified = self.calculate_width_height_by_columns(img, num_col, width_early, label_p_pred) + img_new, num_column_is_classified = self.calculate_width_height_by_columns(img, num_col, width_early, + label_p_pred) if light_version: image_res = np.copy(img_new) else: @@ -691,12 +697,11 @@ class Eynollah: return model, session - def start_new_session_and_model(self, model_dir): self.logger.debug("enter start_new_session_and_model (model_dir=%s)", 
model_dir) - #gpu_options = tf.compat.v1.GPUOptions(allow_growth=True) - #gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=7.7, allow_growth=True) - #session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options)) + # gpu_options = tf.compat.v1.GPUOptions(allow_growth=True) + # gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=7.7, allow_growth=True) + # session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options)) physical_devices = tf.config.list_physical_devices('GPU') try: for device in physical_devices: @@ -714,10 +719,10 @@ class Eynollah: model = load_model(model_dir, compile=False) self.models[model_dir] = model except: - model = load_model(model_dir , compile=False,custom_objects = {"PatchEncoder": PatchEncoder, "Patches": Patches}) + model = load_model(model_dir, compile=False, + custom_objects={"PatchEncoder": PatchEncoder, "Patches": Patches}) self.models[model_dir] = model - return model, None def do_prediction(self, patches, img, model, marginal_of_patch_percent=0.1): @@ -740,7 +745,6 @@ class Eynollah: prediction_true = resize_image(seg_color, img_h_page, img_w_page) prediction_true = prediction_true.astype(np.uint8) - else: if img.shape[0] < img_height_model: img = resize_image(img, img_height_model, img.shape[1]) @@ -785,61 +789,72 @@ class Eynollah: index_y_d = img_h - img_height_model img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] - label_p_pred = model.predict(img_patch.reshape(1, img_patch.shape[0], img_patch.shape[1], img_patch.shape[2]), - verbose=0) + label_p_pred = model.predict( + img_patch.reshape(1, img_patch.shape[0], img_patch.shape[1], img_patch.shape[2]), + verbose=0) seg = np.argmax(label_p_pred, axis=3)[0] seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2) if i == 0 and j == 0: - seg_color = seg_color[0 : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :] - #seg = seg[0 : seg.shape[0] - margin, 0 : seg.shape[1] - margin] - #mask_true[index_y_d + 0 : index_y_u - margin, index_x_d + 0 : index_x_u - margin] = seg - prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg_color + seg_color = seg_color[0: seg_color.shape[0] - margin, 0: seg_color.shape[1] - margin, :] + # seg = seg[0 : seg.shape[0] - margin, 0 : seg.shape[1] - margin] + # mask_true[index_y_d + 0 : index_y_u - margin, index_x_d + 0 : index_x_u - margin] = seg + prediction_true[index_y_d + 0: index_y_u - margin, index_x_d + 0: index_x_u - margin, + :] = seg_color elif i == nxf - 1 and j == nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - 0, :] - #seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - 0] - #mask_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - 0] = seg - prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - 0, :] = seg_color + seg_color = seg_color[margin: seg_color.shape[0] - 0, margin: seg_color.shape[1] - 0, :] + # seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - 0] + # mask_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - 0] = seg + prediction_true[index_y_d + margin: index_y_u - 0, index_x_d + margin: index_x_u - 0, + :] = seg_color elif i == 0 and j == nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - 0, 0 : seg_color.shape[1] - margin, :] - #seg = seg[margin : seg.shape[0] - 0, 0 : seg.shape[1] - margin] - #mask_true[index_y_d + margin : index_y_u - 0, 
index_x_d + 0 : index_x_u - margin] = seg - prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + 0 : index_x_u - margin, :] = seg_color + seg_color = seg_color[margin: seg_color.shape[0] - 0, 0: seg_color.shape[1] - margin, :] + # seg = seg[margin : seg.shape[0] - 0, 0 : seg.shape[1] - margin] + # mask_true[index_y_d + margin : index_y_u - 0, index_x_d + 0 : index_x_u - margin] = seg + prediction_true[index_y_d + margin: index_y_u - 0, index_x_d + 0: index_x_u - margin, + :] = seg_color elif i == nxf - 1 and j == 0: - seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :] - #seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - 0] - #mask_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - 0] = seg - prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg_color + seg_color = seg_color[0: seg_color.shape[0] - margin, margin: seg_color.shape[1] - 0, :] + # seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - 0] + # mask_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - 0] = seg + prediction_true[index_y_d + 0: index_y_u - margin, index_x_d + margin: index_x_u - 0, + :] = seg_color elif i == 0 and j != 0 and j != nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :] - #seg = seg[margin : seg.shape[0] - margin, 0 : seg.shape[1] - margin] - #mask_true[index_y_d + margin : index_y_u - margin, index_x_d + 0 : index_x_u - margin] = seg - prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg_color + seg_color = seg_color[margin: seg_color.shape[0] - margin, 0: seg_color.shape[1] - margin, :] + # seg = seg[margin : seg.shape[0] - margin, 0 : seg.shape[1] - margin] + # mask_true[index_y_d + margin : index_y_u - margin, index_x_d + 0 : index_x_u - margin] = seg + prediction_true[index_y_d + margin: index_y_u - margin, index_x_d + 0: index_x_u - margin, + :] = seg_color elif i == nxf - 1 and j != 0 and j != nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :] - #seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - 0] - #mask_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - 0] = seg - prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg_color + seg_color = seg_color[margin: seg_color.shape[0] - margin, margin: seg_color.shape[1] - 0, :] + # seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - 0] + # mask_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - 0] = seg + prediction_true[index_y_d + margin: index_y_u - margin, index_x_d + margin: index_x_u - 0, + :] = seg_color elif i != 0 and i != nxf - 1 and j == 0: - seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :] - #seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - margin] - #mask_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - margin] = seg - prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg_color + seg_color = seg_color[0: seg_color.shape[0] - margin, margin: seg_color.shape[1] - margin, :] + # seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - margin] + # mask_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - margin] = seg + prediction_true[index_y_d + 0: 
index_y_u - margin, index_x_d + margin: index_x_u - margin, + :] = seg_color elif i != 0 and i != nxf - 1 and j == nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - margin, :] - #seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - margin] - #mask_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - margin] = seg - prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - margin, :] = seg_color + seg_color = seg_color[margin: seg_color.shape[0] - 0, margin: seg_color.shape[1] - margin, :] + # seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - margin] + # mask_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - margin] = seg + prediction_true[index_y_d + margin: index_y_u - 0, index_x_d + margin: index_x_u - margin, + :] = seg_color else: - seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :] - #seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - margin] - #mask_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - margin] = seg - prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg_color + seg_color = seg_color[margin: seg_color.shape[0] - margin, margin: seg_color.shape[1] - margin, :] + # seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - margin] + # mask_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - margin] = seg + prediction_true[index_y_d + margin: index_y_u - margin, index_x_d + margin: index_x_u - margin, + :] = seg_color prediction_true = prediction_true.astype(np.uint8) - #del model - #gc.collect() + # del model + # gc.collect() return prediction_true + def do_prediction_new_concept(self, patches, img, model, marginal_of_patch_percent=0.1): self.logger.debug("enter do_prediction") @@ -854,13 +869,11 @@ class Eynollah: label_p_pred = model.predict(img.reshape(1, img.shape[0], img.shape[1], img.shape[2])) - seg = np.argmax(label_p_pred, axis=3)[0] seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2) prediction_true = resize_image(seg_color, img_h_page, img_w_page) prediction_true = prediction_true.astype(np.uint8) - else: if img.shape[0] < img_height_model: img = resize_image(img, img_height_model, img.shape[1]) @@ -905,106 +918,111 @@ class Eynollah: index_y_d = img_h - img_height_model img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] - label_p_pred = model.predict(img_patch.reshape(1, img_patch.shape[0], img_patch.shape[1], img_patch.shape[2]), - verbose=0) + label_p_pred = model.predict( + img_patch.reshape(1, img_patch.shape[0], img_patch.shape[1], img_patch.shape[2]), + verbose=0) seg = np.argmax(label_p_pred, axis=3)[0] - - - seg_not_base = label_p_pred[0,:,:,4] - ##seg2 = -label_p_pred[0,:,:,2] - + + seg_not_base = label_p_pred[0, :, :, 4] + # seg2 = -label_p_pred[0,:,:,2] + if self.extract_only_images: - #seg_not_base[seg_not_base>0.3] =1 - seg_not_base[seg_not_base>0.5] =1 - seg_not_base[seg_not_base<1] =0 + # seg_not_base[seg_not_base>0.3] =1 + seg_not_base[seg_not_base > 0.5] = 1 + seg_not_base[seg_not_base < 1] = 0 else: - seg_not_base[seg_not_base>0.03] =1 - seg_not_base[seg_not_base<1] =0 - - - - seg_test = label_p_pred[0,:,:,1] - ##seg2 = -label_p_pred[0,:,:,2] - - - seg_test[seg_test>0.75] =1 - seg_test[seg_test<1] =0 - - - seg_line = label_p_pred[0,:,:,3] - ##seg2 = -label_p_pred[0,:,:,2] - - - seg_line[seg_line>0.1] =1 - 
seg_line[seg_line<1] =0 - + seg_not_base[seg_not_base > 0.03] = 1 + seg_not_base[seg_not_base < 1] = 0 + + seg_test = label_p_pred[0, :, :, 1] + # seg2 = -label_p_pred[0,:,:,2] + + seg_test[seg_test > 0.75] = 1 + seg_test[seg_test < 1] = 0 + + seg_line = label_p_pred[0, :, :, 3] + # seg2 = -label_p_pred[0,:,:,2] + + seg_line[seg_line > 0.1] = 1 + seg_line[seg_line < 1] = 0 + if not self.extract_only_images: - seg_background = label_p_pred[0,:,:,0] - seg_background[seg_background>0.25] =1 - seg_background[seg_background<1] =0 - ##seg = seg+seg2 - #seg = label_p_pred[0,:,:,2] - #seg[seg>0.4] =1 - #seg[seg<1] =0 - - ##plt.imshow(seg_test) - ##plt.show() - - ##plt.imshow(seg_background) - ##plt.show() - #seg[seg==1]=0 - #seg[seg_test==1]=1 - ###seg[seg_not_base==1]=4 + seg_background = label_p_pred[0, :, :, 0] + seg_background[seg_background > 0.25] = 1 + seg_background[seg_background < 1] = 0 + # seg = seg+seg2 + # seg = label_p_pred[0,:,:,2] + # seg[seg>0.4] =1 + # seg[seg<1] =0 + + # plt.imshow(seg_test) + # plt.show() + + # plt.imshow(seg_background) + # plt.show() + # seg[seg==1]=0 + # seg[seg_test==1]=1 + # seg[seg_not_base==1]=4 if not self.extract_only_images: - seg[seg_background==1]=0 - seg[(seg_line==1) & (seg==0)]=3 + seg[seg_background == 1] = 0 + seg[(seg_line == 1) & (seg == 0)] = 3 seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2) if i == 0 and j == 0: - seg_color = seg_color[0 : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :] - seg = seg[0 : seg.shape[0] - margin, 0 : seg.shape[1] - margin] - mask_true[index_y_d + 0 : index_y_u - margin, index_x_d + 0 : index_x_u - margin] = seg - prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg_color + seg_color = seg_color[0: seg_color.shape[0] - margin, 0: seg_color.shape[1] - margin, :] + seg = seg[0: seg.shape[0] - margin, 0: seg.shape[1] - margin] + mask_true[index_y_d + 0: index_y_u - margin, index_x_d + 0: index_x_u - margin] = seg + prediction_true[index_y_d + 0: index_y_u - margin, index_x_d + 0: index_x_u - margin, + :] = seg_color elif i == nxf - 1 and j == nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - 0, :] - seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - 0] - mask_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - 0] = seg - prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - 0, :] = seg_color + seg_color = seg_color[margin: seg_color.shape[0] - 0, margin: seg_color.shape[1] - 0, :] + seg = seg[margin: seg.shape[0] - 0, margin: seg.shape[1] - 0] + mask_true[index_y_d + margin: index_y_u - 0, index_x_d + margin: index_x_u - 0] = seg + prediction_true[index_y_d + margin: index_y_u - 0, index_x_d + margin: index_x_u - 0, + :] = seg_color elif i == 0 and j == nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - 0, 0 : seg_color.shape[1] - margin, :] - seg = seg[margin : seg.shape[0] - 0, 0 : seg.shape[1] - margin] - mask_true[index_y_d + margin : index_y_u - 0, index_x_d + 0 : index_x_u - margin] = seg - prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + 0 : index_x_u - margin, :] = seg_color + seg_color = seg_color[margin: seg_color.shape[0] - 0, 0: seg_color.shape[1] - margin, :] + seg = seg[margin: seg.shape[0] - 0, 0: seg.shape[1] - margin] + mask_true[index_y_d + margin: index_y_u - 0, index_x_d + 0: index_x_u - margin] = seg + prediction_true[index_y_d + margin: index_y_u - 0, index_x_d + 0: index_x_u - margin, + :] = 
seg_color elif i == nxf - 1 and j == 0: - seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :] - seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - 0] - mask_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - 0] = seg - prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg_color + seg_color = seg_color[0: seg_color.shape[0] - margin, margin: seg_color.shape[1] - 0, :] + seg = seg[0: seg.shape[0] - margin, margin: seg.shape[1] - 0] + mask_true[index_y_d + 0: index_y_u - margin, index_x_d + margin: index_x_u - 0] = seg + prediction_true[index_y_d + 0: index_y_u - margin, index_x_d + margin: index_x_u - 0, + :] = seg_color elif i == 0 and j != 0 and j != nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :] - seg = seg[margin : seg.shape[0] - margin, 0 : seg.shape[1] - margin] - mask_true[index_y_d + margin : index_y_u - margin, index_x_d + 0 : index_x_u - margin] = seg - prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg_color + seg_color = seg_color[margin: seg_color.shape[0] - margin, 0: seg_color.shape[1] - margin, :] + seg = seg[margin: seg.shape[0] - margin, 0: seg.shape[1] - margin] + mask_true[index_y_d + margin: index_y_u - margin, index_x_d + 0: index_x_u - margin] = seg + prediction_true[index_y_d + margin: index_y_u - margin, index_x_d + 0: index_x_u - margin, + :] = seg_color elif i == nxf - 1 and j != 0 and j != nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :] - seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - 0] - mask_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - 0] = seg - prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg_color + seg_color = seg_color[margin: seg_color.shape[0] - margin, margin: seg_color.shape[1] - 0, :] + seg = seg[margin: seg.shape[0] - margin, margin: seg.shape[1] - 0] + mask_true[index_y_d + margin: index_y_u - margin, index_x_d + margin: index_x_u - 0] = seg + prediction_true[index_y_d + margin: index_y_u - margin, index_x_d + margin: index_x_u - 0, + :] = seg_color elif i != 0 and i != nxf - 1 and j == 0: - seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :] - seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - margin] - mask_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - margin] = seg - prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg_color + seg_color = seg_color[0: seg_color.shape[0] - margin, margin: seg_color.shape[1] - margin, :] + seg = seg[0: seg.shape[0] - margin, margin: seg.shape[1] - margin] + mask_true[index_y_d + 0: index_y_u - margin, index_x_d + margin: index_x_u - margin] = seg + prediction_true[index_y_d + 0: index_y_u - margin, index_x_d + margin: index_x_u - margin, + :] = seg_color elif i != 0 and i != nxf - 1 and j == nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - margin, :] - seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - margin] - mask_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - margin] = seg - prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - margin, :] = seg_color + seg_color = seg_color[margin: 
seg_color.shape[0] - 0, margin: seg_color.shape[1] - margin, :] + seg = seg[margin: seg.shape[0] - 0, margin: seg.shape[1] - margin] + mask_true[index_y_d + margin: index_y_u - 0, index_x_d + margin: index_x_u - margin] = seg + prediction_true[index_y_d + margin: index_y_u - 0, index_x_d + margin: index_x_u - margin, + :] = seg_color else: - seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :] - seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - margin] - mask_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - margin] = seg - prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg_color + seg_color = seg_color[margin: seg_color.shape[0] - margin, margin: seg_color.shape[1] - margin, + :] + seg = seg[margin: seg.shape[0] - margin, margin: seg.shape[1] - margin] + mask_true[index_y_d + margin: index_y_u - margin, index_x_d + margin: index_x_u - margin] = seg + prediction_true[index_y_d + margin: index_y_u - margin, index_x_d + margin: index_x_u - margin, + :] = seg_color prediction_true = prediction_true.astype(np.uint8) return prediction_true @@ -1014,10 +1032,10 @@ class Eynollah: cont_page = [] if not self.ignore_page_extraction: img = cv2.GaussianBlur(self.image, (5, 5), 0) - + if not self.dir_in: model_page, session_page = self.start_new_session_and_model(self.model_page_dir) - + if not self.dir_in: img_page_prediction = self.do_prediction(False, img, model_page) else: @@ -1026,8 +1044,8 @@ class Eynollah: _, thresh = cv2.threshold(imgray, 0, 255, 0) thresh = cv2.dilate(thresh, KERNEL, iterations=3) contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - - if len(contours)>0: + + if len(contours) > 0: cnt_size = np.array([cv2.contourArea(contours[j]) for j in range(len(contours))]) cnt = contours[np.argmax(cnt_size)] x, y, w, h = cv2.boundingRect(cnt) @@ -1045,28 +1063,32 @@ class Eynollah: box = [x, y, w, h] else: box = [0, 0, img.shape[1], img.shape[0]] - croped_page, page_coord = crop_image_inside_box(box, self.image) - cont_page.append(np.array([[page_coord[2], page_coord[0]], [page_coord[3], page_coord[0]], [page_coord[3], page_coord[1]], [page_coord[2], page_coord[1]]])) - + cropped_page, page_coord = crop_image_inside_box(box, self.image) + cont_page.append(np.array( + [[page_coord[2], page_coord[0]], [page_coord[3], page_coord[0]], [page_coord[3], page_coord[1]], + [page_coord[2], page_coord[1]]])) + self.logger.debug("exit extract_page") else: box = [0, 0, self.image.shape[1], self.image.shape[0]] - croped_page, page_coord = crop_image_inside_box(box, self.image) - cont_page.append(np.array([[page_coord[2], page_coord[0]], [page_coord[3], page_coord[0]], [page_coord[3], page_coord[1]], [page_coord[2], page_coord[1]]])) - return croped_page, page_coord, cont_page + cropped_page, page_coord = crop_image_inside_box(box, self.image) + cont_page.append(np.array( + [[page_coord[2], page_coord[0]], [page_coord[3], page_coord[0]], [page_coord[3], page_coord[1]], + [page_coord[2], page_coord[1]]])) + return cropped_page, page_coord, cont_page - def early_page_for_num_of_column_classification(self,img_bin): + def early_page_for_num_of_column_classification(self, img_bin): if not self.ignore_page_extraction: self.logger.debug("enter early_page_for_num_of_column_classification") if self.input_binary: - img =np.copy(img_bin) + img = np.copy(img_bin) img = img.astype(np.uint8) else: img = self.imread() if not self.dir_in: 
model_page, session_page = self.start_new_session_and_model(self.model_page_dir) img = cv2.GaussianBlur(img, (5, 5), 0) - + if self.dir_in: img_page_prediction = self.do_prediction(False, img, self.model_page) else: @@ -1076,28 +1098,29 @@ class Eynollah: _, thresh = cv2.threshold(imgray, 0, 255, 0) thresh = cv2.dilate(thresh, KERNEL, iterations=3) contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - if len(contours)>0: + if len(contours) > 0: cnt_size = np.array([cv2.contourArea(contours[j]) for j in range(len(contours))]) cnt = contours[np.argmax(cnt_size)] x, y, w, h = cv2.boundingRect(cnt) box = [x, y, w, h] else: box = [0, 0, img.shape[1], img.shape[0]] - croped_page, page_coord = crop_image_inside_box(box, img) - + cropped_page, page_coord = crop_image_inside_box(box, img) + self.logger.debug("exit early_page_for_num_of_column_classification") else: img = self.imread() box = [0, 0, img.shape[1], img.shape[0]] - croped_page, page_coord = crop_image_inside_box(box, img) - return croped_page, page_coord + cropped_page, page_coord = crop_image_inside_box(box, img) + return cropped_page, page_coord def extract_text_regions(self, img, patches, cols): self.logger.debug("enter extract_text_regions") img_height_h = img.shape[0] img_width_h = img.shape[1] if not self.dir_in: - model_region, session_region = self.start_new_session_and_model(self.model_region_dir_fully if patches else self.model_region_dir_fully_np) + model_region, session_region = self.start_new_session_and_model( + self.model_region_dir_fully if patches else self.model_region_dir_fully_np) else: model_region = self.model_region_fl if patches else self.model_region_fl_np @@ -1156,39 +1179,40 @@ class Eynollah: if (self.scale_x == 1 and img_width_h > 4000) or (self.scale_x != 1 and img_width_h > 3700): img = otsu_copy_binary(img) img = img.astype(np.uint8) - img= resize_image(img, int(img_height_h * 3700 / float(img_width_h)), 3700) + img = resize_image(img, int(img_height_h * 3700 / float(img_width_h)), 3700) else: img = otsu_copy_binary(img) img = img.astype(np.uint8) - img= resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)) + img = resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)) if cols == 5: if self.scale_x == 1 and img_width_h > 5000: img = otsu_copy_binary(img) img = img.astype(np.uint8) - img= resize_image(img, int(img_height_h * 0.7), int(img_width_h * 0.7)) + img = resize_image(img, int(img_height_h * 0.7), int(img_width_h * 0.7)) else: img = otsu_copy_binary(img) img = img.astype(np.uint8) - img= resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9) ) + img = resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)) if cols >= 6: if img_width_h > 5600: img = otsu_copy_binary(img) img = img.astype(np.uint8) - img= resize_image(img, int(img_height_h * 5600 / float(img_width_h)), 5600) + img = resize_image(img, int(img_height_h * 5600 / float(img_width_h)), 5600) else: img = otsu_copy_binary(img) img = img.astype(np.uint8) - img= resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)) + img = resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)) marginal_of_patch_percent = 0.1 prediction_regions = self.do_prediction(patches, img, model_region, marginal_of_patch_percent) prediction_regions = resize_image(prediction_regions, img_height_h, img_width_h) self.logger.debug("exit extract_text_regions") return prediction_regions, prediction_regions2 - - def get_slopes_and_deskew_new_light(self, contours, contours_par, 
textline_mask_tot, image_page_rotated, boxes, slope_deskew): + + def get_slopes_and_deskew_new_light(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, + slope_deskew): self.logger.debug("enter get_slopes_and_deskew_new") num_cores = cpu_count() queue_of_all_params = Queue() @@ -1197,10 +1221,10 @@ class Eynollah: nh = np.linspace(0, len(boxes), num_cores + 1) indexes_by_text_con = np.array(range(len(contours_par))) for i in range(num_cores): - boxes_per_process = boxes[int(nh[i]) : int(nh[i + 1])] - contours_per_process = contours[int(nh[i]) : int(nh[i + 1])] - contours_par_per_process = contours_par[int(nh[i]) : int(nh[i + 1])] - indexes_text_con_per_process = indexes_by_text_con[int(nh[i]) : int(nh[i + 1])] + boxes_per_process = boxes[int(nh[i]): int(nh[i + 1])] + contours_per_process = contours[int(nh[i]): int(nh[i + 1])] + contours_par_per_process = contours_par[int(nh[i]): int(nh[i + 1])] + indexes_text_con_per_process = indexes_by_text_con[int(nh[i]): int(nh[i + 1])] processes.append(Process(target=self.do_work_of_slopes_new_light, args=(queue_of_all_params, boxes_per_process, textline_mask_tot, contours_per_process, contours_par_per_process, indexes_text_con_per_process, image_page_rotated, slope_deskew))) for i in range(num_cores): @@ -1236,7 +1260,8 @@ class Eynollah: self.logger.debug("exit get_slopes_and_deskew_new") return slopes, all_found_textline_polygons, boxes, all_found_text_regions, all_found_text_regions_par, all_box_coord, all_index_text_con - def get_slopes_and_deskew_new(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, slope_deskew): + def get_slopes_and_deskew_new(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, + slope_deskew): self.logger.debug("enter get_slopes_and_deskew_new") num_cores = cpu_count() queue_of_all_params = Queue() @@ -1245,12 +1270,15 @@ class Eynollah: nh = np.linspace(0, len(boxes), num_cores + 1) indexes_by_text_con = np.array(range(len(contours_par))) for i in range(num_cores): - boxes_per_process = boxes[int(nh[i]) : int(nh[i + 1])] - contours_per_process = contours[int(nh[i]) : int(nh[i + 1])] - contours_par_per_process = contours_par[int(nh[i]) : int(nh[i + 1])] - indexes_text_con_per_process = indexes_by_text_con[int(nh[i]) : int(nh[i + 1])] - - processes.append(Process(target=self.do_work_of_slopes_new, args=(queue_of_all_params, boxes_per_process, textline_mask_tot, contours_per_process, contours_par_per_process, indexes_text_con_per_process, image_page_rotated, slope_deskew))) + boxes_per_process = boxes[int(nh[i]): int(nh[i + 1])] + contours_per_process = contours[int(nh[i]): int(nh[i + 1])] + contours_par_per_process = contours_par[int(nh[i]): int(nh[i + 1])] + indexes_text_con_per_process = indexes_by_text_con[int(nh[i]): int(nh[i + 1])] + + processes.append(Process(target=self.do_work_of_slopes_new, args=( + queue_of_all_params, boxes_per_process, textline_mask_tot, contours_per_process, + contours_par_per_process, + indexes_text_con_per_process, image_page_rotated, slope_deskew))) for i in range(num_cores): processes[i].start() @@ -1284,7 +1312,8 @@ class Eynollah: self.logger.debug("exit get_slopes_and_deskew_new") return slopes, all_found_textline_polygons, boxes, all_found_text_regions, all_found_text_regions_par, all_box_coord, all_index_text_con - def get_slopes_and_deskew_new_curved(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, mask_texts_only, num_col, scale_par, slope_deskew): + def 
get_slopes_and_deskew_new_curved(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, + mask_texts_only, num_col, scale_par, slope_deskew): self.logger.debug("enter get_slopes_and_deskew_new_curved") num_cores = cpu_count() queue_of_all_params = Queue() @@ -1294,12 +1323,15 @@ class Eynollah: indexes_by_text_con = np.array(range(len(contours_par))) for i in range(num_cores): - boxes_per_process = boxes[int(nh[i]) : int(nh[i + 1])] - contours_per_process = contours[int(nh[i]) : int(nh[i + 1])] - contours_par_per_process = contours_par[int(nh[i]) : int(nh[i + 1])] - indexes_text_con_per_process = indexes_by_text_con[int(nh[i]) : int(nh[i + 1])] + boxes_per_process = boxes[int(nh[i]): int(nh[i + 1])] + contours_per_process = contours[int(nh[i]): int(nh[i + 1])] + contours_par_per_process = contours_par[int(nh[i]): int(nh[i + 1])] + indexes_text_con_per_process = indexes_by_text_con[int(nh[i]): int(nh[i + 1])] - processes.append(Process(target=self.do_work_of_slopes_new_curved, args=(queue_of_all_params, boxes_per_process, textline_mask_tot, contours_per_process, contours_par_per_process, image_page_rotated, mask_texts_only, num_col, scale_par, indexes_text_con_per_process, slope_deskew))) + processes.append(Process(target=self.do_work_of_slopes_new_curved, args=( + queue_of_all_params, boxes_per_process, textline_mask_tot, contours_per_process, + contours_par_per_process, + image_page_rotated, mask_texts_only, num_col, scale_par, indexes_text_con_per_process, slope_deskew))) for i in range(num_cores): processes[i].start() @@ -1335,7 +1367,9 @@ class Eynollah: # print(slopes,'slopes') return all_found_textline_polygons, boxes, all_found_text_regions, all_found_text_regions_par, all_box_coord, all_index_text_con, slopes - def do_work_of_slopes_new_curved(self, queue_of_all_params, boxes_text, textline_mask_tot_ea, contours_per_process, contours_par_per_process, image_page_rotated, mask_texts_only, num_col, scale_par, indexes_r_con_per_pro, slope_deskew): + def do_work_of_slopes_new_curved(self, queue_of_all_params, boxes_text, textline_mask_tot_ea, contours_per_process, + contours_par_per_process, image_page_rotated, mask_texts_only, num_col, scale_par, + indexes_r_con_per_pro, slope_deskew): self.logger.debug("enter do_work_of_slopes_new_curved") slopes_per_each_subprocess = [] bounding_box_of_textregion_per_each_subprocess = [] @@ -1349,7 +1383,8 @@ class Eynollah: for mv in range(len(boxes_text)): - all_text_region_raw = textline_mask_tot_ea[boxes_text[mv][1] : boxes_text[mv][1] + boxes_text[mv][3], boxes_text[mv][0] : boxes_text[mv][0] + boxes_text[mv][2]] + all_text_region_raw = textline_mask_tot_ea[boxes_text[mv][1]: boxes_text[mv][1] + boxes_text[mv][3], + boxes_text[mv][0]: boxes_text[mv][0] + boxes_text[mv][2]] all_text_region_raw = all_text_region_raw.astype(np.uint8) img_int_p = all_text_region_raw[:, :] @@ -1363,7 +1398,8 @@ class Eynollah: else: try: textline_con, hierarchy = return_contours_of_image(img_int_p) - textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con, hierarchy, max_area=1, min_area=0.0008) + textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con, hierarchy, max_area=1, + min_area=0.0008) y_diff_mean = find_contours_mean_y_diff(textline_con_fil) if self.isNaN(y_diff_mean): slope_for_all = MAX_SLOPE @@ -1393,19 +1429,20 @@ class Eynollah: x, y, w, h = cv2.boundingRect(cnt_o_t_max) mask_biggest = np.zeros(mask_texts_only.shape) mask_biggest = cv2.fillPoly(mask_biggest, pts=[cnt_o_t_max], color=(1, 1, 1)) - 
mask_region_in_patch_region = mask_biggest[y : y + h, x : x + w] + mask_region_in_patch_region = mask_biggest[y: y + h, x: x + w] textline_biggest_region = mask_biggest * textline_mask_tot_ea # print(slope_for_all,'slope_for_all') - textline_rotated_separated = separate_lines_new2(textline_biggest_region[y : y + h, x : x + w], 0, num_col, slope_for_all, plotter=self.plotter) + textline_rotated_separated = separate_lines_new2(textline_biggest_region[y: y + h, x: x + w], 0, + num_col, slope_for_all, plotter=self.plotter) # new line added - ##print(np.shape(textline_rotated_separated),np.shape(mask_biggest)) + # print(np.shape(textline_rotated_separated),np.shape(mask_biggest)) textline_rotated_separated[mask_region_in_patch_region[:, :] != 1] = 0 # till here - textline_cnt_separated[y : y + h, x : x + w] = textline_rotated_separated - textline_region_in_image[y : y + h, x : x + w] = textline_rotated_separated + textline_cnt_separated[y: y + h, x: x + w] = textline_rotated_separated + textline_region_in_image[y: y + h, x: x + w] = textline_rotated_separated # plt.imshow(textline_region_in_image) # plt.show() @@ -1425,7 +1462,8 @@ class Eynollah: mask_biggest2 = cv2.dilate(mask_biggest2, KERNEL, iterations=4) pixel_img = 1 - mask_biggest2 = resize_image(mask_biggest2, int(mask_biggest2.shape[0] * scale_par), int(mask_biggest2.shape[1] * scale_par)) + mask_biggest2 = resize_image(mask_biggest2, int(mask_biggest2.shape[0] * scale_par), + int(mask_biggest2.shape[1] * scale_par)) cnt_textlines_in_image_ind = return_contours_of_interested_textline(mask_biggest2, pixel_img) try: textlines_cnt_per_region.append(cnt_textlines_in_image_ind[0]) @@ -1433,7 +1471,10 @@ class Eynollah: self.logger.error(why) else: add_boxes_coor_into_textlines = True - textlines_cnt_per_region = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contours_par_per_process[mv], boxes_text[mv], add_boxes_coor_into_textlines) + textlines_cnt_per_region = textline_contours_postprocessing(all_text_region_raw, slope_for_all, + contours_par_per_process[mv], + boxes_text[mv], + add_boxes_coor_into_textlines) add_boxes_coor_into_textlines = False # print(np.shape(textlines_cnt_per_region),'textlines_cnt_per_region') @@ -1443,8 +1484,13 @@ class Eynollah: contours_textregion_par_per_each_subprocess.append(contours_par_per_process[mv]) all_box_coord_per_process.append(crop_coor) - queue_of_all_params.put([textlines_rectangles_per_each_subprocess, bounding_box_of_textregion_per_each_subprocess, contours_textregion_per_each_subprocess, contours_textregion_par_per_each_subprocess, all_box_coord_per_process, index_by_text_region_contours, slopes_per_each_subprocess]) - def do_work_of_slopes_new_light(self, queue_of_all_params, boxes_text, textline_mask_tot_ea, contours_per_process, contours_par_per_process, indexes_r_con_per_pro, image_page_rotated, slope_deskew): + queue_of_all_params.put( + [textlines_rectangles_per_each_subprocess, bounding_box_of_textregion_per_each_subprocess, + contours_textregion_per_each_subprocess, contours_textregion_par_per_each_subprocess, + all_box_coord_per_process, index_by_text_region_contours, slopes_per_each_subprocess]) + + def do_work_of_slopes_new_light(self, queue_of_all_params, boxes_text, textline_mask_tot_ea, contours_per_process, + contours_par_per_process, indexes_r_con_per_pro, image_page_rotated, slope_deskew): self.logger.debug('enter do_work_of_slopes_new_light') slopes_per_each_subprocess = [] bounding_box_of_textregion_per_each_subprocess = [] @@ -1454,27 +1500,33 @@ class 
Eynollah: all_box_coord_per_process = [] index_by_text_region_contours = [] for mv in range(len(boxes_text)): - _, crop_coor = crop_image_inside_box(boxes_text[mv],image_page_rotated) - mask_textline = np.zeros((textline_mask_tot_ea.shape)) - mask_textline = cv2.fillPoly(mask_textline,pts=[contours_per_process[mv]],color=(1,1,1)) - all_text_region_raw = (textline_mask_tot_ea*mask_textline[:,:])[boxes_text[mv][1]:boxes_text[mv][1]+boxes_text[mv][3] , boxes_text[mv][0]:boxes_text[mv][0]+boxes_text[mv][2] ] - all_text_region_raw=all_text_region_raw.astype(np.uint8) + _, crop_coor = crop_image_inside_box(boxes_text[mv], image_page_rotated) + mask_textline = np.zeros(textline_mask_tot_ea.shape) + mask_textline = cv2.fillPoly(mask_textline, pts=[contours_per_process[mv]], color=(1, 1, 1)) + all_text_region_raw = (textline_mask_tot_ea * mask_textline[:, :])[ + boxes_text[mv][1]:boxes_text[mv][1] + boxes_text[mv][3], + boxes_text[mv][0]:boxes_text[mv][0] + boxes_text[mv][2]] + all_text_region_raw = all_text_region_raw.astype(np.uint8) slopes_per_each_subprocess.append([slope_deskew][0]) mask_only_con_region = np.zeros(textline_mask_tot_ea.shape) - mask_only_con_region = cv2.fillPoly(mask_only_con_region, pts=[contours_par_per_process[mv]], color=(1, 1, 1)) + mask_only_con_region = cv2.fillPoly(mask_only_con_region, pts=[contours_par_per_process[mv]], + color=(1, 1, 1)) # plt.imshow(mask_only_con_region) # plt.show() - + if self.textline_light: all_text_region_raw = np.copy(textline_mask_tot_ea) all_text_region_raw[mask_only_con_region == 0] = 0 cnt_clean_rot_raw, hir_on_cnt_clean_rot = return_contours_of_image(all_text_region_raw) - cnt_clean_rot = filter_contours_area_of_image(all_text_region_raw, cnt_clean_rot_raw, hir_on_cnt_clean_rot, max_area=1, min_area=0.00001) + cnt_clean_rot = filter_contours_area_of_image(all_text_region_raw, cnt_clean_rot_raw, + hir_on_cnt_clean_rot, max_area=1, min_area=0.00001) else: - all_text_region_raw = np.copy(textline_mask_tot_ea[boxes_text[mv][1] : boxes_text[mv][1] + boxes_text[mv][3], boxes_text[mv][0] : boxes_text[mv][0] + boxes_text[mv][2]]) - mask_only_con_region = mask_only_con_region[boxes_text[mv][1] : boxes_text[mv][1] + boxes_text[mv][3], boxes_text[mv][0] : boxes_text[mv][0] + boxes_text[mv][2]] + all_text_region_raw = np.copy( + textline_mask_tot_ea[boxes_text[mv][1]: boxes_text[mv][1] + boxes_text[mv][3], + boxes_text[mv][0]: boxes_text[mv][0] + boxes_text[mv][2]]) + mask_only_con_region = mask_only_con_region[boxes_text[mv][1]: boxes_text[mv][1] + boxes_text[mv][3], boxes_text[mv][0]: boxes_text[mv][0] + boxes_text[mv][2]] all_text_region_raw[mask_only_con_region == 0] = 0 cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, [slope_deskew][0], contours_par_per_process[mv], boxes_text[mv]) @@ -1485,9 +1537,13 @@ class Eynollah: contours_textregion_per_each_subprocess.append(contours_per_process[mv]) contours_textregion_par_per_each_subprocess.append(contours_par_per_process[mv]) all_box_coord_per_process.append(crop_coor) - queue_of_all_params.put([slopes_per_each_subprocess, textlines_rectangles_per_each_subprocess, bounding_box_of_textregion_per_each_subprocess, contours_textregion_per_each_subprocess, contours_textregion_par_per_each_subprocess, all_box_coord_per_process, index_by_text_region_contours]) - - def do_work_of_slopes_new(self, queue_of_all_params, boxes_text, textline_mask_tot_ea, contours_per_process, contours_par_per_process, indexes_r_con_per_pro, image_page_rotated, slope_deskew): + 
queue_of_all_params.put([slopes_per_each_subprocess, textlines_rectangles_per_each_subprocess, + bounding_box_of_textregion_per_each_subprocess, + contours_textregion_per_each_subprocess, contours_textregion_par_per_each_subprocess, + all_box_coord_per_process, index_by_text_region_contours]) + + def do_work_of_slopes_new(self, queue_of_all_params, boxes_text, textline_mask_tot_ea, contours_per_process, + contours_par_per_process, indexes_r_con_per_pro, image_page_rotated, slope_deskew): self.logger.debug('enter do_work_of_slopes_new') slopes_per_each_subprocess = [] bounding_box_of_textregion_per_each_subprocess = [] @@ -1497,26 +1553,31 @@ class Eynollah: all_box_coord_per_process = [] index_by_text_region_contours = [] for mv in range(len(boxes_text)): - _, crop_coor = crop_image_inside_box(boxes_text[mv],image_page_rotated) - mask_textline = np.zeros((textline_mask_tot_ea.shape)) - mask_textline = cv2.fillPoly(mask_textline,pts=[contours_per_process[mv]],color=(1,1,1)) - all_text_region_raw = (textline_mask_tot_ea*mask_textline[:,:])[boxes_text[mv][1]:boxes_text[mv][1]+boxes_text[mv][3] , boxes_text[mv][0]:boxes_text[mv][0]+boxes_text[mv][2] ] - all_text_region_raw=all_text_region_raw.astype(np.uint8) - img_int_p=all_text_region_raw[:,:]#self.all_text_region_raw[mv] - img_int_p=cv2.erode(img_int_p,KERNEL,iterations = 2) - - if img_int_p.shape[0]/img_int_p.shape[1]<0.1: + _, crop_coor = crop_image_inside_box(boxes_text[mv], image_page_rotated) + mask_textline = np.zeros(textline_mask_tot_ea.shape) + mask_textline = cv2.fillPoly(mask_textline, pts=[contours_per_process[mv]], color=(1, 1, 1)) + all_text_region_raw = (textline_mask_tot_ea * mask_textline[:, :])[ + boxes_text[mv][1]:boxes_text[mv][1] + boxes_text[mv][3], + boxes_text[mv][0]:boxes_text[mv][0] + boxes_text[mv][2]] + all_text_region_raw = all_text_region_raw.astype(np.uint8) + img_int_p = all_text_region_raw[:, :] # self.all_text_region_raw[mv] + img_int_p = cv2.erode(img_int_p, KERNEL, iterations=2) + + if img_int_p.shape[0] / img_int_p.shape[1] < 0.1: slopes_per_each_subprocess.append(0) slope_for_all = [slope_deskew][0] - all_text_region_raw = textline_mask_tot_ea[boxes_text[mv][1] : boxes_text[mv][1] + boxes_text[mv][3], boxes_text[mv][0] : boxes_text[mv][0] + boxes_text[mv][2]] - cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contours_par_per_process[mv], boxes_text[mv], 0) + all_text_region_raw = textline_mask_tot_ea[boxes_text[mv][1]: boxes_text[mv][1] + boxes_text[mv][3], + boxes_text[mv][0]: boxes_text[mv][0] + boxes_text[mv][2]] + cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_for_all, + contours_par_per_process[mv], boxes_text[mv], 0) textlines_rectangles_per_each_subprocess.append(cnt_clean_rot) index_by_text_region_contours.append(indexes_r_con_per_pro[mv]) bounding_box_of_textregion_per_each_subprocess.append(boxes_text[mv]) else: try: textline_con, hierarchy = return_contours_of_image(img_int_p) - textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con, hierarchy, max_area=1, min_area=0.00008) + textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con, hierarchy, max_area=1, + min_area=0.00008) y_diff_mean = find_contours_mean_y_diff(textline_con_fil) if self.isNaN(y_diff_mean): slope_for_all = MAX_SLOPE @@ -1535,22 +1596,27 @@ class Eynollah: slope_for_all = [slope_deskew][0] slopes_per_each_subprocess.append(slope_for_all) mask_only_con_region = np.zeros(textline_mask_tot_ea.shape) - mask_only_con_region = 
cv2.fillPoly(mask_only_con_region, pts=[contours_par_per_process[mv]], color=(1, 1, 1)) + mask_only_con_region = cv2.fillPoly(mask_only_con_region, pts=[contours_par_per_process[mv]], + color=(1, 1, 1)) # plt.imshow(mask_only_con_region) # plt.show() - all_text_region_raw = np.copy(textline_mask_tot_ea[boxes_text[mv][1] : boxes_text[mv][1] + boxes_text[mv][3], boxes_text[mv][0] : boxes_text[mv][0] + boxes_text[mv][2]]) - mask_only_con_region = mask_only_con_region[boxes_text[mv][1] : boxes_text[mv][1] + boxes_text[mv][3], boxes_text[mv][0] : boxes_text[mv][0] + boxes_text[mv][2]] + all_text_region_raw = np.copy( + textline_mask_tot_ea[boxes_text[mv][1]: boxes_text[mv][1] + boxes_text[mv][3], + boxes_text[mv][0]: boxes_text[mv][0] + boxes_text[mv][2]]) + mask_only_con_region = mask_only_con_region[boxes_text[mv][1]: boxes_text[mv][1] + boxes_text[mv][3], + boxes_text[mv][0]: boxes_text[mv][0] + boxes_text[mv][2]] - ##plt.imshow(textline_mask_tot_ea) - ##plt.show() - ##plt.imshow(all_text_region_raw) - ##plt.show() - ##plt.imshow(mask_only_con_region) - ##plt.show() + # plt.imshow(textline_mask_tot_ea) + # plt.show() + # plt.imshow(all_text_region_raw) + # plt.show() + # plt.imshow(mask_only_con_region) + # plt.show() all_text_region_raw[mask_only_con_region == 0] = 0 - cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contours_par_per_process[mv], boxes_text[mv]) + cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_for_all, + contours_par_per_process[mv], boxes_text[mv]) textlines_rectangles_per_each_subprocess.append(cnt_clean_rot) index_by_text_region_contours.append(indexes_r_con_per_pro[mv]) @@ -1559,12 +1625,16 @@ class Eynollah: contours_textregion_per_each_subprocess.append(contours_per_process[mv]) contours_textregion_par_per_each_subprocess.append(contours_par_per_process[mv]) all_box_coord_per_process.append(crop_coor) - queue_of_all_params.put([slopes_per_each_subprocess, textlines_rectangles_per_each_subprocess, bounding_box_of_textregion_per_each_subprocess, contours_textregion_per_each_subprocess, contours_textregion_par_per_each_subprocess, all_box_coord_per_process, index_by_text_region_contours]) + queue_of_all_params.put([slopes_per_each_subprocess, textlines_rectangles_per_each_subprocess, + bounding_box_of_textregion_per_each_subprocess, + contours_textregion_per_each_subprocess, contours_textregion_par_per_each_subprocess, + all_box_coord_per_process, index_by_text_region_contours]) def textline_contours(self, img, patches, scaler_h, scaler_w): self.logger.debug('enter textline_contours') if not self.dir_in: - model_textline, session_textline = self.start_new_session_and_model(self.model_textline_dir if patches else self.model_textline_dir_np) + model_textline, session_textline = self.start_new_session_and_model( + self.model_textline_dir if patches else self.model_textline_dir_np) img = img.astype(np.uint8) img_org = np.copy(img) img_h = img_org.shape[0] @@ -1580,14 +1650,12 @@ class Eynollah: else: prediction_textline_longshot = self.do_prediction(False, img, self.model_textline) prediction_textline_longshot_true_size = resize_image(prediction_textline_longshot, img_h, img_w) - if self.textline_light: - return (prediction_textline[:, :, 0]==1)*1, (prediction_textline_longshot_true_size[:, :, 0]==1)*1 + return (prediction_textline[:, :, 0] == 1) * 1, (prediction_textline_longshot_true_size[:, :, 0] == 1) * 1 else: return prediction_textline[:, :, 0], prediction_textline_longshot_true_size[:, :, 0] - def 
do_work_of_slopes(self, q, poly, box_sub, boxes_per_process, textline_mask_tot, contours_per_process): self.logger.debug('enter do_work_of_slopes') slope_biggest = 0 @@ -1595,12 +1663,14 @@ class Eynollah: boxes_sub_new = [] poly_sub = [] for mv in range(len(boxes_per_process)): - crop_img, _ = crop_image_inside_box(boxes_per_process[mv], np.repeat(textline_mask_tot[:, :, np.newaxis], 3, axis=2)) + crop_img, _ = crop_image_inside_box(boxes_per_process[mv], + np.repeat(textline_mask_tot[:, :, np.newaxis], 3, axis=2)) crop_img = crop_img[:, :, 0] crop_img = cv2.erode(crop_img, KERNEL, iterations=2) try: textline_con, hierarchy = return_contours_of_image(crop_img) - textline_con_fil = filter_contours_area_of_image(crop_img, textline_con, hierarchy, max_area=1, min_area=0.0008) + textline_con_fil = filter_contours_area_of_image(crop_img, textline_con, hierarchy, max_area=1, + min_area=0.0008) y_diff_mean = find_contours_mean_y_diff(textline_con_fil) sigma_des = max(1, int(y_diff_mean * (4.0 / 40.0))) crop_img[crop_img > 0] = 1 @@ -1613,7 +1683,8 @@ class Eynollah: slope_corresponding_textregion = slope_biggest slopes_sub.append(slope_corresponding_textregion) - cnt_clean_rot = textline_contours_postprocessing(crop_img, slope_corresponding_textregion, contours_per_process[mv], boxes_per_process[mv]) + cnt_clean_rot = textline_contours_postprocessing(crop_img, slope_corresponding_textregion, + contours_per_process[mv], boxes_per_process[mv]) poly_sub.append(cnt_clean_rot) boxes_sub_new.append(boxes_per_process[mv]) @@ -1622,7 +1693,7 @@ class Eynollah: poly.put(poly_sub) box_sub.put(boxes_sub_new) - def get_regions_light_v_extract_only_images(self,img,is_image_enhanced, num_col_classifier): + def get_regions_light_v_extract_only_images(self, img, is_image_enhanced, num_col_classifier): self.logger.debug("enter get_regions_extract_images_only") erosion_hurts = False img_org = np.copy(img) @@ -1643,69 +1714,65 @@ class Eynollah: img_w_new = 2500 img_h_new = int(img.shape[0] / float(img.shape[1]) * img_w_new) - img_resized = resize_image(img,img_h_new, img_w_new ) - - + img_resized = resize_image(img, img_h_new, img_w_new) if not self.dir_in: - model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_ens_light_only_images_extraction) + model_region, session_region = self.start_new_session_and_model( + self.model_region_dir_p_ens_light_only_images_extraction) prediction_regions_org = self.do_prediction_new_concept(True, img_resized, model_region) else: prediction_regions_org = self.do_prediction_new_concept(True, img_resized, self.model_region) - #plt.imshow(prediction_regions_org[:,:,0]) - #plt.show() + # plt.imshow(prediction_regions_org[:,:,0]) + # plt.show() - prediction_regions_org = resize_image(prediction_regions_org,img_height_h, img_width_h ) + prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h) image_page, page_coord, cont_page = self.extract_page() + prediction_regions_org = prediction_regions_org[page_coord[0]: page_coord[1], page_coord[2]: page_coord[3]] - prediction_regions_org = prediction_regions_org[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + prediction_regions_org = prediction_regions_org[:, :, 0] + mask_lines_only = (prediction_regions_org[:, :] == 3) * 1 - prediction_regions_org=prediction_regions_org[:,:,0] + mask_texts_only = (prediction_regions_org[:, :] == 1) * 1 - mask_lines_only = (prediction_regions_org[:,:] ==3)*1 - - mask_texts_only = (prediction_regions_org[:,:] ==1)*1 - - 
mask_images_only=(prediction_regions_org[:,:] ==2)*1 + mask_images_only = (prediction_regions_org[:, :] == 2) * 1 polygons_lines_xml, hir_lines_xml = return_contours_of_image(mask_lines_only) - polygons_lines_xml = textline_con_fil = filter_contours_area_of_image(mask_lines_only, polygons_lines_xml, hir_lines_xml, max_area=1, min_area=0.00001) - + polygons_lines_xml = textline_con_fil = filter_contours_area_of_image(mask_lines_only, polygons_lines_xml, + hir_lines_xml, max_area=1, + min_area=0.00001) - polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) + polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only, 1, 0.00001) - polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) + polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only, 1, 0.00001) text_regions_p_true = np.zeros(prediction_regions_org.shape) - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_lines, color=(3,3,3)) + text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts=polygons_of_only_lines, color=(3, 3, 3)) - text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 + text_regions_p_true[:, :][mask_images_only[:, :] == 1] = 2 - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_texts, color=(1,1,1)) + text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts=polygons_of_only_texts, color=(1, 1, 1)) + text_regions_p_true[text_regions_p_true.shape[0] - 15:text_regions_p_true.shape[0], :] = 0 + text_regions_p_true[:, text_regions_p_true.shape[1] - 15:text_regions_p_true.shape[1]] = 0 - - text_regions_p_true[text_regions_p_true.shape[0]-15:text_regions_p_true.shape[0], :] = 0 - text_regions_p_true[:, text_regions_p_true.shape[1]-15:text_regions_p_true.shape[1]] = 0 - - ##polygons_of_images = return_contours_of_interested_region(text_regions_p_true, 2, 0.0001) + # polygons_of_images = return_contours_of_interested_region(text_regions_p_true, 2, 0.0001) polygons_of_images = return_contours_of_interested_region(text_regions_p_true, 2, 0.001) image_boundary_of_doc = np.zeros((text_regions_p_true.shape[0], text_regions_p_true.shape[1])) - ###image_boundary_of_doc[:6, :] = 1 - ###image_boundary_of_doc[text_regions_p_true.shape[0]-6:text_regions_p_true.shape[0], :] = 1 + # image_boundary_of_doc[:6, :] = 1 + # image_boundary_of_doc[text_regions_p_true.shape[0]-6:text_regions_p_true.shape[0], :] = 1 - ###image_boundary_of_doc[:, :6] = 1 - ###image_boundary_of_doc[:, text_regions_p_true.shape[1]-6:text_regions_p_true.shape[1]] = 1 + # image_boundary_of_doc[:, :6] = 1 + # image_boundary_of_doc[:, text_regions_p_true.shape[1]-6:text_regions_p_true.shape[1]] = 1 - #plt.imshow(image_boundary_of_doc) - #plt.show() + # plt.imshow(image_boundary_of_doc) + # plt.show() polygons_of_images_fin = [] for ploy_img_ind in polygons_of_images: @@ -1734,34 +1801,35 @@ class Eynollah: else: box = [x, y, w, h] _, page_coord_img = crop_image_inside_box(box, text_regions_p_true) - #cont_page.append(np.array([[page_coord[2], page_coord[0]], [page_coord[3], page_coord[0]], [page_coord[3], page_coord[1]], [page_coord[2], page_coord[1]]])) + # cont_page.append(np.array([[page_coord[2], page_coord[0]], [page_coord[3], page_coord[0]], [page_coord[3], page_coord[1]], [page_coord[2], page_coord[1]]])) - polygons_of_images_fin.append(np.array([[page_coord_img[2], page_coord_img[0]], [page_coord_img[3], page_coord_img[0]], [page_coord_img[3], page_coord_img[1]], [page_coord_img[2], 
page_coord_img[1]]]) ) + polygons_of_images_fin.append(np.array( + [[page_coord_img[2], page_coord_img[0]], [page_coord_img[3], page_coord_img[0]], + [page_coord_img[3], page_coord_img[1]], [page_coord_img[2], page_coord_img[1]]])) return text_regions_p_true, erosion_hurts, polygons_lines_xml, polygons_of_images_fin, image_page, page_coord, cont_page - def get_regions_light_v(self,img,is_image_enhanced, num_col_classifier): + + def get_regions_light_v(self, img, is_image_enhanced, num_col_classifier): self.logger.debug("enter get_regions_light_v") erosion_hurts = False img_org = np.copy(img) img_height_h = img_org.shape[0] img_width_h = img_org.shape[1] - #model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_ens) + # model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_ens) - - if num_col_classifier == 1: img_w_new = 1000 img_h_new = int(img_org.shape[0] / float(img_org.shape[1]) * img_w_new) - + elif num_col_classifier == 2: img_w_new = 1500 img_h_new = int(img_org.shape[0] / float(img_org.shape[1]) * img_w_new) - + elif num_col_classifier == 3: img_w_new = 2000 img_h_new = int(img_org.shape[0] / float(img_org.shape[1]) * img_w_new) - + elif num_col_classifier == 4: img_w_new = 2500 img_h_new = int(img_org.shape[0] / float(img_org.shape[1]) * img_w_new) @@ -1771,25 +1839,23 @@ class Eynollah: else: img_w_new = 4000 img_h_new = int(img_org.shape[0] / float(img_org.shape[1]) * img_w_new) - img_resized = resize_image(img,img_h_new, img_w_new ) - + img_resized = resize_image(img, img_h_new, img_w_new) + if not self.dir_in: model_bin, session_bin = self.start_new_session_and_model(self.model_dir_of_binarization) prediction_bin = self.do_prediction(True, img_resized, model_bin) else: prediction_bin = self.do_prediction(True, img_resized, self.model_bin) - prediction_bin=prediction_bin[:,:,0] - prediction_bin = (prediction_bin[:,:]==0)*1 - prediction_bin = prediction_bin*255 - - prediction_bin =np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) - + prediction_bin = prediction_bin[:, :, 0] + prediction_bin = (prediction_bin[:, :] == 0) * 1 + prediction_bin = prediction_bin * 255 + + prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) + prediction_bin = prediction_bin.astype(np.uint16) - #img= np.copy(prediction_bin) + # img= np.copy(prediction_bin) img_bin = np.copy(prediction_bin) - - - + textline_mask_tot_ea = self.run_textline(img_bin) if not self.dir_in: @@ -1797,121 +1863,117 @@ class Eynollah: prediction_regions_org = self.do_prediction_new_concept(True, img_bin, model_region) else: prediction_regions_org = self.do_prediction_new_concept(True, img_bin, self.model_region) - - #plt.imshow(prediction_regions_org[:,:,0]) - #plt.show() - - prediction_regions_org = resize_image(prediction_regions_org,img_height_h, img_width_h ) - textline_mask_tot_ea = resize_image(textline_mask_tot_ea,img_height_h, img_width_h ) - - prediction_regions_org=prediction_regions_org[:,:,0] - - mask_lines_only = (prediction_regions_org[:,:] ==3)*1 - - mask_texts_only = (prediction_regions_org[:,:] ==1)*1 - - mask_images_only=(prediction_regions_org[:,:] ==2)*1 - + + # plt.imshow(prediction_regions_org[:,:,0]) + # plt.show() + + prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h) + textline_mask_tot_ea = resize_image(textline_mask_tot_ea, img_height_h, img_width_h) + + prediction_regions_org = prediction_regions_org[:, :, 0] + + mask_lines_only = (prediction_regions_org[:, :] == 3) * 1 + + 
mask_texts_only = (prediction_regions_org[:, :] == 1) * 1 + + mask_images_only = (prediction_regions_org[:, :] == 2) * 1 + polygons_lines_xml, hir_lines_xml = return_contours_of_image(mask_lines_only) - polygons_lines_xml = textline_con_fil = filter_contours_area_of_image(mask_lines_only, polygons_lines_xml, hir_lines_xml, max_area=1, min_area=0.00001) - - - polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) - - polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) - + polygons_lines_xml = textline_con_fil = filter_contours_area_of_image(mask_lines_only, polygons_lines_xml, + hir_lines_xml, max_area=1, + min_area=0.00001) + + polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only, 1, 0.00001) + + polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only, 1, 0.00001) + text_regions_p_true = np.zeros(prediction_regions_org.shape) - - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_lines, color=(3,3,3)) - - text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 - - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_texts, color=(1,1,1)) - + + text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts=polygons_of_only_lines, color=(3, 3, 3)) + + text_regions_p_true[:, :][mask_images_only[:, :] == 1] = 2 + + text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts=polygons_of_only_texts, color=(1, 1, 1)) + return text_regions_p_true, erosion_hurts, polygons_lines_xml, textline_mask_tot_ea - def get_regions_from_xy_2models(self,img,is_image_enhanced, num_col_classifier): + def get_regions_from_xy_2models(self, img, is_image_enhanced, num_col_classifier): self.logger.debug("enter get_regions_from_xy_2models") erosion_hurts = False img_org = np.copy(img) img_height_h = img_org.shape[0] img_width_h = img_org.shape[1] - + if not self.dir_in: model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_ens) - ratio_y=1.3 - ratio_x=1 + ratio_y = 1.3 + ratio_x = 1 - img = resize_image(img_org, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) + img = resize_image(img_org, int(img_org.shape[0] * ratio_y), int(img_org.shape[1] * ratio_x)) if not self.dir_in: prediction_regions_org_y = self.do_prediction(True, img, model_region) else: prediction_regions_org_y = self.do_prediction(True, img, self.model_region) - prediction_regions_org_y = resize_image(prediction_regions_org_y, img_height_h, img_width_h ) - - #plt.imshow(prediction_regions_org_y[:,:,0]) - #plt.show() - prediction_regions_org_y = prediction_regions_org_y[:,:,0] - mask_zeros_y = (prediction_regions_org_y[:,:]==0)*1 - - ##img_only_regions_with_sep = ( (prediction_regions_org_y[:,:] != 3) & (prediction_regions_org_y[:,:] != 0) )*1 - img_only_regions_with_sep = ( prediction_regions_org_y[:,:] == 1 )*1 + prediction_regions_org_y = resize_image(prediction_regions_org_y, img_height_h, img_width_h) + + # plt.imshow(prediction_regions_org_y[:,:,0]) + # plt.show() + prediction_regions_org_y = prediction_regions_org_y[:, :, 0] + mask_zeros_y = (prediction_regions_org_y[:, :] == 0) * 1 + + # img_only_regions_with_sep = ( (prediction_regions_org_y[:,:] != 3) & (prediction_regions_org_y[:,:] != 0) )*1 + img_only_regions_with_sep = (prediction_regions_org_y[:, :] == 1) * 1 img_only_regions_with_sep = img_only_regions_with_sep.astype(np.uint8) - + try: - img_only_regions = cv2.erode(img_only_regions_with_sep[:,:], KERNEL, iterations=20) + 
img_only_regions = cv2.erode(img_only_regions_with_sep[:, :], KERNEL, iterations=20) _, _ = find_num_col(img_only_regions, num_col_classifier, self.tables, multiplier=6.0) - - img = resize_image(img_org, int(img_org.shape[0]), int(img_org.shape[1]*(1.2 if is_image_enhanced else 1))) - + + img = resize_image(img_org, int(img_org.shape[0]), + int(img_org.shape[1] * (1.2 if is_image_enhanced else 1))) + if self.dir_in: prediction_regions_org = self.do_prediction(True, img, self.model_region) else: prediction_regions_org = self.do_prediction(True, img, model_region) - prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) + prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h) + + prediction_regions_org = prediction_regions_org[:, :, 0] + prediction_regions_org[(prediction_regions_org[:, :] == 1) & (mask_zeros_y[:, :] == 1)] = 0 - prediction_regions_org=prediction_regions_org[:,:,0] - prediction_regions_org[(prediction_regions_org[:,:]==1) & (mask_zeros_y[:,:]==1)]=0 - - if not self.dir_in: model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p2) img = resize_image(img_org, int(img_org.shape[0]), int(img_org.shape[1])) - + if self.dir_in: prediction_regions_org2 = self.do_prediction(True, img, self.model_region_p2, 0.2) else: prediction_regions_org2 = self.do_prediction(True, img, model_region, 0.2) - prediction_regions_org2=resize_image(prediction_regions_org2, img_height_h, img_width_h ) - + prediction_regions_org2 = resize_image(prediction_regions_org2, img_height_h, img_width_h) - mask_zeros2 = (prediction_regions_org2[:,:,0] == 0) - mask_lines2 = (prediction_regions_org2[:,:,0] == 3) - text_sume_early = (prediction_regions_org[:,:] == 1).sum() + mask_zeros2 = (prediction_regions_org2[:, :, 0] == 0) + mask_lines2 = (prediction_regions_org2[:, :, 0] == 3) + text_sume_early = (prediction_regions_org[:, :] == 1).sum() prediction_regions_org_copy = np.copy(prediction_regions_org) - prediction_regions_org_copy[(prediction_regions_org_copy[:,:]==1) & (mask_zeros2[:,:]==1)] = 0 - text_sume_second = ((prediction_regions_org_copy[:,:]==1)*1).sum() + prediction_regions_org_copy[(prediction_regions_org_copy[:, :] == 1) & (mask_zeros2[:, :] == 1)] = 0 + text_sume_second = ((prediction_regions_org_copy[:, :] == 1) * 1).sum() rate_two_models = text_sume_second / float(text_sume_early) * 100 self.logger.info("ratio_of_two_models: %s", rate_two_models) - if not(is_image_enhanced and rate_two_models < RATIO_OF_TWO_MODEL_THRESHOLD): + if not (is_image_enhanced and rate_two_models < RATIO_OF_TWO_MODEL_THRESHOLD): prediction_regions_org = np.copy(prediction_regions_org_copy) - - - prediction_regions_org[(mask_lines2[:,:]==1) & (prediction_regions_org[:,:]==0)]=3 - mask_lines_only=(prediction_regions_org[:,:]==3)*1 - prediction_regions_org = cv2.erode(prediction_regions_org[:,:], KERNEL, iterations=2) + prediction_regions_org[(mask_lines2[:, :] == 1) & (prediction_regions_org[:, :] == 0)] = 3 + mask_lines_only = (prediction_regions_org[:, :] == 3) * 1 + prediction_regions_org = cv2.erode(prediction_regions_org[:, :], KERNEL, iterations=2) + prediction_regions_org = cv2.dilate(prediction_regions_org[:, :], KERNEL, iterations=2) - prediction_regions_org = cv2.dilate(prediction_regions_org[:,:], KERNEL, iterations=2) - - - if rate_two_models<=40: + if rate_two_models <= 40: if self.input_binary: prediction_bin = np.copy(img_org) else: @@ -1920,141 +1982,142 @@ class Eynollah: prediction_bin = 
self.do_prediction(True, img_org, model_bin) else: prediction_bin = self.do_prediction(True, img_org, self.model_bin) - prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h ) - - prediction_bin=prediction_bin[:,:,0] - prediction_bin = (prediction_bin[:,:]==0)*1 - prediction_bin = prediction_bin*255 - - prediction_bin =np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) - + prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h) + + prediction_bin = prediction_bin[:, :, 0] + prediction_bin = (prediction_bin[:, :] == 0) * 1 + prediction_bin = prediction_bin * 255 + + prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) + if not self.dir_in: model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_ens) - ratio_y=1 - ratio_x=1 + ratio_y = 1 + ratio_x = 1 + img = resize_image(prediction_bin, int(img_org.shape[0] * ratio_y), int(img_org.shape[1] * ratio_x)) - img = resize_image(prediction_bin, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) - if not self.dir_in: prediction_regions_org = self.do_prediction(True, img, model_region) else: prediction_regions_org = self.do_prediction(True, img, self.model_region) - prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) - prediction_regions_org=prediction_regions_org[:,:,0] - - mask_lines_only=(prediction_regions_org[:,:]==3)*1 - - mask_texts_only=(prediction_regions_org[:,:]==1)*1 - mask_images_only=(prediction_regions_org[:,:]==2)*1 - - - + prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h) + prediction_regions_org = prediction_regions_org[:, :, 0] + + mask_lines_only = (prediction_regions_org[:, :] == 3) * 1 + + mask_texts_only = (prediction_regions_org[:, :] == 1) * 1 + mask_images_only = (prediction_regions_org[:, :] == 2) * 1 + polygons_lines_xml, hir_lines_xml = return_contours_of_image(mask_lines_only) - polygons_lines_xml = textline_con_fil = filter_contours_area_of_image(mask_lines_only, polygons_lines_xml, hir_lines_xml, max_area=1, min_area=0.00001) + polygons_lines_xml = textline_con_fil = filter_contours_area_of_image(mask_lines_only, polygons_lines_xml, + hir_lines_xml, max_area=1, + min_area=0.00001) polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only, 1, 0.00001) polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only, 1, 0.00001) text_regions_p_true = np.zeros(prediction_regions_org.shape) - text_regions_p_true = cv2.fillPoly(text_regions_p_true,pts = polygons_of_only_lines, color=(3, 3, 3)) - text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 + text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts=polygons_of_only_lines, color=(3, 3, 3)) + text_regions_p_true[:, :][mask_images_only[:, :] == 1] = 2 - text_regions_p_true=cv2.fillPoly(text_regions_p_true,pts=polygons_of_only_texts, color=(1,1,1)) + text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts=polygons_of_only_texts, color=(1, 1, 1)) return text_regions_p_true, erosion_hurts, polygons_lines_xml except: - + if self.input_binary: prediction_bin = np.copy(img_org) - + if not self.dir_in: model_bin, session_bin = self.start_new_session_and_model(self.model_dir_of_binarization) prediction_bin = self.do_prediction(True, img_org, model_bin) else: prediction_bin = self.do_prediction(True, img_org, self.model_bin) - prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h ) - prediction_bin=prediction_bin[:,:,0] - - prediction_bin = 
(prediction_bin[:,:]==0)*1 - - prediction_bin = prediction_bin*255 - - prediction_bin =np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) - - + prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h) + prediction_bin = prediction_bin[:, :, 0] + + prediction_bin = (prediction_bin[:, :] == 0) * 1 + + prediction_bin = prediction_bin * 255 + + prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) + if not self.dir_in: model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_ens) - + else: prediction_bin = np.copy(img_org) - ratio_y=1 - ratio_x=1 + ratio_y = 1 + ratio_x = 1 - - img = resize_image(prediction_bin, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) + img = resize_image(prediction_bin, int(img_org.shape[0] * ratio_y), int(img_org.shape[1] * ratio_x)) if not self.dir_in: prediction_regions_org = self.do_prediction(True, img, model_region) else: prediction_regions_org = self.do_prediction(True, img, self.model_region) - prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) - prediction_regions_org=prediction_regions_org[:,:,0] - - #mask_lines_only=(prediction_regions_org[:,:]==3)*1 - #img = resize_image(img_org, int(img_org.shape[0]*1), int(img_org.shape[1]*1)) - - #prediction_regions_org = self.do_prediction(True, img, model_region) - - #prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) - - #prediction_regions_org = prediction_regions_org[:,:,0] - - #prediction_regions_org[(prediction_regions_org[:,:] == 1) & (mask_zeros_y[:,:] == 1)]=0 - - - mask_lines_only = (prediction_regions_org[:,:] ==3)*1 - - mask_texts_only = (prediction_regions_org[:,:] ==1)*1 - - mask_images_only=(prediction_regions_org[:,:] ==2)*1 - + prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h) + prediction_regions_org = prediction_regions_org[:, :, 0] + + # mask_lines_only=(prediction_regions_org[:,:]==3)*1 + # img = resize_image(img_org, int(img_org.shape[0]*1), int(img_org.shape[1]*1)) + + # prediction_regions_org = self.do_prediction(True, img, model_region) + + # prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) + + # prediction_regions_org = prediction_regions_org[:,:,0] + + # prediction_regions_org[(prediction_regions_org[:,:] == 1) & (mask_zeros_y[:,:] == 1)]=0 + + mask_lines_only = (prediction_regions_org[:, :] == 3) * 1 + + mask_texts_only = (prediction_regions_org[:, :] == 1) * 1 + + mask_images_only = (prediction_regions_org[:, :] == 2) * 1 + polygons_lines_xml, hir_lines_xml = return_contours_of_image(mask_lines_only) - polygons_lines_xml = textline_con_fil = filter_contours_area_of_image(mask_lines_only, polygons_lines_xml, hir_lines_xml, max_area=1, min_area=0.00001) - - - polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) - - polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) - - + polygons_lines_xml = textline_con_fil = filter_contours_area_of_image(mask_lines_only, polygons_lines_xml, + hir_lines_xml, max_area=1, + min_area=0.00001) + + polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only, 1, 0.00001) + + polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only, 1, 0.00001) + text_regions_p_true = np.zeros(prediction_regions_org.shape) - - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_lines, color=(3,3,3)) - - 
text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 - - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_texts, color=(1,1,1)) - + + text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts=polygons_of_only_lines, color=(3, 3, 3)) + + text_regions_p_true[:, :][mask_images_only[:, :] == 1] = 2 + + text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts=polygons_of_only_texts, color=(1, 1, 1)) + erosion_hurts = True return text_regions_p_true, erosion_hurts, polygons_lines_xml - def do_order_of_regions_full_layout(self, contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot): + def do_order_of_regions_full_layout(self, contours_only_text_parent, contours_only_text_parent_h, boxes, + textline_mask_tot): self.logger.debug("enter do_order_of_regions_full_layout") - cx_text_only, cy_text_only, x_min_text_only, _, _, _, y_cor_x_min_main = find_new_features_of_contours(contours_only_text_parent) - cx_text_only_h, cy_text_only_h, x_min_text_only_h, _, _, _, y_cor_x_min_main_h = find_new_features_of_contours(contours_only_text_parent_h) + cx_text_only, cy_text_only, x_min_text_only, _, _, _, y_cor_x_min_main = find_new_features_of_contours( + contours_only_text_parent) + cx_text_only_h, cy_text_only_h, x_min_text_only_h, _, _, _, y_cor_x_min_main_h = find_new_features_of_contours( + contours_only_text_parent_h) try: arg_text_con = [] for ii in range(len(cx_text_only)): for jj in range(len(boxes)): - if (x_min_text_only[ii] + 80) >= boxes[jj][0] and (x_min_text_only[ii] + 80) < boxes[jj][1] and y_cor_x_min_main[ii] >= boxes[jj][2] and y_cor_x_min_main[ii] < boxes[jj][3]: + if (x_min_text_only[ii] + 80) >= boxes[jj][0] and (x_min_text_only[ii] + 80) < boxes[jj][1] and \ + y_cor_x_min_main[ii] >= boxes[jj][2] and y_cor_x_min_main[ii] < boxes[jj][3]: arg_text_con.append(jj) break args_contours = np.array(range(len(arg_text_con))) arg_text_con_h = [] for ii in range(len(cx_text_only_h)): for jj in range(len(boxes)): - if (x_min_text_only_h[ii] + 80) >= boxes[jj][0] and (x_min_text_only_h[ii] + 80) < boxes[jj][1] and y_cor_x_min_main_h[ii] >= boxes[jj][2] and y_cor_x_min_main_h[ii] < boxes[jj][3]: + if (x_min_text_only_h[ii] + 80) >= boxes[jj][0] and (x_min_text_only_h[ii] + 80) < boxes[jj][1] and \ + y_cor_x_min_main_h[ii] >= boxes[jj][2] and y_cor_x_min_main_h[ii] < boxes[jj][3]: arg_text_con_h.append(jj) break args_contours_h = np.array(range(len(arg_text_con_h))) @@ -2078,9 +2141,13 @@ class Eynollah: for box in args_contours_box_h: con_inter_box_h.append(contours_only_text_parent_h[box]) - indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions(textline_mask_tot[int(boxes[iij][2]) : int(boxes[iij][3]), int(boxes[iij][0]) : int(boxes[iij][1])], con_inter_box, con_inter_box_h, boxes[iij][2]) + indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( + textline_mask_tot[int(boxes[iij][2]): int(boxes[iij][3]), int(boxes[iij][0]): int(boxes[iij][1])], + con_inter_box, con_inter_box_h, boxes[iij][2]) - order_of_texts, id_of_texts = order_and_id_of_texts(con_inter_box, con_inter_box_h, matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) + order_of_texts, id_of_texts = order_and_id_of_texts(con_inter_box, con_inter_box_h, matrix_of_orders, + indexes_sorted, index_by_kind_sorted, + kind_of_texts_sorted, ref_point) indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1] indexes_by_type_main = 
np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1] @@ -2089,11 +2156,13 @@ class Eynollah: for zahler, _ in enumerate(args_contours_box): arg_order_v = indexes_sorted_main[zahler] - order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = \ + np.where(indexes_sorted == arg_order_v)[0][0] + ref_point for zahler, _ in enumerate(args_contours_box_h): arg_order_v = indexes_sorted_head[zahler] - order_by_con_head[args_contours_box_h[indexes_by_type_head[zahler]]] = np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + order_by_con_head[args_contours_box_h[indexes_by_type_head[zahler]]] = \ + np.where(indexes_sorted == arg_order_v)[0][0] + ref_point for jji in range(len(id_of_texts)): order_of_texts_tot.append(order_of_texts[jji] + ref_point) @@ -2116,14 +2185,15 @@ class Eynollah: arg_text_con = [] for ii in range(len(cx_text_only)): for jj in range(len(boxes)): - if cx_text_only[ii] >= boxes[jj][0] and cx_text_only[ii] < boxes[jj][1] and cy_text_only[ii] >= boxes[jj][2] and cy_text_only[ii] < boxes[jj][3]: # this is valid if the center of region identify in which box it is located + if cx_text_only[ii] >= boxes[jj][0] and cx_text_only[ii] < boxes[jj][1] and cy_text_only[ii] >= \ + boxes[jj][2] and cy_text_only[ii] < boxes[jj][3]: # this is valid if the center of region identify in which box it is located arg_text_con.append(jj) break args_contours = np.array(range(len(arg_text_con))) order_by_con_main = np.zeros(len(arg_text_con)) - ############################# head + # head arg_text_con_h = [] for ii in range(len(cx_text_only_h)): @@ -2150,9 +2220,13 @@ class Eynollah: for box in args_contours_box_h: con_inter_box_h.append(contours_only_text_parent_h[box]) - indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions(textline_mask_tot[int(boxes[iij][2]) : int(boxes[iij][3]), int(boxes[iij][0]) : int(boxes[iij][1])], con_inter_box, con_inter_box_h, boxes[iij][2]) + indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( + textline_mask_tot[int(boxes[iij][2]): int(boxes[iij][3]), int(boxes[iij][0]): int(boxes[iij][1])], + con_inter_box, con_inter_box_h, boxes[iij][2]) - order_of_texts, id_of_texts = order_and_id_of_texts(con_inter_box, con_inter_box_h, matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) + order_of_texts, id_of_texts = order_and_id_of_texts(con_inter_box, con_inter_box_h, matrix_of_orders, + indexes_sorted, index_by_kind_sorted, + kind_of_texts_sorted, ref_point) indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1] indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1] @@ -2161,11 +2235,13 @@ class Eynollah: for zahler, _ in enumerate(args_contours_box): arg_order_v = indexes_sorted_main[zahler] - order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = \ + np.where(indexes_sorted == arg_order_v)[0][0] + ref_point for zahler, _ in enumerate(args_contours_box_h): arg_order_v = indexes_sorted_head[zahler] - order_by_con_head[args_contours_box_h[indexes_by_type_head[zahler]]] = np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + order_by_con_head[args_contours_box_h[indexes_by_type_head[zahler]]] 
= \ + np.where(indexes_sorted == arg_order_v)[0][0] + ref_point for jji, _ in enumerate(id_of_texts): order_of_texts_tot.append(order_of_texts[jji] + ref_point) @@ -2184,15 +2260,18 @@ class Eynollah: order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0]) return order_text_new, id_of_texts_tot - def do_order_of_regions_no_full_layout(self, contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot): + def do_order_of_regions_no_full_layout(self, contours_only_text_parent, contours_only_text_parent_h, boxes, + textline_mask_tot): self.logger.debug("enter do_order_of_regions_no_full_layout") - cx_text_only, cy_text_only, x_min_text_only, _, _, _, y_cor_x_min_main = find_new_features_of_contours(contours_only_text_parent) + cx_text_only, cy_text_only, x_min_text_only, _, _, _, y_cor_x_min_main = find_new_features_of_contours( + contours_only_text_parent) try: arg_text_con = [] for ii in range(len(cx_text_only)): for jj in range(len(boxes)): - if (x_min_text_only[ii] + 80) >= boxes[jj][0] and (x_min_text_only[ii] + 80) < boxes[jj][1] and y_cor_x_min_main[ii] >= boxes[jj][2] and y_cor_x_min_main[ii] < boxes[jj][3]: + if (x_min_text_only[ii] + 80) >= boxes[jj][0] and (x_min_text_only[ii] + 80) < boxes[jj][1] and \ + y_cor_x_min_main[ii] >= boxes[jj][2] and y_cor_x_min_main[ii] < boxes[jj][3]: arg_text_con.append(jj) break args_contours = np.array(range(len(arg_text_con))) @@ -2208,16 +2287,21 @@ class Eynollah: for i in range(len(args_contours_box)): con_inter_box.append(contours_only_text_parent[args_contours_box[i]]) - indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions(textline_mask_tot[int(boxes[iij][2]) : int(boxes[iij][3]), int(boxes[iij][0]) : int(boxes[iij][1])], con_inter_box, con_inter_box_h, boxes[iij][2]) + indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( + textline_mask_tot[int(boxes[iij][2]): int(boxes[iij][3]), int(boxes[iij][0]): int(boxes[iij][1])], + con_inter_box, con_inter_box_h, boxes[iij][2]) - order_of_texts, id_of_texts = order_and_id_of_texts(con_inter_box, con_inter_box_h, matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) + order_of_texts, id_of_texts = order_and_id_of_texts(con_inter_box, con_inter_box_h, matrix_of_orders, + indexes_sorted, index_by_kind_sorted, + kind_of_texts_sorted, ref_point) indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1] indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1] for zahler, _ in enumerate(args_contours_box): arg_order_v = indexes_sorted_main[zahler] - order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = \ + np.where(indexes_sorted == arg_order_v)[0][0] + ref_point for jji, _ in enumerate(id_of_texts): order_of_texts_tot.append(order_of_texts[jji] + ref_point) @@ -2231,13 +2315,14 @@ class Eynollah: order_text_new = [] for iii in range(len(order_of_texts_tot)): order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0]) - + except Exception as why: self.logger.error(why) arg_text_con = [] for ii in range(len(cx_text_only)): for jj in range(len(boxes)): - if cx_text_only[ii] >= boxes[jj][0] and cx_text_only[ii] < boxes[jj][1] and cy_text_only[ii] >= boxes[jj][2] and cy_text_only[ii] < boxes[jj][3]: # this is valid if the center of region 
identify in which box it is located + if cx_text_only[ii] >= boxes[jj][0] and cx_text_only[ii] < boxes[jj][1] and cy_text_only[ii] >= \ + boxes[jj][2] and cy_text_only[ii] < boxes[jj][3]: # this is valid if the center of region identify in which box it is located arg_text_con.append(jj) break args_contours = np.array(range(len(arg_text_con))) @@ -2255,16 +2340,21 @@ class Eynollah: for i in range(len(args_contours_box)): con_inter_box.append(contours_only_text_parent[args_contours_box[i]]) - indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions(textline_mask_tot[int(boxes[iij][2]) : int(boxes[iij][3]), int(boxes[iij][0]) : int(boxes[iij][1])], con_inter_box, con_inter_box_h, boxes[iij][2]) + indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( + textline_mask_tot[int(boxes[iij][2]): int(boxes[iij][3]), int(boxes[iij][0]): int(boxes[iij][1])], + con_inter_box, con_inter_box_h, boxes[iij][2]) - order_of_texts, id_of_texts = order_and_id_of_texts(con_inter_box, con_inter_box_h, matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) + order_of_texts, id_of_texts = order_and_id_of_texts(con_inter_box, con_inter_box_h, matrix_of_orders, + indexes_sorted, index_by_kind_sorted, + kind_of_texts_sorted, ref_point) indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1] indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1] for zahler, _ in enumerate(args_contours_box): arg_order_v = indexes_sorted_main[zahler] - order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = \ + np.where(indexes_sorted == arg_order_v)[0][0] + ref_point for jji, _ in enumerate(id_of_texts): order_of_texts_tot.append(order_of_texts[jji] + ref_point) @@ -2272,326 +2362,370 @@ class Eynollah: ref_point += len(id_of_texts) order_of_texts_tot = [] - + for tj1 in range(len(contours_only_text_parent)): order_of_texts_tot.append(int(order_by_con_main[tj1])) order_text_new = [] for iii in range(len(order_of_texts_tot)): order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0]) - + return order_text_new, id_of_texts_tot - def check_iou_of_bounding_box_and_contour_for_tables(self, layout, table_prediction_early, pixel_tabel, num_col_classifier): - layout_org = np.copy(layout) - layout_org[:,:,0][layout_org[:,:,0]==pixel_tabel] = 0 - layout = (layout[:,:,0]==pixel_tabel)*1 - layout =np.repeat(layout[:, :, np.newaxis], 3, axis=2) + def check_iou_of_bounding_box_and_contour_for_tables(self, layout, table_prediction_early, pixel_tabel, + num_col_classifier): + layout_org = np.copy(layout) + layout_org[:, :, 0][layout_org[:, :, 0] == pixel_tabel] = 0 + layout = (layout[:, :, 0] == pixel_tabel) * 1 + + layout = np.repeat(layout[:, :, np.newaxis], 3, axis=2) layout = layout.astype(np.uint8) - imgray = cv2.cvtColor(layout, cv2.COLOR_BGR2GRAY ) + imgray = cv2.cvtColor(layout, cv2.COLOR_BGR2GRAY) _, thresh = cv2.threshold(imgray, 0, 255, 0) contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) cnt_size = np.array([cv2.contourArea(contours[j]) for j in range(len(contours))]) - + contours_new = [] for i in range(len(contours)): x, y, w, h = cv2.boundingRect(contours[i]) - iou = cnt_size[i] /float(w*h) *100 - - if iou<80: + iou = cnt_size[i] / float(w * h) * 100 + + if iou < 80: 
layout_contour = np.zeros((layout_org.shape[0], layout_org.shape[1])) - layout_contour= cv2.fillPoly(layout_contour,pts=[contours[i]] ,color=(1,1,1)) - - + layout_contour = cv2.fillPoly(layout_contour, pts=[contours[i]], color=(1, 1, 1)) + layout_contour_sum = layout_contour.sum(axis=0) layout_contour_sum_diff = np.diff(layout_contour_sum) - layout_contour_sum_diff= np.abs(layout_contour_sum_diff) - layout_contour_sum_diff_smoothed= gaussian_filter1d(layout_contour_sum_diff, 10) + layout_contour_sum_diff = np.abs(layout_contour_sum_diff) + layout_contour_sum_diff_smoothed = gaussian_filter1d(layout_contour_sum_diff, 10) peaks, _ = find_peaks(layout_contour_sum_diff_smoothed, height=0) - peaks= peaks[layout_contour_sum_diff_smoothed[peaks]>4] - + peaks = peaks[layout_contour_sum_diff_smoothed[peaks] > 4] + for j in range(len(peaks)): - layout_contour[:,peaks[j]-3+1:peaks[j]+1+3] = 0 - - layout_contour=cv2.erode(layout_contour[:,:], KERNEL, iterations=5) - layout_contour=cv2.dilate(layout_contour[:,:], KERNEL, iterations=5) - - layout_contour =np.repeat(layout_contour[:, :, np.newaxis], 3, axis=2) + layout_contour[:, peaks[j] - 3 + 1:peaks[j] + 1 + 3] = 0 + + layout_contour = cv2.erode(layout_contour[:, :], KERNEL, iterations=5) + layout_contour = cv2.dilate(layout_contour[:, :], KERNEL, iterations=5) + + layout_contour = np.repeat(layout_contour[:, :, np.newaxis], 3, axis=2) layout_contour = layout_contour.astype(np.uint8) - - imgray = cv2.cvtColor(layout_contour, cv2.COLOR_BGR2GRAY ) + + imgray = cv2.cvtColor(layout_contour, cv2.COLOR_BGR2GRAY) _, thresh = cv2.threshold(imgray, 0, 255, 0) contours_sep, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - for ji in range(len(contours_sep) ): + for ji in range(len(contours_sep)): contours_new.append(contours_sep[ji]) - if num_col_classifier>=2: - only_recent_contour_image = np.zeros((layout.shape[0],layout.shape[1])) - only_recent_contour_image= cv2.fillPoly(only_recent_contour_image,pts=[contours_sep[ji]] ,color=(1,1,1)) - table_pixels_masked_from_early_pre = only_recent_contour_image[:,:]*table_prediction_early[:,:] - iou_in = table_pixels_masked_from_early_pre.sum() /float(only_recent_contour_image.sum()) *100 - #print(iou_in,'iou_in_in1') - - if iou_in>30: - layout_org= cv2.fillPoly(layout_org,pts=[contours_sep[ji]] ,color=(pixel_tabel,pixel_tabel,pixel_tabel)) + if num_col_classifier >= 2: + only_recent_contour_image = np.zeros((layout.shape[0], layout.shape[1])) + only_recent_contour_image = cv2.fillPoly(only_recent_contour_image, pts=[contours_sep[ji]], + color=(1, 1, 1)) + table_pixels_masked_from_early_pre = only_recent_contour_image[:, :] * table_prediction_early[:, :] + iou_in = table_pixels_masked_from_early_pre.sum() / float(only_recent_contour_image.sum()) * 100 + # print(iou_in,'iou_in_in1') + + if iou_in > 30: + layout_org = cv2.fillPoly(layout_org, pts=[contours_sep[ji]], + color=(pixel_tabel, pixel_tabel, pixel_tabel)) else: pass else: - - layout_org= cv2.fillPoly(layout_org,pts=[contours_sep[ji]] ,color=(pixel_tabel,pixel_tabel,pixel_tabel)) - + + layout_org = cv2.fillPoly(layout_org, pts=[contours_sep[ji]], + color=(pixel_tabel, pixel_tabel, pixel_tabel)) + else: contours_new.append(contours[i]) - if num_col_classifier>=2: - only_recent_contour_image = np.zeros((layout.shape[0],layout.shape[1])) - only_recent_contour_image= cv2.fillPoly(only_recent_contour_image,pts=[contours[i]] ,color=(1,1,1)) - - table_pixels_masked_from_early_pre = only_recent_contour_image[:,:]*table_prediction_early[:,:] - iou_in = 
table_pixels_masked_from_early_pre.sum() /float(only_recent_contour_image.sum()) *100 - #print(iou_in,'iou_in') - if iou_in>30: - layout_org= cv2.fillPoly(layout_org,pts=[contours[i]] ,color=(pixel_tabel,pixel_tabel,pixel_tabel)) + if num_col_classifier >= 2: + only_recent_contour_image = np.zeros((layout.shape[0], layout.shape[1])) + only_recent_contour_image = cv2.fillPoly(only_recent_contour_image, pts=[contours[i]], + color=(1, 1, 1)) + + table_pixels_masked_from_early_pre = only_recent_contour_image[:, :] * table_prediction_early[:, :] + iou_in = table_pixels_masked_from_early_pre.sum() / float(only_recent_contour_image.sum()) * 100 + # print(iou_in,'iou_in') + if iou_in > 30: + layout_org = cv2.fillPoly(layout_org, pts=[contours[i]], + color=(pixel_tabel, pixel_tabel, pixel_tabel)) else: pass else: - layout_org= cv2.fillPoly(layout_org,pts=[contours[i]] ,color=(pixel_tabel,pixel_tabel,pixel_tabel)) - + layout_org = cv2.fillPoly(layout_org, pts=[contours[i]], + color=(pixel_tabel, pixel_tabel, pixel_tabel)) + return layout_org, contours_new - def delete_separator_around(self,spliter_y,peaks_neg,image_by_region, pixel_line, pixel_table): + + def delete_separator_around(self, spliter_y, peaks_neg, image_by_region, pixel_line, pixel_table): # format of subboxes: box=[x1, x2 , y1, y2] pix_del = 100 - if len(image_by_region.shape)==3: - for i in range(len(spliter_y)-1): - for j in range(1,len(peaks_neg[i])-1): - image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,0][image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,0]==pixel_line ]=0 - image_by_region[spliter_y[i]:spliter_y[i+1],peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,0][image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,1]==pixel_line ]=0 - image_by_region[spliter_y[i]:spliter_y[i+1],peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,0][image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,2]==pixel_line ]=0 - - image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,0][image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,0]==pixel_table ]=0 - image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,0][image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,1]==pixel_table ]=0 - image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,0][image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,2]==pixel_table ]=0 + if len(image_by_region.shape) == 3: + for i in range(len(spliter_y) - 1): + for j in range(1, len(peaks_neg[i]) - 1): + image_by_region[int(spliter_y[i]):int(spliter_y[i + 1]), + peaks_neg[i][j] - pix_del:peaks_neg[i][j] + pix_del, 0][ + image_by_region[int(spliter_y[i]):int(spliter_y[i + 1]), + peaks_neg[i][j] - pix_del:peaks_neg[i][j] + pix_del, 0] == pixel_line] = 0 + image_by_region[spliter_y[i]:spliter_y[i + 1], peaks_neg[i][j] - pix_del:peaks_neg[i][j] + pix_del, + 0][image_by_region[int(spliter_y[i]):int(spliter_y[i + 1]), + peaks_neg[i][j] - pix_del:peaks_neg[i][j] + pix_del, 1] == pixel_line] = 0 + image_by_region[spliter_y[i]:spliter_y[i + 1], peaks_neg[i][j] - pix_del:peaks_neg[i][j] + pix_del, + 
0][image_by_region[int(spliter_y[i]):int(spliter_y[i + 1]), + peaks_neg[i][j] - pix_del:peaks_neg[i][j] + pix_del, 2] == pixel_line] = 0 + + image_by_region[int(spliter_y[i]):int(spliter_y[i + 1]), + peaks_neg[i][j] - pix_del:peaks_neg[i][j] + pix_del, 0][ + image_by_region[int(spliter_y[i]):int(spliter_y[i + 1]), + peaks_neg[i][j] - pix_del:peaks_neg[i][j] + pix_del, 0] == pixel_table] = 0 + image_by_region[int(spliter_y[i]):int(spliter_y[i + 1]), + peaks_neg[i][j] - pix_del:peaks_neg[i][j] + pix_del, 0][ + image_by_region[int(spliter_y[i]):int(spliter_y[i + 1]), + peaks_neg[i][j] - pix_del:peaks_neg[i][j] + pix_del, 1] == pixel_table] = 0 + image_by_region[int(spliter_y[i]):int(spliter_y[i + 1]), + peaks_neg[i][j] - pix_del:peaks_neg[i][j] + pix_del, 0][ + image_by_region[int(spliter_y[i]):int(spliter_y[i + 1]), + peaks_neg[i][j] - pix_del:peaks_neg[i][j] + pix_del, 2] == pixel_table] = 0 else: - for i in range(len(spliter_y)-1): - for j in range(1,len(peaks_neg[i])-1): - image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del][image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del]==pixel_line ]=0 - - image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del][image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del]==pixel_table ]=0 + for i in range(len(spliter_y) - 1): + for j in range(1, len(peaks_neg[i]) - 1): + image_by_region[int(spliter_y[i]):int(spliter_y[i + 1]), + peaks_neg[i][j] - pix_del:peaks_neg[i][j] + pix_del][ + image_by_region[int(spliter_y[i]):int(spliter_y[i + 1]), + peaks_neg[i][j] - pix_del:peaks_neg[i][j] + pix_del] == pixel_line] = 0 + + image_by_region[int(spliter_y[i]):int(spliter_y[i + 1]), + peaks_neg[i][j] - pix_del:peaks_neg[i][j] + pix_del][ + image_by_region[int(spliter_y[i]):int(spliter_y[i + 1]), + peaks_neg[i][j] - pix_del:peaks_neg[i][j] + pix_del] == pixel_table] = 0 return image_by_region - def add_tables_heuristic_to_layout(self, image_regions_eraly_p,boxes, slope_mean_hor, spliter_y,peaks_neg_tot, image_revised, num_col_classifier, min_area, pixel_line): - pixel_table =10 + + def add_tables_heuristic_to_layout(self, image_regions_eraly_p, boxes, slope_mean_hor, spliter_y, peaks_neg_tot, + image_revised, num_col_classifier, min_area, pixel_line): + pixel_table = 10 image_revised_1 = self.delete_separator_around(spliter_y, peaks_neg_tot, image_revised, pixel_line, pixel_table) - + try: - image_revised_1[:,:30][image_revised_1[:,:30]==pixel_line] = 0 - image_revised_1[:,image_revised_1.shape[1]-30:][image_revised_1[:,image_revised_1.shape[1]-30:]==pixel_line] = 0 + image_revised_1[:, :30][image_revised_1[:, :30] == pixel_line] = 0 + image_revised_1[:, image_revised_1.shape[1] - 30:][ + image_revised_1[:, image_revised_1.shape[1] - 30:] == pixel_line] = 0 except: pass - + img_comm_e = np.zeros(image_revised_1.shape) img_comm = np.repeat(img_comm_e[:, :, np.newaxis], 3, axis=2) for indiv in np.unique(image_revised_1): - image_col=(image_revised_1==indiv)*255 - img_comm_in=np.repeat(image_col[:, :, np.newaxis], 3, axis=2) - img_comm_in=img_comm_in.astype(np.uint8) + image_col = (image_revised_1 == indiv) * 255 + img_comm_in = np.repeat(image_col[:, :, np.newaxis], 3, axis=2) + img_comm_in = img_comm_in.astype(np.uint8) imgray = cv2.cvtColor(img_comm_in, cv2.COLOR_BGR2GRAY) ret, thresh = cv2.threshold(imgray, 0, 255, 0) - contours,hirarchy=cv2.findContours(thresh.copy(), 
cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + contours, hirarchy = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - if indiv==pixel_table: - main_contours = filter_contours_area_of_image_tables(thresh, contours, hirarchy, max_area = 1, min_area = 0.001) + if indiv == pixel_table: + main_contours = filter_contours_area_of_image_tables(thresh, contours, hirarchy, max_area=1, + min_area=0.001) else: - main_contours = filter_contours_area_of_image_tables(thresh, contours, hirarchy, max_area = 1, min_area = min_area) + main_contours = filter_contours_area_of_image_tables(thresh, contours, hirarchy, max_area=1, + min_area=min_area) - img_comm = cv2.fillPoly(img_comm, pts = main_contours, color = (indiv, indiv, indiv)) + img_comm = cv2.fillPoly(img_comm, pts=main_contours, color=(indiv, indiv, indiv)) img_comm = img_comm.astype(np.uint8) - + if not self.isNaN(slope_mean_hor): - image_revised_last = np.zeros((image_regions_eraly_p.shape[0], image_regions_eraly_p.shape[1],3)) + image_revised_last = np.zeros((image_regions_eraly_p.shape[0], image_regions_eraly_p.shape[1], 3)) for i in range(len(boxes)): - image_box=img_comm[int(boxes[i][2]):int(boxes[i][3]),int(boxes[i][0]):int(boxes[i][1]),:] + image_box = img_comm[int(boxes[i][2]):int(boxes[i][3]), int(boxes[i][0]):int(boxes[i][1]), :] try: - image_box_tabels_1=(image_box[:,:,0]==pixel_table)*1 - contours_tab,_=return_contours_of_image(image_box_tabels_1) - contours_tab=filter_contours_area_of_image_tables(image_box_tabels_1,contours_tab,_,1,0.003) - image_box_tabels_1=(image_box[:,:,0]==pixel_line)*1 + image_box_tabels_1 = (image_box[:, :, 0] == pixel_table) * 1 + contours_tab, _ = return_contours_of_image(image_box_tabels_1) + contours_tab = filter_contours_area_of_image_tables(image_box_tabels_1, contours_tab, _, 1, 0.003) + image_box_tabels_1 = (image_box[:, :, 0] == pixel_line) * 1 - image_box_tabels_and_m_text=( (image_box[:,:,0]==pixel_table) | (image_box[:,:,0]==1) )*1 - image_box_tabels_and_m_text=image_box_tabels_and_m_text.astype(np.uint8) + image_box_tabels_and_m_text = ((image_box[:, :, 0] == pixel_table) | (image_box[:, :, 0] == 1)) * 1 + image_box_tabels_and_m_text = image_box_tabels_and_m_text.astype(np.uint8) - image_box_tabels_1=image_box_tabels_1.astype(np.uint8) - image_box_tabels_1 = cv2.dilate(image_box_tabels_1,KERNEL,iterations = 5) + image_box_tabels_1 = image_box_tabels_1.astype(np.uint8) + image_box_tabels_1 = cv2.dilate(image_box_tabels_1, KERNEL, iterations=5) - contours_table_m_text,_=return_contours_of_image(image_box_tabels_and_m_text) - image_box_tabels=np.repeat(image_box_tabels_1[:, :, np.newaxis], 3, axis=2) + contours_table_m_text, _ = return_contours_of_image(image_box_tabels_and_m_text) + image_box_tabels = np.repeat(image_box_tabels_1[:, :, np.newaxis], 3, axis=2) - image_box_tabels=image_box_tabels.astype(np.uint8) + image_box_tabels = image_box_tabels.astype(np.uint8) imgray = cv2.cvtColor(image_box_tabels, cv2.COLOR_BGR2GRAY) ret, thresh = cv2.threshold(imgray, 0, 255, 0) - contours_line,hierachy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + contours_line, hierachy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - y_min_main_line ,y_max_main_line=find_features_of_contours(contours_line) - y_min_main_tab ,y_max_main_tab=find_features_of_contours(contours_tab) + y_min_main_line, y_max_main_line = find_features_of_contours(contours_line) + y_min_main_tab, y_max_main_tab = find_features_of_contours(contours_tab) - cx_tab_m_text,cy_tab_m_text 
,x_min_tab_m_text , x_max_tab_m_text, y_min_tab_m_text ,y_max_tab_m_text, _= find_new_features_of_contours(contours_table_m_text) - cx_tabl,cy_tabl ,x_min_tabl , x_max_tabl, y_min_tabl ,y_max_tabl,_= find_new_features_of_contours(contours_tab) + cx_tab_m_text, cy_tab_m_text, x_min_tab_m_text, x_max_tab_m_text, y_min_tab_m_text, y_max_tab_m_text, _ = find_new_features_of_contours( + contours_table_m_text) + cx_tabl, cy_tabl, x_min_tabl, x_max_tabl, y_min_tabl, y_max_tabl, _ = find_new_features_of_contours( + contours_tab) - if len(y_min_main_tab )>0: - y_down_tabs=[] - y_up_tabs=[] + if len(y_min_main_tab) > 0: + y_down_tabs = [] + y_up_tabs = [] - for i_t in range(len(y_min_main_tab )): - y_down_tab=[] - y_up_tab=[] + for i_t in range(len(y_min_main_tab)): + y_down_tab = [] + y_up_tab = [] for i_l in range(len(y_min_main_line)): - if y_min_main_tab[i_t]>y_min_main_line[i_l] and y_max_main_tab[i_t]>y_min_main_line[i_l] and y_min_main_tab[i_t]>y_max_main_line[i_l] and y_max_main_tab[i_t]>y_min_main_line[i_l]: + if y_min_main_tab[i_t] > y_min_main_line[i_l] and y_max_main_tab[i_t] > y_min_main_line[ + i_l] and y_min_main_tab[i_t] > y_max_main_line[i_l] and y_max_main_tab[i_t] > \ + y_min_main_line[i_l]: pass - elif y_min_main_tab[i_t]0: + if num_col_classifier == 1: + img_tables_col_1 = (image_revised_last[:, :, 0] == pixel_table) * 1 + img_tables_col_1 = img_tables_col_1.astype(np.uint8) + contours_table_col1, _ = return_contours_of_image(img_tables_col_1) + + _, _, _, _, y_min_tab_col1, y_max_tab_col1, _ = find_new_features_of_contours(contours_table_col1) + + if len(y_min_tab_col1) > 0: for ijv in range(len(y_min_tab_col1)): - image_revised_last[int(y_min_tab_col1[ijv]):int(y_max_tab_col1[ijv]),:,:]=pixel_table + image_revised_last[int(y_min_tab_col1[ijv]):int(y_max_tab_col1[ijv]), :, :] = pixel_table return image_revised_last + def do_order_of_regions(self, *args, **kwargs): if self.full_layout: return self.do_order_of_regions_full_layout(*args, **kwargs) return self.do_order_of_regions_no_full_layout(*args, **kwargs) - + def get_tables_from_model(self, img, num_col_classifier): img_org = np.copy(img) - + img_height_h = img_org.shape[0] img_width_h = img_org.shape[1] - + model_region, session_region = self.start_new_session_and_model(self.model_tables) - + patches = False - + if num_col_classifier < 4 and num_col_classifier > 2: prediction_table = self.do_prediction(patches, img, model_region) - pre_updown = self.do_prediction(patches, cv2.flip(img[:,:,:], -1), model_region) + pre_updown = self.do_prediction(patches, cv2.flip(img[:, :, :], -1), model_region) pre_updown = cv2.flip(pre_updown, -1) - - prediction_table[:,:,0][pre_updown[:,:,0]==1]=1 + + prediction_table[:, :, 0][pre_updown[:, :, 0] == 1] = 1 prediction_table = prediction_table.astype(np.int16) - - elif num_col_classifier ==2: - height_ext = 0#int( img.shape[0]/4. ) - h_start = int(height_ext/2.) - width_ext = int( img.shape[1]/8. ) - w_start = int(width_ext/2.) - - height_new = img.shape[0]+height_ext - width_new = img.shape[1]+width_ext - - img_new =np.ones((height_new,width_new,img.shape[2])).astype(float)*0 - img_new[h_start:h_start+img.shape[0] ,w_start: w_start+img.shape[1], : ] =img[:,:,:] + + elif num_col_classifier == 2: + height_ext = 0 # int( img.shape[0]/4. ) + h_start = int(height_ext / 2.) + width_ext = int(img.shape[1] / 8.) + w_start = int(width_ext / 2.) 
+ + height_new = img.shape[0] + height_ext + width_new = img.shape[1] + width_ext + + img_new = np.ones((height_new, width_new, img.shape[2])).astype(float) * 0 + img_new[h_start:h_start + img.shape[0], w_start: w_start + img.shape[1], :] = img[:, :, :] prediction_ext = self.do_prediction(patches, img_new, model_region) - pre_updown = self.do_prediction(patches, cv2.flip(img_new[:,:,:], -1), model_region) + pre_updown = self.do_prediction(patches, cv2.flip(img_new[:, :, :], -1), model_region) pre_updown = cv2.flip(pre_updown, -1) - - prediction_table = prediction_ext[h_start:h_start+img.shape[0] ,w_start: w_start+img.shape[1], : ] - prediction_table_updown = pre_updown[h_start:h_start+img.shape[0] ,w_start: w_start+img.shape[1], : ] - - prediction_table[:,:,0][prediction_table_updown[:,:,0]==1]=1 + + prediction_table = prediction_ext[h_start:h_start + img.shape[0], w_start: w_start + img.shape[1], :] + prediction_table_updown = pre_updown[h_start:h_start + img.shape[0], w_start: w_start + img.shape[1], :] + + prediction_table[:, :, 0][prediction_table_updown[:, :, 0] == 1] = 1 prediction_table = prediction_table.astype(np.int16) - elif num_col_classifier ==1: - height_ext = 0# int( img.shape[0]/4. ) - h_start = int(height_ext/2.) - width_ext = int( img.shape[1]/4. ) - w_start = int(width_ext/2.) - - height_new = img.shape[0]+height_ext - width_new = img.shape[1]+width_ext - - img_new =np.ones((height_new,width_new,img.shape[2])).astype(float)*0 - img_new[h_start:h_start+img.shape[0] ,w_start: w_start+img.shape[1], : ] =img[:,:,:] + elif num_col_classifier == 1: + height_ext = 0 # int( img.shape[0]/4. ) + h_start = int(height_ext / 2.) + width_ext = int(img.shape[1] / 4.) + w_start = int(width_ext / 2.) + + height_new = img.shape[0] + height_ext + width_new = img.shape[1] + width_ext + + img_new = np.ones((height_new, width_new, img.shape[2])).astype(float) * 0 + img_new[h_start:h_start + img.shape[0], w_start: w_start + img.shape[1], :] = img[:, :, :] prediction_ext = self.do_prediction(patches, img_new, model_region) - pre_updown = self.do_prediction(patches, cv2.flip(img_new[:,:,:], -1), model_region) + pre_updown = self.do_prediction(patches, cv2.flip(img_new[:, :, :], -1), model_region) pre_updown = cv2.flip(pre_updown, -1) - - prediction_table = prediction_ext[h_start:h_start+img.shape[0] ,w_start: w_start+img.shape[1], : ] - prediction_table_updown = pre_updown[h_start:h_start+img.shape[0] ,w_start: w_start+img.shape[1], : ] - - prediction_table[:,:,0][prediction_table_updown[:,:,0]==1]=1 + + prediction_table = prediction_ext[h_start:h_start + img.shape[0], w_start: w_start + img.shape[1], :] + prediction_table_updown = pre_updown[h_start:h_start + img.shape[0], w_start: w_start + img.shape[1], :] + + prediction_table[:, :, 0][prediction_table_updown[:, :, 0] == 1] = 1 prediction_table = prediction_table.astype(np.int16) else: prediction_table = np.zeros(img.shape) - img_w_half = int(img.shape[1]/2.) + img_w_half = int(img.shape[1] / 2.) 
- pre1 = self.do_prediction(patches, img[:,0:img_w_half,:], model_region) - pre2 = self.do_prediction(patches, img[:,img_w_half:,:], model_region) - pre_full = self.do_prediction(patches, img[:,:,:], model_region) - pre_updown = self.do_prediction(patches, cv2.flip(img[:,:,:], -1), model_region) + pre1 = self.do_prediction(patches, img[:, 0:img_w_half, :], model_region) + pre2 = self.do_prediction(patches, img[:, img_w_half:, :], model_region) + pre_full = self.do_prediction(patches, img[:, :, :], model_region) + pre_updown = self.do_prediction(patches, cv2.flip(img[:, :, :], -1), model_region) pre_updown = cv2.flip(pre_updown, -1) - - prediction_table_full_erode = cv2.erode(pre_full[:,:,0], KERNEL, iterations=4) + + prediction_table_full_erode = cv2.erode(pre_full[:, :, 0], KERNEL, iterations=4) prediction_table_full_erode = cv2.dilate(prediction_table_full_erode, KERNEL, iterations=4) - - prediction_table_full_updown_erode = cv2.erode(pre_updown[:,:,0], KERNEL, iterations=4) + + prediction_table_full_updown_erode = cv2.erode(pre_updown[:, :, 0], KERNEL, iterations=4) prediction_table_full_updown_erode = cv2.dilate(prediction_table_full_updown_erode, KERNEL, iterations=4) - prediction_table[:,0:img_w_half,:] = pre1[:,:,:] - prediction_table[:,img_w_half:,:] = pre2[:,:,:] - - prediction_table[:,:,0][prediction_table_full_erode[:,:]==1]=1 - prediction_table[:,:,0][prediction_table_full_updown_erode[:,:]==1]=1 + prediction_table[:, 0:img_w_half, :] = pre1[:, :, :] + prediction_table[:, img_w_half:, :] = pre2[:, :, :] + + prediction_table[:, :, 0][prediction_table_full_erode[:, :] == 1] = 1 + prediction_table[:, :, 0][prediction_table_full_updown_erode[:, :] == 1] = 1 prediction_table = prediction_table.astype(np.int16) - - #prediction_table_erode = cv2.erode(prediction_table[:,:,0], self.kernel, iterations=6) - #prediction_table_erode = cv2.dilate(prediction_table_erode, self.kernel, iterations=6) - - prediction_table_erode = cv2.erode(prediction_table[:,:,0], KERNEL, iterations=20) + + # prediction_table_erode = cv2.erode(prediction_table[:,:,0], self.kernel, iterations=6) + # prediction_table_erode = cv2.dilate(prediction_table_erode, self.kernel, iterations=6) + + prediction_table_erode = cv2.erode(prediction_table[:, :, 0], KERNEL, iterations=20) prediction_table_erode = cv2.dilate(prediction_table_erode, KERNEL, iterations=20) return prediction_table_erode.astype(np.int16) - def run_graphics_and_columns_light(self, text_regions_p_1, textline_mask_tot_ea, num_col_classifier, num_column_is_classified, erosion_hurts): + def run_graphics_and_columns_light(self, text_regions_p_1, textline_mask_tot_ea, num_col_classifier, + num_column_is_classified, erosion_hurts): img_g = self.imread(grayscale=True, uint8=True) img_g3 = np.zeros((img_g.shape[0], img_g.shape[1], 3)) @@ -2601,17 +2735,17 @@ class Eynollah: img_g3[:, :, 2] = img_g[:, :] image_page, page_coord, cont_page = self.extract_page() - + if self.tables: table_prediction = self.get_tables_from_model(image_page, num_col_classifier) else: table_prediction = (np.zeros((image_page.shape[0], image_page.shape[1]))).astype(np.int16) - + if self.plotter: self.plotter.save_page_image(image_page) - text_regions_p_1 = text_regions_p_1[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - textline_mask_tot_ea = textline_mask_tot_ea[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + text_regions_p_1 = text_regions_p_1[page_coord[0]: page_coord[1], page_coord[2]: page_coord[3]] + textline_mask_tot_ea = 
textline_mask_tot_ea[page_coord[0]: page_coord[1], page_coord[2]: page_coord[3]] mask_images = (text_regions_p_1[:, :] == 2) * 1 mask_images = mask_images.astype(np.uint8) mask_images = cv2.erode(mask_images[:, :], KERNEL, iterations=10) @@ -2619,17 +2753,16 @@ class Eynollah: mask_lines = mask_lines.astype(np.uint8) img_only_regions_with_sep = ((text_regions_p_1[:, :] != 3) & (text_regions_p_1[:, :] != 0)) * 1 img_only_regions_with_sep = img_only_regions_with_sep.astype(np.uint8) - - + if erosion_hurts: - img_only_regions = np.copy(img_only_regions_with_sep[:,:]) + img_only_regions = np.copy(img_only_regions_with_sep[:, :]) else: - img_only_regions = cv2.erode(img_only_regions_with_sep[:,:], KERNEL, iterations=6) - - ##print(img_only_regions.shape,'img_only_regions') - ##plt.imshow(img_only_regions[:,:]) - ##plt.show() - ##num_col, _ = find_num_col(img_only_regions, num_col_classifier, self.tables, multiplier=6.0) + img_only_regions = cv2.erode(img_only_regions_with_sep[:, :], KERNEL, iterations=6) + + # print(img_only_regions.shape,'img_only_regions') + # plt.imshow(img_only_regions[:,:]) + # plt.show() + # num_col, _ = find_num_col(img_only_regions, num_col_classifier, self.tables, multiplier=6.0) try: num_col, _ = find_num_col(img_only_regions, num_col_classifier, self.tables, multiplier=6.0) num_col = num_col + 1 @@ -2639,6 +2772,7 @@ class Eynollah: self.logger.error(why) num_col = None return num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, text_regions_p_1, cont_page, table_prediction, textline_mask_tot_ea + def run_graphics_and_columns(self, text_regions_p_1, num_col_classifier, num_column_is_classified, erosion_hurts): img_g = self.imread(grayscale=True, uint8=True) @@ -2649,16 +2783,16 @@ class Eynollah: img_g3[:, :, 2] = img_g[:, :] image_page, page_coord, cont_page = self.extract_page() - + if self.tables: table_prediction = self.get_tables_from_model(image_page, num_col_classifier) else: table_prediction = (np.zeros((image_page.shape[0], image_page.shape[1]))).astype(np.int16) - + if self.plotter: self.plotter.save_page_image(image_page) - text_regions_p_1 = text_regions_p_1[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + text_regions_p_1 = text_regions_p_1[page_coord[0]: page_coord[1], page_coord[2]: page_coord[3]] mask_images = (text_regions_p_1[:, :] == 2) * 1 mask_images = mask_images.astype(np.uint8) mask_images = cv2.erode(mask_images[:, :], KERNEL, iterations=10) @@ -2666,14 +2800,12 @@ class Eynollah: mask_lines = mask_lines.astype(np.uint8) img_only_regions_with_sep = ((text_regions_p_1[:, :] != 3) & (text_regions_p_1[:, :] != 0)) * 1 img_only_regions_with_sep = img_only_regions_with_sep.astype(np.uint8) - - + if erosion_hurts: - img_only_regions = np.copy(img_only_regions_with_sep[:,:]) + img_only_regions = np.copy(img_only_regions_with_sep[:, :]) else: - img_only_regions = cv2.erode(img_only_regions_with_sep[:,:], KERNEL, iterations=6) - - + img_only_regions = cv2.erode(img_only_regions_with_sep[:, :], KERNEL, iterations=6) + try: num_col, _ = find_num_col(img_only_regions, num_col_classifier, self.tables, multiplier=6.0) num_col = num_col + 1 @@ -2684,9 +2816,10 @@ class Eynollah: num_col = None return num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, text_regions_p_1, cont_page, table_prediction - def run_enhancement(self,light_version): + def run_enhancement(self, light_version): self.logger.info("Resizing and enhancing image...") - is_image_enhanced, 
img_org, img_res, num_col_classifier, num_column_is_classified, img_bin = self.resize_and_enhance_image_with_column_classifier(light_version) + is_image_enhanced, img_org, img_res, num_col_classifier, num_column_is_classified, img_bin = self.resize_and_enhance_image_with_column_classifier( + light_version) self.logger.info("Image was %senhanced.", '' if is_image_enhanced else 'not ') scale = 1 @@ -2704,7 +2837,8 @@ class Eynollah: else: self.get_image_and_scales(img_org, img_res, scale) if self.allow_scaling: - img_org, img_res, is_image_enhanced = self.resize_image_with_column_classifier(is_image_enhanced, img_bin) + img_org, img_res, is_image_enhanced = self.resize_image_with_column_classifier(is_image_enhanced, + img_bin) self.get_image_and_scales_after_enhancing(img_org, img_res) return img_res, is_image_enhanced, num_col_classifier, num_column_is_classified @@ -2722,7 +2856,8 @@ class Eynollah: def run_deskew(self, textline_mask_tot_ea): sigma = 2 main_page_deskew = True - slope_deskew = return_deskew_slop(cv2.erode(textline_mask_tot_ea, KERNEL, iterations=2), sigma, main_page_deskew, plotter=self.plotter) + slope_deskew = return_deskew_slop(cv2.erode(textline_mask_tot_ea, KERNEL, iterations=2), sigma, + main_page_deskew, plotter=self.plotter) slope_first = 0 if self.plotter: @@ -2730,7 +2865,8 @@ class Eynollah: self.logger.info("slope_deskew: %.2f°", slope_deskew) return slope_deskew, slope_first - def run_marginals(self, image_page, textline_mask_tot_ea, mask_images, mask_lines, num_col_classifier, slope_deskew, text_regions_p_1, table_prediction): + def run_marginals(self, image_page, textline_mask_tot_ea, mask_images, mask_lines, num_col_classifier, slope_deskew, + text_regions_p_1, table_prediction): image_page_rotated, textline_mask_tot = image_page[:, :], textline_mask_tot_ea[:, :] textline_mask_tot[mask_images[:, :] == 1] = 0 @@ -2742,9 +2878,10 @@ class Eynollah: try: regions_without_separators = (text_regions_p[:, :] == 1) * 1 if self.tables: - regions_without_separators[table_prediction==1] = 1 + regions_without_separators[table_prediction == 1] = 1 regions_without_separators = regions_without_separators.astype(np.uint8) - text_regions_p = get_marginals(rotate_image(regions_without_separators, slope_deskew), text_regions_p, num_col_classifier, slope_deskew, kernel=KERNEL) + text_regions_p = get_marginals(rotate_image(regions_without_separators, slope_deskew), text_regions_p, + num_col_classifier, slope_deskew, kernel=KERNEL) except Exception as e: self.logger.error("exception %s", e) @@ -2753,29 +2890,38 @@ class Eynollah: self.plotter.save_plot_of_layout_main(text_regions_p, image_page) return textline_mask_tot, text_regions_p, image_page_rotated - def run_boxes_no_full_layout(self, image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, table_prediction, erosion_hurts): + def run_boxes_no_full_layout(self, image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, + table_prediction, erosion_hurts): self.logger.debug('enter run_boxes_no_full_layout') if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - _, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, table_prediction, slope_deskew) + _, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = rotation_not_90_func(image_page, + textline_mask_tot, + text_regions_p, + table_prediction, + slope_deskew) text_regions_p_1_n = resize_image(text_regions_p_1_n, text_regions_p.shape[0], 
text_regions_p.shape[1]) textline_mask_tot_d = resize_image(textline_mask_tot_d, text_regions_p.shape[0], text_regions_p.shape[1]) table_prediction_n = resize_image(table_prediction_n, text_regions_p.shape[0], text_regions_p.shape[1]) regions_without_separators_d = (text_regions_p_1_n[:, :] == 1) * 1 if self.tables: - regions_without_separators_d[table_prediction_n[:,:] == 1] = 1 - regions_without_separators = (text_regions_p[:, :] == 1) * 1 # ( (text_regions_p[:,:]==1) | (text_regions_p[:,:]==2) )*1 #self.return_regions_without_separators_new(text_regions_p[:,:,0],img_only_regions) + regions_without_separators_d[table_prediction_n[:, :] == 1] = 1 + regions_without_separators = (text_regions_p[:, + :] == 1) * 1 # ( (text_regions_p[:,:]==1) | (text_regions_p[:,:]==2) )*1 #self.return_regions_without_separators_new(text_regions_p[:,:,0],img_only_regions) if self.tables: - regions_without_separators[table_prediction ==1 ] = 1 + regions_without_separators[table_prediction == 1] = 1 if np.abs(slope_deskew) < SLOPE_THRESHOLD: text_regions_p_1_n = None textline_mask_tot_d = None regions_without_separators_d = None pixel_lines = 3 if np.abs(slope_deskew) < SLOPE_THRESHOLD: - _, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, pixel_lines) + _, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document( + np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, pixel_lines) if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, pixel_lines) + _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document( + np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, + pixel_lines) self.logger.info("num_col_classifier: %s", num_col_classifier) @@ -2788,149 +2934,194 @@ class Eynollah: regions_without_separators_d = cv2.erode(regions_without_separators_d[:, :], KERNEL, iterations=6) t1 = time.time() if np.abs(slope_deskew) < SLOPE_THRESHOLD: - boxes, peaks_neg_tot_tables = return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_without_separators, matrix_of_lines_ch, num_col_classifier, erosion_hurts, self.tables, self.right2left) + boxes, peaks_neg_tot_tables = return_boxes_of_images_by_order_of_reading_new(splitter_y_new, + regions_without_separators, + matrix_of_lines_ch, + num_col_classifier, + erosion_hurts, self.tables, + self.right2left) boxes_d = None self.logger.debug("len(boxes): %s", len(boxes)) - + text_regions_p_tables = np.copy(text_regions_p) - text_regions_p_tables[:,:][(table_prediction[:,:] == 1)] = 10 + text_regions_p_tables[:, :][(table_prediction[:, :] == 1)] = 10 pixel_line = 3 - img_revised_tab2 = self.add_tables_heuristic_to_layout(text_regions_p_tables, boxes, 0, splitter_y_new, peaks_neg_tot_tables, text_regions_p_tables , num_col_classifier , 0.000005, pixel_line) - img_revised_tab2, contoures_tables = self.check_iou_of_bounding_box_and_contour_for_tables(img_revised_tab2,table_prediction, 10, num_col_classifier) + img_revised_tab2 = self.add_tables_heuristic_to_layout(text_regions_p_tables, boxes, 0, splitter_y_new, + peaks_neg_tot_tables, text_regions_p_tables, + num_col_classifier, 0.000005, pixel_line) + img_revised_tab2, contoures_tables = 
self.check_iou_of_bounding_box_and_contour_for_tables(img_revised_tab2, + table_prediction, + 10, + num_col_classifier) else: - boxes_d, peaks_neg_tot_tables_d = return_boxes_of_images_by_order_of_reading_new(splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, num_col_classifier, erosion_hurts, self.tables, self.right2left) + boxes_d, peaks_neg_tot_tables_d = return_boxes_of_images_by_order_of_reading_new(splitter_y_new_d, + regions_without_separators_d, + matrix_of_lines_ch_d, + num_col_classifier, + erosion_hurts, self.tables, + self.right2left) boxes = None self.logger.debug("len(boxes): %s", len(boxes_d)) - + text_regions_p_tables = np.copy(text_regions_p_1_n) - text_regions_p_tables =np.round(text_regions_p_tables) - text_regions_p_tables[:,:][(text_regions_p_tables[:,:] != 3) & (table_prediction_n[:,:] == 1)] = 10 - + text_regions_p_tables = np.round(text_regions_p_tables) + text_regions_p_tables[:, :][(text_regions_p_tables[:, :] != 3) & (table_prediction_n[:, :] == 1)] = 10 + pixel_line = 3 - img_revised_tab2 = self.add_tables_heuristic_to_layout(text_regions_p_tables,boxes_d,0,splitter_y_new_d,peaks_neg_tot_tables_d,text_regions_p_tables, num_col_classifier, 0.000005, pixel_line) - img_revised_tab2_d,_ = self.check_iou_of_bounding_box_and_contour_for_tables(img_revised_tab2,table_prediction_n, 10, num_col_classifier) - + img_revised_tab2 = self.add_tables_heuristic_to_layout(text_regions_p_tables, boxes_d, 0, splitter_y_new_d, + peaks_neg_tot_tables_d, text_regions_p_tables, + num_col_classifier, 0.000005, pixel_line) + img_revised_tab2_d, _ = self.check_iou_of_bounding_box_and_contour_for_tables(img_revised_tab2, + table_prediction_n, 10, + num_col_classifier) + img_revised_tab2_d_rotated = rotate_image(img_revised_tab2_d, -slope_deskew) img_revised_tab2_d_rotated = np.round(img_revised_tab2_d_rotated) img_revised_tab2_d_rotated = img_revised_tab2_d_rotated.astype(np.int8) - img_revised_tab2_d_rotated = resize_image(img_revised_tab2_d_rotated, text_regions_p.shape[0], text_regions_p.shape[1]) + img_revised_tab2_d_rotated = resize_image(img_revised_tab2_d_rotated, text_regions_p.shape[0], + text_regions_p.shape[1]) self.logger.info("detecting boxes took %.1fs", time.time() - t1) - + if self.tables: if np.abs(slope_deskew) < SLOPE_THRESHOLD: - img_revised_tab = np.copy(img_revised_tab2[:,:,0]) - img_revised_tab[:,:][(text_regions_p[:,:] == 1) & (img_revised_tab[:,:] != 10)] = 1 + img_revised_tab = np.copy(img_revised_tab2[:, :, 0]) + img_revised_tab[:, :][(text_regions_p[:, :] == 1) & (img_revised_tab[:, :] != 10)] = 1 else: - img_revised_tab = np.copy(text_regions_p[:,:]) - img_revised_tab[:,:][img_revised_tab[:,:] == 10] = 0 - img_revised_tab[:,:][img_revised_tab2_d_rotated[:,:,0] == 10] = 10 - - text_regions_p[:,:][text_regions_p[:,:]==10] = 0 - text_regions_p[:,:][img_revised_tab[:,:]==10] = 10 + img_revised_tab = np.copy(text_regions_p[:, :]) + img_revised_tab[:, :][img_revised_tab[:, :] == 10] = 0 + img_revised_tab[:, :][img_revised_tab2_d_rotated[:, :, 0] == 10] = 10 + + text_regions_p[:, :][text_regions_p[:, :] == 10] = 0 + text_regions_p[:, :][img_revised_tab[:, :] == 10] = 10 else: - img_revised_tab=text_regions_p[:,:] - #img_revised_tab = text_regions_p[:, :] + img_revised_tab = text_regions_p[:, :] + # img_revised_tab = text_regions_p[:, :] polygons_of_images = return_contours_of_interested_region(img_revised_tab, 2) pixel_img = 4 min_area_mar = 0.00001 polygons_of_marginals = return_contours_of_interested_region(text_regions_p, pixel_img, 
min_area_mar) - + pixel_img = 10 contours_tables = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar) - + self.logger.debug('exit run_boxes_no_full_layout') return polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, boxes, boxes_d, polygons_of_marginals, contours_tables - def run_boxes_full_layout(self, image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, img_only_regions, table_prediction, erosion_hurts): + def run_boxes_full_layout(self, image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, + img_only_regions, table_prediction, erosion_hurts): self.logger.debug('enter run_boxes_full_layout') - + if self.tables: if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - image_page_rotated_n,textline_mask_tot_d,text_regions_p_1_n , table_prediction_n = rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, table_prediction, slope_deskew) - - text_regions_p_1_n = resize_image(text_regions_p_1_n,text_regions_p.shape[0],text_regions_p.shape[1]) - textline_mask_tot_d = resize_image(textline_mask_tot_d,text_regions_p.shape[0],text_regions_p.shape[1]) - table_prediction_n = resize_image(table_prediction_n,text_regions_p.shape[0],text_regions_p.shape[1]) - - regions_without_separators_d=(text_regions_p_1_n[:,:] == 1)*1 - regions_without_separators_d[table_prediction_n[:,:] == 1] = 1 + image_page_rotated_n, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = rotation_not_90_func( + image_page, textline_mask_tot, text_regions_p, table_prediction, slope_deskew) + + text_regions_p_1_n = resize_image(text_regions_p_1_n, text_regions_p.shape[0], text_regions_p.shape[1]) + textline_mask_tot_d = resize_image(textline_mask_tot_d, text_regions_p.shape[0], + text_regions_p.shape[1]) + table_prediction_n = resize_image(table_prediction_n, text_regions_p.shape[0], text_regions_p.shape[1]) + + regions_without_separators_d = (text_regions_p_1_n[:, :] == 1) * 1 + regions_without_separators_d[table_prediction_n[:, :] == 1] = 1 else: text_regions_p_1_n = None textline_mask_tot_d = None regions_without_separators_d = None - - regions_without_separators = (text_regions_p[:,:] == 1)*1#( (text_regions_p[:,:]==1) | (text_regions_p[:,:]==2) )*1 #self.return_regions_without_seperators_new(text_regions_p[:,:,0],img_only_regions) + + regions_without_separators = (text_regions_p[:, + :] == 1) * 1 # ((text_regions_p[:,:]==1) | (text_regions_p[:,:]==2) )*1 #self.return_regions_without_seperators_new(text_regions_p[:,:,0],img_only_regions) regions_without_separators[table_prediction == 1] = 1 - - pixel_lines=3 + + pixel_lines = 3 if np.abs(slope_deskew) < SLOPE_THRESHOLD: - num_col, peaks_neg_fin, matrix_of_lines_ch, splitter_y_new, seperators_closeup_n = find_number_of_columns_in_document(np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, pixel_lines) - + num_col, peaks_neg_fin, matrix_of_lines_ch, splitter_y_new, seperators_closeup_n = find_number_of_columns_in_document( + np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, + pixel_lines) + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - num_col_d, peaks_neg_fin_d, matrix_of_lines_ch_d, splitter_y_new_d, seperators_closeup_n_d = find_number_of_columns_in_document(np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2),num_col_classifier, self.tables, pixel_lines) + num_col_d, peaks_neg_fin_d, matrix_of_lines_ch_d, splitter_y_new_d, seperators_closeup_n_d 
= find_number_of_columns_in_document( + np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, + pixel_lines) - if num_col_classifier>=3: + if num_col_classifier >= 3: if np.abs(slope_deskew) < SLOPE_THRESHOLD: regions_without_separators = regions_without_separators.astype(np.uint8) - regions_without_separators = cv2.erode(regions_without_separators[:,:], KERNEL, iterations=6) - + regions_without_separators = cv2.erode(regions_without_separators[:, :], KERNEL, iterations=6) + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: regions_without_separators_d = regions_without_separators_d.astype(np.uint8) - regions_without_separators_d = cv2.erode(regions_without_separators_d[:,:], KERNEL, iterations=6) + regions_without_separators_d = cv2.erode(regions_without_separators_d[:, :], KERNEL, iterations=6) else: pass - + if np.abs(slope_deskew) < SLOPE_THRESHOLD: - boxes, peaks_neg_tot_tables = return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_without_separators, matrix_of_lines_ch, num_col_classifier, erosion_hurts, self.tables, self.right2left) + boxes, peaks_neg_tot_tables = return_boxes_of_images_by_order_of_reading_new(splitter_y_new, + regions_without_separators, + matrix_of_lines_ch, + num_col_classifier, + erosion_hurts, self.tables, + self.right2left) text_regions_p_tables = np.copy(text_regions_p) - text_regions_p_tables[:,:][(table_prediction[:,:]==1)] = 10 + text_regions_p_tables[:, :][(table_prediction[:, :] == 1)] = 10 pixel_line = 3 - img_revised_tab2 = self.add_tables_heuristic_to_layout(text_regions_p_tables, boxes, 0, splitter_y_new, peaks_neg_tot_tables, text_regions_p_tables , num_col_classifier , 0.000005, pixel_line) - - img_revised_tab2,contoures_tables = self.check_iou_of_bounding_box_and_contour_for_tables(img_revised_tab2, table_prediction, 10, num_col_classifier) - + img_revised_tab2 = self.add_tables_heuristic_to_layout(text_regions_p_tables, boxes, 0, splitter_y_new, + peaks_neg_tot_tables, text_regions_p_tables, + num_col_classifier, 0.000005, pixel_line) + + img_revised_tab2, contoures_tables = self.check_iou_of_bounding_box_and_contour_for_tables( + img_revised_tab2, table_prediction, 10, num_col_classifier) + else: - boxes_d, peaks_neg_tot_tables_d = return_boxes_of_images_by_order_of_reading_new(splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, num_col_classifier, erosion_hurts, self.tables, self.right2left) + boxes_d, peaks_neg_tot_tables_d = return_boxes_of_images_by_order_of_reading_new(splitter_y_new_d, + regions_without_separators_d, + matrix_of_lines_ch_d, + num_col_classifier, + erosion_hurts, + self.tables, + self.right2left) text_regions_p_tables = np.copy(text_regions_p_1_n) text_regions_p_tables = np.round(text_regions_p_tables) - text_regions_p_tables[:,:][(text_regions_p_tables[:,:]!=3) & (table_prediction_n[:,:]==1)] = 10 - + text_regions_p_tables[:, :][(text_regions_p_tables[:, :] != 3) & (table_prediction_n[:, :] == 1)] = 10 + pixel_line = 3 - img_revised_tab2 = self.add_tables_heuristic_to_layout(text_regions_p_tables,boxes_d,0,splitter_y_new_d,peaks_neg_tot_tables_d,text_regions_p_tables, num_col_classifier, 0.000005, pixel_line) - - img_revised_tab2_d,_ = self.check_iou_of_bounding_box_and_contour_for_tables(img_revised_tab2, table_prediction_n, 10, num_col_classifier) + img_revised_tab2 = self.add_tables_heuristic_to_layout(text_regions_p_tables, boxes_d, 0, + splitter_y_new_d, peaks_neg_tot_tables_d, + text_regions_p_tables, num_col_classifier, + 0.000005, pixel_line) + + 
img_revised_tab2_d, _ = self.check_iou_of_bounding_box_and_contour_for_tables(img_revised_tab2, + table_prediction_n, 10, + num_col_classifier) img_revised_tab2_d_rotated = rotate_image(img_revised_tab2_d, -slope_deskew) - img_revised_tab2_d_rotated = np.round(img_revised_tab2_d_rotated) img_revised_tab2_d_rotated = img_revised_tab2_d_rotated.astype(np.int8) - img_revised_tab2_d_rotated = resize_image(img_revised_tab2_d_rotated, text_regions_p.shape[0], text_regions_p.shape[1]) - + img_revised_tab2_d_rotated = resize_image(img_revised_tab2_d_rotated, text_regions_p.shape[0], + text_regions_p.shape[1]) if np.abs(slope_deskew) < 0.13: - img_revised_tab = np.copy(img_revised_tab2[:,:,0]) + img_revised_tab = np.copy(img_revised_tab2[:, :, 0]) else: - img_revised_tab = np.copy(text_regions_p[:,:]) - img_revised_tab[:,:][img_revised_tab[:,:] == 10] = 0 - img_revised_tab[:,:][img_revised_tab2_d_rotated[:,:,0] == 10] = 10 - - - ##img_revised_tab=img_revised_tab2[:,:,0] - #img_revised_tab=text_regions_p[:,:] - text_regions_p[:,:][text_regions_p[:,:]==10] = 0 - text_regions_p[:,:][img_revised_tab[:,:]==10] = 10 - #img_revised_tab[img_revised_tab2[:,:,0]==10] =10 - + img_revised_tab = np.copy(text_regions_p[:, :]) + img_revised_tab[:, :][img_revised_tab[:, :] == 10] = 0 + img_revised_tab[:, :][img_revised_tab2_d_rotated[:, :, 0] == 10] = 10 + + # img_revised_tab=img_revised_tab2[:,:,0] + # img_revised_tab=text_regions_p[:,:] + text_regions_p[:, :][text_regions_p[:, :] == 10] = 0 + text_regions_p[:, :][img_revised_tab[:, :] == 10] = 10 + # img_revised_tab[img_revised_tab2[:,:,0]==10] =10 + pixel_img = 4 min_area_mar = 0.00001 polygons_of_marginals = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar) - + pixel_img = 10 contours_tables = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar) - + # set first model with second model text_regions_p[:, :][text_regions_p[:, :] == 2] = 5 text_regions_p[:, :][text_regions_p[:, :] == 3] = 6 @@ -2939,7 +3130,7 @@ class Eynollah: image_page = image_page.astype(np.uint8) regions_fully, regions_fully_only_drop = self.extract_text_regions(image_page, True, cols=num_col_classifier) - text_regions_p[:,:][regions_fully[:,:,0]==6]=6 + text_regions_p[:, :][regions_fully[:, :, 0] == 6] = 6 regions_fully_only_drop = put_drop_out_from_only_drop_model(regions_fully_only_drop, text_regions_p) regions_fully[:, :, 0][regions_fully_only_drop[:, :, 0] == 4] = 4 @@ -2950,16 +3141,21 @@ class Eynollah: else: regions_fully_np = filter_small_drop_capitals_from_no_patch_layout(regions_fully_np, text_regions_p) - regions_fully = boosting_headers_by_longshot_region_segmentation(regions_fully, regions_fully_np, img_only_regions) + regions_fully = boosting_headers_by_longshot_region_segmentation(regions_fully, regions_fully_np, + img_only_regions) # plt.imshow(regions_fully[:,:,0]) # plt.show() text_regions_p[:, :][regions_fully[:, :, 0] == 4] = 4 text_regions_p[:, :][regions_fully_np[:, :, 0] == 4] = 4 - #plt.imshow(text_regions_p) - #plt.show() - ####if not self.tables: + # plt.imshow(text_regions_p) + # plt.show() + # if not self.tables: if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - _, textline_mask_tot_d, text_regions_p_1_n, regions_fully_n = rotation_not_90_func_full_layout(image_page, textline_mask_tot, text_regions_p, regions_fully, slope_deskew) + _, textline_mask_tot_d, text_regions_p_1_n, regions_fully_n = rotation_not_90_func_full_layout(image_page, + textline_mask_tot, + text_regions_p, + regions_fully, + slope_deskew) 
text_regions_p_1_n = resize_image(text_regions_p_1_n, text_regions_p.shape[0], text_regions_p.shape[1]) textline_mask_tot_d = resize_image(textline_mask_tot_d, text_regions_p.shape[0], text_regions_p.shape[1]) @@ -2976,13 +3172,14 @@ class Eynollah: polygons_of_images = return_contours_of_interested_region(img_revised_tab, 5) self.logger.debug('exit run_boxes_full_layout') return polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, regions_fully, regions_without_separators, polygons_of_marginals, contours_tables - + def our_load_model(self, model_file): - + try: model = load_model(model_file, compile=False) except: - model = load_model(model_file , compile=False,custom_objects = {"PatchEncoder": PatchEncoder, "Patches": Patches}) + model = load_model(model_file, compile=False, + custom_objects={"PatchEncoder": PatchEncoder, "Patches": Patches}) return model @@ -2996,20 +3193,22 @@ class Eynollah: if not self.dir_in: self.ls_imgs = [1] - + for img_name in self.ls_imgs: t0 = time.time() if self.dir_in: - self.reset_file_name_dir(os.path.join(self.dir_in,img_name)) - + self.reset_file_name_dir(os.path.join(self.dir_in, img_name)) if self.extract_only_images: - img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement(self.light_version) + img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement( + self.light_version) self.logger.info("Enhancing took %.1fs ", time.time() - t0) - text_regions_p_1 ,erosion_hurts, polygons_lines_xml,polygons_of_images,image_page, page_coord, cont_page = self.get_regions_light_v_extract_only_images(img_res, is_image_enhanced, num_col_classifier) + text_regions_p_1, erosion_hurts, polygons_lines_xml, polygons_of_images, image_page, page_coord, cont_page = self.get_regions_light_v_extract_only_images( + img_res, is_image_enhanced, num_col_classifier) - pcgts = self.writer.build_pagexml_no_full_layout([], page_coord, [], [], [], [], polygons_of_images, [], [], [], [], [], cont_page, [], []) + pcgts = self.writer.build_pagexml_no_full_layout([], page_coord, [], [], [], [], polygons_of_images, [], + [], [], [], [], cont_page, [], []) if self.plotter: self.plotter.write_images_into_directory(polygons_of_images, image_page) @@ -3020,31 +3219,38 @@ class Eynollah: return pcgts else: - img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement(self.light_version) + img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement( + self.light_version) self.logger.info("Enhancing took %.1fs ", time.time() - t0) t1 = time.time() if self.light_version: - text_regions_p_1 ,erosion_hurts, polygons_lines_xml, textline_mask_tot_ea = self.get_regions_light_v(img_res, is_image_enhanced, num_col_classifier) + text_regions_p_1, erosion_hurts, polygons_lines_xml, textline_mask_tot_ea = self.get_regions_light_v( + img_res, is_image_enhanced, num_col_classifier) slope_deskew, slope_first = self.run_deskew(textline_mask_tot_ea) - #self.logger.info("Textregion detection took %.1fs ", time.time() - t1t) + # self.logger.info("Textregion detection took %.1fs ", time.time() - t1t) num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, text_regions_p_1, cont_page, table_prediction, textline_mask_tot_ea = \ - self.run_graphics_and_columns_light(text_regions_p_1, textline_mask_tot_ea, num_col_classifier, num_column_is_classified, erosion_hurts) - 
#self.logger.info("run graphics %.1fs ", time.time() - t1t) + self.run_graphics_and_columns_light(text_regions_p_1, textline_mask_tot_ea, num_col_classifier, + num_column_is_classified, erosion_hurts) + # self.logger.info("run graphics %.1fs ", time.time() - t1t) textline_mask_tot_ea_org = np.copy(textline_mask_tot_ea) else: - text_regions_p_1 ,erosion_hurts, polygons_lines_xml = self.get_regions_from_xy_2models(img_res, is_image_enhanced, num_col_classifier) + text_regions_p_1, erosion_hurts, polygons_lines_xml = self.get_regions_from_xy_2models(img_res, + is_image_enhanced, + num_col_classifier) self.logger.info("Textregion detection took %.1fs ", time.time() - t1) t1 = time.time() num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, text_regions_p_1, cont_page, table_prediction = \ - self.run_graphics_and_columns(text_regions_p_1, num_col_classifier, num_column_is_classified, erosion_hurts) + self.run_graphics_and_columns(text_regions_p_1, num_col_classifier, num_column_is_classified, + erosion_hurts) self.logger.info("Graphics detection took %.1fs ", time.time() - t1) - #self.logger.info('cont_page %s', cont_page) + # self.logger.info('cont_page %s', cont_page) if not num_col: self.logger.info("No columns detected, outputting an empty PAGE-XML") - pcgts = self.writer.build_pagexml_no_full_layout([], page_coord, [], [], [], [], [], [], [], [], [], [], cont_page, [], []) + pcgts = self.writer.build_pagexml_no_full_layout([], page_coord, [], [], [], [], [], [], [], [], [], + [], cont_page, [], []) self.logger.info("Job done in %.1fs", time.time() - t1) if self.dir_in: self.writer.write_pagexml(pcgts) @@ -3061,21 +3267,30 @@ class Eynollah: slope_deskew, slope_first = self.run_deskew(textline_mask_tot_ea) self.logger.info("deskewing took %.1fs", time.time() - t1) t1 = time.time() - #plt.imshow(table_prediction) - #plt.show() + # plt.imshow(table_prediction) + # plt.show() - textline_mask_tot, text_regions_p, image_page_rotated = self.run_marginals(image_page, textline_mask_tot_ea, mask_images, mask_lines, num_col_classifier, slope_deskew, text_regions_p_1, table_prediction) + textline_mask_tot, text_regions_p, image_page_rotated = self.run_marginals(image_page, + textline_mask_tot_ea, + mask_images, mask_lines, + num_col_classifier, + slope_deskew, + text_regions_p_1, + table_prediction) self.logger.info("detection of marginals took %.1fs", time.time() - t1) t1 = time.time() if not self.full_layout: - polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, boxes, boxes_d, polygons_of_marginals, contours_tables = self.run_boxes_no_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, table_prediction, erosion_hurts) + polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, boxes, boxes_d, polygons_of_marginals, contours_tables = self.run_boxes_no_full_layout( + image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, + table_prediction, erosion_hurts) if self.full_layout: - polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, regions_fully, regions_without_separators, polygons_of_marginals, contours_tables = self.run_boxes_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, img_only_regions, table_prediction, erosion_hurts) - text_only = ((img_revised_tab[:, :] == 1)) * 1 + 
polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, regions_fully, regions_without_separators, polygons_of_marginals, contours_tables = self.run_boxes_full_layout( + image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, + img_only_regions, table_prediction, erosion_hurts) + text_only = (img_revised_tab[:, :] == 1) * 1 if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - text_only_d = ((text_regions_p_1_n[:, :] == 1)) * 1 - + text_only_d = (text_regions_p_1_n[:, :] == 1) * 1 min_con_area = 0.000005 if np.abs(slope_deskew) >= SLOPE_THRESHOLD: @@ -3085,12 +3300,14 @@ class Eynollah: if len(contours_only_text_parent) > 0: areas_cnt_text = np.array([cv2.contourArea(c) for c in contours_only_text_parent]) areas_cnt_text = areas_cnt_text / float(text_only.shape[0] * text_only.shape[1]) - #self.logger.info('areas_cnt_text %s', areas_cnt_text) + # self.logger.info('areas_cnt_text %s', areas_cnt_text) contours_biggest = contours_only_text_parent[np.argmax(areas_cnt_text)] - contours_only_text_parent = [c for jz, c in enumerate(contours_only_text_parent) if areas_cnt_text[jz] > min_con_area] + contours_only_text_parent = [c for jz, c in enumerate(contours_only_text_parent) if + areas_cnt_text[jz] > min_con_area] areas_cnt_text_parent = [area for area in areas_cnt_text if area > min_con_area] index_con_parents = np.argsort(areas_cnt_text_parent) - contours_only_text_parent = list(np.array(contours_only_text_parent,dtype=object)[index_con_parents]) + contours_only_text_parent = list( + np.array(contours_only_text_parent, dtype=object)[index_con_parents]) areas_cnt_text_parent = list(np.array(areas_cnt_text_parent)[index_con_parents]) cx_bigest_big, cy_biggest_big, _, _, _, _, _ = find_new_features_of_contours([contours_biggest]) @@ -3102,24 +3319,31 @@ class Eynollah: areas_cnt_text_d = np.array([cv2.contourArea(c) for c in contours_only_text_parent_d]) areas_cnt_text_d = areas_cnt_text_d / float(text_only_d.shape[0] * text_only_d.shape[1]) - if len(areas_cnt_text_d)>0: + if len(areas_cnt_text_d) > 0: contours_biggest_d = contours_only_text_parent_d[np.argmax(areas_cnt_text_d)] index_con_parents_d = np.argsort(areas_cnt_text_d) - contours_only_text_parent_d = list(np.array(contours_only_text_parent_d,dtype=object)[index_con_parents_d]) + contours_only_text_parent_d = list( + np.array(contours_only_text_parent_d, dtype=object)[index_con_parents_d]) areas_cnt_text_d = list(np.array(areas_cnt_text_d)[index_con_parents_d]) - cx_bigest_d_big, cy_biggest_d_big, _, _, _, _, _ = find_new_features_of_contours([contours_biggest_d]) - cx_bigest_d, cy_biggest_d, _, _, _, _, _ = find_new_features_of_contours(contours_only_text_parent_d) + cx_bigest_d_big, cy_biggest_d_big, _, _, _, _, _ = find_new_features_of_contours( + [contours_biggest_d]) + cx_bigest_d, cy_biggest_d, _, _, _, _, _ = find_new_features_of_contours( + contours_only_text_parent_d) try: if len(cx_bigest_d) >= 5: cx_bigest_d_last5 = cx_bigest_d[-5:] cy_biggest_d_last5 = cy_biggest_d[-5:] - dists_d = [math.sqrt((cx_bigest_big[0] - cx_bigest_d_last5[j]) ** 2 + (cy_biggest_big[0] - cy_biggest_d_last5[j]) ** 2) for j in range(len(cy_biggest_d_last5))] - ind_largest = len(cx_bigest_d) -5 + np.argmin(dists_d) + dists_d = [math.sqrt((cx_bigest_big[0] - cx_bigest_d_last5[j]) ** 2 + ( + cy_biggest_big[0] - cy_biggest_d_last5[j]) ** 2) for j in + range(len(cy_biggest_d_last5))] + ind_largest = len(cx_bigest_d) - 5 + np.argmin(dists_d) else: cx_bigest_d_last5 = 
cx_bigest_d[-len(cx_bigest_d):] cy_biggest_d_last5 = cy_biggest_d[-len(cx_bigest_d):] - dists_d = [math.sqrt((cx_bigest_big[0]-cx_bigest_d_last5[j])**2 + (cy_biggest_big[0]-cy_biggest_d_last5[j])**2) for j in range(len(cy_biggest_d_last5))] + dists_d = [math.sqrt((cx_bigest_big[0] - cx_bigest_d_last5[j]) ** 2 + ( + cy_biggest_big[0] - cy_biggest_d_last5[j]) ** 2) for j in + range(len(cy_biggest_d_last5))] ind_largest = len(cx_bigest_d) - len(cx_bigest_d) + np.argmin(dists_d) cx_bigest_d_big[0] = cx_bigest_d[ind_largest] @@ -3140,8 +3364,10 @@ class Eynollah: p = np.dot(M_22, [cx_bigest[i], cy_biggest[i]]) p[0] = p[0] - x_diff[0] p[1] = p[1] - y_diff[0] - dists = [math.sqrt((p[0] - cx_bigest_d[j]) ** 2 + (p[1] - cy_biggest_d[j]) ** 2) for j in range(len(cx_bigest_d))] - contours_only_text_parent_d_ordered.append(contours_only_text_parent_d[np.argmin(dists)]) + dists = [math.sqrt((p[0] - cx_bigest_d[j]) ** 2 + (p[1] - cy_biggest_d[j]) ** 2) for j + in range(len(cx_bigest_d))] + contours_only_text_parent_d_ordered.append( + contours_only_text_parent_d[np.argmin(dists)]) # img2=np.zeros((text_only.shape[0],text_only.shape[1],3)) # img2=cv2.fillPoly(img2,pts=[contours_only_text_parent_d[np.argmin(dists)]] ,color=(1,1,1)) # plt.imshow(img2[:,:,0]) @@ -3164,115 +3390,188 @@ class Eynollah: areas_cnt_text = areas_cnt_text / float(text_only.shape[0] * text_only.shape[1]) contours_biggest = contours_only_text_parent[np.argmax(areas_cnt_text)] - contours_only_text_parent = [c for jz, c in enumerate(contours_only_text_parent) if areas_cnt_text[jz] > min_con_area] + contours_only_text_parent = [c for jz, c in enumerate(contours_only_text_parent) if + areas_cnt_text[jz] > min_con_area] areas_cnt_text_parent = [area for area in areas_cnt_text if area > min_con_area] index_con_parents = np.argsort(areas_cnt_text_parent) - contours_only_text_parent = list(np.array(contours_only_text_parent,dtype=object)[index_con_parents]) + contours_only_text_parent = list( + np.array(contours_only_text_parent, dtype=object)[index_con_parents]) areas_cnt_text_parent = list(np.array(areas_cnt_text_parent)[index_con_parents]) cx_bigest_big, cy_biggest_big, _, _, _, _, _ = find_new_features_of_contours([contours_biggest]) cx_bigest, cy_biggest, _, _, _, _, _ = find_new_features_of_contours(contours_only_text_parent) - #self.logger.debug('areas_cnt_text_parent %s', areas_cnt_text_parent) + # self.logger.debug('areas_cnt_text_parent %s', areas_cnt_text_parent) # self.logger.debug('areas_cnt_text_parent_d %s', areas_cnt_text_parent_d) # self.logger.debug('len(contours_only_text_parent) %s', len(contours_only_text_parent_d)) else: pass if self.light_version: - txt_con_org = get_textregion_contours_in_org_image_light(contours_only_text_parent, self.image, slope_first) + txt_con_org = get_textregion_contours_in_org_image_light(contours_only_text_parent, self.image, + slope_first) else: - txt_con_org = get_textregion_contours_in_org_image(contours_only_text_parent, self.image, slope_first) + txt_con_org = get_textregion_contours_in_org_image(contours_only_text_parent, self.image, + slope_first) boxes_text, _ = get_text_region_boxes_by_given_contours(contours_only_text_parent) boxes_marginals, _ = get_text_region_boxes_by_given_contours(polygons_of_marginals) if not self.curved_line: if self.light_version: if self.textline_light: - slopes, all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, all_box_coord, index_by_text_par_con = self.get_slopes_and_deskew_new_light(txt_con_org, contours_only_text_parent, 
textline_mask_tot_ea_org, image_page_rotated, boxes_text, slope_deskew) - slopes_marginals, all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, all_box_coord_marginals, _ = self.get_slopes_and_deskew_new_light(polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea_org, image_page_rotated, boxes_marginals, slope_deskew) + slopes, all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, all_box_coord, index_by_text_par_con = self.get_slopes_and_deskew_new_light( + txt_con_org, contours_only_text_parent, textline_mask_tot_ea_org, image_page_rotated, + boxes_text, slope_deskew) + slopes_marginals, all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, all_box_coord_marginals, _ = self.get_slopes_and_deskew_new_light( + polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea_org, + image_page_rotated, boxes_marginals, slope_deskew) else: - slopes, all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, all_box_coord, index_by_text_par_con = self.get_slopes_and_deskew_new_light(txt_con_org, contours_only_text_parent, textline_mask_tot_ea, image_page_rotated, boxes_text, slope_deskew) - slopes_marginals, all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, all_box_coord_marginals, _ = self.get_slopes_and_deskew_new_light(polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea, image_page_rotated, boxes_marginals, slope_deskew) + slopes, all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, all_box_coord, index_by_text_par_con = self.get_slopes_and_deskew_new_light( + txt_con_org, contours_only_text_parent, textline_mask_tot_ea, image_page_rotated, + boxes_text, slope_deskew) + slopes_marginals, all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, all_box_coord_marginals, _ = self.get_slopes_and_deskew_new_light( + polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea, image_page_rotated, + boxes_marginals, slope_deskew) else: - slopes, all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, all_box_coord, index_by_text_par_con = self.get_slopes_and_deskew_new(txt_con_org, contours_only_text_parent, textline_mask_tot_ea, image_page_rotated, boxes_text, slope_deskew) - slopes_marginals, all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, all_box_coord_marginals, _ = self.get_slopes_and_deskew_new(polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea, image_page_rotated, boxes_marginals, slope_deskew) + slopes, all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, all_box_coord, index_by_text_par_con = self.get_slopes_and_deskew_new( + txt_con_org, contours_only_text_parent, textline_mask_tot_ea, image_page_rotated, + boxes_text, slope_deskew) + slopes_marginals, all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, all_box_coord_marginals, _ = self.get_slopes_and_deskew_new( + polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea, image_page_rotated, + boxes_marginals, slope_deskew) else: scale_param = 1 - all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, all_box_coord, index_by_text_par_con, slopes = self.get_slopes_and_deskew_new_curved(txt_con_org, contours_only_text_parent, cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=1), image_page_rotated, boxes_text, text_only, num_col_classifier, 
scale_param, slope_deskew) - all_found_textline_polygons = small_textlines_to_parent_adherence2(all_found_textline_polygons, textline_mask_tot_ea, num_col_classifier) - all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, all_box_coord_marginals, _, slopes_marginals = self.get_slopes_and_deskew_new_curved(polygons_of_marginals, polygons_of_marginals, cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=1), image_page_rotated, boxes_marginals, text_only, num_col_classifier, scale_param, slope_deskew) - all_found_textline_polygons_marginals = small_textlines_to_parent_adherence2(all_found_textline_polygons_marginals, textline_mask_tot_ea, num_col_classifier) + all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, all_box_coord, index_by_text_par_con, slopes = self.get_slopes_and_deskew_new_curved( + txt_con_org, contours_only_text_parent, + cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=1), image_page_rotated, boxes_text, + text_only, num_col_classifier, scale_param, slope_deskew) + all_found_textline_polygons = small_textlines_to_parent_adherence2(all_found_textline_polygons, + textline_mask_tot_ea, + num_col_classifier) + all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, all_box_coord_marginals, _, slopes_marginals = self.get_slopes_and_deskew_new_curved( + polygons_of_marginals, polygons_of_marginals, + cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=1), image_page_rotated, + boxes_marginals, text_only, num_col_classifier, scale_param, slope_deskew) + all_found_textline_polygons_marginals = small_textlines_to_parent_adherence2( + all_found_textline_polygons_marginals, textline_mask_tot_ea, num_col_classifier) if self.full_layout: if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - contours_only_text_parent_d_ordered = list(np.array(contours_only_text_parent_d_ordered, dtype=object)[index_by_text_par_con]) + contours_only_text_parent_d_ordered = list( + np.array(contours_only_text_parent_d_ordered, dtype=object)[index_by_text_par_con]) if self.light_version: - text_regions_p, contours_only_text_parent, contours_only_text_parent_h, all_box_coord, all_box_coord_h, all_found_textline_polygons, all_found_textline_polygons_h, slopes, slopes_h, contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered = check_any_text_region_in_model_one_is_main_or_header_light(text_regions_p, regions_fully, contours_only_text_parent, all_box_coord, all_found_textline_polygons, slopes, contours_only_text_parent_d_ordered) + text_regions_p, contours_only_text_parent, contours_only_text_parent_h, all_box_coord, all_box_coord_h, all_found_textline_polygons, all_found_textline_polygons_h, slopes, slopes_h, contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered = check_any_text_region_in_model_one_is_main_or_header_light( + text_regions_p, regions_fully, contours_only_text_parent, all_box_coord, + all_found_textline_polygons, slopes, contours_only_text_parent_d_ordered) else: - text_regions_p, contours_only_text_parent, contours_only_text_parent_h, all_box_coord, all_box_coord_h, all_found_textline_polygons, all_found_textline_polygons_h, slopes, slopes_h, contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered = check_any_text_region_in_model_one_is_main_or_header(text_regions_p, regions_fully, contours_only_text_parent, all_box_coord, all_found_textline_polygons, slopes, contours_only_text_parent_d_ordered) + text_regions_p, contours_only_text_parent, 
contours_only_text_parent_h, all_box_coord, all_box_coord_h, all_found_textline_polygons, all_found_textline_polygons_h, slopes, slopes_h, contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered = check_any_text_region_in_model_one_is_main_or_header( + text_regions_p, regions_fully, contours_only_text_parent, all_box_coord, + all_found_textline_polygons, slopes, contours_only_text_parent_d_ordered) else: - #takes long timee + # takes long time contours_only_text_parent_d_ordered = None if self.light_version: - text_regions_p, contours_only_text_parent, contours_only_text_parent_h, all_box_coord, all_box_coord_h, all_found_textline_polygons, all_found_textline_polygons_h, slopes, slopes_h, contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered = check_any_text_region_in_model_one_is_main_or_header_light(text_regions_p, regions_fully, contours_only_text_parent, all_box_coord, all_found_textline_polygons, slopes, contours_only_text_parent_d_ordered) + text_regions_p, contours_only_text_parent, contours_only_text_parent_h, all_box_coord, all_box_coord_h, all_found_textline_polygons, all_found_textline_polygons_h, slopes, slopes_h, contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered = check_any_text_region_in_model_one_is_main_or_header_light( + text_regions_p, regions_fully, contours_only_text_parent, all_box_coord, + all_found_textline_polygons, slopes, contours_only_text_parent_d_ordered) else: - text_regions_p, contours_only_text_parent, contours_only_text_parent_h, all_box_coord, all_box_coord_h, all_found_textline_polygons, all_found_textline_polygons_h, slopes, slopes_h, contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered = check_any_text_region_in_model_one_is_main_or_header(text_regions_p, regions_fully, contours_only_text_parent, all_box_coord, all_found_textline_polygons, slopes, contours_only_text_parent_d_ordered) + text_regions_p, contours_only_text_parent, contours_only_text_parent_h, all_box_coord, all_box_coord_h, all_found_textline_polygons, all_found_textline_polygons_h, slopes, slopes_h, contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered = check_any_text_region_in_model_one_is_main_or_header( + text_regions_p, regions_fully, contours_only_text_parent, all_box_coord, + all_found_textline_polygons, slopes, contours_only_text_parent_d_ordered) if self.plotter: self.plotter.save_plot_of_layout(text_regions_p, image_page) self.plotter.save_plot_of_layout_all(text_regions_p, image_page) pixel_img = 4 - polygons_of_drop_capitals = return_contours_of_interested_region_by_min_size(text_regions_p, pixel_img) - all_found_textline_polygons = adhere_drop_capital_region_into_corresponding_textline(text_regions_p, polygons_of_drop_capitals, contours_only_text_parent, contours_only_text_parent_h, all_box_coord, all_box_coord_h, all_found_textline_polygons, all_found_textline_polygons_h, kernel=KERNEL, curved_line=self.curved_line) + polygons_of_drop_capitals = return_contours_of_interested_region_by_min_size(text_regions_p, + pixel_img) + all_found_textline_polygons = adhere_drop_capital_region_into_corresponding_textline(text_regions_p, + polygons_of_drop_capitals, + contours_only_text_parent, + contours_only_text_parent_h, + all_box_coord, + all_box_coord_h, + all_found_textline_polygons, + all_found_textline_polygons_h, + kernel=KERNEL, + curved_line=self.curved_line) pixel_lines = 6 - if not self.headers_off: if np.abs(slope_deskew) < SLOPE_THRESHOLD: - num_col, _, 
matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, pixel_lines, contours_only_text_parent_h) + num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document( + np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, + pixel_lines, contours_only_text_parent_h) else: - _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, pixel_lines, contours_only_text_parent_h_d_ordered) + _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document( + np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, + self.tables, pixel_lines, contours_only_text_parent_h_d_ordered) elif self.headers_off: if np.abs(slope_deskew) < SLOPE_THRESHOLD: - num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, pixel_lines) + num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document( + np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, + pixel_lines) else: - _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, pixel_lines) + _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document( + np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, + self.tables, pixel_lines) if num_col_classifier >= 3: if np.abs(slope_deskew) < SLOPE_THRESHOLD: regions_without_separators = regions_without_separators.astype(np.uint8) - regions_without_separators = cv2.erode(regions_without_separators[:, :], KERNEL, iterations=6) + regions_without_separators = cv2.erode(regions_without_separators[:, :], KERNEL, + iterations=6) else: regions_without_separators_d = regions_without_separators_d.astype(np.uint8) - regions_without_separators_d = cv2.erode(regions_without_separators_d[:, :], KERNEL, iterations=6) - + regions_without_separators_d = cv2.erode(regions_without_separators_d[:, :], KERNEL, + iterations=6) if np.abs(slope_deskew) < SLOPE_THRESHOLD: - boxes, peaks_neg_tot_tables = return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_without_separators, matrix_of_lines_ch, num_col_classifier, erosion_hurts, self.tables, self.right2left) + boxes, peaks_neg_tot_tables = return_boxes_of_images_by_order_of_reading_new(splitter_y_new, + regions_without_separators, + matrix_of_lines_ch, + num_col_classifier, + erosion_hurts, + self.tables, + self.right2left) else: - boxes_d, peaks_neg_tot_tables_d = return_boxes_of_images_by_order_of_reading_new(splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, num_col_classifier, erosion_hurts, self.tables, self.right2left) - - #print(boxes_d,'boxes_d') - #img_once = np.zeros((textline_mask_tot_d.shape[0],textline_mask_tot_d.shape[1])) - #for box_i in boxes_d: - #img_once[int(box_i[2]):int(box_i[3]),int(box_i[0]):int(box_i[1]) ] =1 - #plt.imshow(img_once) - #plt.show() - #print(np.unique(img_once),'img_once') + boxes_d, peaks_neg_tot_tables_d = return_boxes_of_images_by_order_of_reading_new( + splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, num_col_classifier, + 
erosion_hurts, self.tables, self.right2left) + + # print(boxes_d,'boxes_d') + # img_once = np.zeros((textline_mask_tot_d.shape[0],textline_mask_tot_d.shape[1])) + # for box_i in boxes_d: + # img_once[int(box_i[2]):int(box_i[3]),int(box_i[0]):int(box_i[1]) ] =1 + # plt.imshow(img_once) + # plt.show() + # print(np.unique(img_once),'img_once') if self.plotter: self.plotter.write_images_into_directory(polygons_of_images, image_page) t_order = time.time() if self.full_layout: if np.abs(slope_deskew) < SLOPE_THRESHOLD: - order_text_new, id_of_texts_tot = self.do_order_of_regions(contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot) + order_text_new, id_of_texts_tot = self.do_order_of_regions(contours_only_text_parent, + contours_only_text_parent_h, boxes, + textline_mask_tot) else: - order_text_new, id_of_texts_tot = self.do_order_of_regions(contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered, boxes_d, textline_mask_tot_d) - - pcgts = self.writer.build_pagexml_full_layout(contours_only_text_parent, contours_only_text_parent_h, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, polygons_of_images, contours_tables, polygons_of_drop_capitals, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, cont_page, polygons_lines_xml) + order_text_new, id_of_texts_tot = self.do_order_of_regions(contours_only_text_parent_d_ordered, + contours_only_text_parent_h_d_ordered, + boxes_d, textline_mask_tot_d) + + pcgts = self.writer.build_pagexml_full_layout(contours_only_text_parent, + contours_only_text_parent_h, page_coord, + order_text_new, id_of_texts_tot, + all_found_textline_polygons, + all_found_textline_polygons_h, all_box_coord, + all_box_coord_h, polygons_of_images, contours_tables, + polygons_of_drop_capitals, polygons_of_marginals, + all_found_textline_polygons_marginals, + all_box_coord_marginals, slopes, slopes_h, + slopes_marginals, cont_page, polygons_lines_xml) self.logger.info("Job done in %.1fs", time.time() - t0) if not self.dir_in: @@ -3280,17 +3579,28 @@ class Eynollah: else: contours_only_text_parent_h = None if np.abs(slope_deskew) < SLOPE_THRESHOLD: - order_text_new, id_of_texts_tot = self.do_order_of_regions(contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot) + order_text_new, id_of_texts_tot = self.do_order_of_regions(contours_only_text_parent, + contours_only_text_parent_h, boxes, + textline_mask_tot) else: - contours_only_text_parent_d_ordered = list(np.array(contours_only_text_parent_d_ordered, dtype=object)[index_by_text_par_con]) - order_text_new, id_of_texts_tot = self.do_order_of_regions(contours_only_text_parent_d_ordered, contours_only_text_parent_h, boxes_d, textline_mask_tot_d) - pcgts = self.writer.build_pagexml_no_full_layout(txt_con_org, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, all_box_coord, polygons_of_images, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_xml, contours_tables) + contours_only_text_parent_d_ordered = list( + np.array(contours_only_text_parent_d_ordered, dtype=object)[index_by_text_par_con]) + order_text_new, id_of_texts_tot = self.do_order_of_regions(contours_only_text_parent_d_ordered, + contours_only_text_parent_h, boxes_d, + textline_mask_tot_d) + pcgts = 
self.writer.build_pagexml_no_full_layout(txt_con_org, page_coord, order_text_new, + id_of_texts_tot, all_found_textline_polygons, + all_box_coord, polygons_of_images, + polygons_of_marginals, + all_found_textline_polygons_marginals, + all_box_coord_marginals, slopes, slopes_marginals, + cont_page, polygons_lines_xml, contours_tables) self.logger.info("Job done in %.1fs", time.time() - t0) if not self.dir_in: return pcgts if self.dir_in: self.writer.write_pagexml(pcgts) - #self.logger.info("Job done in %.1fs", time.time() - t0) + # self.logger.info("Job done in %.1fs", time.time() - t0) if self.dir_in: self.logger.info("All jobs done in %.1fs", time.time() - t0_tot) diff --git a/src/eynollah/ocrd_cli.py b/src/eynollah/ocrd_cli.py index 8929927..499661b 100644 --- a/src/eynollah/ocrd_cli.py +++ b/src/eynollah/ocrd_cli.py @@ -2,10 +2,12 @@ from .processor import EynollahProcessor from click import command from ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor + @command() @ocrd_cli_options def main(*args, **kwargs): return ocrd_cli_wrap_processor(EynollahProcessor, *args, **kwargs) + if __name__ == '__main__': main() diff --git a/src/eynollah/plot.py b/src/eynollah/plot.py index b01fc04..e12f56d 100644 --- a/src/eynollah/plot.py +++ b/src/eynollah/plot.py @@ -9,24 +9,25 @@ from .utils import crop_image_inside_box from .utils.rotate import rotate_image_different from .utils.resize import resize_image + class EynollahPlotter(): """ Class collecting all the plotting and image writing methods """ def __init__( - self, - *, - dir_out, - dir_of_all, - dir_save_page, - dir_of_deskewed, - dir_of_layout, - dir_of_cropped_images, - image_filename_stem, - image_org=None, - scale_x=1, - scale_y=1, + self, + *, + dir_out, + dir_of_all, + dir_save_page, + dir_of_deskewed, + dir_of_layout, + dir_of_cropped_images, + image_filename_stem, + image_org=None, + scale_x=1, + scale_y=1, ): self.dir_out = dir_out self.dir_of_all = dir_of_all @@ -44,7 +45,7 @@ class EynollahPlotter(): if self.dir_of_layout is not None: values = np.unique(text_regions_p[:, :]) # pixels=['Background' , 'Main text' , 'Heading' , 'Marginalia' ,'Drop capitals' , 'Images' , 'Seperators' , 'Tables', 'Graphics'] - pixels=['Background' , 'Main text' , 'Image' , 'Separator','Marginalia'] + pixels = ['Background', 'Main text', 'Image', 'Separator', 'Marginalia'] values_indexes = [0, 1, 2, 3, 4] plt.figure(figsize=(40, 40)) plt.rcParams["font.size"] = "40" @@ -53,13 +54,12 @@ class EynollahPlotter(): patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in values] plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0, fontsize=40) plt.savefig(os.path.join(self.dir_of_layout, self.image_filename_stem + "_layout_main.png")) - def save_plot_of_layout_main_all(self, text_regions_p, image_page): if self.dir_of_all is not None: values = np.unique(text_regions_p[:, :]) - # pixels=['Background' , 'Main text' , 'Heading' , 'Marginalia' ,'Drop capitals' , 'Images' , 'Seperators' , 'Tables', 'Graphics'] - pixels=['Background' , 'Main text' , 'Image' , 'Separator','Marginalia'] + # pixels = ['Background', 'Main text', 'Heading', 'Marginalia', 'Drop capitals', 'Images', 'Seperators', 'Tables', 'Graphics'] + pixels = ['Background', 'Main text', 'Image', 'Separator', 'Marginalia'] values_indexes = [0, 1, 2, 3, 4] plt.figure(figsize=(80, 40)) plt.rcParams["font.size"] = "40" @@ -131,33 +131,34 @@ class EynollahPlotter(): 
cv2.imwrite(os.path.join(self.dir_of_all, self.image_filename_stem + "_page.png"), image_page) if self.dir_save_page is not None: cv2.imwrite(os.path.join(self.dir_save_page, self.image_filename_stem + "_page.png"), image_page) + def save_enhanced_image(self, img_res): cv2.imwrite(os.path.join(self.dir_out, self.image_filename_stem + "_enhanced.png"), img_res) - + def save_plot_of_textline_density(self, img_patch_org): if self.dir_of_all is not None: - plt.figure(figsize=(80,40)) - plt.rcParams['font.size']='50' - plt.subplot(1,2,1) + plt.figure(figsize=(80, 40)) + plt.rcParams['font.size'] = '50' + plt.subplot(1, 2, 1) plt.imshow(img_patch_org) - plt.subplot(1,2,2) - plt.plot(gaussian_filter1d(img_patch_org.sum(axis=1), 3),np.array(range(len(gaussian_filter1d(img_patch_org.sum(axis=1), 3)))),linewidth=8) - plt.xlabel('Density of textline prediction in direction of X axis',fontsize=60) - plt.ylabel('Height',fontsize=60) - plt.yticks([0,len(gaussian_filter1d(img_patch_org.sum(axis=1), 3))]) + plt.subplot(1, 2, 2) + plt.plot(gaussian_filter1d(img_patch_org.sum(axis=1), 3), np.array(range(len(gaussian_filter1d(img_patch_org.sum(axis=1), 3)))), linewidth=8) + plt.xlabel('Density of textline prediction in direction of X axis', fontsize=60) + plt.ylabel('Height', fontsize=60) + plt.yticks([0, len(gaussian_filter1d(img_patch_org.sum(axis=1), 3))]) plt.gca().invert_yaxis() - plt.savefig(os.path.join(self.dir_of_all, self.image_filename_stem+'_density_of_textline.png')) + plt.savefig(os.path.join(self.dir_of_all, self.image_filename_stem + '_density_of_textline.png')) def save_plot_of_rotation_angle(self, angels, var_res): if self.dir_of_all is not None: - plt.figure(figsize=(60,30)) - plt.rcParams['font.size']='50' - plt.plot(angels,np.array(var_res),'-o',markersize=25,linewidth=4) - plt.xlabel('angle',fontsize=50) - plt.ylabel('variance of sum of rotated textline in direction of x axis',fontsize=50) - plt.plot(angels[np.argmax(var_res)],var_res[np.argmax(np.array(var_res))] ,'*',markersize=50,label='Angle of deskewing=' +str("{:.2f}".format(angels[np.argmax(var_res)]))+r'$\degree$') + plt.figure(figsize=(60, 30)) + plt.rcParams['font.size'] = '50' + plt.plot(angels, np.array(var_res), '-o', markersize=25, linewidth=4) + plt.xlabel('angle', fontsize=50) + plt.ylabel('variance of sum of rotated textline in direction of x axis', fontsize=50) + plt.plot(angels[np.argmax(var_res)], var_res[np.argmax(np.array(var_res))], '*', markersize=50, label='Angle of deskewing=' + str("{:.2f}".format(angels[np.argmax(var_res)])) + r'$\degree$') plt.legend(loc='best') - plt.savefig(os.path.join(self.dir_of_all, self.image_filename_stem+'_rotation_angle.png')) + plt.savefig(os.path.join(self.dir_of_all, self.image_filename_stem + '_rotation_angle.png')) def write_images_into_directory(self, img_contours, image_page): if self.dir_of_cropped_images is not None: @@ -165,11 +166,10 @@ class EynollahPlotter(): for cont_ind in img_contours: x, y, w, h = cv2.boundingRect(cont_ind) box = [x, y, w, h] - croped_page, page_coord = crop_image_inside_box(box, image_page) + cropped_page, page_coord = crop_image_inside_box(box, image_page) - croped_page = resize_image(croped_page, int(croped_page.shape[0] / self.scale_y), int(croped_page.shape[1] / self.scale_x)) + cropped_page = resize_image(cropped_page, int(cropped_page.shape[0] / self.scale_y), int(cropped_page.shape[1] / self.scale_x)) path = os.path.join(self.dir_of_cropped_images, self.image_filename_stem + "_" + str(index) + ".jpg") - cv2.imwrite(path, croped_page) + 
cv2.imwrite(path, cropped_page) index += 1 - diff --git a/src/eynollah/processor.py b/src/eynollah/processor.py index 1bd190e..c77d856 100644 --- a/src/eynollah/processor.py +++ b/src/eynollah/processor.py @@ -22,6 +22,7 @@ from .utils.pil_cv2 import pil2cv OCRD_TOOL = loads(resource_string(__name__, 'ocrd-tool.json').decode('utf8')) + class EynollahProcessor(Processor): def __init__(self, *args, **kwargs): diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index d2b2488..ae79d75 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1,5 +1,4 @@ import math - import matplotlib.pyplot as plt import numpy as np from shapely import geometry @@ -14,291 +13,281 @@ from .contour import (contours_in_same_horizon, return_contours_of_image, return_parent_contours) -def return_x_start_end_mothers_childs_and_type_of_reading_order(x_min_hor_some,x_max_hor_some,cy_hor_some,peak_points,cy_hor_diff): - - - x_start=[] - x_end=[] - kind=[]#if covers 2 and more than 2 columns set it to 1 otherwise 0 - len_sep=[] - y_sep=[] - y_diff=[] - new_main_sep_y=[] - - indexer=0 + +def return_x_start_end_mothers_childs_and_type_of_reading_order(x_min_hor_some, x_max_hor_some, cy_hor_some, peak_points, cy_hor_diff): + x_start = [] + x_end = [] + kind = [] # if covers 2 and more than 2 columns set it to 1 otherwise 0 + len_sep = [] + y_sep = [] + y_diff = [] + new_main_sep_y = [] + + indexer = 0 for i in range(len(x_min_hor_some)): - starting=x_min_hor_some[i]-peak_points - starting=starting[starting>=0] - min_start=np.argmin(starting) - - - ending=peak_points-x_max_hor_some[i] - len_ending_neg=len(ending[ending<=0]) - - ending=ending[ending>0] - max_end=np.argmin(ending)+len_ending_neg - - - if (max_end-min_start)>=2: - if (max_end-min_start)==(len(peak_points)-1): + starting = x_min_hor_some[i] - peak_points + starting = starting[starting >= 0] + min_start = np.argmin(starting) + + ending = peak_points - x_max_hor_some[i] + len_ending_neg = len(ending[ending <= 0]) + + ending = ending[ending > 0] + max_end = np.argmin(ending) + len_ending_neg + + if (max_end - min_start) >= 2: + if (max_end - min_start) == (len(peak_points) - 1): new_main_sep_y.append(indexer) - - #print((max_end-min_start),len(peak_points),'(max_end-min_start)') + + # print((max_end-min_start),len(peak_points),'(max_end-min_start)') y_sep.append(cy_hor_some[i]) y_diff.append(cy_hor_diff[i]) x_end.append(max_end) - - x_start.append( min_start) - - len_sep.append(max_end-min_start) - if max_end==min_start+1: + + x_start.append(min_start) + + len_sep.append(max_end - min_start) + if max_end == min_start + 1: kind.append(0) else: kind.append(1) - - indexer+=1 - - - x_start_returned=np.copy(x_start) - x_end_returned=np.copy(x_end) - y_sep_returned=np.copy(y_sep) - y_diff_returned=np.copy(y_diff) - - - - - all_args_uniq=contours_in_same_horizon(y_sep_returned) - - args_to_be_unified=[] - y_unified=[] - y_diff_unified=[] - x_s_unified=[] - x_e_unified=[] - if len(all_args_uniq)>0: - #print('burda') + + indexer += 1 + + x_start_returned = np.copy(x_start) + x_end_returned = np.copy(x_end) + y_sep_returned = np.copy(y_sep) + y_diff_returned = np.copy(y_diff) + + all_args_uniq = contours_in_same_horizon(y_sep_returned) + + args_to_be_unified = [] + y_unified = [] + y_diff_unified = [] + x_s_unified = [] + x_e_unified = [] + if len(all_args_uniq) > 0: + # print('burda') if type(all_args_uniq[0]) is list: for dd in range(len(all_args_uniq)): - if len(all_args_uniq[dd])==2: - 
x_s_same_hor=np.array(x_start_returned)[all_args_uniq[dd]] - x_e_same_hor=np.array(x_end_returned)[all_args_uniq[dd]] - y_sep_same_hor=np.array(y_sep_returned)[all_args_uniq[dd]] - y_diff_same_hor=np.array(y_diff_returned)[all_args_uniq[dd]] - #print('burda2') - if x_s_same_hor[0]==(x_e_same_hor[1]-1) or x_s_same_hor[1]==(x_e_same_hor[0]-1) and x_s_same_hor[0]!=x_s_same_hor[1] and x_e_same_hor[0]!=x_e_same_hor[1]: - #print('burda3') + if len(all_args_uniq[dd]) == 2: + x_s_same_hor = np.array(x_start_returned)[all_args_uniq[dd]] + x_e_same_hor = np.array(x_end_returned)[all_args_uniq[dd]] + y_sep_same_hor = np.array(y_sep_returned)[all_args_uniq[dd]] + y_diff_same_hor = np.array(y_diff_returned)[all_args_uniq[dd]] + # print('burda2') + if x_s_same_hor[0] == (x_e_same_hor[1] - 1) or x_s_same_hor[1] == (x_e_same_hor[0] - 1) and \ + x_s_same_hor[0] != x_s_same_hor[1] and x_e_same_hor[0] != x_e_same_hor[1]: + # print('burda3') for arg_in in all_args_uniq[dd]: - #print(arg_in,'arg_in') + # print(arg_in,'arg_in') args_to_be_unified.append(arg_in) - y_selected=np.min(y_sep_same_hor) - y_diff_selected=np.max(y_diff_same_hor) - x_s_selected=np.min(x_s_same_hor) - x_e_selected=np.max(x_e_same_hor) - + y_selected = np.min(y_sep_same_hor) + y_diff_selected = np.max(y_diff_same_hor) + x_s_selected = np.min(x_s_same_hor) + x_e_selected = np.max(x_e_same_hor) + x_s_unified.append(x_s_selected) x_e_unified.append(x_e_selected) y_unified.append(y_selected) y_diff_unified.append(y_diff_selected) - - - - #print(x_s_same_hor,'x_s_same_hor') - #print(x_e_same_hor[:]-1,'x_e_same_hor') - #print('#############################') - - #print(x_s_unified,'y_selected') - #print(x_e_unified,'x_s_selected') - #print(y_unified,'x_e_same_hor') - - args_lines_not_unified=list( set(range(len(y_sep_returned)))-set(args_to_be_unified) ) - - #print(args_lines_not_unified,'args_lines_not_unified') - - x_start_returned_not_unified=list( np.array(x_start_returned)[args_lines_not_unified] ) - x_end_returned_not_unified=list( np.array(x_end_returned)[args_lines_not_unified] ) - y_sep_returned_not_unified=list (np.array(y_sep_returned)[args_lines_not_unified] ) - y_diff_returned_not_unified=list (np.array(y_diff_returned)[args_lines_not_unified] ) - + + # print(x_s_same_hor,'x_s_same_hor') + # print(x_e_same_hor[:]-1,'x_e_same_hor') + # print('#############################') + + # print(x_s_unified,'y_selected') + # print(x_e_unified,'x_s_selected') + # print(y_unified,'x_e_same_hor') + + args_lines_not_unified = list(set(range(len(y_sep_returned))) - set(args_to_be_unified)) + + # print(args_lines_not_unified,'args_lines_not_unified') + + x_start_returned_not_unified = list(np.array(x_start_returned)[args_lines_not_unified]) + x_end_returned_not_unified = list(np.array(x_end_returned)[args_lines_not_unified]) + y_sep_returned_not_unified = list(np.array(y_sep_returned)[args_lines_not_unified]) + y_diff_returned_not_unified = list(np.array(y_diff_returned)[args_lines_not_unified]) + for dv in range(len(y_unified)): y_sep_returned_not_unified.append(y_unified[dv]) y_diff_returned_not_unified.append(y_diff_unified[dv]) x_start_returned_not_unified.append(x_s_unified[dv]) x_end_returned_not_unified.append(x_e_unified[dv]) - - #print(y_sep_returned,'y_sep_returned') - #print(x_start_returned,'x_start_returned') - #print(x_end_returned,'x_end_returned') - - x_start_returned=np.copy(x_start_returned_not_unified) - x_end_returned=np.copy(x_end_returned_not_unified) - y_sep_returned=np.copy(y_sep_returned_not_unified) - 
y_diff_returned=np.copy(y_diff_returned_not_unified)
-
-
-    #print(y_sep_returned,'y_sep_returned2')
-    #print(x_start_returned,'x_start_returned2')
-    #print(x_end_returned,'x_end_returned2')
-    #print(new_main_sep_y,'new_main_sep_y')
-
-    #print(x_start,'x_start')
-    #print(x_end,'x_end')
-    if len(new_main_sep_y)>0:
-
-        min_ys=np.min(y_sep)
-        max_ys=np.max(y_sep)
-
-        y_mains=[]
+
+        # print(y_sep_returned,'y_sep_returned')
+        # print(x_start_returned,'x_start_returned')
+        # print(x_end_returned,'x_end_returned')
+
+        x_start_returned = np.copy(x_start_returned_not_unified)
+        x_end_returned = np.copy(x_end_returned_not_unified)
+        y_sep_returned = np.copy(y_sep_returned_not_unified)
+        y_diff_returned = np.copy(y_diff_returned_not_unified)
+
+    # print(y_sep_returned,'y_sep_returned2')
+    # print(x_start_returned,'x_start_returned2')
+    # print(x_end_returned,'x_end_returned2')
+    # print(new_main_sep_y,'new_main_sep_y')
+
+    # print(x_start,'x_start')
+    # print(x_end,'x_end')
+    if len(new_main_sep_y) > 0:
+
+        min_ys = np.min(y_sep)
+        max_ys = np.max(y_sep)
+
+        y_mains = []
         y_mains.append(min_ys)
-        y_mains_sep_ohne_grenzen=[]
-
+        y_mains_sep_ohne_grenzen = []
+
         for ii in range(len(new_main_sep_y)):
             y_mains.append(y_sep[new_main_sep_y[ii]])
             y_mains_sep_ohne_grenzen.append(y_sep[new_main_sep_y[ii]])
-
+
         y_mains.append(max_ys)
-
-        y_mains_sorted=np.sort(y_mains)
-        diff=np.diff(y_mains_sorted)
-        argm=np.argmax(diff)
-
-        y_min_new=y_mains_sorted[argm]
-        y_max_new=y_mains_sorted[argm+1]
-
-        #print(y_min_new,'y_min_new')
-        #print(y_max_new,'y_max_new')
-
-
-        #print(y_sep[new_main_sep_y[0]],y_sep,'yseps')
-        x_start=np.array(x_start)
-        x_end=np.array(x_end)
-        kind=np.array(kind)
-        y_sep=np.array(y_sep)
+
+        y_mains_sorted = np.sort(y_mains)
+        diff = np.diff(y_mains_sorted)
+        argm = np.argmax(diff)
+
+        y_min_new = y_mains_sorted[argm]
+        y_max_new = y_mains_sorted[argm + 1]
+
+        # print(y_min_new,'y_min_new')
+        # print(y_max_new,'y_max_new')
+
+        # print(y_sep[new_main_sep_y[0]],y_sep,'yseps')
+        x_start = np.array(x_start)
+        x_end = np.array(x_end)
+        kind = np.array(kind)
+        y_sep = np.array(y_sep)
         if (y_min_new in y_mains_sep_ohne_grenzen) and (y_max_new in y_mains_sep_ohne_grenzen):
-            x_start=x_start[(y_sep>y_min_new) & (y_sep<y_max_new)]
-            x_end=x_end[(y_sep>y_min_new) & (y_sep<y_max_new)]
-            kind=kind[(y_sep>y_min_new) & (y_sep<y_max_new)]
-            y_sep=y_sep[(y_sep>y_min_new) & (y_sep<y_max_new)]
+            x_start = x_start[(y_sep > y_min_new) & (y_sep < y_max_new)]
+            x_end = x_end[(y_sep > y_min_new) & (y_sep < y_max_new)]
+            kind = kind[(y_sep > y_min_new) & (y_sep < y_max_new)]
+            y_sep = y_sep[(y_sep > y_min_new) & (y_sep < y_max_new)]
        elif (y_min_new in y_mains_sep_ohne_grenzen) and (y_max_new not in y_mains_sep_ohne_grenzen):
-            #print('burda')
-            x_start=x_start[(y_sep>y_min_new) & (y_sep<=y_max_new)]
-            #print('burda1')
-            x_end=x_end[(y_sep>y_min_new) & (y_sep<=y_max_new)]
-            #print('burda2')
-            kind=kind[(y_sep>y_min_new) & (y_sep<=y_max_new)]
-            y_sep=y_sep[(y_sep>y_min_new) & (y_sep<=y_max_new)]
+            # print('burda')
+            x_start = x_start[(y_sep > y_min_new) & (y_sep <= y_max_new)]
+            # print('burda1')
+            x_end = x_end[(y_sep > y_min_new) & (y_sep <= y_max_new)]
+            # print('burda2')
+            kind = kind[(y_sep > y_min_new) & (y_sep <= y_max_new)]
+            y_sep = y_sep[(y_sep > y_min_new) & (y_sep <= y_max_new)]
        elif (y_min_new not in y_mains_sep_ohne_grenzen) and (y_max_new in y_mains_sep_ohne_grenzen):
-            x_start=x_start[(y_sep>=y_min_new) & (y_sep<y_max_new)]
-            x_end=x_end[(y_sep>=y_min_new) & (y_sep<y_max_new)]
-            kind=kind[(y_sep>=y_min_new) & (y_sep<y_max_new)]
-            y_sep=y_sep[(y_sep>=y_min_new) & (y_sep<y_max_new)]
+            x_start = x_start[(y_sep >= y_min_new) & (y_sep < y_max_new)]
+            x_end = x_end[(y_sep >= y_min_new) & (y_sep < y_max_new)]
+            kind = kind[(y_sep >= y_min_new) & (y_sep < y_max_new)]
+            y_sep = y_sep[(y_sep >= y_min_new) & (y_sep < y_max_new)]
        else:
-            x_start=x_start[(y_sep>=y_min_new) & (y_sep<=y_max_new)]
-            x_end=x_end[(y_sep>=y_min_new) & (y_sep<=y_max_new)]
-            kind=kind[(y_sep>=y_min_new) & (y_sep<=y_max_new)]
-            y_sep=y_sep[(y_sep>=y_min_new) & (y_sep<=y_max_new)]
-    #print(x_start,'x_start')
-    #print(x_end,'x_end')
-    #print(len_sep)
-
-
-    deleted=[]
-    for i in range(len(x_start)-1):
-        nodes_i=set(range(x_start[i],x_end[i]+1))
-        for j in range(i+1,len(x_start)):
-            if nodes_i==set(range(x_start[j],x_end[j]+1)):
-                deleted.append(j)
-    #print(np.unique(deleted))
-
-    remained_sep_indexes=set(range(len(x_start)))-set(np.unique(deleted) )
-    #print(remained_sep_indexes,'remained_sep_indexes')
-    mother=[]#if it has mother
-    child=[]
+            x_start = x_start[(y_sep >= y_min_new) & (y_sep <= y_max_new)]
+            x_end = x_end[(y_sep >= y_min_new) & (y_sep <= y_max_new)]
+            kind = kind[(y_sep >= y_min_new) & (y_sep <= y_max_new)]
+            y_sep = y_sep[(y_sep >= y_min_new) & (y_sep <= y_max_new)]
+    # print(x_start,'x_start')
+    # print(x_end,'x_end')
+    # print(len_sep)
+
+    deleted = []
+    for i in range(len(x_start) - 1):
+        nodes_i = set(range(x_start[i], x_end[i] + 1))
+        for j in range(i + 1, len(x_start)):
+            if nodes_i == set(range(x_start[j], x_end[j] + 1)):
+                deleted.append(j)
+    # print(np.unique(deleted))
+
+    remained_sep_indexes = set(range(len(x_start))) - set(np.unique(deleted))
+    # print(remained_sep_indexes,'remained_sep_indexes')
+    mother = []  # if it has mother
+    child = []
     for index_i in remained_sep_indexes:
-        have_mother=0
-        have_child=0
-        nodes_ind=set(range(x_start[index_i],x_end[index_i]+1))
+        have_mother = 0
+        have_child = 0
+        nodes_ind = set(range(x_start[index_i], x_end[index_i] + 1))
         for index_j in remained_sep_indexes:
-            nodes_ind_j=set(range(x_start[index_j],x_end[index_j]+1))
-            if nodes_ind<nodes_ind_j:
-                have_mother=1
-            if nodes_ind>nodes_ind_j:
-                have_child=1
+            nodes_ind_j = set(range(x_start[index_j], x_end[index_j] + 1))
+            if nodes_ind < nodes_ind_j:
+                have_mother = 1
+            if nodes_ind > nodes_ind_j:
+                have_child = 1
         mother.append(have_mother)
         child.append(have_child)
-
-    #print(mother,'mother')
-    #print(len(remained_sep_indexes))
-    #print(len(remained_sep_indexes),len(x_start),len(x_end),len(y_sep),'lens')
-    y_lines_without_mother=[]
-    x_start_without_mother=[]
-    x_end_without_mother=[]
-
-    y_lines_with_child_without_mother=[]
-    x_start_with_child_without_mother=[]
-    x_end_with_child_without_mother=[]
-
-    #print(mother,'mother')
-    #print(child,'child')
-
-    if len(remained_sep_indexes)>1:
-        #print(np.array(remained_sep_indexes),'np.array(remained_sep_indexes)')
-        #print(np.array(mother),'mother')
-        remained_sep_indexes_without_mother=np.array(list(remained_sep_indexes))[np.array(mother)==0]
-        remained_sep_indexes_with_child_without_mother=np.array(list(remained_sep_indexes))[(np.array(mother)==0) & (np.array(child)==1)]
-        #print(remained_sep_indexes_without_mother,'remained_sep_indexes_without_mother')
-
-
-
-        x_end_with_child_without_mother=np.array(x_end)[np.array(remained_sep_indexes_with_child_without_mother)]
-
-        x_start_with_child_without_mother=np.array(x_start)[np.array(remained_sep_indexes_with_child_without_mother)]
-
-        y_lines_with_child_without_mother=np.array(y_sep)[np.array(remained_sep_indexes_with_child_without_mother)]
-
-
-        reading_orther_type=0
-
-
-        x_end_without_mother=np.array(x_end)[np.array(remained_sep_indexes_without_mother)]
-        x_start_without_mother=np.array(x_start)[np.array(remained_sep_indexes_without_mother)]
-        y_lines_without_mother=np.array(y_sep)[np.array(remained_sep_indexes_without_mother)]
-
-        if 
len(remained_sep_indexes_without_mother)>=2: - for i in range(len(remained_sep_indexes_without_mother)-1): - ##nodes_i=set(range(x_start[remained_sep_indexes_without_mother[i]],x_end[remained_sep_indexes_without_mother[i]]+1)) - nodes_i=set(range(x_start[remained_sep_indexes_without_mother[i]],x_end[remained_sep_indexes_without_mother[i]])) - for j in range(i+1,len(remained_sep_indexes_without_mother)): - #nodes_j=set(range(x_start[remained_sep_indexes_without_mother[j]],x_end[remained_sep_indexes_without_mother[j]]+1)) - nodes_j=set(range(x_start[remained_sep_indexes_without_mother[j]],x_end[remained_sep_indexes_without_mother[j]])) - - set_diff=nodes_i-nodes_j - - if set_diff!=nodes_i: - reading_orther_type=1 + + # print(mother,'mother') + # print(len(remained_sep_indexes)) + # print(len(remained_sep_indexes),len(x_start),len(x_end),len(y_sep),'lens') + y_lines_without_mother = [] + x_start_without_mother = [] + x_end_without_mother = [] + + y_lines_with_child_without_mother = [] + x_start_with_child_without_mother = [] + x_end_with_child_without_mother = [] + + # print(mother,'mother') + # print(child,'child') + + if len(remained_sep_indexes) > 1: + # print(np.array(remained_sep_indexes),'np.array(remained_sep_indexes)') + # print(np.array(mother),'mother') + remained_sep_indexes_without_mother = np.array(list(remained_sep_indexes))[np.array(mother) == 0] + remained_sep_indexes_with_child_without_mother = np.array(list(remained_sep_indexes))[ + (np.array(mother) == 0) & (np.array(child) == 1)] + # print(remained_sep_indexes_without_mother,'remained_sep_indexes_without_mother') + + x_end_with_child_without_mother = np.array(x_end)[np.array(remained_sep_indexes_with_child_without_mother)] + + x_start_with_child_without_mother = np.array(x_start)[np.array(remained_sep_indexes_with_child_without_mother)] + + y_lines_with_child_without_mother = np.array(y_sep)[np.array(remained_sep_indexes_with_child_without_mother)] + + reading_order_type = 0 + + x_end_without_mother = np.array(x_end)[np.array(remained_sep_indexes_without_mother)] + x_start_without_mother = np.array(x_start)[np.array(remained_sep_indexes_without_mother)] + y_lines_without_mother = np.array(y_sep)[np.array(remained_sep_indexes_without_mother)] + + if len(remained_sep_indexes_without_mother) >= 2: + for i in range(len(remained_sep_indexes_without_mother) - 1): + # nodes_i=set(range(x_start[remained_sep_indexes_without_mother[i]],x_end[remained_sep_indexes_without_mother[i]]+1)) + nodes_i = set(range(x_start[remained_sep_indexes_without_mother[i]], + x_end[remained_sep_indexes_without_mother[i]])) + for j in range(i + 1, len(remained_sep_indexes_without_mother)): + # nodes_j = set(range(x_start[remained_sep_indexes_without_mother[j]],x_end[remained_sep_indexes_without_mother[j]]+1)) + nodes_j = set(range(x_start[remained_sep_indexes_without_mother[j]], + x_end[remained_sep_indexes_without_mother[j]])) + + set_diff = nodes_i - nodes_j + + if set_diff != nodes_i: + reading_order_type = 1 else: - reading_orther_type=0 - #print(reading_orther_type,'javab') - - #print(y_lines_with_child_without_mother,'y_lines_with_child_without_mother') - #print(x_start_with_child_without_mother,'x_start_with_child_without_mother') - #print(x_end_with_child_without_mother,'x_end_with_hild_without_mother') - - len_sep_with_child=len(np.array(child)[np.array(child)==1]) - - #print(len_sep_with_child,'len_sep_with_child') - there_is_sep_with_child=0 - - if len_sep_with_child>=1: - there_is_sep_with_child=1 - - #print(all_args_uniq,'all_args_uniq') - 
#print(args_to_be_unified,'args_to_be_unified') - - - return reading_orther_type,x_start_returned, x_end_returned ,y_sep_returned,y_diff_returned,y_lines_without_mother,x_start_without_mother,x_end_without_mother,there_is_sep_with_child,y_lines_with_child_without_mother,x_start_with_child_without_mother,x_end_with_child_without_mother,new_main_sep_y + reading_order_type = 0 + # print(reading_order_type,'javab') + + # print(y_lines_with_child_without_mother,'y_lines_with_child_without_mother') + # print(x_start_with_child_without_mother,'x_start_with_child_without_mother') + # print(x_end_with_child_without_mother,'x_end_with_hild_without_mother') + + len_sep_with_child = len(np.array(child)[np.array(child) == 1]) + + # print(len_sep_with_child,'len_sep_with_child') + there_is_sep_with_child = 0 + + if len_sep_with_child >= 1: + there_is_sep_with_child = 1 + + # print(all_args_uniq,'all_args_uniq') + # print(args_to_be_unified,'args_to_be_unified') + + return reading_order_type, x_start_returned, x_end_returned, y_sep_returned, y_diff_returned, y_lines_without_mother, x_start_without_mother, x_end_without_mother, there_is_sep_with_child, y_lines_with_child_without_mother, x_start_with_child_without_mother, x_end_with_child_without_mother, new_main_sep_y + + def crop_image_inside_box(box, img_org_copy): - image_box = img_org_copy[box[1] : box[1] + box[3], box[0] : box[0] + box[2]] + image_box = img_org_copy[box[1]: box[1] + box[3], box[0]: box[0] + box[2]] return image_box, [box[1], box[1] + box[3], box[0], box[0] + box[2]] + def otsu_copy_binary(img): img_r = np.zeros((img.shape[0], img.shape[1], 3)) img1 = img[:, :, 0] @@ -314,7 +303,6 @@ def otsu_copy_binary(img): def find_features_of_lines(contours_main): - areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))]) M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] @@ -339,7 +327,9 @@ def find_features_of_lines(contours_main): slope_lines[(slope_lines != 0) & (slope_lines != 1)] = 2 dis_x = np.abs(x_max_main - x_min_main) - return slope_lines, dis_x, x_min_main, x_max_main, np.array(cy_main), np.array(slope_lines_org), y_min_main, y_max_main, np.array(cx_main) + return slope_lines, dis_x, x_min_main, x_max_main, np.array(cy_main), np.array( + slope_lines_org), y_min_main, y_max_main, np.array(cx_main) + def boosting_headers_by_longshot_region_segmentation(textregion_pre_p, textregion_pre_np, img_only_text): textregion_pre_p_org = np.copy(textregion_pre_p) @@ -350,32 +340,34 @@ def boosting_headers_by_longshot_region_segmentation(textregion_pre_p, textregio textregion_pre_p[:, :, 0][textregion_pre_p[:, :, 0] == 1] = 0 # earlier it was so, but by this manner the drop capitals are also deleted # textregion_pre_p[:,:,0][( img_only_text[:,:]==1) & (textregion_pre_p[:,:,0]!=7) & (textregion_pre_p[:,:,0]!=2)]=1 - textregion_pre_p[:, :, 0][(img_only_text[:, :] == 1) & (textregion_pre_p[:, :, 0] != 7) & (textregion_pre_p[:, :, 0] != 4) & (textregion_pre_p[:, :, 0] != 2)] = 1 + textregion_pre_p[:, :, 0][ + (img_only_text[:, :] == 1) & (textregion_pre_p[:, :, 0] != 7) & (textregion_pre_p[:, :, 0] != 4) & ( + textregion_pre_p[:, :, 0] != 2)] = 1 return textregion_pre_p def find_num_col_deskew(regions_without_separators, sigma_, multiplier=3.8): - regions_without_separators_0 = regions_without_separators[:,:].sum(axis=1) + regions_without_separators_0 = regions_without_separators[:, :].sum(axis=1) z = 
gaussian_filter1d(regions_without_separators_0, sigma_) return np.std(z) def find_num_col(regions_without_separators, num_col_classifier, tables, multiplier=3.8): regions_without_separators_0 = regions_without_separators[:, :].sum(axis=0) - ##plt.plot(regions_without_separators_0) - ##plt.show() + # plt.plot(regions_without_separators_0) + # plt.show() sigma_ = 35 # 70#35 - meda_n_updown = regions_without_separators_0[len(regions_without_separators_0) :: -1] + meda_n_updown = regions_without_separators_0[len(regions_without_separators_0):: -1] first_nonzero = next((i for i, x in enumerate(regions_without_separators_0) if x), 0) last_nonzero = next((i for i, x in enumerate(meda_n_updown) if x), 0) last_nonzero = len(regions_without_separators_0) - last_nonzero y = regions_without_separators_0 # [first_nonzero:last_nonzero] y_help = np.zeros(len(y) + 20) - y_help[10 : len(y) + 10] = y + y_help[10: len(y) + 10] = y x = np.array(range(len(y))) zneg_rev = -y_help + np.max(y_help) zneg = np.zeros(len(zneg_rev) + 20) - zneg[10 : len(zneg_rev) + 10] = zneg_rev + zneg[10: len(zneg_rev) + 10] = zneg_rev z = gaussian_filter1d(y, sigma_) zneg = gaussian_filter1d(zneg, sigma_) @@ -387,7 +379,8 @@ def find_num_col(regions_without_separators, num_col_classifier, tables, multipl first_nonzero = first_nonzero + 200 peaks_neg = peaks_neg[(peaks_neg > first_nonzero) & (peaks_neg < last_nonzero)] - peaks = peaks[(peaks > 0.06 * regions_without_separators.shape[1]) & (peaks < 0.94 * regions_without_separators.shape[1])] + peaks = peaks[ + (peaks > 0.06 * regions_without_separators.shape[1]) & (peaks < 0.94 * regions_without_separators.shape[1])] peaks_neg = peaks_neg[(peaks_neg > 370) & (peaks_neg < (regions_without_separators.shape[1] - 370))] interest_pos = z[peaks] interest_pos = interest_pos[interest_pos > 10] @@ -416,16 +409,16 @@ def find_num_col(regions_without_separators, num_col_classifier, tables, multipl interest_neg_fin = interest_neg[(interest_neg < grenze)] peaks_neg_fin = peaks_neg[(interest_neg < grenze)] # interest_neg_fin=interest_neg[(interest_neg= 3: - index_sort_interest_neg_fin= np.argsort(interest_neg_fin) + if (num_col_classifier - ((len(interest_neg_fin)) + 1)) >= 3: + index_sort_interest_neg_fin = np.argsort(interest_neg_fin) peaks_neg_sorted = np.array(peaks_neg)[index_sort_interest_neg_fin] interest_neg_fin_sorted = np.array(interest_neg_fin)[index_sort_interest_neg_fin] - - if len(index_sort_interest_neg_fin)>=num_col_classifier: - peaks_neg_fin = list( peaks_neg_sorted[:num_col_classifier] ) - interest_neg_fin = list( interest_neg_fin_sorted[:num_col_classifier] ) + + if len(index_sort_interest_neg_fin) >= num_col_classifier: + peaks_neg_fin = list(peaks_neg_sorted[:num_col_classifier]) + interest_neg_fin = list(interest_neg_fin_sorted[:num_col_classifier]) else: peaks_neg_fin = peaks_neg[:] interest_neg_fin = interest_neg[:] @@ -441,7 +434,10 @@ def find_num_col(regions_without_separators, num_col_classifier, tables, multipl p_g_u = len(y) - int(len(y) / 4.0) if num_col == 3: - if (peaks_neg_fin[0] > p_g_u and peaks_neg_fin[1] > p_g_u) or (peaks_neg_fin[0] < p_g_l and peaks_neg_fin[1] < p_g_l) or ((peaks_neg_fin[0] + 200) < p_m and peaks_neg_fin[1] < p_m) or ((peaks_neg_fin[0] - 200) > p_m and peaks_neg_fin[1] > p_m): + if (peaks_neg_fin[0] > p_g_u and peaks_neg_fin[1] > p_g_u) or ( + peaks_neg_fin[0] < p_g_l and peaks_neg_fin[1] < p_g_l) or ( + (peaks_neg_fin[0] + 200) < p_m and peaks_neg_fin[1] < p_m) or ( + (peaks_neg_fin[0] - 200) > p_m and peaks_neg_fin[1] > p_m): num_col = 
1 peaks_neg_fin = [] @@ -450,7 +446,7 @@ def find_num_col(regions_without_separators, num_col_classifier, tables, multipl num_col = 1 peaks_neg_fin = [] - ##print(len(peaks_neg_fin)) + # print(len(peaks_neg_fin)) diff_peaks = np.abs(np.diff(peaks_neg_fin)) @@ -487,9 +483,12 @@ def find_num_col(regions_without_separators, num_col_classifier, tables, multipl p_u_quarter = len(y) - p_quarter - ##print(num_col,'early') + # print(num_col,'early') if num_col == 3: - if (peaks_neg_true[0] > p_g_u and peaks_neg_true[1] > p_g_u) or (peaks_neg_true[0] < p_g_l and peaks_neg_true[1] < p_g_l) or (peaks_neg_true[0] < p_m and (peaks_neg_true[1] + 200) < p_m) or ((peaks_neg_true[0] - 200) > p_m and peaks_neg_true[1] > p_m): + if (peaks_neg_true[0] > p_g_u and peaks_neg_true[1] > p_g_u) or ( + peaks_neg_true[0] < p_g_l and peaks_neg_true[1] < p_g_l) or ( + peaks_neg_true[0] < p_m and (peaks_neg_true[1] + 200) < p_m) or ( + (peaks_neg_true[0] - 200) > p_m and peaks_neg_true[1] > p_m): num_col = 1 peaks_neg_true = [] elif (peaks_neg_true[0] < p_g_u and peaks_neg_true[0] > p_g_l) and (peaks_neg_true[1] > p_u_quarter): @@ -528,18 +527,19 @@ def find_num_col(regions_without_separators, num_col_classifier, tables, multipl # plt.plot([0,len(y)], [grenze,grenze]) # plt.show() - ##print(len(peaks_neg_true)) + # print(len(peaks_neg_true)) return len(peaks_neg_true), peaks_neg_true + def find_num_col_only_image(regions_without_separators, multiplier=3.8): regions_without_separators_0 = regions_without_separators[:, :].sum(axis=0) - ##plt.plot(regions_without_separators_0) - ##plt.show() + # plt.plot(regions_without_separators_0) + # plt.show() sigma_ = 15 - meda_n_updown = regions_without_separators_0[len(regions_without_separators_0) :: -1] + meda_n_updown = regions_without_separators_0[len(regions_without_separators_0):: -1] first_nonzero = next((i for i, x in enumerate(regions_without_separators_0) if x), 0) last_nonzero = next((i for i, x in enumerate(meda_n_updown) if x), 0) @@ -550,7 +550,7 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8): y_help = np.zeros(len(y) + 20) - y_help[10 : len(y) + 10] = y + y_help[10: len(y) + 10] = y x = np.array(range(len(y))) @@ -558,7 +558,7 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8): zneg = np.zeros(len(zneg_rev) + 20) - zneg[10 : len(zneg_rev) + 10] = zneg_rev + zneg[10: len(zneg_rev) + 10] = zneg_rev z = gaussian_filter1d(y, sigma_) zneg = gaussian_filter1d(zneg, sigma_) @@ -572,7 +572,8 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8): peaks_neg = peaks_neg[(peaks_neg > first_nonzero) & (peaks_neg < last_nonzero)] - peaks = peaks[(peaks > 0.09 * regions_without_separators.shape[1]) & (peaks < 0.91 * regions_without_separators.shape[1])] + peaks = peaks[ + (peaks > 0.09 * regions_without_separators.shape[1]) & (peaks < 0.91 * regions_without_separators.shape[1])] peaks_neg = peaks_neg[(peaks_neg > 500) & (peaks_neg < (regions_without_separators.shape[1] - 500))] # print(peaks) @@ -601,7 +602,10 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8): p_g_u = len(y) - int(len(y) / 3.0) if num_col == 3: - if (peaks_neg_fin[0] > p_g_u and peaks_neg_fin[1] > p_g_u) or (peaks_neg_fin[0] < p_g_l and peaks_neg_fin[1] < p_g_l) or (peaks_neg_fin[0] < p_m and peaks_neg_fin[1] < p_m) or (peaks_neg_fin[0] > p_m and peaks_neg_fin[1] > p_m): + if (peaks_neg_fin[0] > p_g_u and peaks_neg_fin[1] > p_g_u) or ( + peaks_neg_fin[0] < p_g_l and peaks_neg_fin[1] < p_g_l) or ( + peaks_neg_fin[0] < 
p_m and peaks_neg_fin[1] < p_m) or ( + peaks_neg_fin[0] > p_m and peaks_neg_fin[1] > p_m): num_col = 1 else: pass @@ -646,7 +650,10 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8): p_u_quarter = len(y) - p_quarter if num_col == 3: - if (peaks_neg_true[0] > p_g_u and peaks_neg_true[1] > p_g_u) or (peaks_neg_true[0] < p_g_l and peaks_neg_true[1] < p_g_l) or (peaks_neg_true[0] < p_m and peaks_neg_true[1] < p_m) or (peaks_neg_true[0] > p_m and peaks_neg_true[1] > p_m): + if (peaks_neg_true[0] > p_g_u and peaks_neg_true[1] > p_g_u) or ( + peaks_neg_true[0] < p_g_l and peaks_neg_true[1] < p_g_l) or ( + peaks_neg_true[0] < p_m and peaks_neg_true[1] < p_m) or ( + peaks_neg_true[0] > p_m and peaks_neg_true[1] > p_m): num_col = 1 peaks_neg_true = [] elif (peaks_neg_true[0] < p_g_u and peaks_neg_true[0] > p_g_l) and (peaks_neg_true[1] > p_u_quarter): @@ -662,7 +669,8 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8): peaks_neg_true = [] if num_col == 4: - if len(np.array(peaks_neg_true)[np.array(peaks_neg_true) < p_g_l]) == 2 or len(np.array(peaks_neg_true)[np.array(peaks_neg_true) > (len(y) - p_g_l)]) == 2: + if len(np.array(peaks_neg_true)[np.array(peaks_neg_true) < p_g_l]) == 2 or len( + np.array(peaks_neg_true)[np.array(peaks_neg_true) > (len(y) - p_g_l)]) == 2: num_col = 1 peaks_neg_true = [] else: @@ -674,7 +682,8 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8): for i in range(len(peaks_neg_true)): hill_main = peaks_neg_true[i] # deep_depth=z[peaks_neg] - hills_around = peaks_neg_org[((peaks_neg_org > hill_main) & (peaks_neg_org <= hill_main + 400)) | ((peaks_neg_org < hill_main) & (peaks_neg_org >= hill_main - 400))] + hills_around = peaks_neg_org[((peaks_neg_org > hill_main) & (peaks_neg_org <= hill_main + 400)) | ( + (peaks_neg_org < hill_main) & (peaks_neg_org >= hill_main - 400))] deep_depth_around = z[hills_around] # print(hill_main,z[hill_main],hills_around,deep_depth_around,'manoooo') @@ -717,11 +726,12 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8): return len(peaks_fin_true), peaks_fin_true + def find_num_col_by_vertical_lines(regions_without_separators, multiplier=3.8): regions_without_separators_0 = regions_without_separators[:, :, 0].sum(axis=0) - ##plt.plot(regions_without_separators_0) - ##plt.show() + # plt.plot(regions_without_separators_0) + # plt.show() sigma_ = 35 # 70#35 @@ -732,6 +742,7 @@ def find_num_col_by_vertical_lines(regions_without_separators, multiplier=3.8): # print(peaks,'peaksnew') return peaks + def return_regions_without_separators(regions_pre): kernel = np.ones((5, 5), np.uint8) regions_without_separators = ((regions_pre[:, :] != 6) & (regions_pre[:, :] != 0)) * 1 @@ -745,7 +756,6 @@ def return_regions_without_separators(regions_pre): def put_drop_out_from_only_drop_model(layout_no_patch, layout1): - drop_only = (layout_no_patch[:, :, 0] == 4) * 1 contours_drop, hir_on_drop = return_contours_of_image(drop_only) contours_drop_parent = return_parent_contours(contours_drop, hir_on_drop) @@ -753,7 +763,8 @@ def put_drop_out_from_only_drop_model(layout_no_patch, layout1): areas_cnt_text = np.array([cv2.contourArea(contours_drop_parent[j]) for j in range(len(contours_drop_parent))]) areas_cnt_text = areas_cnt_text / float(drop_only.shape[0] * drop_only.shape[1]) - contours_drop_parent = [contours_drop_parent[jz] for jz in range(len(contours_drop_parent)) if areas_cnt_text[jz] > 0.00001] + contours_drop_parent = [contours_drop_parent[jz] for jz in 
range(len(contours_drop_parent)) if + areas_cnt_text[jz] > 0.00001] areas_cnt_text = [areas_cnt_text[jz] for jz in range(len(areas_cnt_text)) if areas_cnt_text[jz] > 0.00001] @@ -764,7 +775,7 @@ def put_drop_out_from_only_drop_model(layout_no_patch, layout1): # boxes.append([int(x), int(y), int(w), int(h)]) map_of_drop_contour_bb = np.zeros((layout1.shape[0], layout1.shape[1])) - map_of_drop_contour_bb[y : y + h, x : x + w] = layout1[y : y + h, x : x + w] + map_of_drop_contour_bb[y: y + h, x: x + w] = layout1[y: y + h, x: x + w] if (((map_of_drop_contour_bb == 1) * 1).sum() / float(((map_of_drop_contour_bb == 5) * 1).sum()) * 100) >= 15: contours_drop_parent_final.append(contours_drop_parent[jj]) @@ -775,8 +786,8 @@ def put_drop_out_from_only_drop_model(layout_no_patch, layout1): return layout_no_patch -def putt_bb_of_drop_capitals_of_model_in_patches_in_layout(layout_in_patch): +def putt_bb_of_drop_capitals_of_model_in_patches_in_layout(layout_in_patch): drop_only = (layout_in_patch[:, :, 0] == 4) * 1 contours_drop, hir_on_drop = return_contours_of_image(drop_only) contours_drop_parent = return_parent_contours(contours_drop, hir_on_drop) @@ -784,7 +795,8 @@ def putt_bb_of_drop_capitals_of_model_in_patches_in_layout(layout_in_patch): areas_cnt_text = np.array([cv2.contourArea(contours_drop_parent[j]) for j in range(len(contours_drop_parent))]) areas_cnt_text = areas_cnt_text / float(drop_only.shape[0] * drop_only.shape[1]) - contours_drop_parent = [contours_drop_parent[jz] for jz in range(len(contours_drop_parent)) if areas_cnt_text[jz] > 0.00001] + contours_drop_parent = [contours_drop_parent[jz] for jz in range(len(contours_drop_parent)) if + areas_cnt_text[jz] > 0.00001] areas_cnt_text = [areas_cnt_text[jz] for jz in range(len(areas_cnt_text)) if areas_cnt_text[jz] > 0.001] @@ -792,49 +804,47 @@ def putt_bb_of_drop_capitals_of_model_in_patches_in_layout(layout_in_patch): for jj in range(len(contours_drop_parent)): x, y, w, h = cv2.boundingRect(contours_drop_parent[jj]) - layout_in_patch[y : y + h, x : x + w, 0] = 4 + layout_in_patch[y: y + h, x: x + w, 0] = 4 return layout_in_patch -def check_any_text_region_in_model_one_is_main_or_header(regions_model_1,regions_model_full,contours_only_text_parent,all_box_coord,all_found_textline_polygons,slopes,contours_only_text_parent_d_ordered): - - cx_main,cy_main ,x_min_main , x_max_main, y_min_main ,y_max_main,y_corr_x_min_from_argmin=find_new_features_of_contours(contours_only_text_parent) - length_con=x_max_main-x_min_main - height_con=y_max_main-y_min_main +def check_any_text_region_in_model_one_is_main_or_header(regions_model_1, regions_model_full, contours_only_text_parent, + all_box_coord, all_found_textline_polygons, slopes, + contours_only_text_parent_d_ordered): + cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin = find_new_features_of_contours( + contours_only_text_parent) + length_con = x_max_main - x_min_main + height_con = y_max_main - y_min_main + all_found_textline_polygons_main = [] + all_found_textline_polygons_head = [] - all_found_textline_polygons_main=[] - all_found_textline_polygons_head=[] + all_box_coord_main = [] + all_box_coord_head = [] - all_box_coord_main=[] - all_box_coord_head=[] + slopes_main = [] + slopes_head = [] - slopes_main=[] - slopes_head=[] + contours_only_text_parent_main = [] + contours_only_text_parent_head = [] - contours_only_text_parent_main=[] - contours_only_text_parent_head=[] - - contours_only_text_parent_main_d=[] - contours_only_text_parent_head_d=[] + 
contours_only_text_parent_main_d = [] + contours_only_text_parent_head_d = [] for ii in range(len(contours_only_text_parent)): - con=contours_only_text_parent[ii] - img=np.zeros((regions_model_1.shape[0],regions_model_1.shape[1],3)) + con = contours_only_text_parent[ii] + img = np.zeros((regions_model_1.shape[0], regions_model_1.shape[1], 3)) img = cv2.fillPoly(img, pts=[con], color=(255, 255, 255)) + all_pixels = ((img[:, :, 0] == 255) * 1).sum() + pixels_header = (((img[:, :, 0] == 255) & (regions_model_full[:, :, 0] == 2)) * 1).sum() + pixels_main = all_pixels - pixels_header - all_pixels=((img[:,:,0]==255)*1).sum() - - pixels_header=( ( (img[:,:,0]==255) & (regions_model_full[:,:,0]==2) )*1 ).sum() - pixels_main=all_pixels-pixels_header - - - if (pixels_header>=pixels_main) and ( (length_con[ii]/float(height_con[ii]) )>=1.3 ): - regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=2 + if (pixels_header >= pixels_main) and ((length_con[ii] / float(height_con[ii])) >= 1.3): + regions_model_1[:, :][(regions_model_1[:, :] == 1) & (img[:, :, 0] == 255)] = 2 contours_only_text_parent_head.append(con) if contours_only_text_parent_d_ordered is not None: contours_only_text_parent_head_d.append(contours_only_text_parent_d_ordered[ii]) @@ -842,7 +852,7 @@ def check_any_text_region_in_model_one_is_main_or_header(regions_model_1,regions slopes_head.append(slopes[ii]) all_found_textline_polygons_head.append(all_found_textline_polygons[ii]) else: - regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=1 + regions_model_1[:, :][(regions_model_1[:, :] == 1) & (img[:, :, 0] == 255)] = 1 contours_only_text_parent_main.append(con) if contours_only_text_parent_d_ordered is not None: contours_only_text_parent_main_d.append(contours_only_text_parent_d_ordered[ii]) @@ -850,60 +860,62 @@ def check_any_text_region_in_model_one_is_main_or_header(regions_model_1,regions slopes_main.append(slopes[ii]) all_found_textline_polygons_main.append(all_found_textline_polygons[ii]) - #print(all_pixels,pixels_main,pixels_header) + # print(all_pixels,pixels_main,pixels_header) - return regions_model_1,contours_only_text_parent_main,contours_only_text_parent_head,all_box_coord_main,all_box_coord_head,all_found_textline_polygons_main,all_found_textline_polygons_head,slopes_main,slopes_head,contours_only_text_parent_main_d,contours_only_text_parent_head_d + return regions_model_1, contours_only_text_parent_main, contours_only_text_parent_head, all_box_coord_main, all_box_coord_head, all_found_textline_polygons_main, all_found_textline_polygons_head, slopes_main, slopes_head, contours_only_text_parent_main_d, contours_only_text_parent_head_d -def check_any_text_region_in_model_one_is_main_or_header_light(regions_model_1,regions_model_full,contours_only_text_parent,all_box_coord,all_found_textline_polygons,slopes,contours_only_text_parent_d_ordered): - - ### to make it faster +def check_any_text_region_in_model_one_is_main_or_header_light(regions_model_1, regions_model_full, + contours_only_text_parent, all_box_coord, + all_found_textline_polygons, slopes, + contours_only_text_parent_d_ordered): + # make it faster h_o = regions_model_1.shape[0] w_o = regions_model_1.shape[1] - - regions_model_1 = cv2.resize(regions_model_1, (int(regions_model_1.shape[1]/3.), int(regions_model_1.shape[0]/3.)), interpolation=cv2.INTER_NEAREST) - regions_model_full = cv2.resize(regions_model_full, (int(regions_model_full.shape[1]/3.), int(regions_model_full.shape[0]/3.)), interpolation=cv2.INTER_NEAREST) - 
contours_only_text_parent = [ (i/3.).astype(np.int32) for i in contours_only_text_parent] - ### - - cx_main,cy_main ,x_min_main , x_max_main, y_min_main ,y_max_main,y_corr_x_min_from_argmin=find_new_features_of_contours(contours_only_text_parent) + regions_model_1 = cv2.resize(regions_model_1, + (int(regions_model_1.shape[1] / 3.), int(regions_model_1.shape[0] / 3.)), + interpolation=cv2.INTER_NEAREST) + regions_model_full = cv2.resize(regions_model_full, + (int(regions_model_full.shape[1] / 3.), int(regions_model_full.shape[0] / 3.)), + interpolation=cv2.INTER_NEAREST) + contours_only_text_parent = [(i / 3.).astype(np.int32) for i in contours_only_text_parent] - length_con=x_max_main-x_min_main - height_con=y_max_main-y_min_main + ### + cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin = find_new_features_of_contours( + contours_only_text_parent) + length_con = x_max_main - x_min_main + height_con = y_max_main - y_min_main - all_found_textline_polygons_main=[] - all_found_textline_polygons_head=[] + all_found_textline_polygons_main = [] + all_found_textline_polygons_head = [] - all_box_coord_main=[] - all_box_coord_head=[] + all_box_coord_main = [] + all_box_coord_head = [] - slopes_main=[] - slopes_head=[] + slopes_main = [] + slopes_head = [] - contours_only_text_parent_main=[] - contours_only_text_parent_head=[] + contours_only_text_parent_main = [] + contours_only_text_parent_head = [] - contours_only_text_parent_main_d=[] - contours_only_text_parent_head_d=[] + contours_only_text_parent_main_d = [] + contours_only_text_parent_head_d = [] for ii in range(len(contours_only_text_parent)): - con=contours_only_text_parent[ii] - img=np.zeros((regions_model_1.shape[0],regions_model_1.shape[1],3)) + con = contours_only_text_parent[ii] + img = np.zeros((regions_model_1.shape[0], regions_model_1.shape[1], 3)) img = cv2.fillPoly(img, pts=[con], color=(255, 255, 255)) + all_pixels = ((img[:, :, 0] == 255) * 1).sum() + pixels_header = (((img[:, :, 0] == 255) & (regions_model_full[:, :, 0] == 2)) * 1).sum() + pixels_main = all_pixels - pixels_header - all_pixels=((img[:,:,0]==255)*1).sum() - - pixels_header=( ( (img[:,:,0]==255) & (regions_model_full[:,:,0]==2) )*1 ).sum() - pixels_main=all_pixels-pixels_header - - - if (pixels_header>=pixels_main) and ( (length_con[ii]/float(height_con[ii]) )>=1.3 ): - regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=2 + if (pixels_header >= pixels_main) and ((length_con[ii] / float(height_con[ii])) >= 1.3): + regions_model_1[:, :][(regions_model_1[:, :] == 1) & (img[:, :, 0] == 255)] = 2 contours_only_text_parent_head.append(con) if contours_only_text_parent_d_ordered is not None: contours_only_text_parent_head_d.append(contours_only_text_parent_d_ordered[ii]) @@ -911,7 +923,7 @@ def check_any_text_region_in_model_one_is_main_or_header_light(regions_model_1,r slopes_head.append(slopes[ii]) all_found_textline_polygons_head.append(all_found_textline_polygons[ii]) else: - regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=1 + regions_model_1[:, :][(regions_model_1[:, :] == 1) & (img[:, :, 0] == 255)] = 1 contours_only_text_parent_main.append(con) if contours_only_text_parent_d_ordered is not None: contours_only_text_parent_main_d.append(contours_only_text_parent_d_ordered[ii]) @@ -919,19 +931,18 @@ def check_any_text_region_in_model_one_is_main_or_header_light(regions_model_1,r slopes_main.append(slopes[ii]) all_found_textline_polygons_main.append(all_found_textline_polygons[ii]) - 
#print(all_pixels,pixels_main,pixels_header) + # print(all_pixels,pixels_main,pixels_header) + # make it faster - - ### to make it faster - regions_model_1 = cv2.resize(regions_model_1, (w_o, h_o), interpolation=cv2.INTER_NEAREST) - #regions_model_full = cv2.resize(img, (int(regions_model_full.shape[1]/3.), int(regions_model_full.shape[0]/3.)), interpolation=cv2.INTER_NEAREST) - contours_only_text_parent_head = [ (i*3.).astype(np.int32) for i in contours_only_text_parent_head] - contours_only_text_parent_main = [ (i*3.).astype(np.int32) for i in contours_only_text_parent_main] + # regions_model_full = cv2.resize(img, (int(regions_model_full.shape[1]/3.), int(regions_model_full.shape[0]/3.)), interpolation=cv2.INTER_NEAREST) + contours_only_text_parent_head = [(i * 3.).astype(np.int32) for i in contours_only_text_parent_head] + contours_only_text_parent_main = [(i * 3.).astype(np.int32) for i in contours_only_text_parent_main] ### - - return regions_model_1,contours_only_text_parent_main,contours_only_text_parent_head,all_box_coord_main,all_box_coord_head,all_found_textline_polygons_main,all_found_textline_polygons_head,slopes_main,slopes_head,contours_only_text_parent_main_d,contours_only_text_parent_head_d + + return regions_model_1, contours_only_text_parent_main, contours_only_text_parent_head, all_box_coord_main, all_box_coord_head, all_found_textline_polygons_main, all_found_textline_polygons_head, slopes_main, slopes_head, contours_only_text_parent_main_d, contours_only_text_parent_head_d + def small_textlines_to_parent_adherence2(textlines_con, textline_iamge, num_col): # print(textlines_con) @@ -950,11 +961,11 @@ def small_textlines_to_parent_adherence2(textlines_con, textline_iamge, num_col) textlines_tot.append(np.array(textlines_con[m1][nn], dtype=np.int32)) textlines_tot_org_form.append(textlines_con[m1][nn]) - ##img_text_all=np.zeros((textline_iamge.shape[0],textline_iamge.shape[1])) - ##img_text_all=cv2.fillPoly(img_text_all, pts =textlines_tot , color=(1,1,1)) + # img_text_all=np.zeros((textline_iamge.shape[0],textline_iamge.shape[1])) + # img_text_all=cv2.fillPoly(img_text_all, pts =textlines_tot , color=(1,1,1)) - ##plt.imshow(img_text_all) - ##plt.show() + # plt.imshow(img_text_all) + # plt.show() areas_cnt_text = np.array([cv2.contourArea(textlines_tot[j]) for j in range(len(textlines_tot))]) areas_cnt_text = areas_cnt_text / float(textline_iamge.shape[0] * textline_iamge.shape[1]) indexes_textlines = np.array(range(len(textlines_tot))) @@ -1066,10 +1077,10 @@ def small_textlines_to_parent_adherence2(textlines_con, textline_iamge, num_col) textlines_con_changed.append(textlines_big_org_form) return textlines_con_changed -def order_of_regions(textline_mask, contours_main, contours_header, y_ref): - ##plt.imshow(textline_mask) - ##plt.show() +def order_of_regions(textline_mask, contours_main, contours_header, y_ref): + # plt.imshow(textline_mask) + # plt.show() """ print(len(contours_main),'contours_main') mada_n=textline_mask.sum(axis=1) @@ -1107,7 +1118,7 @@ def order_of_regions(textline_mask, contours_main, contours_header, y_ref): y = textline_sum_along_width[:] y_padded = np.zeros(len(y) + 40) - y_padded[20 : len(y) + 20] = y + y_padded[20: len(y) + 20] = y x = np.array(range(len(y))) peaks_real, _ = find_peaks(gaussian_filter1d(y, 3), height=0) @@ -1118,7 +1129,7 @@ def order_of_regions(textline_mask, contours_main, contours_header, y_ref): zneg_rev = -y_padded + np.max(y_padded) zneg = np.zeros(len(zneg_rev) + 40) - zneg[20 : len(zneg_rev) + 20] = zneg_rev + 
zneg[20: len(zneg_rev) + 20] = zneg_rev zneg = gaussian_filter1d(zneg, sigma_gaus) peaks, _ = find_peaks(z, height=0) @@ -1127,10 +1138,10 @@ def order_of_regions(textline_mask, contours_main, contours_header, y_ref): peaks_neg = peaks_neg - 20 - 20 peaks = peaks - 20 - ##plt.plot(z) - ##plt.show() + # plt.plot(z) + # plt.show() - if contours_main != None: + if contours_main is not None: areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))]) M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] @@ -1141,7 +1152,7 @@ def order_of_regions(textline_mask, contours_main, contours_header, y_ref): y_min_main = np.array([np.min(contours_main[j][:, 0, 1]) for j in range(len(contours_main))]) y_max_main = np.array([np.max(contours_main[j][:, 0, 1]) for j in range(len(contours_main))]) - if len(contours_header) != None: + if len(contours_header) is not None: areas_header = np.array([cv2.contourArea(contours_header[j]) for j in range(len(contours_header))]) M_header = [cv2.moments(contours_header[j]) for j in range(len(contours_header))] cx_header = [(M_header[j]["m10"] / (M_header[j]["m00"] + 1e-32)) for j in range(len(M_header))] @@ -1165,10 +1176,10 @@ def order_of_regions(textline_mask, contours_main, contours_header, y_ref): if len(cy_main) > 0 and np.max(cy_main) > np.max(peaks_neg_new): cy_main = np.array(cy_main) * (np.max(peaks_neg_new) / np.max(cy_main)) - 10 - if contours_main != None: + if contours_main is not None: indexer_main = np.array(range(len(contours_main))) - if contours_main != None: + if contours_main is not None: len_main = len(contours_main) else: len_main = 0 @@ -1178,16 +1189,16 @@ def order_of_regions(textline_mask, contours_main, contours_header, y_ref): matrix_of_orders[:, 0] = np.array(range(len(contours_main) + len(contours_header))) matrix_of_orders[: len(contours_main), 1] = 1 - matrix_of_orders[len(contours_main) :, 1] = 2 + matrix_of_orders[len(contours_main):, 1] = 2 matrix_of_orders[: len(contours_main), 2] = cx_main - matrix_of_orders[len(contours_main) :, 2] = cx_header + matrix_of_orders[len(contours_main):, 2] = cx_header matrix_of_orders[: len(contours_main), 3] = cy_main - matrix_of_orders[len(contours_main) :, 3] = cy_header + matrix_of_orders[len(contours_main):, 3] = cy_header matrix_of_orders[: len(contours_main), 4] = np.array(range(len(contours_main))) - matrix_of_orders[len(contours_main) :, 4] = np.array(range(len(contours_header))) + matrix_of_orders[len(contours_main):, 4] = np.array(range(len(contours_header))) # print(peaks_neg_new,'peaks_neg_new') @@ -1202,11 +1213,11 @@ def order_of_regions(textline_mask, contours_main, contours_header, y_ref): # print(top,down,'topdown') - indexes_in = matrix_of_orders[:, 0][(matrix_of_orders[:, 3] >= top) & ((matrix_of_orders[:, 3] < down))] - cxs_in = matrix_of_orders[:, 2][(matrix_of_orders[:, 3] >= top) & ((matrix_of_orders[:, 3] < down))] - cys_in = matrix_of_orders[:, 3][(matrix_of_orders[:, 3] >= top) & ((matrix_of_orders[:, 3] < down))] - types_of_text = matrix_of_orders[:, 1][(matrix_of_orders[:, 3] >= top) & ((matrix_of_orders[:, 3] < down))] - index_types_of_text = matrix_of_orders[:, 4][(matrix_of_orders[:, 3] >= top) & ((matrix_of_orders[:, 3] < down))] + indexes_in = matrix_of_orders[:, 0][(matrix_of_orders[:, 3] >= top) & (matrix_of_orders[:, 3] < down)] + cxs_in = matrix_of_orders[:, 2][(matrix_of_orders[:, 3] >= top) & (matrix_of_orders[:, 3] < 
down)] + cys_in = matrix_of_orders[:, 3][(matrix_of_orders[:, 3] >= top) & (matrix_of_orders[:, 3] < down)] + types_of_text = matrix_of_orders[:, 1][(matrix_of_orders[:, 3] >= top) & (matrix_of_orders[:, 3] < down)] + index_types_of_text = matrix_of_orders[:, 4][(matrix_of_orders[:, 3] >= top) & (matrix_of_orders[:, 3] < down)] # print(top,down) # print(cys_in,'cyyyins') @@ -1222,7 +1233,7 @@ def order_of_regions(textline_mask, contours_main, contours_header, y_ref): final_types.append(int(ind_in_type[j])) final_index_type.append(int(ind_ind_type[j])) - ##matrix_of_orders[:len_main,4]=final_indexers_sorted[:] + # matrix_of_orders[:len_main,4] = final_indexers_sorted[:] # print(peaks_neg_new,'peaks') # print(final_indexers_sorted,'indexsorted') @@ -1231,103 +1242,106 @@ def order_of_regions(textline_mask, contours_main, contours_header, y_ref): return final_indexers_sorted, matrix_of_orders, final_types, final_index_type -def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new(img_p_in_ver, img_in_hor,num_col_classifier): - #img_p_in_ver = cv2.erode(img_p_in_ver, self.kernel, iterations=2) - img_p_in_ver=img_p_in_ver.astype(np.uint8) - img_p_in_ver=np.repeat(img_p_in_ver[:, :, np.newaxis], 3, axis=2) + +def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new(img_p_in_ver, img_in_hor, + num_col_classifier): + # img_p_in_ver = cv2.erode(img_p_in_ver, self.kernel, iterations=2) + img_p_in_ver = img_p_in_ver.astype(np.uint8) + img_p_in_ver = np.repeat(img_p_in_ver[:, :, np.newaxis], 3, axis=2) imgray = cv2.cvtColor(img_p_in_ver, cv2.COLOR_BGR2GRAY) ret, thresh = cv2.threshold(imgray, 0, 255, 0) - contours_lines_ver,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) - - slope_lines_ver,dist_x_ver, x_min_main_ver ,x_max_main_ver ,cy_main_ver,slope_lines_org_ver,y_min_main_ver, y_max_main_ver, cx_main_ver=find_features_of_lines(contours_lines_ver) - + contours_lines_ver, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + slope_lines_ver, dist_x_ver, x_min_main_ver, x_max_main_ver, cy_main_ver, slope_lines_org_ver, y_min_main_ver, y_max_main_ver, cx_main_ver = find_features_of_lines( + contours_lines_ver) + for i in range(len(x_min_main_ver)): - img_p_in_ver[int(y_min_main_ver[i]):int(y_min_main_ver[i])+30,int(cx_main_ver[i])-25:int(cx_main_ver[i])+25,0]=0 - img_p_in_ver[int(y_max_main_ver[i])-30:int(y_max_main_ver[i]),int(cx_main_ver[i])-25:int(cx_main_ver[i])+25,0]=0 - - - img_in_hor=img_in_hor.astype(np.uint8) - img_in_hor=np.repeat(img_in_hor[:, :, np.newaxis], 3, axis=2) + img_p_in_ver[int(y_min_main_ver[i]):int(y_min_main_ver[i]) + 30, int(cx_main_ver[i]) - 25:int(cx_main_ver[i]) + 25, 0] = 0 + img_p_in_ver[int(y_max_main_ver[i]) - 30:int(y_max_main_ver[i]), int(cx_main_ver[i]) - 25:int(cx_main_ver[i]) + 25, 0] = 0 + + img_in_hor = img_in_hor.astype(np.uint8) + img_in_hor = np.repeat(img_in_hor[:, :, np.newaxis], 3, axis=2) imgray = cv2.cvtColor(img_in_hor, cv2.COLOR_BGR2GRAY) ret, thresh = cv2.threshold(imgray, 0, 255, 0) - contours_lines_hor,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) - - slope_lines_hor,dist_x_hor, x_min_main_hor ,x_max_main_hor ,cy_main_hor,slope_lines_org_hor,y_min_main_hor, y_max_main_hor, cx_main_hor=find_features_of_lines(contours_lines_hor) - - - x_width_smaller_than_acolumn_width=img_in_hor.shape[1]/float(num_col_classifier+1.) 
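Reviewer aid: the separator-merging helper reformatted in this and the following hunk works by adding the horizontal and vertical separator masks and treating pixels that sum to 2 as crossing points, whose neighbourhoods are then blanked out around their centroids. A small self-contained illustration of that crossing detection (toy masks and smaller margins, no eynollah imports):

import numpy as np
import cv2

# Toy binary masks: one horizontal and one vertical separator that cross.
hor = np.zeros((100, 100), dtype=np.uint8)
ver = np.zeros((100, 100), dtype=np.uint8)
hor[48:52, 10:90] = 1
ver[10:90, 48:52] = 1

# Where the sum equals 2, both a horizontal and a vertical line are present.
cross = ((hor + ver) == 2).astype(np.uint8)

contours, _ = cv2.findContours(cross, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    m = cv2.moments(cnt)
    cx = int(m["m10"] / (m["m00"] + 1e-32))
    cy = int(m["m01"] / (m["m00"] + 1e-32))
    # Erase the horizontal line just left and right of the crossing,
    # analogous to the fixed-margin blanking in the helper above.
    hor[cy - 3:cy + 3, cx + 2:cx + 10] = 0
    hor[cy - 3:cy + 3, cx - 10:cx - 2] = 0

print(cross.sum(), "crossing pixels found")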
- - len_lines_bigger_than_x_width_smaller_than_acolumn_width=len( dist_x_hor[dist_x_hor>=x_width_smaller_than_acolumn_width] ) - - len_lines_bigger_than_x_width_smaller_than_acolumn_width_per_column=int( len_lines_bigger_than_x_width_smaller_than_acolumn_width/float(num_col_classifier) ) - - - if len_lines_bigger_than_x_width_smaller_than_acolumn_width_per_column<10: - args_hor=np.array( range(len(slope_lines_hor) )) - all_args_uniq=contours_in_same_horizon(cy_main_hor) - #print(all_args_uniq,'all_args_uniq') - if len(all_args_uniq)>0: + contours_lines_hor, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + slope_lines_hor, dist_x_hor, x_min_main_hor, x_max_main_hor, cy_main_hor, slope_lines_org_hor, y_min_main_hor, y_max_main_hor, cx_main_hor = find_features_of_lines( + contours_lines_hor) + + x_width_smaller_than_acolumn_width = img_in_hor.shape[1] / float(num_col_classifier + 1.) + + len_lines_bigger_than_x_width_smaller_than_acolumn_width = len( + dist_x_hor[dist_x_hor >= x_width_smaller_than_acolumn_width]) + + len_lines_bigger_than_x_width_smaller_than_acolumn_width_per_column = int( + len_lines_bigger_than_x_width_smaller_than_acolumn_width / float(num_col_classifier)) + + if len_lines_bigger_than_x_width_smaller_than_acolumn_width_per_column < 10: + args_hor = np.array(range(len(slope_lines_hor))) + all_args_uniq = contours_in_same_horizon(cy_main_hor) + # print(all_args_uniq,'all_args_uniq') + if len(all_args_uniq) > 0: if type(all_args_uniq[0]) is list: - special_separators=[] - contours_new=[] + special_separators = [] + contours_new = [] for dd in range(len(all_args_uniq)): - merged_all=None - some_args=args_hor[all_args_uniq[dd]] - some_cy=cy_main_hor[all_args_uniq[dd]] - some_x_min=x_min_main_hor[all_args_uniq[dd]] - some_x_max=x_max_main_hor[all_args_uniq[dd]] - - #img_in=np.zeros(separators_closeup_n[:,:,2].shape) - #print(img_p_in_ver.shape[1],some_x_max-some_x_min,'xdiff') - diff_x_some=some_x_max-some_x_min + merged_all = None + some_args = args_hor[all_args_uniq[dd]] + some_cy = cy_main_hor[all_args_uniq[dd]] + some_x_min = x_min_main_hor[all_args_uniq[dd]] + some_x_max = x_max_main_hor[all_args_uniq[dd]] + + # img_in=np.zeros(separators_closeup_n[:,:,2].shape) + # print(img_p_in_ver.shape[1],some_x_max-some_x_min,'xdiff') + diff_x_some = some_x_max - some_x_min for jv in range(len(some_args)): - img_p_in=cv2.fillPoly(img_in_hor, pts =[contours_lines_hor[some_args[jv]]], color=(1,1,1)) - - if any(i_diff>(img_p_in_ver.shape[1]/float(3.3)) for i_diff in diff_x_some): - img_p_in[int(np.mean(some_cy))-5:int(np.mean(some_cy))+5, int(np.min(some_x_min)):int(np.max(some_x_max)) ]=1 - - sum_dis=dist_x_hor[some_args].sum() - diff_max_min_uniques=np.max(x_max_main_hor[some_args])-np.min(x_min_main_hor[some_args]) - - - if diff_max_min_uniques>sum_dis and ( (sum_dis/float(diff_max_min_uniques) ) >0.85 ) and ( (diff_max_min_uniques/float(img_p_in_ver.shape[1]))>0.85 ) and np.std( dist_x_hor[some_args] )<(0.55*np.mean( dist_x_hor[some_args] )): - #print(dist_x_hor[some_args],dist_x_hor[some_args].sum(),np.min(x_min_main_hor[some_args]) ,np.max(x_max_main_hor[some_args]),'jalibdi') - #print(np.mean( dist_x_hor[some_args] ),np.std( dist_x_hor[some_args] ),np.var( dist_x_hor[some_args] ),'jalibdiha') + img_p_in = cv2.fillPoly(img_in_hor, pts=[contours_lines_hor[some_args[jv]]], color=(1, 1, 1)) + + if any(i_diff > (img_p_in_ver.shape[1] / float(3.3)) for i_diff in diff_x_some): + img_p_in[int(np.mean(some_cy)) - 5:int(np.mean(some_cy)) + 5, 
int(np.min(some_x_min)):int(np.max(some_x_max))] = 1 + + sum_dis = dist_x_hor[some_args].sum() + diff_max_min_uniques = np.max(x_max_main_hor[some_args]) - np.min(x_min_main_hor[some_args]) + + if diff_max_min_uniques > sum_dis and ((sum_dis / float(diff_max_min_uniques)) > 0.85) and ( + (diff_max_min_uniques / float(img_p_in_ver.shape[1])) > 0.85) and np.std( + dist_x_hor[some_args]) < (0.55 * np.mean(dist_x_hor[some_args])): + # print(dist_x_hor[some_args],dist_x_hor[some_args].sum(),np.min(x_min_main_hor[some_args]) ,np.max(x_max_main_hor[some_args]),'jalibdi') + # print(np.mean( dist_x_hor[some_args] ),np.std( dist_x_hor[some_args] ),np.var( dist_x_hor[some_args] ),'jalibdiha') special_separators.append(np.mean(cy_main_hor[some_args])) else: - img_p_in=img_in_hor - special_separators=[] + img_p_in = img_in_hor + special_separators = [] else: - img_p_in=img_in_hor - special_separators=[] - - - img_p_in_ver[:,:,0][img_p_in_ver[:,:,0]==255]=1 - sep_ver_hor=img_p_in+img_p_in_ver + img_p_in = img_in_hor + special_separators = [] + img_p_in_ver[:, :, 0][img_p_in_ver[:, :, 0] == 255] = 1 + sep_ver_hor = img_p_in + img_p_in_ver - sep_ver_hor_cross=(sep_ver_hor[:,:,0]==2)*1 + sep_ver_hor_cross = (sep_ver_hor[:, :, 0] == 2) * 1 - sep_ver_hor_cross=np.repeat(sep_ver_hor_cross[:, :, np.newaxis], 3, axis=2) - sep_ver_hor_cross=sep_ver_hor_cross.astype(np.uint8) + sep_ver_hor_cross = np.repeat(sep_ver_hor_cross[:, :, np.newaxis], 3, axis=2) + sep_ver_hor_cross = sep_ver_hor_cross.astype(np.uint8) imgray = cv2.cvtColor(sep_ver_hor_cross, cv2.COLOR_BGR2GRAY) ret, thresh = cv2.threshold(imgray, 0, 255, 0) - contours_cross,_=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) - - cx_cross,cy_cross ,_ , _, _ ,_,_=find_new_features_of_contours(contours_cross) - + contours_cross, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + cx_cross, cy_cross, _, _, _, _, _ = find_new_features_of_contours(contours_cross) + for ii in range(len(cx_cross)): - img_p_in[int(cy_cross[ii])-30:int(cy_cross[ii])+30,int(cx_cross[ii])+5:int(cx_cross[ii])+40,0]=0 - img_p_in[int(cy_cross[ii])-30:int(cy_cross[ii])+30,int(cx_cross[ii])-40:int(cx_cross[ii])-4,0]=0 - + img_p_in[int(cy_cross[ii]) - 30:int(cy_cross[ii]) + 30, int(cx_cross[ii]) + 5:int(cx_cross[ii]) + 40, 0] = 0 + img_p_in[int(cy_cross[ii]) - 30:int(cy_cross[ii]) + 30, int(cx_cross[ii]) - 40:int(cx_cross[ii]) - 4, 0] = 0 + else: - img_p_in=np.copy(img_in_hor) - special_separators=[] - return img_p_in[:,:,0],special_separators + img_p_in = np.copy(img_in_hor) + special_separators = [] + return img_p_in[:, :, 0], special_separators + def return_points_with_boundies(peaks_neg_fin, first_point, last_point): peaks_neg_tot = [] @@ -1337,102 +1351,95 @@ def return_points_with_boundies(peaks_neg_fin, first_point, last_point): peaks_neg_tot.append(last_point) return peaks_neg_tot + def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, pixel_lines, contours_h=None): + separators_closeup = (region_pre_p[:, :, :] == pixel_lines) * 1 + + separators_closeup[0:110, :, :] = 0 + separators_closeup[separators_closeup.shape[0] - 150:, :, :] = 0 + + kernel = np.ones((5, 5), np.uint8) + + separators_closeup = separators_closeup.astype(np.uint8) + separators_closeup = cv2.dilate(separators_closeup, kernel, iterations=1) + separators_closeup = cv2.erode(separators_closeup, kernel, iterations=1) + + separators_closeup_new = np.zeros((separators_closeup.shape[0], separators_closeup.shape[1])) + + # 
_,separators_closeup_n=self.combine_hor_lines_and_delete_cross_points_and_get_lines_features_back(region_pre_p[:,:,0]) + separators_closeup_n = np.copy(separators_closeup) + + separators_closeup_n = separators_closeup_n.astype(np.uint8) + # plt.imshow(separators_closeup_n[:,:,0]) + # plt.show() + + separators_closeup_n_binary = np.zeros((separators_closeup_n.shape[0], separators_closeup_n.shape[1])) + separators_closeup_n_binary[:, :] = separators_closeup_n[:, :, 0] + + separators_closeup_n_binary[:, :][separators_closeup_n_binary[:, :] != 0] = 1 + # separators_closeup_n_binary[:,:][separators_closeup_n_binary[:,:]==0]=255 + # separators_closeup_n_binary[:,:][separators_closeup_n_binary[:,:]==-255]=0 + + # separators_closeup_n_binary=(separators_closeup_n_binary[:,:]==2)*1 + + # gray = cv2.cvtColor(separators_closeup_n, cv2.COLOR_BGR2GRAY) - separators_closeup=( (region_pre_p[:,:,:]==pixel_lines))*1 - - separators_closeup[0:110,:,:]=0 - separators_closeup[separators_closeup.shape[0]-150:,:,:]=0 - - kernel = np.ones((5,5),np.uint8) - - separators_closeup=separators_closeup.astype(np.uint8) - separators_closeup = cv2.dilate(separators_closeup,kernel,iterations = 1) - separators_closeup = cv2.erode(separators_closeup,kernel,iterations = 1) - - - separators_closeup_new=np.zeros((separators_closeup.shape[0] ,separators_closeup.shape[1] )) - - - - ##_,separators_closeup_n=self.combine_hor_lines_and_delete_cross_points_and_get_lines_features_back(region_pre_p[:,:,0]) - separators_closeup_n=np.copy(separators_closeup) - - separators_closeup_n=separators_closeup_n.astype(np.uint8) - ##plt.imshow(separators_closeup_n[:,:,0]) - ##plt.show() - - separators_closeup_n_binary=np.zeros(( separators_closeup_n.shape[0],separators_closeup_n.shape[1]) ) - separators_closeup_n_binary[:,:]=separators_closeup_n[:,:,0] - - separators_closeup_n_binary[:,:][separators_closeup_n_binary[:,:]!=0]=1 - #separators_closeup_n_binary[:,:][separators_closeup_n_binary[:,:]==0]=255 - #separators_closeup_n_binary[:,:][separators_closeup_n_binary[:,:]==-255]=0 - - - #separators_closeup_n_binary=(separators_closeup_n_binary[:,:]==2)*1 - - #gray = cv2.cvtColor(separators_closeup_n, cv2.COLOR_BGR2GRAY) - ### - - #print(separators_closeup_n_binary.shape) - gray_early=np.repeat(separators_closeup_n_binary[:, :, np.newaxis], 3, axis=2) - gray_early=gray_early.astype(np.uint8) - - #print(gray_early.shape,'burda') + + # print(separators_closeup_n_binary.shape) + gray_early = np.repeat(separators_closeup_n_binary[:, :, np.newaxis], 3, axis=2) + gray_early = gray_early.astype(np.uint8) + + # print(gray_early.shape,'burda') imgray_e = cv2.cvtColor(gray_early, cv2.COLOR_BGR2GRAY) - #print('burda2') + # print('burda2') ret_e, thresh_e = cv2.threshold(imgray_e, 0, 255, 0) - - #print('burda3') - contours_line_e,hierarchy_e=cv2.findContours(thresh_e,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) - - #slope_lines_e,dist_x_e, x_min_main_e ,x_max_main_e ,cy_main_e,slope_lines_org_e,y_min_main_e, y_max_main_e, cx_main_e=self.find_features_of_lines(contours_line_e) - - slope_linese,dist_xe, x_min_maine ,x_max_maine ,cy_maine,slope_lines_orge,y_min_maine, y_max_maine, cx_maine=find_features_of_lines(contours_line_e) - - dist_ye=y_max_maine-y_min_maine - #print(y_max_maine-y_min_maine,'y') - #print(dist_xe,'x') - - - args_e=np.array(range(len(contours_line_e))) - args_hor_e=args_e[(dist_ye<=50) & (dist_xe>=3*dist_ye)] - - #print(args_hor_e,'jidi',len(args_hor_e),'jilva') - - cnts_hor_e=[] + + # print('burda3') + contours_line_e, hierarchy_e = 
cv2.findContours(thresh_e, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + # slope_lines_e,dist_x_e, x_min_main_e ,x_max_main_e ,cy_main_e,slope_lines_org_e,y_min_main_e, y_max_main_e, cx_main_e=self.find_features_of_lines(contours_line_e) + + slope_linese, dist_xe, x_min_maine, x_max_maine, cy_maine, slope_lines_orge, y_min_maine, y_max_maine, cx_maine = find_features_of_lines( + contours_line_e) + + dist_ye = y_max_maine - y_min_maine + # print(y_max_maine-y_min_maine,'y') + # print(dist_xe,'x') + + args_e = np.array(range(len(contours_line_e))) + args_hor_e = args_e[(dist_ye <= 50) & (dist_xe >= 3 * dist_ye)] + + # print(args_hor_e,'jidi',len(args_hor_e),'jilva') + + cnts_hor_e = [] for ce in args_hor_e: cnts_hor_e.append(contours_line_e[ce]) - #print(len(slope_linese),'lieee') - - figs_e=np.zeros(thresh_e.shape) - figs_e=cv2.fillPoly(figs_e,pts=cnts_hor_e,color=(1,1,1)) - - #plt.imshow(figs_e) - #plt.show() - + # print(len(slope_linese),'lieee') + + figs_e = np.zeros(thresh_e.shape) + figs_e = cv2.fillPoly(figs_e, pts=cnts_hor_e, color=(1, 1, 1)) + + # plt.imshow(figs_e) + # plt.show() + ### - - separators_closeup_n_binary=cv2.fillPoly(separators_closeup_n_binary,pts=cnts_hor_e,color=(0,0,0)) - + + separators_closeup_n_binary = cv2.fillPoly(separators_closeup_n_binary, pts=cnts_hor_e, color=(0, 0, 0)) + gray = cv2.bitwise_not(separators_closeup_n_binary) - gray=gray.astype(np.uint8) - - - #plt.imshow(gray) - #plt.show() - - - bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \ - cv2.THRESH_BINARY, 15, -2) - ##plt.imshow(bw[:,:]) - ##plt.show() - + gray = gray.astype(np.uint8) + + # plt.imshow(gray) + # plt.show() + + bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -2) + # plt.imshow(bw[:,:]) + # plt.show() + horizontal = np.copy(bw) vertical = np.copy(bw) - + cols = horizontal.shape[1] horizontal_size = cols // 30 # Create structure element for extracting horizontal lines through morphology operations @@ -1441,23 +1448,19 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, horizontal = cv2.erode(horizontal, horizontalStructure) horizontal = cv2.dilate(horizontal, horizontalStructure) - kernel = np.ones((5,5),np.uint8) + kernel = np.ones((5, 5), np.uint8) + horizontal = cv2.dilate(horizontal, kernel, iterations=2) + horizontal = cv2.erode(horizontal, kernel, iterations=2) - horizontal = cv2.dilate(horizontal,kernel,iterations = 2) - horizontal = cv2.erode(horizontal,kernel,iterations = 2) - - ### - #print(np.unique(horizontal),'uni') - horizontal=cv2.fillPoly(horizontal,pts=cnts_hor_e,color=(255,255,255)) + # print(np.unique(horizontal),'uni') + horizontal = cv2.fillPoly(horizontal, pts=cnts_hor_e, color=(255, 255, 255)) ### - - - - #plt.imshow(horizontal) - #plt.show() - + + # plt.imshow(horizontal) + # plt.show() + rows = vertical.shape[0] verticalsize = rows // 30 # Create structure element for extracting vertical lines through morphology operations @@ -1465,655 +1468,647 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, # Apply morphology operations vertical = cv2.erode(vertical, verticalStructure) vertical = cv2.dilate(vertical, verticalStructure) - - vertical = cv2.dilate(vertical,kernel,iterations = 1) + + vertical = cv2.dilate(vertical, kernel, iterations=1) # Show extracted vertical lines - horizontal,special_separators=combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new(vertical,horizontal,num_col_classifier) - - - #plt.imshow(horizontal) 
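Reviewer aid: the column-count helper in this hunk extracts horizontal and vertical rules with the standard OpenCV recipe visible above — adaptive thresholding followed by erosion/dilation with long thin structuring elements sized width // 30 and height // 30. A minimal sketch of that recipe on a synthetic page (image and sizes invented for the example):

import numpy as np
import cv2

# Synthetic page: white background, one horizontal and one vertical rule.
img = np.full((300, 300), 255, dtype=np.uint8)
cv2.line(img, (20, 150), (280, 150), 0, 3)   # horizontal rule
cv2.line(img, (150, 20), (150, 280), 0, 3)   # vertical rule

# Invert and adaptively threshold so the rules become white foreground.
bw = cv2.adaptiveThreshold(cv2.bitwise_not(img), 255,
                           cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -2)

horizontal, vertical = np.copy(bw), np.copy(bw)

# Long, thin structuring elements keep only lines of matching orientation.
h_struct = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontal.shape[1] // 30, 1))
v_struct = cv2.getStructuringElement(cv2.MORPH_RECT, (1, vertical.shape[0] // 30))

horizontal = cv2.dilate(cv2.erode(horizontal, h_struct), h_struct)
vertical = cv2.dilate(cv2.erode(vertical, v_struct), v_struct)

print(int(horizontal.sum() > 0), int(vertical.sum() > 0))  # both rules survive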
- #plt.show() - #print(vertical.shape,np.unique(vertical),'verticalvertical') - separators_closeup_new[:,:][vertical[:,:]!=0]=1 - separators_closeup_new[:,:][horizontal[:,:]!=0]=1 - - ##plt.imshow(separators_closeup_new) - ##plt.show() - ##separators_closeup_n - vertical=np.repeat(vertical[:, :, np.newaxis], 3, axis=2) - vertical=vertical.astype(np.uint8) - - ##plt.plot(vertical[:,:,0].sum(axis=0)) - ##plt.show() - - #plt.plot(vertical[:,:,0].sum(axis=1)) - #plt.show() + horizontal, special_separators = combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new(vertical, horizontal, num_col_classifier) + + # plt.imshow(horizontal) + # plt.show() + # print(vertical.shape,np.unique(vertical),'verticalvertical') + separators_closeup_new[:, :][vertical[:, :] != 0] = 1 + separators_closeup_new[:, :][horizontal[:, :] != 0] = 1 + + # plt.imshow(separators_closeup_new) + # plt.show() + # separators_closeup_n + vertical = np.repeat(vertical[:, :, np.newaxis], 3, axis=2) + vertical = vertical.astype(np.uint8) + + # plt.plot(vertical[:,:,0].sum(axis=0)) + # plt.show() + + # plt.plot(vertical[:,:,0].sum(axis=1)) + # plt.show() imgray = cv2.cvtColor(vertical, cv2.COLOR_BGR2GRAY) ret, thresh = cv2.threshold(imgray, 0, 255, 0) - - contours_line_vers,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) - slope_lines,dist_x, x_min_main ,x_max_main ,cy_main,slope_lines_org,y_min_main, y_max_main, cx_main=find_features_of_lines(contours_line_vers) - #print(slope_lines,'vertical') - args=np.array( range(len(slope_lines) )) - args_ver=args[slope_lines==1] - dist_x_ver=dist_x[slope_lines==1] - y_min_main_ver=y_min_main[slope_lines==1] - y_max_main_ver=y_max_main[slope_lines==1] - x_min_main_ver=x_min_main[slope_lines==1] - x_max_main_ver=x_max_main[slope_lines==1] - cx_main_ver=cx_main[slope_lines==1] - dist_y_ver=y_max_main_ver-y_min_main_ver - len_y=separators_closeup.shape[0]/3.0 - - - #plt.imshow(horizontal) - #plt.show() - - horizontal=np.repeat(horizontal[:, :, np.newaxis], 3, axis=2) - horizontal=horizontal.astype(np.uint8) + + contours_line_vers, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + slope_lines, dist_x, x_min_main, x_max_main, cy_main, slope_lines_org, y_min_main, y_max_main, cx_main = find_features_of_lines( + contours_line_vers) + # print(slope_lines,'vertical') + args = np.array(range(len(slope_lines))) + args_ver = args[slope_lines == 1] + dist_x_ver = dist_x[slope_lines == 1] + y_min_main_ver = y_min_main[slope_lines == 1] + y_max_main_ver = y_max_main[slope_lines == 1] + x_min_main_ver = x_min_main[slope_lines == 1] + x_max_main_ver = x_max_main[slope_lines == 1] + cx_main_ver = cx_main[slope_lines == 1] + dist_y_ver = y_max_main_ver - y_min_main_ver + len_y = separators_closeup.shape[0] / 3.0 + + # plt.imshow(horizontal) + # plt.show() + + horizontal = np.repeat(horizontal[:, :, np.newaxis], 3, axis=2) + horizontal = horizontal.astype(np.uint8) imgray = cv2.cvtColor(horizontal, cv2.COLOR_BGR2GRAY) ret, thresh = cv2.threshold(imgray, 0, 255, 0) - - contours_line_hors,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) - slope_lines,dist_x, x_min_main ,x_max_main ,cy_main,slope_lines_org,y_min_main, y_max_main, cx_main=find_features_of_lines(contours_line_hors) - - slope_lines_org_hor=slope_lines_org[slope_lines==0] - args=np.array( range(len(slope_lines) )) - len_x=separators_closeup.shape[1]/5.0 - - dist_y=np.abs(y_max_main-y_min_main) - - args_hor=args[slope_lines==0] - 
dist_x_hor=dist_x[slope_lines==0] - y_min_main_hor=y_min_main[slope_lines==0] - y_max_main_hor=y_max_main[slope_lines==0] - x_min_main_hor=x_min_main[slope_lines==0] - x_max_main_hor=x_max_main[slope_lines==0] - dist_y_hor=dist_y[slope_lines==0] - cy_main_hor=cy_main[slope_lines==0] - - args_hor=args_hor[dist_x_hor>=len_x/2.0] - x_max_main_hor=x_max_main_hor[dist_x_hor>=len_x/2.0] - x_min_main_hor=x_min_main_hor[dist_x_hor>=len_x/2.0] - cy_main_hor=cy_main_hor[dist_x_hor>=len_x/2.0] - y_min_main_hor=y_min_main_hor[dist_x_hor>=len_x/2.0] - y_max_main_hor=y_max_main_hor[dist_x_hor>=len_x/2.0] - dist_y_hor=dist_y_hor[dist_x_hor>=len_x/2.0] - - slope_lines_org_hor=slope_lines_org_hor[dist_x_hor>=len_x/2.0] - dist_x_hor=dist_x_hor[dist_x_hor>=len_x/2.0] - - - matrix_of_lines_ch=np.zeros((len(cy_main_hor)+len(cx_main_ver),10)) - - matrix_of_lines_ch[:len(cy_main_hor),0]=args_hor - matrix_of_lines_ch[len(cy_main_hor):,0]=args_ver - - - matrix_of_lines_ch[len(cy_main_hor):,1]=cx_main_ver - - matrix_of_lines_ch[:len(cy_main_hor),2]=x_min_main_hor+50#x_min_main_hor+150 - matrix_of_lines_ch[len(cy_main_hor):,2]=x_min_main_ver - - matrix_of_lines_ch[:len(cy_main_hor),3]=x_max_main_hor-50#x_max_main_hor-150 - matrix_of_lines_ch[len(cy_main_hor):,3]=x_max_main_ver - - matrix_of_lines_ch[:len(cy_main_hor),4]=dist_x_hor - matrix_of_lines_ch[len(cy_main_hor):,4]=dist_x_ver - - matrix_of_lines_ch[:len(cy_main_hor),5]=cy_main_hor - - - matrix_of_lines_ch[:len(cy_main_hor),6]=y_min_main_hor - matrix_of_lines_ch[len(cy_main_hor):,6]=y_min_main_ver - - matrix_of_lines_ch[:len(cy_main_hor),7]=y_max_main_hor - matrix_of_lines_ch[len(cy_main_hor):,7]=y_max_main_ver - - matrix_of_lines_ch[:len(cy_main_hor),8]=dist_y_hor - matrix_of_lines_ch[len(cy_main_hor):,8]=dist_y_ver - - - matrix_of_lines_ch[len(cy_main_hor):,9]=1 - - - + + contours_line_hors, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + slope_lines, dist_x, x_min_main, x_max_main, cy_main, slope_lines_org, y_min_main, y_max_main, cx_main = find_features_of_lines( + contours_line_hors) + + slope_lines_org_hor = slope_lines_org[slope_lines == 0] + args = np.array(range(len(slope_lines))) + len_x = separators_closeup.shape[1] / 5.0 + + dist_y = np.abs(y_max_main - y_min_main) + + args_hor = args[slope_lines == 0] + dist_x_hor = dist_x[slope_lines == 0] + y_min_main_hor = y_min_main[slope_lines == 0] + y_max_main_hor = y_max_main[slope_lines == 0] + x_min_main_hor = x_min_main[slope_lines == 0] + x_max_main_hor = x_max_main[slope_lines == 0] + dist_y_hor = dist_y[slope_lines == 0] + cy_main_hor = cy_main[slope_lines == 0] + + args_hor = args_hor[dist_x_hor >= len_x / 2.0] + x_max_main_hor = x_max_main_hor[dist_x_hor >= len_x / 2.0] + x_min_main_hor = x_min_main_hor[dist_x_hor >= len_x / 2.0] + cy_main_hor = cy_main_hor[dist_x_hor >= len_x / 2.0] + y_min_main_hor = y_min_main_hor[dist_x_hor >= len_x / 2.0] + y_max_main_hor = y_max_main_hor[dist_x_hor >= len_x / 2.0] + dist_y_hor = dist_y_hor[dist_x_hor >= len_x / 2.0] + + slope_lines_org_hor = slope_lines_org_hor[dist_x_hor >= len_x / 2.0] + dist_x_hor = dist_x_hor[dist_x_hor >= len_x / 2.0] + + matrix_of_lines_ch = np.zeros((len(cy_main_hor) + len(cx_main_ver), 10)) + + matrix_of_lines_ch[:len(cy_main_hor), 0] = args_hor + matrix_of_lines_ch[len(cy_main_hor):, 0] = args_ver + + matrix_of_lines_ch[len(cy_main_hor):, 1] = cx_main_ver + + matrix_of_lines_ch[:len(cy_main_hor), 2] = x_min_main_hor + 50 # x_min_main_hor+150 + matrix_of_lines_ch[len(cy_main_hor):, 2] = x_min_main_ver + 
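Reviewer aid: the block being reindented here fills matrix_of_lines_ch, a (n_separators, 10) feature table whose column meanings are only implicit in the index assignments. A toy reconstruction of that layout, with the column semantics inferred from the assignments in this hunk (a reading aid under those assumptions, not an authoritative spec):

import numpy as np

# Two horizontal and one vertical toy separator.
cy_hor = np.array([200., 600.])            # vertical centre of horizontal lines
x_min_hor = np.array([30., 40.])
x_max_hor = np.array([500., 480.])
cx_ver = np.array([250.])                  # horizontal centre of vertical lines
y_min_ver, y_max_ver = np.array([100.]), np.array([700.])

n_hor, n_ver = len(cy_hor), len(cx_ver)
m = np.zeros((n_hor + n_ver, 10))

# Columns, as used in the surrounding hunk:
# 0: index  1: cx (vertical only)  2: x_min  3: x_max  4: dist_x
# 5: cy (horizontal only)  6: y_min  7: y_max  8: dist_y  9: 1 for vertical
m[:n_hor, 0] = np.arange(n_hor)
m[n_hor:, 0] = np.arange(n_ver)
m[n_hor:, 1] = cx_ver
m[:n_hor, 2] = x_min_hor + 50              # shrunk line ends, as above
m[:n_hor, 3] = x_max_hor - 50
m[:n_hor, 4] = x_max_hor - x_min_hor
m[:n_hor, 5] = cy_hor
m[n_hor:, 6], m[n_hor:, 7] = y_min_ver, y_max_ver
m[n_hor:, 8] = y_max_ver - y_min_ver
m[n_hor:, 9] = 1                           # mark vertical separators

print(m)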
+ matrix_of_lines_ch[:len(cy_main_hor), 3] = x_max_main_hor - 50 # x_max_main_hor-150 + matrix_of_lines_ch[len(cy_main_hor):, 3] = x_max_main_ver + + matrix_of_lines_ch[:len(cy_main_hor), 4] = dist_x_hor + matrix_of_lines_ch[len(cy_main_hor):, 4] = dist_x_ver + + matrix_of_lines_ch[:len(cy_main_hor), 5] = cy_main_hor + + matrix_of_lines_ch[:len(cy_main_hor), 6] = y_min_main_hor + matrix_of_lines_ch[len(cy_main_hor):, 6] = y_min_main_ver + + matrix_of_lines_ch[:len(cy_main_hor), 7] = y_max_main_hor + matrix_of_lines_ch[len(cy_main_hor):, 7] = y_max_main_ver + + matrix_of_lines_ch[:len(cy_main_hor), 8] = dist_y_hor + matrix_of_lines_ch[len(cy_main_hor):, 8] = dist_y_ver + + matrix_of_lines_ch[len(cy_main_hor):, 9] = 1 + if contours_h is not None: - slope_lines_head,dist_x_head, x_min_main_head ,x_max_main_head ,cy_main_head,slope_lines_org_head,y_min_main_head, y_max_main_head, cx_main_head=find_features_of_lines(contours_h) - matrix_l_n=np.zeros((matrix_of_lines_ch.shape[0]+len(cy_main_head),matrix_of_lines_ch.shape[1])) - matrix_l_n[:matrix_of_lines_ch.shape[0],:]=np.copy(matrix_of_lines_ch[:,:]) - args_head=np.array(range(len(cy_main_head)))+len(cy_main_hor) - - matrix_l_n[matrix_of_lines_ch.shape[0]:,0]=args_head - matrix_l_n[matrix_of_lines_ch.shape[0]:,2]=x_min_main_head+30 - matrix_l_n[matrix_of_lines_ch.shape[0]:,3]=x_max_main_head-30 - - matrix_l_n[matrix_of_lines_ch.shape[0]:,4]=dist_x_head - - matrix_l_n[matrix_of_lines_ch.shape[0]:,5]=y_min_main_head-3-8 - matrix_l_n[matrix_of_lines_ch.shape[0]:,6]=y_min_main_head-5-8 - matrix_l_n[matrix_of_lines_ch.shape[0]:,7]=y_max_main_head#y_min_main_head+1-8 - matrix_l_n[matrix_of_lines_ch.shape[0]:,8]=4 - - matrix_of_lines_ch=np.copy(matrix_l_n) - - - cy_main_splitters=cy_main_hor[ (x_min_main_hor<=.16*region_pre_p.shape[1]) & (x_max_main_hor>=.84*region_pre_p.shape[1] )] - - cy_main_splitters=np.array( list(cy_main_splitters)+list(special_separators)) - + slope_lines_head, dist_x_head, x_min_main_head, x_max_main_head, cy_main_head, slope_lines_org_head, y_min_main_head, y_max_main_head, cx_main_head = find_features_of_lines( + contours_h) + matrix_l_n = np.zeros((matrix_of_lines_ch.shape[0] + len(cy_main_head), matrix_of_lines_ch.shape[1])) + matrix_l_n[:matrix_of_lines_ch.shape[0], :] = np.copy(matrix_of_lines_ch[:, :]) + args_head = np.array(range(len(cy_main_head))) + len(cy_main_hor) + + matrix_l_n[matrix_of_lines_ch.shape[0]:, 0] = args_head + matrix_l_n[matrix_of_lines_ch.shape[0]:, 2] = x_min_main_head + 30 + matrix_l_n[matrix_of_lines_ch.shape[0]:, 3] = x_max_main_head - 30 + + matrix_l_n[matrix_of_lines_ch.shape[0]:, 4] = dist_x_head + + matrix_l_n[matrix_of_lines_ch.shape[0]:, 5] = y_min_main_head - 3 - 8 + matrix_l_n[matrix_of_lines_ch.shape[0]:, 6] = y_min_main_head - 5 - 8 + matrix_l_n[matrix_of_lines_ch.shape[0]:, 7] = y_max_main_head # y_min_main_head+1-8 + matrix_l_n[matrix_of_lines_ch.shape[0]:, 8] = 4 + + matrix_of_lines_ch = np.copy(matrix_l_n) + + cy_main_splitters = cy_main_hor[ + (x_min_main_hor <= .16 * region_pre_p.shape[1]) & (x_max_main_hor >= .84 * region_pre_p.shape[1])] + + cy_main_splitters = np.array(list(cy_main_splitters) + list(special_separators)) + if contours_h is not None: try: - cy_main_splitters_head=cy_main_head[ (x_min_main_head<=.16*region_pre_p.shape[1]) & (x_max_main_head>=.84*region_pre_p.shape[1] )] - cy_main_splitters=np.array( list(cy_main_splitters)+list(cy_main_splitters_head)) + cy_main_splitters_head = cy_main_head[ + (x_min_main_head <= .16 * region_pre_p.shape[1]) & (x_max_main_head 
>= .84 * region_pre_p.shape[1])] + cy_main_splitters = np.array(list(cy_main_splitters) + list(cy_main_splitters_head)) except: pass - args_cy_splitter=np.argsort(cy_main_splitters) - - cy_main_splitters_sort=cy_main_splitters[args_cy_splitter] - - splitter_y_new=[] + args_cy_splitter = np.argsort(cy_main_splitters) + + cy_main_splitters_sort = cy_main_splitters[args_cy_splitter] + + splitter_y_new = [] splitter_y_new.append(0) for i in range(len(cy_main_splitters_sort)): - splitter_y_new.append( cy_main_splitters_sort[i] ) - + splitter_y_new.append(cy_main_splitters_sort[i]) + splitter_y_new.append(region_pre_p.shape[0]) - - splitter_y_new_diff=np.diff(splitter_y_new)/float(region_pre_p.shape[0])*100 - - args_big_parts=np.array(range(len(splitter_y_new_diff))) [ splitter_y_new_diff>22 ] - - - - regions_without_separators=return_regions_without_separators(region_pre_p) - - - length_y_threshold=regions_without_separators.shape[0]/4.0 - - num_col_fin=0 - peaks_neg_fin_fin=[] - + + splitter_y_new_diff = np.diff(splitter_y_new) / float(region_pre_p.shape[0]) * 100 + + args_big_parts = np.array(range(len(splitter_y_new_diff)))[splitter_y_new_diff > 22] + + regions_without_separators = return_regions_without_separators(region_pre_p) + + length_y_threshold = regions_without_separators.shape[0] / 4.0 + + num_col_fin = 0 + peaks_neg_fin_fin = [] + for itiles in args_big_parts: - - - regions_without_separators_tile=regions_without_separators[int(splitter_y_new[itiles]):int(splitter_y_new[itiles+1]),:,0] - #image_page_background_zero_tile=image_page_background_zero[int(splitter_y_new[itiles]):int(splitter_y_new[itiles+1]),:] - - #print(regions_without_separators_tile.shape) - ##plt.imshow(regions_without_separators_tile) - ##plt.show() - - #num_col, peaks_neg_fin=self.find_num_col(regions_without_separators_tile,multiplier=6.0) - - #regions_without_separators_tile=cv2.erode(regions_without_separators_tile,kernel,iterations = 3) - # + + regions_without_separators_tile = regions_without_separators[ + int(splitter_y_new[itiles]):int(splitter_y_new[itiles + 1]), :, 0] + # image_page_background_zero_tile=image_page_background_zero[int(splitter_y_new[itiles]):int(splitter_y_new[itiles+1]),:] + + # print(regions_without_separators_tile.shape) + # plt.imshow(regions_without_separators_tile) + # plt.show() + + # num_col, peaks_neg_fin=self.find_num_col(regions_without_separators_tile,multiplier=6.0) + + # regions_without_separators_tile=cv2.erode(regions_without_separators_tile,kernel,iterations = 3) + try: - num_col, peaks_neg_fin = find_num_col(regions_without_separators_tile, num_col_classifier, tables, multiplier=7.0) + num_col, peaks_neg_fin = find_num_col(regions_without_separators_tile, num_col_classifier, tables, + multiplier=7.0) except: num_col = 0 peaks_neg_fin = [] - - if num_col>num_col_fin: - num_col_fin=num_col - peaks_neg_fin_fin=peaks_neg_fin - - - if len(args_big_parts)==1 and (len(peaks_neg_fin_fin)+1)=500] - peaks_neg_fin=peaks_neg_fin[peaks_neg_fin<=(vertical.shape[1]-500)] - peaks_neg_fin_fin=peaks_neg_fin[:] - - #print(peaks_neg_fin_fin,'peaks_neg_fin_fintaza') - - - return num_col_fin, peaks_neg_fin_fin,matrix_of_lines_ch,splitter_y_new,separators_closeup_n - - -def return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_without_separators, matrix_of_lines_ch, num_col_classifier, erosion_hurts, tables, right2left_readingorder): + + if num_col > num_col_fin: + num_col_fin = num_col + peaks_neg_fin_fin = peaks_neg_fin + + if len(args_big_parts) == 1 and 
(len(peaks_neg_fin_fin) + 1) < num_col_classifier: + peaks_neg_fin = find_num_col_by_vertical_lines(vertical) + peaks_neg_fin = peaks_neg_fin[peaks_neg_fin >= 500] + peaks_neg_fin = peaks_neg_fin[peaks_neg_fin <= (vertical.shape[1] - 500)] + peaks_neg_fin_fin = peaks_neg_fin[:] + + # print(peaks_neg_fin_fin,'peaks_neg_fin_fintaza') + + return num_col_fin, peaks_neg_fin_fin, matrix_of_lines_ch, splitter_y_new, separators_closeup_n + + +def return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_without_separators, matrix_of_lines_ch, + num_col_classifier, erosion_hurts, tables, right2left_readingorder): if right2left_readingorder: - regions_without_separators = cv2.flip(regions_without_separators,1) - boxes=[] + regions_without_separators = cv2.flip(regions_without_separators, 1) + boxes = [] peaks_neg_tot_tables = [] - for i in range(len(splitter_y_new)-1): - #print(splitter_y_new[i],splitter_y_new[i+1]) - matrix_new=matrix_of_lines_ch[:,:][ (matrix_of_lines_ch[:,6]> splitter_y_new[i] ) & (matrix_of_lines_ch[:,7]< splitter_y_new[i+1] ) ] - #print(len( matrix_new[:,9][matrix_new[:,9]==1] )) - - #print(matrix_new[:,8][matrix_new[:,9]==1],'gaddaaa') - + for i in range(len(splitter_y_new) - 1): + # print(splitter_y_new[i],splitter_y_new[i+1]) + matrix_new = matrix_of_lines_ch[:, :][ + (matrix_of_lines_ch[:, 6] > splitter_y_new[i]) & (matrix_of_lines_ch[:, 7] < splitter_y_new[i + 1])] + # print(len( matrix_new[:,9][matrix_new[:,9]==1] )) + + # print(matrix_new[:,8][matrix_new[:,9]==1],'gaddaaa') + # check to see is there any vertical separator to find holes. - if 1>0:#len( matrix_new[:,9][matrix_new[:,9]==1] )>0 and np.max(matrix_new[:,8][matrix_new[:,9]==1])>=0.1*(np.abs(splitter_y_new[i+1]-splitter_y_new[i] )): - + if 1 > 0: # len( matrix_new[:,9][matrix_new[:,9]==1] )>0 and np.max(matrix_new[:,8][matrix_new[:,9]==1])>=0.1*(np.abs(splitter_y_new[i+1]-splitter_y_new[i] )): + try: if erosion_hurts: - num_col, peaks_neg_fin=find_num_col(regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:], num_col_classifier, tables, multiplier=6.) + num_col, peaks_neg_fin = find_num_col( + regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i + 1]), :], + num_col_classifier, tables, multiplier=6.) else: - num_col, peaks_neg_fin=find_num_col(regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:],num_col_classifier, tables, multiplier=7.) + num_col, peaks_neg_fin = find_num_col( + regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i + 1]), :], + num_col_classifier, tables, multiplier=7.) 
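Reviewer aid: the logic reformatted around this point splits the page at strong horizontal separators, keeps only bands taller than 22 % of the page height, and retains the largest per-band column count. A compact sketch of that band selection with toy numbers (the per-band counts below are hypothetical placeholders for what find_num_col would return):

import numpy as np

page_height = 1000
# y positions of strong horizontal splitters, with the page borders added.
splitter_y = [0, 120, 180, 700, 1000]

# Band heights as a percentage of the page height.
band_pct = np.diff(splitter_y) / float(page_height) * 100
big_bands = np.array(range(len(band_pct)))[band_pct > 22]

# Hypothetical column counts detected in the two big bands.
cols_per_band = {2: 3, 3: 2}

num_col_fin = 0
for b in big_bands:
    if cols_per_band.get(b, 0) > num_col_fin:
        num_col_fin = cols_per_band[b]

print(big_bands, num_col_fin)  # [2 3] 3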
except: - peaks_neg_fin=[] + peaks_neg_fin = [] num_col = 0 - try: - peaks_neg_fin_org=np.copy(peaks_neg_fin) - if (len(peaks_neg_fin)+1)=len(peaks_neg_fin2): - peaks_neg_fin=list(np.copy(peaks_neg_fin1)) + peaks_neg_fin2 = [] + + if len(peaks_neg_fin1) >= len(peaks_neg_fin2): + peaks_neg_fin = list(np.copy(peaks_neg_fin1)) else: - peaks_neg_fin=list(np.copy(peaks_neg_fin2)) - - - - peaks_neg_fin=list(np.array(peaks_neg_fin)+peaks_neg_fin_early[i_n]) - - if i_n!=(len(peaks_neg_fin_early)-2): - peaks_neg_fin_rev.append(peaks_neg_fin_early[i_n+1]) - #print(peaks_neg_fin,'peaks_neg_fin') - peaks_neg_fin_rev=peaks_neg_fin_rev+peaks_neg_fin - - - - - - if len(peaks_neg_fin_rev)>=len(peaks_neg_fin_org): - peaks_neg_fin=list(np.sort(peaks_neg_fin_rev)) - num_col=len(peaks_neg_fin) + peaks_neg_fin = list(np.copy(peaks_neg_fin2)) + + peaks_neg_fin = list(np.array(peaks_neg_fin) + peaks_neg_fin_early[i_n]) + + if i_n != (len(peaks_neg_fin_early) - 2): + peaks_neg_fin_rev.append(peaks_neg_fin_early[i_n + 1]) + # print(peaks_neg_fin,'peaks_neg_fin') + peaks_neg_fin_rev = peaks_neg_fin_rev + peaks_neg_fin + + if len(peaks_neg_fin_rev) >= len(peaks_neg_fin_org): + peaks_neg_fin = list(np.sort(peaks_neg_fin_rev)) + num_col = len(peaks_neg_fin) else: - peaks_neg_fin=list(np.copy(peaks_neg_fin_org)) - num_col=len(peaks_neg_fin) - - #print(peaks_neg_fin,'peaks_neg_fin') + peaks_neg_fin = list(np.copy(peaks_neg_fin_org)) + num_col = len(peaks_neg_fin) + + # print(peaks_neg_fin,'peaks_neg_fin') except: pass - #num_col, peaks_neg_fin=find_num_col(regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:],multiplier=7.0) - x_min_hor_some=matrix_new[:,2][ (matrix_new[:,9]==0) ] - x_max_hor_some=matrix_new[:,3][ (matrix_new[:,9]==0) ] - cy_hor_some=matrix_new[:,5][ (matrix_new[:,9]==0) ] - cy_hor_diff=matrix_new[:,7][ (matrix_new[:,9]==0) ] - arg_org_hor_some=matrix_new[:,0][ (matrix_new[:,9]==0) ] - + # num_col, peaks_neg_fin=find_num_col(regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:],multiplier=7.0) + x_min_hor_some = matrix_new[:, 2][(matrix_new[:, 9] == 0)] + x_max_hor_some = matrix_new[:, 3][(matrix_new[:, 9] == 0)] + cy_hor_some = matrix_new[:, 5][(matrix_new[:, 9] == 0)] + cy_hor_diff = matrix_new[:, 7][(matrix_new[:, 9] == 0)] + arg_org_hor_some = matrix_new[:, 0][(matrix_new[:, 9] == 0)] + if right2left_readingorder: x_max_hor_some_new = regions_without_separators.shape[1] - x_min_hor_some x_min_hor_some_new = regions_without_separators.shape[1] - x_max_hor_some - - x_min_hor_some =list(np.copy(x_min_hor_some_new)) - x_max_hor_some =list(np.copy(x_max_hor_some_new)) - - - - - - peaks_neg_tot=return_points_with_boundies(peaks_neg_fin,0, regions_without_separators[:,:].shape[1]) - + + x_min_hor_some = list(np.copy(x_min_hor_some_new)) + x_max_hor_some = list(np.copy(x_max_hor_some_new)) + + peaks_neg_tot = return_points_with_boundies(peaks_neg_fin, 0, regions_without_separators[:, :].shape[1]) + peaks_neg_tot_tables.append(peaks_neg_tot) - - reading_order_type,x_starting,x_ending,y_type_2,y_diff_type_2,y_lines_without_mother,x_start_without_mother,x_end_without_mother,there_is_sep_with_child,y_lines_with_child_without_mother,x_start_with_child_without_mother,x_end_with_child_without_mother,new_main_sep_y=return_x_start_end_mothers_childs_and_type_of_reading_order(x_min_hor_some,x_max_hor_some,cy_hor_some,peaks_neg_tot,cy_hor_diff) - - if (reading_order_type==1) or (reading_order_type==0 and (len(y_lines_without_mother)>=2 or there_is_sep_with_child==1)): + 
reading_order_type, x_starting, x_ending, y_type_2, y_diff_type_2, y_lines_without_mother, x_start_without_mother, x_end_without_mother, there_is_sep_with_child, y_lines_with_child_without_mother, x_start_with_child_without_mother, x_end_with_child_without_mother, new_main_sep_y = return_x_start_end_mothers_childs_and_type_of_reading_order( + x_min_hor_some, x_max_hor_some, cy_hor_some, peaks_neg_tot, cy_hor_diff) + + if (reading_order_type == 1) or ( + reading_order_type == 0 and (len(y_lines_without_mother) >= 2 or there_is_sep_with_child == 1)): - try: - y_grenze=int(splitter_y_new[i])+300 - - - - #check if there is a big separator in this y_mains_sep_ohne_grenzen - - args_early_ys=np.array(range(len(y_type_2))) - - #print(args_early_ys,'args_early_ys') - #print(int(splitter_y_new[i]),int(splitter_y_new[i+1])) - - y_type_2_up=np.array(y_type_2)[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] - x_starting_up=np.array(x_starting)[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] - x_ending_up=np.array(x_ending)[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] - y_diff_type_2_up=np.array(y_diff_type_2)[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] - args_up=args_early_ys[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] - - - - if len(y_type_2_up)>0: - y_main_separator_up=y_type_2_up[(x_starting_up==0) & (x_ending_up==(len(peaks_neg_tot)-1) )] - y_diff_main_separator_up=y_diff_type_2_up[(x_starting_up==0) & (x_ending_up==(len(peaks_neg_tot)-1) )] - args_main_to_deleted=args_up[(x_starting_up==0) & (x_ending_up==(len(peaks_neg_tot)-1) )] - #print(y_main_separator_up,y_diff_main_separator_up,args_main_to_deleted,'fffffjammmm') - - if len(y_diff_main_separator_up)>0: - args_to_be_kept=np.array( list( set(args_early_ys)-set(args_main_to_deleted) ) ) - #print(args_to_be_kept,'args_to_be_kept') - boxes.append([0,peaks_neg_tot[len(peaks_neg_tot)-1],int(splitter_y_new[i]),int( np.max(y_diff_main_separator_up))]) - splitter_y_new[i]=[ np.max(y_diff_main_separator_up) ][0] - - #print(splitter_y_new[i],'splitter_y_new[i]') - y_type_2=np.array(y_type_2)[args_to_be_kept] - x_starting=np.array(x_starting)[args_to_be_kept] - x_ending=np.array(x_ending)[args_to_be_kept] - y_diff_type_2=np.array(y_diff_type_2)[args_to_be_kept] - - #print('galdiha') - y_grenze=int(splitter_y_new[i])+200 - - - args_early_ys2=np.array(range(len(y_type_2))) - y_type_2_up=np.array(y_type_2)[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] - x_starting_up=np.array(x_starting)[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] - x_ending_up=np.array(x_ending)[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] - y_diff_type_2_up=np.array(y_diff_type_2)[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] - args_up2=args_early_ys2[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] - - - #print(y_type_2_up,x_starting_up,x_ending_up,'didid') - - nodes_in=[] + y_grenze = int(splitter_y_new[i]) + 300 + + # check if there is a big separator in this y_mains_sep_ohne_grenzen + + args_early_ys = np.array(range(len(y_type_2))) + + # print(args_early_ys,'args_early_ys') + # print(int(splitter_y_new[i]),int(splitter_y_new[i+1])) + + y_type_2_up = np.array(y_type_2)[ + (np.array(y_type_2) > int(splitter_y_new[i])) & (np.array(y_type_2) <= 
y_grenze)] + x_starting_up = np.array(x_starting)[ + (np.array(y_type_2) > int(splitter_y_new[i])) & (np.array(y_type_2) <= y_grenze)] + x_ending_up = np.array(x_ending)[ + (np.array(y_type_2) > int(splitter_y_new[i])) & (np.array(y_type_2) <= y_grenze)] + y_diff_type_2_up = np.array(y_diff_type_2)[ + (np.array(y_type_2) > int(splitter_y_new[i])) & (np.array(y_type_2) <= y_grenze)] + args_up = args_early_ys[ + (np.array(y_type_2) > int(splitter_y_new[i])) & (np.array(y_type_2) <= y_grenze)] + + if len(y_type_2_up) > 0: + y_main_separator_up = y_type_2_up[ + (x_starting_up == 0) & (x_ending_up == (len(peaks_neg_tot) - 1))] + y_diff_main_separator_up = y_diff_type_2_up[ + (x_starting_up == 0) & (x_ending_up == (len(peaks_neg_tot) - 1))] + args_main_to_deleted = args_up[(x_starting_up == 0) & (x_ending_up == (len(peaks_neg_tot) - 1))] + # print(y_main_separator_up,y_diff_main_separator_up,args_main_to_deleted,'fffffjammmm') + + if len(y_diff_main_separator_up) > 0: + args_to_be_kept = np.array(list(set(args_early_ys) - set(args_main_to_deleted))) + # print(args_to_be_kept,'args_to_be_kept') + boxes.append([0, peaks_neg_tot[len(peaks_neg_tot) - 1], int(splitter_y_new[i]), + int(np.max(y_diff_main_separator_up))]) + splitter_y_new[i] = [np.max(y_diff_main_separator_up)][0] + + # print(splitter_y_new[i],'splitter_y_new[i]') + y_type_2 = np.array(y_type_2)[args_to_be_kept] + x_starting = np.array(x_starting)[args_to_be_kept] + x_ending = np.array(x_ending)[args_to_be_kept] + y_diff_type_2 = np.array(y_diff_type_2)[args_to_be_kept] + + # print('galdiha') + y_grenze = int(splitter_y_new[i]) + 200 + + args_early_ys2 = np.array(range(len(y_type_2))) + y_type_2_up = np.array(y_type_2)[ + (np.array(y_type_2) > int(splitter_y_new[i])) & (np.array(y_type_2) <= y_grenze)] + x_starting_up = np.array(x_starting)[ + (np.array(y_type_2) > int(splitter_y_new[i])) & (np.array(y_type_2) <= y_grenze)] + x_ending_up = np.array(x_ending)[ + (np.array(y_type_2) > int(splitter_y_new[i])) & (np.array(y_type_2) <= y_grenze)] + y_diff_type_2_up = np.array(y_diff_type_2)[ + (np.array(y_type_2) > int(splitter_y_new[i])) & (np.array(y_type_2) <= y_grenze)] + args_up2 = args_early_ys2[ + (np.array(y_type_2) > int(splitter_y_new[i])) & (np.array(y_type_2) <= y_grenze)] + + # print(y_type_2_up,x_starting_up,x_ending_up,'didid') + + nodes_in = [] for ij in range(len(x_starting_up)): - nodes_in=nodes_in+list(np.array(range(x_starting_up[ij],x_ending_up[ij]))) - - #print(np.unique(nodes_in),'nodes_in') - - if set(np.unique(nodes_in))==set(np.array(range(len(peaks_neg_tot)-1)) ): + nodes_in = nodes_in + list(np.array(range(x_starting_up[ij], x_ending_up[ij]))) + + # print(np.unique(nodes_in),'nodes_in') + + if set(np.unique(nodes_in)) == set(np.array(range(len(peaks_neg_tot) - 1))): pass - elif set( np.unique(nodes_in) )==set( np.array(range(1,len(peaks_neg_tot)-1)) ): + elif set(np.unique(nodes_in)) == set(np.array(range(1, len(peaks_neg_tot) - 1))): pass else: - #print('burdaydikh') - args_to_be_kept2=np.array( list( set(args_early_ys2)-set(args_up2) ) ) - - if len(args_to_be_kept2)>0: - y_type_2=np.array(y_type_2)[args_to_be_kept2] - x_starting=np.array(x_starting)[args_to_be_kept2] - x_ending=np.array(x_ending)[args_to_be_kept2] - y_diff_type_2=np.array(y_diff_type_2)[args_to_be_kept2] + # print('burdaydikh') + args_to_be_kept2 = np.array(list(set(args_early_ys2) - set(args_up2))) + + if len(args_to_be_kept2) > 0: + y_type_2 = np.array(y_type_2)[args_to_be_kept2] + x_starting = np.array(x_starting)[args_to_be_kept2] + 
x_ending = np.array(x_ending)[args_to_be_kept2] + y_diff_type_2 = np.array(y_diff_type_2)[args_to_be_kept2] else: pass - - #print('burdaydikh2') - - - - elif len(y_diff_main_separator_up)==0: - nodes_in=[] + + # print('burdaydikh2') + + elif len(y_diff_main_separator_up) == 0: + nodes_in = [] for ij in range(len(x_starting_up)): - nodes_in=nodes_in+list(np.array(range(x_starting_up[ij],x_ending_up[ij]))) - - #print(np.unique(nodes_in),'nodes_in2') - #print(np.array(range(len(peaks_neg_tot)-1)),'np.array(range(len(peaks_neg_tot)-1))') - - - - if set(np.unique(nodes_in))==set(np.array(range(len(peaks_neg_tot)-1)) ): + nodes_in = nodes_in + list(np.array(range(x_starting_up[ij], x_ending_up[ij]))) + + # print(np.unique(nodes_in),'nodes_in2') + # print(np.array(range(len(peaks_neg_tot)-1)),'np.array(range(len(peaks_neg_tot)-1))') + + if set(np.unique(nodes_in)) == set(np.array(range(len(peaks_neg_tot) - 1))): pass - elif set(np.unique(nodes_in) )==set( np.array(range(1,len(peaks_neg_tot)-1)) ): + elif set(np.unique(nodes_in)) == set(np.array(range(1, len(peaks_neg_tot) - 1))): pass else: - #print('burdaydikh') - #print(args_early_ys,'args_early_ys') - #print(args_up,'args_up') - args_to_be_kept2=np.array( list( set(args_early_ys)-set(args_up) ) ) - - #print(args_to_be_kept2,'args_to_be_kept2') - - #print(len(y_type_2),len(x_starting),len(x_ending),len(y_diff_type_2)) - - if len(args_to_be_kept2)>0: - y_type_2=np.array(y_type_2)[args_to_be_kept2] - x_starting=np.array(x_starting)[args_to_be_kept2] - x_ending=np.array(x_ending)[args_to_be_kept2] - y_diff_type_2=np.array(y_diff_type_2)[args_to_be_kept2] + # print('burdaydikh') + # print(args_early_ys,'args_early_ys') + # print(args_up,'args_up') + args_to_be_kept2 = np.array(list(set(args_early_ys) - set(args_up))) + + # print(args_to_be_kept2,'args_to_be_kept2') + + # print(len(y_type_2),len(x_starting),len(x_ending),len(y_diff_type_2)) + + if len(args_to_be_kept2) > 0: + y_type_2 = np.array(y_type_2)[args_to_be_kept2] + x_starting = np.array(x_starting)[args_to_be_kept2] + x_ending = np.array(x_ending)[args_to_be_kept2] + y_diff_type_2 = np.array(y_diff_type_2)[args_to_be_kept2] else: pass - - #print('burdaydikh2') - - - - - - - x_starting=np.array(x_starting) - x_ending=np.array(x_ending) - y_type_2=np.array(y_type_2) - y_diff_type_2_up=np.array(y_diff_type_2_up) - - #int(splitter_y_new[i]) - - y_lines_by_order=[] - x_start_by_order=[] - x_end_by_order=[] - - if (len(x_end_with_child_without_mother)==0 and reading_order_type==0) or reading_order_type==1: - - - if reading_order_type==1: + + # print('burdaydikh2') + + x_starting = np.array(x_starting) + x_ending = np.array(x_ending) + y_type_2 = np.array(y_type_2) + y_diff_type_2_up = np.array(y_diff_type_2_up) + + # int(splitter_y_new[i]) + + y_lines_by_order = [] + x_start_by_order = [] + x_end_by_order = [] + + if ( + len(x_end_with_child_without_mother) == 0 and reading_order_type == 0) or reading_order_type == 1: + + if reading_order_type == 1: y_lines_by_order.append(int(splitter_y_new[i])) x_start_by_order.append(0) - x_end_by_order.append(len(peaks_neg_tot)-2) + x_end_by_order.append(len(peaks_neg_tot) - 2) else: - #print(x_start_without_mother,x_end_without_mother,peaks_neg_tot,'dodo') - - columns_covered_by_mothers=[] - + # print(x_start_without_mother,x_end_without_mother,peaks_neg_tot,'dodo') + + columns_covered_by_mothers = [] + for dj in range(len(x_start_without_mother)): - 
columns_covered_by_mothers=columns_covered_by_mothers+list(np.array(range(x_start_without_mother[dj],x_end_without_mother[dj])) ) - columns_covered_by_mothers=list(set(columns_covered_by_mothers)) - - all_columns=np.array(range(len(peaks_neg_tot)-1)) - - columns_not_covered=list( set(all_columns)-set(columns_covered_by_mothers) ) - - - y_type_2=list(y_type_2) - x_starting=list(x_starting) - x_ending=list(x_ending) - + columns_covered_by_mothers = columns_covered_by_mothers + list( + np.array(range(x_start_without_mother[dj], x_end_without_mother[dj]))) + columns_covered_by_mothers = list(set(columns_covered_by_mothers)) + + all_columns = np.array(range(len(peaks_neg_tot) - 1)) + + columns_not_covered = list(set(all_columns) - set(columns_covered_by_mothers)) + + y_type_2 = list(y_type_2) + x_starting = list(x_starting) + x_ending = list(x_ending) + for lj in columns_not_covered: y_type_2.append(int(splitter_y_new[i])) x_starting.append(lj) - x_ending.append(lj+1) - ##y_lines_by_order.append(int(splitter_y_new[i])) - ##x_start_by_order.append(0) + x_ending.append(lj + 1) + # y_lines_by_order.append(int(splitter_y_new[i])) + # x_start_by_order.append(0) for lk in range(len(x_start_without_mother)): y_type_2.append(int(splitter_y_new[i])) x_starting.append(x_start_without_mother[lk]) x_ending.append(x_end_without_mother[lk]) - - - y_type_2=np.array(y_type_2) - x_starting=np.array(x_starting) - x_ending=np.array(x_ending) - - - - - ind_args=np.array(range(len(y_type_2))) - #ind_args=np.array(ind_args) - #print(ind_args,'ind_args') - for column in range(len(peaks_neg_tot)-1): - #print(column,'column') - ind_args_in_col=ind_args[x_starting==column] - #print('babali2') - #print(ind_args_in_col,'ind_args_in_col') - ind_args_in_col=np.array(ind_args_in_col) - #print(len(y_type_2)) - y_column=y_type_2[ind_args_in_col] - x_start_column=x_starting[ind_args_in_col] - x_end_column=x_ending[ind_args_in_col] - #print('babali3') - ind_args_col_sorted=np.argsort(y_column) - y_col_sort=y_column[ind_args_col_sorted] - x_start_column_sort=x_start_column[ind_args_col_sorted] - x_end_column_sort=x_end_column[ind_args_col_sorted] - #print('babali4') + + y_type_2 = np.array(y_type_2) + x_starting = np.array(x_starting) + x_ending = np.array(x_ending) + + ind_args = np.array(range(len(y_type_2))) + # ind_args=np.array(ind_args) + # print(ind_args,'ind_args') + for column in range(len(peaks_neg_tot) - 1): + # print(column,'column') + ind_args_in_col = ind_args[x_starting == column] + # print('babali2') + # print(ind_args_in_col,'ind_args_in_col') + ind_args_in_col = np.array(ind_args_in_col) + # print(len(y_type_2)) + y_column = y_type_2[ind_args_in_col] + x_start_column = x_starting[ind_args_in_col] + x_end_column = x_ending[ind_args_in_col] + # print('babali3') + ind_args_col_sorted = np.argsort(y_column) + y_col_sort = y_column[ind_args_col_sorted] + x_start_column_sort = x_start_column[ind_args_col_sorted] + x_end_column_sort = x_end_column[ind_args_col_sorted] + # print('babali4') for ii in range(len(y_col_sort)): - #print('babali5') + # print('babali5') y_lines_by_order.append(y_col_sort[ii]) x_start_by_order.append(x_start_column_sort[ii]) - x_end_by_order.append(x_end_column_sort[ii]-1) - + x_end_by_order.append(x_end_column_sort[ii] - 1) + else: - #print(x_start_without_mother,x_end_without_mother,peaks_neg_tot,'dodo') - - columns_covered_by_mothers=[] - + # print(x_start_without_mother,x_end_without_mother,peaks_neg_tot,'dodo') + + columns_covered_by_mothers = [] + for dj in 
range(len(x_start_without_mother)): - columns_covered_by_mothers=columns_covered_by_mothers+list(np.array(range(x_start_without_mother[dj],x_end_without_mother[dj])) ) - columns_covered_by_mothers=list(set(columns_covered_by_mothers)) - - all_columns=np.array(range(len(peaks_neg_tot)-1)) - - columns_not_covered=list( set(all_columns)-set(columns_covered_by_mothers) ) - - - y_type_2=list(y_type_2) - x_starting=list(x_starting) - x_ending=list(x_ending) - + columns_covered_by_mothers = columns_covered_by_mothers + list( + np.array(range(x_start_without_mother[dj], x_end_without_mother[dj]))) + columns_covered_by_mothers = list(set(columns_covered_by_mothers)) + + all_columns = np.array(range(len(peaks_neg_tot) - 1)) + + columns_not_covered = list(set(all_columns) - set(columns_covered_by_mothers)) + + y_type_2 = list(y_type_2) + x_starting = list(x_starting) + x_ending = list(x_ending) + for lj in columns_not_covered: y_type_2.append(int(splitter_y_new[i])) x_starting.append(lj) - x_ending.append(lj+1) - ##y_lines_by_order.append(int(splitter_y_new[i])) - ##x_start_by_order.append(0) + x_ending.append(lj + 1) + # y_lines_by_order.append(int(splitter_y_new[i])) + # x_start_by_order.append(0) for lk in range(len(x_start_without_mother)): y_type_2.append(int(splitter_y_new[i])) x_starting.append(x_start_without_mother[lk]) x_ending.append(x_end_without_mother[lk]) - - - y_type_2=np.array(y_type_2) - x_starting=np.array(x_starting) - x_ending=np.array(x_ending) - - columns_covered_by_with_child_no_mothers=[] - + + y_type_2 = np.array(y_type_2) + x_starting = np.array(x_starting) + x_ending = np.array(x_ending) + + columns_covered_by_with_child_no_mothers = [] + for dj in range(len(x_end_with_child_without_mother)): - columns_covered_by_with_child_no_mothers=columns_covered_by_with_child_no_mothers+list(np.array(range(x_start_with_child_without_mother[dj],x_end_with_child_without_mother[dj])) ) - columns_covered_by_with_child_no_mothers=list(set(columns_covered_by_with_child_no_mothers)) - - all_columns=np.array(range(len(peaks_neg_tot)-1)) - - columns_not_covered_child_no_mother=list( set(all_columns)-set(columns_covered_by_with_child_no_mothers) ) - #indexes_to_be_spanned=[] - for i_s in range( len(x_end_with_child_without_mother) ): + columns_covered_by_with_child_no_mothers = columns_covered_by_with_child_no_mothers + list( + np.array( + range(x_start_with_child_without_mother[dj], x_end_with_child_without_mother[dj]))) + columns_covered_by_with_child_no_mothers = list(set(columns_covered_by_with_child_no_mothers)) + + all_columns = np.array(range(len(peaks_neg_tot) - 1)) + + columns_not_covered_child_no_mother = list( + set(all_columns) - set(columns_covered_by_with_child_no_mothers)) + # indexes_to_be_spanned=[] + for i_s in range(len(x_end_with_child_without_mother)): columns_not_covered_child_no_mother.append(x_start_with_child_without_mother[i_s]) - - - - columns_not_covered_child_no_mother=np.sort(columns_not_covered_child_no_mother) - - - - ind_args=np.array(range(len(y_type_2))) - - - + + columns_not_covered_child_no_mother = np.sort(columns_not_covered_child_no_mother) + + ind_args = np.array(range(len(y_type_2))) + for i_s_nc in columns_not_covered_child_no_mother: if i_s_nc in x_start_with_child_without_mother: - x_end_biggest_column=np.array(x_end_with_child_without_mother)[np.array(x_start_with_child_without_mother)==i_s_nc][0] - args_all_biggest_lines=ind_args[(x_starting==i_s_nc) & (x_ending==x_end_biggest_column)] - - args_all_biggest_lines=np.array(args_all_biggest_lines) - 
y_column_nc=y_type_2[args_all_biggest_lines] - x_start_column_nc=x_starting[args_all_biggest_lines] - x_end_column_nc=x_ending[args_all_biggest_lines] - - y_column_nc=np.sort(y_column_nc) - + x_end_biggest_column = np.array(x_end_with_child_without_mother)[ + np.array(x_start_with_child_without_mother) == i_s_nc][0] + args_all_biggest_lines = ind_args[ + (x_starting == i_s_nc) & (x_ending == x_end_biggest_column)] + + args_all_biggest_lines = np.array(args_all_biggest_lines) + y_column_nc = y_type_2[args_all_biggest_lines] + x_start_column_nc = x_starting[args_all_biggest_lines] + x_end_column_nc = x_ending[args_all_biggest_lines] + + y_column_nc = np.sort(y_column_nc) + for i_c in range(len(y_column_nc)): - if i_c==(len(y_column_nc)-1): - ind_all_lines_betweeen_nm_wc=ind_args[(y_type_2>y_column_nc[i_c]) & (y_type_2=i_s_nc) & (x_ending<=x_end_biggest_column)] + if i_c == (len(y_column_nc) - 1): + ind_all_lines_betweeen_nm_wc = ind_args[ + (y_type_2 > y_column_nc[i_c]) & (y_type_2 < int(splitter_y_new[i + 1])) & ( + x_starting >= i_s_nc) & (x_ending <= x_end_biggest_column)] else: - ind_all_lines_betweeen_nm_wc=ind_args[(y_type_2>y_column_nc[i_c]) & (y_type_2=i_s_nc) & (x_ending<=x_end_biggest_column)] - - y_all_between_nm_wc=y_type_2[ind_all_lines_betweeen_nm_wc] - x_starting_all_between_nm_wc=x_starting[ind_all_lines_betweeen_nm_wc] - x_ending_all_between_nm_wc=x_ending[ind_all_lines_betweeen_nm_wc] - - x_diff_all_between_nm_wc=x_ending_all_between_nm_wc-x_starting_all_between_nm_wc - - - if len(x_diff_all_between_nm_wc)>0: - biggest=np.argmax(x_diff_all_between_nm_wc) - - - columns_covered_by_mothers=[] - + ind_all_lines_betweeen_nm_wc = ind_args[ + (y_type_2 > y_column_nc[i_c]) & (y_type_2 < y_column_nc[i_c + 1]) & ( + x_starting >= i_s_nc) & (x_ending <= x_end_biggest_column)] + + y_all_between_nm_wc = y_type_2[ind_all_lines_betweeen_nm_wc] + x_starting_all_between_nm_wc = x_starting[ind_all_lines_betweeen_nm_wc] + x_ending_all_between_nm_wc = x_ending[ind_all_lines_betweeen_nm_wc] + + x_diff_all_between_nm_wc = x_ending_all_between_nm_wc - x_starting_all_between_nm_wc + + if len(x_diff_all_between_nm_wc) > 0: + biggest = np.argmax(x_diff_all_between_nm_wc) + + columns_covered_by_mothers = [] + for dj in range(len(x_starting_all_between_nm_wc)): - columns_covered_by_mothers=columns_covered_by_mothers+list(np.array(range(x_starting_all_between_nm_wc[dj],x_ending_all_between_nm_wc[dj])) ) - columns_covered_by_mothers=list(set(columns_covered_by_mothers)) - - - all_columns=np.array(range(i_s_nc,x_end_biggest_column)) - - columns_not_covered=list( set(all_columns)-set(columns_covered_by_mothers) ) - - should_longest_line_be_extended=0 - if len(x_diff_all_between_nm_wc)>0 and set( list( np.array(range(x_starting_all_between_nm_wc[biggest],x_ending_all_between_nm_wc[biggest])) )+list(columns_not_covered) ) !=set(all_columns): - should_longest_line_be_extended=1 - - index_lines_so_close_to_top_separator=np.array(range(len(y_all_between_nm_wc)))[(y_all_between_nm_wc>y_column_nc[i_c]) & (y_all_between_nm_wc<=(y_column_nc[i_c]+500))] - - - if len(index_lines_so_close_to_top_separator)>0: - indexes_remained_after_deleting_closed_lines= np.array( list ( set( list( np.array(range(len(y_all_between_nm_wc))) ) ) -set(list( index_lines_so_close_to_top_separator) ) ) ) - - if len(indexes_remained_after_deleting_closed_lines)>0: - y_all_between_nm_wc=y_all_between_nm_wc[indexes_remained_after_deleting_closed_lines] - 
x_starting_all_between_nm_wc=x_starting_all_between_nm_wc[indexes_remained_after_deleting_closed_lines] - x_ending_all_between_nm_wc=x_ending_all_between_nm_wc[indexes_remained_after_deleting_closed_lines] - - - y_all_between_nm_wc=list(y_all_between_nm_wc) - x_starting_all_between_nm_wc=list(x_starting_all_between_nm_wc) - x_ending_all_between_nm_wc=list(x_ending_all_between_nm_wc) - - - y_all_between_nm_wc.append(y_column_nc[i_c] ) + columns_covered_by_mothers = columns_covered_by_mothers + list(np.array( + range(x_starting_all_between_nm_wc[dj], x_ending_all_between_nm_wc[dj]))) + columns_covered_by_mothers = list(set(columns_covered_by_mothers)) + + all_columns = np.array(range(i_s_nc, x_end_biggest_column)) + + columns_not_covered = list(set(all_columns) - set(columns_covered_by_mothers)) + + should_longest_line_be_extended = 0 + if len(x_diff_all_between_nm_wc) > 0 and set(list(np.array( + range(x_starting_all_between_nm_wc[biggest], + x_ending_all_between_nm_wc[biggest]))) + list( + columns_not_covered)) != set(all_columns): + should_longest_line_be_extended = 1 + + index_lines_so_close_to_top_separator = np.array(range(len(y_all_between_nm_wc)))[ + (y_all_between_nm_wc > y_column_nc[i_c]) & ( + y_all_between_nm_wc <= (y_column_nc[i_c] + 500))] + + if len(index_lines_so_close_to_top_separator) > 0: + indexes_remained_after_deleting_closed_lines = np.array(list( + set(list(np.array(range(len(y_all_between_nm_wc))))) - set( + list(index_lines_so_close_to_top_separator)))) + + if len(indexes_remained_after_deleting_closed_lines) > 0: + y_all_between_nm_wc = y_all_between_nm_wc[ + indexes_remained_after_deleting_closed_lines] + x_starting_all_between_nm_wc = x_starting_all_between_nm_wc[ + indexes_remained_after_deleting_closed_lines] + x_ending_all_between_nm_wc = x_ending_all_between_nm_wc[ + indexes_remained_after_deleting_closed_lines] + + y_all_between_nm_wc = list(y_all_between_nm_wc) + x_starting_all_between_nm_wc = list(x_starting_all_between_nm_wc) + x_ending_all_between_nm_wc = list(x_ending_all_between_nm_wc) + + y_all_between_nm_wc.append(y_column_nc[i_c]) x_starting_all_between_nm_wc.append(i_s_nc) x_ending_all_between_nm_wc.append(x_end_biggest_column) - - - - - y_all_between_nm_wc=list(y_all_between_nm_wc) - x_starting_all_between_nm_wc=list(x_starting_all_between_nm_wc) - x_ending_all_between_nm_wc=list(x_ending_all_between_nm_wc) - - if len(x_diff_all_between_nm_wc)>0: + + y_all_between_nm_wc = list(y_all_between_nm_wc) + x_starting_all_between_nm_wc = list(x_starting_all_between_nm_wc) + x_ending_all_between_nm_wc = list(x_ending_all_between_nm_wc) + + if len(x_diff_all_between_nm_wc) > 0: try: x_starting_all_between_nm_wc.append(x_starting_all_between_nm_wc[biggest]) x_ending_all_between_nm_wc.append(x_ending_all_between_nm_wc[biggest]) @@ -2121,240 +2116,219 @@ def return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_witho except: pass - - for c_n_c in columns_not_covered: y_all_between_nm_wc.append(y_column_nc[i_c]) x_starting_all_between_nm_wc.append(c_n_c) - x_ending_all_between_nm_wc.append(c_n_c+1) - - y_all_between_nm_wc=np.array(y_all_between_nm_wc) - x_starting_all_between_nm_wc=np.array(x_starting_all_between_nm_wc) - x_ending_all_between_nm_wc=np.array(x_ending_all_between_nm_wc) - - ind_args_between=np.array(range(len(x_ending_all_between_nm_wc))) - - for column in range(i_s_nc,x_end_biggest_column): - ind_args_in_col=ind_args_between[x_starting_all_between_nm_wc==column] - #print('babali2') - #print(ind_args_in_col,'ind_args_in_col') - 
ind_args_in_col=np.array(ind_args_in_col) - #print(len(y_type_2)) - y_column=y_all_between_nm_wc[ind_args_in_col] - x_start_column=x_starting_all_between_nm_wc[ind_args_in_col] - x_end_column=x_ending_all_between_nm_wc[ind_args_in_col] - #print('babali3') - ind_args_col_sorted=np.argsort(y_column) - y_col_sort=y_column[ind_args_col_sorted] - x_start_column_sort=x_start_column[ind_args_col_sorted] - x_end_column_sort=x_end_column[ind_args_col_sorted] - #print('babali4') + x_ending_all_between_nm_wc.append(c_n_c + 1) + + y_all_between_nm_wc = np.array(y_all_between_nm_wc) + x_starting_all_between_nm_wc = np.array(x_starting_all_between_nm_wc) + x_ending_all_between_nm_wc = np.array(x_ending_all_between_nm_wc) + + ind_args_between = np.array(range(len(x_ending_all_between_nm_wc))) + + for column in range(i_s_nc, x_end_biggest_column): + ind_args_in_col = ind_args_between[x_starting_all_between_nm_wc == column] + # print('babali2') + # print(ind_args_in_col,'ind_args_in_col') + ind_args_in_col = np.array(ind_args_in_col) + # print(len(y_type_2)) + y_column = y_all_between_nm_wc[ind_args_in_col] + x_start_column = x_starting_all_between_nm_wc[ind_args_in_col] + x_end_column = x_ending_all_between_nm_wc[ind_args_in_col] + # print('babali3') + ind_args_col_sorted = np.argsort(y_column) + y_col_sort = y_column[ind_args_col_sorted] + x_start_column_sort = x_start_column[ind_args_col_sorted] + x_end_column_sort = x_end_column[ind_args_col_sorted] + # print('babali4') for ii in range(len(y_col_sort)): - #print('babali5') + # print('babali5') y_lines_by_order.append(y_col_sort[ii]) x_start_by_order.append(x_start_column_sort[ii]) - x_end_by_order.append(x_end_column_sort[ii]-1) - - - - - - + x_end_by_order.append(x_end_column_sort[ii] - 1) + else: - - #print(column,'column') - ind_args_in_col=ind_args[x_starting==i_s_nc] - #print('babali2') - #print(ind_args_in_col,'ind_args_in_col') - ind_args_in_col=np.array(ind_args_in_col) - #print(len(y_type_2)) - y_column=y_type_2[ind_args_in_col] - x_start_column=x_starting[ind_args_in_col] - x_end_column=x_ending[ind_args_in_col] - #print('babali3') - ind_args_col_sorted=np.argsort(y_column) - y_col_sort=y_column[ind_args_col_sorted] - x_start_column_sort=x_start_column[ind_args_col_sorted] - x_end_column_sort=x_end_column[ind_args_col_sorted] - #print('babali4') + + # print(column,'column') + ind_args_in_col = ind_args[x_starting == i_s_nc] + # print('babali2') + # print(ind_args_in_col,'ind_args_in_col') + ind_args_in_col = np.array(ind_args_in_col) + # print(len(y_type_2)) + y_column = y_type_2[ind_args_in_col] + x_start_column = x_starting[ind_args_in_col] + x_end_column = x_ending[ind_args_in_col] + # print('babali3') + ind_args_col_sorted = np.argsort(y_column) + y_col_sort = y_column[ind_args_col_sorted] + x_start_column_sort = x_start_column[ind_args_col_sorted] + x_end_column_sort = x_end_column[ind_args_col_sorted] + # print('babali4') for ii in range(len(y_col_sort)): y_lines_by_order.append(y_col_sort[ii]) x_start_by_order.append(x_start_column_sort[ii]) - x_end_by_order.append(x_end_column_sort[ii]-1) + x_end_by_order.append(x_end_column_sort[ii] - 1) - - for il in range(len(y_lines_by_order)): - - - y_copy=list( np.copy(y_lines_by_order) ) - x_start_copy=list( np.copy(x_start_by_order) ) - x_end_copy=list ( np.copy(x_end_by_order) ) - - #print(y_copy,'y_copy') - y_itself=y_copy.pop(il) - x_start_itself=x_start_copy.pop(il) - x_end_itself=x_end_copy.pop(il) - - #print(y_copy,'y_copy2') - - for column in range(x_start_itself,x_end_itself+1): - 
#print(column,'cols') - y_in_cols=[] + + y_copy = list(np.copy(y_lines_by_order)) + x_start_copy = list(np.copy(x_start_by_order)) + x_end_copy = list(np.copy(x_end_by_order)) + + # print(y_copy,'y_copy') + y_itself = y_copy.pop(il) + x_start_itself = x_start_copy.pop(il) + x_end_itself = x_end_copy.pop(il) + + # print(y_copy,'y_copy2') + + for column in range(x_start_itself, x_end_itself + 1): + # print(column,'cols') + y_in_cols = [] for yic in range(len(y_copy)): - #print('burda') - if y_copy[yic]>y_itself and column>=x_start_copy[yic] and column<=x_end_copy[yic]: + # print('burda') + if y_copy[yic] > y_itself and column >= x_start_copy[yic] and column <= x_end_copy[yic]: y_in_cols.append(y_copy[yic]) - #print('burda2') - #print(y_in_cols,'y_in_cols') - if len(y_in_cols)>0: - y_down=np.min(y_in_cols) + # print('burda2') + # print(y_in_cols,'y_in_cols') + if len(y_in_cols) > 0: + y_down = np.min(y_in_cols) else: - y_down=[int(splitter_y_new[i+1])][0] - #print(y_itself,'y_itself') - boxes.append([peaks_neg_tot[column],peaks_neg_tot[column+1],y_itself,y_down]) + y_down = [int(splitter_y_new[i + 1])][0] + # print(y_itself,'y_itself') + boxes.append([peaks_neg_tot[column], peaks_neg_tot[column + 1], y_itself, y_down]) except: - boxes.append([0,peaks_neg_tot[len(peaks_neg_tot)-1],int(splitter_y_new[i]),int(splitter_y_new[i+1])]) - + boxes.append( + [0, peaks_neg_tot[len(peaks_neg_tot) - 1], int(splitter_y_new[i]), int(splitter_y_new[i + 1])]) - else: - y_lines_by_order=[] - x_start_by_order=[] - x_end_by_order=[] - if len(x_starting)>0: - all_columns = np.array(range(len(peaks_neg_tot)-1)) - columns_covered_by_lines_covered_more_than_2col=[] - + y_lines_by_order = [] + x_start_by_order = [] + x_end_by_order = [] + if len(x_starting) > 0: + all_columns = np.array(range(len(peaks_neg_tot) - 1)) + columns_covered_by_lines_covered_more_than_2col = [] + for dj in range(len(x_starting)): - if set( list(np.array(range(x_starting[dj],x_ending[dj])) ) ) == set(all_columns): + if set(list(np.array(range(x_starting[dj], x_ending[dj])))) == set(all_columns): pass else: - columns_covered_by_lines_covered_more_than_2col=columns_covered_by_lines_covered_more_than_2col+list(np.array(range(x_starting[dj],x_ending[dj])) ) - columns_covered_by_lines_covered_more_than_2col=list(set(columns_covered_by_lines_covered_more_than_2col)) - - - - columns_not_covered=list( set(all_columns)-set(columns_covered_by_lines_covered_more_than_2col) ) - - - y_type_2=list(y_type_2) - x_starting=list(x_starting) - x_ending=list(x_ending) - + columns_covered_by_lines_covered_more_than_2col = columns_covered_by_lines_covered_more_than_2col + list( + np.array(range(x_starting[dj], x_ending[dj]))) + columns_covered_by_lines_covered_more_than_2col = list( + set(columns_covered_by_lines_covered_more_than_2col)) + + columns_not_covered = list(set(all_columns) - set(columns_covered_by_lines_covered_more_than_2col)) + + y_type_2 = list(y_type_2) + x_starting = list(x_starting) + x_ending = list(x_ending) + for lj in columns_not_covered: y_type_2.append(int(splitter_y_new[i])) x_starting.append(lj) - x_ending.append(lj+1) - ##y_lines_by_order.append(int(splitter_y_new[i])) - ##x_start_by_order.append(0) - - #y_type_2.append(int(splitter_y_new[i])) - #x_starting.append(x_starting[0]) - #x_ending.append(x_ending[0]) - - if len(new_main_sep_y)>0: + x_ending.append(lj + 1) + # y_lines_by_order.append(int(splitter_y_new[i])) + # x_start_by_order.append(0) + + # y_type_2.append(int(splitter_y_new[i])) + # x_starting.append(x_starting[0]) + # 
x_ending.append(x_ending[0]) + + if len(new_main_sep_y) > 0: y_type_2.append(int(splitter_y_new[i])) x_starting.append(0) - x_ending.append(len(peaks_neg_tot)-1) + x_ending.append(len(peaks_neg_tot) - 1) else: y_type_2.append(int(splitter_y_new[i])) x_starting.append(x_starting[0]) x_ending.append(x_ending[0]) - - - y_type_2=np.array(y_type_2) - x_starting=np.array(x_starting) - x_ending=np.array(x_ending) + + y_type_2 = np.array(y_type_2) + x_starting = np.array(x_starting) + x_ending = np.array(x_ending) else: - all_columns=np.array(range(len(peaks_neg_tot)-1)) - columns_not_covered=list( set(all_columns) ) - - - y_type_2=list(y_type_2) - x_starting=list(x_starting) - x_ending=list(x_ending) - + all_columns = np.array(range(len(peaks_neg_tot) - 1)) + columns_not_covered = list(set(all_columns)) + + y_type_2 = list(y_type_2) + x_starting = list(x_starting) + x_ending = list(x_ending) + for lj in columns_not_covered: y_type_2.append(int(splitter_y_new[i])) x_starting.append(lj) - x_ending.append(lj+1) - ##y_lines_by_order.append(int(splitter_y_new[i])) - ##x_start_by_order.append(0) - - - - y_type_2=np.array(y_type_2) - x_starting=np.array(x_starting) - x_ending=np.array(x_ending) - - ind_args=np.array(range(len(y_type_2))) - #ind_args=np.array(ind_args) - for column in range(len(peaks_neg_tot)-1): - #print(column,'column') - ind_args_in_col=ind_args[x_starting==column] - ind_args_in_col=np.array(ind_args_in_col) - #print(len(y_type_2)) - y_column=y_type_2[ind_args_in_col] - x_start_column=x_starting[ind_args_in_col] - x_end_column=x_ending[ind_args_in_col] - - ind_args_col_sorted=np.argsort(y_column) - y_col_sort=y_column[ind_args_col_sorted] - x_start_column_sort=x_start_column[ind_args_col_sorted] - x_end_column_sort=x_end_column[ind_args_col_sorted] - #print('babali4') + x_ending.append(lj + 1) + # y_lines_by_order.append(int(splitter_y_new[i])) + # x_start_by_order.append(0) + + y_type_2 = np.array(y_type_2) + x_starting = np.array(x_starting) + x_ending = np.array(x_ending) + + ind_args = np.array(range(len(y_type_2))) + # ind_args=np.array(ind_args) + for column in range(len(peaks_neg_tot) - 1): + # print(column,'column') + ind_args_in_col = ind_args[x_starting == column] + ind_args_in_col = np.array(ind_args_in_col) + # print(len(y_type_2)) + y_column = y_type_2[ind_args_in_col] + x_start_column = x_starting[ind_args_in_col] + x_end_column = x_ending[ind_args_in_col] + + ind_args_col_sorted = np.argsort(y_column) + y_col_sort = y_column[ind_args_col_sorted] + x_start_column_sort = x_start_column[ind_args_col_sorted] + x_end_column_sort = x_end_column[ind_args_col_sorted] + # print('babali4') for ii in range(len(y_col_sort)): - #print('babali5') + # print('babali5') y_lines_by_order.append(y_col_sort[ii]) x_start_by_order.append(x_start_column_sort[ii]) - x_end_by_order.append(x_end_column_sort[ii]-1) - - + x_end_by_order.append(x_end_column_sort[ii] - 1) + for il in range(len(y_lines_by_order)): - - - y_copy=list( np.copy(y_lines_by_order) ) - x_start_copy=list( np.copy(x_start_by_order) ) - x_end_copy=list ( np.copy(x_end_by_order) ) - - #print(y_copy,'y_copy') - y_itself=y_copy.pop(il) - x_start_itself=x_start_copy.pop(il) - x_end_itself=x_end_copy.pop(il) - - #print(y_copy,'y_copy2') - - for column in range(x_start_itself,x_end_itself+1): - #print(column,'cols') - y_in_cols=[] + + y_copy = list(np.copy(y_lines_by_order)) + x_start_copy = list(np.copy(x_start_by_order)) + x_end_copy = list(np.copy(x_end_by_order)) + + # print(y_copy,'y_copy') + y_itself = y_copy.pop(il) + 
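The reordering loop restyled in this part of the hunk turns each separator segment into reading-order boxes: every column the segment spans becomes a box that reaches down to the next separator covering that column, or to the bottom splitter if none exists. A minimal sketch of that step under those assumptions (the helper name and demo inputs are invented; x_end is treated as inclusive here, mirroring the "- 1" adjustment in the patch):

import numpy as np

def build_boxes(y_sep, x_start, x_end, peaks_neg_tot, splitter_bottom):
    """Turn separator segments into [x_left, x_right, y_top, y_bottom] boxes."""
    boxes = []
    for il in range(len(y_sep)):
        y_itself = y_sep[il]
        for column in range(x_start[il], x_end[il] + 1):
            # y of other separators below this one that also cover `column`
            y_below = [y_sep[j] for j in range(len(y_sep))
                       if j != il
                       and y_sep[j] > y_itself
                       and x_start[j] <= column <= x_end[j]]
            y_down = min(y_below) if y_below else splitter_bottom
            boxes.append([peaks_neg_tot[column], peaks_neg_tot[column + 1],
                          y_itself, y_down])
    return boxes

# Two separators on a two-column page (column borders at x = 0, 300, 600)
print(build_boxes(y_sep=[100, 400], x_start=[0, 0], x_end=[1, 0],
                  peaks_neg_tot=[0, 300, 600], splitter_bottom=800))
# -> [[0, 300, 100, 400], [300, 600, 100, 800], [0, 300, 400, 800]]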
x_start_itself = x_start_copy.pop(il) + x_end_itself = x_end_copy.pop(il) + + # print(y_copy,'y_copy2') + + for column in range(x_start_itself, x_end_itself + 1): + # print(column,'cols') + y_in_cols = [] for yic in range(len(y_copy)): - #print('burda') - if y_copy[yic]>y_itself and column>=x_start_copy[yic] and column<=x_end_copy[yic]: + # print('burda') + if y_copy[yic] > y_itself and column >= x_start_copy[yic] and column <= x_end_copy[yic]: y_in_cols.append(y_copy[yic]) - #print('burda2') - #print(y_in_cols,'y_in_cols') - if len(y_in_cols)>0: - y_down=np.min(y_in_cols) + # print('burda2') + # print(y_in_cols,'y_in_cols') + if len(y_in_cols) > 0: + y_down = np.min(y_in_cols) else: - y_down=[int(splitter_y_new[i+1])][0] - #print(y_itself,'y_itself') - boxes.append([peaks_neg_tot[column],peaks_neg_tot[column+1],y_itself,y_down]) + y_down = [int(splitter_y_new[i + 1])][0] + # print(y_itself,'y_itself') + boxes.append([peaks_neg_tot[column], peaks_neg_tot[column + 1], y_itself, y_down]) + # else: + # boxes.append([ 0, regions_without_separators[:,:].shape[1] ,splitter_y_new[i],splitter_y_new[i+1]]) - - #else: - #boxes.append([ 0, regions_without_separators[:,:].shape[1] ,splitter_y_new[i],splitter_y_new[i+1]]) - - if right2left_readingorder: + if right2left_readingorder: peaks_neg_tot_tables_new = [] - if len(peaks_neg_tot_tables)>=1: + if len(peaks_neg_tot_tables) >= 1: for peaks_tab_ind in peaks_neg_tot_tables: peaks_neg_tot_tables_ind = regions_without_separators.shape[1] - np.array(peaks_tab_ind) peaks_neg_tot_tables_ind = list(peaks_neg_tot_tables_ind[::-1]) peaks_neg_tot_tables_new.append(peaks_neg_tot_tables_ind) - - + for i in range(len(boxes)): x_start_new = regions_without_separators.shape[1] - boxes[i][1] x_end_new = regions_without_separators.shape[1] - boxes[i][0] diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index 53b39b5..0c7e90a 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -5,6 +5,8 @@ from shapely import geometry from .rotate import rotate_image, rotation_image_new from multiprocessing import Process, Queue, cpu_count from multiprocessing import Pool + + def contours_in_same_horizon(cy_main_hor): X1 = np.zeros((len(cy_main_hor), len(cy_main_hor))) X2 = np.zeros((len(cy_main_hor), len(cy_main_hor))) @@ -22,6 +24,7 @@ def contours_in_same_horizon(cy_main_hor): all_args.append(list(set(list_h))) return np.unique(np.array(all_args, dtype=object)) + def find_contours_mean_y_diff(contours_main): M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] @@ -29,7 +32,6 @@ def find_contours_mean_y_diff(contours_main): def get_text_region_boxes_by_given_contours(contours): - kernel = np.ones((5, 5), np.uint8) boxes = [] contours_new = [] @@ -42,39 +44,43 @@ def get_text_region_boxes_by_given_contours(contours): del contours return boxes, contours_new + def filter_contours_area_of_image(image, contours, hierarchy, max_area, min_area): found_polygons_early = list() - for jv,c in enumerate(contours): + for jv, c in enumerate(contours): if len(c) < 3: # A polygon cannot have less than 3 points continue polygon = geometry.Polygon([point[0] for point in c]) area = polygon.area - if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]) and hierarchy[0][jv][3] == -1: # and hierarchy[0][jv][3]==-1 : + if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * 
np.prod(image.shape[:2]) and \ + hierarchy[0][jv][3] == -1: # and hierarchy[0][jv][3]==-1 : found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.uint)) return found_polygons_early + def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, min_area): found_polygons_early = list() - for jv,c in enumerate(contours): + for jv, c in enumerate(contours): if len(c) < 3: # A polygon cannot have less than 3 points continue polygon = geometry.Polygon([point[0] for point in c]) # area = cv2.contourArea(c) area = polygon.area - ##print(np.prod(thresh.shape[:2])) + # print(np.prod(thresh.shape[:2])) # Check that polygon has area greater than minimal area # print(hierarchy[0][jv][3],hierarchy ) - if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 : + if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod( + image.shape[:2]): # and hierarchy[0][jv][3]==-1 : # print(c[0][0][1]) found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.int32)) return found_polygons_early -def find_new_features_of_contours(contours_main): +def find_new_features_of_contours(contours_main): areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))]) M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] @@ -85,7 +91,8 @@ def find_new_features_of_contours(contours_main): argmin_x_main = np.array([np.argmin(contours_main[j][:, 0, 0]) for j in range(len(contours_main))]) x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0, 0] for j in range(len(contours_main))]) - y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0, 1] for j in range(len(contours_main))]) + y_corr_x_min_from_argmin = np.array( + [contours_main[j][argmin_x_main[j], 0, 1] for j in range(len(contours_main))]) x_max_main = np.array([np.max(contours_main[j][:, 0, 0]) for j in range(len(contours_main))]) @@ -107,27 +114,28 @@ def find_new_features_of_contours(contours_main): # dis_x=np.abs(x_max_main-x_min_main) return cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin -def find_features_of_contours(contours_main): - - areas_main=np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))]) - M_main=[cv2.moments(contours_main[j]) for j in range(len(contours_main))] - cx_main=[(M_main[j]['m10']/(M_main[j]['m00']+1e-32)) for j in range(len(M_main))] - cy_main=[(M_main[j]['m01']/(M_main[j]['m00']+1e-32)) for j in range(len(M_main))] - x_min_main=np.array([np.min(contours_main[j][:,0,0]) for j in range(len(contours_main))]) - x_max_main=np.array([np.max(contours_main[j][:,0,0]) for j in range(len(contours_main))]) - y_min_main=np.array([np.min(contours_main[j][:,0,1]) for j in range(len(contours_main))]) - y_max_main=np.array([np.max(contours_main[j][:,0,1]) for j in range(len(contours_main))]) +def find_features_of_contours(contours_main): + areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))]) + M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] + cx_main = [(M_main[j]['m10'] / (M_main[j]['m00'] + 1e-32)) for j in range(len(M_main))] + cy_main = [(M_main[j]['m01'] / (M_main[j]['m00'] + 1e-32)) for j in range(len(M_main))] + x_min_main = np.array([np.min(contours_main[j][:, 0, 
0]) for j in range(len(contours_main))]) + x_max_main = np.array([np.max(contours_main[j][:, 0, 0]) for j in range(len(contours_main))]) + + y_min_main = np.array([np.min(contours_main[j][:, 0, 1]) for j in range(len(contours_main))]) + y_max_main = np.array([np.max(contours_main[j][:, 0, 1]) for j in range(len(contours_main))]) - return y_min_main, y_max_main + + def return_parent_contours(contours, hierarchy): contours_parent = [contours[i] for i in range(len(contours)) if hierarchy[0][i][3] == -1] return contours_parent -def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002): +def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002): # pixels of images are identified by 5 if len(region_pre_p.shape) == 3: cnts_images = (region_pre_p[:, :, 0] == pixel) * 1 @@ -145,12 +153,13 @@ def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002): return contours_imgs + def do_work_of_contours_in_image(queue_of_all_params, contours_per_process, indexes_r_con_per_pro, img, slope_first): cnts_org_per_each_subprocess = [] index_by_text_region_contours = [] for mv in range(len(contours_per_process)): index_by_text_region_contours.append(indexes_r_con_per_pro[mv]) - + img_copy = np.zeros(img.shape) img_copy = cv2.fillPoly(img_copy, pts=[contours_per_process[mv]], color=(1, 1, 1)) @@ -165,14 +174,12 @@ def do_work_of_contours_in_image(queue_of_all_params, contours_per_process, inde cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1]) cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0]) - cnts_org_per_each_subprocess.append(cont_int[0]) - queue_of_all_params.put([ cnts_org_per_each_subprocess, index_by_text_region_contours]) + queue_of_all_params.put([cnts_org_per_each_subprocess, index_by_text_region_contours]) def get_textregion_contours_in_org_image_multi(cnts, img, slope_first): - num_cores = cpu_count() queue_of_all_params = Queue() @@ -180,10 +187,10 @@ def get_textregion_contours_in_org_image_multi(cnts, img, slope_first): nh = np.linspace(0, len(cnts), num_cores + 1) indexes_by_text_con = np.array(range(len(cnts))) for i in range(num_cores): - contours_per_process = cnts[int(nh[i]) : int(nh[i + 1])] - indexes_text_con_per_process = indexes_by_text_con[int(nh[i]) : int(nh[i + 1])] + contours_per_process = cnts[int(nh[i]): int(nh[i + 1])] + indexes_text_con_per_process = indexes_by_text_con[int(nh[i]): int(nh[i + 1])] - processes.append(Process(target=do_work_of_contours_in_image, args=(queue_of_all_params, contours_per_process, indexes_text_con_per_process, img,slope_first ))) + processes.append(Process(target=do_work_of_contours_in_image, args=(queue_of_all_params, contours_per_process, indexes_text_con_per_process, img, slope_first))) for i in range(num_cores): processes[i].start() cnts_org = [] @@ -200,7 +207,9 @@ def get_textregion_contours_in_org_image_multi(cnts, img, slope_first): print(all_index_text_con) return cnts_org -def loop_contour_image(index_l, cnts,img, slope_first): + + +def loop_contour_image(index_l, cnts, img, slope_first): img_copy = np.zeros(img.shape) img_copy = cv2.fillPoly(img_copy, pts=[cnts[index_l]], color=(1, 1, 1)) @@ -209,7 +218,7 @@ def loop_contour_image(index_l, cnts,img, slope_first): # print(img.shape,'img') img_copy = rotation_image_new(img_copy, -slope_first) - ##print(img_copy.shape,'img_copy') + # print(img_copy.shape,'img_copy') # plt.imshow(img_copy) # plt.show() @@ -224,17 +233,17 @@ def loop_contour_image(index_l, 
cnts,img, slope_first): # print(np.shape(cont_int[0])) return cont_int[0] -def get_textregion_contours_in_org_image_multi2(cnts, img, slope_first): +def get_textregion_contours_in_org_image_multi2(cnts, img, slope_first): cnts_org = [] # print(cnts,'cnts') with Pool(cpu_count()) as p: - cnts_org = p.starmap(loop_contour_image, [(index_l,cnts, img,slope_first) for index_l in range(len(cnts))]) - + cnts_org = p.starmap(loop_contour_image, [(index_l, cnts, img, slope_first) for index_l in range(len(cnts))]) + return cnts_org -def get_textregion_contours_in_org_image(cnts, img, slope_first): +def get_textregion_contours_in_org_image(cnts, img, slope_first): cnts_org = [] # print(cnts,'cnts') for i in range(len(cnts)): @@ -246,7 +255,7 @@ def get_textregion_contours_in_org_image(cnts, img, slope_first): # print(img.shape,'img') img_copy = rotation_image_new(img_copy, -slope_first) - ##print(img_copy.shape,'img_copy') + # print(img_copy.shape,'img_copy') # plt.imshow(img_copy) # plt.show() @@ -263,17 +272,17 @@ def get_textregion_contours_in_org_image(cnts, img, slope_first): return cnts_org + def get_textregion_contours_in_org_image_light(cnts, img, slope_first): - h_o = img.shape[0] w_o = img.shape[1] - - img = cv2.resize(img, (int(img.shape[1]/3.), int(img.shape[0]/3.)), interpolation=cv2.INTER_NEAREST) - ##cnts = list( (np.array(cnts)/2).astype(np.int16) ) - #cnts = cnts/2 - cnts = [(i/ 3).astype(np.int32) for i in cnts] + + img = cv2.resize(img, (int(img.shape[1] / 3.), int(img.shape[0] / 3.)), interpolation=cv2.INTER_NEAREST) + # cnts = list( (np.array(cnts)/2).astype(np.int16) ) + # cnts = cnts/2 + cnts = [(i / 3).astype(np.int32) for i in cnts] cnts_org = [] - #print(cnts,'cnts') + # print(cnts,'cnts') for i in range(len(cnts)): img_copy = np.zeros(img.shape) img_copy = cv2.fillPoly(img_copy, pts=[cnts[i]], color=(1, 1, 1)) @@ -283,7 +292,7 @@ def get_textregion_contours_in_org_image_light(cnts, img, slope_first): # print(img.shape,'img') img_copy = rotation_image_new(img_copy, -slope_first) - ##print(img_copy.shape,'img_copy') + # print(img_copy.shape,'img_copy') # plt.imshow(img_copy) # plt.show() @@ -296,12 +305,12 @@ def get_textregion_contours_in_org_image_light(cnts, img, slope_first): cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1]) cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0]) # print(np.shape(cont_int[0])) - cnts_org.append(cont_int[0]*3) + cnts_org.append(cont_int[0] * 3) return cnts_org -def return_contours_of_interested_textline(region_pre_p, pixel): +def return_contours_of_interested_textline(region_pre_p, pixel): # pixels of images are identified by 5 if len(region_pre_p.shape) == 3: cnts_images = (region_pre_p[:, :, 0] == pixel) * 1 @@ -317,8 +326,8 @@ def return_contours_of_interested_textline(region_pre_p, pixel): contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=0.000000003) return contours_imgs -def return_contours_of_image(image): +def return_contours_of_image(image): if len(image.shape) == 2: image = np.repeat(image[:, :, np.newaxis], 3, axis=2) image = image.astype(np.uint8) @@ -329,8 +338,8 @@ def return_contours_of_image(image): contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) return contours, hierarchy -def return_contours_of_interested_region_by_min_size(region_pre_p, pixel, min_size=0.00003): +def return_contours_of_interested_region_by_min_size(region_pre_p, pixel, min_size=0.00003): # 
pixels of images are identified by 5 if len(region_pre_p.shape) == 3: cnts_images = (region_pre_p[:, :, 0] == pixel) * 1 @@ -348,8 +357,8 @@ def return_contours_of_interested_region_by_min_size(region_pre_p, pixel, min_si return contours_imgs -def return_contours_of_interested_region_by_size(region_pre_p, pixel, min_area, max_area): +def return_contours_of_interested_region_by_size(region_pre_p, pixel, min_area, max_area): # pixels of images are identified by 5 if len(region_pre_p.shape) == 3: cnts_images = (region_pre_p[:, :, 0] == pixel) * 1 @@ -367,4 +376,3 @@ def return_contours_of_interested_region_by_size(region_pre_p, pixel, min_area, img_ret = np.zeros((region_pre_p.shape[0], region_pre_p.shape[1], 3)) img_ret = cv2.fillPoly(img_ret, pts=contours_imgs, color=(1, 1, 1)) return img_ret[:, :, 0] - diff --git a/src/eynollah/utils/counter.py b/src/eynollah/utils/counter.py index 9a3ed70..ac32dc9 100644 --- a/src/eynollah/utils/counter.py +++ b/src/eynollah/utils/counter.py @@ -3,6 +3,7 @@ from collections import Counter REGION_ID_TEMPLATE = 'region_%04d' LINE_ID_TEMPLATE = 'region_%04d_line_%04d' + class EynollahIdCounter(): def __init__(self, region_idx=0, line_idx=0): diff --git a/src/eynollah/utils/drop_capitals.py b/src/eynollah/utils/drop_capitals.py index e12028f..084f11a 100644 --- a/src/eynollah/utils/drop_capitals.py +++ b/src/eynollah/utils/drop_capitals.py @@ -6,6 +6,7 @@ from .contour import ( return_parent_contours, ) + def adhere_drop_capital_region_into_corresponding_textline( text_regions_p, polygons_of_drop_capitals, @@ -26,8 +27,8 @@ def adhere_drop_capital_region_into_corresponding_textline( img_con_all = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1], 3)) for j_cont in range(len(contours_only_text_parent)): - img_con_all[all_box_coord[j_cont][0] : all_box_coord[j_cont][1], all_box_coord[j_cont][2] : all_box_coord[j_cont][3], 0] = (j_cont + 1) * 3 - # img_con_all=cv2.fillPoly(img_con_all,pts=[contours_only_text_parent[j_cont]],color=((j_cont+1)*3,(j_cont+1)*3,(j_cont+1)*3)) + img_con_all[all_box_coord[j_cont][0]: all_box_coord[j_cont][1], all_box_coord[j_cont][2]: all_box_coord[j_cont][3], 0] = (j_cont + 1) * 3 + # img_con_all=cv2.fillPoly(img_con_all,pts = [contours_only_text_parent[j_cont]],color = ((j_cont+1)*3,(j_cont+1)*3,(j_cont+1)*3)) # plt.imshow(img_con_all[:,:,0]) # plt.show() @@ -44,7 +45,7 @@ def adhere_drop_capital_region_into_corresponding_textline( # plt.imshow(img_con[:,:,0]) # plt.show() - ##img_con=cv2.dilate(img_con, kernel, iterations=30) + # img_con=cv2.dilate(img_con, kernel, iterations=30) # plt.imshow(img_con[:,:,0]) # plt.show() @@ -185,7 +186,7 @@ def adhere_drop_capital_region_into_corresponding_textline( # contours_biggest[:,0,1]=contours_biggest[:,0,1]#-all_box_coord[int(region_final)][0] # print(np.shape(contours_biggest),'contours_biggest') # print(np.shape(all_found_textline_polygons[int(region_final)][arg_min])) - ##contours_biggest=contours_biggest.reshape(np.shape(contours_biggest)[0],np.shape(contours_biggest)[2]) + # contours_biggest=contours_biggest.reshape(np.shape(contours_biggest)[0],np.shape(contours_biggest)[2]) all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest except: pass @@ -230,7 +231,7 @@ def adhere_drop_capital_region_into_corresponding_textline( contours_biggest[:, 0, 0] = contours_biggest[:, 0, 0] # -all_box_coord[int(region_final)][2] contours_biggest[:, 0, 1] = contours_biggest[:, 0, 1] # -all_box_coord[int(region_final)][0] - 
##contours_biggest=contours_biggest.reshape(np.shape(contours_biggest)[0],np.shape(contours_biggest)[2]) + # contours_biggest=contours_biggest.reshape(np.shape(contours_biggest)[0],np.shape(contours_biggest)[2]) all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest # all_found_textline_polygons[int(region_final)][arg_min]=contours_biggest @@ -239,49 +240,49 @@ def adhere_drop_capital_region_into_corresponding_textline( else: pass - ##cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) - ###print(all_box_coord[j_cont]) - ###print(cx_t) - ###print(cy_t) - ###print(cx_d[i_drop]) - ###print(cy_d[i_drop]) - ##y_lines=all_box_coord[int(region_final)][0]+np.array(cy_t) + # cx_t,cy_t ,_, _, _ ,_,_ = find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) + # print(all_box_coord[j_cont]) + # print(cx_t) + # print(cy_t) + # print(cx_d[i_drop]) + # print(cy_d[i_drop]) + # y_lines = all_box_coord[int(region_final)][0]+np.array(cy_t) - ##y_lines[y_lines 1: @@ -399,71 +400,72 @@ def adhere_drop_capital_region_into_corresponding_textline( else: pass - #####for i_drop in range(len(polygons_of_drop_capitals)): - #####for j_cont in range(len(contours_only_text_parent)): - #####img_con=np.zeros((text_regions_p.shape[0],text_regions_p.shape[1],3)) - #####img_con=cv2.fillPoly(img_con,pts=[polygons_of_drop_capitals[i_drop] ],color=(255,255,255)) - #####img_con=cv2.fillPoly(img_con,pts=[contours_only_text_parent[j_cont]],color=(255,255,255)) - - #####img_con=img_con.astype(np.uint8) - ######imgray = cv2.cvtColor(img_con, cv2.COLOR_BGR2GRAY) - ######ret, thresh = cv2.threshold(imgray, 0, 255, 0) - - ######contours_new,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) - - #####contours_new,hir_new=return_contours_of_image(img_con) - #####contours_new_parent=return_parent_contours( contours_new,hir_new) - ######plt.imshow(img_con) - ######plt.show() - #####try: - #####if len(contours_new_parent)==1: - ######print(all_found_textline_polygons[j_cont][0]) - #####cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_textline_polygons[j_cont]) - ######print(all_box_coord[j_cont]) - ######print(cx_t) - ######print(cy_t) - ######print(cx_d[i_drop]) - ######print(cy_d[i_drop]) - #####y_lines=all_box_coord[j_cont][0]+np.array(cy_t) - - ######print(y_lines) - - #####arg_min=np.argmin(np.abs(y_lines-y_min_d[i_drop]) ) - ######print(arg_min) - - #####cnt_nearest=np.copy(all_found_textline_polygons[j_cont][arg_min]) - #####cnt_nearest[:,0]=all_found_textline_polygons[j_cont][arg_min][:,0]+all_box_coord[j_cont][2] - #####cnt_nearest[:,1]=all_found_textline_polygons[j_cont][arg_min][:,1]+all_box_coord[j_cont][0] - - #####img_textlines=np.zeros((text_regions_p.shape[0],text_regions_p.shape[1],3)) - #####img_textlines=cv2.fillPoly(img_textlines,pts=[cnt_nearest],color=(255,255,255)) - #####img_textlines=cv2.fillPoly(img_textlines,pts=[polygons_of_drop_capitals[i_drop] ],color=(255,255,255)) - - #####img_textlines=img_textlines.astype(np.uint8) - #####imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY) - #####ret, thresh = cv2.threshold(imgray, 0, 255, 0) - - #####contours_combined,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) - - #####areas_cnt_text=np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))]) - - #####contours_biggest=contours_combined[np.argmax(areas_cnt_text)] - - ######print(np.shape(contours_biggest)) - 
######print(contours_biggest[:]) - #####contours_biggest[:,0,0]=contours_biggest[:,0,0]-all_box_coord[j_cont][2] - #####contours_biggest[:,0,1]=contours_biggest[:,0,1]-all_box_coord[j_cont][0] - - #####all_found_textline_polygons[j_cont][arg_min]=contours_biggest - ######print(contours_biggest) - ######plt.imshow(img_textlines[:,:,0]) - ######plt.show() - #####else: - #####pass - #####except: - #####pass + # for i_drop in range(len(polygons_of_drop_capitals)): + # for j_cont in range(len(contours_only_text_parent)): + # img_con=np.zeros((text_regions_p.shape[0],text_regions_p.shape[1],3)) + # img_con=cv2.fillPoly(img_con,pts=[polygons_of_drop_capitals[i_drop] ],color=(255,255,255)) + # img_con=cv2.fillPoly(img_con,pts=[contours_only_text_parent[j_cont]],color=(255,255,255)) + + # img_con=img_con.astype(np.uint8) + # imgray = cv2.cvtColor(img_con, cv2.COLOR_BGR2GRAY) + # ret, thresh = cv2.threshold(imgray, 0, 255, 0) + + # contours_new,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + + # contours_new,hir_new=return_contours_of_image(img_con) + # contours_new_parent=return_parent_contours( contours_new,hir_new) + # plt.imshow(img_con) + # plt.show() + # try: + # if len(contours_new_parent)==1: + # print(all_found_textline_polygons[j_cont][0]) + # cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_textline_polygons[j_cont]) + # print(all_box_coord[j_cont]) + # print(cx_t) + # print(cy_t) + # print(cx_d[i_drop]) + # print(cy_d[i_drop]) + # y_lines=all_box_coord[j_cont][0]+np.array(cy_t) + + # print(y_lines) + + # arg_min=np.argmin(np.abs(y_lines-y_min_d[i_drop]) ) + # print(arg_min) + + # cnt_nearest=np.copy(all_found_textline_polygons[j_cont][arg_min]) + # cnt_nearest[:,0]=all_found_textline_polygons[j_cont][arg_min][:,0]+all_box_coord[j_cont][2] + # cnt_nearest[:,1]=all_found_textline_polygons[j_cont][arg_min][:,1]+all_box_coord[j_cont][0] + + # img_textlines=np.zeros((text_regions_p.shape[0],text_regions_p.shape[1],3)) + # img_textlines=cv2.fillPoly(img_textlines,pts=[cnt_nearest],color=(255,255,255)) + # img_textlines=cv2.fillPoly(img_textlines,pts=[polygons_of_drop_capitals[i_drop] ],color=(255,255,255)) + + # img_textlines=img_textlines.astype(np.uint8) + # imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY) + # ret, thresh = cv2.threshold(imgray, 0, 255, 0) + + # contours_combined,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + + # areas_cnt_text=np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))]) + + # contours_biggest=contours_combined[np.argmax(areas_cnt_text)] + + # print(np.shape(contours_biggest)) + # print(contours_biggest[:]) + # contours_biggest[:,0,0]=contours_biggest[:,0,0]-all_box_coord[j_cont][2] + # contours_biggest[:,0,1]=contours_biggest[:,0,1]-all_box_coord[j_cont][0] + + # all_found_textline_polygons[j_cont][arg_min]=contours_biggest + # print(contours_biggest) + # plt.imshow(img_textlines[:,:,0]) + # plt.show() + # else: + # pass + # except: + # pass return all_found_textline_polygons + def filter_small_drop_capitals_from_no_patch_layout(layout_no_patch, layout1): drop_only = (layout_no_patch[:, :, 0] == 4) * 1 @@ -489,7 +491,7 @@ def filter_small_drop_capitals_from_no_patch_layout(layout_no_patch, layout1): if iou_of_box_and_contoure > 60 and weigh_to_height_ratio < 1.2 and height_to_weight_ratio < 2: map_of_drop_contour_bb = np.zeros((layout1.shape[0], layout1.shape[1])) - map_of_drop_contour_bb[y : y + h, x : x + w] = layout1[y : y + h, x : x + w] + 
map_of_drop_contour_bb[y: y + h, x: x + w] = layout1[y: y + h, x: x + w] if (((map_of_drop_contour_bb == 1) * 1).sum() / float(((map_of_drop_contour_bb == 5) * 1).sum()) * 100) >= 15: contours_drop_parent_final.append(contours_drop_parent[jj]) diff --git a/src/eynollah/utils/marginals.py b/src/eynollah/utils/marginals.py index 7c43de6..af7b9aa 100644 --- a/src/eynollah/utils/marginals.py +++ b/src/eynollah/utils/marginals.py @@ -3,250 +3,223 @@ import cv2 from scipy.signal import find_peaks from scipy.ndimage import gaussian_filter1d - from .contour import find_new_features_of_contours, return_contours_of_interested_region from .resize import resize_image from .rotate import rotate_image -def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, kernel=None): - mask_marginals=np.zeros((text_with_lines.shape[0],text_with_lines.shape[1])) - mask_marginals=mask_marginals.astype(np.uint8) +def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, kernel=None): + mask_marginals = np.zeros((text_with_lines.shape[0], text_with_lines.shape[1])) + mask_marginals = mask_marginals.astype(np.uint8) - text_with_lines=text_with_lines.astype(np.uint8) - ##text_with_lines=cv2.erode(text_with_lines,self.kernel,iterations=3) + text_with_lines = text_with_lines.astype(np.uint8) + # text_with_lines=cv2.erode(text_with_lines,self.kernel,iterations=3) - text_with_lines_eroded=cv2.erode(text_with_lines,kernel,iterations=5) + text_with_lines_eroded = cv2.erode(text_with_lines, kernel, iterations=5) - if text_with_lines.shape[0]<=1500: + if text_with_lines.shape[0] <= 1500: pass - elif text_with_lines.shape[0]>1500 and text_with_lines.shape[0]<=1800: - text_with_lines=resize_image(text_with_lines,int(text_with_lines.shape[0]*1.5),text_with_lines.shape[1]) - text_with_lines=cv2.erode(text_with_lines,kernel,iterations=5) - text_with_lines=resize_image(text_with_lines,text_with_lines_eroded.shape[0],text_with_lines_eroded.shape[1]) + elif text_with_lines.shape[0] > 1500 and text_with_lines.shape[0] <= 1800: + text_with_lines = resize_image(text_with_lines, int(text_with_lines.shape[0] * 1.5), text_with_lines.shape[1]) + text_with_lines = cv2.erode(text_with_lines, kernel, iterations=5) + text_with_lines = resize_image(text_with_lines, text_with_lines_eroded.shape[0], text_with_lines_eroded.shape[1]) else: - text_with_lines=resize_image(text_with_lines,int(text_with_lines.shape[0]*1.8),text_with_lines.shape[1]) - text_with_lines=cv2.erode(text_with_lines,kernel,iterations=7) - text_with_lines=resize_image(text_with_lines,text_with_lines_eroded.shape[0],text_with_lines_eroded.shape[1]) - + text_with_lines = resize_image(text_with_lines, int(text_with_lines.shape[0] * 1.8), text_with_lines.shape[1]) + text_with_lines = cv2.erode(text_with_lines, kernel, iterations=7) + text_with_lines = resize_image(text_with_lines, text_with_lines_eroded.shape[0], text_with_lines_eroded.shape[1]) - text_with_lines_y=text_with_lines.sum(axis=0) - text_with_lines_y_eroded=text_with_lines_eroded.sum(axis=0) + text_with_lines_y = text_with_lines.sum(axis=0) + text_with_lines_y_eroded = text_with_lines_eroded.sum(axis=0) - thickness_along_y_percent=text_with_lines_y_eroded.max()/(float(text_with_lines.shape[0]))*100 + thickness_along_y_percent = text_with_lines_y_eroded.max() / (float(text_with_lines.shape[0])) * 100 - #print(thickness_along_y_percent,'thickness_along_y_percent') + # print(thickness_along_y_percent,'thickness_along_y_percent') - if thickness_along_y_percent<30: - min_textline_thickness=8 - elif 
thickness_along_y_percent>=30 and thickness_along_y_percent<50: - min_textline_thickness=20 + if thickness_along_y_percent < 30: + min_textline_thickness = 8 + elif thickness_along_y_percent >= 30 and thickness_along_y_percent < 50: + min_textline_thickness = 20 else: - min_textline_thickness=40 - - - - if thickness_along_y_percent>=14: - - text_with_lines_y_rev=-1*text_with_lines_y[:] - #print(text_with_lines_y) - #print(text_with_lines_y_rev) - - - - - #plt.plot(text_with_lines_y) - #plt.show() - - - text_with_lines_y_rev=text_with_lines_y_rev-np.min(text_with_lines_y_rev) - - #plt.plot(text_with_lines_y_rev) - #plt.show() - sigma_gaus=1 - region_sum_0= gaussian_filter1d(text_with_lines_y, sigma_gaus) + min_textline_thickness = 40 - region_sum_0_rev=gaussian_filter1d(text_with_lines_y_rev, sigma_gaus) + if thickness_along_y_percent >= 14: - #plt.plot(region_sum_0_rev) - #plt.show() - region_sum_0_updown=region_sum_0[len(region_sum_0)::-1] + text_with_lines_y_rev = -1 * text_with_lines_y[:] + # print(text_with_lines_y) + # print(text_with_lines_y_rev) - first_nonzero=(next((i for i, x in enumerate(region_sum_0) if x), None)) - last_nonzero=(next((i for i, x in enumerate(region_sum_0_updown) if x), None)) + # plt.plot(text_with_lines_y) + # plt.show() + text_with_lines_y_rev = text_with_lines_y_rev - np.min(text_with_lines_y_rev) - last_nonzero=len(region_sum_0)-last_nonzero + # plt.plot(text_with_lines_y_rev) + # plt.show() + sigma_gaus = 1 + region_sum_0 = gaussian_filter1d(text_with_lines_y, sigma_gaus) - ##img_sum_0_smooth_rev=-region_sum_0 + region_sum_0_rev = gaussian_filter1d(text_with_lines_y_rev, sigma_gaus) + # plt.plot(region_sum_0_rev) + # plt.show() + region_sum_0_updown = region_sum_0[len(region_sum_0)::-1] - mid_point=(last_nonzero+first_nonzero)/2. + first_nonzero = (next((i for i, x in enumerate(region_sum_0) if x), None)) + last_nonzero = (next((i for i, x in enumerate(region_sum_0_updown) if x), None)) + last_nonzero = len(region_sum_0) - last_nonzero - one_third_right=(last_nonzero-mid_point)/3.0 - one_third_left=(mid_point-first_nonzero)/3.0 - - #img_sum_0_smooth_rev=img_sum_0_smooth_rev-np.min(img_sum_0_smooth_rev) + # img_sum_0_smooth_rev=-region_sum_0 + mid_point = (last_nonzero + first_nonzero) / 2. 
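get_marginals, reformatted above, works on a column-wise ink projection: valleys in the profile that lie between the first and last non-empty columns and are thinner than min_textline_thickness are candidate boundaries between marginalia and the main text block, split into a left and a right group around the page midpoint. The following is a simplified single-column sketch of that projection idea, not the eynollah implementation: the function name and the toy page are made up, and the two-column one-third offsets are omitted.

import numpy as np
from scipy.ndimage import gaussian_filter1d
from scipy.signal import find_peaks

def marginal_boundaries(text_mask, min_textline_thickness=8):
    # ink per image column (text_mask is a binary page mask, text == 1)
    profile = text_mask.sum(axis=0).astype(float)
    inverted = -profile
    inverted -= inverted.min()           # valleys of the profile become peaks
    smooth = gaussian_filter1d(profile, 1)

    nonzero = np.nonzero(profile)[0]
    first_nonzero, last_nonzero = nonzero[0], nonzero[-1]
    mid_point = (first_nonzero + last_nonzero) / 2.0

    peaks, _ = find_peaks(inverted, height=0)
    peaks = peaks[(peaks > first_nonzero) & (peaks < last_nonzero)]
    peaks = peaks[smooth[peaks] < min_textline_thickness]  # near-empty columns only

    left = peaks[peaks < mid_point]
    right = peaks[peaks > mid_point]
    point_left = left.max() if len(left) else first_nonzero
    point_right = right.min() if len(right) else last_nonzero
    return int(point_left), int(point_right)

# toy page: main text block at x = 30..169, a narrow marginal note at x = 5..19
page = np.zeros((100, 200), dtype=np.uint8)
page[:, 30:170] = 1
page[:, 5:20] = 1
print(marginal_boundaries(page))   # left boundary lands in the gap around x ~ 24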
+ one_third_right = (last_nonzero - mid_point) / 3.0 + one_third_left = (mid_point - first_nonzero) / 3.0 + # img_sum_0_smooth_rev=img_sum_0_smooth_rev-np.min(img_sum_0_smooth_rev) peaks, _ = find_peaks(text_with_lines_y_rev, height=0) + peaks = np.array(peaks) - peaks=np.array(peaks) - - - #print(region_sum_0[peaks]) - ##plt.plot(region_sum_0) - ##plt.plot(peaks,region_sum_0[peaks],'*') - ##plt.show() - #print(first_nonzero,last_nonzero,peaks) - peaks=peaks[(peaks>first_nonzero) & ((peaks first_nonzero) & (peaks < last_nonzero)] + # print(first_nonzero,last_nonzero,peaks) - #print(region_sum_0[peaks]<10) - ####peaks=peaks[region_sum_0[peaks]<25 ] + # print(region_sum_0[peaks]<10) + # peaks=peaks[region_sum_0[peaks]<25 ] - #print(region_sum_0[peaks]) - peaks=peaks[region_sum_0[peaks]mid_point] - peaks_left=peaks[peaks(mid_point+one_third_right)] - peaks_left=peaks[peaks<(mid_point-one_third_left)] + # print(region_sum_0[peaks]) + peaks = peaks[region_sum_0[peaks] < min_textline_thickness] + # print(peaks) + # print(first_nonzero,last_nonzero,one_third_right,one_third_left) + if num_col == 1: + peaks_right = peaks[peaks > mid_point] + peaks_left = peaks[peaks < mid_point] + if num_col == 2: + peaks_right = peaks[peaks > (mid_point + one_third_right)] + peaks_left = peaks[peaks < (mid_point - one_third_left)] try: - point_right=np.min(peaks_right) + point_right = np.min(peaks_right) except: - point_right=last_nonzero - + point_right = last_nonzero try: - point_left=np.max(peaks_left) + point_left = np.max(peaks_left) except: - point_left=first_nonzero - + point_left = first_nonzero - - - #print(point_left,point_right) - #print(text_regions.shape) - if point_right>=mask_marginals.shape[1]: - point_right=mask_marginals.shape[1]-1 + # print(point_left,point_right) + # print(text_regions.shape) + if point_right >= mask_marginals.shape[1]: + point_right = mask_marginals.shape[1] - 1 try: - mask_marginals[:,point_left:point_right]=1 + mask_marginals[:, point_left:point_right] = 1 except: - mask_marginals[:,:]=1 + mask_marginals[:, :] = 1 - #print(mask_marginals.shape,point_left,point_right,'nadosh') - mask_marginals_rotated=rotate_image(mask_marginals,-slope_deskew) + # print(mask_marginals.shape,point_left,point_right,'nadosh') + mask_marginals_rotated = rotate_image(mask_marginals, -slope_deskew) - #print(mask_marginals_rotated.shape,'nadosh') - mask_marginals_rotated_sum=mask_marginals_rotated.sum(axis=0) + # print(mask_marginals_rotated.shape,'nadosh') + mask_marginals_rotated_sum = mask_marginals_rotated.sum(axis=0) - mask_marginals_rotated_sum[mask_marginals_rotated_sum!=0]=1 - index_x=np.array(range(len(mask_marginals_rotated_sum)))+1 + mask_marginals_rotated_sum[mask_marginals_rotated_sum != 0] = 1 + index_x = np.array(range(len(mask_marginals_rotated_sum))) + 1 - index_x_interest=index_x[mask_marginals_rotated_sum==1] + index_x_interest = index_x[mask_marginals_rotated_sum == 1] - min_point_of_left_marginal=np.min(index_x_interest)-16 - max_point_of_right_marginal=np.max(index_x_interest)+16 + min_point_of_left_marginal = np.min(index_x_interest) - 16 + max_point_of_right_marginal = np.max(index_x_interest) + 16 - if min_point_of_left_marginal<0: - min_point_of_left_marginal=0 - if max_point_of_right_marginal>=text_regions.shape[1]: - max_point_of_right_marginal=text_regions.shape[1]-1 + if min_point_of_left_marginal < 0: + min_point_of_left_marginal = 0 + if max_point_of_right_marginal >= text_regions.shape[1]: + max_point_of_right_marginal = text_regions.shape[1] - 1 + # 
print(np.min(index_x_interest) ,np.max(index_x_interest),'minmaxnew') + # print(mask_marginals_rotated.shape,text_regions.shape,'mask_marginals_rotated') + # plt.imshow(mask_marginals) + # plt.show() - #print(np.min(index_x_interest) ,np.max(index_x_interest),'minmaxnew') - #print(mask_marginals_rotated.shape,text_regions.shape,'mask_marginals_rotated') - #plt.imshow(mask_marginals) - #plt.show() + # plt.imshow(mask_marginals_rotated) + # plt.show() - #plt.imshow(mask_marginals_rotated) - #plt.show() + text_regions[(mask_marginals_rotated[:, :] != 1) & (text_regions[:, :] == 1)] = 4 - text_regions[(mask_marginals_rotated[:,:]!=1) & (text_regions[:,:]==1)]=4 + # plt.imshow(text_regions) + # plt.show() - #plt.imshow(text_regions) - #plt.show() + pixel_img = 4 + min_area_text = 0.00001 + polygons_of_marginals = return_contours_of_interested_region(text_regions, pixel_img, min_area_text) - pixel_img=4 - min_area_text=0.00001 - polygons_of_marginals=return_contours_of_interested_region(text_regions,pixel_img,min_area_text) + cx_text_only, cy_text_only, x_min_text_only, x_max_text_only, y_min_text_only, y_max_text_only, y_cor_x_min_main = find_new_features_of_contours(polygons_of_marginals) - cx_text_only,cy_text_only ,x_min_text_only,x_max_text_only, y_min_text_only ,y_max_text_only,y_cor_x_min_main=find_new_features_of_contours(polygons_of_marginals) + text_regions[(text_regions[:, :] == 4)] = 1 - text_regions[(text_regions[:,:]==4)]=1 + marginlas_should_be_main_text = [] - marginlas_should_be_main_text=[] - - x_min_marginals_left=[] - x_min_marginals_right=[] + x_min_marginals_left = [] + x_min_marginals_right = [] for i in range(len(cx_text_only)): - x_width_mar=abs(x_min_text_only[i]-x_max_text_only[i]) - y_height_mar=abs(y_min_text_only[i]-y_max_text_only[i]) - #print(x_width_mar,y_height_mar,y_height_mar/x_width_mar,'y_height_mar') - if x_width_mar>16 and y_height_mar/x_width_mar<18: + x_width_mar = abs(x_min_text_only[i] - x_max_text_only[i]) + y_height_mar = abs(y_min_text_only[i] - y_max_text_only[i]) + # print(x_width_mar,y_height_mar,y_height_mar/x_width_mar,'y_height_mar') + if x_width_mar > 16 and y_height_mar / x_width_mar < 18: marginlas_should_be_main_text.append(polygons_of_marginals[i]) - if x_min_text_only[i]<(mid_point-one_third_left): - x_min_marginals_left_new=x_min_text_only[i] - if len(x_min_marginals_left)==0: + if x_min_text_only[i] < (mid_point - one_third_left): + x_min_marginals_left_new = x_min_text_only[i] + if len(x_min_marginals_left) == 0: x_min_marginals_left.append(x_min_marginals_left_new) else: - x_min_marginals_left[0]=min(x_min_marginals_left[0],x_min_marginals_left_new) + x_min_marginals_left[0] = min(x_min_marginals_left[0], x_min_marginals_left_new) else: - x_min_marginals_right_new=x_min_text_only[i] - if len(x_min_marginals_right)==0: + x_min_marginals_right_new = x_min_text_only[i] + if len(x_min_marginals_right) == 0: x_min_marginals_right.append(x_min_marginals_right_new) else: - x_min_marginals_right[0]=min(x_min_marginals_right[0],x_min_marginals_right_new) - - if len(x_min_marginals_left)==0: - x_min_marginals_left=[0] - if len(x_min_marginals_right)==0: - x_min_marginals_right=[text_regions.shape[1]-1] - - - + x_min_marginals_right[0] = min(x_min_marginals_right[0], x_min_marginals_right_new) - #print(x_min_marginals_left[0],x_min_marginals_right[0],'margo') + if len(x_min_marginals_left) == 0: + x_min_marginals_left = [0] + if len(x_min_marginals_right) == 0: + x_min_marginals_right = [text_regions.shape[1] - 1] - 
#print(marginlas_should_be_main_text,'marginlas_should_be_main_text') - text_regions=cv2.fillPoly(text_regions, pts =marginlas_should_be_main_text, color=(4,4)) + # print(x_min_marginals_left[0],x_min_marginals_right[0],'margo') - #print(np.unique(text_regions)) + # print(marginlas_should_be_main_text,'marginlas_should_be_main_text') + text_regions = cv2.fillPoly(text_regions, pts=marginlas_should_be_main_text, color=(4, 4)) - #text_regions[:,:int(x_min_marginals_left[0])][text_regions[:,:int(x_min_marginals_left[0])]==1]=0 - #text_regions[:,int(x_min_marginals_right[0]):][text_regions[:,int(x_min_marginals_right[0]):]==1]=0 + # print(np.unique(text_regions)) - text_regions[:,:int(min_point_of_left_marginal)][text_regions[:,:int(min_point_of_left_marginal)]==1]=0 - text_regions[:,int(max_point_of_right_marginal):][text_regions[:,int(max_point_of_right_marginal):]==1]=0 + # text_regions[:,:int(x_min_marginals_left[0])][text_regions[:,:int(x_min_marginals_left[0])]==1]=0 + # text_regions[:,int(x_min_marginals_right[0]):][text_regions[:,int(x_min_marginals_right[0]):]==1]=0 - ###text_regions[:,0:point_left][text_regions[:,0:point_left]==1]=4 + text_regions[:, :int(min_point_of_left_marginal)][text_regions[:, :int(min_point_of_left_marginal)] == 1] = 0 + text_regions[:, int(max_point_of_right_marginal):][text_regions[:, int(max_point_of_right_marginal):] == 1] = 0 - ###text_regions[:,point_right:][ text_regions[:,point_right:]==1]=4 - #plt.plot(region_sum_0) - #plt.plot(peaks,region_sum_0[peaks],'*') - #plt.show() + # text_regions[:,0:point_left][text_regions[:,0:point_left]==1]=4 + # text_regions[:,point_right:][ text_regions[:,point_right:]==1]=4 + # plt.plot(region_sum_0) + # plt.plot(peaks,region_sum_0[peaks],'*') + # plt.show() - #plt.imshow(text_regions) - #plt.show() + # plt.imshow(text_regions) + # plt.show() - #sys.exit() + # sys.exit() else: pass return text_regions diff --git a/src/eynollah/utils/pil_cv2.py b/src/eynollah/utils/pil_cv2.py index 83ae47d..34ef9e1 100644 --- a/src/eynollah/utils/pil_cv2.py +++ b/src/eynollah/utils/pil_cv2.py @@ -5,15 +5,18 @@ from cv2 import COLOR_GRAY2BGR, COLOR_RGB2BGR, COLOR_BGR2RGB, cvtColor, imread # from sbb_binarization + def cv2pil(img): return Image.fromarray(np.array(cvtColor(img, COLOR_BGR2RGB))) + def pil2cv(img): # from ocrd/workspace.py - color_conversion = COLOR_GRAY2BGR if img.mode in ('1', 'L') else COLOR_RGB2BGR + color_conversion = COLOR_GRAY2BGR if img.mode in ('1', 'L') else COLOR_RGB2BGR pil_as_np_array = np.array(img).astype('uint8') if img.mode == '1' else np.array(img) return cvtColor(pil_as_np_array, color_conversion) + def check_dpi(img): try: if isinstance(img, Image.Image): diff --git a/src/eynollah/utils/resize.py b/src/eynollah/utils/resize.py index fdc49ec..8c09b04 100644 --- a/src/eynollah/utils/resize.py +++ b/src/eynollah/utils/resize.py @@ -1,4 +1,5 @@ import cv2 + def resize_image(img_in, input_height, input_width): return cv2.resize(img_in, (input_width, input_height), interpolation=cv2.INTER_NEAREST) diff --git a/src/eynollah/utils/rotate.py b/src/eynollah/utils/rotate.py index 603c2d9..879302c 100644 --- a/src/eynollah/utils/rotate.py +++ b/src/eynollah/utils/rotate.py @@ -3,6 +3,7 @@ import math import imutils import cv2 + def rotatedRectWithMaxArea(w, h, angle): if w <= 0 or h <= 0: return 0, 0 @@ -25,6 +26,7 @@ def rotatedRectWithMaxArea(w, h, angle): return wr, hr + def rotate_max_area_new(image, rotated, angle): wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], math.radians(angle)) h, w, _ = 
rotated.shape @@ -34,17 +36,20 @@ def rotate_max_area_new(image, rotated, angle): x2 = x1 + int(wr) return rotated[y1:y2, x1:x2] + def rotation_image_new(img, thetha): rotated = imutils.rotate(img, thetha) return rotate_max_area_new(img, rotated, thetha) + def rotate_image(img_patch, slope): (h, w) = img_patch.shape[:2] center = (w // 2, h // 2) M = cv2.getRotationMatrix2D(center, slope, 1.0) return cv2.warpAffine(img_patch, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE) -def rotate_image_different( img, slope): + +def rotate_image_different(img, slope): # img = cv2.imread('images/input.jpg') num_rows, num_cols = img.shape[:2] @@ -52,6 +57,7 @@ def rotate_image_different( img, slope): img_rotation = cv2.warpAffine(img, rotation_matrix, (num_cols, num_rows)) return img_rotation + def rotate_max_area(image, rotated, rotated_textline, rotated_layout, rotated_table_prediction, angle): wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], math.radians(angle)) h, w, _ = rotated.shape @@ -61,6 +67,7 @@ def rotate_max_area(image, rotated, rotated_textline, rotated_layout, rotated_ta x2 = x1 + int(wr) return rotated[y1:y2, x1:x2], rotated_textline[y1:y2, x1:x2], rotated_layout[y1:y2, x1:x2], rotated_table_prediction[y1:y2, x1:x2] + def rotation_not_90_func(img, textline, text_regions_p_1, table_prediction, thetha): rotated = imutils.rotate(img, thetha) rotated_textline = imutils.rotate(textline, thetha) @@ -68,6 +75,7 @@ def rotation_not_90_func(img, textline, text_regions_p_1, table_prediction, thet rotated_table_prediction = imutils.rotate(table_prediction, thetha) return rotate_max_area(img, rotated, rotated_textline, rotated_layout, rotated_table_prediction, thetha) + def rotation_not_90_func_full_layout(img, textline, text_regions_p_1, text_regions_p_fully, thetha): rotated = imutils.rotate(img, thetha) rotated_textline = imutils.rotate(textline, thetha) @@ -75,6 +83,7 @@ def rotation_not_90_func_full_layout(img, textline, text_regions_p_1, text_regio rotated_layout_full = imutils.rotate(text_regions_p_fully, thetha) return rotate_max_area_full_layout(img, rotated, rotated_textline, rotated_layout, rotated_layout_full, thetha) + def rotate_max_area_full_layout(image, rotated, rotated_textline, rotated_layout, rotated_layout_full, angle): wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], math.radians(angle)) h, w, _ = rotated.shape diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index acdc2e9..a6bc60f 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -17,6 +17,7 @@ from . 
import ( isNaN, ) + def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): (h, w) = img_patch.shape[:2] center = (w // 2, h // 2) @@ -45,7 +46,7 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): y = textline_patch_sum_along_width[:] # [first_nonzero:last_nonzero] y_padded = np.zeros(len(y) + 40) - y_padded[20 : len(y) + 20] = y + y_padded[20: len(y) + 20] = y x = np.array(range(len(y))) peaks_real, _ = find_peaks(gaussian_filter1d(y, 3), height=0) @@ -57,7 +58,7 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): y_padded_smoothed_e = gaussian_filter1d(y_padded, 2) y_padded_up_to_down_e = -y_padded + np.max(y_padded) y_padded_up_to_down_padded_e = np.zeros(len(y_padded_up_to_down_e) + 40) - y_padded_up_to_down_padded_e[20 : len(y_padded_up_to_down_e) + 20] = y_padded_up_to_down_e + y_padded_up_to_down_padded_e[20: len(y_padded_up_to_down_e) + 20] = y_padded_up_to_down_e y_padded_up_to_down_padded_e = gaussian_filter1d(y_padded_up_to_down_padded_e, 2) peaks_e, _ = find_peaks(y_padded_smoothed_e, height=0) @@ -76,10 +77,10 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): clusters_to_be_deleted = [] if len(arg_diff_cluster) > 0: - clusters_to_be_deleted.append(arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[0: arg_diff_cluster[0] + 1]) for i in range(len(arg_diff_cluster) - 1): - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : arg_diff_cluster[i + 1] + 1]) - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1: arg_diff_cluster[i + 1] + 1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1:]) if len(clusters_to_be_deleted) > 0: peaks_new_extra = [] @@ -117,7 +118,7 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): y_padded_smoothed = gaussian_filter1d(y_padded, sigma_gaus) y_padded_up_to_down = -y_padded + np.max(y_padded) y_padded_up_to_down_padded = np.zeros(len(y_padded_up_to_down) + 40) - y_padded_up_to_down_padded[20 : len(y_padded_up_to_down) + 20] = y_padded_up_to_down + y_padded_up_to_down_padded[20: len(y_padded_up_to_down) + 20] = y_padded_up_to_down y_padded_up_to_down_padded = gaussian_filter1d(y_padded_up_to_down_padded, sigma_gaus) peaks, _ = find_peaks(y_padded_smoothed, height=0) @@ -125,14 +126,14 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): return x, y, x_d, y_d, xv, x_min_cont, y_min_cont, x_max_cont, y_max_cont, first_nonzero, y_padded_up_to_down_padded, y_padded_smoothed, peaks, peaks_neg, rotation_matrix -def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): +def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): (h, w) = img_patch.shape[:2] center = (w // 2, h // 2) M = cv2.getRotationMatrix2D(center, -thetha, 1.0) x_d = M[0, 2] y_d = M[1, 2] - + thetha = thetha / 180. 
* np.pi rotation_matrix = np.array([[np.cos(thetha), -np.sin(thetha)], [np.sin(thetha), np.cos(thetha)]]) contour_text_interest_copy = contour_text_interest.copy() @@ -159,182 +160,160 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): x = np.array(range(len(y))) peaks_real, _ = find_peaks(gaussian_filter1d(y, 3), height=0) - - if 1>0: + + if 1 > 0: try: - y_padded_smoothed_e= gaussian_filter1d(y_padded, 2) - y_padded_up_to_down_e=-y_padded+np.max(y_padded) - y_padded_up_to_down_padded_e=np.zeros(len(y_padded_up_to_down_e)+40) - y_padded_up_to_down_padded_e[20:len(y_padded_up_to_down_e)+20]=y_padded_up_to_down_e - y_padded_up_to_down_padded_e= gaussian_filter1d(y_padded_up_to_down_padded_e, 2) - + y_padded_smoothed_e = gaussian_filter1d(y_padded, 2) + y_padded_up_to_down_e = -y_padded + np.max(y_padded) + y_padded_up_to_down_padded_e = np.zeros(len(y_padded_up_to_down_e) + 40) + y_padded_up_to_down_padded_e[20:len(y_padded_up_to_down_e) + 20] = y_padded_up_to_down_e + y_padded_up_to_down_padded_e = gaussian_filter1d(y_padded_up_to_down_padded_e, 2) peaks_e, _ = find_peaks(y_padded_smoothed_e, height=0) peaks_neg_e, _ = find_peaks(y_padded_up_to_down_padded_e, height=0) - neg_peaks_max=np.max(y_padded_up_to_down_padded_e[peaks_neg_e]) - - arg_neg_must_be_deleted= np.array(range(len(peaks_neg_e)))[y_padded_up_to_down_padded_e[peaks_neg_e]/float(neg_peaks_max)<0.3 ] - diff_arg_neg_must_be_deleted=np.diff(arg_neg_must_be_deleted) - - - - arg_diff=np.array(range(len(diff_arg_neg_must_be_deleted))) - arg_diff_cluster=arg_diff[diff_arg_neg_must_be_deleted>1] - - - peaks_new=peaks_e[:] - peaks_neg_new=peaks_neg_e[:] - - clusters_to_be_deleted=[] - if len(arg_diff_cluster)>0: - - clusters_to_be_deleted.append(arg_neg_must_be_deleted[0:arg_diff_cluster[0]+1]) - for i in range(len(arg_diff_cluster)-1): - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i]+1:arg_diff_cluster[i+1]+1]) - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster)-1]+1:]) - - - if len(clusters_to_be_deleted)>0: - peaks_new_extra=[] + neg_peaks_max = np.max(y_padded_up_to_down_padded_e[peaks_neg_e]) + + arg_neg_must_be_deleted = np.array(range(len(peaks_neg_e)))[ + y_padded_up_to_down_padded_e[peaks_neg_e] / float(neg_peaks_max) < 0.3] + diff_arg_neg_must_be_deleted = np.diff(arg_neg_must_be_deleted) + + arg_diff = np.array(range(len(diff_arg_neg_must_be_deleted))) + arg_diff_cluster = arg_diff[diff_arg_neg_must_be_deleted > 1] + + peaks_new = peaks_e[:] + peaks_neg_new = peaks_neg_e[:] + + clusters_to_be_deleted = [] + if len(arg_diff_cluster) > 0: + + clusters_to_be_deleted.append(arg_neg_must_be_deleted[0:arg_diff_cluster[0] + 1]) + for i in range(len(arg_diff_cluster) - 1): + clusters_to_be_deleted.append( + arg_neg_must_be_deleted[arg_diff_cluster[i] + 1:arg_diff_cluster[i + 1] + 1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1:]) + + if len(clusters_to_be_deleted) > 0: + peaks_new_extra = [] for m in range(len(clusters_to_be_deleted)): - min_cluster=np.min(peaks_e[clusters_to_be_deleted[m]]) - max_cluster=np.max(peaks_e[clusters_to_be_deleted[m]]) - peaks_new_extra.append( int( (min_cluster+max_cluster)/2.0) ) + min_cluster = np.min(peaks_e[clusters_to_be_deleted[m]]) + max_cluster = np.max(peaks_e[clusters_to_be_deleted[m]]) + peaks_new_extra.append(int((min_cluster + max_cluster) / 2.0)) for m1 in range(len(clusters_to_be_deleted[m])): - 
peaks_new=peaks_new[peaks_new!=peaks_e[clusters_to_be_deleted[m][m1]-1]] - peaks_new=peaks_new[peaks_new!=peaks_e[clusters_to_be_deleted[m][m1]]] - - peaks_neg_new=peaks_neg_new[peaks_neg_new!=peaks_neg_e[clusters_to_be_deleted[m][m1]]] - peaks_new_tot=[] + peaks_new = peaks_new[peaks_new != peaks_e[clusters_to_be_deleted[m][m1] - 1]] + peaks_new = peaks_new[peaks_new != peaks_e[clusters_to_be_deleted[m][m1]]] + + peaks_neg_new = peaks_neg_new[peaks_neg_new != peaks_neg_e[clusters_to_be_deleted[m][m1]]] + peaks_new_tot = [] for i1 in peaks_new: peaks_new_tot.append(i1) for i1 in peaks_new_extra: peaks_new_tot.append(i1) - peaks_new_tot=np.sort(peaks_new_tot) - - - else: - peaks_new_tot=peaks_e[:] + peaks_new_tot = np.sort(peaks_new_tot) + else: + peaks_new_tot = peaks_e[:] - textline_con,hierarchy=return_contours_of_image(img_patch) - textline_con_fil=filter_contours_area_of_image(img_patch,textline_con,hierarchy,max_area=1,min_area=0.0008) - y_diff_mean=np.mean(np.diff(peaks_new_tot))#self.find_contours_mean_y_diff(textline_con_fil) + textline_con, hierarchy = return_contours_of_image(img_patch) + textline_con_fil = filter_contours_area_of_image(img_patch, textline_con, hierarchy, max_area=1, min_area=0.0008) + y_diff_mean = np.mean(np.diff(peaks_new_tot)) # self.find_contours_mean_y_diff(textline_con_fil) - sigma_gaus=int( y_diff_mean * (7./40.0) ) - #print(sigma_gaus,'sigma_gaus') + sigma_gaus = int(y_diff_mean * (7. / 40.0)) + # print(sigma_gaus,'sigma_gaus') except: - sigma_gaus=12 - if sigma_gaus<3: - sigma_gaus=3 - #print(sigma_gaus,'sigma') - + sigma_gaus = 12 + if sigma_gaus < 3: + sigma_gaus = 3 + # print(sigma_gaus,'sigma') - y_padded_smoothed= gaussian_filter1d(y_padded, sigma_gaus) - y_padded_up_to_down=-y_padded+np.max(y_padded) - y_padded_up_to_down_padded=np.zeros(len(y_padded_up_to_down)+40) - y_padded_up_to_down_padded[20:len(y_padded_up_to_down)+20]=y_padded_up_to_down - y_padded_up_to_down_padded= gaussian_filter1d(y_padded_up_to_down_padded, sigma_gaus) - + y_padded_smoothed = gaussian_filter1d(y_padded, sigma_gaus) + y_padded_up_to_down = -y_padded + np.max(y_padded) + y_padded_up_to_down_padded = np.zeros(len(y_padded_up_to_down) + 40) + y_padded_up_to_down_padded[20:len(y_padded_up_to_down) + 20] = y_padded_up_to_down + y_padded_up_to_down_padded = gaussian_filter1d(y_padded_up_to_down_padded, sigma_gaus) peaks, _ = find_peaks(y_padded_smoothed, height=0) peaks_neg, _ = find_peaks(y_padded_up_to_down_padded, height=0) - - - - try: - neg_peaks_max=np.max(y_padded_smoothed[peaks]) - - - arg_neg_must_be_deleted= np.array(range(len(peaks_neg)))[y_padded_up_to_down_padded[peaks_neg]/float(neg_peaks_max)<0.42 ] + neg_peaks_max = np.max(y_padded_smoothed[peaks]) + arg_neg_must_be_deleted = np.array(range(len(peaks_neg)))[y_padded_up_to_down_padded[peaks_neg] / float(neg_peaks_max) < 0.42] - diff_arg_neg_must_be_deleted=np.diff(arg_neg_must_be_deleted) - + diff_arg_neg_must_be_deleted = np.diff(arg_neg_must_be_deleted) - - arg_diff=np.array(range(len(diff_arg_neg_must_be_deleted))) - arg_diff_cluster=arg_diff[diff_arg_neg_must_be_deleted>1] + arg_diff = np.array(range(len(diff_arg_neg_must_be_deleted))) + arg_diff_cluster = arg_diff[diff_arg_neg_must_be_deleted > 1] except: - arg_neg_must_be_deleted=[] - arg_diff_cluster=[] - - + arg_neg_must_be_deleted = [] + arg_diff_cluster = [] + try: - peaks_new=peaks[:] - peaks_neg_new=peaks_neg[:] - clusters_to_be_deleted=[] - - - if len(arg_diff_cluster)>=2 and len(arg_diff_cluster)>0: - - 
clusters_to_be_deleted.append(arg_neg_must_be_deleted[0:arg_diff_cluster[0]+1]) - for i in range(len(arg_diff_cluster)-1): - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i]+1:arg_diff_cluster[i+1]+1]) - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster)-1]+1:]) - elif len(arg_neg_must_be_deleted)>=2 and len(arg_diff_cluster)==0: + peaks_new = peaks[:] + peaks_neg_new = peaks_neg[:] + clusters_to_be_deleted = [] + + if len(arg_diff_cluster) >= 2 and len(arg_diff_cluster) > 0: + + clusters_to_be_deleted.append(arg_neg_must_be_deleted[0:arg_diff_cluster[0] + 1]) + for i in range(len(arg_diff_cluster) - 1): + clusters_to_be_deleted.append( + arg_neg_must_be_deleted[arg_diff_cluster[i] + 1:arg_diff_cluster[i + 1] + 1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1:]) + elif len(arg_neg_must_be_deleted) >= 2 and len(arg_diff_cluster) == 0: clusters_to_be_deleted.append(arg_neg_must_be_deleted[:]) - - - - if len(arg_neg_must_be_deleted)==1: + + if len(arg_neg_must_be_deleted) == 1: clusters_to_be_deleted.append(arg_neg_must_be_deleted) - - if len(clusters_to_be_deleted)>0: - peaks_new_extra=[] + if len(clusters_to_be_deleted) > 0: + peaks_new_extra = [] for m in range(len(clusters_to_be_deleted)): - min_cluster=np.min(peaks[clusters_to_be_deleted[m]]) - max_cluster=np.max(peaks[clusters_to_be_deleted[m]]) - peaks_new_extra.append( int( (min_cluster+max_cluster)/2.0) ) + min_cluster = np.min(peaks[clusters_to_be_deleted[m]]) + max_cluster = np.max(peaks[clusters_to_be_deleted[m]]) + peaks_new_extra.append(int((min_cluster + max_cluster) / 2.0)) for m1 in range(len(clusters_to_be_deleted[m])): - peaks_new=peaks_new[peaks_new!=peaks[clusters_to_be_deleted[m][m1]-1]] - peaks_new=peaks_new[peaks_new!=peaks[clusters_to_be_deleted[m][m1]]] - - peaks_neg_new=peaks_neg_new[peaks_neg_new!=peaks_neg[clusters_to_be_deleted[m][m1]]] - peaks_new_tot=[] + peaks_new = peaks_new[peaks_new != peaks[clusters_to_be_deleted[m][m1] - 1]] + peaks_new = peaks_new[peaks_new != peaks[clusters_to_be_deleted[m][m1]]] + + peaks_neg_new = peaks_neg_new[peaks_neg_new != peaks_neg[clusters_to_be_deleted[m][m1]]] + peaks_new_tot = [] for i1 in peaks_new: peaks_new_tot.append(i1) for i1 in peaks_new_extra: peaks_new_tot.append(i1) - peaks_new_tot=np.sort(peaks_new_tot) - - ##plt.plot(y_padded_up_to_down_padded) - ##plt.plot(peaks_neg,y_padded_up_to_down_padded[peaks_neg],'*') - ##plt.show() - - ##plt.plot(y_padded_up_to_down_padded) - ##plt.plot(peaks_neg_new,y_padded_up_to_down_padded[peaks_neg_new],'*') - ##plt.show() - - ##plt.plot(y_padded_smoothed) - ##plt.plot(peaks,y_padded_smoothed[peaks],'*') - ##plt.show() - - ##plt.plot(y_padded_smoothed) - ##plt.plot(peaks_new_tot,y_padded_smoothed[peaks_new_tot],'*') - ##plt.show() - - peaks=peaks_new_tot[:] - peaks_neg=peaks_neg_new[:] - - + peaks_new_tot = np.sort(peaks_new_tot) + + # plt.plot(y_padded_up_to_down_padded) + # plt.plot(peaks_neg,y_padded_up_to_down_padded[peaks_neg],'*') + # plt.show() + + # plt.plot(y_padded_up_to_down_padded) + # plt.plot(peaks_neg_new,y_padded_up_to_down_padded[peaks_neg_new],'*') + # plt.show() + + # plt.plot(y_padded_smoothed) + # plt.plot(peaks,y_padded_smoothed[peaks],'*') + # plt.show() + + # plt.plot(y_padded_smoothed) + # plt.plot(peaks_new_tot,y_padded_smoothed[peaks_new_tot],'*') + # plt.show() + + peaks = peaks_new_tot[:] + peaks_neg = peaks_neg_new[:] + else: - peaks_new_tot=peaks[:] - peaks=peaks_new_tot[:] - 
peaks_neg=peaks_neg_new[:] + peaks_new_tot = peaks[:] + peaks = peaks_new_tot[:] + peaks_neg = peaks_neg_new[:] except: pass - - - mean_value_of_peaks=np.mean(y_padded_smoothed[peaks]) - std_value_of_peaks=np.std(y_padded_smoothed[peaks]) - peaks_values=y_padded_smoothed[peaks] - + + mean_value_of_peaks = np.mean(y_padded_smoothed[peaks]) + std_value_of_peaks = np.std(y_padded_smoothed[peaks]) + peaks_values = y_padded_smoothed[peaks] peaks_neg = peaks_neg - 20 - 20 peaks = peaks - 20 @@ -346,50 +325,47 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): for jj in range(len(peaks)): if peaks[jj] > len(x) - 1: peaks[jj] = len(x) - 1 - - textline_boxes = [] textline_boxes_rot = [] - + if len(peaks_neg) == len(peaks) + 1 and len(peaks) >= 3: for jj in range(len(peaks)): - - if jj==(len(peaks)-1): + + if jj == (len(peaks) - 1): dis_to_next_up = abs(peaks[jj] - peaks_neg[jj]) dis_to_next_down = abs(peaks[jj] - peaks_neg[jj + 1]) - - if peaks_values[jj]>mean_value_of_peaks-std_value_of_peaks/2.: - point_up = peaks[jj] + first_nonzero - int(1.3 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down =y_max_cont-1##peaks[jj] + first_nonzero + int(1.3 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) + + if peaks_values[jj] > mean_value_of_peaks - std_value_of_peaks / 2.: + point_up = peaks[jj] + first_nonzero - int(1.3 * dis_to_next_up) # +int(dis_to_next_up*1./4.0) + point_down = y_max_cont - 1 # peaks[jj] + first_nonzero + int(1.3 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) else: - point_up = peaks[jj] + first_nonzero - int(1.4 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down =y_max_cont-1##peaks[jj] + first_nonzero + int(1.6 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.4 * dis_to_next_up) # +int(dis_to_next_up*1./4.0) + point_down = y_max_cont - 1 # peaks[jj] + first_nonzero + int(1.6 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) point_down_narrow = peaks[jj] + first_nonzero + int( - 1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./2) + 1.4 * dis_to_next_down) # -int(dis_to_next_down*1./2) else: dis_to_next_up = abs(peaks[jj] - peaks_neg[jj]) dis_to_next_down = abs(peaks[jj] - peaks_neg[jj + 1]) - - if peaks_values[jj]>mean_value_of_peaks-std_value_of_peaks/2.: - point_up = peaks[jj] + first_nonzero - int(1.1 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) + + if peaks_values[jj] > mean_value_of_peaks - std_value_of_peaks / 2.: + point_up = peaks[jj] + first_nonzero - int(1.1 * dis_to_next_up) # +int(dis_to_next_up*1./4.0) + point_down = peaks[jj] + first_nonzero + int( + 1.1 * dis_to_next_down) # -int(dis_to_next_down*1./4.0) else: - point_up = peaks[jj] + first_nonzero - int(1.23 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down = peaks[jj] + first_nonzero + int(1.33 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.23 * dis_to_next_up) # +int(dis_to_next_up*1./4.0) + point_down = peaks[jj] + first_nonzero + int( + 1.33 * dis_to_next_down) # -int(dis_to_next_down*1./4.0) point_down_narrow 
= peaks[jj] + first_nonzero + int( - 1.1 * dis_to_next_down) ###-int(dis_to_next_down*1./2) - - + 1.1 * dis_to_next_down) # -int(dis_to_next_down*1./2) if point_down_narrow >= img_patch.shape[0]: point_down_narrow = img_patch.shape[0] - 2 - distances = [cv2.pointPolygonTest(contour_text_interest_copy, tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) - for mj in range(len(xv))] + for mj in range(len(xv))] distances = np.array(distances) xvinside = xv[distances >= 0] @@ -410,30 +386,25 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): x_max_rot2, point_up_rot2 = p2[0] + x_d, p2[1] + y_d x_max_rot3, point_down_rot3 = p3[0] + x_d, p3[1] + y_d x_min_rot4, point_down_rot4 = p4[0] + x_d, p4[1] + y_d - - if x_min_rot1<0: - x_min_rot1=0 - if x_min_rot4<0: - x_min_rot4=0 - if point_up_rot1<0: - point_up_rot1=0 - if point_up_rot2<0: - point_up_rot2=0 - - - - x_min_rot1=x_min_rot1-x_help - x_max_rot2=x_max_rot2-x_help - x_max_rot3=x_max_rot3-x_help - x_min_rot4=x_min_rot4-x_help - - point_up_rot1=point_up_rot1-y_help - point_up_rot2=point_up_rot2-y_help - point_down_rot3=point_down_rot3-y_help - point_down_rot4=point_down_rot4-y_help - - + if x_min_rot1 < 0: + x_min_rot1 = 0 + if x_min_rot4 < 0: + x_min_rot4 = 0 + if point_up_rot1 < 0: + point_up_rot1 = 0 + if point_up_rot2 < 0: + point_up_rot2 = 0 + + x_min_rot1 = x_min_rot1 - x_help + x_max_rot2 = x_max_rot2 - x_help + x_max_rot3 = x_max_rot3 - x_help + x_min_rot4 = x_min_rot4 - x_help + + point_up_rot1 = point_up_rot1 - y_help + point_up_rot2 = point_up_rot2 - y_help + point_down_rot3 = point_down_rot3 - y_help + point_down_rot4 = point_down_rot4 - y_help textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], @@ -450,19 +421,19 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): elif len(peaks) == 1: distances = [cv2.pointPolygonTest(contour_text_interest_copy, tuple(int(x) for x in np.array([xv[mj], peaks[0] + first_nonzero])), True) - for mj in range(len(xv))] + for mj in range(len(xv))] distances = np.array(distances) xvinside = xv[distances >= 0] - + if len(xvinside) == 0: x_min = x_min_cont x_max = x_max_cont else: x_min = np.min(xvinside) # max(x_min_interest,x_min_cont) x_max = np.max(xvinside) # min(x_max_interest,x_max_cont) - #x_min = x_min_cont - #x_max = x_max_cont + # x_min = x_min_cont + # x_max = x_max_cont y_min = y_min_cont y_max = y_max_cont @@ -476,30 +447,25 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): x_max_rot2, point_up_rot2 = p2[0] + x_d, p2[1] + y_d x_max_rot3, point_down_rot3 = p3[0] + x_d, p3[1] + y_d x_min_rot4, point_down_rot4 = p4[0] + x_d, p4[1] + y_d - - - if x_min_rot1<0: - x_min_rot1=0 - if x_min_rot4<0: - x_min_rot4=0 - if point_up_rot1<0: - point_up_rot1=0 - if point_up_rot2<0: - point_up_rot2=0 - - - x_min_rot1=x_min_rot1-x_help - x_max_rot2=x_max_rot2-x_help - x_max_rot3=x_max_rot3-x_help - x_min_rot4=x_min_rot4-x_help - - point_up_rot1=point_up_rot1-y_help - point_up_rot2=point_up_rot2-y_help - point_down_rot3=point_down_rot3-y_help - point_down_rot4=point_down_rot4-y_help - - + if x_min_rot1 < 0: + x_min_rot1 = 0 + if x_min_rot4 < 0: + x_min_rot4 = 0 + if point_up_rot1 < 0: + point_up_rot1 = 0 + if point_up_rot2 < 0: + point_up_rot2 = 0 + + x_min_rot1 = x_min_rot1 - x_help + x_max_rot2 = x_max_rot2 - x_help + x_max_rot3 = x_max_rot3 - x_help + x_min_rot4 = x_min_rot4 - x_help + + point_up_rot1 = point_up_rot1 - y_help + 
point_up_rot2 = point_up_rot2 - y_help + point_down_rot3 = point_down_rot3 - y_help + point_down_rot4 = point_down_rot4 - y_help textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], @@ -512,26 +478,25 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): [int(x_min), int(y_max)]])) - elif len(peaks) == 2: dis_to_next = np.abs(peaks[1] - peaks[0]) for jj in range(len(peaks)): if jj == 0: - point_up = 0#peaks[jj] + first_nonzero - int(1. / 1.7 * dis_to_next) + point_up = 0 # peaks[jj] + first_nonzero - int(1. / 1.7 * dis_to_next) if point_up < 0: point_up = 1 - point_down = peaks_neg[1] + first_nonzero# peaks[jj] + first_nonzero + int(1. / 1.8 * dis_to_next) + point_down = peaks_neg[1] + first_nonzero # peaks[jj] + first_nonzero + int(1. / 1.8 * dis_to_next) elif jj == 1: - point_down =peaks_neg[1] + first_nonzero# peaks[jj] + first_nonzero + int(1. / 1.8 * dis_to_next) + point_down = peaks_neg[1] + first_nonzero # peaks[jj] + first_nonzero + int(1. / 1.8 * dis_to_next) if point_down >= img_patch.shape[0]: point_down = img_patch.shape[0] - 2 try: - point_up = peaks_neg[2] + first_nonzero#peaks[jj] + first_nonzero - int(1. / 1.8 * dis_to_next) + point_up = peaks_neg[2] + first_nonzero # peaks[jj] + first_nonzero - int(1. / 1.8 * dis_to_next) except: - point_up =peaks[jj] + first_nonzero - int(1. / 1.8 * dis_to_next) - + point_up = peaks[jj] + first_nonzero - int(1. / 1.8 * dis_to_next) + distances = [cv2.pointPolygonTest(contour_text_interest_copy, tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) - for mj in range(len(xv))] + for mj in range(len(xv))] distances = np.array(distances) xvinside = xv[distances >= 0] @@ -552,31 +517,26 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): x_max_rot2, point_up_rot2 = p2[0] + x_d, p2[1] + y_d x_max_rot3, point_down_rot3 = p3[0] + x_d, p3[1] + y_d x_min_rot4, point_down_rot4 = p4[0] + x_d, p4[1] + y_d - - - - if x_min_rot1<0: - x_min_rot1=0 - if x_min_rot4<0: - x_min_rot4=0 - if point_up_rot1<0: - point_up_rot1=0 - if point_up_rot2<0: - point_up_rot2=0 - - x_min_rot1=x_min_rot1-x_help - x_max_rot2=x_max_rot2-x_help - x_max_rot3=x_max_rot3-x_help - x_min_rot4=x_min_rot4-x_help - - point_up_rot1=point_up_rot1-y_help - point_up_rot2=point_up_rot2-y_help - point_down_rot3=point_down_rot3-y_help - point_down_rot4=point_down_rot4-y_help - - - - + + if x_min_rot1 < 0: + x_min_rot1 = 0 + if x_min_rot4 < 0: + x_min_rot4 = 0 + if point_up_rot1 < 0: + point_up_rot1 = 0 + if point_up_rot2 < 0: + point_up_rot2 = 0 + + x_min_rot1 = x_min_rot1 - x_help + x_max_rot2 = x_max_rot2 - x_help + x_max_rot3 = x_max_rot3 - x_help + x_min_rot4 = x_min_rot4 - x_help + + point_up_rot1 = point_up_rot1 - y_help + point_up_rot2 = point_up_rot2 - y_help + point_down_rot3 = point_down_rot3 - y_help + point_down_rot4 = point_down_rot4 - y_help + textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], [int(x_max_rot3), int(point_down_rot3)], @@ -611,9 +571,9 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): point_up = peaks[jj] + first_nonzero - int(1. / 1.9 * dis_to_next_up) point_down = peaks[jj] + first_nonzero + int(1. 
/ 1.9 * dis_to_next_down) - + distances = [cv2.pointPolygonTest(contour_text_interest_copy, tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) - for mj in range(len(xv))] + for mj in range(len(xv))] distances = np.array(distances) xvinside = xv[distances >= 0] @@ -634,29 +594,25 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): x_max_rot2, point_up_rot2 = p2[0] + x_d, p2[1] + y_d x_max_rot3, point_down_rot3 = p3[0] + x_d, p3[1] + y_d x_min_rot4, point_down_rot4 = p4[0] + x_d, p4[1] + y_d - - if x_min_rot1<0: - x_min_rot1=0 - if x_min_rot4<0: - x_min_rot4=0 - if point_up_rot1<0: - point_up_rot1=0 - if point_up_rot2<0: - point_up_rot2=0 - - - x_min_rot1=x_min_rot1-x_help - x_max_rot2=x_max_rot2-x_help - x_max_rot3=x_max_rot3-x_help - x_min_rot4=x_min_rot4-x_help - - point_up_rot1=point_up_rot1-y_help - point_up_rot2=point_up_rot2-y_help - point_down_rot3=point_down_rot3-y_help - point_down_rot4=point_down_rot4-y_help - + if x_min_rot1 < 0: + x_min_rot1 = 0 + if x_min_rot4 < 0: + x_min_rot4 = 0 + if point_up_rot1 < 0: + point_up_rot1 = 0 + if point_up_rot2 < 0: + point_up_rot2 = 0 + + x_min_rot1 = x_min_rot1 - x_help + x_max_rot2 = x_max_rot2 - x_help + x_max_rot3 = x_max_rot3 - x_help + x_min_rot4 = x_min_rot4 - x_help + point_up_rot1 = point_up_rot1 - y_help + point_up_rot2 = point_up_rot2 - y_help + point_down_rot3 = point_down_rot3 - y_help + point_down_rot4 = point_down_rot4 - y_help textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], @@ -668,15 +624,14 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): [int(x_max), int(point_down)], [int(x_min), int(point_down)]])) - return peaks, textline_boxes_rot -def separate_lines_vertical(img_patch, contour_text_interest, thetha): +def separate_lines_vertical(img_patch, contour_text_interest, thetha): thetha = thetha + 90 contour_text_interest_copy = contour_text_interest.copy() - x, y, x_d, y_d, xv, x_min_cont, y_min_cont, x_max_cont, y_max_cont, first_nonzero, y_padded_up_to_down_padded, y_padded_smoothed, peaks, peaks_neg, rotation_matrix = dedup_separate_lines(img_patch, contour_text_interest, thetha, 0) - + x, y, x_d, y_d, xv, x_min_cont, y_min_cont, x_max_cont, y_max_cont, first_nonzero, y_padded_up_to_down_padded, y_padded_smoothed, peaks, peaks_neg, rotation_matrix = dedup_separate_lines( + img_patch, contour_text_interest, thetha, 0) # plt.plot(y_padded_up_to_down_padded) # plt.plot(peaks_neg,y_padded_up_to_down_padded[peaks_neg],'*') @@ -703,10 +658,10 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): if len(arg_diff_cluster) >= 2 and len(arg_diff_cluster) > 0: - clusters_to_be_deleted.append(arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[0: arg_diff_cluster[0] + 1]) for i in range(len(arg_diff_cluster) - 1): - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : arg_diff_cluster[i + 1] + 1]) - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1: arg_diff_cluster[i + 1] + 1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1:]) elif len(arg_neg_must_be_deleted) >= 2 and len(arg_diff_cluster) == 0: clusters_to_be_deleted.append(arg_neg_must_be_deleted[:]) @@ -766,29 +721,29 @@ def 
separate_lines_vertical(img_patch, contour_text_interest, thetha): dis_to_next_down = abs(peaks[jj] - peaks_neg[jj + 1]) if peaks_values[jj] > mean_value_of_peaks - std_value_of_peaks / 2.0: - point_up = peaks[jj] + first_nonzero - int(1.3 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down = x_max_cont - 1 ##peaks[jj] + first_nonzero + int(1.3 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.3 * dis_to_next_up) # +int(dis_to_next_up*1./4.0) + point_down = x_max_cont - 1 # peaks[jj] + first_nonzero + int(1.3 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) else: - point_up = peaks[jj] + first_nonzero - int(1.4 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down = x_max_cont - 1 ##peaks[jj] + first_nonzero + int(1.6 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.4 * dis_to_next_up) # +int(dis_to_next_up*1./4.0) + point_down = x_max_cont - 1 # peaks[jj] + first_nonzero + int(1.6 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) - point_down_narrow = peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./2) + point_down_narrow = peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) # -int(dis_to_next_down*1./2) else: dis_to_next_up = abs(peaks[jj] - peaks_neg[jj]) dis_to_next_down = abs(peaks[jj] - peaks_neg[jj + 1]) if peaks_values[jj] > mean_value_of_peaks - std_value_of_peaks / 2.0: - point_up = peaks[jj] + first_nonzero - int(1.1 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.1 * dis_to_next_up) # +int(dis_to_next_up*1./4.0) + point_down = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) # -int(dis_to_next_down*1./4.0) else: - point_up = peaks[jj] + first_nonzero - int(1.23 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down = peaks[jj] + first_nonzero + int(1.33 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.23 * dis_to_next_up) # +int(dis_to_next_up*1./4.0) + point_down = peaks[jj] + first_nonzero + int(1.33 * dis_to_next_down) # -int(dis_to_next_down*1./4.0) - point_down_narrow = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) ###-int(dis_to_next_down*1./2) + point_down_narrow = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) # -int(dis_to_next_down*1./2) if point_down_narrow >= img_patch.shape[0]: point_down_narrow = img_patch.shape[0] - 2 - + distances = [cv2.pointPolygonTest(contour_text_interest_copy, tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) for mj in range(len(xv))] distances = np.array(distances) @@ -820,9 +775,13 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): if point_up_rot2 < 0: point_up_rot2 = 0 - textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], [int(x_max_rot3), int(point_down_rot3)], [int(x_min_rot4), int(point_down_rot4)]])) + textline_boxes_rot.append(np.array( + [[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), 
int(point_up_rot2)], + [int(x_max_rot3), int(point_down_rot3)], [int(x_min_rot4), int(point_down_rot4)]])) - textline_boxes.append(np.array([[int(x_min), int(point_up)], [int(x_max), int(point_up)], [int(x_max), int(point_down)], [int(x_min), int(point_down)]])) + textline_boxes.append(np.array( + [[int(x_min), int(point_up)], [int(x_max), int(point_up)], [int(x_max), int(point_down)], + [int(x_min), int(point_down)]])) elif len(peaks) < 1: pass @@ -853,9 +812,12 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): if point_up_rot2 < 0: point_up_rot2 = 0 - textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], [int(x_max_rot3), int(point_down_rot3)], [int(x_min_rot4), int(point_down_rot4)]])) + textline_boxes_rot.append(np.array( + [[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], + [int(x_max_rot3), int(point_down_rot3)], [int(x_min_rot4), int(point_down_rot4)]])) - textline_boxes.append(np.array([[int(x_min), int(y_min)], [int(x_max), int(y_min)], [int(x_max), int(y_max)], [int(x_min), int(y_max)]])) + textline_boxes.append(np.array( + [[int(x_min), int(y_min)], [int(x_max), int(y_min)], [int(x_max), int(y_max)], [int(x_min), int(y_max)]])) elif len(peaks) == 2: dis_to_next = np.abs(peaks[1] - peaks[0]) @@ -870,7 +832,7 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): if point_down >= img_patch.shape[0]: point_down = img_patch.shape[0] - 2 point_up = peaks[jj] + first_nonzero - int(1.0 / 1.8 * dis_to_next) - + distances = [cv2.pointPolygonTest(contour_text_interest_copy, tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) for mj in range(len(xv))] distances = np.array(distances) @@ -902,9 +864,13 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): if point_up_rot2 < 0: point_up_rot2 = 0 - textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], [int(x_max_rot3), int(point_down_rot3)], [int(x_min_rot4), int(point_down_rot4)]])) + textline_boxes_rot.append(np.array( + [[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], + [int(x_max_rot3), int(point_down_rot3)], [int(x_min_rot4), int(point_down_rot4)]])) - textline_boxes.append(np.array([[int(x_min), int(point_up)], [int(x_max), int(point_up)], [int(x_max), int(point_down)], [int(x_min), int(point_down)]])) + textline_boxes.append(np.array( + [[int(x_min), int(point_up)], [int(x_max), int(point_up)], [int(x_max), int(point_down)], + [int(x_min), int(point_down)]])) else: for jj in range(len(peaks)): @@ -930,7 +896,7 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): point_up = peaks[jj] + first_nonzero - int(1.0 / 1.9 * dis_to_next_up) point_down = peaks[jj] + first_nonzero + int(1.0 / 1.9 * dis_to_next_down) - + distances = [cv2.pointPolygonTest(contour_text_interest_copy, tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) for mj in range(len(xv))] distances = np.array(distances) @@ -962,14 +928,18 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): if point_up_rot2 < 0: point_up_rot2 = 0 - textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], [int(x_max_rot3), int(point_down_rot3)], [int(x_min_rot4), int(point_down_rot4)]])) + textline_boxes_rot.append(np.array( + [[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], + 
[int(x_max_rot3), int(point_down_rot3)], [int(x_min_rot4), int(point_down_rot4)]])) - textline_boxes.append(np.array([[int(x_min), int(point_up)], [int(x_max), int(point_up)], [int(x_max), int(point_down)], [int(x_min), int(point_down)]])) + textline_boxes.append(np.array( + [[int(x_min), int(point_up)], [int(x_max), int(point_up)], [int(x_max), int(point_down)], + [int(x_min), int(point_down)]])) return peaks, textline_boxes_rot -def separate_lines_new_inside_tiles2(img_patch, thetha): +def separate_lines_new_inside_tiles2(img_patch, thetha): (h, w) = img_patch.shape[:2] center = (w // 2, h // 2) M = cv2.getRotationMatrix2D(center, -thetha, 1.0) @@ -998,7 +968,7 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): y = textline_patch_sum_along_width[:] # [first_nonzero:last_nonzero] y_padded = np.zeros(len(y) + 40) - y_padded[20 : len(y) + 20] = y + y_padded[20: len(y) + 20] = y x = np.array(range(len(y))) peaks_real, _ = find_peaks(gaussian_filter1d(y, 3), height=0) @@ -1009,7 +979,7 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): y_padded_smoothed_e = gaussian_filter1d(y_padded, 2) y_padded_up_to_down_e = -y_padded + np.max(y_padded) y_padded_up_to_down_padded_e = np.zeros(len(y_padded_up_to_down_e) + 40) - y_padded_up_to_down_padded_e[20 : len(y_padded_up_to_down_e) + 20] = y_padded_up_to_down_e + y_padded_up_to_down_padded_e[20: len(y_padded_up_to_down_e) + 20] = y_padded_up_to_down_e y_padded_up_to_down_padded_e = gaussian_filter1d(y_padded_up_to_down_padded_e, 2) peaks_e, _ = find_peaks(y_padded_smoothed_e, height=0) @@ -1028,10 +998,10 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): clusters_to_be_deleted = [] if len(arg_diff_cluster) > 0: - clusters_to_be_deleted.append(arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[0: arg_diff_cluster[0] + 1]) for i in range(len(arg_diff_cluster) - 1): - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : arg_diff_cluster[i + 1] + 1]) - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1: arg_diff_cluster[i + 1] + 1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1:]) if len(clusters_to_be_deleted) > 0: peaks_new_extra = [] @@ -1069,7 +1039,7 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): y_padded_smoothed = gaussian_filter1d(y_padded, sigma_gaus) y_padded_up_to_down = -y_padded + np.max(y_padded) y_padded_up_to_down_padded = np.zeros(len(y_padded_up_to_down) + 40) - y_padded_up_to_down_padded[20 : len(y_padded_up_to_down) + 20] = y_padded_up_to_down + y_padded_up_to_down_padded[20: len(y_padded_up_to_down) + 20] = y_padded_up_to_down y_padded_up_to_down_padded = gaussian_filter1d(y_padded_up_to_down_padded, sigma_gaus) peaks, _ = find_peaks(y_padded_smoothed, height=0) @@ -1092,10 +1062,11 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): if len(arg_diff_cluster) >= 2 and len(arg_diff_cluster) > 0: - clusters_to_be_deleted.append(arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[0: arg_diff_cluster[0] + 1]) for i in range(len(arg_diff_cluster) - 1): - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : arg_diff_cluster[i + 1] + 1]) - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 
1 :]) + clusters_to_be_deleted.append( + arg_neg_must_be_deleted[arg_diff_cluster[i] + 1: arg_diff_cluster[i + 1] + 1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1:]) elif len(arg_neg_must_be_deleted) >= 2 and len(arg_diff_cluster) == 0: clusters_to_be_deleted.append(arg_neg_must_be_deleted[:]) @@ -1150,8 +1121,8 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): std_value_of_peaks = np.std(y_padded_smoothed[peaks]) peaks_values = y_padded_smoothed[peaks] - ###peaks_neg = peaks_neg - 20 - 20 - ###peaks = peaks - 20 + # peaks_neg = peaks_neg - 20 - 20 + # peaks = peaks - 20 peaks_neg_true = peaks_neg[:] peaks_pos_true = peaks[:] @@ -1162,7 +1133,7 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): # print(peaks_neg_true) for i in range(len(peaks_neg_true)): - img_patch[peaks_neg_true[i] - 6 : peaks_neg_true[i] + 6, :] = 0 + img_patch[peaks_neg_true[i] - 6: peaks_neg_true[i] + 6, :] = 0 else: pass @@ -1172,17 +1143,18 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): peaks_pos_true = peaks_pos_true - 20 for i in range(len(peaks_pos_true)): - ##img_patch[peaks_pos_true[i]-8:peaks_pos_true[i]+8,:]=1 - img_patch[peaks_pos_true[i] - 6 : peaks_pos_true[i] + 6, :] = 1 + # img_patch[peaks_pos_true[i]-8:peaks_pos_true[i]+8,:]=1 + img_patch[peaks_pos_true[i] - 6: peaks_pos_true[i] + 6, :] = 1 else: pass kernel = np.ones((5, 5), np.uint8) # img_patch = cv2.erode(img_patch,kernel,iterations = 3) - #######################img_patch = cv2.erode(img_patch,kernel,iterations = 2) + # img_patch = cv2.erode(img_patch,kernel,iterations = 2) img_patch = cv2.erode(img_patch, kernel, iterations=1) return img_patch + def separate_lines_new_inside_tiles(img_path, thetha): (h, w) = img_path.shape[:2] center = (w // 2, h // 2) @@ -1202,14 +1174,14 @@ def separate_lines_new_inside_tiles(img_path, thetha): mada_n = img_path.sum(axis=1) - ##plt.plot(mada_n) - ##plt.show() + # plt.plot(mada_n) + # plt.show() first_nonzero = 0 # (next((i for i, x in enumerate(mada_n) if x), None)) y = mada_n[:] # [first_nonzero:last_nonzero] y_help = np.zeros(len(y) + 40) - y_help[20 : len(y) + 20] = y + y_help[20: len(y) + 20] = y x = np.array(range(len(y))) peaks_real, _ = find_peaks(gaussian_filter1d(y, 3), height=0) @@ -1221,7 +1193,7 @@ def separate_lines_new_inside_tiles(img_path, thetha): z = gaussian_filter1d(y_help, sigma_gaus) zneg_rev = -y_help + np.max(y_help) zneg = np.zeros(len(zneg_rev) + 40) - zneg[20 : len(zneg_rev) + 20] = zneg_rev + zneg[20: len(zneg_rev) + 20] = zneg_rev zneg = gaussian_filter1d(zneg, sigma_gaus) peaks, _ = find_peaks(z, height=0) @@ -1307,7 +1279,7 @@ def separate_lines_new_inside_tiles(img_path, thetha): # print(peaks_neg_true) for i in range(len(peaks_neg_true)): - img_path[peaks_neg_true[i] - 6 : peaks_neg_true[i] + 6, :] = 0 + img_path[peaks_neg_true[i] - 6: peaks_neg_true[i] + 6, :] = 0 else: pass @@ -1317,7 +1289,7 @@ def separate_lines_new_inside_tiles(img_path, thetha): peaks_pos_true = peaks_pos_true - 20 for i in range(len(peaks_pos_true)): - img_path[peaks_pos_true[i] - 8 : peaks_pos_true[i] + 8, :] = 1 + img_path[peaks_pos_true[i] - 8: peaks_pos_true[i] + 8, :] = 1 else: pass kernel = np.ones((5, 5), np.uint8) @@ -1326,6 +1298,7 @@ def separate_lines_new_inside_tiles(img_path, thetha): img_path = cv2.erode(img_path, kernel, iterations=2) return img_path + def separate_lines_vertical_cont(img_patch, contour_text_interest, thetha, box_ind, add_boxes_coor_into_textlines): kernel = np.ones((5, 5), np.uint8) 
pixel = 255 @@ -1346,7 +1319,7 @@ def separate_lines_vertical_cont(img_patch, contour_text_interest, thetha, box_i contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=max_area, min_area=min_area) cont_final = [] - ###print(add_boxes_coor_into_textlines,'ikki') + # print(add_boxes_coor_into_textlines,'ikki') for i in range(len(contours_imgs)): img_contour = np.zeros((cnts_images.shape[0], cnts_images.shape[1], 3)) img_contour = cv2.fillPoly(img_contour, pts=[contours_imgs[i]], color=(255, 255, 255)) @@ -1358,21 +1331,20 @@ def separate_lines_vertical_cont(img_patch, contour_text_interest, thetha, box_i _, threshrot = cv2.threshold(imgrayrot, 0, 255, 0) contours_text_rot, _ = cv2.findContours(threshrot.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - ##contour_text_copy[:, 0, 0] = contour_text_copy[:, 0, 0] - box_ind[ - ##0] - ##contour_text_copy[:, 0, 1] = contour_text_copy[:, 0, 1] - box_ind[1] - ##if add_boxes_coor_into_textlines: - ##print(np.shape(contours_text_rot[0]),'sjppo') - ##contours_text_rot[0][:, 0, 0]=contours_text_rot[0][:, 0, 0] + box_ind[0] - ##contours_text_rot[0][:, 0, 1]=contours_text_rot[0][:, 0, 1] + box_ind[1] + # contour_text_copy[:, 0, 0] = contour_text_copy[:, 0, 0] - box_ind[ + # 0] + # contour_text_copy[:, 0, 1] = contour_text_copy[:, 0, 1] - box_ind[1] + # if add_boxes_coor_into_textlines: + # print(np.shape(contours_text_rot[0]),'sjppo') + # contours_text_rot[0][:, 0, 0]=contours_text_rot[0][:, 0, 0] + box_ind[0] + # contours_text_rot[0][:, 0, 1]=contours_text_rot[0][:, 0, 1] + box_ind[1] cont_final.append(contours_text_rot[0]) - ##print(cont_final,'nadizzzz') + # print(cont_final,'nadizzzz') return None, cont_final def textline_contours_postprocessing(textline_mask, slope, contour_text_interest, box_ind, add_boxes_coor_into_textlines=False): - textline_mask = np.repeat(textline_mask[:, :, np.newaxis], 3, axis=2) * 255 textline_mask = textline_mask.astype(np.uint8) kernel = np.ones((5, 5), np.uint8) @@ -1398,7 +1370,7 @@ def textline_contours_postprocessing(textline_mask, slope, contour_text_interest y_help = 2 textline_mask_help = np.zeros((textline_mask.shape[0] + int(2 * y_help), textline_mask.shape[1] + int(2 * x_help), 3)) - textline_mask_help[y_help : y_help + textline_mask.shape[0], x_help : x_help + textline_mask.shape[1], :] = np.copy(textline_mask[:, :, :]) + textline_mask_help[y_help: y_help + textline_mask.shape[0], x_help: x_help + textline_mask.shape[1], :] = np.copy(textline_mask[:, :, :]) dst = rotate_image(textline_mask_help, slope) dst = dst[:, :, 0] @@ -1422,7 +1394,7 @@ def textline_contours_postprocessing(textline_mask, slope, contour_text_interest img_contour_help = np.zeros((img_contour.shape[0] + int(2 * y_help), img_contour.shape[1] + int(2 * x_help), 3)) - img_contour_help[y_help : y_help + img_contour.shape[0], x_help : x_help + img_contour.shape[1], :] = np.copy(img_contour[:, :, :]) + img_contour_help[y_help: y_help + img_contour.shape[0], x_help: x_help + img_contour.shape[1], :] = np.copy(img_contour[:, :, :]) img_contour_rot = rotate_image(img_contour_help, slope) @@ -1461,8 +1433,8 @@ def textline_contours_postprocessing(textline_mask, slope, contour_text_interest return contours_rotated_clean -def separate_lines_new2(img_path, thetha, num_col, slope_region, plotter=None): +def separate_lines_new2(img_path, thetha, num_col, slope_region, plotter=None): if num_col == 1: num_patches = int(img_path.shape[1] / 200.0) else: @@ -1547,9 +1519,10 @@ def separate_lines_new2(img_path, thetha, 
num_col, slope_region, plotter=None): img_int = np.zeros((img_xline.shape[0], img_xline.shape[1])) img_int[:, :] = img_xline[:, :] # img_patch_org[:,:,0] - img_resized = np.zeros((int(img_int.shape[0] * (1.2)), int(img_int.shape[1] * (3)))) + img_resized = np.zeros((int(img_int.shape[0] * 1.2), int(img_int.shape[1] * 3))) - img_resized[int(img_int.shape[0] * (0.1)) : int(img_int.shape[0] * (0.1)) + img_int.shape[0], int(img_int.shape[1] * (1)) : int(img_int.shape[1] * (1)) + img_int.shape[1]] = img_int[:, :] + img_resized[int(img_int.shape[0] * 0.1): int(img_int.shape[0] * 0.1) + img_int.shape[0], + int(img_int.shape[1] * 1): int(img_int.shape[1] * 1) + img_int.shape[1]] = img_int[:, :] # plt.imshow(img_xline) # plt.show() img_line_rotated = rotate_image(img_resized, slopes_tile_wise[i]) @@ -1560,240 +1533,233 @@ def separate_lines_new2(img_path, thetha, num_col, slope_region, plotter=None): img_patch_separated_returned = rotate_image(img_patch_separated, -slopes_tile_wise[i]) img_patch_separated_returned[:, :][img_patch_separated_returned[:, :] != 0] = 1 - img_patch_separated_returned_true_size = img_patch_separated_returned[int(img_int.shape[0] * (0.1)) : int(img_int.shape[0] * (0.1)) + img_int.shape[0], int(img_int.shape[1] * (1)) : int(img_int.shape[1] * (1)) + img_int.shape[1]] + img_patch_separated_returned_true_size = img_patch_separated_returned[ + int(img_int.shape[0] * 0.1): int(img_int.shape[0] * 0.1) + img_int.shape[0], + int(img_int.shape[1] * 1): int(img_int.shape[1] * 1) + img_int.shape[1]] - img_patch_separated_returned_true_size = img_patch_separated_returned_true_size[:, margin : length_x - margin] - img_patch_ineterst_revised[:, index_x_d + margin : index_x_u - margin] = img_patch_separated_returned_true_size + img_patch_separated_returned_true_size = img_patch_separated_returned_true_size[:, margin: length_x - margin] + img_patch_ineterst_revised[:, index_x_d + margin: index_x_u - margin] = img_patch_separated_returned_true_size # plt.imshow(img_patch_ineterst_revised) # plt.show() return img_patch_ineterst_revised -def return_deskew_slop(img_patch_org, sigma_des, main_page=False, plotter=None): +def return_deskew_slop(img_patch_org, sigma_des, main_page=False, plotter=None): if main_page and plotter: plotter.save_plot_of_textline_density(img_patch_org) - img_int=np.zeros((img_patch_org.shape[0],img_patch_org.shape[1])) - img_int[:,:]=img_patch_org[:,:]#img_patch_org[:,:,0] - - - - max_shape=np.max(img_int.shape) - img_resized=np.zeros((int( max_shape*(1.1) ) , int( max_shape*(1.1) ) )) - + img_int = np.zeros((img_patch_org.shape[0], img_patch_org.shape[1])) + img_int[:, :] = img_patch_org[:, :] # img_patch_org[:,:,0] - onset_x=int((img_resized.shape[1]-img_int.shape[1])/2.) - onset_y=int((img_resized.shape[0]-img_int.shape[0])/2.) + max_shape = np.max(img_int.shape) + img_resized = np.zeros((int(max_shape * 1.1), int(max_shape * 1.1))) + onset_x = int((img_resized.shape[1] - img_int.shape[1]) / 2.) + onset_y = int((img_resized.shape[0] - img_int.shape[0]) / 2.) 
- #img_resized=np.zeros((int( img_int.shape[0]*(1.8) ) , int( img_int.shape[1]*(2.6) ) )) + # img_resized = np.zeros((int( img_int.shape[0]*(1.8) ) , int( img_int.shape[1]*(2.6) ) )) + # img_resized[int(img_int.shape[0]*(.4)):int(img_int.shape[0]*(.4))+img_int.shape[0], int(img_int.shape[1]*(.8)):int(img_int.shape[1]*(.8))+img_int.shape[1]]=img_int[:,:] + img_resized[onset_y:onset_y + img_int.shape[0], onset_x:onset_x + img_int.shape[1]] = img_int[:, :] + # print(img_resized.shape,'img_resizedshape') + # plt.imshow(img_resized) + # plt.show() - #img_resized[ int( img_int.shape[0]*(.4)):int( img_int.shape[0]*(.4))+img_int.shape[0] , int( img_int.shape[1]*(.8)):int( img_int.shape[1]*(.8))+img_int.shape[1] ]=img_int[:,:] - img_resized[ onset_y:onset_y+img_int.shape[0] , onset_x:onset_x+img_int.shape[1] ]=img_int[:,:] - - #print(img_resized.shape,'img_resizedshape') - #plt.imshow(img_resized) - #plt.show() - - if main_page and img_patch_org.shape[1]>img_patch_org.shape[0]: + if main_page and img_patch_org.shape[1] > img_patch_org.shape[0]: - #plt.imshow(img_resized) - #plt.show() - angels=np.array([-45, 0 , 45 , 90 , ])#np.linspace(-12,12,100)#np.array([0 , 45 , 90 , -45]) + # plt.imshow(img_resized) + # plt.show() + angels = np.array([-45, 0, 45, 90, ]) # np.linspace(-12,12,100)#np.array([0 , 45 , 90 , -45]) - var_res=[] + var_res = [] for rot in angels: - img_rot=rotate_image(img_resized,rot) - #plt.imshow(img_rot) - #plt.show() - img_rot[img_rot!=0]=1 - #neg_peaks,var_spectrum=self.find_num_col_deskew(img_rot,sigma_des,20.3 ) - #print(var_spectrum,'var_spectrum') + img_rot = rotate_image(img_resized, rot) + # plt.imshow(img_rot) + # plt.show() + img_rot[img_rot != 0] = 1 + # neg_peaks,var_spectrum=self.find_num_col_deskew(img_rot,sigma_des,20.3 ) + # print(var_spectrum,'var_spectrum') try: - var_spectrum=find_num_col_deskew(img_rot,sigma_des,20.3 ) - ##print(rot,var_spectrum,'var_spectrum') + var_spectrum = find_num_col_deskew(img_rot, sigma_des, 20.3) + # print(rot,var_spectrum,'var_spectrum') except: - var_spectrum=0 + var_spectrum = 0 var_res.append(var_spectrum) try: - var_res=np.array(var_res) - ang_int=angels[np.argmax(var_res)]#angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] + var_res = np.array(var_res) + ang_int = angels[np.argmax( + var_res)] # angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] except: - ang_int=0 + ang_int = 0 + angels = np.linspace(ang_int - 22.5, ang_int + 22.5, 100) - angels=np.linspace(ang_int-22.5,ang_int+22.5,100) - - var_res=[] + var_res = [] for rot in angels: - img_rot=rotate_image(img_resized,rot) - ##plt.imshow(img_rot) - ##plt.show() - img_rot[img_rot!=0]=1 + img_rot = rotate_image(img_resized, rot) + # plt.imshow(img_rot) + # plt.show() + img_rot[img_rot != 0] = 1 try: - var_spectrum=find_num_col_deskew(img_rot,sigma_des,20.3 ) + var_spectrum = find_num_col_deskew(img_rot, sigma_des, 20.3) except: - var_spectrum=0 + var_spectrum = 0 var_res.append(var_spectrum) try: - var_res=np.array(var_res) - ang_int=angels[np.argmax(var_res)]#angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] + var_res = np.array(var_res) + ang_int = angels[np.argmax(var_res)] # angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] except: - ang_int=0 - - elif main_page and img_patch_org.shape[1]<=img_patch_org.shape[0]: + ang_int = 0 - #plt.imshow(img_resized) - #plt.show() - angels=np.linspace(-12,12,100)#np.array([0 , 45 , 90 , -45]) + elif 
main_page and img_patch_org.shape[1] <= img_patch_org.shape[0]: + # plt.imshow(img_resized) + # plt.show() + angels = np.linspace(-12, 12, 100) # np.array([0 , 45 , 90 , -45]) - var_res=[] + var_res = [] for rot in angels: - img_rot=rotate_image(img_resized,rot) - #plt.imshow(img_rot) - #plt.show() - img_rot[img_rot!=0]=1 - #neg_peaks,var_spectrum=self.find_num_col_deskew(img_rot,sigma_des,20.3 ) - #print(var_spectrum,'var_spectrum') + img_rot = rotate_image(img_resized, rot) + # plt.imshow(img_rot) + # plt.show() + img_rot[img_rot != 0] = 1 + # neg_peaks,var_spectrum=self.find_num_col_deskew(img_rot,sigma_des,20.3 ) + # print(var_spectrum,'var_spectrum') try: - var_spectrum=find_num_col_deskew(img_rot,sigma_des,20.3 ) + var_spectrum = find_num_col_deskew(img_rot, sigma_des, 20.3) except: - var_spectrum=0 + var_spectrum = 0 var_res.append(var_spectrum) - if plotter: plotter.save_plot_of_rotation_angle(angels, var_res) try: - var_res=np.array(var_res) - ang_int=angels[np.argmax(var_res)]#angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] + var_res = np.array(var_res) + ang_int = angels[np.argmax(var_res)] # angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] except: - ang_int=0 + ang_int = 0 - early_slope_edge=11 - if abs(ang_int)>early_slope_edge and ang_int<0: - angels=np.linspace(-90,-12,100) - var_res=[] + early_slope_edge = 11 + if abs(ang_int) > early_slope_edge and ang_int < 0: + angels = np.linspace(-90, -12, 100) + var_res = [] for rot in angels: - img_rot=rotate_image(img_resized,rot) - ##plt.imshow(img_rot) - ##plt.show() - img_rot[img_rot!=0]=1 + img_rot = rotate_image(img_resized, rot) + # plt.imshow(img_rot) + # plt.show() + img_rot[img_rot != 0] = 1 try: - var_spectrum=find_num_col_deskew(img_rot,sigma_des,20.3 ) + var_spectrum = find_num_col_deskew(img_rot, sigma_des, 20.3) except: - var_spectrum=0 + var_spectrum = 0 var_res.append(var_spectrum) try: - var_res=np.array(var_res) - ang_int=angels[np.argmax(var_res)]#angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] + var_res = np.array(var_res) + ang_int = angels[np.argmax(var_res)] # angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] except: - ang_int=0 + ang_int = 0 - elif abs(ang_int)>early_slope_edge and ang_int>0: + elif abs(ang_int) > early_slope_edge and ang_int > 0: - angels=np.linspace(90,12,100) - var_res=[] + angels = np.linspace(90, 12, 100) + var_res = [] for rot in angels: - img_rot=rotate_image(img_resized,rot) - ##plt.imshow(img_rot) - ##plt.show() - img_rot[img_rot!=0]=1 + img_rot = rotate_image(img_resized, rot) + # plt.imshow(img_rot) + # plt.show() + img_rot[img_rot != 0] = 1 try: - var_spectrum=find_num_col_deskew(img_rot,sigma_des,20.3 ) - #print(indexer,'indexer') + var_spectrum = find_num_col_deskew(img_rot, sigma_des, 20.3) + # print(indexer,'indexer') except: - var_spectrum=0 + var_spectrum = 0 var_res.append(var_spectrum) try: - var_res=np.array(var_res) - ang_int=angels[np.argmax(var_res)]#angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] + var_res = np.array(var_res) + ang_int = angels[np.argmax(var_res)] # angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] except: - ang_int=0 + ang_int = 0 else: - angels=np.linspace(-25,25,60) - var_res=[] - indexer=0 + angels = np.linspace(-25, 25, 60) + var_res = [] + indexer = 0 for rot in angels: - img_rot=rotate_image(img_resized,rot) - #plt.imshow(img_rot) 
- #plt.show() - img_rot[img_rot!=0]=1 - #neg_peaks,var_spectrum=self.find_num_col_deskew(img_rot,sigma_des,20.3 ) - #print(var_spectrum,'var_spectrum') + img_rot = rotate_image(img_resized, rot) + # plt.imshow(img_rot) + # plt.show() + img_rot[img_rot != 0] = 1 + # neg_peaks,var_spectrum=self.find_num_col_deskew(img_rot,sigma_des,20.3 ) + # print(var_spectrum,'var_spectrum') try: - var_spectrum=find_num_col_deskew(img_rot,sigma_des,20.3 ) + var_spectrum = find_num_col_deskew(img_rot, sigma_des, 20.3) except: - var_spectrum=0 + var_spectrum = 0 var_res.append(var_spectrum) try: - var_res=np.array(var_res) - ang_int=angels[np.argmax(var_res)]#angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] + var_res = np.array(var_res) + ang_int = angels[np.argmax(var_res)] # angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] except: - ang_int=0 + ang_int = 0 - #plt.plot(var_res) - #plt.show() - ##plt.plot(mom3_res) - ##plt.show() - #print(ang_int,'ang_int111') + # plt.plot(var_res) + # plt.show() + # plt.plot(mom3_res) + # plt.show() + # print(ang_int,'ang_int111') - early_slope_edge=22 - if abs(ang_int)>early_slope_edge and ang_int<0: + early_slope_edge = 22 + if abs(ang_int) > early_slope_edge and ang_int < 0: - angels=np.linspace(-90,-25,60) + angels = np.linspace(-90, -25, 60) - var_res=[] + var_res = [] for rot in angels: - img_rot=rotate_image(img_resized,rot) - ##plt.imshow(img_rot) - ##plt.show() - img_rot[img_rot!=0]=1 + img_rot = rotate_image(img_resized, rot) + # plt.imshow(img_rot) + # plt.show() + img_rot[img_rot != 0] = 1 try: - var_spectrum=find_num_col_deskew(img_rot,sigma_des,20.3 ) + var_spectrum = find_num_col_deskew(img_rot, sigma_des, 20.3) except: - var_spectrum=0 + var_spectrum = 0 var_res.append(var_spectrum) try: - var_res=np.array(var_res) - ang_int=angels[np.argmax(var_res)]#angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] + var_res = np.array(var_res) + ang_int = angels[np.argmax(var_res)] # angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] except: - ang_int=0 + ang_int = 0 - elif abs(ang_int)>early_slope_edge and ang_int>0: + elif abs(ang_int) > early_slope_edge and ang_int > 0: - angels=np.linspace(90,25,60) + angels = np.linspace(90, 25, 60) - var_res=[] + var_res = [] - indexer=0 + indexer = 0 for rot in angels: - img_rot=rotate_image(img_resized,rot) - ##plt.imshow(img_rot) - ##plt.show() - img_rot[img_rot!=0]=1 + img_rot = rotate_image(img_resized, rot) + # plt.imshow(img_rot) + # plt.show() + img_rot[img_rot != 0] = 1 try: - var_spectrum=find_num_col_deskew(img_rot,sigma_des,20.3 ) - #print(indexer,'indexer') + var_spectrum = find_num_col_deskew(img_rot, sigma_des, 20.3) + # print(indexer,'indexer') except: - var_spectrum=0 + var_spectrum = 0 var_res.append(var_spectrum) try: - var_res=np.array(var_res) - ang_int=angels[np.argmax(var_res)]#angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] + var_res = np.array(var_res) + ang_int = angels[np.argmax(var_res)] # angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] except: - ang_int=0 + ang_int = 0 return ang_int - diff --git a/src/eynollah/utils/xml.py b/src/eynollah/utils/xml.py index 0386b25..009edd2 100644 --- a/src/eynollah/utils/xml.py +++ b/src/eynollah/utils/xml.py @@ -29,6 +29,7 @@ from ocrd_models.ocrd_page import ( to_xml) + def create_page_xml(imageFilename, height, width): now = datetime.now() pcgts = 
PcGtsType( @@ -46,6 +47,7 @@ def create_page_xml(imageFilename, height, width): )) return pcgts + def xml_reading_order(page, order_of_texts, id_of_marginalia): region_order = ReadingOrderType() og = OrderedGroupType(id="ro357564684568544579089") @@ -59,6 +61,7 @@ def xml_reading_order(page, order_of_texts, id_of_marginalia): og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=id_marginal)) region_counter.inc('region') + def order_and_id_of_texts(found_polygons_text_region, found_polygons_text_region_h, matrix_of_orders, indexes_sorted, index_of_types, kind_of_texts, ref_point): indexes_sorted = np.array(indexes_sorted) index_of_types = np.array(index_of_types) diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index 4487af5..e48cf43 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -8,21 +8,22 @@ from .utils.counter import EynollahIdCounter from ocrd_utils import getLogger from ocrd_models.ocrd_page import ( - BorderType, - CoordsType, - PcGtsType, - TextLineType, - TextRegionType, - ImageRegionType, - TableRegionType, - SeparatorRegionType, - to_xml - ) + BorderType, + CoordsType, + PcGtsType, + TextLineType, + TextRegionType, + ImageRegionType, + TableRegionType, + SeparatorRegionType, + to_xml +) import numpy as np + class EynollahXmlWriter(): - def __init__(self, *, dir_out, image_filename, curved_line,textline_light, pcgts=None): + def __init__(self, *, dir_out, image_filename, curved_line, textline_light, pcgts=None): self.logger = getLogger('eynollah.writer') self.counter = EynollahIdCounter() self.dir_out = dir_out @@ -30,10 +31,10 @@ class EynollahXmlWriter(): self.curved_line = curved_line self.textline_light = textline_light self.pcgts = pcgts - self.scale_x = None # XXX set outside __init__ - self.scale_y = None # XXX set outside __init__ - self.height_org = None # XXX set outside __init__ - self.width_org = None # XXX set outside __init__ + self.scale_x = None # XXX set outside __init__ + self.scale_y = None # XXX set outside __init__ + self.height_org = None # XXX set outside __init__ + self.width_org = None # XXX set outside __init__ @property def image_filename_stem(self): @@ -50,7 +51,7 @@ class EynollahXmlWriter(): else: points_page_print += str(int((contour[0][0]) / self.scale_x)) points_page_print += ',' - points_page_print += str(int((contour[0][1] ) / self.scale_y)) + points_page_print += str(int((contour[0][1]) / self.scale_y)) points_page_print = points_page_print + ' ' return points_page_print[:-1] @@ -63,11 +64,11 @@ class EynollahXmlWriter(): for l in range(len(all_found_textline_polygons_marginals[marginal_idx][j])): if not (self.curved_line or self.textline_light): if len(all_found_textline_polygons_marginals[marginal_idx][j][l]) == 2: - textline_x_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x) ) - textline_y_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y) ) + textline_x_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x)) + textline_y_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y)) else: - textline_x_coord = max(0, 
int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x) ) - textline_y_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y) ) + textline_x_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x)) + textline_y_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y)) points_co += str(textline_x_coord) points_co += ',' points_co += str(textline_y_coord) @@ -121,16 +122,16 @@ class EynollahXmlWriter(): else: points_co += str(int((contour_textline[0][0] + page_coord[2]) / self.scale_x)) points_co += ',' - points_co += str(int((contour_textline[0][1] + page_coord[0])/self.scale_y)) + points_co += str(int((contour_textline[0][1] + page_coord[0]) / self.scale_y)) elif (self.curved_line or self.textline_light) and np.abs(slopes[region_idx]) > 45: - if len(contour_textline)==2: - points_co += str(int((contour_textline[0] + region_bboxes[2] + page_coord[2])/self.scale_x)) + if len(contour_textline) == 2: + points_co += str(int((contour_textline[0] + region_bboxes[2] + page_coord[2]) / self.scale_x)) points_co += ',' - points_co += str(int((contour_textline[1] + region_bboxes[0] + page_coord[0])/self.scale_y)) + points_co += str(int((contour_textline[1] + region_bboxes[0] + page_coord[0]) / self.scale_y)) else: - points_co += str(int((contour_textline[0][0] + region_bboxes[2]+page_coord[2])/self.scale_x)) + points_co += str(int((contour_textline[0][0] + region_bboxes[2] + page_coord[2]) / self.scale_x)) points_co += ',' - points_co += str(int((contour_textline[0][1] + region_bboxes[0]+page_coord[0])/self.scale_y)) + points_co += str(int((contour_textline[0][1] + region_bboxes[0] + page_coord[0]) / self.scale_y)) points_co += ' ' coords.set_points(points_co[:-1]) @@ -140,7 +141,11 @@ class EynollahXmlWriter(): with open(out_fname, 'w') as f: f.write(to_xml(pcgts)) - def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, found_polygons_tables): + def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, + all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, + found_polygons_marginals, all_found_textline_polygons_marginals, + all_box_coord_marginals, slopes, slopes_marginals, cont_page, + polygons_lines_to_be_written_in_xml, found_polygons_tables): self.logger.debug('enter build_pagexml_no_full_layout') # create the file structure @@ -156,14 +161,13 @@ class EynollahXmlWriter(): for mm in range(len(found_polygons_text_region)): textregion = TextRegionType(id=counter.next_region_id, type_='paragraph', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord)), - ) + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord)),) page.add_TextRegion(textregion) self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, all_box_coord, slopes, 
counter) for mm in range(len(found_polygons_marginals)): marginal = TextRegionType(id=counter.next_region_id, type_='marginalia', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals[mm], page_coord))) + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals[mm], page_coord))) page.add_TextRegion(marginal) self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals, mm, page_coord, all_box_coord_marginals, slopes_marginals, counter) @@ -173,27 +177,27 @@ class EynollahXmlWriter(): points_co = '' for lmm in range(len(found_polygons_text_region_img[mm])): try: - points_co += str(int((found_polygons_text_region_img[mm][lmm,0,0] + page_coord[2]) / self.scale_x)) + points_co += str(int((found_polygons_text_region_img[mm][lmm, 0, 0] + page_coord[2]) / self.scale_x)) points_co += ',' - points_co += str(int((found_polygons_text_region_img[mm][lmm,0,1] + page_coord[0]) / self.scale_y)) + points_co += str(int((found_polygons_text_region_img[mm][lmm, 0, 1] + page_coord[0]) / self.scale_y)) points_co += ' ' except: - points_co += str(int((found_polygons_text_region_img[mm][lmm][0] + page_coord[2])/ self.scale_x )) + points_co += str(int((found_polygons_text_region_img[mm][lmm][0] + page_coord[2]) / self.scale_x)) points_co += ',' - points_co += str(int((found_polygons_text_region_img[mm][lmm][1] + page_coord[0])/ self.scale_y )) + points_co += str(int((found_polygons_text_region_img[mm][lmm][1] + page_coord[0]) / self.scale_y)) points_co += ' ' - + img_region.get_Coords().set_points(points_co[:-1]) - + for mm in range(len(polygons_lines_to_be_written_in_xml)): sep_hor = SeparatorRegionType(id=counter.next_region_id, Coords=CoordsType()) page.add_SeparatorRegion(sep_hor) points_co = '' for lmm in range(len(polygons_lines_to_be_written_in_xml[mm])): - points_co += str(int((polygons_lines_to_be_written_in_xml[mm][lmm,0,0] ) / self.scale_x)) + points_co += str(int((polygons_lines_to_be_written_in_xml[mm][lmm, 0, 0]) / self.scale_x)) points_co += ',' - points_co += str(int((polygons_lines_to_be_written_in_xml[mm][lmm,0,1] ) / self.scale_y)) + points_co += str(int((polygons_lines_to_be_written_in_xml[mm][lmm, 0, 1]) / self.scale_y)) points_co += ' ' sep_hor.get_Coords().set_points(points_co[:-1]) for mm in range(len(found_polygons_tables)): @@ -201,15 +205,21 @@ class EynollahXmlWriter(): page.add_TableRegion(tab_region) points_co = '' for lmm in range(len(found_polygons_tables[mm])): - points_co += str(int((found_polygons_tables[mm][lmm,0,0] + page_coord[2]) / self.scale_x)) + points_co += str(int((found_polygons_tables[mm][lmm, 0, 0] + page_coord[2]) / self.scale_x)) points_co += ',' - points_co += str(int((found_polygons_tables[mm][lmm,0,1] + page_coord[0]) / self.scale_y)) + points_co += str(int((found_polygons_tables[mm][lmm, 0, 1] + page_coord[0]) / self.scale_y)) points_co += ' ' tab_region.get_Coords().set_points(points_co[:-1]) return pcgts - def build_pagexml_full_layout(self, found_polygons_text_region, found_polygons_text_region_h, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml): + def build_pagexml_full_layout(self, found_polygons_text_region, found_polygons_text_region_h, 
page_coord, + order_of_texts, id_of_texts, all_found_textline_polygons, + all_found_textline_polygons_h, all_box_coord, all_box_coord_h, + found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, + found_polygons_marginals, all_found_textline_polygons_marginals, + all_box_coord_marginals, slopes, slopes_h, slopes_marginals, cont_page, + polygons_lines_to_be_written_in_xml): self.logger.debug('enter build_pagexml_full_layout') # create the file structure @@ -224,35 +234,38 @@ class EynollahXmlWriter(): for mm in range(len(found_polygons_text_region)): textregion = TextRegionType(id=counter.next_region_id, type_='paragraph', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord))) + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord))) page.add_TextRegion(textregion) self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, all_box_coord, slopes, counter) self.logger.debug('len(found_polygons_text_region_h) %s', len(found_polygons_text_region_h)) for mm in range(len(found_polygons_text_region_h)): textregion = TextRegionType(id=counter.next_region_id, type_='header', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_h[mm], page_coord))) + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_h[mm], page_coord))) page.add_TextRegion(textregion) self.serialize_lines_in_region(textregion, all_found_textline_polygons_h, mm, page_coord, all_box_coord_h, slopes_h, counter) for mm in range(len(found_polygons_marginals)): marginal = TextRegionType(id=counter.next_region_id, type_='marginalia', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals[mm], page_coord))) + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals[mm], page_coord))) page.add_TextRegion(marginal) self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals, mm, page_coord, all_box_coord_marginals, slopes_marginals, counter) for mm in range(len(found_polygons_drop_capitals)): page.add_TextRegion(TextRegionType(id=counter.next_region_id, type_='drop-capital', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_drop_capitals[mm], page_coord)))) + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_drop_capitals[mm], page_coord)))) for mm in range(len(found_polygons_text_region_img)): - page.add_ImageRegion(ImageRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_img[mm], page_coord)))) - + page.add_ImageRegion(ImageRegionType(id=counter.next_region_id, Coords=CoordsType( + points=self.calculate_polygon_coords(found_polygons_text_region_img[mm], page_coord)))) + for mm in range(len(polygons_lines_to_be_written_in_xml)): - page.add_SeparatorRegion(ImageRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(polygons_lines_to_be_written_in_xml[mm], [0 , 0, 0, 0])))) - + page.add_SeparatorRegion(ImageRegionType(id=counter.next_region_id, Coords=CoordsType( + points=self.calculate_polygon_coords(polygons_lines_to_be_written_in_xml[mm], [0, 0, 0, 0])))) + for mm in range(len(found_polygons_tables)): - page.add_TableRegion(TableRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_tables[mm], page_coord)))) + 
page.add_TableRegion(TableRegionType(id=counter.next_region_id, Coords=CoordsType( + points=self.calculate_polygon_coords(found_polygons_tables[mm], page_coord)))) return pcgts @@ -268,6 +281,5 @@ class EynollahXmlWriter(): coords += str(int((value_bbox[0][0] + page_coord[2]) / self.scale_x)) coords += ',' coords += str(int((value_bbox[0][1] + page_coord[0]) / self.scale_y)) - coords=coords + ' ' + coords = coords + ' ' return coords[:-1] - diff --git a/tests/base.py b/tests/base.py index 9de35ef..841355d 100644 --- a/tests/base.py +++ b/tests/base.py @@ -10,12 +10,14 @@ from unittest import TestCase as VanillaTestCase, skip, main as unittests_main import pytest from ocrd_utils import disableLogging, initLogging + def main(fn=None): if fn: sys.exit(pytest.main([fn])) else: unittests_main() + class TestCase(VanillaTestCase): @classmethod @@ -26,6 +28,7 @@ class TestCase(VanillaTestCase): disableLogging() initLogging() + class CapturingTestCase(TestCase): """ A TestCase that needs to capture stderr/stdout and invoke click CLI. @@ -42,7 +45,7 @@ class CapturingTestCase(TestCase): """ self.capture_out_err() # XXX snapshot just before executing the CLI code = 0 - sys.argv[1:] = args # XXX necessary because sys.argv reflects pytest args not cli args + sys.argv[1:] = args # XXX necessary because sys.argv reflects pytest args not cli args try: cli.main(args=args) except SystemExit as e: diff --git a/tests/test_counter.py b/tests/test_counter.py index f00d2d5..9102799 100644 --- a/tests/test_counter.py +++ b/tests/test_counter.py @@ -1,6 +1,7 @@ from tests.base import main from src.eynollah.utils.counter import EynollahIdCounter + def test_counter_string(): c = EynollahIdCounter() assert c.next_region_id == 'region_0001' @@ -11,6 +12,7 @@ def test_counter_string(): assert c.region_id(999) == 'region_0999' assert c.line_id(999, 888) == 'region_0999_line_0888' + def test_counter_init(): c = EynollahIdCounter(region_idx=2) assert c.get('region') == 2 @@ -19,6 +21,7 @@ def test_counter_init(): c.reset() assert c.get('region') == 2 + def test_counter_methods(): c = EynollahIdCounter() assert c.get('region') == 0 @@ -29,5 +32,6 @@ def test_counter_methods(): c.inc('region', -9) assert c.get('region') == 1 + if __name__ == '__main__': main(__file__) diff --git a/tests/test_dpi.py b/tests/test_dpi.py index 62397c7..0ff347b 100644 --- a/tests/test_dpi.py +++ b/tests/test_dpi.py @@ -3,9 +3,11 @@ from pathlib import Path from src.eynollah.utils.pil_cv2 import check_dpi from tests.base import main + def test_dpi(): fpath = str(Path(__file__).parent.joinpath('resources', 'kant_aufklaerung_1784_0020.tif')) assert 230 == check_dpi(cv2.imread(fpath)) + if __name__ == '__main__': main(__file__) diff --git a/tests/test_run.py b/tests/test_run.py index b64963d..8934556 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -8,6 +8,7 @@ testdir = Path(__file__).parent.resolve() EYNOLLAH_MODELS = environ.get('EYNOLLAH_MODELS', str(testdir.joinpath('..', 'models_eynollah').resolve())) + class TestEynollahRun(TestCase): def test_full_run(self): @@ -20,5 +21,6 @@ class TestEynollahRun(TestCase): print(code, out, err) assert not code + if __name__ == '__main__': main(__file__) diff --git a/tests/test_xml.py b/tests/test_xml.py index 78e22ed..68e6f36 100644 --- a/tests/test_xml.py +++ b/tests/test_xml.py @@ -4,11 +4,13 @@ from ocrd_models.ocrd_page import to_xml PAGE_2019 = 'http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15' + def test_create_xml(): pcgts = create_page_xml('/path/to/img.tif', 100, 100) 
     xmlstr = to_xml(pcgts)
     assert 'xmlns:pc="%s"' % PAGE_2019 in xmlstr
     assert 'Metadata' in xmlstr
 
+
 if __name__ == '__main__':
     main([__file__])
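The separate_lines.py hunks above all reformat the same brute-force skew search: rotate the binarized crop over a range of candidate angles, score each rotation with find_num_col_deskew, and keep the angle with the highest score (falling back to a steeper sweep when the first estimate exceeds early_slope_edge). The snippet below is a minimal sketch of that idea only, not eynollah's implementation: it assumes a plain row-projection variance as the score and uses scipy in place of the project's rotate_image helper.

    import numpy as np
    from scipy.ndimage import rotate, gaussian_filter1d

    def estimate_skew_angle(binary_img, angles=None, sigma=3.0):
        # Brute-force search: the rotation whose horizontal projection profile
        # has the largest variance separates the text lines most sharply.
        if angles is None:
            angles = np.linspace(-12, 12, 100)  # same coarse range as the code above
        scores = []
        for ang in angles:
            rotated = rotate(binary_img, ang, reshape=False, order=0)
            profile = gaussian_filter1d(rotated.sum(axis=1).astype(float), sigma)
            scores.append(np.var(profile))
        return float(angles[int(np.argmax(scores))])

A second pass over np.linspace(-90, -12, 100) or np.linspace(90, 12, 100), as in the hunks above, would refine the estimate for strongly rotated pages.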
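The xml.py hunk only inserts blank lines around xml_reading_order, but the pattern it touches is worth spelling out: the reading order is an OrderedGroupType filled with RegionRefIndexedType entries, one per region id, indexed by a running counter. A hedged sketch of that pattern with the region ids passed in directly; the set_OrderedGroup/set_ReadingOrder wiring is assumed from the generated ocrd_models API and is not visible in this hunk.

    from ocrd_models.ocrd_page import (
        ReadingOrderType,
        OrderedGroupType,
        RegionRefIndexedType,
    )

    def add_reading_order(page, region_ids):
        # Attach an ordered reading order to a PAGE PageType object (sketch).
        region_order = ReadingOrderType()
        ordered_group = OrderedGroupType(id="ro_1")   # arbitrary id, for illustration only
        region_order.set_OrderedGroup(ordered_group)  # assumed generateDS-style setter
        page.set_ReadingOrder(region_order)           # assumed generateDS-style setter
        for index, region_id in enumerate(region_ids):
            ordered_group.add_RegionRefIndexed(
                RegionRefIndexedType(index=str(index), regionRef=region_id))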
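Most of the writer.py churn is the same core coordinate transform written out inline over and over: shift each contour point by the crop offset (page_coord[2] horizontally, page_coord[0] vertically), divide by scale_x/scale_y, and join the results into a PAGE-XML points string. A compact sketch of that transform under a hypothetical helper name; it accepts both flat (x, y) pairs and OpenCV-style (N, 1, 2) contours, which is why the diff above keeps branching on len(...) == 2.

    import numpy as np

    def to_points_str(polygon, page_coord, scale_x, scale_y):
        # Map contour points from the cropped/scaled image back to the original
        # page and serialize them as a PAGE-XML 'points' attribute value.
        pts = np.asarray(polygon, dtype=float).reshape(-1, 2)
        xs = ((pts[:, 0] + page_coord[2]) / scale_x).astype(int)
        ys = ((pts[:, 1] + page_coord[0]) / scale_y).astype(int)
        return " ".join("%d,%d" % (x, y) for x, y in zip(xs, ys))

Something like coords.set_points(to_points_str(contour, page_coord, self.scale_x, self.scale_y)) could stand in for the string-concatenation loops, but that refactor is outside the scope of this style-only patch.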
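The test_counter.py changes are blank lines only, but the assertions they separate document the id scheme the writer relies on: region_%04d for regions and region_%04d_line_%04d for lines. The class below is a toy re-implementation of that contract for illustration, not the package's EynollahIdCounter.

    class ToyIdCounter:
        # Matches the behaviour asserted in tests/test_counter.py (sketch only).
        def __init__(self, region_idx=0, line_idx=0):
            self._start = {'region': region_idx, 'line': line_idx}
            self._counters = dict(self._start)

        def reset(self):
            self._counters = dict(self._start)

        def inc(self, name, by=1):
            self._counters[name] += by

        def get(self, name):
            return self._counters[name]

        def region_id(self, region_idx=None):
            if region_idx is None:
                region_idx = self._counters['region']
            return 'region_%04d' % region_idx

        def line_id(self, region_idx, line_idx):
            return 'region_%04d_line_%04d' % (region_idx, line_idx)

        @property
        def next_region_id(self):
            self.inc('region')
            return self.region_id()

ToyIdCounter().next_region_id == 'region_0001' and ToyIdCounter(region_idx=2).get('region') == 2 reproduce the first assertions in the tests above.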