diff --git a/README.md b/README.md index 1720f7f..d57e06b 100644 --- a/README.md +++ b/README.md @@ -17,11 +17,12 @@ * Detection of reading order (left-to-right or right-to-left) * Output in [PAGE-XML](https://github.com/PRImA-Research-Lab/PAGE-XML) * [OCR-D](https://github.com/qurator-spk/eynollah#use-as-ocr-d-processor) interface +* [Examples](https://github.com/qurator-spk/eynollah/wiki#examples) :warning: Development is currently focused on achieving the best possible quality of results for a wide variety of historical documents and therefore processing can be very slow. We aim to improve this, but contributions are welcome. ## Installation -Python `3.8-3.11` with Tensorflow `2.12-2.15` on Linux are currently supported. +Python versions `3.8-3.11` with Tensorflow versions `<2.16` on Linux are currently supported. For (limited) GPU support the CUDA toolkit needs to be installed. @@ -38,7 +39,7 @@ git clone git@github.com:qurator-spk/eynollah.git cd eynollah; pip install -e . ``` -Alternatively, you can run `make install` or `make install-dev` for editable installation. +Alternatively, run `make install` or `make install-dev` for editable installation. ## Models Pre-trained models can be downloaded from [qurator-data.de](https://qurator-data.de/eynollah/) or [huggingface](https://huggingface.co/SBB?search_models=eynollah). @@ -46,9 +47,9 @@ Pre-trained models can be downloaded from [qurator-data.de](https://qurator-data ## Train 🚧 **Work in progress** -In case you want to train your own model, have a look at [`sbb_pixelwise_segmentation`](https://github.com/qurator-spk/sbb_pixelwise_segmentation). +In case you want to train your own model, have a look at [`train`](https://github.com/qurator-spk/eynollah/tree/main/eynollah/eynollah/train). -## Usage +## Use The command-line interface can be called like this: ```sh @@ -82,7 +83,6 @@ If no option is set, the tool performs layout detection of main regions (backgro The best output quality is produced when RGB images are used as input rather than greyscale or binarized images. #### Use as OCR-D processor -🚧 **Work in progress** Eynollah ships with a CLI interface to be used as [OCR-D](https://ocr-d.de) processor. @@ -104,7 +104,7 @@ uses the original (RGB) image despite any binarization that may have occured in Please check the [wiki](https://github.com/qurator-spk/eynollah/wiki). 
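For quick reference, a minimal invocation might look like the following (illustrative only: `-i` and `--out` are taken from the CLI options in this PR, while `-m` as the flag for the models directory is an assumption; check `eynollah --help` for the authoritative option names):
```sh
# hypothetical example: analyse a single scan and write PAGE-XML to ./output
eynollah \
  -i document.tif \
  --out output/ \
  -m /path/to/models
```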
## How to cite -If you find this tool useful in your work, please consider citing our paper: +If you find this useful in your work, please consider citing our paper: ```bibtex @inproceedings{hip23rezanezhad, diff --git a/qurator/.gitkeep b/eynollah/.gitkeep similarity index 100% rename from qurator/.gitkeep rename to eynollah/.gitkeep diff --git a/eynollah/__init__.py b/eynollah/__init__.py new file mode 100644 index 0000000..5284146 --- /dev/null +++ b/eynollah/__init__.py @@ -0,0 +1 @@ +__import__("pkg_resources").declare_namespace(__name__) diff --git a/qurator/eynollah/__init__.py b/eynollah/eynollah/__init__.py similarity index 100% rename from qurator/eynollah/__init__.py rename to eynollah/eynollah/__init__.py diff --git a/qurator/eynollah/cli.py b/eynollah/eynollah/cli.py similarity index 98% rename from qurator/eynollah/cli.py rename to eynollah/eynollah/cli.py index 390a762..f5b32a9 100644 --- a/qurator/eynollah/cli.py +++ b/eynollah/eynollah/cli.py @@ -1,8 +1,8 @@ import sys import click from ocrd_utils import getLogger, initLogging, setOverrideLogLevel -from qurator.eynollah.eynollah import Eynollah -from qurator.eynollah.utils.dirs import EynollahDirs +from eynollah.eynollah.eynollah import Eynollah +from eynollah.eynollah.utils.dirs import EynollahDirs @click.command() @@ -11,6 +11,7 @@ from qurator.eynollah.utils.dirs import EynollahDirs "-i", help="image filename", type=click.Path(exists=True, dir_okay=False), + # required=True, ) @click.option( "--out", diff --git a/qurator/eynollah/eynollah.py b/eynollah/eynollah/eynollah.py similarity index 100% rename from qurator/eynollah/eynollah.py rename to eynollah/eynollah/eynollah.py diff --git a/qurator/eynollah/ocrd-tool.json b/eynollah/eynollah/ocrd-tool.json similarity index 100% rename from qurator/eynollah/ocrd-tool.json rename to eynollah/eynollah/ocrd-tool.json diff --git a/qurator/eynollah/ocrd_cli.py b/eynollah/eynollah/ocrd_cli.py similarity index 99% rename from qurator/eynollah/ocrd_cli.py rename to eynollah/eynollah/ocrd_cli.py index 8929927..499661b 100644 --- a/qurator/eynollah/ocrd_cli.py +++ b/eynollah/eynollah/ocrd_cli.py @@ -2,10 +2,12 @@ from .processor import EynollahProcessor from click import command from ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor + @command() @ocrd_cli_options def main(*args, **kwargs): return ocrd_cli_wrap_processor(EynollahProcessor, *args, **kwargs) + if __name__ == '__main__': main() diff --git a/qurator/eynollah/plot.py b/eynollah/eynollah/plot.py similarity index 85% rename from qurator/eynollah/plot.py rename to eynollah/eynollah/plot.py index bb7e32c..adcd0c4 100644 --- a/qurator/eynollah/plot.py +++ b/eynollah/eynollah/plot.py @@ -10,6 +10,7 @@ from .utils.rotate import rotate_image_different from .utils.resize import resize_image from .utils.dirs import EynollahDirs + class EynollahPlotter(): """ Class collecting all the plotting and image writing methods @@ -34,13 +35,15 @@ class EynollahPlotter(): if self.dirs.dir_of_layout is not None: values = np.unique(text_regions_p[:, :]) # pixels=['Background' , 'Main text' , 'Heading' , 'Marginalia' ,'Drop capitals' , 'Images' , 'Seperators' , 'Tables', 'Graphics'] - pixels=['Background' , 'Main text' , 'Image' , 'Separator','Marginalia'] + pixels = ['Background', 'Main text', 'Image', 'Separator', 'Marginalia'] values_indexes = [0, 1, 2, 3, 4] plt.figure(figsize=(40, 40)) plt.rcParams["font.size"] = "40" im = plt.imshow(text_regions_p[:, :]) colors = [im.cmap(im.norm(value)) for value in values] - patches = 
[mpatches.Patch(color=colors[np.where(values == i)[0][0]], label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in values] + patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], + label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in + values] plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0, fontsize=40) plt.savefig(os.path.join(self.dirs.dir_of_layout, self.image_filename_stem + "_layout_main.png")) @@ -49,7 +52,7 @@ class EynollahPlotter(): if self.dirs.dir_of_all is not None: values = np.unique(text_regions_p[:, :]) # pixels=['Background' , 'Main text' , 'Heading' , 'Marginalia' ,'Drop capitals' , 'Images' , 'Seperators' , 'Tables', 'Graphics'] - pixels=['Background' , 'Main text' , 'Image' , 'Separator','Marginalia'] + pixels = ['Background', 'Main text', 'Image', 'Separator', 'Marginalia'] values_indexes = [0, 1, 2, 3, 4] plt.figure(figsize=(80, 40)) plt.rcParams["font.size"] = "40" @@ -58,7 +61,9 @@ class EynollahPlotter(): plt.subplot(1, 2, 2) im = plt.imshow(text_regions_p[:, :]) colors = [im.cmap(im.norm(value)) for value in values] - patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in values] + patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], + label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in + values] plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0, fontsize=60) plt.savefig(os.path.join(self.dirs.dir_of_all, self.image_filename_stem + "_layout_main_and_page.png")) @@ -72,7 +77,9 @@ class EynollahPlotter(): plt.rcParams["font.size"] = "40" im = plt.imshow(text_regions_p[:, :]) colors = [im.cmap(im.norm(value)) for value in values] - patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in values] + patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], + label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in + values] plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0, fontsize=40) plt.savefig(os.path.join(self.dirs.dir_of_layout, self.image_filename_stem + "_layout.png")) @@ -89,7 +96,9 @@ class EynollahPlotter(): plt.subplot(1, 2, 2) im = plt.imshow(text_regions_p[:, :]) colors = [im.cmap(im.norm(value)) for value in values] - patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in values] + patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], + label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in + values] plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0, fontsize=60) plt.savefig(os.path.join(self.dirs.dir_of_all, self.image_filename_stem + "_layout_and_page.png")) @@ -105,7 +114,9 @@ class EynollahPlotter(): plt.subplot(1, 2, 2) im = plt.imshow(textline_mask_tot_ea[:, :]) colors = [im.cmap(im.norm(value)) for value in values] - patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in values] + patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], + label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in + values] plt.legend(handles=patches, 
bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0, fontsize=60)
             plt.savefig(os.path.join(self.dirs.dir_of_all, self.image_filename_stem + "_textline_and_page.png"))
@@ -130,11 +141,12 @@ class EynollahPlotter():
             plt.rcParams['font.size']='50'
             plt.subplot(1,2,1)
             plt.imshow(img_patch_org)
-            plt.subplot(1,2,2)
-            plt.plot(gaussian_filter1d(img_patch_org.sum(axis=1), 3),np.array(range(len(gaussian_filter1d(img_patch_org.sum(axis=1), 3)))),linewidth=8)
-            plt.xlabel('Density of textline prediction in direction of X axis',fontsize=60)
-            plt.ylabel('Height',fontsize=60)
-            plt.yticks([0,len(gaussian_filter1d(img_patch_org.sum(axis=1), 3))])
+            plt.subplot(1, 2, 2)
+            plt.plot(gaussian_filter1d(img_patch_org.sum(axis=1), 3),
+                     np.array(range(len(gaussian_filter1d(img_patch_org.sum(axis=1), 3)))), linewidth=8)
+            plt.xlabel('Density of textline prediction in direction of X axis', fontsize=60)
+            plt.ylabel('Height', fontsize=60)
+            plt.yticks([0, len(gaussian_filter1d(img_patch_org.sum(axis=1), 3))])
             plt.gca().invert_yaxis()
             plt.savefig(os.path.join(self.dirs.dir_of_all, self.image_filename_stem+'_density_of_textline.png'))
@@ -157,9 +169,9 @@ class EynollahPlotter():
                 box = [x, y, w, h]
                 croped_page, page_coord = crop_image_inside_box(box, image_page)
-                croped_page = resize_image(croped_page, int(croped_page.shape[0] / self.scale_y), int(croped_page.shape[1] / self.scale_x))
+                croped_page = resize_image(croped_page, int(croped_page.shape[0] / self.scale_y),
+                                           int(croped_page.shape[1] / self.scale_x))
                 path = os.path.join(self.dirs.dir_of_cropped_images, self.image_filename_stem + "_" + str(index) + ".jpg")
                 cv2.imwrite(path, croped_page)
                 index += 1
-
diff --git a/qurator/eynollah/processor.py b/eynollah/eynollah/processor.py
similarity index 99%
rename from qurator/eynollah/processor.py
rename to eynollah/eynollah/processor.py
index 97ad61b..9a28f58 100644
--- a/qurator/eynollah/processor.py
+++ b/eynollah/eynollah/processor.py
@@ -7,6 +7,7 @@
-from qurator.eynollah.utils.dirs import EynollahDirs
+from eynollah.eynollah.utils.dirs import EynollahDirs
 from .eynollah import Eynollah
+
 class EynollahProcessor(Processor):
     @property
diff --git a/eynollah/eynollah/train/README.md b/eynollah/eynollah/train/README.md
new file mode 100644
index 0000000..8acfa12
--- /dev/null
+++ b/eynollah/eynollah/train/README.md
@@ -0,0 +1,67 @@
+# Pixelwise Segmentation
+> Pixelwise segmentation for document images
+
+## Introduction
+This repository contains the source code for training an encoder model for document image segmentation.
+
+## Installation
+Either clone the repository via `git clone https://github.com/qurator-spk/sbb_pixelwise_segmentation.git` or download and unpack the [ZIP](https://github.com/qurator-spk/sbb_pixelwise_segmentation/archive/master.zip).
+
+### Pretrained encoder
+Download our pretrained weights and add them to a ``pretrained_model`` folder:
+https://qurator-data.de/sbb_pixelwise_segmentation/pretrained_encoder/
+## Usage
+
+### Train
+To train a model, run: ``python train.py with config_params.json``
+
+### Ground truth format
+Labels for each pixel are identified by a number. So in a binary case, ``n_classes`` should be set to ``2`` and each pixel should be labelled ``0`` or ``1``.
+
+In the multiclass case, set ``n_classes`` to the number of classes you have and produce per-pixel labels with values from ``0, 1, 2, ..., n_classes-1``.
+Labels should be provided in PNG format.
+Our labels are 3-channel PNG images, but only the information of the first channel is used.
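+To make the format concrete, here is a minimal sketch (illustrative only, not shipped with the repository; the output path is hypothetical) that writes such a label image:
+
+```python
+# Minimal sketch: write a binary ground-truth label as a 3-channel PNG.
+# Only the first channel is read during training, so the class values
+# are simply replicated across all three channels.
+import numpy as np
+import cv2
+
+n_classes = 2
+label_2d = np.random.randint(0, n_classes, size=(224, 224), dtype=np.uint8)
+label_3ch = np.stack([label_2d] * 3, axis=-1)
+cv2.imwrite('labels/example_0001.png', label_3ch)  # hypothetical output path
+```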
+If you have an image label with height and width of 10, the first channel should, in a binary case, look like this:
+
+    Label: [ [1, 0, 0, 1, 1, 0, 0, 1, 0, 0],
+             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+             ...,
+             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ]
+
+    This means that you have an image of `10*10*3`, where `pixel[0,0]` belongs
+    to class `1` and `pixel[0,1]` belongs to class `0`.
+
+    A small sample of training data for a binarization experiment can be found here: [Training data sample](https://qurator-data.de/~vahid.rezanezhad/binarization_training_data_sample/), which contains images and labels folders.
+
+### Training, evaluation and output
+The train and evaluation folders should contain subfolders of images and labels.
+The output folder should be an empty folder where the output model will be written.
+
+### Parameter configuration
+* patches: If you want to break input images into smaller patches (the input size of the model), set this parameter to ``true``. If the model should see the whole image at once, as in page extraction, set it to ``false``.
+* n_batch: Batch size at each iteration.
+* n_classes: Number of classes. In the case of binary classification this should be 2.
+* n_epochs: Number of epochs.
+* input_height: This indicates the height of the model's input.
+* input_width: This indicates the width of the model's input.
+* weight_decay: Weight decay of l2 regularization of model layers.
+* augmentation: If you want to apply any kind of augmentation, this parameter must first be set to ``true``.
+* flip_aug: If ``true``, different types of flip will be applied to the image. The types of flips are given with "flip_index" in the train.py file.
+* blur_aug: If ``true``, different types of blurring will be applied to the image. The types of blurring are given with "blur_k" in the train.py file.
+* scaling: If ``true``, scaling will be applied to the image. The scales are given with "scales" in the train.py file.
+* rotation_not_90: If ``true``, rotation (other than 90 degrees) will be applied to the image. The rotation angles are given with "thetha" in the train.py file.
+* rotation: If ``true``, 90 degree rotation will be applied to the image.
+* binarization: If ``true``, Otsu thresholding will be applied to augment the input data with binarized images.
+* scaling_bluring: If ``true``, a combination of scaling and blurring will be applied to the image.
+* scaling_binarization: If ``true``, a combination of scaling and binarization will be applied to the image.
+* scaling_flip: If ``true``, a combination of scaling and flip will be applied to the image.
+* continue_training: If ``true``, training continues from an already trained model. In that case the directory of the trained model must be given with "dir_of_start_model", together with the index for naming the models. For example, if you have already trained for 3 epochs, your last index is 2; to continue from model_1.h5, set "index_start" to 3 so that new models are named starting with index 3.
+* weighted_loss: If ``true``, weighted categorical_crossentropy is applied as the loss function. Be careful: if this is set to ``true``, the parameter "is_loss_soft_dice" should be ``false``.
+* data_is_provided: If you have already provided the input data, you can set this to ``true``. Be sure that the train and eval data are in "dir_output", since once training data is provided, it is resized, augmented and then written to the train and eval sub-directories in "dir_output".
+* dir_train: This is the directory of "images" and "labels" (dir_train should include two subdirectories named images and labels) for raw images and labels, i.e. data that has not yet been prepared (resized and augmented) for training the model. When this tool is run, the raw data is transformed to the size needed by the model and written to the train and eval directories in "dir_output". Each of train and eval includes "images" and "labels" sub-directories.
diff --git a/eynollah/eynollah/train/__init__.py b/eynollah/eynollah/train/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/eynollah/eynollah/train/__init__.py
@@ -0,0 +1 @@
+
diff --git a/eynollah/eynollah/train/build_model_load_pretrained_weights_and_save.py b/eynollah/eynollah/train/build_model_load_pretrained_weights_and_save.py
new file mode 100644
index 0000000..40cc1b6
--- /dev/null
+++ b/eynollah/eynollah/train/build_model_load_pretrained_weights_and_save.py
@@ -0,0 +1,29 @@
+import os
+import sys
+import tensorflow as tf
+import keras, warnings
+from keras.optimizers import *
+from sacred import Experiment
+from models import *
+from utils import *
+from metrics import *
+
+
+def configuration():
+    gpu_options = tf.compat.v1.GPUOptions(allow_growth=True)
+    session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))
+
+
+if __name__ == '__main__':
+    n_classes = 2
+    input_height = 224
+    input_width = 448
+    weight_decay = 1e-6
+    pretraining = False
+    dir_of_weights = 'model_bin_sbb_ens.h5'
+
+    # configuration()
+
+    model = resnet50_unet(n_classes, input_height, input_width, weight_decay, pretraining)
+    model.load_weights(dir_of_weights)
+    model.save('./name_in_another_python_version.h5')
diff --git a/eynollah/eynollah/train/config_params.json b/eynollah/eynollah/train/config_params.json
new file mode 100644
index 0000000..b07d28d
--- /dev/null
+++ b/eynollah/eynollah/train/config_params.json
@@ -0,0 +1,30 @@
+{
+    "n_classes" : 3,
+    "n_epochs" : 2,
+    "input_height" : 448,
+    "input_width" : 672,
+    "weight_decay" : 1e-6,
+    "n_batch" : 2,
+    "learning_rate": 1e-4,
+    "patches" : true,
+    "pretraining" : true,
+    "augmentation" : false,
+    "flip_aug" : false,
+    "blur_aug" : false,
+    "scaling" : true,
+    "binarization" : false,
+    "scaling_bluring" : false,
+    "scaling_binarization" : false,
+    "scaling_flip" : false,
+    "rotation": false,
+    "rotation_not_90": false,
+    "continue_training": false,
+    "index_start": 0,
+    "dir_of_start_model": " ",
+    "weighted_loss": false,
+    "is_loss_soft_dice": false,
+    "data_is_provided": false,
+    "dir_train": "/path/to/training/files/train",
+    "dir_eval": "/path/to/training/files/eval",
+    "dir_output": "/path/to/training/files/output"
+}
diff --git a/eynollah/eynollah/train/metrics.py b/eynollah/eynollah/train/metrics.py
new file mode 100644
index 0000000..9d41d9e
--- /dev/null
+++ b/eynollah/eynollah/train/metrics.py
@@ -0,0 +1,357 @@
+from keras import backend as K
+import tensorflow as tf
+import numpy as np
+
+
+def focal_loss(gamma=2., alpha=4.):
+    gamma = float(gamma)
+    alpha = float(alpha)
+
+    def focal_loss_fixed(y_true, y_pred):
+        """Focal loss for multi-classification
+        FL(p_t)=-alpha(1-p_t)^{gamma}ln(p_t)
+        Notice: y_pred is probability after softmax
+        gradient is d(Fl)/d(p_t) not d(Fl)/d(x) as described in paper
+        d(Fl)/d(p_t) * [p_t(1-p_t)] = d(Fl)/d(x)
+        Focal Loss for Dense Object Detection
+        https://arxiv.org/abs/1708.02002
+
+        Arguments:
+            y_true {tensor} -- ground truth labels, shape of [batch_size,
num_cls]
+            y_pred {tensor} -- model's output, shape of [batch_size, num_cls]
+
+        Keyword Arguments:
+            gamma {float} -- (default: {2.0})
+            alpha {float} -- (default: {4.0})
+
+        Returns:
+            [tensor] -- loss.
+        """
+        epsilon = 1.e-9
+        y_true = tf.convert_to_tensor(y_true, tf.float32)
+        y_pred = tf.convert_to_tensor(y_pred, tf.float32)
+
+        model_out = tf.add(y_pred, epsilon)
+        ce = tf.multiply(y_true, -tf.math.log(model_out))
+        weight = tf.multiply(y_true, tf.pow(tf.subtract(1., model_out), gamma))
+        fl = tf.multiply(alpha, tf.multiply(weight, ce))
+        reduced_fl = tf.reduce_max(fl, axis=1)
+        return tf.reduce_mean(reduced_fl)
+
+    return focal_loss_fixed
+
+
+def weighted_categorical_crossentropy(weights=None):
+    """ weighted_categorical_crossentropy
+
+        Args:
+            * weights: crossentropy weights
+        Returns:
+            * weighted categorical crossentropy function
+    """
+
+    def loss(y_true, y_pred):
+        labels_floats = tf.cast(y_true, tf.float32)
+        per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats, logits=y_pred)
+
+        if weights is not None:
+            weight_mask = tf.maximum(tf.reduce_max(tf.constant(
+                np.array(weights, dtype=np.float32)[None, None, None])
+                * labels_floats, axis=-1), 1.0)
+            per_pixel_loss = per_pixel_loss * weight_mask[:, :, :, None]
+        return tf.reduce_mean(per_pixel_loss)
+
+    return loss
+
+
+def image_categorical_cross_entropy(y_true, y_pred, weights=None):
+    """
+    :param y_true: tensor of shape (batch_size, height, width) representing the ground truth.
+    :param y_pred: tensor of shape (batch_size, height, width) representing the prediction.
+    :return: The mean cross-entropy on softmaxed tensors.
+    """
+
+    labels_floats = tf.cast(y_true, tf.float32)
+    per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats, logits=y_pred)
+
+    if weights is not None:
+        weight_mask = tf.maximum(
+            tf.reduce_max(tf.constant(
+                np.array(weights, dtype=np.float32)[None, None, None])
+                * labels_floats, axis=-1), 1.0)
+        per_pixel_loss = per_pixel_loss * weight_mask[:, :, :, None]
+
+    return tf.reduce_mean(per_pixel_loss)
+
+
+def class_tversky(y_true, y_pred):
+    smooth = 1.0  # 1.00
+
+    y_true = K.permute_dimensions(y_true, (3, 1, 2, 0))
+    y_pred = K.permute_dimensions(y_pred, (3, 1, 2, 0))
+
+    y_true_pos = K.batch_flatten(y_true)
+    y_pred_pos = K.batch_flatten(y_pred)
+    true_pos = K.sum(y_true_pos * y_pred_pos, 1)
+    false_neg = K.sum(y_true_pos * (1 - y_pred_pos), 1)
+    false_pos = K.sum((1 - y_true_pos) * y_pred_pos, 1)
+    alpha = 0.2  # 0.5
+    beta = 0.8
+    return (true_pos + smooth) / (true_pos + alpha * false_neg + (beta) * false_pos + smooth)
+
+
+def focal_tversky_loss(y_true, y_pred):
+    pt_1 = class_tversky(y_true, y_pred)
+    gamma = 1.3  # 4./3.0#1.3#4.0/3.00# 0.75
+    return K.sum(K.pow((1 - pt_1), gamma))
+
+
+def generalized_dice_coeff2(y_true, y_pred):
+    n_el = 1
+    for dim in y_true.shape:
+        n_el *= int(dim)
+    n_cl = y_true.shape[-1]
+    w = K.zeros(shape=(n_cl,))
+    w = (K.sum(y_true, axis=(0, 1, 2))) / (n_el)
+    w = 1 / (w ** 2 + 0.000001)
+    numerator = y_true * y_pred
+    numerator = w * K.sum(numerator, (0, 1, 2))
+    numerator = K.sum(numerator)
+    denominator = y_true + y_pred
+    denominator = w * K.sum(denominator, (0, 1, 2))
+    denominator = K.sum(denominator)
+    return 2 * numerator / denominator
+
+
+def generalized_dice_coeff(y_true, y_pred):
+    axes = tuple(range(1, len(y_pred.shape) - 1))
+    Ncl = y_pred.shape[-1]
+    w = K.zeros(shape=(Ncl,))
+    w = K.sum(y_true, axis=axes)
+    w = 1 / (w ** 2 + 0.000001)
+    # Compute gen dice coef:
+    numerator = y_true * y_pred
+    numerator = w *
K.sum(numerator, axes)
+    numerator = K.sum(numerator)
+
+    denominator = y_true + y_pred
+    denominator = w * K.sum(denominator, axes)
+    denominator = K.sum(denominator)
+
+    gen_dice_coef = 2 * numerator / denominator
+
+    return gen_dice_coef
+
+
+def generalized_dice_loss(y_true, y_pred):
+    return 1 - generalized_dice_coeff2(y_true, y_pred)
+
+
+def soft_dice_loss(y_true, y_pred, epsilon=1e-6):
+    '''
+    Soft dice loss calculation for arbitrary batch size, number of classes, and number of spatial dimensions.
+    Assumes the `channels_last` format.
+
+    # Arguments
+        y_true: b x X x Y( x Z...) x c One hot encoding of ground truth
+        y_pred: b x X x Y( x Z...) x c Network output, must sum to 1 over c channel (such as after softmax)
+        epsilon: Used for numerical stability to avoid divide by zero errors
+
+    # References
+        V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation
+        https://arxiv.org/abs/1606.04797
+        More details on Dice loss formulation
+        https://mediatum.ub.tum.de/doc/1395260/1395260.pdf (page 72)
+
+        Adapted from https://github.com/Lasagne/Recipes/issues/99#issuecomment-347775022
+    '''
+
+    # skip the batch and class axis for calculating Dice score
+    axes = tuple(range(1, len(y_pred.shape) - 1))
+
+    numerator = 2. * K.sum(y_pred * y_true, axes)
+
+    denominator = K.sum(K.square(y_pred) + K.square(y_true), axes)
+    return 1.00 - K.mean(numerator / (denominator + epsilon))  # average over classes and batch
+
+
+def seg_metrics(y_true, y_pred, metric_name, metric_type='standard', drop_last=True, mean_per_class=False,
+                verbose=False):
+    """
+    Compute mean metrics of two segmentation masks, via Keras.
+
+    IoU(A,B) = |A & B| / (| A U B|)
+    Dice(A,B) = 2*|A & B| / (|A| + |B|)
+
+    Args:
+        y_true: true masks, one-hot encoded.
+        y_pred: predicted masks, either softmax outputs, or one-hot encoded.
+        metric_name: metric to be computed, either 'iou' or 'dice'.
+        metric_type: one of 'standard' (default), 'soft', 'naive'.
+            In the standard version, y_pred is one-hot encoded and the mean
+            is taken only over classes that are present (in y_true or y_pred).
+            The 'soft' version of the metrics are computed without one-hot
+            encoding y_pred.
+            The 'naive' version returns mean metrics where absent classes contribute
+            to the class mean as 1.0 (instead of being dropped from the mean).
+        drop_last = True: boolean flag to drop last class (usually reserved
+            for background class in semantic segmentation)
+        mean_per_class = False: return mean along batch axis for each class.
+        verbose = False: print intermediate results such as intersection, union
+            (as number of pixels).
+    Returns:
+        IoU/Dice of y_true and y_pred, as a float, unless mean_per_class == True
+        in which case it returns the per-class metric, averaged over the batch.
+
+    Inputs are B*W*H*N tensors, with
+        B = batch size,
+        W = width,
+        H = height,
+        N = number of classes
+    """
+
+    flag_soft = (metric_type == 'soft')
+    flag_naive_mean = (metric_type == 'naive')
+
+    # always assume one or more classes
+    num_classes = K.shape(y_true)[-1]
+
+    if not flag_soft:
+        # get one-hot encoded masks from y_pred (true masks should already be one-hot)
+        y_pred = K.one_hot(K.argmax(y_pred), num_classes)
+        y_true = K.one_hot(K.argmax(y_true), num_classes)
+
+    # if already one-hot, could have skipped above command
+    # keras uses float32 instead of float64; float64 inputs (e.g. from numpy arrays or keras.to_categorical) would give errors downstream
+    y_true = K.cast(y_true, 'float32')
+    y_pred = K.cast(y_pred, 'float32')
+
+    # intersection and union shapes are batch_size * n_classes (values = area in pixels)
+    axes = (1, 2)  # W,H axes of each image
+    intersection = K.sum(K.abs(y_true * y_pred), axis=axes)
+    mask_sum = K.sum(K.abs(y_true), axis=axes) + K.sum(K.abs(y_pred), axis=axes)
+    union = mask_sum - intersection  # or, np.logical_or(y_pred, y_true) for one-hot
+
+    smooth = .001
+    iou = (intersection + smooth) / (union + smooth)
+    dice = 2 * (intersection + smooth) / (mask_sum + smooth)
+
+    metric = {'iou': iou, 'dice': dice}[metric_name]
+
+    # define mask to be 0 when no pixels are present in either y_true or y_pred, 1 otherwise
+    mask = K.cast(K.not_equal(union, 0), 'float32')
+
+    if drop_last:
+        metric = metric[:, :-1]
+        mask = mask[:, :-1]
+
+    if verbose:
+        print('intersection, union')
+        print(K.eval(intersection), K.eval(union))
+        print(K.eval(intersection / union))
+
+    # return mean metrics: remaining axes are (batch, classes)
+    if flag_naive_mean:
+        return K.mean(metric)
+
+    # take mean only over non-absent classes
+    class_count = K.sum(mask, axis=0)
+    non_zero = tf.greater(class_count, 0)
+    non_zero_sum = tf.boolean_mask(K.sum(metric * mask, axis=0), non_zero)
+    non_zero_count = tf.boolean_mask(class_count, non_zero)
+
+    if verbose:
+        print('Counts of inputs with class present, metrics for non-absent classes')
+        print(K.eval(class_count), K.eval(non_zero_sum / non_zero_count))
+
+    return K.mean(non_zero_sum / non_zero_count)
+
+
+def mean_iou(y_true, y_pred, **kwargs):
+    """
+    Compute mean Intersection over Union of two segmentation masks, via Keras.
+
+    Calls seg_metrics(y_true, y_pred, metric_name='iou'), see there for allowed kwargs.
+    """
+    return seg_metrics(y_true, y_pred, metric_name='iou', **kwargs)
+
+
+def Mean_IOU(y_true, y_pred):
+    nb_classes = K.int_shape(y_pred)[-1]
+    iou = []
+    true_pixels = K.argmax(y_true, axis=-1)
+    pred_pixels = K.argmax(y_pred, axis=-1)
+    void_labels = K.equal(K.sum(y_true, axis=-1), 0)
+    for i in range(0, nb_classes):  # compute the IoU for each class (void handling is commented out below)
+        true_labels = K.equal(true_pixels, i)  # & ~void_labels
+        pred_labels = K.equal(pred_pixels, i)  # & ~void_labels
+        inter = tf.cast(true_labels & pred_labels, tf.int32)
+        union = tf.cast(true_labels | pred_labels, tf.int32)
+        legal_batches = K.sum(tf.cast(true_labels, tf.int32), axis=1) > 0
+        ious = K.sum(inter, axis=1) / K.sum(union, axis=1)
+        iou.append(K.mean(tf.gather(ious, indices=tf.where(legal_batches))))  # returns average IoU of the same objects
+    iou = tf.stack(iou)
+    legal_labels = ~tf.debugging.is_nan(iou)
+    iou = tf.gather(iou, indices=tf.where(legal_labels))
+    return K.mean(iou)
+
+
+def iou_vahid(y_true, y_pred):
+    nb_classes = tf.shape(y_true)[-1] + 1
+    true_pixels = K.argmax(y_true, axis=-1)
+    pred_pixels = K.argmax(y_pred, axis=-1)
+    iou = []
+
+    for i in tf.range(nb_classes):
+        tp = K.sum(tf.cast(K.equal(true_pixels, i) & K.equal(pred_pixels, i), tf.int32))
+        fp = K.sum(tf.cast(K.not_equal(true_pixels, i) & K.equal(pred_pixels, i), tf.int32))
+        fn = K.sum(tf.cast(K.equal(true_pixels, i) & K.not_equal(pred_pixels, i), tf.int32))
+        iouh = tp / (tp + fp + fn)
+        iou.append(iouh)
+    return K.mean(iou)
+
+
+def IoU_metric(Yi, y_predi):
+    # mean Intersection over Union
+    # Mean IoU = TP/(FN + TP + FP)
+    y_predi = np.argmax(y_predi, axis=3)
+    y_testi = np.argmax(Yi, axis=3)
+    IoUs = []
+    Nclass = int(np.max(Yi)) + 1
+    for c in range(Nclass):
+        TP = np.sum((Yi == c) & (y_predi == c))
+        FP = np.sum((Yi != c) & (y_predi == c))
+        FN = np.sum((Yi == c) & (y_predi != c))
+        IoU = TP / float(TP + FP + FN)
+        IoUs.append(IoU)
+    return K.cast(np.mean(IoUs), dtype='float32')
+
+
+def IoU_metric_keras(y_true, y_pred):
+    # mean Intersection over Union
+    # Mean IoU = TP/(FN + TP + FP)
+    init = tf.compat.v1.global_variables_initializer()
+    sess = tf.compat.v1.Session()
+    sess.run(init)
+
+    return IoU_metric(y_true.eval(session=sess), y_pred.eval(session=sess))
+
+
+def jaccard_distance_loss(y_true, y_pred, smooth=100):
+    """
+    Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
+            = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))
+
+    The jaccard distance loss is useful for unbalanced datasets. This has been
+    shifted so it converges on 0 and is smoothed to avoid exploding or disappearing
+    gradient.
+
+    Ref: https://en.wikipedia.org/wiki/Jaccard_index
+
+    @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96
+    @author: wassname
+    """
+    intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
+    sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)
+    jac = (intersection + smooth) / (sum_ - intersection + smooth)
+    return (1 - jac) * smooth
diff --git a/eynollah/eynollah/train/models.py b/eynollah/eynollah/train/models.py
new file mode 100644
index 0000000..7a1e246
--- /dev/null
+++ b/eynollah/eynollah/train/models.py
@@ -0,0 +1,294 @@
+from keras.models import *
+from keras.layers import *
+from keras import layers
+from keras.regularizers import l2
+
+resnet50_Weights_path = './pretrained_model/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
+IMAGE_ORDERING = 'channels_last'
+MERGE_AXIS = -1
+
+
+def one_side_pad(x):
+    x = ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING)(x)
+    if IMAGE_ORDERING == 'channels_first':
+        x = Lambda(lambda x: x[:, :, :-1, :-1])(x)
+    elif IMAGE_ORDERING == 'channels_last':
+        x = Lambda(lambda x: x[:, :-1, :-1, :])(x)
+    return x
+
+
+def identity_block(input_tensor, kernel_size, filters, stage, block):
+    """The identity block is the block that has no conv layer at shortcut.
+    # Arguments
+        input_tensor: input tensor
+        kernel_size: default 3, the kernel size of middle conv layer at main path
+        filters: list of integers, the filters of 3 conv layer at main path
+        stage: integer, current stage label, used for generating layer names
+        block: 'a','b'..., current block label, used for generating layer names
+    # Returns
+        Output tensor for the block.
+    """
+    filters1, filters2, filters3 = filters
+
+    if IMAGE_ORDERING == 'channels_last':
+        bn_axis = 3
+    else:
+        bn_axis = 1
+
+    conv_name_base = 'res' + str(stage) + block + '_branch'
+    bn_name_base = 'bn' + str(stage) + block + '_branch'
+
+    x = Conv2D(filters1, (1, 1), data_format=IMAGE_ORDERING, name=conv_name_base + '2a')(input_tensor)
+    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
+    x = Activation('relu')(x)
+
+    x = Conv2D(filters2, kernel_size, data_format=IMAGE_ORDERING,
+               padding='same', name=conv_name_base + '2b')(x)
+    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
+    x = Activation('relu')(x)
+
+    x = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING, name=conv_name_base + '2c')(x)
+    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
+
+    x = layers.add([x, input_tensor])
+    x = Activation('relu')(x)
+    return x
+
+
+def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
+    """conv_block is the block that has a conv layer at shortcut
+    # Arguments
+        input_tensor: input tensor
+        kernel_size: default 3, the kernel size of middle conv layer at main path
+        filters: list of integers, the filters of 3 conv layer at main path
+        stage: integer, current stage label, used for generating layer names
+        block: 'a','b'..., current block label, used for generating layer names
+    # Returns
+        Output tensor for the block.
+    Note that from stage 3, the first conv layer at main path has strides=(2, 2),
+    and the shortcut should have strides=(2, 2) as well
+    """
+    filters1, filters2, filters3 = filters
+
+    if IMAGE_ORDERING == 'channels_last':
+        bn_axis = 3
+    else:
+        bn_axis = 1
+
+    conv_name_base = 'res' + str(stage) + block + '_branch'
+    bn_name_base = 'bn' + str(stage) + block + '_branch'
+
+    x = Conv2D(filters1, (1, 1), data_format=IMAGE_ORDERING, strides=strides,
+               name=conv_name_base + '2a')(input_tensor)
+    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
+    x = Activation('relu')(x)
+
+    x = Conv2D(filters2, kernel_size, data_format=IMAGE_ORDERING, padding='same',
+               name=conv_name_base + '2b')(x)
+    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
+    x = Activation('relu')(x)
+
+    x = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING, name=conv_name_base + '2c')(x)
+    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
+
+    shortcut = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING, strides=strides,
+                      name=conv_name_base + '1')(input_tensor)
+    shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)
+
+    x = layers.add([x, shortcut])
+    x = Activation('relu')(x)
+    return x
+
+
+def resnet50_unet_light(n_classes, input_height=224, input_width=224, weight_decay=1e-6, pretraining=False):
+    assert input_height % 32 == 0
+    assert input_width % 32 == 0
+
+    img_input = Input(shape=(input_height, input_width, 3))
+
+    if IMAGE_ORDERING == 'channels_last':
+        bn_axis = 3
+    else:
+        bn_axis = 1
+
+    x = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(img_input)
+    x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2), kernel_regularizer=l2(weight_decay),
+               name='conv1')(x)
+    f1 = x
+
+    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
+    x = Activation('relu')(x)
+    x = MaxPooling2D((3, 3), data_format=IMAGE_ORDERING, strides=(2, 2))(x)
+
+    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
+    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
+    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
+    f2 = one_side_pad(x)
+
+    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
+    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
+    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
+    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
+    f3 = x
+
+    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
+    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
+    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
+    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
+    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
+    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
+    f4 = x
+
+    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
+    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
+    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
+    f5 = x
+
+    if pretraining:
+        Model(img_input, x).load_weights(resnet50_Weights_path)
+
+    v512_2048 = Conv2D(512, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(f5)
+    v512_2048 = (BatchNormalization(axis=bn_axis))(v512_2048)
+    v512_2048 = Activation('relu')(v512_2048)
+
+    v512_1024 = Conv2D(512, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(f4)
+    v512_1024 = (BatchNormalization(axis=bn_axis))(v512_1024)
+    v512_1024 = Activation('relu')(v512_1024)
+
+    o
= (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(v512_2048) + o = (concatenate([o, v512_1024], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(512, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o, f3], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(256, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o, f2], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(128, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o, f1], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(64, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o, img_input], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(32, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + o = Conv2D(n_classes, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(o) + o = (BatchNormalization(axis=bn_axis))(o) + o = (Activation('softmax'))(o) + + model = Model(img_input, o) + return model + + +def resnet50_unet(n_classes, input_height=224, input_width=224, weight_decay=1e-6, pretraining=False): + assert input_height % 32 == 0 + assert input_width % 32 == 0 + + img_input = Input(shape=(input_height, input_width, 3)) + + if IMAGE_ORDERING == 'channels_last': + bn_axis = 3 + else: + bn_axis = 1 + + x = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(img_input) + x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2), kernel_regularizer=l2(weight_decay), + name='conv1')(x) + f1 = x + + x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x) + x = Activation('relu')(x) + x = MaxPooling2D((3, 3), data_format=IMAGE_ORDERING, strides=(2, 2))(x) + + x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) + x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') + x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') + f2 = one_side_pad(x) + + x = conv_block(x, 3, [128, 128, 512], stage=3, block='a') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') + f3 = x + + x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') + x = identity_block(x, 3, [256, 256, 1024], 
stage=4, block='f')
+    f4 = x
+
+    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
+    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
+    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
+    f5 = x
+
+    if pretraining:
+        Model(img_input, x).load_weights(resnet50_Weights_path)
+
+    v1024_2048 = Conv2D(1024, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(
+        f5)
+    v1024_2048 = (BatchNormalization(axis=bn_axis))(v1024_2048)
+    v1024_2048 = Activation('relu')(v1024_2048)
+
+    o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(v1024_2048)
+    o = (concatenate([o, f4], axis=MERGE_AXIS))
+    o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o)
+    o = (Conv2D(512, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o)
+    o = (BatchNormalization(axis=bn_axis))(o)
+    o = Activation('relu')(o)
+
+    o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o)
+    o = (concatenate([o, f3], axis=MERGE_AXIS))
+    o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o)
+    o = (Conv2D(256, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o)
+    o = (BatchNormalization(axis=bn_axis))(o)
+    o = Activation('relu')(o)
+
+    o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o)
+    o = (concatenate([o, f2], axis=MERGE_AXIS))
+    o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o)
+    o = (Conv2D(128, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o)
+    o = (BatchNormalization(axis=bn_axis))(o)
+    o = Activation('relu')(o)
+
+    o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o)
+    o = (concatenate([o, f1], axis=MERGE_AXIS))
+    o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o)
+    o = (Conv2D(64, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o)
+    o = (BatchNormalization(axis=bn_axis))(o)
+    o = Activation('relu')(o)
+
+    o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o)
+    o = (concatenate([o, img_input], axis=MERGE_AXIS))
+    o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o)
+    o = (Conv2D(32, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o)
+    o = (BatchNormalization(axis=bn_axis))(o)
+    o = Activation('relu')(o)
+
+    o = Conv2D(n_classes, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(o)
+    o = (BatchNormalization(axis=bn_axis))(o)
+    o = (Activation('softmax'))(o)
+
+    model = Model(img_input, o)
+
+    return model
diff --git a/eynollah/eynollah/train/pagexml2img.py b/eynollah/eynollah/train/pagexml2img.py
new file mode 100644
index 0000000..8570f4f
--- /dev/null
+++ b/eynollah/eynollah/train/pagexml2img.py
@@ -0,0 +1,273 @@
+#! /usr/bin/env python3
+
+__version__ = '1.0'
+
+import argparse
+import sys
+import os
+import numpy as np
+import warnings
+import xml.etree.ElementTree as ET
+from tqdm import tqdm
+import cv2
+
+with warnings.catch_warnings():
+    warnings.simplefilter("ignore")
+
+__doc__ = \
+    """
+Tool to extract 2D or 3D RGB images from PAGE-XML data. In the former case the output is a single
+2D image array in which each class is filled with a pixel value. In the case of a 3D RGB image
+each class is defined by an RGB value, and besides the images a text file of classes is also produced.
+This classes.txt file is required by the dhSegment tool.
+"""
+
+
+class pagexml2img:
+    def __init__(self, dir_in, out_dir, output_type):
+        self.dir = dir_in
+        self.output_dir = out_dir
+        self.output_type = output_type
+
+    def get_content_of_dir(self):
+        """
+        List all ground truth PAGE-XML files. All files must be in XML format.
+        """
+
+        gt_all = os.listdir(self.dir)
+        self.gt_list = [file for file in gt_all if file.split('.')[len(file.split('.')) - 1] == 'xml']
+
+    def get_images_of_ground_truth(self):
+        """
+        Read the PAGE-XML files and write the ground truth images into the given output directory.
+        """
+
+        if self.output_type == '3d' or self.output_type == '3D':
+            classes = np.array([[0, 0, 0, 1, 0, 0, 0, 0],
+                                [255, 0, 0, 0, 1, 0, 0, 0],
+                                [0, 255, 0, 0, 0, 1, 0, 0],
+                                [0, 0, 255, 0, 0, 0, 1, 0],
+                                [0, 255, 255, 0, 0, 0, 0, 1]])
+
+            for index in tqdm(range(len(self.gt_list))):
+                try:
+                    tree1 = ET.parse(self.dir + '/' + self.gt_list[index])
+                    root1 = tree1.getroot()
+                    alltags = [elem.tag for elem in root1.iter()]
+                    link = alltags[0].split('}')[0] + '}'
+
+                    region_tags = np.unique([x for x in alltags if x.endswith('Region')])
+
+                    for jj in root1.iter(link + 'Page'):
+                        y_len = int(jj.attrib['imageHeight'])
+                        x_len = int(jj.attrib['imageWidth'])
+
+                    co_text = []
+                    co_sep = []
+                    co_img = []
+                    co_table = []
+
+                    for tag in region_tags:
+                        if tag.endswith('}TextRegion') or tag.endswith('}Textregion') or tag.endswith(
+                                '}textRegion') or tag.endswith('}textregion'):
+
+                            for nn in root1.iter(tag):
+                                for co_it in nn.iter(link + 'Coords'):
+                                    if bool(co_it.attrib) == False:
+                                        c_t_in = []
+                                        for ll in nn.iter(link + 'Point'):
+                                            c_t_in.append(
+                                                [int(np.float(ll.attrib['x'])), int(np.float(ll.attrib['y']))])
+                                        co_text.append(np.array(c_t_in))
+                                        print(co_text)
+                                    elif bool(co_it.attrib) == True and 'points' in co_it.attrib.keys():
+                                        p_h = co_it.attrib['points'].split(' ')
+                                        co_text.append(
+                                            np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h]))
+
+                        elif tag.endswith('}ImageRegion') or tag.endswith('}Imageregion') or tag.endswith(
+                                '}imageRegion') or tag.endswith('}imageregion'):
+                            for nn in root1.iter(tag):
+                                for co_it in nn.iter(link + 'Coords'):
+                                    if bool(co_it.attrib) == False:
+                                        c_i_in = []
+                                        for ll in nn.iter(link + 'Point'):
+                                            c_i_in.append(
+                                                [int(np.float(ll.attrib['x'])), int(np.float(ll.attrib['y']))])
+                                        co_img.append(np.array(c_i_in))
+                                    elif bool(co_it.attrib) == True and 'points' in co_it.attrib.keys():
+                                        p_h = co_it.attrib['points'].split(' ')
+                                        co_img.append(
+                                            np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h]))
+
+                        elif tag.endswith('}SeparatorRegion') or tag.endswith('}Separatorregion') or tag.endswith(
+                                '}separatorRegion') or tag.endswith('}separatorregion'):
+                            for nn in root1.iter(tag):
+                                for co_it in nn.iter(link + 'Coords'):
+                                    if bool(co_it.attrib) == False:
+                                        c_s_in = []
+                                        for ll in nn.iter(link + 'Point'):
+                                            c_s_in.append(
+                                                [int(np.float(ll.attrib['x'])), int(np.float(ll.attrib['y']))])
+                                        co_sep.append(np.array(c_s_in))
+
+                                    elif bool(co_it.attrib) == True and 'points' in co_it.attrib.keys():
+                                        p_h = co_it.attrib['points'].split(' ')
+                                        co_sep.append(
+                                            np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h]))
+
+                        elif tag.endswith('}TableRegion') or tag.endswith('}tableRegion') or tag.endswith(
+                                '}Tableregion') or tag.endswith('}tableregion'):
+                            for nn in root1.iter(tag):
+                                for co_it in nn.iter(link + 'Coords'):
+                                    if bool(co_it.attrib) == False:
+                                        c_ta_in = []
+                                        for ll in nn.iter(link + 'Point'):
+                                            c_ta_in.append(
+                                                [int(np.float(ll.attrib['x'])),
int(np.float(ll.attrib['y']))]) + co_table.append(np.array(c_ta_in)) + + elif bool(co_it.attrib) == True and 'points' in co_it.attrib.keys(): + p_h = co_it.attrib['points'].split(' ') + co_table.append( + np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) + else: + pass + + img = np.zeros((y_len, x_len, 3)) + img_poly = cv2.fillPoly(img, pts=co_text, color=(255, 0, 0)) + img_poly = cv2.fillPoly(img, pts=co_img, color=(0, 255, 0)) + img_poly = cv2.fillPoly(img, pts=co_sep, color=(0, 0, 255)) + img_poly = cv2.fillPoly(img, pts=co_table, color=(0, 255, 255)) + + try: + cv2.imwrite(self.output_dir + '/' + self.gt_list[index].split('-')[1].split('.')[0] + '.png', + img_poly) + except: + cv2.imwrite(self.output_dir + '/' + self.gt_list[index].split('.')[0] + '.png', img_poly) + except: + pass + np.savetxt(self.output_dir + '/../classes.txt', classes) + + if self.output_type == '2d' or self.output_type == '2D': + for index in tqdm(range(len(self.gt_list))): + try: + tree1 = ET.parse(self.dir + '/' + self.gt_list[index]) + root1 = tree1.getroot() + alltags = [elem.tag for elem in root1.iter()] + link = alltags[0].split('}')[0] + '}' + + region_tags = np.unique([x for x in alltags if x.endswith('Region')]) + + for jj in root1.iter(link + 'Page'): + y_len = int(jj.attrib['imageHeight']) + x_len = int(jj.attrib['imageWidth']) + + co_text = [] + co_sep = [] + co_img = [] + co_table = [] + + for tag in region_tags: + if tag.endswith('}TextRegion') or tag.endswith('}Textregion') or tag.endswith( + '}textRegion') or tag.endswith('}textregion'): + + for nn in root1.iter(tag): + for co_it in nn.iter(link + 'Coords'): + if bool(co_it.attrib) == False: + c_t_in = [] + for ll in nn.iter(link + 'Point'): + c_t_in.append( + [int(np.float(ll.attrib['x'])), int(np.float(ll.attrib['y']))]) + co_text.append(np.array(c_t_in)) + print(co_text) + elif bool(co_it.attrib) == True and 'points' in co_it.attrib.keys(): + p_h = co_it.attrib['points'].split(' ') + co_text.append( + np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) + + elif tag.endswith('}ImageRegion') or tag.endswith('}Imageregion') or tag.endswith( + '}imageRegion') or tag.endswith('}imageregion'): + for nn in root1.iter(tag): + for co_it in nn.iter(link + 'Coords'): + if bool(co_it.attrib) == False: + c_i_in = [] + for ll in nn.iter(link + 'Point'): + c_i_in.append( + [int(np.float(ll.attrib['x'])), int(np.float(ll.attrib['y']))]) + co_img.append(np.array(c_i_in)) + elif bool(co_it.attrib) == True and 'points' in co_it.attrib.keys(): + p_h = co_it.attrib['points'].split(' ') + co_img.append( + np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) + + elif tag.endswith('}SeparatorRegion') or tag.endswith('}Separatorregion') or tag.endswith( + '}separatorRegion') or tag.endswith('}separatorregion'): + for nn in root1.iter(tag): + for co_it in nn.iter(link + 'Coords'): + if bool(co_it.attrib) == False: + c_s_in = [] + for ll in nn.iter(link + 'Point'): + c_s_in.append( + [int(np.float(ll.attrib['x'])), int(np.float(ll.attrib['y']))]) + co_sep.append(np.array(c_s_in)) + + elif bool(co_it.attrib) == True and 'points' in co_it.attrib.keys(): + p_h = co_it.attrib['points'].split(' ') + co_sep.append( + np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) + + elif tag.endswith('}TableRegion') or tag.endswith('}tableRegion') or tag.endswith( + '}Tableregion') or tag.endswith('}tableregion'): + for nn in root1.iter(tag): + for co_it in nn.iter(link + 'Coords'): + if bool(co_it.attrib) == 
False:
+                                        c_ta_in = []
+                                        for ll in nn.iter(link + 'Point'):
+                                            c_ta_in.append(
+                                                [int(np.float(ll.attrib['x'])), int(np.float(ll.attrib['y']))])
+                                        co_table.append(np.array(c_ta_in))
+
+                                    elif bool(co_it.attrib) == True and 'points' in co_it.attrib.keys():
+                                        p_h = co_it.attrib['points'].split(' ')
+                                        co_table.append(
+                                            np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h]))
+                        else:
+                            pass
+
+                    img = np.zeros((y_len, x_len))
+                    img_poly = cv2.fillPoly(img, pts=co_text, color=(1, 1, 1))
+                    img_poly = cv2.fillPoly(img, pts=co_img, color=(2, 2, 2))
+                    img_poly = cv2.fillPoly(img, pts=co_sep, color=(3, 3, 3))
+                    img_poly = cv2.fillPoly(img, pts=co_table, color=(4, 4, 4))
+                    try:
+                        cv2.imwrite(self.output_dir + '/' + self.gt_list[index].split('-')[1].split('.')[0] + '.png',
+                                    img_poly)
+                    except:
+                        cv2.imwrite(self.output_dir + '/' + self.gt_list[index].split('.')[0] + '.png', img_poly)
+                except:
+                    pass
+
+    def run(self):
+        self.get_content_of_dir()
+        self.get_images_of_ground_truth()
+
+
+def main():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('-dir_in', '--dir_in', dest='inp1', default=None, help='directory of PAGE-XML files')
+    parser.add_argument('-dir_out', '--dir_out', dest='inp2', default=None,
+                        help='directory where ground truth images will be written')
+    parser.add_argument('-type', '--type', dest='inp3', default=None,
+                        help='defines the output type: a 2d image array or a 3d image array encoded with RGB color. Just pass 2d or 3d. The classes.txt file will be saved one directory up. A 2d image array is saved as a 3-channel image, but the information of one channel is enough since all channels have the same values.')
+    options = parser.parse_args()
+
+    possibles = globals()
+    possibles.update(locals())
+    x = pagexml2img(options.inp1, options.inp2, options.inp3)
+    x.run()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/eynollah/eynollah/train/train.py b/eynollah/eynollah/train/train.py
new file mode 100644
index 0000000..ec6900c
--- /dev/null
+++ b/eynollah/eynollah/train/train.py
@@ -0,0 +1,221 @@
+import os
+import sys
+import tensorflow as tf
+from tensorflow.compat.v1.keras.backend import set_session
+import keras, warnings
+from keras.optimizers import *
+from sacred import Experiment
+from models import *
+from utils import *
+from metrics import *
+from keras.models import load_model
+from tqdm import tqdm
+
+
+def configuration():
+    keras.backend.clear_session()
+    tf.compat.v1.reset_default_graph()
+    warnings.filterwarnings('ignore')
+
+    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
+    config = tf.compat.v1.ConfigProto(log_device_placement=False, allow_soft_placement=True)
+
+    config.gpu_options.allow_growth = True
+    config.gpu_options.per_process_gpu_memory_fraction = 0.95  # 0.95
+    config.gpu_options.visible_device_list = "0"
+    set_session(tf.compat.v1.Session(config=config))
+
+
+def get_dirs_or_files(input_data):
+    if os.path.isdir(input_data):
+        image_input, labels_input = os.path.join(input_data, 'images/'), os.path.join(input_data, 'labels/')
+        # Check if training dir exists
+        assert os.path.isdir(image_input), "{} is not a directory".format(image_input)
+        assert os.path.isdir(labels_input), "{} is not a directory".format(labels_input)
+    return image_input, labels_input
+
+
+ex = Experiment()
+
+
+@ex.config
+def config_params():
+    n_classes = None  # Number of classes. If your case study is binary, set it to 2; otherwise give your number of classes.
+    n_epochs = 1
+    input_height = 224 * 1
+    input_width = 224 * 1
+    weight_decay = 1e-6  # Weight decay of l2 regularization of model layers.
+    n_batch = 1  # Batch size at each iteration.
+    learning_rate = 1e-4
+    patches = False  # Make patches of the image in order to use all of its information. In the case of page
+    # extraction this should be set to False since the model should see the whole image.
+    augmentation = False
+    flip_aug = False  # Flip image (augmentation).
+    blur_aug = False  # Blur patches of image (augmentation).
+    scaling = False  # Scaling of patches (augmentation) will be imposed if this is set to true.
+    binarization = False  # Otsu thresholding. Used for augmentation in binary cases like textline prediction; should not be applied for multiclass cases.
+    dir_train = None  # Directory of training dataset (sub-folders should be named images and labels).
+    dir_eval = None  # Directory of validation dataset (sub-folders should be named images and labels).
+    dir_output = None  # Directory of output where the model should be saved.
+    pretraining = False  # Set true to load pretrained weights of resnet50 encoder.
+    scaling_bluring = False
+    scaling_binarization = False
+    scaling_flip = False
+    thetha = [10, -10]
+    blur_k = ['blur', 'guass', 'median']  # Used in order to blur image. Used for augmentation.
+    scales = [0.5, 2]  # Scale patches with these scales. Used for augmentation.
+    flip_index = [0, 1, -1]  # Flip image. Used for augmentation.
+    continue_training = False  # If true, continue training from the model given by dir_of_start_model.
+    index_start = 0
+    dir_of_start_model = ''
+    is_loss_soft_dice = False
+    weighted_loss = False
+    data_is_provided = False
+
+
+@ex.automain
+def run(n_classes, n_epochs, input_height,
+        input_width, weight_decay, weighted_loss,
+        index_start, dir_of_start_model, is_loss_soft_dice,
+        n_batch, patches, augmentation, flip_aug,
+        blur_aug, scaling, binarization,
+        blur_k, scales, dir_train, data_is_provided,
+        scaling_bluring, scaling_binarization, rotation,
+        rotation_not_90, thetha, scaling_flip, continue_training,
+        flip_index, dir_eval, dir_output, pretraining, learning_rate):
+    if data_is_provided:
+        dir_train_flowing = os.path.join(dir_output, 'train')
+        dir_eval_flowing = os.path.join(dir_output, 'eval')
+
+        dir_flow_train_imgs = os.path.join(dir_train_flowing, 'images')
+        dir_flow_train_labels = os.path.join(dir_train_flowing, 'labels')
+
+        dir_flow_eval_imgs = os.path.join(dir_eval_flowing, 'images')
+        dir_flow_eval_labels = os.path.join(dir_eval_flowing, 'labels')
+
+        configuration()
+
+    else:
+        dir_img, dir_seg = get_dirs_or_files(dir_train)
+        dir_img_val, dir_seg_val = get_dirs_or_files(dir_eval)
+
+        # first make a directory in the output for both training and evaluation, in order to flow data from these directories.
+ dir_train_flowing = os.path.join(dir_output, 'train') + dir_eval_flowing = os.path.join(dir_output, 'eval') + + dir_flow_train_imgs = os.path.join(dir_train_flowing, 'images/') + dir_flow_train_labels = os.path.join(dir_train_flowing, 'labels/') + + dir_flow_eval_imgs = os.path.join(dir_eval_flowing, 'images/') + dir_flow_eval_labels = os.path.join(dir_eval_flowing, 'labels/') + + if os.path.isdir(dir_train_flowing): + os.system('rm -rf ' + dir_train_flowing) + os.makedirs(dir_train_flowing) + else: + os.makedirs(dir_train_flowing) + + if os.path.isdir(dir_eval_flowing): + os.system('rm -rf ' + dir_eval_flowing) + os.makedirs(dir_eval_flowing) + else: + os.makedirs(dir_eval_flowing) + + os.mkdir(dir_flow_train_imgs) + os.mkdir(dir_flow_train_labels) + + os.mkdir(dir_flow_eval_imgs) + os.mkdir(dir_flow_eval_labels) + + # set the gpu configuration + configuration() + + # writing patches into a sub-folder in order to be flowed from directory. + provide_patches(dir_img, dir_seg, dir_flow_train_imgs, + dir_flow_train_labels, + input_height, input_width, blur_k, blur_aug, + flip_aug, binarization, scaling, scales, flip_index, + scaling_bluring, scaling_binarization, rotation, + rotation_not_90, thetha, scaling_flip, + augmentation=augmentation, patches=patches) + + provide_patches(dir_img_val, dir_seg_val, dir_flow_eval_imgs, + dir_flow_eval_labels, + input_height, input_width, blur_k, blur_aug, + flip_aug, binarization, scaling, scales, flip_index, + scaling_bluring, scaling_binarization, rotation, + rotation_not_90, thetha, scaling_flip, + augmentation=False, patches=patches) + + if weighted_loss: + weights = np.zeros(n_classes) + if data_is_provided: + for obj in os.listdir(dir_flow_train_labels): + try: + label_obj = cv2.imread(dir_flow_train_labels + '/' + obj) + label_obj_one_hot = get_one_hot(label_obj, label_obj.shape[0], label_obj.shape[1], n_classes) + weights += (label_obj_one_hot.sum(axis=0)).sum(axis=0) + except: + pass + else: + + for obj in os.listdir(dir_seg): + try: + label_obj = cv2.imread(dir_seg + '/' + obj) + label_obj_one_hot = get_one_hot(label_obj, label_obj.shape[0], label_obj.shape[1], n_classes) + weights += (label_obj_one_hot.sum(axis=0)).sum(axis=0) + except: + pass + + weights = 1.00 / weights + + weights = weights / float(np.sum(weights)) + weights = weights / float(np.min(weights)) + weights = weights / float(np.sum(weights)) + + if continue_training: + if is_loss_soft_dice: + model = load_model(dir_of_start_model, compile=True, custom_objects={'soft_dice_loss': soft_dice_loss}) + if weighted_loss: + model = load_model(dir_of_start_model, compile=True, + custom_objects={'loss': weighted_categorical_crossentropy(weights)}) + if not is_loss_soft_dice and not weighted_loss: + model = load_model(dir_of_start_model, compile=True) + else: + # get our model. + index_start = 0 + model = resnet50_unet(n_classes, input_height, input_width, weight_decay, pretraining) + + # if you want to see the model structure just uncomment model summary. 
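+    # (model.summary() prints one row per layer with output shapes and parameter
+    # counts, which is handy for checking input_height/input_width)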
+ # model.summary() + + if not is_loss_soft_dice and not weighted_loss: + model.compile(loss='categorical_crossentropy', + optimizer=Adam(lr=learning_rate), metrics=['accuracy']) + if is_loss_soft_dice: + model.compile(loss=soft_dice_loss, + optimizer=Adam(lr=learning_rate), metrics=['accuracy']) + + if weighted_loss: + model.compile(loss=weighted_categorical_crossentropy(weights), + optimizer=Adam(lr=learning_rate), metrics=['accuracy']) + + # generating train and evaluation data + train_gen = data_gen(dir_flow_train_imgs, dir_flow_train_labels, batch_size=n_batch, + input_height=input_height, input_width=input_width, n_classes=n_classes) + val_gen = data_gen(dir_flow_eval_imgs, dir_flow_eval_labels, batch_size=n_batch, + input_height=input_height, input_width=input_width, n_classes=n_classes) + + for i in tqdm(range(index_start, n_epochs + index_start)): + model.fit_generator( + train_gen, + steps_per_epoch=int(len(os.listdir(dir_flow_train_imgs)) / n_batch) - 1, + validation_data=val_gen, + validation_steps=1, + epochs=1) + model.save(dir_output + '/' + 'model_' + str(i) + '.h5') + + # os.system('rm -rf '+dir_train_flowing) + # os.system('rm -rf '+dir_eval_flowing) + + # model.save(dir_output+'/'+'model'+'.h5') diff --git a/eynollah/eynollah/train/utils.py b/eynollah/eynollah/train/utils.py new file mode 100644 index 0000000..64263f4 --- /dev/null +++ b/eynollah/eynollah/train/utils.py @@ -0,0 +1,494 @@ +import os +import cv2 +import numpy as np +import seaborn as sns +from scipy.ndimage.interpolation import map_coordinates +from scipy.ndimage.filters import gaussian_filter +import random +from tqdm import tqdm +import imutils +import math + + +def bluring(img_in, kind): + if kind == 'guass': + img_blur = cv2.GaussianBlur(img_in, (5, 5), 0) + elif kind == "median": + img_blur = cv2.medianBlur(img_in, 5) + elif kind == 'blur': + img_blur = cv2.blur(img_in, (5, 5)) + return img_blur + + +def elastic_transform(image, alpha, sigma, seedj, random_state=None): + """Elastic deformation of images as described in [Simard2003]_. + .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for + Convolutional Neural Networks applied to Visual Document Analysis", in + Proc. of the International Conference on Document Analysis and + Recognition, 2003. + """ + if random_state is None: + random_state = np.random.RandomState(seedj) + + shape = image.shape + dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha + dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha + dz = np.zeros_like(dx) + + x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2])) + indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1)) + + distored_image = map_coordinates(image, indices, order=1, mode='reflect') + return distored_image.reshape(image.shape) + + +def rotation_90(img): + img_rot = np.zeros((img.shape[1], img.shape[0], img.shape[2])) + img_rot[:, :, 0] = img[:, :, 0].T + img_rot[:, :, 1] = img[:, :, 1].T + img_rot[:, :, 2] = img[:, :, 2].T + return img_rot + + +def rotatedRectWithMaxArea(w, h, angle): + """ + Given a rectangle of size wxh that has been rotated by 'angle' (in + radians), computes the width and height of the largest possible + axis-aligned rectangle (maximal area) within the rotated rectangle. 
+ """ + if w <= 0 or h <= 0: + return 0, 0 + + width_is_longer = w >= h + side_long, side_short = (w, h) if width_is_longer else (h, w) + + # since the solutions for angle, -angle and 180-angle are all the same, + # if suffices to look at the first quadrant and the absolute values of sin,cos: + sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle)) + if side_short <= 2. * sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10: + # half constrained case: two crop corners touch the longer side, + # the other two corners are on the mid-line parallel to the longer line + x = 0.5 * side_short + wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a) + else: + # fully constrained case: crop touches all 4 sides + cos_2a = cos_a * cos_a - sin_a * sin_a + wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a + + return wr, hr + + +def rotate_max_area(image, rotated, rotated_label, angle): + """ image: cv2 image matrix object + angle: in degree + """ + wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], + math.radians(angle)) + h, w, _ = rotated.shape + y1 = h // 2 - int(hr / 2) + y2 = y1 + int(hr) + x1 = w // 2 - int(wr / 2) + x2 = x1 + int(wr) + return rotated[y1:y2, x1:x2], rotated_label[y1:y2, x1:x2] + + +def rotation_not_90_func(img, label, thetha): + rotated = imutils.rotate(img, thetha) + rotated_label = imutils.rotate(label, thetha) + return rotate_max_area(img, rotated, rotated_label, thetha) + + +def color_images(seg, n_classes): + ann_u = range(n_classes) + if len(np.shape(seg)) == 3: + seg = seg[:, :, 0] + + seg_img = np.zeros((np.shape(seg)[0], np.shape(seg)[1], 3)).astype(float) + colors = sns.color_palette("hls", n_classes) + + for c in ann_u: + c = int(c) + segl = (seg == c) + seg_img[:, :, 0] += segl * (colors[c][0]) + seg_img[:, :, 1] += segl * (colors[c][1]) + seg_img[:, :, 2] += segl * (colors[c][2]) + return seg_img + + +def resize_image(seg_in, input_height, input_width): + return cv2.resize(seg_in, (input_width, input_height), interpolation=cv2.INTER_NEAREST) + + +def get_one_hot(seg, input_height, input_width, n_classes): + seg = seg[:, :, 0] + seg_f = np.zeros((input_height, input_width, n_classes)) + for j in range(n_classes): + seg_f[:, :, j] = (seg == j).astype(int) + return seg_f + + +def IoU(Yi, y_predi): + # mean Intersection over Union + # Mean IoU = TP/(FN + TP + FP) + + IoUs = [] + classes_true = np.unique(Yi) + for c in classes_true: + TP = np.sum((Yi == c) & (y_predi == c)) + FP = np.sum((Yi != c) & (y_predi == c)) + FN = np.sum((Yi == c) & (y_predi != c)) + IoU = TP / float(TP + FP + FN) + print("class {:02.0f}: #TP={:6.0f}, #FP={:6.0f}, #FN={:5.0f}, IoU={:4.3f}".format(c, TP, FP, FN, IoU)) + IoUs.append(IoU) + mIoU = np.mean(IoUs) + print("_________________") + print("Mean IoU: {:4.3f}".format(mIoU)) + return mIoU + + +def data_gen(img_folder, mask_folder, batch_size, input_height, input_width, n_classes): + c = 0 + n = [f for f in os.listdir(img_folder) if not f.startswith('.')] # os.listdir(img_folder) #List of training images + random.shuffle(n) + while True: + img = np.zeros((batch_size, input_height, input_width, 3)).astype('float') + mask = np.zeros((batch_size, input_height, input_width, n_classes)).astype('float') + + for i in range(c, c + batch_size): # initially from 0 to 16, c = 0. + # print(img_folder+'/'+n[i]) + + try: + filename = n[i].split('.')[0] + + train_img = cv2.imread(img_folder + '/' + n[i]) / 255. 
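+                # images are scaled to [0, 1] here; the label mask with the same
+                # file stem is read below and one-hot encoded via get_one_hot()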
+ train_img = cv2.resize(train_img, (input_width, input_height), + interpolation=cv2.INTER_NEAREST) # Read an image from folder and resize + + img[i - c] = train_img # add to array - img[0], img[1], and so on. + train_mask = cv2.imread(mask_folder + '/' + filename + '.png') + # print(mask_folder+'/'+filename+'.png') + # print(train_mask.shape) + train_mask = get_one_hot(resize_image(train_mask, input_height, input_width), input_height, input_width, + n_classes) + # train_mask = train_mask.reshape(224, 224, 1) # Add extra dimension for parity with train_img size [512 * 512 * 3] + + mask[i - c] = train_mask + except: + img[i - c] = np.ones((input_height, input_width, 3)).astype('float') + mask[i - c] = np.zeros((input_height, input_width, n_classes)).astype('float') + + c += batch_size + if c + batch_size >= len(os.listdir(img_folder)): + c = 0 + random.shuffle(n) + yield img, mask + + +def otsu_copy(img): + img_r = np.zeros(img.shape) + img1 = img[:, :, 0] + img2 = img[:, :, 1] + img3 = img[:, :, 2] + _, threshold1 = cv2.threshold(img1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + _, threshold2 = cv2.threshold(img2, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + _, threshold3 = cv2.threshold(img3, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + img_r[:, :, 0] = threshold1 + img_r[:, :, 1] = threshold1 + img_r[:, :, 2] = threshold1 + return img_r + + +def get_patches(dir_img_f, dir_seg_f, img, label, height, width, indexer): + if img.shape[0] < height or img.shape[1] < width: + img, label = do_padding(img, label, height, width) + + img_h = img.shape[0] + img_w = img.shape[1] + + nxf = img_w / float(width) + nyf = img_h / float(height) + + if nxf > int(nxf): + nxf = int(nxf) + 1 + if nyf > int(nyf): + nyf = int(nyf) + 1 + + nxf = int(nxf) + nyf = int(nyf) + + for i in range(nxf): + for j in range(nyf): + index_x_d = i * width + index_x_u = (i + 1) * width + + index_y_d = j * height + index_y_u = (j + 1) * height + + if index_x_u > img_w: + index_x_u = img_w + index_x_d = img_w - width + if index_y_u > img_h: + index_y_u = img_h + index_y_d = img_h - height + + img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] + label_patch = label[index_y_d:index_y_u, index_x_d:index_x_u, :] + + cv2.imwrite(dir_img_f + '/img_' + str(indexer) + '.png', img_patch) + cv2.imwrite(dir_seg_f + '/img_' + str(indexer) + '.png', label_patch) + indexer += 1 + + return indexer + + +def do_padding(img, label, height, width): + height_new = img.shape[0] + width_new = img.shape[1] + + h_start = 0 + w_start = 0 + + if img.shape[0] < height: + h_start = int(abs(height - img.shape[0]) / 2.) + height_new = height + + if img.shape[1] < width: + w_start = int(abs(width - img.shape[1]) / 2.) 
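+        # the h_start/w_start offsets centre the original image inside the padded canvas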
+ width_new = width + + img_new = np.ones((height_new, width_new, img.shape[2])).astype(float) * 255 + label_new = np.zeros((height_new, width_new, label.shape[2])).astype(float) + + img_new[h_start:h_start + img.shape[0], w_start:w_start + img.shape[1], :] = np.copy(img[:, :, :]) + label_new[h_start:h_start + label.shape[0], w_start:w_start + label.shape[1], :] = np.copy(label[:, :, :]) + + return img_new, label_new + + +def get_patches_num_scale(dir_img_f, dir_seg_f, img, label, height, width, indexer, n_patches, scaler): + if img.shape[0] < height or img.shape[1] < width: + img, label = do_padding(img, label, height, width) + + img_h = img.shape[0] + img_w = img.shape[1] + + height_scale = int(height * scaler) + width_scale = int(width * scaler) + + nxf = img_w / float(width_scale) + nyf = img_h / float(height_scale) + + if nxf > int(nxf): + nxf = int(nxf) + 1 + if nyf > int(nyf): + nyf = int(nyf) + 1 + + nxf = int(nxf) + nyf = int(nyf) + + for i in range(nxf): + for j in range(nyf): + index_x_d = i * width_scale + index_x_u = (i + 1) * width_scale + + index_y_d = j * height_scale + index_y_u = (j + 1) * height_scale + + if index_x_u > img_w: + index_x_u = img_w + index_x_d = img_w - width_scale + if index_y_u > img_h: + index_y_u = img_h + index_y_d = img_h - height_scale + + img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] + label_patch = label[index_y_d:index_y_u, index_x_d:index_x_u, :] + + img_patch = resize_image(img_patch, height, width) + label_patch = resize_image(label_patch, height, width) + + cv2.imwrite(dir_img_f + '/img_' + str(indexer) + '.png', img_patch) + cv2.imwrite(dir_seg_f + '/img_' + str(indexer) + '.png', label_patch) + indexer += 1 + + return indexer + + +def get_patches_num_scale_new(dir_img_f, dir_seg_f, img, label, height, width, indexer, scaler): + img = resize_image(img, int(img.shape[0] * scaler), int(img.shape[1] * scaler)) + label = resize_image(label, int(label.shape[0] * scaler), int(label.shape[1] * scaler)) + + if img.shape[0] < height or img.shape[1] < width: + img, label = do_padding(img, label, height, width) + + img_h = img.shape[0] + img_w = img.shape[1] + + height_scale = int(height * 1) + width_scale = int(width * 1) + + nxf = img_w / float(width_scale) + nyf = img_h / float(height_scale) + + if nxf > int(nxf): + nxf = int(nxf) + 1 + if nyf > int(nyf): + nyf = int(nyf) + 1 + + nxf = int(nxf) + nyf = int(nyf) + + for i in range(nxf): + for j in range(nyf): + index_x_d = i * width_scale + index_x_u = (i + 1) * width_scale + + index_y_d = j * height_scale + index_y_u = (j + 1) * height_scale + + if index_x_u > img_w: + index_x_u = img_w + index_x_d = img_w - width_scale + if index_y_u > img_h: + index_y_u = img_h + index_y_d = img_h - height_scale + + img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] + label_patch = label[index_y_d:index_y_u, index_x_d:index_x_u, :] + + # img_patch=resize_image(img_patch,height,width) + # label_patch=resize_image(label_patch,height,width) + + cv2.imwrite(dir_img_f + '/img_' + str(indexer) + '.png', img_patch) + cv2.imwrite(dir_seg_f + '/img_' + str(indexer) + '.png', label_patch) + indexer += 1 + + return indexer + + +def provide_patches(dir_img, dir_seg, dir_flow_train_imgs, + dir_flow_train_labels, + input_height, input_width, blur_k, blur_aug, + flip_aug, binarization, scaling, scales, flip_index, + scaling_bluring, scaling_binarization, rotation, + rotation_not_90, thetha, scaling_flip, + augmentation=False, patches=False): + imgs_cv_train = np.array(os.listdir(dir_img)) + segs_cv_train 
= np.array(os.listdir(dir_seg)) + + indexer = 0 + for im, seg_i in tqdm(zip(imgs_cv_train, segs_cv_train)): + img_name = im.split('.')[0] + if not patches: + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', + resize_image(cv2.imread(dir_img + '/' + im), input_height, input_width)) + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', + resize_image(cv2.imread(dir_seg + '/' + img_name + '.png'), input_height, input_width)) + indexer += 1 + + if augmentation: + if flip_aug: + for f_i in flip_index: + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', + resize_image(cv2.flip(cv2.imread(dir_img + '/' + im), f_i), input_height, + input_width)) + + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', + resize_image(cv2.flip(cv2.imread(dir_seg + '/' + img_name + '.png'), f_i), + input_height, input_width)) + indexer += 1 + + if blur_aug: + for blur_i in blur_k: + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', + (resize_image(bluring(cv2.imread(dir_img + '/' + im), blur_i), input_height, + input_width))) + + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', + resize_image(cv2.imread(dir_seg + '/' + img_name + '.png'), input_height, + input_width)) + indexer += 1 + + if binarization: + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', + resize_image(otsu_copy(cv2.imread(dir_img + '/' + im)), input_height, input_width)) + + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', + resize_image(cv2.imread(dir_seg + '/' + img_name + '.png'), input_height, input_width)) + indexer += 1 + + if patches: + + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + cv2.imread(dir_img + '/' + im), cv2.imread(dir_seg + '/' + img_name + '.png'), + input_height, input_width, indexer=indexer) + + if augmentation: + + if rotation: + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + rotation_90(cv2.imread(dir_img + '/' + im)), + rotation_90(cv2.imread(dir_seg + '/' + img_name + '.png')), + input_height, input_width, indexer=indexer) + + if rotation_not_90: + + for thetha_i in thetha: + img_max_rotated, label_max_rotated = rotation_not_90_func(cv2.imread(dir_img + '/' + im), + cv2.imread( + dir_seg + '/' + img_name + '.png'), + thetha_i) + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + img_max_rotated, + label_max_rotated, + input_height, input_width, indexer=indexer) + if flip_aug: + for f_i in flip_index: + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + cv2.flip(cv2.imread(dir_img + '/' + im), f_i), + cv2.flip(cv2.imread(dir_seg + '/' + img_name + '.png'), f_i), + input_height, input_width, indexer=indexer) + if blur_aug: + for blur_i in blur_k: + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + bluring(cv2.imread(dir_img + '/' + im), blur_i), + cv2.imread(dir_seg + '/' + img_name + '.png'), + input_height, input_width, indexer=indexer) + + if scaling: + for sc_ind in scales: + indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, + cv2.imread(dir_img + '/' + im), + cv2.imread(dir_seg + '/' + img_name + '.png'), + input_height, input_width, indexer=indexer, scaler=sc_ind) + if binarization: + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + otsu_copy(cv2.imread(dir_img + '/' + im)), + cv2.imread(dir_seg + '/' + img_name + '.png'), + input_height, input_width, indexer=indexer) + + if scaling_bluring: + for sc_ind in scales: + for blur_i in blur_k: 
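+                            # combined augmentation: every scale in `scales` is paired with every blur kernel in `blur_k`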
+ indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, + bluring(cv2.imread(dir_img + '/' + im), blur_i), + cv2.imread(dir_seg + '/' + img_name + '.png'), + input_height, input_width, indexer=indexer, + scaler=sc_ind) + + if scaling_binarization: + for sc_ind in scales: + indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, + otsu_copy(cv2.imread(dir_img + '/' + im)), + cv2.imread(dir_seg + '/' + img_name + '.png'), + input_height, input_width, indexer=indexer, scaler=sc_ind) + + if scaling_flip: + for sc_ind in scales: + for f_i in flip_index: + indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, + cv2.flip(cv2.imread(dir_img + '/' + im), f_i), + cv2.flip(cv2.imread(dir_seg + '/' + img_name + '.png'), + f_i), + input_height, input_width, indexer=indexer, + scaler=sc_ind) diff --git a/qurator/eynollah/utils/__init__.py b/eynollah/eynollah/utils/__init__.py similarity index 100% rename from qurator/eynollah/utils/__init__.py rename to eynollah/eynollah/utils/__init__.py diff --git a/qurator/eynollah/utils/contour.py b/eynollah/eynollah/utils/contour.py similarity index 90% rename from qurator/eynollah/utils/contour.py rename to eynollah/eynollah/utils/contour.py index dfefbbf..9190f8d 100644 --- a/qurator/eynollah/utils/contour.py +++ b/eynollah/eynollah/utils/contour.py @@ -5,6 +5,8 @@ from shapely import geometry from .rotate import rotate_image, rotation_image_new from multiprocessing import Process, Queue, cpu_count from multiprocessing import Pool + + def contours_in_same_horizon(cy_main_hor): X1 = np.zeros((len(cy_main_hor), len(cy_main_hor))) X2 = np.zeros((len(cy_main_hor), len(cy_main_hor))) @@ -22,6 +24,7 @@ def contours_in_same_horizon(cy_main_hor): all_args.append(list(set(list_h))) return np.unique(np.array(all_args, dtype=object)) + def find_contours_mean_y_diff(contours_main): M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] @@ -42,10 +45,11 @@ def get_text_region_boxes_by_given_contours(contours): del contours return boxes, contours_new + def filter_contours_area_of_image(image, contours, hierarchy, max_area, min_area): found_polygons_early = list() - for jv,c in enumerate(contours): + for jv, c in enumerate(contours): if len(c) < 3: # A polygon cannot have less than 3 points continue @@ -55,17 +59,18 @@ def filter_contours_area_of_image(image, contours, hierarchy, max_area, min_area found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.uint)) return found_polygons_early + def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, min_area): found_polygons_early = list() - for jv,c in enumerate(contours): + for jv, c in enumerate(contours): if len(c) < 3: # A polygon cannot have less than 3 points continue polygon = geometry.Polygon([point[0] for point in c]) # area = cv2.contourArea(c) area = polygon.area - ##print(np.prod(thresh.shape[:2])) + # print(np.prod(thresh.shape[:2])) # Check that polygon has area greater than minimal area # print(hierarchy[0][jv][3],hierarchy ) if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 : @@ -73,6 +78,7 @@ def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, m found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.int32)) return found_polygons_early 
+ def find_new_features_of_contours(contours_main): areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))]) @@ -108,25 +114,26 @@ def find_new_features_of_contours(contours_main): return cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin -def find_features_of_contours(contours_main): - - areas_main=np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))]) - M_main=[cv2.moments(contours_main[j]) for j in range(len(contours_main))] - cx_main=[(M_main[j]['m10']/(M_main[j]['m00']+1e-32)) for j in range(len(M_main))] - cy_main=[(M_main[j]['m01']/(M_main[j]['m00']+1e-32)) for j in range(len(M_main))] - x_min_main=np.array([np.min(contours_main[j][:,0,0]) for j in range(len(contours_main))]) - x_max_main=np.array([np.max(contours_main[j][:,0,0]) for j in range(len(contours_main))]) +def find_features_of_contours(contours_main): + areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))]) + M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] + cx_main = [(M_main[j]['m10']/(M_main[j]['m00']+1e-32)) for j in range(len(M_main))] + cy_main = [(M_main[j]['m01']/(M_main[j]['m00']+1e-32)) for j in range(len(M_main))] + x_min_main = np.array([np.min(contours_main[j][:, 0, 0]) for j in range(len(contours_main))]) + x_max_main = np.array([np.max(contours_main[j][:, 0, 0]) for j in range(len(contours_main))]) - y_min_main=np.array([np.min(contours_main[j][:,0,1]) for j in range(len(contours_main))]) - y_max_main=np.array([np.max(contours_main[j][:,0,1]) for j in range(len(contours_main))]) + y_min_main = np.array([np.min(contours_main[j][:, 0, 1]) for j in range(len(contours_main))]) + y_max_main = np.array([np.max(contours_main[j][:, 0, 1]) for j in range(len(contours_main))]) - return y_min_main, y_max_main + + def return_parent_contours(contours, hierarchy): contours_parent = [contours[i] for i in range(len(contours)) if hierarchy[0][i][3] == -1] return contours_parent + def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002): # pixels of images are identified by 5 @@ -146,6 +153,7 @@ def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002): return contours_imgs + def do_work_of_contours_in_image(queue_of_all_params, contours_per_process, indexes_r_con_per_pro, img, slope_first): cnts_org_per_each_subprocess = [] index_by_text_region_contours = [] @@ -166,10 +174,9 @@ def do_work_of_contours_in_image(queue_of_all_params, contours_per_process, inde cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1]) cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0]) - cnts_org_per_each_subprocess.append(cont_int[0]) - queue_of_all_params.put([ cnts_org_per_each_subprocess, index_by_text_region_contours]) + queue_of_all_params.put([cnts_org_per_each_subprocess, index_by_text_region_contours]) def loop_contour_image(index_l, cnts,img, slope_first): @@ -181,7 +188,7 @@ def loop_contour_image(index_l, cnts,img, slope_first): # print(img.shape,'img') img_copy = rotation_image_new(img_copy, -slope_first) - ##print(img_copy.shape,'img_copy') + # print(img_copy.shape,'img_copy') # plt.imshow(img_copy) # plt.show() @@ -196,15 +203,17 @@ def loop_contour_image(index_l, cnts,img, slope_first): # print(np.shape(cont_int[0])) return cont_int[0] + def get_textregion_contours_in_org_image_multi2(cnts, img, slope_first): cnts_org = [] # print(cnts,'cnts') with 
Pool(cpu_count()) as p: - cnts_org = p.starmap(loop_contour_image, [(index_l,cnts, img,slope_first) for index_l in range(len(cnts))]) + cnts_org = p.starmap(loop_contour_image, [(index_l, cnts, img, slope_first) for index_l in range(len(cnts))]) return cnts_org + def get_textregion_contours_in_org_image(cnts, img, slope_first): cnts_org = [] @@ -218,7 +227,7 @@ def get_textregion_contours_in_org_image(cnts, img, slope_first): # print(img.shape,'img') img_copy = rotation_image_new(img_copy, -slope_first) - ##print(img_copy.shape,'img_copy') + # print(img_copy.shape,'img_copy') # plt.imshow(img_copy) # plt.show() @@ -235,17 +244,18 @@ def get_textregion_contours_in_org_image(cnts, img, slope_first): return cnts_org + def get_textregion_contours_in_org_image_light(cnts, img, slope_first): h_o = img.shape[0] w_o = img.shape[1] img = cv2.resize(img, (int(img.shape[1]/3.), int(img.shape[0]/3.)), interpolation=cv2.INTER_NEAREST) - ##cnts = list( (np.array(cnts)/2).astype(np.int16) ) - #cnts = cnts/2 - cnts = [(i/ 3).astype(np.int32) for i in cnts] + # cnts = list( (np.array(cnts)/2).astype(np.int16) ) + # cnts = cnts/2 + cnts = [(i / 3).astype(np.int32) for i in cnts] cnts_org = [] - #print(cnts,'cnts') + # print(cnts,'cnts') for i in range(len(cnts)): img_copy = np.zeros(img.shape) img_copy = cv2.fillPoly(img_copy, pts=[cnts[i]], color=(1, 1, 1)) @@ -255,7 +265,7 @@ def get_textregion_contours_in_org_image_light(cnts, img, slope_first): # print(img.shape,'img') img_copy = rotation_image_new(img_copy, -slope_first) - ##print(img_copy.shape,'img_copy') + # print(img_copy.shape,'img_copy') # plt.imshow(img_copy) # plt.show() @@ -272,6 +282,7 @@ def get_textregion_contours_in_org_image_light(cnts, img, slope_first): return cnts_org + def return_contours_of_interested_textline(region_pre_p, pixel): # pixels of images are identified by 5 @@ -289,6 +300,7 @@ def return_contours_of_interested_textline(region_pre_p, pixel): contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=0.000000003) return contours_imgs + def return_contours_of_image(image): if len(image.shape) == 2: @@ -301,6 +313,7 @@ def return_contours_of_image(image): contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) return contours, hierarchy + def return_contours_of_interested_region_by_min_size(region_pre_p, pixel, min_size=0.00003): # pixels of images are identified by 5 @@ -320,6 +333,7 @@ def return_contours_of_interested_region_by_min_size(region_pre_p, pixel, min_si return contours_imgs + def return_contours_of_interested_region_by_size(region_pre_p, pixel, min_area, max_area): # pixels of images are identified by 5 @@ -339,4 +353,3 @@ def return_contours_of_interested_region_by_size(region_pre_p, pixel, min_area, img_ret = np.zeros((region_pre_p.shape[0], region_pre_p.shape[1], 3)) img_ret = cv2.fillPoly(img_ret, pts=contours_imgs, color=(1, 1, 1)) return img_ret[:, :, 0] - diff --git a/qurator/eynollah/utils/counter.py b/eynollah/eynollah/utils/counter.py similarity index 99% rename from qurator/eynollah/utils/counter.py rename to eynollah/eynollah/utils/counter.py index 9a3ed70..ac32dc9 100644 --- a/qurator/eynollah/utils/counter.py +++ b/eynollah/eynollah/utils/counter.py @@ -3,6 +3,7 @@ from collections import Counter REGION_ID_TEMPLATE = 'region_%04d' LINE_ID_TEMPLATE = 'region_%04d_line_%04d' + class EynollahIdCounter(): def __init__(self, region_idx=0, line_idx=0): diff --git a/qurator/eynollah/utils/dirs.py 
b/eynollah/eynollah/utils/dirs.py similarity index 100% rename from qurator/eynollah/utils/dirs.py rename to eynollah/eynollah/utils/dirs.py diff --git a/qurator/eynollah/utils/drop_capitals.py b/eynollah/eynollah/utils/drop_capitals.py similarity index 78% rename from qurator/eynollah/utils/drop_capitals.py rename to eynollah/eynollah/utils/drop_capitals.py index e12028f..d464c63 100644 --- a/qurator/eynollah/utils/drop_capitals.py +++ b/eynollah/eynollah/utils/drop_capitals.py @@ -6,6 +6,7 @@ from .contour import ( return_parent_contours, ) + def adhere_drop_capital_region_into_corresponding_textline( text_regions_p, polygons_of_drop_capitals, @@ -26,7 +27,7 @@ def adhere_drop_capital_region_into_corresponding_textline( img_con_all = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1], 3)) for j_cont in range(len(contours_only_text_parent)): - img_con_all[all_box_coord[j_cont][0] : all_box_coord[j_cont][1], all_box_coord[j_cont][2] : all_box_coord[j_cont][3], 0] = (j_cont + 1) * 3 + img_con_all[all_box_coord[j_cont][0]: all_box_coord[j_cont][1], all_box_coord[j_cont][2]: all_box_coord[j_cont][3], 0] = (j_cont + 1) * 3 # img_con_all=cv2.fillPoly(img_con_all,pts=[contours_only_text_parent[j_cont]],color=((j_cont+1)*3,(j_cont+1)*3,(j_cont+1)*3)) # plt.imshow(img_con_all[:,:,0]) @@ -44,7 +45,7 @@ def adhere_drop_capital_region_into_corresponding_textline( # plt.imshow(img_con[:,:,0]) # plt.show() - ##img_con=cv2.dilate(img_con, kernel, iterations=30) + # img_con=cv2.dilate(img_con, kernel, iterations=30) # plt.imshow(img_con[:,:,0]) # plt.show() @@ -185,7 +186,7 @@ def adhere_drop_capital_region_into_corresponding_textline( # contours_biggest[:,0,1]=contours_biggest[:,0,1]#-all_box_coord[int(region_final)][0] # print(np.shape(contours_biggest),'contours_biggest') # print(np.shape(all_found_textline_polygons[int(region_final)][arg_min])) - ##contours_biggest=contours_biggest.reshape(np.shape(contours_biggest)[0],np.shape(contours_biggest)[2]) + # contours_biggest=contours_biggest.reshape(np.shape(contours_biggest)[0],np.shape(contours_biggest)[2]) all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest except: pass @@ -230,7 +231,7 @@ def adhere_drop_capital_region_into_corresponding_textline( contours_biggest[:, 0, 0] = contours_biggest[:, 0, 0] # -all_box_coord[int(region_final)][2] contours_biggest[:, 0, 1] = contours_biggest[:, 0, 1] # -all_box_coord[int(region_final)][0] - ##contours_biggest=contours_biggest.reshape(np.shape(contours_biggest)[0],np.shape(contours_biggest)[2]) + # contours_biggest=contours_biggest.reshape(np.shape(contours_biggest)[0],np.shape(contours_biggest)[2]) all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest # all_found_textline_polygons[int(region_final)][arg_min]=contours_biggest @@ -239,49 +240,49 @@ def adhere_drop_capital_region_into_corresponding_textline( else: pass - ##cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) - ###print(all_box_coord[j_cont]) - ###print(cx_t) - ###print(cy_t) - ###print(cx_d[i_drop]) - ###print(cy_d[i_drop]) - ##y_lines=all_box_coord[int(region_final)][0]+np.array(cy_t) + # cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) + # ##print(all_box_coord[j_cont]) + # ##print(cx_t) + # ##print(cy_t) + # ##print(cx_d[i_drop]) + # ##print(cy_d[i_drop]) + # #y_lines=all_box_coord[int(region_final)][0]+np.array(cy_t) - ##y_lines[y_lines 1: @@ -399,71 +400,72 @@ def 
adhere_drop_capital_region_into_corresponding_textline( else: pass - #####for i_drop in range(len(polygons_of_drop_capitals)): - #####for j_cont in range(len(contours_only_text_parent)): - #####img_con=np.zeros((text_regions_p.shape[0],text_regions_p.shape[1],3)) - #####img_con=cv2.fillPoly(img_con,pts=[polygons_of_drop_capitals[i_drop] ],color=(255,255,255)) - #####img_con=cv2.fillPoly(img_con,pts=[contours_only_text_parent[j_cont]],color=(255,255,255)) - - #####img_con=img_con.astype(np.uint8) - ######imgray = cv2.cvtColor(img_con, cv2.COLOR_BGR2GRAY) - ######ret, thresh = cv2.threshold(imgray, 0, 255, 0) - - ######contours_new,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) - - #####contours_new,hir_new=return_contours_of_image(img_con) - #####contours_new_parent=return_parent_contours( contours_new,hir_new) - ######plt.imshow(img_con) - ######plt.show() - #####try: - #####if len(contours_new_parent)==1: - ######print(all_found_textline_polygons[j_cont][0]) - #####cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_textline_polygons[j_cont]) - ######print(all_box_coord[j_cont]) - ######print(cx_t) - ######print(cy_t) - ######print(cx_d[i_drop]) - ######print(cy_d[i_drop]) - #####y_lines=all_box_coord[j_cont][0]+np.array(cy_t) - - ######print(y_lines) - - #####arg_min=np.argmin(np.abs(y_lines-y_min_d[i_drop]) ) - ######print(arg_min) - - #####cnt_nearest=np.copy(all_found_textline_polygons[j_cont][arg_min]) - #####cnt_nearest[:,0]=all_found_textline_polygons[j_cont][arg_min][:,0]+all_box_coord[j_cont][2] - #####cnt_nearest[:,1]=all_found_textline_polygons[j_cont][arg_min][:,1]+all_box_coord[j_cont][0] - - #####img_textlines=np.zeros((text_regions_p.shape[0],text_regions_p.shape[1],3)) - #####img_textlines=cv2.fillPoly(img_textlines,pts=[cnt_nearest],color=(255,255,255)) - #####img_textlines=cv2.fillPoly(img_textlines,pts=[polygons_of_drop_capitals[i_drop] ],color=(255,255,255)) - - #####img_textlines=img_textlines.astype(np.uint8) - #####imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY) - #####ret, thresh = cv2.threshold(imgray, 0, 255, 0) - - #####contours_combined,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) - - #####areas_cnt_text=np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))]) - - #####contours_biggest=contours_combined[np.argmax(areas_cnt_text)] - - ######print(np.shape(contours_biggest)) - ######print(contours_biggest[:]) - #####contours_biggest[:,0,0]=contours_biggest[:,0,0]-all_box_coord[j_cont][2] - #####contours_biggest[:,0,1]=contours_biggest[:,0,1]-all_box_coord[j_cont][0] - - #####all_found_textline_polygons[j_cont][arg_min]=contours_biggest - ######print(contours_biggest) - ######plt.imshow(img_textlines[:,:,0]) - ######plt.show() - #####else: - #####pass - #####except: - #####pass + # ####for i_drop in range(len(polygons_of_drop_capitals)): + # ####for j_cont in range(len(contours_only_text_parent)): + # ####img_con=np.zeros((text_regions_p.shape[0],text_regions_p.shape[1],3)) + # ####img_con=cv2.fillPoly(img_con,pts=[polygons_of_drop_capitals[i_drop] ],color=(255,255,255)) + # ####img_con=cv2.fillPoly(img_con,pts=[contours_only_text_parent[j_cont]],color=(255,255,255)) + + # ####img_con=img_con.astype(np.uint8) + # #####imgray = cv2.cvtColor(img_con, cv2.COLOR_BGR2GRAY) + # #####ret, thresh = cv2.threshold(imgray, 0, 255, 0) + + # #####contours_new,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + + # 
####contours_new,hir_new=return_contours_of_image(img_con) + # ####contours_new_parent=return_parent_contours( contours_new,hir_new) + # #####plt.imshow(img_con) + # #####plt.show() + # ####try: + # ####if len(contours_new_parent)==1: + # #####print(all_found_textline_polygons[j_cont][0]) + # ####cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_textline_polygons[j_cont]) + # #####print(all_box_coord[j_cont]) + # #####print(cx_t) + # #####print(cy_t) + # #####print(cx_d[i_drop]) + # #####print(cy_d[i_drop]) + # ####y_lines=all_box_coord[j_cont][0]+np.array(cy_t) + + # #####print(y_lines) + + # ####arg_min=np.argmin(np.abs(y_lines-y_min_d[i_drop]) ) + # #####print(arg_min) + + # ####cnt_nearest=np.copy(all_found_textline_polygons[j_cont][arg_min]) + # ####cnt_nearest[:,0]=all_found_textline_polygons[j_cont][arg_min][:,0]+all_box_coord[j_cont][2] + # ####cnt_nearest[:,1]=all_found_textline_polygons[j_cont][arg_min][:,1]+all_box_coord[j_cont][0] + + # ####img_textlines=np.zeros((text_regions_p.shape[0],text_regions_p.shape[1],3)) + # ####img_textlines=cv2.fillPoly(img_textlines,pts=[cnt_nearest],color=(255,255,255)) + # ####img_textlines=cv2.fillPoly(img_textlines,pts=[polygons_of_drop_capitals[i_drop] ],color=(255,255,255)) + + # ####img_textlines=img_textlines.astype(np.uint8) + # ####imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY) + # ####ret, thresh = cv2.threshold(imgray, 0, 255, 0) + + # ####contours_combined,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + + # ####areas_cnt_text=np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))]) + + # ####contours_biggest=contours_combined[np.argmax(areas_cnt_text)] + + # #####print(np.shape(contours_biggest)) + # #####print(contours_biggest[:]) + # ####contours_biggest[:,0,0]=contours_biggest[:,0,0]-all_box_coord[j_cont][2] + # ####contours_biggest[:,0,1]=contours_biggest[:,0,1]-all_box_coord[j_cont][0] + + # ####all_found_textline_polygons[j_cont][arg_min]=contours_biggest + # #####print(contours_biggest) + # #####plt.imshow(img_textlines[:,:,0]) + # #####plt.show() + # ####else: + # ####pass + # ####except: + # ####pass return all_found_textline_polygons + def filter_small_drop_capitals_from_no_patch_layout(layout_no_patch, layout1): drop_only = (layout_no_patch[:, :, 0] == 4) * 1 @@ -489,7 +491,7 @@ def filter_small_drop_capitals_from_no_patch_layout(layout_no_patch, layout1): if iou_of_box_and_contoure > 60 and weigh_to_height_ratio < 1.2 and height_to_weight_ratio < 2: map_of_drop_contour_bb = np.zeros((layout1.shape[0], layout1.shape[1])) - map_of_drop_contour_bb[y : y + h, x : x + w] = layout1[y : y + h, x : x + w] + map_of_drop_contour_bb[y: y + h, x: x + w] = layout1[y: y + h, x: x + w] if (((map_of_drop_contour_bb == 1) * 1).sum() / float(((map_of_drop_contour_bb == 5) * 1).sum()) * 100) >= 15: contours_drop_parent_final.append(contours_drop_parent[jj]) @@ -499,4 +501,3 @@ def filter_small_drop_capitals_from_no_patch_layout(layout_no_patch, layout1): layout_no_patch = cv2.fillPoly(layout_no_patch, pts=contours_drop_parent_final, color=(4, 4, 4)) return layout_no_patch - diff --git a/qurator/eynollah/utils/is_nan.py b/eynollah/eynollah/utils/is_nan.py similarity index 100% rename from qurator/eynollah/utils/is_nan.py rename to eynollah/eynollah/utils/is_nan.py diff --git a/eynollah/eynollah/utils/marginals.py b/eynollah/eynollah/utils/marginals.py new file mode 100644 index 0000000..d7514bb --- /dev/null +++ b/eynollah/eynollah/utils/marginals.py @@ -0,0 
+1,228 @@ +import numpy as np +import cv2 +from scipy.signal import find_peaks +from scipy.ndimage import gaussian_filter1d + +from .contour import find_new_features_of_contours, return_contours_of_interested_region +from .resize import resize_image +from .rotate import rotate_image + + +def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, kernel=None): + mask_marginals = np.zeros((text_with_lines.shape[0], text_with_lines.shape[1])) + mask_marginals = mask_marginals.astype(np.uint8) + + text_with_lines = text_with_lines.astype(np.uint8) + # text_with_lines=cv2.erode(text_with_lines,self.kernel,iterations=3) + + text_with_lines_eroded = cv2.erode(text_with_lines, kernel, iterations=5) + + if text_with_lines.shape[0] <= 1500: + pass + elif text_with_lines.shape[0] > 1500 and text_with_lines.shape[0] <= 1800: + text_with_lines = resize_image(text_with_lines, int(text_with_lines.shape[0] * 1.5), text_with_lines.shape[1]) + text_with_lines = cv2.erode(text_with_lines, kernel, iterations=5) + text_with_lines = resize_image(text_with_lines, text_with_lines_eroded.shape[0], + text_with_lines_eroded.shape[1]) + else: + text_with_lines = resize_image(text_with_lines, int(text_with_lines.shape[0] * 1.8), text_with_lines.shape[1]) + text_with_lines = cv2.erode(text_with_lines, kernel, iterations=7) + text_with_lines = resize_image(text_with_lines, text_with_lines_eroded.shape[0], + text_with_lines_eroded.shape[1]) + + text_with_lines_y = text_with_lines.sum(axis=0) + text_with_lines_y_eroded = text_with_lines_eroded.sum(axis=0) + + thickness_along_y_percent = text_with_lines_y_eroded.max() / (float(text_with_lines.shape[0])) * 100 + + # print(thickness_along_y_percent,'thickness_along_y_percent') + + if thickness_along_y_percent < 30: + min_textline_thickness = 8 + elif thickness_along_y_percent >= 30 and thickness_along_y_percent < 50: + min_textline_thickness = 20 + else: + min_textline_thickness = 40 + + if thickness_along_y_percent >= 14: + + text_with_lines_y_rev = -1 * text_with_lines_y[:] + # print(text_with_lines_y) + # print(text_with_lines_y_rev) + + # plt.plot(text_with_lines_y) + # plt.show() + + text_with_lines_y_rev = text_with_lines_y_rev - np.min(text_with_lines_y_rev) + + # plt.plot(text_with_lines_y_rev) + # plt.show() + sigma_gaus = 1 + region_sum_0 = gaussian_filter1d(text_with_lines_y, sigma_gaus) + + region_sum_0_rev = gaussian_filter1d(text_with_lines_y_rev, sigma_gaus) + + # plt.plot(region_sum_0_rev) + # plt.show() + region_sum_0_updown = region_sum_0[len(region_sum_0)::-1] + + first_nonzero = (next((i for i, x in enumerate(region_sum_0) if x), None)) + last_nonzero = (next((i for i, x in enumerate(region_sum_0_updown) if x), None)) + + last_nonzero = len(region_sum_0) - last_nonzero + + # img_sum_0_smooth_rev=-region_sum_0 + + mid_point = (last_nonzero + first_nonzero) / 2. 
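+        # mid_point splits the horizontal ink profile of the page; candidate gaps
+        # are peaks of the inverted profile, and only gaps outside the centre are
+        # kept: beyond mid_point for one column, or beyond a third of the
+        # half-width from mid_point for two columns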
+ + one_third_right = (last_nonzero - mid_point) / 3.0 + one_third_left = (mid_point - first_nonzero) / 3.0 + + # img_sum_0_smooth_rev=img_sum_0_smooth_rev-np.min(img_sum_0_smooth_rev) + + peaks, _ = find_peaks(text_with_lines_y_rev, height=0) + + peaks = np.array(peaks) + + # print(region_sum_0[peaks]) + # #plt.plot(region_sum_0) + # #plt.plot(peaks,region_sum_0[peaks],'*') + # #plt.show() + # print(first_nonzero,last_nonzero,peaks) + peaks = peaks[(peaks > first_nonzero) & (peaks < last_nonzero)] + + # print(first_nonzero,last_nonzero,peaks) + + # print(region_sum_0[peaks]<10) + # ###peaks=peaks[region_sum_0[peaks]<25 ] + + # print(region_sum_0[peaks]) + peaks = peaks[region_sum_0[peaks] < min_textline_thickness] + # print(peaks) + # print(first_nonzero,last_nonzero,one_third_right,one_third_left) + + if num_col == 1: + peaks_right = peaks[peaks > mid_point] + peaks_left = peaks[peaks < mid_point] + if num_col == 2: + peaks_right = peaks[peaks > (mid_point + one_third_right)] + peaks_left = peaks[peaks < (mid_point - one_third_left)] + + try: + point_right = np.min(peaks_right) + except: + point_right = last_nonzero + + try: + point_left = np.max(peaks_left) + except: + point_left = first_nonzero + + # print(point_left,point_right) + # print(text_regions.shape) + if point_right >= mask_marginals.shape[1]: + point_right = mask_marginals.shape[1] - 1 + + try: + mask_marginals[:, point_left:point_right] = 1 + except: + mask_marginals[:, :] = 1 + + # print(mask_marginals.shape,point_left,point_right,'nadosh') + mask_marginals_rotated = rotate_image(mask_marginals, -slope_deskew) + + # print(mask_marginals_rotated.shape,'nadosh') + mask_marginals_rotated_sum = mask_marginals_rotated.sum(axis=0) + + mask_marginals_rotated_sum[mask_marginals_rotated_sum != 0] = 1 + index_x = np.array(range(len(mask_marginals_rotated_sum))) + 1 + + index_x_interest = index_x[mask_marginals_rotated_sum == 1] + + min_point_of_left_marginal = np.min(index_x_interest) - 16 + max_point_of_right_marginal = np.max(index_x_interest) + 16 + + if min_point_of_left_marginal < 0: + min_point_of_left_marginal = 0 + if max_point_of_right_marginal >= text_regions.shape[1]: + max_point_of_right_marginal = text_regions.shape[1] - 1 + + # print(np.min(index_x_interest) ,np.max(index_x_interest),'minmaxnew') + # print(mask_marginals_rotated.shape,text_regions.shape,'mask_marginals_rotated') + # plt.imshow(mask_marginals) + # plt.show() + + # plt.imshow(mask_marginals_rotated) + # plt.show() + + text_regions[(mask_marginals_rotated[:, :] != 1) & (text_regions[:, :] == 1)] = 4 + + # plt.imshow(text_regions) + # plt.show() + + pixel_img = 4 + min_area_text = 0.00001 + polygons_of_marginals = return_contours_of_interested_region(text_regions, pixel_img, min_area_text) + + cx_text_only, cy_text_only, x_min_text_only, x_max_text_only, y_min_text_only, y_max_text_only, y_cor_x_min_main = find_new_features_of_contours( + polygons_of_marginals) + + text_regions[(text_regions[:, :] == 4)] = 1 + + marginlas_should_be_main_text = [] + + x_min_marginals_left = [] + x_min_marginals_right = [] + + for i in range(len(cx_text_only)): + + x_width_mar = abs(x_min_text_only[i] - x_max_text_only[i]) + y_height_mar = abs(y_min_text_only[i] - y_max_text_only[i]) + # print(x_width_mar,y_height_mar,y_height_mar/x_width_mar,'y_height_mar') + if x_width_mar > 16 and y_height_mar / x_width_mar < 18: + marginlas_should_be_main_text.append(polygons_of_marginals[i]) + if x_min_text_only[i] < (mid_point - one_third_left): + x_min_marginals_left_new = 
x_min_text_only[i] + if len(x_min_marginals_left) == 0: + x_min_marginals_left.append(x_min_marginals_left_new) + else: + x_min_marginals_left[0] = min(x_min_marginals_left[0], x_min_marginals_left_new) + else: + x_min_marginals_right_new = x_min_text_only[i] + if len(x_min_marginals_right) == 0: + x_min_marginals_right.append(x_min_marginals_right_new) + else: + x_min_marginals_right[0] = min(x_min_marginals_right[0], x_min_marginals_right_new) + + if len(x_min_marginals_left) == 0: + x_min_marginals_left = [0] + if len(x_min_marginals_right) == 0: + x_min_marginals_right = [text_regions.shape[1] - 1] + + # print(x_min_marginals_left[0],x_min_marginals_right[0],'margo') + + # print(marginlas_should_be_main_text,'marginlas_should_be_main_text') + text_regions = cv2.fillPoly(text_regions, pts=marginlas_should_be_main_text, color=(4, 4)) + + # print(np.unique(text_regions)) + + # text_regions[:,:int(x_min_marginals_left[0])][text_regions[:,:int(x_min_marginals_left[0])]==1]=0 + # text_regions[:,int(x_min_marginals_right[0]):][text_regions[:,int(x_min_marginals_right[0]):]==1]=0 + + text_regions[:, :int(min_point_of_left_marginal)][text_regions[:, :int(min_point_of_left_marginal)] == 1] = 0 + text_regions[:, int(max_point_of_right_marginal):][text_regions[:, int(max_point_of_right_marginal):] == 1] = 0 + + # ##text_regions[:,0:point_left][text_regions[:,0:point_left]==1]=4 + + # ##text_regions[:,point_right:][ text_regions[:,point_right:]==1]=4 + # plt.plot(region_sum_0) + # plt.plot(peaks,region_sum_0[peaks],'*') + # plt.show() + + # plt.imshow(text_regions) + # plt.show() + + # sys.exit() + else: + pass + return text_regions diff --git a/qurator/eynollah/utils/pil_cv2.py b/eynollah/eynollah/utils/pil_cv2.py similarity index 97% rename from qurator/eynollah/utils/pil_cv2.py rename to eynollah/eynollah/utils/pil_cv2.py index 83ae47d..34ef9e1 100644 --- a/qurator/eynollah/utils/pil_cv2.py +++ b/eynollah/eynollah/utils/pil_cv2.py @@ -5,15 +5,18 @@ from cv2 import COLOR_GRAY2BGR, COLOR_RGB2BGR, COLOR_BGR2RGB, cvtColor, imread # from sbb_binarization + def cv2pil(img): return Image.fromarray(np.array(cvtColor(img, COLOR_BGR2RGB))) + def pil2cv(img): # from ocrd/workspace.py - color_conversion = COLOR_GRAY2BGR if img.mode in ('1', 'L') else COLOR_RGB2BGR + color_conversion = COLOR_GRAY2BGR if img.mode in ('1', 'L') else COLOR_RGB2BGR pil_as_np_array = np.array(img).astype('uint8') if img.mode == '1' else np.array(img) return cvtColor(pil_as_np_array, color_conversion) + def check_dpi(img): try: if isinstance(img, Image.Image): diff --git a/qurator/eynollah/utils/resize.py b/eynollah/eynollah/utils/resize.py similarity index 99% rename from qurator/eynollah/utils/resize.py rename to eynollah/eynollah/utils/resize.py index fdc49ec..8c09b04 100644 --- a/qurator/eynollah/utils/resize.py +++ b/eynollah/eynollah/utils/resize.py @@ -1,4 +1,5 @@ import cv2 + def resize_image(img_in, input_height, input_width): return cv2.resize(img_in, (input_width, input_height), interpolation=cv2.INTER_NEAREST) diff --git a/qurator/eynollah/utils/rotate.py b/eynollah/eynollah/utils/rotate.py similarity index 98% rename from qurator/eynollah/utils/rotate.py rename to eynollah/eynollah/utils/rotate.py index 603c2d9..4b6fbb6 100644 --- a/qurator/eynollah/utils/rotate.py +++ b/eynollah/eynollah/utils/rotate.py @@ -3,6 +3,7 @@ import math import imutils import cv2 + def rotatedRectWithMaxArea(w, h, angle): if w <= 0 or h <= 0: return 0, 0 @@ -25,6 +26,7 @@ def rotatedRectWithMaxArea(w, h, angle): return wr, hr + def 
rotate_max_area_new(image, rotated, angle): wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], math.radians(angle)) h, w, _ = rotated.shape @@ -34,17 +36,20 @@ def rotate_max_area_new(image, rotated, angle): x2 = x1 + int(wr) return rotated[y1:y2, x1:x2] + def rotation_image_new(img, thetha): rotated = imutils.rotate(img, thetha) return rotate_max_area_new(img, rotated, thetha) + def rotate_image(img_patch, slope): (h, w) = img_patch.shape[:2] center = (w // 2, h // 2) M = cv2.getRotationMatrix2D(center, slope, 1.0) return cv2.warpAffine(img_patch, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE) -def rotate_image_different( img, slope): + +def rotate_image_different(img, slope): # img = cv2.imread('images/input.jpg') num_rows, num_cols = img.shape[:2] @@ -52,6 +57,7 @@ def rotate_image_different( img, slope): img_rotation = cv2.warpAffine(img, rotation_matrix, (num_cols, num_rows)) return img_rotation + def rotate_max_area(image, rotated, rotated_textline, rotated_layout, rotated_table_prediction, angle): wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], math.radians(angle)) h, w, _ = rotated.shape @@ -61,6 +67,7 @@ def rotate_max_area(image, rotated, rotated_textline, rotated_layout, rotated_ta x2 = x1 + int(wr) return rotated[y1:y2, x1:x2], rotated_textline[y1:y2, x1:x2], rotated_layout[y1:y2, x1:x2], rotated_table_prediction[y1:y2, x1:x2] + def rotation_not_90_func(img, textline, text_regions_p_1, table_prediction, thetha): rotated = imutils.rotate(img, thetha) rotated_textline = imutils.rotate(textline, thetha) @@ -68,6 +75,7 @@ def rotation_not_90_func(img, textline, text_regions_p_1, table_prediction, thet rotated_table_prediction = imutils.rotate(table_prediction, thetha) return rotate_max_area(img, rotated, rotated_textline, rotated_layout, rotated_table_prediction, thetha) + def rotation_not_90_func_full_layout(img, textline, text_regions_p_1, text_regions_p_fully, thetha): rotated = imutils.rotate(img, thetha) rotated_textline = imutils.rotate(textline, thetha) @@ -75,6 +83,7 @@ def rotation_not_90_func_full_layout(img, textline, text_regions_p_1, text_regio rotated_layout_full = imutils.rotate(text_regions_p_fully, thetha) return rotate_max_area_full_layout(img, rotated, rotated_textline, rotated_layout, rotated_layout_full, thetha) + def rotate_max_area_full_layout(image, rotated, rotated_textline, rotated_layout, rotated_layout_full, angle): wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], math.radians(angle)) h, w, _ = rotated.shape @@ -83,4 +92,3 @@ def rotate_max_area_full_layout(image, rotated, rotated_textline, rotated_layout x1 = w // 2 - int(wr / 2) x2 = x1 + int(wr) return rotated[y1:y2, x1:x2], rotated_textline[y1:y2, x1:x2], rotated_layout[y1:y2, x1:x2], rotated_layout_full[y1:y2, x1:x2] - diff --git a/qurator/eynollah/utils/separate_lines.py b/eynollah/eynollah/utils/separate_lines.py similarity index 100% rename from qurator/eynollah/utils/separate_lines.py rename to eynollah/eynollah/utils/separate_lines.py diff --git a/qurator/eynollah/utils/tf.py b/eynollah/eynollah/utils/tf.py similarity index 100% rename from qurator/eynollah/utils/tf.py rename to eynollah/eynollah/utils/tf.py diff --git a/qurator/eynollah/utils/xml.py b/eynollah/eynollah/utils/xml.py similarity index 99% rename from qurator/eynollah/utils/xml.py rename to eynollah/eynollah/utils/xml.py index 0386b25..009edd2 100644 --- a/qurator/eynollah/utils/xml.py +++ b/eynollah/eynollah/utils/xml.py @@ -29,6 +29,7 @@ from ocrd_models.ocrd_page 
import ( to_xml) + def create_page_xml(imageFilename, height, width): now = datetime.now() pcgts = PcGtsType( @@ -46,6 +47,7 @@ def create_page_xml(imageFilename, height, width): )) return pcgts + def xml_reading_order(page, order_of_texts, id_of_marginalia): region_order = ReadingOrderType() og = OrderedGroupType(id="ro357564684568544579089") @@ -59,6 +61,7 @@ def xml_reading_order(page, order_of_texts, id_of_marginalia): og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=id_marginal)) region_counter.inc('region') + def order_and_id_of_texts(found_polygons_text_region, found_polygons_text_region_h, matrix_of_orders, indexes_sorted, index_of_types, kind_of_texts, ref_point): indexes_sorted = np.array(indexes_sorted) index_of_types = np.array(index_of_types) diff --git a/qurator/eynollah/writer.py b/eynollah/eynollah/writer.py similarity index 61% rename from qurator/eynollah/writer.py rename to eynollah/eynollah/writer.py index 72d9280..7f95e26 100644 --- a/qurator/eynollah/writer.py +++ b/eynollah/eynollah/writer.py @@ -11,18 +11,19 @@ from .utils.counter import EynollahIdCounter from ocrd_utils import getLogger from ocrd_models.ocrd_page import ( - BorderType, - CoordsType, - PcGtsType, - TextLineType, - TextRegionType, - ImageRegionType, - TableRegionType, - SeparatorRegionType, - to_xml - ) + BorderType, + CoordsType, + PcGtsType, + TextLineType, + TextRegionType, + ImageRegionType, + TableRegionType, + SeparatorRegionType, + to_xml +) import numpy as np + class EynollahXmlWriter(): def __init__( @@ -41,10 +42,10 @@ class EynollahXmlWriter(): self.curved_line = curved_line self.textline_light = textline_light self.pcgts = pcgts - self.scale_x = None # XXX set outside __init__ - self.scale_y = None # XXX set outside __init__ - self.height_org = None # XXX set outside __init__ - self.width_org = None # XXX set outside __init__ + self.scale_x = None # XXX set outside __init__ + self.scale_y = None # XXX set outside __init__ + self.height_org = None # XXX set outside __init__ + self.width_org = None # XXX set outside __init__ @property def image_filename_stem(self): @@ -61,11 +62,12 @@ class EynollahXmlWriter(): else: points_page_print += str(int((contour[0][0]) / self.scale_x)) points_page_print += ',' - points_page_print += str(int((contour[0][1] ) / self.scale_y)) + points_page_print += str(int((contour[0][1]) / self.scale_y)) points_page_print = points_page_print + ' ' return points_page_print[:-1] - def serialize_lines_in_marginal(self, marginal_region, all_found_textline_polygons_marginals, marginal_idx, page_coord, all_box_coord_marginals, slopes_marginals, counter): + def serialize_lines_in_marginal(self, marginal_region, all_found_textline_polygons_marginals, marginal_idx, + page_coord, all_box_coord_marginals, slopes_marginals, counter): for j in range(len(all_found_textline_polygons_marginals[marginal_idx])): coords = CoordsType() textline = TextLineType(id=counter.next_line_id, Coords=coords) @@ -74,37 +76,54 @@ class EynollahXmlWriter(): for l in range(len(all_found_textline_polygons_marginals[marginal_idx][j])): if not (self.curved_line or self.textline_light): if len(all_found_textline_polygons_marginals[marginal_idx][j][l]) == 2: - textline_x_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x) ) - textline_y_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][1] + 
all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y) ) + textline_x_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0] + + all_box_coord_marginals[marginal_idx][2] + page_coord[ + 2]) / self.scale_x)) + textline_y_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][1] + + all_box_coord_marginals[marginal_idx][0] + page_coord[ + 0]) / self.scale_y)) else: - textline_x_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x) ) - textline_y_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y) ) + textline_x_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][0] + + all_box_coord_marginals[marginal_idx][2] + page_coord[ + 2]) / self.scale_x)) + textline_y_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][1] + + all_box_coord_marginals[marginal_idx][0] + page_coord[ + 0]) / self.scale_y)) points_co += str(textline_x_coord) points_co += ',' points_co += str(textline_y_coord) if (self.curved_line or self.textline_light) and np.abs(slopes_marginals[marginal_idx]) <= 45: if len(all_found_textline_polygons_marginals[marginal_idx][j][l]) == 2: - points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0] + page_coord[2]) / self.scale_x)) + points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0] + page_coord[ + 2]) / self.scale_x)) points_co += ',' - points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][1] + page_coord[0]) / self.scale_y)) + points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][1] + page_coord[ + 0]) / self.scale_y)) else: - points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][0] + page_coord[2]) / self.scale_x)) + points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][0] + + page_coord[2]) / self.scale_x)) points_co += ',' - points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][1] + page_coord[0]) / self.scale_y)) + points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][1] + + page_coord[0]) / self.scale_y)) elif (self.curved_line or self.textline_light) and np.abs(slopes_marginals[marginal_idx]) > 45: if len(all_found_textline_polygons_marginals[marginal_idx][j][l]) == 2: - points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x)) + points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0] + + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x)) points_co += ',' - points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y)) + points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][1] + + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y)) else: - points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x)) + points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][0] + + all_box_coord_marginals[marginal_idx][2] + 
page_coord[2]) / self.scale_x)) points_co += ',' - points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y)) + points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][1] + + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y)) points_co += ' ' coords.set_points(points_co[:-1]) - def serialize_lines_in_region(self, text_region, all_found_textline_polygons, region_idx, page_coord, all_box_coord, slopes, counter): + def serialize_lines_in_region(self, text_region, all_found_textline_polygons, region_idx, page_coord, all_box_coord, + slopes, counter): self.logger.debug('enter serialize_lines_in_region') for j in range(len(all_found_textline_polygons[region_idx])): coords = CoordsType() @@ -115,11 +134,15 @@ class EynollahXmlWriter(): for idx_contour_textline, contour_textline in enumerate(all_found_textline_polygons[region_idx][j]): if not (self.curved_line or self.textline_light): if len(contour_textline) == 2: - textline_x_coord = max(0, int((contour_textline[0] + region_bboxes[2] + page_coord[2]) / self.scale_x)) - textline_y_coord = max(0, int((contour_textline[1] + region_bboxes[0] + page_coord[0]) / self.scale_y)) + textline_x_coord = max(0, int((contour_textline[0] + region_bboxes[2] + page_coord[ + 2]) / self.scale_x)) + textline_y_coord = max(0, int((contour_textline[1] + region_bboxes[0] + page_coord[ + 0]) / self.scale_y)) else: - textline_x_coord = max(0, int((contour_textline[0][0] + region_bboxes[2] + page_coord[2]) / self.scale_x)) - textline_y_coord = max(0, int((contour_textline[0][1] + region_bboxes[0] + page_coord[0]) / self.scale_y)) + textline_x_coord = max(0, int((contour_textline[0][0] + region_bboxes[2] + page_coord[ + 2]) / self.scale_x)) + textline_y_coord = max(0, int((contour_textline[0][1] + region_bboxes[0] + page_coord[ + 0]) / self.scale_y)) points_co += str(textline_x_coord) points_co += ',' points_co += str(textline_y_coord) @@ -132,16 +155,18 @@ class EynollahXmlWriter(): else: points_co += str(int((contour_textline[0][0] + page_coord[2]) / self.scale_x)) points_co += ',' - points_co += str(int((contour_textline[0][1] + page_coord[0])/self.scale_y)) + points_co += str(int((contour_textline[0][1] + page_coord[0]) / self.scale_y)) elif (self.curved_line or self.textline_light) and np.abs(slopes[region_idx]) > 45: - if len(contour_textline)==2: - points_co += str(int((contour_textline[0] + region_bboxes[2] + page_coord[2])/self.scale_x)) + if len(contour_textline) == 2: + points_co += str(int((contour_textline[0] + region_bboxes[2] + page_coord[2]) / self.scale_x)) points_co += ',' - points_co += str(int((contour_textline[1] + region_bboxes[0] + page_coord[0])/self.scale_y)) + points_co += str(int((contour_textline[1] + region_bboxes[0] + page_coord[0]) / self.scale_y)) else: - points_co += str(int((contour_textline[0][0] + region_bboxes[2]+page_coord[2])/self.scale_x)) + points_co += str( + int((contour_textline[0][0] + region_bboxes[2] + page_coord[2]) / self.scale_x)) points_co += ',' - points_co += str(int((contour_textline[0][1] + region_bboxes[0]+page_coord[0])/self.scale_y)) + points_co += str( + int((contour_textline[0][1] + region_bboxes[0] + page_coord[0]) / self.scale_y)) points_co += ' ' coords.set_points(points_co[:-1]) @@ -151,7 +176,11 @@ class EynollahXmlWriter(): with open(out_fname, 'w') as f: f.write(to_xml(pcgts)) - def build_pagexml_no_full_layout(self, found_polygons_text_region, 
page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, found_polygons_tables): + def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, + all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, + found_polygons_marginals, all_found_textline_polygons_marginals, + all_box_coord_marginals, slopes, slopes_marginals, cont_page, + polygons_lines_to_be_written_in_xml, found_polygons_tables): self.logger.debug('enter build_pagexml_no_full_layout') # create the file structure @@ -167,36 +196,42 @@ class EynollahXmlWriter(): for mm in range(len(found_polygons_text_region)): textregion = TextRegionType(id=counter.next_region_id, type_='paragraph', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord)), - ) + Coords=CoordsType( + points=self.calculate_polygon_coords(found_polygons_text_region[mm], + page_coord)), + ) page.add_TextRegion(textregion) - self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, all_box_coord, slopes, counter) + self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, all_box_coord, + slopes, counter) for mm in range(len(found_polygons_marginals)): marginal = TextRegionType(id=counter.next_region_id, type_='marginalia', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals[mm], page_coord))) + Coords=CoordsType( + points=self.calculate_polygon_coords(found_polygons_marginals[mm], + page_coord))) page.add_TextRegion(marginal) - self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals, mm, page_coord, all_box_coord_marginals, slopes_marginals, counter) + self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals, mm, page_coord, + all_box_coord_marginals, slopes_marginals, counter) for mm in range(len(found_polygons_text_region_img)): img_region = ImageRegionType(id=counter.next_region_id, Coords=CoordsType()) page.add_ImageRegion(img_region) points_co = '' for lmm in range(len(found_polygons_text_region_img[mm])): - points_co += str(int((found_polygons_text_region_img[mm][lmm,0,0] + page_coord[2]) / self.scale_x)) + points_co += str(int((found_polygons_text_region_img[mm][lmm, 0, 0] + page_coord[2]) / self.scale_x)) points_co += ',' - points_co += str(int((found_polygons_text_region_img[mm][lmm,0,1] + page_coord[0]) / self.scale_y)) + points_co += str(int((found_polygons_text_region_img[mm][lmm, 0, 1] + page_coord[0]) / self.scale_y)) points_co += ' ' img_region.get_Coords().set_points(points_co[:-1]) - + for mm in range(len(polygons_lines_to_be_written_in_xml)): sep_hor = SeparatorRegionType(id=counter.next_region_id, Coords=CoordsType()) page.add_SeparatorRegion(sep_hor) points_co = '' for lmm in range(len(polygons_lines_to_be_written_in_xml[mm])): - points_co += str(int((polygons_lines_to_be_written_in_xml[mm][lmm,0,0] ) / self.scale_x)) + points_co += str(int((polygons_lines_to_be_written_in_xml[mm][lmm, 0, 0]) / self.scale_x)) points_co += ',' - points_co += str(int((polygons_lines_to_be_written_in_xml[mm][lmm,0,1] ) / self.scale_y)) + points_co += str(int((polygons_lines_to_be_written_in_xml[mm][lmm, 0, 1]) / self.scale_y)) points_co += ' ' sep_hor.get_Coords().set_points(points_co[:-1]) for mm 
in range(len(found_polygons_tables)): @@ -204,15 +239,21 @@ class EynollahXmlWriter(): page.add_TableRegion(tab_region) points_co = '' for lmm in range(len(found_polygons_tables[mm])): - points_co += str(int((found_polygons_tables[mm][lmm,0,0] + page_coord[2]) / self.scale_x)) + points_co += str(int((found_polygons_tables[mm][lmm, 0, 0] + page_coord[2]) / self.scale_x)) points_co += ',' - points_co += str(int((found_polygons_tables[mm][lmm,0,1] + page_coord[0]) / self.scale_y)) + points_co += str(int((found_polygons_tables[mm][lmm, 0, 1] + page_coord[0]) / self.scale_y)) points_co += ' ' tab_region.get_Coords().set_points(points_co[:-1]) return pcgts - def build_pagexml_full_layout(self, found_polygons_text_region, found_polygons_text_region_h, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml): + def build_pagexml_full_layout(self, found_polygons_text_region, found_polygons_text_region_h, page_coord, + order_of_texts, id_of_texts, all_found_textline_polygons, + all_found_textline_polygons_h, all_box_coord, all_box_coord_h, + found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, + found_polygons_marginals, all_found_textline_polygons_marginals, + all_box_coord_marginals, slopes, slopes_h, slopes_marginals, cont_page, + polygons_lines_to_be_written_in_xml): self.logger.debug('enter build_pagexml_full_layout') # create the file structure @@ -227,35 +268,48 @@ class EynollahXmlWriter(): for mm in range(len(found_polygons_text_region)): textregion = TextRegionType(id=counter.next_region_id, type_='paragraph', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord))) + Coords=CoordsType( + points=self.calculate_polygon_coords(found_polygons_text_region[mm], + page_coord))) page.add_TextRegion(textregion) - self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, all_box_coord, slopes, counter) + self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, all_box_coord, + slopes, counter) self.logger.debug('len(found_polygons_text_region_h) %s', len(found_polygons_text_region_h)) for mm in range(len(found_polygons_text_region_h)): textregion = TextRegionType(id=counter.next_region_id, type_='header', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_h[mm], page_coord))) + Coords=CoordsType( + points=self.calculate_polygon_coords(found_polygons_text_region_h[mm], + page_coord))) page.add_TextRegion(textregion) - self.serialize_lines_in_region(textregion, all_found_textline_polygons_h, mm, page_coord, all_box_coord_h, slopes_h, counter) + self.serialize_lines_in_region(textregion, all_found_textline_polygons_h, mm, page_coord, all_box_coord_h, + slopes_h, counter) for mm in range(len(found_polygons_marginals)): marginal = TextRegionType(id=counter.next_region_id, type_='marginalia', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals[mm], page_coord))) + Coords=CoordsType( + points=self.calculate_polygon_coords(found_polygons_marginals[mm], + page_coord))) page.add_TextRegion(marginal) - self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals, 
mm, page_coord, all_box_coord_marginals, slopes_marginals, counter) + self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals, mm, page_coord, + all_box_coord_marginals, slopes_marginals, counter) for mm in range(len(found_polygons_drop_capitals)): page.add_TextRegion(TextRegionType(id=counter.next_region_id, type_='drop-capital', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_drop_capitals[mm], page_coord)))) + Coords=CoordsType(points=self.calculate_polygon_coords( + found_polygons_drop_capitals[mm], page_coord)))) for mm in range(len(found_polygons_text_region_img)): - page.add_ImageRegion(ImageRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_img[mm], page_coord)))) - + page.add_ImageRegion(ImageRegionType(id=counter.next_region_id, Coords=CoordsType( + points=self.calculate_polygon_coords(found_polygons_text_region_img[mm], page_coord)))) + for mm in range(len(polygons_lines_to_be_written_in_xml)): - page.add_SeparatorRegion(ImageRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(polygons_lines_to_be_written_in_xml[mm], [0 , 0, 0, 0])))) - + page.add_SeparatorRegion(SeparatorRegionType(id=counter.next_region_id, Coords=CoordsType( + points=self.calculate_polygon_coords(polygons_lines_to_be_written_in_xml[mm], [0, 0, 0, 0])))) + for mm in range(len(found_polygons_tables)): - page.add_TableRegion(TableRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_tables[mm], page_coord)))) + page.add_TableRegion(TableRegionType(id=counter.next_region_id, Coords=CoordsType( + points=self.calculate_polygon_coords(found_polygons_tables[mm], page_coord)))) return pcgts @@ -271,6 +325,5 @@ class EynollahXmlWriter(): coords += str(int((value_bbox[0][0] + page_coord[2]) / self.scale_x)) coords += ',' coords += str(int((value_bbox[0][1] + page_coord[0]) / self.scale_y)) - coords=coords + ' ' + coords = coords + ' ' return coords[:-1] - diff --git a/ocrd-tool.json b/ocrd-tool.json index 5c48493..5e513ae 120000 --- a/ocrd-tool.json +++ b/ocrd-tool.json @@ -1 +1 @@ -qurator/eynollah/ocrd-tool.json \ No newline at end of file +eynollah/eynollah/ocrd-tool.json \ No newline at end of file diff --git a/pyproject.toml.draft b/pyproject.toml.draft new file mode 100644 index 0000000..fd0b9df --- /dev/null +++ b/pyproject.toml.draft @@ -0,0 +1,42 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "eynollah" +version = "0.3.0" +authors = [ + {name = "Vahid Rezanezhad"} +] +description = "Document Layout Analysis" +readme = "README.md" +requires-python = ">=3.8" +keywords = ["document layout analysis", "semantic segmentation"] +classifiers = [ + "Development Status :: 4 - Beta", + "Environment :: Console", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Topic :: Scientific/Engineering :: Image Processing", +] +dependencies = [ + "ocrd >= 2.63.3", + "numpy <= 1.24.4", + "scikit-learn <= 1.3.2", + "tensorflow <= 2.13.1", + "imutils >= 0.5.4", + "matplotlib <= 3.7.5", + "setuptools >= 61", +] + +# TODO: test dependencies + +[project.scripts] +eynollah = "eynollah.eynollah.cli:main" +ocrd-eynollah-segment = "eynollah.eynollah.ocrd_cli:main" + +[project.urls] +Homepage = 
"https://github.com/qurator-spk/eynollah" +Repository = "https://github.com/qurator-spk/eynollah.git" diff --git a/qurator/eynollah/utils/marginals.py b/qurator/eynollah/utils/marginals.py deleted file mode 100644 index 7c43de6..0000000 --- a/qurator/eynollah/utils/marginals.py +++ /dev/null @@ -1,252 +0,0 @@ -import numpy as np -import cv2 -from scipy.signal import find_peaks -from scipy.ndimage import gaussian_filter1d - - -from .contour import find_new_features_of_contours, return_contours_of_interested_region -from .resize import resize_image -from .rotate import rotate_image - -def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, kernel=None): - mask_marginals=np.zeros((text_with_lines.shape[0],text_with_lines.shape[1])) - mask_marginals=mask_marginals.astype(np.uint8) - - - text_with_lines=text_with_lines.astype(np.uint8) - ##text_with_lines=cv2.erode(text_with_lines,self.kernel,iterations=3) - - text_with_lines_eroded=cv2.erode(text_with_lines,kernel,iterations=5) - - if text_with_lines.shape[0]<=1500: - pass - elif text_with_lines.shape[0]>1500 and text_with_lines.shape[0]<=1800: - text_with_lines=resize_image(text_with_lines,int(text_with_lines.shape[0]*1.5),text_with_lines.shape[1]) - text_with_lines=cv2.erode(text_with_lines,kernel,iterations=5) - text_with_lines=resize_image(text_with_lines,text_with_lines_eroded.shape[0],text_with_lines_eroded.shape[1]) - else: - text_with_lines=resize_image(text_with_lines,int(text_with_lines.shape[0]*1.8),text_with_lines.shape[1]) - text_with_lines=cv2.erode(text_with_lines,kernel,iterations=7) - text_with_lines=resize_image(text_with_lines,text_with_lines_eroded.shape[0],text_with_lines_eroded.shape[1]) - - - text_with_lines_y=text_with_lines.sum(axis=0) - text_with_lines_y_eroded=text_with_lines_eroded.sum(axis=0) - - thickness_along_y_percent=text_with_lines_y_eroded.max()/(float(text_with_lines.shape[0]))*100 - - #print(thickness_along_y_percent,'thickness_along_y_percent') - - if thickness_along_y_percent<30: - min_textline_thickness=8 - elif thickness_along_y_percent>=30 and thickness_along_y_percent<50: - min_textline_thickness=20 - else: - min_textline_thickness=40 - - - - if thickness_along_y_percent>=14: - - text_with_lines_y_rev=-1*text_with_lines_y[:] - #print(text_with_lines_y) - #print(text_with_lines_y_rev) - - - - - #plt.plot(text_with_lines_y) - #plt.show() - - - text_with_lines_y_rev=text_with_lines_y_rev-np.min(text_with_lines_y_rev) - - #plt.plot(text_with_lines_y_rev) - #plt.show() - sigma_gaus=1 - region_sum_0= gaussian_filter1d(text_with_lines_y, sigma_gaus) - - region_sum_0_rev=gaussian_filter1d(text_with_lines_y_rev, sigma_gaus) - - #plt.plot(region_sum_0_rev) - #plt.show() - region_sum_0_updown=region_sum_0[len(region_sum_0)::-1] - - first_nonzero=(next((i for i, x in enumerate(region_sum_0) if x), None)) - last_nonzero=(next((i for i, x in enumerate(region_sum_0_updown) if x), None)) - - - last_nonzero=len(region_sum_0)-last_nonzero - - ##img_sum_0_smooth_rev=-region_sum_0 - - - mid_point=(last_nonzero+first_nonzero)/2. 
- - - one_third_right=(last_nonzero-mid_point)/3.0 - one_third_left=(mid_point-first_nonzero)/3.0 - - #img_sum_0_smooth_rev=img_sum_0_smooth_rev-np.min(img_sum_0_smooth_rev) - - - - - peaks, _ = find_peaks(text_with_lines_y_rev, height=0) - - - peaks=np.array(peaks) - - - #print(region_sum_0[peaks]) - ##plt.plot(region_sum_0) - ##plt.plot(peaks,region_sum_0[peaks],'*') - ##plt.show() - #print(first_nonzero,last_nonzero,peaks) - peaks=peaks[(peaks>first_nonzero) & ((peaks<last_nonzero))] - - peaks=peaks[region_sum_0[peaks]<min_textline_thickness ] - - if num_col==1: - peaks_right=peaks[peaks>mid_point] - peaks_left=peaks[peaks<mid_point] - if num_col==2: - peaks_right=peaks[peaks>(mid_point+one_third_right)] - peaks_left=peaks[peaks<(mid_point-one_third_left)] - - try: - point_right=np.min(peaks_right) - except: - point_right=last_nonzero - - try: - point_left=np.max(peaks_left) - except: - point_left=first_nonzero - - - - - #print(point_left,point_right) - #print(text_regions.shape) - if point_right>=mask_marginals.shape[1]: - point_right=mask_marginals.shape[1]-1 - - try: - mask_marginals[:,point_left:point_right]=1 - except: - mask_marginals[:,:]=1 - - #print(mask_marginals.shape,point_left,point_right,'nadosh') - mask_marginals_rotated=rotate_image(mask_marginals,-slope_deskew) - - #print(mask_marginals_rotated.shape,'nadosh') - mask_marginals_rotated_sum=mask_marginals_rotated.sum(axis=0) - - mask_marginals_rotated_sum[mask_marginals_rotated_sum!=0]=1 - index_x=np.array(range(len(mask_marginals_rotated_sum)))+1 - - index_x_interest=index_x[mask_marginals_rotated_sum==1] - - min_point_of_left_marginal=np.min(index_x_interest)-16 - max_point_of_right_marginal=np.max(index_x_interest)+16 - - if min_point_of_left_marginal<0: - min_point_of_left_marginal=0 - if max_point_of_right_marginal>=text_regions.shape[1]: - max_point_of_right_marginal=text_regions.shape[1]-1 - - - #print(np.min(index_x_interest) ,np.max(index_x_interest),'minmaxnew') - #print(mask_marginals_rotated.shape,text_regions.shape,'mask_marginals_rotated') - #plt.imshow(mask_marginals) - #plt.show() - - #plt.imshow(mask_marginals_rotated) - #plt.show() - - text_regions[(mask_marginals_rotated[:,:]!=1) & (text_regions[:,:]==1)]=4 - - #plt.imshow(text_regions) - #plt.show() - - pixel_img=4 - min_area_text=0.00001 - polygons_of_marginals=return_contours_of_interested_region(text_regions,pixel_img,min_area_text) - - cx_text_only,cy_text_only ,x_min_text_only,x_max_text_only, y_min_text_only ,y_max_text_only,y_cor_x_min_main=find_new_features_of_contours(polygons_of_marginals) - - text_regions[(text_regions[:,:]==4)]=1 - - marginlas_should_be_main_text=[] - - x_min_marginals_left=[] - x_min_marginals_right=[] - - for i in range(len(cx_text_only)): - - x_width_mar=abs(x_min_text_only[i]-x_max_text_only[i]) - y_height_mar=abs(y_min_text_only[i]-y_max_text_only[i]) - #print(x_width_mar,y_height_mar,y_height_mar/x_width_mar,'y_height_mar') - if x_width_mar>16 and y_height_mar/x_width_mar<18: - marginlas_should_be_main_text.append(polygons_of_marginals[i]) - if x_min_text_only[i]<(mid_point-one_third_left): - x_min_marginals_left_new=x_min_text_only[i] - if len(x_min_marginals_left)==0: - x_min_marginals_left.append(x_min_marginals_left_new) - else: - x_min_marginals_left[0]=min(x_min_marginals_left[0],x_min_marginals_left_new) - else: - x_min_marginals_right_new=x_min_text_only[i] - if len(x_min_marginals_right)==0: - x_min_marginals_right.append(x_min_marginals_right_new) - else: - x_min_marginals_right[0]=min(x_min_marginals_right[0],x_min_marginals_right_new) - - if len(x_min_marginals_left)==0: - x_min_marginals_left=[0] - if len(x_min_marginals_right)==0: -
x_min_marginals_right=[text_regions.shape[1]-1] - - - - - #print(x_min_marginals_left[0],x_min_marginals_right[0],'margo') - - #print(marginlas_should_be_main_text,'marginlas_should_be_main_text') - text_regions=cv2.fillPoly(text_regions, pts =marginlas_should_be_main_text, color=(4,4)) - - #print(np.unique(text_regions)) - - #text_regions[:,:int(x_min_marginals_left[0])][text_regions[:,:int(x_min_marginals_left[0])]==1]=0 - #text_regions[:,int(x_min_marginals_right[0]):][text_regions[:,int(x_min_marginals_right[0]):]==1]=0 - - text_regions[:,:int(min_point_of_left_marginal)][text_regions[:,:int(min_point_of_left_marginal)]==1]=0 - text_regions[:,int(max_point_of_right_marginal):][text_regions[:,int(max_point_of_right_marginal):]==1]=0 - - ###text_regions[:,0:point_left][text_regions[:,0:point_left]==1]=4 - - ###text_regions[:,point_right:][ text_regions[:,point_right:]==1]=4 - #plt.plot(region_sum_0) - #plt.plot(peaks,region_sum_0[peaks],'*') - #plt.show() - - - #plt.imshow(text_regions) - #plt.show() - - #sys.exit() - else: - pass - return text_regions diff --git a/requirements.txt b/requirements.txt index feeea99..7458195 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1,8 @@ # ocrd includes opencv, numpy, shapely, click ocrd >= 3.0.0a2 -numpy <1.24.0 -scikit-learn >= 0.23.2 -tensorflow == 2.12.1 -imutils >= 0.5.3 -matplotlib -setuptools >= 50 +numpy <= 1.24.4 +scikit-learn <= 1.3.2 +tensorflow <= 2.13.1 +imutils >= 0.5.4 +matplotlib <= 3.7.5 +setuptools >= 61 diff --git a/setup.py b/setup.py index af8a321..7e343a0 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,4 @@ -from setuptools import setup, find_namespace_packages +from setuptools import find_packages, setup from json import load install_requires = open('requirements.txt').read().split('\n') @@ -13,15 +13,16 @@ setup( author='Vahid Rezanezhad', url='https://github.com/qurator-spk/eynollah', license='Apache License 2.0', - packages=find_namespace_packages(include=['qurator']), + namespace_packages=['eynollah'], + packages=find_packages(exclude=['tests']), install_requires=install_requires, package_data={ '': ['*.json'] }, entry_points={ 'console_scripts': [ - 'eynollah=qurator.eynollah.cli:main', - 'ocrd-eynollah-segment=qurator.eynollah.ocrd_cli:main', + 'eynollah=eynollah.eynollah.cli:main', + 'ocrd-eynollah-segment=eynollah.eynollah.ocrd_cli:main', ] }, ) diff --git a/tests/base.py b/tests/base.py index 9de35ef..841355d 100644 --- a/tests/base.py +++ b/tests/base.py @@ -10,12 +10,14 @@ from unittest import TestCase as VanillaTestCase, skip, main as unittests_main import pytest from ocrd_utils import disableLogging, initLogging + def main(fn=None): if fn: sys.exit(pytest.main([fn])) else: unittests_main() + class TestCase(VanillaTestCase): @classmethod @@ -26,6 +28,7 @@ class TestCase(VanillaTestCase): disableLogging() initLogging() + class CapturingTestCase(TestCase): """ A TestCase that needs to capture stderr/stdout and invoke click CLI. 
@@ -42,7 +45,7 @@ class CapturingTestCase(TestCase): """ self.capture_out_err() # XXX snapshot just before executing the CLI code = 0 - sys.argv[1:] = args # XXX necessary because sys.argv reflects pytest args not cli args + sys.argv[1:] = args # XXX necessary because sys.argv reflects pytest args not cli args try: cli.main(args=args) except SystemExit as e: diff --git a/tests/test_counter.py b/tests/test_counter.py index 8ef0756..145fbf6 100644 --- a/tests/test_counter.py +++ b/tests/test_counter.py @@ -1,5 +1,6 @@ from tests.base import main -from qurator.eynollah.utils.counter import EynollahIdCounter +from eynollah.eynollah.utils.counter import EynollahIdCounter + def test_counter_string(): c = EynollahIdCounter() @@ -11,6 +12,7 @@ def test_counter_string(): assert c.region_id(999) == 'region_0999' assert c.line_id(999, 888) == 'region_0999_line_0888' + def test_counter_init(): c = EynollahIdCounter(region_idx=2) assert c.get('region') == 2 @@ -19,6 +21,7 @@ def test_counter_init(): c.reset() assert c.get('region') == 2 + def test_counter_methods(): c = EynollahIdCounter() assert c.get('region') == 0 @@ -29,5 +32,6 @@ def test_counter_methods(): c.inc('region', -9) assert c.get('region') == 1 + if __name__ == '__main__': main(__file__) diff --git a/tests/test_dpi.py b/tests/test_dpi.py index 510ffc5..7f542e5 100644 --- a/tests/test_dpi.py +++ b/tests/test_dpi.py @@ -1,11 +1,13 @@ import cv2 from pathlib import Path -from qurator.eynollah.utils.pil_cv2 import check_dpi +from eynollah.eynollah.utils.pil_cv2 import check_dpi from tests.base import main + def test_dpi(): fpath = str(Path(__file__).parent.joinpath('resources', 'kant_aufklaerung_1784_0020.tif')) assert 230 == check_dpi(cv2.imread(fpath)) + if __name__ == '__main__': main(__file__) diff --git a/tests/test_run.py b/tests/test_run.py index e591a04..0a26582 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -2,13 +2,14 @@ from os import environ from pathlib import Path from ocrd_utils import pushd_popd from tests.base import CapturingTestCase as TestCase, main -from qurator.eynollah.cli import main as eynollah_cli +from eynollah.eynollah.cli import main as eynollah_cli testdir = Path(__file__).parent.resolve() # EYNOLLAH_MODELS = environ.get('EYNOLLAH_MODELS', str(testdir.joinpath('..', 'models_eynollah').resolve())) EYNOLLAH_MODELS = environ['EYNOLLAH_MODELS'] + class TestEynollahRun(TestCase): def test_full_run(self): @@ -21,5 +22,6 @@ class TestEynollahRun(TestCase): print(code, out, err) assert not code + if __name__ == '__main__': main(__file__) diff --git a/tests/test_smoke.py b/tests/test_smoke.py index d069479..b0a7846 100644 --- a/tests/test_smoke.py +++ b/tests/test_smoke.py @@ -1,7 +1,7 @@ def test_utils_import(): - import qurator.eynollah.utils - import qurator.eynollah.utils.contour - import qurator.eynollah.utils.drop_capitals - import qurator.eynollah.utils.drop_capitals - import qurator.eynollah.utils.is_nan - import qurator.eynollah.utils.rotate + import eynollah.eynollah.utils + import eynollah.eynollah.utils.contour + import eynollah.eynollah.utils.drop_capitals + import eynollah.eynollah.utils.drop_capitals + import eynollah.eynollah.utils.is_nan + import eynollah.eynollah.utils.rotate diff --git a/tests/test_xml.py b/tests/test_xml.py index 8422fd1..2fddd77 100644 --- a/tests/test_xml.py +++ b/tests/test_xml.py @@ -1,14 +1,16 @@ from pytest import main -from qurator.eynollah.utils.xml import create_page_xml +from eynollah.eynollah.utils.xml import create_page_xml from ocrd_models.ocrd_page import 
to_xml PAGE_2019 = 'http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15' + def test_create_xml(): pcgts = create_page_xml('/path/to/img.tif', 100, 100) xmlstr = to_xml(pcgts) assert 'xmlns:pc="%s"' % PAGE_2019 in xmlstr assert 'Metadata' in xmlstr + if __name__ == '__main__': main([__file__])
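The `rotate.py` hunks above crop each rotated page with `rotatedRectWithMaxArea`, a helper the module imports but this patch never shows. For reference, the standard derivation of that geometry (the largest axis-aligned rectangle that fits inside a `w`×`h` rectangle rotated by a given angle) can be sketched as follows; the name and code here are illustrative and not necessarily identical to the project's own implementation:

```python
import math

def rotated_rect_with_max_area(w, h, angle):
    """Return (wr, hr): width and height of the largest axis-aligned
    rectangle inside a w x h rectangle rotated by `angle` radians.
    Sketch of the standard derivation, not eynollah's exact helper."""
    if w <= 0 or h <= 0:
        return 0.0, 0.0
    width_is_longer = w >= h
    side_long, side_short = (w, h) if width_is_longer else (h, w)
    sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))
    if side_short <= 2.0 * sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10:
        # half-constrained case: two crop corners touch the longer sides
        x = 0.5 * side_short
        wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)
    else:
        # fully constrained case: the crop touches all four rotated sides
        cos_2a = cos_a * cos_a - sin_a * sin_a
        wr = (w * cos_a - h * sin_a) / cos_2a
        hr = (h * cos_a - w * sin_a) / cos_2a
    return wr, hr
```

`rotate_max_area_new` and `rotate_max_area` then cut a centred `hr`×`wr` window out of the rotated image, which is exactly the `y1:y2, x1:x2` slicing visible in the hunks.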
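The `writer.py` changes reflow, but do not alter, one piece of arithmetic that recurs throughout the file: a textline point is shifted by the enclosing region's box offset plus the page offset, then divided by the current scale factor to land on the original page pixel grid. As a reading aid, the repeated expression is equivalent to the helper below; the assumption that index 0 of a coordinate tuple is the vertical offset and index 2 the horizontal one is inferred from how the diff uses `all_box_coord[...][0]`/`[2]` and `page_coord[0]`/`[2]`:

```python
def to_page_coords(x, y, box_coord, page_coord, scale_x, scale_y):
    """Map a region-local textline point back to original page pixels,
    mirroring the arithmetic repeated in EynollahXmlWriter (sketch only).
    box_coord/page_coord layout: index 0 = vertical offset, index 2 =
    horizontal offset (assumption inferred from the diff)."""
    page_x = max(0, int((x + box_coord[2] + page_coord[2]) / scale_x))
    page_y = max(0, int((y + box_coord[0] + page_coord[0]) / scale_y))
    return page_x, page_y
```

The `max(0, ...)` clamp matches the non-curved branches, which guard against rounded coordinates going negative; the curved-line branches in the diff omit the clamp.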
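Since every import path in this patch moves from `qurator.eynollah` to `eynollah.eynollah`, downstream imports and both console entry points (`eynollah`, `ocrd-eynollah-segment`) change together. A minimal smoke check against the renamed layout, mirroring the updated tests (it assumes the patched package is installed, e.g. via `pip install -e .`):

```python
# Mirrors tests/test_counter.py and tests/test_xml.py under the new paths.
from eynollah.eynollah.utils.counter import EynollahIdCounter
from eynollah.eynollah.utils.xml import create_page_xml
from ocrd_models.ocrd_page import to_xml

counter = EynollahIdCounter()
assert counter.region_id(1) == 'region_0001'
assert counter.line_id(1, 1) == 'region_0001_line_0001'

pcgts = create_page_xml('/path/to/img.tif', 100, 100)
assert 'Metadata' in to_xml(pcgts)
```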