From 2a1f892d72938dd8ac5c8719a161effc92f68f28 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Fri, 17 Oct 2025 18:17:41 +0200 Subject: [PATCH 01/28] expand keywords and supported Python versions --- pyproject.toml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index e7744a1..fde7967 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,7 +11,12 @@ description = "Document Layout Analysis" readme = "README.md" license.file = "LICENSE" requires-python = ">=3.8" -keywords = ["document layout analysis", "image segmentation"] +keywords = [ + "document layout analysis", + "image segmentation", + "binarization", + "optical character recognition" +] dynamic = [ "dependencies", @@ -25,6 +30,10 @@ classifiers = [ "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3 :: Only", "Topic :: Scientific/Engineering :: Image Processing", ] From 20a95365c283e4b90638063173fed3b8fb65cee1 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Fri, 17 Oct 2025 18:19:00 +0200 Subject: [PATCH 02/28] remove redundant parentheses --- src/eynollah/utils/__init__.py | 2 +- src/eynollah/utils/separate_lines.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 5ccb2af..fc01520 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1355,7 +1355,7 @@ def return_points_with_boundies(peaks_neg_fin, first_point, last_point): def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, label_lines, contours_h=None): t_ins_c0 = time.time() - separators_closeup=( (region_pre_p[:,:]==label_lines))*1 + separators_closeup= (region_pre_p[:, :] == label_lines) * 1 separators_closeup[0:110,:]=0 separators_closeup[separators_closeup.shape[0]-150:,:]=0 diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 22ef00d..d745ec7 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1475,9 +1475,9 @@ def separate_lines_new2(img_crop, thetha, num_col, slope_region, logger=None, pl img_int = np.zeros((img_xline.shape[0], img_xline.shape[1])) img_int[:, :] = img_xline[:, :] # img_patch_org[:,:,0] - img_resized = np.zeros((int(img_int.shape[0] * (1.2)), int(img_int.shape[1] * (3)))) - img_resized[int(img_int.shape[0] * (0.1)) : int(img_int.shape[0] * (0.1)) + img_int.shape[0], - int(img_int.shape[1] * (1.0)) : int(img_int.shape[1] * (1.0)) + img_int.shape[1]] = img_int[:, :] + img_resized = np.zeros((int(img_int.shape[0] * 1.2), int(img_int.shape[1] * 3))) + img_resized[int(img_int.shape[0] * 0.1): int(img_int.shape[0] * 0.1) + img_int.shape[0], + int(img_int.shape[1] * 1.0): int(img_int.shape[1] * 1.0) + img_int.shape[1]] = img_int[:, :] # plt.imshow(img_xline) # plt.show() img_line_rotated = rotate_image(img_resized, slopes_tile_wise[i]) @@ -1489,8 +1489,8 @@ def separate_lines_new2(img_crop, thetha, num_col, slope_region, logger=None, pl img_patch_separated_returned[:, :][img_patch_separated_returned[:, :] != 0] = 1 img_patch_separated_returned_true_size = img_patch_separated_returned[ - 
int(img_int.shape[0] * (0.1)) : int(img_int.shape[0] * (0.1)) + img_int.shape[0], - int(img_int.shape[1] * (1.0)) : int(img_int.shape[1] * (1.0)) + img_int.shape[1]] + int(img_int.shape[0] * 0.1): int(img_int.shape[0] * 0.1) + img_int.shape[0], + int(img_int.shape[1] * 1.0): int(img_int.shape[1] * 1.0) + img_int.shape[1]] img_patch_separated_returned_true_size = img_patch_separated_returned_true_size[:, margin : length_x - margin] img_patch_interest_revised[:, index_x_d + margin : index_x_u - margin] = img_patch_separated_returned_true_size @@ -1519,7 +1519,7 @@ def return_deskew_slop(img_patch_org, sigma_des,n_tot_angles=100, img_int[:,:]=img_patch_org[:,:]#img_patch_org[:,:,0] max_shape=np.max(img_int.shape) - img_resized=np.zeros((int( max_shape*(1.1) ) , int( max_shape*(1.1) ) )) + img_resized=np.zeros((int(max_shape * 1.1) , int(max_shape * 1.1))) onset_x=int((img_resized.shape[1]-img_int.shape[1])/2.) onset_y=int((img_resized.shape[0]-img_int.shape[0])/2.) From 9733d575bfd2caa19df0465a0fac9e5f352303b8 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Fri, 17 Oct 2025 18:21:49 +0200 Subject: [PATCH 03/28] replace list declaration with list literal (faster) --- src/eynollah/utils/__init__.py | 18 ++++++------------ src/eynollah/utils/separate_lines.py | 6 ++---- 2 files changed, 8 insertions(+), 16 deletions(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index fc01520..c906dd0 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -151,8 +151,7 @@ def return_x_start_end_mothers_childs_and_type_of_reading_order( min_ys=np.min(y_sep) max_ys=np.max(y_sep) - y_mains=[] - y_mains.append(min_ys) + y_mains= [min_ys] y_mains_sep_ohne_grenzen=[] for ii in range(len(new_main_sep_y)): @@ -525,8 +524,7 @@ def find_num_col(regions_without_separators, num_col_classifier, tables, multipl # print(forest[np.argmin(z[forest]) ] ) if not isNaN(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) - forest = [] - forest.append(peaks_neg_fin[i + 1]) + forest = [peaks_neg_fin[i + 1]] if i == (len(peaks_neg_fin) - 1): # print(print(forest[np.argmin(z[forest]) ] )) if not isNaN(forest[np.argmin(z[forest])]): @@ -694,8 +692,7 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8): # print(forest[np.argmin(z[forest]) ] ) if not isNaN(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) - forest = [] - forest.append(peaks_neg_fin[i + 1]) + forest = [peaks_neg_fin[i + 1]] if i == (len(peaks_neg_fin) - 1): # print(print(forest[np.argmin(z[forest]) ] )) if not isNaN(forest[np.argmin(z[forest])]): @@ -1346,8 +1343,7 @@ def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new( return img_p_in, special_separators def return_points_with_boundies(peaks_neg_fin, first_point, last_point): - peaks_neg_tot = [] - peaks_neg_tot.append(first_point) + peaks_neg_tot = [first_point] for ii in range(len(peaks_neg_fin)): peaks_neg_tot.append(peaks_neg_fin[ii]) peaks_neg_tot.append(last_point) @@ -1516,8 +1512,7 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, args_cy_splitter=np.argsort(cy_main_splitters) cy_main_splitters_sort=cy_main_splitters[args_cy_splitter] - splitter_y_new=[] - splitter_y_new.append(0) + splitter_y_new= [0] for i in range(len(cy_main_splitters_sort)): splitter_y_new.append( cy_main_splitters_sort[i] ) splitter_y_new.append(region_pre_p.shape[0]) @@ -1593,8 +1588,7 @@ def 
return_boxes_of_images_by_order_of_reading_new( num_col, peaks_neg_fin = find_num_col( regions_without_separators[splitter_y_new[i]:splitter_y_new[i+1], :], num_col_classifier, tables, multiplier=3.) - peaks_neg_fin_early=[] - peaks_neg_fin_early.append(0) + peaks_neg_fin_early= [0] #print(peaks_neg_fin,'peaks_neg_fin') for p_n in peaks_neg_fin: peaks_neg_fin_early.append(p_n) diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index d745ec7..84ca6d7 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1227,8 +1227,7 @@ def separate_lines_new_inside_tiles(img_path, thetha): if diff_peaks[i] > cut_off: if not np.isnan(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) - forest = [] - forest.append(peaks_neg[i + 1]) + forest = [peaks_neg[i + 1]] if i == (len(peaks_neg) - 1): if not np.isnan(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) @@ -1248,8 +1247,7 @@ def separate_lines_new_inside_tiles(img_path, thetha): if diff_peaks_pos[i] > cut_off: if not np.isnan(forest[np.argmax(z[forest])]): peaks_pos_true.append(forest[np.argmax(z[forest])]) - forest = [] - forest.append(peaks[i + 1]) + forest = [peaks[i + 1]] if i == (len(peaks) - 1): if not np.isnan(forest[np.argmax(z[forest])]): peaks_pos_true.append(forest[np.argmax(z[forest])]) From f212ffa22ddfcdf953ec133d21dce900136cd7c1 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Fri, 17 Oct 2025 18:27:18 +0200 Subject: [PATCH 04/28] remove unnecessary backslash --- src/eynollah/utils/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index c906dd0..aa89bd1 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1384,8 +1384,7 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, gray = cv2.bitwise_not(separators_closeup_n_binary) gray=gray.astype(np.uint8) - bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \ - cv2.THRESH_BINARY, 15, -2) + bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -2) horizontal = np.copy(bw) vertical = np.copy(bw) From 496a0e2ca43631b092b5537b11d0ba7336a4375c Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Fri, 17 Oct 2025 19:19:26 +0200 Subject: [PATCH 05/28] readme and documentation updates --- README.md | 80 ++++++++++++++++++++++---------------------------- docs/docker.md | 24 +++++++++++++++ docs/ocrd.md | 21 +++++++++++++ 3 files changed, 80 insertions(+), 45 deletions(-) create mode 100644 docs/docker.md create mode 100644 docs/ocrd.md diff --git a/README.md b/README.md index 3ba5086..fabb594 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,7 @@ > Document Layout Analysis, Binarization and OCR with Deep Learning and Heuristics +[![Python Versions](https://img.shields.io/pypi/pyversions/eynollah.svg)](https://pypi.python.org/pypi/eynollah) [![PyPI Version](https://img.shields.io/pypi/v/eynollah)](https://pypi.org/project/eynollah/) [![GH Actions Test](https://github.com/qurator-spk/eynollah/actions/workflows/test-eynollah.yml/badge.svg)](https://github.com/qurator-spk/eynollah/actions/workflows/test-eynollah.yml) [![GH Actions Deploy](https://github.com/qurator-spk/eynollah/actions/workflows/build-docker.yml/badge.svg)](https://github.com/qurator-spk/eynollah/actions/workflows/build-docker.yml) 
@@ -11,24 +12,22 @@ ![](https://user-images.githubusercontent.com/952378/102350683-8a74db80-3fa5-11eb-8c7e-f743f7d6eae2.jpg) ## Features -* Support for 10 distinct segmentation classes: +* Document layout analysis using pixelwise segmentation models with support for 10 distinct segmentation classes: * background, [page border](https://ocr-d.de/en/gt-guidelines/trans/lyRand.html), [text region](https://ocr-d.de/en/gt-guidelines/trans/lytextregion.html#textregionen__textregion_), [text line](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_TextLineType.html), [header](https://ocr-d.de/en/gt-guidelines/trans/lyUeberschrift.html), [image](https://ocr-d.de/en/gt-guidelines/trans/lyBildbereiche.html), [separator](https://ocr-d.de/en/gt-guidelines/trans/lySeparatoren.html), [marginalia](https://ocr-d.de/en/gt-guidelines/trans/lyMarginalie.html), [initial](https://ocr-d.de/en/gt-guidelines/trans/lyInitiale.html), [table](https://ocr-d.de/en/gt-guidelines/trans/lyTabellen.html) -* Support for various image optimization operations: - * cropping (border detection), binarization, deskewing, dewarping, scaling, enhancing, resizing * Textline segmentation to bounding boxes or polygons (contours) including for curved lines and vertical text -* Text recognition (OCR) using either CNN-RNN or Transformer models -* Detection of reading order (left-to-right or right-to-left) using either heuristics or trainable models +* Document image binarization with pixelwise segmentation or hybrid CNN-Transformer models +* Text recognition (OCR) with CNN-RNN or TrOCR models +* Detection of reading order (left-to-right or right-to-left) using heuristics or trainable models * Output in [PAGE-XML](https://github.com/PRImA-Research-Lab/PAGE-XML) * [OCR-D](https://github.com/qurator-spk/eynollah#use-as-ocr-d-processor) interface :warning: Development is focused on achieving the best quality of results for a wide variety of historical -documents and therefore processing can be very slow. We aim to improve this, but contributions are welcome. +documents using a combination of multiple deep learning models and heuristics; therefore processing can be slow. ## Installation - Python `3.8-3.11` with Tensorflow `<2.13` on Linux are currently supported. - -For (limited) GPU support the CUDA toolkit needs to be installed. A known working config is CUDA `11` with cuDNN `8.6`. +For (limited) GPU support the CUDA toolkit needs to be installed. +A working config is CUDA `11.8` with cuDNN `8.6`. You can either install from PyPI @@ -53,23 +52,33 @@ pip install "eynollah[OCR]" make install EXTRAS=OCR ``` +With Docker, use + +``` +docker pull ghcr.io/qurator-spk/eynollah:latest +``` + +For additional documentation on using Eynollah and Docker, see [`docker.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/docker.md). + ## Models -Pretrained models can be downloaded from [zenodo](https://zenodo.org/records/17194824) or [huggingface](https://huggingface.co/SBB?search_models=eynollah). +Pretrained models can be downloaded from [Zenodo](https://zenodo.org/records/17194824) or [Hugging Face](https://huggingface.co/SBB?search_models=eynollah). -For documentation on models, have a look at [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). -Model cards are also provided for our trained models. +For documentation on models, have a look at [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). 
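A minimal quick-start sketch, assuming the Zenodo record ships a layout model archive named `models_layout_v0_5_0.tar.gz` that unpacks to a folder of the same name (both names are assumptions; check the record page for the actual ones):

```
# Sketch: fetch a pretrained model archive and run layout analysis on one image.
# Archive and folder names are assumed; see https://zenodo.org/records/17194824.
wget -O models_layout.tar.gz \
  "https://zenodo.org/records/17194824/files/models_layout_v0_5_0.tar.gz?download=1"
tar xf models_layout.tar.gz
eynollah layout -i document.tif -o output/ -m models_layout_v0_5_0/ -fl -light -tll
```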
## Training -In case you want to train your own model with Eynollah, see the -documentation in [`train.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/train.md) and use the -tools in the [`train` folder](https://github.com/qurator-spk/eynollah/tree/main/train). +To train your own model with Eynollah, see the documentation in [`train.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/train.md) and use the +tools in the [`train`](https://github.com/qurator-spk/eynollah/tree/main/train) folder. ## Usage -Eynollah supports five use cases: layout analysis (segmentation), binarization, -image enhancement, text recognition (OCR), and reading order detection. +Eynollah supports five use cases: +1. [layout analysis (segmentation)](#layout-analysis), +2. [binarization](#binarization), +3. [image enhancement](#image-enhancement), +4. [text recognition (OCR)](#ocr), and +5. [reading order detection](#reading-order-detection). ### Layout Analysis @@ -114,6 +123,8 @@ If no further option is set, the tool performs layout detection of main regions and marginals). The best output quality is achieved when RGB images are used as input rather than greyscale or binarized images. +Additional documentation can be found in [`usage.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). + ### Binarization The binarization module performs document image binarization using pretrained pixelwise segmentation models. @@ -127,9 +138,12 @@ eynollah binarization \ -m \ ``` +### Image Enhancement +TODO + ### OCR -The OCR module performs text recognition using either a CNN-RNN model or a Transformer model. +The OCR module performs text recognition using either CNN-RNN or TrOCR models. The command-line interface for OCR can be called like this: @@ -141,7 +155,7 @@ eynollah ocr \ -m | --model_name \ ``` -### Machine-based-reading-order +### Reading Order Detection The machine-based reading-order module employs a pretrained model to identify the reading order from layouts represented in PAGE-XML files. @@ -160,36 +174,12 @@ eynollah machine-based-reading-order \ Eynollah ships with a CLI interface to be used as [OCR-D](https://ocr-d.de) [processor](https://ocr-d.de/en/spec/cli), formally described in [`ocrd-tool.json`](https://github.com/qurator-spk/eynollah/tree/main/src/eynollah/ocrd-tool.json). -In this case, the source image file group with (preferably) RGB images should be used as input like this: - - ocrd-eynollah-segment -I OCR-D-IMG -O OCR-D-SEG -P models eynollah_layout_v0_5_0 - -If the input file group is PAGE-XML (from a previous OCR-D workflow step), Eynollah behaves as follows: -- existing regions are kept and ignored (i.e. in effect they might overlap segments from Eynollah results) -- existing annotation (and respective `AlternativeImage`s) are partially _ignored_: - - previous page frame detection (`cropped` images) - - previous derotation (`deskewed` images) - - previous thresholding (`binarized` images) -- if the page-level image nevertheless deviates from the original (`@imageFilename`) - (because some other preprocessing step was in effect like `denoised`), then - the output PAGE-XML will be based on that as new top-level (`@imageFilename`) - - ocrd-eynollah-segment -I OCR-D-XYZ -O OCR-D-SEG -P models eynollah_layout_v0_5_0 - -In general, it makes more sense to add other workflow steps **after** Eynollah. 
- -There is also an OCR-D processor for binarization: - - ocrd-sbb-binarize -I OCR-D-IMG -O OCR-D-BIN -P models default-2021-03-09 - -#### Additional documentation - -Additional documentation is available in the [docs](https://github.com/qurator-spk/eynollah/tree/main/docs) directory. +Further documentation on using Eynollah with OCR-D can be found in [`ocrd.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/ocrd.md). ## How to cite ```bibtex -@inproceedings{hip23rezanezhad, +@inproceedings{hip23eynollah, title = {Document Layout Analysis with Deep Learning and Heuristics}, author = {Rezanezhad, Vahid and Baierer, Konstantin and Gerber, Mike and Labusch, Kai and Neudecker, Clemens}, booktitle = {Proceedings of the 7th International Workshop on Historical Document Imaging and Processing {HIP} 2023, diff --git a/docs/docker.md b/docs/docker.md new file mode 100644 index 0000000..466adf6 --- /dev/null +++ b/docs/docker.md @@ -0,0 +1,24 @@ +# 1. ocrd resource manager +(just once, to get the models and install them into a named volume for later re-use) + + vol_models=ocrd-resources:/usr/local/share/ocrd-resources + docker run --rm -v $vol_models ocrd/eynollah ocrd resmgr download ocrd-eynollah-segment default + +Now, each time you want to use Eynollah, pass the same resources volume again. +Also, bind-mount some data directory, e.g. current working directory $PWD (/data is default working directory in the container). +Either use standalone CLI (2) or OCR-D CLI (3): + +# 2. standalone CLI (follow self-help, cf. readme) + + docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah eynollah binarization --help + docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah eynollah layout --help + docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah eynollah ocr --help + +# 3. OCR-D CLI (follow self-help, cf. readme and https://ocr-d.de/en/spec/cli) + + docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah ocrd-eynollah-segment -h + docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah ocrd-sbb-binarize -h + +Alternatively, just "log in" to the container once and use the commands there: + + docker run --rm -v $vol_models -v $PWD:/data -it ocrd/eynollah bash \ No newline at end of file diff --git a/docs/ocrd.md b/docs/ocrd.md new file mode 100644 index 0000000..a391024 --- /dev/null +++ b/docs/ocrd.md @@ -0,0 +1,21 @@ +When using Eynollah in OCR-D, the source image file group with (preferably) RGB images should be used as input like this: + + ocrd-eynollah-segment -I OCR-D-IMG -O OCR-D-SEG -P models eynollah_layout_v0_5_0 + +If the input file group is PAGE-XML (from a previous OCR-D workflow step), Eynollah behaves as follows: +- existing regions are kept and ignored (i.e. in effect they might overlap segments from Eynollah results) +- existing annotation (and respective `AlternativeImage`s) are partially _ignored_: + - previous page frame detection (`cropped` images) + - previous derotation (`deskewed` images) + - previous thresholding (`binarized` images) +- if the page-level image nevertheless deviates from the original (`@imageFilename`) + (because some other preprocessing step was in effect like `denoised`), then + the output PAGE-XML will be based on that as new top-level (`@imageFilename`) + + ocrd-eynollah-segment -I OCR-D-XYZ -O OCR-D-SEG -P models eynollah_layout_v0_5_0 + +In general, it makes more sense to add other workflow steps **after** Eynollah. 
+ +There is also an OCR-D processor for binarization: + + ocrd-sbb-binarize -I OCR-D-IMG -O OCR-D-BIN -P models default-2021-03-09 From 9d2dbb838845cdf15663fb611f5d8f477b469774 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 20 Oct 2025 14:47:55 +0200 Subject: [PATCH 06/28] updating model based reading orde detection --- docs/models.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/models.md b/docs/models.md index 3d296d5..40b23ae 100644 --- a/docs/models.md +++ b/docs/models.md @@ -151,7 +151,7 @@ This model is used for the task of illustration detection only. Model card: [Reading Order Detection]() -TODO +The model extracts the reading order of text regions from the layout by classifying pairwise relationships between them. A sorting algorithm then determines the overall reading sequence. ## Heuristic methods From 3ec5ceb22e317fbe5234f625412898232277ab68 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 20 Oct 2025 14:55:14 +0200 Subject: [PATCH 07/28] Update flowchart --- docs/models.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/models.md b/docs/models.md index 40b23ae..50ef726 100644 --- a/docs/models.md +++ b/docs/models.md @@ -18,7 +18,8 @@ Two Arabic/Persian terms form the name of the model suite: عين الله, whic See the flowchart below for the different stages and how they interact: -![](https://user-images.githubusercontent.com/952378/100619946-1936f680-331e-11eb-9297-6e8b4cab3c16.png) +eynollah_flowchart + ## Models From c8455370a9dfde698ee91125d3400d8a313ede5a Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 20 Oct 2025 15:13:45 +0200 Subject: [PATCH 08/28] updating heuristics and ocr documentation --- docs/models.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/docs/models.md b/docs/models.md index 50ef726..7f83b33 100644 --- a/docs/models.md +++ b/docs/models.md @@ -154,13 +154,17 @@ Model card: [Reading Order Detection]() The model extracts the reading order of text regions from the layout by classifying pairwise relationships between them. A sorting algorithm then determines the overall reading sequence. +### OCR + +We have trained three OCR models: two CNN-RNN–based models and one transformer-based TrOCR model. The CNN-RNN models are generally faster and provide better results in most cases, though their performance decreases with heavily degraded images. The TrOCR model, on the other hand, is computationally expensive and slower during inference, but it can possibly produce better results on strongly degraded images. ## Heuristic methods Additionally, some heuristic methods are employed to further improve the model predictions: * After border detection, the largest contour is determined by a bounding box, and the image cropped to these coordinates. -* For text region detection, the image is scaled up to make it easier for the model to detect background space between text regions. +* Unlike the non-light version, where the image is scaled up to help the model better detect the background spaces between text regions, the light version uses down-scaled images. In this case, introducing an artificial class along the boundaries of text regions and text lines has helped to isolate and separate the text regions more effectively. * A minimum area is defined for text regions in relation to the overall image dimensions, so that very small regions that are noise can be filtered out. 
-* Deskewing is applied on the text region level (due to regions having different degrees of skew) in order to improve the textline segmentation result. -* After deskewing, a calculation of the pixel distribution on the X-axis allows the separation of textlines (foreground) and background pixels. -* Finally, using the derived coordinates, bounding boxes are determined for each textline. +* In the non-light version, deskewing is applied at the text-region level (since regions may have different degrees of skew) to improve text-line segmentation results. In contrast, the light version performs deskewing only at the page level to enhance margin detection and heuristic reading-order estimation. +* After deskewing, a calculation of the pixel distribution on the X-axis allows the separation of textlines (foreground) and background pixels (only in non-light version). +* Finally, using the derived coordinates, bounding boxes are determined for each textline (only in non-light version). +* As mentioned above, the reading order can be determined using a model; however, this approach is computationally expensive, time-consuming, and less accurate due to the limited amount of ground-truth data available for training. Therefore, our tool uses a heuristic reading-order detection method as the default. The heuristic approach relies on headers and separators to determine the reading order of text regions. From 6e3399fe7afecbd38df9a5d017388d1d5f83ae05 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Mon, 20 Oct 2025 22:16:56 +0200 Subject: [PATCH 09/28] combine Docker docs --- docs/docker.md | 27 +++++++++++++++++++++++---- train/README.md | 16 ---------------- 2 files changed, 23 insertions(+), 20 deletions(-) diff --git a/docs/docker.md b/docs/docker.md index 466adf6..e47f2d5 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -1,4 +1,8 @@ -# 1. ocrd resource manager +## Inference with Docker + + docker pull ghcr.io/qurator-spk/eynollah:latest + +### 1. ocrd resource manager (just once, to get the models and install them into a named volume for later re-use) vol_models=ocrd-resources:/usr/local/share/ocrd-resources @@ -6,19 +10,34 @@ Now, each time you want to use Eynollah, pass the same resources volume again. Also, bind-mount some data directory, e.g. current working directory $PWD (/data is default working directory in the container). + Either use standalone CLI (2) or OCR-D CLI (3): -# 2. standalone CLI (follow self-help, cf. readme) +### 2. standalone CLI +(follow self-help, cf. readme) docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah eynollah binarization --help docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah eynollah layout --help docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah eynollah ocr --help -# 3. OCR-D CLI (follow self-help, cf. readme and https://ocr-d.de/en/spec/cli) +### 3. OCR-D CLI +(follow self-help, cf. readme and https://ocr-d.de/en/spec/cli) docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah ocrd-eynollah-segment -h docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah ocrd-sbb-binarize -h Alternatively, just "log in" to the container once and use the commands there: - docker run --rm -v $vol_models -v $PWD:/data -it ocrd/eynollah bash \ No newline at end of file + docker run --rm -v $vol_models -v $PWD:/data -it ocrd/eynollah bash + +## Training with Docker + +Build the Docker image + + cd train + docker build -t model-training . 
+ +Run the Docker image + + cd train + docker run --gpus all -v $PWD:/entry_point_dir model-training diff --git a/train/README.md b/train/README.md index 5f6d326..d270542 100644 --- a/train/README.md +++ b/train/README.md @@ -41,19 +41,3 @@ each class will be defined with a RGB value and beside images, a text file of cl > Convert COCO GT or results for a single image to a segmentation map and write it to disk. * [`ocrd-segment-extract-pages`](https://github.com/OCR-D/ocrd_segment/blob/master/ocrd_segment/extract_pages.py) > Extract region classes and their colours in mask (pseg) images. Allows the color map as free dict parameter, and comes with a default that mimics PageViewer's coloring for quick debugging; it also warns when regions do overlap. - -### Train using Docker - -Build the Docker image: - -```bash -cd train -docker build -t model-training . -``` - -Run Docker image - -```bash -cd train -docker run --gpus all -v $PWD:/entry_point_dir model-training -``` From e5254dc6c5bfcf2ee6d7b2b8636c14e32674f12f Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Mon, 20 Oct 2025 22:39:54 +0200 Subject: [PATCH 10/28] integrate training docs --- docs/train.md | 38 ++++++++++++++++++++++++++++++++++++++ train/README.md | 43 ------------------------------------------- 2 files changed, 38 insertions(+), 43 deletions(-) delete mode 100644 train/README.md diff --git a/docs/train.md b/docs/train.md index 252bead..ffa39a9 100644 --- a/docs/train.md +++ b/docs/train.md @@ -1,3 +1,41 @@ +# Prerequisistes + +## 1. Install Eynollah with training dependencies + +Clone the repository and install eynollah along with the dependencies necessary for training: + +```sh +git clone https://github.com/qurator-spk/eynollah +cd eynollah +pip install '.[training]' +``` + +## 2. Pretrained encoder + +Download our pretrained weights and add them to a `train/pretrained_model` folder: + +```sh +cd train +wget -O pretrained_model.tar.gz https://zenodo.org/records/17243320/files/pretrained_model_v0_5_1.tar.gz?download=1 +tar xf pretrained_model.tar.gz +``` + +## 3. Example data + +### Binarization +A small sample of training data for binarization experiment can be found on [Zenodo](https://zenodo.org/records/17243320/files/training_data_sample_binarization_v0_5_1.tar.gz?download=1), +which contains `images` and `labels` folders. + +## 4. Helpful tools + +* [`pagexml2img`](https://github.com/qurator-spk/page2img) +> Tool to extract 2-D or 3-D RGB images from PAGE-XML data. In the former case, the output will be 1 2-D image array which each class has filled with a pixel value. In the case of a 3-D RGB image, +each class will be defined with a RGB value and beside images, a text file of classes will also be produced. +* [`cocoSegmentationToPng`](https://github.com/nightrome/cocostuffapi/blob/17acf33aef3c6cc2d6aca46dcf084266c2778cf0/PythonAPI/pycocotools/cocostuffhelper.py#L130) +> Convert COCO GT or results for a single image to a segmentation map and write it to disk. +* [`ocrd-segment-extract-pages`](https://github.com/OCR-D/ocrd_segment/blob/master/ocrd_segment/extract_pages.py) +> Extract region classes and their colours in mask (pseg) images. Allows the color map as free dict parameter, and comes with a default that mimics PageViewer's coloring for quick debugging; it also warns when regions do overlap. 
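To pull in the binarization sample mentioned under Example data above, a sketch along these lines should work (the local archive name is just a placeholder; the download URL is the one given above):

```sh
# Sketch: download and unpack the binarization training sample into train/;
# it should yield the `images` and `labels` folders described above.
cd train
wget -O training_data_sample_binarization.tar.gz \
  "https://zenodo.org/records/17243320/files/training_data_sample_binarization_v0_5_1.tar.gz?download=1"
tar xf training_data_sample_binarization.tar.gz
```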
+ # Training documentation This document aims to assist users in preparing training datasets, training models, and diff --git a/train/README.md b/train/README.md deleted file mode 100644 index d270542..0000000 --- a/train/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# Training eynollah - -This README explains the technical details of how to set up and run training, for detailed information on parameterization, see [`docs/train.md`](../docs/train.md) - -## Introduction - -This folder contains the source code for training an encoder model for document image segmentation. - -## Installation - -Clone the repository and install eynollah along with the dependencies necessary for training: - -```sh -git clone https://github.com/qurator-spk/eynollah -cd eynollah -pip install '.[training]' -``` - -### Pretrained encoder - -Download our pretrained weights and add them to a `train/pretrained_model` folder: - -```sh -cd train -wget -O pretrained_model.tar.gz https://zenodo.org/records/17243320/files/pretrained_model_v0_5_1.tar.gz?download=1 -tar xf pretrained_model.tar.gz -``` - -### Binarization training data - -A small sample of training data for binarization experiment can be found [on -zenodo](https://zenodo.org/records/17243320/files/training_data_sample_binarization_v0_5_1.tar.gz?download=1), -which contains `images` and `labels` folders. - -### Helpful tools - -* [`pagexml2img`](https://github.com/qurator-spk/page2img) -> Tool to extract 2-D or 3-D RGB images from PAGE-XML data. In the former case, the output will be 1 2-D image array which each class has filled with a pixel value. In the case of a 3-D RGB image, -each class will be defined with a RGB value and beside images, a text file of classes will also be produced. -* [`cocoSegmentationToPng`](https://github.com/nightrome/cocostuffapi/blob/17acf33aef3c6cc2d6aca46dcf084266c2778cf0/PythonAPI/pycocotools/cocostuffhelper.py#L130) -> Convert COCO GT or results for a single image to a segmentation map and write it to disk. -* [`ocrd-segment-extract-pages`](https://github.com/OCR-D/ocrd_segment/blob/master/ocrd_segment/extract_pages.py) -> Extract region classes and their colours in mask (pseg) images. Allows the color map as free dict parameter, and comes with a default that mimics PageViewer's coloring for quick debugging; it also warns when regions do overlap. From 230e7cc705eef7800924917c23b9b4242d69f926 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Mon, 20 Oct 2025 22:52:54 +0200 Subject: [PATCH 11/28] integrate ocrd docs --- README.md | 11 ++--------- docs/ocrd.md | 5 +++++ 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index fabb594..d6930f7 100644 --- a/README.md +++ b/README.md @@ -157,7 +157,7 @@ eynollah ocr \ ### Reading Order Detection -The machine-based reading-order module employs a pretrained model to identify the reading order from layouts represented in PAGE-XML files. +The reading order detection module employs a pretrained model to identify the reading order from layouts represented in PAGE-XML files. The command-line interface for machine based reading order can be called like this: @@ -169,17 +169,10 @@ eynollah machine-based-reading-order \ -o ``` -#### Use as OCR-D processor - -Eynollah ships with a CLI interface to be used as [OCR-D](https://ocr-d.de) [processor](https://ocr-d.de/en/spec/cli), -formally described in [`ocrd-tool.json`](https://github.com/qurator-spk/eynollah/tree/main/src/eynollah/ocrd-tool.json). 
- -Further documentation on using Eynollah with OCR-D can be found in [`ocrd.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/ocrd.md). - ## How to cite ```bibtex -@inproceedings{hip23eynollah, +@inproceedings{hip23rezanezhad, title = {Document Layout Analysis with Deep Learning and Heuristics}, author = {Rezanezhad, Vahid and Baierer, Konstantin and Gerber, Mike and Labusch, Kai and Neudecker, Clemens}, booktitle = {Proceedings of the 7th International Workshop on Historical Document Imaging and Processing {HIP} 2023, diff --git a/docs/ocrd.md b/docs/ocrd.md index a391024..9e7e268 100644 --- a/docs/ocrd.md +++ b/docs/ocrd.md @@ -1,3 +1,8 @@ +## Use as OCR-D processor + +Eynollah ships with a CLI interface to be used as [OCR-D](https://ocr-d.de) [processor](https://ocr-d.de/en/spec/cli), +formally described in [`ocrd-tool.json`](https://github.com/qurator-spk/eynollah/tree/main/src/eynollah/ocrd-tool.json). + When using Eynollah in OCR-D, the source image file group with (preferably) RGB images should be used as input like this: ocrd-eynollah-segment -I OCR-D-IMG -O OCR-D-SEG -P models eynollah_layout_v0_5_0 From 7d70835d2251161b9c4ce4c41ad1ca98d2ca6953 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Mon, 20 Oct 2025 23:19:10 +0200 Subject: [PATCH 12/28] small fixes to main readme --- README.md | 22 +++++++++++++--------- docs/docker.md | 4 ++-- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index d6930f7..6dc5bf9 100644 --- a/README.md +++ b/README.md @@ -52,24 +52,25 @@ pip install "eynollah[OCR]" make install EXTRAS=OCR ``` -With Docker, use +### Docker + +Use ``` docker pull ghcr.io/qurator-spk/eynollah:latest ``` -For additional documentation on using Eynollah and Docker, see [`docker.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/docker.md). +When using Eynollah with Docker, see [`docker.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/docker.md). ## Models Pretrained models can be downloaded from [Zenodo](https://zenodo.org/records/17194824) or [Hugging Face](https://huggingface.co/SBB?search_models=eynollah). -For documentation on models, have a look at [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). +For model documentation and model cards, see [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). ## Training -To train your own model with Eynollah, see the documentation in [`train.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/train.md) and use the -tools in the [`train`](https://github.com/qurator-spk/eynollah/tree/main/train) folder. +To train your own model with Eynollah, see [`train.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/train.md) and use the tools in the [`train`](https://github.com/qurator-spk/eynollah/tree/main/train) folder. ## Usage @@ -83,10 +84,7 @@ Eynollah supports five use cases: ### Layout Analysis The layout analysis module is responsible for detecting layout elements, identifying text lines, and determining reading -order using either heuristic methods or a [pretrained reading order detection model](https://github.com/qurator-spk/eynollah#machine-based-reading-order). - -Reading order detection can be performed either as part of layout analysis based on image input, or, currently under -development, based on pre-existing layout analysis results in PAGE-XML format as input. 
+order using heuristic methods or a [pretrained model](https://github.com/qurator-spk/eynollah#machine-based-reading-order). The command-line interface for layout analysis can be called like this: @@ -156,6 +154,8 @@ eynollah ocr \ ``` ### Reading Order Detection +Reading order detection can be performed either as part of layout analysis based on image input, or, currently under +development, based on pre-existing layout analysis data in PAGE-XML format as input. The reading order detection module employs a pretrained model to identify the reading order from layouts represented in PAGE-XML files. @@ -169,6 +169,10 @@ eynollah machine-based-reading-order \ -o ``` +## Use as OCR-D processor + +See [`ocrd.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). + ## How to cite ```bibtex diff --git a/docs/docker.md b/docs/docker.md index e47f2d5..7965622 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -32,12 +32,12 @@ Alternatively, just "log in" to the container once and use the commands there: ## Training with Docker -Build the Docker image +Build the Docker training image cd train docker build -t model-training . -Run the Docker image +Run the Docker training image cd train docker run --gpus all -v $PWD:/entry_point_dir model-training From 2fc723d292093cdfb263e2d6681e478d7018b953 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 22 Oct 2025 18:29:14 +0200 Subject: [PATCH 13/28] extend README --- README.md | 66 ++++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 46 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index 3ba5086..c6ba0e4 100644 --- a/README.md +++ b/README.md @@ -91,24 +91,35 @@ eynollah layout \ The following options can be used to further configure the processing: -| option | description | -|-------------------|:-------------------------------------------------------------------------------| -| `-fl` | full layout analysis including all steps and segmentation classes | -| `-light` | lighter and faster but simpler method for main region detection and deskewing | -| `-tll` | this indicates the light textline and should be passed with light version | -| `-tab` | apply table detection | -| `-ae` | apply enhancement (the resulting image is saved to the output directory) | -| `-as` | apply scaling | -| `-cl` | apply contour detection for curved text lines instead of bounding boxes | -| `-ib` | apply binarization (the resulting image is saved to the output directory) | -| `-ep` | enable plotting (MUST always be used with `-sl`, `-sd`, `-sa`, `-si` or `-ae`) | -| `-eoi` | extract only images to output directory (other processing will not be done) | -| `-ho` | ignore headers for reading order dectection | -| `-si ` | save image regions detected to this directory | -| `-sd ` | save deskewed image to this directory | -| `-sl ` | save layout prediction as plot to this directory | -| `-sp ` | save cropped page image to this directory | -| `-sa ` | save all (plot, enhanced/binary image, layout) to this directory | +| option | description | +|-------------------|:------------------------------------------------------------------------------- | +| `-fl` | full layout analysis including all steps and segmentation classes (recommended) | +| `-light` | lighter and faster but simpler method for main region detection and deskewing (recommended) | +| `-tll` | this indicates the light textline and should be passed with light version (recommended) | +| `-tab` | apply table detection | +| `-ae` | apply enhancement (the resulting image 
is saved to the output directory) | +| `-as` | apply scaling | +| `-cl` | apply contour detection for curved text lines instead of bounding boxes | +| `-ib` | apply binarization (the resulting image is saved to the output directory) | +| `-ep` | enable plotting (MUST always be used with `-sl`, `-sd`, `-sa`, `-si` or `-ae`) | +| `-eoi` | extract only images to output directory (other processing will not be done) | +| `-ho` | ignore headers for reading order dectection | +| `-si ` | save image regions detected to this directory | +| `-sd ` | save deskewed image to this directory | +| `-sl ` | save layout prediction as plot to this directory | +| `-sp ` | save cropped page image to this directory | +| `-sa ` | save all (plot, enhanced/binary image, layout) to this directory | +| `-thart` | threshold of artifical class in the case of textline detection. The default value is 0.1 | +| `-tharl` | threshold of artifical class in the case of layout detection. The default value is 0.1 | +| `-ocr` | do ocr | +| `-tr` | apply transformer ocr. Default model is a CNN-RNN model | +| `-bs_ocr` | ocr inference batch size. Default bs for trocr and cnn_rnn models are 2 and 8 respectively | +| `-ncu` | upper limit of columns in document image | +| `-ncl` | lower limit of columns in document image | +| `-slro` | skip layout detection and reading order | +| `-romb` | apply machine based reading order detection | +| `-ipe` | ignore page extraction | + If no further option is set, the tool performs layout detection of main regions (background, text, images, separators and marginals). @@ -124,7 +135,7 @@ The command-line interface for binarization can be called like this: eynollah binarization \ -i | -di \ -o \ - -m \ + -m ``` ### OCR @@ -138,9 +149,24 @@ eynollah ocr \ -i | -di \ -dx \ -o \ - -m | --model_name \ + -m | --model_name ``` +The following options can be used to further configure the ocr processing: + +| option | description | +|-------------------|:------------------------------------------------------------------------------- | +| `-dib` | directory of bins(files type must be '.png'). Prediction with both RGB and bins. | +| `-doit` | Directory containing output images rendered with the predicted text | +| `--model_name` | Specific model file path to use for OCR | +| `-trocr` | transformer ocr will be applied, otherwise cnn_rnn model | +| `-etit` | textlines images and text in xml will be exported into output dir (OCR training data) | +| `-nmtc` | cropped textline images will not be masked with textline contour | +| `-bs` | ocr inference batch size. Default bs for trocr and cnn_rnn models are 2 and 8 respectively | +| `-ds_pref` | add an abbrevation of dataset name to generated training data | +| `-min_conf` | minimum OCR confidence value. OCRs with textline conf lower than this will be ignored | + + ### Machine-based-reading-order The machine-based reading-order module employs a pretrained model to identify the reading order from layouts represented in PAGE-XML files. From ab9ddd5214f4161038a48193df94b4cd363729f8 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 22 Oct 2025 18:41:15 +0200 Subject: [PATCH 14/28] OCR examples are added to README --- README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/README.md b/README.md index c6ba0e4..405cab4 100644 --- a/README.md +++ b/README.md @@ -140,6 +140,16 @@ eynollah binarization \ ### OCR +

+ [example image pair: Input Image / Output Image]
+
+ [example image pair: Input Image / Output Image]

+ The OCR module performs text recognition using either a CNN-RNN model or a Transformer model. The command-line interface for OCR can be called like this: From 59eb4fd3bee8199155998cffc75b47931dc8bb33 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 22 Oct 2025 19:04:01 +0200 Subject: [PATCH 15/28] images with ro are added to readme --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 405cab4..e8a2721 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,11 @@ ![](https://user-images.githubusercontent.com/952378/102350683-8a74db80-3fa5-11eb-8c7e-f743f7d6eae2.jpg) +

+ [example image pair: Input Image / Output Image]

+ ## Features * Support for 10 distinct segmentation classes: * background, [page border](https://ocr-d.de/en/gt-guidelines/trans/lyRand.html), [text region](https://ocr-d.de/en/gt-guidelines/trans/lytextregion.html#textregionen__textregion_), [text line](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_TextLineType.html), [header](https://ocr-d.de/en/gt-guidelines/trans/lyUeberschrift.html), [image](https://ocr-d.de/en/gt-guidelines/trans/lyBildbereiche.html), [separator](https://ocr-d.de/en/gt-guidelines/trans/lySeparatoren.html), [marginalia](https://ocr-d.de/en/gt-guidelines/trans/lyMarginalie.html), [initial](https://ocr-d.de/en/gt-guidelines/trans/lyInitiale.html), [table](https://ocr-d.de/en/gt-guidelines/trans/lyTabellen.html) From b56bb4428444aa67d43d759f319704393214921e Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 22 Oct 2025 21:30:06 +0200 Subject: [PATCH 16/28] providing ocr model evaluation metrics --- docs/models.md | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/docs/models.md b/docs/models.md index 7f83b33..a42cdb2 100644 --- a/docs/models.md +++ b/docs/models.md @@ -157,6 +157,38 @@ The model extracts the reading order of text regions from the layout by classify ### OCR We have trained three OCR models: two CNN-RNN–based models and one transformer-based TrOCR model. The CNN-RNN models are generally faster and provide better results in most cases, though their performance decreases with heavily degraded images. The TrOCR model, on the other hand, is computationally expensive and slower during inference, but it can possibly produce better results on strongly degraded images. + +#### CNN-RNN model: model_eynollah_ocr_cnnrnn_20250805 + +This model is trained on data where most of the samples are in Fraktur german script. + +| Dataset | Input | CER | WER | +|-----------------------|:-------|:-----------|:----------| +| OCR-D-GT-Archiveform | BIN | 0.02147 | 0.05685 | +| OCR-D-GT-Archiveform | RGB | | | + +#### CNN-RNN model: model_eynollah_ocr_cnnrnn_20250904 (Default) + +Compared to the model_eynollah_ocr_cnnrnn_20250805 model, this model is trained on a larger proportion of Antiqua data and achieves superior performance. + +| Dataset | Input | CER | WER | +|-----------------------|:------------|:-----------|:----------| +| OCR-D-GT-Archiveform | BIN | 0.01635 | 0.05410 | +| OCR-D-GT-Archiveform | RGB | 0.01471 | 0.05813 | +| BLN600 | RGB | 0.04409 | 0.08879 | +| BLN600 | Enhanced | 0.03599 | 0.06244 | + + +#### Transformer OCR model: model_eynollah_ocr_trocr_20250919 + +This transformer OCR model is trained on the same data as model_eynollah_ocr_trocr_20250919. 
+ +| Dataset | Input | CER | WER | +|-----------------------|:------------|:-----------|:----------| +| OCR-D-GT-Archiveform | BIN | 0.01841 | 0.05589 | +| OCR-D-GT-Archiveform | RGB | | | +| BLN600 | RGB | 0.06347 | 0.13853 | + ## Heuristic methods Additionally, some heuristic methods are employed to further improve the model predictions: From 7b7714af2e3a40d18448a5dda6e7f624016c9eac Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 22 Oct 2025 22:42:37 +0200 Subject: [PATCH 17/28] completing ocr evaluations metric --- docs/models.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/models.md b/docs/models.md index a42cdb2..7482043 100644 --- a/docs/models.md +++ b/docs/models.md @@ -165,7 +165,7 @@ This model is trained on data where most of the samples are in Fraktur german sc | Dataset | Input | CER | WER | |-----------------------|:-------|:-----------|:----------| | OCR-D-GT-Archiveform | BIN | 0.02147 | 0.05685 | -| OCR-D-GT-Archiveform | RGB | | | +| OCR-D-GT-Archiveform | RGB | 0.01636 | 0.06285 | #### CNN-RNN model: model_eynollah_ocr_cnnrnn_20250904 (Default) @@ -186,7 +186,7 @@ This transformer OCR model is trained on the same data as model_eynollah_ocr_tro | Dataset | Input | CER | WER | |-----------------------|:------------|:-----------|:----------| | OCR-D-GT-Archiveform | BIN | 0.01841 | 0.05589 | -| OCR-D-GT-Archiveform | RGB | | | +| OCR-D-GT-Archiveform | RGB | 0.01552 | 0.06177 | | BLN600 | RGB | 0.06347 | 0.13853 | ## Heuristic methods From d0ad7a98b723ba494eee107e8fef388c444768bf Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 22 Oct 2025 22:45:22 +0200 Subject: [PATCH 18/28] starting qualitative ocr evaluation --- docs/models.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/models.md b/docs/models.md index 7482043..741fc67 100644 --- a/docs/models.md +++ b/docs/models.md @@ -189,6 +189,16 @@ This transformer OCR model is trained on the same data as model_eynollah_ocr_tro | OCR-D-GT-Archiveform | RGB | 0.01552 | 0.06177 | | BLN600 | RGB | 0.06347 | 0.13853 | +##### Qualitative evaluation of the models + +###### CNN-RNN model: model_eynollah_ocr_cnnrnn_20250805 + + +###### CNN-RNN model: model_eynollah_ocr_cnnrnn_20250904 (Default) + + +###### Transformer OCR model: model_eynollah_ocr_trocr_20250919 + ## Heuristic methods Additionally, some heuristic methods are employed to further improve the model predictions: From 6192e5ba5c95f3b8b3ad21f2e23aed0fbdededad Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 23 Oct 2025 16:37:24 +0200 Subject: [PATCH 19/28] qualitative evaluation of ocr models are added to docs --- docs/models.md | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/docs/models.md b/docs/models.md index 741fc67..b858630 100644 --- a/docs/models.md +++ b/docs/models.md @@ -191,13 +191,27 @@ This transformer OCR model is trained on the same data as model_eynollah_ocr_tro ##### Qualitative evaluation of the models -###### CNN-RNN model: model_eynollah_ocr_cnnrnn_20250805 +| | | | | +|:---:|:---:|:---:|:---:| +| Image | cnnrnn_20250805 | cnnrnn_20250904 | trocr_20250919 | -###### CNN-RNN model: model_eynollah_ocr_cnnrnn_20250904 (Default) + +| | | | | +|:---:|:---:|:---:|:---:| +| Image | cnnrnn_20250805 | cnnrnn_20250904 | trocr_20250919 | + + +| | | | | +|:---:|:---:|:---:|:---:| +| Image | cnnrnn_20250805 | cnnrnn_20250904 | trocr_20250919 | + + +| | | | | +|:---:|:---:|:---:|:---:| +| Image | cnnrnn_20250805 | cnnrnn_20250904 | trocr_20250919 | 
-###### Transformer OCR model: model_eynollah_ocr_trocr_20250919 ## Heuristic methods From 22d61e8d9405a18b936537e0499fb4bd5205c9e9 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Tue, 28 Oct 2025 19:56:23 +0100 Subject: [PATCH 20/28] remove newspaper images from main readme --- README.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/README.md b/README.md index 9a6d9bc..5d5d5a8 100644 --- a/README.md +++ b/README.md @@ -11,11 +11,6 @@ ![](https://user-images.githubusercontent.com/952378/102350683-8a74db80-3fa5-11eb-8c7e-f743f7d6eae2.jpg) -

- [example image pair: Input Image / Output Image]

- ## Features * Document layout analysis using pixelwise segmentation models with support for 10 distinct segmentation classes: * background, [page border](https://ocr-d.de/en/gt-guidelines/trans/lyRand.html), [text region](https://ocr-d.de/en/gt-guidelines/trans/lytextregion.html#textregionen__textregion_), [text line](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_TextLineType.html), [header](https://ocr-d.de/en/gt-guidelines/trans/lyUeberschrift.html), [image](https://ocr-d.de/en/gt-guidelines/trans/lyBildbereiche.html), [separator](https://ocr-d.de/en/gt-guidelines/trans/lySeparatoren.html), [marginalia](https://ocr-d.de/en/gt-guidelines/trans/lyMarginalie.html), [initial](https://ocr-d.de/en/gt-guidelines/trans/lyInitiale.html), [table](https://ocr-d.de/en/gt-guidelines/trans/lyTabellen.html) From 46a45f6b0eee17cfd979c2ed9a35b82b92343272 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 29 Oct 2025 22:23:48 +0100 Subject: [PATCH 21/28] Create examples.md --- docs/examples.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 docs/examples.md diff --git a/docs/examples.md b/docs/examples.md new file mode 100644 index 0000000..8da0baf --- /dev/null +++ b/docs/examples.md @@ -0,0 +1,21 @@ +# Examples + +Example outputs of various Eynollah models + +# Binarisation + + + + +# Reading Order Detection + +Input Image +Output Image + +# OCR + +Input Image +Output Image + +Input Image +Output Image From f6c0f56348e578b8dae8f13efcb0e12ca1bd92bf Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 29 Oct 2025 22:23:56 +0100 Subject: [PATCH 22/28] Update README.md --- README.md | 36 ++++++++++++++---------------------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/README.md b/README.md index 5d5d5a8..8353005 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ ![](https://user-images.githubusercontent.com/952378/102350683-8a74db80-3fa5-11eb-8c7e-f743f7d6eae2.jpg) ## Features -* Document layout analysis using pixelwise segmentation models with support for 10 distinct segmentation classes: +* Document layout analysis using pixelwise segmentation models with support for 10 segmentation classes: * background, [page border](https://ocr-d.de/en/gt-guidelines/trans/lyRand.html), [text region](https://ocr-d.de/en/gt-guidelines/trans/lytextregion.html#textregionen__textregion_), [text line](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_TextLineType.html), [header](https://ocr-d.de/en/gt-guidelines/trans/lyUeberschrift.html), [image](https://ocr-d.de/en/gt-guidelines/trans/lyBildbereiche.html), [separator](https://ocr-d.de/en/gt-guidelines/trans/lySeparatoren.html), [marginalia](https://ocr-d.de/en/gt-guidelines/trans/lyMarginalie.html), [initial](https://ocr-d.de/en/gt-guidelines/trans/lyInitiale.html), [table](https://ocr-d.de/en/gt-guidelines/trans/lyTabellen.html) * Textline segmentation to bounding boxes or polygons (contours) including for curved lines and vertical text * Document image binarization with pixelwise segmentation or hybrid CNN-Transformer models @@ -81,6 +81,8 @@ Eynollah supports five use cases: 4. [text recognition (OCR)](#ocr), and 5. [reading order detection](#reading-order-detection). +Some example outputs can be found in [`examples.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/examples.md). 
+ ### Layout Analysis The layout analysis module is responsible for detecting layout elements, identifying text lines, and determining reading @@ -152,16 +154,6 @@ TODO ### OCR -

- [example image pair: Input Image / Output Image]
-
- [example image pair: Input Image / Output Image]

- The OCR module performs text recognition using either a CNN-RNN model or a Transformer model. The command-line interface for OCR can be called like this: @@ -176,17 +168,17 @@ eynollah ocr \ The following options can be used to further configure the ocr processing: -| option | description | -|-------------------|:------------------------------------------------------------------------------- | -| `-dib` | directory of bins(files type must be '.png'). Prediction with both RGB and bins. | -| `-doit` | Directory containing output images rendered with the predicted text | -| `--model_name` | Specific model file path to use for OCR | -| `-trocr` | transformer ocr will be applied, otherwise cnn_rnn model | -| `-etit` | textlines images and text in xml will be exported into output dir (OCR training data) | -| `-nmtc` | cropped textline images will not be masked with textline contour | -| `-bs` | ocr inference batch size. Default bs for trocr and cnn_rnn models are 2 and 8 respectively | -| `-ds_pref` | add an abbrevation of dataset name to generated training data | -| `-min_conf` | minimum OCR confidence value. OCRs with textline conf lower than this will be ignored | +| option | description | +|-------------------|:-------------------------------------------------------------------------------------------| +| `-dib` | directory of binarized images (file type must be '.png'), prediction with both RGB and bin | +| `-doit` | directory for output images rendered with the predicted text | +| `--model_name` | file path to use specific model for OCR | +| `-trocr` | use transformer ocr model (otherwise cnn_rnn model is used) | +| `-etit` | export textline images and text in xml to output dir (OCR training data) | +| `-nmtc` | cropped textline images will not be masked with textline contour | +| `-bs` | ocr inference batch size. Default batch size is 2 for trocr and 8 for cnn_rnn models | +| `-ds_pref` | add an abbrevation of dataset name to generated training data | +| `-min_conf` | minimum OCR confidence value. 
OCR with textline conf lower than this will be ignored | ### Reading Order Detection From b1e191b2ea9511821cc15cfd6452184d76b87dad Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 29 Oct 2025 22:30:58 +0100 Subject: [PATCH 23/28] reformat cli options table --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8353005..a663215 100644 --- a/README.md +++ b/README.md @@ -101,7 +101,7 @@ eynollah layout \ The following options can be used to further configure the processing: | option | description | -|-------------------|:------------------------------------------------------------------------------- | +|-------------------|:--------------------------------------------------------------------------------------------| | `-fl` | full layout analysis including all steps and segmentation classes (recommended) | | `-light` | lighter and faster but simpler method for main region detection and deskewing (recommended) | | `-tll` | this indicates the light textline and should be passed with light version (recommended) | From c9efbe187159a72a9095ebee850a246553f6d986 Mon Sep 17 00:00:00 2001 From: Clemens Neudecker <952378+cneud@users.noreply.github.com> Date: Thu, 30 Oct 2025 16:52:59 +0100 Subject: [PATCH 24/28] refactor image layout in examples.md --- docs/examples.md | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/docs/examples.md b/docs/examples.md index 8da0baf..24336b3 100644 --- a/docs/examples.md +++ b/docs/examples.md @@ -4,8 +4,8 @@ Example outputs of various Eynollah models # Binarisation - - + + # Reading Order Detection @@ -14,8 +14,5 @@ Example outputs of various Eynollah models # OCR -Input Image -Output Image - -Input Image -Output Image +Input ImageOutput Image +Input ImageOutput Image From 70d8577a15d6a41433bbf3d48ce46fe4916ed19f Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Thu, 30 Oct 2025 22:16:41 +0100 Subject: [PATCH 25/28] Revert "remove redundant parentheses" This reverts commit 20a95365c283e4b90638063173fed3b8fb65cee1. 
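The change being rolled back here was purely stylistic; a minimal sketch (the NumPy import and sample values are assumptions for illustration, not taken from the patch) of the two equivalent spellings:

```python
import numpy as np

# Sample inputs (assumed values, only for illustration).
region_pre_p = np.array([[6, 3], [3, 6]])
label_lines = 6

# Spelling restored by this revert (extra parentheses kept) ...
with_parens = ((region_pre_p[:, :] == label_lines)) * 1
# ... and the spelling the reverted commit had introduced.
without_parens = (region_pre_p[:, :] == label_lines) * 1

assert (with_parens == without_parens).all()  # identical 0/1 masks
```

Both spellings yield the same 0/1 separator mask, so the revert only affects how the expression reads.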
--- src/eynollah/utils/__init__.py | 2 +- src/eynollah/utils/separate_lines.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index aa89bd1..9cf30b0 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1351,7 +1351,7 @@ def return_points_with_boundies(peaks_neg_fin, first_point, last_point): def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, label_lines, contours_h=None): t_ins_c0 = time.time() - separators_closeup= (region_pre_p[:, :] == label_lines) * 1 + separators_closeup=( (region_pre_p[:,:]==label_lines))*1 separators_closeup[0:110,:]=0 separators_closeup[separators_closeup.shape[0]-150:,:]=0 diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 84ca6d7..275bfac 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1473,9 +1473,9 @@ def separate_lines_new2(img_crop, thetha, num_col, slope_region, logger=None, pl img_int = np.zeros((img_xline.shape[0], img_xline.shape[1])) img_int[:, :] = img_xline[:, :] # img_patch_org[:,:,0] - img_resized = np.zeros((int(img_int.shape[0] * 1.2), int(img_int.shape[1] * 3))) - img_resized[int(img_int.shape[0] * 0.1): int(img_int.shape[0] * 0.1) + img_int.shape[0], - int(img_int.shape[1] * 1.0): int(img_int.shape[1] * 1.0) + img_int.shape[1]] = img_int[:, :] + img_resized = np.zeros((int(img_int.shape[0] * (1.2)), int(img_int.shape[1] * (3)))) + img_resized[int(img_int.shape[0] * (0.1)) : int(img_int.shape[0] * (0.1)) + img_int.shape[0], + int(img_int.shape[1] * (1.0)) : int(img_int.shape[1] * (1.0)) + img_int.shape[1]] = img_int[:, :] # plt.imshow(img_xline) # plt.show() img_line_rotated = rotate_image(img_resized, slopes_tile_wise[i]) @@ -1487,8 +1487,8 @@ def separate_lines_new2(img_crop, thetha, num_col, slope_region, logger=None, pl img_patch_separated_returned[:, :][img_patch_separated_returned[:, :] != 0] = 1 img_patch_separated_returned_true_size = img_patch_separated_returned[ - int(img_int.shape[0] * 0.1): int(img_int.shape[0] * 0.1) + img_int.shape[0], - int(img_int.shape[1] * 1.0): int(img_int.shape[1] * 1.0) + img_int.shape[1]] + int(img_int.shape[0] * (0.1)) : int(img_int.shape[0] * (0.1)) + img_int.shape[0], + int(img_int.shape[1] * (1.0)) : int(img_int.shape[1] * (1.0)) + img_int.shape[1]] img_patch_separated_returned_true_size = img_patch_separated_returned_true_size[:, margin : length_x - margin] img_patch_interest_revised[:, index_x_d + margin : index_x_u - margin] = img_patch_separated_returned_true_size @@ -1517,7 +1517,7 @@ def return_deskew_slop(img_patch_org, sigma_des,n_tot_angles=100, img_int[:,:]=img_patch_org[:,:]#img_patch_org[:,:,0] max_shape=np.max(img_int.shape) - img_resized=np.zeros((int(max_shape * 1.1) , int(max_shape * 1.1))) + img_resized=np.zeros((int( max_shape*(1.1) ) , int( max_shape*(1.1) ) )) onset_x=int((img_resized.shape[1]-img_int.shape[1])/2.) onset_y=int((img_resized.shape[0]-img_int.shape[0])/2.) From 2d35a0598d6164d9ccad9ef77d715db4250161c6 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Thu, 30 Oct 2025 22:16:48 +0100 Subject: [PATCH 26/28] Revert "replace list declaration with list literal (faster)" This reverts commit 9733d575bfd2caa19df0465a0fac9e5f352303b8. 
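As with the previous revert, behaviour is unchanged; a small sketch with assumed sample values contrasts the append-based spelling being restored with the list literal the reverted commit had introduced:

```python
# Assumed sample peak positions, standing in for the real peaks_neg_fin.
peaks_neg_fin = [12, 40, 77]
i = 0

# Spelling restored by this revert: declare an empty list, then append.
forest = []
forest.append(peaks_neg_fin[i + 1])

# Spelling from the reverted commit: a single list literal.
forest_literal = [peaks_neg_fin[i + 1]]

assert forest == forest_literal == [40]
```

The literal form skips one `append` call — the micro-optimisation the reverted commit's subject line refers to — but both leave the list in the same state.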
--- src/eynollah/utils/__init__.py | 18 ++++++++++++------ src/eynollah/utils/separate_lines.py | 6 ++++-- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 9cf30b0..d6c927b 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -151,7 +151,8 @@ def return_x_start_end_mothers_childs_and_type_of_reading_order( min_ys=np.min(y_sep) max_ys=np.max(y_sep) - y_mains= [min_ys] + y_mains=[] + y_mains.append(min_ys) y_mains_sep_ohne_grenzen=[] for ii in range(len(new_main_sep_y)): @@ -524,7 +525,8 @@ def find_num_col(regions_without_separators, num_col_classifier, tables, multipl # print(forest[np.argmin(z[forest]) ] ) if not isNaN(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) - forest = [peaks_neg_fin[i + 1]] + forest = [] + forest.append(peaks_neg_fin[i + 1]) if i == (len(peaks_neg_fin) - 1): # print(print(forest[np.argmin(z[forest]) ] )) if not isNaN(forest[np.argmin(z[forest])]): @@ -692,7 +694,8 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8): # print(forest[np.argmin(z[forest]) ] ) if not isNaN(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) - forest = [peaks_neg_fin[i + 1]] + forest = [] + forest.append(peaks_neg_fin[i + 1]) if i == (len(peaks_neg_fin) - 1): # print(print(forest[np.argmin(z[forest]) ] )) if not isNaN(forest[np.argmin(z[forest])]): @@ -1343,7 +1346,8 @@ def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new( return img_p_in, special_separators def return_points_with_boundies(peaks_neg_fin, first_point, last_point): - peaks_neg_tot = [first_point] + peaks_neg_tot = [] + peaks_neg_tot.append(first_point) for ii in range(len(peaks_neg_fin)): peaks_neg_tot.append(peaks_neg_fin[ii]) peaks_neg_tot.append(last_point) @@ -1511,7 +1515,8 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, args_cy_splitter=np.argsort(cy_main_splitters) cy_main_splitters_sort=cy_main_splitters[args_cy_splitter] - splitter_y_new= [0] + splitter_y_new=[] + splitter_y_new.append(0) for i in range(len(cy_main_splitters_sort)): splitter_y_new.append( cy_main_splitters_sort[i] ) splitter_y_new.append(region_pre_p.shape[0]) @@ -1587,7 +1592,8 @@ def return_boxes_of_images_by_order_of_reading_new( num_col, peaks_neg_fin = find_num_col( regions_without_separators[splitter_y_new[i]:splitter_y_new[i+1], :], num_col_classifier, tables, multiplier=3.) 
- peaks_neg_fin_early= [0] + peaks_neg_fin_early=[] + peaks_neg_fin_early.append(0) #print(peaks_neg_fin,'peaks_neg_fin') for p_n in peaks_neg_fin: peaks_neg_fin_early.append(p_n) diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 275bfac..22ef00d 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1227,7 +1227,8 @@ def separate_lines_new_inside_tiles(img_path, thetha): if diff_peaks[i] > cut_off: if not np.isnan(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) - forest = [peaks_neg[i + 1]] + forest = [] + forest.append(peaks_neg[i + 1]) if i == (len(peaks_neg) - 1): if not np.isnan(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) @@ -1247,7 +1248,8 @@ def separate_lines_new_inside_tiles(img_path, thetha): if diff_peaks_pos[i] > cut_off: if not np.isnan(forest[np.argmax(z[forest])]): peaks_pos_true.append(forest[np.argmax(z[forest])]) - forest = [peaks[i + 1]] + forest = [] + forest.append(peaks[i + 1]) if i == (len(peaks) - 1): if not np.isnan(forest[np.argmax(z[forest])]): peaks_pos_true.append(forest[np.argmax(z[forest])]) From 9dbac280cc4fc7914bd022f8b07665d1f4d70051 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Thu, 30 Oct 2025 22:16:53 +0100 Subject: [PATCH 27/28] Revert "remove unnecessary backslash" This reverts commit f212ffa22ddfcdf953ec133d21dce900136cd7c1. --- src/eynollah/utils/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index d6c927b..5ccb2af 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1388,7 +1388,8 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, gray = cv2.bitwise_not(separators_closeup_n_binary) gray=gray.astype(np.uint8) - bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -2) + bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \ + cv2.THRESH_BINARY, 15, -2) horizontal = np.copy(bw) vertical = np.copy(bw) From f90259d6e2f9360cf31b4e3b83a83bbd2b9cf544 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Thu, 30 Oct 2025 22:24:54 +0100 Subject: [PATCH 28/28] fix docs links --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index a663215..0283fe9 100644 --- a/README.md +++ b/README.md @@ -134,7 +134,7 @@ If no further option is set, the tool performs layout detection of main regions and marginals). The best output quality is achieved when RGB images are used as input rather than greyscale or binarized images. -Additional documentation can be found in [`usage.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). +Additional documentation can be found in [`usage.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/usage.md). ### Binarization @@ -199,7 +199,7 @@ eynollah machine-based-reading-order \ ## Use as OCR-D processor -See [`ocrd.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). +See [`ocrd.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/ocrd.md). ## How to cite