diff --git a/.github/workflows/build-docker.yml b/.github/workflows/build-docker.yml index e1b1a75..d77958b 100644 --- a/.github/workflows/build-docker.yml +++ b/.github/workflows/build-docker.yml @@ -2,7 +2,7 @@ name: CD on: push: - branches: [ "main" ] + branches: [ "master" ] workflow_dispatch: # run manually jobs: @@ -35,7 +35,7 @@ jobs: password: ${{ secrets.DOCKERIO_PASSWORD }} - name: Build the Docker image # build both tags at the same time - run: make docker DOCKER_TAG="docker.io/ocrd/eynollah ghcr.io/qurator-spk/eynollah" + run: make docker DOCKER_TAG="docker.io/ocrd/eynollah -t ghcr.io/qurator-spk/eynollah" - name: Test the Docker image run: docker run --rm ocrd/eynollah ocrd-eynollah-segment -h - name: Push to Dockerhub diff --git a/.github/workflows/pypi.yml b/.github/workflows/pypi.yml deleted file mode 100644 index 248f4ef..0000000 --- a/.github/workflows/pypi.yml +++ /dev/null @@ -1,24 +0,0 @@ -name: PyPI CD - -on: - release: - types: [published] - workflow_dispatch: - -jobs: - pypi-publish: - name: upload release to PyPI - runs-on: ubuntu-latest - permissions: - # IMPORTANT: this permission is mandatory for Trusted Publishing - id-token: write - steps: - - uses: actions/checkout@v4 - - name: Set up Python - uses: actions/setup-python@v5 - - name: Build package - run: make build - - name: Publish package distributions to PyPI - uses: pypa/gh-action-pypi-publish@release/v1 - with: - verbose: true diff --git a/.github/workflows/test-eynollah.yml b/.github/workflows/test-eynollah.yml index 82de94d..b27586c 100644 --- a/.github/workflows/test-eynollah.yml +++ b/.github/workflows/test-eynollah.yml @@ -24,59 +24,36 @@ jobs: sudo rm -rf "$AGENT_TOOLSDIRECTORY" df -h - uses: actions/checkout@v4 - - # - name: Lint with ruff - # uses: astral-sh/ruff-action@v3 - # with: - # src: "./src" - - - name: Try to restore models_eynollah - uses: actions/cache/restore@v4 - id: all_model_cache + - uses: actions/cache@v4 + id: seg_model_cache with: path: models_eynollah - key: models_eynollah-${{ hashFiles('src/eynollah/model_zoo/default_specs.py') }} - + key: ${{ runner.os }}-models + - uses: actions/cache@v4 + id: bin_model_cache + with: + path: default-2021-03-09 + key: ${{ runner.os }}-modelbin - name: Download models - if: steps.all_model_cache.outputs.cache-hit != 'true' - run: | - make models - ls -la models_eynollah - - - uses: actions/cache/save@v4 - if: steps.all_model_cache.outputs.cache-hit != 'true' - with: - path: models_eynollah - key: models_eynollah-${{ hashFiles('src/eynollah/model_zoo/default_specs.py') }} - + if: steps.seg_model_cache.outputs.cache-hit != 'true' || steps.bin_model_cache.outputs.cache-hit != 'true' + run: make models - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - # - uses: actions/cache@v4 - # with: - # path: | - # path/to/dependencies - # some/other/dependencies - # key: ${{ runner.os }}-${{ hashFiles('**/lockfiles') }} - - name: Install dependencies run: | python -m pip install --upgrade pip make install-dev EXTRAS=OCR,plotting - make deps-test EXTRAS=OCR,plotting - + make deps-test - name: Test with pytest run: make coverage PYTEST_ARGS="-vv --junitxml=pytest.xml" - - name: Get coverage results run: | coverage report --format=markdown >> $GITHUB_STEP_SUMMARY coverage html coverage json coverage xml - - name: Store coverage results uses: actions/upload-artifact@v4 with: @@ -86,15 +63,12 @@ jobs: pytest.xml coverage.xml coverage.json - - name: Upload coverage results uses: 
codecov/codecov-action@v4 with: files: coverage.xml fail_ci_if_error: false - - name: Test standalone CLI run: make smoke-test - - name: Test OCR-D CLI run: make ocrd-test diff --git a/.gitignore b/.gitignore index 49835a7..5236dde 100644 --- a/.gitignore +++ b/.gitignore @@ -2,13 +2,6 @@ __pycache__ sbb_newspapers_org_image/pylint.log models_eynollah* -models_ocr* -models_layout* -default-2021-03-09 output.html /build /dist -*.tif -*.sw? -TAGS -uv.lock diff --git a/CHANGELOG.md b/CHANGELOG.md index 53e491c..f7ce6bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,181 +5,12 @@ Versioned according to [Semantic Versioning](http://semver.org/). ## Unreleased -## [0.8.0] - 2026-05-11 - -* Optimize model performance - * `multiprocessing.SpawnProcess` predictor wrapper for models to have communication with Tensorflow in a separate subprocess in a task queue with parallel jobs configurable via `--num-jobs` and maximum number of failed jobs via `--halt-fail` - * Keep batch size low enough for processing fitting into common 8GB GPU (with model-dependent batch resizing prepared but not yet active) - * GPU device can be selected manually with `--device` - * Handle image resizing and tiling in GPU as much as possible to avoid overhead of switching between GPU and CPU - * jit-compile and precompile models where possible (non-autosized, non-patched Keras models) - * Fix bugs and homogenize internal labels related to differing labels for early layout and different stages of full layout detection - * Replace `Lambda` layers with `ZeroPadding2D`, improving size and optimizability of models for `eynollah layout` -* Improved training - * Use connected components for loss function - * Integrate with Tensorboard to observe model training progress, including plots and visualizing intermediate evaluation results -* Simplified model usage - * Models can be overridden individually, so any model trained with `eynollah-training` can replace any model in the [distributions on zenodo](https://zenodo.org/records/17727267) - * `--model` is a CLI option of the `eynollah` root CLI now and should point to the same directory for all subcommands -* Improved reading order detection heuristics -* Improved drop capital, marginalia and column detection -* Fixing bugs in polygon handling and image operations - * No more self-intersecting polygons - * Correct rotation implementation, enlarging/shrinking canvas as necessary - * Use actual area of a polygon instead of length of polygon path or first candidate for comparisons -* Improved PAGE-XML serialization - * Annotate column classifier result in `/PcGts/Page/@custom` (Transkribus convention) and `/PcGts/Metadata/Comment` (QURATOR convention) - * Annotate page skew in `/PcGts/Page/@orientation` - * Calculate and annotate confidences as `Coords/@conf` for regions, lines, images and tables -* Massive refactoring and code quality improvement - * deduplication, idiomatic python, clean parallel processing, class reuse, consistent and meaningful naming - - -**NOTE** We are aware of a possible issue with regard to the cropping of images. It appears that we have not consistently cropped images for training. This can lead to suboptimal results for cropped images. If you experience quality issues with `eynollah layout`, try setting the `-ipe/--ignore_page_extraction` option to skip the built-in cropping. We will rectify this in the next training runs. 
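The `multiprocessing.SpawnProcess` predictor wrapper summarized in the 0.8.0 entry above is easiest to picture with a small sketch. The following is a hedged illustration of the general pattern only (spawned workers, task queue, failure budget), not Eynollah's actual implementation; the names `run_jobs` and `halt_fail` and the stand-in `predict` function are invented for the example:

```python
import multiprocessing as mp

def _worker(task_q, result_q):
    # In the real tool this subprocess would own the Tensorflow model;
    # here a trivial stand-in "predictor" just doubles its input.
    predict = lambda x: x * 2
    for task_id, payload in iter(task_q.get, None):  # None is the shutdown sentinel
        try:
            result_q.put((task_id, predict(payload), None))
        except Exception as exc:  # report failures instead of dying silently
            result_q.put((task_id, None, exc))

def run_jobs(payloads, num_jobs=2, halt_fail=3):
    ctx = mp.get_context("spawn")  # spawn: no Tensorflow state inherited via fork
    task_q, result_q = ctx.Queue(), ctx.Queue()
    workers = [ctx.Process(target=_worker, args=(task_q, result_q))
               for _ in range(num_jobs)]
    for w in workers:
        w.start()
    for task in enumerate(payloads):
        task_q.put(task)
    for _ in workers:          # one sentinel per worker
        task_q.put(None)
    results, failures = {}, 0
    for _ in payloads:
        task_id, value, exc = result_q.get()
        if exc is None:
            results[task_id] = value
        else:
            failures += 1
            if failures >= halt_fail:  # failure budget, like --halt-fail
                raise RuntimeError("too many failed jobs")
    for w in workers:
        w.join()
    return results

if __name__ == "__main__":
    print(run_jobs([1, 2, 3, 4], num_jobs=2))  # {0: 2, 1: 4, 2: 6, 3: 8} (order may vary)
```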
- -## [0.7.0] - 2026-01-30 - -Added: - - * "Model zoo", central place to describe and load models, #207 - * Training code for the CNN/RNN OCR model - -Changed: - - * Lint training code, #204 - * Update documentation: README, pyproject.toml metadata, guides in `docs/`, #209 - - -## [0.6.0] - 2025-10-17 - -Added: - - * `eynollah-training` CLI and docs for training the models, #187, #193, https://github.com/qurator-spk/sbb_pixelwise_segmentation/tree/unifying-training-models - -Fixed: - - * `join_polygons` always returning Polygon, not MultiPolygon, #203 - -## [0.6.0rc2] - 2025-10-14 - -Fixed: - - * Prevent OOM GPU error by avoiding loading the `region_fl` model, #199 - * XML output: encoding should be `utf-8`, not `utf8`, #196, #197 - -## [0.6.0rc1] - 2025-10-10 - -Fixed: - - * continue processing when no columns detected but text regions exist - * convert marginalia to main text if no main text is present - * reset deskewing angle to 0° when text covers <30% image area and detected angle >45° - * :fire: polygons: avoid invalid paths (use `Polygon.buffer()` instead of dilation etc.) - * `return_boxes_of_images_by_order_of_reading_new`: avoid Numpy.dtype mismatch, simplify - * `return_boxes_of_images_by_order_of_reading_new`: log any exceptions instead of ignoring - * `filter_contours_without_textline_inside`: avoid removing from duplicate lists twice - * `get_marginals`: exit early if no peaks found to avoid spurious overlap mask - * `get_smallest_skew`: after shifting search range of rotation angle, use overall best result - * Dockerfile: fix CUDA installation (cuDNN contested between Torch and TF due to extra OCR) - * OCR: re-instate missing methods and fix `utils_ocr` function calls - * mbreorder/enhancement CLIs: missing imports - * :fire: writer: `SeparatorRegion` needs `SeparatorRegionType` (not `ImageRegionType`), f458e3e - * tests: switch from `pytest-subtests` to `parametrize` so we can use `pytest-isolate` - (so CUDA memory gets freed between tests if running on GPU) - -Added: - * :fire: `layout` CLI: new option `--model_version` to override default choices - * test coverage for OCR options in `layout` - * test coverage for table detection in `layout` - * CI linting with ruff - -Changed: - - * polygons: slightly widen for regions and lines, increase for separators - * various refactorings, some code style and identifier improvements - * deskewing/multiprocessing: switch back to ProcessPoolExecutor (faster), - but use shared memory if necessary, and switch back from `loky` to stdlib, - and shutdown in `del()` instead of `atexit` - * :fire: OCR: switch CNN-RNN model to `20250930` version compatible with TF 2.12 on CPU, too - * OCR: allow running `-tr` without `-fl`, too - * :fire: writer: use `@type='heading'` instead of `'header'` for headings - * :fire: performance gains via refactoring (simplification, less copy-code, vectorization, - avoiding unused calculations, avoiding unnecessary 3-channel image operations) - * :fire: heuristic reading order detection: many improvements - - contour vs splitter box matching: - * contour must be contained in box exactly instead of heuristics - * make fallback center matching, center must be contained in box - - original vs deskewed contour matching: - * same min-area filter on both sides - * similar area score in addition to center proximity - * avoid duplicate and missing mappings by allowing N:M - matches and splitting+joining where necessary - * CI: update+improve model caching - - -## [0.5.0] - 2025-09-26 - -Fixed: - - * restoring the 
contour in the original image caused an error due to an empty tuple, #154 - * removed NumPy warnings when calculating sigma and mean (fixed issue #158) - * fixed bug in `separate_lines.py`, #124 - * Drop capitals are now handled separately from their corresponding textline - * Marginals are now divided into left and right. Their reading order is written first for left marginals, then for right marginals, and within each side from top to bottom - * Added a new page extraction model. Instead of bounding boxes, it outputs page contours in the XML file, improving results for skewed pages - * Improved reading order for cases where a textline is segmented into multiple smaller textlines - -Changed: - - * CLIs: read only allowed filename suffixes (image or XML) with `--dir_in` - * CLIs: make all output options required, and `-i` / `-di` required but mutually exclusive - * ocr CLI: drop redundant `-brb` in favour of just `-dib` - * APIs: move all input/output path options from class (kwarg and attribute) to `run` kwarg - * layout textlines: polygonal also without `-cl` - -Added: - - * `eynollah machine-based-reading-order` CLI to run reading order detection, #175 - * `eynollah enhancement` CLI to run image enhancement, #175 - * Improved models for page extraction and reading order detection, #175 - * For the lightweight version (layout and textline detection), thresholds are now assigned to the artificial class. Users can apply these thresholds to improve detection of isolated textlines and regions. To counteract the drawback of thresholding, the skeleton of the artificial class is used to keep lines as thin as possible (resolved issues #163 and #161) - * Added and integrated trained CNN-RNN OCR models - * Added and integrated a trained TrOCR model - * Improved OCR detection to support vertical and curved textlines - * Introduced a new machine-based reading order model with rotation augmentation - * Optimized reading order speed by clustering text regions that belong to the same block, maintaining top-to-bottom order - * Implemented text merging across textlines based on hyphenation when a line ends with a hyphen - * Integrated image enhancement as a separate use case - * Added reading order functionality on the layout level as a separate use case - * CNN-RNN OCR models provide confidence scores for predictions - * Added OCR visualization: predicted OCR can be overlaid on an image of the same size as the input - * Introduced a threshold value for CNN-RNN OCR models, allowing users to filter out low-confidence textline predictions - * For OCR, users can specify a single model by name instead of always using the default model - * Under the OCR use case, if Ground Truth XMLs and images are available, textline image and corresponding text extraction can now be performed - -Merged PRs: - - * better machine based reading order + layout and textline + ocr by @vahidrezanezhad in https://github.com/qurator-spk/eynollah/pull/175 - * CI: pypi by @kba in https://github.com/qurator-spk/eynollah/pull/154 - * CI: Use most recent actions/setup-python@v5 by @kba in https://github.com/qurator-spk/eynollah/pull/157 - * update docker by @bertsky in https://github.com/qurator-spk/eynollah/pull/159 - * Ocrd fixes by @kba in https://github.com/qurator-spk/eynollah/pull/167 - * Updating readme for eynollah use cases cli by @kba in https://github.com/qurator-spk/eynollah/pull/166 - * OCR-D processor: expose reading_order_machine_based by @bertsky in https://github.com/qurator-spk/eynollah/pull/171 - * prepare release v0.5.0: fix 
logging by @bertsky in https://github.com/qurator-spk/eynollah/pull/180 - * mb_ro_on_layout: remove copy-pasta code not actually used by @kba in https://github.com/qurator-spk/eynollah/pull/181 - * prepare release v0.5.0: improve CLI docstring, refactor I/O path options from class to run kwargs, increase test coverage @bertsky in #182 - * prepare release v0.5.0: fix for OCR doit subtest by @bertsky in https://github.com/qurator-spk/eynollah/pull/183 - * Prepare release v0.5.0 by @kba in https://github.com/qurator-spk/eynollah/pull/178 - * updating eynollah README, how to use it for use cases by @vahidrezanezhad in https://github.com/qurator-spk/eynollah/pull/156 - * add feedback to command line interface by @michalbubula in https://github.com/qurator-spk/eynollah/pull/170 - ## [0.4.0] - 2025-04-07 Fixed: * allow empty imports for optional dependencies - * avoid Numpy warnings (empty slices etc.) + * avoid Numpy warnings (empty slices etc) * remove deprecated Numpy types * binarization CLI: make `dir_in` usable again @@ -352,13 +183,6 @@ Fixed: Initial release -[0.8.0]: ../../compare/v0.8.0...v0.7.0 -[0.7.0]: ../../compare/v0.7.0...v0.6.0 -[0.6.0]: ../../compare/v0.6.0...v0.6.0rc2 -[0.6.0rc2]: ../../compare/v0.6.0rc2...v0.6.0rc1 -[0.6.0rc1]: ../../compare/v0.6.0rc1...v0.5.0 -[0.5.0]: ../../compare/v0.5.0...v0.4.0 -[0.4.0]: ../../compare/v0.4.0...v0.3.1 [0.3.1]: ../../compare/v0.3.1...v0.3.0 [0.3.0]: ../../compare/v0.3.0...v0.2.0 [0.2.0]: ../../compare/v0.2.0...v0.1.0 diff --git a/Dockerfile b/Dockerfile index a15776e..4785fc1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -36,12 +36,8 @@ COPY . . COPY ocrd-tool.json . # prepackage ocrd-tool.json as ocrd-all-tool.json RUN ocrd ocrd-tool ocrd-tool.json dump-tools > $(dirname $(ocrd bashlib filename))/ocrd-all-tool.json -# prepackage ocrd-all-module-dir.json -RUN ocrd ocrd-tool ocrd-tool.json dump-module-dirs > $(dirname $(ocrd bashlib filename))/ocrd-all-module-dir.json # install everything and reduce image size RUN make install EXTRAS=OCR && rm -rf /build/eynollah -# fixup for broken cuDNN installation (Torch pulls in 8.5.0, which is incompatible with Tensorflow) -RUN pip install nvidia-cudnn-cu11==8.6.0.163 # smoke test RUN eynollah --help diff --git a/Makefile b/Makefile index f54cf5b..5f2bf34 100644 --- a/Makefile +++ b/Makefile @@ -2,22 +2,19 @@ PYTHON ?= python3 PIP ?= pip3 EXTRAS ?= -DOCKER_BASE_IMAGE ?= docker.io/ocrd/core-cuda-tf2:v3.13.0 -DOCKER_TAG ?= ocrd/eynollah -DOCKER ?= docker -WGET = wget -O +# DOCKER_BASE_IMAGE = artefakt.dev.sbb.berlin:5000/sbb/ocrd_core:v2.68.0 +DOCKER_BASE_IMAGE = docker.io/ocrd/core-cuda-tf2:v3.3.0 +DOCKER_TAG = ocrd/eynollah #SEG_MODEL := https://qurator-data.de/eynollah/2021-04-25/models_eynollah.tar.gz #SEG_MODEL := https://qurator-data.de/eynollah/2022-04-05/models_eynollah_renamed.tar.gz -# SEG_MODEL := https://qurator-data.de/eynollah/2022-04-05/models_eynollah.tar.gz +SEG_MODEL := https://qurator-data.de/eynollah/2022-04-05/models_eynollah.tar.gz #SEG_MODEL := https://github.com/qurator-spk/eynollah/releases/download/v0.3.0/models_eynollah.tar.gz #SEG_MODEL := https://github.com/qurator-spk/eynollah/releases/download/v0.3.1/models_eynollah.tar.gz -#SEG_MODEL := https://zenodo.org/records/17194824/files/models_layout_v0_5_0.tar.gz?download=1 -EYNOLLAH_MODELS_URL := https://zenodo.org/records/17727267/files/models_all_v0_8_0.zip -EYNOLLAH_MODELS_ZIP = $(notdir $(EYNOLLAH_MODELS_URL)) -EYNOLLAH_MODELS_DIR = $(EYNOLLAH_MODELS_ZIP:%.zip=%) -PYTEST_ARGS ?= -vv --isolate +BIN_MODEL := 
https://github.com/qurator-spk/sbb_binarization/releases/download/v0.0.11/saved_model_2021_03_09.zip + +PYTEST_ARGS ?= -vv # BEGIN-EVAL makefile-parser --make-help Makefile @@ -30,8 +27,7 @@ help: @echo " install Install package with pip" @echo " install-dev Install editable with pip" @echo " deps-test Install test dependencies with pip" - @echo " models Download and extract models to $(CURDIR):" - @echo " $(EYNOLLAH_MODELS_DIR)" + @echo " models Download and extract models to $(CURDIR)/models_eynollah" @echo " smoke-test Run simple CLI check" @echo " ocrd-test Run OCR-D CLI check" @echo " test Run unit tests" @@ -40,22 +36,29 @@ help: @echo " EXTRAS comma-separated list of features (like 'OCR,plotting') for 'install' [$(EXTRAS)]" @echo " DOCKER_TAG Docker image tag for 'docker' [$(DOCKER_TAG)]" @echo " PYTEST_ARGS pytest args for 'test' (Set to '-s' to see log output during test execution, '-vv' to see individual tests. [$(PYTEST_ARGS)]" - @echo " ALL_MODELS URL of archive of all models [$(ALL_MODELS)]" + @echo " SEG_MODEL URL of 'models' archive to download for segmentation 'test' [$(SEG_MODEL)]" + @echo " BIN_MODEL URL of 'models' archive to download for binarization 'test' [$(BIN_MODEL)]" @echo "" # END-EVAL -# Download and extract models to $(PWD)/models_layout_v0_6_0 -models: $(EYNOLLAH_MODELS_DIR) -# do not download these files if we already have the directories -.INTERMEDIATE: $(EYNOLLAH_MODELS_ZIP) +# Download and extract models to $(PWD)/models_eynollah +models: models_eynollah default-2021-03-09 -$(EYNOLLAH_MODELS_ZIP): - $(WGET) $@ $(EYNOLLAH_MODELS_URL) +models_eynollah: models_eynollah.tar.gz + tar zxf models_eynollah.tar.gz -$(EYNOLLAH_MODELS_DIR): $(EYNOLLAH_MODELS_ZIP) - unzip $< +models_eynollah.tar.gz: + wget $(SEG_MODEL) + +default-2021-03-09: $(notdir $(BIN_MODEL)) + unzip $(notdir $(BIN_MODEL)) + mkdir $@ + mv $(basename $(notdir $(BIN_MODEL))) $@ + +$(notdir $(BIN_MODEL)): + wget $(BIN_MODEL) build: $(PIP) install build @@ -69,48 +72,41 @@ install: install-dev: $(PIP) install -e .$(and $(EXTRAS),[$(EXTRAS)]) -deps-test: +deps-test: models_eynollah $(PIP) install -r requirements-test.txt smoke-test: TMPDIR != mktemp -d -smoke-test: tests/resources/2files/kant_aufklaerung_1784_0020.tif +smoke-test: tests/resources/kant_aufklaerung_1784_0020.tif # layout analysis: - eynollah -m $(CURDIR) layout -i $< -o $(TMPDIR) + eynollah layout -i $< -o $(TMPDIR) -m $(CURDIR)/models_eynollah fgrep -q http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15 $(TMPDIR)/$(basename $( Document Layout Analysis with Deep Learning and Heuristics -> Document Layout Analysis, Binarization and OCR with Deep Learning and Heuristics - -[![Python Versions](https://img.shields.io/pypi/pyversions/eynollah.svg)](https://pypi.python.org/pypi/eynollah) [![PyPI Version](https://img.shields.io/pypi/v/eynollah)](https://pypi.org/project/eynollah/) [![GH Actions Test](https://github.com/qurator-spk/eynollah/actions/workflows/test-eynollah.yml/badge.svg)](https://github.com/qurator-spk/eynollah/actions/workflows/test-eynollah.yml) [![GH Actions Deploy](https://github.com/qurator-spk/eynollah/actions/workflows/build-docker.yml/badge.svg)](https://github.com/qurator-spk/eynollah/actions/workflows/build-docker.yml) -[![License: ASL](https://img.shields.io/pypi/l/eynollah)](https://opensource.org/license/apache-2-0/) +[![License: ASL](https://img.shields.io/github/license/qurator-spk/eynollah)](https://opensource.org/license/apache-2-0/) 
[![DOI](https://img.shields.io/badge/DOI-10.1145%2F3604951.3605513-red)](https://doi.org/10.1145/3604951.3605513) ![](https://user-images.githubusercontent.com/952378/102350683-8a74db80-3fa5-11eb-8c7e-f743f7d6eae2.jpg) ## Features -* Document layout analysis using pixelwise segmentation models with support for 10 segmentation classes: +* Support for up to 10 segmentation classes: * background, [page border](https://ocr-d.de/en/gt-guidelines/trans/lyRand.html), [text region](https://ocr-d.de/en/gt-guidelines/trans/lytextregion.html#textregionen__textregion_), [text line](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_TextLineType.html), [header](https://ocr-d.de/en/gt-guidelines/trans/lyUeberschrift.html), [image](https://ocr-d.de/en/gt-guidelines/trans/lyBildbereiche.html), [separator](https://ocr-d.de/en/gt-guidelines/trans/lySeparatoren.html), [marginalia](https://ocr-d.de/en/gt-guidelines/trans/lyMarginalie.html), [initial](https://ocr-d.de/en/gt-guidelines/trans/lyInitiale.html), [table](https://ocr-d.de/en/gt-guidelines/trans/lyTabellen.html) -* Textline segmentation to bounding boxes or polygons (contours) including for curved lines and vertical text -* Document image binarization with pixelwise segmentation or hybrid CNN-Transformer models -* Text recognition (OCR) with CNN-RNN or TrOCR models -* Detection of reading order (left-to-right or right-to-left) using heuristics or trainable models +* Support for various image optimization operations: + * cropping (border detection), binarization, deskewing, dewarping, scaling, enhancing, resizing +* Text line segmentation to bounding boxes or polygons (contours) including for curved lines and vertical text +* Detection of reading order (left-to-right or right-to-left) * Output in [PAGE-XML](https://github.com/PRImA-Research-Lab/PAGE-XML) * [OCR-D](https://github.com/qurator-spk/eynollah#use-as-ocr-d-processor) interface -:warning: Development is focused on achieving the best quality of results for a wide variety of historical -documents using a combination of multiple deep learning models and heuristics; therefore processing can be slow. +:warning: Development is currently focused on achieving the best possible quality of results for a wide variety of historical documents and therefore processing can be very slow. We aim to improve this, but contributions are welcome. ## Installation Python `3.8-3.11` with Tensorflow `<2.13` on Linux are currently supported. -For (limited) GPU support the CUDA toolkit needs to be installed. -A working config is CUDA `11.8` with cuDNN `8.6`. + +For (limited) GPU support the CUDA toolkit needs to be installed. You can either install from PyPI @@ -44,54 +41,19 @@ cd eynollah; pip install -e . Alternatively, you can run `make install` or `make install-dev` for editable installation. -To also install the dependencies for the OCR engines: - -``` -pip install "eynollah[OCR]" -# or -make install EXTRAS=OCR -``` - -### Docker - -Use - -``` -docker pull ghcr.io/qurator-spk/eynollah:latest -``` - -When using Eynollah with Docker, see [`docker.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/docker.md). - ## Models +Pre-trained models can be downloaded from [qurator-data.de](https://qurator-data.de/eynollah/) or [huggingface](https://huggingface.co/SBB?search_models=eynollah). -Pretrained models can be downloaded from [Zenodo](https://zenodo.org/records/17727267) or [Hugging Face](https://huggingface.co/SBB?search_models=eynollah). 
+For documentation on methods and models, have a look at [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). -For model documentation and model cards, see [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). - -## Training - -To train your own model with Eynollah, see [`train.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/train.md) and use the tools in the [`train`](https://github.com/qurator-spk/eynollah/tree/main/train) folder. +## Train +In case you want to train your own model with Eynollah, have a look at [`train.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/train.md). ## Usage - -Eynollah supports five use cases: -1. [layout analysis (segmentation)](#layout-analysis), -2. [binarization](#binarization), -3. [image enhancement](#image-enhancement), -4. [text recognition (OCR)](#ocr), and -5. [reading order detection](#reading-order-detection). - -Some example outputs can be found in [`examples.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/examples.md). - -### Layout Analysis - -The layout analysis module is responsible for detecting layout elements, identifying text lines, and determining reading -order using heuristic methods or a [pretrained model](https://github.com/qurator-spk/eynollah#machine-based-reading-order). - -The command-line interface for layout analysis can be called like this: +The command-line interface can be called like this: ```sh -eynollah layout \ +eynollah \ -i <image file> | -di <image directory> \ -o <output directory> \ -m <model directory> \ @@ -100,102 +62,57 @@ The following options can be used to further configure the processing: -| option | description | -|-------------------|:--------------------------------------------------------------------------------------------| -| `-fl` | full layout analysis including all steps and segmentation classes (recommended) | -| `-tab` | apply table detection | -| `-ae` | apply enhancement (the resulting image is saved to the output directory) | -| `-as` | apply scaling | -| `-cl` | apply contour detection for curved text lines instead of bounding boxes | -| `-ib` | apply binarization (the resulting image is saved to the output directory) | -| `-ep` | enable plotting (MUST always be used with `-sl`, `-sd`, `-sa`, `-si` or `-ae`) | -| `-ho` | ignore headers for reading order detection | -| `-si <directory>` | save image regions detected to this directory | -| `-sd <directory>` | save deskewed image to this directory | -| `-sl <directory>` | save layout prediction as plot to this directory | -| `-sp <directory>` | save cropped page image to this directory | -| `-sa <directory>` | save all (plot, enhanced/binary image, layout) to this directory | -| `-thart` | threshold of artificial class in the case of textline detection. The default value is 0.1 | -| `-tharl` | threshold of artificial class in the case of layout detection. 
The default value is 0.1 | -| `-ncu` | upper limit of columns in document image | -| `-ncl` | lower limit of columns in document image | -| `-slro` | skip layout detection and reading order | -| `-romb` | apply machine based reading order detection | -| `-ipe` | ignore page extraction | +| option | description | +|-------------------|:-------------------------------------------------------------------------------| +| `-fl` | full layout analysis including all steps and segmentation classes | +| `-light` | lighter and faster but simpler method for main region detection and deskewing | +| `-tab` | apply table detection | +| `-ae` | apply enhancement (the resulting image is saved to the output directory) | +| `-as` | apply scaling | +| `-cl` | apply contour detection for curved text lines instead of bounding boxes | +| `-ib` | apply binarization (the resulting image is saved to the output directory) | +| `-ep` | enable plotting (MUST always be used with `-sl`, `-sd`, `-sa`, `-si` or `-ae`) | +| `-eoi` | extract only images to output directory (other processing will not be done) | +| `-ho` | ignore headers for reading order detection | +| `-si <directory>` | save image regions detected to this directory | +| `-sd <directory>` | save deskewed image to this directory | +| `-sl <directory>` | save layout prediction as plot to this directory | +| `-sp <directory>` | save cropped page image to this directory | +| `-sa <directory>` | save all (plot, enhanced/binary image, layout) to this directory | + +If no option is set, the tool performs layout detection of main regions (background, text, images, separators and marginals). +The best output quality is produced when RGB images are used as input rather than greyscale or binarized images. + +#### Use as OCR-D processor + +Eynollah ships with a CLI interface to be used as [OCR-D](https://ocr-d.de) [processor](https://ocr-d.de/en/spec/cli), +formally described in [`ocrd-tool.json`](https://github.com/qurator-spk/eynollah/tree/main/src/eynollah/ocrd-tool.json). + +In this case, the source image file group with (preferably) RGB images should be used as input like this: + + ocrd-eynollah-segment -I OCR-D-IMG -O OCR-D-SEG -P models 2022-04-05 -If no further option is set, the tool performs layout detection of main regions (background, text, images, separators -and marginals). -The best output quality is achieved when RGB images are used as input rather than greyscale or binarized images. - -Additional documentation can be found in [`usage.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/usage.md). - -### Binarization - -The binarization module performs document image binarization using pretrained pixelwise segmentation models. - -The command-line interface for binarization can be called like this: - -```sh -eynollah binarization \ - -i <image file> | -di <image directory> \ - -o <output directory> \ - -m <model directory> -``` - -### Image Enhancement -TODO - -### OCR - -The OCR module performs text recognition using either a CNN-RNN model or a Transformer model. 
- -The command-line interface for OCR can be called like this: - -```sh -eynollah ocr \ - -i <image file> | -di <image directory> \ - -dx <xml directory> \ - -o <output directory> \ - -m <model directory> | --model_name <model path> -``` - -The following options can be used to further configure the OCR processing: - -| option | description | -|-------------------|:-------------------------------------------------------------------------------------------| -| `-dib` | directory of binarized images (file type must be '.png'), prediction with both RGB and bin | -| `-doit` | directory for output images rendered with the predicted text | -| `--model_name` | file path to use specific model for OCR | -| `-trocr` | use transformer ocr model (otherwise cnn_rnn model is used) | -| `-etit` | export textline images and text in xml to output dir (OCR training data) | -| `-nmtc` | cropped textline images will not be masked with textline contour | -| `-bs` | ocr inference batch size. Default batch size is 2 for trocr and 8 for cnn_rnn models | -| `-ds_pref` | add an abbreviation of dataset name to generated training data | -| `-min_conf` | minimum OCR confidence value. OCR with textline conf lower than this will be ignored | +If the input file group is PAGE-XML (from a previous OCR-D workflow step), Eynollah behaves as follows: +- existing regions are kept and ignored (i.e. in effect they might overlap segments from Eynollah results) +- existing annotation (and respective `AlternativeImage`s) are partially _ignored_: + - previous page frame detection (`cropped` images) + - previous derotation (`deskewed` images) + - previous thresholding (`binarized` images) +- if the page-level image nevertheless deviates from the original (`@imageFilename`) + (because some other preprocessing step was in effect like `denoised`), then + the output PAGE-XML will be based on that as new top-level (`@imageFilename`) -### Reading Order Detection -Reading order detection can be performed either as part of layout analysis based on image input, or, currently under -development, based on pre-existing layout analysis data in PAGE-XML format as input. + ocrd-eynollah-segment -I OCR-D-XYZ -O OCR-D-SEG -P models 2022-04-05 -The reading order detection module employs a pretrained model to identify the reading order from layouts represented in PAGE-XML files. + Still, in general, it makes more sense to add other workflow steps **after** Eynollah. -The command-line interface for machine-based reading order can be called like this: - -```sh -eynollah machine-based-reading-order \ - -i <image file> | -di <image directory> \ - -xml <xml file> | -dx <xml directory> \ - -m <model directory> \ - -o <output directory> -``` - -## Use as OCR-D processor - -See [`ocrd.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/ocrd.md). +#### Additional documentation +Please check the [wiki](https://github.com/qurator-spk/eynollah/wiki). ## How to cite +If you find this tool useful in your work, please consider citing our paper: ```bibtex @inproceedings{hip23rezanezhad, diff --git a/docs/docker.md b/docs/docker.md deleted file mode 100644 index 7965622..0000000 --- a/docs/docker.md +++ /dev/null @@ -1,43 +0,0 @@ -## Inference with Docker - - docker pull ghcr.io/qurator-spk/eynollah:latest - -### 1. ocrd resource manager -(just once, to get the models and install them into a named volume for later re-use) - - vol_models=ocrd-resources:/usr/local/share/ocrd-resources - docker run --rm -v $vol_models ocrd/eynollah ocrd resmgr download ocrd-eynollah-segment default - -Now, each time you want to use Eynollah, pass the same resources volume again. -Also, bind-mount some data directory, e.g. 
current working directory $PWD (/data is default working directory in the container). - -Either use standalone CLI (2) or OCR-D CLI (3): - -### 2. standalone CLI -(follow self-help, cf. readme) - - docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah eynollah binarization --help - docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah eynollah layout --help - docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah eynollah ocr --help - -### 3. OCR-D CLI -(follow self-help, cf. readme and https://ocr-d.de/en/spec/cli) - - docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah ocrd-eynollah-segment -h - docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah ocrd-sbb-binarize -h - -Alternatively, just "log in" to the container once and use the commands there: - - docker run --rm -v $vol_models -v $PWD:/data -it ocrd/eynollah bash - -## Training with Docker - -Build the Docker training image - - cd train - docker build -t model-training . - -Run the Docker training image - - cd train - docker run --gpus all -v $PWD:/entry_point_dir model-training diff --git a/docs/examples.md b/docs/examples.md deleted file mode 100644 index 24336b3..0000000 --- a/docs/examples.md +++ /dev/null @@ -1,18 +0,0 @@ -# Examples - -Example outputs of various Eynollah models - -# Binarisation - - - - -# Reading Order Detection - -Input Image -Output Image - -# OCR - -Input ImageOutput Image -Input ImageOutput Image diff --git a/docs/models.md b/docs/models.md index b858630..ac563b0 100644 --- a/docs/models.md +++ b/docs/models.md @@ -1,6 +1,5 @@ # Models documentation - -This suite of 15 models presents a document layout analysis (DLA) system for historical documents implemented by +This suite of 14 models presents a document layout analysis (DLA) system for historical documents implemented by pixel-wise segmentation using a combination of a ResNet50 encoder with various U-Net decoders. In addition, heuristic methods are applied to detect marginals and to determine the reading order of text regions. @@ -18,14 +17,12 @@ Two Arabic/Persian terms form the name of the model suite: عين الله, whic See the flowchart below for the different stages and how they interact: -eynollah_flowchart - +![](https://user-images.githubusercontent.com/952378/100619946-1936f680-331e-11eb-9297-6e8b4cab3c16.png) ## Models ### Image enhancement - Model card: [Image Enhancement](https://huggingface.co/SBB/eynollah-enhancement) This model addresses image resolution, specifically targeting documents with suboptimal resolution. In instances where @@ -33,14 +30,12 @@ the detection of document layout exhibits inadequate performance, the proposed e the quality and clarity of the images, thus facilitating enhanced visual interpretation and analysis. ### Page extraction / border detection - Model card: [Page Extraction/Border Detection](https://huggingface.co/SBB/eynollah-page-extraction) A problem that can negatively affect OCR are black margins around a page caused by document scanning. A deep learning model helps to crop to the page borders by using a pixel-wise segmentation method. ### Column classification - Model card: [Column Classification](https://huggingface.co/SBB/eynollah-column-classifier) This model is a trained classifier that recognizes the number of columns in a document by use of a training set with @@ -48,7 +43,6 @@ manual classification of all documents into six classes with either one, two, th respectively. 
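To make the column classifier's role concrete, here is a hedged sketch of its final step only: mapping a six-way network output to a column count. The probability values are made up and the model's actual preprocessing and architecture are omitted:

```python
import numpy as np

def column_count(class_probs: np.ndarray) -> int:
    # classes 0..5 correspond to 1..6 columns
    return int(np.argmax(class_probs)) + 1

# made-up softmax output: the classifier is most confident in class 1,
# i.e. a two-column page
print(column_count(np.array([0.05, 0.80, 0.08, 0.04, 0.02, 0.01])))  # -> 2
```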
### Binarization - Model card: [Binarization](https://huggingface.co/SBB/eynollah-binarization) This model is designed to tackle the intricate task of document image binarization, which involves segmentation of the @@ -58,7 +52,6 @@ capability of the model enables improved accuracy and reliability in subsequent enhanced document understanding and interpretation. ### Main region detection - Model card: [Main Region Detection](https://huggingface.co/SBB/eynollah-main-regions) This model has employed a different set of labels, including an artificial class specifically designed to encompass the @@ -68,7 +61,6 @@ during the inference phase. By incorporating this methodology, improved efficien model's ability to accurately identify and classify text regions within documents. ### Main region detection (with scaling augmentation) - Model card: [Main Region Detection (with scaling augmentation)](https://huggingface.co/SBB/eynollah-main-regions-aug-scaling) Utilizing scaling augmentation, this model leverages the capability to effectively segment elements of extremely high or @@ -77,14 +69,12 @@ categorizing and isolating such elements, thereby enhancing its overall performa documents with varying scale characteristics. ### Main region detection (with rotation augmentation) - Model card: [Main Region Detection (with rotation augmentation)](https://huggingface.co/SBB/eynollah-main-regions-aug-rotation) This model takes advantage of rotation augmentation. This helps the tool to segment the vertical text regions in a robust way. ### Main region detection (ensembled) - Model card: [Main Region Detection (ensembled)](https://huggingface.co/SBB/eynollah-main-regions-ensembled) The robustness of this model is attained through an ensembling technique that combines the weights from various epochs. @@ -92,19 +82,16 @@ By employing this approach, the model achieves a high level of resilience and st strengths of multiple epochs to enhance its overall performance and deliver consistent and reliable results. ### Full region detection (1,2-column documents) - Model card: [Full Region Detection (1,2-column documents)](https://huggingface.co/SBB/eynollah-full-regions-1column) This model deals with documents comprising of one and two columns. ### Full region detection (3,n-column documents) - Model card: [Full Region Detection (3,n-column documents)](https://huggingface.co/SBB/eynollah-full-regions-3pluscolumn) This model is responsible for detecting headers and drop capitals in documents with three or more columns. ### Textline detection - Model card: [Textline Detection](https://huggingface.co/SBB/eynollah-textline) The method for textline detection combines deep learning and heuristics. In the deep learning part, an image-to-image @@ -119,7 +106,6 @@ segmentation is first deskewed and then the textlines are separated with the sam textline bounding boxes. Later, the strap is rotated back into its original orientation. ### Textline detection (light) - Model card: [Textline Detection Light (simpler but faster method)](https://huggingface.co/SBB/eynollah-textline_light) The method for textline detection combines deep learning and heuristics. In the deep learning part, an image-to-image @@ -133,7 +119,6 @@ enhancing the model's ability to accurately identify and delineate individual te eliminates the need for additional heuristics in extracting textline contours. 
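As a rough illustration of how an artificial boundary class can separate neighbouring textlines in the light method (cf. the changelog note about thresholding the artificial class and skeletonizing it), consider the following hedged sketch. The 0.1 threshold mirrors the `-thart` default from the README; the function name and random test arrays are invented:

```python
import numpy as np
from skimage.morphology import skeletonize

def separate_textlines(textline_prob, artificial_prob, thart=0.1):
    """Carve one-pixel gaps between textlines using the artificial class."""
    textline_mask = textline_prob > 0.5
    # skeletonizing keeps the thresholded boundary as thin as possible,
    # so the separation does not eat into the textlines themselves
    boundary = skeletonize(artificial_prob > thart)
    return textline_mask & ~boundary

rng = np.random.default_rng(0)
lines = separate_textlines(rng.random((64, 64)), rng.random((64, 64)))
print(lines.shape, lines.dtype)  # (64, 64) bool
```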
### Table detection - Model card: [Table Detection](https://huggingface.co/SBB/eynollah-tables) The objective of this model is to perform table segmentation in historical document images. Due to the pixel-wise @@ -143,84 +128,20 @@ effectively identify and delineate tables within the historical document images, enabling subsequent analysis and interpretation. ### Image detection - Model card: [Image Detection](https://huggingface.co/SBB/eynollah-image-extraction) This model is used for the task of illustration detection only. ### Reading order detection - Model card: [Reading Order Detection]() -The model extracts the reading order of text regions from the layout by classifying pairwise relationships between them. A sorting algorithm then determines the overall reading sequence. - -### OCR - -We have trained three OCR models: two CNN-RNN–based models and one transformer-based TrOCR model. The CNN-RNN models are generally faster and provide better results in most cases, though their performance decreases with heavily degraded images. The TrOCR model, on the other hand, is computationally expensive and slower during inference, but it can possibly produce better results on strongly degraded images. - -#### CNN-RNN model: model_eynollah_ocr_cnnrnn_20250805 - -This model is trained on data where most of the samples are in Fraktur German script. - -| Dataset | Input | CER | WER | -|-----------------------|:-------|:-----------|:----------| -| OCR-D-GT-Archiveform | BIN | 0.02147 | 0.05685 | -| OCR-D-GT-Archiveform | RGB | 0.01636 | 0.06285 | - -#### CNN-RNN model: model_eynollah_ocr_cnnrnn_20250904 (Default) - -Compared to the model_eynollah_ocr_cnnrnn_20250805 model, this model is trained on a larger proportion of Antiqua data and achieves superior performance. - -| Dataset | Input | CER | WER | -|-----------------------|:------------|:-----------|:----------| -| OCR-D-GT-Archiveform | BIN | 0.01635 | 0.05410 | -| OCR-D-GT-Archiveform | RGB | 0.01471 | 0.05813 | -| BLN600 | RGB | 0.04409 | 0.08879 | -| BLN600 | Enhanced | 0.03599 | 0.06244 | - - -#### Transformer OCR model: model_eynollah_ocr_trocr_20250919 - -This transformer OCR model is trained on the same data as the model_eynollah_ocr_cnnrnn_20250904 model. - -| Dataset | Input | CER | WER | -|-----------------------|:------------|:-----------|:----------| -| OCR-D-GT-Archiveform | BIN | 0.01841 | 0.05589 | -| OCR-D-GT-Archiveform | RGB | 0.01552 | 0.06177 | -| BLN600 | RGB | 0.06347 | 0.13853 | - -##### Qualitative evaluation of the models - -| | | | | -|:---:|:---:|:---:|:---:| -| Image | cnnrnn_20250805 | cnnrnn_20250904 | trocr_20250919 | - - - -| | | | | -|:---:|:---:|:---:|:---:| -| Image | cnnrnn_20250805 | cnnrnn_20250904 | trocr_20250919 | - - -| | | | | -|:---:|:---:|:---:|:---:| -| Image | cnnrnn_20250805 | cnnrnn_20250904 | trocr_20250919 | - - -| | | | | -|:---:|:---:|:---:|:---:| -| Image | cnnrnn_20250805 | cnnrnn_20250904 | trocr_20250919 | - - +TODO ## Heuristic methods - Additionally, some heuristic methods are employed to further improve the model predictions: - * After border detection, the largest contour is determined by a bounding box, and the image is cropped to these coordinates. -* Unlike the non-light version, where the image is scaled up to help the model better detect the background spaces between text regions, the light version uses down-scaled images. In this case, introducing an artificial class along the boundaries of text regions and text lines has helped to isolate and separate the text regions more effectively. +* For text region detection, the image is scaled up to make it easier for the model to detect background space between text regions. * A minimum area is defined for text regions in relation to the overall image dimensions, so that very small regions that are noise can be filtered out (see the sketch after this list). -* In the non-light version, deskewing is applied at the text-region level (since regions may have different degrees of skew) to improve text-line segmentation results. In contrast, the light version performs deskewing only at the page level to enhance margin detection and heuristic reading-order estimation. -* After deskewing, a calculation of the pixel distribution on the X-axis allows the separation of textlines (foreground) and background pixels (only in non-light version). -* Finally, using the derived coordinates, bounding boxes are determined for each textline (only in non-light version). -* As mentioned above, the reading order can be determined using a model; however, this approach is computationally expensive, time-consuming, and less accurate due to the limited amount of ground-truth data available for training. Therefore, our tool uses a heuristic reading-order detection method as the default. The heuristic approach relies on headers and separators to determine the reading order of text regions. +* Deskewing is applied on the text region level (due to regions having different degrees of skew) in order to improve the textline segmentation result. +* After deskewing, a calculation of the pixel distribution on the X-axis allows the separation of textlines (foreground) and background pixels. +* Finally, using the derived coordinates, bounding boxes are determined for each textline. 
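The min-area heuristic in the list above is simple to demonstrate. Below is a hedged sketch with an invented threshold and helper name; Eynollah's actual ratio and implementation may differ:

```python
import cv2
import numpy as np

def filter_small_regions(region_mask: np.ndarray, min_area_ratio: float = 1e-4):
    """Keep only contours whose area is a minimum fraction of the page area."""
    page_area = region_mask.shape[0] * region_mask.shape[1]
    contours, _ = cv2.findContours(region_mask.astype(np.uint8),
                                   cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return [c for c in contours
            if cv2.contourArea(c) / page_area >= min_area_ratio]

mask = np.zeros((1000, 800), np.uint8)
mask[100:400, 100:700] = 1   # a plausible text region
mask[500:503, 500:504] = 1   # a speckle that should be dropped
print(len(filter_small_regions(mask)))  # -> 1
```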
diff --git a/docs/ocrd.md b/docs/ocrd.md deleted file mode 100644 index 9e7e268..0000000 --- a/docs/ocrd.md +++ /dev/null @@ -1,26 +0,0 @@ -## Use as OCR-D processor - -Eynollah ships with a CLI interface to be used as [OCR-D](https://ocr-d.de) [processor](https://ocr-d.de/en/spec/cli), -formally described in [`ocrd-tool.json`](https://github.com/qurator-spk/eynollah/tree/main/src/eynollah/ocrd-tool.json). - -When using Eynollah in OCR-D, the source image file group with (preferably) RGB images should be used as input like this: - - ocrd-eynollah-segment -I OCR-D-IMG -O OCR-D-SEG -P models eynollah_layout_v0_5_0 - -If the input file group is PAGE-XML (from a previous OCR-D workflow step), Eynollah behaves as follows: -- existing regions are kept and ignored (i.e. in effect they might overlap segments from Eynollah results) -- existing annotation (and respective `AlternativeImage`s) are partially _ignored_: - - previous page frame detection (`cropped` images) - - previous derotation (`deskewed` images) - - previous thresholding (`binarized` images) -- if the page-level image nevertheless deviates from the original (`@imageFilename`) - (because some other preprocessing step was in effect like `denoised`), then - the output PAGE-XML will be based on that as new top-level (`@imageFilename`) - - ocrd-eynollah-segment -I OCR-D-XYZ -O OCR-D-SEG -P models eynollah_layout_v0_5_0 - -In general, it makes more sense to add other workflow steps **after** Eynollah. - -There is also an OCR-D processor for binarization: - - ocrd-sbb-binarize -I OCR-D-IMG -O OCR-D-BIN -P models default-2021-03-09 diff --git a/docs/train.md b/docs/train.md index 9c390bb..9f44a63 100644 --- a/docs/train.md +++ b/docs/train.md @@ -1,93 +1,38 @@ -# Prerequisites - -## 1. 
Install Eynollah with training dependencies - -Clone the repository and install eynollah along with the dependencies necessary for training: - -```sh -git clone https://github.com/qurator-spk/eynollah -cd eynollah -pip install '.[training]' -``` - -## 2. Pretrained encoder - -Download our pretrained weights and add them to a `train/pretrained_model` folder: - -```sh -cd train -wget -O pretrained_model.tar.gz https://zenodo.org/records/17243320/files/pretrained_model_v0_5_1.tar.gz?download=1 -tar xf pretrained_model.tar.gz -``` - -## 3. Example data - -### Binarization -A small sample of training data for a binarization experiment can be found on [Zenodo](https://zenodo.org/records/17243320/files/training_data_sample_binarization_v0_5_1.tar.gz?download=1), -which contains `images` and `labels` folders. - -## 4. Helpful tools - -* [`pagexml2img`](https://github.com/qurator-spk/page2img) -> Tool to extract 2-D or 3-D RGB images from PAGE-XML data. In the former case, the output will be a single 2-D image array in which each class is filled with a pixel value. In the case of a 3-D RGB image, -each class will be defined with a RGB value and, besides the images, a text file of classes will also be produced. -* [`cocoSegmentationToPng`](https://github.com/nightrome/cocostuffapi/blob/17acf33aef3c6cc2d6aca46dcf084266c2778cf0/PythonAPI/pycocotools/cocostuffhelper.py#L130) -> Convert COCO GT or results for a single image to a segmentation map and write it to disk. -* [`ocrd-segment-extract-pages`](https://github.com/OCR-D/ocrd_segment/blob/master/ocrd_segment/extract_pages.py) -> Extract region classes and their colours in mask (pseg) images. Allows the color map as free dict parameter, and comes with a default that mimics PageViewer's coloring for quick debugging; it also warns when regions do overlap. - # Training documentation +This aims to assist users in preparing training datasets, training models, and performing inference with trained models. +We cover various use cases including pixel-wise segmentation, image classification, image enhancement, and machine-based +reading order detection. For each use case, we provide guidance on how to generate the corresponding training dataset. -This document aims to assist users in preparing training datasets, training models, and -performing inference with trained models. We cover various use cases including -pixel-wise segmentation, image classification, image enhancement, and -machine-based reading order detection. For each use case, we provide guidance -on how to generate the corresponding training dataset. The following three tasks can all be accomplished using the code in the +[`train`](https://github.com/qurator-spk/sbb_pixelwise_segmentation/tree/unifying-training-models) directory: -The following three tasks can all be accomplished using the code in the -[`train`](https://github.com/qurator-spk/eynollah/tree/main/train) directory: - -* [Generate training dataset](#generate-training-dataset) -* [Train a model](#train-a-model) -* [Inference with the trained model](#inference-with-the-trained-model) - -## Training, evaluation and output - -The train and evaluation folders should contain subfolders of `images` and `labels`. - -The output folder should be an empty folder where the output model will be written to. +* generate training dataset +* train a model +* inference with the trained model ## Generate training dataset +The script `generate_gt_for_training.py` is used for generating training datasets. 
As the results of the following +command demonstrate, the dataset generator provides three different commands: -The script `generate_gt_for_training.py` is used for generating training datasets. As the results of the following -command demonstrate, the dataset generator provides several subcommands: +`python generate_gt_for_training.py --help` -```sh -eynollah-training generate-gt --help -``` - -The three most important subcommands are: +These three commands are: * image-enhancement * machine-based-reading-order * pagexml2label ### image-enhancement - -Generating a training dataset for image enhancement is quite straightforward. All that is needed is a set of +Generating a training dataset for image enhancement is quite straightforward. All that is needed is a set of high-resolution images. The training dataset can then be generated using the following command: -```sh -eynollah-training image-enhancement \ - -dis "dir of high resolution images" \ - -dois "dir where degraded images will be written" \ - -dols "dir where the corresponding high resolution image will be written as label" \ - -scs "degrading scales json file" -``` +`python generate_gt_for_training.py image-enhancement -dis "dir of high resolution images" -dois "dir where degraded +images will be written" -dols "dir where the corresponding high resolution image will be written as label" -scs +"degrading scales json file"` -The scales JSON file is a dictionary with a key named `scales` and values representing scales smaller than 1. Images are -downscaled based on these scales and then upscaled again to their original size. This process causes the images to lose -resolution at different scales. The degraded images are used as input images, and the original high-resolution images +The scales JSON file is a dictionary with a key named 'scales' and values representing scales smaller than 1. Images are +downscaled based on these scales and then upscaled again to their original size. This process causes the images to lose +resolution at different scales. The degraded images are used as input images, and the original high-resolution images serve as labels. The enhancement model can be trained with this generated dataset. The scales JSON file looks like this: ```yaml { @@ -97,47 +42,32 @@ serve as labels. The enhancement model can be trained with this generated datase ``` ### machine-based-reading-order - -For machine-based reading order, we aim to determine the reading priority between two sets of text regions. The model's -input is a three-channel image: the first and last channels contain information about each of the two text regions, -while the middle channel encodes prominent layout elements necessary for reading order, such as separators and headers. -To generate the training dataset, our script requires a PAGE XML file that specifies the image layout with the correct +For machine-based reading order, we aim to determine the reading priority between two sets of text regions. The model's +input is a three-channel image: the first and last channels contain information about each of the two text regions, +while the middle channel encodes prominent layout elements necessary for reading order, such as separators and headers. +To generate the training dataset, our script requires a page XML file that specifies the image layout with the correct reading order. -For output images, it is necessary to specify the width and height. Additionally, a minimum text region size can be set -to filter out regions smaller than this minimum size. +For output images, it is necessary to specify the width and height. Additionally, a minimum text region size can be set +to filter out regions smaller than this minimum size. This minimum size is defined as the ratio of the text region area to the image area, with a default value of zero. To run the dataset generator, use the following command: -```shell -eynollah-training generate-gt machine-based-reading-order \ - -dx "dir of GT xml files" \ - -domi "dir where output images will be written" \ - -docl "dir where the labels will be written" \ - -ih "height" \ - -iw "width" \ - -min "min area ratio" -``` +`python generate_gt_for_training.py machine-based-reading-order -dx "dir of GT xml files" -domi "dir where output images +will be written" -docl "dir where the labels will be written" -ih "height" -iw "width" -min "min area ratio"` 
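The three-channel input described above can be sketched in a few lines; the shapes, masks and helper name below are invented for illustration (the real generator additionally handles scaling, labels, and augmentation):

```python
import numpy as np

def make_pair_input(region_a, region_b, layout_elems):
    """Stack two region masks around a layout channel, as described above."""
    pair = np.zeros(region_a.shape + (3,), dtype=np.uint8)
    pair[..., 0] = region_a * 255      # first text region
    pair[..., 1] = layout_elems * 255  # separators, headers, ...
    pair[..., 2] = region_b * 255      # second text region
    return pair  # a classifier then predicts which region is read first

h, w = 448, 448
a = np.zeros((h, w), np.uint8); a[20:100, 20:200] = 1
b = np.zeros((h, w), np.uint8); b[150:300, 20:200] = 1
sep = np.zeros((h, w), np.uint8); sep[120:126, :] = 1
print(make_pair_input(a, b, sep).shape)  # (448, 448, 3)
```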
This minimum size is defined as the ratio of the text region area +For output images, it is necessary to specify the width and height. Additionally, a minimum text region size can be set +to filter out regions smaller than this minimum size. This minimum size is defined as the ratio of the text region area to the image area, with a default value of zero. To run the dataset generator, use the following command: -```shell -eynollah-training generate-gt machine-based-reading-order \ - -dx "dir of GT xml files" \ - -domi "dir where output images will be written" \ -"" -docl "dir where the labels will be written" \ - -ih "height" \ - -iw "width" \ - -min "min area ratio" -``` +`python generate_gt_for_training.py machine-based-reading-order -dx "dir of GT xml files" -domi "dir where output images +will be written" -docl "dir where the labels will be written" -ih "height" -iw "width" -min "min area ratio"` ### pagexml2label - -`pagexml2label` is designed to generate labels from PAGE XML GT files for various pixel-wise segmentation use cases, -including: -- `printspace` (i.e. page frame), -- `layout` (i.e. regions), -- `textline`, -- `word`, and -- `glyph`. - -To train a pixel-wise segmentation model, we require images along with their corresponding labels. Our training script -expects a PNG image where each pixel corresponds to a label, represented by an integer. The background is always labeled -as zero, while other elements are assigned different integers. For instance, if we have ground truth data with four +pagexml2label is designed to generate labels from GT page XML files for various pixel-wise segmentation use cases, +including 'layout,' 'textline,' 'printspace,' 'glyph,' and 'word' segmentation. +To train a pixel-wise segmentation model, we require images along with their corresponding labels. Our training script +expects a PNG image where each pixel corresponds to a label, represented by an integer. The background is always labeled +as zero, while other elements are assigned different integers. For instance, if we have ground truth data with four elements including the background, the classes would be labeled as 0, 1, 2, and 3 respectively. -In binary segmentation scenarios such as textline or page extraction, the background is encoded as 0, and the desired +In binary segmentation scenarios such as textline or page extraction, the background is encoded as 0, and the desired element is automatically encoded as 1 in the PNG label. -To specify the desired use case and the elements to be extracted in the PNG labels, a custom JSON file can be passed. -For example, in the case of textline detection, the JSON contents could be this: +To specify the desired use case and the elements to be extracted in the PNG labels, a custom JSON file can be passed. 
+For example, in the case of 'textline' detection, the JSON file would resemble this: ```yaml { @@ -145,77 +75,57 @@ For example, in the case of textline detection, the JSON contents could be this: } ``` -In the case of layout segmentation, the config JSON file might look like this: +In the case of layout segmentation, a custom config json file can look like this: ```yaml { "use_case": "layout", -"textregions": {"rest_as_paragraph": 1, "drop-capital": 1, "header": 2, "heading": 2, "marginalia": 3}, -"imageregion": 4, -"separatorregion": 5, -"graphicregions": {"rest_as_decoration": 6, "stamp": 7} +"textregions":{"rest_as_paragraph":1 , "drop-capital": 1, "header":2, "heading":2, "marginalia":3}, +"imageregion":4, +"separatorregion":5, +"graphicregions" :{"rest_as_decoration":6 ,"stamp":7} } ``` -The same example if `PrintSpace` (or `Border`) should be represented as a unique class: +A possible custom config json file for layout segmentation where the "printspace" is a class: ```yaml { "use_case": "layout", -"textregions": {"rest_as_paragraph": 1, "drop-capital": 1, "header": 2, "heading": 2, "marginalia": 3}, -"imageregion": 4, -"separatorregion": 5, -"graphicregions": {"rest_as_decoration": 6, "stamp": 7} -"printspace_as_class_in_layout": 8 +"textregions":{"rest_as_paragraph":1 , "drop-capital": 1, "header":2, "heading":2, "marginalia":3}, +"imageregion":4, +"separatorregion":5, +"graphicregions" :{"rest_as_decoration":6 ,"stamp":7}, +"printspace_as_class_in_layout" : 8 } ``` -In the `layout` use-case, it is beneficial to first understand the structure of the PAGE XML file and its elements. -For a given page image, the visible segments are annotated in XML with their polygon coordinates and types. -On the region level, available segment types include `TextRegion`, `SeparatorRegion`, `ImageRegion`, `GraphicRegion`, -`NoiseRegion` and `TableRegion`. +For the layout use case, it is beneficial to first understand the structure of the page XML file and its elements. +In a given image, the annotations of elements are recorded in a page XML file, including their contours and classes. +For an image document, the known regions are 'textregion', 'separatorregion', 'imageregion', 'graphicregion', +'noiseregion', and 'tableregion'. -Moreover, text regions and graphic regions in particular are subdivided via `@type`: -- The allowed subtypes for text regions are `paragraph`, `heading`, `marginalia`, `drop-capital`, `header`, `footnote`, -`footnote-continued`, `signature-mark`, `page-number` and `catch-word`. -- The known subtypes for graphic regions are `handwritten-annotation`, `decoration`, `stamp` and `signature`. +Text regions and graphic regions also have their own specific types. The known types for text regions are 'paragraph', +'header', 'heading', 'marginalia', 'drop-capital', 'footnote', 'footnote-continued', 'signature-mark', 'page-number', +and 'catch-word'. The known types for graphic regions are 'handwritten-annotation', 'decoration', 'stamp', and +'signature'. +Since we don't know all types of text and graphic regions, unknown cases can arise. To handle these, we have defined +two additional types, "rest_as_paragraph" and "rest_as_decoration", to ensure that no unknown types are missed. +This way, users can extract all known types from the labels and be confident that no unknown types are overlooked. -These types and subtypes must be mapped to classes for the segmentation model. 
However, sometimes these fine-grained -distinctions are not useful or the existing annotations are not very usable (too scarce or too unreliable). -In that case, instead of these subtypes with a specific mapping, they can be pooled together by using the two special -types: -- `rest_as_paragraph` (mapping missing TextRegion subtypes and `paragraph`) -- `rest_as_decoration` (mapping missing GraphicRegion subtypes and `decoration`) +In the custom JSON file shown above, "header" and "heading" are extracted as the same class, while "marginalia" is shown +as a different class. All other text region types, including "drop-capital," are grouped into the same class. For the +graphic region, "stamp" has its own class, while all other types are classified together. "Image region" and "separator +region" are also present in the label. However, other regions like "noise region" and "table region" will not be +included in the label PNG file, even if they have information in the page XML files, as we chose not to include them. -(That way, users can extract all known types from the labels and be confident that no subtypes are overlooked.) +`python generate_gt_for_training.py pagexml2label -dx "dir of GT xml files" -do "dir where output label png files will +be written" -cfg "custom config json file" -to "output type which has 2d and 3d. 2d is used for training and 3d is just +to visualise the labels"` -In the custom JSON example shown above, `header` and `heading` are extracted as the same class, -while `marginalia` is modelled as a different class. All other text region types, including `drop-capital`, -are grouped into the same class. For graphic regions, `stamp` has its own class, while all other types -are classified together. `ImageRegion` and `SeparatorRegion` will also represented with a class label in the -training data. However, other regions like `NoiseRegion` or `TableRegion` will not be included in the PNG files, -even if they were present in the PAGE XML. - -The tool expects various command-line options: - -```sh -eynollah-training generate-gt pagexml2label \ - -dx "dir of input PAGE XML files" \ - -do "dir of output label PNG files" \ - -cfg "custom config JSON file" \ - -to "output type (2d or 3d)" -``` - -As output type, use -- `2d` for training, -- `3d` to just visualise the labels. - -We have also defined an artificial class that can be added to (rendered around) the boundary -of text region types or text lines in order to make separation of neighbouring segments more -reliable. The key is called `artificial_class_on_boundary`, and it takes a list of text region -types to be applied to. - -Our example JSON config file could then look like this: +We have also defined an artificial class that can be added to the boundary of text region types or text lines. This key +is called "artificial_class_on_boundary." If users want to apply this to certain text regions in the layout use case, +the example JSON config file should look like this:
+This implies that the artificial class label, denoted by 7, will be present on PNG files and will only be added to the +elements labeled as "paragraph," "header," "heading," and "marginalia." -For `printspace`, `textline`, `word`, and `glyph` segmentation use-cases, there is no `artificial_class_on_boundary` key, -but `artificial_class_label` is available. If specified in the config file, then its value should be set at 2, because -these elements represent binary classification problems (with background represented as 0, and segments as 1, respectively). - -For example, the JSON config for textline detection could look as follows: +For "textline", "word", and "glyph", the artificial class on the boundaries will be activated only if the +"artificial_class_label" key is specified in the config file. Its value should be set as 2 since these elements +represent binary cases. For example, if the background and textline are denoted as 0 and 1 respectively, then the +artificial class should be assigned the value 2. The example JSON config file should look like this for the "textline" use +case: ```yaml { @@ -254,38 +163,26 @@ For example, the JSON config for textline detection could look as follows: } ``` -If the coordinates of `PrintSpace` (or `Border`) are present in the PAGE XML ground truth files, -and one wishes to crop images to only cover the print space bounding box, this can be achieved -by passing the `-ps` option. Note that in this scenario, the directory of the original images -must also be provided, to ensure that the images are cropped in sync with the labels. The command -line would then resemble this: +If the coordinates of "PrintSpace" or "Border" are present in the page XML ground truth files, and the user wishes to +crop only the print space area, this can be achieved by activating the "-ps" argument. However, it should be noted that +in this scenario, since cropping will be applied to the label files, the directory of the original images must be +provided to ensure that they are cropped in sync with the labels. This ensures that the correct images and labels +required for training are obtained. The command should resemble the following: -```sh -eynollah-training generate-gt pagexml2label \ - -dx "dir of input PAGE XML files" \ - -do "dir of output label PNG files" \ - -cfg "custom config JSON file" \ - -to "output type (2d or 3d)" \ - -ps \ - -di "dir of input original images" \ - -doi "dir of output cropped images" -``` - -Also, note that it can be detrimental to layout training if there are visible segments which -the annotation does not account for (and thus the model must learn to ignore). So if the images -are not cropped, the `-ps` _should_ be used. If a PAGE XML file is missing `PrintSpace` (or `Border`) -annotations, use `-mps` to either `skip` these or `project` (i.e. crop from existing segments). +`python generate_gt_for_training.py pagexml2label -dx "dir of GT xml files" -do "dir where output label png files will +be written" -cfg "custom config json file" -to "output type which has 2d and 3d. 2d is used for training and 3d is just +to visualise the labels" -ps -di "dir where the org images are located" -doi "dir where the cropped output images will +be written"` ## Train a model - ### classification -For the image classification use-case, we have not provided a ground truth generator, as it is unnecessary. -All we require is a training directory with subdirectories, each containing images of its respective classes.
We need -separate directories for training and evaluation, and the class names (subdirectories) must be consistent across both -directories. Additionally, the class names should be specified in the config JSON file, as shown in the following -example. If, for instance, we aim to classify "apple" and "orange," with a total of 2 classes, the -`classification_classes_name` key in the config file should appear as follows: +For the classification use case, we haven't provided a ground truth generator, as it's unnecessary. For classification, +all we require is a training directory with subdirectories, each containing images of its respective classes. We need +separate directories for training and evaluation, and the class names (subdirectories) must be consistent across both +directories. Additionally, the class names should be specified in the config JSON file, as shown in the following +example. If, for instance, we aim to classify "apple" and "orange," with a total of 2 classes, the +"classification_classes_name" key in the config file should appear as follows: ```yaml { @@ -307,18 +204,18 @@ example. If, for instance, we aim to classify "apple" and "orange," with a total } ``` -Then `dir_train` should be like this: +The "dir_train" should be like this: -``` +``` . └── train # train directory ├── apple # directory of images for apple class └── orange # directory of images for orange class ``` -And `dir_eval` analogously: +And the "dir_eval" should have the same structure as the train directory: -``` +``` . └── eval # evaluation directory ├── apple # directory of images for apple class @@ -328,13 +225,11 @@ And the "dir_eval" the same structure as train directory: The classification model can be trained using the following command line: -```sh -eynollah-training train with config_classification.json -``` +`python train.py with config_classification.json` -As evident in the example JSON file above, for classification, we utilize a "f1_threshold_classification" parameter. -This parameter is employed to gather all models with an evaluation f1 score surpassing this threshold. Subsequently, -an ensemble of these model weights is executed, and a model is saved in the output directory as "model_ens_avg". +As evident in the example JSON file above, for classification, we utilize an "f1_threshold_classification" parameter. +This parameter is employed to gather all models with an evaluation f1 score surpassing this threshold. Subsequently, +an ensemble of these model weights is executed, and a model is saved in the output directory as "model_ens_avg". Additionally, the weight of the best model based on the evaluation f1 score is saved as "model_best". ### reading order @@ -376,91 +271,67 @@ And the "dir_eval" the same structure as train directory: └── labels # directory of labels ``` -The reading-order model can be trained like the classification case command line. +The reading-order model can be trained using the same command line as for classification. ### Segmentation (Textline, Binarization, Page extraction and layout) and enhancement #### Parameter configuration for segmentation or enhancement usecases - -The following parameter configuration can be applied to all segmentation use cases and enhancements. The augmentation, -its sub-parameters, and continued training are defined only for segmentation use cases and enhancements, not for +The following parameter configuration can be applied to all segmentation use cases and enhancements. 
The augmentation, +its sub-parameters, and continued training are defined only for segmentation use cases and enhancements, not for classification and machine-based reading order, as you can see in their example config files. -* `task`: The task parameter must be one of the following values: - - `binarization`, - - `enhancement`, - - `segmentation`, - - `classification`, - - `reading_order`. -* `backbone_type`: For the tasks `segmentation` (such as text line, and region layout detection), - `binarization` and `enhancement`, we offer two backbone options: - - `nontransformer` (only a CNN ResNet-50). - - `transformer` (first apply a CNN, followed by a transformer) -* `transformer_cnn_first`: Whether to apply the CNN first (followed by the transformer) when using `transformer` backbone. -* `transformer_num_patches_xy`: Number of patches for vision transformer in x and y direction respectively. -* `transformer_patchsize_x`: Patch size of vision transformer patches in x direction. -* `transformer_patchsize_y`: Patch size of vision transformer patches in y direction. -* `transformer_projection_dim`: Transformer projection dimension. Default value is 64. -* `transformer_mlp_head_units`: Transformer Multilayer Perceptron (MLP) head units. Default value is [128, 64]. -* `transformer_layers`: transformer layers. Default value is 8. -* `transformer_num_heads`: Transformer number of heads. Default value is 4. -* `patches`: Whether to break up (tile) input images into smaller patches (input size of the model). - If `false`, the model will see the image once (resized to the input size of the model). - Should be set to `false` for cases like page extraction. -* `n_batch`: Number of batches at each iteration. -* `n_classes`: Number of classes. In the case of binary classification this should be 2. In the case of reading_order it - should set to 1. And for the case of layout detection just the unique number of classes should be given. -* `n_epochs`: Number of epochs (iterations over the data) to train. -* `input_height`: the image height for the model's input. -* `input_width`: the image width for the model's input. -* `weight_decay`: Weight decay of l2 regularization of model layers. -* `weighted_loss`: If `true`, this means that you want to apply weighted categorical crossentropy as loss function. - (Mutually exclusive with `is_loss_soft_dice`, and only applies for `segmentation` and `binarization` tasks.) -* `pretraining`: Set to `true` to (download and) initialise pretrained weights of ResNet50 encoder. -* `dir_train`: Path to directory of raw training data (as extracted via `pagexml2labels`, i.e. with subdirectories - `images` and `labels` for input images and output labels. - (These are not prepared for training the model, yet. Upon first run, the raw data will be transformed to suitable size - needed for the model, and written in `dir_output` under `train` and `eval` subdirectories. See `data_is_provided`.) -* `dir_eval`: Ditto for raw evaluation data. -* `dir_output`: Directory to write model checkpoints, logs (for Tensorboard) and precomputed images to. -* `data_is_provided`: If you have already trained at least one complete epoch (using the same data settings) before, - you can set this to `true` to avoid computing the resized / patched / augmented image files again. - Be sure that there are subdirectories `train` and `eval` data are in `dir_output` (each with subdirectories `images` - and `labels`, respectively). 
-* `continue_training`: If `true`, continue training a model checkpoint from a previous run. - This requires providing the directory of the model checkpoint to load via `dir_of_start_model` - and setting `index_start` counter for naming new checkpoints. - For example if you have already trained for 3 epochs, then your last index is 2, so if you want - to continue with `model_04`, `model_05` etc., set `index_start=3`. -* `index_start`: Starting index for saving models in the case that `continue_training` is `true`. - (Existing checkpoints above this will be overwritten.) -* `dir_of_start_model`: Directory containing existing model checkpoint to initialise model weights from when `continue_training=true`. - (Can be an epoch-interval checkpoint, or batch-interval checkpoint from `save_interval`.) -* `augmentation`: If you want to apply any kind of augmentation this parameter should first set to `true`. - The remaining settings pertain to that... -* `flip_aug`: If `true`, different types of flipping over the image arrays. Requires `flip_index` parameter. -* `flip_index`: List of flip codes (as in `cv2.flip`, i.e. 0 for vertical, positive for horizontal shift, negative for vertical and horizontal shift). -* `blur_aug`: If `true`, different types of blurring will be applied on image. Requires `blur_k` parameter. -* `blur_k`: Method of blurring (`gauss`, `median` or `blur`). -* `scaling`: If `true`, scaling will be applied on image. Requires `scales` parameter. -* `scales`: List of scale factors for scaling. -* `scaling_bluring`: If `true`, combination of scaling and blurring will be applied on image. -* `scaling_binarization`: If `true`, combination of scaling and binarization will be applied on image. -* `scaling_flip`: If `true`, combination of scaling and flip will be applied on image. -* `degrading`: If `true`, degrading will be applied to the image. Requires `degrade_scales` parameter. -* `degrade_scales`: List of intensity factors for degrading. -* `brightening`: If `true`, brightening will be applied to the image. Requires `brightness` parameter. -* `brightness`: List of intensity factors for brightening. -* `binarization`: If `true`, Otsu thresholding will be applied to augment the input data with binarized images. -* `dir_img_bin`: With `binarization`, use this directory to read precomputed binarized images instead of ad-hoc Otsu. - (Base names should correspond to the files in `dir_train/images`.) -* `rotation`: If `true`, 90° rotation will be applied on images. -* `rotation_not_90`: If `true`, random rotation (other than 90°) will be applied on image. Requires `thetha` parameter. -* `thetha`: List of rotation angles (in degrees). +* backbone_type: For segmentation tasks (such as text line, binarization, and layout detection) and enhancement, we +offer two backbone options: a "nontransformer" and a "transformer" backbone. For the "transformer" backbone, we first +apply a CNN followed by a transformer. In contrast, the "nontransformer" backbone utilizes only a CNN ResNet-50. +* task: The task parameter can have values such as "segmentation", "enhancement", "classification", and "reading_order". +* patches: If you want to break input images into smaller patches (input size of the model) you need to set this +parameter to ``true``. In the case that the model should see the image once, like page extraction, patches should be +set to ``false``. +* n_batch: Number of batches at each iteration. +* n_classes: Number of classes. In the case of binary classification this should be 2. In the case of reading_order it +should be set to 1. And for the case of layout detection just the unique number of classes should be given. +* n_epochs: Number of epochs. +* input_height: This indicates the height of the model's input. +* input_width: This indicates the width of the model's input. +* weight_decay: Weight decay of l2 regularization of model layers. +* pretraining: Set to ``true`` to load pretrained weights of the ResNet50 encoder. The downloaded weights should be saved +in a folder named "pretrained_model" in the same directory as the "train.py" script. +* augmentation: If you want to apply any kind of augmentation this parameter should first be set to ``true``. +* flip_aug: If ``true``, different types of flip will be applied on the image. The type of flips is given with the "flip_index" parameter. +* blur_aug: If ``true``, different types of blurring will be applied on the image. The type of blurring is given with the "blur_k" parameter. +* scaling: If ``true``, scaling will be applied on the image. The scale of scaling is given with the "scales" parameter. +* degrading: If ``true``, degrading will be applied to the image. The amount of degrading is defined with the "degrade_scales" parameter. +* brightening: If ``true``, brightening will be applied to the image. The amount of brightening is defined with the "brightness" parameter. +* rotation_not_90: If ``true``, rotation (other than 90 degrees) will be applied on the image. Rotation angles are given with the "thetha" parameter. +* rotation: If ``true``, 90 degree rotation will be applied on the image. +* binarization: If ``true``, Otsu thresholding will be applied to augment the input data with binarized images. +* scaling_bluring: If ``true``, a combination of scaling and blurring will be applied on the image. +* scaling_binarization: If ``true``, a combination of scaling and binarization will be applied on the image. +* scaling_flip: If ``true``, a combination of scaling and flip will be applied on the image. +* flip_index: Type of flips. +* blur_k: Type of blurrings. +* scales: Scales of scaling. +* brightness: The amount of brightenings. +* thetha: Rotation angles. +* degrade_scales: The amount of degradings. +* continue_training: If ``true``, it means that you have already trained a model and you would like to continue the training. In that case, you need to provide the directory of the trained model with "dir_of_start_model" and an index for naming the models. For example, if you have already trained for 3 epochs then your last index is 2, and if you want to continue from model_1.h5, you can set ``index_start`` to 3 to start naming models with index 3. +* weighted_loss: If ``true``, this means that you want to apply weighted categorical_crossentropy as the loss function. Be careful: if you set this to ``true``, the parameter "is_loss_soft_dice" should be ``false``. +* data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train and eval data are in "dir_output". Once training data is provided, we resize and augment it and then write it into the train and eval sub-directories in "dir_output". +* dir_train: This is the directory of "images" and "labels" (dir_train should include two subdirectories with the names images and labels) for raw images and labels. Namely, they are not prepared (not resized and not augmented) yet for training the model. When we run this tool, these raw data will be transformed to the suitable size needed for the model and will be written in "dir_output" in the train and eval directories. Each of train and eval includes "images" and "labels" sub-directories. 
+* index_start: Starting index for saved models in the case that "continue_training" is ``true``. +* dir_of_start_model: Directory containing the pretrained model to continue training from in the case that "continue_training" is ``true``. +* transformer_num_patches_xy: Number of patches for the vision transformer in x and y direction respectively. +* transformer_patchsize_x: Patch size of vision transformer patches in x direction. +* transformer_patchsize_y: Patch size of vision transformer patches in y direction. +* transformer_projection_dim: Transformer projection dimension. Default value is 64. +* transformer_mlp_head_units: Transformer Multilayer Perceptron (MLP) head units. Default value is [128, 64]. +* transformer_layers: Transformer layers. Default value is 8. +* transformer_num_heads: Transformer number of heads. Default value is 4. +* transformer_cnn_first: We have two types of vision transformers. In one type, a CNN is applied first, followed by a transformer. In the other type, this order is reversed. If transformer_cnn_first is true, it means the CNN will be applied before the transformer. Default value is true. -In case of segmentation and enhancement the train and evaluation data should be organised as follows. +In the case of segmentation and enhancement, the train and evaluation directories should be structured as follows. -The "dir_train" directory should be like this: +The "dir_train" should be like this: ``` . @@ -478,40 +349,12 @@ And the "dir_eval" the same structure as train directory: └── labels # directory of labels ``` -After configuring the JSON file for segmentation or enhancement, -training can be initiated by running the following command line, -similar to classification and reading-order model training: +After configuring the JSON file for segmentation or enhancement, training can be initiated by running the following +command, similar to the process for classification and reading order: -```sh -eynollah-training train with config_classification.json -``` +`python train.py with config_classification.json` #### Binarization - -### Ground truth format - -Lables for each pixel are identified by a number. So if you have a -binary case, ``n_classes`` should be set to ``2`` and labels should -be ``0`` and ``1`` for each class and pixel. - -In the case of multiclass, just set ``n_classes`` to the number of classes -you have and the try to produce the labels by pixels set from ``0 , 1 ,2 .., n_classes-1``. -The labels format should be png. -Our lables are 3 channel png images but only information of first channel is used. -If you have an image label with height and width of 10, for a binary case the first channel should look like this: - - Label: [ [1, 0, 0, 1, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - ..., - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ] - - This means that you have an image by `10*10*3` and `pixel[0,0]` belongs - to class `1` and `pixel[0,1]` belongs to class `0`. - - A small sample of training data for binarization experiment can be found here, [Training data sample](https://qurator-data.de/~vahid.rezanezhad/binarization_training_data_sample/), which contains images and lables folders. 
- - An example config json file for binarization can be like this: ```yaml @@ -555,7 +398,7 @@ An example config json file for binarization can be like this: "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -600,7 +443,7 @@ An example config json file for binarization can be like this: "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -645,7 +488,7 @@ An example config json file for binarization can be like this: "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -655,7 +498,7 @@ An example config json file for binarization can be like this: } ``` -It's important to mention that the value of n_classes for enhancement should be 3, as the model's output is a 3-channel +It's important to mention that the value of n_classes for enhancement should be 3, as the model's output is a 3-channel image. #### Page extraction @@ -693,7 +536,7 @@ image. "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -703,11 +546,10 @@ image. } ``` -For page segmentation (or print space or border segmentation), the model needs to view the input image in its -entirety,hence the patches parameter should be set to false. +For page segmentation (or printspace or border segmentation), the model needs to view the input image in its entirety, +hence the patches parameter should be set to false. #### layout segmentation - An example config json file for layout segmentation with 5 classes (including background) can be like this: ```yaml @@ -751,7 +593,7 @@ An example config json file for layout segmentation with 5 classes (including ba "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -763,42 +605,28 @@ An example config json file for layout segmentation with 5 classes (including ba ## Inference with the trained model ### classification - -For conducting inference with a trained model, you simply need to execute the following command line, specifying the +For conducting inference with a trained model, you simply need to execute the following command line, specifying the directory of the model and the image on which to perform inference: -```sh -eynollah-training inference -m "model dir" -i "image" -``` +`python inference.py -m "model dir" -i "image" ` This will straightforwardly return the class of the image. ### machine based reading order +To infer the reading order using a reading order model, we need a page XML file containing layout information but +without the reading order. We simply need to provide the model directory, the XML file, and the output directory. +The new XML file with the added reading order will be written to the output directory with the same name. +We need to run: -To infer the reading order using a reading order model, we need a PAGE XML file containing layout information but -without the reading order. 
We simply need to provide the model directory, the XML file, and the output directory. The -new XML file with the added reading order will be written to the output directory with the same name. We need to run: - -```sh -eynollah-training inference \ - -m "model dir" \ - -xml "page xml file" \ - -o "output dir to write new xml with reading order" -``` +`python inference.py -m "model dir" -xml "page xml file" -o "output dir to write new xml with reading order" ` ### Segmentation (Textline, Binarization, Page extraction and layout) and enhancement +For conducting inference with a trained model for segmentation and enhancement you need to run the following command +line: -For conducting inference with a trained model for segmentation and enhancement you need to run the following command line: - -```sh -eynollah-training inference \ - -m "model dir" \ - -i "image" \ - -p \ - -s "output image" -``` +`python inference.py -m "model dir" -i "image" -p -s "output image" ` Note that in the case of page extraction the -p flag is not needed. -For segmentation or binarization tasks, if a ground truth (GT) label is available, the IoU evaluation metric can be +For segmentation or binarization tasks, if a ground truth (GT) label is available, the IoU evaluation metric can be calculated for the output. To do this, you need to provide the GT label using the argument -gt. diff --git a/pyproject.toml b/pyproject.toml index e6821a5..4da39ef 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,18 +11,9 @@ description = "Document Layout Analysis" readme = "README.md" license.file = "LICENSE" requires-python = ">=3.8" -keywords = [ - "document layout analysis", - "image segmentation", - "binarization", - "optical character recognition" -] +keywords = ["document layout analysis", "image segmentation"] -dynamic = [ - "dependencies", - "optional-dependencies", - "version" -] +dynamic = ["dependencies", "version"] classifiers = [ "Development Status :: 4 - Beta", @@ -30,17 +21,16 @@ classifiers = [ "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3 :: Only", "Topic :: Scientific/Engineering :: Image Processing", ] +[project.optional-dependencies] +OCR = ["torch <= 2.0.1", "transformers <= 4.30.2"] +plotting = ["matplotlib"] + [project.scripts] eynollah = "eynollah.cli:main" -eynollah-training = "eynollah.training.cli:main" ocrd-eynollah-segment = "eynollah.ocrd_cli:main" ocrd-sbb-binarize = "eynollah.ocrd_cli_binarization:main" @@ -51,35 +41,13 @@ Repository = "https://github.com/qurator-spk/eynollah.git" [tool.setuptools.dynamic] dependencies = {file = ["requirements.txt"]} optional-dependencies.test = {file = ["requirements-test.txt"]} -optional-dependencies.OCR = {file = ["requirements-ocr.txt"]} -optional-dependencies.plotting = {file = ["requirements-plotting.txt"]} -optional-dependencies.training = {file = ["requirements-training.txt"]} [tool.setuptools.packages.find] where = ["src"] [tool.setuptools.package-data] -"*" = ["*.json", '*.yml', '*.xml', '*.xsd', '*.ttf'] +"*" = ["*.json", '*.yml', '*.xml', '*.xsd'] [tool.coverage.run] branch = true source = ["eynollah"] - -[tool.ruff] -line-length = 120 - -[tool.ruff.lint] -ignore = [ -# disable unused imports -"F401", -# disable import order -"E402", -# disable unused 
variables -"F841", -# disable bare except -"E722", -] - -[tool.ruff.format] -quote-style = "preserve" - diff --git a/requirements-ocr.txt b/requirements-ocr.txt deleted file mode 100644 index dad26f4..0000000 --- a/requirements-ocr.txt +++ /dev/null @@ -1,3 +0,0 @@ -torch -transformers <= 4.30.2 ; python_version < '3.10' -transformers >= 5 ; python_version >= '3.10' diff --git a/requirements-plotting.txt b/requirements-plotting.txt deleted file mode 100644 index 6ccafc3..0000000 --- a/requirements-plotting.txt +++ /dev/null @@ -1 +0,0 @@ -matplotlib diff --git a/requirements-test.txt b/requirements-test.txt index 3ebcf71..cce9428 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -1,4 +1,4 @@ pytest -pytest-isolate +pytest-subtests coverage[toml] black diff --git a/requirements-training.txt b/requirements-training.txt deleted file mode 120000 index e1bc9c3..0000000 --- a/requirements-training.txt +++ /dev/null @@ -1 +0,0 @@ -train/requirements.txt \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index d79853f..9ed0584 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,9 +1,7 @@ # ocrd includes opencv, numpy, shapely, click ocrd >= 3.3.0 -numpy < 2.0 +numpy <1.24.0 scikit-learn >= 0.23.2 -tensorflow -tf-keras # avoid keras 3 (also needs TF_USE_LEGACY_KERAS=1) +tensorflow < 2.13 numba <= 0.58.1 -scikit-image -tabulate +loky diff --git a/src/eynollah/Charis-Regular.ttf b/src/eynollah/Charis-Regular.ttf deleted file mode 100644 index a4e75a4..0000000 Binary files a/src/eynollah/Charis-Regular.ttf and /dev/null differ diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py new file mode 100644 index 0000000..c189aca --- /dev/null +++ b/src/eynollah/cli.py @@ -0,0 +1,405 @@ +import os +import sys +import click +from ocrd_utils import initLogging, getLevelName, getLogger +from eynollah.eynollah import Eynollah, Eynollah_ocr +from eynollah.sbb_binarize import SbbBinarizer + +@click.group() +def main(): + pass + +@main.command() +@click.option( + "--dir_xml", + "-dx", + help="directory of GT page-xml files", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--dir_out_modal_image", + "-domi", + help="directory where ground truth images would be written", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--dir_out_classes", + "-docl", + help="directory where ground truth classes would be written", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--input_height", + "-ih", + help="input height", +) +@click.option( + "--input_width", + "-iw", + help="input width", +) +@click.option( + "--min_area_size", + "-min", + help="min area size of regions considered for reading order training.", +) +def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, input_height, input_width, min_area_size): + xml_files_ind = os.listdir(dir_xml) + +@main.command() +@click.option('--patches/--no-patches', default=True, help='by enabling this parameter you let the model see the image in patches.') +@click.option('--model_dir', '-m', type=click.Path(exists=True, file_okay=False), required=True, help='directory containing models for prediction') +@click.argument('input_image', required=False) +@click.argument('output_image', required=False) +@click.option( + "--dir_in", + "-di", + help="directory of input images", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--dir_out", + "-do", + help="directory for output images", + type=click.Path(exists=True, file_okay=False), +) +def 
binarization(patches, model_dir, input_image, output_image, dir_in, dir_out): + assert (dir_out is None) == (dir_in is None), "Options -di and -do are mutually dependent" + assert (input_image is None) == (output_image is None), "INPUT_IMAGE and OUTPUT_IMAGE are mutually dependent" + assert (dir_in is None) != (input_image is None), "Specify either -di and -do options, or INPUT_IMAGE and OUTPUT_IMAGE" + SbbBinarizer(model_dir).run(image_path=input_image, use_patches=patches, save=output_image, dir_in=dir_in, dir_out=dir_out) + + + + +@main.command() +@click.option( + "--image", + "-i", + help="image filename", + type=click.Path(exists=True, dir_okay=False), +) + +@click.option( + "--out", + "-o", + help="directory to write output xml data", + type=click.Path(exists=True, file_okay=False), + required=True, +) +@click.option( + "--overwrite", + "-O", + help="overwrite (instead of skipping) if output xml exists", + is_flag=True, +) +@click.option( + "--dir_in", + "-di", + help="directory of images", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--model", + "-m", + help="directory of models", + type=click.Path(exists=True, file_okay=False), + required=True, +) +@click.option( + "--save_images", + "-si", + help="if a directory is given, images in documents will be cropped and saved there", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--save_layout", + "-sl", + help="if a directory is given, plot of layout will be saved there", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--save_deskewed", + "-sd", + help="if a directory is given, deskewed image will be saved there", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--save_all", + "-sa", + help="if a directory is given, all plots needed for documentation will be saved there", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--save_page", + "-sp", + help="if a directory is given, page crop of image will be saved there", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--enable-plotting/--disable-plotting", + "-ep/-noep", + is_flag=True, + help="If set, will plot intermediary files and images", +) +@click.option( + "--extract_only_images/--disable-extracting_only_images", + "-eoi/-noeoi", + is_flag=True, + help="If a directory is given, only images in documents will be cropped and saved there and the other processing will not be done", +) +@click.option( + "--allow-enhancement/--no-allow-enhancement", + "-ae/-noae", + is_flag=True, + help="if this parameter is set to true, this tool will check whether the input image needs resizing and enhancement. If so, the resized and enhanced image and the corresponding layout data will be written in the out directory", +) +@click.option( + "--curved-line/--no-curvedline", + "-cl/-nocl", + is_flag=True, + help="if this parameter is set to true, this tool will try to return contours of textlines instead of rectangular bounding boxes of textlines. 
Note that with this option the tool needs more time to process.", +) +@click.option( + "--textline_light/--no-textline_light", + "-tll/-notll", + is_flag=True, + help="if this parameter is set to true, this tool will try to return contours of textlines instead of rectangular bounding boxes of textlines, using a faster method.", +) +@click.option( + "--full-layout/--no-full-layout", + "-fl/-nofl", + is_flag=True, + help="if this parameter is set to true, this tool will try to return all elements of layout.", +) +@click.option( + "--tables/--no-tables", + "-tab/-notab", + is_flag=True, + help="if this parameter is set to true, this tool will try to detect tables.", +) +@click.option( + "--right2left/--left2right", + "-r2l/-l2r", + is_flag=True, + help="if this parameter is set to true, this tool will extract right-to-left reading order.", +) +@click.option( + "--input_binary/--input-RGB", + "-ib/-irgb", + is_flag=True, + help="in general, eynollah uses RGB as input but if the input document is very dark, very bright or for any other reason you can turn binarized input on. This option does not mean that you have to provide a binary image; rather, it means that the tool itself will binarize the RGB input document.", +) +@click.option( + "--allow_scaling/--no-allow-scaling", + "-as/-noas", + is_flag=True, + help="if this parameter is set to true, this tool will check the scale and, if needed, scale the image to perform better layout detection", +) +@click.option( + "--headers_off/--headers-on", + "-ho/-noho", + is_flag=True, + help="if this parameter is set to true, this tool will ignore the role of headers in reading order", +) +@click.option( + "--light_version/--original", + "-light/-org", + is_flag=True, + help="if this parameter is set to true, this tool will use the lighter version", +) +@click.option( + "--ignore_page_extraction/--extract_page_included", + "-ipe/-epi", + is_flag=True, + help="if this parameter is set to true, this tool will ignore page extraction", +) +@click.option( + "--reading_order_machine_based/--heuristic_reading_order", + "-romb/-hro", + is_flag=True, + help="if this parameter is set to true, this tool will apply machine-based reading order detection", +) +@click.option( + "--do_ocr", + "-ocr/-noocr", + is_flag=True, + help="if this parameter is set to true, this tool will try to do OCR", +) +@click.option( + "--num_col_upper", + "-ncu", + help="upper limit of columns in document image", +) +@click.option( + "--num_col_lower", + "-ncl", + help="lower limit of columns in document image", +) +@click.option( + "--skip_layout_and_reading_order", + "-slro/-noslro", + is_flag=True, + help="if this parameter is set to true, this tool will ignore layout detection and reading order. 
It means that textline detection will be done within the printspace and contours of textlines will be written in the xml output file.", +) +@click.option( + "--log_level", + "-l", + type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), + help="Override log level globally to this", +) + +def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_deskewed, save_all, extract_only_images, save_page, enable_plotting, allow_enhancement, curved_line, textline_light, full_layout, tables, right2left, input_binary, allow_scaling, headers_off, light_version, reading_order_machine_based, do_ocr, num_col_upper, num_col_lower, skip_layout_and_reading_order, ignore_page_extraction, log_level): + initLogging() + if log_level: + getLogger('eynollah').setLevel(getLevelName(log_level)) + assert enable_plotting or not save_layout, "Plotting with -sl also requires -ep" + assert enable_plotting or not save_deskewed, "Plotting with -sd also requires -ep" + assert enable_plotting or not save_all, "Plotting with -sa also requires -ep" + assert enable_plotting or not save_page, "Plotting with -sp also requires -ep" + assert enable_plotting or not save_images, "Plotting with -si also requires -ep" + assert enable_plotting or not allow_enhancement, "Plotting with -ae also requires -ep" + assert not enable_plotting or save_layout or save_deskewed or save_all or save_page or save_images or allow_enhancement, \ + "Plotting with -ep also requires -sl, -sd, -sa, -sp, -si or -ae" + assert textline_light == light_version, "Both light textline detection -tll and light version -light must be set or unset equally" + assert not extract_only_images or not allow_enhancement, "Image extraction -eoi can not be set alongside allow_enhancement -ae" + assert not extract_only_images or not allow_scaling, "Image extraction -eoi can not be set alongside allow_scaling -as" + assert not extract_only_images or not light_version, "Image extraction -eoi can not be set alongside light_version -light" + assert not extract_only_images or not curved_line, "Image extraction -eoi can not be set alongside curved_line -cl" + assert not extract_only_images or not textline_light, "Image extraction -eoi can not be set alongside textline_light -tll" + assert not extract_only_images or not full_layout, "Image extraction -eoi can not be set alongside full_layout -fl" + assert not extract_only_images or not tables, "Image extraction -eoi can not be set alongside tables -tab" + assert not extract_only_images or not right2left, "Image extraction -eoi can not be set alongside right2left -r2l" + assert not extract_only_images or not headers_off, "Image extraction -eoi can not be set alongside headers_off -ho" + assert image or dir_in, "Either a single image -i or a dir_in -di is required" + eynollah = Eynollah( + model, + logger=getLogger('eynollah'), + dir_out=out, + dir_of_cropped_images=save_images, + extract_only_images=extract_only_images, + dir_of_layout=save_layout, + dir_of_deskewed=save_deskewed, + dir_of_all=save_all, + dir_save_page=save_page, + enable_plotting=enable_plotting, + allow_enhancement=allow_enhancement, + curved_line=curved_line, + textline_light=textline_light, + full_layout=full_layout, + tables=tables, + right2left=right2left, + input_binary=input_binary, + allow_scaling=allow_scaling, + headers_off=headers_off, + light_version=light_version, + ignore_page_extraction=ignore_page_extraction, + reading_order_machine_based=reading_order_machine_based, + do_ocr=do_ocr, + num_col_upper=num_col_upper, + 
num_col_lower=num_col_lower, + skip_layout_and_reading_order=skip_layout_and_reading_order, + ) + if dir_in: + eynollah.run(dir_in=dir_in, overwrite=overwrite) + else: + eynollah.run(image_filename=image, overwrite=overwrite) + + +@main.command() +@click.option( + "--dir_in", + "-di", + help="directory of images", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--dir_in_bin", + "-dib", + help="directory of binarized images. This should be given if you want to do prediction based on both rgb and bin images. All bin images must be png files", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--out", + "-o", + help="directory to write output xml data", + type=click.Path(exists=True, file_okay=False), + required=True, +) +@click.option( + "--dir_xmls", + "-dx", + help="directory of xmls", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--dir_out_image_text", + "-doit", + help="directory of images with predicted text", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--model", + "-m", + help="directory of models", + type=click.Path(exists=True, file_okay=False), + required=True, +) +@click.option( + "--tr_ocr", + "-trocr/-notrocr", + is_flag=True, + help="if this parameter is set to true, transformer OCR will be applied; otherwise, the cnn_rnn model is used.", +) +@click.option( + "--export_textline_images_and_text", + "-etit/-noetit", + is_flag=True, + help="if this parameter is set to true, images and text in xml will be exported into the output dir. These files can be used for training an OCR engine.", +) +@click.option( + "--do_not_mask_with_textline_contour", + "-nmtc/-mtc", + is_flag=True, + help="if this parameter is set to true, cropped textline images will not be masked with the textline contour.", +) +@click.option( + "--draw_texts_on_image", + "-dtoi/-ndtoi", + is_flag=True, + help="if this parameter is set to true, the predicted texts will be displayed on an image.", +) +@click.option( + "--prediction_with_both_of_rgb_and_bin", + "-brb/-nbrb", + is_flag=True, + help="If this parameter is set to True, the prediction will be performed using both RGB and binary images. However, this does not necessarily improve results; it may be beneficial for certain document images.", +) +@click.option( + "--log_level", + "-l", + type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), + help="Override log level globally to this", +) + +def ocr(dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, draw_texts_on_image, prediction_with_both_of_rgb_and_bin, log_level): + initLogging() + if log_level: + getLogger('eynollah').setLevel(getLevelName(log_level)) + eynollah_ocr = Eynollah_ocr( + dir_xmls=dir_xmls, + dir_out_image_text=dir_out_image_text, + dir_in=dir_in, + dir_in_bin=dir_in_bin, + dir_out=out, + dir_models=model, + tr_ocr=tr_ocr, + export_textline_images_and_text=export_textline_images_and_text, + do_not_mask_with_textline_contour=do_not_mask_with_textline_contour, + draw_texts_on_image=draw_texts_on_image, + prediction_with_both_of_rgb_and_bin=prediction_with_both_of_rgb_and_bin, + ) + eynollah_ocr.run() + +if __name__ == "__main__": + main() diff --git a/src/eynollah/cli/__init__.py b/src/eynollah/cli/__init__.py deleted file mode 100644 index 43ed046..0000000 --- a/src/eynollah/cli/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# NOTE: For predictable order of imports of torch/shapely/tensorflow -# this must be the first import of the CLI! 
-from ..eynollah_imports import imported_libs - -from .cli import main -from .cli_binarize import binarize_cli -from .cli_enhance import enhance_cli -from .cli_extract_images import extract_images_cli -from .cli_layout import layout_cli -from .cli_models import models_cli -from .cli_ocr import ocr_cli -from .cli_readingorder import readingorder_cli - -main.add_command(binarize_cli, 'binarization') -main.add_command(enhance_cli, 'enhancement') -main.add_command(layout_cli, 'layout') -main.add_command(readingorder_cli, 'machine-based-reading-order') -main.add_command(models_cli, 'models') -main.add_command(ocr_cli, 'ocr') -main.add_command(extract_images_cli, 'extract-images') diff --git a/src/eynollah/cli/cli.py b/src/eynollah/cli/cli.py deleted file mode 100644 index ace3f1c..0000000 --- a/src/eynollah/cli/cli.py +++ /dev/null @@ -1,66 +0,0 @@ -from dataclasses import dataclass -import logging -import sys -import os -from typing import Union - -import click - -from ..model_zoo import EynollahModelZoo -from .cli_models import models_cli - -@dataclass() -class EynollahCliCtx: - """ - Holds options relevant for all eynollah subcommands - """ - model_zoo: EynollahModelZoo - log_level : Union[str, None] = 'INFO' - - -@click.group() -@click.option( - "--model-basedir", - "-m", - help="directory of models", - # NOTE: not mandatory to exist so --help for subcommands works but will log a warning - # and raise exception when trying to load models in the CLI - # type=click.Path(exists=True), - default=os.getcwd(), -) -@click.option( - "--model-overrides", - "-mv", - help="override default versions of model categories, syntax is 'CATEGORY VARIANT PATH', e.g 'region light /path/to/model'. See eynollah list-models for the full list", - type=(str, str, str), - multiple=True, -) -@click.option( - "--log_level", - "-l", - type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), - help="Override log level globally to this", -) -@click.pass_context -def main(ctx, model_basedir, model_overrides, log_level): - """ - eynollah - Document Layout Analysis, Image Enhancement, OCR - """ - # Initialize logging - console_handler = logging.StreamHandler(sys.stderr) - console_handler.setLevel(logging.NOTSET) - formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s %(name)s - %(message)s', datefmt='%H:%M:%S') - console_handler.setFormatter(formatter) - logging.getLogger('eynollah').addHandler(console_handler) - logging.getLogger('eynollah').setLevel(log_level or logging.INFO) - # Initialize model zoo - model_zoo = EynollahModelZoo(basedir=model_basedir, model_overrides=model_overrides) - # Initialize CLI context - ctx.obj = EynollahCliCtx( - model_zoo=model_zoo, - log_level=log_level, - ) - - -if __name__ == "__main__": - main() diff --git a/src/eynollah/cli/cli_binarize.py b/src/eynollah/cli/cli_binarize.py deleted file mode 100644 index f0e56f5..0000000 --- a/src/eynollah/cli/cli_binarize.py +++ /dev/null @@ -1,62 +0,0 @@ -import click - -@click.command() -@click.option( - '--patches/--no-patches', - default=True, - help='let the model see the image in patches (tiling) instead of total (full).' 
-) -@click.option( - "--input-image", "--image", - "-i", - help="input image filename", - type=click.Path(exists=True, dir_okay=False) -) -@click.option( - "--dir_in", - "-di", - help="directory of input images (instead of --image)", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--output", - "-o", - help="output image (if using -i) or output image directory (if using -di)", - type=click.Path(file_okay=True, dir_okay=True), - required=True, -) -@click.option( - "--overwrite", - "-O", - help="overwrite (instead of skipping) if output xml exists", - is_flag=True, -) -@click.option( - "--device", - "-D", - help="placement of computations in predictors for each model type; if none (by default), will try to use first available GPU or fall back to CPU; set string to force using a device (e.g. 'GPU0', 'GPU1' or 'CPU'). Can also be a comma-separated list of model category to device mappings (e.g. 'col_classifier:CPU,page:GPU0,*:GPU1')", -) -@click.pass_context -def binarize_cli( - ctx, - patches, - input_image, - dir_in, - output, - overwrite, - device, -): - """ - Binarize images with a ML model - """ - from ..sbb_binarize import SbbBinarizer - assert bool(input_image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." - binarizer = SbbBinarizer(model_zoo=ctx.obj.model_zoo, device=device) - binarizer.run( - image_filename=input_image, - use_patches=patches, - output=output, - dir_in=dir_in, - overwrite=overwrite - ) - diff --git a/src/eynollah/cli/cli_enhance.py b/src/eynollah/cli/cli_enhance.py deleted file mode 100644 index 517e1e8..0000000 --- a/src/eynollah/cli/cli_enhance.py +++ /dev/null @@ -1,73 +0,0 @@ -import click - -@click.command() -@click.option( - "--image", - "-i", - help="input image filename", - type=click.Path(exists=True, dir_okay=False), -) -@click.option( - "--out", - "-o", - help="directory for output image files", - type=click.Path(exists=True, file_okay=False), - required=True, -) -@click.option( - "--overwrite", - "-O", - help="overwrite (instead of skipping) if output image exists", - is_flag=True, -) -@click.option( - "--dir_in", - "-di", - help="directory of input images (instead of --image)", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--num_col_upper", - "-ncu", - default=0, - type=click.IntRange(min=0), - help="lower limit of columns in document image", -) -@click.option( - "--num_col_lower", - "-ncl", - default=0, - type=click.IntRange(min=0), - help="upper limit of columns in document image", -) -@click.option( - "--save_org_scale", - "-sos", - is_flag=True, - help="save the enhanced image in original image size", -) -@click.option( - "--device", - "-D", - help="placement of computations in predictors for each model type; if none (by default), will try to use first available GPU or fall back to CPU; set string to force using a device (e.g. 'GPU0', 'GPU1' or 'CPU'). Can also be a comma-separated list of model category to device mappings (e.g. 'col_classifier:CPU,page:GPU0,*:GPU1')", -) -@click.pass_context -def enhance_cli(ctx, image, out, overwrite, dir_in, num_col_upper, num_col_lower, save_org_scale, device): - """ - Enhance image - """ - assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." 
-    from ..image_enhancer import Enhancer
-    enhancer = Enhancer(
-        model_zoo=ctx.obj.model_zoo,
-        num_col_upper=num_col_upper,
-        num_col_lower=num_col_lower,
-        save_org_scale=save_org_scale,
-        device=device,
-    )
-    enhancer.run(overwrite=overwrite,
-                 dir_in=dir_in,
-                 image_filename=image,
-                 dir_out=out,
-    )
-
diff --git a/src/eynollah/cli/cli_extract_images.py b/src/eynollah/cli/cli_extract_images.py
deleted file mode 100644
index 0add5b5..0000000
--- a/src/eynollah/cli/cli_extract_images.py
+++ /dev/null
@@ -1,100 +0,0 @@
-import click
-
-@click.command()
-@click.option(
-    "--image",
-    "-i",
-    help="input image filename",
-    type=click.Path(exists=True, dir_okay=False),
-)
-
-@click.option(
-    "--out",
-    "-o",
-    help="directory for output PAGE-XML files",
-    type=click.Path(exists=True, file_okay=False),
-    required=True,
-)
-@click.option(
-    "--overwrite",
-    "-O",
-    help="overwrite (instead of skipping) if output xml exists",
-    is_flag=True,
-)
-@click.option(
-    "--dir_in",
-    "-di",
-    help="directory of input images (instead of --image)",
-    type=click.Path(exists=True, file_okay=False),
-)
-@click.option(
-    "--save_images",
-    "-si",
-    help="if a directory is given, images in documents will be cropped and saved there",
-    type=click.Path(exists=True, file_okay=False),
-)
-@click.option(
-    "--enable-plotting/--disable-plotting",
-    "-ep/-noep",
-    is_flag=True,
-    help="If set, will plot intermediary files and images",
-)
-@click.option(
-    "--input_binary/--input-RGB",
-    "-ib/-irgb",
-    is_flag=True,
-    help="In general, eynollah uses RGB as input, but if the input document is very dark, very bright, or otherwise hard to process, input binarization can be turned on here. When this flag is set, eynollah will binarize the RGB input document internally; you should always provide RGB images to eynollah either way.",
-)
-@click.option(
-    "--ignore_page_extraction/--extract_page_included",
-    "-ipe/-epi",
-    is_flag=True,
-    help="if set, page extraction (cropping via page frame detection) is skipped",
-)
-@click.option(
-    "--num_col_upper",
-    "-ncu",
-    help="upper limit of columns in document image",
-)
-@click.option(
-    "--num_col_lower",
-    "-ncl",
-    help="lower limit of columns in document image",
-)
-@click.pass_context
-def extract_images_cli(
-    ctx,
-    image,
-    out,
-    overwrite,
-    dir_in,
-    save_images,
-    enable_plotting,
-    input_binary,
-    num_col_upper,
-    num_col_lower,
-    ignore_page_extraction,
-):
-    """
-    Extract image regions from document pages (with output in PAGE-XML)
-    """
-    assert enable_plotting or not save_images, "Plotting with -si also requires -ep"
-    assert not enable_plotting or save_images, "Plotting with -ep also requires -si"
-    assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both."
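The --device help above describes a 'category:device' mapping syntax with '*' as a wildcard. The resolution logic itself is not shown in this diff; a minimal sketch of parsing such a spec (hypothetical function, illustration only) might look like:

def parse_device_spec(spec):
    # 'GPU0'                                -> {'*': 'GPU0'} (one device for all categories)
    # 'col_classifier:CPU,page:GPU0,*:GPU1' -> per-category mapping with '*' as fallback
    if not spec:
        return {}
    if ':' not in spec:
        return {'*': spec}
    return dict(pair.split(':', 1) for pair in spec.split(','))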
-
-    from ..extract_images import EynollahImageExtractor
-    extractor = EynollahImageExtractor(
-        model_zoo=ctx.obj.model_zoo,
-        enable_plotting=enable_plotting,
-        input_binary=input_binary,
-        ignore_page_extraction=ignore_page_extraction,
-        num_col_upper=num_col_upper,
-        num_col_lower=num_col_lower,
-    )
-    extractor.run(overwrite=overwrite,
-                  image_filename=image,
-                  dir_in=dir_in,
-                  dir_out=out,
-                  dir_of_cropped_images=save_images,
-    )
-
diff --git a/src/eynollah/cli/cli_layout.py b/src/eynollah/cli/cli_layout.py
deleted file mode 100644
index 417b202..0000000
--- a/src/eynollah/cli/cli_layout.py
+++ /dev/null
@@ -1,256 +0,0 @@
-import click
-
-@click.command(context_settings=dict(
-    help_option_names=['-h', '--help'],
-    show_default=True))
-@click.option(
-    "--image",
-    "-i",
-    help="input image filename",
-    type=click.Path(exists=True, dir_okay=False),
-)
-@click.option(
-    "--out",
-    "-o",
-    help="directory for output PAGE-XML files",
-    type=click.Path(exists=True, file_okay=False),
-    required=True,
-)
-@click.option(
-    "--overwrite",
-    "-O",
-    help="overwrite (instead of skipping) if output xml exists",
-    is_flag=True,
-)
-@click.option(
-    "--dir_in",
-    "-di",
-    help="directory of input images (instead of --image)",
-    type=click.Path(exists=True, file_okay=False),
-)
-@click.option(
-    "--save_images",
-    "-si",
-    help="if a directory is given, cropped images of pages will be saved there",
-    type=click.Path(exists=True, file_okay=False),
-)
-@click.option(
-    "--save_layout",
-    "-sl",
-    help="if a directory is given, plots of layout detection will be saved there",
-    type=click.Path(exists=True, file_okay=False),
-)
-@click.option(
-    "--save_deskewed",
-    "-sd",
-    help="if a directory is given, plots of page deskewing will be saved there",
-    type=click.Path(exists=True, file_okay=False),
-)
-@click.option(
-    "--save_all",
-    "-sa",
-    help="if a directory is given, all plots needed will be saved there",
-    type=click.Path(exists=True, file_okay=False),
-)
-@click.option(
-    "--save_page",
-    "-sp",
-    help="if a directory is given, plots of page cropping will be saved there",
-    type=click.Path(exists=True, file_okay=False),
-)
-@click.option(
-    "--enable-plotting",
-    "-ep",
-    is_flag=True,
-    help="plot intermediary diagnostic images to files",
-)
-@click.option(
-    "--allow-enhancement",
-    "-ae",
-    is_flag=True,
-    help="check whether the input image needs resizing and enhancement; if so, the resized and enhanced image and the corresponding layout data will be written to the output directory",
-)
-@click.option(
-    "--curved-line",
-    "-cl",
-    is_flag=True,
-    help="try to return most precise textline contours by deskewing and detecting textlines for all text regions individually. Requires much more computation.",
-)
-@click.option(
-    "--full-layout",
-    "-fl",
-    is_flag=True,
-    help="return all elements of layout, including headings and drop-capitals",
-)
-@click.option(
-    "--tables",
-    "-tab",
-    is_flag=True,
-    help="try to detect table regions",
-)
-@click.option(
-    "--right2left",
-    "-r2l",
-    is_flag=True,
-    help="extract right-to-left reading order (instead of left-to-right)",
-)
-@click.option(
-    "--input_binary",
-    "-ib",
-    is_flag=True,
-    help="In general, eynollah uses RGB as input, but if the input document is very dark, very bright, or otherwise hard to process, internal binarization can be turned on here. When set, eynollah will binarize the RGB input document first.",
-)
-@click.option(
-    "--allow_scaling",
-    "-as",
-    is_flag=True,
-    help="check the image scale and, if needed, rescale it to perform better layout detection",
-)
-@click.option(
-    "--headers_off",
-    "-ho",
-    is_flag=True,
-    help="ignore the role of headers in reading order",
-)
-@click.option(
-    "--ignore_page_extraction",
-    "-ipe",
-    is_flag=True,
-    help="ignore page extraction (cropping via page frame detection model)",
-)
-@click.option(
-    "--reading_order_machine_based",
-    "-romb",
-    is_flag=True,
-    help="apply model-based reading order detection",
-)
-@click.option(
-    "--num_col_upper",
-    "-ncu",
-    default=0,
-    type=click.IntRange(min=0),
-    help="upper limit of columns in document image; 0 means autodetected from model",
-)
-@click.option(
-    "--num_col_lower",
-    "-ncl",
-    default=0,
-    type=click.IntRange(min=0),
-    help="lower limit of columns in document image; 0 means autodetected from model",
-)
-@click.option(
-    "--threshold_art_class_layout",
-    "-tharl",
-    default=0.1,
-    type=click.FloatRange(min=0.0, max=1.0),
-    help="confidence threshold of artificial boundary class during region detection",
-)
-@click.option(
-    "--threshold_art_class_textline",
-    "-thart",
-    default=0.1,
-    type=click.FloatRange(min=0.0, max=1.0),
-    help="confidence threshold of artificial boundary class during textline detection",
-)
-@click.option(
-    "--skip_layout_and_reading_order",
-    "-slro",
-    is_flag=True,
-    help="ignore layout detection and reading order, i.e. textline detection will be done within the entire printspace, and textline contours will be written into a single overall text region.",
-)
-@click.option(
-    "--num-jobs",
-    "-j",
-    default=0,
-    type=click.IntRange(min=0),
-    help="number of parallel images to process (for --dir_in mode; also helps better utilise GPU if available); 0 means based on autodetected number of processor cores",
-)
-@click.option(
-    "--halt-fail",
-    "-H",
-    default=0,
-    type=click.FloatRange(min=0),
-    help="abort when the number of failed images exceeds this value (if >=1) or the ratio of failed over total images exceeds this value (if <1); 0 means ignore failures",
-)
-@click.option(
-    "--device",
-    "-D",
-    help="placement of computations in predictors for each model type; if none (by default), will try to use first available GPU or fall back to CPU; set string to force using a device (e.g. 'GPU0', 'GPU1' or 'CPU'). Can also be a comma-separated list of model category to device mappings (e.g. 
'col_classifier:CPU,page:GPU0,*:GPU1')", -) -@click.pass_context -def layout_cli( - ctx, - image, - out, - overwrite, - dir_in, - save_images, - save_layout, - save_deskewed, - save_all, - save_page, - enable_plotting, - allow_enhancement, - curved_line, - full_layout, - tables, - right2left, - input_binary, - allow_scaling, - headers_off, - reading_order_machine_based, - num_col_upper, - num_col_lower, - threshold_art_class_textline, - threshold_art_class_layout, - skip_layout_and_reading_order, - ignore_page_extraction, - num_jobs, - halt_fail, - device, -): - """ - Detect Layout (with optional image enhancement and reading order detection) - """ - from ..eynollah import Eynollah - assert enable_plotting or not save_layout, "Plotting with -sl also requires -ep" - assert enable_plotting or not save_deskewed, "Plotting with -sd also requires -ep" - assert enable_plotting or not save_all, "Plotting with -sa also requires -ep" - assert enable_plotting or not save_page, "Plotting with -sp also requires -ep" - assert enable_plotting or not save_images, "Plotting with -si also requires -ep" - assert not enable_plotting or save_layout or save_deskewed or save_all or save_page or save_images or allow_enhancement, \ - "Plotting with -ep also requires -sl, -sd, -sa, -sp, -si or -ae" - assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." - eynollah = Eynollah( - model_zoo=ctx.obj.model_zoo, - device=device, - enable_plotting=enable_plotting, - allow_enhancement=allow_enhancement, - curved_line=curved_line, - full_layout=full_layout, - tables=tables, - right2left=right2left, - input_binary=input_binary, - allow_scaling=allow_scaling, - headers_off=headers_off, - ignore_page_extraction=ignore_page_extraction, - reading_order_machine_based=reading_order_machine_based, - num_col_upper=num_col_upper, - num_col_lower=num_col_lower, - skip_layout_and_reading_order=skip_layout_and_reading_order, - threshold_art_class_textline=threshold_art_class_textline, - threshold_art_class_layout=threshold_art_class_layout, - ) - eynollah.run(overwrite=overwrite, - image_filename=image, - dir_in=dir_in, - dir_out=out, - dir_of_cropped_images=save_images, - dir_of_layout=save_layout, - dir_of_deskewed=save_deskewed, - dir_of_all=save_all, - dir_save_page=save_page, - num_jobs=num_jobs, - halt_fail=halt_fail, - ) - diff --git a/src/eynollah/cli/cli_models.py b/src/eynollah/cli/cli_models.py deleted file mode 100644 index f3de596..0000000 --- a/src/eynollah/cli/cli_models.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Set, Tuple -import click - -from eynollah.model_zoo.default_specs import MODELS_VERSION - -@click.group() -@click.pass_context -def models_cli( - ctx, -): - """ - Organize models for the various runners in eynollah. - """ - assert ctx.obj.model_zoo - - -@models_cli.command('list') -@click.pass_context -def list_models( - ctx, -): - """ - List all the models in the zoo - """ - print(f"Model basedir: {ctx.obj.model_zoo.model_basedir}") - print(f"Model overrides: {ctx.obj.model_zoo.model_overrides}") - print(ctx.obj.model_zoo) - - -@models_cli.command('package') -@click.option( - '--set-version', '-V', 'version', help="Version to use for packaging", default=MODELS_VERSION, show_default=True -) -@click.argument('output_dir') -@click.pass_context -def package( - ctx, - version, - output_dir, -): - """ - Generate shell code to copy all the models in the zoo into properly named folders in OUTPUT_DIR for distribution. 
-
-    eynollah models -m SRC package OUTPUT_DIR
-
-    SRC should contain a directory "models_eynollah" containing all the models.
-    """
-    mkdirs: Set[Path] = set([])
-    copies: Set[Tuple[Path, Path]] = set([])
-    for spec in ctx.obj.model_zoo.specs.specs:
-        # skip these as they are dependent on the ocr model
-        if spec.category in ('num_to_char', 'characters'):
-            continue
-        src: Path = ctx.obj.model_zoo.model_path(spec.category, spec.variant)
-        # Only copy the top-most directory relative to models_eynollah
-        while src.parent.name != 'models_eynollah':
-            src = src.parent
-        for dist in spec.dists:
-            dist_dir = Path(f"{output_dir}/models_{dist}_{version}/models_eynollah")
-            copies.add((src, dist_dir))
-            mkdirs.add(dist_dir)
-    for dir in mkdirs:
-        print(f"mkdir -vp {dir}")
-    for (src, dst) in copies:
-        print(f"cp -vr {src} {dst}")
-    for dir in mkdirs:
-        zip_path = Path(f'../{dir.parent.name}.zip')
-        print(f"(cd {dir}/..; zip -vr {zip_path} models_eynollah)")
diff --git a/src/eynollah/cli/cli_ocr.py b/src/eynollah/cli/cli_ocr.py
deleted file mode 100644
index 406af61..0000000
--- a/src/eynollah/cli/cli_ocr.py
+++ /dev/null
@@ -1,103 +0,0 @@
-import click
-
-@click.command()
-@click.option(
-    "--image",
-    "-i",
-    help="input image filename",
-    type=click.Path(exists=True, dir_okay=False),
-)
-@click.option(
-    "--dir_in",
-    "-di",
-    help="directory of input images (instead of --image)",
-    type=click.Path(exists=True, file_okay=False),
-)
-@click.option(
-    "--dir_in_bin",
-    "-dib",
-    help="directory of binarized images (in addition to --dir_in for RGB images; filename stems must match the RGB image files, with '.png' suffix). Prediction is then performed on both the RGB and the binarized images. (This does not necessarily improve results, but it may be beneficial for certain document images.)",
-    type=click.Path(exists=True, file_okay=False),
-)
-@click.option(
-    "--dir_xmls",
-    "-dx",
-    help="directory of input PAGE-XML files (in addition to --dir_in; filename stems must match the image files, with '.xml' suffix).",
-    type=click.Path(exists=True, file_okay=False),
-    required=True,
-)
-@click.option(
-    "--out",
-    "-o",
-    help="directory for output PAGE-XML files",
-    type=click.Path(exists=True, file_okay=False),
-    required=True,
-)
-@click.option(
-    "--dir_out_image_text",
-    "-doit",
-    help="directory for output images, newly rendered with predicted text",
-    type=click.Path(exists=True, file_okay=False),
-)
-@click.option(
-    "--overwrite",
-    "-O",
-    help="overwrite (instead of skipping) if output xml exists",
-    is_flag=True,
-)
-@click.option(
-    "--tr_ocr",
-    "-trocr/-notrocr",
-    is_flag=True,
-    help="if set, transformer OCR will be applied; otherwise the CNN-RNN model is used.",
-)
-@click.option(
-    "--do_not_mask_with_textline_contour",
-    "-nmtc/-mtc",
-    is_flag=True,
-    help="if set, cropped textline images will not be masked with the textline contour.",
-)
-@click.option(
-    "--batch_size",
-    "-bs",
-    help="inference batch size; defaults to 2 for the transformer OCR model and 8 for the CNN-RNN model",
-)
-@click.option(
-    "--min_conf_value_of_textline_text",
-    "-min_conf",
-    help="minimum OCR confidence value. Text lines with a confidence value lower than this threshold will not be included in the output XML file.",
-)
-@click.pass_context
-def ocr_cli(
-    ctx,
-    image,
-    dir_in,
-    dir_in_bin,
-    dir_xmls,
-    out,
-    dir_out_image_text,
-    overwrite,
-    tr_ocr,
-    do_not_mask_with_textline_contour,
-    batch_size,
-    min_conf_value_of_textline_text,
-):
-    """
-    Recognize text with a CNN-RNN or transformer ML model.
-    """
-    assert bool(image) ^ bool(dir_in), "Either -i (single image) or -di (directory) must be provided, but not both."
-    from ..eynollah_ocr import Eynollah_ocr
-    eynollah_ocr = Eynollah_ocr(
-        model_zoo=ctx.obj.model_zoo,
-        tr_ocr=tr_ocr,
-        do_not_mask_with_textline_contour=do_not_mask_with_textline_contour,
-        batch_size=batch_size,
-        min_conf_value_of_textline_text=min_conf_value_of_textline_text)
-    eynollah_ocr.run(overwrite=overwrite,
-                     dir_in=dir_in,
-                     dir_in_bin=dir_in_bin,
-                     image_filename=image,
-                     dir_xmls=dir_xmls,
-                     dir_out_image_text=dir_out_image_text,
-                     dir_out=out,
-    )
diff --git a/src/eynollah/cli/cli_readingorder.py b/src/eynollah/cli/cli_readingorder.py
deleted file mode 100644
index 0f44b7f..0000000
--- a/src/eynollah/cli/cli_readingorder.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import click
-
-@click.command()
-@click.option(
-    "--input",
-    "-i",
-    help="PAGE-XML input filename",
-    type=click.Path(exists=True, dir_okay=False),
-)
-@click.option(
-    "--dir_in",
-    "-di",
-    help="directory of PAGE-XML input files (instead of --input)",
-    type=click.Path(exists=True, file_okay=False),
-)
-@click.option(
-    "--out",
-    "-o",
-    help="directory for output PAGE-XML files",
-    type=click.Path(exists=True, file_okay=False),
-    required=True,
-)
-@click.pass_context
-def readingorder_cli(ctx, input, dir_in, out):
-    """
-    Generate reading order with an ML model
-    """
-    from ..mb_ro_on_layout import machine_based_reading_order_on_layout
-    assert bool(input) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both."
-    orderer = machine_based_reading_order_on_layout(model_zoo=ctx.obj.model_zoo)
-    orderer.run(xml_filename=input,
-                dir_in=dir_in,
-                dir_out=out,
-    )
-
diff --git a/src/eynollah/extract_images.py b/src/eynollah/extract_images.py
deleted file mode 100644
index 7a7e3f6..0000000
--- a/src/eynollah/extract_images.py
+++ /dev/null
@@ -1,281 +0,0 @@
-"""
-Extract image regions from document pages.
-""" - -from concurrent.futures import ProcessPoolExecutor -import logging -from multiprocessing import cpu_count -import os -import time -from typing import Optional -from pathlib import Path -import tensorflow as tf -import numpy as np -import cv2 - -from eynollah.utils.contour import filter_contours_area_of_image, return_contours_of_image, return_contours_of_interested_region -from eynollah.utils.resize import resize_image - -from .model_zoo.model_zoo import EynollahModelZoo -from .eynollah import Eynollah -from .utils import box2rect, is_image_filename -from .plot import EynollahPlotter - -class EynollahImageExtractor(Eynollah): - - def __init__( - self, - *, - model_zoo: EynollahModelZoo, - enable_plotting : bool = False, - input_binary : bool = False, - ignore_page_extraction : bool = False, - num_col_upper : Optional[int] = None, - num_col_lower : Optional[int] = None, - full_layout : bool = False, - tables : bool = False, - curved_line : bool = False, - allow_enhancement : bool = False, - - ): - self.logger = logging.getLogger('eynollah.extract_images') - self.model_zoo = model_zoo - self.plotter = None - self.tables = tables - self.curved_line = curved_line - self.allow_enhancement = allow_enhancement - - self.enable_plotting = enable_plotting - # --input-binary sensible if image is very dark, if layout is not working. - self.input_binary = input_binary - self.ignore_page_extraction = ignore_page_extraction - self.full_layout = full_layout - if num_col_upper: - self.num_col_upper = int(num_col_upper) - else: - self.num_col_upper = num_col_upper - if num_col_lower: - self.num_col_lower = int(num_col_lower) - else: - self.num_col_lower = num_col_lower - - # for parallelization of CPU-intensive tasks: - self.executor = ProcessPoolExecutor(max_workers=cpu_count()) - - t_start = time.time() - - try: - for device in tf.config.list_physical_devices('GPU'): - tf.config.experimental.set_memory_growth(device, True) - except: - self.logger.warning("no GPU device available") - - self.logger.info("Loading models...") - self.setup_models() - self.logger.info(f"Model initialization complete ({time.time() - t_start:.1f}s)") - - def setup_models(self): - - loadable = [ - "col_classifier", - "binarization", - "page", - "extract_images", - ] - self.model_zoo.load_models(*loadable) - - def get_regions_light_v_extract_only_images(self,img, num_col_classifier): - self.logger.debug("enter get_regions_extract_images_only") - erosion_hurts = False - img_org = np.copy(img) - img_height_h = img_org.shape[0] - img_width_h = img_org.shape[1] - - if num_col_classifier == 1: - img_w_new = 700 - elif num_col_classifier == 2: - img_w_new = 900 - elif num_col_classifier == 3: - img_w_new = 1500 - elif num_col_classifier == 4: - img_w_new = 1800 - elif num_col_classifier == 5: - img_w_new = 2200 - elif num_col_classifier == 6: - img_w_new = 2500 - else: - raise ValueError("num_col_classifier must be in range 1..6") - img_h_new = int(img.shape[0] / float(img.shape[1]) * img_w_new) - img_resized = resize_image(img,img_h_new, img_w_new ) - - prediction_regions_org, _ = self.do_prediction_new_concept(True, img_resized, self.model_zoo.get("extract_images")) - - prediction_regions_org = resize_image(prediction_regions_org,img_height_h, img_width_h ) - image_page, page_coord, cont_page = self.extract_page() - - prediction_regions_org = prediction_regions_org[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - prediction_regions_org=prediction_regions_org[:,:,0] - - mask_seps_only = (prediction_regions_org[:,:] 
==3)*1 - mask_texts_only = (prediction_regions_org[:,:] ==1)*1 - mask_images_only=(prediction_regions_org[:,:] ==2)*1 - - polygons_seplines, hir_seplines = return_contours_of_image(mask_seps_only) - polygons_seplines = filter_contours_area_of_image( - mask_seps_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001, dilate=1) - - polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) - polygons_of_only_seps = return_contours_of_interested_region(mask_seps_only,1,0.00001) - - text_regions_p_true = np.zeros(prediction_regions_org.shape) - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_seps, color=(3,3,3)) - - text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts=polygons_of_only_texts, color=(1,1,1)) - - text_regions_p_true[text_regions_p_true.shape[0]-15:text_regions_p_true.shape[0], :] = 0 - text_regions_p_true[:, text_regions_p_true.shape[1]-15:text_regions_p_true.shape[1]] = 0 - - ##polygons_of_images = return_contours_of_interested_region(text_regions_p_true, 2, 0.0001) - polygons_of_images = return_contours_of_interested_region(text_regions_p_true, 2, 0.001) - - polygons_of_images_fin = [] - for ploy_img_ind in polygons_of_images: - box = _, _, w, h = cv2.boundingRect(ploy_img_ind) - if h < 150 or w < 150: - pass - else: - page_coord_img = box2rect(box) # type: ignore - polygons_of_images_fin.append(np.array([[page_coord_img[2], page_coord_img[0]], - [page_coord_img[3], page_coord_img[0]], - [page_coord_img[3], page_coord_img[1]], - [page_coord_img[2], page_coord_img[1]]])) - - self.logger.debug("exit get_regions_extract_images_only") - return (text_regions_p_true, - erosion_hurts, - polygons_seplines, - polygons_of_images_fin, - image_page, - page_coord, - cont_page) - - def run(self, - overwrite: bool = False, - image_filename: Optional[str] = None, - dir_in: Optional[str] = None, - dir_out: Optional[str] = None, - dir_of_cropped_images: Optional[str] = None, - dir_of_layout: Optional[str] = None, - dir_of_deskewed: Optional[str] = None, - dir_of_all: Optional[str] = None, - dir_save_page: Optional[str] = None, - ): - """ - Get image and scales, then extract the page of scanned image - """ - self.logger.debug("enter run") - t0_tot = time.time() - # Log enabled features directly - enabled_modes = [] - if self.full_layout: - enabled_modes.append("Full layout analysis") - if self.tables: - enabled_modes.append("Table detection") - if enabled_modes: - self.logger.info("Enabled modes: " + ", ".join(enabled_modes)) - if self.enable_plotting: - self.logger.info("Saving debug plots") - if dir_of_cropped_images: - self.logger.info(f"Saving cropped images to: {dir_of_cropped_images}") - if dir_of_layout: - self.logger.info(f"Saving layout plots to: {dir_of_layout}") - if dir_of_deskewed: - self.logger.info(f"Saving deskewed images to: {dir_of_deskewed}") - - if dir_in: - ls_imgs = [os.path.join(dir_in, image_filename) - for image_filename in filter(is_image_filename, - os.listdir(dir_in))] - elif image_filename: - ls_imgs = [image_filename] - else: - raise ValueError("run requires either a single image filename or a directory") - - for img_filename in ls_imgs: - self.logger.info(img_filename) - t0 = time.time() - - self.reset_file_name_dir(img_filename, dir_out) - if self.enable_plotting: - self.plotter = EynollahPlotter(dir_out=dir_out, - dir_of_all=dir_of_all, - dir_save_page=dir_save_page, - dir_of_deskewed=dir_of_deskewed, - 
dir_of_cropped_images=dir_of_cropped_images, - dir_of_layout=dir_of_layout, - image_filename_stem=Path(img_filename).stem) - #print("text region early -11 in %.1fs", time.time() - t0) - if os.path.exists(self.writer.output_filename): - if overwrite: - self.logger.warning("will overwrite existing output file '%s'", self.writer.output_filename) - else: - self.logger.warning("will skip input for existing output file '%s'", self.writer.output_filename) - continue - - pcgts = self.run_single() - self.logger.info("Job done in %.1fs", time.time() - t0) - self.writer.write_pagexml(pcgts) - - if dir_in: - self.logger.info("All jobs done in %.1fs", time.time() - t0_tot) - - def run_single(self): - t0 = time.time() - - self.logger.info(f"Processing file: {self.writer.image_filename}") - self.logger.info("Step 1/5: Image Enhancement") - - img_res, is_image_enhanced, num_col_classifier, _ = \ - self.run_enhancement() - - self.logger.info(f"Image: {self.image.shape[1]}x{self.image.shape[0]}, " - f"{self.dpi} DPI, {num_col_classifier} columns") - if is_image_enhanced: - self.logger.info("Enhancement applied") - - self.logger.info(f"Enhancement complete ({time.time() - t0:.1f}s)") - - - # Image Extraction Mode - self.logger.info("Step 2/5: Image Extraction Mode") - - _, _, _, polygons_of_images, \ - image_page, page_coord, cont_page = \ - self.get_regions_light_v_extract_only_images(img_res, num_col_classifier) - - pcgts = self.writer.build_pagexml_no_full_layout( - found_polygons_text_region=[], - page_coord=page_coord, - order_of_texts=[], - all_found_textline_polygons=[], - all_box_coord=[], - found_polygons_text_region_img=polygons_of_images, - found_polygons_marginals_left=[], - found_polygons_marginals_right=[], - all_found_textline_polygons_marginals_left=[], - all_found_textline_polygons_marginals_right=[], - all_box_coord_marginals_left=[], - all_box_coord_marginals_right=[], - slopes=[], - slopes_marginals_left=[], - slopes_marginals_right=[], - cont_page=cont_page, - polygons_seplines=[], - found_polygons_tables=[], - ) - if self.plotter: - self.plotter.write_images_into_directory(polygons_of_images, image_page) - - self.logger.info("Image extraction complete") - return pcgts diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index c632941..022cf0a 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -1,93 +1,114 @@ -""" -document layout analysis (segmentation) with output in PAGE-XML -""" # pylint: disable=no-member,invalid-name,line-too-long,missing-function-docstring,missing-class-docstring,too-many-branches # pylint: disable=too-many-locals,wrong-import-position,too-many-lines,too-many-statements,chained-comparison,fixme,broad-except,c-extension-no-member # pylint: disable=too-many-public-methods,too-many-arguments,too-many-instance-attributes,too-many-public-methods, # pylint: disable=consider-using-enumerate -# FIXME: fix all of those... 
-# pyright: reportUnnecessaryTypeIgnoreComment=true -# pyright: reportPossiblyUnboundVariable=false -# pyright: reportOperatorIssue=false -# pyright: reportUnboundVariable=false -# pyright: reportArgumentType=false -# pyright: reportAttributeAccessIssue=false -# pyright: reportOptionalMemberAccess=false -# pyright: reportGeneralTypeIssues=false -# pyright: reportOptionalSubscript=false - -import logging -import logging.handlers -import sys +""" +document layout analysis (segmentation) with output in PAGE-XML +""" +from logging import Logger from difflib import SequenceMatcher as sq +from PIL import Image, ImageDraw, ImageFont import math import os +import sys import time from typing import Optional +import atexit +import warnings from functools import partial from pathlib import Path -import multiprocessing as mp -from concurrent.futures import ProcessPoolExecutor, as_completed +from multiprocessing import cpu_count import gc +import copy +import json +from loky import ProcessPoolExecutor +import xml.etree.ElementTree as ET import cv2 import numpy as np from scipy.signal import find_peaks from scipy.ndimage import gaussian_filter1d +from numba import cuda +from ocrd import OcrdPage +from ocrd_utils import getLogger, tf_disable_interactive_logs + +try: + import torch +except ImportError: + torch = None try: import matplotlib.pyplot as plt except ImportError: plt = None +try: + from transformers import TrOCRProcessor, VisionEncoderDecoderModel +except ImportError: + TrOCRProcessor = VisionEncoderDecoderModel = None + +#os.environ['CUDA_VISIBLE_DEVICES'] = '-1' +tf_disable_interactive_logs() +import tensorflow as tf +from tensorflow.python.keras import backend as K +from tensorflow.keras.models import load_model +tf.get_logger().setLevel("ERROR") +warnings.filterwarnings("ignore") +# use tf1 compatibility for keras backend +from tensorflow.compat.v1.keras.backend import set_session +from tensorflow.keras import layers +from tensorflow.keras.layers import StringLookup -from .model_zoo import EynollahModelZoo from .utils.contour import ( filter_contours_area_of_image, filter_contours_area_of_image_tables, - find_center_of_contours, + find_contours_mean_y_diff, find_new_features_of_contours, find_features_of_contours, get_text_region_boxes_by_given_contours, - get_region_confidences, + get_textregion_contours_in_org_image, + get_textregion_contours_in_org_image_light, return_contours_of_image, return_contours_of_interested_region, + return_contours_of_interested_region_by_min_size, + return_contours_of_interested_textline, return_parent_contours, - dilate_textregion_contours, - dilate_textline_contours, - match_deskewed_contours, - estimate_skew_contours, - polygon2contour, - contour2polygon, - join_polygons, - make_intersection, ) -from .utils.rotate import rotate_image +from .utils.rotate import ( + rotate_image, + rotation_not_90_func, + rotation_not_90_func_full_layout +) from .utils.separate_lines import ( + textline_contours_postprocessing, + separate_lines_new2, return_deskew_slop, + do_work_of_slopes_new, do_work_of_slopes_new_curved, + do_work_of_slopes_new_light, +) +from .utils.drop_capitals import ( + adhere_drop_capital_region_into_corresponding_textline, + filter_small_drop_capitals_from_no_patch_layout ) from .utils.marginals import get_marginals from .utils.resize import resize_image -from .utils.shm import share_ndarray from .utils import ( - ensure_array, - pairwise, - is_image_filename, - isNaN, + boosting_headers_by_longshot_region_segmentation, crop_image_inside_box, - 
box2rect, find_num_col, otsu_copy_binary, - seg_mask_label, - fill_bb_of_drop_capitals, - split_textregion_main_vs_head, + put_drop_out_from_only_drop_model, + putt_bb_of_drop_capitals_of_model_in_patches_in_layout, + check_any_text_region_in_model_one_is_main_or_header, + check_any_text_region_in_model_one_is_main_or_header_light, small_textlines_to_parent_adherence2, order_of_regions, find_number_of_columns_in_document, return_boxes_of_images_by_order_of_reading_new ) -from .utils.pil_cv2 import pil2cv +from .utils.pil_cv2 import check_dpi, pil2cv +from .utils.xml import order_and_id_of_texts from .plot import EynollahPlotter from .writer import EynollahXmlWriter @@ -98,172 +119,402 @@ DPI_THRESHOLD = 298 MAX_SLOPE = 999 KERNEL = np.ones((5, 5), np.uint8) +projection_dim = 64 +patch_size = 1 +num_patches =21*21#14*14#28*28#14*14#28*28 -_instance = None -def _set_instance(instance): - global _instance - _instance = instance -def _run_single(*args, **kwargs): - logq = kwargs.pop('logq') - # replace all inherited handlers with queue handler - logging.root.handlers.clear() - _instance.logger.handlers.clear() - handler = logging.handlers.QueueHandler(logq) - logging.root.addHandler(handler) - return _instance.run_single(*args, **kwargs) + +class Patches(layers.Layer): + def __init__(self, **kwargs): + super(Patches, self).__init__() + self.patch_size = patch_size + + def call(self, images): + batch_size = tf.shape(images)[0] + patches = tf.image.extract_patches( + images=images, + sizes=[1, self.patch_size, self.patch_size, 1], + strides=[1, self.patch_size, self.patch_size, 1], + rates=[1, 1, 1, 1], + padding="VALID", + ) + patch_dims = patches.shape[-1] + patches = tf.reshape(patches, [batch_size, -1, patch_dims]) + return patches + def get_config(self): + + config = super().get_config().copy() + config.update({ + 'patch_size': self.patch_size, + }) + return config + + +class PatchEncoder(layers.Layer): + def __init__(self, **kwargs): + super(PatchEncoder, self).__init__() + self.num_patches = num_patches + self.projection = layers.Dense(units=projection_dim) + self.position_embedding = layers.Embedding( + input_dim=num_patches, output_dim=projection_dim + ) + + def call(self, patch): + positions = tf.range(start=0, limit=self.num_patches, delta=1) + encoded = self.projection(patch) + self.position_embedding(positions) + return encoded + def get_config(self): + + config = super().get_config().copy() + config.update({ + 'num_patches': self.num_patches, + 'projection': self.projection, + 'position_embedding': self.position_embedding, + }) + return config class Eynollah: def __init__( self, - *, - model_zoo: EynollahModelZoo, - device: str = '', + dir_models : str, + dir_out : Optional[str] = None, + dir_of_cropped_images : Optional[str] = None, + extract_only_images : bool =False, + dir_of_layout : Optional[str] = None, + dir_of_deskewed : Optional[str] = None, + dir_of_all : Optional[str] = None, + dir_save_page : Optional[str] = None, enable_plotting : bool = False, allow_enhancement : bool = False, curved_line : bool = False, + textline_light : bool = False, full_layout : bool = False, tables : bool = False, right2left : bool = False, input_binary : bool = False, allow_scaling : bool = False, headers_off : bool = False, + light_version : bool = False, ignore_page_extraction : bool = False, reading_order_machine_based : bool = False, - num_col_upper : int = 0, - num_col_lower : int = 0, - threshold_art_class_layout: float = 0.1, - threshold_art_class_textline: float = 0.1, + do_ocr : bool = 
False, + num_col_upper : Optional[int] = None, + num_col_lower : Optional[int] = None, skip_layout_and_reading_order : bool = False, - num_jobs : int = 0, - logger : Optional[logging.Logger] = None, + logger : Optional[Logger] = None, ): - self.logger = logger or logging.getLogger('eynollah') - self.model_zoo = model_zoo - self.plotter = None - + if skip_layout_and_reading_order: + textline_light = True + self.light_version = light_version + self.dir_out = dir_out + self.dir_of_all = dir_of_all + self.dir_save_page = dir_save_page self.reading_order_machine_based = reading_order_machine_based + self.dir_of_deskewed = dir_of_deskewed + self.dir_of_deskewed = dir_of_deskewed + self.dir_of_cropped_images=dir_of_cropped_images + self.dir_of_layout=dir_of_layout self.enable_plotting = enable_plotting self.allow_enhancement = allow_enhancement self.curved_line = curved_line + self.textline_light = textline_light self.full_layout = full_layout self.tables = tables self.right2left = right2left - # --input-binary sensible if image is very dark, if layout is not working. self.input_binary = input_binary self.allow_scaling = allow_scaling self.headers_off = headers_off + self.light_version = light_version + self.extract_only_images = extract_only_images self.ignore_page_extraction = ignore_page_extraction self.skip_layout_and_reading_order = skip_layout_and_reading_order - self.num_col_upper = int(num_col_upper) - self.num_col_lower = int(num_col_lower) - self.threshold_art_class_layout = float(threshold_art_class_layout) - self.threshold_art_class_textline = float(threshold_art_class_textline) - - t_start = time.time() - - self.logger.info("Loading models...") - self.setup_models(device=device) - self.logger.info(f"Model initialization complete ({time.time() - t_start:.1f}s)") - - def setup_models(self, device=''): - - # load models, depending on modes - # (note: loading too many models can cause OOM on GPU/CUDA, - # thus, we try set up the minimal configuration for the current mode) - # autosized variants: _resized or _patched (which one may depend on num_cols) - # (but _resized for full page images is too slow - better resize on CPU in numpy) - loadable = [ - "col_classifier", - #"enhancement", # todo: enhancement_patched - "page", - #"region" - ] - if self.input_binary: - loadable.append("binarization") # todo: binarization_patched - loadable.append("textline") # textline_patched - loadable.append("region_1_2") - #loadable.append("region_1_2_patched") - if self.full_layout: - loadable.append("region_fl_np") - #loadable.append("region_fl_patched") - if self.reading_order_machine_based: - loadable.append("reading_order") # todo: reading_order_patched + self.ocr = do_ocr + if num_col_upper: + self.num_col_upper = int(num_col_upper) + else: + self.num_col_upper = num_col_upper + if num_col_lower: + self.num_col_lower = int(num_col_lower) + else: + self.num_col_lower = num_col_lower + self.logger = logger if logger else getLogger('eynollah') + # for parallelization of CPU-intensive tasks: + self.executor = ProcessPoolExecutor(max_workers=cpu_count(), timeout=1200) + atexit.register(self.executor.shutdown) + self.dir_models = dir_models + self.model_dir_of_enhancement = dir_models + "/eynollah-enhancement_20210425" + self.model_dir_of_binarization = dir_models + "/eynollah-binarization_20210425" + self.model_dir_of_col_classifier = dir_models + "/eynollah-column-classifier_20210425" + self.model_region_dir_p = dir_models + "/eynollah-main-regions-aug-scaling_20210425" + self.model_region_dir_p2 = 
dir_models + "/eynollah-main-regions-aug-rotation_20210425" + #"/modelens_full_lay_1_3_031124" + #"/modelens_full_lay_13__3_19_241024" + #"/model_full_lay_13_241024" + #"/modelens_full_lay_13_17_231024" + #"/modelens_full_lay_1_2_221024" + #"/eynollah-full-regions-1column_20210425" + self.model_region_dir_fully_np = dir_models + "/modelens_full_lay_1__4_3_091124" + #self.model_region_dir_fully = dir_models + "/eynollah-full-regions-3+column_20210425" + self.model_page_dir = dir_models + "/eynollah-page-extraction_20210425" + self.model_region_dir_p_ens = dir_models + "/eynollah-main-regions-ensembled_20210425" + self.model_region_dir_p_ens_light = dir_models + "/eynollah-main-regions_20220314" + self.model_region_dir_p_ens_light_only_images_extraction = dir_models + "/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18" + self.model_reading_order_dir = dir_models + "/model_ens_reading_order_machine_based" + #"/modelens_12sp_elay_0_3_4__3_6_n" + #"/modelens_earlylayout_12spaltige_2_3_5_6_7_8" + #"/modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18" + #"/modelens_1_2_4_5_early_lay_1_2_spaltige" + #"/model_3_eraly_layout_no_patches_1_2_spaltige" + self.model_region_dir_p_1_2_sp_np = dir_models + "/modelens_e_l_all_sp_0_1_2_3_4_171024" + ##self.model_region_dir_fully_new = dir_models + "/model_2_full_layout_new_trans" + #"/modelens_full_lay_1_3_031124" + #"/modelens_full_lay_13__3_19_241024" + #"/model_full_lay_13_241024" + #"/modelens_full_lay_13_17_231024" + #"/modelens_full_lay_1_2_221024" + #"/modelens_full_layout_24_till_28" + #"/model_2_full_layout_new_trans" + self.model_region_dir_fully = dir_models + "/modelens_full_lay_1__4_3_091124" + if self.textline_light: + #"/modelens_textline_1_4_16092024" + #"/model_textline_ens_3_4_5_6_artificial" + #"/modelens_textline_1_3_4_20240915" + #"/model_textline_ens_3_4_5_6_artificial" + #"/modelens_textline_9_12_13_14_15" + #"/eynollah-textline_light_20210425" + self.model_textline_dir = dir_models + "/modelens_textline_0_1__2_4_16092024" + else: + #"/eynollah-textline_20210425" + self.model_textline_dir = dir_models + "/modelens_textline_0_1__2_4_16092024" + if self.ocr: + self.model_ocr_dir = dir_models + "/trocr_model_ens_of_3_checkpoints_201124" if self.tables: - loadable.append("table") + if self.light_version: + self.model_table_dir = dir_models + "/modelens_table_0t4_201124" + else: + self.model_table_dir = dir_models + "/eynollah-tables_20210319" - self.model_zoo.load_models(*loadable, device=device) - for model in loadable: - # retrieve and cache output shapes - if model.endswith(('_resized', '_patched')): - # autosized models do not have a predefined input_shape - # (and don't need one) - continue - self.logger.debug("model %s has input shape %s", model, - self.model_zoo.get(model).input_shape) + # #gpu_options = tf.compat.v1.GPUOptions(allow_growth=True) + # #gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=7.7, allow_growth=True) + # #session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options)) + # config = tf.compat.v1.ConfigProto() + # config.gpu_options.allow_growth = True + # #session = tf.InteractiveSession() + # session = tf.compat.v1.Session(config=config) + # set_session(session) + try: + for device in tf.config.list_physical_devices('GPU'): + tf.config.experimental.set_memory_growth(device, True) + except: + self.logger.warning("no GPU device available") - def __del__(self): - if model_zoo := getattr(self, 'model_zoo', None): - if shutdown := getattr(model_zoo, 'shutdown', 
None): - shutdown() - del self.model_zoo + self.model_page = self.our_load_model(self.model_page_dir) + self.model_classifier = self.our_load_model(self.model_dir_of_col_classifier) + self.model_bin = self.our_load_model(self.model_dir_of_binarization) + if self.extract_only_images: + self.model_region = self.our_load_model(self.model_region_dir_p_ens_light_only_images_extraction) + else: + self.model_textline = self.our_load_model(self.model_textline_dir) + if self.light_version: + self.model_region = self.our_load_model(self.model_region_dir_p_ens_light) + self.model_region_1_2 = self.our_load_model(self.model_region_dir_p_1_2_sp_np) + else: + self.model_region = self.our_load_model(self.model_region_dir_p_ens) + self.model_region_p2 = self.our_load_model(self.model_region_dir_p2) + self.model_enhancement = self.our_load_model(self.model_dir_of_enhancement) + ###self.model_region_fl_new = self.our_load_model(self.model_region_dir_fully_new) + self.model_region_fl_np = self.our_load_model(self.model_region_dir_fully_np) + self.model_region_fl = self.our_load_model(self.model_region_dir_fully) + if self.reading_order_machine_based: + self.model_reading_order = self.our_load_model(self.model_reading_order_dir) + if self.ocr: + self.model_ocr = VisionEncoderDecoderModel.from_pretrained(self.model_ocr_dir) + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + #("microsoft/trocr-base-printed")#("microsoft/trocr-base-handwritten") + self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten") + if self.tables: + self.model_table = self.our_load_model(self.model_table_dir) def cache_images(self, image_filename=None, image_pil=None, dpi=None): ret = {} - if image_pil: - ret['img'] = pil2cv(image_pil) - elif image_filename: - ret['img'] = cv2.imread(image_filename) + t_c0 = time.time() if image_filename: - ret['name'] = Path(image_filename).stem + ret['img'] = cv2.imread(image_filename) + if self.light_version: + self.dpi = 100 + else: + self.dpi = check_dpi(image_filename) else: - ret['name'] = "image" - ret['dpi'] = dpi or 100 + ret['img'] = pil2cv(image_pil) + if self.light_version: + self.dpi = 100 + else: + self.dpi = check_dpi(image_pil) ret['img_grayscale'] = cv2.cvtColor(ret['img'], cv2.COLOR_BGR2GRAY) for prefix in ('', '_grayscale'): ret[f'img{prefix}_uint8'] = ret[f'img{prefix}'].astype(np.uint8) - return ret + self._imgs = ret + if dpi is not None: + self.dpi = dpi - def imread(self, image: dict, grayscale=False, binary=False, uint8=True): + def reset_file_name_dir(self, image_filename): + t_c = time.time() + self.cache_images(image_filename=image_filename) + + self.plotter = None if not self.enable_plotting else EynollahPlotter( + dir_out=self.dir_out, + dir_of_all=self.dir_of_all, + dir_save_page=self.dir_save_page, + dir_of_deskewed=self.dir_of_deskewed, + dir_of_cropped_images=self.dir_of_cropped_images, + dir_of_layout=self.dir_of_layout, + image_filename_stem=Path(Path(image_filename).name).stem) + + self.writer = EynollahXmlWriter( + dir_out=self.dir_out, + image_filename=image_filename, + curved_line=self.curved_line, + textline_light = self.textline_light) + + def imread(self, grayscale=False, uint8=True): key = 'img' if grayscale: key += '_grayscale' - elif binary: - key += '_bin' if uint8: key += '_uint8' - return image[key].copy() + return self._imgs[key].copy() - def calculate_width_height_by_columns(self, img, num_col, conf_col, width_early): + def isNaN(self, num): + return num != num + + def predict_enhancement(self, 
img): + self.logger.debug("enter predict_enhancement") + + img_height_model = self.model_enhancement.layers[-1].output_shape[1] + img_width_model = self.model_enhancement.layers[-1].output_shape[2] + if img.shape[0] < img_height_model: + img = cv2.resize(img, (img.shape[1], img_width_model), interpolation=cv2.INTER_NEAREST) + if img.shape[1] < img_width_model: + img = cv2.resize(img, (img_height_model, img.shape[0]), interpolation=cv2.INTER_NEAREST) + margin = int(0 * img_width_model) + width_mid = img_width_model - 2 * margin + height_mid = img_height_model - 2 * margin + img = img / 255. + img_h = img.shape[0] + img_w = img.shape[1] + + prediction_true = np.zeros((img_h, img_w, 3)) + nxf = img_w / float(width_mid) + nyf = img_h / float(height_mid) + nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf) + nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf) + + for i in range(nxf): + for j in range(nyf): + if i == 0: + index_x_d = i * width_mid + index_x_u = index_x_d + img_width_model + else: + index_x_d = i * width_mid + index_x_u = index_x_d + img_width_model + if j == 0: + index_y_d = j * height_mid + index_y_u = index_y_d + img_height_model + else: + index_y_d = j * height_mid + index_y_u = index_y_d + img_height_model + + if index_x_u > img_w: + index_x_u = img_w + index_x_d = img_w - img_width_model + if index_y_u > img_h: + index_y_u = img_h + index_y_d = img_h - img_height_model + + img_patch = img[np.newaxis, index_y_d:index_y_u, index_x_d:index_x_u, :] + label_p_pred = self.model_enhancement.predict(img_patch, verbose=0) + seg = label_p_pred[0, :, :, :] * 255 + + if i == 0 and j == 0: + prediction_true[index_y_d + 0:index_y_u - margin, + index_x_d + 0:index_x_u - margin] = \ + seg[0:-margin or None, + 0:-margin or None] + elif i == nxf - 1 and j == nyf - 1: + prediction_true[index_y_d + margin:index_y_u - 0, + index_x_d + margin:index_x_u - 0] = \ + seg[margin:, + margin:] + elif i == 0 and j == nyf - 1: + prediction_true[index_y_d + margin:index_y_u - 0, + index_x_d + 0:index_x_u - margin] = \ + seg[margin:, + 0:-margin or None] + elif i == nxf - 1 and j == 0: + prediction_true[index_y_d + 0:index_y_u - margin, + index_x_d + margin:index_x_u - 0] = \ + seg[0:-margin or None, + margin:] + elif i == 0 and j != 0 and j != nyf - 1: + prediction_true[index_y_d + margin:index_y_u - margin, + index_x_d + 0:index_x_u - margin] = \ + seg[margin:-margin or None, + 0:-margin or None] + elif i == nxf - 1 and j != 0 and j != nyf - 1: + prediction_true[index_y_d + margin:index_y_u - margin, + index_x_d + margin:index_x_u - 0] = \ + seg[margin:-margin or None, + margin:] + elif i != 0 and i != nxf - 1 and j == 0: + prediction_true[index_y_d + 0:index_y_u - margin, + index_x_d + margin:index_x_u - margin] = \ + seg[0:-margin or None, + margin:-margin or None] + elif i != 0 and i != nxf - 1 and j == nyf - 1: + prediction_true[index_y_d + margin:index_y_u - 0, + index_x_d + margin:index_x_u - margin] = \ + seg[margin:, + margin:-margin or None] + else: + prediction_true[index_y_d + margin:index_y_u - margin, + index_x_d + margin:index_x_u - margin] = \ + seg[margin:-margin or None, + margin:-margin or None] + + prediction_true = prediction_true.astype(int) + return prediction_true + + def calculate_width_height_by_columns(self, img, num_col, width_early, label_p_pred): self.logger.debug("enter calculate_width_height_by_columns") if num_col == 1 and width_early < 1100: img_w_new = 2000 elif num_col == 1 and width_early >= 2500: img_w_new = 2000 - elif num_col == 1: + elif num_col == 1 and width_early 
>= 1100 and width_early < 2500: img_w_new = width_early elif num_col == 2 and width_early < 2000: img_w_new = 2400 elif num_col == 2 and width_early >= 3500: img_w_new = 2400 - elif num_col == 2: + elif num_col == 2 and width_early >= 2000 and width_early < 3500: img_w_new = width_early elif num_col == 3 and width_early < 2000: img_w_new = 3000 elif num_col == 3 and width_early >= 4000: img_w_new = 3000 - elif num_col == 3: + elif num_col == 3 and width_early >= 2000 and width_early < 4000: img_w_new = width_early elif num_col == 4 and width_early < 2500: img_w_new = 4000 elif num_col == 4 and width_early >= 5000: img_w_new = 4000 - elif num_col == 4: + elif num_col == 4 and width_early >= 2500 and width_early < 5000: img_w_new = width_early elif num_col == 5 and width_early < 3700: img_w_new = 5000 elif num_col == 5 and width_early >= 7000: img_w_new = 5000 - elif num_col == 5: + elif num_col == 5 and width_early >= 3700 and width_early < 7000: img_w_new = width_early elif num_col == 6 and width_early < 4500: img_w_new = 6500 # 5400 @@ -271,22 +522,20 @@ class Eynollah: img_w_new = width_early img_h_new = img_w_new * img.shape[0] // img.shape[1] - if conf_col < 0.9 and img_w_new < width_early: - # don't downsample if unconfident + if label_p_pred[0][int(num_col - 1)] < 0.9 and img_w_new < width_early: img_new = np.copy(img) - img_is_resized = False - #elif conf_col < 0.8 and img_h_new >= 8000: + num_column_is_classified = False + #elif label_p_pred[0][int(num_col - 1)] < 0.8 and img_h_new >= 8000: elif img_h_new >= 8000: - # don't upsample if too large img_new = np.copy(img) - img_is_resized = False + num_column_is_classified = False else: img_new = resize_image(img, img_h_new, img_w_new) - img_is_resized = True + num_column_is_classified = True - return img_new, img_is_resized + return img_new, num_column_is_classified - def calculate_width_height_by_columns_1_2(self, img, num_col, conf_col, width_early): + def calculate_width_height_by_columns_1_2(self, img, num_col, width_early, label_p_pred): self.logger.debug("enter calculate_width_height_by_columns") if num_col == 1: img_w_new = 1000 @@ -294,369 +543,272 @@ class Eynollah: img_w_new = 1300 img_h_new = img_w_new * img.shape[0] // img.shape[1] - if conf_col < 0.9 and img_w_new < width_early: - # don't downsample if unconfident + if label_p_pred[0][int(num_col - 1)] < 0.9 and img_w_new < width_early: img_new = np.copy(img) - img_is_resized = False - #elif conf_col < 0.8 and img_h_new >= 8000: + num_column_is_classified = False + #elif label_p_pred[0][int(num_col - 1)] < 0.8 and img_h_new >= 8000: elif img_h_new >= 8000: - # don't upsample if too large img_new = np.copy(img) - img_is_resized = False + num_column_is_classified = False else: img_new = resize_image(img, img_h_new, img_w_new) - img_is_resized = True + num_column_is_classified = True - return img_new, img_is_resized + return img_new, num_column_is_classified - # FIXME: actually may run enhancement model, should be renamed - def resize_image_with_column_classifier(self, image): + def calculate_width_height_by_columns_extract_only_images(self, img, num_col, width_early, label_p_pred): + self.logger.debug("enter calculate_width_height_by_columns") + if num_col == 1: + img_w_new = 700 + elif num_col == 2: + img_w_new = 900 + elif num_col == 3: + img_w_new = 1500 + elif num_col == 4: + img_w_new = 1800 + elif num_col == 5: + img_w_new = 2200 + elif num_col == 6: + img_w_new = 2500 + img_h_new = img_w_new * img.shape[0] // img.shape[1] + + img_new = resize_image(img, img_h_new, 
img_w_new) + num_column_is_classified = True + + return img_new, num_column_is_classified + + def resize_image_with_column_classifier(self, is_image_enhanced, img_bin): self.logger.debug("enter resize_image_with_column_classifier") - img = self.imread(image, binary=self.input_binary) + if self.input_binary: + img = np.copy(img_bin) + else: + img = self.imread() - width_early = img.shape[1] - page_img, page_coord = self.early_page_for_num_of_column_classification(img) + _, page_coord = self.early_page_for_num_of_column_classification(img) - label_p_pred = np.ones(6) - conf_col = 1.0 - if self.num_col_upper and not self.num_col_lower: - num_col = self.num_col_upper - elif self.num_col_lower and not self.num_col_upper: - num_col = self.num_col_lower - elif (not self.num_col_upper and not self.num_col_lower or - self.num_col_upper != self.num_col_lower): - if self.input_binary: - img_in = page_img - else: - img_1ch = self.imread(image, grayscale=True) - img_1ch = img_1ch[page_coord[0]: page_coord[1], - page_coord[2]: page_coord[3]] - img_in = np.repeat(img_1ch[:, :, np.newaxis], 3, axis=2) + if self.input_binary: + img_in = np.copy(img) img_in = img_in / 255.0 - img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST).astype(np.float16) - - label_p_pred = self.model_zoo.get("col_classifier").predict(img_in[np.newaxis], verbose=0)[0] - num_col = np.argmax(label_p_pred) + 1 - conf_col = np.max(label_p_pred) - if self.num_col_upper and self.num_col_upper < num_col: - num_col = self.num_col_upper - conf_col = 1.0 - if self.num_col_lower and self.num_col_lower > num_col: - num_col = self.num_col_lower - conf_col = 1.0 + width_early = img_in.shape[1] + img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST) + img_in = img_in.reshape(1, 448, 448, 3) else: - num_col = self.num_col_upper - conf_col = 1.0 + img_1ch = self.imread(grayscale=True, uint8=False) + width_early = img_1ch.shape[1] + img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - self.logger.info("Found %s columns (%s)", num_col, np.around(label_p_pred, decimals=5)) - if num_col in (1, 2): - fun = self.calculate_width_height_by_columns_1_2 - else: - fun = self.calculate_width_height_by_columns - img_new, _ = fun(img, num_col, conf_col, width_early) + # plt.imshow(img_1ch) + # plt.show() + img_1ch = img_1ch / 255.0 + img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST) + + img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3)) + img_in[0, :, :, 0] = img_1ch[:, :] + img_in[0, :, :, 1] = img_1ch[:, :] + img_in[0, :, :, 2] = img_1ch[:, :] + + label_p_pred = self.model_classifier.predict(img_in, verbose=0) + num_col = np.argmax(label_p_pred[0]) + 1 + + self.logger.info("Found %s columns (%s)", num_col, label_p_pred) + img_new, _ = self.calculate_width_height_by_columns(img, num_col, width_early, label_p_pred) if img_new.shape[1] > img.shape[1]: - img_new = self.do_prediction(True, img_new, self.model_zoo.get("enhancement"), - marginal_of_patch_percent=0, - n_batch_inference=3, - is_enhancement=True) - self.logger.info("Enhancement applied") + img_new = self.predict_enhancement(img_new) + is_image_enhanced = True - image['img_res'] = img_new - image['scale_y'] = 1.0 * img_new.shape[0] / img.shape[0] - image['scale_x'] = 1.0 * img_new.shape[1] / img.shape[1] - return + return img, img_new, is_image_enhanced - # FIXME: does not actually run enhancement model, should be renamed - def resize_and_enhance_image_with_column_classifier(self, image): + def 
resize_and_enhance_image_with_column_classifier(self, light_version): self.logger.debug("enter resize_and_enhance_image_with_column_classifier") - dpi = image['dpi'] - img = self.imread(image) + dpi = self.dpi self.logger.info("Detected %s DPI", dpi) if self.input_binary: - prediction_bin = self.do_prediction(True, img, self.model_zoo.get("binarization"), n_batch_inference=5) - prediction_bin = 255 * (prediction_bin == 0) + img = self.imread() + prediction_bin = self.do_prediction(True, img, self.model_bin, n_batch_inference=5) + prediction_bin = 255 * (prediction_bin[:,:,0]==0) prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2).astype(np.uint8) - image['img_bin_uint8'] = prediction_bin - img = np.copy(prediction_bin) + img= np.copy(prediction_bin) + img_bin = prediction_bin else: - image['img_bin_uint8'] = None + img = self.imread() + img_bin = None width_early = img.shape[1] t1 = time.time() - page_img, page_coord = self.early_page_for_num_of_column_classification(img) + _, page_coord = self.early_page_for_num_of_column_classification(img_bin) + + self.image_page_org_size = img[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3], :] + self.page_coord = page_coord - label_p_pred = np.ones(6) - conf_col = 1.0 if self.num_col_upper and not self.num_col_lower: num_col = self.num_col_upper + label_p_pred = [np.ones(6)] elif self.num_col_lower and not self.num_col_upper: num_col = self.num_col_lower - elif (not self.num_col_upper and not self.num_col_lower or - self.num_col_upper != self.num_col_lower): + label_p_pred = [np.ones(6)] + elif not self.num_col_upper and not self.num_col_lower: if self.input_binary: - img_in = page_img + img_in = np.copy(img) + img_in = img_in / 255.0 + img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST) + img_in = img_in.reshape(1, 448, 448, 3) else: - img_1ch = self.imread(image, grayscale=True) - img_1ch = img_1ch[page_coord[0]: page_coord[1], - page_coord[2]: page_coord[3]] - img_in = np.repeat(img_1ch[:, :, np.newaxis], 3, axis=2) - img_in = img_in / 255.0 - img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST).astype(np.float16) + img_1ch = self.imread(grayscale=True) + width_early = img_1ch.shape[1] + img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - label_p_pred = self.model_zoo.get("col_classifier").predict(img_in[np.newaxis], verbose=0)[0] - num_col = np.argmax(label_p_pred) + 1 - conf_col = np.max(label_p_pred) + img_1ch = img_1ch / 255.0 + img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST) + img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3)) + img_in[0, :, :, 0] = img_1ch[:, :] + img_in[0, :, :, 1] = img_1ch[:, :] + img_in[0, :, :, 2] = img_1ch[:, :] - if self.num_col_upper and self.num_col_upper < num_col: + label_p_pred = self.model_classifier.predict(img_in, verbose=0) + num_col = np.argmax(label_p_pred[0]) + 1 + elif (self.num_col_upper and self.num_col_lower) and (self.num_col_upper!=self.num_col_lower): + if self.input_binary: + img_in = np.copy(img) + img_in = img_in / 255.0 + img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST) + img_in = img_in.reshape(1, 448, 448, 3) + else: + img_1ch = self.imread(grayscale=True) + width_early = img_1ch.shape[1] + img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + + img_1ch = img_1ch / 255.0 + img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST) + img_in = np.zeros((1, img_1ch.shape[0], 
+                img_in[0, :, :, 0] = img_1ch[:, :]
+                img_in[0, :, :, 1] = img_1ch[:, :]
+                img_in[0, :, :, 2] = img_1ch[:, :]
+
+            label_p_pred = self.model_classifier.predict(img_in, verbose=0)
+            num_col = np.argmax(label_p_pred[0]) + 1
+
+            if num_col > self.num_col_upper:
                 num_col = self.num_col_upper
-                conf_col = 1.0
-            if self.num_col_lower and self.num_col_lower > num_col:
+                label_p_pred = [np.ones(6)]
+            if num_col < self.num_col_lower:
                 num_col = self.num_col_lower
-                conf_col = 1.0
+                label_p_pred = [np.ones(6)]
         else:
             num_col = self.num_col_upper
-            conf_col = 1.0
+            label_p_pred = [np.ones(6)]

         self.logger.info("Found %d columns (%s)", num_col, np.around(label_p_pred, decimals=5))
-        if num_col in (1,2):
-            img_res, is_image_resized = self.calculate_width_height_by_columns_1_2(
-                img, num_col, conf_col, width_early)
-            is_image_enhanced = True
-        elif dpi < DPI_THRESHOLD:
-            img_res, is_image_resized = self.calculate_width_height_by_columns(
-                img, num_col, conf_col, width_early)
-            is_image_enhanced = True
+        if not self.extract_only_images:
+            if dpi < DPI_THRESHOLD:
+                if light_version and num_col in (1,2):
+                    img_new, num_column_is_classified = self.calculate_width_height_by_columns_1_2(
+                        img, num_col, width_early, label_p_pred)
+                else:
+                    img_new, num_column_is_classified = self.calculate_width_height_by_columns(
+                        img, num_col, width_early, label_p_pred)
+                if light_version:
+                    image_res = np.copy(img_new)
+                else:
+                    image_res = self.predict_enhancement(img_new)
+                is_image_enhanced = True
+            else:
+                if light_version and num_col in (1,2):
+                    img_new, num_column_is_classified = self.calculate_width_height_by_columns_1_2(
+                        img, num_col, width_early, label_p_pred)
+                    image_res = np.copy(img_new)
+                    is_image_enhanced = True
+                else:
+                    num_column_is_classified = True
+                    image_res = np.copy(img)
+                    is_image_enhanced = False
         else:
-            img_res = np.copy(img)
-            is_image_resized = True  # FIXME: not true actually, but branch is dead anyway
+            num_column_is_classified = True
+            image_res = np.copy(img)
             is_image_enhanced = False

         self.logger.debug("exit resize_and_enhance_image_with_column_classifier")
-        image['img_res'] = img_res.astype(np.uint8)
-        image['scale_y'] = 1.0 * img_res.shape[0] / img.shape[0]
-        image['scale_x'] = 1.0 * img_res.shape[1] / img.shape[1]
-        return is_image_enhanced, num_col, is_image_resized
+        return is_image_enhanced, img, image_res, num_col, num_column_is_classified, img_bin
+
+    # pylint: disable=attribute-defined-outside-init
+    def get_image_and_scales(self, img_org, img_res, scale):
+        self.logger.debug("enter get_image_and_scales")
+        self.image = np.copy(img_res)
+        self.image_org = np.copy(img_org)
+        self.height_org = self.image.shape[0]
+        self.width_org = self.image.shape[1]
+
+        self.img_hight_int = int(self.image.shape[0] * scale)
+        self.img_width_int = int(self.image.shape[1] * scale)
+        self.scale_y = self.img_hight_int / float(self.image.shape[0])
+        self.scale_x = self.img_width_int / float(self.image.shape[1])
+
+        self.image = resize_image(self.image, self.img_hight_int, self.img_width_int)
+
+        # Also set for the plotter
+        if self.plotter:
+            self.plotter.image_org = self.image_org
+            self.plotter.scale_y = self.scale_y
+            self.plotter.scale_x = self.scale_x
+        # Also set for the writer
+        self.writer.image_org = self.image_org
+        self.writer.scale_y = self.scale_y
+        self.writer.scale_x = self.scale_x
+        self.writer.height_org = self.height_org
+        self.writer.width_org = self.width_org
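Aside: get_image_and_scales keeps two coordinate systems in sync. The sketch below restates that arithmetic in isolation (function name is illustrative only); scale_x/scale_y are the ratios actually achieved after int() truncation, so a point detected on the working image maps back to the original as (x / scale_x, y / scale_y).

def compute_scales(height, width, scale):
    # size of the resized working image
    img_height_int = int(height * scale)
    img_width_int = int(width * scale)
    # exact ratios achieved after truncation
    scale_y = img_height_int / float(height)
    scale_x = img_width_int / float(width)
    return img_height_int, img_width_int, scale_y, scale_x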
get_image_and_scales_after_enhancing") + self.image = np.copy(img_res) + self.image = self.image.astype(np.uint8) + self.image_org = np.copy(img_org) + self.height_org = self.image_org.shape[0] + self.width_org = self.image_org.shape[1] + + self.scale_y = img_res.shape[0] / float(self.image_org.shape[0]) + self.scale_x = img_res.shape[1] / float(self.image_org.shape[1]) + + # Also set for the plotter + if self.plotter: + self.plotter.image_org = self.image_org + self.plotter.scale_y = self.scale_y + self.plotter.scale_x = self.scale_x + # Also set for the writer + self.writer.image_org = self.image_org + self.writer.scale_y = self.scale_y + self.writer.scale_x = self.scale_x + self.writer.height_org = self.height_org + self.writer.width_org = self.width_org def do_prediction( self, patches, img, model, - n_batch_inference=1, - marginal_of_patch_percent=0.1, - thresholding_for_some_classes=False, - thresholding_for_heading=False, - heading_class=2, - thresholding_for_artificial_class=False, - threshold_art_class=0.1, - artificial_class=2, - is_enhancement=False, - ): + n_batch_inference=1, marginal_of_patch_percent=0.1, + thresholding_for_some_classes_in_light_version=False, + thresholding_for_artificial_class_in_light_version=False): - self.logger.debug("enter do_prediction (patches=%d)", patches) - _, img_height_model, img_width_model, _ = model.input_shape - img_h_page = img.shape[0] - img_w_page = img.shape[1] - - img = img / 255. - img = img.astype(np.float16) - - if not patches: - img = resize_image(img, img_height_model, img_width_model) - - label_p_pred = model.predict(img[np.newaxis], verbose=0)[0] - if is_enhancement: - seg = (label_p_pred * 255).astype(np.uint8) - else: - seg = np.argmax(label_p_pred, axis=2).astype(np.uint8) - - if thresholding_for_artificial_class: - seg_mask_label( - seg, label_p_pred[:, :, artificial_class] >= threshold_art_class, - label=artificial_class, - skeletonize=True) - - if thresholding_for_heading: - seg_mask_label( - seg, label_p_pred[:, :, heading_class] >= 0.2, - label=heading_class) - - return resize_image(seg, img_h_page, img_w_page) - - if img_h_page < img_height_model: - img = resize_image(img, img_height_model, img.shape[1]) - if img_w_page < img_width_model: - img = resize_image(img, img.shape[0], img_width_model) - - self.logger.debug("Patch size: %sx%s", img_height_model, img_width_model) - margin = int(marginal_of_patch_percent * img_height_model) - window = 1 / (1 + np.exp(5.0 - 5 * np.arange(2 * margin) / margin)) - width_mid = img_width_model - 2 * margin - height_mid = img_height_model - 2 * margin - img_h = img.shape[0] - img_w = img.shape[1] - prediction = None - nxf = math.ceil((img_w - 2.0 * margin) / width_mid) - nyf = math.ceil((img_h - 2.0 * margin) / height_mid) - - batch_i = [] - batch_j = [] - batch_x_u = [] - batch_x_d = [] - batch_x_s = [] - batch_y_u = [] - batch_y_d = [] - batch_y_s = [] - - batch = 0 - img_patch = np.zeros((n_batch_inference, - img_height_model, - img_width_model, - 3), dtype=np.float16) - for i in range(nxf): - for j in range(nyf): - index_x_d = i * width_mid - index_x_u = index_x_d + img_width_model - if index_x_u > img_w: - index_x_s = index_x_u - img_w - index_x_u = img_w - index_x_d = img_w - img_width_model - else: - index_x_s = 0 - index_y_d = j * height_mid - index_y_u = index_y_d + img_height_model - if index_y_u > img_h: - index_y_s = index_y_u - img_h - index_y_u = img_h - index_y_d = img_h - img_height_model - else: - index_y_s = 0 - - batch_i.append(i) - batch_j.append(j) - 
-                batch_x_u.append(index_x_u)
-                batch_x_d.append(index_x_d)
-                batch_x_s.append(index_x_s)
-                batch_y_d.append(index_y_d)
-                batch_y_u.append(index_y_u)
-                batch_y_s.append(index_y_s)
-
-                img_patch[batch] = img[index_y_d: index_y_u,
-                                       index_x_d: index_x_u]
-                batch += 1
-                if (batch == n_batch_inference or
-                        # last batch
-                        i == nxf - 1 and j == nyf - 1):
-                    self.logger.debug("predicting patches on %s", str(img_patch.shape))
-                    label_p_pred = model.predict(img_patch, verbose=0)
-                    if prediction is None:
-                        # now we know the number of classes
-                        prediction = np.zeros((img_h, img_w, label_p_pred.shape[-1]), dtype=float)
-
-                    for batch in range(batch):
-                        where = np.index_exp[batch_y_d[batch]: batch_y_u[batch],
-                                             batch_x_d[batch]: batch_x_u[batch]]
-                        # shorter window on last tile
-                        part = np.index_exp[batch_y_s[batch]:,
-                                            batch_x_s[batch]:]
-                        # normalize probability (where windows overlap)
-                        attenuation_y = np.ones(img_height_model - batch_y_s[batch])
-                        attenuation_x = np.ones(img_width_model - batch_x_s[batch])
-                        if margin and batch_j[batch] > 0:
-                            attenuation_y[:2 * margin] = window
-                        if margin and batch_j[batch] < nyf - 1:
-                            attenuation_y[-2 * margin:] = 1 - window
-                        if margin and batch_i[batch] > 0:
-                            attenuation_x[:2 * margin] = window
-                        if margin and batch_i[batch] < nxf - 1:
-                            attenuation_x[-2 * margin:] = 1 - window
-                        label_p_pred[batch][part] *= attenuation_y[:, np.newaxis, np.newaxis]
-                        label_p_pred[batch][part] *= attenuation_x[np.newaxis, :, np.newaxis]
-                        prediction[where][part] += label_p_pred[batch][part]
-
-                    batch_i = []
-                    batch_j = []
-                    batch_x_u = []
-                    batch_x_d = []
-                    batch_x_s = []
-                    batch_y_u = []
-                    batch_y_d = []
-                    batch_y_s = []
-                    batch = 0
-                    img_patch[:] = 0
-
-        if is_enhancement:
-            seg = (prediction * 255).astype(np.uint8)
-        else:
-            seg = np.argmax(prediction, axis=2).astype(np.uint8)
-            if thresholding_for_some_classes:
-                seg_mask_label(
-                    seg, prediction[:, :, 4] > 0.03,
-                    label=4)  #
-                seg_mask_label(
-                    seg, prediction[:, :, 0] > 0.25,
-                    label=0)  # bg
-                seg_mask_label(
-                    seg, (prediction[:, :, 3] > 0.10) & (seg == 0),
-                    label=3)  # line
-            if thresholding_for_artificial_class:
-                seg_art = prediction[:, :, artificial_class] >= threshold_art_class
-                seg_mask_label(seg, seg_art,
-                               label=artificial_class,
-                               only=True,
-                               skeletonize=True,
-                               dilate=3)
-
-        if img_h != img_h_page or img_w != img_w_page:
-            seg = resize_image(seg, img_h_page, img_w_page)
-
-        gc.collect()
-        return seg
-
-    def do_prediction_new_concept(
-            self, patches, img, model,
-            n_batch_inference=1,
-            marginal_of_patch_percent=0.1,
-            thresholding_for_heading=False,
-            heading_class=2,
-            thresholding_for_artificial_class=False,
-            threshold_art_class=0.1,
-            artificial_class=4,
-            separator_class=0,
-    ):
-
-        self.logger.debug("enter do_prediction_new_concept (patches=%d)", patches)
-        _, img_height_model, img_width_model, _ = model.input_shape
-
-        img = img / 255.0
-        img = img.astype(np.float16)
+        self.logger.debug("enter do_prediction")
+        img_height_model = model.layers[-1].output_shape[1]
+        img_width_model = model.layers[-1].output_shape[2]

         if not patches:
             img_h_page = img.shape[0]
             img_w_page = img.shape[1]
+            img = img / float(255.0)
             img = resize_image(img, img_height_model, img_width_model)

-            label_p_pred = model.predict(img[np.newaxis], verbose=0)[0]
-            seg = np.argmax(label_p_pred, axis=2).astype(np.uint8)
+            label_p_pred = model.predict(img[np.newaxis], verbose=0)
+            seg = np.argmax(label_p_pred, axis=3)[0]

-            prediction = resize_image(seg, img_h_page, img_w_page)
+            if thresholding_for_artificial_class_in_light_version:
+                seg_art = label_p_pred[0,:,:,2]
-            if thresholding_for_artificial_class:
-                mask = resize_image(label_p_pred[:, :, artificial_class],
-                                    img_h_page, img_w_page) >= threshold_art_class
-                seg_mask_label(prediction, mask,
-                               label=artificial_class,
-                               only=True,
-                               skeletonize=True,
-                               dilate=3,
-                               keep=separator_class)
-            if thresholding_for_heading:
-                mask = resize_image(label_p_pred[:, :, heading_class],
-                                    img_h_page, img_w_page) >= 0.2
-                seg_mask_label(prediction, mask,
-                               label=heading_class)
+                seg_art[seg_art<0.2] = 0
+                seg_art[seg_art>0] =1

-            conf = label_p_pred[tuple(np.indices(seg.shape)) + (seg,)]
-            conf = resize_image(conf, img_h_page, img_w_page)
-            return prediction, conf
+                seg[seg_art==1]=2
+            seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2)
+            prediction_true = resize_image(seg_color, img_h_page, img_w_page).astype(np.uint8)
+            return prediction_true

         if img.shape[0] < img_height_model:
             img = resize_image(img, img_height_model, img.shape[1])
@@ -665,378 +817,977 @@ class Eynollah:
         self.logger.debug("Patch size: %sx%s", img_height_model, img_width_model)
         margin = int(marginal_of_patch_percent * img_height_model)
-        window = 1 / (1 + np.exp(5.0 - 5 * np.arange(2 * margin) / margin))
         width_mid = img_width_model - 2 * margin
         height_mid = img_height_model - 2 * margin
+        img = img / 255.
+        #img = img.astype(np.float16)
         img_h = img.shape[0]
         img_w = img.shape[1]
-        prediction = None
-        nxf = math.ceil((img_w - 2.0 * margin) / width_mid)
-        nyf = math.ceil((img_h - 2.0 * margin) / height_mid)
+        prediction_true = np.zeros((img_h, img_w, 3))
+        mask_true = np.zeros((img_h, img_w))
+        nxf = img_w / float(width_mid)
+        nyf = img_h / float(height_mid)
+        nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf)
+        nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf)

-        batch_i = []
-        batch_j = []
-        batch_x_u = []
-        batch_x_d = []
-        batch_x_s = []
-        batch_y_u = []
-        batch_y_d = []
-        batch_y_s = []
-        batch = 0
-        img_patch = np.zeros((n_batch_inference,
-                              img_height_model,
-                              img_width_model,
-                              3), dtype=np.float16)
+        list_i_s = []
+        list_j_s = []
+        list_x_u = []
+        list_x_d = []
+        list_y_u = []
+        list_y_d = []
+
+        batch_indexer = 0
+        img_patch = np.zeros((n_batch_inference, img_height_model, img_width_model, 3))
         for i in range(nxf):
             for j in range(nyf):
-                index_x_d = i * width_mid
-                index_x_u = index_x_d + img_width_model
+                if i == 0:
+                    index_x_d = i * width_mid
+                    index_x_u = index_x_d + img_width_model
+                else:
+                    index_x_d = i * width_mid
+                    index_x_u = index_x_d + img_width_model
+                if j == 0:
+                    index_y_d = j * height_mid
+                    index_y_u = index_y_d + img_height_model
+                else:
+                    index_y_d = j * height_mid
+                    index_y_u = index_y_d + img_height_model
                 if index_x_u > img_w:
-                    index_x_s = index_x_u - img_w
                     index_x_u = img_w
                     index_x_d = img_w - img_width_model
-                else:
-                    index_x_s = 0
-                index_y_d = j * height_mid
-                index_y_u = index_y_d + img_height_model
                 if index_y_u > img_h:
-                    index_y_s = index_y_u - img_h
                     index_y_u = img_h
                     index_y_d = img_h - img_height_model
-                else:
-                    index_y_s = 0
-
-                batch_i.append(i)
-                batch_j.append(j)
-                batch_x_u.append(index_x_u)
-                batch_x_d.append(index_x_d)
-                batch_x_s.append(index_x_s)
-                batch_y_d.append(index_y_d)
-                batch_y_u.append(index_y_u)
-                batch_y_s.append(index_y_s)
+                list_i_s.append(i)
+                list_j_s.append(j)
+                list_x_u.append(index_x_u)
+                list_x_d.append(index_x_d)
+                list_y_d.append(index_y_d)
+                list_y_u.append(index_y_u)

-                img_patch[batch] = img[index_y_d: index_y_u,
-                                       index_x_d: index_x_u]
-                batch += 1
-                if (batch == n_batch_inference or
+                img_patch[batch_indexer,:,:,:] = img[index_y_d:index_y_u, index_x_d:index_x_u, :]
+                batch_indexer += 1
+
+                if (batch_indexer == n_batch_inference or
                         # last batch
                         i == nxf - 1 and j == nyf - 1):
                     self.logger.debug("predicting patches on %s", str(img_patch.shape))
                     label_p_pred = model.predict(img_patch, verbose=0)
-                    if prediction is None:
-                        # now we know the number of classes
-                        prediction = np.zeros((img_h, img_w, label_p_pred.shape[-1]), dtype=float)
+                    seg = np.argmax(label_p_pred, axis=3)

-                    for batch in range(batch):
-                        where = np.index_exp[batch_y_d[batch]: batch_y_u[batch],
-                                             batch_x_d[batch]: batch_x_u[batch]]
-                        # shorter window on last tile
-                        part = np.index_exp[batch_y_s[batch]:,
-                                            batch_x_s[batch]:]
-                        # normalize probability (where windows overlap)
-                        attenuation_y = np.ones(img_height_model - batch_y_s[batch])
-                        attenuation_x = np.ones(img_width_model - batch_x_s[batch])
-                        if margin and batch_j[batch] > 0:
-                            attenuation_y[:2 * margin] = window
-                        if margin and batch_j[batch] < nyf - 1:
-                            attenuation_y[-2 * margin:] = 1 - window
-                        if margin and batch_i[batch] > 0:
-                            attenuation_x[:2 * margin] = window
-                        if margin and batch_i[batch] < nxf - 1:
-                            attenuation_x[-2 * margin:] = 1 - window
-                        label_p_pred[batch][part] *= attenuation_y[:, np.newaxis, np.newaxis]
-                        label_p_pred[batch][part] *= attenuation_x[np.newaxis, :, np.newaxis]
-                        prediction[where][part] += label_p_pred[batch][part]
+                    if thresholding_for_some_classes_in_light_version:
+                        seg_not_base = label_p_pred[:,:,:,4]
+                        seg_not_base[seg_not_base>0.03] =1
+                        seg_not_base[seg_not_base<1] =0

-                    batch_i = []
-                    batch_j = []
-                    batch_x_u = []
-                    batch_x_d = []
-                    batch_x_s = []
-                    batch_y_u = []
-                    batch_y_d = []
-                    batch_y_s = []
-                    batch = 0
+                        seg_line = label_p_pred[:,:,:,3]
+                        seg_line[seg_line>0.1] =1
+                        seg_line[seg_line<1] =0
+
+                        seg_background = label_p_pred[:,:,:,0]
+                        seg_background[seg_background>0.25] =1
+                        seg_background[seg_background<1] =0
+
+                        seg[seg_not_base==1]=4
+                        seg[seg_background==1]=0
+                        seg[(seg_line==1) & (seg==0)]=3
+                    if thresholding_for_artificial_class_in_light_version:
+                        seg_art = label_p_pred[:,:,:,2]
+
+                        seg_art[seg_art<0.2] = 0
+                        seg_art[seg_art>0] =1
+
+                        seg[seg_art==1]=2
+
+                    indexer_inside_batch = 0
+                    for i_batch, j_batch in zip(list_i_s, list_j_s):
+                        seg_in = seg[indexer_inside_batch]
+
+                        index_y_u_in = list_y_u[indexer_inside_batch]
+                        index_y_d_in = list_y_d[indexer_inside_batch]
+
+                        index_x_u_in = list_x_u[indexer_inside_batch]
+                        index_x_d_in = list_x_d[indexer_inside_batch]
+
+                        if i_batch == 0 and j_batch == 0:
+                            prediction_true[index_y_d_in + 0:index_y_u_in - margin,
+                                            index_x_d_in + 0:index_x_u_in - margin] = \
+                                seg_in[0:-margin or None,
+                                       0:-margin or None,
+                                       np.newaxis]
+                        elif i_batch == nxf - 1 and j_batch == nyf - 1:
+                            prediction_true[index_y_d_in + margin:index_y_u_in - 0,
+                                            index_x_d_in + margin:index_x_u_in - 0] = \
+                                seg_in[margin:,
+                                       margin:,
+                                       np.newaxis]
+                        elif i_batch == 0 and j_batch == nyf - 1:
+                            prediction_true[index_y_d_in + margin:index_y_u_in - 0,
+                                            index_x_d_in + 0:index_x_u_in - margin] = \
+                                seg_in[margin:,
+                                       0:-margin or None,
+                                       np.newaxis]
+                        elif i_batch == nxf - 1 and j_batch == 0:
+                            prediction_true[index_y_d_in + 0:index_y_u_in - margin,
+                                            index_x_d_in + margin:index_x_u_in - 0] = \
+                                seg_in[0:-margin or None,
+                                       margin:,
+                                       np.newaxis]
+                        elif i_batch == 0 and j_batch != 0 and j_batch != nyf - 1:
+                            prediction_true[index_y_d_in + margin:index_y_u_in - margin,
+                                            index_x_d_in + 0:index_x_u_in - margin] = \
+                                seg_in[margin:-margin or None,
+                                       0:-margin or None,
+                                       np.newaxis]
+                        elif i_batch == nxf - 1 and j_batch != 0 and j_batch != nyf - 1:
+                            prediction_true[index_y_d_in + margin:index_y_u_in - margin,
+                                            index_x_d_in + margin:index_x_u_in - 0] = \
+                                seg_in[margin:-margin or None,
+                                       margin:,
+                                       np.newaxis]
+                        elif i_batch != 0 and i_batch != nxf - 1 and j_batch == 0:
+                            prediction_true[index_y_d_in + 0:index_y_u_in - margin,
+                                            index_x_d_in + margin:index_x_u_in - margin] = \
+                                seg_in[0:-margin or None,
+                                       margin:-margin or None,
+                                       np.newaxis]
+                        elif i_batch != 0 and i_batch != nxf - 1 and j_batch == nyf - 1:
+                            prediction_true[index_y_d_in + margin:index_y_u_in - 0,
+                                            index_x_d_in + margin:index_x_u_in - margin] = \
+                                seg_in[margin:,
+                                       margin:-margin or None,
+                                       np.newaxis]
+                        else:
+                            prediction_true[index_y_d_in + margin:index_y_u_in - margin,
+                                            index_x_d_in + margin:index_x_u_in - margin] = \
+                                seg_in[margin:-margin or None,
+                                       margin:-margin or None,
+                                       np.newaxis]
+                        indexer_inside_batch += 1
+
+
+                    list_i_s = []
+                    list_j_s = []
+                    list_x_u = []
+                    list_x_d = []
+                    list_y_u = []
+                    list_y_d = []
+
+                    batch_indexer = 0
                     img_patch[:] = 0

-        # decode
-        seg = np.argmax(prediction, axis=2).astype(np.uint8)
-        conf = prediction[tuple(np.indices(seg.shape)) + (seg,)]
-        if thresholding_for_artificial_class:
-            seg_art = prediction[:, :, artificial_class] >= threshold_art_class
-            seg_mask_label(seg, seg_art,
-                           label=artificial_class,
-                           only=True,
-                           skeletonize=True,
-                           dilate=3,
-                           keep=separator_class)
+        prediction_true = prediction_true.astype(np.uint8)
+        #del model
         gc.collect()
-        return seg, conf
+        return prediction_true

-    # variant of do_prediction_new_concept with no need
-    # for resizing or tiling into patches - done on model
-    # (Tensorflow/CUDA) side
-    # (after loading wrapped resized or patched model)
-    def do_prediction_new_concept_autosize(
-            self, img, model,
-            n_batch_inference=None,
-            thresholding_for_heading=False,
-            thresholding_for_artificial_class=False,
-            threshold_art_class=0.1,
-            artificial_class=4,
-    ):
-        self.logger.debug("enter do_prediction_new_concept (%s)", model.name)
+    def do_padding_with_scale(self, img, scale):
+        h_n = int(img.shape[0]*scale)
+        w_n = int(img.shape[1]*scale)
+
+        channel0_avg = int( np.mean(img[:,:,0]) )
+        channel1_avg = int( np.mean(img[:,:,1]) )
+        channel2_avg = int( np.mean(img[:,:,2]) )
+
+        h_diff = img.shape[0] - h_n
+        w_diff = img.shape[1] - w_n
+
+        h_start = int(0.5 * h_diff)
+        w_start = int(0.5 * w_diff)
+
+        img_res = resize_image(img, h_n, w_n)
+        #label_res = resize_image(label, h_n, w_n)
+
+        img_scaled_padded = np.copy(img)
+
+        #label_scaled_padded = np.zeros(label.shape)
+
+        img_scaled_padded[:,:,0] = channel0_avg
+        img_scaled_padded[:,:,1] = channel1_avg
+        img_scaled_padded[:,:,2] = channel2_avg
+
+        img_scaled_padded[h_start:h_start+h_n, w_start:w_start+w_n,:] = img_res[:,:,:]
+        #label_scaled_padded[h_start:h_start+h_n, w_start:w_start+w_n,:] = label_res[:,:,:]
+
+        return img_scaled_padded#, label_scaled_padded
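Aside: do_prediction covers the page with model-sized tiles whose step is (model size - 2*margin), clamping the last tile so it ends exactly at the page border. A condensed, hypothetical restatement of that index arithmetic:

def tile_starts(img_len, model_len, margin):
    # step between tile origins; neighbouring tiles overlap by 2*margin pixels
    stride = model_len - 2 * margin
    n_tiles = img_len // stride + (1 if img_len % stride else 0)
    starts = []
    for k in range(n_tiles):
        start = k * stride
        if start + model_len > img_len:
            # clamp the last tile to the border
            start = img_len - model_len
        starts.append(start)
    return starts

# e.g. tile_starts(1000, 448, 44) -> [0, 360, 552]: 448-wide tiles, 88 px overlap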
+    def do_prediction_new_concept_scatter_nd(
+            self, patches, img, model,
+            n_batch_inference=1, marginal_of_patch_percent=0.1,
+            thresholding_for_some_classes_in_light_version=False,
+            thresholding_for_artificial_class_in_light_version=False):
+
+        self.logger.debug("enter do_prediction_new_concept")
+        img_height_model = model.layers[-1].output_shape[1]
+        img_width_model = model.layers[-1].output_shape[2]
+
+        if not patches:
+            img_h_page = img.shape[0]
+            img_w_page = img.shape[1]
+            img = img / 255.0
+            img = resize_image(img, img_height_model, img_width_model)
+
+            label_p_pred = model.predict(img[np.newaxis], verbose=0)
+            seg = np.argmax(label_p_pred, axis=3)[0]
+
+            if thresholding_for_artificial_class_in_light_version:
+                #seg_text = label_p_pred[0,:,:,1]
+                #seg_text[seg_text<0.2] =0
+                #seg_text[seg_text>0] =1
+                #seg[seg_text==1]=1
+
+                seg_art = label_p_pred[0,:,:,4]
+                seg_art[seg_art<0.2] =0
+                seg_art[seg_art>0] =1
+                seg[seg_art==1]=4
+
+            seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2)
+            prediction_true = resize_image(seg_color, img_h_page, img_w_page).astype(np.uint8)
+            return prediction_true
+
+        if img.shape[0] < img_height_model:
+            img = resize_image(img, img_height_model, img.shape[1])
+        if img.shape[1] < img_width_model:
+            img = resize_image(img, img.shape[0], img_width_model)
+
+        self.logger.debug("Patch size: %sx%s", img_height_model, img_width_model)
+        ##margin = int(marginal_of_patch_percent * img_height_model)
+        #width_mid = img_width_model - 2 * margin
+        #height_mid = img_height_model - 2 * margin
         img = img / 255.0
         img = img.astype(np.float16)
+        img_h = img.shape[0]
+        img_w = img.shape[1]

-        prediction = model.predict(img[np.newaxis])[0]
-        confidence = prediction[:, :, 1]
-        segmentation = np.argmax(prediction, axis=2).astype(np.uint8)
+        stride_x = img_width_model - 100
+        stride_y = img_height_model - 100

-        if thresholding_for_artificial_class:
-            seg_mask_label(segmentation,
-                           prediction[:, :, artificial_class] >= threshold_art_class,
-                           label=artificial_class,
-                           only=True,
-                           skeletonize=True,
-                           dilate=3)
-        if thresholding_for_heading:
-            seg_mask_label(segmentation,
-                           prediction[:, :, 2] >= 0.2,
-                           label=2)
+        one_tensor = tf.ones_like(img)
+        img_patches, one_patches = tf.image.extract_patches(
+            images=[img, one_tensor],
+            sizes=[1, img_height_model, img_width_model, 1],
+            strides=[1, stride_y, stride_x, 1],
+            rates=[1, 1, 1, 1],
+            padding='SAME')
+        img_patches = tf.squeeze(img_patches)
+        one_patches = tf.squeeze(one_patches)
+        img_patches_resh = tf.reshape(img_patches, shape=(img_patches.shape[0] * img_patches.shape[1],
+                                                          img_height_model, img_width_model, 3))
+        pred_patches = model.predict(img_patches_resh, batch_size=n_batch_inference)
+        one_patches = tf.reshape(one_patches, shape=(img_patches.shape[0] * img_patches.shape[1],
+                                                     img_height_model, img_width_model, 3))
+        x = tf.range(img.shape[1])
+        y = tf.range(img.shape[0])
+        x, y = tf.meshgrid(x, y)
+        indices = tf.stack([y, x], axis=-1)
+
+        indices_patches = tf.image.extract_patches(
+            images=tf.expand_dims(indices, axis=0),
+            sizes=[1, img_height_model, img_width_model, 1],
+            strides=[1, stride_y, stride_x, 1],
+            rates=[1, 1, 1, 1],
+            padding='SAME')
+        indices_patches = tf.squeeze(indices_patches)
+        indices_patches = tf.reshape(indices_patches, shape=(img_patches.shape[0] * img_patches.shape[1],
+                                                             img_height_model, img_width_model, 2))
+        margin_y = int( 0.5 * (img_height_model - stride_y) )
+        margin_x = int( 0.5 * (img_width_model - stride_x) )
+
+        mask_margin = np.zeros((img_height_model, img_width_model))
+        mask_margin[margin_y:img_height_model - margin_y,
+                    margin_x:img_width_model - margin_x] = 1
+
+        indices_patches_array = indices_patches.numpy()
+        for i in range(indices_patches_array.shape[0]):
+            indices_patches_array[i,:,:,0] = indices_patches_array[i,:,:,0]*mask_margin
+            indices_patches_array[i,:,:,1] = indices_patches_array[i,:,:,1]*mask_margin
+
+        reconstructed = tf.scatter_nd(
+            indices=indices_patches_array,
+            updates=pred_patches,
+            shape=(img.shape[0], img.shape[1], pred_patches.shape[-1])).numpy()
+
+        prediction_true = np.argmax(reconstructed, axis=2).astype(np.uint8)
         gc.collect()
-        return segmentation, confidence
+        return np.repeat(prediction_true[:, :, np.newaxis], 3, axis=2)
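Aside: the scatter_nd variant above stitches per-patch predictions back into a page-sized array on the TensorFlow side. A rough NumPy equivalent of the same idea (keep only each patch's central region and write it at its page offset) might look like this; it is illustrative only, not the method used above:

import numpy as np

def stitch_center_regions(pred_patches, offsets, page_h, page_w, margin):
    n_classes = pred_patches.shape[-1]
    out = np.zeros((page_h, page_w, n_classes))
    for pred, (y0, x0) in zip(pred_patches, offsets):
        h, w = pred.shape[:2]
        # write only the central region, discarding the overlap margins
        out[y0 + margin:y0 + h - margin, x0 + margin:x0 + w - margin] = \
            pred[margin:h - margin, margin:w - margin]
    return out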
-    def extract_page(self, image):
-        cropped_page = img = image['img_res']
-        h, w = img.shape[:2]
-        page_coord = [0, h, 0, w]
-        cont_page = [np.array([[[0, 0]],
-                               [[w, 0]],
-                               [[w, h]],
-                               [[0, h]]])]
-        mask_page = np.ones((h, w), dtype=np.uint8)
+    def do_prediction_new_concept(
+            self, patches, img, model,
+            n_batch_inference=1, marginal_of_patch_percent=0.1,
+            thresholding_for_some_classes_in_light_version=False,
+            thresholding_for_artificial_class_in_light_version=False):
+
+        self.logger.debug("enter do_prediction_new_concept")
+        img_height_model = model.layers[-1].output_shape[1]
+        img_width_model = model.layers[-1].output_shape[2]
+
+        if not patches:
+            img_h_page = img.shape[0]
+            img_w_page = img.shape[1]
+            img = img / 255.0
+            img = resize_image(img, img_height_model, img_width_model)
+
+            label_p_pred = model.predict(img[np.newaxis], verbose=0)
+            seg = np.argmax(label_p_pred, axis=3)[0]
+
+            if thresholding_for_artificial_class_in_light_version:
+                #seg_text = label_p_pred[0,:,:,1]
+                #seg_text[seg_text<0.2] =0
+                #seg_text[seg_text>0] =1
+                #seg[seg_text==1]=1
+
+                seg_art = label_p_pred[0,:,:,4]
+                seg_art[seg_art<0.2] =0
+                seg_art[seg_art>0] =1
+                seg[seg_art==1]=4
+
+            seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2)
+            prediction_true = resize_image(seg_color, img_h_page, img_w_page).astype(np.uint8)
+            return prediction_true , resize_image(label_p_pred[0, :, :, 1] , img_h_page, img_w_page)
+
+        if img.shape[0] < img_height_model:
+            img = resize_image(img, img_height_model, img.shape[1])
+        if img.shape[1] < img_width_model:
+            img = resize_image(img, img.shape[0], img_width_model)
+
+        self.logger.debug("Patch size: %sx%s", img_height_model, img_width_model)
+        margin = int(marginal_of_patch_percent * img_height_model)
+        width_mid = img_width_model - 2 * margin
+        height_mid = img_height_model - 2 * margin
+        img = img / 255.0
+        img = img.astype(np.float16)
+        img_h = img.shape[0]
+        img_w = img.shape[1]
+        prediction_true = np.zeros((img_h, img_w, 3))
+        confidence_matrix = np.zeros((img_h, img_w))
+        mask_true = np.zeros((img_h, img_w))
+        nxf = img_w / float(width_mid)
+        nyf = img_h / float(height_mid)
+        nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf)
+        nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf)
+
+        list_i_s = []
+        list_j_s = []
+        list_x_u = []
+        list_x_d = []
+        list_y_u = []
+        list_y_d = []
+
+        batch_indexer = 0
+        img_patch = np.zeros((n_batch_inference, img_height_model, img_width_model, 3))
+        for i in range(nxf):
+            for j in range(nyf):
+                if i == 0:
+                    index_x_d = i * width_mid
+                    index_x_u = index_x_d + img_width_model
+                else:
+                    index_x_d = i * width_mid
+                    index_x_u = index_x_d + img_width_model
+                if j == 0:
+                    index_y_d = j * height_mid
+                    index_y_u = index_y_d + img_height_model
+                else:
+                    index_y_d = j * height_mid
+                    index_y_u = index_y_d + img_height_model
+                if index_x_u > img_w:
+                    index_x_u = img_w
+                    index_x_d = img_w - img_width_model
+                if index_y_u > img_h:
+                    index_y_u = img_h
+                    index_y_d = img_h - img_height_model
+
+                list_i_s.append(i)
+                list_j_s.append(j)
+                list_x_u.append(index_x_u)
+                list_x_d.append(index_x_d)
+                list_y_d.append(index_y_d)
+                list_y_u.append(index_y_u)
+
+                img_patch[batch_indexer] = img[index_y_d:index_y_u, index_x_d:index_x_u]
+                batch_indexer += 1
+
+                if (batch_indexer == n_batch_inference or
+                        # last batch
+                        i == nxf - 1 and j == nyf - 1):
+                    self.logger.debug("predicting patches on %s", str(img_patch.shape))
+                    label_p_pred = model.predict(img_patch,verbose=0)
+                    seg = np.argmax(label_p_pred, axis=3)
+
+                    if thresholding_for_some_classes_in_light_version:
+                        seg_art = label_p_pred[:,:,:,4]
+                        seg_art[seg_art<0.2] =0
+                        seg_art[seg_art>0] =1
+
+                        seg_line = label_p_pred[:,:,:,3]
+                        seg_line[seg_line>0.1] =1
+                        seg_line[seg_line<1] =0
+
+                        seg[seg_art==1]=4
+                        seg[(seg_line==1) & (seg==0)]=3
+                    if thresholding_for_artificial_class_in_light_version:
+                        seg_art = label_p_pred[:,:,:,2]
+
+                        seg_art[seg_art<0.2] = 0
+                        seg_art[seg_art>0] =1
+
+                        seg[seg_art==1]=2
+
+                    indexer_inside_batch = 0
+                    for i_batch, j_batch in zip(list_i_s, list_j_s):
+                        seg_in = seg[indexer_inside_batch]
+
+                        index_y_u_in = list_y_u[indexer_inside_batch]
+                        index_y_d_in = list_y_d[indexer_inside_batch]
+
+                        index_x_u_in = list_x_u[indexer_inside_batch]
+                        index_x_d_in = list_x_d[indexer_inside_batch]
+
+                        if i_batch == 0 and j_batch == 0:
+                            prediction_true[index_y_d_in + 0:index_y_u_in - margin,
+                                            index_x_d_in + 0:index_x_u_in - margin] = \
+                                seg_in[0:-margin or None,
+                                       0:-margin or None,
+                                       np.newaxis]
+                            confidence_matrix[index_y_d_in + 0:index_y_u_in - margin,
+                                              index_x_d_in + 0:index_x_u_in - margin] = \
+                                label_p_pred[0, 0:-margin or None,
+                                             0:-margin or None,
+                                             1]
+                        elif i_batch == nxf - 1 and j_batch == nyf - 1:
+                            prediction_true[index_y_d_in + margin:index_y_u_in - 0,
+                                            index_x_d_in + margin:index_x_u_in - 0] = \
+                                seg_in[margin:,
+                                       margin:,
+                                       np.newaxis]
+                            confidence_matrix[index_y_d_in + margin:index_y_u_in - 0,
+                                              index_x_d_in + margin:index_x_u_in - 0] = \
+                                label_p_pred[0, margin:,
+                                             margin:,
+                                             1]
+                        elif i_batch == 0 and j_batch == nyf - 1:
+                            prediction_true[index_y_d_in + margin:index_y_u_in - 0,
+                                            index_x_d_in + 0:index_x_u_in - margin] = \
+                                seg_in[margin:,
+                                       0:-margin or None,
+                                       np.newaxis]
+                            confidence_matrix[index_y_d_in + margin:index_y_u_in - 0,
+                                              index_x_d_in + 0:index_x_u_in - margin] = \
+                                label_p_pred[0, margin:,
+                                             0:-margin or None,
+                                             1]
+                        elif i_batch == nxf - 1 and j_batch == 0:
+                            prediction_true[index_y_d_in + 0:index_y_u_in - margin,
+                                            index_x_d_in + margin:index_x_u_in - 0] = \
+                                seg_in[0:-margin or None,
+                                       margin:,
+                                       np.newaxis]
+                            confidence_matrix[index_y_d_in + 0:index_y_u_in - margin,
+                                              index_x_d_in + margin:index_x_u_in - 0] = \
+                                label_p_pred[0, 0:-margin or None,
+                                             margin:,
+                                             1]
+                        elif i_batch == 0 and j_batch != 0 and j_batch != nyf - 1:
+                            prediction_true[index_y_d_in + margin:index_y_u_in - margin,
+                                            index_x_d_in + 0:index_x_u_in - margin] = \
+                                seg_in[margin:-margin or None,
+                                       0:-margin or None,
+                                       np.newaxis]
+                            confidence_matrix[index_y_d_in + margin:index_y_u_in - margin,
+                                              index_x_d_in + 0:index_x_u_in - margin] = \
+                                label_p_pred[0, margin:-margin or None,
+                                             0:-margin or None,
+                                             1]
+                        elif i_batch == nxf - 1 and j_batch != 0 and j_batch != nyf - 1:
+                            prediction_true[index_y_d_in + margin:index_y_u_in - margin,
+                                            index_x_d_in + margin:index_x_u_in - 0] = \
+                                seg_in[margin:-margin or None,
+                                       margin:,
+                                       np.newaxis]
+                            confidence_matrix[index_y_d_in + margin:index_y_u_in - margin,
+                                              index_x_d_in + margin:index_x_u_in - 0] = \
+                                label_p_pred[0, margin:-margin or None,
+                                             margin:,
+                                             1]
+                        elif i_batch != 0 and i_batch != nxf - 1 and j_batch == 0:
+                            prediction_true[index_y_d_in + 0:index_y_u_in - margin,
+                                            index_x_d_in + margin:index_x_u_in - margin] = \
+                                seg_in[0:-margin or None,
+                                       margin:-margin or None,
+                                       np.newaxis]
+                            confidence_matrix[index_y_d_in + 0:index_y_u_in - margin,
+                                              index_x_d_in + margin:index_x_u_in - margin] = \
+                                label_p_pred[0, 0:-margin or None,
+                                             margin:-margin or None,
+                                             1]
+                        elif i_batch != 0 and i_batch != nxf - 1 and j_batch == nyf - 1:
+                            prediction_true[index_y_d_in + margin:index_y_u_in - 0,
+                                            index_x_d_in + margin:index_x_u_in - margin] = \
+                                seg_in[margin:,
+                                       margin:-margin or None,
+                                       np.newaxis]
+                            confidence_matrix[index_y_d_in + margin:index_y_u_in - 0,
+                                              index_x_d_in + margin:index_x_u_in - margin] = \
+                                label_p_pred[0, margin:,
+                                             margin:-margin or None,
+                                             1]
+                        else:
+                            prediction_true[index_y_d_in + margin:index_y_u_in - margin,
+                                            index_x_d_in + margin:index_x_u_in - margin] = \
+                                seg_in[margin:-margin or None,
+                                       margin:-margin or None,
+                                       np.newaxis]
+                            confidence_matrix[index_y_d_in + margin:index_y_u_in - margin,
+                                              index_x_d_in + margin:index_x_u_in - margin] = \
+                                label_p_pred[0, margin:-margin or None,
+                                             margin:-margin or None,
+                                             1]
+                        indexer_inside_batch += 1
+
+                    list_i_s = []
+                    list_j_s = []
+                    list_x_u = []
+                    list_x_d = []
+                    list_y_u = []
+                    list_y_d = []
+
+                    batch_indexer = 0
+                    img_patch[:] = 0
+
+        prediction_true = prediction_true.astype(np.uint8)
+        gc.collect()
+        return prediction_true, confidence_matrix
+
+    def extract_page(self):
+        self.logger.debug("enter extract_page")
+        cont_page = []
         if not self.ignore_page_extraction:
-            self.logger.debug("enter extract_page")
-            #cv2.GaussianBlur(img, (5, 5), 0)
-            prediction = self.do_prediction(False, img, self.model_zoo.get("page"))
-            contours, _ = cv2.findContours(prediction, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-            if len(contours):
-                areas = np.array(list(map(cv2.contourArea, contours)))
-                cnt = contours[np.argmax(areas)]
-                cont_page = [cnt]
+            img = cv2.GaussianBlur(self.image, (5, 5), 0)
+            img_page_prediction = self.do_prediction(False, img, self.model_page)
+            imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY)
+            _, thresh = cv2.threshold(imgray, 0, 255, 0)
+            thresh = cv2.dilate(thresh, KERNEL, iterations=3)
+            contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+
+            if len(contours)>0:
+                cnt_size = np.array([cv2.contourArea(contours[j])
+                                     for j in range(len(contours))])
+                cnt = contours[np.argmax(cnt_size)]
                 x, y, w, h = cv2.boundingRect(cnt)
-                #if x <= 30:
-                    #w += x
-                    #x = 0
-                #if (self.image.shape[1] - (x + w)) <= 30:
-                    #w = w + (self.image.shape[1] - (x + w))
-                #if y <= 30:
-                    #h = h + y
-                    #y = 0
-                #if (self.image.shape[0] - (y + h)) <= 30:
-                    #h = h + (self.image.shape[0] - (y + h))
+                if x <= 30:
+                    w += x
+                    x = 0
+                if (self.image.shape[1] - (x + w)) <= 30:
+                    w = w + (self.image.shape[1] - (x + w))
+                if y <= 30:
+                    h = h + y
+                    y = 0
+                if (self.image.shape[0] - (y + h)) <= 30:
+                    h = h + (self.image.shape[0] - (y + h))
                 box = [x, y, w, h]
-                cropped_page, page_coord = crop_image_inside_box(box, img)
-                mask_page = np.zeros((h, w), dtype=np.uint8)
-                mask_page = cv2.fillPoly(mask_page, pts=[cnt - [x, y]], color=1)
-
+            else:
+                box = [0, 0, img.shape[1], img.shape[0]]
+            cropped_page, page_coord = crop_image_inside_box(box, self.image)
+            cont_page.append(np.array([[page_coord[2], page_coord[0]],
+                                       [page_coord[3], page_coord[0]],
+                                       [page_coord[3], page_coord[1]],
+                                       [page_coord[2], page_coord[1]]]))
             self.logger.debug("exit extract_page")
-        return page_coord, cont_page, cropped_page, mask_page
+        else:
+            box = [0, 0, self.image.shape[1], self.image.shape[0]]
+            cropped_page, page_coord = crop_image_inside_box(box, self.image)
+            cont_page.append(np.array([[page_coord[2], page_coord[0]],
+                                       [page_coord[3], page_coord[0]],
+                                       [page_coord[3], page_coord[1]],
+                                       [page_coord[2], page_coord[1]]]))
+        return cropped_page, page_coord, cont_page
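Aside: extract_page reduces the page model's output to the largest contour's bounding box. A self-contained sketch of that post-processing chain (helper name is mine; the constant KERNEL is stood in by a 5x5 ones kernel):

import cv2
import numpy as np

def largest_contour_box(page_prediction):
    # binarize the border-model output, dilate, keep the biggest contour
    gray = cv2.cvtColor(page_prediction, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 0, 255, 0)
    thresh = cv2.dilate(thresh, np.ones((5, 5), np.uint8), iterations=3)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return 0, 0, page_prediction.shape[1], page_prediction.shape[0]
    cnt = max(contours, key=cv2.contourArea)
    return cv2.boundingRect(cnt)  # x, y, w, h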
-    def early_page_for_num_of_column_classification(self, img):
+    def early_page_for_num_of_column_classification(self,img_bin):
         if not self.ignore_page_extraction:
             self.logger.debug("enter early_page_for_num_of_column_classification")
-            img2 = cv2.GaussianBlur(img, (5, 5), 0)
-            prediction = self.do_prediction(False, img2, self.model_zoo.get("page"))
-            prediction = cv2.dilate(prediction, KERNEL, iterations=3)
-            contours, _ = cv2.findContours(prediction, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-            if len(contours):
-                areas = np.array(list(map(cv2.contourArea, contours)))
-                cnt = contours[np.argmax(areas)]
+            if self.input_binary:
+                img = np.copy(img_bin).astype(np.uint8)
+            else:
+                img = self.imread()
+            img = cv2.GaussianBlur(img, (5, 5), 0)
+            img_page_prediction = self.do_prediction(False, img, self.model_page)
+
+            imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY)
+            _, thresh = cv2.threshold(imgray, 0, 255, 0)
+            thresh = cv2.dilate(thresh, KERNEL, iterations=3)
+            contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+            if len(contours)>0:
+                cnt_size = np.array([cv2.contourArea(contours[j])
+                                     for j in range(len(contours))])
+                cnt = contours[np.argmax(cnt_size)]
                 box = cv2.boundingRect(cnt)
             else:
                 box = [0, 0, img.shape[1], img.shape[0]]
+            cropped_page, page_coord = crop_image_inside_box(box, img)
+            self.logger.debug("exit early_page_for_num_of_column_classification")
         else:
+            img = self.imread()
             box = [0, 0, img.shape[1], img.shape[0]]
-        cropped_page, page_coord = crop_image_inside_box(box, img)
+            cropped_page, page_coord = crop_image_inside_box(box, img)
         return cropped_page, page_coord

     def extract_text_regions_new(self, img, patches, cols):
-        self.logger.debug("enter extract_text_regions_new")
+        self.logger.debug("enter extract_text_regions")
         img_height_h = img.shape[0]
         img_width_h = img.shape[1]
+        model_region = self.model_region_fl if patches else self.model_region_fl_np

-        prediction_regions, confidence_regions = self.do_prediction_new_concept(
-            patches, img, self.model_zoo.get("region_fl" if patches else "region_fl_np"),
-            n_batch_inference=1,
-            thresholding_for_heading=not patches)
+        if self.light_version:
+            pass
+        elif not patches:
+            img = otsu_copy_binary(img).astype(np.uint8)
+            prediction_regions = None
+        elif cols:
+            img = otsu_copy_binary(img).astype(np.uint8)
+            if cols == 1:
+                img = resize_image(img, int(img_height_h * 1000 / float(img_width_h)), 1000).astype(np.uint8)
+            elif cols == 2:
+                img = resize_image(img, int(img_height_h * 1300 / float(img_width_h)), 1300).astype(np.uint8)
+            elif cols == 3:
+                img = resize_image(img, int(img_height_h * 1600 / float(img_width_h)), 1600).astype(np.uint8)
+            elif cols == 4:
+                img = resize_image(img, int(img_height_h * 1900 / float(img_width_h)), 1900).astype(np.uint8)
+            elif cols == 5:
+                img = resize_image(img, int(img_height_h * 2200 / float(img_width_h)), 2200).astype(np.uint8)
+            else:
+                img = resize_image(img, int(img_height_h * 2500 / float(img_width_h)), 2500).astype(np.uint8)

-        self.logger.debug("exit extract_text_regions_new")
-        return prediction_regions, confidence_regions
+        prediction_regions = self.do_prediction(patches, img, model_region, marginal_of_patch_percent=0.1, n_batch_inference=3)
+        prediction_regions = resize_image(prediction_regions, img_height_h, img_width_h)
+        self.logger.debug("exit extract_text_regions")
+        return prediction_regions, prediction_regions

     def extract_text_regions(self, img, patches, cols):
         self.logger.debug("enter extract_text_regions")
         img_height_h = img.shape[0]
         img_width_h = img.shape[1]
-        model_region = self.model_zoo.get("region_fl" if patches else "region_fl_np")
+        model_region = self.model_region_fl if patches else self.model_region_fl_np

-        prediction_regions = self.do_prediction(patches, img, model_region,
-                                                marginal_of_patch_percent=0.1)
+        if not patches:
+            img = otsu_copy_binary(img)
+            img = img.astype(np.uint8)
+            prediction_regions2 = None
+        elif cols:
+            if cols == 1:
+                img_height_new = int(img_height_h * 0.7)
+                img_width_new = int(img_width_h * 0.7)
+            elif cols == 2:
+                img_height_new = int(img_height_h * 0.4)
+                img_width_new = int(img_width_h * 0.4)
+            else:
+                img_height_new = int(img_height_h * 0.3)
+                img_width_new = int(img_width_h * 0.3)
+            img2 = otsu_copy_binary(img)
+            img2 = img2.astype(np.uint8)
+            img2 = resize_image(img2, img_height_new, img_width_new)
+            prediction_regions2 = self.do_prediction(patches, img2, model_region, marginal_of_patch_percent=0.1)
+            prediction_regions2 = resize_image(prediction_regions2, img_height_h, img_width_h)
+
+            img = otsu_copy_binary(img).astype(np.uint8)
+            if cols == 1:
+                img = resize_image(img, int(img_height_h * 0.5), int(img_width_h * 0.5)).astype(np.uint8)
+            elif cols == 2 and img_width_h >= 2000:
+                img = resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)).astype(np.uint8)
+            elif cols == 3 and ((self.scale_x == 1 and img_width_h > 3000) or
+                                (self.scale_x != 1 and img_width_h > 2800)):
+                img = resize_image(img, 2800 * img_height_h // img_width_h, 2800).astype(np.uint8)
+            elif cols == 4 and ((self.scale_x == 1 and img_width_h > 4000) or
+                                (self.scale_x != 1 and img_width_h > 3700)):
+                img = resize_image(img, 3700 * img_height_h // img_width_h, 3700).astype(np.uint8)
+            elif cols == 4:
+                img = resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)).astype(np.uint8)
+            elif cols == 5 and self.scale_x == 1 and img_width_h > 5000:
+                img = resize_image(img, int(img_height_h * 0.7), int(img_width_h * 0.7)).astype(np.uint8)
+            elif cols == 5:
+                img = resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)).astype(np.uint8)
+            elif img_width_h > 5600:
+                img = resize_image(img, 5600 * img_height_h // img_width_h, 5600).astype(np.uint8)
+            else:
+                img = resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)).astype(np.uint8)
+
+        prediction_regions = self.do_prediction(patches, img, model_region, marginal_of_patch_percent=0.1)
         prediction_regions = resize_image(prediction_regions, img_height_h, img_width_h)
         self.logger.debug("exit extract_text_regions")
-        return prediction_regions
+        return prediction_regions, prediction_regions2
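Aside: both extract_text_regions* variants pick a model input size from the detected column count before prediction. The mapping used in extract_text_regions_new, restated as a small (hypothetical) lookup:

def target_width_for_columns(cols):
    widths = {1: 1000, 2: 1300, 3: 1600, 4: 1900, 5: 2200}
    return widths.get(cols, 2500)

# the page is then Otsu-binarized and resized to
# (img_h * target // img_w, target) before the region model runs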
-    def get_textlines_of_a_textregion_sorted(self, textlines_textregion, cx_textline, cy_textline, w_h_textline):
-        N = len(cy_textline)
-        if N <= 1:
-            return textlines_textregion
+    def get_slopes_and_deskew_new_light2(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, slope_deskew):

-        cx_textline = np.array(cx_textline)
-        cy_textline = np.array(cy_textline)
-        diff_cy = np.abs(np.diff(np.sort(cy_textline)))
-        diff_cx = np.abs(np.diff(np.sort(cx_textline)))
-
-        if N > 1:
-            mean_y_diff = np.median(diff_cy)
-            mean_x_diff = np.median(diff_cx)
-            count_hor = np.count_nonzero(np.diff(w_h_textline) > 0)
-            count_ver = N - count_hor
-        else:
-            mean_y_diff = 0
-            mean_x_diff = 0
-            count_hor = 1
-            count_ver = 0
-
-        if count_hor >= count_ver:
-            row_threshold = mean_y_diff / 1.5 if mean_y_diff > 0 else 10
-            rows = []
-            for prev_idx, curr_idx in pairwise(np.argsort(cy_textline)):
-                if not len(rows):
-                    rows.append([prev_idx])
-                if abs(cy_textline[curr_idx] - cy_textline[prev_idx]) <= row_threshold:
-                    rows[-1].append(curr_idx)
-                else:
-                    rows.append([curr_idx])
-
-            sorted_textlines = []
-            for row in rows:
-                for idx in np.argsort(cx_textline[row]):
-                    sorted_textlines.append(textlines_textregion[row[idx]])
-
-        else:
-            col_threshold = mean_x_diff / 1.5 if mean_x_diff > 0 else 10
-            cols = []
-            for prev_idx, curr_idx in pairwise(np.argsort(cx_textline)):
-                if not len(cols):
-                    cols.append([prev_idx])
-                if abs(cx_textline[curr_idx] - cx_textline[prev_idx]) <= col_threshold:
-                    cols[-1].append(curr_idx)
-                else:
-                    cols.append([curr_idx])
-
-            sorted_textlines = []
-            for col in cols:
-                for idx in np.argsort(cy_textline[col]):
-                    sorted_textlines.append(textlines_textregion[col[idx]])
-
-        return sorted_textlines
-
-    def get_slopes_and_deskew_new_light2(self, contours_par, textline_mask_tot, slope_deskew):
-
-        polygons_of_textlines = return_contours_of_interested_region(textline_mask_tot, 1, 0.0001)
-        cx_textlines, cy_textlines = find_center_of_contours(polygons_of_textlines)
-        w_h_textlines = [cv2.boundingRect(polygon)[2:] for polygon in polygons_of_textlines]
-        args_textlines = np.arange(len(polygons_of_textlines))
+        polygons_of_textlines = return_contours_of_interested_region(textline_mask_tot,1,0.00001)
+        M_main_tot = [cv2.moments(polygons_of_textlines[j])
+                      for j in range(len(polygons_of_textlines))]
+        cx_main_tot = [(M_main_tot[j]["m10"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))]
+        cy_main_tot = [(M_main_tot[j]["m01"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))]
+        args_textlines = np.array(range(len(polygons_of_textlines)))
         all_found_textline_polygons = []
         slopes = []
-        for index, contour in enumerate(contours_par):
-            results = [cv2.pointPolygonTest(contour,
-                                            (cx_textlines[ind],
-                                             cy_textlines[ind]),
-                                            False)
-                       for ind in args_textlines]
+        all_box_coord =[]
+
+        for index, con_region_ind in enumerate(contours_par):
+            results = [cv2.pointPolygonTest(con_region_ind, (cx_main_tot[ind], cy_main_tot[ind]), False)
+                       for ind in args_textlines ]
             results = np.array(results)
-            indexes_in = args_textlines[results == 1]
-            textlines_in = self.get_textlines_of_a_textregion_sorted(
-                [polygons_of_textlines[ind] for ind in indexes_in],
-                [cx_textlines[ind] for ind in indexes_in],
-                [cy_textlines[ind] for ind in indexes_in],
-                [w_h_textlines[ind] for ind in indexes_in])
+            indexes_in = args_textlines[results==1]
+            textlines_ins = [polygons_of_textlines[ind] for ind in indexes_in]

-            all_found_textline_polygons.append(textlines_in) #[::-1])
+            all_found_textline_polygons.append(textlines_ins[::-1])
+            slopes.append(slope_deskew)

-            try:
-                slopes.append(estimate_skew_contours(textlines_in))
-            except ValueError:
-                slopes.append(slope_deskew)
-            # plt.imshow(textline_mask_tot)
-            # for contour in textlines_in:
-            #     plt.plot(*contour[:, 0].T, linewidth=3, color='red')
-            # plt.show()
+            _, crop_coor = crop_image_inside_box(boxes[index],image_page_rotated)
+            all_box_coord.append(crop_coor)

-        return all_found_textline_polygons, slopes
+        return all_found_textline_polygons, boxes, contours, contours_par, all_box_coord, np.array(range(len(contours_par))), slopes

-    def get_slopes_and_deskew_new_curved(self, contours_par, textline_mask_tot,
-                                         num_col, slope_deskew, name):
-        if not len(contours_par):
-            return [], []
+    def get_slopes_and_deskew_new_light(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, slope_deskew):
+        if not len(contours):
+            return [], [], [], [], [], [], []
+        self.logger.debug("enter get_slopes_and_deskew_new_light")
+        results = self.executor.map(partial(do_work_of_slopes_new_light,
+                                            textline_mask_tot_ea=textline_mask_tot,
+                                            image_page_rotated=image_page_rotated,
+                                            slope_deskew=slope_deskew,textline_light=self.textline_light,
+                                            logger=self.logger,),
+                                    boxes, contours, contours_par, range(len(contours_par)))
+        #textline_polygons, boxes, text_regions, text_regions_par, box_coord, index_text_con, slopes = zip(*results)
+        self.logger.debug("exit get_slopes_and_deskew_new_light")
+        return tuple(zip(*results))
+
+    def get_slopes_and_deskew_new(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, slope_deskew):
+        if not len(contours):
+            return [], [], [], [], [], [], []
+        self.logger.debug("enter get_slopes_and_deskew_new")
+        results = self.executor.map(partial(do_work_of_slopes_new,
+                                            textline_mask_tot_ea=textline_mask_tot,
+                                            image_page_rotated=image_page_rotated,
+                                            slope_deskew=slope_deskew,
+                                            MAX_SLOPE=MAX_SLOPE,
+                                            KERNEL=KERNEL,
+                                            logger=self.logger,
+                                            plotter=self.plotter,),
+                                    boxes, contours, contours_par, range(len(contours_par)))
+        #textline_polygons, boxes, text_regions, text_regions_par, box_coord, index_text_con, slopes = zip(*results)
+        self.logger.debug("exit get_slopes_and_deskew_new")
+        return tuple(zip(*results))
+
+    def get_slopes_and_deskew_new_curved(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, mask_texts_only, num_col, scale_par, slope_deskew):
+        if not len(contours):
+            return [], [], [], [], [], [], []
         self.logger.debug("enter get_slopes_and_deskew_new_curved")
-        results = map(partial(do_work_of_slopes_new_curved,
-                              textline_mask_tot_ea=textline_mask_tot,
-                              num_col=num_col,
-                              slope_deskew=slope_deskew,
-                              MAX_SLOPE=MAX_SLOPE,
-                              KERNEL=KERNEL,
-                              logger=self.logger,
-                              plotter=self.plotter,
-                              name=name),
-                      contours_par)
-        results = list(results) # exhaust prior to release
-        #textline_polygons, slopes = zip(*results)
+        results = self.executor.map(partial(do_work_of_slopes_new_curved,
+                                            textline_mask_tot_ea=textline_mask_tot,
+                                            image_page_rotated=image_page_rotated,
+                                            mask_texts_only=mask_texts_only,
+                                            num_col=num_col,
+                                            scale_par=scale_par,
+                                            slope_deskew=slope_deskew,
+                                            MAX_SLOPE=MAX_SLOPE,
+                                            KERNEL=KERNEL,
+                                            logger=self.logger,
+                                            plotter=self.plotter,),
+                                    boxes, contours, contours_par, range(len(contours_par)))
+        #textline_polygons, boxes, text_regions, text_regions_par, box_coord, index_text_con, slopes = zip(*results)
         self.logger.debug("exit get_slopes_and_deskew_new_curved")
         return tuple(zip(*results))
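Aside: the three get_slopes_and_deskew_new* variants share one pattern: bind the per-page arrays with functools.partial, fan the per-region work out over self.executor.map, and transpose the per-region result tuples with zip(*...). A self-contained sketch of that pattern (the worker and data below are made up for illustration):

from concurrent.futures import ProcessPoolExecutor
from functools import partial

def work(box, contour, index, *, shared):
    # stand-in for the real per-region slope computation
    return box, len(shared) + index

if __name__ == "__main__":
    boxes = [(0, 0, 10, 10), (10, 0, 10, 10)]
    contours = ["c0", "c1"]
    with ProcessPoolExecutor() as executor:
        results = executor.map(partial(work, shared="page"),
                               boxes, contours, range(len(contours)))
        # transpose: one tuple per output field across all regions
        per_field = tuple(zip(*results))
    print(per_field)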
-    def textline_contours(self, img, use_patches):
+    def textline_contours(self, img, use_patches, scaler_h, scaler_w, num_col_classifier=None):
         self.logger.debug('enter textline_contours')
-        if (self.tables or
-                self.reading_order_machine_based or
-                self.input_binary):
-            # avoid OOM
-            n_batch = 1
-        else:
-            n_batch = 3
-        prediction_textline, conf_textline = self.do_prediction_new_concept(
-            use_patches, img, self.model_zoo.get("textline"),
-            artificial_class=2,
-            n_batch_inference=n_batch,
-            thresholding_for_artificial_class=True,
-            threshold_art_class=self.threshold_art_class_textline)
+        #img = img.astype(np.uint8)
+        img_org = np.copy(img)
+        img_h = img_org.shape[0]
+        img_w = img_org.shape[1]
+        img = resize_image(img_org, int(img_org.shape[0] * scaler_h), int(img_org.shape[1] * scaler_w))

-        #prediction_textline_longshot = self.do_prediction(False, img, self.model_zoo.get("textline"))
+        prediction_textline = self.do_prediction(
+            use_patches, img, self.model_textline,
+            marginal_of_patch_percent=0.15, n_batch_inference=3,
+            thresholding_for_artificial_class_in_light_version=self.textline_light)
+        #if not self.textline_light:
+            #if num_col_classifier==1:
+                #prediction_textline_nopatch = self.do_prediction(False, img, self.model_textline)
+                #prediction_textline[:,:][prediction_textline_nopatch[:,:]==0] = 0
+
+        prediction_textline = resize_image(prediction_textline, img_h, img_w)
+        textline_mask_tot_ea_art = (prediction_textline[:,:]==2)*1
+
+        old_art = np.copy(textline_mask_tot_ea_art)
+        if not self.textline_light:
+            textline_mask_tot_ea_art = textline_mask_tot_ea_art.astype('uint8')
+            #textline_mask_tot_ea_art = cv2.dilate(textline_mask_tot_ea_art, KERNEL, iterations=1)
+            prediction_textline[:,:][textline_mask_tot_ea_art[:,:]==1]=2
+
+        textline_mask_tot_ea_lines = (prediction_textline[:,:]==1)*1
+        textline_mask_tot_ea_lines = textline_mask_tot_ea_lines.astype('uint8')
+        if not self.textline_light:
+            textline_mask_tot_ea_lines = cv2.dilate(textline_mask_tot_ea_lines, KERNEL, iterations=1)
+
+        prediction_textline[:,:][textline_mask_tot_ea_lines[:,:]==1]=1
+        if not self.textline_light:
+            prediction_textline[:,:][old_art[:,:]==1]=2
+
+        prediction_textline_longshot = self.do_prediction(False, img, self.model_textline)
+        prediction_textline_longshot_true_size = resize_image(prediction_textline_longshot, img_h, img_w)

         self.logger.debug('exit textline_contours')
-        # suppress artificial boundary label
-        result = (prediction_textline == 1).astype(np.uint8)
-        #, (prediction_textline_longshot==1).astype(np.uint8)
-        return result, conf_textline
+        return ((prediction_textline[:, :, 0]==1).astype(np.uint8),
+                (prediction_textline_longshot_true_size[:, :, 0]==1).astype(np.uint8))

-    def get_early_layout(
-            self, img,
-            num_col_classifier,
-            label_text=1,
-            label_imgs=2,
-            label_seps=3,
-            label_tabs=10,
-    ):
-        self.logger.debug("enter get_early_layout")
+
+    def do_work_of_slopes(self, q, poly, box_sub, boxes_per_process, textline_mask_tot, contours_per_process):
+        self.logger.debug('enter do_work_of_slopes')
+        slope_biggest = 0
+        slopes_sub = []
+        boxes_sub_new = []
+        poly_sub = []
+        for mv in range(len(boxes_per_process)):
+            crop_img, _ = crop_image_inside_box(boxes_per_process[mv], np.repeat(textline_mask_tot[:, :, np.newaxis], 3, axis=2))
+            crop_img = crop_img[:, :, 0]
+            crop_img = cv2.erode(crop_img, KERNEL, iterations=2)
+            try:
+                textline_con, hierarchy = return_contours_of_image(crop_img)
+                textline_con_fil = filter_contours_area_of_image(crop_img, textline_con, hierarchy, max_area=1, min_area=0.0008)
+                y_diff_mean = find_contours_mean_y_diff(textline_con_fil)
+                sigma_des = max(1, int(y_diff_mean * (4.0 / 40.0)))
+                crop_img[crop_img > 0] = 1
+                slope_corresponding_textregion = return_deskew_slop(crop_img, sigma_des,
+                                                                    map=self.executor.map, logger=self.logger, plotter=self.plotter)
+            except Exception as why:
+                self.logger.error(why)
+                slope_corresponding_textregion = MAX_SLOPE
+
+            if slope_corresponding_textregion == MAX_SLOPE:
+                slope_corresponding_textregion = slope_biggest
+            slopes_sub.append(slope_corresponding_textregion)
+
+            cnt_clean_rot = textline_contours_postprocessing(
+                crop_img, slope_corresponding_textregion, contours_per_process[mv], boxes_per_process[mv])
+
+            poly_sub.append(cnt_clean_rot)
+            boxes_sub_new.append(boxes_per_process[mv])
+
+        q.put(slopes_sub)
+        poly.put(poly_sub)
+        box_sub.put(boxes_sub_new)
+        self.logger.debug('exit do_work_of_slopes')
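Aside: in textline_contours the "artificial" boundary channel (class 2) is thresholded at 0.2 and written back over the argmax map inside do_prediction. Reduced to a standalone function (illustrative only):

import numpy as np

def apply_artificial_class(label_p_pred, art_channel=2, threshold=0.2):
    # label_p_pred: (H, W, C) softmax output of the textline model
    seg = np.argmax(label_p_pred, axis=-1).astype(np.uint8)
    # pixels confidently belonging to the boundary channel override the argmax
    seg[label_p_pred[..., art_channel] >= threshold] = art_channel
    return seg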
+    def get_regions_light_v_extract_only_images(self,img,is_image_enhanced, num_col_classifier):
+        self.logger.debug("enter get_regions_extract_images_only")
+        erosion_hurts = False
+        img_org = np.copy(img)
+        img_height_h = img_org.shape[0]
+        img_width_h = img_org.shape[1]
+
+        if num_col_classifier == 1:
+            img_w_new = 700
+        elif num_col_classifier == 2:
+            img_w_new = 900
+        elif num_col_classifier == 3:
+            img_w_new = 1500
+        elif num_col_classifier == 4:
+            img_w_new = 1800
+        elif num_col_classifier == 5:
+            img_w_new = 2200
+        elif num_col_classifier == 6:
+            img_w_new = 2500
+        img_h_new = int(img.shape[0] / float(img.shape[1]) * img_w_new)
+        img_resized = resize_image(img,img_h_new, img_w_new )
+
+        prediction_regions_org, _ = self.do_prediction_new_concept(True, img_resized, self.model_region)
+
+        prediction_regions_org = resize_image(prediction_regions_org,img_height_h, img_width_h )
+        image_page, page_coord, cont_page = self.extract_page()
+
+        prediction_regions_org = prediction_regions_org[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]]
+        prediction_regions_org=prediction_regions_org[:,:,0]
+
+        mask_lines_only = (prediction_regions_org[:,:] ==3)*1
+        mask_texts_only = (prediction_regions_org[:,:] ==1)*1
+        mask_images_only=(prediction_regions_org[:,:] ==2)*1
+
+        polygons_lines_xml, hir_lines_xml = return_contours_of_image(mask_lines_only)
+        polygons_lines_xml = textline_con_fil = filter_contours_area_of_image(
+            mask_lines_only, polygons_lines_xml, hir_lines_xml, max_area=1, min_area=0.00001)
+
+        polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001)
+        polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001)
+
+        text_regions_p_true = np.zeros(prediction_regions_org.shape)
+        text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_lines, color=(3,3,3))
+
+        text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2
+        text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts=polygons_of_only_texts, color=(1,1,1))
+
+        text_regions_p_true[text_regions_p_true.shape[0]-15:text_regions_p_true.shape[0], :] = 0
+        text_regions_p_true[:, text_regions_p_true.shape[1]-15:text_regions_p_true.shape[1]] = 0
+
+        ##polygons_of_images = return_contours_of_interested_region(text_regions_p_true, 2, 0.0001)
+        polygons_of_images = return_contours_of_interested_region(text_regions_p_true, 2, 0.001)
+        image_boundary_of_doc = np.zeros((text_regions_p_true.shape[0], text_regions_p_true.shape[1]))
+
+        ###image_boundary_of_doc[:6, :] = 1
+        ###image_boundary_of_doc[text_regions_p_true.shape[0]-6:text_regions_p_true.shape[0], :] = 1
+
+        ###image_boundary_of_doc[:, :6] = 1
+        ###image_boundary_of_doc[:, text_regions_p_true.shape[1]-6:text_regions_p_true.shape[1]] = 1
+
+        polygons_of_images_fin = []
+        for ploy_img_ind in polygons_of_images:
+            """
+            test_poly_image = np.zeros((text_regions_p_true.shape[0], text_regions_p_true.shape[1]))
+            test_poly_image = cv2.fillPoly(test_poly_image, pts=[ploy_img_ind], color=(1,1,1))
+
+            test_poly_image = test_poly_image + image_boundary_of_doc
+            test_poly_image_intersected_area = ( test_poly_image[:,:]==2 )*1
+
+            test_poly_image_intersected_area = test_poly_image_intersected_area.sum()
+
+            if test_poly_image_intersected_area==0:
+                ##polygons_of_images_fin.append(ploy_img_ind)
+
+                box = cv2.boundingRect(ploy_img_ind)
+                _, page_coord_img = crop_image_inside_box(box, text_regions_p_true)
+                # cont_page.append(np.array([[page_coord[2], page_coord[0]],
+                #                            [page_coord[3], page_coord[0]],
+                #                            [page_coord[3], page_coord[1]],
+                #                            [page_coord[2], page_coord[1]]]))
+                polygons_of_images_fin.append(np.array([[page_coord_img[2], page_coord_img[0]],
+                                                        [page_coord_img[3], page_coord_img[0]],
+                                                        [page_coord_img[3], page_coord_img[1]],
+                                                        [page_coord_img[2], page_coord_img[1]]]) )
+            """
+            box = x, y, w, h = cv2.boundingRect(ploy_img_ind)
+            if h < 150 or w < 150:
+                pass
+            else:
+                _, page_coord_img = crop_image_inside_box(box, text_regions_p_true)
+                # cont_page.append(np.array([[page_coord[2], page_coord[0]],
+                #                            [page_coord[3], page_coord[0]],
+                #                            [page_coord[3], page_coord[1]],
+                #                            [page_coord[2], page_coord[1]]]))
+                polygons_of_images_fin.append(np.array([[page_coord_img[2], page_coord_img[0]],
+                                                        [page_coord_img[3], page_coord_img[0]],
+                                                        [page_coord_img[3], page_coord_img[1]],
+                                                        [page_coord_img[2], page_coord_img[1]]]))
+
+        self.logger.debug("exit get_regions_extract_images_only")
+        return text_regions_p_true, erosion_hurts, polygons_lines_xml, polygons_of_images_fin, image_page, page_coord, cont_page
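Aside: get_regions_light_v_extract_only_images keeps only image regions whose bounding box is at least 150 px in both dimensions and stores them as axis-aligned rectangles. A condensed (hypothetical) form of that filter:

import cv2
import numpy as np

def filter_image_polygons(polygons, min_side=150):
    kept = []
    for poly in polygons:
        x, y, w, h = cv2.boundingRect(poly)
        if w >= min_side and h >= min_side:
            # store the region as its axis-aligned rectangle
            kept.append(np.array([[x, y], [x + w, y], [x + w, y + h], [x, y + h]]))
    return kept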
(num_col_classifier-3)*100) - new_h = new_w * img_height_h // img_width_h - img_resized = resize_image(img_resized, new_h, new_w) - self.logger.debug("resized to %dx%d for %d cols", - new_w, new_h, num_col_classifier) - patches = True + img_bin = np.copy(img_resized) + #print("inside 1 ", time.time()-t_in) - prediction_regions, confidence_regions = \ - self.do_prediction_new_concept( - patches, img_resized, self.model_zoo.get("region_1_2"), - n_batch_inference=1, - thresholding_for_artificial_class=True, - threshold_art_class=self.threshold_art_class_layout, - separator_class=label_seps) + ###textline_mask_tot_ea = self.run_textline(img_bin) + self.logger.debug("detecting textlines on %s with %d colors", str(img_resized.shape), len(np.unique(img_resized))) + textline_mask_tot_ea = self.run_textline(img_resized, num_col_classifier) + textline_mask_tot_ea = resize_image(textline_mask_tot_ea,img_height_h, img_width_h ) - prediction_regions = resize_image(prediction_regions, img_height_h, img_width_h) - confidence_regions = resize_image(confidence_regions, img_height_h, img_width_h) + #print(self.image_org.shape) + #cv2.imwrite('out_13.png', self.image_page_org_size) - if self.tables: - prediction_tables, confidence_tables = self.get_tables_from_model(img) - else: - prediction_tables = np.zeros(img.shape[:2], dtype=np.uint8) - confidence_tables = np.zeros(img.shape[:2], dtype=bool) - - mask_texts_only = (prediction_regions == label_text).astype('uint8') - mask_images_only = (prediction_regions == label_imgs).astype('uint8') - mask_seps_only = (prediction_regions == label_seps).astype('uint8') - mask_tabs_only = prediction_tables - - # if num_col_classifier == 1 or num_col_classifier == 2: - # mask_texts_only = cv2.morphologyEx(mask_texts_only, cv2.MORPH_OPEN, KERNEL, iterations=1) - mask_texts_only = cv2.dilate(mask_texts_only, kernel=np.ones((2, 2), np.uint8), iterations=1) - - polygons_seplines, hir_seplines = return_contours_of_image(mask_seps_only) - polygons_seplines = filter_contours_area_of_image( - mask_seps_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001, dilate=1) - - polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) - ##polygons_of_only_texts = dilate_textregion_contours(polygons_of_only_texts) - polygons_of_only_seps = return_contours_of_interested_region(mask_seps_only,1,0.00001) - polygons_of_only_tabs = return_contours_of_interested_region(mask_tabs_only,1,0.00001) - - text_regions_p = np.zeros_like(prediction_regions) - text_regions_p = cv2.fillPoly(text_regions_p, pts=polygons_of_only_seps, color=label_seps) - text_regions_p[mask_images_only == 1] = label_imgs - text_regions_p = cv2.fillPoly(text_regions_p, pts=polygons_of_only_texts, color=label_text) - text_regions_p = cv2.fillPoly(text_regions_p, pts=polygons_of_only_tabs, color=label_tabs) - - textline_mask_tot_ea[text_regions_p != label_text] = 0 - confidence_textline[text_regions_p != label_text] = 0 - confidence_regions[text_regions_p == label_tabs] = \ - confidence_tables[text_regions_p == label_tabs] - - regions_without_separators = ((text_regions_p == label_text) | - (text_regions_p == label_tabs)).astype(np.uint8) - #plt.imshow(textline_mask_tot_ea) + #plt.imshwo(self.image_page_org_size) #plt.show() - #print("inside 4 ", time.time()-t_in) - self.logger.debug("exit get_early_layout") - return (erosion_hurts, - polygons_seplines, - polygons_of_only_texts, - regions_without_separators, - text_regions_p, - textline_mask_tot_ea, - confidence_regions, - 
confidence_textline) + if not skip_layout_and_reading_order: + #print("inside 2 ", time.time()-t_in) + if num_col_classifier == 1 or num_col_classifier == 2: + if self.image_org.shape[0]/self.image_org.shape[1] > 2.5: + self.logger.debug("resized to %dx%d for %d cols", + img_resized.shape[1], img_resized.shape[0], num_col_classifier) + prediction_regions_org, confidence_matrix = self.do_prediction_new_concept( + True, img_resized, self.model_region_1_2, n_batch_inference=1, + thresholding_for_some_classes_in_light_version=True) + else: + prediction_regions_org = np.zeros((self.image_org.shape[0], self.image_org.shape[1], 3)) + confidence_matrix = np.zeros((self.image_org.shape[0], self.image_org.shape[1])) + prediction_regions_page, confidence_matrix_page = self.do_prediction_new_concept( + False, self.image_page_org_size, self.model_region_1_2, n_batch_inference=1, + thresholding_for_artificial_class_in_light_version=True) + ys = slice(*self.page_coord[0:2]) + xs = slice(*self.page_coord[2:4]) + prediction_regions_org[ys, xs] = prediction_regions_page + confidence_matrix[ys, xs] = confidence_matrix_page - def do_order_of_regions( - self, - contours_only_text_parent, - contours_only_text_parent_h, - polygons_of_drop_capitals, - boxes, - textline_mask_tot - ): - assert np.any(textline_mask_tot) - self.logger.debug("enter do_order_of_regions") - contours_only_text_parent = ensure_array(contours_only_text_parent) - contours_only_text_parent_h = ensure_array(contours_only_text_parent_h) - polygons_of_drop_capitals = ensure_array(polygons_of_drop_capitals) + else: + new_h = (900+ (num_col_classifier-3)*100) + img_resized = resize_image(img_bin, int(new_h * img_bin.shape[0] /img_bin.shape[1]), new_h) + self.logger.debug("resized to %dx%d (new_h=%d) for %d cols", + img_resized.shape[1], img_resized.shape[0], new_h, num_col_classifier) + prediction_regions_org, confidence_matrix = self.do_prediction_new_concept( + True, img_resized, self.model_region_1_2, n_batch_inference=2, + thresholding_for_some_classes_in_light_version=True) + ###prediction_regions_org = self.do_prediction(True, img_bin, self.model_region, n_batch_inference=3, thresholding_for_some_classes_in_light_version=True) + #print("inside 3 ", time.time()-t_in) + #plt.imshow(prediction_regions_org[:,:,0]) + #plt.show() + + prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) + confidence_matrix = resize_image(confidence_matrix, img_height_h, img_width_h ) + img_bin = resize_image(img_bin, img_height_h, img_width_h ) + prediction_regions_org=prediction_regions_org[:,:,0] + + mask_lines_only = (prediction_regions_org[:,:] ==3)*1 + mask_texts_only = (prediction_regions_org[:,:] ==1)*1 + mask_texts_only = mask_texts_only.astype('uint8') + + ##if num_col_classifier == 1 or num_col_classifier == 2: + ###mask_texts_only = cv2.erode(mask_texts_only, KERNEL, iterations=1) + ##mask_texts_only = cv2.dilate(mask_texts_only, KERNEL, iterations=1) + + mask_texts_only = cv2.dilate(mask_texts_only, kernel=np.ones((2,2), np.uint8), iterations=1) + mask_images_only=(prediction_regions_org[:,:] ==2)*1 + + polygons_lines_xml, hir_lines_xml = return_contours_of_image(mask_lines_only) + test_khat = np.zeros(prediction_regions_org.shape) + test_khat = cv2.fillPoly(test_khat, pts=polygons_lines_xml, color=(1,1,1)) + + #plt.imshow(test_khat[:,:]) + #plt.show() + #for jv in range(1): + #print(jv, hir_lines_xml[0][232][3]) + #test_khat = np.zeros(prediction_regions_org.shape) + #test_khat = cv2.fillPoly(test_khat, pts = 
[polygons_lines_xml[232]], color=(1,1,1)) + #plt.imshow(test_khat[:,:]) + #plt.show() + + polygons_lines_xml = filter_contours_area_of_image( + mask_lines_only, polygons_lines_xml, hir_lines_xml, max_area=1, min_area=0.00001) + + test_khat = np.zeros(prediction_regions_org.shape) + test_khat = cv2.fillPoly(test_khat, pts = polygons_lines_xml, color=(1,1,1)) + + #plt.imshow(test_khat[:,:]) + #plt.show() + #sys.exit() + + polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) + ##polygons_of_only_texts = self.dilate_textregions_contours(polygons_of_only_texts) + polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) + + text_regions_p_true = np.zeros(prediction_regions_org.shape) + text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts=polygons_of_only_lines, color=(3,3,3)) + + text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 + text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_texts, color=(1,1,1)) + + #plt.imshow(textline_mask_tot_ea) + #plt.show() + + textline_mask_tot_ea[(text_regions_p_true==0) | (text_regions_p_true==4) ] = 0 + + #plt.imshow(textline_mask_tot_ea) + #plt.show() + #print("inside 4 ", time.time()-t_in) + self.logger.debug("exit get_regions_light_v") + return text_regions_p_true, erosion_hurts, polygons_lines_xml, textline_mask_tot_ea, img_bin, confidence_matrix + else: + img_bin = resize_image(img_bin,img_height_h, img_width_h ) + self.logger.debug("exit get_regions_light_v") + return None, erosion_hurts, None, textline_mask_tot_ea, img_bin, None + + def get_regions_from_xy_2models(self,img,is_image_enhanced, num_col_classifier): + self.logger.debug("enter get_regions_from_xy_2models") + erosion_hurts = False + img_org = np.copy(img) + img_height_h = img_org.shape[0] + img_width_h = img_org.shape[1] + + ratio_y=1.3 + ratio_x=1 + + img = resize_image(img_org, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) + prediction_regions_org_y = self.do_prediction(True, img, self.model_region) + prediction_regions_org_y = resize_image(prediction_regions_org_y, img_height_h, img_width_h ) + + #plt.imshow(prediction_regions_org_y[:,:,0]) + #plt.show() + prediction_regions_org_y = prediction_regions_org_y[:,:,0] + mask_zeros_y = (prediction_regions_org_y[:,:]==0)*1 + + ##img_only_regions_with_sep = ( (prediction_regions_org_y[:,:] != 3) & (prediction_regions_org_y[:,:] != 0) )*1 + img_only_regions_with_sep = (prediction_regions_org_y == 1).astype(np.uint8) + try: + img_only_regions = cv2.erode(img_only_regions_with_sep[:,:], KERNEL, iterations=20) + _, _ = find_num_col(img_only_regions, num_col_classifier, self.tables, multiplier=6.0) + img = resize_image(img_org, int(img_org.shape[0]), int(img_org.shape[1]*(1.2 if is_image_enhanced else 1))) + + prediction_regions_org = self.do_prediction(True, img, self.model_region) + prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) + + prediction_regions_org=prediction_regions_org[:,:,0] + prediction_regions_org[(prediction_regions_org[:,:]==1) & (mask_zeros_y[:,:]==1)]=0 + + img = resize_image(img_org, int(img_org.shape[0]), int(img_org.shape[1])) + + prediction_regions_org2 = self.do_prediction(True, img, self.model_region_p2, marginal_of_patch_percent=0.2) + prediction_regions_org2=resize_image(prediction_regions_org2, img_height_h, img_width_h ) + + mask_zeros2 = (prediction_regions_org2[:,:,0] == 0) + mask_lines2 = (prediction_regions_org2[:,:,0] == 3) + text_sume_early = 
(prediction_regions_org[:,:] == 1).sum() + prediction_regions_org_copy = np.copy(prediction_regions_org) + prediction_regions_org_copy[(prediction_regions_org_copy[:,:]==1) & (mask_zeros2[:,:]==1)] = 0 + text_sume_second = ((prediction_regions_org_copy[:,:]==1)*1).sum() + rate_two_models = 100. * text_sume_second / text_sume_early + + self.logger.info("ratio_of_two_models: %s", rate_two_models) + if not(is_image_enhanced and rate_two_models < RATIO_OF_TWO_MODEL_THRESHOLD): + prediction_regions_org = np.copy(prediction_regions_org_copy) + + prediction_regions_org[(mask_lines2[:,:]==1) & (prediction_regions_org[:,:]==0)]=3 + mask_lines_only=(prediction_regions_org[:,:]==3)*1 + prediction_regions_org = cv2.erode(prediction_regions_org[:,:], KERNEL, iterations=2) + prediction_regions_org = cv2.dilate(prediction_regions_org[:,:], KERNEL, iterations=2) + + if rate_two_models<=40: + if self.input_binary: + prediction_bin = np.copy(img_org) + else: + prediction_bin = self.do_prediction(True, img_org, self.model_bin, n_batch_inference=5) + prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h ) + prediction_bin = 255 * (prediction_bin[:,:,0]==0) + prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) + + ratio_y=1 + ratio_x=1 + + img = resize_image(prediction_bin, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) + + prediction_regions_org = self.do_prediction(True, img, self.model_region) + prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) + prediction_regions_org=prediction_regions_org[:,:,0] + + mask_lines_only=(prediction_regions_org[:,:]==3)*1 + + mask_texts_only=(prediction_regions_org[:,:]==1)*1 + mask_images_only=(prediction_regions_org[:,:]==2)*1 + + polygons_lines_xml, hir_lines_xml = return_contours_of_image(mask_lines_only) + polygons_lines_xml = filter_contours_area_of_image( + mask_lines_only, polygons_lines_xml, hir_lines_xml, max_area=1, min_area=0.00001) + + polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only, 1, 0.00001) + polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only, 1, 0.00001) + + text_regions_p_true = np.zeros(prediction_regions_org.shape) + text_regions_p_true = cv2.fillPoly(text_regions_p_true,pts = polygons_of_only_lines, color=(3, 3, 3)) + text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 + + text_regions_p_true=cv2.fillPoly(text_regions_p_true,pts=polygons_of_only_texts, color=(1,1,1)) + + self.logger.debug("exit get_regions_from_xy_2models") + return text_regions_p_true, erosion_hurts, polygons_lines_xml + except: + if self.input_binary: + prediction_bin = np.copy(img_org) + prediction_bin = self.do_prediction(True, img_org, self.model_bin, n_batch_inference=5) + prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h ) + prediction_bin = 255 * (prediction_bin[:,:,0]==0) + prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) + else: + prediction_bin = np.copy(img_org) + ratio_y=1 + ratio_x=1 + + + img = resize_image(prediction_bin, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) + prediction_regions_org = self.do_prediction(True, img, self.model_region) + prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) + prediction_regions_org=prediction_regions_org[:,:,0] + + #mask_lines_only=(prediction_regions_org[:,:]==3)*1 + #img = resize_image(img_org, int(img_org.shape[0]*1), int(img_org.shape[1]*1)) + + #prediction_regions_org = 
self.do_prediction(True, img, self.model_region) + + #prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) + + #prediction_regions_org = prediction_regions_org[:,:,0] + + #prediction_regions_org[(prediction_regions_org[:,:] == 1) & (mask_zeros_y[:,:] == 1)]=0 + + + mask_lines_only = (prediction_regions_org == 3)*1 + mask_texts_only = (prediction_regions_org == 1)*1 + mask_images_only= (prediction_regions_org == 2)*1 + + polygons_lines_xml, hir_lines_xml = return_contours_of_image(mask_lines_only) + polygons_lines_xml = filter_contours_area_of_image( + mask_lines_only, polygons_lines_xml, hir_lines_xml, max_area=1, min_area=0.00001) + + polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) + polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) + + text_regions_p_true = np.zeros(prediction_regions_org.shape) + text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_lines, color=(3,3,3)) + + text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 + text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_texts, color=(1,1,1)) + + erosion_hurts = True + self.logger.debug("exit get_regions_from_xy_2models") + return text_regions_p_true, erosion_hurts, polygons_lines_xml + + def do_order_of_regions_full_layout( + self, contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot): + + self.logger.debug("enter do_order_of_regions_full_layout") boxes = np.array(boxes, dtype=int) # to be on the safe side - c_boxes = np.stack((0.5 * boxes[:, 2:4].sum(axis=1), - 0.5 * boxes[:, 0:2].sum(axis=1))) - - def match_boxes(contours, only_centers: bool, kind: str): - cx, cy, mx, Mx, my, My, mxy = find_new_features_of_contours(contours) - cx = np.array(cx, dtype=int) - cy = np.array(cy, dtype=int) - arg_text_con = np.zeros(len(contours), dtype=int) - for ii in range(len(contours)): - box_found = False - for jj, box in enumerate(boxes): - if ((cx[ii] >= box[0] and - cx[ii] < box[1] and - cy[ii] >= box[2] and - cy[ii] < box[3]) if only_centers else - (mx[ii] >= box[0] and - Mx[ii] < box[1] and - my[ii] >= box[2] and - My[ii] < box[3])): - arg_text_con[ii] = jj - box_found = True - # print(kind, "/matched ", ii, "\t", (mx[ii], Mx[ii], my[ii], My[ii]), "\tin", jj, box, only_centers) - break - if not box_found: - dists_tr_from_box = np.linalg.norm(c_boxes - np.array([[cy[ii]], [cx[ii]]]), axis=0) - pcontained_in_box = ((boxes[:, 2] <= cy[ii]) & (cy[ii] < boxes[:, 3]) & - (boxes[:, 0] <= cx[ii]) & (cx[ii] < boxes[:, 1])) - assert pcontained_in_box.any(), (ii, cx[ii], cy[ii]) - ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) - arg_text_con[ii] = ind_min - # print(kind, "/fallback ", ii, "\t", (mx[ii], Mx[ii], my[ii], My[ii]), "\tin", ind_min, boxes[ind_min], only_centers) - return arg_text_con - - def order_from_boxes(only_centers: bool): - arg_text_con_main = match_boxes(contours_only_text_parent, only_centers, "main") - arg_text_con_head = match_boxes(contours_only_text_parent_h, only_centers, "head") - arg_text_con_drop = match_boxes(polygons_of_drop_capitals, only_centers, "drop") - args_contours_main = np.arange(len(contours_only_text_parent)) - args_contours_head = np.arange(len(contours_only_text_parent_h)) - args_contours_drop = np.arange(len(polygons_of_drop_capitals)) - order_by_con_main = np.zeros_like(arg_text_con_main) - order_by_con_head = np.zeros_like(arg_text_con_head) - order_by_con_drop = 
np.zeros_like(arg_text_con_drop) - idx = 0 - for iij, box in enumerate(boxes): - ys = slice(*box[2:4]) - xs = slice(*box[0:2]) - args_contours_box_main = args_contours_main[arg_text_con_main == iij] - args_contours_box_head = args_contours_head[arg_text_con_head == iij] - args_contours_box_drop = args_contours_drop[arg_text_con_drop == iij] - - _, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( - textline_mask_tot[ys, xs], - contours_only_text_parent[args_contours_box_main], - contours_only_text_parent_h[args_contours_box_head], - polygons_of_drop_capitals[args_contours_box_drop], - box[2], box[0]) - - for tidx, kind in zip(index_by_kind_sorted, kind_of_texts_sorted): - if kind == 1: - # print(iij, "main", args_contours_box_main[tidx], "becomes", idx) - order_by_con_main[args_contours_box_main[tidx]] = idx - elif kind == 2: - # print(iij, "head", args_contours_box_head[tidx], "becomes", idx) - order_by_con_head[args_contours_box_head[tidx]] = idx - else: - # print(iij, "drop", args_contours_box_drop[tidx], "becomes", idx) - order_by_con_drop[args_contours_box_drop[tidx]] = idx - idx += 1 - - # xml writer will create region ids in order of - # - contours_only_text_parent (main text), followed by - # - contours_only_text_parent_h (headings), and then - # - polygons_of_drop_capitals, - # and then create regionrefs into these ordered by order_text_new - order_text_new = np.argsort(np.concatenate((order_by_con_main, - order_by_con_head, - order_by_con_drop))) - return order_text_new + cx_text_only, cy_text_only, x_min_text_only, _, _, _, y_cor_x_min_main = find_new_features_of_contours( + contours_only_text_parent) + cx_text_only_h, cy_text_only_h, x_min_text_only_h, _, _, _, y_cor_x_min_main_h = find_new_features_of_contours( + contours_only_text_parent_h) try: - results = order_from_boxes(False) - except Exception as why: - self.logger.exception(why) - results = order_from_boxes(True) + arg_text_con = [] + for ii in range(len(cx_text_only)): + check_if_textregion_located_in_a_box = False + for jj in range(len(boxes)): + if (x_min_text_only[ii] + 80 >= boxes[jj][0] and + x_min_text_only[ii] + 80 < boxes[jj][1] and + y_cor_x_min_main[ii] >= boxes[jj][2] and + y_cor_x_min_main[ii] < boxes[jj][3]): + arg_text_con.append(jj) + check_if_textregion_located_in_a_box = True + break + if not check_if_textregion_located_in_a_box: + dists_tr_from_box = [math.sqrt((cx_text_only[ii] - boxes[jj][1]) ** 2 + + (cy_text_only[ii] - boxes[jj][2]) ** 2) + for jj in range(len(boxes))] + ind_min = np.argmin(dists_tr_from_box) + arg_text_con.append(ind_min) + args_contours = np.array(range(len(arg_text_con))) + arg_text_con_h = [] + for ii in range(len(cx_text_only_h)): + check_if_textregion_located_in_a_box = False + for jj in range(len(boxes)): + if (x_min_text_only_h[ii] + 80 >= boxes[jj][0] and + x_min_text_only_h[ii] + 80 < boxes[jj][1] and + y_cor_x_min_main_h[ii] >= boxes[jj][2] and + y_cor_x_min_main_h[ii] < boxes[jj][3]): + arg_text_con_h.append(jj) + check_if_textregion_located_in_a_box = True + break + if not check_if_textregion_located_in_a_box: + dists_tr_from_box = [math.sqrt((cx_text_only_h[ii] - boxes[jj][1]) ** 2 + + (cy_text_only_h[ii] - boxes[jj][2]) ** 2) + for jj in range(len(boxes))] + ind_min = np.argmin(dists_tr_from_box) + arg_text_con_h.append(ind_min) + args_contours_h = np.array(range(len(arg_text_con_h))) - self.logger.debug("exit do_order_of_regions") - return results + order_by_con_head = np.zeros(len(arg_text_con_h)) + order_by_con_main = np.zeros(len(arg_text_con)) + + 
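# Note on the matching above: each region is assigned to the reading-order box
# that contains its reference point (here x_min + 80 and the y at x_min); when
# no box contains the point, the region falls back to the box whose
# (x_max, y_min) corner is nearest. A minimal standalone sketch of that rule
# (NumPy only; match_point_to_box is a hypothetical helper, not part of
# eynollah; boxes use the same [x_min, x_max, y_min, y_max] layout as above):
import numpy as np

def match_point_to_box(px, py, boxes):
    boxes = np.asarray(boxes, dtype=int)
    contained = ((boxes[:, 0] <= px) & (px < boxes[:, 1]) &
                 (boxes[:, 2] <= py) & (py < boxes[:, 3]))
    if contained.any():
        # first box containing the point wins, as in the loops above
        return int(np.flatnonzero(contained)[0])
    # fallback: nearest (x_max, y_min) box corner, as in dists_tr_from_box
    return int(np.argmin(np.hypot(boxes[:, 1] - px, boxes[:, 2] - py)))

# e.g. match_point_to_box(120, 40, [[0, 100, 0, 500], [100, 200, 0, 500]]) == 1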
ref_point = 0 + order_of_texts_tot = [] + id_of_texts_tot = [] + for iij in range(len(boxes)): + ys = slice(*boxes[iij][2:4]) + xs = slice(*boxes[iij][0:2]) + args_contours_box = args_contours[np.array(arg_text_con) == iij] + args_contours_box_h = args_contours_h[np.array(arg_text_con_h) == iij] + con_inter_box = [] + con_inter_box_h = [] + + for box in args_contours_box: + con_inter_box.append(contours_only_text_parent[box]) + + for box in args_contours_box_h: + con_inter_box_h.append(contours_only_text_parent_h[box]) + + indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( + textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, boxes[iij][2]) + + order_of_texts, id_of_texts = order_and_id_of_texts( + con_inter_box, con_inter_box_h, + matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) + + indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1] + indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1] + indexes_sorted_head = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 2] + indexes_by_type_head = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 2] + + for zahler, _ in enumerate(args_contours_box): + arg_order_v = indexes_sorted_main[zahler] + order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = \ + np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + + for zahler, _ in enumerate(args_contours_box_h): + arg_order_v = indexes_sorted_head[zahler] + order_by_con_head[args_contours_box_h[indexes_by_type_head[zahler]]] = \ + np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + + for jji in range(len(id_of_texts)): + order_of_texts_tot.append(order_of_texts[jji] + ref_point) + id_of_texts_tot.append(id_of_texts[jji]) + ref_point += len(id_of_texts) + + order_of_texts_tot = [] + for tj1 in range(len(contours_only_text_parent)): + order_of_texts_tot.append(int(order_by_con_main[tj1])) + + for tj1 in range(len(contours_only_text_parent_h)): + order_of_texts_tot.append(int(order_by_con_head[tj1])) + + order_text_new = [] + for iii in range(len(order_of_texts_tot)): + order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0]) + + except Exception as why: + self.logger.error(why) + arg_text_con = [] + for ii in range(len(cx_text_only)): + check_if_textregion_located_in_a_box = False + for jj in range(len(boxes)): + if (cx_text_only[ii] >= boxes[jj][0] and + cx_text_only[ii] < boxes[jj][1] and + cy_text_only[ii] >= boxes[jj][2] and + cy_text_only[ii] < boxes[jj][3]): + # this is valid if the center of region identify in which box it is located + arg_text_con.append(jj) + check_if_textregion_located_in_a_box = True + break + + if not check_if_textregion_located_in_a_box: + dists_tr_from_box = [math.sqrt((cx_text_only[ii] - boxes[jj][1]) ** 2 + + (cy_text_only[ii] - boxes[jj][2]) ** 2) + for jj in range(len(boxes))] + ind_min = np.argmin(dists_tr_from_box) + arg_text_con.append(ind_min) + args_contours = np.array(range(len(arg_text_con))) + order_by_con_main = np.zeros(len(arg_text_con)) + + ############################# head + + arg_text_con_h = [] + for ii in range(len(cx_text_only_h)): + check_if_textregion_located_in_a_box = False + for jj in range(len(boxes)): + if (cx_text_only_h[ii] >= boxes[jj][0] and + cx_text_only_h[ii] < boxes[jj][1] and + cy_text_only_h[ii] >= boxes[jj][2] and + cy_text_only_h[ii] < boxes[jj][3]): + # this is valid if the center of region identify in 
which box it is located + arg_text_con_h.append(jj) + check_if_textregion_located_in_a_box = True + break + if not check_if_textregion_located_in_a_box: + dists_tr_from_box = [math.sqrt((cx_text_only_h[ii] - boxes[jj][1]) ** 2 + + (cy_text_only_h[ii] - boxes[jj][2]) ** 2) + for jj in range(len(boxes))] + ind_min = np.argmin(dists_tr_from_box) + arg_text_con_h.append(ind_min) + args_contours_h = np.array(range(len(arg_text_con_h))) + order_by_con_head = np.zeros(len(arg_text_con_h)) + + ref_point = 0 + order_of_texts_tot = [] + id_of_texts_tot = [] + for iij, _ in enumerate(boxes): + ys = slice(*boxes[iij][2:4]) + xs = slice(*boxes[iij][0:2]) + args_contours_box = args_contours[np.array(arg_text_con) == iij] + args_contours_box_h = args_contours_h[np.array(arg_text_con_h) == iij] + con_inter_box = [] + con_inter_box_h = [] + + for box in args_contours_box: + con_inter_box.append(contours_only_text_parent[box]) + + for box in args_contours_box_h: + con_inter_box_h.append(contours_only_text_parent_h[box]) + + indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( + textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, boxes[iij][2]) + + order_of_texts, id_of_texts = order_and_id_of_texts( + con_inter_box, con_inter_box_h, + matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) + + indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1] + indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1] + indexes_sorted_head = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 2] + indexes_by_type_head = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 2] + + for zahler, _ in enumerate(args_contours_box): + arg_order_v = indexes_sorted_main[zahler] + order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = \ + np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + + for zahler, _ in enumerate(args_contours_box_h): + arg_order_v = indexes_sorted_head[zahler] + order_by_con_head[args_contours_box_h[indexes_by_type_head[zahler]]] = \ + np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + + for jji, _ in enumerate(id_of_texts): + order_of_texts_tot.append(order_of_texts[jji] + ref_point) + id_of_texts_tot.append(id_of_texts[jji]) + ref_point += len(id_of_texts) + + order_of_texts_tot = [] + for tj1 in range(len(contours_only_text_parent)): + order_of_texts_tot.append(int(order_by_con_main[tj1])) + + for tj1 in range(len(contours_only_text_parent_h)): + order_of_texts_tot.append(int(order_by_con_head[tj1])) + + order_text_new = [] + for iii in range(len(order_of_texts_tot)): + order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0]) + + self.logger.debug("exit do_order_of_regions_full_layout") + return order_text_new, id_of_texts_tot + + def do_order_of_regions_no_full_layout( + self, contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot): + + self.logger.debug("enter do_order_of_regions_no_full_layout") + boxes = np.array(boxes, dtype=int) # to be on the safe side + cx_text_only, cy_text_only, x_min_text_only, _, _, _, y_cor_x_min_main = find_new_features_of_contours( + contours_only_text_parent) + + try: + arg_text_con = [] + for ii in range(len(cx_text_only)): + check_if_textregion_located_in_a_box = False + for jj in range(len(boxes)): + if (x_min_text_only[ii] + 80 >= boxes[jj][0] and + x_min_text_only[ii] + 80 < boxes[jj][1] and + y_cor_x_min_main[ii] >= boxes[jj][2] 
and + y_cor_x_min_main[ii] < boxes[jj][3]): + arg_text_con.append(jj) + check_if_textregion_located_in_a_box = True + break + if not check_if_textregion_located_in_a_box: + dists_tr_from_box = [math.sqrt((cx_text_only[ii] - boxes[jj][1]) ** 2 + + (cy_text_only[ii] - boxes[jj][2]) ** 2) + for jj in range(len(boxes))] + ind_min = np.argmin(dists_tr_from_box) + arg_text_con.append(ind_min) + args_contours = np.array(range(len(arg_text_con))) + order_by_con_main = np.zeros(len(arg_text_con)) + + ref_point = 0 + order_of_texts_tot = [] + id_of_texts_tot = [] + for iij in range(len(boxes)): + ys = slice(*boxes[iij][2:4]) + xs = slice(*boxes[iij][0:2]) + args_contours_box = args_contours[np.array(arg_text_con) == iij] + con_inter_box = [] + con_inter_box_h = [] + for i in range(len(args_contours_box)): + con_inter_box.append(contours_only_text_parent[args_contours_box[i]]) + + indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( + textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, boxes[iij][2]) + + order_of_texts, id_of_texts = order_and_id_of_texts( + con_inter_box, con_inter_box_h, + matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) + + indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1] + indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1] + + for zahler, _ in enumerate(args_contours_box): + arg_order_v = indexes_sorted_main[zahler] + order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = \ + np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + + for jji, _ in enumerate(id_of_texts): + order_of_texts_tot.append(order_of_texts[jji] + ref_point) + id_of_texts_tot.append(id_of_texts[jji]) + ref_point += len(id_of_texts) + + order_of_texts_tot = [] + for tj1 in range(len(contours_only_text_parent)): + order_of_texts_tot.append(int(order_by_con_main[tj1])) + + order_text_new = [] + for iii in range(len(order_of_texts_tot)): + order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0]) + + except Exception as why: + self.logger.error(why) + arg_text_con = [] + for ii in range(len(cx_text_only)): + check_if_textregion_located_in_a_box = False + for jj in range(len(boxes)): + if (cx_text_only[ii] >= boxes[jj][0] and + cx_text_only[ii] < boxes[jj][1] and + cy_text_only[ii] >= boxes[jj][2] and + cy_text_only[ii] < boxes[jj][3]): + # this is valid if the center of region identify in which box it is located + arg_text_con.append(jj) + check_if_textregion_located_in_a_box = True + break + if not check_if_textregion_located_in_a_box: + dists_tr_from_box = [math.sqrt((cx_text_only[ii] - boxes[jj][1]) ** 2 + + (cy_text_only[ii] - boxes[jj][2]) ** 2) + for jj in range(len(boxes))] + ind_min = np.argmin(dists_tr_from_box) + arg_text_con.append(ind_min) + args_contours = np.array(range(len(arg_text_con))) + order_by_con_main = np.zeros(len(arg_text_con)) + + ref_point = 0 + order_of_texts_tot = [] + id_of_texts_tot = [] + for iij in range(len(boxes)): + ys = slice(*boxes[iij][2:4]) + xs = slice(*boxes[iij][0:2]) + args_contours_box = args_contours[np.array(arg_text_con) == iij] + con_inter_box = [] + con_inter_box_h = [] + for i in range(len(args_contours_box)): + con_inter_box.append(contours_only_text_parent[args_contours_box[i]]) + + indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( + textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, boxes[iij][2]) + + 
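# The order_text_new loops in these ordering methods invert a permutation:
# order_of_texts_tot[i] holds the reading-order rank of region i, and
# order_text_new lists the region indices sorted by that rank. For a valid
# permutation the per-element np.where scan is equivalent to one np.argsort;
# a small sketch with toy ranks (not data from the code above):
import numpy as np

ranks = np.array([2, 0, 3, 1])
by_where = [int(np.where(ranks == i)[0][0]) for i in range(len(ranks))]
assert by_where == np.argsort(ranks).tolist() == [1, 3, 0, 2]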
order_of_texts, id_of_texts = order_and_id_of_texts( + con_inter_box, con_inter_box_h, + matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) + + indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1] + indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1] + + for zahler, _ in enumerate(args_contours_box): + arg_order_v = indexes_sorted_main[zahler] + order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = \ + np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + + for jji, _ in enumerate(id_of_texts): + order_of_texts_tot.append(order_of_texts[jji] + ref_point) + id_of_texts_tot.append(id_of_texts[jji]) + ref_point += len(id_of_texts) + + order_of_texts_tot = [] + + for tj1 in range(len(contours_only_text_parent)): + order_of_texts_tot.append(int(order_by_con_main[tj1])) + + order_text_new = [] + for iii in range(len(order_of_texts_tot)): + order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0]) + + self.logger.debug("exit do_order_of_regions_no_full_layout") + return order_text_new, id_of_texts_tot def check_iou_of_bounding_box_and_contour_for_tables( self, layout, table_prediction_early, pixel_table, num_col_classifier): layout_org = np.copy(layout) - layout_org[layout_org == pixel_table] = 0 - layout = (layout == pixel_table).astype(np.uint8) * 1 - _, thresh = cv2.threshold(layout, 0, 255, 0) + layout_org[:,:,0][layout_org[:,:,0]==pixel_table] = 0 + layout = (layout[:,:,0]==pixel_table)*1 + + layout =np.repeat(layout[:, :, np.newaxis], 3, axis=2) + layout = layout.astype(np.uint8) + imgray = cv2.cvtColor(layout, cv2.COLOR_BGR2GRAY ) + _, thresh = cv2.threshold(imgray, 0, 255, 0) contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - cnt_size = np.array([cv2.contourArea(cnt) for cnt in contours]) + cnt_size = np.array([cv2.contourArea(contours[j]) + for j in range(len(contours))]) contours_new = [] - for i, contour in enumerate(contours): - x, y, w, h = cv2.boundingRect(contour) + for i in range(len(contours)): + x, y, w, h = cv2.boundingRect(contours[i]) iou = cnt_size[i] /float(w*h) *100 if iou<80: - layout_contour = np.zeros(layout_org.shape[:2]) - layout_contour = cv2.fillPoly(layout_contour, pts=[contour] ,color=1) + layout_contour = np.zeros((layout_org.shape[0], layout_org.shape[1])) + layout_contour= cv2.fillPoly(layout_contour,pts=[contours[i]] ,color=(1,1,1)) layout_contour_sum = layout_contour.sum(axis=0) layout_contour_sum_diff = np.diff(layout_contour_sum) @@ -1276,124 +2469,142 @@ class Eynollah: layout_contour=cv2.erode(layout_contour[:,:], KERNEL, iterations=5) layout_contour=cv2.dilate(layout_contour[:,:], KERNEL, iterations=5) + layout_contour =np.repeat(layout_contour[:, :, np.newaxis], 3, axis=2) layout_contour = layout_contour.astype(np.uint8) - _, thresh = cv2.threshold(layout_contour, 0, 255, 0) + + imgray = cv2.cvtColor(layout_contour, cv2.COLOR_BGR2GRAY ) + _, thresh = cv2.threshold(imgray, 0, 255, 0) contours_sep, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) for ji in range(len(contours_sep) ): contours_new.append(contours_sep[ji]) if num_col_classifier>=2: - only_recent_contour_image = np.zeros(layout.shape[:2]) - only_recent_contour_image = cv2.fillPoly(only_recent_contour_image, - pts=[contours_sep[ji]], color=1) + only_recent_contour_image = np.zeros((layout.shape[0],layout.shape[1])) + only_recent_contour_image= cv2.fillPoly(only_recent_contour_image, 
pts=[contours_sep[ji]], color=(1,1,1)) table_pixels_masked_from_early_pre = only_recent_contour_image * table_prediction_early iou_in = 100. * table_pixels_masked_from_early_pre.sum() / only_recent_contour_image.sum() #print(iou_in,'iou_in_in1') if iou_in>30: - layout_org = cv2.fillPoly(layout_org, pts=[contours_sep[ji]], color=pixel_table) + layout_org= cv2.fillPoly(layout_org, pts=[contours_sep[ji]], color=3 * (pixel_table,)) else: pass else: - layout_org= cv2.fillPoly(layout_org, pts=[contours_sep[ji]], color=pixel_table) + layout_org= cv2.fillPoly(layout_org, pts=[contours_sep[ji]], color=3 * (pixel_table,)) else: - contours_new.append(contour) + contours_new.append(contours[i]) if num_col_classifier>=2: - only_recent_contour_image = np.zeros(layout.shape[:2]) - only_recent_contour_image = cv2.fillPoly(only_recent_contour_image, pts=[contour],color=1) + only_recent_contour_image = np.zeros((layout.shape[0],layout.shape[1])) + only_recent_contour_image= cv2.fillPoly(only_recent_contour_image,pts=[contours[i]] ,color=(1,1,1)) table_pixels_masked_from_early_pre = only_recent_contour_image * table_prediction_early iou_in = 100. * table_pixels_masked_from_early_pre.sum() / only_recent_contour_image.sum() #print(iou_in,'iou_in') if iou_in>30: - layout_org = cv2.fillPoly(layout_org, pts=[contour], color=pixel_table) + layout_org= cv2.fillPoly(layout_org, pts=[contours[i]], color=3 * (pixel_table,)) else: pass else: - layout_org = cv2.fillPoly(layout_org, pts=[contour], color=pixel_table) + layout_org= cv2.fillPoly(layout_org, pts=[contours[i]], color=3 * (pixel_table,)) return layout_org, contours_new - def delete_separator_around(self, splitter_y, peaks_neg, image_by_region, label_seps, label_table): + def delete_separator_around(self, spliter_y,peaks_neg,image_by_region, pixel_line, pixel_table): # format of subboxes: box=[x1, x2 , y1, y2] pix_del = 100 - for i in range(len(splitter_y)-1): - for j in range(1,len(peaks_neg[i])-1): - where = np.index_exp[splitter_y[i]: - splitter_y[i+1], - peaks_neg[i][j] - pix_del: - peaks_neg[i][j] + pix_del, - :] - if image_by_region.ndim < 3: - where = where[:2] - else: - print("image_by_region ndim is 3!") # rs - image_by_region[where][image_by_region[where] == label_seps] = 0 - image_by_region[where][image_by_region[where] == label_table] = 0 + if len(image_by_region.shape)==3: + for i in range(len(spliter_y)-1): + for j in range(1,len(peaks_neg[i])-1): + ys = slice(int(spliter_y[i]), + int(spliter_y[i+1])) + xs = slice(peaks_neg[i][j] - pix_del, + peaks_neg[i][j] + pix_del) + image_by_region[ys,xs,0][image_by_region[ys,xs,0]==pixel_line] = 0 + image_by_region[ys,xs,0][image_by_region[ys,xs,1]==pixel_line] = 0 + image_by_region[ys,xs,0][image_by_region[ys,xs,2]==pixel_line] = 0 + + image_by_region[ys,xs,0][image_by_region[ys,xs,0]==pixel_table] = 0 + image_by_region[ys,xs,0][image_by_region[ys,xs,1]==pixel_table] = 0 + image_by_region[ys,xs,0][image_by_region[ys,xs,2]==pixel_table] = 0 + else: + for i in range(len(spliter_y)-1): + for j in range(1,len(peaks_neg[i])-1): + ys = slice(int(spliter_y[i]), + int(spliter_y[i+1])) + xs = slice(peaks_neg[i][j] - pix_del, + peaks_neg[i][j] + pix_del) + image_by_region[ys,xs][image_by_region[ys,xs]==pixel_line] = 0 + image_by_region[ys,xs][image_by_region[ys,xs]==pixel_table] = 0 return image_by_region def add_tables_heuristic_to_layout( self, image_regions_eraly_p, boxes, - slope_mean_hor, splitter_y, peaks_neg_tot, image_revised, - num_col_classifier, min_area, label_seps): + slope_mean_hor, spliter_y, 
peaks_neg_tot, image_revised, + num_col_classifier, min_area, pixel_line): - label_table =10 - image_revised_1 = self.delete_separator_around(splitter_y, peaks_neg_tot, image_revised, label_seps, label_table) + pixel_table =10 + image_revised_1 = self.delete_separator_around(spliter_y, peaks_neg_tot, image_revised, pixel_line, pixel_table) try: - image_revised_1[:,:30][image_revised_1[:,:30]==label_seps] = 0 - image_revised_1[:,-30:][image_revised_1[:,-30:]==label_seps] = 0 + image_revised_1[:,:30][image_revised_1[:,:30]==pixel_line] = 0 + image_revised_1[:,-30:][image_revised_1[:,-30:]==pixel_line] = 0 except: pass boxes = np.array(boxes, dtype=int) # to be on the safe side - img_comm = np.zeros(image_revised_1.shape, dtype=np.uint8) + img_comm_e = np.zeros(image_revised_1.shape) + img_comm = np.repeat(img_comm_e[:, :, np.newaxis], 3, axis=2) + for indiv in np.unique(image_revised_1): - image_col = (image_revised_1 == indiv).astype(np.uint8) * 255 - _, thresh = cv2.threshold(image_col, 0, 255, 0) + image_col=(image_revised_1==indiv)*255 + img_comm_in=np.repeat(image_col[:, :, np.newaxis], 3, axis=2) + img_comm_in=img_comm_in.astype(np.uint8) + + imgray = cv2.cvtColor(img_comm_in, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) contours,hirarchy=cv2.findContours(thresh.copy(), cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) - if indiv==label_table: - main_contours = filter_contours_area_of_image_tables(thresh, contours, hirarchy, - max_area=1, min_area=0.001) + if indiv==pixel_table: + main_contours = filter_contours_area_of_image_tables(thresh, contours, hirarchy, max_area = 1, min_area = 0.001) else: - main_contours = filter_contours_area_of_image_tables(thresh, contours, hirarchy, - max_area=1, min_area=min_area) + main_contours = filter_contours_area_of_image_tables(thresh, contours, hirarchy, max_area = 1, min_area = min_area) - img_comm = cv2.fillPoly(img_comm, pts=main_contours, color=indiv) + img_comm = cv2.fillPoly(img_comm, pts = main_contours, color = (indiv, indiv, indiv)) + img_comm = img_comm.astype(np.uint8) - if not isNaN(slope_mean_hor): - image_revised_last = np.zeros(image_regions_eraly_p.shape[:2]) + if not self.isNaN(slope_mean_hor): + image_revised_last = np.zeros((image_regions_eraly_p.shape[0], image_regions_eraly_p.shape[1],3)) for i in range(len(boxes)): box_ys = slice(*boxes[i][2:4]) box_xs = slice(*boxes[i][0:2]) image_box = img_comm[box_ys, box_xs] try: - image_box_tabels_1 = (image_box == label_table) * 1 + image_box_tabels_1=(image_box[:,:,0]==pixel_table)*1 contours_tab,_=return_contours_of_image(image_box_tabels_1) contours_tab=filter_contours_area_of_image_tables(image_box_tabels_1,contours_tab,_,1,0.003) - image_box_tabels_1 = (image_box == label_seps).astype(np.uint8) * 1 - image_box_tabels_and_m_text = ( (image_box == label_table) | - (image_box == 1) ).astype(np.uint8) * 1 + image_box_tabels_1=(image_box[:,:,0]==pixel_line)*1 - image_box_tabels_1 = cv2.dilate(image_box_tabels_1, KERNEL, iterations=5) + image_box_tabels_and_m_text=( (image_box[:,:,0]==pixel_table) | (image_box[:,:,0]==1) )*1 + image_box_tabels_and_m_text=image_box_tabels_and_m_text.astype(np.uint8) - contours_table_m_text, _ = return_contours_of_image(image_box_tabels_and_m_text) - _, thresh = cv2.threshold(image_box_tabels_1, 0, 255, 0) - contours_line, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + image_box_tabels_1=image_box_tabels_1.astype(np.uint8) + image_box_tabels_1 = cv2.dilate(image_box_tabels_1,KERNEL,iterations = 5) + + 
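# The per-label loop earlier in add_tables_heuristic_to_layout rebuilds each
# label plane by extracting its contours, dropping implausibly small ones, and
# re-filling the survivors. A minimal sketch of that filter-and-refill idiom
# (OpenCV 4.x + NumPy; refill_label is a hypothetical helper, with min_area a
# fraction of the image area as in filter_contours_area_of_image_tables):
import cv2
import numpy as np

def refill_label(label_img, label, min_area=0.001):
    mask = ((label_img == label) * 255).astype(np.uint8)
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    keep = [cnt for cnt in contours
            if cv2.contourArea(cnt) >= min_area * mask.size]
    # paint only the surviving contours back onto a clean plane
    return cv2.fillPoly(np.zeros_like(label_img), pts=keep, color=label)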
contours_table_m_text,_=return_contours_of_image(image_box_tabels_and_m_text)
+ image_box_tabels=np.repeat(image_box_tabels_1[:, :, np.newaxis], 3, axis=2)
+
+ image_box_tabels=image_box_tabels.astype(np.uint8)
+ imgray = cv2.cvtColor(image_box_tabels, cv2.COLOR_BGR2GRAY)
+ ret, thresh = cv2.threshold(imgray, 0, 255, 0)
+
+ contours_line,hierachy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
 y_min_main_line ,y_max_main_line=find_features_of_contours(contours_line)
 y_min_main_tab ,y_max_main_tab=find_features_of_contours(contours_tab)
- (cx_tab_m_text, cy_tab_m_text,
- x_min_tab_m_text, x_max_tab_m_text,
- y_min_tab_m_text, y_max_tab_m_text,
- _) = find_new_features_of_contours(contours_table_m_text)
- (cx_tabl, cy_tabl,
- x_min_tabl, x_max_tabl,
- y_min_tabl, y_max_tabl,
- _) = find_new_features_of_contours(contours_tab)
+ cx_tab_m_text,cy_tab_m_text ,x_min_tab_m_text , x_max_tab_m_text, y_min_tab_m_text ,y_max_tab_m_text, _= find_new_features_of_contours(contours_table_m_text)
+ cx_tabl,cy_tabl ,x_min_tabl , x_max_tabl, y_min_tabl ,y_max_tabl,_= find_new_features_of_contours(contours_tab)
 if len(y_min_main_tab )>0:
 y_down_tabs=[]
@@ -1403,30 +2614,22 @@ class Eynollah:
 y_down_tab=[]
 y_up_tab=[]
 for i_l in range(len(y_min_main_line)):
- if (y_min_main_tab[i_t] > y_min_main_line[i_l] and
- y_max_main_tab[i_t] > y_min_main_line[i_l] and
- y_min_main_tab[i_t] > y_max_main_line[i_l] and
- y_max_main_tab[i_t] > y_min_main_line[i_l]):
+ if y_min_main_tab[i_t]>y_min_main_line[i_l] and y_max_main_tab[i_t]>y_min_main_line[i_l] and y_min_main_tab[i_t]>y_max_main_line[i_l] and y_max_main_tab[i_t]>y_min_main_line[i_l]:
 pass
- elif (y_min_main_tab[i_t] < y_max_main_line[i_l] and
- y_max_main_tab[i_t] < y_max_main_line[i_l] and
- y_max_main_tab[i_t] < y_min_main_line[i_l] and
- y_min_main_tab[i_t] < y_min_main_line[i_l]):
+ elif y_min_main_tab[i_t]<y_max_main_line[i_l] and y_max_main_tab[i_t]<y_max_main_line[i_l] and y_max_main_tab[i_t]<y_min_main_line[i_l] and y_min_main_tab[i_t]<y_min_main_line[i_l]:
 pass
 if len(y_min_tab_col1)>0:
 for ijv in range(len(y_min_tab_col1)):
- image_revised_last[int(y_min_tab_col1[ijv]):int(y_max_tab_col1[ijv])] = label_table
+ image_revised_last[int(y_min_tab_col1[ijv]):int(y_max_tab_col1[ijv]),:,:]=pixel_table
 return image_revised_last
- def get_tables_from_model(self, img):
- table_prediction, table_confidence = self.do_prediction_new_concept(
- False, img,
- self.model_zoo.get("table"),
- thresholding_for_artificial_class=True,
- threshold_art_class=0.05,
- artificial_class=2)
- table_prediction = table_prediction.astype(np.uint8)
- return table_prediction, table_confidence
+ def do_order_of_regions(self, *args, **kwargs):
+ if self.full_layout:
+ return self.do_order_of_regions_full_layout(*args, **kwargs)
+ return self.do_order_of_regions_no_full_layout(*args, **kwargs)
- def run_columns(
- self, text_regions_p_1,
- num_col_classifier, num_column_is_classified,
- erosion_hurts,
- label_imgs=2,
- label_seps=3,
- ):
- """post-process column classifier result"""
+ def get_tables_from_model(self, img, num_col_classifier):
+ img_org = np.copy(img)
+ img_height_h = img_org.shape[0]
+ img_width_h = img_org.shape[1]
+ patches = False
+ if self.light_version:
+ prediction_table, _ = self.do_prediction_new_concept(patches, img, self.model_table)
+ prediction_table = prediction_table.astype(np.int16)
+ return prediction_table[:,:,0]
+ else:
+ if num_col_classifier < 4 and num_col_classifier > 2:
+ prediction_table = self.do_prediction(patches, img, self.model_table)
+ pre_updown = self.do_prediction(patches, cv2.flip(img[:,:,:], -1), self.model_table)
+ pre_updown = cv2.flip(pre_updown, -1)
+
+ prediction_table[:,:,0][pre_updown[:,:,0]==1]=1
+
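# Note on the merge just above: the table model is applied twice, once to the
# image and once to its 180-degree rotation (cv2.flip with flipCode=-1 flips
# both axes); the rotated prediction is flipped back and OR-ed into the first
# one, so a table found in either pass survives. A minimal sketch of this
# flip-based test-time augmentation (predict stands in, hypothetically, for
# self.do_prediction):
import cv2
import numpy as np

def predict_with_flip_tta(img, predict):
    pred = predict(img)                       # binary map, same H x W as img
    pred_updown = predict(cv2.flip(img, -1))  # run again on the rotated input
    pred_updown = cv2.flip(pred_updown, -1)   # rotate the result back
    return np.maximum(pred, pred_updown)      # union of both detections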
prediction_table = prediction_table.astype(np.int16) + + elif num_col_classifier ==2: + height_ext = 0 # img.shape[0] // 4 + h_start = height_ext // 2 + width_ext = img.shape[1] // 8 + w_start = width_ext // 2 + + img_new = np.zeros((img.shape[0] + height_ext, + img.shape[1] + width_ext, + img.shape[2])).astype(float) + ys = slice(h_start, h_start + img.shape[0]) + xs = slice(w_start, w_start + img.shape[1]) + img_new[ys, xs] = img + + prediction_ext = self.do_prediction(patches, img_new, self.model_table) + pre_updown = self.do_prediction(patches, cv2.flip(img_new[:,:,:], -1), self.model_table) + pre_updown = cv2.flip(pre_updown, -1) + + prediction_table = prediction_ext[ys, xs] + prediction_table_updown = pre_updown[ys, xs] + + prediction_table[:,:,0][prediction_table_updown[:,:,0]==1]=1 + prediction_table = prediction_table.astype(np.int16) + elif num_col_classifier ==1: + height_ext = 0 # img.shape[0] // 4 + h_start = height_ext // 2 + width_ext = img.shape[1] // 4 + w_start = width_ext // 2 + + img_new =np.zeros((img.shape[0] + height_ext, + img.shape[1] + width_ext, + img.shape[2])).astype(float) + ys = slice(h_start, h_start + img.shape[0]) + xs = slice(w_start, w_start + img.shape[1]) + img_new[ys, xs] = img + + prediction_ext = self.do_prediction(patches, img_new, self.model_table) + pre_updown = self.do_prediction(patches, cv2.flip(img_new[:,:,:], -1), self.model_table) + pre_updown = cv2.flip(pre_updown, -1) + + prediction_table = prediction_ext[ys, xs] + prediction_table_updown = pre_updown[ys, xs] + + prediction_table[:,:,0][prediction_table_updown[:,:,0]==1]=1 + prediction_table = prediction_table.astype(np.int16) + else: + prediction_table = np.zeros(img.shape) + img_w_half = img.shape[1] // 2 + + pre1 = self.do_prediction(patches, img[:,0:img_w_half,:], self.model_table) + pre2 = self.do_prediction(patches, img[:,img_w_half:,:], self.model_table) + pre_full = self.do_prediction(patches, img[:,:,:], self.model_table) + pre_updown = self.do_prediction(patches, cv2.flip(img[:,:,:], -1), self.model_table) + pre_updown = cv2.flip(pre_updown, -1) + + prediction_table_full_erode = cv2.erode(pre_full[:,:,0], KERNEL, iterations=4) + prediction_table_full_erode = cv2.dilate(prediction_table_full_erode, KERNEL, iterations=4) + + prediction_table_full_updown_erode = cv2.erode(pre_updown[:,:,0], KERNEL, iterations=4) + prediction_table_full_updown_erode = cv2.dilate(prediction_table_full_updown_erode, KERNEL, iterations=4) + + prediction_table[:,0:img_w_half,:] = pre1[:,:,:] + prediction_table[:,img_w_half:,:] = pre2[:,:,:] + + prediction_table[:,:,0][prediction_table_full_erode[:,:]==1]=1 + prediction_table[:,:,0][prediction_table_full_updown_erode[:,:]==1]=1 + prediction_table = prediction_table.astype(np.int16) + + #prediction_table_erode = cv2.erode(prediction_table[:,:,0], self.kernel, iterations=6) + #prediction_table_erode = cv2.dilate(prediction_table_erode, self.kernel, iterations=6) + + prediction_table_erode = cv2.erode(prediction_table[:,:,0], KERNEL, iterations=20) + prediction_table_erode = cv2.dilate(prediction_table_erode, KERNEL, iterations=20) + return prediction_table_erode.astype(np.int16) + + def run_graphics_and_columns_light( + self, text_regions_p_1, textline_mask_tot_ea, + num_col_classifier, num_column_is_classified, erosion_hurts, img_bin_light): + + #print(text_regions_p_1.shape, 'text_regions_p_1 shape run graphics') + #print(erosion_hurts, 'erosion_hurts') t_in_gr = time.time() - regions_without_separators = ((text_regions_p_1 != label_seps) & - 
(text_regions_p_1 != 0)).astype(np.uint8) - if not erosion_hurts: - regions_without_separators = cv2.erode(regions_without_separators, KERNEL, iterations=6) + img_g = self.imread(grayscale=True, uint8=True) + img_g3 = np.zeros((img_g.shape[0], img_g.shape[1], 3)) + img_g3 = img_g3.astype(np.uint8) + img_g3[:, :, 0] = img_g[:, :] + img_g3[:, :, 1] = img_g[:, :] + img_g3[:, :, 2] = img_g[:, :] + + image_page, page_coord, cont_page = self.extract_page() + #print("inside graphics 1 ", time.time() - t_in_gr) + if self.tables: + table_prediction = self.get_tables_from_model(image_page, num_col_classifier) + else: + table_prediction = np.zeros((image_page.shape[0], image_page.shape[1])).astype(np.int16) + + if self.plotter: + self.plotter.save_page_image(image_page) + + text_regions_p_1 = text_regions_p_1[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + textline_mask_tot_ea = textline_mask_tot_ea[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + img_bin_light = img_bin_light[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + + mask_images = (text_regions_p_1[:, :] == 2) * 1 + mask_images = mask_images.astype(np.uint8) + mask_images = cv2.erode(mask_images[:, :], KERNEL, iterations=10) + mask_lines = (text_regions_p_1[:, :] == 3) * 1 + mask_lines = mask_lines.astype(np.uint8) + img_only_regions_with_sep = ((text_regions_p_1[:, :] != 3) & (text_regions_p_1[:, :] != 0)) * 1 + img_only_regions_with_sep = img_only_regions_with_sep.astype(np.uint8) + + #print("inside graphics 2 ", time.time() - t_in_gr) + if erosion_hurts: + img_only_regions = np.copy(img_only_regions_with_sep[:,:]) + else: + img_only_regions = cv2.erode(img_only_regions_with_sep[:,:], KERNEL, iterations=6) + + ##print(img_only_regions.shape,'img_only_regions') + ##plt.imshow(img_only_regions[:,:]) + ##plt.show() + ##num_col, _ = find_num_col(img_only_regions, num_col_classifier, self.tables, multiplier=6.0) try: - num_col, _ = find_num_col(regions_without_separators, num_col_classifier, self.tables, multiplier=6.0) + num_col, _ = find_num_col(img_only_regions, num_col_classifier, self.tables, multiplier=6.0) num_col = num_col + 1 if not num_column_is_classified: - num_col_classifier = num_col - num_col_classifier = min(self.num_col_upper or num_col_classifier, - max(self.num_col_lower or num_col_classifier, - num_col_classifier)) + num_col_classifier = num_col + 1 except Exception as why: - self.logger.exception(why) + self.logger.error(why) num_col = None - return num_col, num_col_classifier + #print("inside graphics 3 ", time.time() - t_in_gr) + return (num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, + text_regions_p_1, cont_page, table_prediction, textline_mask_tot_ea, img_bin_light) - def run_enhancement(self, image): + def run_graphics_and_columns_without_layout(self, textline_mask_tot_ea, img_bin_light): + #print(text_regions_p_1.shape, 'text_regions_p_1 shape run graphics') + #print(erosion_hurts, 'erosion_hurts') + t_in_gr = time.time() + img_g = self.imread(grayscale=True, uint8=True) + + img_g3 = np.zeros((img_g.shape[0], img_g.shape[1], 3)) + img_g3 = img_g3.astype(np.uint8) + img_g3[:, :, 0] = img_g[:, :] + img_g3[:, :, 1] = img_g[:, :] + img_g3[:, :, 2] = img_g[:, :] + + image_page, page_coord, cont_page = self.extract_page() + #print("inside graphics 1 ", time.time() - t_in_gr) + + textline_mask_tot_ea = textline_mask_tot_ea[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + img_bin_light = img_bin_light[page_coord[0] : 
page_coord[1], page_coord[2] : page_coord[3]] + + return page_coord, image_page, textline_mask_tot_ea, img_bin_light, cont_page + + def run_graphics_and_columns( + self, text_regions_p_1, + num_col_classifier, num_column_is_classified, erosion_hurts): + + t_in_gr = time.time() + img_g = self.imread(grayscale=True, uint8=True) + + img_g3 = np.zeros((img_g.shape[0], img_g.shape[1], 3)) + img_g3 = img_g3.astype(np.uint8) + img_g3[:, :, 0] = img_g[:, :] + img_g3[:, :, 1] = img_g[:, :] + img_g3[:, :, 2] = img_g[:, :] + + image_page, page_coord, cont_page = self.extract_page() + + if self.tables: + table_prediction = self.get_tables_from_model(image_page, num_col_classifier) + else: + table_prediction = np.zeros((image_page.shape[0], image_page.shape[1])).astype(np.int16) + + if self.plotter: + self.plotter.save_page_image(image_page) + + text_regions_p_1 = text_regions_p_1[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + mask_images = (text_regions_p_1[:, :] == 2) * 1 + mask_images = mask_images.astype(np.uint8) + mask_images = cv2.erode(mask_images[:, :], KERNEL, iterations=10) + mask_lines = (text_regions_p_1[:, :] == 3) * 1 + mask_lines = mask_lines.astype(np.uint8) + img_only_regions_with_sep = ((text_regions_p_1[:, :] != 3) & (text_regions_p_1[:, :] != 0)) * 1 + img_only_regions_with_sep = img_only_regions_with_sep.astype(np.uint8) + + if erosion_hurts: + img_only_regions = np.copy(img_only_regions_with_sep[:,:]) + else: + img_only_regions = cv2.erode(img_only_regions_with_sep[:,:], KERNEL, iterations=6) + try: + num_col, _ = find_num_col(img_only_regions, num_col_classifier, self.tables, multiplier=6.0) + num_col = num_col + 1 + if not num_column_is_classified: + num_col_classifier = num_col + 1 + except Exception as why: + self.logger.error(why) + num_col = None + return (num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, + text_regions_p_1, cont_page, table_prediction) + + def run_enhancement(self, light_version): t_in = time.time() self.logger.info("Resizing and enhancing image...") - is_image_enhanced, num_col_classifier, num_column_is_classified = \ - self.resize_and_enhance_image_with_column_classifier(image) + is_image_enhanced, img_org, img_res, num_col_classifier, num_column_is_classified, img_bin = \ + self.resize_and_enhance_image_with_column_classifier(light_version) self.logger.info("Image was %senhanced.", '' if is_image_enhanced else 'not ') + scale = 1 if is_image_enhanced: if self.allow_enhancement: + #img_res = img_res.astype(np.uint8) + self.get_image_and_scales(img_org, img_res, scale) if self.plotter: - self.plotter.save_enhanced_image(image['img_res'], image['name']) + self.plotter.save_enhanced_image(img_res) + else: + self.get_image_and_scales_after_enhancing(img_org, img_res) else: - # rs FIXME: dead branch (i.e. no actual enhancement/scaling done) - # also, unclear why col classifier should run again on same input - # (why not predict enhancement iff size(img_res) > size(img_org) ?) 
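# The run_graphics_and_columns* methods above expand the grayscale page into
# three channels by assigning the same plane to each channel one at a time.
# np.repeat over a new axis (already used for prediction_bin elsewhere in this
# file) is an equivalent one-liner; a small sketch with a toy array:
import numpy as np

img_g = np.random.randint(0, 256, (4, 5), dtype=np.uint8)
img_g3 = np.repeat(img_g[:, :, np.newaxis], 3, axis=2)  # shape (4, 5, 3)
assert img_g3.shape == (4, 5, 3) and (img_g3[:, :, 1] == img_g).all()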
+ if self.allow_enhancement: + self.get_image_and_scales(img_org, img_res, scale) + else: + self.get_image_and_scales(img_org, img_res, scale) if self.allow_scaling: - self.resize_image_with_column_classifier(image) - + img_org, img_res, is_image_enhanced = self.resize_image_with_column_classifier(is_image_enhanced, img_bin) + self.get_image_and_scales_after_enhancing(img_org, img_res) #print("enhancement in ", time.time()-t_in) - return num_col_classifier, num_column_is_classified + return img_res, is_image_enhanced, num_col_classifier, num_column_is_classified - def run_textline(self, image_page): - textline_mask_tot_ea, textline_conf = self.textline_contours(image_page, True) - #textline_mask_tot_ea = textline_mask_tot_ea.astype(np.int16) - return textline_mask_tot_ea, textline_conf + def run_textline(self, image_page, num_col_classifier=None): + scaler_h_textline = 1#1.3 # 1.2#1.2 + scaler_w_textline = 1#1.3 # 0.9#1 + #print(image_page.shape) + textline_mask_tot_ea, _ = self.textline_contours(image_page, True, scaler_h_textline, scaler_w_textline, num_col_classifier) + if self.textline_light: + textline_mask_tot_ea = textline_mask_tot_ea.astype(np.int16) + + if self.plotter: + self.plotter.save_plot_of_textlines(textline_mask_tot_ea, image_page) + return textline_mask_tot_ea def run_deskew(self, textline_mask_tot_ea): - if not np.any(textline_mask_tot_ea): - self.logger.info("slope_deskew: empty page") - return 0 - #print(textline_mask_tot_ea.shape, 'textline_mask_tot_ea deskew') - textline_mask_tot_ea = cv2.erode(textline_mask_tot_ea, KERNEL, iterations=2) - slope_deskew = return_deskew_slop(textline_mask_tot_ea, 2, - n_tot_angles=30, main_page=True, - logger=self.logger, plotter=self.plotter) + slope_deskew = return_deskew_slop(cv2.erode(textline_mask_tot_ea, KERNEL, iterations=2), 2, 30, True, + map=self.executor.map, logger=self.logger, plotter=self.plotter) + slope_first = 0 + + if self.plotter: + self.plotter.save_deskewed_image(slope_deskew) self.logger.info("slope_deskew: %.2f°", slope_deskew) - return slope_deskew + return slope_deskew, slope_first - def run_marginals(self, num_col_classifier, slope_deskew, text_regions_p): - get_marginals(num_col_classifier, slope_deskew, text_regions_p, - kernel=KERNEL) + def run_marginals( + self, image_page, textline_mask_tot_ea, mask_images, mask_lines, + num_col_classifier, slope_deskew, text_regions_p_1, table_prediction): - def get_full_layout( - self, image_page, - text_regions_p, - num_col_classifier, - label_text=1, - label_imgs=2, - label_imgs_fl=5, - label_imgs_fl_model=4, - label_seps=3, - label_seps_fl=6, - label_seps_fl_model=5, - label_marg=4, - label_marg_fl=8, - label_drop_fl=4, - label_drop_fl_model=3, - label_tabs=10, - ): - self.logger.debug('enter get_full_layout') - t_full0 = time.time() + image_page_rotated, textline_mask_tot = image_page[:, :], textline_mask_tot_ea[:, :] + textline_mask_tot[mask_images[:, :] == 1] = 0 - # segment labels used by models/arrays: - # class | early | old full (and decoded here) | new full (just predicted) | comment - # --- - # para | 1 | 1 | 1 | - # head | - | 2 | 2 | used in split_textregion_main_vs_head() afterwards - # drop | - | 4 | 3 | assigned from full model below - # img | 2 | 5 | 4 | mapped below - # sep | 3 | 6 | 5 | mapped + assigned from full model below - # marg | 4 | 8 | - | rule-based in run_marginals() from early text - # tab | - | 10 | - | dedicated model, optional - text_regions_p[text_regions_p == label_imgs] = label_imgs_fl - text_regions_p[text_regions_p == label_seps] 
= label_seps_fl - text_regions_p[text_regions_p == label_marg] = label_marg_fl + text_regions_p_1[mask_lines[:, :] == 1] = 3 + text_regions_p = text_regions_p_1[:, :] + text_regions_p = np.array(text_regions_p) - if self.full_layout: - regions_fully, regionsfl_confidence = self.extract_text_regions_new( - image_page, - False, cols=num_col_classifier) + if num_col_classifier in (1, 2): + try: + regions_without_separators = (text_regions_p[:, :] == 1) * 1 + if self.tables: + regions_without_separators[table_prediction==1] = 1 + regions_without_separators = regions_without_separators.astype(np.uint8) + text_regions_p = get_marginals( + rotate_image(regions_without_separators, slope_deskew), text_regions_p, + num_col_classifier, slope_deskew, light_version=self.light_version, kernel=KERNEL) + except Exception as e: + self.logger.error("exception %s", e) - # the separators in full layout will not be written on layout - if not self.reading_order_machine_based: - text_regions_p[regions_fully == label_seps_fl_model] = label_seps_fl + if self.plotter: + self.plotter.save_plot_of_layout_main_all(text_regions_p, image_page) + self.plotter.save_plot_of_layout_main(text_regions_p, image_page) + return textline_mask_tot, text_regions_p, image_page_rotated - drops = regions_fully == label_drop_fl_model - regions_fully[drops] = label_text - # rs: why erode to text here, when fill_bb... will mask out text (only allowing img/drop/bg)? - drops = cv2.erode(drops.astype(np.uint8), KERNEL, iterations=1) == 1 - regions_fully[drops] = label_drop_fl_model - drops = fill_bb_of_drop_capitals(regions_fully, text_regions_p) - text_regions_p[drops] = label_drop_fl - else: - regions_fully = None, - regionsfl_confidence = None + def run_boxes_no_full_layout( + self, image_page, textline_mask_tot, text_regions_p, + slope_deskew, num_col_classifier, table_prediction, erosion_hurts): - # no need to return text_regions_p (inplace editing) - self.logger.debug('exit get_full_layout') - return regions_fully, regionsfl_confidence + self.logger.debug('enter run_boxes_no_full_layout') + t_0_box = time.time() + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: + _, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = rotation_not_90_func( + image_page, textline_mask_tot, text_regions_p, table_prediction, slope_deskew) + text_regions_p_1_n = resize_image(text_regions_p_1_n, text_regions_p.shape[0], text_regions_p.shape[1]) + textline_mask_tot_d = resize_image(textline_mask_tot_d, text_regions_p.shape[0], text_regions_p.shape[1]) + table_prediction_n = resize_image(table_prediction_n, text_regions_p.shape[0], text_regions_p.shape[1]) + regions_without_separators_d = (text_regions_p_1_n[:, :] == 1) * 1 + if self.tables: + regions_without_separators_d[table_prediction_n[:,:] == 1] = 1 + regions_without_separators = (text_regions_p[:, :] == 1) * 1 # ( (text_regions_p[:,:]==1) | (text_regions_p[:,:]==2) )*1 #self.return_regions_without_separators_new(text_regions_p[:,:,0],img_only_regions) + #print(time.time()-t_0_box,'time box in 1') + if self.tables: + regions_without_separators[table_prediction ==1 ] = 1 + if np.abs(slope_deskew) < SLOPE_THRESHOLD: + text_regions_p_1_n = None + textline_mask_tot_d = None + regions_without_separators_d = None + pixel_lines = 3 + if np.abs(slope_deskew) < SLOPE_THRESHOLD: + _, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document( + np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), + num_col_classifier, self.tables, pixel_lines) - def get_deskewed_masks( - self, - 
slope_deskew, - textline_mask_tot, - text_regions_p, - regions_without_separators, - ): - return (rotate_image(textline_mask_tot, slope_deskew), - rotate_image(text_regions_p, slope_deskew), - rotate_image(regions_without_separators, slope_deskew), - ) + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: + _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document( + np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), + num_col_classifier, self.tables, pixel_lines) + #print(time.time()-t_0_box,'time box in 2') + self.logger.info("num_col_classifier: %s", num_col_classifier) - def run_boxes_order( - self, - text_regions_p, - num_col_classifier, - erosion_hurts, - regions_without_separators, - contours_h=None, - label_seps_fl=6, - ): - if not erosion_hurts: - regions_without_separators = regions_without_separators.astype(np.uint8) - regions_without_separators = cv2.erode(regions_without_separators, KERNEL, iterations=6) - separator_mask = text_regions_p == label_seps_fl - - _, _, matrix_of_seps_ch, splitter_y_new = find_number_of_columns_in_document( - regions_without_separators, separator_mask, num_col_classifier, self.tables, - contours_h=contours_h) - - boxes, _ = return_boxes_of_images_by_order_of_reading_new( - splitter_y_new, regions_without_separators, - separator_mask, matrix_of_seps_ch, - num_col_classifier, erosion_hurts, self.tables, self.right2left, - logger=self.logger) - return boxes - - def do_order_of_regions_with_model( - self, - contours_only_text_parent, - contours_only_text_parent_h, - # not trained on drops directly, but it does work: - polygons_of_drop_capitals, - text_regions_p, - n_batch_inference=1, # 3 (causes OOM on 8 GB GPUs) - # input labels as in run_boxes_full_layout - # output labels as in RO model's read_xml - label_text=1, - label_head=2, - label_imgs=5, - label_imgs_ro=4, - label_seps=6, - label_seps_ro=5, - label_marg=8, - label_marg_ro=3, - label_drop=4, - # no drop-capital in RO model, yet - label_drop_ro=4, - ): - model = self.model_zoo.get("reading_order") - _, height_model, width_model, _ = model.input_shape - - ver_kernel = np.ones((5, 1), dtype=np.uint8) - hor_kernel = np.ones((1, 5), dtype=np.uint8) - min_cont_size_to_be_dilated = 10 - if len(contours_only_text_parent) > min_cont_size_to_be_dilated: - (cx_conts, cy_conts, - x_min_conts, x_max_conts, - y_min_conts, y_max_conts, - _) = find_new_features_of_contours(contours_only_text_parent) - cx_conts = ensure_array(cx_conts) - cy_conts = ensure_array(cy_conts) - contours_only_text_parent = ensure_array(contours_only_text_parent) - args_cont = np.arange(len(contours_only_text_parent)) - - diff_x_conts = np.abs(x_max_conts[:]-x_min_conts) - mean_x = np.mean(diff_x_conts) - diff_x_ratio = diff_x_conts / mean_x - - args_cont_excluded = args_cont[diff_x_ratio >= 1.3] - args_cont_included = args_cont[diff_x_ratio < 1.3] - - if len(args_cont_excluded): - textregion_par = np.zeros_like(text_regions_p) - textregion_par = cv2.fillPoly(textregion_par, - pts=contours_only_text_parent[args_cont_included], - color=1) + if num_col_classifier >= 3: + if np.abs(slope_deskew) < SLOPE_THRESHOLD: + regions_without_separators = regions_without_separators.astype(np.uint8) + regions_without_separators = cv2.erode(regions_without_separators[:, :], KERNEL, iterations=6) else: - textregion_par = (text_regions_p == 1).astype(np.uint8) + regions_without_separators_d = regions_without_separators_d.astype(np.uint8) + regions_without_separators_d = cv2.erode(regions_without_separators_d[:, :], KERNEL, 
iterations=6) + #print(time.time()-t_0_box,'time box in 3') + t1 = time.time() + if np.abs(slope_deskew) < SLOPE_THRESHOLD: + boxes, peaks_neg_tot_tables = return_boxes_of_images_by_order_of_reading_new( + splitter_y_new, regions_without_separators, matrix_of_lines_ch, + num_col_classifier, erosion_hurts, self.tables, self.right2left) + boxes_d = None + self.logger.debug("len(boxes): %s", len(boxes)) + #print(time.time()-t_0_box,'time box in 3.1') - textregion_par = cv2.erode(textregion_par, hor_kernel, iterations=2) - textregion_par = cv2.dilate(textregion_par, ver_kernel, iterations=4) - textregion_par = cv2.erode(textregion_par, hor_kernel, iterations=1) - textregion_par = cv2.dilate(textregion_par, ver_kernel, iterations=5) - textregion_par[text_regions_p > 1] = 0 - - contours_only_dilated, hir_on_text_dilated = return_contours_of_image(textregion_par) - contours_only_dilated = return_parent_contours(contours_only_dilated, hir_on_text_dilated) - - indexes_of_located_cont, _, cy_of_located = \ - self.return_indexes_of_contours_located_inside_another_list_of_contours( - contours_only_dilated, - cx_conts[args_cont_included], - cy_conts[args_cont_included], - args_cont_included) - - indexes_of_located_cont.extend(args_cont_excluded[:, np.newaxis]) - contours_only_dilated.extend(contours_only_text_parent[args_cont_excluded]) - - missing_textregions = np.setdiff1d(args_cont, np.concatenate(indexes_of_located_cont)) - - indexes_of_located_cont.extend(missing_textregions[:, np.newaxis]) - contours_only_dilated.extend(contours_only_text_parent[missing_textregions]) - - args_cont_h = np.arange(len(contours_only_text_parent_h)) - indexes_of_located_cont.extend(args_cont_h[:, np.newaxis] + - len(contours_only_text_parent)) - - args_cont_drop = np.arange(len(polygons_of_drop_capitals)) - indexes_of_located_cont.extend(args_cont_drop[:, np.newaxis] + - len(contours_only_text_parent) + - len(contours_only_text_parent_h)) - - co_text_all = contours_only_dilated + if self.tables: + if self.light_version: + pass + else: + text_regions_p_tables = np.copy(text_regions_p) + text_regions_p_tables[:,:][(table_prediction[:,:] == 1)] = 10 + pixel_line = 3 + img_revised_tab2 = self.add_tables_heuristic_to_layout( + text_regions_p_tables, boxes, 0, splitter_y_new, peaks_neg_tot_tables, text_regions_p_tables, + num_col_classifier , 0.000005, pixel_line) + #print(time.time()-t_0_box,'time box in 3.2') + img_revised_tab2, contoures_tables = self.check_iou_of_bounding_box_and_contour_for_tables( + img_revised_tab2, table_prediction, 10, num_col_classifier) + #print(time.time()-t_0_box,'time box in 3.3') else: - co_text_all = list(contours_only_text_parent) + boxes_d, peaks_neg_tot_tables_d = return_boxes_of_images_by_order_of_reading_new( + splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, + num_col_classifier, erosion_hurts, self.tables, self.right2left) + boxes = None + self.logger.debug("len(boxes): %s", len(boxes_d)) - img_poly = np.zeros_like(text_regions_p) - img_poly[text_regions_p == label_text] = label_text - img_poly[text_regions_p == label_head] = label_head - img_poly[text_regions_p == 3] = label_imgs # rs: ?? 
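
The table branch in this hunk first paints table candidates (label 10) into the layout with add_tables_heuristic_to_layout and then validates each candidate against its bounding box via check_iou_of_bounding_box_and_contour_for_tables. A minimal sketch of that validation idea, assuming a simple fill-ratio test (the threshold and exact criterion are assumptions, not the function's actual values):

import cv2
import numpy as np

def contour_fills_its_box(contour: np.ndarray, min_fill: float = 0.5) -> bool:
    # Accept a table candidate only if its contour covers enough of its
    # axis-aligned bounding box; sparse, scattered blobs fail this test.
    x, y, w, h = cv2.boundingRect(contour)
    box_area = float(w * h)
    return box_area > 0 and cv2.contourArea(contour) / box_area >= min_fill
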
- img_poly[text_regions_p == label_imgs] = label_imgs_ro - img_poly[text_regions_p == label_marg] = label_marg_ro - img_poly[text_regions_p == label_seps] = label_seps_ro + if self.tables: + if self.light_version: + pass + else: + text_regions_p_tables = np.copy(text_regions_p_1_n) + text_regions_p_tables =np.round(text_regions_p_tables) + text_regions_p_tables[:,:][(text_regions_p_tables[:,:] != 3) & (table_prediction_n[:,:] == 1)] = 10 - img_header_and_sep = np.zeros_like(text_regions_p) - for contour in contours_only_text_parent_h: - # rs: why (max:max+12) instad of (min:max)? - # what about actual seps? - img_header_and_sep[contour[:, 0, 1].max(): contour[:, 0, 1].max() + 12, - contour[:, 0, 0].min(): contour[:, 0, 0].max()] = 1 - co_text_all.extend(contours_only_text_parent_h) - co_text_all.extend(polygons_of_drop_capitals) + pixel_line = 3 + img_revised_tab2 = self.add_tables_heuristic_to_layout( + text_regions_p_tables, boxes_d, 0, splitter_y_new_d, peaks_neg_tot_tables_d, text_regions_p_tables, + num_col_classifier, 0.000005, pixel_line) + img_revised_tab2_d,_ = self.check_iou_of_bounding_box_and_contour_for_tables( + img_revised_tab2, table_prediction_n, 10, num_col_classifier) + + img_revised_tab2_d_rotated = rotate_image(img_revised_tab2_d, -slope_deskew) + img_revised_tab2_d_rotated = np.round(img_revised_tab2_d_rotated) + img_revised_tab2_d_rotated = img_revised_tab2_d_rotated.astype(np.int8) + img_revised_tab2_d_rotated = resize_image(img_revised_tab2_d_rotated, text_regions_p.shape[0], text_regions_p.shape[1]) + #print(time.time()-t_0_box,'time box in 4') + self.logger.info("detecting boxes took %.1fs", time.time() - t1) + + if self.tables: + if self.light_version: + text_regions_p[:,:][table_prediction[:,:]==1] = 10 + img_revised_tab=text_regions_p[:,:] + else: + if np.abs(slope_deskew) < SLOPE_THRESHOLD: + img_revised_tab = np.copy(img_revised_tab2[:,:,0]) + img_revised_tab[:,:][(text_regions_p[:,:] == 1) & (img_revised_tab[:,:] != 10)] = 1 + else: + img_revised_tab = np.copy(text_regions_p[:,:]) + img_revised_tab[:,:][img_revised_tab[:,:] == 10] = 0 + img_revised_tab[:,:][img_revised_tab2_d_rotated[:,:,0] == 10] = 10 + + text_regions_p[:,:][text_regions_p[:,:]==10] = 0 + text_regions_p[:,:][img_revised_tab[:,:]==10] = 10 + else: + img_revised_tab=text_regions_p[:,:] + #img_revised_tab = text_regions_p[:, :] + if self.light_version: + polygons_of_images = return_contours_of_interested_region(text_regions_p, 2) + else: + polygons_of_images = return_contours_of_interested_region(img_revised_tab, 2) + + pixel_img = 4 + min_area_mar = 0.00001 + if self.light_version: + marginal_mask = (text_regions_p[:,:]==pixel_img)*1 + marginal_mask = marginal_mask.astype('uint8') + marginal_mask = cv2.dilate(marginal_mask, KERNEL, iterations=2) + + polygons_of_marginals = return_contours_of_interested_region(marginal_mask, 1, min_area_mar) + else: + polygons_of_marginals = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar) + + pixel_img = 10 + contours_tables = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar) + #print(time.time()-t_0_box,'time box in 5') + self.logger.debug('exit run_boxes_no_full_layout') + return (polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, + regions_without_separators_d, boxes, boxes_d, + polygons_of_marginals, contours_tables) + + def run_boxes_full_layout( + self, image_page, textline_mask_tot, text_regions_p, + slope_deskew, num_col_classifier, img_only_regions, + 
table_prediction, erosion_hurts, img_bin_light): + + self.logger.debug('enter run_boxes_full_layout') + t_full0 = time.time() + if self.tables: + if self.light_version: + text_regions_p[:,:][table_prediction[:,:]==1] = 10 + img_revised_tab = text_regions_p[:,:] + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: + image_page_rotated_n, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = \ + rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, table_prediction, slope_deskew) + + text_regions_p_1_n = resize_image(text_regions_p_1_n,text_regions_p.shape[0],text_regions_p.shape[1]) + textline_mask_tot_d = resize_image(textline_mask_tot_d,text_regions_p.shape[0],text_regions_p.shape[1]) + table_prediction_n = resize_image(table_prediction_n,text_regions_p.shape[0],text_regions_p.shape[1]) + + regions_without_separators_d = (text_regions_p_1_n[:,:] == 1)*1 + regions_without_separators_d[table_prediction_n[:,:] == 1] = 1 + else: + text_regions_p_1_n = None + textline_mask_tot_d = None + regions_without_separators_d = None + # regions_without_separators = ( text_regions_p[:,:]==1 | text_regions_p[:,:]==2 )*1 + #self.return_regions_without_separators_new(text_regions_p[:,:,0],img_only_regions) + regions_without_separators = (text_regions_p[:,:] == 1)*1 + regions_without_separators[table_prediction == 1] = 1 + + else: + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: + image_page_rotated_n, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = \ + rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, table_prediction, slope_deskew) + + text_regions_p_1_n = resize_image(text_regions_p_1_n,text_regions_p.shape[0],text_regions_p.shape[1]) + textline_mask_tot_d = resize_image(textline_mask_tot_d,text_regions_p.shape[0],text_regions_p.shape[1]) + table_prediction_n = resize_image(table_prediction_n,text_regions_p.shape[0],text_regions_p.shape[1]) + + regions_without_separators_d = (text_regions_p_1_n[:,:] == 1)*1 + regions_without_separators_d[table_prediction_n[:,:] == 1] = 1 + else: + text_regions_p_1_n = None + textline_mask_tot_d = None + regions_without_separators_d = None + + # regions_without_separators = ( text_regions_p[:,:]==1 | text_regions_p[:,:]==2 )*1 + #self.return_regions_without_separators_new(text_regions_p[:,:,0],img_only_regions) + regions_without_separators = (text_regions_p[:,:] == 1)*1 + regions_without_separators[table_prediction == 1] = 1 + + pixel_lines=3 + if np.abs(slope_deskew) < SLOPE_THRESHOLD: + num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document( + np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), + num_col_classifier, self.tables, pixel_lines) + + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: + num_col_d, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document( + np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), + num_col_classifier, self.tables, pixel_lines) + + if num_col_classifier>=3: + if np.abs(slope_deskew) < SLOPE_THRESHOLD: + regions_without_separators = regions_without_separators.astype(np.uint8) + regions_without_separators = cv2.erode(regions_without_separators[:,:], KERNEL, iterations=6) + + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: + regions_without_separators_d = regions_without_separators_d.astype(np.uint8) + regions_without_separators_d = cv2.erode(regions_without_separators_d[:,:], KERNEL, iterations=6) + else: + pass + + if np.abs(slope_deskew) < SLOPE_THRESHOLD: + boxes, peaks_neg_tot_tables = 
return_boxes_of_images_by_order_of_reading_new( + splitter_y_new, regions_without_separators, matrix_of_lines_ch, + num_col_classifier, erosion_hurts, self.tables, self.right2left) + text_regions_p_tables = np.copy(text_regions_p) + text_regions_p_tables[:,:][(table_prediction[:,:]==1)] = 10 + pixel_line = 3 + img_revised_tab2 = self.add_tables_heuristic_to_layout( + text_regions_p_tables, boxes, 0, splitter_y_new, peaks_neg_tot_tables, text_regions_p_tables, + num_col_classifier , 0.000005, pixel_line) + + img_revised_tab2,contoures_tables = self.check_iou_of_bounding_box_and_contour_for_tables( + img_revised_tab2, table_prediction, 10, num_col_classifier) + else: + boxes_d, peaks_neg_tot_tables_d = return_boxes_of_images_by_order_of_reading_new( + splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, + num_col_classifier, erosion_hurts, self.tables, self.right2left) + text_regions_p_tables = np.copy(text_regions_p_1_n) + text_regions_p_tables = np.round(text_regions_p_tables) + text_regions_p_tables[:,:][(text_regions_p_tables[:,:]!=3) & (table_prediction_n[:,:]==1)] = 10 + + pixel_line = 3 + img_revised_tab2 = self.add_tables_heuristic_to_layout( + text_regions_p_tables, boxes_d, 0, splitter_y_new_d, peaks_neg_tot_tables_d, text_regions_p_tables, + num_col_classifier, 0.000005, pixel_line) + + img_revised_tab2_d,_ = self.check_iou_of_bounding_box_and_contour_for_tables( + img_revised_tab2, table_prediction_n, 10, num_col_classifier) + img_revised_tab2_d_rotated = rotate_image(img_revised_tab2_d, -slope_deskew) + + img_revised_tab2_d_rotated = np.round(img_revised_tab2_d_rotated) + img_revised_tab2_d_rotated = img_revised_tab2_d_rotated.astype(np.int8) + + img_revised_tab2_d_rotated = resize_image(img_revised_tab2_d_rotated, text_regions_p.shape[0], text_regions_p.shape[1]) + + if np.abs(slope_deskew) < 0.13: + img_revised_tab = np.copy(img_revised_tab2[:,:,0]) + else: + img_revised_tab = np.copy(text_regions_p[:,:]) + img_revised_tab[:,:][img_revised_tab[:,:] == 10] = 0 + img_revised_tab[:,:][img_revised_tab2_d_rotated[:,:,0] == 10] = 10 + + ##img_revised_tab=img_revised_tab2[:,:,0] + #img_revised_tab=text_regions_p[:,:] + text_regions_p[:,:][text_regions_p[:,:]==10] = 0 + text_regions_p[:,:][img_revised_tab[:,:]==10] = 10 + #img_revised_tab[img_revised_tab2[:,:,0]==10] =10 + + pixel_img = 4 + min_area_mar = 0.00001 + + if self.light_version: + marginal_mask = (text_regions_p[:,:]==pixel_img)*1 + marginal_mask = marginal_mask.astype('uint8') + marginal_mask = cv2.dilate(marginal_mask, KERNEL, iterations=2) + + polygons_of_marginals = return_contours_of_interested_region(marginal_mask, 1, min_area_mar) + else: + polygons_of_marginals = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar) + + pixel_img = 10 + contours_tables = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar) + + # set first model with second model + text_regions_p[:, :][text_regions_p[:, :] == 2] = 5 + text_regions_p[:, :][text_regions_p[:, :] == 3] = 6 + text_regions_p[:, :][text_regions_p[:, :] == 4] = 8 + + image_page = image_page.astype(np.uint8) + #print("full inside 1", time.time()- t_full0) + regions_fully, regions_fully_only_drop = self.extract_text_regions_new( + img_bin_light if self.light_version else image_page, + False, cols=num_col_classifier) + #print("full inside 2", time.time()- t_full0) + # 6 is the separators lable in old full layout model + # 4 is the drop capital class in old full layout model + # in the new full layout drop capital 
is 3 and separators are 5 + + text_regions_p[:,:][regions_fully[:,:,0]==5]=6 + ###regions_fully[:, :, 0][regions_fully_only_drop[:, :, 0] == 3] = 4 + + #text_regions_p[:,:][regions_fully[:,:,0]==6]=6 + ##regions_fully_only_drop = put_drop_out_from_only_drop_model(regions_fully_only_drop, text_regions_p) + ##regions_fully[:, :, 0][regions_fully_only_drop[:, :, 0] == 4] = 4 + drop_capital_label_in_full_layout_model = 3 + + drops = (regions_fully[:,:,0]==drop_capital_label_in_full_layout_model)*1 + drops= drops.astype(np.uint8) + + regions_fully[:,:,0][regions_fully[:,:,0]==drop_capital_label_in_full_layout_model] = 1 + + drops = cv2.erode(drops[:,:], KERNEL, iterations=1) + regions_fully[:,:,0][drops[:,:]==1] = drop_capital_label_in_full_layout_model + + regions_fully = putt_bb_of_drop_capitals_of_model_in_patches_in_layout( + regions_fully, drop_capital_label_in_full_layout_model, text_regions_p) + ##regions_fully_np, _ = self.extract_text_regions(image_page, False, cols=num_col_classifier) + ##if num_col_classifier > 2: + ##regions_fully_np[:, :, 0][regions_fully_np[:, :, 0] == 4] = 0 + ##else: + ##regions_fully_np = filter_small_drop_capitals_from_no_patch_layout(regions_fully_np, text_regions_p) + + ###regions_fully = boosting_headers_by_longshot_region_segmentation(regions_fully, regions_fully_np, img_only_regions) + # plt.imshow(regions_fully[:,:,0]) + # plt.show() + text_regions_p[:, :][regions_fully[:, :, 0] == drop_capital_label_in_full_layout_model] = 4 + ####text_regions_p[:, :][regions_fully_np[:, :, 0] == 4] = 4 + #plt.imshow(text_regions_p) + #plt.show() + ####if not self.tables: + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: + _, textline_mask_tot_d, text_regions_p_1_n, regions_fully_n = rotation_not_90_func_full_layout( + image_page, textline_mask_tot, text_regions_p, regions_fully, slope_deskew) + + text_regions_p_1_n = resize_image(text_regions_p_1_n, text_regions_p.shape[0], text_regions_p.shape[1]) + textline_mask_tot_d = resize_image(textline_mask_tot_d, text_regions_p.shape[0], text_regions_p.shape[1]) + regions_fully_n = resize_image(regions_fully_n, text_regions_p.shape[0], text_regions_p.shape[1]) + if not self.tables: + regions_without_separators_d = (text_regions_p_1_n[:, :] == 1) * 1 + else: + text_regions_p_1_n = None + textline_mask_tot_d = None + regions_without_separators_d = None + if not self.tables: + regions_without_separators = (text_regions_p[:, :] == 1) * 1 + img_revised_tab = np.copy(text_regions_p[:, :]) + polygons_of_images = return_contours_of_interested_region(img_revised_tab, 5) + + self.logger.debug('exit run_boxes_full_layout') + #print("full inside 3", time.time()- t_full0) + return (polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, + regions_without_separators_d, regions_fully, regions_without_separators, + polygons_of_marginals, contours_tables) + + @staticmethod + def our_load_model(model_file): + if model_file.endswith('.h5') and Path(model_file[:-3]).exists(): + # prefer SavedModel over HDF5 format if it exists + model_file = model_file[:-3] + try: + model = load_model(model_file, compile=False) + except: + model = load_model(model_file, compile=False, custom_objects={ + "PatchEncoder": PatchEncoder, "Patches": Patches}) + return model + + def do_order_of_regions_with_model(self, contours_only_text_parent, contours_only_text_parent_h, text_regions_p): + y_len = text_regions_p.shape[0] + x_len = text_regions_p.shape[1] + + img_poly = np.zeros((y_len,x_len), dtype='uint8') + img_poly[text_regions_p[:,:]==1] = 1 + 
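
# Overview of the pairwise reading-order scheme implemented in this method:
# each region contour is rasterized into its own binary map, the page layout
# is encoded as a class image (img_poly, remapped here to the palette the
# reading-order model was trained on), and a binary classifier is then asked,
# for every pair of regions, which one should be read first; the answers are
# folded into a total order further below.
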
img_poly[text_regions_p[:,:]==2] = 2 + img_poly[text_regions_p[:,:]==3] = 4 + img_poly[text_regions_p[:,:]==6] = 5 + + img_header_and_sep = np.zeros((y_len,x_len), dtype='uint8') + if contours_only_text_parent_h: + _, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, _ = find_new_features_of_contours( + contours_only_text_parent_h) + for j in range(len(cy_main)): + img_header_and_sep[int(y_max_main[j]):int(y_max_main[j])+12, + int(x_min_main[j]):int(x_max_main[j])] = 1 + co_text_all = contours_only_text_parent + contours_only_text_parent_h + else: + co_text_all = contours_only_text_parent if not len(co_text_all): - return [] + return [], [] - # fill polygons in lower resolution to be faster - height, width = text_regions_p.shape - labels_con = np.zeros((height // 6, width // 6, len(co_text_all)), dtype=bool) + labels_con = np.zeros((y_len, x_len, len(co_text_all)), dtype=bool) for i in range(len(co_text_all)): - img = np.zeros(labels_con.shape[:2], dtype=np.uint8) - cv2.fillPoly(img, pts=[co_text_all[i] // 6], color=1) - labels_con[:, :, i] = img - labels_con = resize_image(labels_con.astype(np.uint8), height_model, width_model).astype(bool) - img_header_and_sep = resize_image(img_header_and_sep, height_model, width_model) - img_poly = resize_image(img_poly, height_model, width_model) - labels_con[img_poly == label_seps_ro] = 2 - labels_con[img_header_and_sep == 1] = 3 - labels_con = labels_con / 3. - img_poly = img_poly / 5. + img = labels_con[:,:,i].astype(np.uint8) + cv2.fillPoly(img, pts=[co_text_all[i]], color=(1,)) + labels_con[:,:,i] = img - input_1 = np.zeros((n_batch_inference, height_model, width_model, 3)) + height1 =672#448 + width1 = 448#224 + + height2 =672#448 + width2= 448#224 + + height3 =672#448 + width3 = 448#224 + + labels_con = resize_image(labels_con.astype(np.uint8), height1, width1).astype(bool) + img_header_and_sep = resize_image(img_header_and_sep, height1, width1) + img_poly = resize_image(img_poly, height3, width3) + + inference_bs = 3 + input_1 = np.zeros((inference_bs, height1, width1, 3)) ordered = [list(range(len(co_text_all)))] index_update = 0 #print(labels_con.shape[2],"number of regions for reading order") @@ -1769,16 +3374,23 @@ class Eynollah: tot_counter = 0 batch = [] for j in ij_list: - input_1[len(batch), :, :, 0] = labels_con[:, :, i] - input_1[len(batch), :, :, 1] = img_poly - input_1[len(batch), :, :, 2] = labels_con[:, :, j] + img1 = labels_con[:,:,i].astype(float) + img2 = labels_con[:,:,j].astype(float) + img1[img_poly==5] = 2 + img2[img_poly==5] = 2 + img1[img_header_and_sep==1] = 3 + img2[img_header_and_sep==1] = 3 + + input_1[len(batch), :, :, 0] = img1 / 3. + input_1[len(batch), :, :, 2] = img2 / 3. + input_1[len(batch), :, :, 1] = img_poly / 5. 
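
# Note on the model input assembled above: each pair sample stacks three
# maps scaled into [0, 1] -- channel 0 holds region i and channel 2 region j
# (each with separator pixels re-labelled 2 and header strips 3, then divided
# by 3), while channel 1 is the page class map divided by 5. A prediction
# y_pr[jb][0] >= 0.5 is read as "region j comes after region i".
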
tot_counter += 1 batch.append(j) - if tot_counter % n_batch_inference == 0 or tot_counter == len(ij_list): - y_pr = model.predict(input_1 , verbose=0) - for post_pr in y_pr: - if post_pr[0] >= 0.5: + if tot_counter % inference_bs == 0 or tot_counter == len(ij_list): + y_pr = self.model_reading_order.predict(input_1 , verbose=0) + for jb, j in enumerate(batch): + if y_pr[jb][0]>=0.5: post_list.append(j) else: ante_list.append(j) @@ -1798,717 +3410,2024 @@ class Eynollah: break ordered = [i[0] for i in ordered] + region_ids = ['region_%04d' % i for i in range(len(co_text_all))] + return ordered, region_ids - if len(contours_only_text_parent) > min_cont_size_to_be_dilated: - org_contours_indexes = [] - for i in ordered: - if i < len(contours_only_dilated): - if i >= len(cy_of_located): - # excluded or missing dilated version of main region - org_contours_indexes.extend(indexes_of_located_cont[i]) - else: - # reconstructed dilated version of main region - org_contours_indexes.extend(indexes_of_located_cont[i][ - np.argsort(cy_of_located[i])]) + def return_start_and_end_of_common_text_of_textline_ocr(self, textline_image, ind_tot): + width = np.shape(textline_image)[1] + height = np.shape(textline_image)[0] + common_window = int(0.2*width) + + width1 = int ( width/2. - common_window ) + width2 = int ( width/2. + common_window ) + + img_sum = np.sum(textline_image[:,:,0], axis=0) + sum_smoothed = gaussian_filter1d(img_sum, 3) + + peaks_real, _ = find_peaks(sum_smoothed, height=0) + if len(peaks_real)>70: + print(len(peaks_real), 'len(peaks_real)') + + peaks_real = peaks_real[(peaks_real<width2) & (peaks_real>width1)] + + arg_sort = np.argsort(sum_smoothed[peaks_real]) + arg_sort4 =arg_sort[::-1][:4] + peaks_sort_4 = peaks_real[arg_sort][::-1][:4] + argsort_sorted = np.argsort(peaks_sort_4) + + first_4_sorted = peaks_sort_4[argsort_sorted] + y_4_sorted = sum_smoothed[peaks_real][arg_sort4[argsort_sorted]] + #print(first_4_sorted,'first_4_sorted') + + arg_sortnew = np.argsort(y_4_sorted) + peaks_final =np.sort( first_4_sorted[arg_sortnew][2:] ) + + #plt.figure(ind_tot) + #plt.imshow(textline_image) + #plt.plot([peaks_final[0], peaks_final[0]], [0, height-1]) + #plt.plot([peaks_final[1], peaks_final[1]], [0, height-1]) + #plt.savefig('./'+str(ind_tot)+'.png') + + return peaks_final[0], peaks_final[1] + else: + pass + + def return_start_and_end_of_common_text_of_textline_ocr_without_common_section(self, textline_image, ind_tot): + width = np.shape(textline_image)[1] + height = np.shape(textline_image)[0] + common_window = int(0.06*width) + + width1 = int ( width/2. - common_window ) + width2 = int ( width/2.
+ common_window ) + + img_sum = np.sum(textline_image[:,:,0], axis=0) + sum_smoothed = gaussian_filter1d(img_sum, 3) + + peaks_real, _ = find_peaks(sum_smoothed, height=0) + if len(peaks_real)>70: + #print(len(peaks_real), 'len(peaks_real)') + + peaks_real = peaks_real[(peaks_real<width2) & (peaks_real>width1)] + + arg_max = np.argmax(sum_smoothed[peaks_real]) + peaks_final = peaks_real[arg_max] + + #plt.figure(ind_tot) + #plt.imshow(textline_image) + #plt.plot([peaks_final, peaks_final], [0, height-1]) + ##plt.plot([peaks_final[1], peaks_final[1]], [0, height-1]) + #plt.savefig('./'+str(ind_tot)+'.png') + + return peaks_final + else: + return None + + def return_start_and_end_of_common_text_of_textline_ocr_new_splitted( + self, peaks_real, sum_smoothed, start_split, end_split): + + peaks_real = peaks_real[(peaks_real<end_split) & (peaks_real>start_split)] + + arg_sort = np.argsort(sum_smoothed[peaks_real]) + arg_sort4 =arg_sort[::-1][:4] + peaks_sort_4 = peaks_real[arg_sort][::-1][:4] + argsort_sorted = np.argsort(peaks_sort_4) + + first_4_sorted = peaks_sort_4[argsort_sorted] + y_4_sorted = sum_smoothed[peaks_real][arg_sort4[argsort_sorted]] + #print(first_4_sorted,'first_4_sorted') + + arg_sortnew = np.argsort(y_4_sorted) + peaks_final =np.sort( first_4_sorted[arg_sortnew][3:] ) + return peaks_final[0] + + def return_start_and_end_of_common_text_of_textline_ocr_new(self, textline_image, ind_tot): + width = np.shape(textline_image)[1] + height = np.shape(textline_image)[0] + common_window = int(0.15*width) + + width1 = int ( width/2. - common_window ) + width2 = int ( width/2. + common_window ) + mid = int(width/2.) + + img_sum = np.sum(textline_image[:,:,0], axis=0) + sum_smoothed = gaussian_filter1d(img_sum, 3) + + peaks_real, _ = find_peaks(sum_smoothed, height=0) + if len(peaks_real)>70: + peak_start = self.return_start_and_end_of_common_text_of_textline_ocr_new_splitted( + peaks_real, sum_smoothed, width1, mid+2) + peak_end = self.return_start_and_end_of_common_text_of_textline_ocr_new_splitted( + peaks_real, sum_smoothed, mid-2, width2) + + #plt.figure(ind_tot) + #plt.imshow(textline_image) + #plt.plot([peak_start, peak_start], [0, height-1]) + #plt.plot([peak_end, peak_end], [0, height-1]) + #plt.savefig('./'+str(ind_tot)+'.png') + + return peak_start, peak_end + else: + pass + + def return_ocr_of_textline_without_common_section( + self, textline_image, model_ocr, processor, device, width_textline, h2w_ratio,ind_tot): + + if h2w_ratio > 0.05: + pixel_values = processor(textline_image, return_tensors="pt").pixel_values + generated_ids = model_ocr.generate(pixel_values.to(device)) + generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + else: + #width = np.shape(textline_image)[1] + #height = np.shape(textline_image)[0] + #common_window = int(0.3*width) + #width1 = int ( width/2. - common_window ) + #width2 = int ( width/2.
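
# Hedged sketch of the split-point search the helper above performs (names
# are assumptions): the column-wise intensity profile peaks where little ink
# is present, so after smoothing, the strongest peak near the centre of the
# line is a safe place to cut a very long textline for two-pass OCR.
import numpy as np
from scipy.ndimage import gaussian_filter1d
from scipy.signal import find_peaks

def central_split_point(textline_gray, window=0.06):
    width = textline_gray.shape[1]
    lo, hi = int(width * (0.5 - window)), int(width * (0.5 + window))
    profile = gaussian_filter1d(textline_gray.sum(axis=0), 3)
    peaks, _ = find_peaks(profile, height=0)
    peaks = peaks[(peaks > lo) & (peaks < hi)]
    if len(peaks) == 0:
        return None
    return int(peaks[np.argmax(profile[peaks])])
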
+ common_window ) + + split_point = self.return_start_and_end_of_common_text_of_textline_ocr_without_common_section( + textline_image, ind_tot) + if split_point: + image1 = textline_image[:, :split_point,:]# image.crop((0, 0, width2, height)) + image2 = textline_image[:, split_point:,:]#image.crop((width1, 0, width, height)) + + #pixel_values1 = processor(image1, return_tensors="pt").pixel_values + #pixel_values2 = processor(image2, return_tensors="pt").pixel_values + + pixel_values_merged = processor([image1,image2], return_tensors="pt").pixel_values + generated_ids_merged = model_ocr.generate(pixel_values_merged.to(device)) + generated_text_merged = processor.batch_decode(generated_ids_merged, skip_special_tokens=True) + + #print(generated_text_merged,'generated_text_merged') + + #generated_ids1 = model_ocr.generate(pixel_values1.to(device)) + #generated_ids2 = model_ocr.generate(pixel_values2.to(device)) + + #generated_text1 = processor.batch_decode(generated_ids1, skip_special_tokens=True)[0] + #generated_text2 = processor.batch_decode(generated_ids2, skip_special_tokens=True)[0] + + #generated_text = generated_text1 + ' ' + generated_text2 + generated_text = generated_text_merged[0] + ' ' + generated_text_merged[1] + + #print(generated_text1,'generated_text1') + #print(generated_text2, 'generated_text2') + #print('########################################') + else: + pixel_values = processor(textline_image, return_tensors="pt").pixel_values + generated_ids = model_ocr.generate(pixel_values.to(device)) + generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + + #print(generated_text,'generated_text') + #print('########################################') + return generated_text + + def return_ocr_of_textline( + self, textline_image, model_ocr, processor, device, width_textline, h2w_ratio,ind_tot): + + if h2w_ratio > 0.05: + pixel_values = processor(textline_image, return_tensors="pt").pixel_values + generated_ids = model_ocr.generate(pixel_values.to(device)) + generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + else: + #width = np.shape(textline_image)[1] + #height = np.shape(textline_image)[0] + #common_window = int(0.3*width) + #width1 = int ( width/2. - common_window ) + #width2 = int ( width/2. 
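
# The branches around this point drive a TrOCR-style VisionEncoderDecoder
# through its processor. A self-contained usage sketch (the checkpoint name
# is illustrative, not the model eynollah ships):
import torch
from difflib import SequenceMatcher
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

device = "cuda" if torch.cuda.is_available() else "cpu"
processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed")
model_ocr = VisionEncoderDecoderModel.from_pretrained(
    "microsoft/trocr-base-printed").to(device)

def ocr_crops(crops):
    # crops: a list of RGB line images (numpy arrays or PIL images)
    pixel_values = processor(crops, return_tensors="pt").pixel_values
    generated_ids = model_ocr.generate(pixel_values.to(device))
    return processor.batch_decode(generated_ids, skip_special_tokens=True)

def merge_overlapping_reads(text1, text2):
    # Stitch two reads of overlapping crops at their longest common run,
    # mirroring the SequenceMatcher merge used further below.
    match = SequenceMatcher(None, text1, text2).find_longest_match(
        0, len(text1), 0, len(text2))
    return text1 + text2[match.b + match.size:]
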
+ common_window ) + + try: + width1, width2 = self.return_start_and_end_of_common_text_of_textline_ocr_new(textline_image, ind_tot) + + image1 = textline_image[:, :width2,:]# image.crop((0, 0, width2, height)) + image2 = textline_image[:, width1:,:]#image.crop((width1, 0, width, height)) + + pixel_values1 = processor(image1, return_tensors="pt").pixel_values + pixel_values2 = processor(image2, return_tensors="pt").pixel_values + + generated_ids1 = model_ocr.generate(pixel_values1.to(device)) + generated_ids2 = model_ocr.generate(pixel_values2.to(device)) + + generated_text1 = processor.batch_decode(generated_ids1, skip_special_tokens=True)[0] + generated_text2 = processor.batch_decode(generated_ids2, skip_special_tokens=True)[0] + #print(generated_text1,'generated_text1') + #print(generated_text2, 'generated_text2') + #print('########################################') + + match = sq(None, generated_text1, generated_text2).find_longest_match( + 0, len(generated_text1), 0, len(generated_text2)) + generated_text = generated_text1 + generated_text2[match.b+match.size:] + except: + pixel_values = processor(textline_image, return_tensors="pt").pixel_values + generated_ids = model_ocr.generate(pixel_values.to(device)) + generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + + return generated_text + + def return_textline_contour_with_added_box_coordinate(self, textline_contour, box_ind): + textline_contour[:,0] = textline_contour[:,0] + box_ind[2] + textline_contour[:,1] = textline_contour[:,1] + box_ind[0] + return textline_contour + + def return_list_of_contours_with_desired_order(self, ls_cons, sorted_indexes): + return [ls_cons[sorted_indexes[index]] for index in range(len(sorted_indexes))] + + def return_it_in_two_groups(self, x_differential): + split = [ind if x_differential[ind]!=x_differential[ind+1] else -1 + for ind in range(len(x_differential)-1)] + split_masked = list( np.array(split[:])[np.array(split[:])!=-1] ) + if 0 not in split_masked: + split_masked.insert(0, -1) + split_masked.append(len(x_differential)-1) + + split_masked = np.array(split_masked) +1 + + sums = [np.sum(x_differential[split_masked[ind]:split_masked[ind+1]]) + for ind in range(len(split_masked)-1)] + + indexes_to_bec_changed = [ind if (np.abs(sums[ind-1]) > np.abs(sums[ind]) and + np.abs(sums[ind+1]) > np.abs(sums[ind])) else -1 + for ind in range(1,len(sums)-1)] + indexes_to_bec_changed_filtered = np.array(indexes_to_bec_changed)[np.array(indexes_to_bec_changed)!=-1] + + x_differential_new = np.copy(x_differential) + for i in indexes_to_bec_changed_filtered: + i_slice = slice(split_masked[i], split_masked[i+1]) + x_differential_new[i_slice] = -1 * np.array(x_differential)[i_slice] + + return x_differential_new + + def dilate_textregions_contours_textline_version(self, all_found_textline_polygons): + #print(all_found_textline_polygons) + for j in range(len(all_found_textline_polygons)): + for ij in range(len(all_found_textline_polygons[j])): + con_ind = all_found_textline_polygons[j][ij] + area = cv2.contourArea(con_ind) + con_ind = con_ind.astype(float) + + x_differential = np.diff( con_ind[:,0,0]) + y_differential = np.diff( con_ind[:,0,1]) + + x_differential = gaussian_filter1d(x_differential, 0.1) + y_differential = gaussian_filter1d(y_differential, 0.1) + + x_min = float(np.min( con_ind[:,0,0] )) + y_min = float(np.min( con_ind[:,0,1] )) + + x_max = float(np.max( con_ind[:,0,0] )) + y_max = float(np.max( con_ind[:,0,1] )) + + x_differential_mask_nonzeros = [ ind/abs(ind) if 
ind!=0 else ind for ind in x_differential] + y_differential_mask_nonzeros = [ ind/abs(ind) if ind!=0 else ind for ind in y_differential] + + abs_diff=abs(abs(x_differential)- abs(y_differential) ) + + inc_x = np.zeros(len(x_differential)+1) + inc_y = np.zeros(len(x_differential)+1) + + if (y_max-y_min) <= (x_max-x_min): + dilation_m1 = round(area / (x_max-x_min) * 0.12) else: - # header or drop-capital region - org_contours_indexes.extend(indexes_of_located_cont[i]) - return org_contours_indexes - else: - return ordered + dilation_m1 = round(area / (y_max-y_min) * 0.12) - def filter_contours_inside_a_bigger_one(self, contours, contours_d, shape, - marginal_cnts=None, type_contour="textregion"): - if type_contour == "textregion": - areas = np.array(list(map(cv2.contourArea, contours))) - areas = areas / float(np.prod(shape[:2])) - cx_main, cy_main = find_center_of_contours(contours) + if dilation_m1>8: + dilation_m1 = 8 + if dilation_m1<6: + dilation_m1 = 6 + #print(dilation_m1, 'dilation_m1') + dilation_m1 = 6 + dilation_m2 = int(dilation_m1/2.) +1 - contours = ensure_array(contours) - indices_small = np.flatnonzero(areas < 1e-3) - indices_large = np.flatnonzero(areas >= 1e-3) + for i in range(len(x_differential)): + if abs_diff[i]==0: + inc_x[i+1] = dilation_m2*(-1*y_differential_mask_nonzeros[i]) + inc_y[i+1] = dilation_m2*(x_differential_mask_nonzeros[i]) + elif abs_diff[i]!=0 and x_differential_mask_nonzeros[i]==0 and y_differential_mask_nonzeros[i]!=0: + inc_x[i+1]= dilation_m1*(-1*y_differential_mask_nonzeros[i]) + elif abs_diff[i]!=0 and x_differential_mask_nonzeros[i]!=0 and y_differential_mask_nonzeros[i]==0: + inc_y[i+1] = dilation_m1*(x_differential_mask_nonzeros[i]) - indices_drop = [] - for ind_small in indices_small: - results = [cv2.pointPolygonTest(contours[ind_large], - (cx_main[ind_small], - cy_main[ind_small]), - False) - for ind_large in indices_large] + elif abs_diff[i]!=0 and abs_diff[i]>=3: + if abs(x_differential[i])>abs(y_differential[i]): + inc_y[i+1] = dilation_m1*(x_differential_mask_nonzeros[i]) + else: + inc_x[i+1]= dilation_m1*(-1*y_differential_mask_nonzeros[i]) + else: + inc_x[i+1] = dilation_m2*(-1*y_differential_mask_nonzeros[i]) + inc_y[i+1] = dilation_m2*(x_differential_mask_nonzeros[i]) + + inc_x[0] = inc_x[-1] + inc_y[0] = inc_y[-1] + + con_scaled = con_ind*1 + + con_scaled[:,0, 0] = con_ind[:,0,0] + np.array(inc_x)[:] + con_scaled[:,0, 1] = con_ind[:,0,1] + np.array(inc_y)[:] + + con_scaled[:,0, 1][con_scaled[:,0, 1]<0] = 0 + con_scaled[:,0, 0][con_scaled[:,0, 0]<0] = 0 + + area_scaled = cv2.contourArea(con_scaled.astype(np.int32)) + + con_ind = con_ind.astype(np.int32) + + results = [cv2.pointPolygonTest(con_ind, (con_scaled[ind,0, 0], con_scaled[ind,0, 1]), False) + for ind in range(len(con_scaled[:,0, 1])) ] results = np.array(results) - if np.any(results == 1): - indices_drop.append(ind_small) - elif marginal_cnts: - results = [cv2.pointPolygonTest(contour, - (cx_main[ind_small], - cy_main[ind_small]), - False) - for contour in marginal_cnts] - results = np.array(results) - if np.any(results == 1): - indices_drop.append(ind_small) + #print(results,'results') + results[results==0] = 1 - contours = np.delete(contours, indices_drop, axis=0) - if len(contours_d): - contours_d = ensure_array(contours_d) - contours_d = np.delete(contours_d, indices_drop, axis=0) + diff_result = np.diff(results) - return contours, contours_d + indices_2 = [ind for ind in range(len(diff_result)) if diff_result[ind]==2] + indices_m2 = [ind for ind in 
range(len(diff_result)) if diff_result[ind]==-2] + + if results[0]==1: + con_scaled[:indices_m2[0]+1,0, 1] = con_ind[:indices_m2[0]+1,0,1] + con_scaled[:indices_m2[0]+1,0, 0] = con_ind[:indices_m2[0]+1,0,0] + #indices_2 = indices_2[1:] + indices_m2 = indices_m2[1:] + + if len(indices_2)>len(indices_m2): + con_scaled[indices_2[-1]+1:,0, 1] = con_ind[indices_2[-1]+1:,0,1] + con_scaled[indices_2[-1]+1:,0, 0] = con_ind[indices_2[-1]+1:,0,0] + indices_2 = indices_2[:-1] + + for ii in range(len(indices_2)): + con_scaled[indices_2[ii]+1:indices_m2[ii]+1,0, 1] = con_scaled[indices_2[ii],0, 1] + con_scaled[indices_2[ii]+1:indices_m2[ii]+1,0, 0] = con_scaled[indices_2[ii],0, 0] + + all_found_textline_polygons[j][ij][:,0,1] = con_scaled[:,0, 1] + all_found_textline_polygons[j][ij][:,0,0] = con_scaled[:,0, 0] + return all_found_textline_polygons + + def dilate_textregions_contours(self, all_found_textline_polygons): + #print(all_found_textline_polygons) + for j in range(len(all_found_textline_polygons)): + con_ind = all_found_textline_polygons[j] + #print(len(con_ind[:,0,0]),'con_ind[:,0,0]') + area = cv2.contourArea(con_ind) + con_ind = con_ind.astype(float) + + x_differential = np.diff( con_ind[:,0,0]) + y_differential = np.diff( con_ind[:,0,1]) + + x_differential = gaussian_filter1d(x_differential, 0.1) + y_differential = gaussian_filter1d(y_differential, 0.1) + + x_min = float(np.min( con_ind[:,0,0] )) + y_min = float(np.min( con_ind[:,0,1] )) + + x_max = float(np.max( con_ind[:,0,0] )) + y_max = float(np.max( con_ind[:,0,1] )) + + x_differential_mask_nonzeros = [ ind/abs(ind) if ind!=0 else ind for ind in x_differential] + y_differential_mask_nonzeros = [ ind/abs(ind) if ind!=0 else ind for ind in y_differential] + + abs_diff=abs(abs(x_differential)- abs(y_differential) ) + + inc_x = np.zeros(len(x_differential)+1) + inc_y = np.zeros(len(x_differential)+1) + + if (y_max-y_min) <= (x_max-x_min): + dilation_m1 = round(area / (x_max-x_min) * 0.12) + else: + dilation_m1 = round(area / (y_max-y_min) * 0.12) + + if dilation_m1>8: + dilation_m1 = 8 + if dilation_m1<6: + dilation_m1 = 6 + #print(dilation_m1, 'dilation_m1') + dilation_m1 = 6 + dilation_m2 = int(dilation_m1/2.) 
+1 + + for i in range(len(x_differential)): + if abs_diff[i]==0: + inc_x[i+1] = dilation_m2*(-1*y_differential_mask_nonzeros[i]) + inc_y[i+1] = dilation_m2*(x_differential_mask_nonzeros[i]) + elif abs_diff[i]!=0 and x_differential_mask_nonzeros[i]==0 and y_differential_mask_nonzeros[i]!=0: + inc_x[i+1]= dilation_m1*(-1*y_differential_mask_nonzeros[i]) + elif abs_diff[i]!=0 and x_differential_mask_nonzeros[i]!=0 and y_differential_mask_nonzeros[i]==0: + inc_y[i+1] = dilation_m1*(x_differential_mask_nonzeros[i]) + + elif abs_diff[i]!=0 and abs_diff[i]>=3: + if abs(x_differential[i])>abs(y_differential[i]): + inc_y[i+1] = dilation_m1*(x_differential_mask_nonzeros[i]) + else: + inc_x[i+1]= dilation_m1*(-1*y_differential_mask_nonzeros[i]) + else: + inc_x[i+1] = dilation_m2*(-1*y_differential_mask_nonzeros[i]) + inc_y[i+1] = dilation_m2*(x_differential_mask_nonzeros[i]) + + inc_x[0] = inc_x[-1] + inc_y[0] = inc_y[-1] + + con_scaled = con_ind*1 + + con_scaled[:,0, 0] = con_ind[:,0,0] + np.array(inc_x)[:] + con_scaled[:,0, 1] = con_ind[:,0,1] + np.array(inc_y)[:] + + con_scaled[:,0, 1][con_scaled[:,0, 1]<0] = 0 + con_scaled[:,0, 0][con_scaled[:,0, 0]<0] = 0 + + area_scaled = cv2.contourArea(con_scaled.astype(np.int32)) + + con_ind = con_ind.astype(np.int32) + + results = [cv2.pointPolygonTest(con_ind, (con_scaled[ind,0, 0], con_scaled[ind,0, 1]), False) + for ind in range(len(con_scaled[:,0, 1])) ] + results = np.array(results) + #print(results,'results') + results[results==0] = 1 + + diff_result = np.diff(results) + indices_2 = [ind for ind in range(len(diff_result)) if diff_result[ind]==2] + indices_m2 = [ind for ind in range(len(diff_result)) if diff_result[ind]==-2] + + if results[0]==1: + con_scaled[:indices_m2[0]+1,0, 1] = con_ind[:indices_m2[0]+1,0,1] + con_scaled[:indices_m2[0]+1,0, 0] = con_ind[:indices_m2[0]+1,0,0] + #indices_2 = indices_2[1:] + indices_m2 = indices_m2[1:] + + if len(indices_2)>len(indices_m2): + con_scaled[indices_2[-1]+1:,0, 1] = con_ind[indices_2[-1]+1:,0,1] + con_scaled[indices_2[-1]+1:,0, 0] = con_ind[indices_2[-1]+1:,0,0] + indices_2 = indices_2[:-1] + + for ii in range(len(indices_2)): + con_scaled[indices_2[ii]+1:indices_m2[ii]+1,0, 1] = con_scaled[indices_2[ii],0, 1] + con_scaled[indices_2[ii]+1:indices_m2[ii]+1,0, 0] = con_scaled[indices_2[ii],0, 0] + + all_found_textline_polygons[j][:,0,1] = con_scaled[:,0, 1] + all_found_textline_polygons[j][:,0,0] = con_scaled[:,0, 0] + return all_found_textline_polygons + + def dilate_textline_contours(self, all_found_textline_polygons): + for j in range(len(all_found_textline_polygons)): + for ij in range(len(all_found_textline_polygons[j])): + con_ind = all_found_textline_polygons[j][ij] + area = cv2.contourArea(con_ind) + + con_ind = con_ind.astype(float) + + x_differential = np.diff( con_ind[:,0,0]) + y_differential = np.diff( con_ind[:,0,1]) + + x_differential = gaussian_filter1d(x_differential, 3) + y_differential = gaussian_filter1d(y_differential, 3) + + x_min = float(np.min( con_ind[:,0,0] )) + y_min = float(np.min( con_ind[:,0,1] )) + + x_max = float(np.max( con_ind[:,0,0] )) + y_max = float(np.max( con_ind[:,0,1] )) + + x_differential_mask_nonzeros = [ ind/abs(ind) if ind!=0 else ind for ind in x_differential] + y_differential_mask_nonzeros = [ ind/abs(ind) if ind!=0 else ind for ind in y_differential] + + abs_diff=abs(abs(x_differential)- abs(y_differential) ) + + inc_x = np.zeros(len(x_differential)+1) + inc_y = np.zeros(len(x_differential)+1) + + if (y_max-y_min) <= (x_max-x_min): + dilation_m1 = round(area / 
(x_max-x_min) * 0.35) + else: + dilation_m1 = round(area / (y_max-y_min) * 0.35) + + if dilation_m1>12: + dilation_m1 = 12 + if dilation_m1<4: + dilation_m1 = 4 + #print(dilation_m1, 'dilation_m1') + dilation_m2 = int(dilation_m1/2.) +1 + + for i in range(len(x_differential)): + if abs_diff[i]==0: + inc_x[i+1] = dilation_m2*(-1*y_differential_mask_nonzeros[i]) + inc_y[i+1] = dilation_m2*(x_differential_mask_nonzeros[i]) + elif abs_diff[i]!=0 and x_differential_mask_nonzeros[i]==0 and y_differential_mask_nonzeros[i]!=0: + inc_x[i+1]= dilation_m1*(-1*y_differential_mask_nonzeros[i]) + elif abs_diff[i]!=0 and x_differential_mask_nonzeros[i]!=0 and y_differential_mask_nonzeros[i]==0: + inc_y[i+1] = dilation_m1*(x_differential_mask_nonzeros[i]) + + elif abs_diff[i]!=0 and abs_diff[i]>=3: + if abs(x_differential[i])>abs(y_differential[i]): + inc_y[i+1] = dilation_m1*(x_differential_mask_nonzeros[i]) + else: + inc_x[i+1]= dilation_m1*(-1*y_differential_mask_nonzeros[i]) + else: + inc_x[i+1] = dilation_m2*(-1*y_differential_mask_nonzeros[i]) + inc_y[i+1] = dilation_m2*(x_differential_mask_nonzeros[i]) + + inc_x[0] = inc_x[-1] + inc_y[0] = inc_y[-1] + + con_scaled = con_ind*1 + + con_scaled[:,0, 0] = con_ind[:,0,0] + np.array(inc_x)[:] + con_scaled[:,0, 1] = con_ind[:,0,1] + np.array(inc_y)[:] + + con_scaled[:,0, 1][con_scaled[:,0, 1]<0] = 0 + con_scaled[:,0, 0][con_scaled[:,0, 0]<0] = 0 + + con_ind = con_ind.astype(np.int32) + + results = [cv2.pointPolygonTest(con_ind, (con_scaled[ind,0, 0], con_scaled[ind,0, 1]), False) + for ind in range(len(con_scaled[:,0, 1])) ] + results = np.array(results) + results[results==0] = 1 + + diff_result = np.diff(results) + + indices_2 = [ind for ind in range(len(diff_result)) if diff_result[ind]==2] + indices_m2 = [ind for ind in range(len(diff_result)) if diff_result[ind]==-2] + + if results[0]==1: + con_scaled[:indices_m2[0]+1,0, 1] = con_ind[:indices_m2[0]+1,0,1] + con_scaled[:indices_m2[0]+1,0, 0] = con_ind[:indices_m2[0]+1,0,0] + indices_m2 = indices_m2[1:] + + if len(indices_2)>len(indices_m2): + con_scaled[indices_2[-1]+1:,0, 1] = con_ind[indices_2[-1]+1:,0,1] + con_scaled[indices_2[-1]+1:,0, 0] = con_ind[indices_2[-1]+1:,0,0] + indices_2 = indices_2[:-1] + + for ii in range(len(indices_2)): + con_scaled[indices_2[ii]+1:indices_m2[ii]+1,0, 1] = con_scaled[indices_2[ii],0, 1] + con_scaled[indices_2[ii]+1:indices_m2[ii]+1,0, 0] = con_scaled[indices_2[ii],0, 0] + + all_found_textline_polygons[j][ij][:,0,1] = con_scaled[:,0, 1] + all_found_textline_polygons[j][ij][:,0,0] = con_scaled[:,0, 0] + return all_found_textline_polygons + + def filter_contours_inside_a_bigger_one(self,contours, contours_d_ordered, image, marginal_cnts=None, type_contour="textregion"): + if type_contour=="textregion": + areas = [cv2.contourArea(contours[j]) for j in range(len(contours))] + area_tot = image.shape[0]*image.shape[1] + + M_main = [cv2.moments(contours[j]) + for j in range(len(contours))] + cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] + cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] + + areas_ratio = np.array(areas)/ area_tot + contours_index_small = [ind for ind in range(len(contours)) if areas_ratio[ind] < 1e-3] + contours_index_big = [ind for ind in range(len(contours)) if areas_ratio[ind] >= 1e-3] + + #contours_> = [contours[ind] for ind in contours_index_big] + indexes_to_be_removed = [] + for ind_small in contours_index_small: + results = [cv2.pointPolygonTest(contours[ind], 
(cx_main[ind_small], cy_main[ind_small]), False) + for ind in contours_index_big] + if marginal_cnts: + results_marginal = [cv2.pointPolygonTest(marginal_cnts[ind], (cx_main[ind_small], cy_main[ind_small]), False) + for ind in range(len(marginal_cnts))] + results_marginal = np.array(results_marginal) + + if np.any(results_marginal==1): + indexes_to_be_removed.append(ind_small) + + results = np.array(results) + + if np.any(results==1): + indexes_to_be_removed.append(ind_small) + + if len(indexes_to_be_removed)>0: + indexes_to_be_removed = np.unique(indexes_to_be_removed) + indexes_to_be_removed = np.sort(indexes_to_be_removed)[::-1] + for ind in indexes_to_be_removed: + contours.pop(ind) + if len(contours_d_ordered)>0: + contours_d_ordered.pop(ind) + + return contours, contours_d_ordered else: - contours_of_contours = [] - indexes_parent = [] - indexes_child = [] - for ind_region, textlines in enumerate(contours): - contours_of_contours.extend(textlines) - indexes_parent.extend([ind_region] * len(textlines)) - indexes_child.extend(list(range(len(textlines)))) + contours_txtline_of_all_textregions = [] + indexes_of_textline_tot = [] + index_textline_inside_textregion = [] - areas = np.array(list(map(cv2.contourArea, contours_of_contours))) - cx, cy = find_center_of_contours(contours_of_contours) + for jj in range(len(contours)): + contours_txtline_of_all_textregions = contours_txtline_of_all_textregions + contours[jj] - textline_in_textregion_index_to_del = {} - for i in range(len(contours_of_contours)): - args_other = np.setdiff1d(np.arange(len(contours_of_contours)), i) - areas_other = areas[args_other] - args_other_larger = args_other[areas_other > 1.5 * areas[i]] + ind_textline_inside_tr = list(range(len(contours[jj]))) + index_textline_inside_textregion = index_textline_inside_textregion + ind_textline_inside_tr + ind_ins = [jj] * len(contours[jj]) + indexes_of_textline_tot = indexes_of_textline_tot + ind_ins - for ind in args_other_larger: - if cv2.pointPolygonTest(contours_of_contours[ind], - (cx[i], cy[i]), False) == 1: - textline_in_textregion_index_to_del.setdefault( - indexes_parent[i], list()).append( - indexes_child[i]) + M_main_tot = [cv2.moments(contours_txtline_of_all_textregions[j]) + for j in range(len(contours_txtline_of_all_textregions))] + cx_main_tot = [(M_main_tot[j]["m10"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] + cy_main_tot = [(M_main_tot[j]["m01"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] - for where, which in textline_in_textregion_index_to_del.items(): - contours[where] = [line for idx, line in enumerate(contours[where]) - if idx not in which] + areas_tot = [cv2.contourArea(con_ind) for con_ind in contours_txtline_of_all_textregions] + area_tot_tot = image.shape[0]*image.shape[1] + + textregion_index_to_del = [] + textline_in_textregion_index_to_del = [] + for ij in range(len(contours_txtline_of_all_textregions)): + args_all = list(np.array(range(len(contours_txtline_of_all_textregions)))) + args_all.pop(ij) + + areas_without = np.array(areas_tot)[args_all] + area_of_con_interest = areas_tot[ij] + + args_with_bigger_area = np.array(args_all)[areas_without > 1.5*area_of_con_interest] + + if len(args_with_bigger_area)>0: + results = [cv2.pointPolygonTest(contours_txtline_of_all_textregions[ind], (cx_main_tot[ij], cy_main_tot[ij]), False) + for ind in args_with_bigger_area ] + results = np.array(results) + if np.any(results==1): + #print(indexes_of_textline_tot[ij], index_textline_inside_textregion[ij]) + 
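
# (Containment rule applied here: a textline is marked for removal when its
# centroid lies inside another textline whose area exceeds 1.5x its own;
# cv2.pointPolygonTest returns +1 inside, 0 on the contour, -1 outside.)
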
textregion_index_to_del.append(int(indexes_of_textline_tot[ij])) + textline_in_textregion_index_to_del.append(int(index_textline_inside_textregion[ij])) + #contours[int(indexes_of_textline_tot[ij])].pop(int(index_textline_inside_textregion[ij])) + + textregion_index_to_del = np.array(textregion_index_to_del) + textline_in_textregion_index_to_del = np.array(textline_in_textregion_index_to_del) + for ind_u_a_trs in np.unique(textregion_index_to_del): + textline_in_textregion_index_to_del_ind = textline_in_textregion_index_to_del[textregion_index_to_del==ind_u_a_trs] + textline_in_textregion_index_to_del_ind = np.sort(textline_in_textregion_index_to_del_ind)[::-1] + for ittrd in textline_in_textregion_index_to_del_ind: + contours[ind_u_a_trs].pop(ittrd) return contours - def return_indexes_of_contours_located_inside_another_list_of_contours( - self, contours, centersx_loc, centersy_loc, indexes_loc): - indexes = [] - centersx = [] - centersy = [] - for contour in contours: - results = np.array([cv2.pointPolygonTest(contour, (px, py), False) - for px, py in zip(centersx_loc, centersy_loc)]) - indexes_in = (results == 0) | (results == 1) - indexes.append(indexes_loc[indexes_in]) - centersx.append(centersx_loc[indexes_in]) - centersy.append(centersy_loc[indexes_in]) - - return indexes, centersx, centersy - def filter_contours_without_textline_inside( - self, contours_textregions, contours_textregions_d, - contours_textlines, slopes, conf_contours_textregions): + self, contours,text_con_org, contours_textline, contours_only_text_parent_d_ordered, conf_contours_textregions): + ###contours_txtline_of_all_textregions = [] + ###for jj in range(len(contours_textline)): + ###contours_txtline_of_all_textregions = contours_txtline_of_all_textregions + contours_textline[jj] - assert len(contours_textregions) == len(contours_textlines) - indices = [ind for ind, lines in enumerate(contours_textlines) - if len(lines)] - def filterfun(lis): - if len(lis) == 0: - return [] - return [lis[ind] for ind in indices] + ###M_main_textline = [cv2.moments(contours_txtline_of_all_textregions[j]) + ### for j in range(len(contours_txtline_of_all_textregions))] + ###cx_main_textline = [(M_main_textline[j]["m10"] / (M_main_textline[j]["m00"] + 1e-32)) + ### for j in range(len(M_main_textline))] + ###cy_main_textline = [(M_main_textline[j]["m01"] / (M_main_textline[j]["m00"] + 1e-32)) + ### for j in range(len(M_main_textline))] - return (filterfun(contours_textregions), - filterfun(contours_textregions_d), - filterfun(contours_textlines), - filterfun(slopes), - filterfun(conf_contours_textregions), - ) + ###M_main = [cv2.moments(contours[j]) for j in range(len(contours))] + ###cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] + ###cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] - def separate_marginals_to_left_and_right_and_order_from_top_to_down( - self, polygons_of_marginals, all_found_textline_polygons_marginals, - slopes_marginals, conf_marginals, mid_point_of_page_width): - cx_marg, cy_marg = find_center_of_contours(polygons_of_marginals) - cx_marg = ensure_array(cx_marg) - cy_marg = ensure_array(cy_marg) + ###contours_with_textline = [] + ###for ind_tr, con_tr in enumerate(contours): + ###results = [cv2.pointPolygonTest(con_tr, (cx_main_textline[index_textline_con], cy_main_textline[index_textline_con]), False) + ### for index_textline_con in range(len(contours_txtline_of_all_textregions)) ] + ###results = np.array(results) + ###if 
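
# Hedged sketch of the parallel-list pruning that
# filter_contours_without_textline_inside performs below: regions whose
# textline list came back empty are dropped from every aligned list at once,
# popping in reverse index order so earlier indices stay valid. Function and
# argument names here are illustrative.
def drop_empty_regions(textlines, *aligned_lists):
    empty = sorted((i for i, lines in enumerate(textlines) if not len(lines)),
                   reverse=True)
    for i in empty:
        textlines.pop(i)
        for lst in aligned_lists:
            if len(lst):
                lst.pop(i)
    return (textlines, *aligned_lists)
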
np.any(results==1): + ###contours_with_textline.append(con_tr) - def split(lis): - left, right = [], [] - for itm, prop in zip(lis, cx_marg < mid_point_of_page_width): - (left if prop else right).append(itm) - return left, right + textregion_index_to_del = [] + for index_textregion, textlines_textregion in enumerate(contours_textline): + if len(textlines_textregion)==0: + textregion_index_to_del.append(index_textregion) - cy_marg_left, cy_marg_right = split(cy_marg) - order_left = np.argsort(cy_marg_left) - order_right = np.argsort(cy_marg_right) + uniqe_args_trs = np.unique(textregion_index_to_del) + uniqe_args_trs_sorted = np.sort(uniqe_args_trs)[::-1] - def splitsort(lis): - left, right = split(lis) - return [left[i] for i in order_left], [right[i] for i in order_right] + for ind_u_a_trs in uniqe_args_trs_sorted: + conf_contours_textregions.pop(ind_u_a_trs) + contours.pop(ind_u_a_trs) + contours_textline.pop(ind_u_a_trs) + text_con_org.pop(ind_u_a_trs) + if len(contours_only_text_parent_d_ordered) > 0: + contours_only_text_parent_d_ordered.pop(ind_u_a_trs) - return (*splitsort(polygons_of_marginals), - *splitsort(all_found_textline_polygons_marginals), - *splitsort(slopes_marginals), - *splitsort(conf_marginals)) + return contours, text_con_org, conf_contours_textregions, contours_textline, contours_only_text_parent_d_ordered, np.array(range(len(contours))) - def run(self, - overwrite: bool = False, - image_filename: Optional[str] = None, - dir_in: Optional[str] = None, - dir_out: Optional[str] = None, - dir_of_cropped_images: Optional[str] = None, - dir_of_layout: Optional[str] = None, - dir_of_deskewed: Optional[str] = None, - dir_of_all: Optional[str] = None, - dir_save_page: Optional[str] = None, - num_jobs: int = 0, - halt_fail: float = 0, - ): + def dilate_textlines(self, all_found_textline_polygons): + for j in range(len(all_found_textline_polygons)): + for i in range(len(all_found_textline_polygons[j])): + con_ind = all_found_textline_polygons[j][i] + con_ind = con_ind.astype(float) + + x_differential = np.diff( con_ind[:,0,0]) + y_differential = np.diff( con_ind[:,0,1]) + + x_min = float(np.min( con_ind[:,0,0] )) + y_min = float(np.min( con_ind[:,0,1] )) + + x_max = float(np.max( con_ind[:,0,0] )) + y_max = float(np.max( con_ind[:,0,1] )) + + if (y_max - y_min) > (x_max - x_min) and (x_max - x_min)<70: + x_biger_than_x = np.abs(x_differential) > np.abs(y_differential) + mult = x_biger_than_x*x_differential + + arg_min_mult = np.argmin(mult) + arg_max_mult = np.argmax(mult) + + if y_differential[0]==0: + y_differential[0] = 0.1 + if y_differential[-1]==0: + y_differential[-1]= 0.1 + y_differential = [y_differential[ind] if y_differential[ind] != 0 + else 0.5 * (y_differential[ind-1] + y_differential[ind+1]) + for ind in range(len(y_differential))] + + if y_differential[0]==0.1: + y_differential[0] = y_differential[1] + if y_differential[-1]==0.1: + y_differential[-1] = y_differential[-2] + y_differential.append(y_differential[0]) + + y_differential = [-1 if y_differential[ind] < 0 else 1 + for ind in range(len(y_differential))] + y_differential = self.return_it_in_two_groups(y_differential) + y_differential = np.array(y_differential) + + con_scaled = con_ind*1 + con_scaled[:,0, 0] = con_ind[:,0,0] - 8*y_differential + con_scaled[arg_min_mult,0, 1] = con_ind[arg_min_mult,0,1] + 8 + con_scaled[arg_min_mult+1,0, 1] = con_ind[arg_min_mult+1,0,1] + 8 + + try: + con_scaled[arg_min_mult-1,0, 1] = con_ind[arg_min_mult-1,0,1] + 5 + con_scaled[arg_min_mult+2,0, 1] = 
con_ind[arg_min_mult+2,0,1] + 5 + except: + pass + + con_scaled[arg_max_mult,0, 1] = con_ind[arg_max_mult,0,1] - 8 + con_scaled[arg_max_mult+1,0, 1] = con_ind[arg_max_mult+1,0,1] - 8 + + try: + con_scaled[arg_max_mult-1,0, 1] = con_ind[arg_max_mult-1,0,1] - 5 + con_scaled[arg_max_mult+2,0, 1] = con_ind[arg_max_mult+2,0,1] - 5 + except: + pass + + else: + y_biger_than_x = np.abs(y_differential) > np.abs(x_differential) + mult = y_biger_than_x*y_differential + + arg_min_mult = np.argmin(mult) + arg_max_mult = np.argmax(mult) + + if x_differential[0]==0: + x_differential[0] = 0.1 + if x_differential[-1]==0: + x_differential[-1]= 0.1 + x_differential = [x_differential[ind] if x_differential[ind] != 0 + else 0.5 * (x_differential[ind-1] + x_differential[ind+1]) + for ind in range(len(x_differential))] + + if x_differential[0]==0.1: + x_differential[0] = x_differential[1] + if x_differential[-1]==0.1: + x_differential[-1] = x_differential[-2] + x_differential.append(x_differential[0]) + + x_differential = [-1 if x_differential[ind] < 0 else 1 + for ind in range(len(x_differential))] + x_differential = self.return_it_in_two_groups(x_differential) + x_differential = np.array(x_differential) + + con_scaled = con_ind*1 + con_scaled[:,0, 1] = con_ind[:,0,1] + 8*x_differential + con_scaled[arg_min_mult,0, 0] = con_ind[arg_min_mult,0,0] + 8 + con_scaled[arg_min_mult+1,0, 0] = con_ind[arg_min_mult+1,0,0] + 8 + + try: + con_scaled[arg_min_mult-1,0, 0] = con_ind[arg_min_mult-1,0,0] + 5 + con_scaled[arg_min_mult+2,0, 0] = con_ind[arg_min_mult+2,0,0] + 5 + except: + pass + + con_scaled[arg_max_mult,0, 0] = con_ind[arg_max_mult,0,0] - 8 + con_scaled[arg_max_mult+1,0, 0] = con_ind[arg_max_mult+1,0,0] - 8 + + try: + con_scaled[arg_max_mult-1,0, 0] = con_ind[arg_max_mult-1,0,0] - 5 + con_scaled[arg_max_mult+2,0, 0] = con_ind[arg_max_mult+2,0,0] - 5 + except: + pass + + con_scaled[:,0, 1][con_scaled[:,0, 1]<0] = 0 + con_scaled[:,0, 0][con_scaled[:,0, 0]<0] = 0 + + all_found_textline_polygons[j][i][:,0,1] = con_scaled[:,0, 1] + all_found_textline_polygons[j][i][:,0,0] = con_scaled[:,0, 0] + + return all_found_textline_polygons + + def delete_regions_without_textlines( + self, slopes, all_found_textline_polygons, boxes_text, txt_con_org, + contours_only_text_parent, index_by_text_par_con): + + slopes_rem = [] + all_found_textline_polygons_rem = [] + boxes_text_rem = [] + txt_con_org_rem = [] + contours_only_text_parent_rem = [] + index_by_text_par_con_rem = [] + + for i, ind_con in enumerate(all_found_textline_polygons): + if len(ind_con): + all_found_textline_polygons_rem.append(ind_con) + slopes_rem.append(slopes[i]) + boxes_text_rem.append(boxes_text[i]) + txt_con_org_rem.append(txt_con_org[i]) + contours_only_text_parent_rem.append(contours_only_text_parent[i]) + index_by_text_par_con_rem.append(index_by_text_par_con[i]) + + index_sort = np.argsort(index_by_text_par_con_rem) + indexes_new = np.array(range(len(index_by_text_par_con_rem))) + + index_by_text_par_con_rem_sort = [indexes_new[index_sort==j][0] + for j in range(len(index_by_text_par_con_rem))] + + return (slopes_rem, all_found_textline_polygons_rem, boxes_text_rem, txt_con_org_rem, + contours_only_text_parent_rem, index_by_text_par_con_rem_sort) + + def run(self, image_filename : Optional[str] = None, dir_in : Optional[str] = None, overwrite : bool = False): """ Get image and scales, then extract the page of scanned image """ self.logger.debug("enter run") t0_tot = time.time() - # Log enabled features directly - enabled_modes = [] - if 
self.full_layout: - enabled_modes.append("Full layout analysis") - if self.tables: - enabled_modes.append("Table detection") - if enabled_modes: - self.logger.info("Enabled modes: " + ", ".join(enabled_modes)) - if self.enable_plotting: - self.logger.info("Saving debug plots") - if dir_of_cropped_images: - self.logger.info(f"Saving cropped images to: {dir_of_cropped_images}") - if dir_of_layout: - self.logger.info(f"Saving layout plots to: {dir_of_layout}") - if dir_of_deskewed: - self.logger.info(f"Saving deskewed images to: {dir_of_deskewed}") - self.plotter = EynollahPlotter( - dir_out=dir_out, - dir_of_all=dir_of_all, - dir_save_page=dir_save_page, - dir_of_deskewed=dir_of_deskewed, - dir_of_cropped_images=dir_of_cropped_images, - dir_of_layout=dir_of_layout) - else: - self.plotter = None - if dir_in: - ls_imgs = [os.path.join(dir_in, image_filename) - for image_filename in filter(is_image_filename, - os.listdir(dir_in))] - with ProcessPoolExecutor(max_workers=num_jobs or None, - mp_context=mp.get_context('fork'), - initializer=_set_instance, - initargs=(self,) - ) as exe: - jobs = {} - mngr = mp.get_context('fork').Manager() - n_success = n_fail = 0 - for img_filename in ls_imgs: - logq = mngr.Queue() - jobs[exe.submit(_run_single, img_filename, - dir_out=dir_out, - overwrite=overwrite, - logq=logq)] = img_filename, logq - for job in as_completed(list(jobs)): - img_filename, logq = jobs[job] - loglistener = logging.handlers.QueueListener( - logq, *self.logger.handlers, respect_handler_level=False) - try: - loglistener.start() - job.result() - n_success += 1 - except: - self.logger.exception("Job %s failed", img_filename) - n_fail += 1 - if (halt_fail and - n_fail >= halt_fail * (len(jobs) if halt_fail < 1 else 1)): - self.logger.fatal("terminating after %d failures", n_fail) - for job in jobs: - job.cancel() - break - finally: - loglistener.stop() - # for img_filename, result in zip(ls_imgs, results) ... 
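
The batch driver in the removed lines above gives each submitted job its own multiprocessing queue and replays the worker's log records through the parent's handlers via logging.handlers.QueueListener once the job completes. A minimal, self-contained sketch of that pattern (this is not eynollah's actual code; work() and the file names are placeholders):

    import logging, logging.handlers
    import multiprocessing as mp
    from concurrent.futures import ProcessPoolExecutor, as_completed

    def work(path, logq):
        # Worker side: route records into the shared queue instead of real handlers.
        log = logging.getLogger("job")
        log.handlers = [logging.handlers.QueueHandler(logq)]
        log.propagate = False
        log.setLevel(logging.INFO)
        log.info("processing %s", path)

    if __name__ == "__main__":
        handler = logging.StreamHandler()
        ctx = mp.get_context("fork")  # 'fork' shares already-loaded state with workers (POSIX only)
        with ctx.Manager() as mngr, ProcessPoolExecutor(max_workers=2, mp_context=ctx) as exe:
            jobs = {}
            for path in ("a.png", "b.png"):
                logq = mngr.Queue()
                jobs[exe.submit(work, path, logq)] = logq
            for fut in as_completed(jobs):
                # Parent side: drain this job's queue through our own handler.
                listener = logging.handlers.QueueListener(jobs[fut], handler)
                listener.start()
                fut.result()
                listener.stop()

One queue per job keeps records from concurrent workers from interleaving, which is why the removed loop attaches a fresh QueueListener inside as_completed instead of sharing a single queue.
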
- self.logger.info("%d of %d jobs successful", n_success, len(jobs)) - self.logger.info("All jobs done in %.1fs", time.time() - t0_tot) + self.ls_imgs = os.listdir(dir_in) elif image_filename: - try: - self.run_single(image_filename, dir_out=dir_out, overwrite=overwrite) - except: - self.logger.exception("Job failed") + self.ls_imgs = [image_filename] else: raise ValueError("run requires either a single image filename or a directory") - if self.enable_plotting: - del self.plotter + for img_filename in self.ls_imgs: + self.logger.info(img_filename) + t0 = time.time() - def run_single(self, - img_filename: str, - dir_out: Optional[str] = None, - overwrite: bool = False, - img_pil=None, - pcgts=None, - ) -> None: - label_text = 1 - label_imgs = 2 - label_imgs_fl = 5 - label_seps = 3 - label_seps_fl = 6 - label_marg = 4 - label_marg_fl = 8 - label_drop_fl = 4 - label_tabs = 10 + self.reset_file_name_dir(os.path.join(dir_in or "", img_filename)) + #print("text region early -11 in %.1fs", time.time() - t0) + if os.path.exists(self.writer.output_filename): + if overwrite: + self.logger.warning("will overwrite existing output file '%s'", self.writer.output_filename) + else: + self.logger.warning("will skip input for existing output file '%s'", self.writer.output_filename) + continue + pcgts = self.run_single() + self.logger.info("Job done in %.1fs", time.time() - t0) + #print("Job done in %.1fs" % (time.time() - t0)) + self.writer.write_pagexml(pcgts) + + if dir_in: + self.logger.info("All jobs done in %.1fs", time.time() - t0_tot) + print("all Job done in %.1fs", time.time() - t0_tot) + + def run_single(self): t0 = time.time() - self.logger.info(img_filename) + img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement(self.light_version) + self.logger.info("Enhancing took %.1fs ", time.time() - t0) + if self.extract_only_images: + text_regions_p_1, erosion_hurts, polygons_lines_xml, polygons_of_images, image_page, page_coord, cont_page = \ + self.get_regions_light_v_extract_only_images(img_res, is_image_enhanced, num_col_classifier) + ocr_all_textlines = None + pcgts = self.writer.build_pagexml_no_full_layout( + [], page_coord, [], [], [], [], + polygons_of_images, [], [], [], [], [], + cont_page, [], [], ocr_all_textlines, []) + if self.plotter: + self.plotter.write_images_into_directory(polygons_of_images, image_page) + return pcgts - image = self.cache_images(image_filename=img_filename, image_pil=img_pil) - writer = EynollahXmlWriter( - dir_out=dir_out, - image_filename=img_filename, - image_width=image['img'].shape[1], - image_height=image['img'].shape[0], - curved_line=self.curved_line, - pcgts=pcgts) - - if os.path.exists(writer.output_filename): - if overwrite: - self.logger.warning("will overwrite existing output file '%s'", writer.output_filename) - else: - self.logger.warning("will skip input for existing output file '%s'", writer.output_filename) - return - - self.logger.info(f"Processing file: {writer.image_filename}") - self.logger.info("Step 1/5: Image Enhancement") - - num_col_classifier, num_column_is_classified = self.run_enhancement(image) - writer.scale_x = image['scale_x'] - writer.scale_y = image['scale_y'] - - self.logger.info(f"Image: {image['img_res'].shape[1]}x{image['img_res'].shape[0]}, " - f"scale {image['scale_x']:.1f}x{image['scale_y']:.1f}, " - f"{image['dpi']} DPI, {num_col_classifier} columns") - self.logger.info(f"Enhancement complete ({time.time() - t0:.1f}s)") - - t1 = time.time() - page_coord, cont_page, image_page, 
mask_page = self.extract_page(image) - if not self.ignore_page_extraction: - self.logger.debug("Cropped page is %dx%d", image_page.shape[1], image_page.shape[0]) - self.logger.info("Cropping took %.1fs", time.time() - t1) - if self.plotter: - self.plotter.save_page_image(image_page, image['name']) - - # Basic Processing Mode if self.skip_layout_and_reading_order: - self.logger.info("Step 2/5: Basic Processing Mode") - self.logger.info("Skipping layout analysis and reading order detection") + _ ,_, _, textline_mask_tot_ea, img_bin_light, _ = \ + self.get_regions_light_v(img_res, is_image_enhanced, num_col_classifier, + skip_layout_and_reading_order=self.skip_layout_and_reading_order) - _, _, _, _, _, textline_mask_tot_ea, _, _ = \ - self.get_early_layout(image_page, num_col_classifier) + page_coord, image_page, textline_mask_tot_ea, img_bin_light, cont_page = \ + self.run_graphics_and_columns_without_layout(textline_mask_tot_ea, img_bin_light) - textline_mask_tot_ea *= mask_page - textline_cnt, textline_hir = return_contours_of_image(textline_mask_tot_ea) + + ##all_found_textline_polygons =self.scale_contours_new(textline_mask_tot_ea) + + cnt_clean_rot_raw, hir_on_cnt_clean_rot = return_contours_of_image(textline_mask_tot_ea) all_found_textline_polygons = filter_contours_area_of_image( - textline_mask_tot_ea, textline_cnt, textline_hir, max_area=1, min_area=0.00001) + textline_mask_tot_ea, cnt_clean_rot_raw, hir_on_cnt_clean_rot, max_area=1, min_area=0.00001) - cx_textlines, cy_textlines = find_center_of_contours(all_found_textline_polygons) - w_h_textlines = [cv2.boundingRect(polygon)[2:] - for polygon in all_found_textline_polygons] - all_found_textline_polygons = self.get_textlines_of_a_textregion_sorted( - #all_found_textline_polygons[::-1] - all_found_textline_polygons, cx_textlines, cy_textlines, w_h_textlines) - all_found_textline_polygons = [all_found_textline_polygons] - all_found_textline_polygons = dilate_textline_contours(all_found_textline_polygons) + all_found_textline_polygons=[ all_found_textline_polygons ] + + all_found_textline_polygons = self.dilate_textregions_contours_textline_version( + all_found_textline_polygons) all_found_textline_polygons = self.filter_contours_inside_a_bigger_one( - all_found_textline_polygons, None, None, type_contour="textline") + all_found_textline_polygons, None, textline_mask_tot_ea, type_contour="textline") - pcgts = writer.build_pagexml_no_full_layout( - num_col=num_col_classifier, - found_polygons_text_region=cont_page, - page_coord=page_coord, - page_slope=0, - order_of_texts=[0], - all_found_textline_polygons=all_found_textline_polygons, - found_polygons_images=[], - found_polygons_tables=[], - found_polygons_marginals_left=[], - found_polygons_marginals_right=[], - all_found_textline_polygons_marginals_left=[], - all_found_textline_polygons_marginals_right=[], - slopes=[0], - slopes_marginals_left=[], - slopes_marginals_right=[], - cont_page=cont_page, - polygons_seplines=[], - conf_textregions=[0], - ) - self.logger.info("Basic processing complete") - writer.write_pagexml(pcgts) - self.logger.info("Job done in %.1fs", time.time() - t0) - return + order_text_new = [0] + slopes =[0] + id_of_texts_tot =['region_0001'] + + polygons_of_images = [] + slopes_marginals = [] + polygons_of_marginals = [] + all_found_textline_polygons_marginals = [] + all_box_coord_marginals = [] + polygons_lines_xml = [] + contours_tables = [] + ocr_all_textlines = None + conf_contours_textregions =None + pcgts = self.writer.build_pagexml_no_full_layout( + 
cont_page, page_coord, order_text_new, id_of_texts_tot, + all_found_textline_polygons, page_coord, polygons_of_images, polygons_of_marginals, + all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, + cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines, conf_contours_textregions) + return pcgts + + #print("text region early -1 in %.1fs", time.time() - t0) t1 = time.time() - self.logger.info("Step 2/5: Layout Analysis") + if self.light_version: + text_regions_p_1 ,erosion_hurts, polygons_lines_xml, textline_mask_tot_ea, img_bin_light, confidence_matrix = \ + self.get_regions_light_v(img_res, is_image_enhanced, num_col_classifier) + #print("text region early -2 in %.1fs", time.time() - t0) - (erosion_hurts, - polygons_seplines, - polygons_text_early, - regions_without_separators, - text_regions_p, - textline_mask_tot_ea, - regions_confidence, - textline_confidence) = self.get_early_layout(image['img_res'], num_col_classifier) - t2 = time.time() - self.logger.info("Early layout took %.1fs", t2 - t1) - if self.plotter: - self.plotter.save_plot_of_textlines(textline_mask_tot_ea, image['img_res'], image['name']) + if num_col_classifier == 1 or num_col_classifier ==2: + if num_col_classifier == 1: + img_w_new = 1000 + else: + img_w_new = 1300 + img_h_new = img_w_new * textline_mask_tot_ea.shape[0] // textline_mask_tot_ea.shape[1] - if num_col_classifier == 1 or num_col_classifier ==2: - if num_col_classifier == 1: - img_w_new = 1000 + textline_mask_tot_ea_deskew = resize_image(textline_mask_tot_ea,img_h_new, img_w_new ) + + slope_deskew, slope_first = self.run_deskew(textline_mask_tot_ea_deskew) else: - img_w_new = 1300 - img_h_new = img_w_new * textline_mask_tot_ea.shape[0] // textline_mask_tot_ea.shape[1] - - textline_mask_tot_ea_deskew = resize_image(textline_mask_tot_ea,img_h_new, img_w_new ) - slope_deskew = self.run_deskew(textline_mask_tot_ea_deskew) + slope_deskew, slope_first = self.run_deskew(textline_mask_tot_ea) + #print("text region early -2,5 in %.1fs", time.time() - t0) + #self.logger.info("Textregion detection took %.1fs ", time.time() - t1t) + num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, \ + text_regions_p_1, cont_page, table_prediction, textline_mask_tot_ea, img_bin_light = \ + self.run_graphics_and_columns_light(text_regions_p_1, textline_mask_tot_ea, + num_col_classifier, num_column_is_classified, erosion_hurts, img_bin_light) + #self.logger.info("run graphics %.1fs ", time.time() - t1t) + #print("text region early -3 in %.1fs", time.time() - t0) + textline_mask_tot_ea_org = np.copy(textline_mask_tot_ea) + #print("text region early -4 in %.1fs", time.time() - t0) else: - slope_deskew = self.run_deskew(textline_mask_tot_ea) - # if ratio of text regions to page area is smaller that 30%, - # then ignore skew angle above 45° - if (abs(slope_deskew) > 45 and - ((text_regions_p == label_text).sum()) <= 0.3 * image_page.size): - slope_deskew = 0 - if self.plotter: - self.plotter.save_deskewed_image(slope_deskew, image['img'], image['name']) - t3 = time.time() - self.logger.info("Deskewing took %.1fs", t3 - t2) + text_regions_p_1 ,erosion_hurts, polygons_lines_xml = \ + self.get_regions_from_xy_2models(img_res, is_image_enhanced, + num_col_classifier) + self.logger.info("Textregion detection took %.1fs ", time.time() - t1) + confidence_matrix = np.zeros((text_regions_p_1.shape[:2])) - page_coord = np.array(page_coord) - page_box = (slice(*page_coord[:2]), - slice(*page_coord[2:])) - 
polygons_seplines = [contour - page_coord[::2][::-1][np.newaxis, np.newaxis] - for contour in polygons_seplines] - regions_without_separators = regions_without_separators[page_box] * mask_page - text_regions_p = text_regions_p[page_box] * mask_page - textline_mask_tot_ea = textline_mask_tot_ea[page_box] * mask_page + t1 = time.time() + num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, \ + text_regions_p_1, cont_page, table_prediction = \ + self.run_graphics_and_columns(text_regions_p_1, num_col_classifier, num_column_is_classified, erosion_hurts) + self.logger.info("Graphics detection took %.1fs ", time.time() - t1) + #self.logger.info('cont_page %s', cont_page) + #plt.imshow(table_prediction) + #plt.show() - num_col, num_col_classifier = \ - self.run_columns(text_regions_p, - num_col_classifier, num_column_is_classified, - erosion_hurts) - t4 = time.time() - textline_mask_tot_ea_org = np.copy(textline_mask_tot_ea) + if not num_col: + self.logger.info("No columns detected, outputting an empty PAGE-XML") + ocr_all_textlines = None + pcgts = self.writer.build_pagexml_no_full_layout( + [], page_coord, [], [], [], [], [], [], [], [], [], [], + cont_page, [], [], ocr_all_textlines, []) + return pcgts - if not num_col and len(polygons_text_early) == 0 or not image_page.size: - self.logger.info("No columns detected - generating empty PAGE-XML") - - pcgts = writer.build_pagexml_no_full_layout( - num_col=0, - found_polygons_text_region=[], - page_coord=page_coord, - page_slope=slope_deskew, - order_of_texts=[], - all_found_textline_polygons=[], - found_polygons_images=[], - found_polygons_tables=[], - found_polygons_marginals_left=[], - found_polygons_marginals_right=[], - all_found_textline_polygons_marginals_left=[], - all_found_textline_polygons_marginals_right=[], - slopes=[], - slopes_marginals_left=[], - slopes_marginals_right=[], - cont_page=cont_page, - polygons_seplines=[], - ) - writer.write_pagexml(pcgts) - self.logger.info("Job done in %.1fs", time.time() - t0) - return - - if num_col_classifier in (1,2): - img_h_org, img_w_org = text_regions_p.shape + #print("text region early in %.1fs", time.time() - t0) + t1 = time.time() + if not self.light_version: + textline_mask_tot_ea = self.run_textline(image_page) + self.logger.info("textline detection took %.1fs", time.time() - t1) + t1 = time.time() + slope_deskew, slope_first = self.run_deskew(textline_mask_tot_ea) + self.logger.info("deskewing took %.1fs", time.time() - t1) + elif num_col_classifier in (1,2): + org_h_l_m = textline_mask_tot_ea.shape[0] + org_w_l_m = textline_mask_tot_ea.shape[1] if num_col_classifier == 1: img_w_new = 2000 else: img_w_new = 2400 - img_h_new = img_w_new * img_h_org // img_w_org + img_h_new = img_w_new * textline_mask_tot_ea.shape[0] // textline_mask_tot_ea.shape[1] - text_regions_p_new = resize_image(text_regions_p, img_h_new, img_w_new) - self.run_marginals(num_col_classifier, slope_deskew, text_regions_p_new) - text_regions_p = resize_image(text_regions_p_new, img_h_org, img_w_org) + image_page = resize_image(image_page,img_h_new, img_w_new ) + textline_mask_tot_ea = resize_image(textline_mask_tot_ea,img_h_new, img_w_new ) + mask_images = resize_image(mask_images,img_h_new, img_w_new ) + mask_lines = resize_image(mask_lines,img_h_new, img_w_new ) + text_regions_p_1 = resize_image(text_regions_p_1,img_h_new, img_w_new ) + table_prediction = resize_image(table_prediction,img_h_new, img_w_new ) - t5 = time.time() - self.logger.info("Marginalia extraction took 
%.1fs", t5 - t4) + textline_mask_tot, text_regions_p, image_page_rotated = \ + self.run_marginals(image_page, textline_mask_tot_ea, mask_images, mask_lines, + num_col_classifier, slope_deskew, text_regions_p_1, table_prediction) + + if self.light_version and num_col_classifier in (1,2): + image_page = resize_image(image_page,org_h_l_m, org_w_l_m ) + textline_mask_tot_ea = resize_image(textline_mask_tot_ea,org_h_l_m, org_w_l_m ) + text_regions_p = resize_image(text_regions_p,org_h_l_m, org_w_l_m ) + textline_mask_tot = resize_image(textline_mask_tot,org_h_l_m, org_w_l_m ) + text_regions_p_1 = resize_image(text_regions_p_1,org_h_l_m, org_w_l_m ) + table_prediction = resize_image(table_prediction,org_h_l_m, org_w_l_m ) + image_page_rotated = resize_image(image_page_rotated,org_h_l_m, org_w_l_m ) + + self.logger.info("detection of marginals took %.1fs", time.time() - t1) + #print("text region early 2 marginal in %.1fs", time.time() - t0) + ## birdan sora chock chakir + t1 = time.time() + if not self.full_layout: + polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, \ + boxes, boxes_d, polygons_of_marginals, contours_tables = \ + self.run_boxes_no_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, + num_col_classifier, table_prediction, erosion_hurts) + ###polygons_of_marginals = self.dilate_textregions_contours(polygons_of_marginals) else: - t5 = time.time() + polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, \ + regions_fully, regions_without_separators, polygons_of_marginals, contours_tables = \ + self.run_boxes_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, + num_col_classifier, img_only_regions, table_prediction, erosion_hurts, + img_bin_light if self.light_version else None) + ###polygons_of_marginals = self.dilate_textregions_contours(polygons_of_marginals) + if self.light_version: + drop_label_in_full_layout = 4 + textline_mask_tot_ea_org[img_revised_tab==drop_label_in_full_layout] = 0 - if self.plotter: - self.plotter.save_plot_of_layout_main_all(text_regions_p, image_page, image['name']) - self.plotter.save_plot_of_layout_main(text_regions_p, image_page, image['name']) - regions_fully, regionsfl_confidence = \ - self.get_full_layout(image_page, text_regions_p, num_col_classifier) + text_only = ((img_revised_tab[:, :] == 1)) * 1 + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: + text_only_d = ((text_regions_p_1_n[:, :] == 1)) * 1 - if self.full_layout: - regions_without_separators[text_regions_p == label_drop_fl] = 1 # also cover in reading-order - textline_mask_tot_ea_org[text_regions_p == label_drop_fl] = 0 # skip for textlines - textline_mask_tot_ea[text_regions_p == label_drop_fl] = 1 # needed for reading order - polygons_of_drop_capitals = return_contours_of_interested_region(text_regions_p, - label_drop_fl, - min_area=0.00003) - conf_drops = get_region_confidences(polygons_of_drop_capitals, regionsfl_confidence) - t6 = time.time() - self.logger.info("Full layout took %.1fs", t6 - t5) + #print("text region early 2 in %.1fs", time.time() - t0) + ###min_con_area = 0.000005 + contours_only_text, hir_on_text = return_contours_of_image(text_only) + contours_only_text_parent = return_parent_contours(contours_only_text, hir_on_text) + if len(contours_only_text_parent) > 0: + areas_cnt_text = np.array([cv2.contourArea(c) for c in contours_only_text_parent]) + areas_cnt_text = areas_cnt_text / float(text_only.shape[0] * 
text_only.shape[1]) + #self.logger.info('areas_cnt_text %s', areas_cnt_text) + contours_biggest = contours_only_text_parent[np.argmax(areas_cnt_text)] + contours_only_text_parent = [c for jz, c in enumerate(contours_only_text_parent) + if areas_cnt_text[jz] > MIN_AREA_REGION] + areas_cnt_text_parent = [area for area in areas_cnt_text if area > MIN_AREA_REGION] + index_con_parents = np.argsort(areas_cnt_text_parent) + + contours_only_text_parent = self.return_list_of_contours_with_desired_order( + contours_only_text_parent, index_con_parents) + + ##try: + ##contours_only_text_parent = \ + ##list(np.array(contours_only_text_parent,dtype=object)[index_con_parents]) + ##except: + ##contours_only_text_parent = \ + ##list(np.array(contours_only_text_parent,dtype=np.int32)[index_con_parents]) + ##areas_cnt_text_parent = list(np.array(areas_cnt_text_parent)[index_con_parents]) + areas_cnt_text_parent = self.return_list_of_contours_with_desired_order( + areas_cnt_text_parent, index_con_parents) + + cx_bigest_big, cy_biggest_big, _, _, _, _, _ = find_new_features_of_contours([contours_biggest]) + cx_bigest, cy_biggest, _, _, _, _, _ = find_new_features_of_contours(contours_only_text_parent) + + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: + contours_only_text_d, hir_on_text_d = return_contours_of_image(text_only_d) + contours_only_text_parent_d = return_parent_contours(contours_only_text_d, hir_on_text_d) + + areas_cnt_text_d = np.array([cv2.contourArea(c) for c in contours_only_text_parent_d]) + areas_cnt_text_d = areas_cnt_text_d / float(text_only_d.shape[0] * text_only_d.shape[1]) + + if len(areas_cnt_text_d)>0: + contours_biggest_d = contours_only_text_parent_d[np.argmax(areas_cnt_text_d)] + index_con_parents_d = np.argsort(areas_cnt_text_d) + contours_only_text_parent_d = self.return_list_of_contours_with_desired_order( + contours_only_text_parent_d, index_con_parents_d) + #try: + #contours_only_text_parent_d = \ + #list(np.array(contours_only_text_parent_d,dtype=object)[index_con_parents_d]) + #except: + #contours_only_text_parent_d = \ + #list(np.array(contours_only_text_parent_d,dtype=np.int32)[index_con_parents_d]) + #areas_cnt_text_d = list(np.array(areas_cnt_text_d)[index_con_parents_d]) + areas_cnt_text_d = self.return_list_of_contours_with_desired_order( + areas_cnt_text_d, index_con_parents_d) + + cx_bigest_d_big, cy_biggest_d_big, _, _, _, _, _ = find_new_features_of_contours([contours_biggest_d]) + cx_bigest_d, cy_biggest_d, _, _, _, _, _ = find_new_features_of_contours(contours_only_text_parent_d) + try: + if len(cx_bigest_d) >= 5: + cx_bigest_d_last5 = cx_bigest_d[-5:] + cy_biggest_d_last5 = cy_biggest_d[-5:] + dists_d = [math.sqrt((cx_bigest_big[0] - cx_bigest_d_last5[j]) ** 2 + + (cy_biggest_big[0] - cy_biggest_d_last5[j]) ** 2) + for j in range(len(cy_biggest_d_last5))] + ind_largest = len(cx_bigest_d) -5 + np.argmin(dists_d) + else: + cx_bigest_d_last5 = cx_bigest_d[-len(cx_bigest_d):] + cy_biggest_d_last5 = cy_biggest_d[-len(cx_bigest_d):] + dists_d = [math.sqrt((cx_bigest_big[0]-cx_bigest_d_last5[j])**2 + + (cy_biggest_big[0]-cy_biggest_d_last5[j])**2) + for j in range(len(cy_biggest_d_last5))] + ind_largest = len(cx_bigest_d) - len(cx_bigest_d) + np.argmin(dists_d) + + cx_bigest_d_big[0] = cx_bigest_d[ind_largest] + cy_biggest_d_big[0] = cy_biggest_d[ind_largest] + except Exception as why: + self.logger.error(why) + + (h, w) = text_only.shape[:2] + center = (w // 2.0, h // 2.0) + M = cv2.getRotationMatrix2D(center, slope_deskew, 1.0) + M_22 = np.array(M)[:2, :2] + p_big = 
np.dot(M_22, [cx_bigest_big, cy_biggest_big]) + x_diff = p_big[0] - cx_bigest_d_big + y_diff = p_big[1] - cy_biggest_d_big + + contours_only_text_parent_d_ordered = [] + for i in range(len(contours_only_text_parent)): + p = np.dot(M_22, [cx_bigest[i], cy_biggest[i]]) + p[0] = p[0] - x_diff[0] + p[1] = p[1] - y_diff[0] + dists = [math.sqrt((p[0] - cx_bigest_d[j]) ** 2 + + (p[1] - cy_biggest_d[j]) ** 2) + for j in range(len(cx_bigest_d))] + contours_only_text_parent_d_ordered.append(contours_only_text_parent_d[np.argmin(dists)]) + # img2=np.zeros((text_only.shape[0],text_only.shape[1],3)) + # img2=cv2.fillPoly(img2,pts=[contours_only_text_parent_d[np.argmin(dists)]] ,color=(1,1,1)) + # plt.imshow(img2[:,:,0]) + # plt.show() + else: + contours_only_text_parent_d_ordered = [] + contours_only_text_parent_d = [] + contours_only_text_parent = [] + + else: + contours_only_text_parent_d_ordered = [] + contours_only_text_parent_d = [] + #contours_only_text_parent = [] + if not len(contours_only_text_parent): + # stop early + empty_marginals = [[]] * len(polygons_of_marginals) + if self.full_layout: + pcgts = self.writer.build_pagexml_full_layout( + [], [], page_coord, [], [], [], [], [], [], + polygons_of_images, contours_tables, [], + polygons_of_marginals, empty_marginals, empty_marginals, [], [], [], + cont_page, polygons_lines_xml, [], [], []) + else: + pcgts = self.writer.build_pagexml_no_full_layout( + [], page_coord, [], [], [], [], + polygons_of_images, + polygons_of_marginals, empty_marginals, empty_marginals, [], [], + cont_page, polygons_lines_xml, contours_tables, [], []) + return pcgts + + + ## check the ro order + + + + + #print("text region early 3 in %.1fs", time.time() - t0) + if self.light_version: + contours_only_text_parent = self.dilate_textregions_contours( + contours_only_text_parent) + contours_only_text_parent , contours_only_text_parent_d_ordered = self.filter_contours_inside_a_bigger_one( + contours_only_text_parent, contours_only_text_parent_d_ordered, text_only, marginal_cnts=polygons_of_marginals) + #print("text region early 3.5 in %.1fs", time.time() - t0) + txt_con_org , conf_contours_textregions = get_textregion_contours_in_org_image_light( + contours_only_text_parent, self.image, slope_first, confidence_matrix, map=self.executor.map) + #txt_con_org = self.dilate_textregions_contours(txt_con_org) + #contours_only_text_parent = self.dilate_textregions_contours(contours_only_text_parent) else: - t6 = time.time() - self.logger.info("Step 3/5: Contour extraction") - - min_area_mar = 0.00001 - marginal_mask = (text_regions_p == label_marg_fl).astype(np.uint8) - marginal_mask = cv2.dilate(marginal_mask, KERNEL, iterations=2) - polygons_of_marginals = return_contours_of_interested_region(marginal_mask, 1, - min_area_mar) - polygons_of_tables = return_contours_of_interested_region(text_regions_p, label_tabs, - min_area_mar) - polygons_of_images = return_contours_of_interested_region(text_regions_p, label_imgs_fl) - conf_marginals = get_region_confidences(polygons_of_marginals, regions_confidence) - conf_images = get_region_confidences(polygons_of_images, regions_confidence) - conf_tables = get_region_confidences(polygons_of_tables, regions_confidence) - - polygons_of_textregions = return_contours_of_interested_region(text_regions_p, label_text, - min_area=MIN_AREA_REGION) - - if np.abs(slope_deskew) >= SLOPE_THRESHOLD and not self.reading_order_machine_based: - (text_regions_p_d, - textline_mask_tot_ea_d, - regions_without_separators_d) = self.get_deskewed_masks( - 
slope_deskew, - text_regions_p, - textline_mask_tot_ea, - regions_without_separators) - - polygons_of_textregions_d = return_contours_of_interested_region(text_regions_p_d, label_text, - min_area=MIN_AREA_REGION) - if (len(polygons_of_textregions) and - len(polygons_of_textregions_d)): - polygons_of_textregions_d = \ - match_deskewed_contours( - slope_deskew, - polygons_of_textregions, - polygons_of_textregions_d, - text_regions_p.shape, - text_regions_p_d.shape) - else: - polygons_of_textregions_d = [] - (polygons_of_textregions, - polygons_of_textregions_d) = self.filter_contours_inside_a_bigger_one( - polygons_of_textregions, - polygons_of_textregions_d, - text_regions_p.shape, - marginal_cnts=polygons_of_marginals) - polygons_of_textregions = dilate_textregion_contours(polygons_of_textregions) - conf_textregions = get_region_confidences(polygons_of_textregions, regions_confidence) - - if not len(polygons_of_textregions): - polygons_of_textregions = polygons_of_marginals - polygons_of_marginals = [] - conf_textregions = conf_marginals - conf_marginals = [] - t7 = time.time() - self.logger.info("Region contours took %.1fs", t7 - t6) - + txt_con_org , conf_contours_textregions = get_textregion_contours_in_org_image_light( + contours_only_text_parent, self.image, slope_first, confidence_matrix, map=self.executor.map) + #print("text region early 4 in %.1fs", time.time() - t0) + boxes_text, _ = get_text_region_boxes_by_given_contours(contours_only_text_parent) + boxes_marginals, _ = get_text_region_boxes_by_given_contours(polygons_of_marginals) + #print("text region early 5 in %.1fs", time.time() - t0) + ## birdan sora chock chakir if not self.curved_line: - self.logger.info("Mode: Light line detection") - all_found_textline_polygons, slopes = \ - self.get_slopes_and_deskew_new_light2( - polygons_of_textregions, textline_mask_tot_ea_org, - slope_deskew) - all_found_textline_polygons_marginals, slopes_marginals = \ - self.get_slopes_and_deskew_new_light2( - polygons_of_marginals, textline_mask_tot_ea_org, - slope_deskew) + if self.light_version: + if self.textline_light: + all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, \ + all_box_coord, index_by_text_par_con, slopes = self.get_slopes_and_deskew_new_light2( + txt_con_org, contours_only_text_parent, textline_mask_tot_ea_org, + image_page_rotated, boxes_text, slope_deskew) + all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, \ + all_box_coord_marginals, _, slopes_marginals = self.get_slopes_and_deskew_new_light2( + polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea_org, + image_page_rotated, boxes_marginals, slope_deskew) - all_found_textline_polygons = dilate_textline_contours( - all_found_textline_polygons) - all_found_textline_polygons = self.filter_contours_inside_a_bigger_one( - all_found_textline_polygons, None, None, type_contour="textline") - all_found_textline_polygons_marginals = dilate_textline_contours( - all_found_textline_polygons_marginals) + #slopes, all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, index_by_text_par_con = \ + # self.delete_regions_without_textlines(slopes, all_found_textline_polygons, + # boxes_text, txt_con_org, contours_only_text_parent, index_by_text_par_con) + #slopes_marginals, all_found_textline_polygons_marginals, boxes_marginals, polygons_of_marginals, polygons_of_marginals, _ = \ + # self.delete_regions_without_textlines(slopes_marginals, all_found_textline_polygons_marginals, + # 
boxes_marginals, polygons_of_marginals, polygons_of_marginals, np.array(range(len(polygons_of_marginals)))) + #all_found_textline_polygons = self.dilate_textlines(all_found_textline_polygons) + #####all_found_textline_polygons = self.dilate_textline_contours(all_found_textline_polygons) + all_found_textline_polygons = self.dilate_textregions_contours_textline_version( + all_found_textline_polygons) + all_found_textline_polygons = self.filter_contours_inside_a_bigger_one( + all_found_textline_polygons, None, textline_mask_tot_ea_org, type_contour="textline") + all_found_textline_polygons_marginals = self.dilate_textregions_contours_textline_version( + all_found_textline_polygons_marginals) + contours_only_text_parent, txt_con_org, conf_contours_textregions, all_found_textline_polygons, contours_only_text_parent_d_ordered, \ + index_by_text_par_con = self.filter_contours_without_textline_inside( + contours_only_text_parent, txt_con_org, all_found_textline_polygons, contours_only_text_parent_d_ordered, conf_contours_textregions) + else: + textline_mask_tot_ea = cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=1) + all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, all_box_coord, \ + index_by_text_par_con, slopes = self.get_slopes_and_deskew_new_light( + txt_con_org, contours_only_text_parent, textline_mask_tot_ea, + image_page_rotated, boxes_text, slope_deskew) + all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, \ + all_box_coord_marginals, _, slopes_marginals = self.get_slopes_and_deskew_new_light( + polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea, + image_page_rotated, boxes_marginals, slope_deskew) + #all_found_textline_polygons = self.filter_contours_inside_a_bigger_one( + # all_found_textline_polygons, textline_mask_tot_ea_org, type_contour="textline") + else: + textline_mask_tot_ea = cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=1) + all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, \ + all_box_coord, index_by_text_par_con, slopes = self.get_slopes_and_deskew_new( + txt_con_org, contours_only_text_parent, textline_mask_tot_ea, + image_page_rotated, boxes_text, slope_deskew) + all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, \ + all_box_coord_marginals, _, slopes_marginals = self.get_slopes_and_deskew_new( + polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea, + image_page_rotated, boxes_marginals, slope_deskew) else: - self.logger.info("Mode: Curved line detection") - - textline_mask_tot_ea_erode = cv2.erode(textline_mask_tot_ea_org, kernel=KERNEL, iterations=2) - all_found_textline_polygons, slopes = \ - self.get_slopes_and_deskew_new_curved( - polygons_of_textregions, textline_mask_tot_ea_erode, - num_col_classifier, slope_deskew, image['name']) + scale_param = 1 + textline_mask_tot_ea_erode = cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=2) + all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, \ + all_box_coord, index_by_text_par_con, slopes = self.get_slopes_and_deskew_new_curved( + txt_con_org, contours_only_text_parent, textline_mask_tot_ea_erode, + image_page_rotated, boxes_text, text_only, + num_col_classifier, scale_param, slope_deskew) all_found_textline_polygons = small_textlines_to_parent_adherence2( all_found_textline_polygons, textline_mask_tot_ea, num_col_classifier) - all_found_textline_polygons_marginals, slopes_marginals = \ - 
self.get_slopes_and_deskew_new_curved( - polygons_of_marginals, textline_mask_tot_ea_erode, - num_col_classifier, slope_deskew, image['name']) + all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, \ + all_box_coord_marginals, _, slopes_marginals = self.get_slopes_and_deskew_new_curved( + polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea_erode, + image_page_rotated, boxes_marginals, text_only, + num_col_classifier, scale_param, slope_deskew) all_found_textline_polygons_marginals = small_textlines_to_parent_adherence2( all_found_textline_polygons_marginals, textline_mask_tot_ea, num_col_classifier) - (polygons_of_textregions, - polygons_of_textregions_d, - all_found_textline_polygons, - slopes, - conf_textregions) = \ - self.filter_contours_without_textline_inside( - polygons_of_textregions, - polygons_of_textregions_d, - all_found_textline_polygons, - slopes, - conf_textregions) - t8 = time.time() - self.logger.info("Line contours took %.1fs", t8 - t7) - - (polygons_of_marginals_left, - polygons_of_marginals_right, - all_found_textline_polygons_marginals_left, - all_found_textline_polygons_marginals_right, - slopes_marginals_left, - slopes_marginals_right, - conf_marginals_left, - conf_marginals_right) = \ - self.separate_marginals_to_left_and_right_and_order_from_top_to_down( - polygons_of_marginals, - all_found_textline_polygons_marginals, - slopes_marginals, - conf_marginals, - 0.5 * text_regions_p.shape[1]) - # FIXME: get_region_confidences w/ textline_confidence on all types of textlines... + #print("text region early 6 in %.1fs", time.time() - t0) if self.full_layout: - (text_regions_p, - polygons_of_textregions, - polygons_of_textregions_h, - polygons_of_textregions_d, - polygons_of_textregions_h_d, - all_found_textline_polygons, - all_found_textline_polygons_h, - slopes, - slopes_h, - conf_textregions, - conf_textregions_h) = split_textregion_main_vs_head( - text_regions_p, - regions_fully, - polygons_of_textregions, - polygons_of_textregions_d, - all_found_textline_polygons, - slopes, - conf_textregions) + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: + contours_only_text_parent_d_ordered = self.return_list_of_contours_with_desired_order( + contours_only_text_parent_d_ordered, index_by_text_par_con) + #try: + #contours_only_text_parent_d_ordered = \ + #list(np.array(contours_only_text_parent_d_ordered, dtype=np.int32)[index_by_text_par_con]) + #except: + #contours_only_text_parent_d_ordered = \ + #list(np.array(contours_only_text_parent_d_ordered, dtype=object)[index_by_text_par_con]) + else: + #takes long timee + contours_only_text_parent_d_ordered = None + if self.light_version: + fun = check_any_text_region_in_model_one_is_main_or_header_light + else: + fun = check_any_text_region_in_model_one_is_main_or_header + text_regions_p, contours_only_text_parent, contours_only_text_parent_h, all_box_coord, all_box_coord_h, \ + all_found_textline_polygons, all_found_textline_polygons_h, slopes, slopes_h, \ + contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered, \ + conf_contours_textregions, conf_contours_textregions_h = fun( + text_regions_p, regions_fully, contours_only_text_parent, + all_box_coord, all_found_textline_polygons, slopes, contours_only_text_parent_d_ordered, conf_contours_textregions) if self.plotter: - self.plotter.save_plot_of_layout(text_regions_p, image_page, image['name']) - self.plotter.save_plot_of_layout_all(text_regions_p, image_page, image['name']) - else: - polygons_of_drop_capitals = [] - 
polygons_of_textregions_h = [] - polygons_of_textregions_h_d = [] + self.plotter.save_plot_of_layout(text_regions_p, image_page) + self.plotter.save_plot_of_layout_all(text_regions_p, image_page) + + pixel_img = 4 + polygons_of_drop_capitals = return_contours_of_interested_region_by_min_size(text_regions_p, pixel_img) + all_found_textline_polygons = adhere_drop_capital_region_into_corresponding_textline( + text_regions_p, polygons_of_drop_capitals, contours_only_text_parent, contours_only_text_parent_h, + all_box_coord, all_box_coord_h, all_found_textline_polygons, all_found_textline_polygons_h, + kernel=KERNEL, curved_line=self.curved_line, textline_light=self.textline_light) + + if not self.reading_order_machine_based: + pixel_seps = 6 + if not self.headers_off: + if np.abs(slope_deskew) < SLOPE_THRESHOLD: + num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document( + np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), + num_col_classifier, self.tables, pixel_seps, contours_only_text_parent_h) + else: + _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document( + np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), + num_col_classifier, self.tables, pixel_seps, contours_only_text_parent_h_d_ordered) + elif self.headers_off: + if np.abs(slope_deskew) < SLOPE_THRESHOLD: + num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document( + np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), + num_col_classifier, self.tables, pixel_seps) + else: + _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document( + np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), + num_col_classifier, self.tables, pixel_seps) + + if num_col_classifier >= 3: + if np.abs(slope_deskew) < SLOPE_THRESHOLD: + regions_without_separators = regions_without_separators.astype(np.uint8) + regions_without_separators = cv2.erode(regions_without_separators[:, :], KERNEL, iterations=6) + else: + regions_without_separators_d = regions_without_separators_d.astype(np.uint8) + regions_without_separators_d = cv2.erode(regions_without_separators_d[:, :], KERNEL, iterations=6) + + if np.abs(slope_deskew) < SLOPE_THRESHOLD: + boxes, peaks_neg_tot_tables = return_boxes_of_images_by_order_of_reading_new( + splitter_y_new, regions_without_separators, matrix_of_lines_ch, + num_col_classifier, erosion_hurts, self.tables, self.right2left) + else: + boxes_d, peaks_neg_tot_tables_d = return_boxes_of_images_by_order_of_reading_new( + splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, + num_col_classifier, erosion_hurts, self.tables, self.right2left) if self.plotter: - self.plotter.write_images_into_directory(polygons_of_images, image_page, - image['scale_x'], image['scale_y'], image['name']) - + self.plotter.write_images_into_directory(polygons_of_images, image_page) t_order = time.time() - self.logger.info("Step 4/5: Reading Order") - if self.right2left: - self.logger.info("Right-to-left mode enabled") - if self.headers_off: - self.logger.info("Headers ignored in reading order") + if self.full_layout: + if self.reading_order_machine_based: + order_text_new, id_of_texts_tot = self.do_order_of_regions_with_model( + contours_only_text_parent, contours_only_text_parent_h, text_regions_p) + else: + if np.abs(slope_deskew) < SLOPE_THRESHOLD: + order_text_new, id_of_texts_tot = self.do_order_of_regions( + contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot) + else: + 
order_text_new, id_of_texts_tot = self.do_order_of_regions( + contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered, boxes_d, textline_mask_tot_d) + self.logger.info("detection of reading order took %.1fs", time.time() - t_order) + + if self.ocr: + ocr_all_textlines = [] + else: + ocr_all_textlines = None + pcgts = self.writer.build_pagexml_full_layout( + contours_only_text_parent, contours_only_text_parent_h, page_coord, order_text_new, id_of_texts_tot, + all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, + polygons_of_images, contours_tables, polygons_of_drop_capitals, polygons_of_marginals, + all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, + cont_page, polygons_lines_xml, ocr_all_textlines, conf_contours_textregions, conf_contours_textregions_h) + return pcgts + + contours_only_text_parent_h = None if self.reading_order_machine_based: - self.logger.info("Using machine-based detection") - order_text = self.do_order_of_regions_with_model( - polygons_of_textregions, - polygons_of_textregions_h, - polygons_of_drop_capitals, - text_regions_p) + order_text_new, id_of_texts_tot = self.do_order_of_regions_with_model( + contours_only_text_parent, contours_only_text_parent_h, text_regions_p) else: if np.abs(slope_deskew) < SLOPE_THRESHOLD: - boxes = self.run_boxes_order(text_regions_p, num_col_classifier, erosion_hurts, - regions_without_separators, - contours_h=(None if self.headers_off or not self.full_layout - else polygons_of_textregions_h)) - order_text = self.do_order_of_regions( - polygons_of_textregions, - polygons_of_textregions_h, - polygons_of_drop_capitals, - boxes, regions_without_separators) #textline_mask_tot_ea) + order_text_new, id_of_texts_tot = self.do_order_of_regions( + contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot) else: - boxes_d = self.run_boxes_order(text_regions_p_d, num_col_classifier, erosion_hurts, - regions_without_separators_d, - contours_h=(None if self.headers_off or not self.full_layout - else polygons_of_textregions_h_d)) + contours_only_text_parent_d_ordered = self.return_list_of_contours_with_desired_order( + contours_only_text_parent_d_ordered, index_by_text_par_con) + #try: + #contours_only_text_parent_d_ordered = \ + #list(np.array(contours_only_text_parent_d_ordered, dtype=object)[index_by_text_par_con]) + #except: + #contours_only_text_parent_d_ordered = \ + #list(np.array(contours_only_text_parent_d_ordered, dtype=np.int32)[index_by_text_par_con]) + order_text_new, id_of_texts_tot = self.do_order_of_regions( + contours_only_text_parent_d_ordered, contours_only_text_parent_h, boxes_d, textline_mask_tot_d) - order_text = self.do_order_of_regions( - polygons_of_textregions_d, - polygons_of_textregions_h_d, - polygons_of_drop_capitals, - boxes_d, regions_without_separators_d) #textline_mask_tot_ea_d) - self.logger.info(f"Detection of reading order took {time.time() - t_order:.1f}s") + if self.ocr: + device = cuda.get_current_device() + device.reset() + gc.collect() + model_ocr = VisionEncoderDecoderModel.from_pretrained(self.model_ocr_dir) + device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed") + torch.cuda.empty_cache() + model_ocr.to(device) + + ind_tot = 0 + #cv2.imwrite('./img_out.png', image_page) + ocr_all_textlines = [] + for indexing, ind_poly_first in enumerate(all_found_textline_polygons): + 
ocr_textline_in_textregion = [] + for indexing2, ind_poly in enumerate(ind_poly_first): + if not (self.textline_light or self.curved_line): + ind_poly = copy.deepcopy(ind_poly) + box_ind = all_box_coord[indexing] + #print(ind_poly,np.shape(ind_poly), 'ind_poly') + #print(box_ind) + ind_poly = self.return_textline_contour_with_added_box_coordinate(ind_poly, box_ind) + #print(ind_poly_copy) + ind_poly[ind_poly<0] = 0 + x, y, w, h = cv2.boundingRect(ind_poly) + #print(ind_poly_copy, np.shape(ind_poly_copy)) + #print(x, y, w, h, h/float(w),'ratio') + h2w_ratio = h/float(w) + mask_poly = np.zeros(image_page.shape) + if not self.light_version: + img_poly_on_img = np.copy(image_page) + else: + img_poly_on_img = np.copy(img_bin_light) + mask_poly = cv2.fillPoly(mask_poly, pts=[ind_poly], color=(1, 1, 1)) + + if self.textline_light: + mask_poly = cv2.dilate(mask_poly, KERNEL, iterations=1) + img_poly_on_img[:,:,0][mask_poly[:,:,0] ==0] = 255 + img_poly_on_img[:,:,1][mask_poly[:,:,0] ==0] = 255 + img_poly_on_img[:,:,2][mask_poly[:,:,0] ==0] = 255 + + img_croped = img_poly_on_img[y:y+h, x:x+w, :] + #cv2.imwrite('./extracted_lines/'+str(ind_tot)+'.jpg', img_croped) + text_ocr = self.return_ocr_of_textline_without_common_section(img_croped, model_ocr, processor, device, w, h2w_ratio, ind_tot) + ocr_textline_in_textregion.append(text_ocr) + ind_tot = ind_tot +1 + ocr_all_textlines.append(ocr_textline_in_textregion) - self.logger.info("Step 5/5: Output Generation") - if self.full_layout: - pcgts = writer.build_pagexml_full_layout( - num_col=num_col_classifier, - found_polygons_text_region=polygons_of_textregions, - found_polygons_text_region_h=polygons_of_textregions_h, - page_coord=page_coord, - page_slope=slope_deskew, - order_of_texts=order_text, - all_found_textline_polygons=all_found_textline_polygons, - all_found_textline_polygons_h=all_found_textline_polygons_h, - found_polygons_images=polygons_of_images, - found_polygons_tables=polygons_of_tables, - found_polygons_drop_capitals=polygons_of_drop_capitals, - found_polygons_marginals_left=polygons_of_marginals_left, - found_polygons_marginals_right=polygons_of_marginals_right, - all_found_textline_polygons_marginals_left=all_found_textline_polygons_marginals_left, - all_found_textline_polygons_marginals_right=all_found_textline_polygons_marginals_right, - slopes=slopes, - slopes_h=slopes_h, - slopes_marginals_left=slopes_marginals_left, - slopes_marginals_right=slopes_marginals_right, - cont_page=cont_page, - polygons_seplines=polygons_seplines, - conf_textregions=conf_textregions, - conf_textregions_h=conf_textregions_h, - conf_marginals_left=conf_marginals_left, - conf_marginals_right=conf_marginals_right, - conf_images=conf_images, - conf_tables=conf_tables, - conf_drops=conf_drops, - ) else: - pcgts = writer.build_pagexml_no_full_layout( - num_col=num_col_classifier, - found_polygons_text_region=polygons_of_textregions, - page_coord=page_coord, - page_slope=slope_deskew, - order_of_texts=order_text, - all_found_textline_polygons=all_found_textline_polygons, - found_polygons_images=polygons_of_images, - found_polygons_tables=polygons_of_tables, - found_polygons_marginals_left=polygons_of_marginals_left, - found_polygons_marginals_right=polygons_of_marginals_right, - all_found_textline_polygons_marginals_left=all_found_textline_polygons_marginals_left, - all_found_textline_polygons_marginals_right=all_found_textline_polygons_marginals_right, - slopes=slopes, - slopes_marginals_left=slopes_marginals_left, - 
slopes_marginals_right=slopes_marginals_right, - cont_page=cont_page, - polygons_seplines=polygons_seplines, - conf_textregions=conf_textregions, - conf_marginals_left=conf_marginals_left, - conf_marginals_right=conf_marginals_right, - conf_images=conf_images, - conf_tables=conf_tables, - ) + ocr_all_textlines = None + #print(ocr_all_textlines) + self.logger.info("detection of reading order took %.1fs", time.time() - t_order) + pcgts = self.writer.build_pagexml_no_full_layout( + txt_con_org, page_coord, order_text_new, id_of_texts_tot, + all_found_textline_polygons, all_box_coord, polygons_of_images, polygons_of_marginals, + all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, + cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines, conf_contours_textregions) + return pcgts - writer.write_pagexml(pcgts) - self.logger.info("Job done in %.1fs", time.time() - t0) - return + +class Eynollah_ocr: + def __init__( + self, + dir_models, + dir_xmls=None, + dir_in=None, + dir_in_bin=None, + dir_out=None, + dir_out_image_text=None, + tr_ocr=False, + export_textline_images_and_text=False, + do_not_mask_with_textline_contour=False, + draw_texts_on_image=False, + prediction_with_both_of_rgb_and_bin=False, + logger=None, + ): + self.dir_in = dir_in + self.dir_in_bin = dir_in_bin + self.dir_out = dir_out + self.dir_xmls = dir_xmls + self.dir_models = dir_models + self.tr_ocr = tr_ocr + self.export_textline_images_and_text = export_textline_images_and_text + self.do_not_mask_with_textline_contour = do_not_mask_with_textline_contour + self.draw_texts_on_image = draw_texts_on_image + self.dir_out_image_text = dir_out_image_text + self.prediction_with_both_of_rgb_and_bin = prediction_with_both_of_rgb_and_bin + if tr_ocr: + self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed") + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.model_ocr_dir = dir_models + "/trocr_model_ens_of_3_checkpoints_201124" + self.model_ocr = VisionEncoderDecoderModel.from_pretrained(self.model_ocr_dir) + self.model_ocr.to(self.device) + + else: + self.model_ocr_dir = dir_models + "/model_step_75000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" + model_ocr = load_model(self.model_ocr_dir , compile=False) + + self.prediction_model = tf.keras.models.Model( + model_ocr.get_layer(name = "image").input, + model_ocr.get_layer(name = "dense2").output) + + + with open(os.path.join(self.model_ocr_dir, "characters_org.txt"),"r") as config_file: + characters = json.load(config_file) + + + AUTOTUNE = tf.data.AUTOTUNE + + # Mapping characters to integers. + char_to_num = StringLookup(vocabulary=list(characters), mask_token=None) + + # Mapping integers back to original characters. + self.num_to_char = StringLookup( + vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True + ) + + def decode_batch_predictions(self, pred, max_len = 128): + # input_len is the product of the batch size and the + # number of time steps. + input_len = np.ones(pred.shape[0]) * pred.shape[1] + + # Decode CTC predictions using greedy search. + # decoded is a tuple with 2 elements. + decoded = tf.keras.backend.ctc_decode(pred, + input_length = input_len, + beam_width = 100) + # The outputs are in the first element of the tuple. + # Additionally, the first element is actually a list, + # therefore we take the first element of that list as well. 
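
For context on the decoding path: the CTC head only ever emits integer class indices, and the StringLookup pair built in __init__ is what maps them back to text. A tiny round-trip sketch, with a toy vocabulary standing in for characters_org.txt:

    import tensorflow as tf
    from tensorflow.keras.layers import StringLookup

    characters = ["a", "b", "c", " "]  # toy stand-in for the real character list
    char_to_num = StringLookup(vocabulary=characters, mask_token=None)
    num_to_char = StringLookup(vocabulary=char_to_num.get_vocabulary(),
                               mask_token=None, invert=True)

    ids = char_to_num(tf.constant(["b", "a", "c"]))   # -> [2, 1, 3]
    text = tf.strings.reduce_join(num_to_char(ids))
    print(ids.numpy(), text.numpy().decode("utf-8"))  # [2 1 3] bac

Index 0 is reserved for the out-of-vocabulary token, so real characters start at 1; tf.strings.reduce_join is the same call used below to collapse each decoded index sequence into a string.
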
+ #print(decoded,'decoded') + decoded = decoded[0][0][:, :max_len] + + #print(decoded, decoded.shape,'decoded') + + output = [] + for d in decoded: + # Convert the predicted indices to the corresponding chars. + d = tf.strings.reduce_join(self.num_to_char(d)) + d = d.numpy().decode("utf-8") + output.append(d) + return output + + + def distortion_free_resize(self, image, img_size): + w, h = img_size + image = tf.image.resize(image, size=(h, w), preserve_aspect_ratio=True) + + # Check the amount of padding needed to be done. + pad_height = h - tf.shape(image)[0] + pad_width = w - tf.shape(image)[1] + + # Only necessary if you want to do the same amount of padding on both sides. + if pad_height % 2 != 0: + height = pad_height // 2 + pad_height_top = height + 1 + pad_height_bottom = height + else: + pad_height_top = pad_height_bottom = pad_height // 2 + + if pad_width % 2 != 0: + width = pad_width // 2 + pad_width_left = width + 1 + pad_width_right = width + else: + pad_width_left = pad_width_right = pad_width // 2 + + image = tf.pad( + image, + paddings=[ + [pad_height_top, pad_height_bottom], + [pad_width_left, pad_width_right], + [0, 0], + ], + ) + + image = tf.transpose(image, (1, 0, 2)) + image = tf.image.flip_left_right(image) + return image + + def return_start_and_end_of_common_text_of_textline_ocr_without_common_section(self, textline_image): + width = np.shape(textline_image)[1] + height = np.shape(textline_image)[0] + common_window = int(0.06*width) + + width1 = int ( width/2. - common_window ) + width2 = int ( width/2. + common_window ) + + img_sum = np.sum(textline_image[:,:,0], axis=0) + sum_smoothed = gaussian_filter1d(img_sum, 3) + + peaks_real, _ = find_peaks(sum_smoothed, height=0) + + if len(peaks_real)>70: + + peaks_real = peaks_real[(peaks_real<width2) & (peaks_real>width1)] + + arg_max = np.argmax(sum_smoothed[peaks_real]) + + peaks_final = peaks_real[arg_max] + + return peaks_final + else: + return None + + # Function to fit text inside the given area + def fit_text_single_line(self, draw, text, font_path, max_width, max_height): + initial_font_size = 50 + font_size = initial_font_size + while font_size > 10: # Minimum font size + font = ImageFont.truetype(font_path, font_size) + text_bbox = draw.textbbox((0, 0), text, font=font) # Get text bounding box + text_width = text_bbox[2] - text_bbox[0] + text_height = text_bbox[3] - text_bbox[1] + + if text_width <= max_width and text_height <= max_height: + return font # Return the best-fitting font + + font_size -= 2 # Reduce font size and retry + + return ImageFont.truetype(font_path, 10) # Smallest font fallback + + def return_textlines_split_if_needed(self, textline_image, textline_image_bin): + + split_point = self.return_start_and_end_of_common_text_of_textline_ocr_without_common_section(textline_image) + if split_point: + image1 = textline_image[:, :split_point,:]# image.crop((0, 0, width2, height)) + image2 = textline_image[:, split_point:,:]#image.crop((width1, 0, width, height)) + if self.prediction_with_both_of_rgb_and_bin: + image1_bin = textline_image_bin[:, :split_point,:]# image.crop((0, 0, width2, height)) + image2_bin = textline_image_bin[:, split_point:,:]#image.crop((width1, 0, width, height)) + return [image1, image2], [image1_bin, image2_bin] + else: + return [image1, image2], None + else: + return None, None + def preprocess_and_resize_image_for_ocrcnn_model(self, img, image_height, image_width): + ratio = image_height /float(img.shape[0]) + w_ratio = int(ratio * img.shape[1]) + + if w_ratio <= image_width: + width_new = w_ratio +
else: + width_new = image_width + + if width_new == 0: + width_new = img.shape[1] + + img = resize_image(img, image_height, width_new) + img_fin = np.ones((image_height, image_width, 3))*255 + img_fin[:,:+width_new,:] = img[:,:,:] + img_fin = img_fin / 255. + return img_fin + + def run(self): + ls_imgs = os.listdir(self.dir_in) + + if self.tr_ocr: + b_s = 2 + for ind_img in ls_imgs: + t0 = time.time() + file_name = ind_img.split('.')[0] + dir_img = os.path.join(self.dir_in, ind_img) + dir_xml = os.path.join(self.dir_xmls, file_name+'.xml') + out_file_ocr = os.path.join(self.dir_out, file_name+'.xml') + img = cv2.imread(dir_img) + + ##file_name = Path(dir_xmls).stem + tree1 = ET.parse(dir_xml, parser = ET.XMLParser(encoding="utf-8")) + root1=tree1.getroot() + alltags=[elem.tag for elem in root1.iter()] + link=alltags[0].split('}')[0]+'}' + + name_space = alltags[0].split('}')[0] + name_space = name_space.split('{')[1] + + region_tags=np.unique([x for x in alltags if x.endswith('TextRegion')]) + + + + cropped_lines = [] + cropped_lines_region_indexer = [] + cropped_lines_meging_indexing = [] + + indexer_text_region = 0 + for nn in root1.iter(region_tags): + for child_textregion in nn: + if child_textregion.tag.endswith("TextLine"): + + for child_textlines in child_textregion: + if child_textlines.tag.endswith("Coords"): + cropped_lines_region_indexer.append(indexer_text_region) + p_h=child_textlines.attrib['points'].split(' ') + textline_coords = np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) + x,y,w,h = cv2.boundingRect(textline_coords) + + h2w_ratio = h/float(w) + + img_poly_on_img = np.copy(img) + mask_poly = np.zeros(img.shape) + mask_poly = cv2.fillPoly(mask_poly, pts=[textline_coords], color=(1, 1, 1)) + + mask_poly = mask_poly[y:y+h, x:x+w, :] + img_crop = img_poly_on_img[y:y+h, x:x+w, :] + img_crop[mask_poly==0] = 255 + + if h2w_ratio > 0.05: + cropped_lines.append(img_crop) + cropped_lines_meging_indexing.append(0) + else: + splited_images, _ = self.return_textlines_split_if_needed(img_crop, None) + #print(splited_images) + if splited_images: + cropped_lines.append(splited_images[0]) + cropped_lines_meging_indexing.append(1) + cropped_lines.append(splited_images[1]) + cropped_lines_meging_indexing.append(-1) + else: + cropped_lines.append(img_crop) + cropped_lines_meging_indexing.append(0) + indexer_text_region = indexer_text_region +1 + + + extracted_texts = [] + n_iterations = math.ceil(len(cropped_lines) / b_s) + + for i in range(n_iterations): + if i==(n_iterations-1): + n_start = i*b_s + imgs = cropped_lines[n_start:] + else: + n_start = i*b_s + n_end = (i+1)*b_s + imgs = cropped_lines[n_start:n_end] + pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values + generated_ids_merged = self.model_ocr.generate(pixel_values_merged.to(self.device)) + generated_text_merged = self.processor.batch_decode(generated_ids_merged, skip_special_tokens=True) + + extracted_texts = extracted_texts + generated_text_merged + + extracted_texts_merged = [extracted_texts[ind] if cropped_lines_meging_indexing[ind]==0 else extracted_texts[ind]+extracted_texts[ind+1] if cropped_lines_meging_indexing[ind]==1 else None for ind in range(len(cropped_lines_meging_indexing))] + + extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] + #print(extracted_texts_merged, len(extracted_texts_merged)) + + unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) + + #print(len(unique_cropped_lines_region_indexer), 
'unique_cropped_lines_region_indexer') + text_by_textregion = [] + for ind in unique_cropped_lines_region_indexer: + extracted_texts_merged_un = np.array(extracted_texts_merged)[np.array(cropped_lines_region_indexer)==ind] + + text_by_textregion.append(" ".join(extracted_texts_merged_un)) + + #print(len(text_by_textregion) , indexer_text_region, "text_by_textregion") + + + #print(time.time() - t0 ,'elapsed time') + + + indexer = 0 + indexer_textregion = 0 + for nn in root1.iter(region_tags): + text_subelement_textregion = ET.SubElement(nn, 'TextEquiv') + unicode_textregion = ET.SubElement(text_subelement_textregion, 'Unicode') + + + has_textline = False + for child_textregion in nn: + if child_textregion.tag.endswith("TextLine"): + text_subelement = ET.SubElement(child_textregion, 'TextEquiv') + unicode_textline = ET.SubElement(text_subelement, 'Unicode') + unicode_textline.text = extracted_texts_merged[indexer] + indexer = indexer + 1 + has_textline = True + if has_textline: + unicode_textregion.text = text_by_textregion[indexer_textregion] + indexer_textregion = indexer_textregion + 1 + + + + ET.register_namespace("",name_space) + tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf8",default_namespace=None) + #print("Job done in %.1fs", time.time() - t0) + else: + max_len = 512 + padding_token = 299 + image_width = 512#max_len * 4 + image_height = 32 + b_s = 8 + + + img_size=(image_width, image_height) + + for ind_img in ls_imgs: + t0 = time.time() + file_name = ind_img.split('.')[0] + dir_img = os.path.join(self.dir_in, ind_img) + dir_xml = os.path.join(self.dir_xmls, file_name+'.xml') + out_file_ocr = os.path.join(self.dir_out, file_name+'.xml') + img = cv2.imread(dir_img) + if self.prediction_with_both_of_rgb_and_bin: + cropped_lines_bin = [] + dir_img_bin = os.path.join(self.dir_in_bin, file_name+'.png') + img_bin = cv2.imread(dir_img_bin) + + if self.draw_texts_on_image: + out_image_with_text = os.path.join(self.dir_out_image_text, file_name+'.png') + image_text = Image.new("RGB", (img.shape[1], img.shape[0]), "white") + draw = ImageDraw.Draw(image_text) + total_bb_coordinates = [] + + tree1 = ET.parse(dir_xml, parser = ET.XMLParser(encoding="utf-8")) + root1=tree1.getroot() + alltags=[elem.tag for elem in root1.iter()] + link=alltags[0].split('}')[0]+'}' + + name_space = alltags[0].split('}')[0] + name_space = name_space.split('{')[1] + + region_tags=np.unique([x for x in alltags if x.endswith('TextRegion')]) + + cropped_lines = [] + cropped_lines_region_indexer = [] + cropped_lines_meging_indexing = [] + + tinl = time.time() + indexer_text_region = 0 + indexer_textlines = 0 + for nn in root1.iter(region_tags): + for child_textregion in nn: + if child_textregion.tag.endswith("TextLine"): + for child_textlines in child_textregion: + if child_textlines.tag.endswith("Coords"): + cropped_lines_region_indexer.append(indexer_text_region) + p_h=child_textlines.attrib['points'].split(' ') + textline_coords = np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) + + x,y,w,h = cv2.boundingRect(textline_coords) + + if self.draw_texts_on_image: + total_bb_coordinates.append([x,y,w,h]) + + h2w_ratio = h/float(w) + + img_poly_on_img = np.copy(img) + if self.prediction_with_both_of_rgb_and_bin: + img_poly_on_img_bin = np.copy(img_bin) + img_crop_bin = img_poly_on_img_bin[y:y+h, x:x+w, :] + + mask_poly = np.zeros(img.shape) + mask_poly = cv2.fillPoly(mask_poly, pts=[textline_coords], color=(1, 1, 1)) + + mask_poly = mask_poly[y:y+h, x:x+w, :] + img_crop = 
img_poly_on_img[y:y+h, x:x+w, :] + if not self.do_not_mask_with_textline_contour: + img_crop[mask_poly==0] = 255 + if self.prediction_with_both_of_rgb_and_bin: + img_crop_bin[mask_poly==0] = 255 + + if not self.export_textline_images_and_text: + if h2w_ratio > 0.1: + img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) + cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(0) + if self.prediction_with_both_of_rgb_and_bin: + img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(img_crop_bin, image_height, image_width) + cropped_lines_bin.append(img_fin) + else: + if self.prediction_with_both_of_rgb_and_bin: + splited_images, splited_images_bin = self.return_textlines_split_if_needed(img_crop, img_crop_bin) + else: + splited_images, splited_images_bin = self.return_textlines_split_if_needed(img_crop, None) + if splited_images: + img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(splited_images[0], image_height, image_width) + cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(1) + img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(splited_images[1], image_height, image_width) + + cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(-1) + + if self.prediction_with_both_of_rgb_and_bin: + img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(splited_images_bin[0], image_height, image_width) + cropped_lines_bin.append(img_fin) + img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(splited_images_bin[1], image_height, image_width) + cropped_lines_bin.append(img_fin) + + else: + img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) + cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(0) + + if self.prediction_with_both_of_rgb_and_bin: + img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(img_crop_bin, image_height, image_width) + cropped_lines_bin.append(img_fin) + + if self.export_textline_images_and_text: + if child_textlines.tag.endswith("TextEquiv"): + for cheild_text in child_textlines: + if cheild_text.tag.endswith("Unicode"): + textline_text = cheild_text.text + if textline_text: + with open(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'.txt'), 'w') as text_file: + text_file.write(textline_text) + + cv2.imwrite(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'.png'), img_crop ) + + indexer_textlines+=1 + + if not self.export_textline_images_and_text: + indexer_text_region = indexer_text_region +1 + + if not self.export_textline_images_and_text: + extracted_texts = [] + + n_iterations = math.ceil(len(cropped_lines) / b_s) + + for i in range(n_iterations): + if i==(n_iterations-1): + n_start = i*b_s + imgs = cropped_lines[n_start:] + imgs = np.array(imgs) + imgs = imgs.reshape(imgs.shape[0], image_height, image_width, 3) + if self.prediction_with_both_of_rgb_and_bin: + imgs_bin = cropped_lines_bin[n_start:] + imgs_bin = np.array(imgs_bin) + imgs_bin = imgs_bin.reshape(imgs_bin.shape[0], image_height, image_width, 3) + else: + n_start = i*b_s + n_end = (i+1)*b_s + imgs = cropped_lines[n_start:n_end] + imgs = np.array(imgs).reshape(b_s, image_height, image_width, 3) + + if self.prediction_with_both_of_rgb_and_bin: + imgs_bin = cropped_lines_bin[n_start:n_end] + imgs_bin = np.array(imgs_bin).reshape(b_s, image_height, image_width, 3) + + + preds = self.prediction_model.predict(imgs, verbose=0) + if self.prediction_with_both_of_rgb_and_bin: + preds_bin = 
self.prediction_model.predict(imgs_bin, verbose=0) + preds = (preds + preds_bin) / 2. + + pred_texts = self.decode_batch_predictions(preds) + + for ib in range(imgs.shape[0]): + pred_texts_ib = pred_texts[ib].strip("[UNK]") + extracted_texts.append(pred_texts_ib) + + extracted_texts_merged = [extracted_texts[ind] if cropped_lines_meging_indexing[ind]==0 else extracted_texts[ind]+" "+extracted_texts[ind+1] if cropped_lines_meging_indexing[ind]==1 else None for ind in range(len(cropped_lines_meging_indexing))] + + extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] + unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) + + + if self.draw_texts_on_image: + + font_path = "NotoSans-Regular.ttf" # Make sure this file exists! + font = ImageFont.truetype(font_path, 40) + + for indexer_text, bb_ind in enumerate(total_bb_coordinates): + + + x_bb = bb_ind[0] + y_bb = bb_ind[1] + w_bb = bb_ind[2] + h_bb = bb_ind[3] + + font = self.fit_text_single_line(draw, extracted_texts_merged[indexer_text], font_path, w_bb, int(h_bb*0.4) ) + + ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) + + text_bbox = draw.textbbox((0, 0), extracted_texts_merged[indexer_text], font=font) + text_width = text_bbox[2] - text_bbox[0] + text_height = text_bbox[3] - text_bbox[1] + + text_x = x_bb + (w_bb - text_width) // 2 # Center horizontally + text_y = y_bb + (h_bb - text_height) // 2 # Center vertically + + # Draw the text + draw.text((text_x, text_y), extracted_texts_merged[indexer_text], fill="black", font=font) + image_text.save(out_image_with_text) + + text_by_textregion = [] + for ind in unique_cropped_lines_region_indexer: + extracted_texts_merged_un = np.array(extracted_texts_merged)[np.array(cropped_lines_region_indexer)==ind] + text_by_textregion.append(" ".join(extracted_texts_merged_un)) + + indexer = 0 + indexer_textregion = 0 + for nn in root1.iter(region_tags): + + is_textregion_text = False + for childtest in nn: + if childtest.tag.endswith("TextEquiv"): + is_textregion_text = True + + if not is_textregion_text: + text_subelement_textregion = ET.SubElement(nn, 'TextEquiv') + unicode_textregion = ET.SubElement(text_subelement_textregion, 'Unicode') + + + has_textline = False + for child_textregion in nn: + if child_textregion.tag.endswith("TextLine"): + + is_textline_text = False + for childtest2 in child_textregion: + if childtest2.tag.endswith("TextEquiv"): + is_textline_text = True + + + if not is_textline_text: + text_subelement = ET.SubElement(child_textregion, 'TextEquiv') + unicode_textline = ET.SubElement(text_subelement, 'Unicode') + unicode_textline.text = extracted_texts_merged[indexer] + else: + for childtest3 in child_textregion: + if childtest3.tag.endswith("TextEquiv"): + for child_uc in childtest3: + if child_uc.tag.endswith("Unicode"): + child_uc.text = extracted_texts_merged[indexer] + + indexer = indexer + 1 + has_textline = True + if has_textline: + if is_textregion_text: + for child4 in nn: + if child4.tag.endswith("TextEquiv"): + for childtr_uc in child4: + if childtr_uc.tag.endswith("Unicode"): + childtr_uc.text = text_by_textregion[indexer_textregion] + else: + unicode_textregion.text = text_by_textregion[indexer_textregion] + indexer_textregion = indexer_textregion + 1 + + ET.register_namespace("",name_space) + tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf8",default_namespace=None) + #print("Job done in %.1fs", time.time() - t0) diff --git a/src/eynollah/eynollah_imports.py 
b/src/eynollah/eynollah_imports.py deleted file mode 100644 index 496406c..0000000 --- a/src/eynollah/eynollah_imports.py +++ /dev/null @@ -1,13 +0,0 @@ -""" -Load libraries with possible race conditions once. This must be imported as the first module of eynollah. -""" -import os -os.environ['TF_USE_LEGACY_KERAS'] = '1' # avoid Keras 3 after TF 2.15 - -from ocrd_utils import tf_disable_interactive_logs -from torch import * -tf_disable_interactive_logs() -import tensorflow.keras -from shapely import * -imported_libs = True -__all__ = ['imported_libs'] diff --git a/src/eynollah/eynollah_ocr.py b/src/eynollah/eynollah_ocr.py deleted file mode 100644 index 1b49077..0000000 --- a/src/eynollah/eynollah_ocr.py +++ /dev/null @@ -1,837 +0,0 @@ -# FIXME: fix all of those... -# pyright: reportOptionalSubscript=false - -from logging import Logger, getLogger -from typing import List, Optional -from pathlib import Path -import os -import gc -import math -from dataclasses import dataclass - -import cv2 -from cv2.typing import MatLike -from xml.etree import ElementTree as ET -from PIL import Image, ImageDraw -import numpy as np -from eynollah.model_zoo import EynollahModelZoo -from eynollah.utils.font import get_font -from eynollah.utils.xml import etree_namespace_for_element_tag -try: - import torch -except ImportError: - torch = None - - -from .utils import is_image_filename -from .utils.resize import resize_image -from .utils.utils_ocr import ( - break_curved_line_into_small_pieces_and_then_merge, - decode_batch_predictions, - fit_text_single_line, - get_contours_and_bounding_boxes, - get_orientation_moments, - preprocess_and_resize_image_for_ocrcnn_model, - return_textlines_split_if_needed, - rotate_image_with_padding, -) - -# TODO: refine typing -@dataclass -class EynollahOcrResult: - extracted_texts_merged: List - extracted_conf_value_merged: Optional[List] - cropped_lines_region_indexer: List - total_bb_coordinates:List - -class Eynollah_ocr: - def __init__( - self, - *, - model_zoo: EynollahModelZoo, - tr_ocr=False, - batch_size: Optional[int]=None, - do_not_mask_with_textline_contour: bool=False, - min_conf_value_of_textline_text : Optional[float]=None, - logger: Optional[Logger]=None, - ): - self.tr_ocr = tr_ocr - # masking for OCR and GT generation, relevant for skewed lines and bounding boxes - self.do_not_mask_with_textline_contour = do_not_mask_with_textline_contour - self.logger = logger if logger else getLogger('eynollah.ocr') - self.model_zoo = model_zoo - - self.min_conf_value_of_textline_text = min_conf_value_of_textline_text if min_conf_value_of_textline_text else 0.3 - self.b_s = 2 if batch_size is None and tr_ocr else 8 if batch_size is None else batch_size - - if tr_ocr: - self.model_zoo.load_models('trocr_processor') - self.model_zoo.load_models(['ocr', 'tr']) - self.model_zoo.get('ocr').to(self.device) - else: - self.model_zoo.load_models('ocr') - self.model_zoo.load_models('num_to_char') - self.model_zoo.load_models('characters') - self.end_character = len(self.model_zoo.get('characters')) + 2 - - @property - def device(self): - assert torch - if torch.cuda.is_available(): - self.logger.info("Using GPU acceleration") - return torch.device("cuda:0") - else: - self.logger.info("Using CPU processing") - return torch.device("cpu") - - def run_trocr( - self, - *, - img: MatLike, - page_tree: ET.ElementTree, - page_ns, - tr_ocr_input_height_and_width, - ) -> EynollahOcrResult: - - total_bb_coordinates = [] - - - cropped_lines = [] - cropped_lines_region_indexer = [] - 
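# A minimal sketch of the merging convention used throughout this file (toy
# values, not from the patch): cropped_lines_meging_indexing, initialized on
# the next line, records 0 for a line kept whole, 1 for the left half of a
# split line (to be concatenated with its successor), and -1 for the right
# half (already consumed by that merge).
texts_toy = ["foo", "bar", "baz", "qux"]
merge_flags_toy = [0, 1, -1, 0]
merged_toy = [texts_toy[i] if merge_flags_toy[i] == 0
              else texts_toy[i] + " " + texts_toy[i + 1] if merge_flags_toy[i] == 1
              else None
              for i in range(len(merge_flags_toy))]
merged_toy = [t for t in merged_toy if t is not None]
assert merged_toy == ["foo", "bar baz", "qux"]
# (Whether the two halves are joined directly or with a space differs between
# the TrOCR and CNN-RNN code paths.)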
cropped_lines_meging_indexing = [] - - extracted_texts = [] - - indexer_text_region = 0 - indexer_b_s = 0 - - for nn in page_tree.getroot().iter(f'{{{page_ns}}}TextRegion'): - for child_textregion in nn: - if child_textregion.tag.endswith("TextLine"): - - for child_textlines in child_textregion: - if child_textlines.tag.endswith("Coords"): - cropped_lines_region_indexer.append(indexer_text_region) - p_h=child_textlines.attrib['points'].split(' ') - textline_coords = np.array( [ [int(x.split(',')[0]), - int(x.split(',')[1]) ] - for x in p_h] ) - x,y,w,h = cv2.boundingRect(textline_coords) - - total_bb_coordinates.append([x,y,w,h]) - - h2w_ratio = h/float(w) - - img_poly_on_img = np.copy(img) - mask_poly = np.zeros(img.shape) - mask_poly = cv2.fillPoly(mask_poly, pts=[textline_coords], color=(1, 1, 1)) - - mask_poly = mask_poly[y:y+h, x:x+w, :] - img_crop = img_poly_on_img[y:y+h, x:x+w, :] - img_crop[mask_poly==0] = 255 - - self.logger.debug("processing %d lines for '%s'", - len(cropped_lines), nn.attrib['id']) - if h2w_ratio > 0.1: - cropped_lines.append(resize_image(img_crop, - tr_ocr_input_height_and_width, - tr_ocr_input_height_and_width) ) - cropped_lines_meging_indexing.append(0) - indexer_b_s+=1 - if indexer_b_s==self.b_s: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_zoo.get('ocr').generate( - pixel_values_merged.to(self.device)) - generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( - generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - else: - splited_images, _ = return_textlines_split_if_needed(img_crop, None) - #print(splited_images) - if splited_images: - cropped_lines.append(resize_image(splited_images[0], - tr_ocr_input_height_and_width, - tr_ocr_input_height_and_width)) - cropped_lines_meging_indexing.append(1) - indexer_b_s+=1 - - if indexer_b_s==self.b_s: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_zoo.get('ocr').generate( - pixel_values_merged.to(self.device)) - generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( - generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - - cropped_lines.append(resize_image(splited_images[1], - tr_ocr_input_height_and_width, - tr_ocr_input_height_and_width)) - cropped_lines_meging_indexing.append(-1) - indexer_b_s+=1 - - if indexer_b_s==self.b_s: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_zoo.get('ocr').generate( - pixel_values_merged.to(self.device)) - generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( - generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - else: - cropped_lines.append(img_crop) - cropped_lines_meging_indexing.append(0) - indexer_b_s+=1 - - if indexer_b_s==self.b_s: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_zoo.get('ocr').generate( - 
pixel_values_merged.to(self.device)) - generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( - generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - - - indexer_text_region = indexer_text_region +1 - - if indexer_b_s!=0: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_zoo.get('ocr').generate(pixel_values_merged.to(self.device)) - generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode(generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - ####extracted_texts = [] - ####n_iterations = math.ceil(len(cropped_lines) / self.b_s) - - ####for i in range(n_iterations): - ####if i==(n_iterations-1): - ####n_start = i*self.b_s - ####imgs = cropped_lines[n_start:] - ####else: - ####n_start = i*self.b_s - ####n_end = (i+1)*self.b_s - ####imgs = cropped_lines[n_start:n_end] - ####pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values - ####generated_ids_merged = self.model_ocr.generate( - #### pixel_values_merged.to(self.device)) - ####generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( - #### generated_ids_merged, skip_special_tokens=True) - - ####extracted_texts = extracted_texts + generated_text_merged - - del cropped_lines - gc.collect() - - extracted_texts_merged = [extracted_texts[ind] - if cropped_lines_meging_indexing[ind]==0 - else extracted_texts[ind]+" "+extracted_texts[ind+1] - if cropped_lines_meging_indexing[ind]==1 - else None - for ind in range(len(cropped_lines_meging_indexing))] - - extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] - #print(extracted_texts_merged, len(extracted_texts_merged)) - - return EynollahOcrResult( - extracted_texts_merged=extracted_texts_merged, - extracted_conf_value_merged=None, - cropped_lines_region_indexer=cropped_lines_region_indexer, - total_bb_coordinates=total_bb_coordinates, - ) - - def run_cnn( - self, - *, - img: MatLike, - img_bin: Optional[MatLike], - page_tree: ET.ElementTree, - page_ns, - image_width, - image_height, - ) -> EynollahOcrResult: - - total_bb_coordinates = [] - - cropped_lines = [] - img_crop_bin = None - imgs_bin = None - imgs_bin_ver_flipped = None - cropped_lines_bin = [] - cropped_lines_ver_index = [] - cropped_lines_region_indexer = [] - cropped_lines_meging_indexing = [] - - indexer_text_region = 0 - for nn in page_tree.getroot().iter(f'{{{page_ns}}}TextRegion'): - try: - type_textregion = nn.attrib['type'] - except: - type_textregion = 'paragraph' - for child_textregion in nn: - if child_textregion.tag.endswith("TextLine"): - for child_textlines in child_textregion: - if child_textlines.tag.endswith("Coords"): - cropped_lines_region_indexer.append(indexer_text_region) - p_h=child_textlines.attrib['points'].split(' ') - textline_coords = np.array( [ [int(x.split(',')[0]), - int(x.split(',')[1]) ] - for x in p_h] ) - - x,y,w,h = cv2.boundingRect(textline_coords) - - angle_radians = math.atan2(h, w) - # Convert to degrees - angle_degrees = math.degrees(angle_radians) - if type_textregion=='drop-capital': - angle_degrees = 0 - - total_bb_coordinates.append([x,y,w,h]) - - w_scaled = w * image_height/float(h) - - img_poly_on_img = np.copy(img) - if img_bin: - img_poly_on_img_bin = np.copy(img_bin) - img_crop_bin = 
img_poly_on_img_bin[y:y+h, x:x+w, :] - - mask_poly = np.zeros(img.shape) - mask_poly = cv2.fillPoly(mask_poly, pts=[textline_coords], color=(1, 1, 1)) - - - mask_poly = mask_poly[y:y+h, x:x+w, :] - img_crop = img_poly_on_img[y:y+h, x:x+w, :] - - # print(file_name, angle_degrees, w*h, - # mask_poly[:,:,0].sum(), - # mask_poly[:,:,0].sum() /float(w*h) , - # 'didi') - - if angle_degrees > 3: - better_des_slope = get_orientation_moments(textline_coords) - - img_crop = rotate_image_with_padding(img_crop, better_des_slope) - if img_bin: - img_crop_bin = rotate_image_with_padding(img_crop_bin, better_des_slope) - - mask_poly = rotate_image_with_padding(mask_poly, better_des_slope) - mask_poly = mask_poly.astype('uint8') - - #new bounding box - x_n, y_n, w_n, h_n = get_contours_and_bounding_boxes(mask_poly[:,:,0]) - - mask_poly = mask_poly[y_n:y_n+h_n, x_n:x_n+w_n, :] - img_crop = img_crop[y_n:y_n+h_n, x_n:x_n+w_n, :] - - if not self.do_not_mask_with_textline_contour: - img_crop[mask_poly==0] = 255 - if img_bin: - img_crop_bin = img_crop_bin[y_n:y_n+h_n, x_n:x_n+w_n, :] - if not self.do_not_mask_with_textline_contour: - img_crop_bin[mask_poly==0] = 255 - - if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 90: - if img_bin: - img_crop, img_crop_bin = \ - break_curved_line_into_small_pieces_and_then_merge( - img_crop, mask_poly, img_crop_bin) - else: - img_crop, _ = \ - break_curved_line_into_small_pieces_and_then_merge( - img_crop, mask_poly) - - else: - better_des_slope = 0 - if not self.do_not_mask_with_textline_contour: - img_crop[mask_poly==0] = 255 - if img_bin: - if not self.do_not_mask_with_textline_contour: - img_crop_bin[mask_poly==0] = 255 - if type_textregion=='drop-capital': - pass - else: - if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 90: - if img_bin: - img_crop, img_crop_bin = \ - break_curved_line_into_small_pieces_and_then_merge( - img_crop, mask_poly, img_crop_bin) - else: - img_crop, _ = \ - break_curved_line_into_small_pieces_and_then_merge( - img_crop, mask_poly) - - if w_scaled < 750:#1.5*image_width: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop, image_height, image_width) - cropped_lines.append(img_fin) - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - cropped_lines_meging_indexing.append(0) - if img_bin: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop_bin, image_height, image_width) - cropped_lines_bin.append(img_fin) - else: - splited_images, splited_images_bin = return_textlines_split_if_needed( - img_crop, img_crop_bin if img_bin else None) - if splited_images: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - splited_images[0], image_height, image_width) - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(1) - - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - splited_images[1], image_height, image_width) - - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(-1) - - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - if img_bin: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - splited_images_bin[0], image_height, image_width) - cropped_lines_bin.append(img_fin) - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - splited_images_bin[1], image_height, image_width) - 
cropped_lines_bin.append(img_fin) - - else: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop, image_height, image_width) - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(0) - - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - if img_bin: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop_bin, image_height, image_width) - cropped_lines_bin.append(img_fin) - - - indexer_text_region = indexer_text_region +1 - - extracted_texts = [] - extracted_conf_value = [] - - n_iterations = math.ceil(len(cropped_lines) / self.b_s) - - # FIXME: copy pasta - for i in range(n_iterations): - if i==(n_iterations-1): - n_start = i*self.b_s - imgs = cropped_lines[n_start:] - imgs = np.array(imgs) - imgs = imgs.reshape(imgs.shape[0], image_height, image_width, 3) - - ver_imgs = np.array( cropped_lines_ver_index[n_start:] ) - indices_ver = np.where(ver_imgs == 1)[0] - - #print(indices_ver, 'indices_ver') - if len(indices_ver)>0: - imgs_ver_flipped = imgs[indices_ver, : ,: ,:] - imgs_ver_flipped = imgs_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - - else: - imgs_ver_flipped = None - - if img_bin: - imgs_bin = cropped_lines_bin[n_start:] - imgs_bin = np.array(imgs_bin) - imgs_bin = imgs_bin.reshape(imgs_bin.shape[0], image_height, image_width, 3) - - if len(indices_ver)>0: - imgs_bin_ver_flipped = imgs_bin[indices_ver, : ,: ,:] - imgs_bin_ver_flipped = imgs_bin_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - - else: - imgs_bin_ver_flipped = None - else: - n_start = i*self.b_s - n_end = (i+1)*self.b_s - imgs = cropped_lines[n_start:n_end] - imgs = np.array(imgs).reshape(self.b_s, image_height, image_width, 3) - - ver_imgs = np.array( cropped_lines_ver_index[n_start:n_end] ) - indices_ver = np.where(ver_imgs == 1)[0] - #print(indices_ver, 'indices_ver') - - if len(indices_ver)>0: - imgs_ver_flipped = imgs[indices_ver, : ,: ,:] - imgs_ver_flipped = imgs_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - else: - imgs_ver_flipped = None - - - if img_bin: - imgs_bin = cropped_lines_bin[n_start:n_end] - imgs_bin = np.array(imgs_bin).reshape(self.b_s, image_height, image_width, 3) - - - if len(indices_ver)>0: - imgs_bin_ver_flipped = imgs_bin[indices_ver, : ,: ,:] - imgs_bin_ver_flipped = imgs_bin_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - else: - imgs_bin_ver_flipped = None - - - self.logger.debug("processing next %d lines", len(imgs)) - preds = self.model_zoo.get('ocr').predict(imgs, verbose=0) - - if len(indices_ver)>0: - preds_flipped = self.model_zoo.get('ocr').predict(imgs_ver_flipped, verbose=0) - preds_max_fliped = np.max(preds_flipped, axis=2 ) - preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) - pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character - masked_means_flipped = \ - np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) - masked_means_flipped[np.isnan(masked_means_flipped)] = 0 - - preds_max = np.max(preds, axis=2 ) - preds_max_args = np.argmax(preds, axis=2 ) - pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character - - masked_means = \ - np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool, axis=1) - masked_means[np.isnan(masked_means)] = 0 - - masked_means_ver = masked_means[indices_ver] - 
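# A minimal numpy-only sketch of the confidence computed above (toy values;
# end_character is assumed to be the blank/end class index): the score is the
# mean of the per-timestep max softmax probabilities over non-blank time steps.
import numpy as np

preds_toy = np.random.rand(2, 7, 5)                        # (batch, timesteps, classes)
end_character_toy = 4
preds_max_toy = np.max(preds_toy, axis=2)                  # best score per time step
not_blank_toy = np.argmax(preds_toy, axis=2) != end_character_toy
conf_toy = np.sum(preds_max_toy * not_blank_toy, axis=1) / np.sum(not_blank_toy, axis=1)
# An all-blank line yields 0/0 = NaN here; the surrounding code resets NaNs to
# 0 before comparing flipped vs. unflipped confidences.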
#print(masked_means_ver, 'pred_max_not_unk') - - indices_where_flipped_conf_value_is_higher = \ - np.where(masked_means_flipped > masked_means_ver)[0] - - #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') - if len(indices_where_flipped_conf_value_is_higher)>0: - indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] - preds[indices_to_be_replaced,:,:] = \ - preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] - - if img_bin: - preds_bin = self.model_zoo.get('ocr').predict(imgs_bin, verbose=0) - - if len(indices_ver)>0: - preds_flipped = self.model_zoo.get('ocr').predict(imgs_bin_ver_flipped, verbose=0) - preds_max_fliped = np.max(preds_flipped, axis=2 ) - preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) - pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character - masked_means_flipped = \ - np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) - masked_means_flipped[np.isnan(masked_means_flipped)] = 0 - - preds_max = np.max(preds, axis=2 ) - preds_max_args = np.argmax(preds, axis=2 ) - pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character - - masked_means = \ - np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool, axis=1) - masked_means[np.isnan(masked_means)] = 0 - - masked_means_ver = masked_means[indices_ver] - #print(masked_means_ver, 'pred_max_not_unk') - - indices_where_flipped_conf_value_is_higher = \ - np.where(masked_means_flipped > masked_means_ver)[0] - - #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') - if len(indices_where_flipped_conf_value_is_higher)>0: - indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] - preds_bin[indices_to_be_replaced,:,:] = \ - preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] - - preds = (preds + preds_bin) / 2. - - pred_texts = decode_batch_predictions(preds, self.model_zoo.get('num_to_char')) - - preds_max = np.max(preds, axis=2 ) - preds_max_args = np.argmax(preds, axis=2 ) - pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character - masked_means = \ - np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool, axis=1) - - for ib in range(imgs.shape[0]): - pred_texts_ib = pred_texts[ib].replace("[UNK]", "") - if masked_means[ib] >= self.min_conf_value_of_textline_text: - extracted_texts.append(pred_texts_ib) - extracted_conf_value.append(masked_means[ib]) - else: - extracted_texts.append("") - extracted_conf_value.append(0) - del cropped_lines - del cropped_lines_bin - gc.collect() - - extracted_texts_merged = [extracted_texts[ind] - if cropped_lines_meging_indexing[ind]==0 - else extracted_texts[ind]+" "+extracted_texts[ind+1] - if cropped_lines_meging_indexing[ind]==1 - else None - for ind in range(len(cropped_lines_meging_indexing))] - - extracted_conf_value_merged = [extracted_conf_value[ind] # type: ignore - if cropped_lines_meging_indexing[ind]==0 - else (extracted_conf_value[ind]+extracted_conf_value[ind+1])/2. 
- if cropped_lines_meging_indexing[ind]==1 - else None - for ind in range(len(cropped_lines_meging_indexing))] - - extracted_conf_value_merged: List[float] = [extracted_conf_value_merged[ind_cfm] - for ind_cfm in range(len(extracted_texts_merged)) - if extracted_texts_merged[ind_cfm] is not None] - - extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] - - return EynollahOcrResult( - extracted_texts_merged=extracted_texts_merged, - extracted_conf_value_merged=extracted_conf_value_merged, - cropped_lines_region_indexer=cropped_lines_region_indexer, - total_bb_coordinates=total_bb_coordinates, - ) - - def write_ocr( - self, - *, - result: EynollahOcrResult, - page_tree: ET.ElementTree, - out_file_ocr, - page_ns, - img, - out_image_with_text, - ): - cropped_lines_region_indexer = result.cropped_lines_region_indexer - total_bb_coordinates = result.total_bb_coordinates - extracted_texts_merged = result.extracted_texts_merged - extracted_conf_value_merged = result.extracted_conf_value_merged - - unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) - if out_image_with_text: - image_text = Image.new("RGB", (img.shape[1], img.shape[0]), "white") - draw = ImageDraw.Draw(image_text) - font = get_font() - - for indexer_text, bb_ind in enumerate(total_bb_coordinates): - x_bb = bb_ind[0] - y_bb = bb_ind[1] - w_bb = bb_ind[2] - h_bb = bb_ind[3] - - font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], - font.path, w_bb, int(h_bb*0.4) ) - - ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) - - text_bbox = draw.textbbox((0, 0), extracted_texts_merged[indexer_text], font=font) - text_width = text_bbox[2] - text_bbox[0] - text_height = text_bbox[3] - text_bbox[1] - - text_x = x_bb + (w_bb - text_width) // 2 # Center horizontally - text_y = y_bb + (h_bb - text_height) // 2 # Center vertically - - # Draw the text - draw.text((text_x, text_y), extracted_texts_merged[indexer_text], fill="black", font=font) - image_text.save(out_image_with_text) - - text_by_textregion = [] - for ind in unique_cropped_lines_region_indexer: - ind = np.array(cropped_lines_region_indexer)==ind - extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] - if len(extracted_texts_merged_un)>1: - text_by_textregion_ind = "" - next_glue = "" - for indt in range(len(extracted_texts_merged_un)): - if (extracted_texts_merged_un[indt].endswith('⸗') or - extracted_texts_merged_un[indt].endswith('-') or - extracted_texts_merged_un[indt].endswith('¬')): - text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt][:-1] - next_glue = "" - else: - text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt] - next_glue = " " - text_by_textregion.append(text_by_textregion_ind) - else: - text_by_textregion.append(" ".join(extracted_texts_merged_un)) - - indexer = 0 - indexer_textregion = 0 - for nn in page_tree.getroot().iter(f'{{{page_ns}}}TextRegion'): - - is_textregion_text = False - for childtest in nn: - if childtest.tag.endswith("TextEquiv"): - is_textregion_text = True - - if not is_textregion_text: - text_subelement_textregion = ET.SubElement(nn, 'TextEquiv') - unicode_textregion = ET.SubElement(text_subelement_textregion, 'Unicode') - - - has_textline = False - for child_textregion in nn: - if child_textregion.tag.endswith("TextLine"): - - is_textline_text = False - for childtest2 in child_textregion: - if childtest2.tag.endswith("TextEquiv"): - is_textline_text = True - - - if not is_textline_text: - 
text_subelement = ET.SubElement(child_textregion, 'TextEquiv') - if extracted_conf_value_merged: - text_subelement.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") - unicode_textline = ET.SubElement(text_subelement, 'Unicode') - unicode_textline.text = extracted_texts_merged[indexer] - else: - for childtest3 in child_textregion: - if childtest3.tag.endswith("TextEquiv"): - for child_uc in childtest3: - if child_uc.tag.endswith("Unicode"): - if extracted_conf_value_merged: - childtest3.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") - child_uc.text = extracted_texts_merged[indexer] - - indexer = indexer + 1 - has_textline = True - if has_textline: - if is_textregion_text: - for child4 in nn: - if child4.tag.endswith("TextEquiv"): - for childtr_uc in child4: - if childtr_uc.tag.endswith("Unicode"): - childtr_uc.text = text_by_textregion[indexer_textregion] - else: - unicode_textregion.text = text_by_textregion[indexer_textregion] - indexer_textregion = indexer_textregion + 1 - - ET.register_namespace("",page_ns) - page_tree.write(out_file_ocr, xml_declaration=True, method='xml', encoding="utf-8", default_namespace=None) - - def run( - self, - *, - overwrite: bool = False, - dir_in: Optional[str] = None, - dir_in_bin: Optional[str] = None, - image_filename: Optional[str] = None, - dir_xmls: str, - dir_out_image_text: Optional[str] = None, - dir_out: str, - ): - """ - Run OCR. - - Args: - - dir_in_bin (str): Prediction with RGB and binarized images for selected pages, should not be the default - """ - if dir_in: - ls_imgs = [os.path.join(dir_in, image_filename) - for image_filename in filter(is_image_filename, - os.listdir(dir_in))] - else: - assert image_filename - ls_imgs = [image_filename] - - for img_filename in ls_imgs: - file_stem = Path(img_filename).stem - page_file_in = os.path.join(dir_xmls, file_stem+'.xml') - out_file_ocr = os.path.join(dir_out, file_stem+'.xml') - - if os.path.exists(out_file_ocr): - if overwrite: - self.logger.warning("will overwrite existing output file '%s'", out_file_ocr) - else: - self.logger.warning("will skip input for existing output file '%s'", out_file_ocr) - return - - img = cv2.imread(img_filename) - - page_tree = ET.parse(page_file_in, parser = ET.XMLParser(encoding="utf-8")) - page_ns = etree_namespace_for_element_tag(page_tree.getroot().tag) - - out_image_with_text = None - if dir_out_image_text: - out_image_with_text = os.path.join(dir_out_image_text, file_stem + '.png') - - img_bin = None - if dir_in_bin: - img_bin = cv2.imread(os.path.join(dir_in_bin, file_stem+'.png')) - - - if self.tr_ocr: - result = self.run_trocr( - img=img, - page_tree=page_tree, - page_ns=page_ns, - - tr_ocr_input_height_and_width = 384 - ) - else: - result = self.run_cnn( - img=img, - page_tree=page_tree, - page_ns=page_ns, - - img_bin=img_bin, - image_width=512, - image_height=32, - ) - - self.write_ocr( - result=result, - img=img, - page_tree=page_tree, - page_ns=page_ns, - out_file_ocr=out_file_ocr, - out_image_with_text=out_image_with_text, - ) diff --git a/src/eynollah/image_enhancer.py b/src/eynollah/image_enhancer.py deleted file mode 100644 index fe1e16d..0000000 --- a/src/eynollah/image_enhancer.py +++ /dev/null @@ -1,94 +0,0 @@ -""" -Image enhancer. The output can be written as same scale of input or in new predicted scale. 
-""" - -import logging -import os -from typing import Optional -from pathlib import Path - -import cv2 - -from .eynollah import Eynollah -from .model_zoo import EynollahModelZoo -from .utils.resize import resize_image -from .utils import is_image_filename - - -class Enhancer(Eynollah): - def __init__( - self, - *, - model_zoo: EynollahModelZoo, - num_col_upper: int = 0, - num_col_lower: int = 0, - save_org_scale: bool = False, - device: str = '', - ): - self.save_org_scale = save_org_scale - self.num_col_upper = int(num_col_upper) - self.num_col_lower = int(num_col_lower) - self.input_binary = False - self.ignore_page_extraction = False - - self.logger = logging.getLogger('eynollah.enhance') - self.model_zoo = model_zoo - self.setup_models(device=device) - - def setup_models(self, device=''): - loadable = ['enhancement', 'col_classifier', 'page'] - self.model_zoo.load_models(*loadable, device=device) - for model in loadable: - self.logger.debug("model %s has input shape %s", model, - self.model_zoo.get(model).input_shape) - - def run_single(self, - img_filename: str, - img_pil=None, - dir_out: Optional[str] = None, - overwrite: bool = False, - ) -> None: - - image = self.cache_images(image_filename=img_filename, image_pil=img_pil) - output_filename = os.path.join(dir_out or "", image['name'] + '.png') - - if os.path.exists(output_filename): - if overwrite: - self.logger.warning("will overwrite existing output file '%s'", output_filename) - else: - self.logger.warning("will skip input for existing output file '%s'", output_filename) - return - - self.resize_image_with_column_classifier(image) - img_org = image['img'] - img_res = image['img_res'] - if self.save_org_scale: - img_res = resize_image(img_res, img_org.shape[0], img_org.shape[1]) - - cv2.imwrite(output_filename, img_res) - self.logger.info("output filename: '%s'", output_filename) - - def run(self, - overwrite: bool = False, - image_filename: Optional[str] = None, - dir_in: Optional[str] = None, - dir_out: Optional[str] = None, - ): - """ - Enlarge and enhance the scanned images - """ - if dir_in: - ls_imgs = [os.path.join(dir_in, image_filename) - for image_filename in filter(is_image_filename, - os.listdir(dir_in))] - elif image_filename: - ls_imgs = [image_filename] - else: - raise ValueError("run requires either a single image filename or a directory") - - for img_filename in ls_imgs: - self.logger.info(img_filename) - - self.run_single(img_filename, - dir_out=dir_out, - overwrite=overwrite) diff --git a/src/eynollah/mb_ro_on_layout.py b/src/eynollah/mb_ro_on_layout.py deleted file mode 100644 index b0b5910..0000000 --- a/src/eynollah/mb_ro_on_layout.py +++ /dev/null @@ -1,805 +0,0 @@ -""" -Machine learning based reading order detection -""" - -# pyright: reportCallIssue=false -# pyright: reportUnboundVariable=false -# pyright: reportArgumentType=false - -import logging -import os -import time -from typing import Optional -from pathlib import Path -import xml.etree.ElementTree as ET - -import cv2 -import numpy as np -import statistics - -os.environ['TF_USE_LEGACY_KERAS'] = '1' # avoid Keras 3 after TF 2.15 -import tensorflow as tf - -from .model_zoo import EynollahModelZoo -from .utils.resize import resize_image -from .utils.contour import ( - find_new_features_of_contours, - return_contours_of_image, - return_parent_contours, -) -from .utils import is_xml_filename - -DPI_THRESHOLD = 298 -KERNEL = np.ones((5, 5), np.uint8) - - -class machine_based_reading_order_on_layout: - def __init__( - self, - *, - model_zoo: 
EynollahModelZoo, - logger : Optional[logging.Logger] = None, - ): - self.logger = logger or logging.getLogger('eynollah.mbreorder') - self.model_zoo = model_zoo - - try: - for device in tf.config.list_physical_devices('GPU'): - tf.config.experimental.set_memory_growth(device, True) - except: - self.logger.warning("no GPU device available") - - self.model_zoo.load_models('reading_order') - - def read_xml(self, xml_file): - tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) - root1=tree1.getroot() - alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' - - index_tot_regions = [] - tot_region_ref = [] - - y_len, x_len = 0, 0 - for jj in root1.iter(link+'Page'): - y_len=int(jj.attrib['imageHeight']) - x_len=int(jj.attrib['imageWidth']) - - for jj in root1.iter(link+'RegionRefIndexed'): - index_tot_regions.append(jj.attrib['index']) - tot_region_ref.append(jj.attrib['regionRef']) - - if (link+'PrintSpace' in alltags) or (link+'Border' in alltags): - co_printspace = [] - if link+'PrintSpace' in alltags: - region_tags_printspace = np.unique([x for x in alltags if x.endswith('PrintSpace')]) - else: - region_tags_printspace = np.unique([x for x in alltags if x.endswith('Border')]) - - for tag in region_tags_printspace: - if link+'PrintSpace' in alltags: - tag_endings_printspace = ['}PrintSpace','}printspace'] - else: - tag_endings_printspace = ['}Border','}border'] - - if tag.endswith(tag_endings_printspace[0]) or tag.endswith(tag_endings_printspace[1]): - for nn in root1.iter(tag): - c_t_in = [] - sumi = 0 - for vv in nn.iter(): - # check the format of coords - if vv.tag == link + 'Coords': - coords = bool(vv.attrib) - if coords: - p_h = vv.attrib['points'].split(' ') - c_t_in.append( - np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) - break - else: - pass - - if vv.tag == link + 'Point': - c_t_in.append([int(float(vv.attrib['x'])), int(float(vv.attrib['y']))]) - sumi += 1 - elif vv.tag != link + 'Point' and sumi >= 1: - break - co_printspace.append(np.array(c_t_in)) - img_printspace = np.zeros( (y_len,x_len,3) ) - img_printspace=cv2.fillPoly(img_printspace, pts =co_printspace, color=(1,1,1)) - img_printspace = img_printspace.astype(np.uint8) - - imgray = cv2.cvtColor(img_printspace, cv2.COLOR_BGR2GRAY) - _, thresh = cv2.threshold(imgray, 0, 255, 0) - contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - cnt_size = np.array([cv2.contourArea(contours[j]) for j in range(len(contours))]) - cnt = contours[np.argmax(cnt_size)] - x, y, w, h = cv2.boundingRect(cnt) - - bb_coord_printspace = [x, y, w, h] - - else: - bb_coord_printspace = None - - - region_tags=np.unique([x for x in alltags if x.endswith('Region')]) - co_text_paragraph=[] - co_text_drop=[] - co_text_heading=[] - co_text_header=[] - co_text_marginalia=[] - co_text_catch=[] - co_text_page_number=[] - co_text_signature_mark=[] - co_sep=[] - co_img=[] - co_table=[] - co_graphic=[] - co_graphic_text_annotation=[] - co_graphic_decoration=[] - co_noise=[] - - co_text_paragraph_text=[] - co_text_drop_text=[] - co_text_heading_text=[] - co_text_header_text=[] - co_text_marginalia_text=[] - co_text_catch_text=[] - co_text_page_number_text=[] - co_text_signature_mark_text=[] - co_sep_text=[] - co_img_text=[] - co_table_text=[] - co_graphic_text=[] - co_graphic_text_annotation_text=[] - co_graphic_decoration_text=[] - co_noise_text=[] - - id_paragraph = [] - id_header = [] - id_heading = [] - id_marginalia = [] - - for tag in region_tags: - if 
tag.endswith('}TextRegion') or tag.endswith('}Textregion'): - for nn in root1.iter(tag): - for child2 in nn: - tag2 = child2.tag - if tag2.endswith('}TextEquiv') or tag2.endswith('}TextEquiv'): - for childtext2 in child2: - if childtext2.tag.endswith('}Unicode') or childtext2.tag.endswith('}Unicode'): - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - co_text_drop_text.append(childtext2.text) - elif "type" in nn.attrib and nn.attrib['type']=='heading': - co_text_heading_text.append(childtext2.text) - elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': - co_text_signature_mark_text.append(childtext2.text) - elif "type" in nn.attrib and nn.attrib['type']=='header': - co_text_header_text.append(childtext2.text) - ###elif "type" in nn.attrib and nn.attrib['type']=='catch-word': - ###co_text_catch_text.append(childtext2.text) - ###elif "type" in nn.attrib and nn.attrib['type']=='page-number': - ###co_text_page_number_text.append(childtext2.text) - elif "type" in nn.attrib and nn.attrib['type']=='marginalia': - co_text_marginalia_text.append(childtext2.text) - else: - co_text_paragraph_text.append(childtext2.text) - c_t_in_drop=[] - c_t_in_paragraph=[] - c_t_in_heading=[] - c_t_in_header=[] - c_t_in_page_number=[] - c_t_in_signature_mark=[] - c_t_in_catch=[] - c_t_in_marginalia=[] - - - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - - coords=bool(vv.attrib) - if coords: - #print('birda1') - p_h=vv.attrib['points'].split(' ') - - - - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - - c_t_in_drop.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - elif "type" in nn.attrib and nn.attrib['type']=='heading': - ##id_heading.append(nn.attrib['id']) - c_t_in_heading.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': - - c_t_in_signature_mark.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - #print(c_t_in_paragraph) - elif "type" in nn.attrib and nn.attrib['type']=='header': - #id_header.append(nn.attrib['id']) - c_t_in_header.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - ###elif "type" in nn.attrib and nn.attrib['type']=='catch-word': - ###c_t_in_catch.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - ###elif "type" in nn.attrib and nn.attrib['type']=='page-number': - - ###c_t_in_page_number.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - elif "type" in nn.attrib and nn.attrib['type']=='marginalia': - #id_marginalia.append(nn.attrib['id']) - - c_t_in_marginalia.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - else: - #id_paragraph.append(nn.attrib['id']) - - c_t_in_paragraph.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - break - else: - pass - - - if vv.tag==link+'Point': - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - - c_t_in_drop.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif "type" in nn.attrib and nn.attrib['type']=='heading': - #id_heading.append(nn.attrib['id']) - c_t_in_heading.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - - elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': - - c_t_in_signature_mark.append([ 
int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - elif "type" in nn.attrib and nn.attrib['type']=='header': - #id_header.append(nn.attrib['id']) - c_t_in_header.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - - ###elif "type" in nn.attrib and nn.attrib['type']=='catch-word': - ###c_t_in_catch.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - ###sumi+=1 - - ###elif "type" in nn.attrib and nn.attrib['type']=='page-number': - - ###c_t_in_page_number.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - ###sumi+=1 - - elif "type" in nn.attrib and nn.attrib['type']=='marginalia': - #id_marginalia.append(nn.attrib['id']) - - c_t_in_marginalia.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - else: - #id_paragraph.append(nn.attrib['id']) - c_t_in_paragraph.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - - if len(c_t_in_drop)>0: - co_text_drop.append(np.array(c_t_in_drop)) - if len(c_t_in_paragraph)>0: - co_text_paragraph.append(np.array(c_t_in_paragraph)) - id_paragraph.append(nn.attrib['id']) - if len(c_t_in_heading)>0: - co_text_heading.append(np.array(c_t_in_heading)) - id_heading.append(nn.attrib['id']) - - if len(c_t_in_header)>0: - co_text_header.append(np.array(c_t_in_header)) - id_header.append(nn.attrib['id']) - if len(c_t_in_page_number)>0: - co_text_page_number.append(np.array(c_t_in_page_number)) - if len(c_t_in_catch)>0: - co_text_catch.append(np.array(c_t_in_catch)) - - if len(c_t_in_signature_mark)>0: - co_text_signature_mark.append(np.array(c_t_in_signature_mark)) - - if len(c_t_in_marginalia)>0: - co_text_marginalia.append(np.array(c_t_in_marginalia)) - id_marginalia.append(nn.attrib['id']) - - - elif tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): - for nn in root1.iter(tag): - c_t_in=[] - c_t_in_text_annotation=[] - c_t_in_decoration=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - - if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': - c_t_in_text_annotation.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - elif "type" in nn.attrib and nn.attrib['type']=='decoration': - c_t_in_decoration.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - else: - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - break - else: - pass - - - if vv.tag==link+'Point': - if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': - c_t_in_text_annotation.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif "type" in nn.attrib and nn.attrib['type']=='decoration': - c_t_in_decoration.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - else: - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - if len(c_t_in_text_annotation)>0: - co_graphic_text_annotation.append(np.array(c_t_in_text_annotation)) - if len(c_t_in_decoration)>0: - co_graphic_decoration.append(np.array(c_t_in_decoration)) - if len(c_t_in)>0: - co_graphic.append(np.array(c_t_in)) - - - - elif tag.endswith('}ImageRegion') or tag.endswith('}imageregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format 
of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - elif vv.tag!=link+'Point' and sumi>=1: - break - co_img.append(np.array(c_t_in)) - co_img_text.append(' ') - - - elif tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - elif vv.tag!=link+'Point' and sumi>=1: - break - co_sep.append(np.array(c_t_in)) - - - - elif tag.endswith('}TableRegion') or tag.endswith('}tableregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - co_table.append(np.array(c_t_in)) - co_table_text.append(' ') - - elif tag.endswith('}NoiseRegion') or tag.endswith('}noiseregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - co_noise.append(np.array(c_t_in)) - co_noise_text.append(' ') - - img = np.zeros( (y_len,x_len,3) ) - img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=(1,1,1)) - - img_poly=cv2.fillPoly(img, pts =co_text_heading, color=(2,2,2)) - img_poly=cv2.fillPoly(img, pts =co_text_header, color=(2,2,2)) - img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=(3,3,3)) - img_poly=cv2.fillPoly(img, pts =co_img, color=(4,4,4)) - img_poly=cv2.fillPoly(img, pts =co_sep, color=(5,5,5)) - - return tree1, root1, bb_coord_printspace, id_paragraph, id_header+id_heading, co_text_paragraph, co_text_header+co_text_heading,\ - tot_region_ref,x_len, y_len,index_tot_regions, img_poly - - def return_indexes_of_contours_loctaed_inside_another_list_of_contours(self, contours, contours_loc, cx_main_loc, cy_main_loc, indexes_loc): - indexes_of_located_cont = [] - center_x_coordinates_of_located = [] - center_y_coordinates_of_located = [] - #M_main_tot = [cv2.moments(contours_loc[j]) - #for j in range(len(contours_loc))] - #cx_main_loc = [(M_main_tot[j]["m10"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] - #cy_main_loc = [(M_main_tot[j]["m01"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] - - for ij in range(len(contours)): - results = 
[cv2.pointPolygonTest(contours[ij], (cx_main_loc[ind], cy_main_loc[ind]), False) - for ind in range(len(cy_main_loc)) ] - results = np.array(results) - indexes_in = np.where((results == 0) | (results == 1)) - indexes = indexes_loc[indexes_in]# [(results == 0) | (results == 1)]#np.where((results == 0) | (results == 1)) - - indexes_of_located_cont.append(indexes) - center_x_coordinates_of_located.append(np.array(cx_main_loc)[indexes_in] ) - center_y_coordinates_of_located.append(np.array(cy_main_loc)[indexes_in] ) - - return indexes_of_located_cont, center_x_coordinates_of_located, center_y_coordinates_of_located - - def do_order_of_regions_with_model(self, contours_only_text_parent, contours_only_text_parent_h, text_regions_p): - height1 =672#448 - width1 = 448#224 - - height2 =672#448 - width2= 448#224 - - height3 =672#448 - width3 = 448#224 - - inference_bs = 3 - - ver_kernel = np.ones((5, 1), dtype=np.uint8) - hor_kernel = np.ones((1, 5), dtype=np.uint8) - - - min_cont_size_to_be_dilated = 10 - if len(contours_only_text_parent)>min_cont_size_to_be_dilated: - cx_conts, cy_conts, x_min_conts, x_max_conts, y_min_conts, y_max_conts, _ = find_new_features_of_contours(contours_only_text_parent) - args_cont_located = np.array(range(len(contours_only_text_parent))) - - diff_y_conts = np.abs(y_max_conts[:]-y_min_conts) - diff_x_conts = np.abs(x_max_conts[:]-x_min_conts) - - mean_x = statistics.mean(diff_x_conts) - median_x = statistics.median(diff_x_conts) - - - diff_x_ratio= diff_x_conts/mean_x - - args_cont_located_excluded = args_cont_located[diff_x_ratio>=1.3] - args_cont_located_included = args_cont_located[diff_x_ratio<1.3] - - contours_only_text_parent_excluded = [contours_only_text_parent[ind] for ind in range(len(contours_only_text_parent)) if diff_x_ratio[ind]>=1.3]#contours_only_text_parent[diff_x_ratio>=1.3] - contours_only_text_parent_included = [contours_only_text_parent[ind] for ind in range(len(contours_only_text_parent)) if diff_x_ratio[ind]<1.3]#contours_only_text_parent[diff_x_ratio<1.3] - - - cx_conts_excluded = [cx_conts[ind] for ind in range(len(cx_conts)) if diff_x_ratio[ind]>=1.3]#cx_conts[diff_x_ratio>=1.3] - cx_conts_included = [cx_conts[ind] for ind in range(len(cx_conts)) if diff_x_ratio[ind]<1.3]#cx_conts[diff_x_ratio<1.3] - - cy_conts_excluded = [cy_conts[ind] for ind in range(len(cy_conts)) if diff_x_ratio[ind]>=1.3]#cy_conts[diff_x_ratio>=1.3] - cy_conts_included = [cy_conts[ind] for ind in range(len(cy_conts)) if diff_x_ratio[ind]<1.3]#cy_conts[diff_x_ratio<1.3] - - #print(diff_x_ratio, 'ratio') - text_regions_p = text_regions_p.astype('uint8') - - if len(contours_only_text_parent_excluded)>0: - textregion_par = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1])).astype('uint8') - textregion_par = cv2.fillPoly(textregion_par, pts=contours_only_text_parent_included, color=(1,1)) - else: - textregion_par = (text_regions_p[:,:]==1)*1 - textregion_par = textregion_par.astype('uint8') - - text_regions_p_textregions_dilated = cv2.erode(textregion_par , hor_kernel, iterations=2) - text_regions_p_textregions_dilated = cv2.dilate(text_regions_p_textregions_dilated , ver_kernel, iterations=4) - text_regions_p_textregions_dilated = cv2.erode(text_regions_p_textregions_dilated , hor_kernel, iterations=1) - text_regions_p_textregions_dilated = cv2.dilate(text_regions_p_textregions_dilated , ver_kernel, iterations=5) - text_regions_p_textregions_dilated[text_regions_p[:,:]>1] = 0 - - - contours_only_dilated, hir_on_text_dilated = 
return_contours_of_image(text_regions_p_textregions_dilated) - contours_only_dilated = return_parent_contours(contours_only_dilated, hir_on_text_dilated) - - indexes_of_located_cont, center_x_coordinates_of_located, center_y_coordinates_of_located = self.return_indexes_of_contours_loctaed_inside_another_list_of_contours(contours_only_dilated, contours_only_text_parent_included, cx_conts_included, cy_conts_included, args_cont_located_included) - - - if len(args_cont_located_excluded)>0: - for ind in args_cont_located_excluded: - indexes_of_located_cont.append(np.array([ind])) - contours_only_dilated.append(contours_only_text_parent[ind]) - center_y_coordinates_of_located.append(0) - - array_list = [np.array([elem]) if isinstance(elem, int) else elem for elem in indexes_of_located_cont] - flattened_array = np.concatenate([arr.ravel() for arr in array_list]) - #print(len( np.unique(flattened_array)), 'indexes_of_located_cont uniques') - - missing_textregions = list( set(np.array(range(len(contours_only_text_parent))) ) - set(np.unique(flattened_array)) ) - #print(missing_textregions, 'missing_textregions') - - for ind in missing_textregions: - indexes_of_located_cont.append(np.array([ind])) - contours_only_dilated.append(contours_only_text_parent[ind]) - center_y_coordinates_of_located.append(0) - - - if contours_only_text_parent_h: - for vi in range(len(contours_only_text_parent_h)): - indexes_of_located_cont.append(int(vi+len(contours_only_text_parent))) - - array_list = [np.array([elem]) if isinstance(elem, int) else elem for elem in indexes_of_located_cont] - flattened_array = np.concatenate([arr.ravel() for arr in array_list]) - - y_len = text_regions_p.shape[0] - x_len = text_regions_p.shape[1] - - img_poly = np.zeros((y_len,x_len), dtype='uint8') - ###img_poly[text_regions_p[:,:]==1] = 1 - ###img_poly[text_regions_p[:,:]==2] = 2 - ###img_poly[text_regions_p[:,:]==3] = 4 - ###img_poly[text_regions_p[:,:]==6] = 5 - - ##img_poly[text_regions_p[:,:]==1] = 1 - ##img_poly[text_regions_p[:,:]==2] = 2 - ##img_poly[text_regions_p[:,:]==3] = 3 - ##img_poly[text_regions_p[:,:]==4] = 4 - ##img_poly[text_regions_p[:,:]==5] = 5 - - img_poly = np.copy(text_regions_p) - - img_header_and_sep = np.zeros((y_len,x_len), dtype='uint8') - if contours_only_text_parent_h: - _, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, _ = find_new_features_of_contours( - contours_only_text_parent_h) - for j in range(len(cy_main)): - img_header_and_sep[int(y_max_main[j]):int(y_max_main[j])+12, - int(x_min_main[j]):int(x_max_main[j])] = 1 - co_text_all_org = contours_only_text_parent + contours_only_text_parent_h - if len(contours_only_text_parent)>min_cont_size_to_be_dilated: - co_text_all = contours_only_dilated + contours_only_text_parent_h - else: - co_text_all = contours_only_text_parent + contours_only_text_parent_h - else: - co_text_all_org = contours_only_text_parent - if len(contours_only_text_parent)>min_cont_size_to_be_dilated: - co_text_all = contours_only_dilated - else: - co_text_all = contours_only_text_parent - - if not len(co_text_all): - return [], [] - - labels_con = np.zeros((int(y_len /6.), int(x_len/6.), len(co_text_all)), dtype=bool) - - co_text_all = [(i/6).astype(int) for i in co_text_all] - for i in range(len(co_text_all)): - img = labels_con[:,:,i].astype(np.uint8) - - #img = cv2.resize(img, (int(img.shape[1]/6), int(img.shape[0]/6)), interpolation=cv2.INTER_NEAREST) - - cv2.fillPoly(img, pts=[co_text_all[i]], color=(1,)) - labels_con[:,:,i] = img - - - labels_con = 
resize_image(labels_con.astype(np.uint8), height1, width1).astype(bool) - img_header_and_sep = resize_image(img_header_and_sep, height1, width1) - img_poly = resize_image(img_poly, height3, width3) - - - - input_1 = np.zeros((inference_bs, height1, width1, 3)) - ordered = [list(range(len(co_text_all)))] - index_update = 0 - #print(labels_con.shape[2],"number of regions for reading order") - while index_update>=0: - ij_list = ordered.pop(index_update) - i = ij_list.pop(0) - - ante_list = [] - post_list = [] - tot_counter = 0 - batch = [] - for j in ij_list: - img1 = labels_con[:,:,i].astype(float) - img2 = labels_con[:,:,j].astype(float) - img1[img_poly==5] = 2 - img2[img_poly==5] = 2 - img1[img_header_and_sep==1] = 3 - img2[img_header_and_sep==1] = 3 - - input_1[len(batch), :, :, 0] = img1 / 3. - input_1[len(batch), :, :, 2] = img2 / 3. - input_1[len(batch), :, :, 1] = img_poly / 5. - - tot_counter += 1 - batch.append(j) - if tot_counter % inference_bs == 0 or tot_counter == len(ij_list): - y_pr = self.model_zoo.get('reading_order').predict(input_1 , verbose='0') - for jb, j in enumerate(batch): - if y_pr[jb][0]>=0.5: - post_list.append(j) - else: - ante_list.append(j) - batch = [] - - if len(ante_list): - ordered.insert(index_update, ante_list) - index_update += 1 - ordered.insert(index_update, [i]) - if len(post_list): - ordered.insert(index_update + 1, post_list) - - index_update = -1 - for index_next, ij_list in enumerate(ordered): - if len(ij_list) > 1: - index_update = index_next - break - - ordered = [i[0] for i in ordered] - - ##id_all_text = np.array(id_all_text)[index_sort] - - - if len(contours_only_text_parent)>min_cont_size_to_be_dilated: - org_contours_indexes = [] - for ind in range(len(ordered)): - region_with_curr_order = ordered[ind] - if region_with_curr_order < len(contours_only_dilated): - if np.isscalar(indexes_of_located_cont[region_with_curr_order]): - org_contours_indexes = org_contours_indexes + [indexes_of_located_cont[region_with_curr_order]] - else: - arg_sort_located_cont = np.argsort(center_y_coordinates_of_located[region_with_curr_order]) - org_contours_indexes = org_contours_indexes + list(np.array(indexes_of_located_cont[region_with_curr_order])[arg_sort_located_cont]) ##org_contours_indexes + list ( - else: - org_contours_indexes = org_contours_indexes + [indexes_of_located_cont[region_with_curr_order]] - - region_ids = ['region_%04d' % i for i in range(len(co_text_all_org))] - return org_contours_indexes, region_ids - else: - region_ids = ['region_%04d' % i for i in range(len(co_text_all_org))] - return ordered, region_ids - - - - - def run(self, - overwrite: bool = False, - xml_filename: Optional[str] = None, - dir_in: Optional[str] = None, - dir_out: Optional[str] = None, - ): - """ - Get image and scales, then extract the page of scanned image - """ - self.logger.debug("enter run") - t0_tot = time.time() - - if dir_in: - ls_xmls = [os.path.join(dir_in, xml_filename) - for xml_filename in filter(is_xml_filename, - os.listdir(dir_in))] - elif xml_filename: - ls_xmls = [xml_filename] - else: - raise ValueError("run requires either a single image filename or a directory") - - for xml_filename in ls_xmls: - self.logger.info(xml_filename) - t0 = time.time() - - file_name = Path(xml_filename).stem - (tree_xml, root_xml, bb_coord_printspace, id_paragraph, id_header, - co_text_paragraph, co_text_header, tot_region_ref, - x_len, y_len, index_tot_regions, img_poly) = self.read_xml(xml_filename) - - id_all_text = id_paragraph + id_header - - order_text_new, 
id_of_texts_tot = self.do_order_of_regions_with_model(co_text_paragraph, co_text_header, img_poly[:,:,0]) - - id_all_text = np.array(id_all_text)[order_text_new] - - alltags=[elem.tag for elem in root_xml.iter()] - - - - link=alltags[0].split('}')[0]+'}' - name_space = alltags[0].split('}')[0] - name_space = name_space.split('{')[1] - - page_element = root_xml.find(link+'Page') - - - old_ro = root_xml.find(".//{*}ReadingOrder") - - if old_ro is not None: - page_element.remove(old_ro) - - #print(old_ro, 'old_ro') - ro_subelement = ET.Element('ReadingOrder') - - ro_subelement2 = ET.SubElement(ro_subelement, 'OrderedGroup') - ro_subelement2.set('id', "ro357564684568544579089") - - for index, id_text in enumerate(id_all_text): - new_element_2 = ET.SubElement(ro_subelement2, 'RegionRefIndexed') - new_element_2.set('regionRef', id_all_text[index]) - new_element_2.set('index', str(index)) - - if (link+'PrintSpace' in alltags) or (link+'Border' in alltags): - page_element.insert(1, ro_subelement) - else: - page_element.insert(0, ro_subelement) - - alltags=[elem.tag for elem in root_xml.iter()] - - ET.register_namespace("",name_space) - assert dir_out - tree_xml.write(os.path.join(dir_out, file_name+'.xml'), - xml_declaration=True, - method='xml', - encoding="utf-8", - default_namespace=None) - - #sys.exit() - diff --git a/src/eynollah/model_zoo/.nfs00000002feddea7d00000031 b/src/eynollah/model_zoo/.nfs00000002feddea7d00000031 deleted file mode 100644 index c7dd87d..0000000 Binary files a/src/eynollah/model_zoo/.nfs00000002feddea7d00000031 and /dev/null differ diff --git a/src/eynollah/model_zoo/__init__.py b/src/eynollah/model_zoo/__init__.py deleted file mode 100644 index e1dc985..0000000 --- a/src/eynollah/model_zoo/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -__all__ = [ - 'EynollahModelZoo', -] -from .model_zoo import EynollahModelZoo diff --git a/src/eynollah/model_zoo/default_specs.py b/src/eynollah/model_zoo/default_specs.py deleted file mode 100644 index dc725e4..0000000 --- a/src/eynollah/model_zoo/default_specs.py +++ /dev/null @@ -1,252 +0,0 @@ -from .specs import EynollahModelSpec, EynollahModelSpecSet - -# NOTE: This needs to change whenever models/versions change -ZENODO = "https://zenodo.org/records/17727267" -MODELS_VERSION = "v0_8_0" - -def dist_url(dist_name: str="layout") -> str: - return f'{ZENODO}/models_{dist_name}_{MODELS_VERSION}.zip' - -DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ - - EynollahModelSpec( - category="enhancement", - variant='', - filename="models_eynollah/eynollah-enhancement_20210425", - dist_url=dist_url(), - type='Keras', - ), - - EynollahModelSpec( - category="binarization", - variant='hybrid', - filename="models_eynollah/eynollah-binarization-hybrid_20230504/model_bin_hybrid_trans_cnn_sbb_ens", - dist_url=dist_url(), - type='Keras', - ), - - EynollahModelSpec( - category="binarization", - variant='20210309', - filename="models_eynollah/eynollah-binarization_20210309", - dist_url=dist_url("extra"), - type='Keras', - ), - - EynollahModelSpec( - category="binarization", - variant='', - filename="models_eynollah/eynollah-binarization_20210425", - dist_url=dist_url("extra"), - type='Keras', - ), - - EynollahModelSpec( - category="col_classifier", - variant='', - filename="models_eynollah/eynollah-column-classifier_20210425", - dist_url=dist_url(), - type='Keras', - ), - - EynollahModelSpec( - category="page", - variant='', - filename="models_eynollah/model_eynollah_page_extraction_20250915", - dist_url=dist_url(), - type='Keras', - ), - - EynollahModelSpec( - 
category="region", - variant='', - filename="models_eynollah/eynollah-main-regions-ensembled_20210425", - dist_url=dist_url(), - type='Keras', - ), - - EynollahModelSpec( - category="extract_images", - variant='', - filename="models_eynollah/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18", - dist_url=dist_url(), - type='Keras', - ), - - EynollahModelSpec( - category="region", - variant='', - filename="models_eynollah/eynollah-main-regions_20220314", - dist_url=dist_url(), - help="early layout", - type='Keras', - ), - - EynollahModelSpec( - category="region_p2", - variant='non-light', - filename="models_eynollah/eynollah-main-regions-aug-rotation_20210425", - dist_url=dist_url('extra'), - help="early layout, non-light, 2nd part", - type='Keras', - ), - - EynollahModelSpec( - category="region_1_2", - variant='', - #filename="models_eynollah/modelens_12sp_elay_0_3_4__3_6_n", - #filename="models_eynollah/modelens_earlylayout_12spaltige_2_3_5_6_7_8", - #filename="models_eynollah/modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18", - #filename="models_eynollah/modelens_1_2_4_5_early_lay_1_2_spaltige", - #filename="models_eynollah/model_3_eraly_layout_no_patches_1_2_spaltige", - filename="models_eynollah/modelens_e_l_all_sp_0_1_2_3_4_171024", - dist_url=dist_url("layout"), - help="early layout, light, 1-or-2-column", - type='Keras', - ), - - EynollahModelSpec( - category="region_fl_np", - variant='', - #'filename="models_eynollah/modelens_full_lay_1_3_031124", - #'filename="models_eynollah/modelens_full_lay_13__3_19_241024", - #'filename="models_eynollah/model_full_lay_13_241024", - #'filename="models_eynollah/modelens_full_lay_13_17_231024", - #'filename="models_eynollah/modelens_full_lay_1_2_221024", - #'filename="models_eynollah/eynollah-full-regions-1column_20210425", - filename="models_eynollah/modelens_full_lay_1__4_3_091124", - dist_url=dist_url(), - help="full layout / no patches", - type='Keras', - ), - - # FIXME: Why is region_fl and region_fl_np the same model? 
- EynollahModelSpec( - category="region_fl", - variant='', - # filename="models_eynollah/eynollah-full-regions-3+column_20210425", - # filename="models_eynollah/model_2_full_layout_new_trans", - # filename="models_eynollah/modelens_full_lay_1_3_031124", - # filename="models_eynollah/modelens_full_lay_13__3_19_241024", - # filename="models_eynollah/model_full_lay_13_241024", - # filename="models_eynollah/modelens_full_lay_13_17_231024", - # filename="models_eynollah/modelens_full_lay_1_2_221024", - # filename="models_eynollah/modelens_full_layout_24_till_28", - # filename="models_eynollah/model_2_full_layout_new_trans", - filename="models_eynollah/modelens_full_lay_1__4_3_091124", - dist_url=dist_url(), - help="full layout / with patches", - type='Keras', - ), - - EynollahModelSpec( - category="reading_order", - variant='', - #filename="models_eynollah/model_mb_ro_aug_ens_11", - #filename="models_eynollah/model_step_3200000_mb_ro", - #filename="models_eynollah/model_ens_reading_order_machine_based", - #filename="models_eynollah/model_mb_ro_aug_ens_8", - #filename="models_eynollah/model_ens_reading_order_machine_based", - filename="models_eynollah/model_eynollah_reading_order_20250824", - dist_url=dist_url(), - type='Keras', - ), - - EynollahModelSpec( - category="textline", - variant='non-light', - #filename="models_eynollah/modelens_textline_1_4_16092024", - #filename="models_eynollah/model_textline_ens_3_4_5_6_artificial", - #filename="models_eynollah/modelens_textline_1_3_4_20240915", - #filename="models_eynollah/model_textline_ens_3_4_5_6_artificial", - #filename="models_eynollah/modelens_textline_9_12_13_14_15", - #filename="models_eynollah/eynollah-textline_20210425", - filename="models_eynollah/modelens_textline_0_1__2_4_16092024", - dist_url=dist_url('extra'), - type='Keras', - ), - - EynollahModelSpec( - category="textline", - variant='', - #filename="models_eynollah/eynollah-textline_light_20210425", - filename="models_eynollah/modelens_textline_0_1__2_4_16092024", - dist_url=dist_url(), - type='Keras', - ), - - EynollahModelSpec( - category="table", - variant='non-light', - filename="models_eynollah/eynollah-tables_20210319", - dist_url=dist_url('extra'), - type='Keras', - ), - - EynollahModelSpec( - category="table", - variant='', - filename="models_eynollah/modelens_table_0t4_201124", - dist_url=dist_url(), - type='Keras', - ), - - EynollahModelSpec( - category="ocr", - variant='', - filename="models_eynollah/model_eynollah_ocr_cnnrnn_20250930", - dist_url=dist_url("ocr"), - type='Keras', - ), - - EynollahModelSpec( - category="ocr", - variant='degraded', - filename="models_eynollah/model_eynollah_ocr_cnnrnn__degraded_20250805/", - help="slightly better at degraded Fraktur", - dist_url=dist_url("ocr"), - type='Keras', - ), - - EynollahModelSpec( - category="num_to_char", - variant='', - filename="characters_org.txt", - dist_url=dist_url("ocr"), - type='decoder', - ), - - EynollahModelSpec( - category="characters", - variant='', - filename="characters_org.txt", - dist_url=dist_url("ocr"), - type='List[str]', - ), - - EynollahModelSpec( - category="ocr", - variant='tr', - filename="models_eynollah/model_eynollah_ocr_trocr_20250919", - dist_url=dist_url("ocr"), - help='much slower transformer-based', - type='Keras', - ), - - EynollahModelSpec( - category="trocr_processor", - variant='', - filename="models_eynollah/model_eynollah_ocr_trocr_20250919", - dist_url=dist_url("ocr"), - type='TrOCRProcessor', - ), - - EynollahModelSpec( - category="trocr_processor", - variant='htr', - 
filename="models_eynollah/microsoft/trocr-base-handwritten", - dist_url=dist_url("extra"), - type='TrOCRProcessor', - ), - -]) diff --git a/src/eynollah/model_zoo/model_zoo.py b/src/eynollah/model_zoo/model_zoo.py deleted file mode 100644 index 9611388..0000000 --- a/src/eynollah/model_zoo/model_zoo.py +++ /dev/null @@ -1,281 +0,0 @@ -import os -import json -import logging -from copy import deepcopy -from pathlib import Path -from fnmatch import fnmatchcase -from typing import Dict, List, Optional, Tuple, Type, Union - -from tabulate import tabulate - -from ..predictor import Predictor -from .specs import EynollahModelSpecSet -from .default_specs import DEFAULT_MODEL_SPECS -from .types import AnyModel, T - - -class EynollahModelZoo: - """ - Wrapper class that handles storage and loading of models for all eynollah runners. - """ - - model_basedir: Path - specs: EynollahModelSpecSet - - def __init__( - self, - basedir: str, - model_overrides: Optional[List[Tuple[str, str, str]]] = None, - ) -> None: - self.model_basedir = Path(basedir).resolve() - self.logger = logging.getLogger('eynollah.model_zoo') - if not self.model_basedir.exists(): - self.logger.warning(f"Model basedir does not exist: {basedir}. Set eynollah --model-basedir to the correct directory.") - self.specs = deepcopy(DEFAULT_MODEL_SPECS) - self._overrides = [] - if model_overrides: - self.override_models(*model_overrides) - self._loaded: Dict[str, Predictor] = {} - - @property - def model_overrides(self): - return self._overrides - - def override_models( - self, - *model_overrides: Tuple[str, str, str], - ): - """ - Override the default model versions - """ - for model_category, model_variant, model_filename in model_overrides: - spec = self.specs.get(model_category, model_variant) - self.logger.warning("Overriding filename for model spec %s to %s", spec, model_filename) - self.specs.get(model_category, model_variant).filename = str(Path(model_filename).resolve()) - self._overrides += model_overrides - - def model_path( - self, - model_category: str, - model_variant: str = '', - absolute: bool = True, - ) -> Path: - """ - Translate model_{type,variant} tuple into an absolute (or relative) Path - """ - spec = self.specs.get(model_category, model_variant) - if spec.category in ('characters', 'num_to_char'): - return self.model_path('ocr') / spec.filename - if not Path(spec.filename).is_absolute() and absolute: - model_path = Path(self.model_basedir).joinpath(spec.filename) - else: - model_path = Path(spec.filename) - return model_path - - def load_models( - self, - *all_load_args: Union[str, Tuple[str], Tuple[str, str], Tuple[str, str, str]], - device: str = '', - ) -> Dict: - """ - Load all models by calling load_model and return a dictionary mapping model_category to loaded model - """ - ret = {} # cannot use self._loaded here, yet – first spawn all predictors - for load_args in all_load_args: - if isinstance(load_args, str): - model_category = load_args - load_args = [model_category] - else: - model_category = load_args[0] - load_kwargs = {} - if model_category.endswith('_resized'): - load_args[0] = model_category[:-8] - load_kwargs["resized"] = True - elif model_category.endswith('_patched'): - load_args[0] = model_category[:-8] - load_kwargs["patched"] = True - spec = self.specs.get(model_category, load_args[1] if len(load_args) > 1 else '') - if spec.type in ['Keras'] and spec.category != 'ocr': - ret[model_category] = Predictor(self.logger, self) - ret[model_category].load_model(*load_args, **load_kwargs, device=device) - 
else: - ret[model_category] = self.load_model(*load_args, **load_kwargs, device=device) - self._loaded.update(ret) - return self._loaded - - def load_model( - self, - model_category: str, - model_variant: str = '', - model_path_override: Optional[str] = None, - patched: bool = False, - resized: bool = False, - device: str = '', - ) -> AnyModel: - """ - Load any model - """ - os.environ['TF_USE_LEGACY_KERAS'] = '1' # avoid Keras 3 after TF 2.15 - from ocrd_utils import tf_disable_interactive_logs - tf_disable_interactive_logs() - - import tensorflow as tf - from tensorflow.keras.models import load_model - - from ..patch_encoder import ( - PatchEncoder, - Patches, - wrap_layout_model_patched, - wrap_layout_model_resized, - ) - cuda = False - try: - gpus = tf.config.list_physical_devices('GPU') - if device: - if ',' in device: - for spec in device.split(','): - cat, dev = spec.split(':') - if fnmatchcase(model_category, cat): - device = dev - break - if device == 'CPU': - gpus = [] - else: - assert device.startswith('GPU') - gpus = [gpus[int(device[3:])]] - else: - gpus = gpus[:1] # TF will always use first allowable - tf.config.set_visible_devices(gpus, 'GPU') - for device in gpus: - tf.config.experimental.set_memory_growth(device, True) - vendor_name = ( - tf.config.experimental.get_device_details(device) - .get('device_name', 'unknown')) - cuda = True - self.logger.info("using GPU %s (%s) for model %s", - device.name, - vendor_name, - model_category + ( - "_patched" if patched else - "_resized" if resized else "")) - except RuntimeError: - self.logger.exception("cannot configure GPU devices") - if not cuda: - self.logger.warning("no GPU device available") - - if model_path_override: - self.override_models((model_category, model_variant, model_path_override)) - model_path = self.model_path(model_category, model_variant) - if model_path.suffix == '.h5' and Path(model_path.stem).exists(): - # prefer SavedModel over HDF5 format if it exists - model_path = Path(model_path.stem) - if model_category == 'ocr': - model = self._load_ocr_model(variant=model_variant) - elif model_category == 'num_to_char': - model = self._load_num_to_char() - elif model_category == 'characters': - model = self._load_characters() - elif model_category == 'trocr_processor': - from transformers import TrOCRProcessor - model = TrOCRProcessor.from_pretrained(model_path) - else: - try: - # avoid wasting VRAM on non-transformer models - model = load_model(model_path, compile=False) - except Exception as e: - self.logger.error(e) - model = load_model( - model_path, compile=False, - custom_objects=dict(PatchEncoder=PatchEncoder, - Patches=Patches)) - model._name = model_category - if resized: - model = wrap_layout_model_resized(model) - model._name = model_category + '_resized' - elif patched: - model = wrap_layout_model_patched(model) - model._name = model_category + '_patched' - else: - model.jit_compile = True - model.make_predict_function() - return model - - def get(self, model_category: str) -> Predictor: - if model_category not in self._loaded: - raise ValueError(f'Model "{model_category}" not previously loaded with "load_model(..)"') - return self._loaded[model_category] - - def _load_ocr_model(self, variant: str) -> AnyModel: - """ - Load OCR model - """ - from tensorflow.keras.models import Model as KerasModel - from tensorflow.keras.models import load_model - - ocr_model_dir = self.model_path('ocr', variant) - if variant == 'tr': - from transformers import VisionEncoderDecoderModel - ret = 
VisionEncoderDecoderModel.from_pretrained(ocr_model_dir) - assert isinstance(ret, VisionEncoderDecoderModel) - return ret - else: - ocr_model = load_model(ocr_model_dir, compile=False) - assert isinstance(ocr_model, KerasModel) - return KerasModel( - ocr_model.get_layer(name="image").input, # type: ignore - ocr_model.get_layer(name="dense2").output, # type: ignore - ) - - def _load_characters(self) -> List[str]: - """ - Load encoding for OCR - """ - with open(self.model_path('num_to_char'), "r") as config_file: - return json.load(config_file) - - def _load_num_to_char(self) -> 'StringLookup': - """ - Load decoder for OCR - """ - from tensorflow.keras.layers import StringLookup - - characters = self._load_characters() - # Mapping characters to integers. - char_to_num = StringLookup(vocabulary=characters, mask_token=None) - # Mapping integers back to original characters. - return StringLookup(vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True) - - def __str__(self): - return tabulate( - [ - [ - spec.type, - spec.category, - spec.variant, - spec.help, - f'Yes, at {self.model_path(spec.category, spec.variant)}' - if self.model_path(spec.category, spec.variant).exists() - else f'No, download {spec.dist_url}', - # self.model_path(spec.category, spec.variant), - ] - for spec in sorted(self.specs.specs, key=lambda x: x.dist_url) - ], - headers=[ - 'Type', - 'Category', - 'Variant', - 'Help', - 'Used in', - 'Installed', - ], - tablefmt='github', - ) - - def shutdown(self): - """ - Ensure that a loaded models is not referenced by ``self._loaded`` anymore - """ - if hasattr(self, '_loaded') and getattr(self, '_loaded'): - for needle in list(self._loaded.keys()): - self._loaded[needle].shutdown() - del self._loaded[needle] diff --git a/src/eynollah/model_zoo/specs.py b/src/eynollah/model_zoo/specs.py deleted file mode 100644 index 3c47b7b..0000000 --- a/src/eynollah/model_zoo/specs.py +++ /dev/null @@ -1,52 +0,0 @@ -from dataclasses import dataclass -from typing import Dict, List, Set, Tuple - - -@dataclass -class EynollahModelSpec(): - """ - Describing a single model abstractly. - """ - category: str - # Relative filename to the models_eynollah directory in the dists - filename: str - # URL to the smallest model distribution containing this model (link to Zenodo) - dist_url: str - type: str - variant: str = '' - help: str = '' - -class EynollahModelSpecSet(): - """ - List of all used models for eynollah. - """ - specs: List[EynollahModelSpec] - - def __init__(self, specs: List[EynollahModelSpec]) -> None: - self.specs = sorted(specs, key=lambda x: x.category + '0' + x.variant) - self.categories: Set[str] = set([spec.category for spec in self.specs]) - self.variants: Dict[str, Set[str]] = { - spec.category: set([x.variant for x in self.specs if x.category == spec.category]) - for spec in self.specs - } - self._index_category_variant: Dict[Tuple[str, str], EynollahModelSpec] = { - (spec.category, spec.variant): spec - for spec in self.specs - } - - def asdict(self) -> Dict[str, Dict[str, str]]: - return { - spec.category: { - spec.variant: spec.filename - } - for spec in self.specs - } - - def get(self, category: str, variant: str) -> EynollahModelSpec: - if category not in self.categories: - raise ValueError(f"Unknown category '{category}', must be one of {self.categories}") - if variant not in self.variants[category]: - raise ValueError(f"Unknown variant {variant} for {category}. 
Known variants: {self.variants[category]}")
-        return self._index_category_variant[(category, variant)]
-
-
diff --git a/src/eynollah/model_zoo/types.py b/src/eynollah/model_zoo/types.py
deleted file mode 100644
index 43f6859..0000000
--- a/src/eynollah/model_zoo/types.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from typing import TypeVar
-
-# NOTE: Creating an actual union type requires loading transformers which is expensive and error-prone
-# from transformers import TrOCRProcessor, VisionEncoderDecoderModel
-# AnyModel = Union[VisionEncoderDecoderModel, TrOCRProcessor, KerasModel, List]
-AnyModel = object
-T = TypeVar('T')
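The specs.py and types.py modules deleted above implemented a small registry keyed by (category, variant) pairs. For reference, a minimal, self-contained sketch of that lookup pattern, with hypothetical spec entries rather than the real model list, looks like this:

    from dataclasses import dataclass
    from typing import Dict, Tuple

    @dataclass
    class ModelSpec:
        category: str
        filename: str
        variant: str = ''

    # hypothetical entries, mirroring the (category, variant) index of the
    # EynollahModelSpecSet being deleted above
    specs = [
        ModelSpec('binarization', 'models_eynollah/binarization_default'),
        ModelSpec('binarization', 'models_eynollah/binarization_hybrid', variant='hybrid'),
    ]
    index: Dict[Tuple[str, str], ModelSpec] = {(s.category, s.variant): s for s in specs}

    def get(category: str, variant: str = '') -> ModelSpec:
        # fail loudly on unknown combinations, as the deleted get() did
        if (category, variant) not in index:
            raise ValueError(f"Unknown category/variant: {category}/{variant}")
        return index[(category, variant)]

    print(get('binarization', 'hybrid').filename)

Indexing by tuple keeps the lookup O(1) and makes the "unknown variant" error explicit, which is also what the removed EynollahModelSpecSet.get() did.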
diff --git a/src/eynollah/ocrd-tool.json b/src/eynollah/ocrd-tool.json
index 89ed0da..e972ec8 100644
--- a/src/eynollah/ocrd-tool.json
+++ b/src/eynollah/ocrd-tool.json
@@ -1,5 +1,5 @@
 {
-  "version": "0.8.0",
+  "version": "0.4.0",
   "git_url": "https://github.com/qurator-spk/eynollah",
   "dockerhub": "ocrd/eynollah",
   "tools": {
@@ -28,19 +28,17 @@
        "full_layout": {
          "type": "boolean",
          "default": true,
-          "description": "Try to detect all region subtypes, including drop-capital and heading"
+          "description": "Try to detect all element subtypes, including drop-caps and headings"
        },
-        "light_version": {
+        "light_version": {
          "type": "boolean",
          "default": true,
-          "enum": [true],
-          "description": "ignored (only for backwards-compatibility)"
+          "description": "Try to detect all element subtypes in light version (faster+simpler method for main region detection and deskewing)"
        },
-        "textline_light": {
+        "textline_light": {
          "type": "boolean",
          "default": true,
-          "enum": [true],
-          "description": "ignored (only for backwards-compatibility)"
+          "description": "The light version requires light textline detection"
        },
        "tables": {
          "type": "boolean",
@@ -50,12 +48,12 @@
        "curved_line": {
          "type": "boolean",
          "default": false,
-          "description": "retrieve textline polygons independent of each other (needs more processing time)"
+          "description": "try to return the contour of textlines instead of just the rectangular bounding box (needs more processing time)"
        },
        "ignore_page_extraction": {
          "type": "boolean",
          "default": false,
-          "description": "if true, do not attempt page frame detection (cropping)"
+          "description": "if this parameter is set to true, this tool will ignore page extraction"
        },
        "allow_scaling": {
          "type": "boolean",
@@ -67,56 +65,30 @@
          "default": false,
          "description": "if this parameter set to true, this tool would check that input image need resizing and enhancement or not."
        },
+        "textline_light": {
+          "type": "boolean",
+          "default": false,
+          "description": "if this parameter is set to true, this tool will try to return the contour of textlines instead of the rectangular bounding box of each textline, using a faster method."
+        },
        "right_to_left": {
          "type": "boolean",
          "default": false,
-          "description": "if true, return reading order in right-to-left reading direction."
+          "description": "if this parameter is set to true, this tool will extract right-to-left reading order."
        },
        "headers_off": {
          "type": "boolean",
          "default": false,
          "description": "ignore the special role of headings during reading order detection"
-        },
-        "reading_order_machine_based": {
-          "type": "boolean",
-          "default": false,
-          "description": "use data-driven (rather than rule-based) reading order detection"
        }
      },
      "resources": [
-        {
-          "url": "https://zenodo.org/records/17727267/files/models_all_v0_8_0.zip",
-          "name": "models_all_v0_8_0",
-          "type": "archive",
-          "size": 5636009377,
-          "description": "Models for layout detection, reading order detection, textline detection, page extraction, column classification, table detection, binarization and image enhancement",
-          "version_range": ">= v0.8.0"
-        },
-        {
-          "url": "https://zenodo.org/records/17580627/files/models_all_v0_7_0.zip?download=1",
-          "name": "models_layout_v0_7_0",
-          "type": "archive",
-          "size": 6119874002,
-          "description": "Models for layout detection, reading order detection, textline detection, page extraction, column classification, table detection, binarization, image enhancement and OCR",
-          "version_range": ">= v0.7.0"
-        },
-        {
-          "url": "https://zenodo.org/records/17295988/files/models_layout_v0_6_0.tar.gz?download=1",
-          "name": "models_layout_v0_6_0",
-          "type": "archive",
-          "path_in_archive": "models_layout_v0_6_0",
-          "size": 3525684179,
-          "description": "Models for layout detection, reading order detection, textline detection, page extraction, column classification, table detection, binarization, image enhancement and OCR",
-          "version_range": ">= v0.5.0"
-        },
        {
          "description": "models for eynollah (TensorFlow SavedModel format)",
          "url": "https://github.com/qurator-spk/eynollah/releases/download/v0.3.1/models_eynollah.tar.gz",
          "name": "default",
          "size": 1894627041,
          "type": "archive",
-          "path_in_archive": "models_eynollah",
-          "version_range": ">= v0.3.0, < v0.5.0"
+          "path_in_archive": "models_eynollah"
        }
      ]
    },
@@ -143,30 +115,13 @@
      }
    },
    "resources": [
-      {
-        "url": "https://zenodo.org/records/17727267/files/models_all_v0_8_0.zip",
-        "name": "models_all_v0_8_0",
-        "type": "archive",
-        "size": 5636009377,
-        "description": "Models for layout detection, reading order detection, textline detection, page extraction, column classification, table detection, binarization and image enhancement",
-        "version_range": ">= v0.8.0"
-      },
-      {
-        "url": "https://zenodo.org/records/17580627/files/models_all_v0_7_0.zip?download=1",
-        "name": "models_layout_v0_7_0",
-        "type": "archive",
-        "size": 6119874002,
-        "description": "Models for layout detection, reading order detection, textline detection, page extraction, column classification, table detection, binarization, image enhancement and OCR",
-        "version_range": ">= v0.7.0"
-      },
      {
        "url": "https://github.com/qurator-spk/sbb_binarization/releases/download/v0.0.11/saved_model_2020_01_16.zip",
        "name": "default",
        "type": "archive",
        "path_in_archive": "saved_model_2020_01_16",
        "size": 563147331,
-        "description": "default models provided by github.com/qurator-spk (SavedModel format)",
-        "version_range": "< v0.7.0"
+        "description": "default models provided by github.com/qurator-spk (SavedModel format)"
      },
      {
        "url": "https://github.com/qurator-spk/sbb_binarization/releases/download/v0.0.11/saved_model_2021_03_09.zip",
        "name": "default-2021-03-09",
        "type": "archive",
        "path_in_archive": ".",
        "size": 133230419,
-        "description": "updated default models provided by github.com/qurator-spk (SavedModel format)",
-        "version_range": "< v0.7.0"
+        "description": "updated default models provided by github.com/qurator-spk (SavedModel format)"
      }
    ]
  }
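Note that the restored parameter block above now declares "textline_light" twice (once next to light_version, once before right_to_left). Python's json.loads accepts duplicate keys and silently keeps the last occurrence, so the earlier default would be masked. A quick stdlib-only check, a hypothetical snippet that is not part of the patch, makes such duplicates visible:

    import json

    # json.loads keeps the last of two duplicate keys; this hook raises instead
    def reject_duplicates(pairs):
        seen = {}
        for key, value in pairs:
            if key in seen:
                raise ValueError(f"duplicate key in ocrd-tool.json: {key!r}")
            seen[key] = value
        return seen

    doc = '{"textline_light": true, "textline_light": false}'
    print(json.loads(doc))  # {'textline_light': False} - last one wins
    try:
        json.loads(doc, object_pairs_hook=reject_duplicates)
    except ValueError as e:
        print(e)  # duplicate key in ocrd-tool.json: 'textline_light'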
diff --git a/src/eynollah/ocrd_cli.py b/src/eynollah/ocrd_cli.py
index acd8d4e..8929927 100644
--- a/src/eynollah/ocrd_cli.py
+++ b/src/eynollah/ocrd_cli.py
@@ -1,6 +1,3 @@
-# NOTE: For predictable order of imports of torch/shapely/tensorflow
-# this must be the first import of the CLI!
-from .eynollah_imports import imported_libs
 from .processor import EynollahProcessor
 from click import command
 from ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor
diff --git a/src/eynollah/ocrd_cli_binarization.py b/src/eynollah/ocrd_cli_binarization.py
index a0667c5..848bbac 100644
--- a/src/eynollah/ocrd_cli_binarization.py
+++ b/src/eynollah/ocrd_cli_binarization.py
@@ -1,8 +1,6 @@
-from functools import cached_property
 from typing import Optional
 
 from PIL import Image
-from frozendict import frozendict
 import numpy as np
 import cv2
 from click import command
@@ -11,17 +9,23 @@ from ocrd import Processor, OcrdPageResult, OcrdPageResultImage
 from ocrd_models.ocrd_page import OcrdPage, AlternativeImageType
 from ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor
 
-from eynollah.model_zoo.model_zoo import EynollahModelZoo
-
 from .sbb_binarize import SbbBinarizer
-from .utils.pil_cv2 import cv2pil
+
+def cv2pil(img):
+    return Image.fromarray(img.astype('uint8'))
+
+def pil2cv(img):
+    # from ocrd/workspace.py
+    color_conversion = cv2.COLOR_GRAY2BGR if img.mode in ('1', 'L') else cv2.COLOR_RGB2BGR
+    pil_as_np_array = np.array(img).astype('uint8') if img.mode == '1' else np.array(img)
+    return cv2.cvtColor(pil_as_np_array, color_conversion)
+
 
 class SbbBinarizeProcessor(Processor):
     # already employs GPU (without singleton process atm)
     max_workers = 1
 
-    @cached_property
+    @property
     def executable(self):
         return 'ocrd-sbb-binarize'
 
@@ -30,9 +34,8 @@ class SbbBinarizeProcessor(Processor):
         Set up the model prior to processing.
         """
         # resolve relative path via OCR-D ResourceManager
-        assert isinstance(self.parameter, frozendict)
-        model_zoo = EynollahModelZoo(basedir=self.parameter['model'])
-        self.binarizer = SbbBinarizer(model_zoo=model_zoo, logger=self.logger)
+        model_path = self.resolve_resource(self.parameter['model'])
+        self.binarizer = SbbBinarizer(model_dir=model_path, logger=self.logger)
 
     def process_page_pcgts(self, *input_pcgts: Optional[OcrdPage], page_id: Optional[str] = None) -> OcrdPageResult:
         """
@@ -67,8 +70,7 @@ class SbbBinarizeProcessor(Processor):
 
         if oplevel == 'page':
             self.logger.info("Binarizing on 'page' level in page '%s'", page_id)
-            page_image_bin = cv2pil(self.binarizer.run_single("", img_pil=page_image,
-                                                              use_patches=True))
+            page_image_bin = cv2pil(self.binarizer.run(image=pil2cv(page_image), use_patches=True))
             # update PAGE (reference the image file):
             page_image_ref = AlternativeImageType(comments=page_xywh['features'] + ',binarized,clipped')
             page.add_AlternativeImage(page_image_ref)
@@ -81,8 +83,7 @@ class SbbBinarizeProcessor(Processor):
             for region in regions:
                 region_image, region_xywh = self.workspace.image_from_segment(
                     region, page_image, page_xywh, feature_filter='binarized')
-                region_image_bin = cv2pil(self.binarizer.run_single("", img_pil=region_image,
-                                                                    use_patches=True))
+                region_image_bin = cv2pil(self.binarizer.run(image=pil2cv(region_image), use_patches=True))
                 # update PAGE (reference the image file):
                 region_image_ref = AlternativeImageType(comments=region_xywh['features'] + ',binarized')
                 region.add_AlternativeImage(region_image_ref)
@@ -94,11 +95,10 @@ class SbbBinarizeProcessor(Processor):
                 self.logger.warning("Page '%s' contains no text lines", page_id)
             for line in lines:
                 line_image, line_xywh = self.workspace.image_from_segment(line, page_image, page_xywh, feature_filter='binarized')
-                line_image_bin = cv2pil(self.binarizer.run_single("", img_pil=line_image,
-                                                                  use_patches=True))
+                line_image_bin = cv2pil(self.binarizer.run(image=pil2cv(line_image), use_patches=True))
                 # update PAGE (reference the image file):
                 line_image_ref = AlternativeImageType(comments=line_xywh['features'] + ',binarized')
                 line.add_AlternativeImage(line_image_ref)
                 result.images.append(OcrdPageResultImage(line_image_bin, line.id + '.IMG-BIN', line_image_ref))
 
         return result
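The reinstated pil2cv/cv2pil helpers convert between PIL and OpenCV image representations. One caveat worth knowing: cv2pil does not swap BGR back to RGB, which is harmless for the grayscale and bitonal images produced by binarization but would swap channels for color output. A small sketch exercising the grayscale path (pil2cv copied from the hunk above; the demo around it assumes only Pillow, NumPy and OpenCV):

    import numpy as np
    import cv2
    from PIL import Image

    def pil2cv(img):
        # 1-bit and 8-bit grayscale images gain a channel axis via GRAY2BGR;
        # RGB images only need channel reordering to OpenCV's BGR layout
        color_conversion = cv2.COLOR_GRAY2BGR if img.mode in ('1', 'L') else cv2.COLOR_RGB2BGR
        pil_as_np_array = np.array(img).astype('uint8') if img.mode == '1' else np.array(img)
        return cv2.cvtColor(pil_as_np_array, color_conversion)

    gray = Image.new('L', (32, 32), color=128)
    bgr = pil2cv(gray)
    assert bgr.shape == (32, 32, 3)  # grayscale is expanded to 3 channels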
""" # resolve relative path via OCR-D ResourceManager - assert isinstance(self.parameter, frozendict) - model_zoo = EynollahModelZoo(basedir=self.parameter['model']) - self.binarizer = SbbBinarizer(model_zoo=model_zoo, logger=self.logger) + model_path = self.resolve_resource(self.parameter['model']) + self.binarizer = SbbBinarizer(model_dir=model_path, logger=self.logger) def process_page_pcgts(self, *input_pcgts: Optional[OcrdPage], page_id: Optional[str] = None) -> OcrdPageResult: """ @@ -67,8 +70,7 @@ class SbbBinarizeProcessor(Processor): if oplevel == 'page': self.logger.info("Binarizing on 'page' level in page '%s'", page_id) - page_image_bin = cv2pil(self.binarizer.run_single("", img_pil=page_image, - use_patches=True)) + page_image_bin = cv2pil(self.binarizer.run(image=pil2cv(page_image), use_patches=True)) # update PAGE (reference the image file): page_image_ref = AlternativeImageType(comments=page_xywh['features'] + ',binarized,clipped') page.add_AlternativeImage(page_image_ref) @@ -81,8 +83,7 @@ class SbbBinarizeProcessor(Processor): for region in regions: region_image, region_xywh = self.workspace.image_from_segment( region, page_image, page_xywh, feature_filter='binarized') - region_image_bin = cv2pil(self.binarizer.run_single("", img_pil=region_image, - use_patches=True)) + region_image_bin = cv2pil(self.binarizer.run(image=pil2cv(region_image), use_patches=True)) # update PAGE (reference the image file): region_image_ref = AlternativeImageType(comments=region_xywh['features'] + ',binarized') region.add_AlternativeImage(region_image_ref) @@ -94,11 +95,10 @@ class SbbBinarizeProcessor(Processor): self.logger.warning("Page '%s' contains no text lines", page_id) for line in lines: line_image, line_xywh = self.workspace.image_from_segment(line, page_image, page_xywh, feature_filter='binarized') - line_image_bin = cv2pil(self.binarizer.run_single("", img_pil=line_image, - use_patches=True)) + line_image_bin = cv2pil(self.binarizer.run(image=pil2cv(line_image), use_patches=True)) # update PAGE (reference the image file): line_image_ref = AlternativeImageType(comments=line_xywh['features'] + ',binarized') - line.add_AlternativeImage(line_image_ref) + line.add_AlternativeImage(region_image_ref) result.images.append(OcrdPageResultImage(line_image_bin, line.id + '.IMG-BIN', line_image_ref)) return result diff --git a/src/eynollah/patch_encoder.py b/src/eynollah/patch_encoder.py deleted file mode 100644 index f163132..0000000 --- a/src/eynollah/patch_encoder.py +++ /dev/null @@ -1,156 +0,0 @@ -import os -os.environ['TF_USE_LEGACY_KERAS'] = '1' # avoid Keras 3 after TF 2.15 -import tensorflow as tf -from tensorflow.keras import layers, models - -class PatchEncoder(layers.Layer): - - # 441=21*21 # 14*14 # 28*28 - def __init__(self, num_patches=441, projection_dim=64): - super().__init__() - self.num_patches = num_patches - self.projection_dim = projection_dim - self.projection = layers.Dense(self.projection_dim) - self.position_embedding = layers.Embedding(self.num_patches, self.projection_dim) - - def call(self, patch): - positions = tf.range(start=0, limit=self.num_patches, delta=1) - return self.projection(patch) + self.position_embedding(positions) - - def get_config(self): - return dict(num_patches=self.num_patches, - projection_dim=self.projection_dim, - **super().get_config()) - -class Patches(layers.Layer): - def __init__(self, patch_size_x=1, patch_size_y=1): - super().__init__() - self.patch_size_x = patch_size_x - self.patch_size_y = patch_size_y - - def call(self, images): - 
batch_size = tf.shape(images)[0] - patches = tf.image.extract_patches( - images=images, - sizes=[1, self.patch_size_y, self.patch_size_x, 1], - strides=[1, self.patch_size_y, self.patch_size_x, 1], - rates=[1, 1, 1, 1], - padding="VALID", - ) - patch_dims = patches.shape[-1] - return tf.reshape(patches, [batch_size, -1, patch_dims]) - - def get_config(self): - return dict(patch_size_x=self.patch_size_x, - patch_size_y=self.patch_size_y, - **super().get_config()) - -class wrap_layout_model_resized(models.Model): - """ - replacement for layout model using resizing to model width/height and back - - (accepts arbitrary width/height input [B, H, W, 3], returns same size segmentation [B, H, W, C]) - """ - def __init__(self, model): - super().__init__(name=model.name + '_resized') - self.model = model - self.height = model.layers[-1].output_shape[1] - self.width = model.layers[-1].output_shape[2] - - @tf.function(reduce_retracing=True, - #jit_compile=True, (ScaleAndTranslate is not supported by XLA) - input_signature=[tf.TensorSpec([1, None, None, 3], - dtype=tf.float32)]) - def call(self, img, training=False): - height = tf.shape(img)[1] - width = tf.shape(img)[2] - img_resized = tf.image.resize(img, - (self.height, self.width), - antialias=True) - pred_resized = self.model(img_resized) - pred = tf.image.resize(pred_resized, - (height, width)) - return pred - -class wrap_layout_model_patched(models.Model): - """ - replacement for layout model using sliding window for patches - - (accepts arbitrary width/height input [B, H, W, 3], returns same size segmentation [B, H, W, C]) - """ - def __init__(self, model): - super().__init__(name=model.name + '_patched') - self.model = model - self.height = model.layers[-1].output_shape[1] - self.width = model.layers[-1].output_shape[2] - self.classes = model.layers[-1].output_shape[3] - # equivalent of marginal_of_patch_percent=0.1 ... 
- self.stride_x = int(self.width * (1 - 0.1)) - self.stride_y = int(self.height * (1 - 0.1)) - offset_height = (self.height - self.stride_y) // 2 - offset_width = (self.width - self.stride_x) // 2 - window = tf.image.pad_to_bounding_box( - tf.ones((self.stride_y, self.stride_x, 1), dtype=tf.int32), - offset_height, offset_width, - self.height, self.width) - self.window = tf.expand_dims(window, axis=0) - - @tf.function(reduce_retracing=True, - #jit_compile=True, (ScaleAndTranslate and ExtractImagePatches not supported by XLA) - input_signature=[tf.TensorSpec([1, None, None, 3], - dtype=tf.float32)]) - def call(self, img, training=False): - height = tf.shape(img)[1] - width = tf.shape(img)[2] - if (height < self.height or - width < self.width): - img_resized = tf.image.resize(img, - (self.height, self.width), - antialias=True) - pred_resized = self.model(img_resized) - pred = tf.image.resize(pred_resized, - (height, width)) - return pred - - img_patches = tf.image.extract_patches( - images=img, - sizes=[1, self.height, self.width, 1], - strides=[1, self.stride_y, self.stride_x, 1], - rates=[1, 1, 1, 1], - padding='SAME') - img_patches = tf.squeeze(img_patches) - index_shape = (-1, self.height, self.width, 2) - input_shape = (-1, self.height, self.width, 3) - output_shape = (-1, self.height, self.width, self.classes) - img_patches = tf.reshape(img_patches, shape=input_shape) - # may be too large: - #pred_patches = self.model(img_patches) - # so rebatch to fit in memory: - img_patches = tf.expand_dims(img_patches, 1) - pred_patches = tf.map_fn(self.model, img_patches, - parallel_iterations=1, - infer_shape=False) - pred_patches = tf.squeeze(pred_patches, 1) - # calculate corresponding indexes for reconstruction - x = tf.range(width) - y = tf.range(height) - x, y = tf.meshgrid(x, y) - indices = tf.stack([y, x], axis=-1) - indices_patches = tf.image.extract_patches( - images=tf.expand_dims(indices, axis=0), - sizes=[1, self.height, self.width, 1], - strides=[1, self.stride_y, self.stride_x, 1], - rates=[1, 1, 1, 1], - padding='SAME') - indices_patches = tf.squeeze(indices_patches) - indices_patches = tf.reshape(indices_patches, shape=index_shape) - - # use margins for sliding window approach - indices_patches = indices_patches * self.window - - pred = tf.scatter_nd( - indices_patches, - pred_patches, - (height, width, self.classes)) - pred = tf.expand_dims(pred, axis=0) - return pred diff --git a/src/eynollah/plot.py b/src/eynollah/plot.py index 608ca4f..412ae5a 100644 --- a/src/eynollah/plot.py +++ b/src/eynollah/plot.py @@ -12,7 +12,7 @@ from .utils import crop_image_inside_box from .utils.rotate import rotate_image_different from .utils.resize import resize_image -class EynollahPlotter: +class EynollahPlotter(): """ Class collecting all the plotting and image writing methods """ @@ -26,6 +26,10 @@ class EynollahPlotter: dir_of_deskewed, dir_of_layout, dir_of_cropped_images, + image_filename_stem, + image_org=None, + scale_x=1, + scale_y=1, ): self.dir_out = dir_out self.dir_of_all = dir_of_all @@ -33,8 +37,13 @@ class EynollahPlotter: self.dir_of_layout = dir_of_layout self.dir_of_cropped_images = dir_of_cropped_images self.dir_of_deskewed = dir_of_deskewed + self.image_filename_stem = image_filename_stem + # XXX TODO hacky these cannot be set at init time + self.image_org = image_org + self.scale_x = scale_x + self.scale_y = scale_y - def save_plot_of_layout_main(self, text_regions_p, image_page, name=None): + def save_plot_of_layout_main(self, text_regions_p, image_page): if 
self.dir_of_layout is not None: values = np.unique(text_regions_p[:, :]) # pixels=['Background' , 'Main text' , 'Heading' , 'Marginalia' ,'Drop capitals' , 'Images' , 'Seperators' , 'Tables', 'Graphics'] @@ -46,10 +55,10 @@ class EynollahPlotter: colors = [im.cmap(im.norm(value)) for value in values] patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in values] plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0, fontsize=40) - plt.savefig(os.path.join(self.dir_of_layout, - (name or "page") + "_layout_main.png")) + plt.savefig(os.path.join(self.dir_of_layout, self.image_filename_stem + "_layout_main.png")) + - def save_plot_of_layout_main_all(self, text_regions_p, image_page, name=None): + def save_plot_of_layout_main_all(self, text_regions_p, image_page): if self.dir_of_all is not None: values = np.unique(text_regions_p[:, :]) # pixels=['Background' , 'Main text' , 'Heading' , 'Marginalia' ,'Drop capitals' , 'Images' , 'Seperators' , 'Tables', 'Graphics'] @@ -64,10 +73,9 @@ class EynollahPlotter: colors = [im.cmap(im.norm(value)) for value in values] patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in values] plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0, fontsize=60) - plt.savefig(os.path.join(self.dir_of_all, - (name or "page") + "_layout_main_and_page.png")) + plt.savefig(os.path.join(self.dir_of_all, self.image_filename_stem + "_layout_main_and_page.png")) - def save_plot_of_layout(self, text_regions_p, image_page, name=None): + def save_plot_of_layout(self, text_regions_p, image_page): if self.dir_of_layout is not None: values = np.unique(text_regions_p[:, :]) # pixels=['Background' , 'Main text' , 'Heading' , 'Marginalia' ,'Drop capitals' , 'Images' , 'Seperators' , 'Tables', 'Graphics'] @@ -79,10 +87,9 @@ class EynollahPlotter: colors = [im.cmap(im.norm(value)) for value in values] patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in values] plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0, fontsize=40) - plt.savefig(os.path.join(self.dir_of_layout, - (name or "page") + "_layout.png")) + plt.savefig(os.path.join(self.dir_of_layout, self.image_filename_stem + "_layout.png")) - def save_plot_of_layout_all(self, text_regions_p, image_page, name=None): + def save_plot_of_layout_all(self, text_regions_p, image_page): if self.dir_of_all is not None: values = np.unique(text_regions_p[:, :]) # pixels=['Background' , 'Main text' , 'Heading' , 'Marginalia' ,'Drop capitals' , 'Images' , 'Seperators' , 'Tables', 'Graphics'] @@ -97,10 +104,9 @@ class EynollahPlotter: colors = [im.cmap(im.norm(value)) for value in values] patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in values] plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0, fontsize=60) - plt.savefig(os.path.join(self.dir_of_all, - (name or "page") + "_layout_and_page.png")) + plt.savefig(os.path.join(self.dir_of_all, self.image_filename_stem + "_layout_and_page.png")) - def save_plot_of_textlines(self, textline_mask_tot_ea, image_page, name=None): + def save_plot_of_textlines(self, textline_mask_tot_ea, image_page): if self.dir_of_all is 
not None: values = np.unique(textline_mask_tot_ea[:, :]) pixels = ["Background", "Textlines"] @@ -114,31 +120,24 @@ class EynollahPlotter: colors = [im.cmap(im.norm(value)) for value in values] patches = [mpatches.Patch(color=colors[np.where(values == i)[0][0]], label="{l}".format(l=pixels[int(np.where(values_indexes == i)[0][0])])) for i in values] plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0, fontsize=60) - plt.savefig(os.path.join(self.dir_of_all, - (name or "page") + "_textline_and_page.png")) + plt.savefig(os.path.join(self.dir_of_all, self.image_filename_stem + "_textline_and_page.png")) - def save_deskewed_image(self, slope_deskew, image_org, name=None): + def save_deskewed_image(self, slope_deskew): if self.dir_of_all is not None: - cv2.imwrite(os.path.join(self.dir_of_all, - (name or "page") + "_org.png"), image_org) + cv2.imwrite(os.path.join(self.dir_of_all, self.image_filename_stem + "_org.png"), self.image_org) if self.dir_of_deskewed is not None: - img_rotated = rotate_image_different(image_org, slope_deskew) - cv2.imwrite(os.path.join(self.dir_of_deskewed, - (name or "page") + "_deskewed.png"), img_rotated) + img_rotated = rotate_image_different(self.image_org, slope_deskew) + cv2.imwrite(os.path.join(self.dir_of_deskewed, self.image_filename_stem + "_deskewed.png"), img_rotated) - def save_page_image(self, image_page, name=None): + def save_page_image(self, image_page): if self.dir_of_all is not None: - cv2.imwrite(os.path.join(self.dir_of_all, - (name or "page") + "_page.png"), image_page) + cv2.imwrite(os.path.join(self.dir_of_all, self.image_filename_stem + "_page.png"), image_page) if self.dir_save_page is not None: - cv2.imwrite(os.path.join(self.dir_save_page, - (name or "page") + "_page.png"), image_page) - - def save_enhanced_image(self, img_res, name=None): - cv2.imwrite(os.path.join(self.dir_out, - (name or "page") + "_enhanced.png"), img_res) + cv2.imwrite(os.path.join(self.dir_save_page, self.image_filename_stem + "_page.png"), image_page) + def save_enhanced_image(self, img_res): + cv2.imwrite(os.path.join(self.dir_out, self.image_filename_stem + "_enhanced.png"), img_res) - def save_plot_of_textline_density(self, img_patch_org, name=None): + def save_plot_of_textline_density(self, img_patch_org): if self.dir_of_all is not None: plt.figure(figsize=(80,40)) plt.rcParams['font.size']='50' @@ -150,10 +149,9 @@ class EynollahPlotter: plt.ylabel('Height',fontsize=60) plt.yticks([0,len(gaussian_filter1d(img_patch_org.sum(axis=1), 3))]) plt.gca().invert_yaxis() - plt.savefig(os.path.join(self.dir_of_all, - (name or "page") + '_density_of_textline.png')) + plt.savefig(os.path.join(self.dir_of_all, self.image_filename_stem+'_density_of_textline.png')) - def save_plot_of_rotation_angle(self, angels, var_res, name=None): + def save_plot_of_rotation_angle(self, angels, var_res): if self.dir_of_all is not None: plt.figure(figsize=(60,30)) plt.rcParams['font.size']='50' @@ -162,20 +160,19 @@ class EynollahPlotter: plt.ylabel('variance of sum of rotated textline in direction of x axis',fontsize=50) plt.plot(angels[np.argmax(var_res)],var_res[np.argmax(np.array(var_res))] ,'*',markersize=50,label='Angle of deskewing=' +str("{:.2f}".format(angels[np.argmax(var_res)]))+r'$\degree$') plt.legend(loc='best') - plt.savefig(os.path.join(self.dir_of_all, - (name or "page") + '_rotation_angle.png')) + plt.savefig(os.path.join(self.dir_of_all, self.image_filename_stem+'_rotation_angle.png')) - def write_images_into_directory(self, img_contours, 
image_page, scale_x=1.0, scale_y=1.0, name=None):
+    def write_images_into_directory(self, img_contours, image_page):
         if self.dir_of_cropped_images is not None:
             index = 0
             for cont_ind in img_contours:
                 x, y, w, h = cv2.boundingRect(cont_ind)
                 box = [x, y, w, h]
-                image, _ = crop_image_inside_box(box, image_page)
-                image = resize_image(image,
-                                     int(image.shape[0] / scale_y),
-                                     int(image.shape[1] / scale_x))
-                cv2.imwrite(os.path.join(self.dir_of_cropped_images,
-                                         (name or "page") + f"_{index:03d}.jpg"), image)
+                cropped_page, page_coord = crop_image_inside_box(box, image_page)
+
+                cropped_page = resize_image(cropped_page, int(cropped_page.shape[0] / self.scale_y), int(cropped_page.shape[1] / self.scale_x))
+
+                path = os.path.join(self.dir_of_cropped_images, self.image_filename_stem + "_" + str(index) + ".jpg")
+                cv2.imwrite(path, cropped_page)
                 index += 1
diff --git a/src/eynollah/predictor.py b/src/eynollah/predictor.py
deleted file mode 100644
index e1159e7..0000000
--- a/src/eynollah/predictor.py
+++ /dev/null
@@ -1,210 +0,0 @@
-from contextlib import ExitStack
-from typing import List, Dict
-import logging
-import logging.handlers
-import multiprocessing as mp
-import numpy as np
-
-from .utils.shm import share_ndarray, ndarray_shared
-
-QSIZE = 200
-
-
-class Predictor(mp.context.SpawnProcess):
-    """
-    singleton subprocess solely responsible for prediction with TensorFlow,
-    communicates with any number of worker processes,
-    acting as a shallow replacement for various model types in EynollahModelZoo's
-    _loaded dict for each single model
-    """
-    def __init__(self, logger, model_zoo):
-        self.logger = logger
-        self.loglevel = logger.parent.level
-        self.model_zoo = model_zoo
-        ctxt = mp.get_context('spawn')
-        self.taskq = ctxt.Queue(maxsize=QSIZE)
-        self.resultq = ctxt.Queue(maxsize=QSIZE)
-        self.logq = ctxt.Queue(maxsize=QSIZE * 100)
-        logging.handlers.QueueListener(
-            self.logq, *(
-                # as per ocrd_utils.initLogging():
-                logging.root.handlers +
-                # as per eynollah_cli.main():
-                self.logger.parent.handlers
-            ), respect_handler_level=False).start()
-        self.stopped = ctxt.Event()
-        self.closable = ctxt.Manager().list()
-        super().__init__(name="EynollahPredictor", daemon=True)
-
-    @property
-    def input_shape(self):
-        return self({})
-
-    def predict(self, data: dict, verbose=0):
-        return self(data)
-
-    def __call__(self, data: dict):
-        # unusable as per python/cpython#79967
-        #with self.jobid.get_lock():
-        # would work, but not public:
-        #with self.jobid._mutex:
-        with self.joblock:
-            self.jobid.value += 1
-            jobid = self.jobid.value
-        if not len(data):
-            self.taskq.put((jobid, data))
-            #self.logger.debug("sent shape query task '%d' for model '%s'", jobid, self.name)
-            return self.result(jobid)
-        with share_ndarray(data) as shared_data:
-            self.taskq.put((jobid, shared_data))
-            #self.logger.debug("sent prediction task '%d' for model '%s': %s", jobid, self.name, shared_data)
-            return self.result(jobid)
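-    # Note on the request/response flow implemented above and below: the caller
-    # copies its arrays into shared memory (share_ndarray), enqueues the jobid
-    # plus array metadata on taskq, and then polls resultq until its own jobid
-    # appears; the result is copied back out of shared memory, and the jobid is
-    # marked closable so the predictor process can release the shared segment.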
-    def result(self, jobid):
-        while not self.stopped.is_set():
-            if jobid in self.results:
-                #self.logger.debug("received result for '%d'", jobid)
-                result = self.results.pop(jobid)
-                if isinstance(result, Exception):
-                    raise Exception(f"predictor {self.name} failed for {jobid}") from result
-                elif isinstance(result, dict):
-                    with ndarray_shared(result) as shared_result:
-                        result = np.copy(shared_result)
-                    self.closable.append(jobid)
-                return result
-            try:
-                jobid0, result = self.resultq.get(timeout=0.7)
-            except mp.queues.Empty:
-                continue
-            #self.logger.debug("storing results for '%d': '%s'", jobid0, result)
-            self.results[jobid0] = result
-        raise Exception(f"predictor {self.name} terminated while waiting on results for {jobid}")
-
-    def run(self):
-        try:
-            self.setup() # fill model_zoo etc
-        except Exception as e:
-            self.logger.exception("setup failed")
-            self.stopped.set()
-            return
-        closing = {}
-        def close_all():
-            for jobid in list(self.closable):
-                self.closable.remove(jobid)
-                closing.pop(jobid).close()
-                #self.logger.debug("closed shm for '%d'", jobid)
-        while not self.stopped.is_set():
-            close_all()
-            try:
-                TIMEOUT = 4.5 # 1.1 is too greedy - not enough for rebatching
-                jobid, shared_data = self.taskq.get(timeout=TIMEOUT)
-            except mp.queues.Empty:
-                continue
-            try:
-                # up to what batch size fits into small (8GB) VRAM?
-                # (notice we are not listing _resized/_patched models here,
-                # because its inputs/outputs will have varying shapes)
-                REBATCH_SIZE = {
-                    # small models (448x448)...
-                    "col_classifier": 2,
-                    "page": 2,
-                    "binarization": 4,
-                    "enhancement": 4,
-                    "reading_order": 4,
-                    # medium size (672x672x3)...
-                    "textline": 2,
-                    # large models...
-                    "table": 1,
-                    "region_1_2": 1,
-                    "region_fl_np": 1,
-                    "region_fl": 1,
-                }.get(self.name, 1)
-                REBATCH_SIZE = 1 # save VRAM; FIXME: re-enable w/ runtime parameter
-                if not len(shared_data):
-                    #self.logger.debug("getting '%d' output shape of model '%s'", jobid, self.name)
-                    result = self.model.input_shape
-                    self.resultq.put((jobid, result))
-                    #self.logger.debug("sent result for '%d': %s", jobid, result)
-                else:
-                    tasks = [(jobid, shared_data)]
-                    batch_size = shared_data['shape'][0]
-                    while (not self.taskq.empty() and
-                           # climb to target batch size
-                           batch_size * len(tasks) < REBATCH_SIZE):
-                        jobid0, shared_data0 = self.taskq.get()
-                        if len(shared_data0):
-                            # add to our batch
-                            tasks.append((jobid0, shared_data0))
-                        else:
-                            # immediately answer
-                            self.resultq.put((jobid0, self.model.input_shape))
-                    if len(tasks) > 1:
-                        self.logger.debug("rebatching %d '%s' tasks of batch size %d",
-                                          len(tasks), self.name, batch_size)
-                    with ExitStack() as stack:
-                        data = []
-                        jobs = []
-                        for jobid, shared_data in tasks:
-                            #self.logger.debug("predicting '%d' with model '%s': %s", jobid, self.name, shared_data)
-                            jobs.append(jobid)
-                            data.append(stack.enter_context(ndarray_shared(shared_data)))
-                        data = np.concatenate(data)
-                        #result = self.model.predict(data, verbose=0)
-                        # faster, less VRAM
-                        result = self.model.predict_on_batch(data)
-                    results = np.split(result, len(jobs))
-                    #self.logger.debug("sharing result array for '%d'", jobid)
-                    with ExitStack() as stack:
-                        for jobid, result in zip(jobs, results):
-                            # we don't know when the result will be received,
-                            # but don't want to wait either, so track closing
-                            # context per job, and wait for closable signal
-                            # from client
-                            result = stack.enter_context(share_ndarray(result))
-                            closing[jobid] = stack.pop_all()
-                            self.resultq.put((jobid, result))
-                            #self.logger.debug("sent result for '%d': %s", jobid, result)
-            except Exception as e:
-                self.logger.error("prediction for %s failed: %s", self.name, e.__class__.__name__)
-                result = e
-                self.resultq.put((jobid, result))
        close_all()
        #self.logger.debug("predictor terminated")
-
-    def load_model(self, *load_args, **load_kwargs):
-        assert len(load_args)
-        self.name = '_'.join(list(load_args[:1]) +
-                             list(key for key in load_kwargs
-                                  if key != 'device'))
-        self.load_args = load_args
-        self.load_kwargs = load_kwargs
-        self.start() # call run() in subprocess
-        # parent context here
-        del self.model_zoo # only in subprocess
-        ctxt = mp.get_context('fork') # 
ocrd.Processor will fork workers - mngr = ctxt.Manager() - self.jobid = mngr.Value('i', 0) - self.joblock = mngr.Lock() - self.results = mngr.dict() - - def setup(self): - logging.root.handlers = [logging.handlers.QueueHandler(self.logq)] - self.logger.setLevel(self.loglevel) - self.model = self.model_zoo.load_model(*self.load_args, **self.load_kwargs) - - def shutdown(self): - # do not terminate from forked processor instances - if mp.parent_process() is None: - self.stopped.set() - self.taskq.close() - self.taskq.cancel_join_thread() - self.resultq.close() - self.resultq.cancel_join_thread() - self.logq.close() - self.terminate() - else: - del self.model - - def __del__(self): - #self.logger.debug(f"deinit of {self} in {mp.current_process().name}") - self.shutdown() diff --git a/src/eynollah/processor.py b/src/eynollah/processor.py index 47fa770..8f99489 100644 --- a/src/eynollah/processor.py +++ b/src/eynollah/processor.py @@ -1,32 +1,35 @@ -from functools import cached_property from typing import Optional from ocrd_models import OcrdPage -from ocrd import OcrdPageResultImage, Processor, OcrdPageResult - -from eynollah.model_zoo.model_zoo import EynollahModelZoo +from ocrd import Processor, OcrdPageResult from .eynollah import Eynollah, EynollahXmlWriter class EynollahProcessor(Processor): - @cached_property - def executable(self) -> str: + # already employs background CPU multiprocessing per page + # already employs GPU (without singleton process atm) + max_workers = 1 + + @property + def executable(self): return 'ocrd-eynollah-segment' def setup(self) -> None: - assert self.parameter - model_zoo = EynollahModelZoo(basedir=self.parameter['models']) + if self.parameter['textline_light'] and not self.parameter['light_version']: + raise ValueError("Error: You set parameter 'textline_light' to enable light textline detection, " + "but parameter 'light_version' is not enabled") self.eynollah = Eynollah( - model_zoo=model_zoo, + self.resolve_resource(self.parameter['models']), + logger=self.logger, allow_enhancement=self.parameter['allow_enhancement'], curved_line=self.parameter['curved_line'], right2left=self.parameter['right_to_left'], - reading_order_machine_based=self.parameter['reading_order_machine_based'], ignore_page_extraction=self.parameter['ignore_page_extraction'], + light_version=self.parameter['light_version'], + textline_light=self.parameter['textline_light'], full_layout=self.parameter['full_layout'], allow_scaling=self.parameter['allow_scaling'], headers_off=self.parameter['headers_off'], tables=self.parameter['tables'], - logger=self.logger ) self.eynollah.plotter = None @@ -53,8 +56,6 @@ class EynollahProcessor(Processor): - If ``ignore_page_extraction``, then attempt no cropping of the page. - If ``curved_line``, then compute contour polygons for text lines instead of simple bounding boxes. - - If ``reading_order_machine_based``, then detect reading order via - data-driven model instead of geometrical heuristics. Produce a new output file by serialising the resulting hierarchy. 
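        (As wired up below, this caches the page image via ``cache_images``, attaches an
        ``EynollahXmlWriter`` that holds the PAGE-XML result, and then delegates the
        actual segmentation to ``run_single``.)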
""" @@ -76,8 +77,15 @@ class EynollahProcessor(Processor): image_filename = "dummy" # will be replaced by ocrd.Processor.process_page_file result.images.append(OcrdPageResultImage(page_image, '.IMG', page)) # mark as new original # FIXME: mask out already existing regions (incremental segmentation) - self.eynollah.run_single(image_filename, - img_pil=page_image, pcgts=pcgts, - # ocrd.Processor will handle OCRD_EXISTING_OUTPUT more flexibly - overwrite=True) + self.eynollah.cache_images( + image_pil=page_image, + dpi=self.parameter['dpi'], + ) + self.eynollah.writer = EynollahXmlWriter( + dir_out=None, + image_filename=image_filename, + curved_line=self.eynollah.curved_line, + textline_light=self.eynollah.textline_light, + pcgts=pcgts) + self.eynollah.run_single() return result diff --git a/src/eynollah/sbb_binarize.py b/src/eynollah/sbb_binarize.py index 9b154a8..f43b6ba 100644 --- a/src/eynollah/sbb_binarize.py +++ b/src/eynollah/sbb_binarize.py @@ -2,90 +2,376 @@ Tool to load model and binarize a given image. """ -# pyright: reportIndexIssue=false -# pyright: reportCallIssue=false -# pyright: reportArgumentType=false -# pyright: reportPossiblyUnboundVariable=false - +import sys +from glob import glob import os import logging -from pathlib import Path -from typing import Optional import numpy as np +from PIL import Image import cv2 +from ocrd_utils import tf_disable_interactive_logs +tf_disable_interactive_logs() +import tensorflow as tf +from tensorflow.keras.models import load_model +from tensorflow.python.keras import backend as tensorflow_backend -from .eynollah import Eynollah -from .model_zoo import EynollahModelZoo -from .utils.resize import resize_image -from .utils import is_image_filename -class SbbBinarizer(Eynollah): +def resize_image(img_in, input_height, input_width): + return cv2.resize(img_in, (input_width, input_height), interpolation=cv2.INTER_NEAREST) - def __init__( - self, - *, - model_zoo: EynollahModelZoo, - logger: Optional[logging.Logger] = None, - device: str = '', - ): - self.logger = logger if logger else logging.getLogger('eynollah.binarization') - self.model_zoo = model_zoo - self.setup_models(device=device) +class SbbBinarizer: - def setup_models(self, device=''): - loadable = ['binarization'] - self.model_zoo.load_models(*loadable, device=device) - for model in loadable: - self.logger.debug("model %s has input shape %s", model, - self.model_zoo.get(model).input_shape) + def __init__(self, model_dir, logger=None): + self.model_dir = model_dir + self.log = logger if logger else logging.getLogger('SbbBinarizer') - def run(self, - image=None, - image_filename=None, - output=None, - use_patches=False, - dir_in=None, - overwrite=False - ): - """ - Binarize the scanned images - """ - if dir_in: - ls_imgs = [(os.path.join(dir_in, image_filename), - os.path.join(output, Path(image_filename).stem + '.png')) - for image_filename in filter(is_image_filename, - os.listdir(dir_in))] - elif image_filename: - ls_imgs = [(image_filename, output)] + self.start_new_session() + + self.model_files = glob(self.model_dir+"/*/", recursive = True) + + self.models = [] + for model_file in self.model_files: + self.models.append(self.load_model(model_file)) + + def start_new_session(self): + config = tf.compat.v1.ConfigProto() + config.gpu_options.allow_growth = True + + self.session = tf.compat.v1.Session(config=config) # tf.InteractiveSession() + tensorflow_backend.set_session(self.session) + + def end_session(self): + tensorflow_backend.clear_session() + self.session.close() + del 
self.session
+
+    def load_model(self, model_name):
+        model = load_model(os.path.join(self.model_dir, model_name), compile=False)
+        model_height = model.layers[len(model.layers)-1].output_shape[1]
+        model_width = model.layers[len(model.layers)-1].output_shape[2]
+        n_classes = model.layers[len(model.layers)-1].output_shape[3]
+        return model, model_height, model_width, n_classes
+
+    def predict(self, model_in, img, use_patches, n_batch_inference=5):
+        tensorflow_backend.set_session(self.session)
+        model, model_height, model_width, n_classes = model_in
+
+        img_org_h = img.shape[0]
+        img_org_w = img.shape[1]
+
+        if img.shape[0] < model_height and img.shape[1] >= model_width:
+            img_padded = np.zeros(( model_height, img.shape[1], img.shape[2] ))
+
+            index_start_h = int( abs( img.shape[0] - model_height) /2.)
+            index_start_w = 0
+
+            img_padded [ index_start_h: index_start_h+img.shape[0], :, : ] = img[:,:,:]
+
+        elif img.shape[0] >= model_height and img.shape[1] < model_width:
+            img_padded = np.zeros(( img.shape[0], model_width, img.shape[2] ))
+
+            index_start_h = 0
+            index_start_w = int( abs( img.shape[1] - model_width) /2.)
+
+            img_padded [ :, index_start_w: index_start_w+img.shape[1], : ] = img[:,:,:]
+
+
+        elif img.shape[0] < model_height and img.shape[1] < model_width:
+            img_padded = np.zeros(( model_height, model_width, img.shape[2] ))
+
+            index_start_h = int( abs( img.shape[0] - model_height) /2.)
+            index_start_w = int( abs( img.shape[1] - model_width) /2.)
+
+            img_padded [ index_start_h: index_start_h+img.shape[0], index_start_w: index_start_w+img.shape[1], : ] = img[:,:,:]
+        else:
+            index_start_h = 0
+            index_start_w = 0
+            img_padded = np.copy(img)
+
+
+        img = np.copy(img_padded)
+
+
+
+        if use_patches:
+
+            margin = int(0.1 * model_width)
+
+            width_mid = model_width - 2 * margin
+            height_mid = model_height - 2 * margin
+
+            img = img / float(255.0)
+
+            img_h = img.shape[0]
+            img_w = img.shape[1]
+
+            prediction_true = np.zeros((img_h, img_w, 3))
+            mask_true = np.zeros((img_h, img_w))
+            nxf = img_w / float(width_mid)
+            nyf = img_h / float(height_mid)
+
+            if nxf > int(nxf):
+                nxf = int(nxf) + 1
+            else:
+                nxf = int(nxf)
+
+            if nyf > int(nyf):
+                nyf = int(nyf) + 1
+            else:
+                nyf = int(nyf)
+
+
+            list_i_s = []
+            list_j_s = []
+            list_x_u = []
+            list_x_d = []
+            list_y_u = []
+            list_y_d = []
+
+            batch_indexer = 0
+
+            img_patch = np.zeros((n_batch_inference, model_height, model_width,3))
+
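+            # Tiling overview: patches of model_width x model_height are laid out
+            # on a grid with stride width_mid/height_mid, i.e. neighbouring patches
+            # overlap by `margin` on each side; after prediction, every patch is
+            # cropped back by that margin (except along the page borders) before
+            # being stitched into prediction_true, which avoids seam artifacts at
+            # patch boundaries.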
+            for i in range(nxf):
+                for j in range(nyf):
+
+                    if i == 0:
+                        index_x_d = i * width_mid
+                        index_x_u = index_x_d + model_width
+                    elif i > 0:
+                        index_x_d = i * width_mid
+                        index_x_u = index_x_d + model_width
+
+                    if j == 0:
+                        index_y_d = j * height_mid
+                        index_y_u = index_y_d + model_height
+                    elif j > 0:
+                        index_y_d = j * height_mid
+                        index_y_u = index_y_d + model_height
+
+                    if index_x_u > img_w:
+                        index_x_u = img_w
+                        index_x_d = img_w - model_width
+                    if index_y_u > img_h:
+                        index_y_u = img_h
+                        index_y_d = img_h - model_height
+
+
+                    list_i_s.append(i)
+                    list_j_s.append(j)
+                    list_x_u.append(index_x_u)
+                    list_x_d.append(index_x_d)
+                    list_y_d.append(index_y_d)
+                    list_y_u.append(index_y_u)
+
+
+                    img_patch[batch_indexer,:,:,:] = img[index_y_d:index_y_u, index_x_d:index_x_u, :]
+
+                    batch_indexer = batch_indexer + 1
+
+
+
+                    if batch_indexer == n_batch_inference:
+
+                        label_p_pred = model.predict(img_patch,verbose=0)
+
+                        seg = np.argmax(label_p_pred, axis=3)
+
+                        #print(seg.shape, len(seg), len(list_i_s))
+
+                        indexer_inside_batch = 0
+                        for i_batch, j_batch in zip(list_i_s, list_j_s):
+                            seg_in = seg[indexer_inside_batch,:,:]
+                            seg_color = np.repeat(seg_in[:, :, np.newaxis], 3, axis=2)
+
+                            index_y_u_in = list_y_u[indexer_inside_batch]
+                            index_y_d_in = list_y_d[indexer_inside_batch]
+
+                            index_x_u_in = list_x_u[indexer_inside_batch]
+                            index_x_d_in = list_x_d[indexer_inside_batch]
+
+                            if i_batch == 0 and j_batch == 0:
+                                seg_color = seg_color[0 : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :]
+                                prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
+                            elif i_batch == nxf - 1 and j_batch == nyf - 1:
+                                seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - 0, :]
+                                prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
+                            elif i_batch == 0 and j_batch == nyf - 1:
+                                seg_color = seg_color[margin : seg_color.shape[0] - 0, 0 : seg_color.shape[1] - margin, :]
+                                prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
+                            elif i_batch == nxf - 1 and j_batch == 0:
+                                seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :]
+                                prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
+                            elif i_batch == 0 and j_batch != 0 and j_batch != nyf - 1:
+                                seg_color = seg_color[margin : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :]
+                                prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
+                            elif i_batch == nxf - 1 and j_batch != 0 and j_batch != nyf - 1:
+                                seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :]
+                                prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
+                            elif i_batch != 0 and i_batch != nxf - 1 and j_batch == 0:
+                                seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :]
+                                prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color
+                            elif i_batch != 0 and i_batch != nxf - 1 and j_batch == nyf - 1:
+                                seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - margin, :]
+                                prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color
+                            else:
+                                seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :]
+
prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color + + indexer_inside_batch = indexer_inside_batch +1 + + + list_i_s = [] + list_j_s = [] + list_x_u = [] + list_x_d = [] + list_y_u = [] + list_y_d = [] + + batch_indexer = 0 + + img_patch = np.zeros((n_batch_inference, model_height, model_width,3)) + + elif i==(nxf-1) and j==(nyf-1): + label_p_pred = model.predict(img_patch,verbose=0) + + seg = np.argmax(label_p_pred, axis=3) + + #print(seg.shape, len(seg), len(list_i_s)) + + indexer_inside_batch = 0 + for i_batch, j_batch in zip(list_i_s, list_j_s): + seg_in = seg[indexer_inside_batch,:,:] + seg_color = np.repeat(seg_in[:, :, np.newaxis], 3, axis=2) + + index_y_u_in = list_y_u[indexer_inside_batch] + index_y_d_in = list_y_d[indexer_inside_batch] + + index_x_u_in = list_x_u[indexer_inside_batch] + index_x_d_in = list_x_d[indexer_inside_batch] + + if i_batch == 0 and j_batch == 0: + seg_color = seg_color[0 : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :] + prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color + elif i_batch == nxf - 1 and j_batch == nyf - 1: + seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - 0, :] + prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color + elif i_batch == 0 and j_batch == nyf - 1: + seg_color = seg_color[margin : seg_color.shape[0] - 0, 0 : seg_color.shape[1] - margin, :] + prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color + elif i_batch == nxf - 1 and j_batch == 0: + seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :] + prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color + elif i_batch == 0 and j_batch != 0 and j_batch != nyf - 1: + seg_color = seg_color[margin : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :] + prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color + elif i_batch == nxf - 1 and j_batch != 0 and j_batch != nyf - 1: + seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :] + prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color + elif i_batch != 0 and i_batch != nxf - 1 and j_batch == 0: + seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :] + prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color + elif i_batch != 0 and i_batch != nxf - 1 and j_batch == nyf - 1: + seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - margin, :] + prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color + else: + seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :] + prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color + + indexer_inside_batch = indexer_inside_batch +1 + + + list_i_s = [] + list_j_s = [] + list_x_u = [] + list_x_d = [] + list_y_u = [] + list_y_d = [] + + batch_indexer = 0 + + img_patch = np.zeros((n_batch_inference, model_height, 
model_width,3)) + + + + prediction_true = prediction_true[index_start_h: index_start_h+img_org_h, index_start_w: index_start_w+img_org_w,:] + prediction_true = prediction_true.astype(np.uint8) + + else: + img_h_page = img.shape[0] + img_w_page = img.shape[1] + img = img / float(255.0) + img = resize_image(img, model_height, model_width) + + label_p_pred = model.predict(img.reshape(1, img.shape[0], img.shape[1], img.shape[2])) + + seg = np.argmax(label_p_pred, axis=3)[0] + seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2) + prediction_true = resize_image(seg_color, img_h_page, img_w_page) + prediction_true = prediction_true.astype(np.uint8) + return prediction_true[:,:,0] + + def run(self, image=None, image_path=None, save=None, use_patches=False, dir_in=None, dir_out=None): + print(dir_in,'dir_in') + if not dir_in: + if (image is not None and image_path is not None) or \ + (image is None and image_path is None): + raise ValueError("Must pass either a opencv2 image or an image_path") + if image_path is not None: + image = cv2.imread(image_path) + img_last = 0 + for n, (model, model_file) in enumerate(zip(self.models, self.model_files)): + self.log.info('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.model_files))) + + res = self.predict(model, image, use_patches) + + img_fin = np.zeros((res.shape[0], res.shape[1], 3)) + res[:, :][res[:, :] == 0] = 2 + res = res - 1 + res = res * 255 + img_fin[:, :, 0] = res + img_fin[:, :, 1] = res + img_fin[:, :, 2] = res + + img_fin = img_fin.astype(np.uint8) + img_fin = (res[:, :] == 0) * 255 + img_last = img_last + img_fin + + kernel = np.ones((5, 5), np.uint8) + img_last[:, :][img_last[:, :] > 0] = 255 + img_last = (img_last[:, :] == 0) * 255 + if save: + cv2.imwrite(save, img_last) + return img_last + else: + ls_imgs = os.listdir(dir_in) + for image_name in ls_imgs: + image_stem = image_name.split('.')[0] + print(image_name,'image_name') + image = cv2.imread(os.path.join(dir_in,image_name) ) + img_last = 0 + for n, (model, model_file) in enumerate(zip(self.models, self.model_files)): + self.log.info('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.model_files))) + + res = self.predict(model, image, use_patches) + + img_fin = np.zeros((res.shape[0], res.shape[1], 3)) + res[:, :][res[:, :] == 0] = 2 + res = res - 1 + res = res * 255 + img_fin[:, :, 0] = res + img_fin[:, :, 1] = res + img_fin[:, :, 2] = res + + img_fin = img_fin.astype(np.uint8) + img_fin = (res[:, :] == 0) * 255 + img_last = img_last + img_fin + + kernel = np.ones((5, 5), np.uint8) + img_last[:, :][img_last[:, :] > 0] = 255 + img_last = (img_last[:, :] == 0) * 255 + + cv2.imwrite(os.path.join(dir_out,image_stem+'.png'), img_last) diff --git a/src/eynollah/training/build_model_load_pretrained_weights_and_save.py b/src/eynollah/training/build_model_load_pretrained_weights_and_save.py deleted file mode 100644 index 15eaf64..0000000 --- a/src/eynollah/training/build_model_load_pretrained_weights_and_save.py +++ /dev/null @@ -1,18 +0,0 @@ -import sys -import click - -from .models import resnet50_unet - - -@click.command() -def build_model_load_pretrained_weights_and_save(): - n_classes = 2 - input_height = 224 - input_width = 448 - weight_decay = 1e-6 - pretraining = False - dir_of_weights = 'model_bin_sbb_ens.h5' - - model = resnet50_unet(n_classes, input_height, input_width, weight_decay, pretraining) - model.load_weights(dir_of_weights) - model.save('./name_in_another_python_version.h5') diff --git a/src/eynollah/training/cli.py 
b/src/eynollah/training/cli.py deleted file mode 100644 index ae14f04..0000000 --- a/src/eynollah/training/cli.py +++ /dev/null @@ -1,30 +0,0 @@ -import os -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' - -import click -import sys - -from .build_model_load_pretrained_weights_and_save import build_model_load_pretrained_weights_and_save -from .generate_gt_for_training import main as generate_gt_cli -from .inference import main as inference_cli -from .train import ex -from .extract_line_gt import linegt_cli -from .weights_ensembling import ensemble_cli - -@click.command(context_settings=dict( - ignore_unknown_options=True, -)) -@click.argument('SACRED_ARGS', nargs=-1, type=click.UNPROCESSED) -def train_cli(sacred_args): - ex.run_commandline([sys.argv[0]] + list(sacred_args)) - -@click.group('training') -def main(): - pass - -main.add_command(build_model_load_pretrained_weights_and_save) -main.add_command(generate_gt_cli, 'generate-gt') -main.add_command(inference_cli, 'inference') -main.add_command(train_cli, 'train') -main.add_command(linegt_cli, 'export_textline_images_and_text') -main.add_command(ensemble_cli, 'ensembling') diff --git a/src/eynollah/training/extract_line_gt.py b/src/eynollah/training/extract_line_gt.py deleted file mode 100644 index 58fc253..0000000 --- a/src/eynollah/training/extract_line_gt.py +++ /dev/null @@ -1,134 +0,0 @@ -from logging import Logger, getLogger -from typing import Optional -from pathlib import Path -import os - -import click -import cv2 -import xml.etree.ElementTree as ET -import numpy as np - -from ..utils import is_image_filename - -@click.command() -@click.option( - "--image", - "-i", - help="input image filename", - type=click.Path(exists=True, dir_okay=False), -) -@click.option( - "--dir_in", - "-di", - help="directory of input images (instead of --image)", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--dir_xmls", - "-dx", - help="directory of input PAGE-XML files (in addition to --dir_in; filename stems must match the image files, with '.xml' suffix).", - type=click.Path(exists=True, file_okay=False), - required=True, -) -@click.option( - "--out", - "-o", - 'dir_out', - help="directory for output PAGE-XML files", - type=click.Path(exists=True, file_okay=False), - required=True, -) -@click.option( - "--dataset_abbrevation", - "-ds_pref", - 'pref_of_dataset', - help="in the case of extracting textline and text from a xml GT file user can add an abbrevation of dataset name to generated dataset", -) -@click.option( - "--do_not_mask_with_textline_contour", - "-nmtc/-mtc", - is_flag=True, - help="if this parameter set to true, cropped textline images will not be masked with textline contour.", -) -def linegt_cli( - image, - dir_in, - dir_xmls, - dir_out, - pref_of_dataset, - do_not_mask_with_textline_contour, -): - assert bool(dir_in) ^ bool(image), "Set --dir-in or --image-filename, not both" - if dir_in: - ls_imgs = [ - os.path.join(dir_in, image) for image in filter(is_image_filename, os.listdir(dir_in)) - ] - else: - assert image - ls_imgs = [image] - - for dir_img in ls_imgs: - file_name = Path(dir_img).stem - dir_xml = os.path.join(dir_xmls, file_name + '.xml') - - img = cv2.imread(dir_img) - - total_bb_coordinates = [] - - tree1 = ET.parse(dir_xml, parser=ET.XMLParser(encoding="utf-8")) - root1 = tree1.getroot() - alltags = [elem.tag for elem in root1.iter()] - - name_space = alltags[0].split('}')[0] - name_space = name_space.split('{')[1] - - region_tags = np.unique([x for x in alltags if x.endswith('TextRegion')]) - - 
cropped_lines_region_indexer = [] - - indexer_text_region = 0 - indexer_textlines = 0 - # FIXME: non recursive, use OCR-D PAGE generateDS API. Or use an existing tool for this purpose altogether - for nn in root1.iter(region_tags): - for child_textregion in nn: - if child_textregion.tag.endswith("TextLine"): - for child_textlines in child_textregion: - if child_textlines.tag.endswith("Coords"): - cropped_lines_region_indexer.append(indexer_text_region) - p_h = child_textlines.attrib['points'].split(' ') - textline_coords = np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h]) - - x, y, w, h = cv2.boundingRect(textline_coords) - - total_bb_coordinates.append([x, y, w, h]) - - img_poly_on_img = np.copy(img) - - mask_poly = np.zeros(img.shape) - mask_poly = cv2.fillPoly(mask_poly, pts=[textline_coords], color=(1, 1, 1)) - - mask_poly = mask_poly[y : y + h, x : x + w, :] - img_crop = img_poly_on_img[y : y + h, x : x + w, :] - - if not do_not_mask_with_textline_contour: - img_crop[mask_poly == 0] = 255 - - if img_crop.shape[0] == 0 or img_crop.shape[1] == 0: - continue - if child_textlines.tag.endswith("TextEquiv"): - for cheild_text in child_textlines: - if cheild_text.tag.endswith("Unicode"): - textline_text = cheild_text.text - if textline_text: - base_name = os.path.join( - dir_out, file_name + '_line_' + str(indexer_textlines) - ) - if pref_of_dataset: - base_name += '_' + pref_of_dataset - if not do_not_mask_with_textline_contour: - base_name += '_masked' - - with open(base_name + '.txt', 'w') as text_file: - text_file.write(textline_text) - cv2.imwrite(base_name + '.png', img_crop) - indexer_textlines += 1 diff --git a/src/eynollah/training/generate_gt_for_training.py b/src/eynollah/training/generate_gt_for_training.py deleted file mode 100644 index cc5a1b2..0000000 --- a/src/eynollah/training/generate_gt_for_training.py +++ /dev/null @@ -1,621 +0,0 @@ -import click -import json -import os -from tqdm import tqdm -from pathlib import Path -from PIL import Image, ImageDraw, ImageFont -import cv2 -import numpy as np - -from .gt_gen_utils import ( - filter_contours_area_of_image, - find_format_of_given_filename_in_dir, - find_new_features_of_contours, - fit_text_single_line, - get_content_of_dir, - get_images_of_ground_truth, - get_layout_contours_for_visualization, - get_textline_contours_and_ocr_text, - get_textline_contours_for_visualization, - overlay_layout_on_image, - read_xml, - resize_image, - visualize_image_from_contours, - visualize_image_from_contours_layout -) - -@click.group() -def main(): - """ - extract GT data suitable for model training for various tasks - """ - pass - -@main.command() -@click.option( - "--dir_xml", - "-dx", - help="input directory of GT PAGE-XML files", - type=click.Path(exists=True, file_okay=False), - required=True, -) -@click.option( - "--dir_images", - "-di", - help="input directory of GT image files (only needed for '--printspace' or scaling configured via 'columns_width'; filename stems should match those in --dir_xml)", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--dir_out_images", - "-doi", - help="output directory for training image files (for printspace cropping or scaling)", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--dir_out", - "-do", - help="output directory for training label files", - type=click.Path(exists=True, file_okay=False), - required=True, -) - -@click.option( - "--config", - "-cfg", - help="config file of prefered layout or use case.", - 
type=click.Path(exists=True, dir_okay=False), -) - -@click.option( - "--type_output", - "-to", - type=click.Choice(["2d", "3d"]), - default="2d", - help="generate labels as [H, W] array pseudo index-color images for training ('2d') or [H, W, C] array RGB color images for plotting ('3d')", -) -@click.option( - "--printspace", - "-ps", - is_flag=True, - help="crop pages from annotated PrintSpace or Border to generate labels and images (will also require -di for so original images so output images are cropped along with labels)", -) -@click.option( - "--missing-printspace", - "-mps", - type=click.Choice(["full", "skip", "project"]), - default="full", - help="if -ps is set, what to do in case a PAGE-XML has no PrintSpace or Border annotation: keep entire page ('full'), ignore file ('skip') or crop artificially from outer hull of all segments ('project')", -) - -def pagexml2label(dir_xml, dir_out, type_output, config, printspace, missing_printspace, dir_images, dir_out_images): - """ - extract PAGE-XML GT data suitable for model training for segmentation tasks - """ - if config: - with open(config) as f: - config_params = json.load(f) - else: - print("passed") - config_params = None - get_images_of_ground_truth(get_content_of_dir(dir_xml), - dir_xml, - dir_out, - type_output, - config, - config_params, - printspace, - missing_printspace, - dir_images, - dir_out_images - ) - -@main.command() -@click.option( - "--dir_imgs", - "-dis", - help="directory of images with high resolution.", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--dir_out_images", - "-dois", - help="directory where degraded images will be written.", - type=click.Path(exists=True, file_okay=False), -) - -@click.option( - "--dir_out_labels", - "-dols", - help="directory where original images will be written as labels.", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--scales", - "-scs", - help="json dictionary where the scales are written.", - type=click.Path(exists=True, dir_okay=False), -) -def image_enhancement(dir_imgs, dir_out_images, dir_out_labels, scales): - """ - extract image GT data suitable for model training for image enhancement tasks - """ - ls_imgs = os.listdir(dir_imgs) - with open(scales) as f: - scale_dict = json.load(f) - ls_scales = scale_dict['scales'] - - for img in tqdm(ls_imgs): - img_name = img.split('.')[0] - img_type = img.split('.')[1] - image = cv2.imread(os.path.join(dir_imgs, img)) - for i, scale in enumerate(ls_scales): - height_sc = int(image.shape[0]*scale) - width_sc = int(image.shape[1]*scale) - - image_down_scaled = resize_image(image, height_sc, width_sc) - image_back_to_org_scale = resize_image(image_down_scaled, image.shape[0], image.shape[1]) - - cv2.imwrite(os.path.join(dir_out_images, img_name+'_'+str(i)+'.'+img_type), image_back_to_org_scale) - cv2.imwrite(os.path.join(dir_out_labels, img_name+'_'+str(i)+'.'+img_type), image) - - -@main.command() -@click.option( - "--dir_xml", - "-dx", - help="directory of GT page-xml files", - type=click.Path(exists=True, file_okay=False), -) - -@click.option( - "--dir_out_modal_image", - "-domi", - help="directory where ground truth images would be written", - type=click.Path(exists=True, file_okay=False), -) - -@click.option( - "--dir_out_classes", - "-docl", - help="directory where ground truth classes would be written", - type=click.Path(exists=True, file_okay=False), -) - -@click.option( - "--input_height", - "-ih", - help="input height", -) -@click.option( - "--input_width", - "-iw", - help="input 
width", -) -@click.option( - "--min_area_size", - "-min", - help="min area size of regions considered for reading order training.", -) - -@click.option( - "--min_area_early", - "-min_early", - help="If you have already generated a training dataset using a specific minimum area value and now wish to create a dataset with a smaller minimum area value, you can avoid regenerating the previous dataset by providing the earlier minimum area value. This will ensure that only the missing data is generated.", -) - -def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, input_height, input_width, min_area_size, min_area_early): - """ - extract PAGE-XML GT data suitable for model training for reading-order task - """ - xml_files_ind = os.listdir(dir_xml) - xml_files_ind = [ind_xml for ind_xml in xml_files_ind if ind_xml.endswith('.xml')] - input_height = int(input_height) - input_width = int(input_width) - min_area = float(min_area_size) - if min_area_early: - min_area_early = float(min_area_early) - - - indexer_start= 0#55166 - max_area = 1 - #min_area = 0.0001 - - for ind_xml in tqdm(xml_files_ind): - indexer = 0 - #print(ind_xml) - #print('########################') - xml_file = os.path.join(dir_xml,ind_xml ) - f_name = ind_xml.split('.')[0] - _, _, _, file_name, id_paragraph, id_header,co_text_paragraph,co_text_header,tot_region_ref,x_len, y_len,index_tot_regions,img_poly = read_xml(xml_file) - - id_all_text = id_paragraph + id_header - co_text_all = co_text_paragraph + co_text_header - - - _, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, _ = find_new_features_of_contours(co_text_header) - - img_header_and_sep = np.zeros((y_len,x_len), dtype='uint8') - - for j in range(len(cy_main)): - img_header_and_sep[int(y_max_main[j]):int(y_max_main[j])+12, - int(x_min_main[j]):int(x_max_main[j]) ] = 1 - - - try: - texts_corr_order_index_int = [int(index_tot_regions[tot_region_ref.index(i)]) - for i in id_all_text] - except ValueError as e: - print("incomplete ReadingOrder in", xml_file, "- skipping:", str(e)) - continue - - co_text_all, texts_corr_order_index_int, regions_ar_less_than_early_min = \ - filter_contours_area_of_image(img_poly, co_text_all, texts_corr_order_index_int, - max_area, min_area, min_area_early) - - - arg_array = np.array(range(len(texts_corr_order_index_int))) - - labels_con = np.zeros((y_len,x_len,len(arg_array)),dtype='uint8') - for i in range(len(co_text_all)): - img_label = np.zeros((y_len,x_len,3),dtype='uint8') - img_label=cv2.fillPoly(img_label, pts =[co_text_all[i]], color=(1,1,1)) - - img_label[:,:,0][img_poly[:,:,0]==5] = 2 - img_label[:,:,0][img_header_and_sep[:,:]==1] = 3 - - labels_con[:,:,i] = img_label[:,:,0] - - labels_con = resize_image(labels_con, input_height, input_width) - img_poly = resize_image(img_poly, input_height, input_width) - - - for i in range(len(texts_corr_order_index_int)): - for j in range(len(texts_corr_order_index_int)): - if i!=j: - if regions_ar_less_than_early_min: - if regions_ar_less_than_early_min[i]==1: - input_multi_visual_modal = np.zeros((input_height,input_width,3)).astype(np.int8) - final_f_name = f_name+'_'+str(indexer+indexer_start) - order_class_condition = texts_corr_order_index_int[i]-texts_corr_order_index_int[j] - if order_class_condition<0: - class_type = 1 - else: - class_type = 0 - - input_multi_visual_modal[:,:,0] = labels_con[:,:,i] - input_multi_visual_modal[:,:,1] = img_poly[:,:,0] - input_multi_visual_modal[:,:,2] = labels_con[:,:,j] - - 
np.save(os.path.join(dir_out_classes,final_f_name+'_missed.npy' ), class_type) - - cv2.imwrite(os.path.join(dir_out_modal_image,final_f_name+'_missed.png' ), input_multi_visual_modal) - indexer = indexer+1 - - else: - input_multi_visual_modal = np.zeros((input_height,input_width,3)).astype(np.int8) - final_f_name = f_name+'_'+str(indexer+indexer_start) - order_class_condition = texts_corr_order_index_int[i]-texts_corr_order_index_int[j] - if order_class_condition<0: - class_type = 1 - else: - class_type = 0 - - input_multi_visual_modal[:,:,0] = labels_con[:,:,i] - input_multi_visual_modal[:,:,1] = img_poly[:,:,0] - input_multi_visual_modal[:,:,2] = labels_con[:,:,j] - - np.save(os.path.join(dir_out_classes,final_f_name+'.npy' ), class_type) - - cv2.imwrite(os.path.join(dir_out_modal_image,final_f_name+'.png' ), input_multi_visual_modal) - indexer = indexer+1 - - -@main.command() -@click.option( - "--xml_file", - "-xml", - help="xml filename", - type=click.Path(exists=True, dir_okay=False), -) -@click.option( - "--dir_xml", - "-dx", - help="directory of GT page-xml files", - type=click.Path(exists=True, file_okay=False), -) - -@click.option( - "--dir_out", - "-o", - help="directory where plots will be written", - type=click.Path(exists=True, file_okay=False), -) - -@click.option( - "--dir_imgs", - "-di", - help="directory where the overlayed plots will be written", ) - -def visualize_reading_order(xml_file, dir_xml, dir_out, dir_imgs): - assert xml_file or dir_xml, "A single xml file -xml or a dir of xml files -dx is required not both of them" - - if dir_xml: - xml_files_ind = os.listdir(dir_xml) - xml_files_ind = [ind_xml for ind_xml in xml_files_ind if ind_xml.endswith('.xml')] - else: - xml_files_ind = [xml_file] - - indexer_start= 0#55166 - #min_area = 0.0001 - - for ind_xml in tqdm(xml_files_ind): - indexer = 0 - #print(ind_xml) - #print('########################') - #xml_file = os.path.join(dir_xml,ind_xml ) - - if dir_xml: - xml_file = os.path.join(dir_xml,ind_xml ) - f_name = Path(ind_xml).stem - else: - xml_file = os.path.join(ind_xml ) - f_name = Path(ind_xml).stem - print(f_name, 'f_name') - - #f_name = ind_xml.split('.')[0] - _, _, _, file_name, id_paragraph, id_header,co_text_paragraph,co_text_header,tot_region_ref,x_len, y_len,index_tot_regions,img_poly = read_xml(xml_file) - - id_all_text = id_paragraph + id_header - co_text_all = co_text_paragraph + co_text_header - - - cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, _ = find_new_features_of_contours(co_text_all) - - texts_corr_order_index = [int(index_tot_regions[tot_region_ref.index(i)]) for i in id_all_text ] - #texts_corr_order_index_int = [int(x) for x in texts_corr_order_index] - - - #cx_ordered = np.array(cx_main)[np.array(texts_corr_order_index)] - #cx_ordered = cx_ordered.astype(np.int32) - - cx_ordered = [int(val) for (_, val) in sorted(zip(texts_corr_order_index, cx_main), key=lambda x: \ - x[0], reverse=False)] - #cx_ordered = cx_ordered.astype(np.int32) - - cy_ordered = [int(val) for (_, val) in sorted(zip(texts_corr_order_index, cy_main), key=lambda x: \ - x[0], reverse=False)] - #cy_ordered = cy_ordered.astype(np.int32) - - - color = (0, 0, 255) - thickness = 20 - if dir_imgs: - layout = np.zeros( (y_len,x_len,3) ) - layout = cv2.fillPoly(layout, pts =co_text_all, color=(1,1,1)) - - img_file_name_with_format = find_format_of_given_filename_in_dir(dir_imgs, f_name) - img = cv2.imread(os.path.join(dir_imgs, img_file_name_with_format)) - - overlayed = overlay_layout_on_image(layout, img, 
cx_ordered, cy_ordered, color, thickness) - cv2.imwrite(os.path.join(dir_out, f_name+'.png'), overlayed) - - else: - img = np.zeros( (y_len,x_len,3) ) - img = cv2.fillPoly(img, pts =co_text_all, color=(255,0,0)) - for i in range(len(cx_ordered)-1): - start_point = (int(cx_ordered[i]), int(cy_ordered[i])) - end_point = (int(cx_ordered[i+1]), int(cy_ordered[i+1])) - img = cv2.arrowedLine(img, start_point, end_point, - color, thickness, tipLength = 0.03) - - cv2.imwrite(os.path.join(dir_out, f_name+'.png'), img) - - -@main.command() -@click.option( - "--xml_file", - "-xml", - help="xml filename", - type=click.Path(exists=True, dir_okay=False), -) -@click.option( - "--dir_xml", - "-dx", - help="directory of GT page-xml files", - type=click.Path(exists=True, file_okay=False), -) - -@click.option( - "--dir_out", - "-o", - help="directory where plots will be written", - type=click.Path(exists=True, file_okay=False), -) - -@click.option( - "--dir_imgs", - "-di", - help="directory of images where textline segmentation will be overlayed", ) - -def visualize_textline_segmentation(xml_file, dir_xml, dir_out, dir_imgs): - assert xml_file or dir_xml, "A single xml file -xml or a dir of xml files -dx is required not both of them" - if dir_xml: - xml_files_ind = os.listdir(dir_xml) - xml_files_ind = [ind_xml for ind_xml in xml_files_ind if ind_xml.endswith('.xml')] - else: - xml_files_ind = [xml_file] - - for ind_xml in tqdm(xml_files_ind): - indexer = 0 - #print(ind_xml) - #print('########################') - xml_file = os.path.join(dir_xml,ind_xml ) - f_name = Path(ind_xml).stem - - img_file_name_with_format = find_format_of_given_filename_in_dir(dir_imgs, f_name) - img = cv2.imread(os.path.join(dir_imgs, img_file_name_with_format)) - - co_tetxlines, y_len, x_len = get_textline_contours_for_visualization(xml_file) - - added_image = visualize_image_from_contours(co_tetxlines, img) - - cv2.imwrite(os.path.join(dir_out, f_name+'.png'), added_image) - - - -@main.command() -@click.option( - "--xml_file", - "-xml", - help="xml filename", - type=click.Path(exists=True, dir_okay=False), -) -@click.option( - "--dir_xml", - "-dx", - help="directory of GT page-xml files", - type=click.Path(exists=True, file_okay=False), -) - -@click.option( - "--dir_out", - "-o", - help="directory where plots will be written", - type=click.Path(exists=True, file_okay=False), -) - -@click.option( - "--dir_imgs", - "-di", - help="directory of images where textline segmentation will be overlayed", ) - -def visualize_layout_segmentation(xml_file, dir_xml, dir_out, dir_imgs): - assert xml_file or dir_xml, "A single xml file -xml or a dir of xml files -dx is required not both of them" - if dir_xml: - xml_files_ind = os.listdir(dir_xml) - xml_files_ind = [ind_xml for ind_xml in xml_files_ind if ind_xml.endswith('.xml')] - else: - xml_files_ind = [xml_file] - - for ind_xml in tqdm(xml_files_ind): - indexer = 0 - #print(ind_xml) - #print('########################') - if dir_xml: - xml_file = os.path.join(dir_xml,ind_xml ) - f_name = Path(ind_xml).stem - else: - xml_file = os.path.join(ind_xml ) - f_name = Path(ind_xml).stem - print(f_name, 'f_name') - - img_file_name_with_format = find_format_of_given_filename_in_dir(dir_imgs, f_name) - img = cv2.imread(os.path.join(dir_imgs, img_file_name_with_format)) - - co_text, co_graphic, co_sep, co_img, co_table, co_map, co_noise, y_len, x_len = get_layout_contours_for_visualization(xml_file) - - - added_image = visualize_image_from_contours_layout(co_text['paragraph'], 
co_text['header']+co_text['heading'], co_text['drop-capital'], co_sep, co_img, co_text['marginalia'], co_table, co_map, img)
-
-        cv2.imwrite(os.path.join(dir_out, f_name+'.png'), added_image)
-
-
-
-
-@main.command()
-@click.option(
-    "--xml_file",
-    "-xml",
-    help="xml filename",
-    type=click.Path(exists=True, dir_okay=False),
-)
-@click.option(
-    "--dir_xml",
-    "-dx",
-    help="directory of GT page-xml files",
-    type=click.Path(exists=True, file_okay=False),
-)
-
-@click.option(
-    "--dir_out",
-    "-o",
-    help="directory where plots will be written",
-    type=click.Path(exists=True, file_okay=False),
-)
-
-
-def visualize_ocr_text(xml_file, dir_xml, dir_out):
-    assert xml_file or dir_xml, "A single xml file -xml or a dir of xml files -dx is required not both of them"
-    if dir_xml:
-        xml_files_ind = os.listdir(dir_xml)
-        xml_files_ind = [ind_xml for ind_xml in xml_files_ind if ind_xml.endswith('.xml')]
-    else:
-        xml_files_ind = [xml_file]
-
-    font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists!
-    font = ImageFont.truetype(font_path, 40)
-
-    for ind_xml in tqdm(xml_files_ind):
-        indexer = 0
-        #print(ind_xml)
-        #print('########################')
-        if dir_xml:
-            xml_file = os.path.join(dir_xml,ind_xml )
-            f_name = Path(ind_xml).stem
-        else:
-            xml_file = os.path.join(ind_xml )
-            f_name = Path(ind_xml).stem
-            print(f_name, 'f_name')
-
-        co_tetxlines, y_len, x_len, ocr_texts = get_textline_contours_and_ocr_text(xml_file)
-
-        total_bb_coordinates = []
-
-        image_text = Image.new("RGB", (x_len, y_len), "white")
-        draw = ImageDraw.Draw(image_text)
-
-
-
-        for index, cnt in enumerate(co_tetxlines):
-            x,y,w,h = cv2.boundingRect(cnt)
-            #total_bb_coordinates.append([x,y,w,h])
-
-            #fit_text_single_line
-
-            #x_bb = bb_ind[0]
-            #y_bb = bb_ind[1]
-            #w_bb = bb_ind[2]
-            #h_bb = bb_ind[3]
-            if ocr_texts[index]:
-
-
-                is_vertical = h > 2*w # Check orientation
-                font = fit_text_single_line(draw, ocr_texts[index], font_path, w, int(h*0.4) )
-
-                if is_vertical:
-
-                    vertical_font = fit_text_single_line(draw, ocr_texts[index], font_path, h, int(w * 0.8))
-
-                    text_img = Image.new("RGBA", (h, w), (255, 255, 255, 0)) # Note: dimensions are swapped
-                    text_draw = ImageDraw.Draw(text_img)
-                    text_draw.text((0, 0), ocr_texts[index], font=vertical_font, fill="black")
-
-                    # Rotate text image by 90 degrees
-                    rotated_text = text_img.rotate(90, expand=1)
-
-                    # Calculate paste position (centered in bbox)
-                    paste_x = x + (w - rotated_text.width) // 2
-                    paste_y = y + (h - rotated_text.height) // 2
-
-                    image_text.paste(rotated_text, (paste_x, paste_y), rotated_text) # Use rotated image as mask
-                else:
-                    text_bbox = draw.textbbox((0, 0), ocr_texts[index], font=font)
-                    text_width = text_bbox[2] - text_bbox[0]
-                    text_height = text_bbox[3] - text_bbox[1]
-
-                    text_x = x + (w - text_width) // 2 # Center horizontally
-                    text_y = y + (h - text_height) // 2 # Center vertically
-
-                    # Draw the text
-                    draw.text((text_x, text_y), ocr_texts[index], fill="black", font=font)
-        image_text.save(os.path.join(dir_out, f_name+'.png'))
diff --git a/src/eynollah/training/gt_gen_utils.py b/src/eynollah/training/gt_gen_utils.py
deleted file mode 100644
index 796e896..0000000
--- a/src/eynollah/training/gt_gen_utils.py
+++ /dev/null
@@ -1,1880 +0,0 @@
-import os
-import numpy as np
-import warnings
-from lxml import etree as ET
-from tqdm import tqdm
-import cv2
-from shapely import geometry
-from pathlib import Path
-from PIL import ImageFont
-from ocrd_utils import bbox_from_points
-
-
-KERNEL = np.ones((5, 5), np.uint8)
-NS = { 'pc': 
'http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15' -} - -with warnings.catch_warnings(): - warnings.simplefilter("ignore") - - -def visualize_image_from_contours_layout(co_par, co_header, co_drop, co_sep, co_image, co_marginal, co_table, co_map, img): - alpha = 0.5 - - blank_image = np.ones( (img.shape[:]), dtype=np.uint8) * 255 - - col_header = (173, 216, 230) - col_drop = (0, 191, 255) - boundary_color = (143, 216, 200)#(0, 0, 255) # Dark gray for the boundary - col_par = (0, 0, 139) # Lighter gray for the filled area - col_image = (0, 100, 0) - col_sep = (255, 0, 0) - col_marginal = (106, 90, 205) - col_table = (0, 90, 205) - col_map = (90, 90, 205) - - if len(co_image)>0: - cv2.drawContours(blank_image, co_image, -1, col_image, thickness=cv2.FILLED) # Fill the contour - - if len(co_sep)>0: - cv2.drawContours(blank_image, co_sep, -1, col_sep, thickness=cv2.FILLED) # Fill the contour - - - if len(co_header)>0: - cv2.drawContours(blank_image, co_header, -1, col_header, thickness=cv2.FILLED) # Fill the contour - - if len(co_par)>0: - cv2.drawContours(blank_image, co_par, -1, col_par, thickness=cv2.FILLED) # Fill the contour - - cv2.drawContours(blank_image, co_par, -1, boundary_color, thickness=1) # Draw the boundary - - if len(co_drop)>0: - cv2.drawContours(blank_image, co_drop, -1, col_drop, thickness=cv2.FILLED) # Fill the contour - - if len(co_marginal)>0: - cv2.drawContours(blank_image, co_marginal, -1, col_marginal, thickness=cv2.FILLED) # Fill the contour - - if len(co_table)>0: - cv2.drawContours(blank_image, co_table, -1, col_table, thickness=cv2.FILLED) # Fill the contour - - if len(co_map)>0: - cv2.drawContours(blank_image, co_map, -1, col_map, thickness=cv2.FILLED) # Fill the contour - - img_final =cv2.cvtColor(blank_image, cv2.COLOR_BGR2RGB) - - added_image = cv2.addWeighted(img,alpha,img_final,1- alpha,0) - return added_image - - -def visualize_image_from_contours(contours, img): - alpha = 0.5 - - blank_image = np.ones( (img.shape[:]), dtype=np.uint8) * 255 - - boundary_color = (0, 0, 255) # Dark gray for the boundary - fill_color = (173, 216, 230) # Lighter gray for the filled area - - cv2.drawContours(blank_image, contours, -1, fill_color, thickness=cv2.FILLED) # Fill the contour - cv2.drawContours(blank_image, contours, -1, boundary_color, thickness=1) # Draw the boundary - - img_final =cv2.cvtColor(blank_image, cv2.COLOR_BGR2RGB) - - added_image = cv2.addWeighted(img,alpha,img_final,1- alpha,0) - return added_image - -def visualize_model_output(prediction, img, task): - if task == "binarization": - prediction = prediction * -1 - prediction = prediction + 1 - added_image = prediction * 255 - layout_only = None - else: - unique_classes = np.unique(prediction[:,:,0]) - rgb_colors = {'0' : [255, 255, 255], - '1' : [255, 0, 0], - '2' : [255, 125, 0], - '3' : [255, 0, 125], - '4' : [125, 125, 125], - '5' : [125, 125, 0], - '6' : [0, 125, 255], - '7' : [0, 125, 0], - '8' : [125, 125, 125], - '9' : [0, 125, 255], - '10' : [125, 0, 125], - '11' : [0, 255, 0], - '12' : [0, 0, 255], - '13' : [0, 255, 255], - '14' : [255, 125, 125], - '15' : [255, 0, 255]} - - layout_only = np.zeros(prediction.shape) - - for unq_class in unique_classes: - rgb_class_unique = rgb_colors[str(int(unq_class))] - layout_only[:,:,0][prediction[:,:,0]==unq_class] = rgb_class_unique[0] - layout_only[:,:,1][prediction[:,:,0]==unq_class] = rgb_class_unique[1] - layout_only[:,:,2][prediction[:,:,0]==unq_class] = rgb_class_unique[2] - - - - img = resize_image(img, layout_only.shape[0], 
layout_only.shape[1]) - - layout_only = layout_only.astype(np.int32) - img = img.astype(np.int32) - - - - added_image = cv2.addWeighted(img,0.5,layout_only,0.1,0) - - return added_image, layout_only - -def get_content_of_dir(dir_in): - """ - Listing all ground truth page xml files. All files are needed to have xml format. - """ - - gt_all=os.listdir(dir_in) - gt_list = [file for file in gt_all if os.path.splitext(file)[1] == '.xml'] - return gt_list - -def return_parent_contours(contours, hierarchy): - contours_parent = [contours[i] for i in range(len(contours)) if hierarchy[0][i][3] == -1] - return contours_parent -def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, min_area): - found_polygons_early = list() - - jv = 0 - for c in contours: - if len(np.shape(c)) == 3: - c = c[0] - elif len(np.shape(c)) == 2: - pass - #c = c[0] - if len(c) < 3: # A polygon cannot have less than 3 points - continue - - c_e = [point for point in c] - polygon = geometry.Polygon(c_e) - # area = cv2.contourArea(c) - area = polygon.area - # Check that polygon has area greater than minimal area - if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 : - found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.int32)) - jv += 1 - return found_polygons_early - -def filter_contours_area_of_image(image, contours, order_index, max_area, min_area, min_early=None): - found_polygons_early = list() - order_index_filtered = list() - regions_ar_less_than_early_min = list() - #jv = 0 - for jv, c in enumerate(contours): - if len(np.shape(c)) == 3: - c = c[0] - elif len(np.shape(c)) == 2: - pass - if len(c) < 3: # A polygon cannot have less than 3 points - continue - c_e = [point for point in c] - polygon = geometry.Polygon(c_e) - area = polygon.area - if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 : - found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.uint)) - order_index_filtered.append(order_index[jv]) - if min_early: - if area < min_early * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 : - regions_ar_less_than_early_min.append(1) - else: - regions_ar_less_than_early_min.append(0) - else: - regions_ar_less_than_early_min = None - - #jv += 1 - return found_polygons_early, order_index_filtered, regions_ar_less_than_early_min - -def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002): - - # pixels of images are identified by 5 - if len(region_pre_p.shape) == 3: - cnts_images = (region_pre_p[:, :, 0] == pixel) * 1 - else: - cnts_images = (region_pre_p[:, :] == pixel) * 1 - cnts_images = cnts_images.astype(np.uint8) - cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) - imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) - - contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - - #print(len(contours_imgs), hierarchy) - - contours_imgs = return_parent_contours(contours_imgs, hierarchy) - - #print(len(contours_imgs), "iki") - #contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=min_area) - - return contours_imgs -def update_region_contours(co_text, img_boundary, erosion_rate, dilation_rate, y_len, x_len, dilation_early=None, 
erosion_early=None): - co_text_eroded = [] - for con in co_text: - img_boundary_in = np.zeros( (y_len,x_len) ) - img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) - - if dilation_early: - img_boundary_in = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=dilation_early) - - if erosion_early: - img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=erosion_early) - - #img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=7)#asiatica - if erosion_rate > 0: - img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=erosion_rate) - - pixel = 1 - min_size = 0 - - img_boundary_in = img_boundary_in.astype("uint8") - - con_eroded = return_contours_of_interested_region(img_boundary_in,pixel, min_size ) - - try: - if len(con_eroded) > 1: - largest = np.argmax(list(map(cv2.contourArea, con_eroded))) - else: - largest = 0 - co_text_eroded.append(con_eroded[largest]) - except: - co_text_eroded.append(con) - - - img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=dilation_rate) - #img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=5) - - boundary = img_boundary_in_dilated[:,:] - img_boundary_in[:,:] - - img_boundary[:,:][boundary[:,:]==1] =1 - return co_text_eroded, img_boundary - -def get_textline_contours_for_visualization(xml_file): - tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) - root1=tree1.getroot() - alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' - - - - x_len, y_len = 0, 0 - for jj in root1.iter(link+'Page'): - y_len=int(jj.attrib['imageHeight']) - x_len=int(jj.attrib['imageWidth']) - - region_tags = np.unique([x for x in alltags if x.endswith('TextLine')]) - tag_endings = ['}TextLine','}textline'] - co_use_case = [] - - for tag in region_tags: - if tag.endswith(tag_endings[0]) or tag.endswith(tag_endings[1]): - for nn in root1.iter(tag): - c_t_in = [] - sumi = 0 - for vv in nn.iter(): - if vv.tag == link + 'Coords': - coords = bool(vv.attrib) - if coords: - p_h = vv.attrib['points'].split(' ') - c_t_in.append( - np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) - break - else: - pass - - if vv.tag == link + 'Point': - c_t_in.append([int(float(vv.attrib['x'])), int(float(vv.attrib['y']))]) - sumi += 1 - elif vv.tag != link + 'Point' and sumi >= 1: - break - co_use_case.append(np.array(c_t_in)) - return co_use_case, y_len, x_len - - -def get_textline_contours_and_ocr_text(xml_file): - tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) - root1=tree1.getroot() - alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' - - - - x_len, y_len = 0, 0 - for jj in root1.iter(link+'Page'): - y_len=int(jj.attrib['imageHeight']) - x_len=int(jj.attrib['imageWidth']) - - region_tags = np.unique([x for x in alltags if x.endswith('TextLine')]) - tag_endings = ['}TextLine','}textline'] - co_use_case = [] - ocr_textlines = [] - - for tag in region_tags: - if tag.endswith(tag_endings[0]) or tag.endswith(tag_endings[1]): - for nn in root1.iter(tag): - c_t_in = [] - ocr_text_in = [''] - sumi = 0 - for vv in nn.iter(): - if vv.tag == link + 'Coords': - for childtest2 in nn: - if childtest2.tag.endswith("TextEquiv"): - for child_uc in childtest2: - if child_uc.tag.endswith("Unicode"): - text = child_uc.text - ocr_text_in[0]= text - - coords = bool(vv.attrib) - if coords: - p_h = vv.attrib['points'].split(' ') - c_t_in.append( - np.array([[int(x.split(',')[0]), int(x.split(',')[1])] 
for x in p_h])) - break - else: - pass - - - - if vv.tag == link + 'Point': - c_t_in.append([int(float(vv.attrib['x'])), int(float(vv.attrib['y']))]) - sumi += 1 - elif vv.tag != link + 'Point' and sumi >= 1: - break - - - co_use_case.append(np.array(c_t_in)) - ocr_textlines.append(ocr_text_in[0]) - return co_use_case, y_len, x_len, ocr_textlines - -def fit_text_single_line(draw, text, font_path, max_width, max_height): - initial_font_size = 50 - font_size = initial_font_size - while font_size > 10: # Minimum font size - font = ImageFont.truetype(font_path, font_size) - text_bbox = draw.textbbox((0, 0), text, font=font) # Get text bounding box - text_width = text_bbox[2] - text_bbox[0] - text_height = text_bbox[3] - text_bbox[1] - - if text_width <= max_width and text_height <= max_height: - return font # Return the best-fitting font - - font_size -= 2 # Reduce font size and retry - - return ImageFont.truetype(font_path, 10) # Smallest font fallback - -def get_layout_contours_for_visualization(xml_file): - tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) - root1=tree1.getroot() - alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' - - - x_len, y_len = 0, 0 - for jj in root1.iter(link+'Page'): - y_len=int(jj.attrib['imageHeight']) - x_len=int(jj.attrib['imageWidth']) - - region_tags=np.unique([x for x in alltags if x.endswith('Region')]) - co_text = {'drop-capital':[], "footnote":[], "footnote-continued":[], "heading":[], "signature-mark":[], "header":[], "catch-word":[], "page-number":[], "marginalia":[], "paragraph":[]} - all_defined_textregion_types = list(co_text.keys()) - co_graphic = {"handwritten-annotation":[], "decoration":[], "stamp":[], "signature":[]} - all_defined_graphic_types = list(co_graphic.keys()) - co_sep=[] - co_img=[] - co_table=[] - co_map=[] - co_noise=[] - - types_text = [] - types_graphic = [] - - for tag in region_tags: - if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): - for nn in root1.iter(tag): - c_t_in = {'drop-capital':[], "footnote":[], "footnote-continued":[], "heading":[], "signature-mark":[], "header":[], "catch-word":[], "page-number":[], "marginalia":[], "paragraph":[]} - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - - if "rest_as_paragraph" in types_text: - types_text_without_paragraph = [element for element in types_text if element!='rest_as_paragraph' and element!='paragraph'] - if len(types_text_without_paragraph) == 0: - if "type" in nn.attrib: - c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - else: - c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - elif len(types_text_without_paragraph) >= 1: - if "type" in nn.attrib: - if nn.attrib['type'] in types_text_without_paragraph: - c_t_in[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - else: - c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - else: - c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - else: - if "type" in nn.attrib: - if nn.attrib['type'] in all_defined_textregion_types: - c_t_in[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - else: - 
c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - break - else: - pass - - - if vv.tag==link+'Point': - if "rest_as_paragraph" in types_text: - types_text_without_paragraph = [element for element in types_text if element!='rest_as_paragraph' and element!='paragraph'] - if len(types_text_without_paragraph) == 0: - if "type" in nn.attrib: - c_t_in['paragraph'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - elif len(types_text_without_paragraph) >= 1: - if "type" in nn.attrib: - if nn.attrib['type'] in types_text_without_paragraph: - c_t_in[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - else: - c_t_in['paragraph'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - - else: - if "type" in nn.attrib: - if nn.attrib['type'] in all_defined_textregion_types: - c_t_in[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - - - elif vv.tag!=link+'Point' and sumi>=1: - break - - for element_text in list(c_t_in.keys()): - if len(c_t_in[element_text])>0: - co_text[element_text].append(np.array(c_t_in[element_text])) - - - if tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in_graphic = {"handwritten-annotation":[], "decoration":[], "stamp":[], "signature":[]} - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - - if "rest_as_decoration" in types_graphic: - types_graphic_without_decoration = [element for element in types_graphic if element!='rest_as_decoration' and element!='decoration'] - if len(types_graphic_without_decoration) == 0: - if "type" in nn.attrib: - c_t_in_graphic['decoration'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - elif len(types_graphic_without_decoration) >= 1: - if "type" in nn.attrib: - if nn.attrib['type'] in types_graphic_without_decoration: - c_t_in_graphic[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - else: - c_t_in_graphic['decoration'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - else: - if "type" in nn.attrib: - if nn.attrib['type'] in all_defined_graphic_types: - c_t_in_graphic[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - break - else: - pass - - - if vv.tag==link+'Point': - if "rest_as_decoration" in types_graphic: - types_graphic_without_decoration = [element for element in types_graphic if element!='rest_as_decoration' and element!='decoration'] - if len(types_graphic_without_decoration) == 0: - if "type" in nn.attrib: - c_t_in_graphic['decoration'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - elif len(types_graphic_without_decoration) >= 1: - if "type" in nn.attrib: - if nn.attrib['type'] in types_graphic_without_decoration: - c_t_in_graphic[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - else: - c_t_in_graphic['decoration'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - - else: - if "type" in nn.attrib: - if nn.attrib['type'] in all_defined_graphic_types: - c_t_in_graphic[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , 
int(float(vv.attrib['y'])) ] ) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - - for element_graphic in list(c_t_in_graphic.keys()): - if len(c_t_in_graphic[element_graphic])>0: - co_graphic[element_graphic].append(np.array(c_t_in_graphic[element_graphic])) - - - if tag.endswith('}ImageRegion') or tag.endswith('}imageregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - co_img.append(np.array(c_t_in)) - - - if tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - co_sep.append(np.array(c_t_in)) - - - if tag.endswith('}TableRegion') or tag.endswith('}tableregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_table.append(np.array(c_t_in)) - - if tag.endswith('}MapRegion') or tag.endswith('}mapregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_map.append(np.array(c_t_in)) - - - if tag.endswith('}NoiseRegion') or tag.endswith('}noiseregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_noise.append(np.array(c_t_in)) - return co_text, co_graphic, co_sep, co_img, co_table, co_map, co_noise, y_len, x_len - -def get_images_of_ground_truth( - gt_list, - dir_in, - output_dir, - output_type, - config_file, - config_params, 
- printspace, - missing_printspace, - dir_images, - dir_out_images -): - """ - Reading the page xml files and write the ground truth images into given output directory. - """ - ## to do: add footnote to text regions - - if dir_images: - ls_org_imgs = os.listdir(dir_images) - ls_org_imgs = {os.path.splitext(item)[0]: item - for item in ls_org_imgs - if not item.endswith('.xml')} - - for index in tqdm(range(len(gt_list))): - #try: - print(gt_list[index]) - tree1 = ET.parse(dir_in+'/'+gt_list[index], parser = ET.XMLParser(encoding='utf-8')) - root1=tree1.getroot() - alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' - - - x_len, y_len = 0, 0 - for jj in root1.iter(link+'Page'): - y_len=int(jj.attrib['imageHeight']) - x_len=int(jj.attrib['imageWidth']) - - if 'columns_width' in list(config_params.keys()): - columns_width_dict = config_params['columns_width'] - # FIXME: look in /Page/@custom as well - metadata_element = root1.find(link+'Metadata') - num_col = None - for child in metadata_element: - tag2 = child.tag - if tag2.endswith('}Comments') or tag2.endswith('}comments'): - text_comments = child.text - num_col = int(text_comments.split('num_col')[1]) - - if num_col: - x_new = columns_width_dict[str(num_col)] - y_new = int ( x_new * (y_len / float(x_len)) ) - - if printspace or "printspace_as_class_in_layout" in list(config_params.keys()): - ps = (root1.xpath('/pc:PcGts/pc:Page/pc:Border', namespaces=NS) + - root1.xpath('/pc:PcGts/pc:Page/pc:PrintSpace', namespaces=NS)) - coords = root1.xpath('//pc:Coords/@points', namespaces=NS) - if len(ps): - points = ps[0].find('pc:Coords', NS).get('points') - ps_bbox = bbox_from_points(points) - elif missing_printspace == 'skip': - print(gt_list[index], "has no Border or PrintSpace - skipping file") - continue - elif missing_printspace == 'project' and len(coords): - print(gt_list[index], "has no Border or PrintSpace - projecting hull of segments") - bboxes = list(map(bbox_from_points, coords)) - left, top, right, bottom = zip(*bboxes) - left = max(0, min(left) - 5) - top = max(0, min(top) - 5) - right = min(x_len, max(right) + 5) - bottom = min(y_len, max(bottom) + 5) - ps_bbox = [left, top, right, bottom] - else: - print(gt_list[index], "has no Border or PrintSpace - using full page") - ps_bbox = [0, 0, None, None] - - - if config_file and (config_params['use_case']=='textline' or config_params['use_case']=='word' or config_params['use_case']=='glyph' or config_params['use_case']=='printspace'): - keys = list(config_params.keys()) - if "artificial_class_label" in keys: - artificial_class_rgb_color = (255,255,0) - artificial_class_label = config_params['artificial_class_label'] - - textline_rgb_color = (255, 0, 0) - - if config_params['use_case']=='textline': - region_tags = np.unique([x for x in alltags if x.endswith('TextLine')]) - elif config_params['use_case']=='word': - region_tags = np.unique([x for x in alltags if x.endswith('Word')]) - elif config_params['use_case']=='glyph': - region_tags = np.unique([x for x in alltags if x.endswith('Glyph')]) - elif config_params['use_case']=='printspace': - region_tags = np.unique([x for x in alltags if x.endswith('PrintSpace')]) - - co_use_case = [] - - for tag in region_tags: - if config_params['use_case']=='textline': - tag_endings = ['}TextLine','}textline'] - elif config_params['use_case']=='word': - tag_endings = ['}Word','}word'] - elif config_params['use_case']=='glyph': - tag_endings = ['}Glyph','}glyph'] - elif config_params['use_case']=='printspace': - tag_endings = 
['}PrintSpace','}printspace'] - - if tag.endswith(tag_endings[0]) or tag.endswith(tag_endings[1]): - for nn in root1.iter(tag): - c_t_in = [] - sumi = 0 - for vv in nn.iter(): - # check the format of coords - if vv.tag == link + 'Coords': - coords = bool(vv.attrib) - if coords: - p_h = vv.attrib['points'].split(' ') - c_t_in.append( - np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) - break - else: - pass - - if vv.tag == link + 'Point': - c_t_in.append([int(float(vv.attrib['x'])), int(float(vv.attrib['y']))]) - sumi += 1 - elif vv.tag != link + 'Point' and sumi >= 1: - break - co_use_case.append(np.array(c_t_in)) - - - if "artificial_class_label" in keys: - img_boundary = np.zeros((y_len, x_len)) - erosion_rate = 0#1 - dilation_rate = 2 - dilation_early = 0 - erosion_early = 2 - co_use_case, img_boundary = update_region_contours(co_use_case, img_boundary, erosion_rate, dilation_rate, y_len, x_len, dilation_early=dilation_early, erosion_early=erosion_early) - - - img = np.zeros((y_len, x_len, 3)) - if output_type == '2d': - img_poly = cv2.fillPoly(img, pts=co_use_case, color=(1, 1, 1)) - if "artificial_class_label" in keys: - img_mask = np.copy(img_poly) - ##img_poly[:,:][(img_boundary[:,:]==1) & (img_mask[:,:,0]!=1)] = artificial_class_label - img_poly[:,:][img_boundary[:,:]==1] = artificial_class_label - elif output_type == '3d': - img_poly = cv2.fillPoly(img, pts=co_use_case, color=textline_rgb_color) - if "artificial_class_label" in keys: - img_mask = np.copy(img_poly) - img_poly[:,:,0][(img_boundary[:,:]==1) & (img_mask[:,:,0]!=255)] = artificial_class_rgb_color[0] - img_poly[:,:,1][(img_boundary[:,:]==1) & (img_mask[:,:,0]!=255)] = artificial_class_rgb_color[1] - img_poly[:,:,2][(img_boundary[:,:]==1) & (img_mask[:,:,0]!=255)] = artificial_class_rgb_color[2] - - - if printspace and config_params['use_case']!='printspace': - img_poly = img_poly[ps_bbox[1]:ps_bbox[3], - ps_bbox[0]:ps_bbox[2], :] - - - if 'columns_width' in list(config_params.keys()) and num_col and config_params['use_case']!='printspace': - img_poly = resize_image(img_poly, y_new, x_new) - - try: - xml_file_stem = os.path.splitext(gt_list[index])[0] - cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly) - except: - xml_file_stem = os.path.splitext(gt_list[index])[0] - cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly) - - if dir_images: - org_image_name = ls_org_imgs[xml_file_stem] - if not org_image_name: - print("image file for XML stem", xml_file_stem, "is missing") - continue - if not os.path.isfile(os.path.join(dir_images, org_image_name)): - print("image file for XML stem", xml_file_stem, "is not readable") - continue - img_org = cv2.imread(os.path.join(dir_images, org_image_name)) - - if printspace and config_params['use_case']!='printspace': - img_org = img_org[ps_bbox[1]:ps_bbox[3], - ps_bbox[0]:ps_bbox[2], :] - - if 'columns_width' in list(config_params.keys()) and num_col and config_params['use_case']!='printspace': - img_org = resize_image(img_org, y_new, x_new) - - cv2.imwrite(os.path.join(dir_out_images, org_image_name), img_org) - - - if config_file and config_params['use_case']=='layout': - keys = list(config_params.keys()) - - if "artificial_class_on_boundary" in keys: - elements_with_artificial_class = list(config_params['artificial_class_on_boundary']) - artificial_class_rgb_color = (255,255,0) - artificial_class_label = config_params['artificial_class_label'] - #values = config_params.values() - - if "printspace_as_class_in_layout" in 
list(config_params.keys()): - printspace_class_rgb_color = (125,125,255) - printspace_class_label = config_params['printspace_as_class_in_layout'] - - if 'textregions' in keys: - types_text_dict = config_params['textregions'] - types_text = list(types_text_dict.keys()) - types_text_label = list(types_text_dict.values()) - if 'graphicregions' in keys: - types_graphic_dict = config_params['graphicregions'] - types_graphic = list(types_graphic_dict.keys()) - types_graphic_label = list(types_graphic_dict.values()) - - - labels_rgb_color = [ (0,0,0), (255,0,0), (255,125,0), (255,0,125), (125,255,125), (125,125,0), (0,125,255), (0,125,0), (125,125,125), (255,0,255), (125,0,125), (0,255,0),(0,0,255), (0,255,255), (255,125,125), (0,125,125), (0,255,125), (255,125,255), (125,255,0), (125,255,255)] - - - region_tags=np.unique([x for x in alltags if x.endswith('Region')]) - co_text = {'drop-capital':[], "footnote":[], "footnote-continued":[], "heading":[], "signature-mark":[], "header":[], "catch-word":[], "page-number":[], "marginalia":[], "paragraph":[]} - all_defined_textregion_types = list(co_text.keys()) - co_graphic = {"handwritten-annotation":[], "decoration":[], "stamp":[], "signature":[]} - all_defined_graphic_types = list(co_graphic.keys()) - co_sep=[] - co_img=[] - co_table=[] - co_map=[] - co_noise=[] - - for tag in region_tags: - if 'textregions' in keys: - if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): - for nn in root1.iter(tag): - c_t_in = {'drop-capital':[], "footnote":[], "footnote-continued":[], "heading":[], "signature-mark":[], "header":[], "catch-word":[], "page-number":[], "marginalia":[], "paragraph":[]} - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - - if "rest_as_paragraph" in types_text: - types_text_without_paragraph = [element for element in types_text if element!='rest_as_paragraph' and element!='paragraph'] - if len(types_text_without_paragraph) == 0: - if "type" in nn.attrib: - c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - elif len(types_text_without_paragraph) >= 1: - if "type" in nn.attrib: - if nn.attrib['type'] in types_text_without_paragraph: - c_t_in[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - else: - c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - else: - if "type" in nn.attrib: - if nn.attrib['type'] in all_defined_textregion_types: - c_t_in[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - break - else: - pass - - - if vv.tag==link+'Point': - if "rest_as_paragraph" in types_text: - types_text_without_paragraph = [element for element in types_text if element!='rest_as_paragraph' and element!='paragraph'] - if len(types_text_without_paragraph) == 0: - if "type" in nn.attrib: - c_t_in['paragraph'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - elif len(types_text_without_paragraph) >= 1: - if "type" in nn.attrib: - if nn.attrib['type'] in types_text_without_paragraph: - c_t_in[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - else: - c_t_in['paragraph'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - - else: - if "type" in nn.attrib: - if nn.attrib['type'] in 
all_defined_textregion_types: - c_t_in[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - - - elif vv.tag!=link+'Point' and sumi>=1: - break - - for element_text in list(c_t_in.keys()): - if len(c_t_in[element_text])>0: - co_text[element_text].append(np.array(c_t_in[element_text])) - - if 'graphicregions' in keys: - if tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in_graphic = {"handwritten-annotation":[], "decoration":[], "stamp":[], "signature":[]} - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - - if "rest_as_decoration" in types_graphic: - types_graphic_without_decoration = [element for element in types_graphic if element!='rest_as_decoration' and element!='decoration'] - if len(types_graphic_without_decoration) == 0: - if "type" in nn.attrib: - c_t_in_graphic['decoration'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - elif len(types_graphic_without_decoration) >= 1: - if "type" in nn.attrib: - if nn.attrib['type'] in types_graphic_without_decoration: - c_t_in_graphic[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - else: - c_t_in_graphic['decoration'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - else: - if "type" in nn.attrib: - if nn.attrib['type'] in all_defined_graphic_types: - c_t_in_graphic[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - break - else: - pass - - - if vv.tag==link+'Point': - if "rest_as_decoration" in types_graphic: - types_graphic_without_decoration = [element for element in types_graphic if element!='rest_as_decoration' and element!='decoration'] - if len(types_graphic_without_decoration) == 0: - if "type" in nn.attrib: - c_t_in_graphic['decoration'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - elif len(types_graphic_without_decoration) >= 1: - if "type" in nn.attrib: - if nn.attrib['type'] in types_graphic_without_decoration: - c_t_in_graphic[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - else: - c_t_in_graphic['decoration'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - - else: - if "type" in nn.attrib: - if nn.attrib['type'] in all_defined_graphic_types: - c_t_in_graphic[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - - for element_graphic in list(c_t_in_graphic.keys()): - if len(c_t_in_graphic[element_graphic])>0: - co_graphic[element_graphic].append(np.array(c_t_in_graphic[element_graphic])) - - - if 'imageregion' in keys: - if tag.endswith('}ImageRegion') or tag.endswith('}imageregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - co_img.append(np.array(c_t_in)) - - - if 
'separatorregion' in keys: - if tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - co_sep.append(np.array(c_t_in)) - - - - if 'tableregion' in keys: - if tag.endswith('}TableRegion') or tag.endswith('}tableregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_table.append(np.array(c_t_in)) - - if 'mapregion' in keys: - if tag.endswith('}MapRegion') or tag.endswith('}mapregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_map.append(np.array(c_t_in)) - - if 'noiseregion' in keys: - if tag.endswith('}NoiseRegion') or tag.endswith('}noiseregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_noise.append(np.array(c_t_in)) - - if "artificial_class_on_boundary" in keys: - img_boundary = np.zeros( (y_len,x_len) ) - if "paragraph" in elements_with_artificial_class: - erosion_rate = 2 - dilation_rate = 4 - co_text['paragraph'], img_boundary = update_region_contours(co_text['paragraph'], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "drop-capital" in elements_with_artificial_class: - erosion_rate = 1 - dilation_rate = 3 - co_text["drop-capital"], img_boundary = update_region_contours(co_text["drop-capital"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "catch-word" in elements_with_artificial_class: - erosion_rate = 0 - dilation_rate = 3#4 - co_text["catch-word"], img_boundary = update_region_contours(co_text["catch-word"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "page-number" in elements_with_artificial_class: - erosion_rate = 0 - dilation_rate = 3#4 - co_text["page-number"], img_boundary = 
update_region_contours(co_text["page-number"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "header" in elements_with_artificial_class: - erosion_rate = 1 - dilation_rate = 4 - co_text["header"], img_boundary = update_region_contours(co_text["header"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "heading" in elements_with_artificial_class: - erosion_rate = 1 - dilation_rate = 4 - co_text["heading"], img_boundary = update_region_contours(co_text["heading"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "signature-mark" in elements_with_artificial_class: - erosion_rate = 1 - dilation_rate = 4 - co_text["signature-mark"], img_boundary = update_region_contours(co_text["signature-mark"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "marginalia" in elements_with_artificial_class: - erosion_rate = 2 - dilation_rate = 4 - co_text["marginalia"], img_boundary = update_region_contours(co_text["marginalia"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "footnote" in elements_with_artificial_class: - erosion_rate = 0#2 - dilation_rate = 2#4 - co_text["footnote"], img_boundary = update_region_contours(co_text["footnote"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "footnote-continued" in elements_with_artificial_class: - erosion_rate = 0#2 - dilation_rate = 2#4 - co_text["footnote-continued"], img_boundary = update_region_contours(co_text["footnote-continued"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "tableregion" in elements_with_artificial_class: - erosion_rate = 0#2 - dilation_rate = 3#4 - co_table, img_boundary = update_region_contours(co_table, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "mapregion" in elements_with_artificial_class: - erosion_rate = 0#2 - dilation_rate = 3#4 - co_map, img_boundary = update_region_contours(co_map, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - - - - img = np.zeros( (y_len,x_len,3) ) - - if output_type == '3d': - if 'graphicregions' in keys: - if 'rest_as_decoration' in types_graphic: - types_graphic[types_graphic=='rest_as_decoration'] = 'decoration' - for element_graphic in types_graphic: - if element_graphic == 'decoration': - color_label = labels_rgb_color[ config_params['graphicregions']['rest_as_decoration']] - else: - color_label = labels_rgb_color[ config_params['graphicregions'][element_graphic]] - img_poly=cv2.fillPoly(img, pts =co_graphic[element_graphic], color=color_label) - else: - for element_graphic in types_graphic: - color_label = labels_rgb_color[ config_params['graphicregions'][element_graphic]] - img_poly=cv2.fillPoly(img, pts =co_graphic[element_graphic], color=color_label) - - - if 'imageregion' in keys: - img_poly=cv2.fillPoly(img, pts =co_img, color=labels_rgb_color[ config_params['imageregion']]) - if 'tableregion' in keys: - img_poly=cv2.fillPoly(img, pts =co_table, color=labels_rgb_color[ config_params['tableregion']]) - if 'mapregion' in keys: - img_poly=cv2.fillPoly(img, pts =co_map, color=labels_rgb_color[ config_params['mapregion']]) - if 'noiseregion' in keys: - img_poly=cv2.fillPoly(img, pts =co_noise, color=labels_rgb_color[ config_params['noiseregion']]) - - if 'textregions' in keys: - if 'rest_as_paragraph' in types_text: - types_text = ['paragraph'if ttind=='rest_as_paragraph' else ttind for ttind in types_text] - for element_text in types_text: - if element_text == 'paragraph': - color_label = labels_rgb_color[ config_params['textregions']['rest_as_paragraph']] - 
else: - color_label = labels_rgb_color[ config_params['textregions'][element_text]] - img_poly=cv2.fillPoly(img, pts =co_text[element_text], color=color_label) - else: - for element_text in types_text: - color_label = labels_rgb_color[ config_params['textregions'][element_text]] - img_poly=cv2.fillPoly(img, pts =co_text[element_text], color=color_label) - - - if "artificial_class_on_boundary" in keys: - img_poly[:,:,0][img_boundary[:,:]==1] = artificial_class_rgb_color[0] - img_poly[:,:,1][img_boundary[:,:]==1] = artificial_class_rgb_color[1] - img_poly[:,:,2][img_boundary[:,:]==1] = artificial_class_rgb_color[2] - - if 'separatorregion' in keys: - img_poly=cv2.fillPoly(img, pts =co_sep, color=labels_rgb_color[ config_params['separatorregion']]) - - - if "printspace_as_class_in_layout" in list(config_params.keys()): - printspace_mask = np.zeros((img_poly.shape[0], img_poly.shape[1])) - printspace_mask[ps_bbox[1]:ps_bbox[3], - ps_bbox[0]:ps_bbox[2]] = 1 - - img_poly[:,:,0][printspace_mask[:,:] == 0] = printspace_class_rgb_color[0] - img_poly[:,:,1][printspace_mask[:,:] == 0] = printspace_class_rgb_color[1] - img_poly[:,:,2][printspace_mask[:,:] == 0] = printspace_class_rgb_color[2] - - - - - elif output_type == '2d': - if 'graphicregions' in keys: - if 'rest_as_decoration' in types_graphic: - types_graphic[types_graphic=='rest_as_decoration'] = 'decoration' - for element_graphic in types_graphic: - if element_graphic == 'decoration': - color_label = config_params['graphicregions']['rest_as_decoration'] - else: - color_label = config_params['graphicregions'][element_graphic] - img_poly=cv2.fillPoly(img, pts =co_graphic[element_graphic], color=color_label) - else: - for element_graphic in types_graphic: - color_label = config_params['graphicregions'][element_graphic] - img_poly=cv2.fillPoly(img, pts =co_graphic[element_graphic], color=color_label) - - - if 'imageregion' in keys: - color_label = config_params['imageregion'] - img_poly=cv2.fillPoly(img, pts =co_img, color=(color_label,color_label,color_label)) - if 'tableregion' in keys: - color_label = config_params['tableregion'] - img_poly=cv2.fillPoly(img, pts =co_table, color=(color_label,color_label,color_label)) - if 'mapregion' in keys: - color_label = config_params['mapregion'] - img_poly=cv2.fillPoly(img, pts =co_map, color=(color_label,color_label,color_label)) - if 'noiseregion' in keys: - color_label = config_params['noiseregion'] - img_poly=cv2.fillPoly(img, pts =co_noise, color=(color_label,color_label,color_label)) - - if 'textregions' in keys: - if 'rest_as_paragraph' in types_text: - types_text = ['paragraph'if ttind=='rest_as_paragraph' else ttind for ttind in types_text] - for element_text in types_text: - if element_text == 'paragraph': - color_label = config_params['textregions']['rest_as_paragraph'] - else: - color_label = config_params['textregions'][element_text] - img_poly=cv2.fillPoly(img, pts =co_text[element_text], color=color_label) - else: - for element_text in types_text: - color_label = config_params['textregions'][element_text] - img_poly=cv2.fillPoly(img, pts =co_text[element_text], color=color_label) - - if "artificial_class_on_boundary" in keys: - img_poly[:,:][img_boundary[:,:]==1] = artificial_class_label - - if 'separatorregion' in keys: - color_label = config_params['separatorregion'] - img_poly=cv2.fillPoly(img, pts =co_sep, color=(color_label,color_label,color_label)) - - if "printspace_as_class_in_layout" in list(config_params.keys()): - printspace_mask = np.zeros((img_poly.shape[0], 
img_poly.shape[1])) - printspace_mask[ps_bbox[1]:ps_bbox[3], - ps_bbox[0]:ps_bbox[2]] = 1 - - img_poly[:,:,0][printspace_mask[:,:] == 0] = printspace_class_label - img_poly[:,:,1][printspace_mask[:,:] == 0] = printspace_class_label - img_poly[:,:,2][printspace_mask[:,:] == 0] = printspace_class_label - - - - if printspace: - img_poly = img_poly[ps_bbox[1]:ps_bbox[3], - ps_bbox[0]:ps_bbox[2], :] - - if 'columns_width' in list(config_params.keys()) and num_col: - img_poly = resize_image(img_poly, y_new, x_new) - - try: - xml_file_stem = os.path.splitext(gt_list[index])[0] - cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly) - except: - xml_file_stem = os.path.splitext(gt_list[index])[0] - cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly) - - - if dir_images: - org_image_name = ls_org_imgs[xml_file_stem] - if not org_image_name: - print("image file for XML stem", xml_file_stem, "is missing") - continue - if not os.path.isfile(os.path.join(dir_images, org_image_name)): - print("image file for XML stem", xml_file_stem, "is not readable") - continue - img_org = cv2.imread(os.path.join(dir_images, org_image_name)) - - if printspace: - img_org = img_org[ps_bbox[1]:ps_bbox[3], - ps_bbox[0]:ps_bbox[2], :] - - if 'columns_width' in list(config_params.keys()) and num_col: - img_org = resize_image(img_org, y_new, x_new) - - cv2.imwrite(os.path.join(dir_out_images, org_image_name), img_org) - - - -def find_new_features_of_contours(contours_main): - - areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))]) - M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] - cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] - cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] - try: - x_min_main = np.array([np.min(contours_main[j][0][:, 0]) for j in range(len(contours_main))]) - - argmin_x_main = np.array([np.argmin(contours_main[j][0][:, 0]) for j in range(len(contours_main))]) - - x_min_from_argmin = np.array([contours_main[j][0][argmin_x_main[j], 0] for j in range(len(contours_main))]) - y_corr_x_min_from_argmin = np.array([contours_main[j][0][argmin_x_main[j], 1] for j in range(len(contours_main))]) - - x_max_main = np.array([np.max(contours_main[j][0][:, 0]) for j in range(len(contours_main))]) - - y_min_main = np.array([np.min(contours_main[j][0][:, 1]) for j in range(len(contours_main))]) - y_max_main = np.array([np.max(contours_main[j][0][:, 1]) for j in range(len(contours_main))]) - except: - x_min_main = np.array([np.min(contours_main[j][:, 0]) for j in range(len(contours_main))]) - - argmin_x_main = np.array([np.argmin(contours_main[j][:, 0]) for j in range(len(contours_main))]) - - x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0] for j in range(len(contours_main))]) - y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 1] for j in range(len(contours_main))]) - - x_max_main = np.array([np.max(contours_main[j][:, 0]) for j in range(len(contours_main))]) - - y_min_main = np.array([np.min(contours_main[j][:, 1]) for j in range(len(contours_main))]) - y_max_main = np.array([np.max(contours_main[j][:, 1]) for j in range(len(contours_main))]) - - return cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin - -def read_xml(xml_file): - file_name = Path(xml_file).stem - tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) - root1=tree1.getroot() - 
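find_new_features_of_contours above reduces each contour to a centroid (from image moments, with an epsilon guard for zero-area shapes) plus its axis-aligned extents, using try/except to absorb the two nesting conventions OpenCV contours arrive in. A minimal sketch of the same computation, assuming plain (N, 2) point arrays; the function name and the example square are illustrative, not from this codebase:

```python
import cv2
import numpy as np

def contour_features(contours):
    """Centroid and axis-aligned extents for a list of (N, 2) contours."""
    cx, cy, extents = [], [], []
    for cnt in contours:
        m = cv2.moments(cnt.reshape(-1, 1, 2))
        # Epsilon keeps the division defined for degenerate (zero-area)
        # contours, mirroring the + 1e-32 guard in the original helper.
        cx.append(m["m10"] / (m["m00"] + 1e-32))
        cy.append(m["m01"] / (m["m00"] + 1e-32))
        extents.append((cnt[:, 0].min(), cnt[:, 0].max(),
                        cnt[:, 1].min(), cnt[:, 1].max()))
    return np.array(cx), np.array(cy), np.array(extents)

# A square centred on (15, 15):
square = np.array([[10, 10], [20, 10], [20, 20], [10, 20]], dtype=np.int32)
cx, cy, extents = contour_features([square])
print(cx, cy, extents)  # ~[15.] ~[15.] [[10 20 10 20]]
```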
alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' - - index_tot_regions = [] - tot_region_ref = [] - - for jj in root1.iter(link+'Page'): - y_len=int(jj.attrib['imageHeight']) - x_len=int(jj.attrib['imageWidth']) - - for jj in root1.iter(link+'RegionRefIndexed'): - index_tot_regions.append(jj.attrib['index']) - tot_region_ref.append(jj.attrib['regionRef']) - - ps = (root1.xpath('/pc:PcGts/pc:Page/pc:Border', namespaces=NS) + - root1.xpath('/pc:PcGts/pc:Page/pc:PrintSpace', namespaces=NS)) - if len(ps): - points = ps[0].find('pc:Coords', NS).get('points') - ps_bbox = bbox_from_points(points) - else: - ps_bbox = [0, 0, None, None] - - region_tags=np.unique([x for x in alltags if x.endswith('Region')]) - co_text_paragraph=[] - co_text_drop=[] - co_text_heading=[] - co_text_header=[] - co_text_marginalia=[] - co_text_catch=[] - co_text_page_number=[] - co_text_signature_mark=[] - co_sep=[] - co_img=[] - co_table=[] - co_graphic=[] - co_graphic_text_annotation=[] - co_graphic_decoration=[] - co_noise=[] - - co_text_paragraph_text=[] - co_text_drop_text=[] - co_text_heading_text=[] - co_text_header_text=[] - co_text_marginalia_text=[] - co_text_catch_text=[] - co_text_page_number_text=[] - co_text_signature_mark_text=[] - co_sep_text=[] - co_img_text=[] - co_table_text=[] - co_graphic_text=[] - co_graphic_text_annotation_text=[] - co_graphic_decoration_text=[] - co_noise_text=[] - - id_paragraph = [] - id_header = [] - id_heading = [] - id_marginalia = [] - - for tag in region_tags: - if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): - for nn in root1.iter(tag): - for child2 in nn: - tag2 = child2.tag - if tag2.endswith('}TextEquiv') or tag2.endswith('}TextEquiv'): - for childtext2 in child2: - if childtext2.tag.endswith('}Unicode') or childtext2.tag.endswith('}Unicode'): - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - co_text_drop_text.append(childtext2.text) - elif "type" in nn.attrib and nn.attrib['type']=='heading': - co_text_heading_text.append(childtext2.text) - elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': - co_text_signature_mark_text.append(childtext2.text) - elif "type" in nn.attrib and nn.attrib['type']=='header': - co_text_header_text.append(childtext2.text) - ###elif "type" in nn.attrib and nn.attrib['type']=='catch-word': - ###co_text_catch_text.append(childtext2.text) - ###elif "type" in nn.attrib and nn.attrib['type']=='page-number': - ###co_text_page_number_text.append(childtext2.text) - elif "type" in nn.attrib and nn.attrib['type']=='marginalia': - co_text_marginalia_text.append(childtext2.text) - else: - co_text_paragraph_text.append(childtext2.text) - c_t_in_drop=[] - c_t_in_paragraph=[] - c_t_in_heading=[] - c_t_in_header=[] - c_t_in_page_number=[] - c_t_in_signature_mark=[] - c_t_in_catch=[] - c_t_in_marginalia=[] - - - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - - coords=bool(vv.attrib) - if coords: - #print('birda1') - p_h=vv.attrib['points'].split(' ') - - - - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - - c_t_in_drop.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - elif "type" in nn.attrib and nn.attrib['type']=='heading': - ##id_heading.append(nn.attrib['id']) - c_t_in_heading.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': - - c_t_in_signature_mark.append( np.array( [ [ 
int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - #print(c_t_in_paragraph) - elif "type" in nn.attrib and nn.attrib['type']=='header': - #id_header.append(nn.attrib['id']) - c_t_in_header.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - ###elif "type" in nn.attrib and nn.attrib['type']=='catch-word': - ###c_t_in_catch.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - ###elif "type" in nn.attrib and nn.attrib['type']=='page-number': - - ###c_t_in_page_number.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - elif "type" in nn.attrib and nn.attrib['type']=='marginalia': - #id_marginalia.append(nn.attrib['id']) - - c_t_in_marginalia.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - else: - #id_paragraph.append(nn.attrib['id']) - - c_t_in_paragraph.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - break - else: - pass - - - if vv.tag==link+'Point': - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - - c_t_in_drop.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif "type" in nn.attrib and nn.attrib['type']=='heading': - #id_heading.append(nn.attrib['id']) - c_t_in_heading.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - - elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': - - c_t_in_signature_mark.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - elif "type" in nn.attrib and nn.attrib['type']=='header': - #id_header.append(nn.attrib['id']) - c_t_in_header.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - - ###elif "type" in nn.attrib and nn.attrib['type']=='catch-word': - ###c_t_in_catch.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - ###sumi+=1 - - ###elif "type" in nn.attrib and nn.attrib['type']=='page-number': - - ###c_t_in_page_number.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - ###sumi+=1 - - elif "type" in nn.attrib and nn.attrib['type']=='marginalia': - #id_marginalia.append(nn.attrib['id']) - - c_t_in_marginalia.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - else: - #id_paragraph.append(nn.attrib['id']) - c_t_in_paragraph.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - - if len(c_t_in_drop)>0: - co_text_drop.append(np.array(c_t_in_drop)) - if len(c_t_in_paragraph)>0: - co_text_paragraph.append(np.array(c_t_in_paragraph)) - id_paragraph.append(nn.attrib['id']) - if len(c_t_in_heading)>0: - co_text_heading.append(np.array(c_t_in_heading)) - id_heading.append(nn.attrib['id']) - - if len(c_t_in_header)>0: - co_text_header.append(np.array(c_t_in_header)) - id_header.append(nn.attrib['id']) - if len(c_t_in_page_number)>0: - co_text_page_number.append(np.array(c_t_in_page_number)) - if len(c_t_in_catch)>0: - co_text_catch.append(np.array(c_t_in_catch)) - - if len(c_t_in_signature_mark)>0: - co_text_signature_mark.append(np.array(c_t_in_signature_mark)) - - if len(c_t_in_marginalia)>0: - co_text_marginalia.append(np.array(c_t_in_marginalia)) - id_marginalia.append(nn.attrib['id']) - - - elif tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): - for nn in root1.iter(tag): - c_t_in=[] - c_t_in_text_annotation=[] - c_t_in_decoration=[] - sumi=0 - for 
vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - - if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': - c_t_in_text_annotation.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - elif "type" in nn.attrib and nn.attrib['type']=='decoration': - c_t_in_decoration.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - else: - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - break - else: - pass - - - if vv.tag==link+'Point': - if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': - c_t_in_text_annotation.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif "type" in nn.attrib and nn.attrib['type']=='decoration': - c_t_in_decoration.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - else: - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - if len(c_t_in_text_annotation)>0: - co_graphic_text_annotation.append(np.array(c_t_in_text_annotation)) - if len(c_t_in_decoration)>0: - co_graphic_decoration.append(np.array(c_t_in_decoration)) - if len(c_t_in)>0: - co_graphic.append(np.array(c_t_in)) - - - - elif tag.endswith('}ImageRegion') or tag.endswith('}imageregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - elif vv.tag!=link+'Point' and sumi>=1: - break - co_img.append(np.array(c_t_in)) - co_img_text.append(' ') - - - elif tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - elif vv.tag!=link+'Point' and sumi>=1: - break - co_sep.append(np.array(c_t_in)) - - - - elif tag.endswith('}TableRegion') or tag.endswith('}tableregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - co_table.append(np.array(c_t_in)) - co_table_text.append(' ') - - elif tag.endswith('}NoiseRegion') or tag.endswith('}noiseregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ 
int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - co_noise.append(np.array(c_t_in)) - co_noise_text.append(' ') - - img = np.zeros( (y_len,x_len,3) ) - img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=(1,1,1)) - - img_poly=cv2.fillPoly(img, pts =co_text_heading, color=(2,2,2)) - img_poly=cv2.fillPoly(img, pts =co_text_header, color=(2,2,2)) - img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=(3,3,3)) - img_poly=cv2.fillPoly(img, pts =co_img, color=(4,4,4)) - img_poly=cv2.fillPoly(img, pts =co_sep, color=(5,5,5)) - - return (tree1, - root1, - ps_bbox, - file_name, - id_paragraph, - id_header + id_heading, - co_text_paragraph, - co_text_header + co_text_heading, - tot_region_ref, - x_len, - y_len, - index_tot_regions, - img_poly) - -# def bounding_box(cnt,color, corr_order_index ): -# x, y, w, h = cv2.boundingRect(cnt) -# x = int(x*scale_w) -# y = int(y*scale_h) -# -# w = int(w*scale_w) -# h = int(h*scale_h) -# -# return [x,y,w,h,int(color), int(corr_order_index)+1] - -def resize_image(seg_in,input_height,input_width): - return cv2.resize(seg_in,(input_width,input_height),interpolation=cv2.INTER_NEAREST) - -def make_image_from_bb(width_l, height_l, bb_all): - bb_all =np.array(bb_all) - img_remade = np.zeros((height_l,width_l )) - - for i in range(bb_all.shape[0]): - img_remade[bb_all[i,1]:bb_all[i,1]+bb_all[i,3],bb_all[i,0]:bb_all[i,0]+bb_all[i,2] ] = 1 - return img_remade - -def update_list_and_return_first_with_length_bigger_than_one(index_element_to_be_updated, innner_index_pr_pos, pr_list, pos_list,list_inp): - list_inp.pop(index_element_to_be_updated) - if len(pr_list)>0: - list_inp.insert(index_element_to_be_updated, pr_list) - else: - index_element_to_be_updated = index_element_to_be_updated -1 - - list_inp.insert(index_element_to_be_updated+1, [innner_index_pr_pos]) - if len(pos_list)>0: - list_inp.insert(index_element_to_be_updated+2, pos_list) - - len_all_elements = [len(i) for i in list_inp] - list_len_bigger_1 = np.where(np.array(len_all_elements)>1) - list_len_bigger_1 = list_len_bigger_1[0] - - if len(list_len_bigger_1)>0: - early_list_bigger_than_one = list_len_bigger_1[0] - else: - early_list_bigger_than_one = -20 - return list_inp, early_list_bigger_than_one - -def overlay_layout_on_image(prediction, img, cx_ordered, cy_ordered, color, thickness): - - unique_classes = np.unique(prediction[:,:,0]) - rgb_colors = {'0' : [255, 255, 255], - '1' : [255, 0, 0], - '2' : [0, 0, 255], - '3' : [255, 0, 125], - '4' : [125, 125, 125], - '5' : [125, 125, 0], - '6' : [0, 125, 255], - '7' : [0, 125, 0], - '8' : [125, 125, 125], - '9' : [0, 125, 255], - '10' : [125, 0, 125], - '11' : [0, 255, 0], - '12' : [255, 125, 0], - '13' : [0, 255, 255], - '14' : [255, 125, 125], - '15' : [255, 0, 255]} - - layout_only = np.zeros(prediction.shape) - - for unq_class in unique_classes: - rgb_class_unique = rgb_colors[str(int(unq_class))] - layout_only[:,:,0][prediction[:,:,0]==unq_class] = rgb_class_unique[0] - layout_only[:,:,1][prediction[:,:,0]==unq_class] = rgb_class_unique[1] - layout_only[:,:,2][prediction[:,:,0]==unq_class] = rgb_class_unique[2] - - - - #img = self.resize_image(img, layout_only.shape[0], layout_only.shape[1]) - - layout_only = layout_only.astype(np.int32) - - for i in range(len(cx_ordered)-1): - start_point = (int(cx_ordered[i]), int(cy_ordered[i])) - 
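overlay_layout_on_image, in progress here, combines two OpenCV idioms: recolouring an integer class map into an RGB layer, then alpha-blending it over the page while chaining arrows between consecutive region centroids to visualise reading order. A condensed sketch of both steps under the same 0.5/0.1 blending weights; the palette, names, and uint8 types are illustrative assumptions:

```python
import cv2
import numpy as np

def blend_layout_with_order(img, prediction, centroids, palette):
    """Alpha-blend a class-label map over a page image and draw the
    reading order as arrows between consecutive region centroids.

    img:        (H, W, 3) uint8 page image
    prediction: (H, W) integer class map
    centroids:  list of (x, y) region centres, already in reading order
    palette:    {class_id: BGR colour}; all names here are placeholders
    """
    overlay = np.zeros_like(img)
    for cls, colour in palette.items():
        overlay[prediction == cls] = colour
    for (x0, y0), (x1, y1) in zip(centroids, centroids[1:]):
        cv2.arrowedLine(overlay, (int(x0), int(y0)), (int(x1), int(y1)),
                        (0, 0, 255), 2, tipLength=0.03)
    # Page stays dominant, layout is only a tint (weights as in the original).
    return cv2.addWeighted(img, 0.5, overlay, 0.1, 0)
```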
end_point = (int(cx_ordered[i+1]), int(cy_ordered[i+1])) - layout_only = cv2.arrowedLine(layout_only, start_point, end_point, - color, thickness, tipLength = 0.03) - - img = img.astype(np.int32) - - - - added_image = cv2.addWeighted(img,0.5,layout_only,0.1,0) - - return added_image - -def find_format_of_given_filename_in_dir(dir_imgs, f_name): - ls_imgs = os.listdir(dir_imgs) - file_interested = [ind for ind in ls_imgs if ind.startswith(f_name+'.')] - return file_interested[0] diff --git a/src/eynollah/training/inference.py b/src/eynollah/training/inference.py deleted file mode 100644 index 2be937d..0000000 --- a/src/eynollah/training/inference.py +++ /dev/null @@ -1,682 +0,0 @@ -""" -Tool to load model and predict for given image. -""" - -import sys -import os -from typing import Tuple -import warnings -import json - -import click -import numpy as np -from numpy._typing import NDArray -import cv2 -import xml.etree.ElementTree as ET - -os.environ['TF_USE_LEGACY_KERAS'] = '1' # avoid Keras 3 after TF 2.15 -import tensorflow as tf -from tensorflow.keras.models import Model, load_model -from tensorflow.keras.layers import StringLookup - -from .gt_gen_utils import ( - filter_contours_area_of_image, - find_new_features_of_contours, - read_xml, - resize_image, - update_list_and_return_first_with_length_bigger_than_one -) -from ..patch_encoder import ( - PatchEncoder, - Patches -) -from .metrics import ( - soft_dice_loss, - weighted_categorical_crossentropy, -) -from.utils import scale_padd_image_for_ocr -from ..utils.utils_ocr import decode_batch_predictions - - -with warnings.catch_warnings(): - warnings.simplefilter("ignore") - -class SBBPredict: - def __init__(self, - image, - dir_in, - model, - task, - config_params_model, - patches, - save, - save_layout, - ground_truth, - xml_file, - cpu, - out, - min_area, - ): - self.image=image - self.dir_in=dir_in - self.patches=patches - self.save=save - self.save_layout=save_layout - self.model_dir=model - self.ground_truth=ground_truth - self.task=task - self.config_params_model=config_params_model - self.xml_file = xml_file - self.out = out - self.cpu = cpu - if min_area: - self.min_area = float(min_area) - else: - self.min_area = 0 - - def resize_image(self,img_in,input_height,input_width): - return cv2.resize(img_in, (input_width, - input_height), - interpolation=cv2.INTER_NEAREST) - - def color_images(self,seg): - ann_u=range(self.n_classes) - if len(np.shape(seg))==3: - seg=seg[:,:,0] - - seg_img=np.zeros((np.shape(seg)[0],np.shape(seg)[1],3)).astype(np.uint8) - - for c in ann_u: - c=int(c) - seg_img[:,:,0][seg==c]=c - seg_img[:,:,1][seg==c]=c - seg_img[:,:,2][seg==c]=c - return seg_img - - def IoU(self,Yi,y_predi): - ## mean Intersection over Union - ## Mean IoU = TP/(FN + TP + FP) - - IoUs = [] - Nclass = np.unique(Yi) - for c in Nclass: - TP = np.sum( (Yi == c)&(y_predi==c) ) - FP = np.sum( (Yi != c)&(y_predi==c) ) - FN = np.sum( (Yi == c)&(y_predi != c)) - IoU = TP/float(TP + FP + FN) - if self.n_classes>2: - print("class {:02.0f}: #TP={:6.0f}, #FP={:6.0f}, #FN={:5.0f}, IoU={:4.3f}".format(c,TP,FP,FN,IoU)) - IoUs.append(IoU) - if self.n_classes>2: - mIoU = np.mean(IoUs) - print("_________________") - print("Mean IoU: {:4.3f}".format(mIoU)) - return mIoU - elif self.n_classes==2: - mIoU = IoUs[1] - print("_________________") - print("IoU: {:4.3f}".format(mIoU)) - return mIoU - - def start_new_session_and_model(self): - if self.cpu: - tf.config.set_visible_devices([], 'GPU') - else: - try: - for device in 
tf.config.list_physical_devices('GPU'): - tf.config.experimental.set_memory_growth(device, True) - except: - print("no GPU device available", file=sys.stderr) - - if self.task == "cnn-rnn-ocr": - self.model = Model( - self.model.get_layer(name = "image").input, - self.model.get_layer(name = "dense2").output) - else: - self.model = load_model(self.model_dir, compile=False, - custom_objects={"PatchEncoder": PatchEncoder, - "Patches": Patches}) - - ##if self.weights_dir!=None: - ##self.model.load_weights(self.weights_dir) - - assert isinstance(self.model, Model) - if self.task != 'classification' and self.task != 'reading_order': - last = self.model.layers[-1] - self.img_height = last.output_shape[1] - self.img_width = last.output_shape[2] - self.n_classes = last.output_shape[3] - - def visualize_model_output(self, prediction, img, task) -> Tuple[NDArray, NDArray]: - if task == "binarization": - prediction = prediction * -1 - prediction = prediction + 1 - added_image = prediction * 255 - layout_only = None - else: - unique_classes = np.unique(prediction[:,:,0]) - rgb_colors = {'0' : [255, 255, 255], - '1' : [255, 0, 0], - '2' : [255, 125, 0], - '3' : [255, 0, 125], - '4' : [125, 125, 125], - '5' : [125, 125, 0], - '6' : [0, 125, 255], - '7' : [0, 125, 0], - '8' : [125, 125, 125], - '9' : [0, 125, 255], - '10' : [125, 0, 125], - '11' : [0, 255, 0], - '12' : [0, 0, 255], - '13' : [0, 255, 255], - '14' : [255, 125, 125], - '15' : [255, 0, 255]} - - layout_only = np.zeros(prediction.shape) - for unq_class in unique_classes: - where = prediction[:,:,0]==unq_class - rgb_class_unique = rgb_colors[str(int(unq_class))] - layout_only[:,:,0][where] = rgb_class_unique[0] - layout_only[:,:,1][where] = rgb_class_unique[1] - layout_only[:,:,2][where] = rgb_class_unique[2] - layout_only = layout_only.astype(np.int32) - - img = self.resize_image(img, layout_only.shape[0], layout_only.shape[1]) - img = img.astype(np.int32) - - added_image = cv2.addWeighted(img,0.5,layout_only,0.1,0) - - assert isinstance(added_image, np.ndarray) - assert isinstance(layout_only, np.ndarray) - return added_image, layout_only - - def predict(self, image_dir): - assert isinstance(self.model, Model) - if self.task == 'classification': - classes_names = self.config_params_model['classification_classes_name'] - img_1ch = cv2.imread(image_dir, 0) / 255.0 - img_1ch = cv2.resize(img_1ch, (self.config_params_model['input_height'], - self.config_params_model['input_width']), - interpolation=cv2.INTER_NEAREST) - img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3)) - img_in[0, :, :, 0] = img_1ch[:, :] - img_in[0, :, :, 1] = img_1ch[:, :] - img_in[0, :, :, 2] = img_1ch[:, :] - - label_p_pred = self.model.predict(img_in, verbose='0') - index_class = np.argmax(label_p_pred[0]) - - print("Predicted Class: {}".format(classes_names[str(int(index_class))])) - - elif self.task == "cnn-rnn-ocr": - img=cv2.imread(image_dir) - img = scale_padd_image_for_ocr(img, self.config_params_model['input_height'], self.config_params_model['input_width']) - - img = img / 255. - - with open(os.path.join(self.model_dir, "characters_org.txt"), 'r') as char_txt_f: - characters = json.load(char_txt_f) - - AUTOTUNE = tf.data.AUTOTUNE - - # Mapping characters to integers. - char_to_num = StringLookup(vocabulary=list(characters), mask_token=None) - - # Mapping integers back to original characters. 
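# --- Editorial aside, not part of the deleted file: a minimal sketch of how such a
# --- CTC-trained OCR head is decoded back to text. `decode_batch_predictions` (used
# --- below) does the real work; this shows the same idea with stock Keras calls.
# --- The three-letter vocabulary and the random scores are toy assumptions.
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import StringLookup

vocab = list("abc")                                    # assumed toy character set
num_to_char = StringLookup(vocabulary=vocab, mask_token=None, invert=True)

preds = np.random.rand(1, 10, len(vocab) + 1).astype("float32")  # [batch, timesteps, classes]
input_len = np.full((1,), preds.shape[1])

# greedy CTC decoding collapses repeated symbols and removes the blank class
decoded, _ = tf.keras.backend.ctc_decode(preds, input_length=input_len, greedy=True)
tokens = decoded[0][0]                                 # dense int tensor, padded with -1
tokens = tf.boolean_mask(tokens, tokens != -1)
text = tf.strings.reduce_join(num_to_char(tokens)).numpy().decode("utf-8")
print(text.replace("[UNK]", ""))                       # strip OOV, as the code below does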
- num_to_char = StringLookup( - vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True - ) - preds = self.model.predict(img.reshape(1, img.shape[0], img.shape[1], img.shape[2]), verbose=0) - pred_texts = decode_batch_predictions(preds, num_to_char) - pred_texts = pred_texts[0].replace("[UNK]", "") - return pred_texts - - - elif self.task == 'reading_order': - img_height = self.config_params_model['input_height'] - img_width = self.config_params_model['input_width'] - - tree_xml, root_xml, ps_bbox, file_name, \ - id_paragraph, id_header, \ - co_text_paragraph, co_text_header, \ - tot_region_ref, x_len, y_len, index_tot_regions, \ - img_poly = read_xml(self.xml_file) - _, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, _ = \ - find_new_features_of_contours(co_text_header) - - img_header_and_sep = np.zeros((y_len,x_len), dtype='uint8') - for j in range(len(cy_main)): - img_header_and_sep[int(y_max_main[j]): int(y_max_main[j]) + 12, - int(x_min_main[j]): int(x_max_main[j])] = 1 - - co_text_all = co_text_paragraph + co_text_header - id_all_text = id_paragraph + id_header - - ##texts_corr_order_index = [index_tot_regions[tot_region_ref.index(i)] for i in id_all_text ] - ##texts_corr_order_index_int = [int(x) for x in texts_corr_order_index] - texts_corr_order_index_int = list(np.array(range(len(co_text_all)))) - - #print(texts_corr_order_index_int) - - max_area = 1 - #print(np.shape(co_text_all[0]), len( np.shape(co_text_all[0]) ),'co_text_all') - #co_text_all = filter_contours_area_of_image_tables(img_poly, co_text_all, _, max_area, min_area) - #print(co_text_all,'co_text_all') - co_text_all, texts_corr_order_index_int, _ = filter_contours_area_of_image( - img_poly, co_text_all, texts_corr_order_index_int, max_area, self.min_area) - - #print(texts_corr_order_index_int) - - #co_text_all = [co_text_all[index] for index in texts_corr_order_index_int] - id_all_text = [id_all_text[index] for index in texts_corr_order_index_int] - - labels_con = np.zeros((y_len,x_len,len(co_text_all)),dtype='uint8') - for i in range(len(co_text_all)): - img_label = np.zeros((y_len,x_len,3),dtype='uint8') - img_label=cv2.fillPoly(img_label, pts =[co_text_all[i]], color=(1,1,1)) - labels_con[:,:,i] = img_label[:,:,0] - - if ps_bbox: - labels_con = labels_con[ps_bbox[1]:ps_bbox[3], - ps_bbox[0]:ps_bbox[2], :] - img_poly = img_poly[ps_bbox[1]:ps_bbox[3], - ps_bbox[0]:ps_bbox[2], :] - img_header_and_sep = img_header_and_sep[ps_bbox[1]:ps_bbox[3], - ps_bbox[0]:ps_bbox[2]] - - - - img3= np.copy(img_poly) - labels_con = resize_image(labels_con, img_height, img_width) - - img_header_and_sep = resize_image(img_header_and_sep, img_height, img_width) - - img3= resize_image (img3, img_height, img_width) - img3 = img3.astype(np.uint16) - - inference_bs = 1#4 - - input_1= np.zeros( (inference_bs, img_height, img_width,3)) - - - starting_list_of_regions = [list(range(labels_con.shape[2]))] - - index_update = 0 - index_selected = starting_list_of_regions[0] - - scalibility_num = 0 - while index_update>=0: - ij_list = starting_list_of_regions[index_update] - i = ij_list[0] - ij_list.pop(0) - - - pr_list = [] - post_list = [] - - batch_counter = 0 - tot_counter = 1 - - tot_iteration = len(ij_list) - full_bs_ite= tot_iteration//inference_bs - last_bs = tot_iteration % inference_bs - - jbatch_indexer =[] - for j in ij_list: - img1= np.repeat(labels_con[:,:,i][:, :, np.newaxis], 3, axis=2) - img2 = np.repeat(labels_con[:,:,j][:, :, np.newaxis], 3, axis=2) - - - img2[:,:,0][img3[:,:,0]==5] = 2 - 
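# --- Editorial aside, not part of the deleted file: the pairwise input encoding used
# --- in this loop, shown end-to-end for one region pair with toy shapes. Class codes
# --- follow the surrounding code: 5 marks separators in the layout map, separator
# --- pixels are re-coded to 2 and header/separator bands to 3 inside the region
# --- channels, and each channel is normalised by its largest code.
import numpy as np

h, w = 448, 448                                        # assumed model input size
mask_i = np.zeros((h, w), dtype="uint8")
mask_i[10:60, 10:200] = 1                              # region i
mask_j = np.zeros((h, w), dtype="uint8")
mask_j[80:140, 10:200] = 1                             # region j
layout = np.zeros((h, w), dtype="uint8")
layout[70:72, :] = 5                                   # a horizontal separator
header_sep = np.zeros((h, w), dtype="uint8")
header_sep[60:72, 10:200] = 1                          # band under a header

for m in (mask_i, mask_j):
    m[layout == 5] = 2
    m[header_sep == 1] = 3

pair_input = np.zeros((1, h, w, 3), dtype="float32")
pair_input[0, :, :, 0] = mask_i / 3.                   # region i channel
pair_input[0, :, :, 2] = mask_j / 3.                   # region j channel
pair_input[0, :, :, 1] = layout / 5.                   # full layout channel
# a model score >= 0.5 for this input is read below as "region j follows region i"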
img2[:,:,0][img_header_and_sep[:,:]==1] = 3 - - - - img1[:,:,0][img3[:,:,0]==5] = 2 - img1[:,:,0][img_header_and_sep[:,:]==1] = 3 - - #input_1= np.zeros( (height1, width1,3)) - - - jbatch_indexer.append(j) - - input_1[batch_counter,:,:,0] = img1[:,:,0]/3. - input_1[batch_counter,:,:,2] = img2[:,:,0]/3. - input_1[batch_counter,:,:,1] = img3[:,:,0]/5. - #input_1[batch_counter,:,:,:]= np.zeros( (batch_counter, height1, width1,3)) - batch_counter = batch_counter+1 - - #input_1[:,:,0] = img1[:,:,0]/3. - #input_1[:,:,2] = img2[:,:,0]/3. - #input_1[:,:,1] = img3[:,:,0]/5. - - if batch_counter==inference_bs or ( (tot_counter//inference_bs)==full_bs_ite and tot_counter%inference_bs==last_bs): - y_pr = self.model.predict(input_1 , verbose='0') - scalibility_num = scalibility_num+1 - - if batch_counter==inference_bs: - iteration_batches = inference_bs - else: - iteration_batches = last_bs - for jb in range(iteration_batches): - if y_pr[jb][0]>=0.5: - post_list.append(jbatch_indexer[jb]) - else: - pr_list.append(jbatch_indexer[jb]) - - batch_counter = 0 - jbatch_indexer = [] - - tot_counter = tot_counter+1 - - starting_list_of_regions, index_update = update_list_and_return_first_with_length_bigger_than_one(index_update, i, pr_list, post_list,starting_list_of_regions) - - - index_sort = [i[0] for i in starting_list_of_regions ] - - id_all_text = np.array(id_all_text)[index_sort] - - alltags=[elem.tag for elem in root_xml.iter()] - - - - link=alltags[0].split('}')[0]+'}' - name_space = alltags[0].split('}')[0] - name_space = name_space.split('{')[1] - - page_element = root_xml.find(link+'Page') - assert isinstance(page_element, ET.Element) - - """ - ro_subelement = ET.SubElement(page_element, 'ReadingOrder') - #print(page_element, 'page_element') - - #new_element = ET.Element('ReadingOrder') - - new_element_element = ET.Element('OrderedGroup') - new_element_element.set('id', "ro357564684568544579089") - - for index, id_text in enumerate(id_all_text): - new_element_2 = ET.Element('RegionRefIndexed') - new_element_2.set('regionRef', id_all_text[index]) - new_element_2.set('index', str(index_sort[index])) - - new_element_element.append(new_element_2) - - ro_subelement.append(new_element_element) - """ - ##ro_subelement = ET.SubElement(page_element, 'ReadingOrder') - - ro_subelement = ET.Element('ReadingOrder') - - ro_subelement2 = ET.SubElement(ro_subelement, 'OrderedGroup') - ro_subelement2.set('id', "ro357564684568544579089") - - for index, id_text in enumerate(id_all_text): - new_element_2 = ET.SubElement(ro_subelement2, 'RegionRefIndexed') - new_element_2.set('regionRef', id_all_text[index]) - new_element_2.set('index', str(index)) - - if (link+'PrintSpace' in alltags) or (link+'Border' in alltags): - page_element.insert(1, ro_subelement) - else: - page_element.insert(0, ro_subelement) - - alltags=[elem.tag for elem in root_xml.iter()] - - ET.register_namespace("",name_space) - tree_xml.write(os.path.join(self.out, file_name+'.xml'),xml_declaration=True,method='xml',encoding="utf8",default_namespace=None) - #tree_xml.write('library2.xml') - - else: - if self.patches: - #def textline_contours(img,input_width,input_height,n_classes,model): - - img=cv2.imread(image_dir) - self.img_org = np.copy(img) - - if img.shape[0] < self.img_height: - img = self.resize_image(img, self.img_height, img.shape[1]) - - if img.shape[1] < self.img_width: - img = self.resize_image(img, img.shape[0], self.img_width) - - margin = int(0.1 * self.img_width) - width_mid = self.img_width - 2 * margin - height_mid = self.img_height 
- 2 * margin - img = img / float(255.0) - - img_h = img.shape[0] - img_w = img.shape[1] - - prediction_true = np.zeros((img_h, img_w, 3)) - nxf = img_w / float(width_mid) - nyf = img_h / float(height_mid) - - nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf) - nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf) - - for i in range(nxf): - for j in range(nyf): - if i == 0: - index_x_d = i * width_mid - index_x_u = index_x_d + self.img_width - else: - index_x_d = i * width_mid - index_x_u = index_x_d + self.img_width - if j == 0: - index_y_d = j * height_mid - index_y_u = index_y_d + self.img_height - else: - index_y_d = j * height_mid - index_y_u = index_y_d + self.img_height - - if index_x_u > img_w: - index_x_u = img_w - index_x_d = img_w - self.img_width - if index_y_u > img_h: - index_y_u = img_h - index_y_d = img_h - self.img_height - - img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] - label_p_pred = self.model.predict(img_patch.reshape(1, img_patch.shape[0], img_patch.shape[1], img_patch.shape[2]), - verbose='0') - - if self.task == 'enhancement': - seg = label_p_pred[0, :, :, :] - seg = seg * 255 - elif self.task == 'segmentation' or self.task == 'binarization': - seg = np.argmax(label_p_pred, axis=3)[0] - seg = np.repeat(seg[:, :, np.newaxis], 3, axis=2) - else: - raise ValueError(f"Unhandled task {self.task}") - - - if i == 0 and j == 0: - seg = seg[0 : seg.shape[0] - margin, 0 : seg.shape[1] - margin] - prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg - elif i == nxf - 1 and j == nyf - 1: - seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - 0] - prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - 0, :] = seg - elif i == 0 and j == nyf - 1: - seg = seg[margin : seg.shape[0] - 0, 0 : seg.shape[1] - margin] - prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + 0 : index_x_u - margin, :] = seg - elif i == nxf - 1 and j == 0: - seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - 0] - prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg - elif i == 0 and j != 0 and j != nyf - 1: - seg = seg[margin : seg.shape[0] - margin, 0 : seg.shape[1] - margin] - prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg - elif i == nxf - 1 and j != 0 and j != nyf - 1: - seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - 0] - prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg - elif i != 0 and i != nxf - 1 and j == 0: - seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - margin] - prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg - elif i != 0 and i != nxf - 1 and j == nyf - 1: - seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - margin] - prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - margin, :] = seg - else: - seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - margin] - prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg - prediction_true = prediction_true.astype(int) - prediction_true = cv2.resize(prediction_true, (self.img_org.shape[1], self.img_org.shape[0]), interpolation=cv2.INTER_NEAREST) - return prediction_true - - else: - - img=cv2.imread(image_dir) - self.img_org = np.copy(img) - - width=self.img_width - height=self.img_height - - 
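# --- Editorial aside, not part of the deleted file: the patch branch above slides an
# --- (img_height x img_width) window in strides of the window size minus twice a 10%
# --- margin, clamping the last window to the image edge. A compact way to enumerate
# --- the same start offsets (the helper name is illustrative):
import math

def tile_windows(img_len, win, margin):
    """Start offsets of overlapping windows covering [0, img_len); assumes img_len >= win."""
    stride = win - 2 * margin
    n = math.ceil(img_len / stride)
    return [min(i * stride, img_len - win) for i in range(n)]

print(tile_windows(1000, 448, int(0.1 * 448)))         # [0, 360, 552]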
img=img/255.0 - img=self.resize_image(img,self.img_height,self.img_width) - - - label_p_pred=self.model.predict( - img.reshape(1,img.shape[0],img.shape[1],img.shape[2])) - - if self.task == 'enhancement': - seg = label_p_pred[0, :, :, :] - seg = seg * 255 - elif self.task == 'segmentation' or self.task == 'binarization': - seg = np.argmax(label_p_pred, axis=3)[0] - seg = np.repeat(seg[:, :, np.newaxis], 3, axis=2) - else: - raise ValueError(f"Unhandled task {self.task}") - - prediction_true = seg.astype(int) - - prediction_true = cv2.resize(prediction_true, (self.img_org.shape[1], self.img_org.shape[0]), interpolation=cv2.INTER_NEAREST) - return prediction_true - - - - def run(self): - self.start_new_session_and_model() - if self.image: - res=self.predict(image_dir = self.image) - - if self.task == 'classification' or self.task == 'reading_order': - pass - elif self.task == 'enhancement': - if self.save: - cv2.imwrite(self.save,res) - elif self.task == "cnn-rnn-ocr": - print(f"Detected text: {res}") - else: - img_seg_overlayed, only_layout = self.visualize_model_output(res, self.img_org, self.task) - if self.save: - cv2.imwrite(self.save,img_seg_overlayed) - if self.save_layout: - cv2.imwrite(self.save_layout, only_layout) - - if self.ground_truth: - gt_img=cv2.imread(self.ground_truth) - self.IoU(gt_img[:,:,0],res[:,:,0]) - - else: - ls_images = os.listdir(self.dir_in) - for ind_image in ls_images: - f_name = ind_image.split('.')[0] - image_dir = os.path.join(self.dir_in, ind_image) - res=self.predict(image_dir) - - if self.task == 'classification' or self.task == 'reading_order': - pass - elif self.task == 'enhancement': - self.save = os.path.join(self.out, f_name+'.png') - cv2.imwrite(self.save,res) - elif self.task == "cnn-rnn-ocr": - print(f"Detected text for file name {f_name} is: {res}") - else: - img_seg_overlayed, only_layout = self.visualize_model_output(res, self.img_org, self.task) - self.save = os.path.join(self.out, f_name+'_overlayed.png') - cv2.imwrite(self.save,img_seg_overlayed) - self.save_layout = os.path.join(self.out, f_name+'_layout.png') - cv2.imwrite(self.save_layout, only_layout) - - if self.ground_truth: - gt_img=cv2.imread(self.ground_truth) - self.IoU(gt_img[:,:,0],res[:,:,0]) - - - -@click.command() -@click.option( - "--image", - "-i", - help="image filename", - type=click.Path(exists=True, dir_okay=False), -) -@click.option( - "--dir_in", - "-di", - help="directory of images", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--out", - "-o", - help="output directory where xml with detected reading order will be written.", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--patches/--no-patches", - "-p/-nop", - is_flag=True, - help="if this parameter set to true, this tool will try to do inference in patches.", -) -@click.option( - "--save", - "-s", - help="save prediction as a png file in current folder.", -) -@click.option( - "--save_layout", - "-sl", - help="save layout prediction only as a png file in current folder.", -) -@click.option( - "--model", - "-m", - help="directory of models", - type=click.Path(exists=True, file_okay=False), - required=True, -) -@click.option( - "--ground_truth", - "-gt", - help="ground truth directory if you want to see the iou of prediction.", -) -@click.option( - "--xml_file", - "-xml", - help="xml file with layout coordinates that reading order detection will be implemented on. 
The result will be written in the same xml file.", -) -@click.option( - "--cpu", - "-cpu", - help="For OCR, the default device is the GPU. If this parameter is set to true, inference will be performed on the CPU", - is_flag=True, -) -@click.option( - "--min_area", - "-min", - help="min area size of regions considered for reading order detection. The default value is zero and means that all text regions are considered for reading order.", -) -def main(image, dir_in, model, patches, save, save_layout, ground_truth, xml_file, cpu, out, min_area): - assert image or dir_in, "Either a single image -i or a dir_in -di input is required" - with open(os.path.join(model,'config.json')) as f: - config_params_model = json.load(f) - task = config_params_model['task'] - if task not in ['classification', 'reading_order', "cnn-rnn-ocr"]: - assert not image or save, "For segmentation or binarization, an input single image -i also requires an output filename -s" - assert not dir_in or out, "For segmentation or binarization, an input directory -di also requires an output directory -o" - x = SBBPredict(image, dir_in, model, task, config_params_model, - patches, save, save_layout, ground_truth, xml_file, - cpu, out, min_area) - x.run() - diff --git a/src/eynollah/training/metrics.py b/src/eynollah/training/metrics.py deleted file mode 100644 index caa0e65..0000000 --- a/src/eynollah/training/metrics.py +++ /dev/null @@ -1,526 +0,0 @@ -import os - -os.environ['TF_USE_LEGACY_KERAS'] = '1' # avoid Keras 3 after TF 2.15 -import tensorflow as tf -from tensorflow.keras import backend as K -from tensorflow.keras.metrics import Metric, MeanMetricWrapper, get -from tensorflow.keras.initializers import Zeros -from tensorflow_addons.image import connected_components -import numpy as np - - -EPS = K.epsilon() - -def focal_loss(gamma=2., alpha=4., epsilon=EPS): - gamma = float(gamma) - alpha = float(alpha) - - def focal_loss_fixed(y_true, y_pred): - """Focal loss for multi-classification - FL(p_t)=-alpha(1-p_t)^{gamma}ln(p_t) - Notice: y_pred is probability after softmax - gradient is d(Fl)/d(p_t) not d(Fl)/d(x) as described in paper - d(Fl)/d(p_t) * [p_t(1-p_t)] = d(Fl)/d(x) - Focal Loss for Dense Object Detection - https://arxiv.org/abs/1708.02002 - - Arguments: - y_true {tensor} -- ground truth labels, shape of [batch_size, num_cls] - y_pred {tensor} -- model's output, shape of [batch_size, num_cls] - - Keyword Arguments: - gamma {float} -- (default: {2.0}) - alpha {float} -- (default: {4.0}) - - Returns: - [tensor] -- loss. 
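# --- Editorial aside, not part of the deleted file: the formula above,
# --- FL(p_t) = -alpha * (1 - p_t)^gamma * ln(p_t), written against the TF 2.x API.
# --- (The implementation that follows calls tf.log, which exists only in TF 1.x;
# --- its TF 2.x spelling is tf.math.log.)
import tensorflow as tf

def focal_loss_tf2(gamma=2.0, alpha=4.0, epsilon=1e-7):
    def loss(y_true, y_pred):
        y_true = tf.cast(y_true, tf.float32)
        y_pred = tf.cast(y_pred, tf.float32) + epsilon    # post-softmax probabilities
        ce = -y_true * tf.math.log(y_pred)                # per-class cross entropy
        weight = y_true * tf.pow(1.0 - y_pred, gamma)     # down-weight easy examples
        return tf.reduce_mean(tf.reduce_max(alpha * weight * ce, axis=1))
    return loss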
- """ - y_true = tf.convert_to_tensor(y_true, tf.float32) - y_pred = tf.convert_to_tensor(y_pred, tf.float32) - - model_out = tf.add(y_pred, epsilon) - ce = tf.multiply(y_true, -tf.log(model_out)) - weight = tf.multiply(y_true, tf.pow(tf.subtract(1., model_out), gamma)) - fl = tf.multiply(alpha, tf.multiply(weight, ce)) - reduced_fl = tf.reduce_max(fl, axis=1) - return tf.reduce_mean(reduced_fl) - - return focal_loss_fixed - - -def weighted_categorical_crossentropy(weights=None): - """ weighted_categorical_crossentropy - - Args: - * weights: crossentropy weights - Returns: - * weighted categorical crossentropy function - """ - - def loss(y_true, y_pred): - labels_floats = tf.cast(y_true, tf.float32) - per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats, logits=y_pred) - - if weights is not None: - weight_mask = tf.maximum(tf.reduce_max(tf.constant( - np.array(weights, dtype=np.float32)[None, None, None]) - * labels_floats, axis=-1), 1.0) - per_pixel_loss = per_pixel_loss * weight_mask[:, :, :, None] - return tf.reduce_mean(per_pixel_loss) - - return loss - - -def image_categorical_cross_entropy(y_true, y_pred, weights=None): - """ - :param y_true: tensor of shape (batch_size, height, width) representing the ground truth. - :param y_pred: tensor of shape (batch_size, height, width) representing the prediction. - :return: The mean cross-entropy on softmaxed tensors. - """ - - labels_floats = tf.cast(y_true, tf.float32) - per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats, logits=y_pred) - - if weights is not None: - weight_mask = tf.maximum( - tf.reduce_max(tf.constant( - np.array(weights, dtype=np.float32)[None, None, None]) - * labels_floats, axis=-1), 1.0) - per_pixel_loss = per_pixel_loss * weight_mask[:, :, :, None] - - return tf.reduce_mean(per_pixel_loss) - - -def class_tversky(y_true, y_pred): - smooth = 1.0 # 1.00 - - y_true = K.permute_dimensions(y_true, (3, 1, 2, 0)) - y_pred = K.permute_dimensions(y_pred, (3, 1, 2, 0)) - - y_true_pos = K.batch_flatten(y_true) - y_pred_pos = K.batch_flatten(y_pred) - true_pos = K.sum(y_true_pos * y_pred_pos, 1) - false_neg = K.sum(y_true_pos * (1 - y_pred_pos), 1) - false_pos = K.sum((1 - y_true_pos) * y_pred_pos, 1) - alpha = 0.2 # 0.5 - beta = 0.8 - return (true_pos + smooth) / (true_pos + alpha * false_neg + beta * false_pos + smooth) - - -def focal_tversky_loss(y_true, y_pred): - pt_1 = class_tversky(y_true, y_pred) - gamma = 1.3 # 4./3.0#1.3#4.0/3.00# 0.75 - return K.sum(K.pow((1 - pt_1), gamma)) - - -def generalized_dice_coeff2(y_true, y_pred): - n_el = 1 - for dim in y_true.shape: - n_el *= int(dim) - n_cl = y_true.shape[-1] - w = K.zeros(shape=(n_cl,)) - w = (K.sum(y_true, axis=(0, 1, 2))) / n_el - w = 1 / (w ** 2 + 0.000001) - numerator = y_true * y_pred - numerator = w * K.sum(numerator, (0, 1, 2)) - numerator = K.sum(numerator) - denominator = y_true + y_pred - denominator = w * K.sum(denominator, (0, 1, 2)) - denominator = K.sum(denominator) - return 2 * numerator / denominator - - -def generalized_dice_coeff(y_true, y_pred): - axes = tuple(range(1, len(y_pred.shape) - 1)) - Ncl = y_pred.shape[-1] - w = K.zeros(shape=(Ncl,)) - w = K.sum(y_true, axis=axes) - w = 1 / (w ** 2 + 0.000001) - # Compute gen dice coef: - numerator = y_true * y_pred - numerator = w * K.sum(numerator, axes) - numerator = K.sum(numerator) - - denominator = y_true + y_pred - denominator = w * K.sum(denominator, axes) - denominator = K.sum(denominator) - - gen_dice_coef = 2 * numerator / denominator - - return 
gen_dice_coef - - -def generalized_dice_loss(y_true, y_pred): - return 1 - generalized_dice_coeff2(y_true, y_pred) - - -# TODO: document where this is from -def soft_dice_loss(y_true, y_pred, epsilon=EPS): - """ - Soft dice loss calculation for arbitrary batch size, number of classes, and number of spatial dimensions. - Assumes the `channels_last` format. - - # Arguments - y_true: b x X x Y( x Z...) x c One hot encoding of ground truth - y_pred: b x X x Y( x Z...) x c Network output, must sum to 1 over c channel (such as after softmax) - epsilon: Used for numerical stability to avoid divide by zero errors - - # References - V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation - https://arxiv.org/abs/1606.04797 - More details on Dice loss formulation - https://mediatum.ub.tum.de/doc/1395260/1395260.pdf (page 72) - - Adapted from https://github.com/Lasagne/Recipes/issues/99#issuecomment-347775022 - """ - - # skip the batch and class axis for calculating Dice score - axes = tuple(range(1, len(y_pred.shape) - 1)) - - numerator = 2. * K.sum(y_pred * y_true, axes) - - denominator = K.sum(K.square(y_pred) + K.square(y_true), axes) - return 1.00 - K.mean(numerator / (denominator + epsilon)) # average over classes and batch - - -# TODO: document where this is from -def seg_metrics(y_true, y_pred, metric_name, metric_type='standard', drop_last=True, mean_per_class=False, - verbose=False): - """ - Compute mean metrics of two segmentation masks, via Keras. - - IoU(A,B) = |A & B| / (| A U B|) - Dice(A,B) = 2*|A & B| / (|A| + |B|) - - Args: - y_true: true masks, one-hot encoded. - y_pred: predicted masks, either softmax outputs, or one-hot encoded. - metric_name: metric to be computed, either 'iou' or 'dice'. - metric_type: one of 'standard' (default), 'soft', 'naive'. - In the standard version, y_pred is one-hot encoded and the mean - is taken only over classes that are present (in y_true or y_pred). - The 'soft' version of the metrics are computed without one-hot - encoding y_pred. - The 'naive' version return mean metrics where absent classes contribute - to the class mean as 1.0 (instead of being dropped from the mean). - drop_last = True: boolean flag to drop last class (usually reserved - for background class in semantic segmentation) - mean_per_class = False: return mean along batch axis for each class. - verbose = False: print intermediate results such as intersection, union - (as number of pixels). - Returns: - IoU/Dice of y_true and y_pred, as a float, unless mean_per_class == True - in which case it returns the per-class metric, averaged over the batch. 
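# --- Editorial aside, not part of the deleted file: the two definitions above on a
# --- tiny concrete example. A covers 4 pixels, B covers 2, they overlap in 2:
# --- IoU = 2/4 = 0.5 and Dice = 2*2/(4+2) = 0.667.
import numpy as np

a = np.array([[1, 1], [1, 1]])                # "true" mask
b = np.array([[1, 1], [0, 0]])                # "pred" mask
inter = np.sum(a & b)                         # 2
union = np.sum(a | b)                         # 4
print(inter / union)                          # IoU  = 0.5
print(2 * inter / (a.sum() + b.sum()))        # Dice = 0.666...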
- - Inputs are B*W*H*N tensors, with - B = batch size, - W = width, - H = height, - N = number of classes - """ - - flag_soft = (metric_type == 'soft') - flag_naive_mean = (metric_type == 'naive') - - # always assume one or more classes - num_classes = K.shape(y_true)[-1] - - if not flag_soft: - # get one-hot encoded masks from y_pred (true masks should already be one-hot) - y_pred = K.one_hot(K.argmax(y_pred), num_classes) - y_true = K.one_hot(K.argmax(y_true), num_classes) - - # if already one-hot, could have skipped above command - # keras uses float32 instead of float64, would give error down (but numpy arrays or keras.to_categorical gives float64) - y_true = K.cast(y_true, 'float32') - y_pred = K.cast(y_pred, 'float32') - - # intersection and union shapes are batch_size * n_classes (values = area in pixels) - axes = (1, 2) # W,H axes of each image - intersection = K.sum(K.abs(y_true * y_pred), axis=axes) - mask_sum = K.sum(K.abs(y_true), axis=axes) + K.sum(K.abs(y_pred), axis=axes) - union = mask_sum - intersection # or, np.logical_or(y_pred, y_true) for one-hot - - smooth = .001 - iou = (intersection + smooth) / (union + smooth) - dice = 2 * (intersection + smooth) / (mask_sum + smooth) - - metric = {'iou': iou, 'dice': dice}[metric_name] - - # define mask to be 0 when no pixels are present in either y_true or y_pred, 1 otherwise - mask = K.cast(K.not_equal(union, 0), 'float32') - - if drop_last: - metric = metric[:, :-1] - mask = mask[:, :-1] - - if verbose: - print('intersection, union') - print(K.eval(intersection), K.eval(union)) - print(K.eval(intersection / union)) - - # return mean metrics: remaining axes are (batch, classes) - if flag_naive_mean: - return K.mean(metric) - - # take mean only over non-absent classes - class_count = K.sum(mask, axis=0) - non_zero = tf.greater(class_count, 0) - non_zero_sum = tf.boolean_mask(K.sum(metric * mask, axis=0), non_zero) - non_zero_count = tf.boolean_mask(class_count, non_zero) - - if verbose: - print('Counts of inputs with class present, metrics for non-absent classes') - print(K.eval(class_count), K.eval(non_zero_sum / non_zero_count)) - - return K.mean(non_zero_sum / non_zero_count) - - -# TODO: document where this is from -# TODO: Why a different implementation than IoU from utils? -def mean_iou(y_true, y_pred, **kwargs): - """ - Compute mean Intersection over Union of two segmentation masks, via Keras. - - Calls metrics_k(y_true, y_pred, metric_name='iou'), see there for allowed kwargs. 
- """ - return seg_metrics(y_true, y_pred, metric_name='iou', **kwargs) - - -def Mean_IOU(y_true, y_pred): - nb_classes = K.int_shape(y_pred)[-1] - iou = [] - true_pixels = K.argmax(y_true, axis=-1) - pred_pixels = K.argmax(y_pred, axis=-1) - void_labels = K.equal(K.sum(y_true, axis=-1), 0) - for i in range(0, nb_classes): # exclude first label (background) and last label (void) - true_labels = K.equal(true_pixels, i) # & ~void_labels - pred_labels = K.equal(pred_pixels, i) # & ~void_labels - inter = tf.to_int32(true_labels & pred_labels) - union = tf.to_int32(true_labels | pred_labels) - legal_batches = K.sum(tf.to_int32(true_labels), axis=1) > 0 - ious = K.sum(inter, axis=1) / K.sum(union, axis=1) - iou.append(K.mean(tf.gather(ious, indices=tf.where(legal_batches)))) # returns average IoU of the same objects - iou = tf.stack(iou) - legal_labels = ~tf.debugging.is_nan(iou) - iou = tf.gather(iou, indices=tf.where(legal_labels)) - return K.mean(iou) - - -def iou_vahid(y_true, y_pred): - nb_classes = tf.shape(y_true)[-1] + tf.to_int32(1) - true_pixels = K.argmax(y_true, axis=-1) - pred_pixels = K.argmax(y_pred, axis=-1) - iou = [] - - for i in tf.range(nb_classes): - tp = K.sum(tf.to_int32(K.equal(true_pixels, i) & K.equal(pred_pixels, i))) - fp = K.sum(tf.to_int32(K.not_equal(true_pixels, i) & K.equal(pred_pixels, i))) - fn = K.sum(tf.to_int32(K.equal(true_pixels, i) & K.not_equal(pred_pixels, i))) - iouh = tp / (tp + fp + fn) - iou.append(iouh) - return K.mean(iou) - - -# TODO: copy from utils? -def IoU_metric(Yi, y_predi): - # mean Intersection over Union - # Mean IoU = TP/(FN + TP + FP) - y_predi = np.argmax(y_predi, axis=3) - y_testi = np.argmax(Yi, axis=3) - IoUs = [] - Nclass = int(np.max(Yi)) + 1 - for c in range(Nclass): - TP = np.sum((Yi == c) & (y_predi == c)) - FP = np.sum((Yi != c) & (y_predi == c)) - FN = np.sum((Yi == c) & (y_predi != c)) - IoU = TP / float(TP + FP + FN) - IoUs.append(IoU) - return K.cast(np.mean(IoUs), dtype='float32') - - -def IoU_metric_keras(y_true, y_pred): - # mean Intersection over Union - # Mean IoU = TP/(FN + TP + FP) - init = tf.global_variables_initializer() - sess = tf.Session() - sess.run(init) - - return IoU_metric(y_true.eval(session=sess), y_pred.eval(session=sess)) - - -# TODO: unused, remove? -def jaccard_distance_loss(y_true, y_pred, smooth=100): - """ - Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|) - = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|)) - - The jaccard distance loss is usefull for unbalanced datasets. This has been - shifted so it converges on 0 and is smoothed to avoid exploding or disapearing - gradient. 
- - Ref: https://en.wikipedia.org/wiki/Jaccard_index - - @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96 - @author: wassname - """ - intersection = K.sum(K.abs(y_true * y_pred), axis=-1) - sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1) - jac = (intersection + smooth) / (sum_ - intersection + smooth) - return (1 - jac) * smooth - - -def metrics_superposition(*metrics, weights=None): - """ - return a single metric derived by adding all given metrics - - default weights are uniform - """ - if weights is None: - weights = len(metrics) * [tf.constant(1.0)] - def mixed(y_true, y_pred): - results = [] - for metric, weight in zip(metrics, weights): - results.append(metric(y_true, y_pred) * weight) - return tf.reduce_mean(tf.stack(results), 0) - mixed.__name__ = '/'.join(m.__name__ for m in metrics) - return mixed - - -class Superposition(MeanMetricWrapper): - def __init__(self, metrics, weights=None, dtype=None): - self._metrics = metrics - self._weights = weights - mixed = metrics_superposition(*metrics, weights=weights) - super().__init__(mixed, name=mixed.__name__, dtype=dtype) - def get_config(self): - return dict(metrics=self._metrics, - weights=self._weights, - **super().get_config()) - -class ConfusionMatrix(Metric): - def __init__(self, nlabels=None, nrm="all", name="confusion_matrix", dtype=tf.float32): - super().__init__(name=name, dtype=dtype) - assert nlabels is not None - self._nlabels = nlabels - self._shape = (self._nlabels, self._nlabels) - self._matrix = self.add_weight(name, shape=self._shape, - initializer=Zeros) - assert nrm in ("all", "true", "pred", "none") - self._nrm = nrm - - def update_state(self, y_true, y_pred, sample_weight=None): - y_pred = tf.math.argmax(y_pred, axis=-1) - y_true = tf.math.argmax(y_true, axis=-1) - - y_pred = tf.reshape(y_pred, shape=(-1,)) - y_true = tf.reshape(y_true, shape=(-1,)) - - y_pred.shape.assert_is_compatible_with(y_true.shape) - confusion = tf.math.confusion_matrix(y_true, y_pred, num_classes=self._nlabels, dtype=self._dtype) - - return self._matrix.assign_add(confusion) - - def result(self): - """normalize""" - if self._nrm == "all": - denom = tf.math.reduce_sum(self._matrix, axis=(0, 1)) - elif self._nrm == "true": - denom = tf.math.reduce_sum(self._matrix, axis=1, keepdims=True) - elif self._nrm == "pred": - denom = tf.math.reduce_sum(self._matrix, axis=0, keepdims=True) - else: - denom = tf.constant(1.0) - return tf.math.divide_no_nan(self._matrix, denom) - - def reset_state(self): - for v in self.variables: - v.assign(tf.zeros(shape=self._shape)) - - def get_config(self): - return dict(nlabels=self._nlabels, - **super().get_config()) - -def connected_components_loss(artificial=0): - """ - metric/loss function capturing the separability of segmentation maps - - For both sides (true and predicted, resp.), computes - 1. the argmax() of class-wise softmax input (i.e. the segmentation map) - 2. the connected components (i.e. the instance label map) - 3. the max() (i.e. the highest label = nr of components) - - The original idea was to then calculate a regression formula - between those two targets. But it is insufficient to just - approximate the same number of components, for they might be - completely different (true components being merged, predicted - components splitting others). We really want to capture the - correspondence between those labels, which is localised. - - For that we now calculate the label pairs and their counts. 
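# --- Editorial aside, not part of the deleted file: the pair counting described here,
# --- and the SVD congruence score developed in the next paragraph, in plain numpy for
# --- two tiny instance label maps.
import numpy as np

c_true = np.array([[1, 1, 2, 2]])             # GT component labels
c_pred = np.array([[1, 1, 1, 2]])             # prediction merges part of GT component 2

# count co-occurring (pred, true) label pairs -> incidence matrix
corr = np.zeros((c_pred.max() + 1, c_true.max() + 1))
np.add.at(corr, (c_pred.ravel(), c_true.ravel()), 1)

# congruent labelings give a (permuted) diagonal matrix, so the singular values
# sum to the total pixel count and the score is 1.0; merges and splits lower it
svals = np.linalg.svd(corr, compute_uv=False)
print(svals.sum() / corr.sum())               # ~0.79 here because of the merge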
- Looking at the M,N incidence matrix, we want those counts - to be distributed orthogonally (ideally). So we compute a - singular value decomposition and compare the sum total of - singular values to the sum total of all label counts. The - rate of the two determines a measure of congruence. - - Moreover, for the case of artificial boundary segments around - regions, optionally introduced by the training extractor to - represent segment identity in the loss (and removed at runtime): - Reduce this class to background as well. - """ - def metric(y_true, y_pred): - if artificial: - # convert artificial border class to background - y_true = y_true[:, :, :, :artificial] - y_pred = y_pred[:, :, :, :artificial] - # [B, H, W, C] - l_true = tf.math.argmax(y_true, axis=-1) - l_pred = tf.math.argmax(y_pred, axis=-1) - # [B, H, W] - c_true = tf.cast(connected_components(l_true), tf.int64) - c_pred = tf.cast(connected_components(l_pred), tf.int64) - # [B, H, W] - n_batch = y_true.shape[0] - C_true = tf.math.reduce_max(c_true, (1, 2)) + 1 - C_pred = tf.math.reduce_max(c_pred, (1, 2)) + 1 - MODULUS = tf.constant(2**22, tf.int64) - tf.debugging.assert_less(C_true, MODULUS, - message="cannot compare segments: too many connected components in GT") - tf.debugging.assert_less(C_pred, MODULUS, - message="cannot compare segments: too many connected components in prediction") - c_comb = MODULUS * c_pred + c_true - tf.debugging.assert_greater_equal(c_comb, tf.constant(0, tf.int64), - message="overflow pairing components") - # [B, H, W] - # tf.unique does not support batch dim, so... - results = [] - for c_comb, C_true, C_pred in zip( - tf.unstack(c_comb, num=n_batch), - tf.unstack(C_true, num=n_batch), - tf.unstack(C_pred, num=n_batch), - ): - prod, _, count = tf.unique_with_counts(tf.reshape(c_comb, (-1,))) - # [L] - #corr = tf.zeros([C_pred, C_true], tf.int32) - #corr[prod // 2**24, prod % 2**24] = count - corr = tf.scatter_nd(tf.stack([prod // MODULUS, prod % MODULUS], axis=1), - count, (C_pred, C_true)) - corr = tf.cast(corr, tf.float32) - # [Cpred, Ctrue] - sgv = tf.linalg.svd(corr, compute_uv=False) - results.append(tf.reduce_sum(sgv) / tf.reduce_sum(corr)) - return 1.0 - tf.reduce_mean(tf.stack(results), 0) - # c_true = tf.reshape(c_true, (n_batch, -1)) - # c_pred = tf.reshape(c_pred, (n_batch, -1)) - # # [B, H*W] - # n_true = tf.math.reduce_max(c_true, axis=1) - # n_pred = tf.math.reduce_max(c_pred, axis=1) - # # [B] - # diff = tf.cast(n_true - n_pred, tf.float32) - # return tf.reduce_mean(tf.math.abs(diff) + alpha * diff, axis=-1) - - metric.__name__ = 'nCC' - metric._direction = 'down' - return metric - diff --git a/src/eynollah/training/models.py b/src/eynollah/training/models.py deleted file mode 100644 index 3494249..0000000 --- a/src/eynollah/training/models.py +++ /dev/null @@ -1,502 +0,0 @@ -import os - -os.environ['TF_USE_LEGACY_KERAS'] = '1' # avoid Keras 3 after TF 2.15 -import tensorflow as tf -from tensorflow.keras.layers import ( - Activation, - Add, - AveragePooling2D, - BatchNormalization, - Bidirectional, - Conv1D, - Conv2D, - Dense, - Dropout, - Embedding, - Flatten, - Input, - Layer, - LayerNormalization, - LSTM, - MaxPooling2D, - MultiHeadAttention, - Reshape, - UpSampling2D, - ZeroPadding2D, - add, - concatenate -) -from tensorflow.keras.models import Model -from tensorflow.keras.regularizers import l2 -from tensorflow.keras.backend import ctc_batch_cost - -from ..patch_encoder import Patches, PatchEncoder - -##mlp_head_units = [512, 256]#[2048, 1024] -###projection_dim = 64 
-##transformer_layers = 2#8 -##num_heads = 1#4 -RESNET50_WEIGHTS_PATH = './pretrained_model/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5' -RESNET50_WEIGHTS_URL = ('https://github.com/fchollet/deep-learning-models/releases/download/v0.2/' - 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5') - -IMAGE_ORDERING = 'channels_last' -MERGE_AXIS = -1 - - -class CTCLayer(Layer): - def call(self, y_true, y_pred): - batch_len = tf.cast(tf.shape(y_true)[0], dtype="int64") - input_length = tf.cast(tf.shape(y_pred)[1], dtype="int64") - label_length = tf.cast(tf.shape(y_true)[1], dtype="int64") - - input_length = input_length * tf.ones(shape=(batch_len, 1), dtype="int64") - label_length = label_length * tf.ones(shape=(batch_len, 1), dtype="int64") - loss = ctc_batch_cost(y_true, y_pred, input_length, label_length) - self.add_loss(loss) - - # At test time, just return the computed predictions. - return y_pred - -def mlp(x, hidden_units, dropout_rate): - for units in hidden_units: - x = Dense(units, activation=tf.nn.gelu)(x) - x = Dropout(dropout_rate)(x) - return x - -def one_side_pad(x): - x = ZeroPadding2D(((1, 0), (1, 0)), data_format=IMAGE_ORDERING)(x) - return x - -def identity_block(input_tensor, kernel_size, filters, stage, block): - """The identity block is the block that has no conv layer at shortcut. - # Arguments - input_tensor: input tensor - kernel_size: defualt 3, the kernel size of middle conv layer at main path - filters: list of integers, the filterss of 3 conv layer at main path - stage: integer, current stage label, used for generating layer names - block: 'a','b'..., current block label, used for generating layer names - # Returns - Output tensor for the block. - """ - filters1, filters2, filters3 = filters - - if IMAGE_ORDERING == 'channels_last': - bn_axis = 3 - else: - bn_axis = 1 - - conv_name_base = 'res' + str(stage) + block + '_branch' - bn_name_base = 'bn' + str(stage) + block + '_branch' - - x = Conv2D(filters1, (1, 1), data_format=IMAGE_ORDERING, name=conv_name_base + '2a')(input_tensor) - x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x) - x = Activation('relu')(x) - - x = Conv2D(filters2, kernel_size, data_format=IMAGE_ORDERING, - padding='same', name=conv_name_base + '2b')(x) - x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x) - x = Activation('relu')(x) - - x = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING, name=conv_name_base + '2c')(x) - x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x) - - x = add([x, input_tensor]) - x = Activation('relu')(x) - return x - - -def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)): - """conv_block is the block that has a conv layer at shortcut - # Arguments - input_tensor: input tensor - kernel_size: defualt 3, the kernel size of middle conv layer at main path - filters: list of integers, the filterss of 3 conv layer at main path - stage: integer, current stage label, used for generating layer names - block: 'a','b'..., current block label, used for generating layer names - # Returns - Output tensor for the block. 
- Note that from stage 3, the first conv layer at main path is with strides=(2,2) - And the shortcut should have strides=(2,2) as well - """ - filters1, filters2, filters3 = filters - - if IMAGE_ORDERING == 'channels_last': - bn_axis = 3 - else: - bn_axis = 1 - - conv_name_base = 'res' + str(stage) + block + '_branch' - bn_name_base = 'bn' + str(stage) + block + '_branch' - - x = Conv2D(filters1, (1, 1), data_format=IMAGE_ORDERING, strides=strides, - name=conv_name_base + '2a')(input_tensor) - x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x) - x = Activation('relu')(x) - - x = Conv2D(filters2, kernel_size, data_format=IMAGE_ORDERING, padding='same', - name=conv_name_base + '2b')(x) - x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x) - x = Activation('relu')(x) - - x = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING, name=conv_name_base + '2c')(x) - x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x) - - shortcut = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING, strides=strides, - name=conv_name_base + '1')(input_tensor) - shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut) - - x = add([x, shortcut]) - x = Activation('relu')(x) - return x - -def resnet50(inputs, weight_decay=1e-6, pretraining=False): - if IMAGE_ORDERING == 'channels_last': - bn_axis = 3 - else: - bn_axis = 1 - - x = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(inputs) - x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2), kernel_regularizer=l2(weight_decay), - name='conv1')(x) - f1 = x - - x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x) - x = Activation('relu')(x) - x = MaxPooling2D((3, 3), data_format=IMAGE_ORDERING, strides=(2, 2))(x) - - x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) - x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') - x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') - f2 = one_side_pad(x) - - x = conv_block(x, 3, [128, 128, 512], stage=3, block='a') - x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') - x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') - x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') - f3 = x - - x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f') - f4 = x - - x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a') - x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b') - x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c') - f5 = x - - if pretraining: - model = Model(inputs, x).load_weights(RESNET50_WEIGHTS_PATH) - - return f1, f2, f3, f4, f5 - -def unet_decoder(img, f1, f2, f3, f4, f5, n_classes, light=False, task="segmentation", weight_decay=1e-6): - if IMAGE_ORDERING == 'channels_last': - bn_axis = 3 - else: - bn_axis = 1 - - o = Conv2D(512 if light else 1024, (1, 1), padding='same', - data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(f5) - o = BatchNormalization(axis=bn_axis)(o) - o = Activation('relu')(o) - - if light: - f4 = Conv2D(512, (1, 1), padding='same', - data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(f4) - f4 = BatchNormalization(axis=bn_axis)(f4) - f4 = Activation('relu')(f4) - - o = 
UpSampling2D((2, 2), data_format=IMAGE_ORDERING, interpolation="bilinear")(o) - o = concatenate([o, f4], axis=MERGE_AXIS) - o = ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING)(o) - o = Conv2D(512, (3, 3), padding='valid', - data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(o) - o = BatchNormalization(axis=bn_axis)(o) - o = Activation('relu')(o) - - o = UpSampling2D((2, 2), data_format=IMAGE_ORDERING, interpolation="bilinear")(o) - o = concatenate([o, f3], axis=MERGE_AXIS) - o = ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING)(o) - o = Conv2D(256, (3, 3), padding='valid', - data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(o) - o = BatchNormalization(axis=bn_axis)(o) - o = Activation('relu')(o) - - o = UpSampling2D((2, 2), data_format=IMAGE_ORDERING, interpolation="bilinear")(o) - o = concatenate([o, f2], axis=MERGE_AXIS) - o = ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING)(o) - o = Conv2D(128, (3, 3), padding='valid', - data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(o) - o = BatchNormalization(axis=bn_axis)(o) - o = Activation('relu')(o) - - o = UpSampling2D((2, 2), data_format=IMAGE_ORDERING, interpolation="bilinear")(o) - o = concatenate([o, f1], axis=MERGE_AXIS) - o = ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING)(o) - o = Conv2D(64, (3, 3), padding='valid', - data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(o) - o = BatchNormalization(axis=bn_axis)(o) - o = Activation('relu')(o) - - o = UpSampling2D((2, 2), data_format=IMAGE_ORDERING, interpolation="bilinear")(o) - o = concatenate([o, img], axis=MERGE_AXIS) - o = ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING)(o) - o = Conv2D(32, (3, 3), padding='valid', - data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(o) - o = BatchNormalization(axis=bn_axis)(o) - o = Activation('relu')(o) - - o = Conv2D(n_classes, (1, 1), padding='same', - data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(o) - if task == "segmentation": - o = BatchNormalization(axis=bn_axis)(o) - o = Activation('softmax')(o) - else: - o = Activation('sigmoid')(o) - - return Model(img, o) - -def resnet50_unet_light(n_classes, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): - assert input_height % 32 == 0 - assert input_width % 32 == 0 - - img_input = Input(shape=(input_height, input_width, 3)) - - features = resnet50(img_input, weight_decay=weight_decay, pretraining=pretraining) - - return unet_decoder(img_input, *features, n_classes, light=True, task=task, weight_decay=weight_decay) - -def resnet50_unet(n_classes, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): - assert input_height % 32 == 0 - assert input_width % 32 == 0 - - img_input = Input(shape=(input_height, input_width, 3)) - - features = resnet50(img_input, weight_decay=weight_decay, pretraining=pretraining) - - return unet_decoder(img_input, *features, n_classes, light=False, task=task, weight_decay=weight_decay) - -def transformer_block(img, - num_patches, - patchsize_x, - patchsize_y, - mlp_head_units, - n_layers, - num_heads, - projection_dim): - patches = Patches(patchsize_x, patchsize_y)(img) - # Encode patches. - encoded_patches = PatchEncoder(num_patches, projection_dim)(patches) - - for _ in range(n_layers): - # Layer normalization 1. - x1 = LayerNormalization(epsilon=1e-6)(encoded_patches) - # Create a multi-head attention layer. 
- attention_output = MultiHeadAttention(num_heads=num_heads, - key_dim=projection_dim, - dropout=0.1)(x1, x1) - # Skip connection 1. - x2 = Add()([attention_output, encoded_patches]) - # Layer normalization 2. - x3 = LayerNormalization(epsilon=1e-6)(x2) - # MLP. - x3 = mlp(x3, hidden_units=mlp_head_units, dropout_rate=0.1) - # Skip connection 2. - encoded_patches = Add()([x3, x2]) - - encoded_patches = tf.reshape(encoded_patches, - [-1, - img.shape[1], - img.shape[2], - projection_dim // (patchsize_x * patchsize_y)]) - return encoded_patches - -def vit_resnet50_unet(num_patches, - n_classes, - transformer_patchsize_x, - transformer_patchsize_y, - transformer_mlp_head_units=None, - transformer_layers=8, - transformer_num_heads=4, - transformer_projection_dim=64, - input_height=224, - input_width=224, - task="segmentation", - weight_decay=1e-6, - pretraining=False): - if transformer_mlp_head_units is None: - transformer_mlp_head_units = [128, 64] - inputs = Input(shape=(input_height, input_width, 3)) - - features = list(resnet50(inputs, weight_decay=weight_decay, pretraining=pretraining)) - - features[-1] = transformer_block(features[-1], - num_patches, - transformer_patchsize_x, - transformer_patchsize_y, - transformer_mlp_head_units, - transformer_layers, - transformer_num_heads, - transformer_projection_dim) - - return unet_decoder(inputs, *features, n_classes, task=task, weight_decay=weight_decay) - -def vit_resnet50_unet_transformer_before_cnn(num_patches, - n_classes, - transformer_patchsize_x, - transformer_patchsize_y, - transformer_mlp_head_units=None, - transformer_layers=8, - transformer_num_heads=4, - transformer_projection_dim=64, - input_height=224, - input_width=224, - task="segmentation", - weight_decay=1e-6, - pretraining=False): - if transformer_mlp_head_units is None: - transformer_mlp_head_units = [128, 64] - inputs = Input(shape=(input_height, input_width, 3)) - - encoded_patches = transformer_block(inputs, - num_patches, - transformer_patchsize_x, - transformer_patchsize_y, - transformer_mlp_head_units, - transformer_layers, - transformer_num_heads, - transformer_projection_dim) - encoded_patches = Conv2D(3, (1, 1), padding='same', - data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay), - name='convinput')(encoded_patches) - - features = resnet50(encoded_patches, weight_decay=weight_decay, pretraining=pretraining) - - return unet_decoder(inputs, *features, n_classes, task=task, weight_decay=weight_decay) - -def resnet50_classifier(n_classes,input_height=224,input_width=224,weight_decay=1e-6,pretraining=False): - include_top=True - assert input_height%32 == 0 - assert input_width%32 == 0 - - - img_input = Input(shape=(input_height,input_width , 3 )) - - _, _, _, _, x = resnet50(img_input, weight_decay, pretraining) - - x = AveragePooling2D((7, 7), name='avg_pool')(x) - x = Flatten()(x) - - ## - x = Dense(256, activation='relu', name='fc512')(x) - x=Dropout(0.2)(x) - ## - x = Dense(n_classes, activation='softmax', name='fc1000')(x) - model = Model(img_input, x) - - return model - -def machine_based_reading_order_model(n_classes,input_height=224,input_width=224,weight_decay=1e-6,pretraining=False): - assert input_height%32 == 0 - assert input_width%32 == 0 - - img_input = Input(shape=(input_height,input_width , 3 )) - - _, _, _, _, x = resnet50(img_input, weight_decay, pretraining) - - x = AveragePooling2D((7, 7), name='avg_pool1')(x) - flattened = Flatten()(x) - - o = Dense(256, activation='relu', name='fc512')(flattened) - o=Dropout(0.2)(o) - - o = Dense(256, 
activation='relu', name='fc512a')(o) - o=Dropout(0.2)(o) - - o = Dense(n_classes, activation='sigmoid', name='fc1000')(o) - model = Model(img_input , o) - - return model - -def cnn_rnn_ocr_model(image_height=None, image_width=None, n_classes=None, max_seq=None): - input_img = Input(shape=(image_height, image_width, 3), name="image") - labels = Input(name="label", shape=(None,)) - - x = Conv2D(64,kernel_size=(3,3),padding="same")(input_img) - x = BatchNormalization(name="bn1")(x) - x = Activation("relu", name="relu1")(x) - x = Conv2D(64,kernel_size=(3,3),padding="same")(x) - x = BatchNormalization(name="bn2")(x) - x = Activation("relu", name="relu2")(x) - x = MaxPooling2D(pool_size=(1,2),strides=(1,2))(x) - - x = Conv2D(128,kernel_size=(3,3),padding="same")(x) - x = BatchNormalization(name="bn3")(x) - x = Activation("relu", name="relu3")(x) - x = Conv2D(128,kernel_size=(3,3),padding="same")(x) - x = BatchNormalization(name="bn4")(x) - x = Activation("relu", name="relu4")(x) - x = MaxPooling2D(pool_size=(1,2),strides=(1,2))(x) - - x = Conv2D(256,kernel_size=(3,3),padding="same")(x) - x = BatchNormalization(name="bn5")(x) - x = Activation("relu", name="relu5")(x) - x = Conv2D(256,kernel_size=(3,3),padding="same")(x) - x = BatchNormalization(name="bn6")(x) - x = Activation("relu", name="relu6")(x) - x = MaxPooling2D(pool_size=(2,2),strides=(2,2))(x) - - x = Conv2D(image_width,kernel_size=(3,3),padding="same")(x) - x = BatchNormalization(name="bn7")(x) - x = Activation("relu", name="relu7")(x) - x = Conv2D(image_width,kernel_size=(16,1))(x) - x = BatchNormalization(name="bn8")(x) - x = Activation("relu", name="relu8")(x) - x2d = MaxPooling2D(pool_size=(1,2),strides=(1,2))(x) - x4d = MaxPooling2D(pool_size=(1,2),strides=(1,2))(x2d) - - - new_shape = (x.shape[1]*x.shape[2], x.shape[3]) - new_shape2 = (x2d.shape[1]*x2d.shape[2], x2d.shape[3]) - new_shape4 = (x4d.shape[1]*x4d.shape[2], x4d.shape[3]) - - x = Reshape(target_shape=new_shape, name="reshape")(x) - x2d = Reshape(target_shape=new_shape2, name="reshape2")(x2d) - x4d = Reshape(target_shape=new_shape4, name="reshape4")(x4d) - - xrnnorg = Bidirectional(LSTM(image_width, return_sequences=True, dropout=0.25))(x) - xrnn2d = Bidirectional(LSTM(image_width, return_sequences=True, dropout=0.25))(x2d) - xrnn4d = Bidirectional(LSTM(image_width, return_sequences=True, dropout=0.25))(x4d) - - xrnn2d = Reshape(target_shape=(1, xrnn2d.shape[1], xrnn2d.shape[2]), name="reshape6")(xrnn2d) - xrnn4d = Reshape(target_shape=(1, xrnn4d.shape[1], xrnn4d.shape[2]), name="reshape8")(xrnn4d) - - - xrnn2dup = UpSampling2D(size=(1, 2), interpolation="nearest")(xrnn2d) - xrnn4dup = UpSampling2D(size=(1, 4), interpolation="nearest")(xrnn4d) - - xrnn2dup = Reshape(target_shape=(xrnn2dup.shape[2], xrnn2dup.shape[3]), name="reshape10")(xrnn2dup) - xrnn4dup = Reshape(target_shape=(xrnn4dup.shape[2], xrnn4dup.shape[3]), name="reshape12")(xrnn4dup) - - addition = Add()([xrnnorg, xrnn2dup, xrnn4dup]) - - addition_rnn = Bidirectional(LSTM(image_width, return_sequences=True, dropout=0.25))(addition) - - out = Conv1D(max_seq, 1, data_format="channels_first")(addition_rnn) - out = BatchNormalization(name="bn9")(out) - out = Activation("relu", name="relu9")(out) - #out = Conv1D(n_classes, 1, activation='relu', data_format="channels_last")(out) - - out = Dense(n_classes, activation="softmax", name="dense2")(out) - - # Add CTC layer for calculating CTC loss at each step. 
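# --- Editorial aside, not part of the deleted file: CTCLayer (used just below) adds
# --- the CTC loss as a side effect and passes predictions through unchanged, so at
# --- inference time the label input is dropped by rebuilding the model from the
# --- "image" input to the "dense2" softmax, the same surgery inference.py performs
# --- for the cnn-rnn-ocr task:
from tensorflow.keras.models import Model

def prediction_model(trained):
    """Strip the CTC plumbing from a trained handwriting_recognizer."""
    return Model(trained.get_layer("image").input,
                 trained.get_layer("dense2").output)

# usage sketch, assuming a trained model on disk:
#   trained = tf.keras.models.load_model(model_dir, compile=False)
#   preds = prediction_model(trained).predict(line_images)   # [batch, timesteps, n_classes]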
- output = CTCLayer(name="ctc_loss")(labels, out) - - model = Model(inputs=(input_img, labels), outputs=output, name="handwriting_recognizer") - - return model diff --git a/src/eynollah/training/reload-models-v0.8.mk b/src/eynollah/training/reload-models-v0.8.mk deleted file mode 100644 index b7a38dd..0000000 --- a/src/eynollah/training/reload-models-v0.8.mk +++ /dev/null @@ -1,48 +0,0 @@ -SHELL = bash -e - -MODELS_SRC = models_eynollah -MODELS_DST = reloaded/models_eynollah - - -# $(MODELS_DST)/eynollah-binarization_20210425 \ -# $(MODELS_DST)/eynollah-column-classifier_20210425 \ -# $(MODELS_DST)/eynollah-enhancement_20210425 \ -# $(MODELS_DST)/eynollah-main-regions-aug-rotation_20210425 \ -# $(MODELS_DST)/eynollah-main-regions-aug-scaling_20210425 \ -# $(MODELS_DST)/eynollah-main-regions-ensembled_20210425 \ -# $(MODELS_DST)/eynollah-main-regions_20220314 \ -# $(MODELS_DST)/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18 \ -# $(MODELS_DST)/eynollah-tables_20210319 \ -# $(MODELS_DST)/model_eynollah_ocr_cnnrnn_20250930 \ - -RELOADABLE_MODELS = \ - $(MODELS_DST)/model_eynollah_page_extraction_20250915 \ - $(MODELS_DST)/model_eynollah_reading_order_20250824 \ - $(MODELS_DST)/modelens_e_l_all_sp_0_1_2_3_4_171024 \ - $(MODELS_DST)/modelens_full_lay_1__4_3_091124 \ - $(MODELS_DST)/modelens_table_0t4_201124 \ - $(MODELS_DST)/modelens_textline_0_1__2_4_16092024 - -all: $(RELOADABLE_MODELS) - -$(MODELS_DST)/%: $(MODELS_SRC)/% - mkdir -p $@ - test -e $&1 | tee $(notdir $<).log - cp $ tf.Tensor: - """ - Implements training.inference.SBBPredict.visualize_model_output for TF - (effectively plotting the layout segmentation map on the input image). - - In doing so, also converts: - - from Eynollah's BGR/float on the input side - - to std RGB/int format on the output side - """ - # in_: [B, H, W, 3] (BGR float) - image = in_[..., ::-1] * 255 - # out: [B, H, W, C] - lab = tf.math.argmax(out, axis=-1) - # lab: [B, H, W] - colors = tf.constant([[255, 255, 255], - [255, 0, 0], - [255, 125, 0], - [255, 0, 125], - [125, 125, 125], - [125, 125, 0], - [0, 125, 255], - [0, 125, 0], - [125, 125, 125], - [0, 125, 255], - [125, 0, 125], - [0, 255, 0], - [0, 0, 255], - [0, 255, 255], - [255, 125, 125], - [255, 0, 255]]) - layout = tf.gather(colors, lab) - # layout: [B, H, W, 3] - image = tf.cast(image, tf.float32) - layout = tf.cast(layout, tf.float32) - #weighted = image * 0.5 + layout * 0.1 (too dark) - weighted = image * 0.9 + layout * 0.1 - return tf.cast(weighted, tf.uint8) - -def plot_confusion_matrix(cm, name="Confusion Matrix"): - """ - Plot the confusion matrix with matplotlib and tensorflow - """ - fig, ax = plt.subplots(figsize=(10, 8), dpi=300) - im = ax.imshow(cm, vmin=0.0, vmax=1.0, interpolation='nearest', cmap=plt.cm.Blues) - ax.figure.colorbar(im, ax=ax) - ax.set(xticks=np.arange(cm.shape[1]), - yticks=np.arange(cm.shape[0]), - xlim=[-0.5, cm.shape[1] - 0.5], - ylim=[-0.5, cm.shape[0] - 0.5], - #xticklabels=labels, - #yticklabels=labels, - title=name, - ylabel='True class', - xlabel='Predicted class') - # Loop over data dimensions and create text annotations. - thresh = cm.max() / 2. 
- for i in range(cm.shape[0]): - for j in range(cm.shape[1]): - ax.text(j, i, format(cm[i, j], ".2f"), - ha="center", va="center", - color="white" if cm[i, j] > thresh else "black") - fig.tight_layout() - # convert to PNG - buf = io.BytesIO() - fig.savefig(buf, format='png') - plt.close(fig) - buf.seek(0) - # Convert PNG buffer to TF image - image = tf.image.decode_png(buf.getvalue(), channels=4) - # Add the batch dimension - image = tf.expand_dims(image, 0) - return image - -# plot predictions on train and test set during every epoch -class TensorBoardPlotter(TensorBoard): - def __init__(self, plot_freqs, *args, **kwargs): - super().__init__(*args, **kwargs) - self.model_call = None - self.plot_frequency_train, self.plot_frequency_val = plot_freqs - def on_epoch_begin(self, epoch, logs=None): - super().on_epoch_begin(epoch, logs=logs) - # override the model's call(), so we don't have to invest extra cycles - # to predict our samples (plotting itself can be neglected) - self.model_call = self.model.call - def new_call(inputs, **kwargs): - outputs = self.model_call(inputs, **kwargs) - images = plot_layout_tf(inputs, outputs) - self.plot(images, training=kwargs.get('training', None), epoch=epoch) - with tf.control_dependencies(None): - return outputs - self.model.call = new_call - # force rebuild of tf.function (so Python binding for epoch gets re-evaluated) - self.model.train_function = self.model.make_train_function(True) - self.model.test_function = self.model.make_test_function(True) - def on_epoch_end(self, epoch, logs=None): - # re-instate (so ModelCheckpoint does not see our override call) - self.model.call = self.model_call - super().on_epoch_end(epoch, logs=logs) - def plot(self, images, training=None, epoch=0): - if training: - writer = self._train_writer - freq = self.plot_frequency_train - mode, step = "train", self._train_step.value() - else: - writer = self._val_writer - freq = self.plot_frequency_val - mode, step = "test", self._val_step.value() - # skip most samples, because TF's EncodePNG is so costly, - # and now ends up in the middle of our pipeline, thus causing stalls - # (cannot use max_outputs, as batch size may be too small) - if not tf.cast(step % freq, tf.bool): - with writer.as_default(): - # used to be family kwarg for tf.summary.image name prefix - family = "epoch_%03d/" % (1 + epoch) - name = family + mode - tf.summary.image(name, images, step=step, max_outputs=len(images)) - def on_train_batch_end(self, batch, logs=None): - if logs is not None: - logs = dict(logs) - # cannot be logged as scalar: - logs.pop('confusion_matrix', None) - super().on_train_batch_end(batch, logs) - def on_test_end(self, logs=None): - if logs is not None: - logs = dict(logs) - # cannot be logged as scalar: - logs.pop('confusion_matrix', None) - super().on_test_end(logs) - def _log_epoch_metrics(self, epoch, logs): - if not logs: - return - logs = dict(logs) - # cannot be logged as scalar: - train_matrix = logs.pop('confusion_matrix', None) - val_matrix = logs.pop('val_confusion_matrix', None) - super()._log_epoch_metrics(epoch, logs) - # now plot confusion_matrix - with tf.summary.record_if(True): - if train_matrix is not None: - train_image = plot_confusion_matrix(train_matrix) - with self._train_writer.as_default(): - tf.summary.image("confusion_matrix", train_image, step=epoch) - if val_matrix is not None: - val_image = plot_confusion_matrix(val_matrix) - with self._val_writer.as_default(): - tf.summary.image("confusion_matrix", val_image, step=epoch) - -def 
get_dirs_or_files(input_data): - image_input, labels_input = os.path.join(input_data, 'images/'), os.path.join(input_data, 'labels/') - # Check that the dataset directories exist (fail loudly rather than silently returning None) - assert os.path.isdir(input_data), "{} is not a directory".format(input_data) - assert os.path.isdir(image_input), "{} is not a directory".format(image_input) - assert os.path.isdir(labels_input), "{} is not a directory".format(labels_input) - return image_input, labels_input - -def download_file(url, path): - with open(path, 'wb') as f: - with requests.get(url, stream=True) as r: - r.raise_for_status() - for data in r.iter_content(chunk_size=4096): - f.write(data) - -ex = Experiment(save_git_info=False) - - -@ex.config -def config_params(): - task = "segmentation" # Task of the model; one of segmentation, binarization, enhancement, classification, cnn-rnn-ocr or reading_order. - if task in ["segmentation", "binarization", "enhancement"]: - backbone_type = "nontransformer" # Type of image feature map network backbone. Either a vision transformer alongside a CNN we call "transformer", or only a CNN which we call "nontransformer" - if backbone_type == "transformer": - transformer_patchsize_x = None # Patch size of vision transformer patches in x direction. - transformer_patchsize_y = None # Patch size of vision transformer patches in y direction. - transformer_num_patches_xy = None # Number of patches for vision transformer in x and y direction respectively. - transformer_projection_dim = 64 # Transformer projection dimension. Default value is 64. - transformer_mlp_head_units = [128, 64] # Transformer Multilayer Perceptron (MLP) head units. Default value is [128, 64] - transformer_layers = 8 # Number of transformer layers. Default value is 8. - transformer_num_heads = 4 # Transformer number of heads. Default value is 4. - transformer_cnn_first = True # We have two types of vision transformers: either the CNN is applied first, followed by the transformer, or reversed. - n_classes = None # Number of classes. In the case of binary classification this should be 2. - n_epochs = 1 # Number of epochs to train. - n_batch = 1 # Number of images per batch at each iteration. (Try as large as fits on VRAM.) - if task == 'cnn-rnn-ocr': - max_len = None # Maximum sequence length (characters per line) for OCR output. - characters_txt_file = None # Path of JSON file defining the character set needed for the OCR model. - input_height = 224 * 1 # Height of model's input in pixels. - input_width = 224 * 1 # Width of model's input in pixels. - weight_decay = 1e-6 # Weight decay of l2 regularization of model layers. - learning_rate = 1e-4 # Set the learning rate. - if task in ["segmentation", "binarization"]: - is_loss_soft_dice = False # Use soft dice as loss function. When set to true, "weighted_loss" must be false. - weighted_loss = False # Use weighted categorical cross entropy as loss function. When set to true, "is_loss_soft_dice" must be false. - add_ncc_loss = 0 # Add regression loss for number of connected components. When non-zero, use this as weight for the nCC term. - elif task == "classification": - f1_threshold_classification = None # Only model checkpoints whose validation f1 score exceeds this threshold are considered; the weights of the selected checkpoints are ensembled, and the averaged ensemble model is written to output. - classification_classes_name = None # Dictionary of classification class names. - patches = False # Divides input image into smaller patches (input size of the model) when set to true. For the model to see the full image, like page extraction, set this to false.
- augmentation = False # To apply any kind of augmentation, this parameter must be set to true. - if augmentation: - flip_aug = False # Whether different types of flipping will be applied to the image. Requires "flip_index" setting. - blur_aug = False # Whether images will be blurred. Requires "blur_k" setting. - if blur_aug: - blur_k = None # Method of blurring (gauss, median or blur). - padding_white = False # If true, white padding will be applied to the image. - if padding_white and task == 'cnn-rnn-ocr': - white_padds = None # List of padding sizes. - padd_colors = None # List of padding colors, but only "white" or "black" or both. - padding_black = False # If true, black padding will be applied to the image. - scaling = False # Whether images will be scaled up or down. Requires "scales" setting. - scaling_bluring = False # Whether a combination of scaling and blurring will be applied to the image. - scaling_binarization = False # Whether a combination of scaling and binarization will be applied to the image. - scaling_brightness = False # Whether a combination of scaling and brightening will be applied to the image. - scaling_flip = False # Whether a combination of scaling and flipping will be applied to the image. - if scaling or scaling_brightness or scaling_bluring or scaling_binarization or scaling_flip: - scales = None # Scale patches for augmentation. - if flip_aug or scaling_flip: - flip_index = None # List of codes (as in cv2.flip) for flip augmentation. - shifting = False - brightening = False # Whether images will be brightened. Requires "brightness" setting. - if brightening: - brightness = None # List of intensity factors for brightening. - binarization = False # Whether binary images will be used, too. (Will use Otsu thresholding unless supplying precomputed images in "dir_img_bin".) - if binarization: - dir_img_bin = None # Directory of training dataset subdirectory of binarized images - add_red_textlines = False - adding_rgb_background = False # Whether texture images will be added as artificial background. - if adding_rgb_background: - dir_rgb_backgrounds = None # Directory of texture images for synthetic background - adding_rgb_foreground = False # Whether texture images will be added as artificial foreground. - if adding_rgb_foreground: - dir_rgb_foregrounds = None # Directory of texture images for synthetic foreground - if adding_rgb_background or adding_rgb_foreground: - number_of_backgrounds_per_image = 1 - if task == 'cnn-rnn-ocr': - image_inversion = False # Whether the binarized images will be inverted. - textline_skewing_bin = False # Whether binarized textline images will be rotated. - textline_left_in_depth_bin = False # Whether left side of binary textline image will be displayed in depth. - textline_right_in_depth_bin = False # Whether right side of binary textline image will be displayed in depth. - textline_up_in_depth_bin = False # Whether upper side of binary textline image will be displayed in depth. - textline_down_in_depth_bin = False # Whether lower side of binary textline image will be displayed in depth. - pepper_bin_aug = False # Whether pepper noise will be added to binary textline images. - bin_deg = False # Whether a combination of degrading and binarization will be applied to the image. - degrading = False # Whether images will be artificially degraded. Requires the "degrade_scales" setting. - if degrading or binarization and task == 'cnn-rnn-ocr' and bin_deg: - degrade_scales = None # List of quality factors for degradation. 
- channels_shuffling = False # Re-arrange color channels. - if channels_shuffling: - shuffle_indexes = None # List of channels to switch between. - rotation = False # Whether images will be rotated by 90 degrees. - rotation_not_90 = False # Whether images will be rotated arbitrarily (skewed). Requires "thetha" setting. - if rotation_not_90: - thetha = None # List of rotation angles in degrees. - if task == 'cnn-rnn-ocr': - white_noise_strap = False # Whether white noise will be applied on some straps on the textline image. - textline_skewing = False # Whether textline images will be skewed for augmentation. - if textline_skewing or binarization and textline_skewing_bin: - skewing_amplitudes = None # List of skewing angles in degrees like [5, 8] - textline_left_in_depth = False # If true, left side of textline image will be displayed in depth. - textline_right_in_depth = False # If true, right side of textline image will be displayed in depth. - textline_up_in_depth = False # If true, upper side of textline image will be displayed in depth. - textline_down_in_depth = False # If true, lower side of textline image will be displayed in depth. - pepper_aug = False # Whether pepper noise will be added to textline images. - if pepper_aug or binarization and pepper_bin_aug: - pepper_indexes = None # List of pepper noise factors, e.g. [0.01, 0.005]. - color_padding_rotation = False # Whether images will be rotated with color padding. Requires "thetha_padd" setting. - if color_padding_rotation: - thetha_padd = None # List of angles (in degrees) used for rotation alongside padding. - dir_train = None # Directory of training dataset with subdirectories having the names "images" and "labels". - dir_eval = None # Directory of validation dataset with subdirectories having the names "images" and "labels". - dir_output = None # Directory where the augmented training data and the model checkpoints will be saved. - pretraining = False # Set to true to (down)load pretrained weights of ResNet50 encoder. - save_interval = None # frequency for writing model checkpoints (positive integer for number of batches saved under "model_step_{batch:04d}", otherwise epoch saved under "model_{epoch:02d}") - reload_weights = False # Set true to build new model from config, load weights from dir_of_start_model, save under dir_output and exit. - continue_training = False # Whether to continue training an existing model. - if continue_training: - dir_of_start_model = '' # Directory of model checkpoint to load to continue training. (E.g. if you already trained for 3 epochs, set "dir_of_start_model=dir_output/model_03".) - index_start = 0 # Epoch counter initial value to continue training. (E.g. if you already trained for 3 epochs, set "index_start=3" to continue naming checkpoints model_04, model_05 etc.) - data_is_provided = False # Whether the preprocessed input data (subdirectories "images" and "labels" in both subdirectories "train" and "eval" of "dir_output") has already been generated (in the first epoch of a previous run). 
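# [Editor's note] A minimal sketch of how this sacred experiment is typically
# driven (module path and config file name are illustrative assumptions, not
# taken from the repository):
#
#     python -m eynollah.training.train with config_params.json "n_epochs=5"
#
# or programmatically:
#
#     ex.run(config_updates={"task": "segmentation", "n_classes": 3,
#                            "dir_train": "train/", "dir_eval": "eval/",
#                            "dir_output": "output/"})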
- -@ex.main -def run(_config, - _log, - task, - pretraining, - data_is_provided, - dir_train, - dir_eval, - dir_output, - n_classes, - n_epochs, - n_batch, - input_height, - input_width, - weight_decay, - learning_rate, - continue_training, - reload_weights, - save_interval, - augmentation, - # dependent config keys need a default, - # otherwise yields sacred.utils.ConfigAddedError - ## if rotation_not_90 - thetha=None, - is_loss_soft_dice=False, - weighted_loss=False, - add_ncc_loss=None, - ## if continue_training - index_start=0, - dir_of_start_model=None, - backbone_type=None, - ## if backbone_type=transformer - transformer_projection_dim=None, - transformer_mlp_head_units=None, - transformer_layers=None, - transformer_num_heads=None, - transformer_cnn_first=None, - transformer_patchsize_x=None, - transformer_patchsize_y=None, - transformer_num_patches_xy=None, - ## if task=classification - f1_threshold_classification=None, - classification_classes_name=None, - ## if task=cnn-rnn-ocr - characters_txt_file=None, - color_padding_rotation=False, - thetha_padd=None, - bin_deg=False, - image_inversion=False, - white_noise_strap=False, - textline_skewing=False, - textline_skewing_bin=False, - textline_left_in_depth=False, - textline_left_in_depth_bin=False, - textline_right_in_depth=False, - textline_right_in_depth_bin=False, - textline_up_in_depth=False, - textline_up_in_depth_bin=False, - textline_down_in_depth=False, - textline_down_in_depth_bin=False, - pepper_aug=False, - pepper_bin_aug=False, - pepper_indexes=None, - padd_colors=None, - white_padds=None, - skewing_amplitudes=None, - max_len=None, -): - """ - run configured experiment via sacred - """ - - if continue_training: - assert n_epochs > index_start, "with continue_training, n_epochs must be greater than index_start" - - if pretraining and not os.path.isfile(RESNET50_WEIGHTS_PATH): - _log.info("downloading RESNET50 pretrained weights to %s", RESNET50_WEIGHTS_PATH) - download_file(RESNET50_WEIGHTS_URL, RESNET50_WEIGHTS_PATH) - - # set the gpu configuration - configuration() - - if task in ["segmentation", "enhancement", "binarization"]: - dir_train_flowing = os.path.join(dir_output, 'train') - dir_eval_flowing = os.path.join(dir_output, 'eval') - - dir_flow_train_imgs = os.path.join(dir_train_flowing, 'images') - dir_flow_train_labels = os.path.join(dir_train_flowing, 'labels') - - dir_flow_eval_imgs = os.path.join(dir_eval_flowing, 'images') - dir_flow_eval_labels = os.path.join(dir_eval_flowing, 'labels') - - if weighted_loss: - weights = np.zeros(n_classes) - if data_is_provided: - dirs = dir_flow_train_labels - else: - dirs = os.path.join(dir_train, "labels") - for obj in os.listdir(dirs): - label_file = os.path.join(dirs, obj) - try: - label_obj = cv2.imread(label_file) - label_obj_one_hot = get_one_hot(label_obj, label_obj.shape[0], label_obj.shape[1], n_classes) - weights += (label_obj_one_hot.sum(axis=0)).sum(axis=0) - except Exception: - _log.exception("error reading data file '%s'", label_file) - - weights = 1.00 / weights - weights = weights / float(np.sum(weights)) - weights = weights / float(np.min(weights)) - weights = weights / float(np.sum(weights)) - - if task == "enhancement": - assert not is_loss_soft_dice, "for enhancement, soft_dice loss does not apply" - assert not weighted_loss, "for enhancement, weighted loss does not apply" - if continue_training: - custom_objects = dict() - if is_loss_soft_dice: - custom_objects.update(soft_dice_loss=soft_dice_loss) - elif weighted_loss: -
custom_objects.update(loss=weighted_categorical_crossentropy(weights)) - if backbone_type == 'transformer': - custom_objects.update(PatchEncoder=PatchEncoder, - Patches=Patches) - model = load_model(dir_of_start_model, compile=False, - custom_objects=custom_objects) - else: - index_start = 0 - if backbone_type == 'nontransformer': - model = resnet50_unet(n_classes, - input_height, - input_width, - task, - weight_decay, - pretraining) - else: - num_patches_x = transformer_num_patches_xy[0] - num_patches_y = transformer_num_patches_xy[1] - num_patches = num_patches_x * num_patches_y - - if transformer_cnn_first: - model_builder = vit_resnet50_unet - multiple = 32 - else: - model_builder = vit_resnet50_unet_transformer_before_cnn - multiple = 1 - - assert input_height == ( - num_patches_y * transformer_patchsize_y * multiple), ( - "transformer_patchsize_y or transformer_num_patches_xy height value error: " - "input_height should be equal to " - "(transformer_num_patches_xy height value * transformer_patchsize_y * %d)" % multiple) - assert input_width == ( - num_patches_x * transformer_patchsize_x * multiple), ( - "transformer_patchsize_x or transformer_num_patches_xy width value error: " - "input_width should be equal to " - "(transformer_num_patches_xy width value * transformer_patchsize_x * %d)" % multiple) - assert 0 == (transformer_projection_dim % - (transformer_patchsize_y * transformer_patchsize_x)), ( - "transformer_projection_dim error: " - "The remainder when parameter transformer_projection_dim is divided by " - "(transformer_patchsize_y*transformer_patchsize_x) should be zero") - - model_builder = create_captured_function(model_builder) - model_builder.config = _config - model_builder.logger = _log - model = model_builder(num_patches) - - assert model is not None - #if you want to see the model structure just uncomment model summary. - #model.summary() - - metrics = ['categorical_accuracy'] - if task in ["segmentation", "binarization"]: - if is_loss_soft_dice: - loss = soft_dice_loss - elif weighted_loss: - loss = weighted_categorical_crossentropy(weights) - else: - loss = get_metric('categorical_crossentropy') - if add_ncc_loss: - loss = metrics_superposition(loss, connected_components_loss(n_classes - 1), - weights=[1 - add_ncc_loss, add_ncc_loss]) - metrics.append(connected_components_loss(n_classes - 1)) - metrics.append(MeanIoU(n_classes, - name='iou', - ignore_class=0, - sparse_y_true=False, - sparse_y_pred=False)) - metrics.append(ConfusionMatrix(n_classes)) - else: # task == "enhancement" - loss = 'mean_squared_error' - model.compile(loss=loss, - #jit_compile=True, - optimizer=Adam(learning_rate=learning_rate), - metrics=metrics) - - if reload_weights: - model.load_weights(dir_of_start_model).assert_existing_objects_matched().expect_partial() - dir_save = os.path.join(dir_output, os.path.basename(os.path.normpath(dir_of_start_model))) - model.save(dir_save, include_optimizer=False) - with open(os.path.join(dir_save, "config.json"), "w") as fp: - json.dump(_config, fp) # encode dict into JSON - _log.info("reloaded model from %s to %s", dir_of_start_model, dir_save) - return - - if not data_is_provided: - # first create a directory in output for both training and evaluations - # in order to flow data from these directories. 
- if os.path.isdir(dir_train_flowing): - os.system('rm -rf ' + dir_train_flowing) - os.makedirs(dir_train_flowing) - - if os.path.isdir(dir_eval_flowing): - os.system('rm -rf ' + dir_eval_flowing) - os.makedirs(dir_eval_flowing) - - os.mkdir(dir_flow_train_imgs) - os.mkdir(dir_flow_train_labels) - - os.mkdir(dir_flow_eval_imgs) - os.mkdir(dir_flow_eval_labels) - - # writing patches into a sub-folder in order to be flowed from directory. - def gen(dir_img, dir_lab, dir_flow_imgs, dir_flow_labs, augmentation=True): - indexer = 0 - for img, lab in tqdm(preprocess_imgs(_config, - dir_img, - dir_lab, - augmentation=augmentation), - desc="data_is_provided"): - fname = 'img_%d.png' % indexer - cv2.imwrite(os.path.join(dir_flow_imgs, fname), img) - cv2.imwrite(os.path.join(dir_flow_labs, fname), lab) - indexer += 1 - gen(*get_dirs_or_files(dir_train), - dir_flow_train_imgs, - dir_flow_train_labels) - gen(*get_dirs_or_files(dir_eval), - dir_flow_eval_imgs, - dir_flow_eval_labels, - augmentation=False) - - def _to_cv2float(img): - # rgb→bgr and uint8→float, as expected by Eynollah models - return tf.cast(tf.reverse(img, [-1]), tf.float32) / 255 - def _to_intrgb(img): - # bgr→rgb and float→uint8 for plotting - return tf.reverse(tf.cast(img * 255, tf.uint8), [-1]) - def _to_categorical(seg): - seg = tf.cast(seg * 255, tf.int8) - # gt_gen_utils/pagexml2label uses peculiar pseudo-RGB/index colors - #seg = tf.image.rgb_to_grayscale(seg) - seg = tf.gather(seg, [0], axis=-1) - seg = tf.squeeze(seg, axis=-1) - return one_hot(seg, n_classes) - def get_dataset(dir_imgs, dir_labs, shuffle=None): - gen_kwargs = dict(labels=None, - label_mode=None, - batch_size=None, # batch after zip below - image_size=(input_height, input_width), - color_mode='rgb', - shuffle=shuffle is not None, - seed=shuffle, - interpolation='nearest', - crop_to_aspect_ratio=False, - # Keras 3 only... 
- #pad_to_aspect_ratio=False, - #data_format='channel_last', - #verbose=False, - ) - img_gen = image_dataset_from_directory(dir_imgs, **gen_kwargs) - lab_gen = image_dataset_from_directory(dir_labs, **gen_kwargs) - img_gen = img_gen.map(_to_cv2float, num_parallel_calls=tf.data.AUTOTUNE) - lab_gen = lab_gen.map(_to_cv2float, num_parallel_calls=tf.data.AUTOTUNE) - if task in ["segmentation", "binarization"]: - lab_gen = lab_gen.map(_to_categorical, num_parallel_calls=tf.data.AUTOTUNE) - ds = tf.data.Dataset.zip(img_gen, lab_gen) - return ds.batch(n_batch, drop_remainder=True, num_parallel_calls=tf.data.AUTOTUNE) - train_gen = get_dataset(dir_flow_train_imgs, dir_flow_train_labels, shuffle=np.random.randint(1e6)) - valdn_gen = get_dataset(dir_flow_eval_imgs, dir_flow_eval_labels) - train_steps = len(os.listdir(dir_flow_train_imgs)) // n_batch - valdn_steps = len(os.listdir(dir_flow_eval_imgs)) // n_batch - _log.info("training on %d batches in %d epochs", train_steps, n_epochs) - _log.info("validating on %d batches", valdn_steps) - - callbacks = [TensorBoardPlotter((max(1, train_steps * n_batch // 1000), - max(1, valdn_steps * n_batch // 100)), - os.path.join(dir_output, 'logs'), - profile_batch=(10, 20)), - SaveWeightsAfterSteps(0, dir_output, _config), - ] - if save_interval: - callbacks.append(SaveWeightsAfterSteps(save_interval, dir_output, _config)) - train_gen = train_gen.shuffle(train_steps // 1000, reshuffle_each_iteration=True) - valdn_gen = valdn_gen.shuffle(valdn_steps // 10, reshuffle_each_iteration=False) - # from matplotlib import pyplot as plt - # from tensorflow_addons.image import connected_components - # def plot(x, ytrue): - # ypred = model.call(x) - # gt = plot_layout_tf(x, ytrue) - # dt = plot_layout_tf(x, ypred) - # segtrue = tf.math.argmax(ytrue, axis=-1) - # segpred = tf.math.argmax(ypred, axis=-1) - # cctrue = connected_components(segtrue) - # ccpred = connected_components(segpred) - # cc = connected_components_loss(n_classes-1)(ytrue, ypred) - # sd = soft_dice_loss(ytrue, ypred) - # return gt, dt, cctrue, ccpred, cc, sd - # for gt, dt, gtcc, dtcc, cc, sd in train_gen.take(15).rebatch(1).map(plot).as_numpy_iterator(): - # plt.subplot(2, 2, 1) - # plt.imshow(np.squeeze(gt)) - # plt.title('GT') - # plt.subplot(2, 2, 3) - # plt.imshow(np.squeeze(gtcc)) - # plt.title('GT CC') - # plt.subplot(2, 2, 4) - # plt.imshow(np.squeeze(dtcc)) - # plt.title('prediction CC') - # plt.subplot(2, 2, 2) - # plt.imshow(np.squeeze(dt)) - # plt.title(f'prediction (nCC={cc} soft dice={sd:.3f})') - # plt.show() - model.fit( - train_gen.prefetch(tf.data.AUTOTUNE), - steps_per_epoch=train_steps, - validation_data=valdn_gen.prefetch(tf.data.AUTOTUNE), - validation_steps=valdn_steps, - verbose=1, - epochs=n_epochs, - callbacks=callbacks, - initial_epoch=index_start) - - elif task=="cnn-rnn-ocr": - - with open(characters_txt_file, 'r') as char_txt_f: - characters = json.load(char_txt_f) - padding_token = len(characters) + 5 - # Mapping characters to integers. 
- char_to_num = StringLookup(vocabulary=list(characters), mask_token=None) - n_classes = len(char_to_num.get_vocabulary()) + 2 - - if continue_training: - model = load_model(dir_of_start_model) - else: - index_start = 0 - model = cnn_rnn_ocr_model(image_height=input_height, - image_width=input_width, - n_classes=n_classes, - max_seq=max_len) - #initial_learning_rate = 1e-4 - #decay_steps = int (n_epochs * ( len_dataset / n_batch )) - #alpha = 0.01 - #lr_schedule = 1e-4 - #tf.keras.optimizers.schedules.CosineDecay(initial_learning_rate, decay_steps, alpha) - opt = Adam(learning_rate=learning_rate) - model.compile(optimizer=opt) # rs: loss seems to be (ctc_batch_cost) in last layer - - #print(model.summary()) - - if reload_weights: - model.load_weights(dir_of_start_model).assert_existing_objects_matched().expect_partial() - dir_save = os.path.join(dir_output, os.path.basename(os.path.normpath(dir_of_start_model))) - model.save(dir_save, include_optimizer=False) - with open(os.path.join(dir_save, "config.json"), "w") as fp: - json.dump(_config, fp) # encode dict into JSON - _log.info("reloaded model from %s to %s", dir_of_start_model, dir_save) - return - - # todo: use Dataset.map() on Dataset.list_files() - def get_dataset(dir_img, dir_lab): - def gen(): - return preprocess_imgs(_config, - dir_img, - dir_lab, - # extra+overrides - char_to_num=char_to_num, - padding_token=padding_token - ) - return (tf.data.Dataset.from_generator(gen, (tf.float32, tf.int64)) - .padded_batch(n_batch, - padded_shapes=([input_height, input_width, 3], [None]), - padding_values=(None, tf.constant(padding_token, dtype=tf.int64)), - drop_remainder=True, - #num_parallel_calls=tf.data.AUTOTUNE, - ) - .map(lambda x, y: {"image": x, "label": y}) - .prefetch(tf.data.AUTOTUNE) - ) - train_ds = get_dataset(*get_dirs_or_files(dir_train)) - valdn_ds = get_dataset(*get_dirs_or_files(dir_eval)) - - callbacks = [TensorBoard(os.path.join(dir_output, 'logs'), write_graph=False), - EarlyStopping(verbose=1, patience=3, restore_best_weights=False, start_from_epoch=3), - SaveWeightsAfterSteps(0, dir_output, _config)] - if save_interval: - callbacks.append(SaveWeightsAfterSteps(save_interval, dir_output, _config)) - model.fit( - train_ds.shuffle(200), - validation_data=valdn_ds, - verbose=1, - epochs=n_epochs, - callbacks=callbacks, - initial_epoch=index_start) - - elif task=='classification': - if continue_training: - model = load_model(dir_of_start_model, compile=False) - else: - index_start = 0 - model = resnet50_classifier(n_classes, - input_height, - input_width, - weight_decay, - pretraining) - - model.compile(loss='categorical_crossentropy', - optimizer=Adam(learning_rate=0.001), # rs: why not learning_rate? 
- metrics=['accuracy', F1Score(average='macro', name='f1')]) - - if reload_weights: - model.load_weights(dir_of_start_model).assert_existing_objects_matched().expect_partial() - dir_save = os.path.join(dir_output, os.path.basename(os.path.normpath(dir_of_start_model))) - model.save(dir_save, include_optimizer=False) - with open(os.path.join(dir_save, "config.json"), "w") as fp: - json.dump(_config, fp) # encode dict into JSON - _log.info("reloaded model from %s to %s", dir_of_start_model, dir_save) - return - - list_classes = list(classification_classes_name.values()) - data_args = dict(label_mode="categorical", - class_names=list_classes, - batch_size=n_batch, - image_size=(input_height, input_width), - interpolation="nearest") - trainXY = image_dataset_from_directory(dir_train, shuffle=True, **data_args) - testXY = image_dataset_from_directory(dir_eval, shuffle=False, **data_args) - callbacks = [TensorBoard(os.path.join(dir_output, 'logs'), write_graph=False), - SaveWeightsAfterSteps(0, dir_output, _config, - monitor='val_f1', - #save_best_only=True, # we need all for ensembling - mode='max')] - - history = model.fit(trainXY, - #class_weight=weights) - validation_data=testXY, - verbose=1, - epochs=n_epochs, - callbacks=callbacks, - initial_epoch=index_start) - - usable_checkpoints = np.flatnonzero(np.array(history.history['val_f1']) > - f1_threshold_classification) - if len(usable_checkpoints) >= 1: - _log.info("averaging over usable checkpoints: %s", str(usable_checkpoints)) - usable_checkpoints = [os.path.join(dir_output, 'model_{epoch:02d}'.format(epoch=epoch + 1)) - for epoch in usable_checkpoints] - ens_path = os.path.join(dir_output, 'model_ens_avg') - run_ensembling(usable_checkpoints, ens_path) - _log.info("ensemble model saved under '%s'", ens_path) - - elif task=='reading_order': - if continue_training: - model = load_model(dir_of_start_model, compile=False) - else: - index_start = 0 - model = machine_based_reading_order_model(n_classes, - input_height, - input_width, - weight_decay, - pretraining) - - #f1score_tot = [0] - model.compile(loss="binary_crossentropy", - #optimizer=SGD(learning_rate=0.01, momentum=0.9), - optimizer=Adam(learning_rate=0.0001), # rs: why not learning_rate? 
- metrics=['accuracy']) - - if reload_weights: - model.load_weights(dir_of_start_model).assert_existing_objects_matched().expect_partial() - dir_save = os.path.join(dir_output, os.path.basename(os.path.normpath(dir_of_start_model))) - model.save(dir_save, include_optimizer=False) - with open(os.path.join(dir_save, "config.json"), "w") as fp: - json.dump(_config, fp) # encode dict into JSON - _log.info("reloaded model from %s to %s", dir_of_start_model, dir_save) - return - - dir_flow_train_imgs = os.path.join(dir_train, 'images') - dir_flow_train_labels = os.path.join(dir_train, 'labels') - - classes = os.listdir(dir_flow_train_labels) - if augmentation: - num_rows = len(classes)*(len(thetha) + 1) - else: - num_rows = len(classes) - #ls_test = os.listdir(dir_flow_train_labels) - - callbacks = [TensorBoard(os.path.join(dir_output, 'logs'), write_graph=False), - SaveWeightsAfterSteps(0, dir_output, _config)] - if save_interval: - callbacks.append(SaveWeightsAfterSteps(save_interval, dir_output, _config)) - - trainXY = generate_arrays_from_folder_reading_order( - dir_flow_train_labels, dir_flow_train_imgs, - n_batch, input_height, input_width, n_classes, - thetha, augmentation) - - history = model.fit(trainXY, - steps_per_epoch=num_rows // n_batch, - verbose=1, - epochs=n_epochs, - callbacks=callbacks, - initial_epoch=index_start) - ''' - if f1score>f1score_tot[0]: - f1score_tot[0] = f1score - model_dir = os.path.join(dir_out,'model_best') - model.save(model_dir) - ''' diff --git a/src/eynollah/training/utils.py b/src/eynollah/training/utils.py deleted file mode 100644 index 33a1fd2..0000000 --- a/src/eynollah/training/utils.py +++ /dev/null @@ -1,1244 +0,0 @@ -import os -import math -import random -from logging import getLogger -from pathlib import Path - -import cv2 -import numpy as np -import seaborn as sns -from scipy.ndimage.interpolation import map_coordinates -from scipy.ndimage.filters import gaussian_filter -import imutils -import tensorflow as tf - -from PIL import Image, ImageFile, ImageEnhance - -ImageFile.LOAD_TRUNCATED_IMAGES = True - - -def vectorize_label(label, char_to_num, padding_token, max_len): - label = char_to_num(tf.strings.unicode_split(label, input_encoding="UTF-8")) - length = tf.shape(label)[0] - pad_amount = max_len - length - label = tf.pad(label, paddings=[[0, pad_amount]], constant_values=padding_token) - return label - -def scale_padd_image_for_ocr(img, height, width): - ratio = height / float(img.shape[0]) - - w_ratio = int(ratio * img.shape[1]) - - if w_ratio <= width: - width_new = w_ratio - else: - width_new = width - - if width_new <= 0: - width_new = width - - img_res = resize_image(img, height, width_new) - img_fin = np.ones((height, width, 3))*255 - - img_fin[:,:width_new,:] = img_res[:,:,:] - return img_fin - -# TODO: document where this is from -def add_salt_and_pepper_noise(img, salt_prob, pepper_prob): - """ - Add salt-and-pepper noise to an image. - - Parameters: - img: ndarray - Input image. - salt_prob: float - Probability of salt noise. - pepper_prob: float - Probability of pepper noise. - - Returns: - noisy_image: ndarray - Image with salt-and-pepper noise.
- """ - # Make a copy of the image - noisy_image = np.copy(img) - - # Generate random noise - total_pixels = img.size - num_salt = int(salt_prob * total_pixels) - num_pepper = int(pepper_prob * total_pixels) - - # Add salt noise - coords = [np.random.randint(0, i - 1, num_salt) for i in img.shape[:2]] - noisy_image[coords[0], coords[1]] = 255 # white pixels - - # Add pepper noise - coords = [np.random.randint(0, i - 1, num_pepper) for i in img.shape[:2]] - noisy_image[coords[0], coords[1]] = 0 # black pixels - - return noisy_image - -def invert_image(img): - img_inv = 255 - img - return img_inv - -def return_image_with_strapped_white_noises(img): - img_w_noised = np.copy(img) - img_h, img_width = img.shape[0], img.shape[1] - n = 9 - p = 0.3 - num_windows = np.random.binomial(n, p, 1)[0] - - if num_windows<1: - num_windows = 1 - - loc_of_windows = np.random.uniform(0,img_width,num_windows).astype(np.int64) - width_windows = np.random.uniform(10,50,num_windows).astype(np.int64) - - for i, loc in enumerate(loc_of_windows): - noise = np.random.normal(0, 50, (img_h, width_windows[i], 3)) - - try: - img_w_noised[:, loc:loc+width_windows[i], : ] = noise[:,:,:] - except: - pass - return img_w_noised - -def do_padding_for_ocr(img, percent_height, padding_color): - padding_size = int( img.shape[0]*percent_height/2. ) - height_new = img.shape[0] + 2*padding_size - width_new = img.shape[1] + 2*padding_size - - h_start = padding_size - w_start = padding_size - - if padding_color == 'white': - img_new = np.ones((height_new, width_new, img.shape[2])).astype(float) * 255 - elif padding_color == 'black': - img_new = np.zeros((height_new, width_new, img.shape[2])).astype(float) - else: - raise ValueError("padding_color must be 'white' or 'black'") - - img_new[h_start:h_start + img.shape[0], w_start:w_start + img.shape[1], :] = np.copy(img[:, :, :]) - - - return img_new - -# TODO: document where this is from -def do_deskewing(img, amplitude): - height, width = img.shape[:2] - - # Generate sinusoidal wave distortion with reduced amplitude - #amplitude = 8 # 5 # Reduce the amplitude for less curvature - frequency = 300 # Increase frequency to stretch the curve - x_indices = np.tile(np.arange(width), (height, 1)) - y_indices = np.arange(height).reshape(-1, 1) + amplitude * np.sin(2 * np.pi * x_indices / frequency) - - # Convert indices to float32 for remapping - map_x = x_indices.astype(np.float32) - map_y = y_indices.astype(np.float32) - - # Apply the remap to create the curve - curved_image = cv2.remap(img, map_x, map_y, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT) - return curved_image - -# TODO: document where this is from -def do_direction_in_depth(img, direction: str): - height, width = img.shape[:2] - - if direction == 'left': - # Define the original corner points of the image - src_points = np.float32([ - [0, 0], # Top-left corner - [width, 0], # Top-right corner - [0, height], # Bottom-left corner - [width, height] # Bottom-right corner - ]) - - # Define the new corner points for a subtle right-to-left tilt - dst_points = np.float32([ - [2, 13], # Slight inward shift for top-left - [width, 0], # Slight downward shift for top-right - [2, height-13], # Slight inward shift for bottom-left - [width, height] # Slight upward shift for bottom-right - ]) - elif direction == 'right': - # Define the original corner points of the image - src_points = np.float32([ - [0, 0], # Top-left corner - [width, 0], # Top-right corner - [0, height], # Bottom-left corner - [width, height] # Bottom-right 
corner - ]) - - # Define the new corner points for a subtle right-to-left tilt - dst_points = np.float32([ - [0, 0], # Slight inward shift for top-left - [width, 13], # Slight downward shift for top-right - [0, height], # Slight inward shift for bottom-left - [width, height - 13] # Slight upward shift for bottom-right - ]) - - elif direction == 'up': - # Define the original corner points of the image - src_points = np.float32([ - [0, 0], # Top-left corner - [width, 0], # Top-right corner - [0, height], # Bottom-left corner - [width, height] # Bottom-right corner - ]) - - # Define the new corner points to simulate a tilted perspective - # Make the top part appear closer and the bottom part farther - dst_points = np.float32([ - [50, 0], # Top-left moved inward - [width - 50, 0], # Top-right moved inward - [0, height], # Bottom-left remains the same - [width, height] # Bottom-right remains the same - ]) - elif direction == 'down': - # Define the original corner points of the image - src_points = np.float32([ - [0, 0], # Top-left corner - [width, 0], # Top-right corner - [0, height], # Bottom-left corner - [width, height] # Bottom-right corner - ]) - - # Define the new corner points to simulate a tilted perspective - # Make the top part appear closer and the bottom part farther - dst_points = np.float32([ - [0, 0], # Top-left moved inward - [width, 0], # Top-right moved inward - [50, height], # Bottom-left remains the same - [width - 50, height] # Bottom-right remains the same - ]) - else: - raise ValueError("direction must be 'left', 'right', 'up' or 'down'") - - # Compute the perspective transformation matrix - matrix = cv2.getPerspectiveTransform(src_points, dst_points) - - # Apply the perspective warp - warped_image = cv2.warpPerspective(img, matrix, (width, height)) - return warped_image - - -def return_shuffled_channels(img, channels_order): - """ - channels order in ordinary case is like this [0, 1, 2]. In the case of shuffling the order should be provided. 
- """ - img_sh = np.copy(img) - - img_sh[:,:,0]= img[:,:,channels_order[0]] - img_sh[:,:,1]= img[:,:,channels_order[1]] - img_sh[:,:,2]= img[:,:,channels_order[2]] - return img_sh - -# TODO: Refactor into one {{{ -def return_binary_image_with_red_textlines(img_bin): - img_red = np.copy(img_bin) - - img_red[:,:,0][img_bin[:,:,0] == 0] = 255 - return img_red - -def return_binary_image_with_given_rgb_background(img_bin, img_rgb_background): - img_rgb_background = resize_image(img_rgb_background ,img_bin.shape[0], img_bin.shape[1]) - - img_final = np.copy(img_bin) - - img_final[:,:,0][img_bin[:,:,0] != 0] = img_rgb_background[:,:,0][img_bin[:,:,0] != 0] - img_final[:,:,1][img_bin[:,:,1] != 0] = img_rgb_background[:,:,1][img_bin[:,:,1] != 0] - img_final[:,:,2][img_bin[:,:,2] != 0] = img_rgb_background[:,:,2][img_bin[:,:,2] != 0] - - return img_final - -def return_binary_image_with_given_rgb_background_and_given_foreground_rgb(img_bin, img_rgb_background, rgb_foreground): - img_rgb_background = resize_image(img_rgb_background ,img_bin.shape[0], img_bin.shape[1]) - - img_final = np.copy(img_bin) - img_foreground = np.zeros(img_bin.shape) - - - img_foreground[:,:,0][img_bin[:,:,0] == 0] = rgb_foreground[0] - img_foreground[:,:,1][img_bin[:,:,0] == 0] = rgb_foreground[1] - img_foreground[:,:,2][img_bin[:,:,0] == 0] = rgb_foreground[2] - - - img_final[:,:,0][img_bin[:,:,0] != 0] = img_rgb_background[:,:,0][img_bin[:,:,0] != 0] - img_final[:,:,1][img_bin[:,:,1] != 0] = img_rgb_background[:,:,1][img_bin[:,:,1] != 0] - img_final[:,:,2][img_bin[:,:,2] != 0] = img_rgb_background[:,:,2][img_bin[:,:,2] != 0] - - img_final = img_final + img_foreground - return img_final - -def return_binary_image_with_given_rgb_background_red_textlines(img_bin, img_rgb_background, img_color): - img_rgb_background = resize_image(img_rgb_background ,img_bin.shape[0], img_bin.shape[1]) - - img_final = np.copy(img_color) - - img_final[:,:,0][img_bin[:,:,0] != 0] = img_rgb_background[:,:,0][img_bin[:,:,0] != 0] - img_final[:,:,1][img_bin[:,:,1] != 0] = img_rgb_background[:,:,1][img_bin[:,:,1] != 0] - img_final[:,:,2][img_bin[:,:,2] != 0] = img_rgb_background[:,:,2][img_bin[:,:,2] != 0] - - return img_final - -def return_image_with_red_elements(img, img_bin): - img_final = np.copy(img) - - img_final[:,:,0][img_bin[:,:,0]==0] = 0 - img_final[:,:,1][img_bin[:,:,0]==0] = 0 - img_final[:,:,2][img_bin[:,:,0]==0] = 255 - return img_final - -# }}} - -def shift_image_and_label(img, label, type_shift): - h_n = int(img.shape[0]*1.06) - w_n = int(img.shape[1]*1.06) - - channel0_avg = int( np.mean(img[:,:,0]) ) - channel1_avg = int( np.mean(img[:,:,1]) ) - channel2_avg = int( np.mean(img[:,:,2]) ) - - h_diff = abs( img.shape[0] - h_n ) - w_diff = abs( img.shape[1] - w_n ) - - h_start = int(h_diff / 2.) - w_start = int(w_diff / 2.) 
- - img_scaled_padded = np.zeros((h_n, w_n, 3)) - label_scaled_padded = np.zeros((h_n, w_n, 3)) - - img_scaled_padded[:,:,0] = channel0_avg - img_scaled_padded[:,:,1] = channel1_avg - img_scaled_padded[:,:,2] = channel2_avg - - img_scaled_padded[h_start:h_start+img.shape[0], w_start:w_start+img.shape[1],:] = img[:,:,:] - label_scaled_padded[h_start:h_start+img.shape[0], w_start:w_start+img.shape[1],:] = label[:,:,:] - - - if type_shift=="xpos": - img_dis = img_scaled_padded[h_start:h_start+img.shape[0],2*w_start:2*w_start+img.shape[1],:] - label_dis = label_scaled_padded[h_start:h_start+img.shape[0],2*w_start:2*w_start+img.shape[1],:] - elif type_shift=="xmin": - img_dis = img_scaled_padded[h_start:h_start+img.shape[0],:img.shape[1],:] - label_dis = label_scaled_padded[h_start:h_start+img.shape[0],:img.shape[1],:] - elif type_shift=="ypos": - img_dis = img_scaled_padded[2*h_start:2*h_start+img.shape[0],w_start:w_start+img.shape[1],:] - label_dis = label_scaled_padded[2*h_start:2*h_start+img.shape[0],w_start:w_start+img.shape[1],:] - elif type_shift=="ymin": - img_dis = img_scaled_padded[:img.shape[0],w_start:w_start+img.shape[1],:] - label_dis = label_scaled_padded[:img.shape[0],w_start:w_start+img.shape[1],:] - elif type_shift=="xypos": - img_dis = img_scaled_padded[2*h_start:2*h_start+img.shape[0],2*w_start:2*w_start+img.shape[1],:] - label_dis = label_scaled_padded[2*h_start:2*h_start+img.shape[0],2*w_start:2*w_start+img.shape[1],:] - elif type_shift=="xymin": - img_dis = img_scaled_padded[:img.shape[0],:img.shape[1],:] - label_dis = label_scaled_padded[:img.shape[0],:img.shape[1],:] - return img_dis, label_dis - -def scale_image_for_no_patch(img, label, scale): - h_n = int(img.shape[0]*scale) - w_n = int(img.shape[1]*scale) - - channel0_avg = int( np.mean(img[:,:,0]) ) - channel1_avg = int( np.mean(img[:,:,1]) ) - channel2_avg = int( np.mean(img[:,:,2]) ) - - h_diff = img.shape[0] - h_n - w_diff = img.shape[1] - w_n - - h_start = int(h_diff / 2.) - w_start = int(w_diff / 2.) 
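# [Editor's comment] This padding logic appears to assume scale <= 1.0: for
# scale > 1.0, h_diff/w_diff and thus h_start/w_start become negative, and the
# window assignment below would raise a shape-mismatch error; upscaling would
# require cropping instead of padding.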
- - img_res = resize_image(img, h_n, w_n) - label_res = resize_image(label, h_n, w_n) - - img_scaled_padded = np.copy(img) - - label_scaled_padded = np.zeros(label.shape) - - img_scaled_padded[:,:,0] = channel0_avg - img_scaled_padded[:,:,1] = channel1_avg - img_scaled_padded[:,:,2] = channel2_avg - - img_scaled_padded[h_start:h_start+h_n, w_start:w_start+w_n,:] = img_res[:,:,:] - label_scaled_padded[h_start:h_start+h_n, w_start:w_start+w_n,:] = label_res[:,:,:] - - return img_scaled_padded, label_scaled_padded - - -def return_number_of_total_training_data(path_classes): - sub_classes = os.listdir(path_classes) - n_tot = 0 - for sub_c in sub_classes: - sub_files = os.listdir(os.path.join(path_classes,sub_c)) - n_tot = n_tot + len(sub_files) - return n_tot - - -def do_brightening(img, factor): - img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - im = Image.fromarray(img_rgb) - enhancer = ImageEnhance.Brightness(im) - out_img = enhancer.enhance(factor) - out_img = out_img.convert('RGB') - opencv_img = np.array(out_img) - opencv_img = opencv_img[:,:,::-1].copy() - return opencv_img - - -def bluring(img_in, kind): - if kind == 'gauss': - img_blur = cv2.GaussianBlur(img_in, (5, 5), 0) - elif kind == "median": - img_blur = cv2.medianBlur(img_in, 5) - elif kind == 'blur': - img_blur = cv2.blur(img_in, (5, 5)) - else: - raise ValueError("kind must be 'gauss', 'median' or 'blur'") - return img_blur - - -# TODO: document where this is from -def elastic_transform(image, alpha, sigma, seedj, random_state=None): - """Elastic deformation of images as described in [Simard2003]_. - .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for - Convolutional Neural Networks applied to Visual Document Analysis", in - Proc. of the International Conference on Document Analysis and - Recognition, 2003. - """ - if random_state is None: - random_state = np.random.RandomState(seedj) - - shape = image.shape - dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha - dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha - dz = np.zeros_like(dx) - - x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2])) - indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1)) - - distorted_image = map_coordinates(image, indices, order=1, mode='reflect') - return distorted_image.reshape(image.shape) - - -# TODO: Use one of the utils/rotate.py functions for this -def rotation_90(img): - img_rot = np.zeros((img.shape[1], img.shape[0], img.shape[2])) - img_rot[:, :, 0] = img[:, :, 0].T - img_rot[:, :, 1] = img[:, :, 1].T - img_rot[:, :, 2] = img[:, :, 2].T - return img_rot - - -# TODO: document where this is from -# TODO: Use one of the utils/rotate.py functions for this -def rotatedRectWithMaxArea(w, h, angle): - """ - Given a rectangle of size wxh that has been rotated by 'angle' (in - radians), computes the width and height of the largest possible - axis-aligned rectangle (maximal area) within the rotated rectangle. - """ - if w <= 0 or h <= 0: - return 0, 0 - - width_is_longer = w >= h - side_long, side_short = (w, h) if width_is_longer else (h, w) - - # since the solutions for angle, -angle and 180-angle are all the same, - # it suffices to look at the first quadrant and the absolute values of sin, cos: - sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle)) - if side_short <= 2.
* sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10: - # half constrained case: two crop corners touch the longer side, - # the other two corners are on the mid-line parallel to the longer line - x = 0.5 * side_short - wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a) - else: - # fully constrained case: crop touches all 4 sides - cos_2a = cos_a * cos_a - sin_a * sin_a - wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a - - return wr, hr - - -# TODO: Use one of the utils/rotate.py functions for this -def rotate_max_area(image, rotated, rotated_label, angle): - """ image: cv2 image matrix object - angle: in degree - """ - wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], - math.radians(angle)) - h, w, _ = rotated.shape - y1 = h // 2 - int(hr / 2) - y2 = y1 + int(hr) - x1 = w // 2 - int(wr / 2) - x2 = x1 + int(wr) - return rotated[y1:y2, x1:x2], rotated_label[y1:y2, x1:x2] - -# TODO: Use one of the utils/rotate.py functions for this -def rotate_max_area_single_image(image, rotated, angle): - """ image: cv2 image matrix object - angle: in degree - """ - wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], - math.radians(angle)) - h, w, _ = rotated.shape - y1 = h // 2 - int(hr / 2) - y2 = y1 + int(hr) - x1 = w // 2 - int(wr / 2) - x2 = x1 + int(wr) - return rotated[y1:y2, x1:x2] - -# TODO: Use one of the utils/rotate.py functions for this -def rotation_not_90_func(img, label, thetha): - rotated = imutils.rotate(img, thetha) - rotated_label = imutils.rotate(label, thetha) - return rotate_max_area(img, rotated, rotated_label, thetha) - - -# TODO: Use one of the utils/rotate.py functions for this -def rotation_not_90_func_single_image(img, thetha): - rotated = imutils.rotate(img, thetha) - return rotate_max_area_single_image(img, rotated, thetha) - - -def color_images(seg, n_classes): - ann_u = range(n_classes) - if len(np.shape(seg)) == 3: - seg = seg[:, :, 0] - - seg_img = np.zeros((np.shape(seg)[0], np.shape(seg)[1], 3)).astype(float) - colors = sns.color_palette("hls", n_classes) - - for c in ann_u: - c = int(c) - segl = (seg == c) - seg_img[:, :, 0] += segl * (colors[c][0]) - seg_img[:, :, 1] += segl * (colors[c][1]) - seg_img[:, :, 2] += segl * (colors[c][2]) - return seg_img - - -# TODO: use resize_image from utils -def resize_image(seg_in, input_height, input_width): - return cv2.resize(seg_in, (input_width, input_height), interpolation=cv2.INTER_NEAREST) - - -def get_one_hot(seg, input_height, input_width, n_classes): - seg = seg[:, :, 0] - seg_f = np.zeros((input_height, input_width, n_classes)) - for j in range(n_classes): - seg_f[:, :, j] = (seg == j).astype(int) - return seg_f - - -# TODO: document where this is from -def IoU(Yi, y_predi): - ## mean Intersection over Union - ## Mean IoU = TP/(FN + TP + FP) - - IoUs = [] - classes_true = np.unique(Yi) - for c in classes_true: - TP = np.sum((Yi == c) & (y_predi == c)) - FP = np.sum((Yi != c) & (y_predi == c)) - FN = np.sum((Yi == c) & (y_predi != c)) - IoU = TP / float(TP + FP + FN) - #print("class {:02.0f}: #TP={:6.0f}, #FP={:6.0f}, #FN={:5.0f}, IoU={:4.3f}".format(c, TP, FP, FN, IoU)) - IoUs.append(IoU) - mIoU = np.mean(IoUs) - #print("_________________") - #print("Mean IoU: {:4.3f}".format(mIoU)) - return mIoU - -def generate_arrays_from_folder_reading_order(classes_file_dir, modal_dir, n_batch, height, width, n_classes, thetha, augmentation=False): - all_labels_files = os.listdir(classes_file_dir) - ret_x= np.zeros((n_batch, height, width, 
3))#.astype(np.int16) - ret_y= np.zeros((n_batch, n_classes)).astype(np.int16) - batchcount = 0 - while True: - for i in all_labels_files: - file_name = os.path.splitext(i)[0] - img = cv2.imread(os.path.join(modal_dir,file_name+'.png')) - - label_class = int( np.load(os.path.join(classes_file_dir,i)) ) - - ret_x[batchcount, :,:,0] = img[:,:,0]/3.0 - ret_x[batchcount, :,:,2] = img[:,:,2]/3.0 - ret_x[batchcount, :,:,1] = img[:,:,1]/5.0 - - ret_y[batchcount, :] = label_class - batchcount+=1 - if batchcount>=n_batch: - yield ret_x, ret_y - ret_x= np.zeros((n_batch, height, width, 3))#.astype(np.int16) - ret_y= np.zeros((n_batch, n_classes)).astype(np.int16) - batchcount = 0 - - if augmentation: - for thetha_i in thetha: - img_rot = rotation_not_90_func_single_image(img, thetha_i) - - img_rot = resize_image(img_rot, height, width) - - ret_x[batchcount, :,:,0] = img_rot[:,:,0]/3.0 - ret_x[batchcount, :,:,2] = img_rot[:,:,2]/3.0 - ret_x[batchcount, :,:,1] = img_rot[:,:,1]/5.0 - - ret_y[batchcount, :] = label_class - batchcount+=1 - if batchcount>=n_batch: - yield ret_x, ret_y - ret_x= np.zeros((n_batch, height, width, 3))#.astype(np.int16) - ret_y= np.zeros((n_batch, n_classes)).astype(np.int16) - batchcount = 0 - - -# TODO: Use otsu_copy from utils -def otsu_copy(img): - img_r = np.zeros(img.shape) - img1 = img[:, :, 0] - img2 = img[:, :, 1] - img3 = img[:, :, 2] - _, threshold1 = cv2.threshold(img1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) - _, threshold2 = cv2.threshold(img2, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) - _, threshold3 = cv2.threshold(img3, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) - img_r[:, :, 0] = threshold1 - img_r[:, :, 1] = threshold1 - img_r[:, :, 2] = threshold1 - return img_r - - -def get_patches(img, label, height, width): - if img.shape[0] < height or img.shape[1] < width: - img, label = do_padding(img, label, height, width) - - img_h = img.shape[0] - img_w = img.shape[1] - - nxf = img_w / float(width) - nyf = img_h / float(height) - - if nxf > int(nxf): - nxf = int(nxf) + 1 - if nyf > int(nyf): - nyf = int(nyf) + 1 - - nxf = int(nxf) - nyf = int(nyf) - - for i in range(nxf): - for j in range(nyf): - index_x_d = i * width - index_x_u = (i + 1) * width - - index_y_d = j * height - index_y_u = (j + 1) * height - - if index_x_u > img_w: - index_x_u = img_w - index_x_d = img_w - width - if index_y_u > img_h: - index_y_u = img_h - index_y_d = img_h - height - - img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] - label_patch = label[index_y_d:index_y_u, index_x_d:index_x_u, :] - - yield img_patch, label_patch - - -def do_padding_with_color(img, padding_color='black'): - index_start_h = 4 - index_start_w = 4 - - img_padded = np.zeros((img.shape[0] + 2*index_start_h, img.shape[1]+ 2*index_start_w, img.shape[2])) - if padding_color == 'white': - img_padded += 255 - img_padded[index_start_h: index_start_h + img.shape[0], index_start_w: index_start_w + img.shape[1], :] = img[:, :, :] - - return img_padded.astype(float) - - -def do_degrading(img, scale): - img_org_h = img.shape[0] - img_org_w = img.shape[1] - - img_res = resize_image(img, int(img_org_h * scale), int(img_org_w * scale)) - - return resize_image(img_res, img_org_h, img_org_w) - -# TODO: How is this different from do_padding_black? 
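# [Editor's comment] Regarding the TODO above: unlike do_padding_with_color,
# which pads the *image* with a constant white or black border and returns
# float, do_padding_label pads with zeros (the background class) and keeps an
# integer dtype, as appropriate for label maps.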
-def do_padding_label(img): - img_org_h = img.shape[0] - img_org_w = img.shape[1] - - index_start_h = 4 - index_start_w = 4 - - img_padded = np.zeros((img.shape[0] + 2*index_start_h, img.shape[1] + 2*index_start_w, img.shape[2])) - img_padded[index_start_h: index_start_h + img.shape[0], index_start_w: index_start_w + img.shape[1], :] = img[:, :, :] - - return img_padded.astype(np.int16) - -def do_padding(img, label, height, width): - height_new=img.shape[0] - width_new=img.shape[1] - - h_start = 0 - w_start = 0 - - if img.shape[0] < height: - h_start = int(abs(height - img.shape[0]) / 2.) - height_new = height - - if img.shape[1] < width: - w_start = int(abs(width - img.shape[1]) / 2.) - width_new = width - - img_new = np.ones((height_new, width_new, img.shape[2])).astype(float) * 255 - label_new = np.zeros((height_new, width_new, label.shape[2])).astype(float) - - img_new[h_start:h_start + img.shape[0], w_start:w_start + img.shape[1], :] = np.copy(img[:, :, :]) - label_new[h_start:h_start + label.shape[0], w_start:w_start + label.shape[1], :] = np.copy(label[:, :, :]) - - return img_new,label_new - - -def get_patches_num_scale_new(img, label, height, width, scaler=1.0): - img = resize_image(img, int(img.shape[0] * scaler), int(img.shape[1] * scaler)) - label = resize_image(label, int(label.shape[0] * scaler), int(label.shape[1] * scaler)) - - if img.shape[0] < height or img.shape[1] < width: - img, label = do_padding(img, label, height, width) - - img_h = img.shape[0] - img_w = img.shape[1] - - height_scale = int(height * 1) - width_scale = int(width * 1) - - nxf = img_w / float(width_scale) - nyf = img_h / float(height_scale) - - if nxf > int(nxf): - nxf = int(nxf) + 1 - if nyf > int(nyf): - nyf = int(nyf) + 1 - - nxf = int(nxf) - nyf = int(nyf) - - for i in range(nxf): - for j in range(nyf): - index_x_d = i * width_scale - index_x_u = (i + 1) * width_scale - - index_y_d = j * height_scale - index_y_u = (j + 1) * height_scale - - if index_x_u > img_w: - index_x_u = img_w - index_x_d = img_w - width_scale - if index_y_u > img_h: - index_y_u = img_h - index_y_d = img_h - height_scale - - img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] - label_patch = label[index_y_d:index_y_u, index_x_d:index_x_u, :] - - yield img_patch, label_patch - - -def preprocess_imgs(config, - dir_img, - dir_lab, - logger=None, - **kwargs): - if logger is None: - logger = getLogger('') - - # make a copy for this run - config = dict(config) - # add derived keys not part of config - if config.get('dir_rgb_backgrounds', None): - config['list_all_possible_background_images'] = \ - os.listdir(config['dir_rgb_backgrounds']) - if config.get('dir_rgb_foregrounds', None): - config['list_all_possible_foreground_rgbs'] = \ - os.listdir(config['dir_rgb_foregrounds']) - # override keys from call - config.update(kwargs) - - imgs_list = list(sorted(os.listdir(dir_img))) - labs_list = list(sorted(os.listdir(dir_lab))) - - seed = random.getstate() - random.shuffle(imgs_list) - random.setstate(seed) - random.shuffle(labs_list) - - # labs_list not used because stem matching more robust - for img, lab in zip(imgs_list, labs_list): - img_name = os.path.splitext(img)[0] - img = cv2.imread(os.path.join(dir_img, img)) - if config['task'] in ["segmentation", "binarization"]: - # assert lab == img_name + '.png' - lab = cv2.imread(os.path.join(dir_lab, img_name + '.png')) - elif config['task'] == "enhancement": - lab = cv2.imread(os.path.join(dir_lab, img)) - elif config['task'] == "cnn-rnn-ocr": - # assert lab == 'img_name + 
-def preprocess_img(img,
-                   img_name,
-                   lab,
-                   input_height=None,
-                   input_width=None,
-                   augmentation=False,
-                   flip_aug=False,
-                   flip_index=None,
-                   blur_aug=False,
-                   blur_k=None,
-                   padding_white=False,
-                   padding_black=False,
-                   scaling=False,
-                   scaling_bluring=False,
-                   scaling_brightness=False,
-                   scaling_binarization=False,
-                   scaling_flip=False,
-                   scales=None,
-                   shifting=False,
-                   degrading=False,
-                   degrade_scales=None,
-                   brightening=False,
-                   brightness=None,
-                   binarization=False,
-                   dir_img_bin=None,
-                   add_red_textlines=False,
-                   adding_rgb_background=False,
-                   dir_rgb_backgrounds=None,
-                   adding_rgb_foreground=False,
-                   dir_rgb_foregrounds=None,
-                   number_of_backgrounds_per_image=None,
-                   channels_shuffling=False,
-                   shuffle_indexes=None,
-                   rotation=False,
-                   rotation_not_90=False,
-                   thetha=None,
-                   patches=False,
-                   list_all_possible_background_images=None,
-                   list_all_possible_foreground_rgbs=None,
-                   **kwargs,
-):
-    if not patches:
-        yield img, lab
-        if augmentation:
-            if flip_aug:
-                for f_i in flip_index:
-                    yield cv2.flip(img, f_i), cv2.flip(lab, f_i)
-            if blur_aug:
-                for blur_i in blur_k:
-                    yield bluring(img, blur_i), lab
-            if brightening:
-                for factor in brightness:
-                    yield do_brightening(img, factor), lab
-            if binarization:
-                if dir_img_bin:
-                    img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png')
-                else:
-                    img_bin_corr = otsu_copy(img)
-                yield img_bin_corr, lab
-            if degrading:
-                for degrade_scale_ind in degrade_scales:
-                    yield do_degrading(img, degrade_scale_ind), lab
-            if rotation_not_90:
-                for thetha_i in thetha:
-                    yield rotation_not_90_func(img, lab, thetha_i)
-            if channels_shuffling:
-                for shuffle_index in shuffle_indexes:
-                    yield return_shuffled_channels(img, shuffle_index), lab
-            if scaling:
-                for sc_ind in scales:
-                    yield scale_image_for_no_patch(img, lab, sc_ind)
-            if shifting:
-                shift_types = ['xpos', 'xmin', 'ypos', 'ymin', 'xypos', 'xymin']
-                for st_ind in shift_types:
-                    yield shift_image_and_label(img, lab, st_ind)
-            if adding_rgb_background:
-                img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png')
-                for i_n in range(number_of_backgrounds_per_image):
-                    background_image_chosen_name = random.choice(list_all_possible_background_images)
-                    img_rgb_background_chosen = \
-                        cv2.imread(dir_rgb_backgrounds + '/' + background_image_chosen_name)
-                    img_with_overlayed_background = \
-                        return_binary_image_with_given_rgb_background(
-                            img_bin_corr, img_rgb_background_chosen)
-                    yield img_with_overlayed_background, lab
-            if adding_rgb_foreground:
-                img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png')
-                for i_n in range(number_of_backgrounds_per_image):
-                    background_image_chosen_name = random.choice(list_all_possible_background_images)
-                    foreground_rgb_chosen_name = random.choice(list_all_possible_foreground_rgbs)
-                    img_rgb_background_chosen = \
-                        cv2.imread(dir_rgb_backgrounds + '/' + background_image_chosen_name)
-                    foreground_rgb_chosen = \
-                        np.load(dir_rgb_foregrounds + '/' + foreground_rgb_chosen_name)
-                    img_with_overlayed_background = \
-                        return_binary_image_with_given_rgb_background_and_given_foreground_rgb(
-                            img_bin_corr, img_rgb_background_chosen, foreground_rgb_chosen)
-                    yield img_with_overlayed_background, lab
-            if add_red_textlines:
-                img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png')
-                yield return_image_with_red_elements(img, img_bin_corr), lab
-    else:
-        yield from get_patches(img,
-                               lab,
-                               input_height,
-                               input_width)
-        if augmentation:
-            if rotation:
-                yield from get_patches(rotation_90(img),
-                                       rotation_90(lab),
-                                       input_height,
-                                       input_width)
-            if rotation_not_90:
-                for thetha_i in thetha:
-                    img_max_rotated, label_max_rotated = \
-                        rotation_not_90_func(img, lab, thetha_i)
-                    yield from get_patches(img_max_rotated,
-                                           label_max_rotated,
-                                           input_height,
-                                           input_width)
-            if channels_shuffling:
-                for shuffle_index in shuffle_indexes:
-                    img_shuffled = \
-                        return_shuffled_channels(img, shuffle_index)
-                    yield from get_patches(img_shuffled,
-                                           lab,
-                                           input_height,
-                                           input_width)
-            if adding_rgb_background:
-                img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png')
-                for i_n in range(number_of_backgrounds_per_image):
-                    background_image_chosen_name = random.choice(list_all_possible_background_images)
-                    img_rgb_background_chosen = \
-                        cv2.imread(dir_rgb_backgrounds + '/' + background_image_chosen_name)
-                    img_with_overlayed_background = \
-                        return_binary_image_with_given_rgb_background(
-                            img_bin_corr, img_rgb_background_chosen)
-                    yield from get_patches(img_with_overlayed_background,
-                                           lab,
-                                           input_height,
-                                           input_width)
-            if adding_rgb_foreground:
-                img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png')
-                for i_n in range(number_of_backgrounds_per_image):
-                    background_image_chosen_name = random.choice(list_all_possible_background_images)
-                    foreground_rgb_chosen_name = random.choice(list_all_possible_foreground_rgbs)
-                    img_rgb_background_chosen = \
-                        cv2.imread(dir_rgb_backgrounds + '/' + background_image_chosen_name)
-                    foreground_rgb_chosen = \
-                        np.load(dir_rgb_foregrounds + '/' + foreground_rgb_chosen_name)
-                    img_with_overlayed_background = \
-                        return_binary_image_with_given_rgb_background_and_given_foreground_rgb(
-                            img_bin_corr, img_rgb_background_chosen, foreground_rgb_chosen)
-                    yield from get_patches(img_with_overlayed_background,
-                                           lab,
-                                           input_height,
-                                           input_width)
-            if add_red_textlines:
-                img_bin_corr = cv2.imread(os.path.join(dir_img_bin, img_name + '.png'))
-                img_red_context = \
-                    return_image_with_red_elements(img, img_bin_corr)
-                yield from get_patches(img_red_context,
-                                       lab,
-                                       input_height,
-                                       input_width)
-            if flip_aug:
-                for f_i in flip_index:
-                    yield from get_patches(cv2.flip(img, f_i),
-                                           cv2.flip(lab, f_i),
-                                           input_height,
-                                           input_width)
-            if blur_aug:
-                for blur_i in blur_k:
-                    yield from get_patches(bluring(img, blur_i),
-                                           lab,
-                                           input_height,
-                                           input_width)
-            if padding_black:
-                yield from get_patches(do_padding_with_color(img, 'black'),
-                                       do_padding_label(lab),
-                                       input_height,
-                                       input_width)
-            if padding_white:
-                yield from get_patches(do_padding_with_color(img, 'white'),
-                                       do_padding_label(lab),
-                                       input_height,
-                                       input_width)
-            if brightening:
-                for factor in brightness:
-                    yield from get_patches(do_brightening(img, factor),
-                                           lab,
-                                           input_height,
-                                           input_width)
-            if scaling:
-                for sc_ind in scales:
-                    yield from get_patches_num_scale_new(img,
-                                                         lab,
-                                                         input_height,
-                                                         input_width,
-                                                         scaler=sc_ind)
-            if degrading:
-                for degrade_scale_ind in degrade_scales:
-                    img_deg = \
-                        do_degrading(img, degrade_scale_ind)
-                    yield from get_patches(img_deg,
-                                           lab,
-                                           input_height,
-                                           input_width)
-            if binarization:
-                if dir_img_bin:
-                    img_bin_corr = cv2.imread(os.path.join(dir_img_bin, img_name + '.png'))
-                else:
-                    img_bin_corr = otsu_copy(img)
-                yield from get_patches(img_bin_corr,
-                                       lab,
-                                       input_height,
-                                       input_width)
-            if scaling_brightness:
-                for sc_ind in scales:
-                    for factor in brightness:
-                        img_bright = do_brightening(img, factor)
-                        yield from get_patches_num_scale_new(img_bright,
-                                                             lab,
-                                                             input_height,
-                                                             input_width,
-                                                             scaler=sc_ind)
-            if scaling_bluring:
-                for sc_ind in scales:
-                    for blur_i in blur_k:
-                        img_blur = bluring(img, blur_i)
-                        yield from get_patches_num_scale_new(img_blur,
-                                                             lab,
-                                                             input_height,
-                                                             input_width,
-                                                             scaler=sc_ind)
-            if scaling_binarization:
-                for sc_ind in scales:
-                    img_bin = otsu_copy(img)
-                    yield from get_patches_num_scale_new(img_bin,
-                                                         lab,
-                                                         input_height,
-                                                         input_width,
-                                                         scaler=sc_ind)
-            if scaling_flip:
-                for sc_ind in scales:
-                    for f_i in flip_index:
-                        yield from get_patches_num_scale_new(cv2.flip(img, f_i),
-                                                             cv2.flip(lab, f_i),
-                                                             input_height,
-                                                             input_width,
-                                                             scaler=sc_ind)
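Aside: since preprocess_img and preprocess_imgs are plain generators of (image, label) pairs, training code typically wraps them for batching. A hedged sketch of one common way to do that with tf.data; the output shapes and the partial() wiring here are assumptions for illustration, not taken from this file:

    import functools
    import tensorflow as tf

    # hypothetical wiring; preprocess_imgs yields (image, label) numpy pairs
    gen = functools.partial(preprocess_imgs, config, dir_img, dir_lab)
    dataset = tf.data.Dataset.from_generator(
        gen,
        output_signature=(
            tf.TensorSpec(shape=(None, None, 3), dtype=tf.float32),  # image
            tf.TensorSpec(shape=(None, None, 3), dtype=tf.float32),  # label
        )).batch(8).prefetch(tf.data.AUTOTUNE)
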
-
-def preprocess_img_ocr(
-        img,
-        img_name,
-        lab,
-        char_to_num=None,
-        padding_token=-1,
-        max_len=500,
-        n_batch=1,
-        input_height=None,
-        input_width=None,
-        augmentation=False,
-        color_padding_rotation=None,
-        thetha_padd=None,
-        padd_colors=None,
-        rotation_not_90=None,
-        thetha=None,
-        padding_white=None,
-        white_padds=None,
-        degrading=False,
-        bin_deg=None,
-        degrade_scales=None,
-        blur_aug=False,
-        blur_k=None,
-        brightening=False,
-        brightness=None,
-        binarization=False,
-        image_inversion=False,
-        channels_shuffling=False,
-        shuffle_indexes=None,
-        white_noise_strap=False,
-        textline_skewing=False,
-        textline_skewing_bin=False,
-        skewing_amplitudes=None,
-        textline_left_in_depth=False,
-        textline_left_in_depth_bin=False,
-        textline_right_in_depth=False,
-        textline_right_in_depth_bin=False,
-        textline_up_in_depth=False,
-        textline_up_in_depth_bin=False,
-        textline_down_in_depth=False,
-        textline_down_in_depth_bin=False,
-        pepper_aug=False,
-        pepper_bin_aug=False,
-        pepper_indexes=None,
-        dir_img_bin=None,
-        add_red_textlines=False,
-        adding_rgb_background=False,
-        dir_rgb_backgrounds=None,
-        adding_rgb_foreground=False,
-        dir_rgb_foregrounds=None,
-        number_of_backgrounds_per_image=None,
-        list_all_possible_background_images=None,
-        list_all_possible_foreground_rgbs=None,
-        **kwargs
-):
-    def scale_image(img):
-        return scale_padd_image_for_ocr(img, input_height, input_width).astype(np.float32) / 255.
-
-    #lab = vectorize_label(lab, char_to_num, padding_token, max_len)
-    # now padded at Dataset.padded_batch
-    lab = char_to_num(tf.strings.unicode_split(lab, input_encoding="UTF-8"))
-    yield scale_image(img), lab
-    #to_yield = {"image": ret_x, "label": ret_y}
-
-    if dir_img_bin:
-        img_bin_corr = cv2.imread(os.path.join(dir_img_bin, img_name + '.png'))
-    else:
-        img_bin_corr = None
-
-    if not augmentation:
-        return
-
-    if color_padding_rotation:
-        for thetha_ind in thetha_padd:
-            for padd_col in padd_colors:
-                img_pad = do_padding_for_ocr(img, 1.2, padd_col)
-                img_rot = rotation_not_90_func_single_image(img_pad, thetha_ind)
-                yield scale_image(img_rot), lab
-    if rotation_not_90:
-        for thetha_ind in thetha:
-            img_rot = rotation_not_90_func_single_image(img, thetha_ind)
-            yield scale_image(img_rot), lab
-    if blur_aug:
-        for blur_type in blur_k:
-            img_blur = bluring(img, blur_type)
-            yield scale_image(img_blur), lab
-    if degrading:
-        for deg_scale_ind in degrade_scales:
-            img_deg = do_degrading(img, deg_scale_ind)
-            yield scale_image(img_deg), lab
-    if bin_deg:
-        for deg_scale_ind in degrade_scales:
-            img_deg = do_degrading(img_bin_corr, deg_scale_ind)
-            yield scale_image(img_deg), lab
-    if brightening:
-        for bright_scale_ind in brightness:
-            img_bright = do_brightening(img, bright_scale_ind)
-            yield scale_image(img_bright), lab
-    if padding_white:
-        for padding_size in white_padds:
-            for padd_col in padd_colors:
-                img_pad = do_padding_for_ocr(img, padding_size, padd_col)
-                yield scale_image(img_pad), lab
-    if adding_rgb_foreground:
-        for i_n in range(number_of_backgrounds_per_image):
-            background_image_chosen_name = random.choice(list_all_possible_background_images)
-            foreground_rgb_chosen_name = random.choice(list_all_possible_foreground_rgbs)
-
-            img_rgb_background_chosen = \
-                cv2.imread(dir_rgb_backgrounds + '/' + background_image_chosen_name)
-            foreground_rgb_chosen = \
-                np.load(dir_rgb_foregrounds + '/' + foreground_rgb_chosen_name)
-
-            img_fg = \
-                return_binary_image_with_given_rgb_background_and_given_foreground_rgb(
-                    img_bin_corr, img_rgb_background_chosen, foreground_rgb_chosen)
-            yield scale_image(img_fg), lab
-    if adding_rgb_background:
-        for i_n in range(number_of_backgrounds_per_image):
-            background_image_chosen_name = random.choice(list_all_possible_background_images)
-            img_rgb_background_chosen = \
-                cv2.imread(dir_rgb_backgrounds + '/' + background_image_chosen_name)
-            img_bg = \
-                return_binary_image_with_given_rgb_background(img_bin_corr, img_rgb_background_chosen)
-            yield scale_image(img_bg), lab
-    if binarization:
-        yield scale_image(img_bin_corr), lab
-    if image_inversion:
-        img_inv = invert_image(img_bin_corr)
-        yield scale_image(img_inv), lab
-    if channels_shuffling:
-        for shuffle_index in shuffle_indexes:
-            img_shuf = return_shuffled_channels(img, shuffle_index)
-            yield scale_image(img_shuf), lab
-    if add_red_textlines:
-        img_red = return_image_with_red_elements(img, img_bin_corr)
-        yield scale_image(img_red), lab
-    if white_noise_strap:
-        img_noisy = return_image_with_strapped_white_noises(img)
-        yield scale_image(img_noisy), lab
-    if textline_skewing:
-        for des_scale_ind in skewing_amplitudes:
-            img_rot = do_deskewing(img, des_scale_ind)
-            yield scale_image(img_rot), lab
-    if textline_skewing_bin:
-        for des_scale_ind in skewing_amplitudes:
-            img_rot = do_deskewing(img_bin_corr, des_scale_ind)
-            yield scale_image(img_rot), lab
-    if textline_left_in_depth:
-        img_warp = do_direction_in_depth(img, 'left')
-        yield scale_image(img_warp), lab
-    if textline_left_in_depth_bin:
-        img_warp = do_direction_in_depth(img_bin_corr, 'left')
-        yield scale_image(img_warp), lab
-    if textline_right_in_depth:
-        img_warp = do_direction_in_depth(img, 'right')
-        yield scale_image(img_warp), lab
-    if textline_right_in_depth_bin:
-        img_warp = do_direction_in_depth(img_bin_corr, 'right')
-        yield scale_image(img_warp), lab
-    if textline_up_in_depth:
-        img_warp = do_direction_in_depth(img, 'up')
-        yield scale_image(img_warp), lab
-    if textline_up_in_depth_bin:
-        img_warp = do_direction_in_depth(img_bin_corr, 'up')
-        yield scale_image(img_warp), lab
-    if textline_down_in_depth:
-        img_warp = do_direction_in_depth(img, 'down')
-        yield scale_image(img_warp), lab
-    if textline_down_in_depth_bin:
-        img_warp = do_direction_in_depth(img_bin_corr, 'down')
-        yield scale_image(img_warp), lab
-    if pepper_aug:
-        for pepper_ind in pepper_indexes:
-            img_noisy = add_salt_and_pepper_noise(img, pepper_ind, pepper_ind)
-            yield scale_image(img_noisy), lab
-    if pepper_bin_aug:
-        for pepper_ind in pepper_indexes:
-            img_noisy = add_salt_and_pepper_noise(img_bin_corr, pepper_ind, pepper_ind)
-            yield scale_image(img_noisy), lab
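Aside: for the cnn-rnn-ocr task above, char_to_num is evidently a string-lookup layer: the transcription is split into unicode characters and mapped to integer ids, with padding to max_len deferred to Dataset.padded_batch. A sketch of that encoding with Keras' StringLookup; the vocabulary here is made up:

    import tensorflow as tf

    vocab = list("abcdefghijklmnopqrstuvwxyz ")          # illustrative vocabulary
    char_to_num = tf.keras.layers.StringLookup(vocabulary=vocab)

    label = char_to_num(tf.strings.unicode_split("hello world", input_encoding="UTF-8"))
    # -> int64 tensor of per-character ids, padded later by Dataset.padded_batch
    print(label.numpy())
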
- """ - run_ensembling(in_, out) - diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 47a765c..c5962f8 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1,11 +1,8 @@ -from typing import Iterable, List, Tuple -from logging import getLogger import time import math try: import matplotlib.pyplot as plt - import matplotlib.patches as patches except ImportError: plt = None import numpy as np @@ -13,164 +10,297 @@ from shapely import geometry import cv2 from scipy.signal import find_peaks from scipy.ndimage import gaussian_filter1d -from skimage import morphology from .is_nan import isNaN from .contour import (contours_in_same_horizon, - find_center_of_contours, find_new_features_of_contours, return_contours_of_image, return_parent_contours) +def return_x_start_end_mothers_childs_and_type_of_reading_order( + x_min_hor_some, x_max_hor_some, cy_hor_some, peak_points, cy_hor_diff): -def pairwise(iterable): - # pairwise('ABCDEFG') → AB BC CD DE EF FG + x_start=[] + x_end=[] + kind=[]#if covers 2 and more than 2 columns set it to 1 otherwise 0 + len_sep=[] + y_sep=[] + y_diff=[] + new_main_sep_y=[] - iterator = iter(iterable) - a = next(iterator, None) - - for b in iterator: - yield a, b - a = b - -def return_multicol_separators_x_start_end( - regions_without_separators, peak_points, top, bot, - x_min_hor_some, x_max_hor_some, cy_hor_some, y_min_hor_some, y_max_hor_some): - """ - Analyse which separators overlap multiple column candidates, - and how they overlap each other. - - Ignore separators not spanning multiple columns. - - For the separators to be returned, try to remove or unify them when there - is no region between them (vertically) and their neighbours. - - Arguments: - * the text mask (with all separators suppressed) - * the x column coordinates - * the y start coordinate to consider in total - * the y end coordinate to consider in total - * the x start coordinate of the horizontal separators - * the x end coordinate of the horizontal separators - * the y start coordinate of the horizontal separators - * the y center coordinate of the horizontal separators - * the y end coordinate of the horizontal separators - - Returns: - a tuple of: - * the x start column index of the resulting multi-span separators - * the x end column index of the resulting multi-span separators - * the y start coordinate of the resulting multi-span separators - * the y center coordinate of the resulting multi-span separators - * the y end coordinate of the resulting multi-span separators - """ - - x_start = [0] - x_end = [len(peak_points) - 1] - y_min = [top] - y_mid = [top] - y_max = [top + 2] - indexer = 1 + indexer=0 for i in range(len(x_min_hor_some)): - #print(indexer, "%d:%d" % (x_min_hor_some[i], x_max_hor_some[i]), cy_hor_some[i]) - starting = x_min_hor_some[i] - peak_points - min_start = np.flatnonzero(starting >= 0)[-1] # last left-of - ending = x_max_hor_some[i] - peak_points - max_end = np.flatnonzero(ending <= 0)[0] # first right-of - #print(indexer, "%d:%d" % (min_start, max_end)) + starting=x_min_hor_some[i]-peak_points + starting=starting[starting>=0] + min_start=np.argmin(starting) + ending=peak_points-x_max_hor_some[i] + len_ending_neg=len(ending[ending<=0]) + + ending=ending[ending>0] + max_end=np.argmin(ending)+len_ending_neg if (max_end-min_start)>=2: - # column range of separator spans more than one column candidate + if (max_end-min_start)==(len(peak_points)-1): + new_main_sep_y.append(indexer) + 
diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py
index 47a765c..c5962f8 100644
--- a/src/eynollah/utils/__init__.py
+++ b/src/eynollah/utils/__init__.py
@@ -1,11 +1,8 @@
-from typing import Iterable, List, Tuple
-from logging import getLogger
 import time
 import math
 
 try:
     import matplotlib.pyplot as plt
-    import matplotlib.patches as patches
 except ImportError:
     plt = None
 import numpy as np
@@ -13,164 +10,297 @@ from shapely import geometry
 import cv2
 from scipy.signal import find_peaks
 from scipy.ndimage import gaussian_filter1d
-from skimage import morphology
 
 from .is_nan import isNaN
 from .contour import (contours_in_same_horizon,
-                      find_center_of_contours,
                       find_new_features_of_contours,
                       return_contours_of_image,
                       return_parent_contours)
 
+def return_x_start_end_mothers_childs_and_type_of_reading_order(
+        x_min_hor_some, x_max_hor_some, cy_hor_some, peak_points, cy_hor_diff):
-def pairwise(iterable):
-    # pairwise('ABCDEFG') → AB BC CD DE EF FG
+    x_start=[]
+    x_end=[]
+    kind=[]#if covers 2 and more than 2 columns set it to 1 otherwise 0
+    len_sep=[]
+    y_sep=[]
+    y_diff=[]
+    new_main_sep_y=[]
-    iterator = iter(iterable)
-    a = next(iterator, None)
-
-    for b in iterator:
-        yield a, b
-        a = b
-
-def return_multicol_separators_x_start_end(
-        regions_without_separators, peak_points, top, bot,
-        x_min_hor_some, x_max_hor_some, cy_hor_some, y_min_hor_some, y_max_hor_some):
-    """
-    Analyse which separators overlap multiple column candidates,
-    and how they overlap each other.
-
-    Ignore separators not spanning multiple columns.
-
-    For the separators to be returned, try to remove or unify them when there
-    is no region between them (vertically) and their neighbours.
-
-    Arguments:
-    * the text mask (with all separators suppressed)
-    * the x column coordinates
-    * the y start coordinate to consider in total
-    * the y end coordinate to consider in total
-    * the x start coordinate of the horizontal separators
-    * the x end coordinate of the horizontal separators
-    * the y start coordinate of the horizontal separators
-    * the y center coordinate of the horizontal separators
-    * the y end coordinate of the horizontal separators
-
-    Returns:
-    a tuple of:
-    * the x start column index of the resulting multi-span separators
-    * the x end column index of the resulting multi-span separators
-    * the y start coordinate of the resulting multi-span separators
-    * the y center coordinate of the resulting multi-span separators
-    * the y end coordinate of the resulting multi-span separators
-    """
-
-    x_start = [0]
-    x_end = [len(peak_points) - 1]
-    y_min = [top]
-    y_mid = [top]
-    y_max = [top + 2]
-    indexer = 1
+    indexer=0
     for i in range(len(x_min_hor_some)):
-        #print(indexer, "%d:%d" % (x_min_hor_some[i], x_max_hor_some[i]), cy_hor_some[i])
-        starting = x_min_hor_some[i] - peak_points
-        min_start = np.flatnonzero(starting >= 0)[-1] # last left-of
-        ending = x_max_hor_some[i] - peak_points
-        max_end = np.flatnonzero(ending <= 0)[0] # first right-of
-        #print(indexer, "%d:%d" % (min_start, max_end))
+        starting=x_min_hor_some[i]-peak_points
+        starting=starting[starting>=0]
+        min_start=np.argmin(starting)
+        ending=peak_points-x_max_hor_some[i]
+        len_ending_neg=len(ending[ending<=0])
+
+        ending=ending[ending>0]
+        max_end=np.argmin(ending)+len_ending_neg
 
         if (max_end-min_start)>=2:
-            # column range of separator spans more than one column candidate
+            if (max_end-min_start)==(len(peak_points)-1):
+                new_main_sep_y.append(indexer)
+
            #print((max_end-min_start),len(peak_points),'(max_end-min_start)')
-            y_min.append(y_min_hor_some[i])
-            y_mid.append(cy_hor_some[i])
-            y_max.append(y_max_hor_some[i])
+            y_sep.append(cy_hor_some[i])
+            y_diff.append(cy_hor_diff[i])
             x_end.append(max_end)
-            x_start.append(min_start)
+
+            x_start.append( min_start)
+
+            len_sep.append(max_end-min_start)
+            if max_end==min_start+1:
+                kind.append(0)
+            else:
+                kind.append(1)
+            indexer+=1
+
+    x_start_returned = np.array(x_start, dtype=int)
+    x_end_returned = np.array(x_end, dtype=int)
+    y_sep_returned = np.array(y_sep, dtype=int)
+    y_diff_returned = np.array(y_diff, dtype=int)
+
+    all_args_uniq = contours_in_same_horizon(y_sep_returned)
+    args_to_be_unified=[]
+    y_unified=[]
+    y_diff_unified=[]
+    x_s_unified=[]
+    x_e_unified=[]
+    if len(all_args_uniq)>0:
+        #print('burda')
+        if type(all_args_uniq[0]) is list:
+            for dd in range(len(all_args_uniq)):
+                if len(all_args_uniq[dd])==2:
+                    x_s_same_hor=np.array(x_start_returned)[all_args_uniq[dd]]
+                    x_e_same_hor=np.array(x_end_returned)[all_args_uniq[dd]]
+                    y_sep_same_hor=np.array(y_sep_returned)[all_args_uniq[dd]]
+                    y_diff_same_hor=np.array(y_diff_returned)[all_args_uniq[dd]]
+                    #print('burda2')
+                    if (x_s_same_hor[0]==x_e_same_hor[1]-1 or
+                        x_s_same_hor[1]==x_e_same_hor[0]-1 and
+                        x_s_same_hor[0]!=x_s_same_hor[1] and
+                        x_e_same_hor[0]!=x_e_same_hor[1]):
+                        #print('burda3')
+                        for arg_in in all_args_uniq[dd]:
+                            #print(arg_in,'arg_in')
+                            args_to_be_unified.append(arg_in)
+                        y_selected=np.min(y_sep_same_hor)
+                        y_diff_selected=np.max(y_diff_same_hor)
+                        x_s_selected=np.min(x_s_same_hor)
+                        x_e_selected=np.max(x_e_same_hor)
+
+                        x_s_unified.append(x_s_selected)
+                        x_e_unified.append(x_e_selected)
+                        y_unified.append(y_selected)
+                        y_diff_unified.append(y_diff_selected)
+                        #print(x_s_same_hor,'x_s_same_hor')
+                        #print(x_e_same_hor[:]-1,'x_e_same_hor')
+                        #print('#############################')
+        #print(x_s_unified,'y_selected')
+        #print(x_e_unified,'x_s_selected')
+        #print(y_unified,'x_e_same_hor')
+
+        args_lines_not_unified=list( set(range(len(y_sep_returned)))-set(args_to_be_unified) )
+        #print(args_lines_not_unified,'args_lines_not_unified')
+
+        x_start_returned_not_unified=list( np.array(x_start_returned)[args_lines_not_unified] )
+        x_end_returned_not_unified=list( np.array(x_end_returned)[args_lines_not_unified] )
+        y_sep_returned_not_unified=list (np.array(y_sep_returned)[args_lines_not_unified] )
+        y_diff_returned_not_unified=list (np.array(y_diff_returned)[args_lines_not_unified] )
+
+        for dv in range(len(y_unified)):
+            y_sep_returned_not_unified.append(y_unified[dv])
+            y_diff_returned_not_unified.append(y_diff_unified[dv])
+            x_start_returned_not_unified.append(x_s_unified[dv])
+            x_end_returned_not_unified.append(x_e_unified[dv])
+
+        #print(y_sep_returned,'y_sep_returned')
+        #print(x_start_returned,'x_start_returned')
+        #print(x_end_returned,'x_end_returned')
+
+        x_start_returned = np.array(x_start_returned_not_unified, dtype=int)
+        x_end_returned = np.array(x_end_returned_not_unified, dtype=int)
+        y_sep_returned = np.array(y_sep_returned_not_unified, dtype=int)
+        y_diff_returned = np.array(y_diff_returned_not_unified, dtype=int)
+
+    #print(y_sep_returned,'y_sep_returned2')
+    #print(x_start_returned,'x_start_returned2')
+    #print(x_end_returned,'x_end_returned2')
+    #print(new_main_sep_y,'new_main_sep_y')
+
     #print(x_start,'x_start')
     #print(x_end,'x_end')
+    if len(new_main_sep_y)>0:
-    x_start = np.array(x_start, dtype=int)
-    x_end = np.array(x_end, dtype=int)
-    y_min = np.array(y_min, dtype=int)
-    y_mid = np.array(y_mid, dtype=int)
-    y_max = np.array(y_max, dtype=int)
-    #print(y_mid,'y_mid')
+        min_ys=np.min(y_sep)
+        max_ys=np.max(y_sep)
+
+        y_mains=[]
+        y_mains.append(min_ys)
+        y_mains_sep_ohne_grenzen=[]
+
+        for ii in range(len(new_main_sep_y)):
+            y_mains.append(y_sep[new_main_sep_y[ii]])
+            y_mains_sep_ohne_grenzen.append(y_sep[new_main_sep_y[ii]])
+
+        y_mains.append(max_ys)
+
+        y_mains_sorted=np.sort(y_mains)
+        diff=np.diff(y_mains_sorted)
+        argm=np.argmax(diff)
+
+        y_min_new=y_mains_sorted[argm]
+        y_max_new=y_mains_sorted[argm+1]
+
+        #print(y_min_new,'y_min_new')
+        #print(y_max_new,'y_max_new')
+        #print(y_sep[new_main_sep_y[0]],y_sep,'yseps')
+        x_start=np.array(x_start)
+        x_end=np.array(x_end)
+        kind=np.array(kind)
+        y_sep=np.array(y_sep)
+        if (y_min_new in y_mains_sep_ohne_grenzen and
+            y_max_new in y_mains_sep_ohne_grenzen):
+            x_start=x_start[(y_sep>y_min_new) & (y_sep<y_max_new)]
+            x_end=x_end[(y_sep>y_min_new) & (y_sep<y_max_new)]
+            kind=kind[(y_sep>y_min_new) & (y_sep<y_max_new)]
+            y_sep=y_sep[(y_sep>y_min_new) & (y_sep<y_max_new)]
+        elif (y_min_new in y_mains_sep_ohne_grenzen and
+              y_max_new not in y_mains_sep_ohne_grenzen):
+            x_start=x_start[(y_sep>y_min_new) & (y_sep<=y_max_new)]
+            #print('burda1')
+            x_end=x_end[(y_sep>y_min_new) & (y_sep<=y_max_new)]
+            #print('burda2')
+            kind=kind[(y_sep>y_min_new) & (y_sep<=y_max_new)]
+            y_sep=y_sep[(y_sep>y_min_new) & (y_sep<=y_max_new)]
+        elif (y_min_new not in y_mains_sep_ohne_grenzen and
+              y_max_new in y_mains_sep_ohne_grenzen):
+            x_start=x_start[(y_sep>=y_min_new) & (y_sep<y_max_new)]
+            x_end=x_end[(y_sep>=y_min_new) & (y_sep<y_max_new)]
+            kind=kind[(y_sep>=y_min_new) & (y_sep<y_max_new)]
+            y_sep=y_sep[(y_sep>=y_min_new) & (y_sep<y_max_new)]
+        else:
+            x_start=x_start[(y_sep>=y_min_new) & (y_sep<=y_max_new)]
+            x_end=x_end[(y_sep>=y_min_new) & (y_sep<=y_max_new)]
+            kind=kind[(y_sep>=y_min_new) & (y_sep<=y_max_new)]
+            y_sep=y_sep[(y_sep>=y_min_new) & (y_sep<=y_max_new)]
     #print(x_start,'x_start')
     #print(x_end,'x_end')
+    #print(len_sep)
 
-    # remove redundant separators (with nothing in between)
-    args_emptysep = set()
-    args_ysorted = np.argsort(y_mid)
-    for i in range(len(y_mid)):
-        # find nearest neighbours above with nothing in between
-        prev = (~np.eye(len(y_mid), dtype=bool)[i] &
-                (y_mid[i] >= y_mid) &
-                # complete subsumption:
-                # (x_start[i] >= x_start) &
-                # (x_end[i] <= x_end)
-                # partial overlap
-                (x_start[i] < x_end) &
-                (x_end[i] > x_start)
-                )
-        prev[list(args_emptysep)] = False # but no pair we already saw
-        if not prev.any():
-            continue
-        prev = np.flatnonzero(prev[args_ysorted])
-        j = args_ysorted[prev[-1]]
-        if not np.any(regions_without_separators[y_max[j]: y_min[i],
-                                                 peak_points[min(x_start[i], x_start[j])]:
-                                                 peak_points[max(x_end[i], x_end[j])]]):
-            args_emptysep.add(i)
-            if x_start[j] > x_start[i]:
-                # print(j, "now starts at", x_start[i])
-                x_start[j] = x_start[i]
-            if x_end[j] < x_end[i]:
-                x_end[j] = x_end[i]
-                # print(j, "now ends at", x_end[i])
-            # print(j, i, "%d:%d" % (y_mid[j], y_mid[i]), "%d:%d" % (x_start[i], x_end[i]), "empty prev sep")
-            continue
-        # find nearest neighbours below with nothing in between
-        nExt = (~np.eye(len(y_mid), dtype=bool)[i] &
-                (y_mid[i] <= y_mid) &
-                (x_start[i] >= x_start) &
-                (x_end[i] <= x_end))
-        nExt[list(args_emptysep)] = False # but no pair we already saw
-        if not nExt.any():
-            continue
-        nExt = np.flatnonzero(nExt[args_ysorted])
-        j = args_ysorted[nExt[0]]
-        if not np.any(regions_without_separators[y_max[i]: y_min[j],
-                                                 peak_points[x_start[i]]:
-                                                 peak_points[x_end[i]]]):
-            args_emptysep.add(i)
-            # print(j, i, "%d:%d" % (y_mid[j], y_mid[i]), "%d:%d" % (x_start[i], x_end[i]), "empty next sep")
-    args_to_be_kept = [arg for arg in args_ysorted
-                       if arg not in args_emptysep]
-    x_start = x_start[args_to_be_kept]
-    x_end = x_end[args_to_be_kept]
-    y_min = y_min[args_to_be_kept]
-    y_mid = y_mid[args_to_be_kept]
-    y_max = y_max[args_to_be_kept]
+    deleted=[]
+    for i in range(len(x_start)-1):
+        nodes_i=set(range(x_start[i],x_end[i]+1))
+        for j in range(i+1,len(x_start)):
+            if nodes_i==set(range(x_start[j],x_end[j]+1)):
+                deleted.append(j)
+    #print(np.unique(deleted))
 
-    return (x_start,
-            x_end,
-            y_min,
-            y_mid,
-            y_max)
+    remained_sep_indexes=set(range(len(x_start)))-set(np.unique(deleted) )
+    #print(remained_sep_indexes,'remained_sep_indexes')
+
+    mother=[]#if it has mother
+    child=[]
+    for index_i in remained_sep_indexes:
+        have_mother=0
+        have_child=0
+        nodes_ind=set(range(x_start[index_i],x_end[index_i]+1))
+        for index_j in remained_sep_indexes:
+            nodes_ind_j=set(range(x_start[index_j],x_end[index_j]+1))
+            if nodes_ind<nodes_ind_j:
+                have_mother=1
+            if nodes_ind>nodes_ind_j:
+                have_child=1
+        mother.append(have_mother)
+        child.append(have_child)
 
-def box2rect(box: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
-    return (box[1], box[1] + box[3],
-            box[0], box[0] + box[2])
+    #print(mother,'mother')
+    #print(len(remained_sep_indexes))
+    #print(len(remained_sep_indexes),len(x_start),len(x_end),len(y_sep),'lens')
+    y_lines_without_mother=[]
+    x_start_without_mother=[]
+    x_end_without_mother=[]
 
-def box2slice(box: Tuple[int, int, int, int]) -> Tuple[slice, slice]:
-    return (slice(box[1], box[1] + box[3]),
-            slice(box[0], box[0] + box[2]))
+    y_lines_with_child_without_mother=[]
+    x_start_with_child_without_mother=[]
+    x_end_with_child_without_mother=[]
+
+    mother = np.array(mother)
+    child = np.array(child)
+    #print(mother,'mother')
+    #print(child,'child')
+    remained_sep_indexes = np.array(list(remained_sep_indexes))
+    x_start = np.array(x_start)
+    x_end = np.array(x_end)
+    y_sep = np.array(y_sep)
+
+    if len(remained_sep_indexes)>1:
+        #print(np.array(remained_sep_indexes),'np.array(remained_sep_indexes)')
+        #print(np.array(mother),'mother')
+        remained_sep_indexes_without_mother = remained_sep_indexes[mother==0]
+        remained_sep_indexes_with_child_without_mother = remained_sep_indexes[(mother==0) & (child==1)]
+        #print(remained_sep_indexes_without_mother,'remained_sep_indexes_without_mother')
+        #print(remained_sep_indexes_without_mother,'remained_sep_indexes_without_mother')
+
+        x_end_with_child_without_mother = x_end[remained_sep_indexes_with_child_without_mother]
+        x_start_with_child_without_mother = x_start[remained_sep_indexes_with_child_without_mother]
+        y_lines_with_child_without_mother = y_sep[remained_sep_indexes_with_child_without_mother]
+
+        reading_orther_type=0
+        x_end_without_mother = x_end[remained_sep_indexes_without_mother]
+        x_start_without_mother = x_start[remained_sep_indexes_without_mother]
+        y_lines_without_mother = y_sep[remained_sep_indexes_without_mother]
+
+        if len(remained_sep_indexes_without_mother)>=2:
+            for i in range(len(remained_sep_indexes_without_mother)-1):
+                nodes_i=set(range(x_start[remained_sep_indexes_without_mother[i]],
+                                  x_end[remained_sep_indexes_without_mother[i]]
+                                  # + 1
+                                  ))
+                for j in range(i+1,len(remained_sep_indexes_without_mother)):
+                    nodes_j=set(range(x_start[remained_sep_indexes_without_mother[j]],
+                                      x_end[remained_sep_indexes_without_mother[j]]
+                                      # + 1
+                                      ))
+                    set_diff = nodes_i - nodes_j
+                    if set_diff != nodes_i:
+                        reading_orther_type = 1
+    else:
+        reading_orther_type = 0
+    #print(reading_orther_type,'javab')
+    #print(y_lines_with_child_without_mother,'y_lines_with_child_without_mother')
+    #print(x_start_with_child_without_mother,'x_start_with_child_without_mother')
+    #print(x_end_with_child_without_mother,'x_end_with_hild_without_mother')
+
+    len_sep_with_child = len(child[child==1])
+
+    #print(len_sep_with_child,'len_sep_with_child')
+    there_is_sep_with_child = 0
+    if len_sep_with_child >= 1:
+        there_is_sep_with_child = 1
+    #print(all_args_uniq,'all_args_uniq')
+    #print(args_to_be_unified,'args_to_be_unified')
+
+    return (reading_orther_type,
+            x_start_returned,
+            x_end_returned,
+            y_sep_returned,
+            y_diff_returned,
+            y_lines_without_mother,
+            x_start_without_mother,
+            x_end_without_mother,
+            there_is_sep_with_child,
+            y_lines_with_child_without_mother,
+            x_start_with_child_without_mother,
+            x_end_with_child_without_mother,
+            new_main_sep_y)
 
 def crop_image_inside_box(box, img_org_copy):
-    image_box = img_org_copy[box2slice(box)]
-    return image_box, box2rect(box)
+    image_box = img_org_copy[box[1] : box[1] + box[3], box[0] : box[0] + box[2]]
+    return image_box, [box[1], box[1] + box[3], box[0], box[0] + box[2]]
 
 def otsu_copy_binary(img):
     img_r = np.zeros((img.shape[0], img.shape[1], 3))
@@ -221,20 +351,20 @@ def find_features_of_lines(contours_main):
 
 def boosting_headers_by_longshot_region_segmentation(textregion_pre_p, textregion_pre_np, img_only_text):
     textregion_pre_p_org = np.copy(textregion_pre_p)
     # 4 is drop capitals
-    headers_in_longshot = textregion_pre_np == 2
-    #headers_in_longshot = ((textregion_pre_np==2) |
-    #                       (textregion_pre_np==1))
-    textregion_pre_p[headers_in_longshot &
-                     (textregion_pre_p != 4)] = 2
-    textregion_pre_p[textregion_pre_p == 1] = 0
+    headers_in_longshot = textregion_pre_np[:, :, 0] == 2
+    #headers_in_longshot = ((textregion_pre_np[:,:,0]==2) |
+    #                       (textregion_pre_np[:,:,0]==1))
+    textregion_pre_p[:, :, 0][headers_in_longshot &
+                              (textregion_pre_p[:, :, 0] != 4)] = 2
+    textregion_pre_p[:, :, 0][textregion_pre_p[:, :, 0] == 1] = 0
     # earlier it was so, but by this manner the drop capitals are also deleted
-    # textregion_pre_p[(img_only_text[:,:]==1) &
-    #                  (textregion_pre_p!=7) &
-    #                  (textregion_pre_p!=2)] = 1
-    textregion_pre_p[(img_only_text[:, :] == 1) &
-                     (textregion_pre_p != 7) &
-                     (textregion_pre_p != 4) &
-                     (textregion_pre_p != 2)] = 1
+    # textregion_pre_p[:,:,0][(img_only_text[:,:]==1) &
+    #                         (textregion_pre_p[:,:,0]!=7) &
+    #                         (textregion_pre_p[:,:,0]!=2)] = 1
+    textregion_pre_p[:, :, 0][(img_only_text[:, :] == 1) &
+                              (textregion_pre_p[:, :, 0] != 7) &
+                              (textregion_pre_p[:, :, 0] != 4) &
+                              (textregion_pre_p[:, :, 0] != 2)] = 1
     return textregion_pre_p
 
 def find_num_col_deskew(regions_without_separators, sigma_, multiplier=3.8):
@@ -242,184 +372,124 @@
     z = gaussian_filter1d(regions_without_separators_0, sigma_)
     return np.std(z)
 
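Aside: both variants of find_num_col below implement the same idea: sum the text mask column-wise, smooth the profile with a Gaussian, and treat deep valleys (peaks of the inverted profile) as column gutters. A compact sketch of that pipeline; thresholds here are arbitrary:

    import numpy as np
    from scipy.ndimage import gaussian_filter1d
    from scipy.signal import find_peaks

    def column_gap_candidates(text_mask, sigma=35):
        profile = text_mask.sum(axis=0)                # column-wise ink histogram
        inverted = profile.max() - profile             # valleys become peaks
        smooth = gaussian_filter1d(inverted, sigma)
        gaps, _ = find_peaks(smooth, height=0)
        return gaps                                    # x positions of candidate gutters

    # two text blocks separated by a white gutter around x=100
    mask = np.zeros((50, 200)); mask[:, 10:90] = 1; mask[:, 110:190] = 1
    print(column_gap_candidates(mask))                 # roughly [100]
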
-def find_num_col(
-        regions_without_separators,
-        num_col_classifier,
-        tables,
-        multiplier=3.8,
-        unbalanced=False,
-        vertical_separators=None
-):
-    if not regions_without_separators.any():
-        return 0, []
-    if vertical_separators is None:
-        vertical_separators = np.zeros_like(regions_without_separators)
+def find_num_col(regions_without_separators, num_col_classifier, tables, multiplier=3.8):
     regions_without_separators_0 = regions_without_separators.sum(axis=0)
-    vertical_separators_0 = vertical_separators.sum(axis=0)
-    # fig, (ax1, ax2) = plt.subplots(2, sharex=True)
-    # ax1.imshow(regions_without_separators, aspect="auto")
-    # ax2.plot(regions_without_separators_0)
-    # plt.show()
-    sigma_ = 25 # 70#35
-    meda_n_updown = regions_without_separators_0[::-1]
+    ##plt.plot(regions_without_separators_0)
+    ##plt.show()
+    sigma_ = 35 # 70#35
+    meda_n_updown = regions_without_separators_0[len(regions_without_separators_0) :: -1]
     first_nonzero = next((i for i, x in enumerate(regions_without_separators_0) if x), 0)
     last_nonzero = next((i for i, x in enumerate(meda_n_updown) if x), 0)
     last_nonzero = len(regions_without_separators_0) - last_nonzero
-    last_nonzero = last_nonzero - 50 #- 100
-    first_nonzero = first_nonzero + 50 #+ 200
-    last_offmargin = len(regions_without_separators_0) - 170 #370
-    first_offmargin = 170 #370
-    x = vertical_separators_0
     y = regions_without_separators_0 # [first_nonzero:last_nonzero]
-    y_help = np.pad(y, (10, 10), constant_values=(0, 0))
-    zneg_rev = y.max() - y_help
-    zneg = np.pad(zneg_rev, (10, 10), constant_values=(0, 0))
-    x = gaussian_filter1d(x, sigma_)
+    y_help = np.zeros(len(y) + 20)
+    y_help[10 : len(y) + 10] = y
+    x = np.arange(len(y))
+    zneg_rev = -y_help + np.max(y_help)
+    zneg = np.zeros(len(zneg_rev) + 20)
+    zneg[10 : len(zneg_rev) + 10] = zneg_rev
     z = gaussian_filter1d(y, sigma_)
     zneg = gaussian_filter1d(zneg, sigma_)
-    peaks, _ = find_peaks(z, height=0)
     peaks_neg, _ = find_peaks(zneg, height=0)
-    # _, (ax1, ax2) = plt.subplots(2, sharex=True)
-    # ax1.set_title("z")
-    # ax1.plot(z)
-    # ax1.scatter(peaks, z[peaks])
-    # ax1.axvline(0.06 * len(y), label="first")
-    # ax1.axvline(0.94 * len(y), label="last")
-    # ax1.text(0.06 * len(y), 0, "first", rotation=90)
-    # ax1.text(0.94 * len(y), 0, "last", rotation=90)
-    # ax1.axhline(10, label="minimum")
-    # ax1.text(0, 10, "minimum")
-    # ax2.set_title("zneg")
-    # ax2.plot(zneg)
-    # ax2.scatter(peaks_neg, zneg[peaks_neg])
-    # ax2.axvline(first_nonzero, label="first nonzero")
-    # ax2.axvline(last_nonzero, label="last nonzero")
-    # ax2.text(first_nonzero, 0, "first nonzero", rotation=90)
-    # ax2.text(last_nonzero, 0, "last nonzero", rotation=90)
-    # ax2.axvline(first_offmargin, label="first offmargin")
-    # ax2.axvline(last_offmargin, label="last offmargin")
-    # ax2.text(first_offmargin, 0, "first offmargin", rotation=90)
-    # ax2.text(last_offmargin, 0, "last offmargin", rotation=90)
-    # plt.show()
+    peaks, _ = find_peaks(z, height=0)
     peaks_neg = peaks_neg - 10 - 10
-    # print("raw peaks", peaks)
-    peaks = peaks[(peaks > 0.06 * len(y)) &
-                  (peaks < 0.94 * len(y))]
-    # print("non-marginal peaks", peaks)
-    interest_pos = z[peaks]
-    # print("interest_pos", interest_pos)
-    interest_pos = interest_pos[interest_pos > 10]
-    if not interest_pos.any():
-        return 0, []
+    last_nonzero = last_nonzero - 100
+    first_nonzero = first_nonzero + 200
 
-    # plt.plot(z)
-    # plt.show()
-    #print("raw peaks_neg", peaks_neg)
     peaks_neg = peaks_neg[(peaks_neg > first_nonzero) &
                           (peaks_neg < last_nonzero)]
-    #print("non-zero peaks_neg", peaks_neg)
-    peaks_neg = peaks_neg[(peaks_neg > first_offmargin) &
-                          (peaks_neg < last_offmargin)]
-    #print("non-marginal peaks_neg", peaks_neg)
+    peaks = peaks[(peaks > 0.06 * regions_without_separators.shape[1]) &
+                  (peaks < 0.94 * regions_without_separators.shape[1])]
+    peaks_neg = peaks_neg[(peaks_neg > 370) &
+                          (peaks_neg < (regions_without_separators.shape[1] - 370))]
+    interest_pos = z[peaks]
+    interest_pos = interest_pos[interest_pos > 10]
+    # plt.plot(z)
+    # plt.show()
     interest_neg = z[peaks_neg]
-    #print("interest_neg", interest_neg)
-    if not interest_neg.any():
-        return 0, []
 
     min_peaks_pos = np.min(interest_pos)
     max_peaks_pos = np.max(interest_pos)
-    #print(min_peaks_pos, max_peaks_pos, max_peaks_pos / min_peaks_pos, 'minmax')
-    if max_peaks_pos / (min_peaks_pos or 1e-9) >= 35:
+    if max_peaks_pos / min_peaks_pos >= 35:
         min_peaks_pos = np.mean(interest_pos)
     min_peaks_neg = 0 # np.min(interest_neg)
 
-    # cutoff criterion: fixed fraction of lowest column height
+    # print(np.min(interest_pos),np.max(interest_pos),np.max(interest_pos)/np.min(interest_pos),'minmax')
     dis_talaei = (min_peaks_pos - min_peaks_neg) / multiplier
     grenze = min_peaks_pos - dis_talaei
-    #np.mean(y[peaks_neg[0]:peaks_neg[-1]])-np.std(y[peaks_neg[0]:peaks_neg[-1]])/2.0
-
-    # extra criterion: fixed multiple of lowest gap height
-    # print("grenze", grenze, multiplier * (5 + np.min(interest_neg)))
-    grenze = min(grenze, multiplier * (5 + np.min(interest_neg)))
+    # np.mean(y[peaks_neg[0]:peaks_neg[len(peaks_neg)-1]])-np.std(y[peaks_neg[0]:peaks_neg[len(peaks_neg)-1]])/2.0
 
     # print(interest_neg,'interest_neg')
     # print(grenze,'grenze')
     # print(min_peaks_pos,'min_peaks_pos')
     # print(dis_talaei,'dis_talaei')
     # print(peaks_neg,'peaks_neg')
 
-    # fig, (ax1, ax2) = plt.subplots(2, sharex=True)
-    # ax1.imshow(regions_without_separators + 5 * vertical_separators, aspect="auto")
-    # ax2.plot(z, color='red', label='z')
-    # ax2.plot(zneg[20:], color='blue', label='zneg')
-    # ax2.plot(x, color='green', label='vsep')
-    # ax2.scatter(peaks_neg, z[peaks_neg], color='red')
-    # ax2.scatter(peaks_neg, zneg[20:][peaks_neg], color='blue')
-    # ax2.axhline(min_peaks_pos, color='red')
-    # ax2.axhline(grenze, color='blue')
-    # ax2.annotate("min_peaks_pos", xy=(0, min_peaks_pos), color='red')
-    # ax2.annotate("grenze", xy=(0, grenze), color='blue')
-    # ax2.text(0, grenze, "grenze")
-    # ax2.legend()
-    # plt.show()
-    # print("vsep", x[peaks_neg])
-    interest_neg = interest_neg - x[peaks_neg]
 
     interest_neg_fin = interest_neg[(interest_neg < grenze)]
     peaks_neg_fin = peaks_neg[(interest_neg < grenze)]
+    # interest_neg_fin=interest_neg[(interest_neg<grenze)]
 
     if num_col_classifier - len(interest_neg_fin) >= 3:
-        # found too few columns here: ignore 'grenze' and take the deepest N peaks
-        sort_by_height = np.argsort(interest_neg)[:num_col_classifier]
-        peaks_neg_fin = peaks_neg[sort_by_height]
-        interest_neg_fin = interest_neg[sort_by_height]
-        # print(peaks_neg_fin, "peaks_neg[sorted_by_height]")
-        sort_by_pos = np.argsort(peaks_neg_fin)
-        peaks_neg_fin = peaks_neg_fin[sort_by_pos]
-        interest_neg_fin = interest_neg_fin[sort_by_pos]
+        index_sort_interest_neg_fin= np.argsort(interest_neg_fin)
+        peaks_neg_sorted = np.array(peaks_neg)[index_sort_interest_neg_fin]
+        interest_neg_fin_sorted = np.array(interest_neg_fin)[index_sort_interest_neg_fin]
 
-    num_col = len(interest_neg_fin) + 1
+        if len(index_sort_interest_neg_fin)>=num_col_classifier:
+            peaks_neg_fin = list( peaks_neg_sorted[:num_col_classifier] )
+            interest_neg_fin = list( interest_neg_fin_sorted[:num_col_classifier] )
+        else:
+            peaks_neg_fin = peaks_neg[:]
+            interest_neg_fin = interest_neg[:]
+
+    num_col = (len(interest_neg_fin)) + 1
     # print(peaks_neg_fin,'peaks_neg_fin')
     # print(num_col,'diz')
 
-    # cancel if resulting split is highly unbalanced across available width
-    if unbalanced:
-        pass
-    elif ((num_col == 3 and
-           ((peaks_neg_fin[0] > 0.75 * len(y) and
-             peaks_neg_fin[1] > 0.75 * len(y)) or
-            (peaks_neg_fin[0] < 0.25 * len(y) and
-             peaks_neg_fin[1] < 0.25 * len(y)) or
-            (peaks_neg_fin[0] < 0.5 * len(y) - 200 and
-             peaks_neg_fin[1] < 0.5 * len(y)) or
-            (peaks_neg_fin[0] > 0.5 * len(y) + 200 and
-             peaks_neg_fin[1] > 0.5 * len(y)))) or
-          (num_col == 2 and
-           (peaks_neg_fin[0] > 0.75 * len(y) or
-            peaks_neg_fin[0] < 0.25 * len(y)))):
-        num_col = 1
-        peaks_neg_fin = []
+    p_l = 0
+    p_u = len(y) - 1
+    p_m = int(len(y) / 2.0)
+    p_g_l = int(len(y) / 4.0)
+    p_g_u = len(y) - int(len(y) / 4.0)
+
+    if num_col == 3:
+        if ((peaks_neg_fin[0] > p_g_u and
+             peaks_neg_fin[1] > p_g_u) or
+            (peaks_neg_fin[0] < p_g_l and
+             peaks_neg_fin[1] < p_g_l) or
+            (peaks_neg_fin[0] + 200 < p_m and
+             peaks_neg_fin[1] < p_m) or
+            (peaks_neg_fin[0] - 200 > p_m and
+             peaks_neg_fin[1] > p_m)):
+            num_col = 1
+            peaks_neg_fin = []
+
+    if num_col == 2:
+        if (peaks_neg_fin[0] > p_g_u or
+            peaks_neg_fin[0] < p_g_l):
+            num_col = 1
+            peaks_neg_fin = []
 
     ##print(len(peaks_neg_fin))
-    # filter out peaks that are too close (<400px) to each other:
-    # among each group, pick the position with smallest amount of text
     diff_peaks = np.abs(np.diff(peaks_neg_fin))
-    cut_off = 300 #400
+    cut_off = 400
     peaks_neg_true = []
     forest = []
+    # print(len(peaks_neg_fin),'len_')
+
     for i in range(len(peaks_neg_fin)):
         if i == 0:
             forest.append(peaks_neg_fin[i])
         if i < len(peaks_neg_fin) - 1:
             if diff_peaks[i] <= cut_off:
                 forest.append(peaks_neg_fin[i + 1])
-            else:
+            if diff_peaks[i] > cut_off:
                 # print(forest[np.argmin(z[forest]) ] )
                 if not isNaN(forest[np.argmin(z[forest])]):
                     peaks_neg_true.append(forest[np.argmin(z[forest])])
@@ -431,61 +501,68 @@
                 peaks_neg_true.append(forest[np.argmin(z[forest])])
 
     num_col = len(peaks_neg_true) + 1
-    #print(peaks_neg_true, "peaks_neg_true")
+    p_l = 0
+    p_u = len(y) - 1
+    p_m = int(len(y) / 2.0)
+    p_quarter = int(len(y) / 5.0)
+    p_g_l = int(len(y) / 4.0)
+    p_g_u = len(y) - int(len(y) / 4.0)
+
+    p_u_quarter = len(y) - p_quarter
 
     ##print(num_col,'early')
-    # cancel if resulting split is highly unbalanced across available width
-    if unbalanced:
-        pass
-    elif ((num_col == 3 and
-           ((peaks_neg_true[0] > 0.75 * len(y) and
-             peaks_neg_true[1] > 0.75 * len(y)) or
-            (peaks_neg_true[0] < 0.25 * len(y) and
-             peaks_neg_true[1] < 0.25 * len(y)) or
-            (peaks_neg_true[0] < 0.5 * len(y) - 200 and
-             peaks_neg_true[1] < 0.5 * len(y)) or
-            (peaks_neg_true[0] > 0.5 * len(y) + 200 and
-             peaks_neg_true[1] > 0.5 * len(y)))) or
-          (num_col == 2 and
-           (peaks_neg_true[0] > 0.75 * len(y) or
-            peaks_neg_true[0] < 0.25 * len(y)))):
-        num_col = 1
-        peaks_neg_true = []
-    elif (num_col == 3 and
-          (peaks_neg_true[0] < 0.75 * len(y) and
-           peaks_neg_true[0] > 0.25 * len(y) and
-           peaks_neg_true[1] > 0.80 * len(y))):
-        num_col = 2
-        peaks_neg_true = [peaks_neg_true[0]]
-    elif (num_col == 3 and
-          (peaks_neg_true[1] < 0.75 * len(y) and
-           peaks_neg_true[1] > 0.25 * len(y) and
-           peaks_neg_true[0] < 0.20 * len(y))):
-        num_col = 2
-        peaks_neg_true = [peaks_neg_true[1]]
+    if num_col == 3:
+        if ((peaks_neg_true[0] > p_g_u and
+             peaks_neg_true[1] > p_g_u) or
+            (peaks_neg_true[0] < p_g_l and
+             peaks_neg_true[1] < p_g_l) or
+            (peaks_neg_true[0] < p_m and
+             peaks_neg_true[1] + 200 < p_m) or
+            (peaks_neg_true[0] - 200 > p_m and
+             peaks_neg_true[1] > p_m)):
+            num_col = 1
+            peaks_neg_true = []
+        elif (peaks_neg_true[0] < p_g_u and
+              peaks_neg_true[0] > p_g_l and
+              peaks_neg_true[1] > p_u_quarter):
+            peaks_neg_true = [peaks_neg_true[0]]
+        elif (peaks_neg_true[1] < p_g_u and
+              peaks_neg_true[1] > p_g_l and
+              peaks_neg_true[0] < p_quarter):
+            peaks_neg_true = [peaks_neg_true[1]]
 
-    # get rid of too narrow columns (not used)
-    # if np.count_nonzero(diff_peaks < 360):
-    #     arg_help = np.arange(len(diff_peaks))
-    #     arg_help_ann = arg_help[diff_peaks < 360]
-    #     peaks_neg_fin_new = []
-    #     for ii in range(len(peaks_neg_fin)):
-    #         if ii in arg_help_ann:
-    #             if interest_neg_fin[ii] < interest_neg_fin[ii + 1]:
-    #                 peaks_neg_fin_new.append(peaks_neg_fin[ii])
-    #             else:
-    #                 peaks_neg_fin_new.append(peaks_neg_fin[ii + 1])
+    if num_col == 2:
+        if (peaks_neg_true[0] > p_g_u or
+            peaks_neg_true[0] < p_g_l):
+            num_col = 1
+            peaks_neg_true = []
-    #         elif (ii - 1) not in arg_help_ann:
-    #             peaks_neg_fin_new.append(peaks_neg_fin[ii])
-    #         else:
-    #             peaks_neg_fin_new = peaks_neg_fin
+    diff_peaks_abnormal = diff_peaks[diff_peaks < 360]
+
+    if len(diff_peaks_abnormal) > 0:
+        arg_help = np.arange(len(diff_peaks))
+        arg_help_ann = arg_help[diff_peaks < 360]
+
+        peaks_neg_fin_new = []
+
+        for ii in range(len(peaks_neg_fin)):
+            if ii in arg_help_ann:
+                arg_min = np.argmin([interest_neg_fin[ii], interest_neg_fin[ii + 1]])
+                if arg_min == 0:
+                    peaks_neg_fin_new.append(peaks_neg_fin[ii])
+                else:
+                    peaks_neg_fin_new.append(peaks_neg_fin[ii + 1])
+
+            elif (ii - 1) not in arg_help_ann:
+                peaks_neg_fin_new.append(peaks_neg_fin[ii])
+    else:
+        peaks_neg_fin_new = peaks_neg_fin
 
     # plt.plot(gaussian_filter1d(y, sigma_))
     # plt.plot(peaks_neg_true,z[peaks_neg_true],'*')
     # plt.plot([0,len(y)], [grenze,grenze])
     # plt.show()
     ##print(len(peaks_neg_true))
-    #print(peaks_neg_true, "peaks_neg_true")
     return len(peaks_neg_true), peaks_neg_true
 
 def find_num_col_only_image(regions_without_separators, multiplier=3.8):
@@ -687,7 +764,7 @@
     return len(peaks_fin_true), peaks_fin_true
 
 def find_num_col_by_vertical_lines(regions_without_separators, multiplier=3.8):
-    regions_without_separators_0 = regions_without_separators.sum(axis=0)
+    regions_without_separators_0 = regions_without_separators[:, :, 0].sum(axis=0)
 
     ##plt.plot(regions_without_separators_0)
     ##plt.show()
@@ -697,19 +774,24 @@
     peaks, _ = find_peaks(z, height=0)
 
     # print(peaks,'peaksnew')
-    # fig, (ax1, ax2) = plt.subplots(2, sharex=True, suptitle='find_num_col_by_vertical_lines')
-    # ax1.imshow(regions_without_separators, aspect="auto")
-    # ax2.plot(z)
-    # ax2.scatter(peaks, z[peaks])
-    # ax2.set_title('find_peaks(regions_without_separators.sum(axis=0), height=0)')
-    # plt.show()
     return peaks
 
-def put_drop_out_from_only_drop_model(layout_no_patch, layout1):
-    if layout_no_patch.ndim == 3:
-        layout_no_patch = layout_no_patch[:, :, 0]
+def return_regions_without_separators(regions_pre):
+    kernel = np.ones((5, 5), np.uint8)
+    regions_without_separators = ((regions_pre[:, :] != 6) &
+                                  (regions_pre[:, :] != 0))
+    # regions_without_separators=( (image_regions_eraly_p[:,:,:]!=6) &
+    #                              (image_regions_eraly_p[:,:,:]!=0) &
+    #                              (image_regions_eraly_p[:,:,:]!=5) &
+    #                              (image_regions_eraly_p[:,:,:]!=8) &
+    #                              (image_regions_eraly_p[:,:,:]!=7))
 
-    drop_only = (layout_no_patch[:, :] == 4) * 1
+    regions_without_separators = cv2.erode(regions_without_separators.astype(np.uint8), kernel, iterations=6)
+
+    return regions_without_separators
+
+def put_drop_out_from_only_drop_model(layout_no_patch, layout1):
+    drop_only = (layout_no_patch[:, :, 0] == 4) * 1
     contours_drop, hir_on_drop = return_contours_of_image(drop_only)
     contours_drop_parent = return_parent_contours(contours_drop, hir_on_drop)
@@ -735,64 +817,50 @@
                 (map_of_drop_contour_bb == 5).sum()) >= 15:
             contours_drop_parent_final.append(contours_drop_parent[jj])
 
-    layout_no_patch[:, :][layout_no_patch[:, :] == 4] = 0
-    layout_no_patch = cv2.fillPoly(layout_no_patch, pts=contours_drop_parent_final, color=4)
+    layout_no_patch[:, :, 0][layout_no_patch[:, :, 0] == 4] = 0
+
+    layout_no_patch = cv2.fillPoly(layout_no_patch, pts=contours_drop_parent_final, color=(4, 4, 4))
 
     return layout_no_patch
 
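Aside: put_drop_out_from_only_drop_model and the drop-capital re-labelling below share one pattern: extract the parent contours of the class-4 (drop capital) mask and keep a contour only if its bounding box already overlaps enough text in the early layout (the hunk above counts labels 1 and 5 against a threshold of 15 pixels). A simplified, self-contained version of that bounding-box test; the function name and the single-label check are illustrative assumptions:

    import cv2
    import numpy as np

    def keep_drop_candidate(layout_early, contour, min_text_pixels=15):
        """Accept a drop-capital contour only if its bbox already contains text."""
        x, y, w, h = cv2.boundingRect(contour)
        window = layout_early[y:y + h, x:x + w]
        return np.count_nonzero(window == 1) >= min_text_pixels

    layout = np.zeros((100, 100), dtype=np.uint8)
    layout[20:40, 20:40] = 1                          # some early-layout text
    contour = np.array([[[22, 22]], [[38, 22]], [[38, 38]], [[22, 38]]], dtype=np.int32)
    print(keep_drop_candidate(layout, contour))       # True
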
-def fill_bb_of_drop_capitals(
-        full_prediction, early_prediction,
-        label_bg=0,
-        label_text=1,
-        label_imgs=5,
-        label_drop_fl_model=3,
-        label_imgs_fl_model=4):
-    """
-    Given segmentation maps from full layout model (including drop-capital)
-    and early layout model (after post-processing), re-assign regions which
-    are (large enough and) majority classified as drop-capital to that label.
-    """
-    area_tot = full_prediction.size
-    drop_only = (full_prediction == label_drop_fl_model) * 1
+def putt_bb_of_drop_capitals_of_model_in_patches_in_layout(layout_in_patch, drop_capital_label, text_regions_p):
+    drop_only = (layout_in_patch[:, :, 0] == drop_capital_label) * 1
     contours_drop, hir_on_drop = return_contours_of_image(drop_only)
     contours_drop_parent = return_parent_contours(contours_drop, hir_on_drop)
-    text_mask = ((early_prediction == label_text) |
-                 (early_prediction == label_imgs))
-    _, text_segs, text_bbox, _ = cv2.connectedComponentsWithStats(early_prediction * text_mask)
+
+    areas_cnt_text = np.array([cv2.contourArea(contours_drop_parent[j])
+                               for j in range(len(contours_drop_parent))])
+    areas_cnt_text = areas_cnt_text / float(drop_only.shape[0] * drop_only.shape[1])
+    contours_drop_parent = [contours_drop_parent[jz]
+                            for jz in range(len(contours_drop_parent))
+                            if areas_cnt_text[jz] > 0.00001]
+    areas_cnt_text = [areas_cnt_text[jz]
+                      for jz in range(len(areas_cnt_text))
+                      if areas_cnt_text[jz] > 0.00001]
 
     contours_drop_parent_final = []
-    for contour in contours_drop_parent:
-        area_drop = cv2.contourArea(contour)
-        if area_drop <= 0.00001 * area_tot:
-            continue
-        x, y, w, h = cv2.boundingRect(contour)
+    for jj in range(len(contours_drop_parent)):
+        x, y, w, h = cv2.boundingRect(contours_drop_parent[jj])
         box = slice(y, y + h), slice(x, x + w)
-        area_box = w * h
-        area_text_in_early_layout = np.sum(text_mask[box] == label_text)
+        box0 = box + (0,)
+        mask_of_drop_cpaital_in_early_layout = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1]))
+        mask_of_drop_cpaital_in_early_layout[box] = text_regions_p[box]
 
-        if (area_drop > 0.6 * area_box and
-            area_text_in_early_layout >= 0.3 * area_box):
-            mask = np.ones((h, w), dtype=bool)
+        all_drop_capital_pixels_which_is_text_in_early_lo = np.sum(mask_of_drop_cpaital_in_early_layout[box]==1)
+        mask_of_drop_cpaital_in_early_layout[box] = 1
+        all_drop_capital_pixels = np.sum(mask_of_drop_cpaital_in_early_layout==1)
+
+        percent_text_to_all_in_drop = all_drop_capital_pixels_which_is_text_in_early_lo / float(all_drop_capital_pixels)
+        if (areas_cnt_text[jj] * float(drop_only.shape[0] * drop_only.shape[1]) / float(w * h) > 0.6 and
+            percent_text_to_all_in_drop >= 0.3):
+            layout_in_patch[box0] = drop_capital_label
         else:
-            mask = ((full_prediction[box] == label_drop_fl_model) |
-                    (full_prediction[box] == label_imgs_fl_model) |
-                    (full_prediction[box] == label_bg))
-        full_prediction[box][mask] = label_drop_fl_model
+            layout_in_patch[box0][layout_in_patch[box0] == drop_capital_label] = drop_capital_label
+            layout_in_patch[box0][layout_in_patch[box0] == 0] = drop_capital_label
+            layout_in_patch[box0][layout_in_patch[box0] == 4] = drop_capital_label# images
+            #layout_in_patch[box0][layout_in_patch[box0] == drop_capital_label] = 1#drop_capital_label
 
-        # also try to enlarge to corresponding labels in early_prediction
-        for label in range(1, len(text_bbox)):
-            x0, y0, w0, h0, area0 = text_bbox[label]
-            x1 = max(0, x0 - x)
-            y1 = max(0, y0 - y)
-            w1 = min(w0, w - x1) if x0 >= x else min(w, w0 - x + x0)
-            h1 = min(h0, h - y1) if y0 >= y else min(h, h0 - y + y0)
-            if w1 < 0 or h1 < 0:
-                continue
-            area1 = np.count_nonzero(mask[y1: y1 + h1, x1: x1 + w1])
-            if area1 and area1 >= 0.8 * area0:
-                full_prediction[text_segs == label] = label_drop_fl_model
-
-    return full_prediction == label_drop_fl_model
+    return layout_in_patch
 
 def check_any_text_region_in_model_one_is_main_or_header(
     regions_model_1, regions_model_full,
@@ -825,28 +893,29 @@
     contours_only_text_parent_main_d=[]
     contours_only_text_parent_head_d=[]
 
-    for ii, con in enumerate(contours_only_text_parent):
-        img = np.zeros(regions_model_1.shape[:2])
-        img = cv2.fillPoly(img, pts=[con], color=255)
+    for ii in range(len(contours_only_text_parent)):
+        con=contours_only_text_parent[ii]
+        img=np.zeros((regions_model_1.shape[0],regions_model_1.shape[1],3))
+        img = cv2.fillPoly(img, pts=[con], color=(255, 255, 255))
 
-        all_pixels=((img == 255)*1).sum()
-        pixels_header=( ( (img == 255) & (regions_model_full[:,:,0]==2) )*1 ).sum()
+        all_pixels=((img[:,:,0]==255)*1).sum()
+        pixels_header=( ( (img[:,:,0]==255) & (regions_model_full[:,:,0]==2) )*1 ).sum()
         pixels_main=all_pixels-pixels_header
 
         if (pixels_header>=pixels_main) and ( (length_con[ii]/float(height_con[ii]) )>=1.3 ):
-            regions_model_1[:,:][(regions_model_1[:,:]==1) & (img == 255) ]=2
+            regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=2
             contours_only_text_parent_head.append(con)
-            if len(contours_only_text_parent_d_ordered):
+            if contours_only_text_parent_d_ordered is not None:
                 contours_only_text_parent_head_d.append(contours_only_text_parent_d_ordered[ii])
             all_box_coord_head.append(all_box_coord[ii])
             slopes_head.append(slopes[ii])
             all_found_textline_polygons_head.append(all_found_textline_polygons[ii])
             conf_contours_head.append(None)
         else:
-            regions_model_1[:,:][(regions_model_1[:,:]==1) & (img == 255) ]=1
+            regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=1
             contours_only_text_parent_main.append(con)
             conf_contours_main.append(conf_contours[ii])
-            if len(contours_only_text_parent_d_ordered):
+            if contours_only_text_parent_d_ordered is not None:
                 contours_only_text_parent_main_d.append(contours_only_text_parent_d_ordered[ii])
             all_box_coord_main.append(all_box_coord[ii])
             slopes_main.append(slopes[ii])
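Aside: the hunk above (and its _light variant in the next hunk) classifies each text region by majority vote: rasterize the region polygon, count how many of its pixels the full-layout model marked as header (label 2), and combine that with the region's aspect ratio. The decision rule in isolation, as a minimal sketch; the helper name is illustrative, and the contour is assumed to be an int32 polygon as elsewhere in this module:

    import cv2
    import numpy as np

    def is_header(region_contour, regions_model_full, width, height):
        mask = np.zeros(regions_model_full.shape[:2], dtype=np.uint8)
        cv2.fillPoly(mask, pts=[region_contour], color=1)
        all_pixels = int((mask == 1).sum())
        pixels_header = int(((mask == 1) & (regions_model_full[:, :, 0] == 2)).sum())
        pixels_main = all_pixels - pixels_header
        # header if mostly header-labelled and wide relative to its height
        return pixels_header >= pixels_main and width / float(height) >= 1.3
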
@@ -868,103 +937,132 @@
             conf_contours_main,
             conf_contours_head)
 
-def split_textregion_main_vs_head(
-        regions_model_1,
-        regions_model_full,
-        polygons_of_textregions,
-        polygons_of_textregions_d,
-        all_found_textline_polygons, slopes,
-        conf_textregions,
-        label_text=1,
-        label_head_full=2,
-        label_head_final=2,
-        label_main_final=1,
-):
+def check_any_text_region_in_model_one_is_main_or_header_light(
+        regions_model_1, regions_model_full,
+        contours_only_text_parent,
+        all_box_coord, all_found_textline_polygons, slopes,
+        contours_only_text_parent_d_ordered,
+        conf_contours):
     ### to make it faster
     h_o = regions_model_1.shape[0]
     w_o = regions_model_1.shape[1]
     zoom = 3
-    regions_model_1 = cv2.resize(regions_model_1,
-                                 (regions_model_1.shape[1] // zoom,
-                                  regions_model_1.shape[0] // zoom),
+    regions_model_1 = cv2.resize(regions_model_1, (regions_model_1.shape[1] // zoom,
+                                                   regions_model_1.shape[0] // zoom),
                                  interpolation=cv2.INTER_NEAREST)
-    regions_model_full = cv2.resize(regions_model_full,
-                                    (regions_model_full.shape[1] // zoom,
-                                     regions_model_full.shape[0] // zoom),
+    regions_model_full = cv2.resize(regions_model_full, (regions_model_full.shape[1] // zoom,
                                                          regions_model_full.shape[0] // zoom),
                                     interpolation=cv2.INTER_NEAREST)
-    contours_z = [contour // zoom
-                  for contour in polygons_of_textregions]
+    contours_only_text_parent = [(i / zoom).astype(int) for i in contours_only_text_parent]
     ###
 
-    _, _, x_min_main, x_max_main, y_min_main, y_max_main, _ = \
-        find_new_features_of_contours(contours_z)
+    cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin = \
+        find_new_features_of_contours(contours_only_text_parent)
     length_con=x_max_main-x_min_main
     height_con=y_max_main-y_min_main
 
-    main = []
-    head = []
-    for ii, con in enumerate(contours_z):
-        parent = np.zeros_like(regions_model_1)
-        parent = cv2.fillPoly(parent, pts=[con], color=1)
+    all_found_textline_polygons_main=[]
+    all_found_textline_polygons_head=[]
 
-        pixels_head = ((parent > 0) & (regions_model_full == label_head_full)).sum()
-        pixels_main = parent.sum() - pixels_head
+    all_box_coord_main=[]
+    all_box_coord_head=[]
 
-        if (( pixels_head >= 0.6 * pixels_main and
-              length_con[ii] >= 1.3 * height_con[ii] and
-              length_con[ii] <= 3 * height_con[ii] ) or
-            ( pixels_head >= 0.3 * pixels_main and
-              length_con[ii] >= 3 * height_con[ii] )):
+    slopes_main=[]
+    slopes_head=[]
 
-            head.append(ii)
-            label = label_head_final
+    contours_only_text_parent_main=[]
+    contours_only_text_parent_head=[]
+    conf_contours_main=[]
+    conf_contours_head=[]
+
+    contours_only_text_parent_main_d=[]
+    contours_only_text_parent_head_d=[]
+
+    for ii in range(len(contours_only_text_parent)):
+        con=contours_only_text_parent[ii]
+        img=np.zeros((regions_model_1.shape[0], regions_model_1.shape[1], 3))
+        img = cv2.fillPoly(img, pts=[con], color=(255, 255, 255))
+
+        all_pixels = (img[:,:,0]==255).sum()
+        pixels_header=((img[:,:,0]==255) &
+                       (regions_model_full[:,:,0]==2)).sum()
+        pixels_main = all_pixels - pixels_header
+
+        if (pixels_header/float(pixels_main)>=0.3) and ( (length_con[ii]/float(height_con[ii]) )>=1.3 ):
+            regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=2
+            contours_only_text_parent_head.append(con)
+            if contours_only_text_parent_d_ordered is not None:
+                contours_only_text_parent_head_d.append(contours_only_text_parent_d_ordered[ii])
+            all_box_coord_head.append(all_box_coord[ii])
+            slopes_head.append(slopes[ii])
+            all_found_textline_polygons_head.append(all_found_textline_polygons[ii])
+            conf_contours_head.append(None)
         else:
-            main.append(ii)
-            label = label_main_final
+            regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=1
+            contours_only_text_parent_main.append(con)
+            conf_contours_main.append(conf_contours[ii])
+            if contours_only_text_parent_d_ordered is not None:
+                contours_only_text_parent_main_d.append(contours_only_text_parent_d_ordered[ii])
+            all_box_coord_main.append(all_box_coord[ii])
+            slopes_main.append(slopes[ii])
+            all_found_textline_polygons_main.append(all_found_textline_polygons[ii])
 
-        regions_model_1[(regions_model_1 == label_text) & (parent > 0)] = label
+        #print(all_pixels,pixels_main,pixels_header)
 
     ### to make it faster
     regions_model_1 = cv2.resize(regions_model_1, (w_o, h_o),
                                  interpolation=cv2.INTER_NEAREST)
-    # regions_model_full = cv2.resize(parent, (regions_model_full.shape[1] // zoom,
-    #                                          regions_model_full.shape[0] // zoom),
+    # regions_model_full = cv2.resize(img, (regions_model_full.shape[1] // zoom,
+    #                                       regions_model_full.shape[0] // zoom),
    #                                 interpolation=cv2.INTER_NEAREST)
+    contours_only_text_parent_head = [(i * zoom).astype(int) for i in contours_only_text_parent_head]
+    contours_only_text_parent_main = [(i * zoom).astype(int) for i in contours_only_text_parent_main]
     ###
 
-    def select(lis, indexes):
-        if not len(lis):
-            return []
ind in indexes] - return (regions_model_1, - select(polygons_of_textregions, main), - select(polygons_of_textregions, head), - select(polygons_of_textregions_d, main), - select(polygons_of_textregions_d, head), - select(all_found_textline_polygons, main), - select(all_found_textline_polygons, head), - select(slopes, main), - select(slopes, head), - select(conf_textregions, main), - select(conf_textregions, head), - ) + contours_only_text_parent_main, + contours_only_text_parent_head, + all_box_coord_main, + all_box_coord_head, + all_found_textline_polygons_main, + all_found_textline_polygons_head, + slopes_main, + slopes_head, + contours_only_text_parent_main_d, + contours_only_text_parent_head_d, + conf_contours_main, + conf_contours_head) -def small_textlines_to_parent_adherence2(textlines_con, textline_mask, num_col): - """ - for each region, split up textlines into small and large areas; - keep only the ones with large area, but expanded (by merging - contours) by all intersecting lines with small area - """ - textlines_con_new = [] - for region in textlines_con: - areas_cnt_text = np.array(list(map(cv2.contourArea, region))) - areas_cnt_text = areas_cnt_text / float(textline_mask.size) - indexes_textlines = np.arange(len(region)) +def small_textlines_to_parent_adherence2(textlines_con, textline_iamge, num_col): + # print(textlines_con) + # textlines_con=textlines_con.astype(np.uint32) + textlines_con_changed = [] + for m1 in range(len(textlines_con)): + # textlines_tot=textlines_con[m1] + # textlines_tot=textlines_tot.astype() + textlines_tot = [] + textlines_tot_org_form = [] + # print(textlines_tot) + + for nn in range(len(textlines_con[m1])): + textlines_tot.append(np.array(textlines_con[m1][nn], dtype=np.int32)) + textlines_tot_org_form.append(textlines_con[m1][nn]) + + ##img_text_all=np.zeros((textline_iamge.shape[0],textline_iamge.shape[1])) + ##img_text_all=cv2.fillPoly(img_text_all, pts =textlines_tot , color=(1,1,1)) + + ##plt.imshow(img_text_all) + ##plt.show() + areas_cnt_text = np.array([cv2.contourArea(textlines_tot[j]) + for j in range(len(textlines_tot))]) + areas_cnt_text = areas_cnt_text / float(textline_iamge.shape[0] * textline_iamge.shape[1]) + indexes_textlines = np.arange(len(textlines_tot)) + + # print(areas_cnt_text,np.min(areas_cnt_text),np.max(areas_cnt_text)) if num_col == 0: min_area = 0.0004 elif num_col == 1: @@ -972,135 +1070,196 @@ def small_textlines_to_parent_adherence2(textlines_con, textline_mask, num_col): else: min_area = 0.0001 indexes_textlines_small = indexes_textlines[areas_cnt_text < min_area] - indexes_textlines_large = indexes_textlines[areas_cnt_text >= min_area] - textlines_small = [region[i] for i in indexes_textlines_small] - textlines_large = [region[i] for i in indexes_textlines_large] + # print(indexes_textlines) - img_small = np.zeros_like(textline_mask) - img_small = cv2.fillPoly(img_small, pts=textlines_small, color=1) - img_large = np.zeros_like(textline_mask) - img_large = cv2.fillPoly(img_large, pts=textlines_large, color=1) - img_inter = img_small + img_large == 2 - if np.any(img_inter): - indexes_textlines_inter = [] - for contour_small in textlines_small: + textlines_small = [] + textlines_small_org_form = [] + for i in indexes_textlines_small: + textlines_small.append(textlines_tot[i]) + textlines_small_org_form.append(textlines_tot_org_form[i]) + + textlines_big = [] + textlines_big_org_form = [] + for i in list(set(indexes_textlines) - set(indexes_textlines_small)): + textlines_big.append(textlines_tot[i]) + 
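# --- editor's sketch (not part of the patch): the relative-area split --------
# The hunk above separates textline contours into "small" and "big" by contour
# area normalized to the page size, against a num_col-dependent min_area
# threshold. A minimal standalone version, assuming OpenCV-style contours;
# split_small_big is a hypothetical name, not an eynollah API.
import cv2
import numpy as np

def split_small_big(contours, page_shape, min_area_rel):
    page_area = float(page_shape[0] * page_shape[1])
    small, big = [], []
    for cnt in contours:
        # relative area, as in areas_cnt_text above
        if cv2.contourArea(cnt) / page_area < min_area_rel:
            small.append(cnt)
        else:
            big.append(cnt)
    return small, big

# toy usage: a 10x10 speck vs. a 600x20 line on a 1000x1000 page
tiny = np.array([[[0, 0]], [[10, 0]], [[10, 10]], [[0, 10]]], dtype=np.int32)
line = np.array([[[0, 0]], [[600, 0]], [[600, 20]], [[0, 20]]], dtype=np.int32)
small, big = split_small_big([tiny, line], (1000, 1000), 0.0003)
assert len(small) == 1 and len(big) == 1
# ------------------------------------------------------------------------------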
textlines_big_org_form.append(textlines_tot_org_form[i]) + + img_textline_s = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1])) + img_textline_s = cv2.fillPoly(img_textline_s, pts=textlines_small, color=(1, 1, 1)) + + img_textline_b = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1])) + img_textline_b = cv2.fillPoly(img_textline_b, pts=textlines_big, color=(1, 1, 1)) + + sum_small_big_all = img_textline_s + img_textline_b + sum_small_big_all2 = (sum_small_big_all[:, :] == 2) * 1 + + sum_intersection_sb = sum_small_big_all2.sum(axis=1).sum() + if sum_intersection_sb > 0: + dis_small_from_bigs_tot = [] + for z1 in range(len(textlines_small)): + # print(len(textlines_small),'small') intersections = [] - for contour_large in textlines_large: - img0_small = np.zeros_like(textline_mask) - img0_small = cv2.fillPoly(img0_small, pts=[contour_small], color=1) - img0_large = np.zeros_like(textline_mask) - img0_large = cv2.fillPoly(img0_large, pts=[contour_large], color=1) - img0_inter = img0_small + img0_large == 2 - intersections.append(np.count_nonzero(img0_inter)) - idx_large = np.argmax(intersections) - if intersections[idx_large] <= 0: - idx_large = -1 - indexes_textlines_inter.append(idx_large) + for z2 in range(len(textlines_big)): + img_text = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1])) + img_text = cv2.fillPoly(img_text, pts=[textlines_small[z1]], color=(1, 1, 1)) - indexes_textlines_inter = np.array(indexes_textlines_inter) - for idx_large in set(indexes_textlines_inter): - if idx_large < 0: - continue - img0_union = np.zeros_like(textline_mask) - img0_union = cv2.fillPoly(img0_union, pts=[textlines_large[idx_large]], color=255) - indexes_inter_small = np.flatnonzero(indexes_textlines_inter == idx_large) - for idx_small in indexes_inter_small: - img0_union = cv2.fillPoly(img0_union, pts=[textlines_small[idx_small]], color=255) + img_text2 = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1])) + img_text2 = cv2.fillPoly(img_text2, pts=[textlines_big[z2]], color=(1, 1, 1)) - _, thresh = cv2.threshold(img0_union, 0, 255, 0) - contours_union, _ = cv2.findContours(thresh.astype(np.uint8), - cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) - areas_union = np.array(list(map(cv2.contourArea, contours_union))) - contour_union = contours_union[np.argmax(areas_union)] #contours_union[0] - textlines_large[idx_large] = contour_union + sum_small_big = img_text2 + img_text + sum_small_big_2 = (sum_small_big[:, :] == 2) * 1 - textlines_con_new.append(textlines_large) - return textlines_con_new + sum_intersection = sum_small_big_2.sum(axis=1).sum() + # print(sum_intersection) + intersections.append(sum_intersection) -def order_of_regions(textline_mask, contours_main, contours_head, contours_drop, y_ref, x_ref): + if len(np.array(intersections)[np.array(intersections) > 0]) == 0: + intersections = [] + try: + dis_small_from_bigs_tot.append(np.argmax(intersections)) + except: + dis_small_from_bigs_tot.append(-1) + + smalls_list = np.array(dis_small_from_bigs_tot)[np.array(dis_small_from_bigs_tot) >= 0] + # index_small_textlines_rest=list( set(indexes_textlines_small)-set(smalls_list) ) + + textlines_big_with_change = [] + textlines_big_with_change_con = [] + textlines_small_with_change = [] + for z in list(set(smalls_list)): + index_small_textlines = list(np.where(np.array(dis_small_from_bigs_tot) == z)[0]) + # print(z,index_small_textlines) + + img_text2 = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1], 3)) + img_text2 = cv2.fillPoly(img_text2, 
pts=[textlines_big[z]], color=(255, 255, 255)) + + textlines_big_with_change.append(z) + + for k in index_small_textlines: + img_text2 = cv2.fillPoly(img_text2, pts=[textlines_small[k]], color=(255, 255, 255)) + textlines_small_with_change.append(k) + + img_text2 = img_text2.astype(np.uint8) + imgray = cv2.cvtColor(img_text2, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + cont, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + # print(cont[0],type(cont)) + textlines_big_with_change_con.append(cont) + textlines_big_org_form[z] = cont[0] + + # plt.imshow(img_text2) + # plt.show() + + # print(textlines_big_with_change,'textlines_big_with_change') + # print(textlines_small_with_change,'textlines_small_with_change') + # print(textlines_big) + textlines_con_changed.append(textlines_big_org_form) + else: + textlines_con_changed.append(textlines_big_org_form) + return textlines_con_changed + +def order_of_regions(textline_mask, contours_main, contours_header, y_ref): + ##plt.imshow(textline_mask) + ##plt.show() """ - Order text region contours within a single column bbox in a top-down-left-right way. + print(len(contours_main),'contours_main') + mada_n=textline_mask.sum(axis=1) + y=mada_n[:] - First, determine the vertical gaps. Then iterate over each vertical segment, - identifying the contours centered in that segment. Order them by their - horizontal center, and add them to the overall order. + y_help=np.zeros(len(y)+40) + y_help[20:len(y)+20]=y + x=np.arange(len(y)) - Arguments: - * textline_mask: the mask of the textline segmentation, cropped for that box - * contours_main: the paragraph text region contours expected to be here - * contours_head: the heading text region contours expected to be here - * contours_drop: the drop-capital region contours expected to be here - * y_ref: the vertical offset of that box within the page - * x_ref: the horizontal offset of that box within the page + peaks_real, _ = find_peaks(gaussian_filter1d(y, 3), height=0) + ##plt.imshow(textline_mask[:,:]) + ##plt.show() - Returns: a tuple of - * the array of contour indexes overall within this box - (i.e. into main+head+drop) - * the array of types - (1 for paragraph, 2 for heading, 3 for drop-capital) - * the array of contour indexes for the respective type - (i.e. 
into contours_main or contours_head or contours_drop) + sigma_gaus=8 + z= gaussian_filter1d(y_help, sigma_gaus) + zneg_rev=-y_help+np.max(y_help) + zneg=np.zeros(len(zneg_rev)+40) + zneg[20:len(zneg_rev)+20]=zneg_rev + zneg= gaussian_filter1d(zneg, sigma_gaus) + + peaks, _ = find_peaks(z, height=0) + peaks_neg, _ = find_peaks(zneg, height=0) + peaks_neg=peaks_neg-20-20 + peaks=peaks-20 """ - total = len(contours_main) + len(contours_head) + len(contours_drop) - assert total == 0 or np.any(textline_mask) + textline_sum_along_width = textline_mask.sum(axis=1) - # ax1 = plt.subplot(2, 1, 1, title="order_of_regions textline_mask") - # plt.imshow(textline_mask, aspect='auto') - y = textline_mask.sum(axis=1) # horizontal projection profile + y = textline_sum_along_width[:] y_padded = np.zeros(len(y) + 40) y_padded[20 : len(y) + 20] = y + x = np.arange(len(y)) + + peaks_real, _ = find_peaks(gaussian_filter1d(y, 3), height=0) sigma_gaus = 8 - #z = gaussian_filter1d(y_padded, sigma_gaus) - #peaks, _ = find_peaks(z, height=0) - #peaks = peaks - 20 - # ax2 = plt.subplot(2, 1, 2, title="smoothed horizontal projection", sharex=ax1) - # plt.plot(y) - zneg_rev = np.max(y_padded) - y_padded + z = gaussian_filter1d(y_padded, sigma_gaus) + zneg_rev = -y_padded + np.max(y_padded) zneg = np.zeros(len(zneg_rev) + 40) zneg[20 : len(zneg_rev) + 20] = zneg_rev zneg = gaussian_filter1d(zneg, sigma_gaus) + peaks, _ = find_peaks(z, height=0) peaks_neg, _ = find_peaks(zneg, height=0) - # plt.vlines(peaks_neg - 40, 0, None, label="peaks") - # plt.show() peaks_neg = peaks_neg - 20 - 20 + peaks = peaks - 20 - peaks_neg_new = np.array([0] + - # peaks can be beyond box due to padding and smoothing - [peak for peak in peaks_neg - if 0 < peak and peak < textline_mask.shape[0]] + - [textline_mask.shape[0]]) - # offset from bbox of mask - peaks_neg_new += y_ref + ##plt.plot(z) + ##plt.show() + if contours_main != None: + areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))]) + M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] + cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] + cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] + x_min_main = np.array([np.min(contours_main[j][:, 0, 0]) for j in range(len(contours_main))]) + x_max_main = np.array([np.max(contours_main[j][:, 0, 0]) for j in range(len(contours_main))]) - cx_main, cy_main = find_center_of_contours(contours_main) - cx_head, cy_head = find_center_of_contours(contours_head) - cx_drop, cy_drop = find_center_of_contours(contours_drop) - # assert not len(cy_main) or np.min(peaks_neg_new) <= np.min(cy_main) and np.max(cy_main) <= np.max(peaks_neg_new) - # assert not len(cy_head) or np.min(peaks_neg_new) <= np.min(cy_head) and np.max(cy_head) <= np.max(peaks_neg_new) - # assert not len(cy_drop) or np.min(peaks_neg_new) <= np.min(cy_drop) and np.max(cy_drop) <= np.max(peaks_neg_new) + y_min_main = np.array([np.min(contours_main[j][:, 0, 1]) for j in range(len(contours_main))]) + y_max_main = np.array([np.max(contours_main[j][:, 0, 1]) for j in range(len(contours_main))]) - slice_main = slice(0, len(contours_main)) - slice_head = slice(len(contours_main), - len(contours_main) + len(contours_head)) - slice_drop = slice(len(contours_main) + len(contours_head), - total) - matrix_of_orders = np.zeros((total, 5), dtype=int) - matrix_of_orders[:, 0] = np.arange(total) - matrix_of_orders[slice_main, 1] = 1 - matrix_of_orders[slice_head, 1] = 
2 - matrix_of_orders[slice_drop, 1] = 3 - matrix_of_orders[slice_main, 2] = cx_main - matrix_of_orders[slice_head, 2] = cx_head - matrix_of_orders[slice_drop, 2] = cx_drop - matrix_of_orders[slice_main, 3] = cy_main - matrix_of_orders[slice_head, 3] = cy_head - matrix_of_orders[slice_drop, 3] = cy_drop - matrix_of_orders[slice_main, 4] = np.arange(len(contours_main)) - matrix_of_orders[slice_head, 4] = np.arange(len(contours_head)) - matrix_of_orders[slice_drop, 4] = np.arange(len(contours_drop)) + if len(contours_header) != None: + areas_header = np.array([cv2.contourArea(contours_header[j]) for j in range(len(contours_header))]) + M_header = [cv2.moments(contours_header[j]) for j in range(len(contours_header))] + cx_header = [(M_header[j]["m10"] / (M_header[j]["m00"] + 1e-32)) for j in range(len(M_header))] + cy_header = [(M_header[j]["m01"] / (M_header[j]["m00"] + 1e-32)) for j in range(len(M_header))] + + x_min_header = np.array([np.min(contours_header[j][:, 0, 0]) for j in range(len(contours_header))]) + x_max_header = np.array([np.max(contours_header[j][:, 0, 0]) for j in range(len(contours_header))]) + + y_min_header = np.array([np.min(contours_header[j][:, 0, 1]) for j in range(len(contours_header))]) + y_max_header = np.array([np.max(contours_header[j][:, 0, 1]) for j in range(len(contours_header))]) + # print(cy_main,'mainy') + + peaks_neg_new = [] + peaks_neg_new.append(0 + y_ref) + for iii in range(len(peaks_neg)): + peaks_neg_new.append(peaks_neg[iii] + y_ref) + peaks_neg_new.append(textline_mask.shape[0] + y_ref) + + if len(cy_main) > 0 and np.max(cy_main) > np.max(peaks_neg_new): + cy_main = np.array(cy_main) * (np.max(peaks_neg_new) / np.max(cy_main)) - 10 + if contours_main != None: + indexer_main = np.arange(len(contours_main)) + if contours_main != None: + len_main = len(contours_main) + else: + len_main = 0 + + matrix_of_orders = np.zeros((len(contours_main) + len(contours_header), 5)) + matrix_of_orders[:, 0] = np.arange(len(contours_main) + len(contours_header)) + matrix_of_orders[: len(contours_main), 1] = 1 + matrix_of_orders[len(contours_main) :, 1] = 2 + matrix_of_orders[: len(contours_main), 2] = cx_main + matrix_of_orders[len(contours_main) :, 2] = cx_header + matrix_of_orders[: len(contours_main), 3] = cy_main + matrix_of_orders[len(contours_main) :, 3] = cy_header + matrix_of_orders[: len(contours_main), 4] = np.arange(len(contours_main)) + matrix_of_orders[len(contours_main) :, 4] = np.arange(len(contours_header)) # print(peaks_neg_new,'peaks_neg_new') # print(matrix_of_orders,'matrix_of_orders') @@ -1108,166 +1267,141 @@ def order_of_regions(textline_mask, contours_main, contours_head, contours_drop, final_indexers_sorted = [] final_types = [] final_index_type = [] - for top, bot in pairwise(peaks_neg_new): - indexes_in, types_in, cxs_in, cys_in, typed_indexes_in = \ - matrix_of_orders[(matrix_of_orders[:, 3] >= top) & - (matrix_of_orders[:, 3] < bot)].T - # if indexes_in.size: - # img = textline_mask.copy() - # plt.imshow(img) - # plt.gca().add_patch(patches.Rectangle((0, top-y_ref), img.shape[1], bot-top, alpha=0.5, color='gray')) - # xrange = np.arange(0, img.shape[1], 50) - # yrange = np.arange(0, img.shape[0], 50) - # plt.gca().set_xticks(xrange, xrange + x_ref) - # plt.gca().set_yticks(yrange, yrange + y_ref) - # for idx, type_, cx, cy in zip(typed_indexes_in, types_in, cxs_in, cys_in): - # cnt = {1: contours_main, 2: contours_head, 3: contours_drop}[type_][idx] - # col = {1: 'red', 2: 'blue', 3: 'green'}[type_] - # plt.scatter(cx - x_ref, cy - y_ref, 
20, c=col, marker='o') - # plt.text(cx - x_ref, cy - y_ref, str(idx), c=col) - # plt.gca().add_patch(patches.Polygon(cnt[:, 0] - [[x_ref, y_ref]], closed=False, fill=False, color=col)) - # plt.title("box contours centered in %d:%d (red=main / blue=heading / green=drop-capital)" % (top, bot)) - # plt.show() - + for i in range(len(peaks_neg_new) - 1): + top = peaks_neg_new[i] + down = peaks_neg_new[i + 1] + indexes_in = matrix_of_orders[:, 0][(matrix_of_orders[:, 3] >= top) & + ((matrix_of_orders[:, 3] < down))] + cxs_in = matrix_of_orders[:, 2][(matrix_of_orders[:, 3] >= top) & + ((matrix_of_orders[:, 3] < down))] + cys_in = matrix_of_orders[:, 3][(matrix_of_orders[:, 3] >= top) & + ((matrix_of_orders[:, 3] < down))] + types_of_text = matrix_of_orders[:, 1][(matrix_of_orders[:, 3] >= top) & + (matrix_of_orders[:, 3] < down)] + index_types_of_text = matrix_of_orders[:, 4][(matrix_of_orders[:, 3] >= top) & + (matrix_of_orders[:, 3] < down)] sorted_inside = np.argsort(cxs_in) - final_indexers_sorted.extend(indexes_in[sorted_inside]) - final_types.extend(types_in[sorted_inside]) - final_index_type.extend(typed_indexes_in[sorted_inside]) + ind_in_int = indexes_in[sorted_inside] + ind_in_type = types_of_text[sorted_inside] + ind_ind_type = index_types_of_text[sorted_inside] + for j in range(len(ind_in_int)): + final_indexers_sorted.append(int(ind_in_int[j])) + final_types.append(int(ind_in_type[j])) + final_index_type.append(int(ind_ind_type[j])) ##matrix_of_orders[:len_main,4]=final_indexers_sorted[:] - assert len(set(final_indexers_sorted)) == total - assert set(final_index_type) == ( - set(range(len(contours_main))) - .union(range(len(contours_head))) - .union(range(len(contours_drop)))) + # This fix is applied if the sum of the lengths of contours and contours_h + # does not match final_indexers_sorted. However, this is not the optimal solution.. + if len(cy_main) + len(cy_header) == len(final_index_type): + pass + else: + indexes_missed = set(np.arange(len(cy_main) + len(cy_header))) - set(final_indexers_sorted) + for ind_missed in indexes_missed: + final_indexers_sorted.append(ind_missed) + final_types.append(1) + final_index_type.append(ind_missed) - return np.array(final_indexers_sorted), np.array(final_types), np.array(final_index_type) + return final_indexers_sorted, matrix_of_orders, final_types, final_index_type def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new( - img_p_in_ver: np.ndarray, - img_p_in_hor: np.ndarray, - num_col_classifier: int, -) -> Tuple[np.ndarray, List[float]]: - """ - Given a horizontal and vertical separator mask, combine horizontal separators - (where possible) and make sure they do not cross each other. 
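# --- editor's sketch (not part of the patch): banded reading order -----------
# order_of_regions above bands the box at the valleys of the smoothed
# horizontal projection profile and reads each band left to right. The same
# idea in isolation (the padding and height=0 peak options are omitted);
# reading_order and centers_xy are illustrative names only.
import numpy as np
from scipy.ndimage import gaussian_filter1d
from scipy.signal import find_peaks

def reading_order(centers_xy, textline_mask, y_ref=0):
    # centers_xy: (N, 2) array of region centers (cx, cy) in page coordinates
    proj = textline_mask.sum(axis=1).astype(float)
    # valleys of the projection = gaps between text bands
    valleys, _ = find_peaks(gaussian_filter1d(proj.max() - proj, 8))
    bounds = np.concatenate(([0], valleys, [textline_mask.shape[0]])) + y_ref
    order = []
    for top, bot in zip(bounds[:-1], bounds[1:]):
        in_band = np.flatnonzero((centers_xy[:, 1] >= top) & (centers_xy[:, 1] < bot))
        order.extend(int(k) for k in in_band[np.argsort(centers_xy[in_band, 0])])
    return order
# Note: like the hunk above ("This fix is applied if ..."), a robust caller
# still has to append any region whose center falls outside every band.
# ------------------------------------------------------------------------------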
- - Arguments: - * img_p_in_ver: mask of vertical separators - * img_p_in_hor: mask of horizontal separators - * num_col_classifier: predicted (expected) number of columns - - Returns: a tuple of - * the final horizontal separators - * the y coordinates with horizontal separators spanning the full width - """ - - # cut horizontal seps by vertical seps - img_p_in_hor[img_p_in_ver > 0] = 0 + img_p_in_ver, img_in_hor,num_col_classifier): #img_p_in_ver = cv2.erode(img_p_in_ver, self.kernel, iterations=2) - _, thresh = cv2.threshold(img_p_in_ver, 0, 255, 0) - contours_lines_ver, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + img_p_in_ver=img_p_in_ver.astype(np.uint8) + img_p_in_ver=np.repeat(img_p_in_ver[:, :, np.newaxis], 3, axis=2) + imgray = cv2.cvtColor(img_p_in_ver, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + + contours_lines_ver,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) slope_lines_ver, _, x_min_main_ver, _, _, _, y_min_main_ver, y_max_main_ver, cx_main_ver = \ find_features_of_lines(contours_lines_ver) for i in range(len(x_min_main_ver)): img_p_in_ver[int(y_min_main_ver[i]): int(y_min_main_ver[i])+30, int(cx_main_ver[i])-25: - int(cx_main_ver[i])+25] = 0 + int(cx_main_ver[i])+25, 0] = 0 img_p_in_ver[int(y_max_main_ver[i])-30: - int(y_max_main_ver[i]+1), + int(y_max_main_ver[i]), int(cx_main_ver[i])-25: - int(cx_main_ver[i])+25] = 0 - height, width = img_p_in_ver.shape + int(cx_main_ver[i])+25, 0] = 0 - _, thresh = cv2.threshold(img_p_in_hor, 0, 255, 0) - contours_lines_hor, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + img_in_hor=img_in_hor.astype(np.uint8) + img_in_hor=np.repeat(img_in_hor[:, :, np.newaxis], 3, axis=2) + imgray = cv2.cvtColor(img_in_hor, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + contours_lines_hor,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) - (slope_lines_hor, - dist_x_hor, - x_min_main_hor, - x_max_main_hor, - cy_main_hor, _, - y_min_main_hor, - y_max_main_hor, - _) = find_features_of_lines(contours_lines_hor) + slope_lines_hor, dist_x_hor, x_min_main_hor, x_max_main_hor, cy_main_hor, _, _, _, _ = \ + find_features_of_lines(contours_lines_hor) + x_width_smaller_than_acolumn_width=img_in_hor.shape[1]/float(num_col_classifier+1.) 
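# --- editor's sketch (not part of the patch): clearing line endpoints --------
# The endpoint loop above blanks a ~30 px window at the top and bottom of
# every vertical line (25 px to each side of its center x), so horizontal
# lines no longer touch the verticals at the former crossing points. The same
# operation with bounds checks; all names here are illustrative.
import numpy as np

def clear_vertical_endpoints(mask, y_min, y_max, cx, pad_y=30, pad_x=25):
    h, w = mask.shape[:2]
    for y0, y1, x in zip(y_min, y_max, cx):
        x0, x1 = max(int(x) - pad_x, 0), min(int(x) + pad_x, w)
        mask[int(y0): min(int(y0) + pad_y, h), x0: x1] = 0   # top end
        mask[max(int(y1) - pad_y, 0): int(y1), x0: x1] = 0   # bottom end
    return mask
# ------------------------------------------------------------------------------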
- avg_col_width = width / float(num_col_classifier + 1) - nseps_wider_than_than_avg_col_width = np.count_nonzero(dist_x_hor>=avg_col_width) - if nseps_wider_than_than_avg_col_width < 10 * num_col_classifier: + len_lines_bigger_than_x_width_smaller_than_acolumn_width=len( dist_x_hor[dist_x_hor>=x_width_smaller_than_acolumn_width] ) + len_lines_bigger_than_x_width_smaller_than_acolumn_width_per_column=int(len_lines_bigger_than_x_width_smaller_than_acolumn_width / + float(num_col_classifier)) + if len_lines_bigger_than_x_width_smaller_than_acolumn_width_per_column < 10: args_hor=np.arange(len(slope_lines_hor)) - sep_pairs=contours_in_same_horizon(cy_main_hor) - img_p_in = np.copy(img_p_in_hor) - if len(sep_pairs): - special_separators=[] - contours_new=[] - for pair in sep_pairs: - merged_all=None - some_args=args_hor[pair] - some_cy=cy_main_hor[pair] - some_x_min=x_min_main_hor[pair] - some_x_max=x_max_main_hor[pair] - some_y_min=y_min_main_hor[pair] - some_y_max=y_max_main_hor[pair] - if np.any(img_p_in_ver[some_y_min.min(): some_y_max.max(), - some_x_max.min(): some_x_min.max()]): - # print("horizontal pair cut by vertical sep", pair, some_args, some_cy, - # "%d:%d" % (some_x_min[0], some_x_max[0]), - # "%d:%d" % (some_x_min[1], some_x_max[1])) - continue + all_args_uniq=contours_in_same_horizon(cy_main_hor) + #print(all_args_uniq,'all_args_uniq') + if len(all_args_uniq)>0: + if type(all_args_uniq[0]) is list: + special_separators=[] + contours_new=[] + for dd in range(len(all_args_uniq)): + merged_all=None + some_args=args_hor[all_args_uniq[dd]] + some_cy=cy_main_hor[all_args_uniq[dd]] + some_x_min=x_min_main_hor[all_args_uniq[dd]] + some_x_max=x_max_main_hor[all_args_uniq[dd]] - #img_in=np.zeros(separators_closeup_n[:,:,2].shape) - #print(img_p_in_ver.shape[1],some_x_max-some_x_min,'xdiff') - sum_xspan = dist_x_hor[some_args].sum() - tot_xspan = (np.max(x_max_main_hor[some_args]) - - np.min(x_min_main_hor[some_args])) - dev_xspan = (np.std(dist_x_hor[some_args]) / - np.mean(dist_x_hor[some_args])) if sum_xspan else 1 - if (tot_xspan > sum_xspan and # no x overlap - sum_xspan > 0.85 * tot_xspan): # x close to each other - # print("merging horizontal pair", pair, some_args, some_cy, - # "%d:%d" % (some_x_min[0], some_x_max[0]), - # "%d:%d" % (some_x_min[1], some_x_max[1])) - img_p_in[int(np.mean(some_cy)) - 5: - int(np.mean(some_cy)) + 5, - np.min(some_x_min): - np.max(some_x_max)] = 255 + #img_in=np.zeros(separators_closeup_n[:,:,2].shape) + #print(img_p_in_ver.shape[1],some_x_max-some_x_min,'xdiff') + diff_x_some=some_x_max-some_x_min + for jv in range(len(some_args)): + img_p_in=cv2.fillPoly(img_in_hor, pts=[contours_lines_hor[some_args[jv]]], color=(1,1,1)) + if any(i_diff>(img_p_in_ver.shape[1]/float(3.3)) for i_diff in diff_x_some): + img_p_in[int(np.mean(some_cy))-5: + int(np.mean(some_cy))+5, + int(np.min(some_x_min)): + int(np.max(some_x_max)) ]=1 + sum_dis=dist_x_hor[some_args].sum() + diff_max_min_uniques=np.max(x_max_main_hor[some_args])-np.min(x_min_main_hor[some_args]) - if (tot_xspan > sum_xspan and # no x overlap - sum_xspan > 0.85 * tot_xspan and # x close to each other - tot_xspan > 0.85 * width and # nearly full width - dev_xspan < 0.55): # similar x span - # print(dist_x_hor[some_args], - # dist_x_hor[some_args].sum(), - # np.min(x_min_main_hor[some_args]), - # np.max(x_max_main_hor[some_args]),'jalibdi') - # print(np.mean( dist_x_hor[some_args] ), - # np.std( dist_x_hor[some_args] ), - # np.var( dist_x_hor[some_args] ),'jalibdiha') - 
special_separators.append(np.mean(cy_main_hor[some_args])) - # print("special separator for midline", special_separators[-1]) - # plt.subplot(1, 2, 1, title='original horizontal (1) / vertical (2) seps') - # plt.imshow(1 * (img_p_in_hor > 0) + 2 * (img_p_in_ver > 0)) - # plt.subplot(1, 2, 2, title='extended horizontal seps') - # plt.imshow(img_p_in) - # plt.show() + if (diff_max_min_uniques > sum_dis and + sum_dis / float(diff_max_min_uniques) > 0.85 and + diff_max_min_uniques / float(img_p_in_ver.shape[1]) > 0.85 and + np.std(dist_x_hor[some_args]) < 0.55 * np.mean(dist_x_hor[some_args])): + # print(dist_x_hor[some_args], + # dist_x_hor[some_args].sum(), + # np.min(x_min_main_hor[some_args]), + # np.max(x_max_main_hor[some_args]),'jalibdi') + # print(np.mean( dist_x_hor[some_args] ), + # np.std( dist_x_hor[some_args] ), + # np.var( dist_x_hor[some_args] ),'jalibdiha') + special_separators.append(np.mean(cy_main_hor[some_args])) + else: + img_p_in=img_in_hor + special_separators=[] else: - img_p_in = img_p_in_hor - special_separators = [] + img_p_in=img_in_hor + special_separators=[] - #img_p_in_ver[img_p_in_ver == 255] = 1 - # sep_ver_hor_cross = 255 * ((img_p_in > 0) & (img_p_in_ver > 0)) - # contours_cross, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - # center_cross = np.array(find_center_of_contours(contours_cross), dtype=int) - # for cx, cy in center_cross.T: - # img_p_in[cy - 30: cy + 30, cx + 5: cx + 40] = 0 - # img_p_in[cy - 30: cy + 30, cx - 40: cx - 4] = 0 + img_p_in_ver[:,:,0][img_p_in_ver[:,:,0]==255]=1 + sep_ver_hor=img_p_in+img_p_in_ver + sep_ver_hor_cross=(sep_ver_hor[:,:,0]==2)*1 + sep_ver_hor_cross=np.repeat(sep_ver_hor_cross[:, :, np.newaxis], 3, axis=2) + sep_ver_hor_cross=sep_ver_hor_cross.astype(np.uint8) + imgray = cv2.cvtColor(sep_ver_hor_cross, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + contours_cross,_=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + cx_cross,cy_cross ,_ , _, _ ,_,_=find_new_features_of_contours(contours_cross) + for ii in range(len(cx_cross)): + img_p_in[int(cy_cross[ii])-30:int(cy_cross[ii])+30,int(cx_cross[ii])+5:int(cx_cross[ii])+40,0]=0 + img_p_in[int(cy_cross[ii])-30:int(cy_cross[ii])+30,int(cx_cross[ii])-40:int(cx_cross[ii])-4,0]=0 else: - img_p_in = np.copy(img_p_in_hor) - special_separators = [] - return img_p_in, special_separators + img_p_in=np.copy(img_in_hor) + special_separators=[] + return img_p_in[:,:,0], special_separators def return_points_with_boundies(peaks_neg_fin, first_point, last_point): peaks_neg_tot = [] @@ -1277,200 +1411,203 @@ def return_points_with_boundies(peaks_neg_fin, first_point, last_point): peaks_neg_tot.append(last_point) return peaks_neg_tot -def find_number_of_columns_in_document( - regions_without_separators: np.ndarray, - separator_mask: np.ndarray, - num_col_classifier: int, - tables: bool, - contours_h: List[np.ndarray] = None, - logger=None -) -> Tuple[int, List[int], np.ndarray, List[int], np.ndarray]: - """ - Extract vertical and horizontal separators, vertical splits and horizontal column boundaries on page. 
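# --- editor's sketch (not part of the patch): the pair-merging test ----------
# Two horizontal separators on (almost) the same baseline are fused into one
# full-width "special separator" when they do not overlap in x, together
# cover nearly the whole span between their outer ends, stretch over most of
# the page width, and have similar lengths. The predicate in isolation:
import numpy as np

def is_mergeable(x_min, x_max, page_width):
    lengths = np.asarray(x_max, float) - np.asarray(x_min, float)
    span = np.max(x_max) - np.min(x_min)
    return (span > lengths.sum() and                    # no x overlap
            lengths.sum() > 0.85 * span and             # nearly contiguous
            span > 0.85 * page_width and                # nearly full width
            np.std(lengths) < 0.55 * np.mean(lengths))  # similar lengths

assert is_mergeable([0, 520], [480, 1000], 1000)        # two halves of a rule
assert not is_mergeable([0, 900], [100, 1000], 1000)    # short stubs
# ------------------------------------------------------------------------------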
- - Arguments: - * regions_without_separators: mask of (non-separator) region labels - * separator_mask: mask of (separator-only) region labels - * num_col_classifier: predicted (expected) number of columns of the page - * tables: whether tables may be present - * contours_h: polygons of potential headings (serving as additional horizontal separators) - * logger - - Returns: a tuple of - * the actual number of columns found - * the x coordinates of the column boundaries - * an array of the separators (bounding boxes and types) - * the y coordinates of the page splits - """ - if logger is None: - logger = getLogger(__package__) - - height, width = separator_mask.shape - separators_closeup = separator_mask.astype(np.uint8) - separators_closeup[0:110] = 0 - separators_closeup[-150:] = 0 +def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, pixel_lines, contours_h=None): + t_ins_c0 = time.time() + separators_closeup=( (region_pre_p[:,:,:]==pixel_lines))*1 + separators_closeup[0:110,:,:]=0 + separators_closeup[separators_closeup.shape[0]-150:,:,:]=0 kernel = np.ones((5,5),np.uint8) - separators_closeup = cv2.morphologyEx(separators_closeup, cv2.MORPH_CLOSE, kernel, iterations=1) + separators_closeup=separators_closeup.astype(np.uint8) + separators_closeup = cv2.dilate(separators_closeup,kernel,iterations = 1) + separators_closeup = cv2.erode(separators_closeup,kernel,iterations = 1) - # find horizontal lines by contour properties - contours_sep_e, _ = cv2.findContours(separators_closeup, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - cnts_hor_e = [] - for cnt in contours_sep_e: - max_xe = cnt[:, 0, 0].max() - min_xe = cnt[:, 0, 0].min() - max_ye = cnt[:, 0, 1].max() - min_ye = cnt[:, 0, 1].min() - med_ye = int(np.median(cnt[:, 0, 1])) - dist_xe = max_xe - min_xe - dist_ye = max_ye - min_ye - if dist_ye <= 50 and dist_xe >= 3 * dist_ye: - cnts_hor_e.append(cnt) + separators_closeup_new=np.zeros((separators_closeup.shape[0] ,separators_closeup.shape[1] )) + separators_closeup_n=np.copy(separators_closeup) + separators_closeup_n=separators_closeup_n.astype(np.uint8) - # delete horizontal contours (leaving only the edges) - separators_closeup = cv2.fillPoly(separators_closeup, pts=cnts_hor_e, color=0) - edges = cv2.adaptiveThreshold(separators_closeup * 255, 255, - cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -2) - horizontal = np.copy(edges) - vertical = np.copy(edges) + separators_closeup_n_binary=np.zeros(( separators_closeup_n.shape[0],separators_closeup_n.shape[1]) ) + separators_closeup_n_binary[:,:]=separators_closeup_n[:,:,0] + separators_closeup_n_binary[:,:][separators_closeup_n_binary[:,:]!=0]=1 - horizontal_size = horizontal.shape[1] // 30 - # find horizontal lines by morphology + gray_early=np.repeat(separators_closeup_n_binary[:, :, np.newaxis], 3, axis=2) + gray_early=gray_early.astype(np.uint8) + imgray_e = cv2.cvtColor(gray_early, cv2.COLOR_BGR2GRAY) + ret_e, thresh_e = cv2.threshold(imgray_e, 0, 255, 0) + + contours_line_e,hierarchy_e=cv2.findContours(thresh_e,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + _, dist_xe, _, _, _, _, y_min_main, y_max_main, _ = \ + find_features_of_lines(contours_line_e) + dist_ye = y_max_main - y_min_main + args_e=np.arange(len(contours_line_e)) + args_hor_e=args_e[(dist_ye<=50) & + (dist_xe>=3*dist_ye)] + cnts_hor_e=[] + for ce in args_hor_e: + cnts_hor_e.append(contours_line_e[ce]) + figs_e=np.zeros(thresh_e.shape) + figs_e=cv2.fillPoly(figs_e,pts=cnts_hor_e,color=(1,1,1)) + + 
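# --- editor's sketch (not part of the patch): horizontal-rule detection ------
# The contour filter above keeps a separator contour as a horizontal rule when
# it is thin and wide (height <= 50 px, width >= 3x height). Roughly the same
# test via bounding rectangles; names are illustrative.
import cv2
import numpy as np

def horizontal_line_contours(binary_mask, max_h=50, min_aspect=3):
    contours, _ = cv2.findContours(binary_mask.astype(np.uint8),
                                   cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    keep = []
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        if h <= max_h and w >= min_aspect * h:
            keep.append(cnt)
    return keep
# ------------------------------------------------------------------------------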
separators_closeup_n_binary=cv2.fillPoly(separators_closeup_n_binary, pts=cnts_hor_e, color=(0,0,0)) + gray = cv2.bitwise_not(separators_closeup_n_binary) + gray=gray.astype(np.uint8) + + bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \ + cv2.THRESH_BINARY, 15, -2) + horizontal = np.copy(bw) + vertical = np.copy(bw) + + cols = horizontal.shape[1] + horizontal_size = cols // 30 + # Create structure element for extracting horizontal lines through morphology operations horizontalStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontal_size, 1)) - horizontal = cv2.morphologyEx(horizontal, cv2.MORPH_OPEN, horizontalStructure) - horizontal = cv2.morphologyEx(horizontal, cv2.MORPH_CLOSE, kernel, iterations=2) - # re-insert deleted horizontal contours - horizontal = cv2.fillPoly(horizontal, pts=cnts_hor_e, color=255) + # Apply morphology operations + horizontal = cv2.erode(horizontal, horizontalStructure) + horizontal = cv2.dilate(horizontal, horizontalStructure) - vertical_size = vertical.shape[0] // 30 - # find vertical lines by morphology - verticalStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, vertical_size)) - vertical = cv2.morphologyEx(vertical, cv2.MORPH_OPEN, verticalStructure) - vertical = cv2.dilate(vertical, kernel, iterations=1) + kernel = np.ones((5,5),np.uint8) + horizontal = cv2.dilate(horizontal,kernel,iterations = 2) + horizontal = cv2.erode(horizontal,kernel,iterations = 2) + horizontal = cv2.fillPoly(horizontal, pts=cnts_hor_e, color=(255,255,255)) + + rows = vertical.shape[0] + verticalsize = rows // 30 + # Create structure element for extracting vertical lines through morphology operations + verticalStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, verticalsize)) + # Apply morphology operations + vertical = cv2.erode(vertical, verticalStructure) + vertical = cv2.dilate(vertical, verticalStructure) + vertical = cv2.dilate(vertical,kernel,iterations = 1) horizontal, special_separators = \ combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new( vertical, horizontal, num_col_classifier) - _, thresh = cv2.threshold(vertical, 0, 255, 0) - contours_sep_vers, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - slope_seps, dist_x, x_min_seps, x_max_seps, cy_seps, slope_seps_org, y_min_seps, y_max_seps, cx_seps = \ - find_features_of_lines(contours_sep_vers) + separators_closeup_new[:,:][vertical[:,:]!=0]=1 + separators_closeup_new[:,:][horizontal[:,:]!=0]=1 - args=np.arange(len(slope_seps)) - args_ver=args[slope_seps==1] - dist_x_ver=dist_x[slope_seps==1] - y_min_seps_ver=y_min_seps[slope_seps==1] - y_max_seps_ver=y_max_seps[slope_seps==1] - x_min_seps_ver=x_min_seps[slope_seps==1] - x_max_seps_ver=x_max_seps[slope_seps==1] - cx_seps_ver=cx_seps[slope_seps==1] - dist_y_ver=y_max_seps_ver-y_min_seps_ver + vertical=np.repeat(vertical[:, :, np.newaxis], 3, axis=2) + vertical=vertical.astype(np.uint8) + + imgray = cv2.cvtColor(vertical, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + + contours_line_vers,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + slope_lines, dist_x, x_min_main, x_max_main, cy_main, slope_lines_org, y_min_main, y_max_main, cx_main = \ + find_features_of_lines(contours_line_vers) + + args=np.arange(len(slope_lines)) + args_ver=args[slope_lines==1] + dist_x_ver=dist_x[slope_lines==1] + y_min_main_ver=y_min_main[slope_lines==1] + y_max_main_ver=y_max_main[slope_lines==1] + x_min_main_ver=x_min_main[slope_lines==1] + 
x_max_main_ver=x_max_main[slope_lines==1] + cx_main_ver=cx_main[slope_lines==1] + dist_y_ver=y_max_main_ver-y_min_main_ver len_y=separators_closeup.shape[0]/3.0 - _, thresh = cv2.threshold(horizontal, 0, 255, 0) - contours_sep_hors, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - slope_seps, dist_x, x_min_seps, x_max_seps, cy_seps, slope_seps_org, y_min_seps, y_max_seps, cx_seps = \ - find_features_of_lines(contours_sep_hors) + horizontal=np.repeat(horizontal[:, :, np.newaxis], 3, axis=2) + horizontal=horizontal.astype(np.uint8) + imgray = cv2.cvtColor(horizontal, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + contours_line_hors,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + slope_lines, dist_x, x_min_main, x_max_main, cy_main, slope_lines_org, y_min_main, y_max_main, cx_main = \ + find_features_of_lines(contours_line_hors) - slope_seps_org_hor=slope_seps_org[slope_seps==0] - args=np.arange(len(slope_seps)) + slope_lines_org_hor=slope_lines_org[slope_lines==0] + args=np.arange(len(slope_lines)) len_x=separators_closeup.shape[1]/5.0 - dist_y=np.abs(y_max_seps-y_min_seps) + dist_y=np.abs(y_max_main-y_min_main) - args_hor=args[slope_seps==0] - dist_x_hor=dist_x[slope_seps==0] - y_min_seps_hor=y_min_seps[slope_seps==0] - y_max_seps_hor=y_max_seps[slope_seps==0] - x_min_seps_hor=x_min_seps[slope_seps==0] - x_max_seps_hor=x_max_seps[slope_seps==0] - dist_y_hor=dist_y[slope_seps==0] - cy_seps_hor=cy_seps[slope_seps==0] + args_hor=args[slope_lines==0] + dist_x_hor=dist_x[slope_lines==0] + y_min_main_hor=y_min_main[slope_lines==0] + y_max_main_hor=y_max_main[slope_lines==0] + x_min_main_hor=x_min_main[slope_lines==0] + x_max_main_hor=x_max_main[slope_lines==0] + dist_y_hor=dist_y[slope_lines==0] + cy_main_hor=cy_main[slope_lines==0] args_hor=args_hor[dist_x_hor>=len_x/2.0] - x_max_seps_hor=x_max_seps_hor[dist_x_hor>=len_x/2.0] - x_min_seps_hor=x_min_seps_hor[dist_x_hor>=len_x/2.0] - cy_seps_hor=cy_seps_hor[dist_x_hor>=len_x/2.0] - y_min_seps_hor=y_min_seps_hor[dist_x_hor>=len_x/2.0] - y_max_seps_hor=y_max_seps_hor[dist_x_hor>=len_x/2.0] + x_max_main_hor=x_max_main_hor[dist_x_hor>=len_x/2.0] + x_min_main_hor=x_min_main_hor[dist_x_hor>=len_x/2.0] + cy_main_hor=cy_main_hor[dist_x_hor>=len_x/2.0] + y_min_main_hor=y_min_main_hor[dist_x_hor>=len_x/2.0] + y_max_main_hor=y_max_main_hor[dist_x_hor>=len_x/2.0] dist_y_hor=dist_y_hor[dist_x_hor>=len_x/2.0] - slope_seps_org_hor=slope_seps_org_hor[dist_x_hor>=len_x/2.0] + slope_lines_org_hor=slope_lines_org_hor[dist_x_hor>=len_x/2.0] dist_x_hor=dist_x_hor[dist_x_hor>=len_x/2.0] - matrix_of_seps_ch = np.zeros((len(cy_seps_hor)+len(cx_seps_ver), 10), dtype=int) - matrix_of_seps_ch[:len(cy_seps_hor),0]=args_hor - matrix_of_seps_ch[len(cy_seps_hor):,0]=args_ver - matrix_of_seps_ch[len(cy_seps_hor):,1]=cx_seps_ver - matrix_of_seps_ch[:len(cy_seps_hor),2]=x_min_seps_hor+50#x_min_seps_hor+150 - matrix_of_seps_ch[len(cy_seps_hor):,2]=x_min_seps_ver - matrix_of_seps_ch[:len(cy_seps_hor),3]=x_max_seps_hor-50#x_max_seps_hor-150 - matrix_of_seps_ch[len(cy_seps_hor):,3]=x_max_seps_ver - matrix_of_seps_ch[:len(cy_seps_hor),4]=dist_x_hor - matrix_of_seps_ch[len(cy_seps_hor):,4]=dist_x_ver - matrix_of_seps_ch[:len(cy_seps_hor),5]=cy_seps_hor - matrix_of_seps_ch[:len(cy_seps_hor),6]=y_min_seps_hor - matrix_of_seps_ch[len(cy_seps_hor):,6]=y_min_seps_ver - matrix_of_seps_ch[:len(cy_seps_hor),7]=y_max_seps_hor - matrix_of_seps_ch[len(cy_seps_hor):,7]=y_max_seps_ver - 
matrix_of_seps_ch[:len(cy_seps_hor),8]=dist_y_hor - matrix_of_seps_ch[len(cy_seps_hor):,8]=dist_y_ver - matrix_of_seps_ch[len(cy_seps_hor):,9]=1 + matrix_of_lines_ch=np.zeros((len(cy_main_hor)+len(cx_main_ver),10)) + matrix_of_lines_ch[:len(cy_main_hor),0]=args_hor + matrix_of_lines_ch[len(cy_main_hor):,0]=args_ver + matrix_of_lines_ch[len(cy_main_hor):,1]=cx_main_ver + matrix_of_lines_ch[:len(cy_main_hor),2]=x_min_main_hor+50#x_min_main_hor+150 + matrix_of_lines_ch[len(cy_main_hor):,2]=x_min_main_ver + matrix_of_lines_ch[:len(cy_main_hor),3]=x_max_main_hor-50#x_max_main_hor-150 + matrix_of_lines_ch[len(cy_main_hor):,3]=x_max_main_ver + matrix_of_lines_ch[:len(cy_main_hor),4]=dist_x_hor + matrix_of_lines_ch[len(cy_main_hor):,4]=dist_x_ver + matrix_of_lines_ch[:len(cy_main_hor),5]=cy_main_hor + matrix_of_lines_ch[:len(cy_main_hor),6]=y_min_main_hor + matrix_of_lines_ch[len(cy_main_hor):,6]=y_min_main_ver + matrix_of_lines_ch[:len(cy_main_hor),7]=y_max_main_hor + matrix_of_lines_ch[len(cy_main_hor):,7]=y_max_main_ver + matrix_of_lines_ch[:len(cy_main_hor),8]=dist_y_hor + matrix_of_lines_ch[len(cy_main_hor):,8]=dist_y_ver + matrix_of_lines_ch[len(cy_main_hor):,9]=1 if contours_h is not None: - _, dist_x_head, x_min_head, x_max_head, cy_head, _, y_min_head, y_max_head, _ = \ + _, dist_x_head, x_min_main_head, x_max_main_head, cy_main_head, _, y_min_main_head, y_max_main_head, _ = \ find_features_of_lines(contours_h) - matrix_l_n = np.zeros((len(cy_head), matrix_of_seps_ch.shape[1]), dtype=int) - args_head = np.arange(len(cy_head)) - matrix_l_n[:, 0] = args_head - matrix_l_n[:, 2] = x_min_head - matrix_l_n[:, 3] = x_max_head - matrix_l_n[:, 4] = dist_x_head - matrix_l_n[:, 5] = cy_head - matrix_l_n[:, 6] = y_min_head - matrix_l_n[:, 7] = y_max_head - matrix_l_n[:, 8] = y_max_head - y_min_head - matrix_l_n[:, 9] = 2 # mark as heading (so it can be split into 2 horizontal separators as needed) - matrix_of_seps_ch = np.append( - matrix_of_seps_ch, matrix_l_n, axis=0) + matrix_l_n=np.zeros((matrix_of_lines_ch.shape[0]+len(cy_main_head),matrix_of_lines_ch.shape[1])) + matrix_l_n[:matrix_of_lines_ch.shape[0],:]=np.copy(matrix_of_lines_ch[:,:]) + args_head=np.arange(len(cy_main_head)) + len(cy_main_hor) - # ensure no seps are out of bounds - matrix_of_seps_ch[:, 1] = np.maximum(np.minimum(matrix_of_seps_ch[:, 1], width), 0) - matrix_of_seps_ch[:, 2] = np.maximum(matrix_of_seps_ch[:, 2], 0) - matrix_of_seps_ch[:, 3] = np.minimum(matrix_of_seps_ch[:, 3], width) - matrix_of_seps_ch[:, 5] = np.maximum(np.minimum(matrix_of_seps_ch[:, 5], height), 0) - matrix_of_seps_ch[:, 6] = np.maximum(matrix_of_seps_ch[:, 6], 0) - matrix_of_seps_ch[:, 7] = np.minimum(matrix_of_seps_ch[:, 7], height) - - cy_seps_splitters=cy_seps_hor[(x_min_seps_hor <= .16 * width) & - (x_max_seps_hor >= .84 * width)] - cy_seps_splitters = np.append(cy_seps_splitters, special_separators) + matrix_l_n[matrix_of_lines_ch.shape[0]:,0]=args_head + matrix_l_n[matrix_of_lines_ch.shape[0]:,2]=x_min_main_head+30 + matrix_l_n[matrix_of_lines_ch.shape[0]:,3]=x_max_main_head-30 + matrix_l_n[matrix_of_lines_ch.shape[0]:,4]=dist_x_head + matrix_l_n[matrix_of_lines_ch.shape[0]:,5]=y_min_main_head-3-8 + matrix_l_n[matrix_of_lines_ch.shape[0]:,6]=y_min_main_head-5-8 + matrix_l_n[matrix_of_lines_ch.shape[0]:,7]=y_max_main_head#y_min_main_head+1-8 + matrix_l_n[matrix_of_lines_ch.shape[0]:,8]=4 + matrix_of_lines_ch=np.copy(matrix_l_n) + cy_main_splitters=cy_main_hor[(x_min_main_hor<=.16*region_pre_p.shape[1]) & + 
(x_max_main_hor>=.84*region_pre_p.shape[1])]
+    cy_main_splitters=np.array( list(cy_main_splitters)+list(special_separators))
     if contours_h is not None:
-        y_min_splitters_head = y_min_head[(x_min_head <= .16 * width) &
-                                          (x_max_head >= .84 * width)]
-        y_max_splitters_head = y_max_head[(x_min_head <= .16 * width) &
-                                          (x_max_head >= .84 * width)]
-        cy_seps_splitters = np.append(cy_seps_splitters, y_min_splitters_head)
-        cy_seps_splitters = np.append(cy_seps_splitters, y_max_splitters_head)
+        try:
+            cy_main_splitters_head=cy_main_head[(x_min_main_head<=.16*region_pre_p.shape[1]) &
+                                                (x_max_main_head>=.84*region_pre_p.shape[1])]
+            cy_main_splitters=np.array( list(cy_main_splitters)+list(cy_main_splitters_head))
+        except:
+            pass
+    args_cy_splitter=np.argsort(cy_main_splitters)
+    cy_main_splitters_sort=cy_main_splitters[args_cy_splitter]
-    cy_seps_splitters = np.sort(cy_seps_splitters).astype(int)
-    splitter_y_new = [0] + list(cy_seps_splitters) + [height]
-    big_part = 22 * height // 100 # percent height
+    splitter_y_new=[]
+    splitter_y_new.append(0)
+    for i in range(len(cy_main_splitters_sort)):
+        splitter_y_new.append( cy_main_splitters_sort[i] )
+    splitter_y_new.append(region_pre_p.shape[0])
+    splitter_y_new_diff=np.diff(splitter_y_new)/float(region_pre_p.shape[0])*100
+
+    args_big_parts=np.arange(len(splitter_y_new_diff))[ splitter_y_new_diff>22 ]
+
+    regions_without_separators=return_regions_without_separators(region_pre_p)
+    length_y_threshold=regions_without_separators.shape[0]/4.0
     num_col_fin=0
     peaks_neg_fin_fin=[]
-    num_big_parts = 0
-    for top, bot in pairwise(splitter_y_new):
-        if bot - top < big_part:
-            continue
-        num_big_parts += 1
+    for itiles in args_big_parts:
+        regions_without_separators_tile=regions_without_separators[int(splitter_y_new[itiles]):
+                                                                   int(splitter_y_new[itiles+1]),:,0]
         try:
-            num_col, peaks_neg_fin = find_num_col(regions_without_separators[top: bot],
-                                                  num_col_classifier, tables,
-                                                  vertical_separators=1 * (vertical[top: bot] > 0),
-                                                  multiplier=7.0)
-            logger.debug("big part %d:%d has %d columns", top, bot, num_col + 1)
-            # print(peaks_neg_fin)
+            num_col, peaks_neg_fin = find_num_col(regions_without_separators_tile,
+                                                  num_col_classifier, tables, multiplier=7.0)
         except:
            num_col = 0
            peaks_neg_fin = []
@@ -1478,477 +1615,583 @@ def find_number_of_columns_in_document(
            num_col_fin=num_col
            peaks_neg_fin_fin=peaks_neg_fin
-    if num_big_parts == 1 and len(peaks_neg_fin_fin) + 1 < num_col_classifier:
+    if len(args_big_parts)==1 and (len(peaks_neg_fin_fin)+1)<num_col_classifier:
        peaks_neg_fin=find_num_col_by_vertical_lines(vertical)
        peaks_neg_fin=peaks_neg_fin[peaks_neg_fin>=500]
        peaks_neg_fin=peaks_neg_fin[peaks_neg_fin<=(vertical.shape[1]-500)]
        peaks_neg_fin_fin=peaks_neg_fin[:]
-    return num_col_fin, peaks_neg_fin_fin, matrix_of_seps_ch, splitter_y_new
+    return num_col_fin, peaks_neg_fin_fin,matrix_of_lines_ch,splitter_y_new,separators_closeup_n

def return_boxes_of_images_by_order_of_reading_new(
-        splitter_y_new,
-        text_mask,
-        sep_mask,
-        matrix_of_seps_ch,
-        num_col_classifier, erosion_hurts, tables,
-        right2left_readingorder,
-        logger=None):
-    """
-    Iterate through the vertical parts of a page, each with its own set of columns,
-    and from the matrix of horizontal separators for that part, find an ordered
-    list of bounding boxes through all columns and regions.
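# --- editor's sketch (not part of the patch): y-splits and big parts ---------
# find_number_of_columns_in_document above turns every (nearly) full-width
# separator into a horizontal page split, and only parts taller than ~22% of
# the page are later searched for columns (find_num_col). The splitting step
# on its own; big_part_spans is a hypothetical name.
import numpy as np

def big_part_spans(cy_splitters, page_h, min_part_frac=0.22):
    # (top, bottom) spans between sorted splitters, keeping only "big" parts
    splits = np.concatenate(([0], np.sort(cy_splitters), [page_h]))
    return [(int(t), int(b))
            for t, b in zip(splits[:-1], splits[1:])
            if (b - t) >= min_part_frac * page_h]

# toy usage: two separators on a 1000 px page; the 100 px strip is skipped
assert big_part_spans(np.array([400, 500]), 1000) == [(0, 400), (500, 1000)]
# ------------------------------------------------------------------------------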
- - Arguments: - * splitter_y_new: the y coordinates separating the parts - * text_mask: binary text region mask - (needed to find per-part columns and to combine separators if possible) - * sep_mask: binary separator region mask - (needed to elongate separators if possible) - * matrix_of_seps: type and coordinates of horizontal and vertical separators, - as well as headings - * num_col_classifier: predicted number of columns for the entire page - * erosion_hurts: whether region masks have already been eroded - (and thus gaps can be expected to be wider) - * tables: bool - * right2left_readingorder: whether to invert the default left-to-right order - - Returns: a tuple of - * the ordered list of bounding boxes - * a list of arrays: the x coordinates delimiting the columns for every page part - (according to splitter) - """ + splitter_y_new, regions_without_separators, + matrix_of_lines_ch, + num_col_classifier, erosion_hurts, tables, right2left_readingorder): if right2left_readingorder: - text_mask = cv2.flip(text_mask,1) - sep_mask = cv2.flip(sep_mask,1) - if logger is None: - logger = getLogger(__package__) - logger.debug('enter return_boxes_of_images_by_order_of_reading_new') - - # def dbg_imshow(box, title): - # xmin, xmax, ymin, ymax = box - # plt.imshow(1 * text_mask + 3 * sep_mask) #, extent=[0, width_tot, bot, top]) - # plt.gca().add_patch(patches.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, - # fill=False, linewidth=1, edgecolor='r')) - # plt.title(title + " at %d:%d, %d:%d" % (ymin, ymax, xmin, xmax)) - # plt.show() - # def dbg_plt(box=None, title=None, rectangles=None, rectangles_showidx=False): - # minx, maxx, miny, maxy = box or (0, None, 0, None) - # img = text_mask[miny:maxy, minx:maxx] - # plt.imshow(img) - # step = max(img.shape) // 10 - # xrange = np.arange(0, img.shape[1], step) - # yrange = np.arange(0, img.shape[0], step) - # ax = plt.gca() - # ax.set_xticks(xrange) - # ax.set_yticks(yrange) - # ax.set_xticklabels(xrange + minx) - # ax.set_yticklabels(yrange + miny) - # def format_coord(x, y): - # return 'x={:g}, y={:g}'.format(x + minx, y + miny) - # ax.format_coord = format_coord - # if title: - # plt.title(title) - # if rectangles: - # for i, (xmin, xmax, ymin, ymax) in enumerate(rectangles): - # ax.add_patch(patches.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, - # fill=False, linewidth=1, edgecolor='r')) - # if rectangles_showidx: - # ax.text((xmin+xmax)/2, (ymin+ymax)/2, str(i), c='r') - # plt.show() - # dbg_plt(title="return_boxes_of_images_by_order_of_reading_new") - + regions_without_separators = cv2.flip(regions_without_separators,1) boxes=[] peaks_neg_tot_tables = [] - splitter_y_new = np.array(splitter_y_new, dtype=int) - height_tot, width_tot = text_mask.shape - big_part = 22 * height_tot // 100 # percent height - _, ccomps, cstats, _ = cv2.connectedComponentsWithStats(text_mask.astype(np.uint8)) - args_ver = matrix_of_seps_ch[:, 9] == 1 - mask_ver = np.zeros_like(sep_mask, dtype=bool) - for i in np.flatnonzero(args_ver): - mask_ver[matrix_of_seps_ch[i, 6]: matrix_of_seps_ch[i, 7], - matrix_of_seps_ch[i, 2]: matrix_of_seps_ch[i, 3]] = True - vertical_seps = 1 * (sep_mask & mask_ver) - for top, bot in pairwise(splitter_y_new): - # print("%d:%d" % (top, bot), 'i') - # dbg_plt([0, None, top, bot], "image cut for y split %d:%d" % (top, bot)) - matrix_new = matrix_of_seps_ch[(matrix_of_seps_ch[:,6] >= top) & - (matrix_of_seps_ch[:,7] < bot)] + for i in range(len(splitter_y_new)-1): + #print(splitter_y_new[i],splitter_y_new[i+1]) + matrix_new = 
matrix_of_lines_ch[:,:][(matrix_of_lines_ch[:,6]> splitter_y_new[i] ) & + (matrix_of_lines_ch[:,7]< splitter_y_new[i+1] )] #print(len( matrix_new[:,9][matrix_new[:,9]==1] )) #print(matrix_new[:,8][matrix_new[:,9]==1],'gaddaaa') # check to see is there any vertical separator to find holes. #if (len(matrix_new[:,9][matrix_new[:,9]==1]) > 0 and # np.max(matrix_new[:,8][matrix_new[:,9]==1]) >= - # 0.1 * (np.abs(bot-top))): - num_col, peaks_neg_fin = find_num_col( - text_mask[top:bot], - # we do not expect to get all columns in small parts (headings etc.): - num_col_classifier if bot - top >= big_part else 1, - tables, vertical_separators=vertical_seps[top: bot], - multiplier=6. if erosion_hurts else 7., - unbalanced=True) - try: - if ((len(peaks_neg_fin) + 1 < num_col_classifier or - num_col_classifier == 6) and - # we do not expect to get all columns in small parts (headings etc.): - bot - top >= big_part): - # found too few columns here - #print('burda') - logger.debug("searching for more than %d columns in big part %d:%d", - len(peaks_neg_fin) + 1, top, bot) - peaks_neg_fin_org = np.copy(peaks_neg_fin) - #print("peaks_neg_fin_org", peaks_neg_fin_org) - if len(peaks_neg_fin) == 0: + # 0.1 * (np.abs(splitter_y_new[i+1]-splitter_y_new[i]))): + if True: + try: + if erosion_hurts: num_col, peaks_neg_fin = find_num_col( - text_mask[top:bot], - num_col_classifier, tables, - vertical_separators=vertical_seps[top: bot], - # try to be less strict (lower threshold than above) - multiplier=7. if erosion_hurts else 8., - unbalanced=True) - #print(peaks_neg_fin,'peaks_neg_fin') - peaks_neg_fin_early = [0] + peaks_neg_fin + [width_tot-1] - - #print(peaks_neg_fin_early,'burda2') - peaks_neg_fin_rev=[] - for left, right in pairwise(peaks_neg_fin_early): - # print("%d:%d" % (left, right), 'i_n') - # dbg_plt([left, right, top, bot], - # "image cut for y split %d:%d / x gap %d:%d" % ( - # top, bot, left, right)) - # plt.plot(text_mask[top:bot, left:right].sum(axis=0)) - # plt.title("vertical projection (sum over y)") - # plt.show() - # try to get more peaks with different multipliers - num_col_expected = round((right - left) / width_tot * num_col_classifier) - args = text_mask[top:bot, left:right], num_col_expected, tables - kwargs = dict(vertical_separators=vertical_seps[top: bot, left:right]) - _, peaks_neg_fin1 = find_num_col(*args, **kwargs, multiplier=7.) - _, peaks_neg_fin2 = find_num_col(*args, **kwargs, multiplier=5.) - if len(peaks_neg_fin1) >= len(peaks_neg_fin2): - peaks_neg_fin = peaks_neg_fin1 - else: - peaks_neg_fin = peaks_neg_fin2 - # print(peaks_neg_fin) - logger.debug("found %d additional column boundaries in %d:%d", - len(peaks_neg_fin), left, right) - # add offset to local result - peaks_neg_fin = list(np.array(peaks_neg_fin) + left) - #print(peaks_neg_fin,'peaks_neg_fin') - - peaks_neg_fin_rev.extend(peaks_neg_fin) - if right < peaks_neg_fin_early[-1]: - # all but the last column: interject the preexisting boundary - peaks_neg_fin_rev.append(right) - #print(peaks_neg_fin_rev,'peaks_neg_fin_rev') - - if len(peaks_neg_fin_rev) >= len(peaks_neg_fin_org): - #print("found more peaks than at first glance", peaks_neg_fin_rev, peaks_neg_fin_org) - peaks_neg_fin = peaks_neg_fin_rev + regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:], + num_col_classifier, tables, multiplier=6.) 
else: - peaks_neg_fin = peaks_neg_fin_org - num_col = len(peaks_neg_fin) - #print(peaks_neg_fin,'peaks_neg_fin') - except: - logger.exception("cannot find peaks consistent with columns") - #num_col, peaks_neg_fin = find_num_col( - # text_mask[top:bot,:], - # multiplier=7.0) - peaks_neg_tot = np.array([0] + peaks_neg_fin + [width_tot]) - #print(peaks_neg_tot,'peaks_neg_tot') - peaks_neg_tot_tables.append(peaks_neg_tot) + num_col, peaks_neg_fin = find_num_col( + regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:], + num_col_classifier, tables, multiplier=7.) + except: + peaks_neg_fin=[] + num_col = 0 + try: + peaks_neg_fin_org=np.copy(peaks_neg_fin) + if (len(peaks_neg_fin)+1)= 0)[-1] # last left-of - ending = xmax - peaks_neg_tot - max_end = np.flatnonzero(ending <= 0)[0] # first right-of - # skip elongation unless this is already a multi-column separator/heading: - if not max_end - min_start > 1: - continue - # is there anything left of min_start? - for j in range(min_start): - # dbg_imshow([peaks_neg_tot[j], xmin, ymin, ymax], "start of %d candidate %d" % (i, j)) - if not np.any(cut[:, peaks_neg_tot[j]: xmin]): - # print("elongated sep", i, "typ", typ, "start", xmin, "to", j, peaks_neg_tot[j]) - matrix_new[i, 2] = peaks_neg_tot[j] + 1 # elongate to start of this column - break - # is there anything right of max_end? - for j in range(len(peaks_neg_tot) - 1, max_end, -1): - # dbg_imshow([xmax, peaks_neg_tot[j], ymin, ymax], "end of %d candidate %d" % (i, j)) - if not np.any(cut[:, xmax: peaks_neg_tot[j]]): - # print("elongated sep", i, "typ", typ, "end", xmax, "to", j, peaks_neg_tot[j]) - matrix_new[i, 3] = peaks_neg_tot[j] - 1 # elongate to end of this column - break + if len(peaks_neg_fin1)>=len(peaks_neg_fin2): + peaks_neg_fin=list(np.copy(peaks_neg_fin1)) + else: + peaks_neg_fin=list(np.copy(peaks_neg_fin2)) + peaks_neg_fin=list(np.array(peaks_neg_fin)+peaks_neg_fin_early[i_n]) - args_hor = matrix_new[:, 9] == 0 - x_min_hor_some = matrix_new[:, 2][args_hor] - x_max_hor_some = matrix_new[:, 3][args_hor] - y_min_hor_some = matrix_new[:, 6][args_hor] - y_max_hor_some = matrix_new[:, 7][args_hor] - cy_hor_some = matrix_new[:, 5][args_hor] + if i_n!=(len(peaks_neg_fin_early)-2): + peaks_neg_fin_rev.append(peaks_neg_fin_early[i_n+1]) + #print(peaks_neg_fin,'peaks_neg_fin') + peaks_neg_fin_rev=peaks_neg_fin_rev+peaks_neg_fin - args_head = matrix_new[:, 9] == 2 - x_min_hor_head = matrix_new[:, 2][args_head] - x_max_hor_head = matrix_new[:, 3][args_head] - y_min_hor_head = matrix_new[:, 6][args_head] - y_max_hor_head = matrix_new[:, 7][args_head] - cy_hor_head = matrix_new[:, 5][args_head] - - # split headings at toplines (y_min_head) and baselines (y_max_head) - # instead of merely adding their center (cy_head) as horizontal separator - # (x +/- 30px to avoid crossing col peaks by accident) - x_min_hor_some = np.append(x_min_hor_some, np.tile(x_min_hor_head + 30, 2)) - x_max_hor_some = np.append(x_max_hor_some, np.tile(x_max_hor_head - 30, 2)) - y_min_hor_some = np.append(y_min_hor_some, # toplines - np.concatenate((y_min_hor_head - 2, - y_max_hor_head - 0))) - y_max_hor_some = np.append(y_max_hor_some, # baselines - np.concatenate((y_min_hor_head + 0, - y_max_hor_head + 2))) - cy_hor_some = np.append(cy_hor_some, # centerlines - np.concatenate((y_min_hor_head - 1, - y_max_hor_head + 1))) - - # analyse connected components of regions to gain additional separators - # and prepare a map for cross-column boxes - ccounts = np.bincount(ccomps[top: bot].flatten()) - 
-        ccounts_median = np.median(ccounts)
-        col_ccounts = np.stack([np.bincount(ccomps[top: bot, left: right].flatten(),
-                                            minlength=ccounts.size)
-                                for left, right in pairwise(peaks_neg_tot)])
-        labelcolmap = dict()
-        for label, label_count in enumerate(ccounts):
-            if not label:
-                continue
-            # ignore small labels for the purpose of finding multicol seps
-            if label_count < 0.5 * ccounts_median:
-                continue
-            label_left, label_top, label_width, label_height, label_area = cstats[label]
-            # if label_count < 0.9 * label_area:
-            #     # mostly not in this part of the page
-            #     continue
-            if label_count < 0.01 * (top - bot) * width_tot:
-                continue
-            #assert np.sum(col_ccounts[:, label]) == label_count
-            label_right = label_left + label_width
-            label_bot = label_top + label_height
-            label_start = np.flatnonzero(peaks_neg_tot > label_left)[0] - 1
-            label_end = np.flatnonzero(peaks_neg_tot >= label_right)[0]
-            if label_end - label_start < 2:
-                continue
-            if np.count_nonzero(col_ccounts[:, label] > 0.1 * label_count) < 2:
-                continue
-            # store as dict for multi-column boxes:
-            for start in range(label_start, label_end):
-                labelcolmap.setdefault(start, list()).append(
-                    (label_end, label_top, label_bot, sum(col_ccounts[start: label_end, label])))
-            # make additional separators:
-            x_min_hor_some = np.append(x_min_hor_some, [label_left] * 2)
-            x_max_hor_some = np.append(x_max_hor_some, [label_right] * 2)
-            y_min_hor_some = np.append(y_min_hor_some, [label_top - 2, label_bot])
-            y_max_hor_some = np.append(y_max_hor_some, [label_top, label_bot + 2])
-            cy_hor_some = np.append(cy_hor_some, [label_top - 1, label_bot + 1])
-
-        # ensure no seps are out of bounds
-        x_min_hor_some = np.maximum(0, np.minimum(width_tot, x_min_hor_some))
-        x_max_hor_some = np.maximum(0, np.minimum(width_tot, x_max_hor_some))
-        y_min_hor_some = np.maximum(0, np.minimum(height_tot, y_min_hor_some))
-        y_max_hor_some = np.maximum(0, np.minimum(height_tot, y_max_hor_some))
-        cy_hor_some = np.maximum(0, np.minimum(height_tot, cy_hor_some))
-
-        if right2left_readingorder:
-            x_max_hor_some = width_tot - x_min_hor_some
-            x_min_hor_some = width_tot - x_max_hor_some
-
-        x_starting, x_ending, y_min, y_mid, y_max = return_multicol_separators_x_start_end(
-            text_mask, peaks_neg_tot, top, bot,
-            x_min_hor_some, x_max_hor_some, cy_hor_some, y_min_hor_some, y_max_hor_some)
-        # dbg_plt([0, None, top, bot], "non-empty multi-column separators in current split",
-        #         list(zip(peaks_neg_tot[x_starting], peaks_neg_tot[x_ending],
-        #                  y_min - top, y_max - top)), True)
-
-        # core algorithm:
-        # 1. iterate through multi-column separators, pre-ordered by their y coord
-        # 2. for each separator, iterate from its starting to its ending column
-        # 3. in each starting column, determine the next downwards separator,
-        # 4. if there is none, then fill up the column to the bottom;
-        #    otherwise, fill up to that next separator
-        # 5. moreover, determine the next rightward column that would not cut through
-        #    any regions, advancing to that column, and storing a new in-order bbox
-        #    for that down/right span
-        # 6. if there was a next separator, and it ends no further than the current one,
-        #    then recurse on that separator from step 1, then continue (with the next
-        #    column for the current separator) at step 2, or (with the next separator
-        #    in order) at step 1
-                           default=start + 1)
-            def add_sep(cur):
-                column = x_starting[cur]
-                while column < x_ending[cur]:
-                    nxt = np.flatnonzero((y_mid[cur] < y_mid) &
-                                         (column >= x_starting) &
-                                         (column < x_ending))
-                    if len(nxt):
-                        nxt = nxt[0]
-                        # print("column", column)
-                        last = get_span(column, y_max[cur], y_min[nxt])
-                        last = min(last, x_ending[nxt], x_ending[cur])
-                        # print("nxt", nxt, y_mid[nxt], "%d:%d" % (column, last))
-                        boxes.append([peaks_neg_tot[column],
-                                      peaks_neg_tot[last],
-                                      y_mid[cur],
-                                      y_mid[nxt]])
-                        # dbg_plt(boxes[-1], "recursive column %d:%d box [%d]" % (column, last, len(boxes)))
-                        column = last
-                        if (last == x_ending[nxt] and
-                            x_ending[nxt] <= x_ending[cur] and
-                            x_starting[nxt] >= x_starting[cur] and
-                            nxt in args):
-                            # child – recur
-                            # print("recur", nxt, y_mid[nxt], "%d:%d" % (x_starting[nxt], x_ending[nxt]))
-                            args.remove(nxt)
-                            add_sep(nxt)
+                    if len(peaks_neg_fin_rev)>=len(peaks_neg_fin_org):
+                        peaks_neg_fin=list(np.sort(peaks_neg_fin_rev))
+                        num_col=len(peaks_neg_fin)
                     else:
-                        # print("column", column)
-                        last = get_span(column, y_max[cur], bot)
-                        # print("bot", bot, "%d:%d" % (column, last))
+                        peaks_neg_fin=list(np.copy(peaks_neg_fin_org))
+                        num_col=len(peaks_neg_fin)
+
+                    #print(peaks_neg_fin,'peaks_neg_fin')
+            except:
+                pass
+            #num_col, peaks_neg_fin = find_num_col(
+            #    regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:],
+            #    multiplier=7.0)
+            x_min_hor_some=matrix_new[:,2][ (matrix_new[:,9]==0) ]
+            x_max_hor_some=matrix_new[:,3][ (matrix_new[:,9]==0) ]
+            cy_hor_some=matrix_new[:,5][ (matrix_new[:,9]==0) ]
+            cy_hor_diff=matrix_new[:,7][ (matrix_new[:,9]==0) ]
+            arg_org_hor_some=matrix_new[:,0][ (matrix_new[:,9]==0) ]
+
+            if right2left_readingorder:
+                x_max_hor_some_new = regions_without_separators.shape[1] - x_min_hor_some
+                x_min_hor_some_new = regions_without_separators.shape[1] - x_max_hor_some
+                x_min_hor_some =list(np.copy(x_min_hor_some_new))
+                x_max_hor_some =list(np.copy(x_max_hor_some_new))
+
+            peaks_neg_tot=return_points_with_boundies(peaks_neg_fin,0, regions_without_separators[:,:].shape[1])
+            peaks_neg_tot_tables.append(peaks_neg_tot)
+
+            reading_order_type, x_starting, x_ending, y_type_2, y_diff_type_2, \
+                y_lines_without_mother, x_start_without_mother, x_end_without_mother, there_is_sep_with_child, \
+                y_lines_with_child_without_mother, x_start_with_child_without_mother, x_end_with_child_without_mother, \
+                new_main_sep_y = return_x_start_end_mothers_childs_and_type_of_reading_order(
+                    x_min_hor_some, x_max_hor_some, cy_hor_some, peaks_neg_tot, cy_hor_diff)
+            x_starting = np.array(x_starting)
+            x_ending = np.array(x_ending)
+            y_type_2 = np.array(y_type_2)
+            y_diff_type_2 = np.array(y_diff_type_2)
+
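# Illustration (not part of the patch): the call above returns parallel arrays
# that index into peaks_neg_tot (the column boundaries). Hypothetical example
# for a 4-column page (5 boundaries): a separator is a "mother" if its column
# span fully contains that of another ("child") separator.
import numpy as np
x_starting = np.array([0, 1])      # spans columns 0..3 (whole page) and 1..2
x_ending   = np.array([4, 3])      # end column is exclusive
y_type_2   = np.array([120, 480])  # vertical position of each separator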
+            if ((reading_order_type==1) or
+                (reading_order_type==0 and
+                 (len(y_lines_without_mother)>=2 or there_is_sep_with_child==1))):
+                try:
+                    y_grenze=int(splitter_y_new[i])+300
+                    #check if there is a big separator in this y_mains_sep_ohne_grenzen
+
+                    args_early_ys=np.arange(len(y_type_2))
+                    #print(args_early_ys,'args_early_ys')
+                    #print(int(splitter_y_new[i]),int(splitter_y_new[i+1]))
+
+                    x_starting_up = x_starting[(y_type_2 > int(splitter_y_new[i])) &
+                                               (y_type_2 <= y_grenze)]
+                    x_ending_up = x_ending[(y_type_2 > int(splitter_y_new[i])) &
+                                           (y_type_2 <= y_grenze)]
+                    y_type_2_up = y_type_2[(y_type_2 > int(splitter_y_new[i])) &
+                                           (y_type_2 <= y_grenze)]
+                    y_diff_type_2_up = y_diff_type_2[(y_type_2 > int(splitter_y_new[i])) &
+                                                     (y_type_2 <= y_grenze)]
+                    args_up = args_early_ys[(y_type_2 > int(splitter_y_new[i])) &
+                                            (y_type_2 <= y_grenze)]
+                    if len(y_type_2_up) > 0:
+                        y_main_separator_up = y_type_2_up [(x_starting_up==0) &
+                                                           (x_ending_up==(len(peaks_neg_tot)-1) )]
+                        y_diff_main_separator_up = y_diff_type_2_up[(x_starting_up==0) &
+                                                                    (x_ending_up==(len(peaks_neg_tot)-1) )]
+                        args_main_to_deleted = args_up[(x_starting_up==0) &
+                                                       (x_ending_up==(len(peaks_neg_tot)-1) )]
+                        #print(y_main_separator_up,y_diff_main_separator_up,args_main_to_deleted,'fffffjammmm')
+                        if len(y_diff_main_separator_up) > 0:
+                            args_to_be_kept = np.array(list( set(args_early_ys) - set(args_main_to_deleted) ))
+                            #print(args_to_be_kept,'args_to_be_kept')
+                            boxes.append([0, peaks_neg_tot[len(peaks_neg_tot)-1],
+                                          int(splitter_y_new[i]), int( np.max(y_diff_main_separator_up))])
+                            splitter_y_new[i]=[ np.max(y_diff_main_separator_up) ][0]
+
+                            #print(splitter_y_new[i],'splitter_y_new[i]')
+                            y_type_2 = y_type_2[args_to_be_kept]
+                            x_starting = x_starting[args_to_be_kept]
+                            x_ending = x_ending[args_to_be_kept]
+                            y_diff_type_2 = y_diff_type_2[args_to_be_kept]
+
+                            #print('galdiha')
+                            y_grenze=int(splitter_y_new[i])+200
+                            args_early_ys2=np.arange(len(y_type_2))
+                            y_type_2_up=y_type_2[(y_type_2 > int(splitter_y_new[i])) &
+                                                 (y_type_2 <= y_grenze)]
+                            x_starting_up=x_starting[(y_type_2 > int(splitter_y_new[i])) &
+                                                     (y_type_2 <= y_grenze)]
+                            x_ending_up=x_ending[(y_type_2 > int(splitter_y_new[i])) &
+                                                 (y_type_2 <= y_grenze)]
+                            y_diff_type_2_up=y_diff_type_2[(y_type_2 > int(splitter_y_new[i])) &
+                                                           (y_type_2 <= y_grenze)]
+                            args_up2=args_early_ys2[(y_type_2 > int(splitter_y_new[i])) &
+                                                    (y_type_2 <= y_grenze)]
+                            #print(y_type_2_up,x_starting_up,x_ending_up,'didid')
+                            nodes_in = []
+                            for ij in range(len(x_starting_up)):
+                                nodes_in = nodes_in + list(range(x_starting_up[ij],
+                                                                 x_ending_up[ij]))
+                            nodes_in = np.unique(nodes_in)
+                            #print(nodes_in,'nodes_in')
+
+                            if set(nodes_in)==set(range(len(peaks_neg_tot)-1)):
+                                pass
+                            elif set(nodes_in)==set(range(1, len(peaks_neg_tot)-1)):
+                                pass
+                            else:
+                                #print('burdaydikh')
+                                args_to_be_kept2=np.array(list( set(args_early_ys2)-set(args_up2) ))
+
+                                if len(args_to_be_kept2)>0:
+                                    y_type_2 = y_type_2[args_to_be_kept2]
+                                    x_starting = x_starting[args_to_be_kept2]
+                                    x_ending = x_ending[args_to_be_kept2]
+                                    y_diff_type_2 = y_diff_type_2[args_to_be_kept2]
+                                else:
+                                    pass
+                                #print('burdaydikh2')
+                        elif len(y_diff_main_separator_up)==0:
+                            nodes_in = []
+                            for ij in range(len(x_starting_up)):
+                                nodes_in = nodes_in + list(range(x_starting_up[ij],
+                                                                 x_ending_up[ij]))
+                            nodes_in = np.unique(nodes_in)
+                            #print(nodes_in,'nodes_in2')
+                            #print(np.array(range(len(peaks_neg_tot)-1)),'np.array(range(len(peaks_neg_tot)-1))')
+
+                            if set(nodes_in)==set(range(len(peaks_neg_tot)-1)):
+                                pass
+                            elif set(nodes_in)==set(range(1,len(peaks_neg_tot)-1)):
+                                pass
+                            else:
+                                #print('burdaydikh')
+                                #print(args_early_ys,'args_early_ys')
+                                #print(args_up,'args_up')
+                                args_to_be_kept2=np.array(list( set(args_early_ys) - set(args_up) ))
+
+                                #print(args_to_be_kept2,'args_to_be_kept2')
+                                #print(len(y_type_2),len(x_starting),len(x_ending),len(y_diff_type_2))
+                                if len(args_to_be_kept2)>0:
+                                    y_type_2 = y_type_2[args_to_be_kept2]
+                                    x_starting = x_starting[args_to_be_kept2]
+                                    x_ending = x_ending[args_to_be_kept2]
+                                    y_diff_type_2 = y_diff_type_2[args_to_be_kept2]
+                                else:
+                                    pass
+                                #print('burdaydikh2')
+
+                    #int(splitter_y_new[i])
+                    y_lines_by_order=[]
+                    x_start_by_order=[]
+                    x_end_by_order=[]
+                    if (len(x_end_with_child_without_mother)==0 and reading_order_type==0) or reading_order_type==1:
+                        if reading_order_type==1:
+                            y_lines_by_order.append(int(splitter_y_new[i]))
+                            x_start_by_order.append(0)
+                            x_end_by_order.append(len(peaks_neg_tot)-2)
+                        else:
+                            #print(x_start_without_mother,x_end_without_mother,peaks_neg_tot,'dodo')
+                            columns_covered_by_mothers = []
+                            for dj in range(len(x_start_without_mother)):
+                                columns_covered_by_mothers = columns_covered_by_mothers + \
+                                    list(range(x_start_without_mother[dj],
+                                               x_end_without_mother[dj]))
+                            columns_covered_by_mothers = list(set(columns_covered_by_mothers))
+
+                            all_columns=np.arange(len(peaks_neg_tot)-1)
+                            columns_not_covered=list(set(all_columns) - set(columns_covered_by_mothers))
+                            y_type_2 = np.append(y_type_2, [int(splitter_y_new[i])] * (len(columns_not_covered) + len(x_start_without_mother)))
+                            ##y_lines_by_order = np.append(y_lines_by_order, [int(splitter_y_new[i])] * len(columns_not_covered))
+                            ##x_start_by_order = np.append(x_start_by_order, [0] * len(columns_not_covered))
+                            x_starting = np.append(x_starting, columns_not_covered)
+                            x_starting = np.append(x_starting, x_start_without_mother)
+                            x_ending = np.append(x_ending, np.array(columns_not_covered) + 1)
+                            x_ending = np.append(x_ending, x_end_without_mother)
+
+                        ind_args=np.arange(len(y_type_2))
+                        #ind_args=np.array(ind_args)
+                        #print(ind_args,'ind_args')
+                        for column in range(len(peaks_neg_tot)-1):
+                            #print(column,'column')
+                            ind_args_in_col=ind_args[x_starting==column]
+                            #print('babali2')
+                            #print(ind_args_in_col,'ind_args_in_col')
+                            ind_args_in_col=np.array(ind_args_in_col)
+                            #print(len(y_type_2))
+                            y_column=y_type_2[ind_args_in_col]
+                            x_start_column=x_starting[ind_args_in_col]
+                            x_end_column=x_ending[ind_args_in_col]
+                            #print('babali3')
+                            ind_args_col_sorted=np.argsort(y_column)
+                            y_col_sort=y_column[ind_args_col_sorted]
+                            x_start_column_sort=x_start_column[ind_args_col_sorted]
+                            x_end_column_sort=x_end_column[ind_args_col_sorted]
+                            #print('babali4')
+                            for ii in range(len(y_col_sort)):
+                                #print('babali5')
+                                y_lines_by_order.append(y_col_sort[ii])
+                                x_start_by_order.append(x_start_column_sort[ii])
+                                x_end_by_order.append(x_end_column_sort[ii]-1)
+                    else:
+                        #print(x_start_without_mother,x_end_without_mother,peaks_neg_tot,'dodo')
+                        columns_covered_by_mothers = []
+                        for dj in range(len(x_start_without_mother)):
+                            columns_covered_by_mothers = columns_covered_by_mothers + \
+                                list(range(x_start_without_mother[dj],
+                                           x_end_without_mother[dj]))
+                        columns_covered_by_mothers = list(set(columns_covered_by_mothers))
+
+                        all_columns=np.arange(len(peaks_neg_tot)-1)
+                        columns_not_covered=list(set(all_columns) - set(columns_covered_by_mothers))
+                        y_type_2 = np.append(y_type_2, [int(splitter_y_new[i])] * (len(columns_not_covered) + len(x_start_without_mother)))
+                        ##y_lines_by_order = np.append(y_lines_by_order, [int(splitter_y_new[i])] * len(columns_not_covered))
+                        ##x_start_by_order = np.append(x_start_by_order, [0] * len(columns_not_covered))
+                        x_starting = np.append(x_starting, columns_not_covered)
+                        x_starting = np.append(x_starting, x_start_without_mother)
+                        x_ending = np.append(x_ending, np.array(columns_not_covered) + 1)
+                        x_ending = np.append(x_ending, x_end_without_mother)
+
+                        columns_covered_by_with_child_no_mothers = []
+                        for dj in range(len(x_end_with_child_without_mother)):
+                            columns_covered_by_with_child_no_mothers = columns_covered_by_with_child_no_mothers + \
+                                list(range(x_start_with_child_without_mother[dj],
+                                           x_end_with_child_without_mother[dj]))
+                        columns_covered_by_with_child_no_mothers = list(set(columns_covered_by_with_child_no_mothers))
+
+                        all_columns = np.arange(len(peaks_neg_tot)-1)
+                        columns_not_covered_child_no_mother = list(set(all_columns) -
+                                                                   set(columns_covered_by_with_child_no_mothers))
+                        #indexes_to_be_spanned=[]
+                        for i_s in range(len(x_end_with_child_without_mother)):
+                            columns_not_covered_child_no_mother.append(x_start_with_child_without_mother[i_s])
+                        columns_not_covered_child_no_mother = np.sort(columns_not_covered_child_no_mother)
+                        ind_args = np.arange(len(y_type_2))
+                        x_end_with_child_without_mother = np.array(x_end_with_child_without_mother)
+                        x_start_with_child_without_mother = np.array(x_start_with_child_without_mother)
+                        for i_s_nc in columns_not_covered_child_no_mother:
+                            if i_s_nc in x_start_with_child_without_mother:
+                                x_end_biggest_column = x_end_with_child_without_mother[x_start_with_child_without_mother==i_s_nc][0]
+                                args_all_biggest_lines = ind_args[(x_starting==i_s_nc) &
+                                                                  (x_ending==x_end_biggest_column)]
+                                y_column_nc = y_type_2[args_all_biggest_lines]
+                                x_start_column_nc = x_starting[args_all_biggest_lines]
+                                x_end_column_nc = x_ending[args_all_biggest_lines]
+                                y_column_nc = np.sort(y_column_nc)
+                                for i_c in range(len(y_column_nc)):
+                                    if i_c==(len(y_column_nc)-1):
+                                        ind_all_lines_between_nm_wc=ind_args[(y_type_2>y_column_nc[i_c]) &
+                                                                             (y_type_2<int(splitter_y_new[i+1])) &
+                                                                             (x_starting>=i_s_nc) &
+                                                                             (x_ending<=x_end_biggest_column)]
+                                    else:
+                                        ind_all_lines_between_nm_wc=ind_args[(y_type_2>y_column_nc[i_c]) &
+                                                                             (y_type_2<y_column_nc[i_c+1]) &
+                                                                             (x_starting>=i_s_nc) &
+                                                                             (x_ending<=x_end_biggest_column)]
+                                    y_all_between_nm_wc = y_type_2[ind_all_lines_between_nm_wc]
+                                    x_starting_all_between_nm_wc = x_starting[ind_all_lines_between_nm_wc]
+                                    x_ending_all_between_nm_wc = x_ending[ind_all_lines_between_nm_wc]
+
+                                    x_diff_all_between_nm_wc = x_ending_all_between_nm_wc - x_starting_all_between_nm_wc
+                                    if len(x_diff_all_between_nm_wc)>0:
+                                        biggest=np.argmax(x_diff_all_between_nm_wc)
+
+                                    columns_covered_by_mothers = []
+                                    for dj in range(len(x_starting_all_between_nm_wc)):
+                                        columns_covered_by_mothers = columns_covered_by_mothers + \
+                                            list(range(x_starting_all_between_nm_wc[dj],
+                                                       x_ending_all_between_nm_wc[dj]))
+                                    columns_covered_by_mothers = list(set(columns_covered_by_mothers))
+
+                                    all_columns=np.arange(i_s_nc, x_end_biggest_column)
+                                    columns_not_covered = list(set(all_columns) - set(columns_covered_by_mothers))
+
+                                    should_longest_line_be_extended=0
+                                    if (len(x_diff_all_between_nm_wc) > 0 and
+                                        set(list(range(x_starting_all_between_nm_wc[biggest],
+                                                       x_ending_all_between_nm_wc[biggest])) +
+                                            list(columns_not_covered)) != set(all_columns)):
+                                        should_longest_line_be_extended=1
+                                        index_lines_so_close_to_top_separator = \
+                                            np.arange(len(y_all_between_nm_wc))[(y_all_between_nm_wc>y_column_nc[i_c]) &
+                                                                                (y_all_between_nm_wc<=(y_column_nc[i_c]+500))]
+                                        if len(index_lines_so_close_to_top_separator) > 0:
+                                            indexes_remained_after_deleting_closed_lines= \
+                                                np.array(list(set(list(range(len(y_all_between_nm_wc)))) -
+                                                              set(list(index_lines_so_close_to_top_separator))))
+                                            if len(indexes_remained_after_deleting_closed_lines) > 0:
+                                                y_all_between_nm_wc = y_all_between_nm_wc[indexes_remained_after_deleting_closed_lines]
+                                                x_starting_all_between_nm_wc = x_starting_all_between_nm_wc[indexes_remained_after_deleting_closed_lines]
+                                                x_ending_all_between_nm_wc = x_ending_all_between_nm_wc[indexes_remained_after_deleting_closed_lines]
+
+                                            y_all_between_nm_wc = np.append(y_all_between_nm_wc, y_column_nc[i_c])
+                                            x_starting_all_between_nm_wc = np.append(x_starting_all_between_nm_wc, i_s_nc)
+                                            x_ending_all_between_nm_wc = np.append(x_ending_all_between_nm_wc, x_end_biggest_column)
+
+                                    if len(x_diff_all_between_nm_wc) > 0:
+                                        try:
+                                            y_all_between_nm_wc = np.append(y_all_between_nm_wc, y_column_nc[i_c])
+                                            x_starting_all_between_nm_wc = np.append(x_starting_all_between_nm_wc, x_starting_all_between_nm_wc[biggest])
+                                            x_ending_all_between_nm_wc = np.append(x_ending_all_between_nm_wc, x_ending_all_between_nm_wc[biggest])
+                                        except:
+                                            pass
+
+                                    y_all_between_nm_wc = np.append(y_all_between_nm_wc, [y_column_nc[i_c]] * len(columns_not_covered))
+                                    x_starting_all_between_nm_wc = np.append(x_starting_all_between_nm_wc, columns_not_covered)
+                                    x_ending_all_between_nm_wc = np.append(x_ending_all_between_nm_wc, np.array(columns_not_covered) + 1)
+
+                                    ind_args_between=np.arange(len(x_ending_all_between_nm_wc))
+                                    for column in range(i_s_nc, x_end_biggest_column):
+                                        ind_args_in_col=ind_args_between[x_starting_all_between_nm_wc==column]
+                                        #print('babali2')
+                                        #print(ind_args_in_col,'ind_args_in_col')
+                                        ind_args_in_col=np.array(ind_args_in_col)
+                                        #print(len(y_type_2))
+                                        y_column=y_all_between_nm_wc[ind_args_in_col]
+                                        x_start_column=x_starting_all_between_nm_wc[ind_args_in_col]
+                                        x_end_column=x_ending_all_between_nm_wc[ind_args_in_col]
+                                        #print('babali3')
+                                        ind_args_col_sorted=np.argsort(y_column)
+                                        y_col_sort=y_column[ind_args_col_sorted]
+                                        x_start_column_sort=x_start_column[ind_args_col_sorted]
+                                        x_end_column_sort=x_end_column[ind_args_col_sorted]
+                                        #print('babali4')
+                                        for ii in range(len(y_col_sort)):
+                                            #print('babali5')
+                                            y_lines_by_order.append(y_col_sort[ii])
+                                            x_start_by_order.append(x_start_column_sort[ii])
+                                            x_end_by_order.append(x_end_column_sort[ii]-1)
+                            else:
+                                #print(column,'column')
+                                ind_args_in_col=ind_args[x_starting==i_s_nc]
+                                #print('babali2')
+                                #print(ind_args_in_col,'ind_args_in_col')
+                                ind_args_in_col=np.array(ind_args_in_col)
+                                #print(len(y_type_2))
+                                y_column=y_type_2[ind_args_in_col]
+                                x_start_column=x_starting[ind_args_in_col]
+                                x_end_column=x_ending[ind_args_in_col]
+                                #print('babali3')
+                                ind_args_col_sorted=np.argsort(y_column)
+                                y_col_sort=y_column[ind_args_col_sorted]
+                                x_start_column_sort=x_start_column[ind_args_col_sorted]
+                                x_end_column_sort=x_end_column[ind_args_col_sorted]
+                                #print('babali4')
+                                for ii in range(len(y_col_sort)):
+                                    y_lines_by_order.append(y_col_sort[ii])
+                                    x_start_by_order.append(x_start_column_sort[ii])
+                                    x_end_by_order.append(x_end_column_sort[ii]-1)
+
+                    for il in range(len(y_lines_by_order)):
+                        y_copy = list(y_lines_by_order)
+                        x_start_copy = list(x_start_by_order)
+                        x_end_copy = list(x_end_by_order)
+
+                        #print(y_copy,'y_copy')
+                        y_itself=y_copy.pop(il)
+                        x_start_itself=x_start_copy.pop(il)
+                        x_end_itself=x_end_copy.pop(il)
+
+                        #print(y_copy,'y_copy2')
+                        for column in range(x_start_itself, x_end_itself+1):
+                            #print(column,'cols')
+                            y_in_cols=[]
+                            for yic in range(len(y_copy)):
+                                #print('burda')
+                                if (y_copy[yic]>y_itself and
+                                    column>=x_start_copy[yic] and
+                                    column<=x_end_copy[yic]):
+                                    y_in_cols.append(y_copy[yic])
+                                #print('burda2')
+                            #print(y_in_cols,'y_in_cols')
+                            if len(y_in_cols)>0:
+                                y_down=np.min(y_in_cols)
+                            else:
+                                y_down=[int(splitter_y_new[i+1])][0]
+                            #print(y_itself,'y_itself')
+                            boxes.append([peaks_neg_tot[column],
+                                          peaks_neg_tot[column+1],
+                                          y_itself,
+                                          y_down])
+                except:
+                    boxes.append([0, peaks_neg_tot[len(peaks_neg_tot)-1],
+                                  int(splitter_y_new[i]), int(splitter_y_new[i+1])])
+            else:
+                y_lines_by_order=[]
+                x_start_by_order=[]
+                x_end_by_order=[]
+                if len(x_starting)>0:
+                    all_columns = np.arange(len(peaks_neg_tot)-1)
+                    columns_covered_by_lines_covered_more_than_2col = []
+                    for dj in range(len(x_starting)):
+                        if set(list(range(x_starting[dj],x_ending[dj]))) == set(all_columns):
+                            pass
+                        else:
+                            columns_covered_by_lines_covered_more_than_2col = columns_covered_by_lines_covered_more_than_2col + \
+                                list(range(x_starting[dj],x_ending[dj]))
+                    columns_covered_by_lines_covered_more_than_2col = list(set(columns_covered_by_lines_covered_more_than_2col))
+                    columns_not_covered = list(set(all_columns) - set(columns_covered_by_lines_covered_more_than_2col))
+
+                    y_type_2 = np.append(y_type_2, [int(splitter_y_new[i])] * (len(columns_not_covered) + 1))
+                    ##y_lines_by_order = np.append(y_lines_by_order, [int(splitter_y_new[i])] * len(columns_not_covered))
+                    ##x_start_by_order = np.append(x_start_by_order, [0] * len(columns_not_covered))
+                    x_starting = np.append(x_starting, columns_not_covered)
+                    x_ending = np.append(x_ending, np.array(columns_not_covered) + 1)
+                    if len(new_main_sep_y) > 0:
+                        x_starting = np.append(x_starting, 0)
+                        x_ending = np.append(x_ending, len(peaks_neg_tot)-1)
+                    else:
+                        x_starting = np.append(x_starting, x_starting[0])
+                        x_ending = np.append(x_ending, x_ending[0])
+                else:
+                    all_columns = np.arange(len(peaks_neg_tot)-1)
+                    columns_not_covered = list(set(all_columns))
+                    y_type_2 = np.append(y_type_2, [int(splitter_y_new[i])] * len(columns_not_covered))
+                    ##y_lines_by_order = np.append(y_lines_by_order, [int(splitter_y_new[i])] * len(columns_not_covered))
+                    ##x_start_by_order = np.append(x_start_by_order, [0] * len(columns_not_covered))
+                    x_starting = np.append(x_starting, columns_not_covered)
+                    x_ending = np.append(x_ending, np.array(columns_not_covered) + 1)
+
+                ind_args=np.array(range(len(y_type_2)))
+                #ind_args=np.array(ind_args)
+                for column in range(len(peaks_neg_tot)-1):
+                    #print(column,'column')
+                    ind_args_in_col=ind_args[x_starting==column]
+                    ind_args_in_col=np.array(ind_args_in_col)
+                    #print(len(y_type_2))
+                    y_column=y_type_2[ind_args_in_col]
+                    x_start_column=x_starting[ind_args_in_col]
+                    x_end_column=x_ending[ind_args_in_col]
+
+                    ind_args_col_sorted=np.argsort(y_column)
+                    y_col_sort=y_column[ind_args_col_sorted]
+                    x_start_column_sort=x_start_column[ind_args_col_sorted]
+                    x_end_column_sort=x_end_column[ind_args_col_sorted]
+                    #print('babali4')
+                    for ii in range(len(y_col_sort)):
+                        #print('babali5')
+                        y_lines_by_order.append(y_col_sort[ii])
+                        x_start_by_order.append(x_start_column_sort[ii])
+                        x_end_by_order.append(x_end_column_sort[ii]-1)
+
+                for il in range(len(y_lines_by_order)):
+                    y_copy = list(y_lines_by_order)
+                    x_start_copy = list(x_start_by_order)
+                    x_end_copy = list(x_end_by_order)
+
+                    #print(y_copy,'y_copy')
+                    y_itself=y_copy.pop(il)
+                    x_start_itself=x_start_copy.pop(il)
+                    x_end_itself=x_end_copy.pop(il)
+
+                    #print(y_copy,'y_copy2')
+                    for column in range(x_start_itself, x_end_itself+1):
+                        #print(column,'cols')
+                        y_in_cols=[]
+                        for yic in range(len(y_copy)):
+                            #print('burda')
+                            if (y_copy[yic]>y_itself and
+                                column>=x_start_copy[yic] and
+                                column<=x_end_copy[yic]):
+                                y_in_cols.append(y_copy[yic])
+                            #print('burda2')
+                        #print(y_in_cols,'y_in_cols')
+                        if len(y_in_cols)>0:
+                            y_down=np.min(y_in_cols)
+                        else:
+                            y_down=[int(splitter_y_new[i+1])][0]
+                        #print(y_itself,'y_itself')
                         boxes.append([peaks_neg_tot[column],
-                                  peaks_neg_tot[last],
-                                  y_mid[cur],
-                                  bot])
-                    # dbg_plt(boxes[-1], "non-recursive column %d box [%d]" % (column, len(boxes)))
-                    column = last
-        add_sep(cur)
+                                      peaks_neg_tot[column+1],
+                                      y_itself,
+                                      y_down])
+    #else:
+    #    boxes.append([ 0, regions_without_separators[:,:].shape[1] ,splitter_y_new[i],splitter_y_new[i+1]])

     if right2left_readingorder:
         peaks_neg_tot_tables_new = []
         if len(peaks_neg_tot_tables)>=1:
             for peaks_tab_ind in peaks_neg_tot_tables:
-                peaks_neg_tot_tables_ind = width_tot - np.array(peaks_tab_ind)
+                peaks_neg_tot_tables_ind = regions_without_separators.shape[1] - np.array(peaks_tab_ind)
                 peaks_neg_tot_tables_ind = list(peaks_neg_tot_tables_ind[::-1])
                 peaks_neg_tot_tables_new.append(peaks_neg_tot_tables_ind)

         for i in range(len(boxes)):
-            x_start_new = width_tot - boxes[i][1]
-            x_end_new = width_tot - boxes[i][0]
+            x_start_new = regions_without_separators.shape[1] - boxes[i][1]
+            x_end_new = regions_without_separators.shape[1] - boxes[i][0]
             boxes[i][0] = x_start_new
             boxes[i][1] = x_end_new
-        peaks_neg_tot_tables = peaks_neg_tot_tables_new
-
-    # show final xy-cut
-    # dbg_plt(None, "final XY-Cut", boxes, True)
-
-    logger.debug('exit return_boxes_of_images_by_order_of_reading_new')
-    return boxes, peaks_neg_tot_tables
-
-def is_image_filename(fname: str) -> bool:
-    return fname.lower().endswith(('.jpg',
-                                   '.jpeg',
-                                   '.png',
-                                   '.tif',
-                                   '.tiff',
-                                   ))
-
-def is_xml_filename(fname: str) -> bool:
-    return fname.lower().endswith('.xml')
-
-def ensure_array(obj: Iterable) -> np.ndarray:
-    """convert sequence to array of type `object` so items can be of heterogeneous shape
-    (but ensure not to convert inner arrays to `object` if len=1)
-    """
-    if not isinstance(obj, np.ndarray):
-        return np.fromiter(obj, object)
-    return obj
-
-def seg_mask_label(segmap:np.ndarray,
-                   mask:np.ndarray,
-                   only:bool=False,
-                   label:int=2,
-                   skeletonize:bool=False,
-                   dilate:int=0,
-                   keep:int=0,
-) -> None:
-    """
-    overwrite an existing segmentation map from a binary mask with a given label
-
-    Args:
-        segmap: integer array of existing segmentation labels ([H, W] or [B, H, W] shape)
-        mask: boolean array for specific label
-    Keyword Args:
-        label: the class label to be written
-        only: whether to suppress the `label` outside `mask`
-        skeletonize: whether to transform the mask to its skeleton
-        dilate: whether to also apply dilatation after this (convolution with square kernel of given size)
-        keep: if nonzero, a class label to be kept untouched
-
-    Use this to enforce specific confidence thresholds or rules after segmentation.
-    """
-    if not mask.any():
-        return
-    # plt.subplot(2, 2, 1, title="segmap orig")
-    # plt.imshow(segmap)
-    # plt.subplot(2, 2, 2, title="mask")
-    # plt.imshow(mask)
-    if keep:
-        keepmask = segmap == keep
-    if only:
-        segmap[segmap == label] = 0
-    if skeletonize:
-        if mask.ndim == 3:
-            mask = np.stack(morphology.skeletonize(m) for m in mask)
-        else:
-            mask = morphology.skeletonize(mask)
-    if dilate:
-        kernel = np.ones((dilate, dilate), np.uint8)
-        mask = cv2.dilate(mask.astype(np.uint8), kernel, iterations=1) > 0
-    segmap[mask] = label
-    # plt.subplot(2, 2, 3, title="segmap masked")
-    # plt.imshow(segmap)
-    if keep:
-        segmap[keepmask] = keep
-    # plt.subplot(2, 2, 4, title="segmap final")
-    # plt.imshow(segmap)
-    # plt.show()
+        return boxes, peaks_neg_tot_tables_new
+    else:
+        return boxes, peaks_neg_tot_tables
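# Illustration (not part of the patch): the right-to-left branch above mirrors
# each box about the page width before returning; a minimal standalone version:
def mirror_boxes(boxes, page_width):
    # a box is [x_start, x_end, y_start, y_end]; mirroring reflects and swaps x
    return [[page_width - x_end, page_width - x_start, y0, y1]
            for x_start, x_end, y0, y1 in boxes]
# mirror_boxes([[10, 40, 0, 100]], 200) == [[160, 190, 0, 100]]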
- """ - if not mask.any(): - return - # plt.subplot(2, 2, 1, title="segmap orig") - # plt.imshow(segmap) - # plt.subplot(2, 2, 2, title="mask") - # plt.imshow(mask) - if keep: - keepmask = segmap == keep - if only: - segmap[segmap == label] = 0 - if skeletonize: - if mask.ndim == 3: - mask = np.stack(morphology.skeletonize(m) for m in mask) - else: - mask = morphology.skeletonize(mask) - if dilate: - kernel = np.ones((dilate, dilate), np.uint8) - mask = cv2.dilate(mask.astype(np.uint8), kernel, iterations=1) > 0 - segmap[mask] = label - # plt.subplot(2, 2, 3, title="segmap masked") - # plt.imshow(segmap) - if keep: - segmap[keepmask] = keep - # plt.subplot(2, 2, 4, title="segmap final") - # plt.imshow(segmap) - # plt.show() + return boxes, peaks_neg_tot_tables_new + else: + return boxes, peaks_neg_tot_tables diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index f1a7a8e..a81ccb4 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -1,29 +1,26 @@ -from typing import Sequence, Union -from numbers import Number from functools import partial -import itertools - import cv2 import numpy as np -from scipy.sparse.csgraph import minimum_spanning_tree -from shapely.geometry import Polygon, LineString -from shapely.geometry.polygon import orient -from shapely import set_precision, affinity -from shapely.ops import unary_union, nearest_points +from shapely import geometry from .rotate import rotate_image, rotation_image_new def contours_in_same_horizon(cy_main_hor): - """ - Takes an array of y coords, identifies all pairs among them - which are close to each other, and returns all such pairs - by index into the array. - """ - sort = np.argsort(cy_main_hor) - same = np.diff(cy_main_hor[sort]) <= 20 - # groups = np.split(sort, np.arange(len(cy_main_hor) - 1)[~same] + 1) - same = np.flatnonzero(same) - return np.stack((sort[:-1][same], sort[1:][same])).T + X1 = np.zeros((len(cy_main_hor), len(cy_main_hor))) + X2 = np.zeros((len(cy_main_hor), len(cy_main_hor))) + + X1[0::1, :] = cy_main_hor[:] + X2 = X1.T + + X_dif = np.abs(X2 - X1) + args_help = np.array(range(len(cy_main_hor))) + all_args = [] + for i in range(len(cy_main_hor)): + list_h = list(args_help[X_dif[i, :] <= 20]) + list_h.append(i) + if len(list_h) > 1: + all_args.append(list(set(list_h))) + return np.unique(np.array(all_args, dtype=object)) def find_contours_mean_y_diff(contours_main): M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] @@ -31,74 +28,106 @@ def find_contours_mean_y_diff(contours_main): return np.mean(np.diff(np.sort(np.array(cy_main)))) def get_text_region_boxes_by_given_contours(contours): - return [cv2.boundingRect(contour) - for contour in contours] + boxes = [] + contours_new = [] + for jj in range(len(contours)): + box = cv2.boundingRect(contours[jj]) + boxes.append(box) + contours_new.append(contours[jj]) -def filter_contours_area_of_image(image, contours, hierarchy, max_area=1.0, min_area=0.0, dilate=0): + return boxes, contours_new + +def filter_contours_area_of_image(image, contours, hierarchy, max_area, min_area): found_polygons_early = [] - for jv, contour in enumerate(contours): - if len(contour) < 3: # A polygon cannot have less than 3 points + for jv,c in enumerate(contours): + if len(c) < 3: # A polygon cannot have less than 3 points continue - polygon = contour2polygon(contour, dilate=dilate) + polygon = geometry.Polygon([point[0] for point in c]) area = polygon.area if (area >= min_area * np.prod(image.shape[:2]) and area <= 
@@ -31,74 +28,106 @@
     return np.mean(np.diff(np.sort(np.array(cy_main))))

 def get_text_region_boxes_by_given_contours(contours):
-    return [cv2.boundingRect(contour)
-            for contour in contours]
+    boxes = []
+    contours_new = []
+    for jj in range(len(contours)):
+        box = cv2.boundingRect(contours[jj])
+        boxes.append(box)
+        contours_new.append(contours[jj])

-def filter_contours_area_of_image(image, contours, hierarchy, max_area=1.0, min_area=0.0, dilate=0):
+    return boxes, contours_new
+
+def filter_contours_area_of_image(image, contours, hierarchy, max_area, min_area):
     found_polygons_early = []
-    for jv, contour in enumerate(contours):
-        if len(contour) < 3:  # A polygon cannot have less than 3 points
+    for jv,c in enumerate(contours):
+        if len(c) < 3:  # A polygon cannot have less than 3 points
             continue
-        polygon = contour2polygon(contour, dilate=dilate)
+        polygon = geometry.Polygon([point[0] for point in c])
         area = polygon.area
         if (area >= min_area * np.prod(image.shape[:2]) and
             area <= max_area * np.prod(image.shape[:2]) and
             hierarchy[0][jv][3] == -1):
-            found_polygons_early.append(polygon2contour(polygon))
+            found_polygons_early.append(np.array([[point]
+                                                  for point in polygon.exterior.coords], dtype=np.uint))
     return found_polygons_early

-def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area=1.0, min_area=0.0, dilate=0):
+def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, min_area):
     found_polygons_early = []
-    for jv, contour in enumerate(contours):
-        if len(contour) < 3:  # A polygon cannot have less than 3 points
+    for jv,c in enumerate(contours):
+        if len(c) < 3:  # A polygon cannot have less than 3 points
             continue
-        polygon = contour2polygon(contour, dilate=dilate)
-        # area = cv2.contourArea(contour)
+        polygon = geometry.Polygon([point[0] for point in c])
+        # area = cv2.contourArea(c)
         area = polygon.area
         ##print(np.prod(thresh.shape[:2]))
         # Check that polygon has area greater than minimal area
         # print(hierarchy[0][jv][3],hierarchy )
-        if (area >= min_area * image.size and
-            area <= max_area * image.size and
+        if (area >= min_area * np.prod(image.shape[:2]) and
+            area <= max_area * np.prod(image.shape[:2]) and
             # hierarchy[0][jv][3]==-1
             True):
-            # print(contour[0][0][1])
-            found_polygons_early.append(polygon2contour(polygon))
+            # print(c[0][0][1])
+            found_polygons_early.append(np.array([[point]
+                                                  for point in polygon.exterior.coords], dtype=np.int32))
     return found_polygons_early

-def find_center_of_contours(contours):
-    moments = [cv2.moments(contour) for contour in contours]
-    cx = [feat["m10"] / (feat["m00"] + 1e-32)
-          for feat in moments]
-    cy = [feat["m01"] / (feat["m00"] + 1e-32)
-          for feat in moments]
-    return cx, cy
+def find_new_features_of_contours(contours_main):
+    areas_main = np.array([cv2.contourArea(contours_main[j])
+                           for j in range(len(contours_main))])
+    M_main = [cv2.moments(contours_main[j])
+              for j in range(len(contours_main))]
+    cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32))
+               for j in range(len(M_main))]
+    cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32))
+               for j in range(len(M_main))]
+    try:
+        x_min_main = np.array([np.min(contours_main[j][:, 0, 0])
+                               for j in range(len(contours_main))])
+        argmin_x_main = np.array([np.argmin(contours_main[j][:, 0, 0])
+                                  for j in range(len(contours_main))])
+        x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0, 0]
+                                      for j in range(len(contours_main))])
+        y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0, 1]
+                                             for j in range(len(contours_main))])
+        x_max_main = np.array([np.max(contours_main[j][:, 0, 0])
+                               for j in range(len(contours_main))])
+        y_min_main = np.array([np.min(contours_main[j][:, 0, 1])
+                               for j in range(len(contours_main))])
+        y_max_main = np.array([np.max(contours_main[j][:, 0, 1])
+                               for j in range(len(contours_main))])
+    except:
+        x_min_main = np.array([np.min(contours_main[j][:, 0])
+                               for j in range(len(contours_main))])
+        argmin_x_main = np.array([np.argmin(contours_main[j][:, 0])
+                                  for j in range(len(contours_main))])
+        x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0]
+                                      for j in range(len(contours_main))])
+        y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 1]
+                                             for j in range(len(contours_main))])
+        x_max_main = np.array([np.max(contours_main[j][:, 0])
+                               for j in range(len(contours_main))])
+        y_min_main = np.array([np.min(contours_main[j][:, 1])
+                               for j in range(len(contours_main))])
+        y_max_main = np.array([np.max(contours_main[j][:, 1])
+                               for j in range(len(contours_main))])
+    # dis_x=np.abs(x_max_main-x_min_main)

-def find_new_features_of_contours(contours):
-    # areas = np.array([cv2.contourArea(contour) for contour in contours])
-    cx, cy = find_center_of_contours(contours)
-    slice_x = np.index_exp[:, 0, 0]
-    slice_y = np.index_exp[:, 0, 1]
-    if any(contour.ndim < 3 for contour in contours):
-        slice_x = np.index_exp[:, 0]
-        slice_y = np.index_exp[:, 1]
-    x_min = np.array([np.min(contour[slice_x]) for contour in contours])
-    x_max = np.array([np.max(contour[slice_x]) for contour in contours])
-    y_min = np.array([np.min(contour[slice_y]) for contour in contours])
-    y_max = np.array([np.max(contour[slice_y]) for contour in contours])
-    # dis_x=np.abs(x_max-x_min)
-    y_corr_x_min = np.array([contour[np.argmin(contour[slice_x])][slice_y[1:]]
-                             for contour in contours])
+    return cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin

-    return cx, cy, x_min, x_max, y_min, y_max, y_corr_x_min
+def find_features_of_contours(contours_main):
+    areas_main=np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))])
+    M_main=[cv2.moments(contours_main[j]) for j in range(len(contours_main))]
+    cx_main=[(M_main[j]['m10']/(M_main[j]['m00']+1e-32)) for j in range(len(M_main))]
+    cy_main=[(M_main[j]['m01']/(M_main[j]['m00']+1e-32)) for j in range(len(M_main))]
+    x_min_main=np.array([np.min(contours_main[j][:,0,0]) for j in range(len(contours_main))])
+    x_max_main=np.array([np.max(contours_main[j][:,0,0]) for j in range(len(contours_main))])

-def find_features_of_contours(contours):
-    y_min = np.array([np.min(contour[:,0,1]) for contour in contours])
-    y_max = np.array([np.max(contour[:,0,1]) for contour in contours])
+    y_min_main=np.array([np.min(contours_main[j][:,0,1]) for j in range(len(contours_main))])
+    y_max_main=np.array([np.max(contours_main[j][:,0,1]) for j in range(len(contours_main))])

-    return y_min, y_max
+    return y_min_main, y_max_main
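# Illustration (not part of the patch): the restored helper returns a 7-tuple;
# callers needing only the centres unpack it like this (hypothetical contour):
import numpy as np
square = np.array([[[0, 0]], [[10, 0]], [[10, 10]], [[0, 10]]], dtype=np.int32)
cx, cy, x_min, x_max, y_min, y_max, _ = find_new_features_of_contours([square])
# cx[0] == cy[0] == 5.0 (centroid); x_min[0] == 0, x_max[0] == 10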
 def return_parent_contours(contours, hierarchy):
     contours_parent = [contours[i]
@@ -106,26 +135,31 @@
                        if hierarchy[0][i][3] == -1]
     return contours_parent

-def return_contours_of_interested_region(region_pre_p, label, min_area=0.0002, dilate=0):
-    if region_pre_p.ndim == 3:
-        mask = (region_pre_p[:, :, 0] == label).astype(np.uint8)
+def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002):
+    # pixels of images are identified by 5
+    if len(region_pre_p.shape) == 3:
+        cnts_images = (region_pre_p[:, :, 0] == pixel) * 1
     else:
-        mask = (region_pre_p[:, :] == label).astype(np.uint8)
+        cnts_images = (region_pre_p[:, :] == pixel) * 1
+    cnts_images = cnts_images.astype(np.uint8)
+    cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2)
+    imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY)
+    ret, thresh = cv2.threshold(imgray, 0, 255, 0)

-    contours_imgs, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+    contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
     contours_imgs = return_parent_contours(contours_imgs, hierarchy)
-    contours_imgs = filter_contours_area_of_image_tables(mask, contours_imgs, hierarchy,
-                                                         max_area=1,
-                                                         min_area=min_area,
-                                                         dilate=dilate)
+    contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy,
+                                                         max_area=1, min_area=min_area)
     return contours_imgs

 def do_work_of_contours_in_image(contour, index_r_con, img, slope_first):
-    img_copy = np.zeros(img.shape[:2], dtype=np.uint8)
-    img_copy = cv2.fillPoly(img_copy, pts=[contour], color=1)
+    img_copy = np.zeros(img.shape)
+    img_copy = cv2.fillPoly(img_copy, pts=[contour], color=(1, 1, 1))

     img_copy = rotation_image_new(img_copy, -slope_first)

-    _, thresh = cv2.threshold(img_copy, 0, 255, 0)
+    img_copy = img_copy.astype(np.uint8)
+    imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
+    ret, thresh = cv2.threshold(imgray, 0, 255, 0)

     cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
@@ -148,8 +182,8 @@
     cnts_org = []
     # print(cnts,'cnts')
     for i in range(len(cnts)):
-        img_copy = np.zeros(img.shape[:2], dtype=np.uint8)
-        img_copy = cv2.fillPoly(img_copy, pts=[cnts[i]], color=1)
+        img_copy = np.zeros(img.shape)
+        img_copy = cv2.fillPoly(img_copy, pts=[cnts[i]], color=(1, 1, 1))

         # plt.imshow(img_copy)
         # plt.show()
@@ -160,7 +194,9 @@
         # plt.imshow(img_copy)
         # plt.show()

-        _, thresh = cv2.threshold(img_copy, 0, 255, 0)
+        img_copy = img_copy.astype(np.uint8)
+        imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
+        ret, thresh = cv2.threshold(imgray, 0, 255, 0)

         cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
         cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
@@ -170,18 +206,19 @@
     return cnts_org

-def get_textregion_confidences_old(cnts, img, slope_first):
+def get_textregion_contours_in_org_image_light_old(cnts, img, slope_first):
     zoom = 3
     img = cv2.resize(img, (img.shape[1] // zoom,
                            img.shape[0] // zoom),
                      interpolation=cv2.INTER_NEAREST)
     cnts_org = []
     for cnt in cnts:
-        img_copy = np.zeros(img.shape[:2], dtype=np.uint8)
-        img_copy = cv2.fillPoly(img_copy, pts=[cnt // zoom], color=1)
+        img_copy = np.zeros(img.shape)
+        img_copy = cv2.fillPoly(img_copy, pts=[(cnt / zoom).astype(int)], color=(1, 1, 1))

         img_copy = rotation_image_new(img_copy, -slope_first).astype(np.uint8)
-        _, thresh = cv2.threshold(img_copy, 0, 255, 0)
+        imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
+        ret, thresh = cv2.threshold(imgray, 0, 255, 0)

         cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
         cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
@@ -191,356 +228,104 @@
     return cnts_org

 def do_back_rotation_and_get_cnt_back(contour_par, index_r_con, img, slope_first, confidence_matrix):
-    img_copy = np.zeros(img.shape[:2], dtype=np.uint8)
-    img_copy = cv2.fillPoly(img_copy, pts=[contour_par], color=1)
-    confidence_matrix_mapped_with_contour = confidence_matrix * img_copy
-    confidence_contour = np.sum(confidence_matrix_mapped_with_contour) / float(np.sum(img_copy))
+    img_copy = np.zeros(img.shape)
+    img_copy = cv2.fillPoly(img_copy, pts=[contour_par], color=(1, 1, 1))
+
+    confidence_matrix_mapped_with_contour = confidence_matrix * img_copy[:,:,0]
+    confidence_contour = np.sum(confidence_matrix_mapped_with_contour) / float(np.sum(img_copy[:,:,0]))

     img_copy = rotation_image_new(img_copy, -slope_first).astype(np.uint8)

-    _, thresh = cv2.threshold(img_copy, 0, 255, 0)
+    imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
+    ret, thresh = cv2.threshold(imgray, 0, 255, 0)

     cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
-    if len(cont_int)==0:
-        cont_int = [contour_par]
-        confidence_contour = 0
-    else:
-        cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
-        cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
+    cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
+    cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
+    # print(np.shape(cont_int[0]))
     return cont_int[0], index_r_con, confidence_contour

-def get_region_confidences(cnts, confidence_matrix):
+def get_textregion_contours_in_org_image_light(cnts, img, slope_first, confidence_matrix, map=map):
     if not len(cnts):
-        return []
+        return [], []
+
+    confidence_matrix = cv2.resize(confidence_matrix, (int(img.shape[1]/6), int(img.shape[0]/6)), interpolation=cv2.INTER_NEAREST)
+    img = cv2.resize(img, (int(img.shape[1]/6), int(img.shape[0]/6)), interpolation=cv2.INTER_NEAREST)
+    ##cnts = list( (np.array(cnts)/2).astype(np.int16) )
+    #cnts = cnts/2
+    cnts = [(i/6).astype(int) for i in cnts]
+    results = map(partial(do_back_rotation_and_get_cnt_back,
+                          img=img,
+                          slope_first=slope_first,
+                          confidence_matrix=confidence_matrix,
+                          ),
+                  cnts, range(len(cnts)))
+    contours, indexes, conf_contours = tuple(zip(*results))
+    return [i*6 for i in contours], list(conf_contours)

-    height, width = confidence_matrix.shape
-    confidence_matrix = cv2.resize(confidence_matrix,
-                                   (width // 6, height // 6),
-                                   interpolation=cv2.INTER_NEAREST)
-    confs = []
-    for cnt in cnts:
-        cnt_mask = np.zeros_like(confidence_matrix)
-        cnt_mask = cv2.fillPoly(cnt_mask, pts=[cnt // 6], color=1.0)
-        confs.append(np.sum(confidence_matrix * cnt_mask) / np.sum(cnt_mask))
-    return confs
+def return_contours_of_interested_textline(region_pre_p, pixel):
+    # pixels of images are identified by 5
+    if len(region_pre_p.shape) == 3:
+        cnts_images = (region_pre_p[:, :, 0] == pixel) * 1
+    else:
+        cnts_images = (region_pre_p[:, :] == pixel) * 1
+    cnts_images = cnts_images.astype(np.uint8)
+    cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2)
+    imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY)
+    ret, thresh = cv2.threshold(imgray, 0, 255, 0)
+    contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

-def return_contours_of_interested_textline(region_pre_p, label, min_area=0.0):
-    cnts_images = (region_pre_p == label).astype(np.uint8)
-    contours_imgs, hierarchy = cv2.findContours(cnts_images, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
     contours_imgs = return_parent_contours(contours_imgs, hierarchy)
     contours_imgs = filter_contours_area_of_image_tables(
-        cnts_images, contours_imgs, hierarchy, max_area=1, min_area=min_area)
+        thresh, contours_imgs, hierarchy, max_area=1, min_area=0.000000003)
     return contours_imgs

 def return_contours_of_image(image):
     if len(image.shape) == 2:
+        image = np.repeat(image[:, :, np.newaxis], 3, axis=2)
         image = image.astype(np.uint8)
-        imgray = image
     else:
         image = image.astype(np.uint8)
-        imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-    _, thresh = cv2.threshold(imgray, 0, 255, 0)
+    imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+    ret, thresh = cv2.threshold(imgray, 0, 255, 0)
     contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
     return contours, hierarchy

-def dilate_textline_contours(all_found_textline_polygons):
-    from . import ensure_array
-    return [ensure_array(
-        [polygon2contour(contour2polygon(contour, dilate=6))
-         for contour in region])
-            for region in all_found_textline_polygons]
-
-def dilate_textregion_contours(all_found_textregion_polygons):
-    from . import ensure_array
-    return ensure_array(
-        [polygon2contour(contour2polygon(contour, dilate=6))
-         for contour in all_found_textregion_polygons])
-
-def match_deskewed_contours(slope_deskew, contours_o, contours_d, shape_o, shape_d):
-    from . import ensure_array
-
-    cntareas_o = np.array([cv2.contourArea(contour) for contour in contours_o])
-    cntareas_d = np.array([cv2.contourArea(contour) for contour in contours_d])
-    cntareas_o = cntareas_o / float(np.prod(shape_o[:2]))
-    cntareas_d = cntareas_d / float(np.prod(shape_d[:2]))
-
-    contours_o = ensure_array(contours_o)
-    contours_d = ensure_array(contours_d)
-
-    sort_o = np.argsort(cntareas_o)
-    sort_d = np.argsort(cntareas_d)
-    contours_o = contours_o[sort_o]
-    contours_d = contours_d[sort_d]
-    cntareas_o = cntareas_o[sort_o]
-    cntareas_d = cntareas_d[sort_d]
-
-    centers_o = np.stack(find_center_of_contours(contours_o)) # [2, N]
-    centers_d = np.stack(find_center_of_contours(contours_d)) # [2, N]
-    center0_o = centers_o[:, -1:] # [2, 1]
-    center0_d = centers_d[:, -1:] # [2, 1]
-
-    # find the largest among the largest 5 deskewed contours
-    # that is also closest to the largest original contour
-    last5_centers_d = centers_d[:, -5:]
-    dists_d = np.linalg.norm(center0_o - last5_centers_d, axis=0)
-    ind_largest = len(contours_d) - last5_centers_d.shape[1] + np.argmin(dists_d)
-    center0_d[:, 0] = centers_d[:, ind_largest]
-
-    # order new contours the same way as the undeskewed contours
-    # (by calculating the offset of the largest contours, respectively,
-    #  of the new and undeskewed image; then for each contour,
-    #  finding the closest new contour, with proximity calculated
-    #  as distance of their centers modulo offset vector)
-    h_o, w_o = shape_o[:2]
-    center_o = (w_o // 2, h_o // 2)
-    M = cv2.getRotationMatrix2D(center_o, slope_deskew, 1.0)
-    M_22 = np.array(M)[:2, :2]
-    center0_o = np.dot(M_22, center0_o) # [2, 1]
-    offset = center0_o - center0_d # [2, 1]
-
-    centers_o = np.dot(M_22, centers_o) - offset # [2,N]
-    # add dimension for area (so only contours of similar size will be considered close)
-    centers_o = np.append(centers_o, cntareas_o[np.newaxis], axis=0)
-    centers_d = np.append(centers_d, cntareas_d[np.newaxis], axis=0)
-
-    dists = np.zeros((len(contours_o), len(contours_d)))
-    for i in range(len(contours_o)):
-        dists[i] = np.linalg.norm(centers_o[:, i: i + 1] - centers_d, axis=0)
-    corresp = np.zeros(dists.shape, dtype=bool)
-    # keep searching next-closest until at least one correspondence on each side
-    while not np.all(corresp.sum(axis=1)) or not np.all(corresp.sum(axis=0)):
-        idx = np.nanargmin(dists)
-        i, j = np.unravel_index(idx, dists.shape)
-        dists[i, j] = np.nan
-        corresp[i, j] = True
-    # print("original/deskewed adjacency", corresp.nonzero())
-    contours_d_ordered = contours_d[np.argmax(corresp, axis=1)]
-    # from matplotlib import pyplot as plt
-    # img1 = np.zeros(shape_d[:2], dtype=np.uint8)
-    # for i in range(len(contours_o)):
-    #     cv2.fillPoly(img1, pts=[contours_d_ordered[i]], color=i + 1)
-    # plt.subplot(1, 4, 1, title="direct corresp contours")
-    # plt.imshow(img1)
-    # img2 = np.zeros(shape_d[:2], dtype=np.uint8)
-    # join deskewed regions mapping to single original ones
-    for i in range(len(contours_o)):
-        if np.count_nonzero(corresp[i]) > 1:
-            indices = np.flatnonzero(corresp[i])
-            # print("joining", indices)
-            polygons_d = [contour2polygon(contour)
-                          for contour in contours_d[indices]]
-            contour_d_joined = polygon2contour(join_polygons(polygons_d))
-            contours_d_ordered[i] = contour_d_joined
-            # cv2.fillPoly(img2, pts=[contour_d_joined], color=i + 1)
-    # plt.subplot(1, 4, 2, title="joined contours")
-    # plt.imshow(img2)
-    # img3 = np.zeros(shape_d[:2], dtype=np.uint8)
-    # split deskewed regions mapping to multiple original ones
-    def deskew(polygon):
-        polygon = affinity.rotate(polygon, -slope_deskew, origin=center_o)
-        #polygon = affinity.translate(polygon, *offset.squeeze())
-        return polygon
-    for j in range(len(contours_d)):
-        if np.count_nonzero(corresp[:, j]) > 1:
-            indices = np.flatnonzero(corresp[:, j])
-            # print("splitting along", indices)
-            polygons_o = [deskew(contour2polygon(contour))
-                          for contour in contours_o[indices]]
-            polygon_d = contour2polygon(contours_d[j])
-            polygons_d = [make_intersection(polygon_d, polygon)
-                          for polygon in polygons_o]
-            # ignore where there is no actual overlap
-            indices = indices[np.flatnonzero(polygons_d)]
-            contours_d_joined = [polygon2contour(polygon_d)
-                                 for polygon_d in polygons_d
-                                 if polygon_d]
-            contours_d_ordered[indices] = contours_d_joined
-            # cv2.fillPoly(img3, pts=contours_d_joined, color=j + 1)
-    # plt.subplot(1, 4, 3, title="split contours")
-    # plt.imshow(img3)
-    # img4 = np.zeros(shape_d[:2], dtype=np.uint8)
-    # for i in range(len(contours_o)):
-    #     cv2.fillPoly(img4, pts=[contours_d_ordered[i]], color=i + 1)
-    # plt.subplot(1, 4, 4, title="result contours")
-    # plt.imshow(img4)
-    # plt.show()
-    # from matplotlib import patches as ptchs
-    # plt.subplot(1, 2, 1, title="undeskewed")
-    # plt.imshow(mask_o)
-    # centers_o = np.stack(find_center_of_contours(contours_o)) # [2, N]
-    # for i in range(len(contours_o)):
-    #     cnt = contours_o[i]
-    #     ctr = centers_o[:, i]
-    #     plt.gca().add_patch(ptchs.Polygon(cnt[:, 0], closed=False, fill=False, color='blue'))
-    #     plt.gca().scatter(ctr[0], ctr[1], 20, c='blue', marker='x')
-    #     plt.gca().text(ctr[0], ctr[1], str(i), c='blue')
-    # plt.subplot(1, 2, 2, title="deskewed")
-    # plt.imshow(mask_d)
-    # centers_d = np.stack(find_center_of_contours(contours_d_ordered)) # [2, N]
-    # for i in range(len(contours_o)):
-    #     cnt = contours_o[i]
-    #     cnt = polygon2contour(deskew(contour2polygon(cnt)))
-    #     plt.gca().add_patch(ptchs.Polygon(cnt[:, 0], closed=False, fill=False, color='blue'))
-    # for i in range(len(contours_d_ordered)):
-    #     cnt = contours_d_ordered[i]
-    #     ctr = centers_d[:, i]
-    #     plt.gca().add_patch(ptchs.Polygon(cnt[:, 0], closed=False, fill=False, color='red'))
-    #     plt.gca().scatter(ctr[0], ctr[1], 20, c='red', marker='x')
-    #     plt.gca().text(ctr[0], ctr[1], str(i), c='red')
-    # plt.show()
-    invsort_o = np.argsort(sort_o)
-    return contours_d_ordered[invsort_o]
-
-def estimate_skew_contours(contours):
-    if not len(contours):
-        raise ValueError("not enough contours")
-    _, size_in, angle_in = zip(*map(cv2.minAreaRect, contours))
-    w_in, h_in = np.array(size_in).T
-    angle_in = np.array(angle_in)
-    # 1. depending on how contours are oriented,
-    #    and where they start, minAreaRect can present
-    #    either side as width or height; so we first
-    #    need to normalise
-    transposed = h_in > w_in
-    # print("transposed", transposed, angle_in)
-    w_in[transposed], h_in[transposed] = h_in[transposed], w_in[transposed]
-    angle_in[transposed] -= 90
-    # 2. now we look at aspect ratio: too short
-    #    textlines do not yield reliable angles
-    usable = w_in > 2.5 * h_in
-    # print("usable aspect", w_in / h_in, usable, angle_in[usable])
-    if not np.any(usable):
-        raise ValueError("not enough contours with high aspect ratio")
-    # 3. next, get rid of outliers regarding length
-    w_avg = np.median(w_in[usable])
-    w_dev = w_in[usable] / w_avg
-    usable[usable] = (0.67 <= w_dev) & (w_dev <= 1.33)
-    # print("usable length", w_in[usable] / w_avg, usable, angle_in[usable])
-    if not np.any(usable):
-        raise ValueError("not enough contours with consistent length")
-    if np.count_nonzero(usable) == 1:
-        return angle_in[usable]
-    # 4. there is no way to distinguish between +90 and -89.9 here,
-    #    so map to [0,180] when calculating averages, then map back to [-90,90]
-    #    (we don't want -90 and +89 to average zero, or +1 and +179 to average 90)
-    angles = angle_in[usable]
-    if transposed := np.median(np.abs(angles)) >= 45:
-        angles %= 180
-    angle_avg = np.median(angles)
-    angle_dev = np.abs(angles - angle_avg)
-    usable[usable] = (angle_dev <= 2 * np.median(angle_dev))
-    # print("usable angle", usable, angle_in[usable])
-    if not np.any(usable):
-        raise ValueError("not enough contours with consistent angle")
-    if transposed:
-        angle = 90 - (90 - np.mean(angle_in[usable] % 180)) % 180
+def return_contours_of_interested_region_by_min_size(region_pre_p, pixel, min_size=0.00003):
+    # pixels of images are identified by 5
+    if len(region_pre_p.shape) == 3:
+        cnts_images = (region_pre_p[:, :, 0] == pixel) * 1
     else:
-        angle = np.mean(angle_in[usable])
-    # print("mean angle", angle)
-    return angle
+        cnts_images = (region_pre_p[:, :] == pixel) * 1
+    cnts_images = cnts_images.astype(np.uint8)
+    cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2)
+    imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY)
+    ret, thresh = cv2.threshold(imgray, 0, 255, 0)
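# Illustration (not part of the patch): step 1 of the removed helper, in
# isolation. cv2.minAreaRect may report either side as the width (and the
# angle convention differs across OpenCV versions), hence the normalisation:
import numpy as np, cv2
pts = np.array([[[0, 0]], [[100, 0]], [[100, 10]], [[0, 10]]], dtype=np.int32)
_, (w, h), angle = cv2.minAreaRect(pts)
if h > w:            # make w the long side, as the removed code does
    w, h = h, w
    angle -= 90
# now w == 100.0 and h == 10.0, whichever orientation minAreaRect chose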
-def contour2polygon(contour: Union[np.ndarray, Sequence[Sequence[Sequence[Number]]]], dilate=0):
-    polygon = Polygon([point[0] for point in contour])
-    if dilate:
-        polygon = polygon.buffer(dilate)
-        if polygon.geom_type == 'GeometryCollection':
-            # heterogeneous result: filter zero-area shapes (LineString, Point)
-            polygon = unary_union([geom for geom in polygon.geoms if geom.area > 0])
-        if polygon.geom_type == 'MultiPolygon':
-            # homogeneous result: construct convex hull to connect
-            polygon = join_polygons(polygon.geoms)
-    return make_valid(polygon)
+    contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+    contours_imgs = return_parent_contours(contours_imgs, hierarchy)
+    contours_imgs = filter_contours_area_of_image_tables(
+        thresh, contours_imgs, hierarchy, max_area=1, min_area=min_size)

-def polygon2contour(polygon: Polygon) -> np.ndarray:
-    polygon = np.array(polygon.exterior.coords[:-1], dtype=int)
-    return np.maximum(0, polygon).astype(int)[:, np.newaxis]
+    return contours_imgs

-def make_intersection(poly1, poly2):
-    interp = poly1.intersection(poly2)
-    # post-process
-    if interp.is_empty or interp.area == 0.0:
-        return None
-    if interp.geom_type == 'GeometryCollection':
-        # heterogeneous result: filter zero-area shapes (LineString, Point)
-        interp = unary_union([geom for geom in interp.geoms if geom.area > 0])
-    if interp.geom_type == 'MultiPolygon':
-        # homogeneous result: construct convex hull to connect
-        interp = join_polygons(interp.geoms)
-    assert interp.geom_type == 'Polygon', interp.wkt
-    interp = make_valid(interp)
-    return interp

+def return_contours_of_interested_region_by_size(region_pre_p, pixel, min_area, max_area):
+    # pixels of images are identified by 5
+    if len(region_pre_p.shape) == 3:
+        cnts_images = (region_pre_p[:, :, 0] == pixel) * 1
+    else:
+        cnts_images = (region_pre_p[:, :] == pixel) * 1
+    cnts_images = cnts_images.astype(np.uint8)
+    cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2)
+    imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY)
+    ret, thresh = cv2.threshold(imgray, 0, 255, 0)
+    contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

-def make_valid(polygon: Polygon) -> Polygon:
-    """Ensures shapely.geometry.Polygon object is valid by repeated rearrangement/simplification/enlargement."""
-    def isint(x):
-        return isinstance(x, int) or int(x) == x
-    # make sure rounding does not invalidate
-    if (not all(map(isint, np.array(polygon.exterior.coords).flat)) and
-        polygon.minimum_clearance < 1.0):
-        polygon = Polygon(np.round(polygon.exterior.coords))
-    if polygon.is_valid:
-        return polygon
-    points = list(polygon.exterior.coords[:-1])
-    def step(split, tolerance):
-        # try by re-arranging points
-        poly = Polygon(points[-split:]+points[:-split])
-        if poly.is_valid:
-            return poly
-        # try by simplification
-        poly = poly.simplify(tolerance + 1.0)
-        if poly.is_valid:
-            return poly
-        # try by enlarging
-        poly = poly.buffer(tolerance)
-        if poly.is_valid:
-            return poly
-        return None
-    for split in range(1, len(points)):
-        for tolerance in np.linspace(1, np.sqrt(polygon.area), 100):
-            # simplification may not be possible (at all) due to ordering
-            # in that case, try another starting point
-            if poly := step(split, tolerance):
-                return poly
-    assert polygon.is_valid, polygon.wkt
-    return polygon
+    contours_imgs = return_parent_contours(contours_imgs, hierarchy)
+    contours_imgs = filter_contours_area_of_image_tables(
+        thresh, contours_imgs, hierarchy, max_area=max_area, min_area=min_area)
+
+    img_ret = np.zeros((region_pre_p.shape[0], region_pre_p.shape[1], 3))
+    img_ret = cv2.fillPoly(img_ret, pts=contours_imgs, color=(1, 1, 1))
+
+    return img_ret[:, :, 0]

-def join_polygons(polygons: Sequence[Polygon], scale=20) -> Polygon:
-    """construct concave hull (alpha shape) from input polygons by connecting their pairwise nearest points"""
-    # ensure input polygons are simply typed and all oriented equally
-    polygons = [orient(poly)
-                for poly in itertools.chain.from_iterable(
-                    [poly.geoms
-                     if poly.geom_type in ['MultiPolygon', 'GeometryCollection']
-                     else [poly]
-                     for poly in polygons])]
-    npoly = len(polygons)
-    if npoly == 1:
-        return polygons[0]
-    # find min-dist path through all polygons (travelling salesman)
-    pairs = itertools.combinations(range(npoly), 2)
-    dists = np.zeros((npoly, npoly), dtype=float)
-    for i, j in pairs:
-        dist = polygons[i].distance(polygons[j])
-        if dist < 1e-5:
-            dist = 1e-5 # if pair merely touches, we still need to get an edge
-        dists[i, j] = dist
-        dists[j, i] = dist
-    dists = minimum_spanning_tree(dists, overwrite=True)
-    # add bridge polygons (where necessary)
-    for prevp, nextp in zip(*dists.nonzero()):
-        prevp = polygons[prevp]
-        nextp = polygons[nextp]
-        nearest = nearest_points(prevp, nextp)
-        bridgep = orient(LineString(nearest).buffer(max(1, scale/5), resolution=1), -1)
-        polygons.append(bridgep)
-    jointp = unary_union(polygons)
-    if jointp.geom_type == 'MultiPolygon':
-        jointp = unary_union(jointp.geoms)
-    assert jointp.geom_type == 'Polygon', jointp.wkt
-    # follow-up calculations will necessarily be integer;
-    # so anticipate rounding here and then ensure validity
-    jointp2 = set_precision(jointp, 1.0, mode="keep_collapsed")
-    if jointp2.geom_type != 'Polygon' or not jointp2.is_valid:
-        jointp2 = Polygon(np.round(jointp.exterior.coords))
-        jointp2 = make_valid(jointp2)
-    assert jointp2.geom_type == 'Polygon', jointp2.wkt
-    return jointp2
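# Illustration (not part of the patch): the removed join_polygons bridges
# nearest points before a union so that disjoint parts become one polygon;
# the same idea in miniature with two hypothetical squares:
from shapely.geometry import Polygon, LineString
from shapely.ops import unary_union, nearest_points
a = Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])
b = Polygon([(20, 0), (30, 0), (30, 10), (20, 10)])
bridge = LineString(nearest_points(a, b)).buffer(1, resolution=1)
joined = unary_union([a, b, bridge])
# joined.geom_type == 'Polygon': one connected shape instead of a MultiPolygon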
_, _ = find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) # print(all_box_coord[j_cont]) # print(cx_t) # print(cy_t) @@ -208,7 +208,7 @@ def adhere_drop_capital_region_into_corresponding_textline( try: # print(all_found_textline_polygons[j_cont][0]) - cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) + cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) # print(all_box_coord[j_cont]) # print(cx_t) # print(cy_t) @@ -261,7 +261,7 @@ def adhere_drop_capital_region_into_corresponding_textline( else: pass - ##cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) + ##cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) ###print(all_box_coord[j_cont]) ###print(cx_t) ###print(cy_t) @@ -315,9 +315,9 @@ def adhere_drop_capital_region_into_corresponding_textline( region_final = region_with_intersected_drop[np.argmax(sum_pixels_of_intersection)] - 1 # print(region_final,'region_final') - # cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) + # cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) try: - cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) + cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) # print(all_box_coord[j_cont]) # print(cx_t) # print(cy_t) @@ -375,12 +375,12 @@ def adhere_drop_capital_region_into_corresponding_textline( # areas_main=np.array([cv2.contourArea(all_found_textline_polygons[int(region_final)][0][j] ) for j in range(len(all_found_textline_polygons[int(region_final)]))]) - # cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) + # cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) # print(cx_t,'print') try: # print(all_found_textline_polygons[j_cont][0]) - cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) + cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) # print(all_box_coord[j_cont]) # print(cx_t) # print(cy_t) @@ -453,7 +453,7 @@ def adhere_drop_capital_region_into_corresponding_textline( #####try: #####if len(contours_new_parent)==1: ######print(all_found_textline_polygons[j_cont][0]) - #####cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[j_cont]) + #####cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_textline_polygons[j_cont]) ######print(all_box_coord[j_cont]) ######print(cx_t) ######print(cy_t) @@ -501,7 +501,7 @@ def adhere_drop_capital_region_into_corresponding_textline( def filter_small_drop_capitals_from_no_patch_layout(layout_no_patch, layout1): - drop_only = (layout_no_patch == 4) * 1 + drop_only = (layout_no_patch[:, :, 0] == 4) * 1 contours_drop, hir_on_drop = return_contours_of_image(drop_only) contours_drop_parent = return_parent_contours(contours_drop, hir_on_drop) @@ -529,8 +529,9 @@ def filter_small_drop_capitals_from_no_patch_layout(layout_no_patch, layout1): if (((map_of_drop_contour_bb == 1) * 1).sum() / float(((map_of_drop_contour_bb == 5) * 1).sum()) * 100) >= 15: contours_drop_parent_final.append(contours_drop_parent[jj]) - layout_no_patch[layout_no_patch == 4] = 0 - layout_no_patch = cv2.fillPoly(layout_no_patch, pts=contours_drop_parent_final, color=4) + layout_no_patch[:, 
:, 0][layout_no_patch[:, :, 0] == 4] = 0 + + layout_no_patch = cv2.fillPoly(layout_no_patch, pts=contours_drop_parent_final, color=(4, 4, 4)) return layout_no_patch diff --git a/src/eynollah/utils/font.py b/src/eynollah/utils/font.py deleted file mode 100644 index 939933e..0000000 --- a/src/eynollah/utils/font.py +++ /dev/null @@ -1,16 +0,0 @@ - -# cannot use importlib.resources until we move to 3.9+ forimportlib.resources.files -import sys -from PIL import ImageFont - -if sys.version_info < (3, 10): - import importlib_resources -else: - import importlib.resources as importlib_resources - - -def get_font(): - #font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! - font = importlib_resources.files(__package__) / "../Charis-Regular.ttf" - with importlib_resources.as_file(font) as font: - return ImageFont.truetype(font=font, size=40) diff --git a/src/eynollah/utils/marginals.py b/src/eynollah/utils/marginals.py index 79c75d1..a29e50d 100644 --- a/src/eynollah/utils/marginals.py +++ b/src/eynollah/utils/marginals.py @@ -2,184 +2,224 @@ import numpy as np import cv2 from scipy.signal import find_peaks from scipy.ndimage import gaussian_filter1d -from .contour import find_center_of_contours, return_contours_of_interested_region +from .contour import find_new_features_of_contours, return_contours_of_interested_region from .resize import resize_image from .rotate import rotate_image -def get_marginals(num_col, slope_deskew, early_layout, - kernel=None, - label_text=1, - label_marg=4, - label_tabs=10, -): - if kernel is None: - kernel = np.ones((5, 5), dtype=np.uint8) - kernel_hor = np.ones((1, 5), dtype=np.uint8) +def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_version=False, kernel=None): + mask_marginals=np.zeros((text_with_lines.shape[0],text_with_lines.shape[1])) + mask_marginals=mask_marginals.astype(np.uint8) - text_mask = ((early_layout == label_text) | - (early_layout == label_tabs)).astype(np.uint8) - text_mask_d = rotate_image(text_mask, slope_deskew) - main_mask_d = np.zeros_like(text_mask_d) - height, width = main_mask_d.shape - if height <= 1500: + text_with_lines=text_with_lines.astype(np.uint8) + ##text_with_lines=cv2.erode(text_with_lines,self.kernel,iterations=3) + + text_with_lines_eroded=cv2.erode(text_with_lines,kernel,iterations=5) + + if text_with_lines.shape[0]<=1500: pass - elif 1500 < height <= 1800: - text_mask_d = resize_image(text_mask_d, int(height / 1.5), width) - text_mask_d = cv2.erode(text_mask_d, kernel, iterations=5) - # rs: and back to original size - text_mask_d = resize_image(text_mask_d, height, width) + elif text_with_lines.shape[0]>1500 and text_with_lines.shape[0]<=1800: + text_with_lines=resize_image(text_with_lines,int(text_with_lines.shape[0]*1.5),text_with_lines.shape[1]) + text_with_lines=cv2.erode(text_with_lines,kernel,iterations=5) + text_with_lines=resize_image(text_with_lines,text_with_lines_eroded.shape[0],text_with_lines_eroded.shape[1]) else: - text_mask_d = resize_image(text_mask_d, int(height / 1.8), width) - text_mask_d = cv2.erode(text_mask_d, kernel, iterations=7) - # rs: and back to original size - text_mask_d = resize_image(text_mask_d, height, width) + text_with_lines=resize_image(text_with_lines,int(text_with_lines.shape[0]*1.8),text_with_lines.shape[1]) + text_with_lines=cv2.erode(text_with_lines,kernel,iterations=7) + text_with_lines=resize_image(text_with_lines,text_with_lines_eroded.shape[0],text_with_lines_eroded.shape[1]) - text_mask_d = cv2.erode(text_mask_d, kernel_hor, 
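The utils/font.py removed above is a compact example of the stdlib-versus-backport resource pattern: below Python 3.10 it imported the importlib_resources backport, then materialized the packaged TTF to a real filesystem path for Pillow. A minimal sketch of that pattern, with an illustrative helper name and only assumed path handling:

    import sys
    from PIL import ImageFont

    if sys.version_info < (3, 10):
        import importlib_resources
    else:
        import importlib.resources as importlib_resources

    def load_packaged_font(package, relative_path, size=40):
        # files() yields a Traversable; as_file() guarantees a real
        # filesystem path, which ImageFont.truetype() can open.
        resource = importlib_resources.files(package) / relative_path
        with importlib_resources.as_file(resource) as font_path:
            return ImageFont.truetype(font=font_path, size=size)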
iterations=6) - text_mask_d_y = text_mask_d.sum(axis=0) - text_mask_d_y_eroded = text_mask_d.sum(axis=0) - max_text_thickness_percent = 100. * text_mask_d_y.max() / height - min_text_thickness = max_text_thickness_percent / 100. * height / 20. + text_with_lines_y=text_with_lines.sum(axis=0) + text_with_lines_y_eroded=text_with_lines_eroded.sum(axis=0) - # plt.figure() - # ax1 = plt.subplot(2, 2, 1, title="text_mask_d") - # ax1.imshow(text_mask_d, aspect='auto') - # ax2 = plt.subplot(2, 2, 3, title="text_mask_d_y", sharex=ax1) - # ax2.plot(list(range(width)), text_mask_d_y) - # ax2.hlines(int(0.14 * height), 0, width, - # label='max_text_thickness=14%', colors='r') - # ax2.hlines([min_text_thickness], 0, width, - # label='min_text_thickness', colors='g') - # ax2.scatter([np.argmax(text_mask_d_y)], - # [text_mask_d_y.max()], color='r', - # label='max = %d%%' % max_text_thickness_percent) - # ax1 = plt.subplot(2, 2, 4, title="early layout") - # ax1.imshow(early_layout, aspect='auto') - # plt.legend() - # plt.show() + thickness_along_y_percent=text_with_lines_y_eroded.max()/(float(text_with_lines.shape[0]))*100 - if max_text_thickness_percent < 14: - return + #print(thickness_along_y_percent,'thickness_along_y_percent') - text_mask_d_y_rev = np.max(text_mask_d_y) - text_mask_d_y - region_sum_0 = gaussian_filter1d(text_mask_d_y, 1) - first_nonzero = region_sum_0.nonzero()[0][0] # outer left - last_nonzero = region_sum_0.nonzero()[0][-1] # outer right - mid_point = 0.5 * (last_nonzero + first_nonzero) - one_third_right = (last_nonzero - mid_point) / 3.0 - one_third_left = (mid_point - first_nonzero) / 3.0 - - # rs: constrain the distance at least 2 characters at 12pt, retrieve height and prominence - peaks, props = find_peaks(text_mask_d_y_rev, height=0, prominence=0, distance=30) - peaks_orig = np.copy(peaks) - # rs: also calculate the product of prominence and height (for final selection) - scores = np.zeros(peaks.max() + 1) - scores[peaks] = props['prominences'] * props['peak_heights'] - - peaks = peaks[(peaks > first_nonzero) & (peaks < last_nonzero)] - peaks = peaks[region_sum_0[peaks] < min_text_thickness] - - if num_col == 1: - peaks_right = peaks[peaks > mid_point] - peaks_left = peaks[peaks < mid_point] - elif num_col == 2: - peaks_right = peaks[peaks > mid_point + one_third_right] - peaks_left = peaks[peaks < mid_point - one_third_left] + if thickness_along_y_percent<30: + min_textline_thickness=8 + elif thickness_along_y_percent>=30 and thickness_along_y_percent<50: + min_textline_thickness=20 else: - # should not happen, anyway - return + min_textline_thickness=40 - if len(peaks_left) == 0: - if len(peaks_right) == 0: - # plt.figure() - # ax1 = plt.subplot(2, 1, 1, title='text_mask_d (deskewed text+sep mask)') - # ax1.imshow(text_mask_d, aspect='auto') - # ax1.vlines([first_nonzero], 0, height, label='first_nonzero', colors='r') - # ax1.vlines([last_nonzero], 0, height, label='last_nonzero', colors='r') - # ax1.vlines(peaks_left, 0, height, label='peaks_left', colors='orange') - # ax1.vlines(peaks_right, 0, height, label='peaks_right', colors='orange') - # ax2 = plt.subplot(2, 1, 2, title='text_mask_d_y (smoothed)', sharex=ax1) - # ax2.plot(list(range(width)), region_sum_0) - # ax2.hlines(min_text_thickness, 0, width, colors='g', - # label='min_text_thickness=%d' % min_text_thickness) - # ax2.scatter(peaks_orig, region_sum_0[peaks_orig], label='peaks') - # plt.legend() - # plt.show() - return - point_right = peaks_right[np.argmax(scores[peaks_right])] - #point_left = first_nonzero - 
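The restored branch above buckets min_textline_thickness by how tall the densest text column is relative to the page height. A rough self-contained sketch of that heuristic (input assumed to be a binary uint8 text mask; the hunk actually measures the eroded mask):

    import numpy as np

    def min_textline_thickness(text_mask):
        col_sums = text_mask.sum(axis=0)  # vertical projection: ink per column
        percent = col_sums.max() / float(text_mask.shape[0]) * 100
        if percent < 30:
            return 8
        elif percent < 50:
            return 20
        else:
            return 40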
point_left = 0 - elif len(peaks_right) == 0: - point_left = peaks_left[np.argmax(scores[peaks_left])] - #point_right = last_nonzero - point_right = width - 1 + + + if thickness_along_y_percent>=14: + + text_with_lines_y_rev=-1*text_with_lines_y[:] + + text_with_lines_y_rev=text_with_lines_y_rev-np.min(text_with_lines_y_rev) + + sigma_gaus=1 + region_sum_0= gaussian_filter1d(text_with_lines_y, sigma_gaus) + + region_sum_0_rev=gaussian_filter1d(text_with_lines_y_rev, sigma_gaus) + + region_sum_0_updown=region_sum_0[len(region_sum_0)::-1] + + first_nonzero=(next((i for i, x in enumerate(region_sum_0) if x), None)) + last_nonzero=(next((i for i, x in enumerate(region_sum_0_updown) if x), None)) + + + last_nonzero=len(region_sum_0)-last_nonzero + + mid_point=(last_nonzero+first_nonzero)/2. + + + one_third_right=(last_nonzero-mid_point)/3.0 + one_third_left=(mid_point-first_nonzero)/3.0 + + peaks, _ = find_peaks(text_with_lines_y_rev, height=0) + peaks=np.array(peaks) + peaks=peaks[(peaks>first_nonzero) & ((peaks<last_nonzero))] + peaks=peaks[region_sum_0[peaks]<min_textline_thickness] + + if num_col==1: + peaks_right=peaks[peaks>mid_point] + peaks_left=peaks[peaks<mid_point] + if num_col==2: + peaks_right=peaks[peaks>(mid_point+one_third_right)] + peaks_left=peaks[peaks<(mid_point-one_third_left)] + + + try: + point_right=np.min(peaks_right) + except: + point_right=last_nonzero + + + try: + point_left=np.max(peaks_left) + except: + point_left=first_nonzero + + + + if point_right>=mask_marginals.shape[1]: + point_right=mask_marginals.shape[1]-1 + + try: + mask_marginals[:,point_left:point_right]=1 + except: + mask_marginals[:,:]=1 + + mask_marginals_rotated=rotate_image(mask_marginals,-slope_deskew) + + mask_marginals_rotated_sum=mask_marginals_rotated.sum(axis=0) + + mask_marginals_rotated_sum[mask_marginals_rotated_sum!=0]=1 + index_x=np.array(range(len(mask_marginals_rotated_sum)))+1 + + index_x_interest=index_x[mask_marginals_rotated_sum==1] + + min_point_of_left_marginal=np.min(index_x_interest)-16 + max_point_of_right_marginal=np.max(index_x_interest)+16 + + if min_point_of_left_marginal<0: + min_point_of_left_marginal=0 + if max_point_of_right_marginal>=text_regions.shape[1]: + max_point_of_right_marginal=text_regions.shape[1]-1 + + if light_version: + text_regions_org = np.copy(text_regions) + text_regions[text_regions[:,:]==1]=4 + + pixel_img=4 + min_area_text=0.00001 + + polygon_mask_marginals_rotated = return_contours_of_interested_region(mask_marginals,1,min_area_text) + + polygon_mask_marginals_rotated = polygon_mask_marginals_rotated[0] + + polygons_of_marginals=return_contours_of_interested_region(text_regions,pixel_img,min_area_text) + + cx_text_only,cy_text_only ,x_min_text_only,x_max_text_only, y_min_text_only ,y_max_text_only,y_cor_x_min_main=find_new_features_of_contours(polygons_of_marginals) + + text_regions[(text_regions[:,:]==4)]=1 + + marginlas_should_be_main_text=[] + + x_min_marginals_left=[] + x_min_marginals_right=[] + + for i in range(len(cx_text_only)): + results = cv2.pointPolygonTest(polygon_mask_marginals_rotated, (cx_text_only[i], cy_text_only[i]), False) + + if results == -1: + marginlas_should_be_main_text.append(polygons_of_marginals[i]) + + + + text_regions_org=cv2.fillPoly(text_regions_org, pts =marginlas_should_be_main_text, color=(4,4)) + text_regions = np.copy(text_regions_org) + + + else: + + text_regions[(mask_marginals_rotated[:,:]!=1) & (text_regions[:,:]==1)]=4 + + pixel_img=4 + min_area_text=0.00001 + + polygons_of_marginals=return_contours_of_interested_region(text_regions,pixel_img,min_area_text) + + cx_text_only,cy_text_only ,x_min_text_only,x_max_text_only, y_min_text_only
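In the light_version branch above, a candidate marginal region is demoted back to main text when its centroid lies outside the rotated main-text mask polygon. Reduced to its core (helper name and inputs illustrative, not part of the patch):

    import cv2

    def regions_outside(main_poly, region_contours):
        # pointPolygonTest with measureDist=False returns +1 inside,
        # 0 on the border, and -1 outside the polygon.
        outside = []
        for cnt in region_contours:
            m = cv2.moments(cnt)
            if m["m00"] == 0:
                continue  # degenerate contour: no defined centroid
            cx, cy = m["m10"] / m["m00"], m["m01"] / m["m00"]
            if cv2.pointPolygonTest(main_poly, (cx, cy), False) == -1:
                outside.append(cnt)
        return outside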
,y_max_text_only,y_cor_x_min_main=find_new_features_of_contours(polygons_of_marginals) + + text_regions[(text_regions[:,:]==4)]=1 + + marginlas_should_be_main_text=[] + + x_min_marginals_left=[] + x_min_marginals_right=[] + + for i in range(len(cx_text_only)): + x_width_mar=abs(x_min_text_only[i]-x_max_text_only[i]) + y_height_mar=abs(y_min_text_only[i]-y_max_text_only[i]) + + if x_width_mar>16 and y_height_mar/x_width_mar<18: + marginlas_should_be_main_text.append(polygons_of_marginals[i]) + if x_min_text_only[i]<(mid_point-one_third_left): + x_min_marginals_left_new=x_min_text_only[i] + if len(x_min_marginals_left)==0: + x_min_marginals_left.append(x_min_marginals_left_new) + else: + x_min_marginals_left[0]=min(x_min_marginals_left[0],x_min_marginals_left_new) + else: + x_min_marginals_right_new=x_min_text_only[i] + if len(x_min_marginals_right)==0: + x_min_marginals_right.append(x_min_marginals_right_new) + else: + x_min_marginals_right[0]=min(x_min_marginals_right[0],x_min_marginals_right_new) + + if len(x_min_marginals_left)==0: + x_min_marginals_left=[0] + if len(x_min_marginals_right)==0: + x_min_marginals_right=[text_regions.shape[1]-1] + + + text_regions=cv2.fillPoly(text_regions, pts =marginlas_should_be_main_text, color=(4,4)) + + + #text_regions[:,:int(x_min_marginals_left[0])][text_regions[:,:int(x_min_marginals_left[0])]==1]=0 + #text_regions[:,int(x_min_marginals_right[0]):][text_regions[:,int(x_min_marginals_right[0]):]==1]=0 + + + text_regions[:,:int(min_point_of_left_marginal)][text_regions[:,:int(min_point_of_left_marginal)]==1]=0 + text_regions[:,int(max_point_of_right_marginal):][text_regions[:,int(max_point_of_right_marginal):]==1]=0 + + ###text_regions[:,0:point_left][text_regions[:,0:point_left]==1]=4 + + ###text_regions[:,point_right:][ text_regions[:,point_right:]==1]=4 + #plt.plot(region_sum_0) + #plt.plot(peaks,region_sum_0[peaks],'*') + #plt.show() + + + #plt.imshow(text_regions) + #plt.show() + + #sys.exit() else: - best_left = np.argmax(scores[peaks_left]) - best_right = np.argmax(scores[peaks_right]) - point_left = peaks_left[best_left] - point_right = peaks_right[best_right] - if scores[best_left] < 0.1 * scores[best_right]: - point_left = 0 - #point_left = first_nonzero - if scores[best_right] < 0.1 * scores[best_left]: - point_right = 0 - #point_right = last_nonzero - - main_mask_d[:, point_left: point_right] = 1 - if not np.any(main_mask_d): - return - - # plt.figure() - # ax1 = plt.subplot(2, 2, 1) - # ax1.title.set_text('text_mask_d (deskewed text+table mask)') - # ax1.imshow(text_mask_d) - # ax1.vlines(peaks_left, 0, height, label='peaks_left', colors='b') - # ax1.vlines(peaks_right, 0, height, label='peaks_right', colors='b') - # ax1.vlines([first_nonzero], 0, height, label='first_nonzero', colors='g') - # ax1.vlines([last_nonzero], 0, height, label='last_nonzero', colors='g') - # ax1.vlines([point_left], 0, height, label='point_left', colors='r') - # ax1.vlines([point_right], 0, height, label='point_right', colors='r') - # ax2 = plt.subplot(2, 2, 2, title='main_mask_d (deskewed main mask)', sharey=ax1) - # ax2.imshow(main_mask_d) - # ax3 = plt.subplot(2, 2, 3, title='text_mask_d_y (projection for minima)', sharex=ax1) - # ax3.plot(list(range(width)), text_mask_d_y) - # ax3.set_aspect('auto') - # ax4 = plt.subplot(2, 2, 4, title='early_layout (undeskewed labels)') - # ax4.imshow(early_layout) - # plt.legend() - # plt.show() - - # rs: rotate back (into undeskewed/original shape as early_layout input): - main_mask = rotate_image(main_mask_d, 
-slope_deskew) - - min_area_text = 0.00001 - main_contour = return_contours_of_interested_region(main_mask, 1, min_area_text)[0] - text_contours = return_contours_of_interested_region(early_layout, label_text, min_area_text) - cx_text, cy_text = find_center_of_contours(text_contours) - - marg_contours = [] - for i, contour in enumerate(text_contours): - if -1 == cv2.pointPolygonTest(main_contour, - (cx_text[i], - cy_text[i]), - False): - marg_contours.append(contour) - - # early_layout_orig = np.copy(early_layout) - early_layout = cv2.fillPoly(early_layout, pts=marg_contours, color=label_marg) - - # plt.figure() - # ax1 = plt.subplot(2, 2, 1, title='main_mask_d (deskewed main mask)') - # plt.imshow(main_mask_d) - # ax2 = plt.subplot(2, 2, 2, title='main_mask (undeskewed main mask)') - # plt.imshow(main_mask) - # ax3 = plt.subplot(2, 2, 3, title='early_layout (undeskewed labels original)') - # plt.imshow(early_layout_orig) - # ax4 = plt.subplot(2, 2, 4, title='early_layout (undeskewed labels split)') - # plt.imshow(early_layout) - # plt.show() - - # if there was no main text, then relabel marginalia as main - if not np.any(early_layout == label_text): - early_layout[early_layout == label_marg] = label_text + pass + return text_regions diff --git a/src/eynollah/utils/rotate.py b/src/eynollah/utils/rotate.py index 6651c4e..189693d 100644 --- a/src/eynollah/utils/rotate.py +++ b/src/eynollah/utils/rotate.py @@ -1,6 +1,36 @@ import math import cv2 +def rotatedRectWithMaxArea(w, h, angle): + if w <= 0 or h <= 0: + return 0, 0 + + width_is_longer = w >= h + side_long, side_short = (w, h) if width_is_longer else (h, w) + + # since the solutions for angle, -angle and 180-angle are all the same, + # it suffices to look at the first quadrant and the absolute values of sin,cos: + sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle)) + if side_short <= 2.0 * sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10: + # half constrained case: two crop corners touch the longer side, + # the other two corners are on the mid-line parallel to the longer line + x = 0.5 * side_short + wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a) + else: + # fully constrained case: crop touches all 4 sides + cos_2a = cos_a * cos_a - sin_a * sin_a + wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a + + return wr, hr + +def rotate_max_area_new(image, rotated, angle): + wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], math.radians(angle)) + h, w, _ = rotated.shape + y1 = h // 2 - int(hr / 2) + y2 = y1 + int(hr) + x1 = w // 2 - int(wr / 2) + x2 = x1 + int(wr) + return rotated[y1:y2, x1:x2] def rotation_image_new(img, thetha): rotated = rotate_image(img, thetha) @@ -20,19 +50,35 @@ def rotate_image_different( img, slope): img_rotation = cv2.warpAffine(img, rotation_matrix, (num_cols, num_rows)) return img_rotation -def rotate_image_enlarge(img, angle): - h, w = img.shape[:2] - cx, cy = 0.5 * w, 0.5 * h - matrix = cv2.getRotationMatrix2D((cx, cy), angle, 1.0) - radian = angle / 180 * math.pi - cos = abs(math.cos(radian)) - sin = abs(math.sin(radian)) - new_w, new_h = (w * cos + h * sin, - w * sin + h * cos) - # box is larger after resize, so instead of shifting - # back from center, shift from new center - matrix[0, 2] += 0.5 * new_w - cx - matrix[1, 2] += 0.5 * new_h - cy - return cv2.warpAffine(img, matrix, (int(new_w + 0.5), - int(new_h + 0.5)), - flags=cv2.INTER_CUBIC) +def rotate_max_area(image, rotated, rotated_textline, rotated_layout,
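rotatedRectWithMaxArea() gives, in closed form, the width and height of the largest axis-aligned rectangle contained in a w×h rectangle rotated by the given angle in radians; the rotate_max_area* helpers then crop that window from the center of the rotated image. A usage sketch reusing the function from the hunk above, assuming the rotation keeps the canvas size (eynollah's own rotate_image wrapper may behave differently):

    import math
    import cv2

    def deskew_and_crop(img, angle_deg):
        h, w = img.shape[:2]
        M = cv2.getRotationMatrix2D((w / 2, h / 2), angle_deg, 1.0)
        rotated = cv2.warpAffine(img, M, (w, h))
        # crop the centered maximal rectangle, dropping the border fill
        wr, hr = rotatedRectWithMaxArea(w, h, math.radians(angle_deg))
        y1 = h // 2 - int(hr / 2)
        x1 = w // 2 - int(wr / 2)
        return rotated[y1:y1 + int(hr), x1:x1 + int(wr)]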
rotated_table_prediction, angle): + wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], math.radians(angle)) + h, w, _ = rotated.shape + y1 = h // 2 - int(hr / 2) + y2 = y1 + int(hr) + x1 = w // 2 - int(wr / 2) + x2 = x1 + int(wr) + return rotated[y1:y2, x1:x2], rotated_textline[y1:y2, x1:x2], rotated_layout[y1:y2, x1:x2], rotated_table_prediction[y1:y2, x1:x2] + +def rotation_not_90_func(img, textline, text_regions_p_1, table_prediction, thetha): + rotated = rotate_image(img, thetha) + rotated_textline = rotate_image(textline, thetha) + rotated_layout = rotate_image(text_regions_p_1, thetha) + rotated_table_prediction = rotate_image(table_prediction, thetha) + return rotate_max_area(img, rotated, rotated_textline, rotated_layout, rotated_table_prediction, thetha) + +def rotation_not_90_func_full_layout(img, textline, text_regions_p_1, text_regions_p_fully, thetha): + rotated = rotate_image(img, thetha) + rotated_textline = rotate_image(textline, thetha) + rotated_layout = rotate_image(text_regions_p_1, thetha) + rotated_layout_full = rotate_image(text_regions_p_fully, thetha) + return rotate_max_area_full_layout(img, rotated, rotated_textline, rotated_layout, rotated_layout_full, thetha) + +def rotate_max_area_full_layout(image, rotated, rotated_textline, rotated_layout, rotated_layout_full, angle): + wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], math.radians(angle)) + h, w, _ = rotated.shape + y1 = h // 2 - int(hr / 2) + y2 = y1 + int(hr) + x1 = w // 2 - int(wr / 2) + x2 = x1 + int(wr) + return rotated[y1:y2, x1:x2], rotated_textline[y1:y2, x1:x2], rotated_layout[y1:y2, x1:x2], rotated_layout_full[y1:y2, x1:x2] + diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 689a564..3499c29 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -5,16 +5,7 @@ import numpy as np import cv2 from scipy.signal import find_peaks from scipy.ndimage import gaussian_filter1d -from scipy.stats import linregress -from ocrd_utils import ( - shift_coordinates, - rotate_coordinates, - transform_coordinates, -) -from .rotate import ( - rotate_image, - rotate_image_enlarge, -) +from .rotate import rotate_image from .resize import resize_image from .contour import ( return_parent_contours, @@ -22,12 +13,11 @@ from .contour import ( return_contours_of_image, filter_contours_area_of_image, return_contours_of_interested_textline, - find_center_of_contours, find_contours_mean_y_diff, ) from . 
import ( find_num_col_deskew, - box2rect, + crop_image_inside_box, ) def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): @@ -45,8 +35,10 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): x_cont = x_cont - np.min(x_cont) y_cont = y_cont - np.min(y_cont) - y_min_cont, x_min_cont = 0, 0 - y_max_cont, x_max_cont = img_patch.shape + x_min_cont = 0 + x_max_cont = img_patch.shape[1] + y_min_cont = 0 + y_max_cont = img_patch.shape[0] xv = np.linspace(x_min_cont, x_max_cont, 1000) textline_patch_sum_along_width = img_patch.sum(axis=axis) @@ -70,8 +62,7 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): peaks_neg_e, _ = find_peaks(y_padded_up_to_down_padded_e, height=0) neg_peaks_max = np.max(y_padded_up_to_down_padded_e[peaks_neg_e]) - arg_neg_must_be_deleted = np.arange(len(peaks_neg_e))[ - y_padded_up_to_down_padded_e[peaks_neg_e] / float(neg_peaks_max) < 0.3] + arg_neg_must_be_deleted = np.arange(len(peaks_neg_e))[y_padded_up_to_down_padded_e[peaks_neg_e] / float(neg_peaks_max) < 0.3] diff_arg_neg_must_be_deleted = np.diff(arg_neg_must_be_deleted) arg_diff = np.array(range(len(diff_arg_neg_must_be_deleted))) @@ -82,14 +73,11 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): clusters_to_be_deleted = [] if len(arg_diff_cluster) > 0: - clusters_to_be_deleted.append( - arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) for i in range(len(arg_diff_cluster) - 1): - clusters_to_be_deleted.append( - arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : - arg_diff_cluster[i + 1] + 1]) - clusters_to_be_deleted.append( - arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : + arg_diff_cluster[i + 1] + 1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) if len(clusters_to_be_deleted) > 0: peaks_new_extra = [] for m in range(len(clusters_to_be_deleted)): @@ -114,15 +102,14 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): textline_con_fil = filter_contours_area_of_image(img_patch, textline_con, hierarchy, max_area=1, min_area=0.0008) - if len(np.diff(peaks_new_tot))>1: - y_diff_mean = np.mean(np.diff(peaks_new_tot)) # self.find_contours_mean_y_diff(textline_con_fil) - sigma_gaus = int(y_diff_mean * (7.0 / 40.0)) - else: - sigma_gaus = 12 + y_diff_mean = np.mean(np.diff(peaks_new_tot)) # self.find_contours_mean_y_diff(textline_con_fil) + sigma_gaus = int(y_diff_mean * (7.0 / 40.0)) + # print(sigma_gaus,'sigma_gaus') except: sigma_gaus = 12 if sigma_gaus < 3: sigma_gaus = 3 + # print(sigma_gaus,'sigma') y_padded_smoothed = gaussian_filter1d(y_padded, sigma_gaus) y_padded_up_to_down = -y_padded + np.max(y_padded) @@ -145,12 +132,14 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): rotation_matrix) def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): - h, w = img_patch.shape[:2] + (h, w) = img_patch.shape[:2] center = (w // 2, h // 2) M = cv2.getRotationMatrix2D(center, -thetha, 1.0) x_d = M[0, 2] y_d = M[1, 2] - rotation_matrix = M[:2, :2] + + thetha = thetha / 180. 
* np.pi + rotation_matrix = np.array([[np.cos(thetha), -np.sin(thetha)], [np.sin(thetha), np.cos(thetha)]]) contour_text_interest_copy = contour_text_interest.copy() x_cont = contour_text_interest[:, 0, 0] @@ -173,86 +162,83 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): x = np.array(range(len(y))) peaks_real, _ = find_peaks(gaussian_filter1d(y, 3), height=0) - - try: - y_padded_smoothed_e= gaussian_filter1d(y_padded, 2) - y_padded_up_to_down_e=-y_padded+np.max(y_padded) - y_padded_up_to_down_padded_e=np.zeros(len(y_padded_up_to_down_e)+40) - y_padded_up_to_down_padded_e[20:len(y_padded_up_to_down_e)+20]=y_padded_up_to_down_e - y_padded_up_to_down_padded_e= gaussian_filter1d(y_padded_up_to_down_padded_e, 2) - - peaks_e, _ = find_peaks(y_padded_smoothed_e, height=0) - peaks_neg_e, _ = find_peaks(y_padded_up_to_down_padded_e, height=0) - neg_peaks_max=np.max(y_padded_up_to_down_padded_e[peaks_neg_e]) + if 1>0: + try: + y_padded_smoothed_e= gaussian_filter1d(y_padded, 2) + y_padded_up_to_down_e=-y_padded+np.max(y_padded) + y_padded_up_to_down_padded_e=np.zeros(len(y_padded_up_to_down_e)+40) + y_padded_up_to_down_padded_e[20:len(y_padded_up_to_down_e)+20]=y_padded_up_to_down_e + y_padded_up_to_down_padded_e= gaussian_filter1d(y_padded_up_to_down_padded_e, 2) + - arg_neg_must_be_deleted = np.arange(len(peaks_neg_e))[ - y_padded_up_to_down_padded_e[peaks_neg_e]/float(neg_peaks_max)<0.3] - diff_arg_neg_must_be_deleted=np.diff(arg_neg_must_be_deleted) - - arg_diff=np.array(range(len(diff_arg_neg_must_be_deleted))) - arg_diff_cluster=arg_diff[diff_arg_neg_must_be_deleted>1] - peaks_new=peaks_e[:] - peaks_neg_new=peaks_neg_e[:] + peaks_e, _ = find_peaks(y_padded_smoothed_e, height=0) + peaks_neg_e, _ = find_peaks(y_padded_up_to_down_padded_e, height=0) + neg_peaks_max=np.max(y_padded_up_to_down_padded_e[peaks_neg_e]) - clusters_to_be_deleted=[] - if len(arg_diff_cluster)>0: - clusters_to_be_deleted.append(arg_neg_must_be_deleted[0:arg_diff_cluster[0]+1]) - for i in range(len(arg_diff_cluster)-1): - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i]+1: - arg_diff_cluster[i+1]+1]) - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster)-1]+1:]) - if len(clusters_to_be_deleted)>0: - peaks_new_extra=[] - for m in range(len(clusters_to_be_deleted)): - min_cluster=np.min(peaks_e[clusters_to_be_deleted[m]]) - max_cluster=np.max(peaks_e[clusters_to_be_deleted[m]]) - peaks_new_extra.append( int( (min_cluster+max_cluster)/2.0) ) - for m1 in range(len(clusters_to_be_deleted[m])): - peaks_new=peaks_new[peaks_new!=peaks_e[clusters_to_be_deleted[m][m1]-1]] - peaks_new=peaks_new[peaks_new!=peaks_e[clusters_to_be_deleted[m][m1]]] - peaks_neg_new=peaks_neg_new[peaks_neg_new!=peaks_neg_e[clusters_to_be_deleted[m][m1]]] - peaks_new_tot=[] - for i1 in peaks_new: - peaks_new_tot.append(i1) - for i1 in peaks_new_extra: - peaks_new_tot.append(i1) - peaks_new_tot=np.sort(peaks_new_tot) - else: - peaks_new_tot=peaks_e[:] + arg_neg_must_be_deleted= np.arange(len(peaks_neg_e))[y_padded_up_to_down_padded_e[peaks_neg_e]/float(neg_peaks_max)<0.3] + diff_arg_neg_must_be_deleted=np.diff(arg_neg_must_be_deleted) + + arg_diff=np.array(range(len(diff_arg_neg_must_be_deleted))) + arg_diff_cluster=arg_diff[diff_arg_neg_must_be_deleted>1] - textline_con,hierarchy=return_contours_of_image(img_patch) - textline_con_fil=filter_contours_area_of_image(img_patch, - textline_con, hierarchy, - max_area=1, min_area=0.0008) + peaks_new=peaks_e[:] + 
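The hand-built 2x2 matrix restored here is the linear part of the cv2.getRotationMatrix2D(center, -thetha, 1.0) result computed a few lines earlier; only the translation column differs. A quick check (angle arbitrary):

    import numpy as np
    import cv2

    theta_deg = 7.5
    M = cv2.getRotationMatrix2D((0, 0), -theta_deg, 1.0)
    t = np.radians(theta_deg)
    R = np.array([[np.cos(t), -np.sin(t)],
                  [np.sin(t),  np.cos(t)]])
    assert np.allclose(M[:2, :2], R)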
peaks_neg_new=peaks_neg_e[:] - if len(np.diff(peaks_new_tot))>0: + clusters_to_be_deleted=[] + if len(arg_diff_cluster)>0: + clusters_to_be_deleted.append(arg_neg_must_be_deleted[0:arg_diff_cluster[0]+1]) + for i in range(len(arg_diff_cluster)-1): + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i]+1: + arg_diff_cluster[i+1]+1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster)-1]+1:]) + if len(clusters_to_be_deleted)>0: + peaks_new_extra=[] + for m in range(len(clusters_to_be_deleted)): + min_cluster=np.min(peaks_e[clusters_to_be_deleted[m]]) + max_cluster=np.max(peaks_e[clusters_to_be_deleted[m]]) + peaks_new_extra.append( int( (min_cluster+max_cluster)/2.0) ) + for m1 in range(len(clusters_to_be_deleted[m])): + peaks_new=peaks_new[peaks_new!=peaks_e[clusters_to_be_deleted[m][m1]-1]] + peaks_new=peaks_new[peaks_new!=peaks_e[clusters_to_be_deleted[m][m1]]] + peaks_neg_new=peaks_neg_new[peaks_neg_new!=peaks_neg_e[clusters_to_be_deleted[m][m1]]] + peaks_new_tot=[] + for i1 in peaks_new: + peaks_new_tot.append(i1) + for i1 in peaks_new_extra: + peaks_new_tot.append(i1) + peaks_new_tot=np.sort(peaks_new_tot) + else: + peaks_new_tot=peaks_e[:] + + textline_con,hierarchy=return_contours_of_image(img_patch) + textline_con_fil=filter_contours_area_of_image(img_patch, + textline_con, hierarchy, + max_area=1, min_area=0.0008) y_diff_mean=np.mean(np.diff(peaks_new_tot))#self.find_contours_mean_y_diff(textline_con_fil) sigma_gaus=int( y_diff_mean * (7./40.0) ) - else: + #print(sigma_gaus,'sigma_gaus') + except: sigma_gaus=12 - - except: - sigma_gaus=12 - if sigma_gaus<3: - sigma_gaus=3 + if sigma_gaus<3: + sigma_gaus=3 + #print(sigma_gaus,'sigma') y_padded_smoothed= gaussian_filter1d(y_padded, sigma_gaus) y_padded_up_to_down=-y_padded+np.max(y_padded) y_padded_up_to_down_padded=np.zeros(len(y_padded_up_to_down)+40) y_padded_up_to_down_padded[20:len(y_padded_up_to_down)+20]=y_padded_up_to_down y_padded_up_to_down_padded= gaussian_filter1d(y_padded_up_to_down_padded, sigma_gaus) + peaks, _ = find_peaks(y_padded_smoothed, height=0) peaks_neg, _ = find_peaks(y_padded_up_to_down_padded, height=0) try: neg_peaks_max=np.max(y_padded_smoothed[peaks]) - arg_neg_must_be_deleted = np.arange(len(peaks_neg))[ - y_padded_up_to_down_padded[peaks_neg]/float(neg_peaks_max)<0.42] + arg_neg_must_be_deleted= np.arange(len(peaks_neg))[y_padded_up_to_down_padded[peaks_neg]/float(neg_peaks_max)<0.42] diff_arg_neg_must_be_deleted=np.diff(arg_neg_must_be_deleted) arg_diff=np.array(range(len(diff_arg_neg_must_be_deleted))) arg_diff_cluster=arg_diff[diff_arg_neg_must_be_deleted>1] - except: arg_neg_must_be_deleted=[] arg_diff_cluster=[] @@ -260,6 +246,7 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): peaks_new=peaks[:] peaks_neg_new=peaks_neg[:] clusters_to_be_deleted=[] + if len(arg_diff_cluster)>=2 and len(arg_diff_cluster)>0: clusters_to_be_deleted.append(arg_neg_must_be_deleted[0:arg_diff_cluster[0]+1]) for i in range(len(arg_diff_cluster)-1): @@ -288,6 +275,21 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): peaks_new_tot.append(i1) peaks_new_tot=np.sort(peaks_new_tot) + ##plt.plot(y_padded_up_to_down_padded) + ##plt.plot(peaks_neg,y_padded_up_to_down_padded[peaks_neg],'*') + ##plt.show() + + ##plt.plot(y_padded_up_to_down_padded) + ##plt.plot(peaks_neg_new,y_padded_up_to_down_padded[peaks_neg_new],'*') + ##plt.show() + + ##plt.plot(y_padded_smoothed) + 
##plt.plot(peaks,y_padded_smoothed[peaks],'*') + ##plt.show() + + ##plt.plot(y_padded_smoothed) + ##plt.plot(peaks_new_tot,y_padded_smoothed[peaks_new_tot],'*') + ##plt.show() peaks=peaks_new_tot[:] peaks_neg=peaks_neg_new[:] else: @@ -296,13 +298,11 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): peaks_neg=peaks_neg_new[:] except: pass - if len(y_padded_smoothed[peaks]) > 1: - mean_value_of_peaks=np.mean(y_padded_smoothed[peaks]) - std_value_of_peaks=np.std(y_padded_smoothed[peaks]) - else: - mean_value_of_peaks = np.nan - std_value_of_peaks = np.nan + + mean_value_of_peaks=np.mean(y_padded_smoothed[peaks]) + std_value_of_peaks=np.std(y_padded_smoothed[peaks]) peaks_values=y_padded_smoothed[peaks] + peaks_neg = peaks_neg - 20 - 20 peaks = peaks - 20 for jj in range(len(peaks_neg)): @@ -324,47 +324,33 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): if peaks_values[jj]>mean_value_of_peaks-std_value_of_peaks/2.: point_up = peaks[jj] + first_nonzero - int(1.3 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down =y_max_cont-1 - ##peaks[jj] + first_nonzero + int(1.3 * dis_to_next_down) - #point_up - # np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) - ###-int(dis_to_next_down*1./4.0) + point_down =y_max_cont-1##peaks[jj] + first_nonzero + int(1.3 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) else: point_up = peaks[jj] + first_nonzero - int(1.4 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down =y_max_cont-1 - ##peaks[jj] + first_nonzero + int(1.6 * dis_to_next_down) - #point_up - # np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) - ###-int(dis_to_next_down*1./4.0) + point_down =y_max_cont-1##peaks[jj] + first_nonzero + int(1.6 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) point_down_narrow = peaks[jj] + first_nonzero + int( - 1.4 * dis_to_next_down) - ###-int(dis_to_next_down*1./2) + 1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./2) else: dis_to_next_up = abs(peaks[jj] - peaks_neg[jj]) dis_to_next_down = abs(peaks[jj] - peaks_neg[jj + 1]) if peaks_values[jj]>mean_value_of_peaks-std_value_of_peaks/2.: - point_up = peaks[jj] + first_nonzero - int(1.1 * dis_to_next_up) - ##+int(dis_to_next_up*1./4.0) - point_down = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) - ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.1 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) + point_down = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) else: - point_up = peaks[jj] + first_nonzero - int(1.23 * dis_to_next_up) - ##+int(dis_to_next_up*1./4.0) - point_down = peaks[jj] + first_nonzero + int(1.33 * dis_to_next_down) - ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.23 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) + point_down = peaks[jj] + first_nonzero + int(1.33 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) point_down_narrow = peaks[jj] + first_nonzero + int( 1.1 * dis_to_next_down) ###-int(dis_to_next_down*1./2) + if point_down_narrow >= img_patch.shape[0]: point_down_narrow = img_patch.shape[0] - 2 - distances = [cv2.pointPolygonTest(contour_text_interest_copy, - tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), - True) + distances = 
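The point_up/point_down arithmetic in these hunks widens each projection maximum (a line core) toward its neighbouring minima (inter-line gaps), with factors such as 1.1 or 1.3 chosen by peak strength. Stripped of the first/last-line special cases, the idea reduces to this sketch (profile is an assumed 1-D ink-per-row array):

    import numpy as np
    from scipy.signal import find_peaks
    from scipy.ndimage import gaussian_filter1d

    def line_bands(profile, sigma=3, factor=1.1):
        smooth = gaussian_filter1d(np.asarray(profile, dtype=float), sigma)
        peaks, _ = find_peaks(smooth, height=0)                # line cores
        gaps, _ = find_peaks(smooth.max() - smooth, height=0)  # gaps between lines
        bands = []
        for p in peaks:
            above = gaps[gaps < p]
            below = gaps[gaps > p]
            top = p - int(factor * (p - above[-1])) if len(above) else 0
            bot = p + int(factor * (below[0] - p)) if len(below) else len(smooth) - 1
            bands.append((max(top, 0), min(bot, len(smooth) - 1)))
        return bands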
[cv2.pointPolygonTest(contour_text_interest_copy, tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) for mj in range(len(xv))] distances = np.array(distances) @@ -406,14 +392,14 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): point_down_rot3=point_down_rot3-y_help point_down_rot4=point_down_rot4-y_help - textline_boxes_rot.append(np.array([[[int(x_min_rot1), int(point_up_rot1)]], - [[int(x_max_rot2), int(point_up_rot2)]], - [[int(x_max_rot3), int(point_down_rot3)]], - [[int(x_min_rot4), int(point_down_rot4)]]])) - textline_boxes.append(np.array([[[int(x_min), int(point_up)]], - [[int(x_max), int(point_up)]], - [[int(x_max), int(point_down)]], - [[int(x_min), int(point_down)]]])) + textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], + [int(x_max_rot2), int(point_up_rot2)], + [int(x_max_rot3), int(point_down_rot3)], + [int(x_min_rot4), int(point_down_rot4)]])) + textline_boxes.append(np.array([[int(x_min), int(point_up)], + [int(x_max), int(point_up)], + [int(x_max), int(point_down)], + [int(x_min), int(point_down)]])) elif len(peaks) < 1: pass @@ -465,14 +451,14 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): point_down_rot3=point_down_rot3-y_help point_down_rot4=point_down_rot4-y_help - textline_boxes_rot.append(np.array([[[int(x_min_rot1), int(point_up_rot1)]], - [[int(x_max_rot2), int(point_up_rot2)]], - [[int(x_max_rot3), int(point_down_rot3)]], - [[int(x_min_rot4), int(point_down_rot4)]]])) - textline_boxes.append(np.array([[[int(x_min), int(y_min)]], - [[int(x_max), int(y_min)]], - [[int(x_max), int(y_max)]], - [[int(x_min), int(y_max)]]])) + textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], + [int(x_max_rot2), int(point_up_rot2)], + [int(x_max_rot3), int(point_down_rot3)], + [int(x_min_rot4), int(point_down_rot4)]])) + textline_boxes.append(np.array([[int(x_min), int(y_min)], + [int(x_max), int(y_min)], + [int(x_max), int(y_max)], + [int(x_min), int(y_max)]])) elif len(peaks) == 2: dis_to_next = np.abs(peaks[1] - peaks[0]) for jj in range(len(peaks)): @@ -491,8 +477,7 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): point_up =peaks[jj] + first_nonzero - int(1. 
/ 1.8 * dis_to_next) distances = [cv2.pointPolygonTest(contour_text_interest_copy, - tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), - True) + tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) for mj in range(len(xv))] distances = np.array(distances) @@ -533,14 +518,14 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): point_down_rot3=point_down_rot3-y_help point_down_rot4=point_down_rot4-y_help - textline_boxes_rot.append(np.array([[[int(x_min_rot1), int(point_up_rot1)]], - [[int(x_max_rot2), int(point_up_rot2)]], - [[int(x_max_rot3), int(point_down_rot3)]], - [[int(x_min_rot4), int(point_down_rot4)]]])) - textline_boxes.append(np.array([[[int(x_min), int(point_up)]], - [[int(x_max), int(point_up)]], - [[int(x_max), int(point_down)]], - [[int(x_min), int(point_down)]]])) + textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], + [int(x_max_rot2), int(point_up_rot2)], + [int(x_max_rot3), int(point_down_rot3)], + [int(x_min_rot4), int(point_down_rot4)]])) + textline_boxes.append(np.array([[int(x_min), int(point_up)], + [int(x_max), int(point_up)], + [int(x_max), int(point_down)], + [int(x_min), int(point_down)]])) else: for jj in range(len(peaks)): if jj == 0: @@ -567,8 +552,7 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): point_down = peaks[jj] + first_nonzero + int(1. / 1.9 * dis_to_next_down) distances = [cv2.pointPolygonTest(contour_text_interest_copy, - tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), - True) + tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) for mj in range(len(xv))] distances = np.array(distances) @@ -609,14 +593,15 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): point_down_rot3=point_down_rot3-y_help point_down_rot4=point_down_rot4-y_help - textline_boxes_rot.append(np.array([[[int(x_min_rot1), int(point_up_rot1)]], - [[int(x_max_rot2), int(point_up_rot2)]], - [[int(x_max_rot3), int(point_down_rot3)]], - [[int(x_min_rot4), int(point_down_rot4)]]])) - textline_boxes.append(np.array([[[int(x_min), int(point_up)]], - [[int(x_max), int(point_up)]], - [[int(x_max), int(point_down)]], - [[int(x_min), int(point_down)]]])) + textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], + [int(x_max_rot2), int(point_up_rot2)], + [int(x_max_rot3), int(point_down_rot3)], + [int(x_min_rot4), int(point_down_rot4)]])) + textline_boxes.append(np.array([[int(x_min), int(point_up)], + [int(x_max), int(point_up)], + [int(x_max), int(point_down)], + [int(x_min), int(point_down)]])) + return peaks, textline_boxes_rot def separate_lines_vertical(img_patch, contour_text_interest, thetha): @@ -638,8 +623,7 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): neg_peaks_max = np.max(y_padded_up_to_down_padded[peaks_neg]) - arg_neg_must_be_deleted = np.arange(len(peaks_neg))[ - y_padded_up_to_down_padded[peaks_neg] / float(neg_peaks_max) < 0.42] + arg_neg_must_be_deleted = np.arange(len(peaks_neg))[y_padded_up_to_down_padded[peaks_neg] / float(neg_peaks_max) < 0.42] diff_arg_neg_must_be_deleted = np.diff(arg_neg_must_be_deleted) arg_diff = np.array(range(len(diff_arg_neg_must_be_deleted))) @@ -649,7 +633,7 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): peaks_neg_new = peaks_neg[:] clusters_to_be_deleted = [] - if len(arg_neg_must_be_deleted) >= 2 and len(arg_diff_cluster) >= 2: + if len(arg_diff_cluster) >= 2 and 
len(arg_diff_cluster) > 0: clusters_to_be_deleted.append(arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) for i in range(len(arg_diff_cluster) - 1): clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : @@ -657,7 +641,7 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) elif len(arg_neg_must_be_deleted) >= 2 and len(arg_diff_cluster) == 0: clusters_to_be_deleted.append(arg_neg_must_be_deleted[:]) - else: + if len(arg_neg_must_be_deleted) == 1: clusters_to_be_deleted.append(arg_neg_must_be_deleted) if len(clusters_to_be_deleted) > 0: peaks_new_extra = [] @@ -683,14 +667,9 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): peaks_new_tot = peaks[:] peaks = peaks_new_tot[:] peaks_neg = peaks_neg_new[:] - - if len(y_padded_smoothed[peaks])>1: - mean_value_of_peaks = np.mean(y_padded_smoothed[peaks]) - std_value_of_peaks = np.std(y_padded_smoothed[peaks]) - else: - mean_value_of_peaks = np.nan - std_value_of_peaks = np.nan - + + mean_value_of_peaks = np.mean(y_padded_smoothed[peaks]) + std_value_of_peaks = np.std(y_padded_smoothed[peaks]) peaks_values = y_padded_smoothed[peaks] peaks_neg = peaks_neg - 20 - 20 @@ -708,6 +687,7 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): textline_boxes_rot = [] if len(peaks_neg) == len(peaks) + 1 and len(peaks) >= 3: + # print('11') for jj in range(len(peaks)): if jj == (len(peaks) - 1): @@ -715,50 +695,30 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): dis_to_next_down = abs(peaks[jj] - peaks_neg[jj + 1]) if peaks_values[jj] > mean_value_of_peaks - std_value_of_peaks / 2.0: - point_up = peaks[jj] + first_nonzero - int(1.3 * dis_to_next_up) - ##+int(dis_to_next_up*1./4.0) - point_down = x_max_cont - 1 - ##peaks[jj] + first_nonzero + int(1.3 * dis_to_next_down) - #point_up - # np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) - ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.3 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) + point_down = x_max_cont - 1 ##peaks[jj] + first_nonzero + int(1.3 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) else: - point_up = peaks[jj] + first_nonzero - int(1.4 * dis_to_next_up) - ##+int(dis_to_next_up*1./4.0) - point_down = x_max_cont - 1 - ##peaks[jj] + first_nonzero + int(1.6 * dis_to_next_down) - #point_up - # np.max(y_cont) - #peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) - ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.4 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) + point_down = x_max_cont - 1 ##peaks[jj] + first_nonzero + int(1.6 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) - point_down_narrow = peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) - ###-int(dis_to_next_down*1./2) + point_down_narrow = peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./2) else: dis_to_next_up = abs(peaks[jj] - peaks_neg[jj]) dis_to_next_down = abs(peaks[jj] - peaks_neg[jj + 1]) if peaks_values[jj] > mean_value_of_peaks - std_value_of_peaks / 2.0: - point_up = peaks[jj] + first_nonzero - int(1.1 * dis_to_next_up) - ##+int(dis_to_next_up*1./4.0) - point_down = peaks[jj] + first_nonzero + int(1.1 * 
dis_to_next_down) - ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.1 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) + point_down = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) else: - point_up = peaks[jj] + first_nonzero - int(1.23 * dis_to_next_up) - ##+int(dis_to_next_up*1./4.0) - point_down = peaks[jj] + first_nonzero + int(1.33 * dis_to_next_down) - ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.23 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) + point_down = peaks[jj] + first_nonzero + int(1.33 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) - point_down_narrow = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) - ###-int(dis_to_next_down*1./2) + point_down_narrow = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) ###-int(dis_to_next_down*1./2) if point_down_narrow >= img_patch.shape[0]: point_down_narrow = img_patch.shape[0] - 2 - distances = [cv2.pointPolygonTest(contour_text_interest_copy, - tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), - True) - for mj in range(len(xv))] + distances = [cv2.pointPolygonTest(contour_text_interest_copy, tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) for mj in range(len(xv))] distances = np.array(distances) xvinside = xv[distances >= 0] @@ -788,14 +748,14 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): if point_up_rot2 < 0: point_up_rot2 = 0 - textline_boxes_rot.append(np.array([[[int(x_min_rot1), int(point_up_rot1)]], - [[int(x_max_rot2), int(point_up_rot2)]], - [[int(x_max_rot3), int(point_down_rot3)]], - [[int(x_min_rot4), int(point_down_rot4)]]])) - textline_boxes.append(np.array([[[int(x_min), int(point_up)]], - [[int(x_max), int(point_up)]], - [[int(x_max), int(point_down)]], - [[int(x_min), int(point_down)]]])) + textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], + [int(x_max_rot2), int(point_up_rot2)], + [int(x_max_rot3), int(point_down_rot3)], + [int(x_min_rot4), int(point_down_rot4)]])) + textline_boxes.append(np.array([[int(x_min), int(point_up)], + [int(x_max), int(point_up)], + [int(x_max), int(point_down)], + [int(x_min), int(point_down)]])) elif len(peaks) < 1: pass elif len(peaks) == 1: @@ -824,14 +784,14 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): if point_up_rot2 < 0: point_up_rot2 = 0 - textline_boxes_rot.append(np.array([[[int(x_min_rot1), int(point_up_rot1)]], - [[int(x_max_rot2), int(point_up_rot2)]], - [[int(x_max_rot3), int(point_down_rot3)]], - [[int(x_min_rot4), int(point_down_rot4)]]])) - textline_boxes.append(np.array([[[int(x_min), int(y_min)]], - [[int(x_max), int(y_min)]], - [[int(x_max), int(y_max)]], - [[int(x_min), int(y_max)]]])) + textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], + [int(x_max_rot2), int(point_up_rot2)], + [int(x_max_rot3), int(point_down_rot3)], + [int(x_min_rot4), int(point_down_rot4)]])) + textline_boxes.append(np.array([[int(x_min), int(y_min)], + [int(x_max), int(y_min)], + [int(x_max), int(y_max)], + [int(x_min), int(y_max)]])) elif len(peaks) == 2: dis_to_next = np.abs(peaks[1] - peaks[0]) for jj in range(len(peaks)): @@ -847,8 +807,7 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): point_up = peaks[jj] + first_nonzero - int(1.0 / 1.8 * dis_to_next) distances = [cv2.pointPolygonTest(contour_text_interest_copy, - tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), - 
True) + tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) for mj in range(len(xv))] distances = np.array(distances) @@ -879,14 +838,14 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): if point_up_rot2 < 0: point_up_rot2 = 0 - textline_boxes_rot.append(np.array([[[int(x_min_rot1), int(point_up_rot1)]], - [[int(x_max_rot2), int(point_up_rot2)]], - [[int(x_max_rot3), int(point_down_rot3)]], - [[int(x_min_rot4), int(point_down_rot4)]]])) - textline_boxes.append(np.array([[[int(x_min), int(point_up)]], - [[int(x_max), int(point_up)]], - [[int(x_max), int(point_down)]], - [[int(x_min), int(point_down)]]])) + textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], + [int(x_max_rot2), int(point_up_rot2)], + [int(x_max_rot3), int(point_down_rot3)], + [int(x_min_rot4), int(point_down_rot4)]])) + textline_boxes.append(np.array([[int(x_min), int(point_up)], + [int(x_max), int(point_up)], + [int(x_max), int(point_down)], + [int(x_min), int(point_down)]])) else: for jj in range(len(peaks)): if jj == 0: @@ -913,8 +872,7 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): point_down = peaks[jj] + first_nonzero + int(1.0 / 1.9 * dis_to_next_down) distances = [cv2.pointPolygonTest(contour_text_interest_copy, - tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), - True) + tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) for mj in range(len(xv))] distances = np.array(distances) @@ -945,107 +903,130 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): if point_up_rot2 < 0: point_up_rot2 = 0 - textline_boxes_rot.append(np.array([[[int(x_min_rot1), int(point_up_rot1)]], - [[int(x_max_rot2), int(point_up_rot2)]], - [[int(x_max_rot3), int(point_down_rot3)]], - [[int(x_min_rot4), int(point_down_rot4)]]])) - textline_boxes.append(np.array([[[int(x_min), int(point_up)]], - [[int(x_max), int(point_up)]], - [[int(x_max), int(point_down)]], - [[int(x_min), int(point_down)]]])) + textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], + [int(x_max_rot2), int(point_up_rot2)], + [int(x_max_rot3), int(point_down_rot3)], + [int(x_min_rot4), int(point_down_rot4)]])) + textline_boxes.append(np.array([[int(x_min), int(point_up)], + [int(x_max), int(point_up)], + [int(x_max), int(point_down)], + [int(x_min), int(point_down)]])) return peaks, textline_boxes_rot -def separate_lines_new_inside_tiles2(img_patch, _): - y = img_patch.sum(axis=1) - y_padded = np.pad(y, (20,)) - x = np.arange(len(y)) +def separate_lines_new_inside_tiles2(img_patch, thetha): + (h, w) = img_patch.shape[:2] + center = (w // 2, h // 2) + M = cv2.getRotationMatrix2D(center, -thetha, 1.0) + x_d = M[0, 2] + y_d = M[1, 2] + + thetha = thetha / 180.0 * np.pi + rotation_matrix = np.array([[np.cos(thetha), -np.sin(thetha)], [np.sin(thetha), np.cos(thetha)]]) + # contour_text_interest_copy = contour_text_interest.copy() + + # x_cont = contour_text_interest[:, 0, 0] + # y_cont = contour_text_interest[:, 0, 1] + # x_cont = x_cont - np.min(x_cont) + # y_cont = y_cont - np.min(y_cont) + + x_min_cont = 0 + x_max_cont = img_patch.shape[1] + y_min_cont = 0 + y_max_cont = img_patch.shape[0] + + xv = np.linspace(x_min_cont, x_max_cont, 1000) + textline_patch_sum_along_width = img_patch.sum(axis=1) + first_nonzero = 0 # (next((i for i, x in enumerate(mada_n) if x), None)) + + y = textline_patch_sum_along_width[:] # [first_nonzero:last_nonzero] + y_padded = np.zeros(len(y) + 40) + y_padded[20 : len(y) 
+ 20] = y + x = np.array(range(len(y))) peaks_real, _ = find_peaks(gaussian_filter1d(y, 3), height=0) - try: - y_padded_smoothed_e = gaussian_filter1d(y_padded, 2) - y_padded_up_to_down_e = -y_padded + np.max(y_padded) - y_padded_up_to_down_padded_e = np.zeros(len(y_padded_up_to_down_e) + 40) - y_padded_up_to_down_padded_e[20 : len(y_padded_up_to_down_e) + 20] = y_padded_up_to_down_e - y_padded_up_to_down_padded_e = gaussian_filter1d(y_padded_up_to_down_padded_e, 2) + if 1 > 0: + try: + y_padded_smoothed_e = gaussian_filter1d(y_padded, 2) + y_padded_up_to_down_e = -y_padded + np.max(y_padded) + y_padded_up_to_down_padded_e = np.zeros(len(y_padded_up_to_down_e) + 40) + y_padded_up_to_down_padded_e[20 : len(y_padded_up_to_down_e) + 20] = y_padded_up_to_down_e + y_padded_up_to_down_padded_e = gaussian_filter1d(y_padded_up_to_down_padded_e, 2) - peaks_e, _ = find_peaks(y_padded_smoothed_e, height=0) - peaks_neg_e, _ = find_peaks(y_padded_up_to_down_padded_e, height=0) - neg_peaks_max = np.max(y_padded_up_to_down_padded_e[peaks_neg_e]) + peaks_e, _ = find_peaks(y_padded_smoothed_e, height=0) + peaks_neg_e, _ = find_peaks(y_padded_up_to_down_padded_e, height=0) + neg_peaks_max = np.max(y_padded_up_to_down_padded_e[peaks_neg_e]) - arg_neg_must_be_deleted = np.arange(len(peaks_neg_e))[ - y_padded_up_to_down_padded_e[peaks_neg_e] / float(neg_peaks_max) < 0.3] - diff_arg_neg_must_be_deleted = np.diff(arg_neg_must_be_deleted) + arg_neg_must_be_deleted = np.arange(len(peaks_neg_e))[y_padded_up_to_down_padded_e[peaks_neg_e] / float(neg_peaks_max) < 0.3] + diff_arg_neg_must_be_deleted = np.diff(arg_neg_must_be_deleted) - arg_diff = np.array(range(len(diff_arg_neg_must_be_deleted))) - arg_diff_cluster = arg_diff[diff_arg_neg_must_be_deleted > 1] + arg_diff = np.array(range(len(diff_arg_neg_must_be_deleted))) + arg_diff_cluster = arg_diff[diff_arg_neg_must_be_deleted > 1] - peaks_new = peaks_e[:] - peaks_neg_new = peaks_neg_e[:] + peaks_new = peaks_e[:] + peaks_neg_new = peaks_neg_e[:] - clusters_to_be_deleted = [] - if len(arg_diff_cluster) > 0: - clusters_to_be_deleted.append(arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) - for i in range(len(arg_diff_cluster) - 1): - clusters_to_be_deleted.append( - arg_neg_must_be_deleted[arg_diff_cluster[i] + 1: - arg_diff_cluster[i + 1] + 1]) - clusters_to_be_deleted.append( - arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) - if len(clusters_to_be_deleted) > 0: - peaks_new_extra = [] - for m in range(len(clusters_to_be_deleted)): - min_cluster = np.min(peaks_e[clusters_to_be_deleted[m]]) - max_cluster = np.max(peaks_e[clusters_to_be_deleted[m]]) - peaks_new_extra.append(int((min_cluster + max_cluster) / 2.0)) - for m1 in range(len(clusters_to_be_deleted[m])): - peaks_new = peaks_new[peaks_new != peaks_e[clusters_to_be_deleted[m][m1] - 1]] - peaks_new = peaks_new[peaks_new != peaks_e[clusters_to_be_deleted[m][m1]]] - peaks_neg_new = peaks_neg_new[peaks_neg_new != peaks_neg_e[clusters_to_be_deleted[m][m1]]] - peaks_new_tot = [] - for i1 in peaks_new: - peaks_new_tot.append(i1) - for i1 in peaks_new_extra: - peaks_new_tot.append(i1) - peaks_new_tot = np.sort(peaks_new_tot) - else: - peaks_new_tot = peaks_e[:] + clusters_to_be_deleted = [] + if len(arg_diff_cluster) > 0: + clusters_to_be_deleted.append(arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) + for i in range(len(arg_diff_cluster) - 1): + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : arg_diff_cluster[i + 1] + 1]) + 
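The clusters_to_be_deleted bookkeeping that recurs throughout this file implements a single idea: gaps that are too shallow (here, under 0.3 of the deepest one) signal over-segmentation, so each consecutive run of the affected maxima collapses to one midpoint peak. A compact sketch, assuming the weak-gap indices are precomputed as in arg_neg_must_be_deleted above:

    import numpy as np

    def merge_weak_gap_runs(peaks, weak_idx):
        # peaks: projection maxima; weak_idx: indices (into peaks) of
        # maxima separated by too-shallow minima
        peaks = np.asarray(peaks)
        if len(weak_idx) == 0:
            return peaks
        weak_idx = np.asarray(weak_idx)
        runs = np.split(weak_idx, np.flatnonzero(np.diff(weak_idx) > 1) + 1)
        drop = np.zeros(len(peaks), dtype=bool)
        merged = []
        for run in runs:
            # replace the run's outermost maxima by their midpoint
            merged.append(int((peaks[run.min()] + peaks[run.max()]) / 2))
            drop[run] = True
        return np.sort(np.concatenate([peaks[~drop], merged]))

For example, merge_weak_gap_runs([10, 30, 50, 200], [1, 2]) yields [10, 40, 200].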
clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) + if len(clusters_to_be_deleted) > 0: + peaks_new_extra = [] + for m in range(len(clusters_to_be_deleted)): + min_cluster = np.min(peaks_e[clusters_to_be_deleted[m]]) + max_cluster = np.max(peaks_e[clusters_to_be_deleted[m]]) + peaks_new_extra.append(int((min_cluster + max_cluster) / 2.0)) + for m1 in range(len(clusters_to_be_deleted[m])): + peaks_new = peaks_new[peaks_new != peaks_e[clusters_to_be_deleted[m][m1] - 1]] + peaks_new = peaks_new[peaks_new != peaks_e[clusters_to_be_deleted[m][m1]]] + peaks_neg_new = peaks_neg_new[peaks_neg_new != peaks_neg_e[clusters_to_be_deleted[m][m1]]] + peaks_new_tot = [] + for i1 in peaks_new: + peaks_new_tot.append(i1) + for i1 in peaks_new_extra: + peaks_new_tot.append(i1) + peaks_new_tot = np.sort(peaks_new_tot) + else: + peaks_new_tot = peaks_e[:] - textline_con, hierarchy = return_contours_of_image(img_patch) - textline_con_fil = filter_contours_area_of_image(img_patch, - textline_con, hierarchy, - max_area=1, min_area=0.0008) - if len(np.diff(peaks_new_tot)): + textline_con, hierarchy = return_contours_of_image(img_patch) + textline_con_fil = filter_contours_area_of_image(img_patch, + textline_con, hierarchy, + max_area=1, min_area=0.0008) y_diff_mean = np.mean(np.diff(peaks_new_tot)) # self.find_contours_mean_y_diff(textline_con_fil) - sigma_gaus = int(y_diff_mean * (7.0 / 40.0)) - else: - sigma_gaus = 12 - except: - sigma_gaus = 12 - if sigma_gaus < 3: - sigma_gaus = 3 + sigma_gaus = int(y_diff_mean * (7.0 / 40.0)) + # print(sigma_gaus,'sigma_gaus') + except: + sigma_gaus = 12 + if sigma_gaus < 3: + sigma_gaus = 3 + # print(sigma_gaus,'sigma') y_padded_smoothed = gaussian_filter1d(y_padded, sigma_gaus) - y_padded_neg = np.pad(np.max(y_padded) - y_padded, (20,)) - y_padded_neg_smoothed = gaussian_filter1d(y_padded_neg, sigma_gaus) + y_padded_up_to_down = -y_padded + np.max(y_padded) + y_padded_up_to_down_padded = np.zeros(len(y_padded_up_to_down) + 40) + y_padded_up_to_down_padded[20 : len(y_padded_up_to_down) + 20] = y_padded_up_to_down + y_padded_up_to_down_padded = gaussian_filter1d(y_padded_up_to_down_padded, sigma_gaus) peaks, _ = find_peaks(y_padded_smoothed, height=0) - peaks_neg, _ = find_peaks(y_padded_neg_smoothed, height=0) + peaks_neg, _ = find_peaks(y_padded_up_to_down_padded, height=0) peaks_new = peaks[:] peaks_neg_new = peaks_neg[:] try: - arg_neg_must_be_deleted = np.arange(len(peaks_neg))[ - y_padded_neg_smoothed[peaks_neg] < - y_padded_smoothed[peaks].max() * 0.24] + neg_peaks_max = np.max(y_padded_smoothed[peaks]) + + arg_neg_must_be_deleted = np.arange(len(peaks_neg))[y_padded_up_to_down_padded[peaks_neg] / float(neg_peaks_max) < 0.24] diff_arg_neg_must_be_deleted = np.diff(arg_neg_must_be_deleted) - arg_diff = np.arange(len(diff_arg_neg_must_be_deleted)) + arg_diff = np.array(range(len(diff_arg_neg_must_be_deleted))) arg_diff_cluster = arg_diff[diff_arg_neg_must_be_deleted > 1] clusters_to_be_deleted = [] - if len(arg_neg_must_be_deleted) >= 2 and len(arg_diff_cluster) >= 2: + if len(arg_diff_cluster) >= 2 and len(arg_diff_cluster) > 0: clusters_to_be_deleted.append(arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) for i in range(len(arg_diff_cluster) - 1): clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : @@ -1053,7 +1034,7 @@ def separate_lines_new_inside_tiles2(img_patch, _): clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) elif 
len(arg_neg_must_be_deleted) >= 2 and len(arg_diff_cluster) == 0: clusters_to_be_deleted.append(arg_neg_must_be_deleted[:]) - else: + if len(arg_neg_must_be_deleted) == 1: clusters_to_be_deleted.append(arg_neg_must_be_deleted) if len(clusters_to_be_deleted) > 0: peaks_new_extra = [] @@ -1072,12 +1053,12 @@ def separate_lines_new_inside_tiles2(img_patch, _): peaks_new_tot.append(i1) peaks_new_tot = np.sort(peaks_new_tot) - # plt.plot(y_padded_neg_smoothed) - # plt.plot(peaks_neg,y_padded_neg_smoothed[peaks_neg],'*') + # plt.plot(y_padded_up_to_down_padded) + # plt.plot(peaks_neg,y_padded_up_to_down_padded[peaks_neg],'*') # plt.show() - # plt.plot(y_padded_neg_smoothed) - # plt.plot(peaks_neg_new,y_padded_neg_smoothed[peaks_neg_new],'*') + # plt.plot(y_padded_up_to_down_padded) + # plt.plot(peaks_neg_new,y_padded_up_to_down_padded[peaks_neg_new],'*') # plt.show() # plt.plot(y_padded_smoothed) @@ -1096,49 +1077,60 @@ def separate_lines_new_inside_tiles2(img_patch, _): peaks_new_tot = peaks[:] peaks = peaks_new_tot[:] peaks_neg = peaks_neg_new[:] - - # if len(y_padded_smoothed[peaks]) > 1: - # mean_value_of_peaks = np.mean(y_padded_smoothed[peaks]) - # std_value_of_peaks = np.std(y_padded_smoothed[peaks]) - # else: - # mean_value_of_peaks = np.nan - # std_value_of_peaks = np.nan - - # peaks_values = y_padded_smoothed[peaks] - def clip(positions): - # prevent wrap around array bounds - return np.maximum(0, np.minimum(img_patch.shape[0] - 1, positions)) + mean_value_of_peaks = np.mean(y_padded_smoothed[peaks]) + std_value_of_peaks = np.std(y_padded_smoothed[peaks]) + peaks_values = y_padded_smoothed[peaks] - peaks_neg_true = clip(np.array(peaks_neg) - 40) - peaks_pos_true = clip(np.array(peaks) - 20) + ###peaks_neg = peaks_neg - 20 - 20 + ###peaks = peaks - 20 + peaks_neg_true = peaks_neg[:] + peaks_pos_true = peaks[:] - # ax1 = plt.subplot(1, 2, 1, title="textline mask slice") - # plt.imshow(img_patch, aspect="auto") - # ax2 = plt.subplot(1, 2, 2, title="projection profile", sharey=ax1) - # plt.plot(y, x) - # ax2.scatter(y[peaks_neg_true], peaks_neg_true, color='r', label="neg (0)") - # ax2.scatter(y[peaks_pos_true], peaks_pos_true, color='g', label="pos (1)") - # plt.legend() - # plt.show() + if len(peaks_neg_true) > 0: + peaks_neg_true = np.array(peaks_neg_true) - offsets = np.arange(-6, 6) - def add_offsets(positions): - # let y range around peak positions (without slice indexing) - return (positions[np.newaxis] + offsets[:, np.newaxis]).flatten() + peaks_neg_true = peaks_neg_true - 20 - 20 - if peaks_neg_true.size: - img_patch[clip(add_offsets(peaks_neg_true))] = 0 + # print(peaks_neg_true) + for i in range(len(peaks_neg_true)): + img_patch[peaks_neg_true[i] - 6 : peaks_neg_true[i] + 6, :] = 0 + else: + pass - if peaks_pos_true.size: - img_patch[clip(add_offsets(peaks_pos_true))] = 1 + if len(peaks_pos_true) > 0: + peaks_pos_true = np.array(peaks_pos_true) + peaks_pos_true = peaks_pos_true - 20 + for i in range(len(peaks_pos_true)): + ##img_patch[peaks_pos_true[i]-8:peaks_pos_true[i]+8,:]=1 + img_patch[peaks_pos_true[i] - 6 : peaks_pos_true[i] + 6, :] = 1 + else: + pass kernel = np.ones((5, 5), np.uint8) + # img_patch = cv2.erode(img_patch,kernel,iterations = 3) + #######################img_patch = cv2.erode(img_patch,kernel,iterations = 2) img_patch = cv2.erode(img_patch, kernel, iterations=1) return img_patch -def separate_lines_new_inside_tiles(img_path, _): +def separate_lines_new_inside_tiles(img_path, thetha): + (h, w) = img_path.shape[:2] + center = (w // 2, h // 2) + M = 
cv2.getRotationMatrix2D(center, -thetha, 1.0) + x_d = M[0, 2] + y_d = M[1, 2] + + thetha = thetha / 180.0 * np.pi + rotation_matrix = np.array([[np.cos(thetha), -np.sin(thetha)], [np.sin(thetha), np.cos(thetha)]]) + + x_min_cont = 0 + x_max_cont = img_path.shape[1] + y_min_cont = 0 + y_max_cont = img_path.shape[0] + + xv = np.linspace(x_min_cont, x_max_cont, 1000) + mada_n = img_path.sum(axis=1) ##plt.plot(mada_n) @@ -1185,11 +1177,13 @@ def separate_lines_new_inside_tiles(img_path, _): if diff_peaks[i] <= cut_off: forest.append(peaks_neg[i + 1]) if diff_peaks[i] > cut_off: + # print(forest[np.argmin(z[forest]) ] ) if not np.isnan(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) forest = [] forest.append(peaks_neg[i + 1]) if i == (len(peaks_neg) - 1): + # print(print(forest[np.argmin(z[forest]) ] )) if not np.isnan(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) @@ -1206,14 +1200,17 @@ def separate_lines_new_inside_tiles(img_path, _): if diff_peaks_pos[i] <= cut_off: forest.append(peaks[i + 1]) if diff_peaks_pos[i] > cut_off: + # print(forest[np.argmin(z[forest]) ] ) if not np.isnan(forest[np.argmax(z[forest])]): peaks_pos_true.append(forest[np.argmax(z[forest])]) forest = [] forest.append(peaks[i + 1]) if i == (len(peaks) - 1): + # print(print(forest[np.argmin(z[forest]) ] )) if not np.isnan(forest[np.argmax(z[forest])]): peaks_pos_true.append(forest[np.argmax(z[forest])]) + # print(len(peaks_neg_true) ,len(peaks_pos_true) ,'lensss') if len(peaks_neg_true) > 0: peaks_neg_true = np.array(peaks_neg_true) @@ -1239,6 +1236,7 @@ def separate_lines_new_inside_tiles(img_path, _): """ peaks_neg_true = peaks_neg_true - 20 - 20 + # print(peaks_neg_true) for i in range(len(peaks_neg_true)): img_path[peaks_neg_true[i] - 6 : peaks_neg_true[i] + 6, :] = 0 @@ -1259,156 +1257,252 @@ def separate_lines_new_inside_tiles(img_path, _): img_path = cv2.erode(img_path, kernel, iterations=2) return img_path -def separate_lines_vertical_cont(textline_mask, box_ind): +def separate_lines_vertical_cont(img_patch, contour_text_interest, thetha, box_ind, add_boxes_coor_into_textlines): kernel = np.ones((5, 5), np.uint8) + pixel = 255 min_area = 0 max_area = 1 - _, thresh = cv2.threshold(textline_mask.astype(np.uint8), 0, 255, 0) - contours, hierarchy = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - contours = return_parent_contours(contours, hierarchy) - contours = filter_contours_area_of_image_tables(thresh, - contours, hierarchy, - max_area=max_area, - min_area=min_area) - contours_final = [] - for contour in contours: - img = np.zeros_like(textline_mask, dtype=np.uint8) - img = cv2.fillPoly(img, pts=[contour], color=255) - img = cv2.dilate(img, kernel, iterations=4) - _, thresh = cv2.threshold(img, 0, 255, 0) - contours_text_rot, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + if len(img_patch.shape) == 3: + cnts_images = (img_patch[:, :, 0] == pixel) * 1 + else: + cnts_images = (img_patch[:, :] == pixel) * 1 + cnts_images = cnts_images.astype(np.uint8) + cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) + imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - contours_final.append(contours_text_rot[0]) + contours_imgs = return_parent_contours(contours_imgs, hierarchy) + contours_imgs = 
filter_contours_area_of_image_tables(thresh, + contours_imgs, hierarchy, + max_area=max_area, min_area=min_area) + cont_final = [] + ###print(add_boxes_coor_into_textlines,'ikki') + for i in range(len(contours_imgs)): + img_contour = np.zeros((cnts_images.shape[0], cnts_images.shape[1], 3)) + img_contour = cv2.fillPoly(img_contour, pts=[contours_imgs[i]], color=(255, 255, 255)) + img_contour = img_contour.astype(np.uint8) - return None, contours_final + img_contour = cv2.dilate(img_contour, kernel, iterations=4) + imgrayrot = cv2.cvtColor(img_contour, cv2.COLOR_BGR2GRAY) + _, threshrot = cv2.threshold(imgrayrot, 0, 255, 0) + contours_text_rot, _ = cv2.findContours(threshrot.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) -def textline_contours_postprocessing(textline_mask, angle, contour_parent): - x, y, w, h = cv2.boundingRect(contour_parent) - label = 255 - textline_mask = textline_mask * label + ##contour_text_copy[:, 0, 0] = contour_text_copy[:, 0, 0] - box_ind[ + ##0] + ##contour_text_copy[:, 0, 1] = contour_text_copy[:, 0, 1] - box_ind[1] + ##if add_boxes_coor_into_textlines: + ##print(np.shape(contours_text_rot[0]),'sjppo') + ##contours_text_rot[0][:, 0, 0]=contours_text_rot[0][:, 0, 0] + box_ind[0] + ##contours_text_rot[0][:, 0, 1]=contours_text_rot[0][:, 0, 1] + box_ind[1] + cont_final.append(contours_text_rot[0]) + + ##print(cont_final,'nadizzzz') + return None, cont_final + +def textline_contours_postprocessing(textline_mask, slope, contour_text_interest, box_ind, add_boxes_coor_into_textlines=False): + textline_mask = np.repeat(textline_mask[:, :, np.newaxis], 3, axis=2) * 255 + textline_mask = textline_mask.astype(np.uint8) kernel = np.ones((5, 5), np.uint8) textline_mask = cv2.morphologyEx(textline_mask, cv2.MORPH_OPEN, kernel) textline_mask = cv2.morphologyEx(textline_mask, cv2.MORPH_CLOSE, kernel) textline_mask = cv2.erode(textline_mask, kernel, iterations=2) # textline_mask = cv2.erode(textline_mask, kernel, iterations=1) - textline_mask_d = rotate_image_enlarge(textline_mask, angle) - #textline_mask_d[textline_mask_d != 0] = 1 + # print(textline_mask.shape[0]/float(textline_mask.shape[1]),'miz') + try: + # if np.abs(slope)>.5 and textline_mask.shape[0]/float(textline_mask.shape[1])>3: + # plt.imshow(textline_mask) + # plt.show() - # if np.abs(angle)>.5 and textline_mask.shape[0]/float(textline_mask.shape[1])>3: + # if abs(slope)>1: + # x_help=30 + # y_help=2 + # else: + # x_help=2 + # y_help=2 - contour_parent = contour_parent - [x, y] - img = np.zeros((h, w), dtype=np.uint8) - img = cv2.fillPoly(img, pts=[contour_parent], color=255) - img_d = rotate_image_enlarge(img, angle) + x_help = 30 + y_help = 2 - _, thresh = cv2.threshold(img_d, 0, 255, 0) - contours_parent_d, _ = cv2.findContours(thresh.astype(np.uint8), - cv2.RETR_EXTERNAL, - cv2.CHAIN_APPROX_SIMPLE) - contour_parent_d = contours_parent_d[ - np.argmax(map(cv2.contourArea, contours_parent_d))] + textline_mask_help = np.zeros((textline_mask.shape[0] + int(2 * y_help), + textline_mask.shape[1] + int(2 * x_help), 3)) + textline_mask_help[y_help : y_help + textline_mask.shape[0], + x_help : x_help + textline_mask.shape[1], :] = np.copy(textline_mask[:, :, :]) - _, contours_rotated_clean = separate_lines( - textline_mask_d, contour_parent_d, - # already deskewed! 
- #angle, x_off, y_off) - 0, 0, 0) + dst = rotate_image(textline_mask_help, slope) + dst = dst[:, :, 0] + dst[dst != 0] = 1 + + # if np.abs(slope)>.5 and textline_mask.shape[0]/float(textline_mask.shape[1])>3: + # plt.imshow(dst) + # plt.show() + + contour_text_copy = contour_text_interest.copy() + contour_text_copy[:, 0, 0] = contour_text_copy[:, 0, 0] - box_ind[0] + contour_text_copy[:, 0, 1] = contour_text_copy[:, 0, 1] - box_ind[1] + + img_contour = np.zeros((box_ind[3], box_ind[2], 3)) + img_contour = cv2.fillPoly(img_contour, pts=[contour_text_copy], color=(255, 255, 255)) + + # if np.abs(slope)>.5 and textline_mask.shape[0]/float(textline_mask.shape[1])>3: + # plt.imshow(img_contour) + # plt.show() + + img_contour_help = np.zeros((img_contour.shape[0] + int(2 * y_help), + img_contour.shape[1] + int(2 * x_help), 3)) + img_contour_help[y_help : y_help + img_contour.shape[0], + x_help : x_help + img_contour.shape[1], :] = np.copy(img_contour[:, :, :]) + + img_contour_rot = rotate_image(img_contour_help, slope) + # plt.imshow(img_contour_rot_help) + # plt.show() + + # plt.imshow(dst_help) + # plt.show() + + # if np.abs(slope)>.5 and textline_mask.shape[0]/float(textline_mask.shape[1])>3: + # plt.imshow(img_contour_rot_help) + # plt.show() + + # plt.imshow(dst_help) + # plt.show() + + img_contour_rot = img_contour_rot.astype(np.uint8) + # dst_help = dst_help.astype(np.uint8) + imgrayrot = cv2.cvtColor(img_contour_rot, cv2.COLOR_BGR2GRAY) + _, threshrot = cv2.threshold(imgrayrot, 0, 255, 0) + contours_text_rot, _ = cv2.findContours(threshrot.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + len_con_text_rot = [len(contours_text_rot[ib]) for ib in range(len(contours_text_rot))] + ind_big_con = np.argmax(len_con_text_rot) + + # print('juzaa') + if abs(slope) > 45: + # print(add_boxes_coor_into_textlines,'avval') + _, contours_rotated_clean = separate_lines_vertical_cont( + textline_mask, contours_text_rot[ind_big_con], box_ind, slope, + add_boxes_coor_into_textlines=add_boxes_coor_into_textlines) + else: + _, contours_rotated_clean = separate_lines( + dst, contours_text_rot[ind_big_con], slope, x_help, y_help) + except: + contours_rotated_clean = [] - # undo relative coordinates - transform = rotate_coordinates(np.eye(3), -angle, 0.5 * np.array([h, w])) - transform = shift_coordinates(transform, [x, y]) - contours_rotated_clean = [np.round(transform_coordinates(contour[:, 0], - transform)[:, np.newaxis]) - .astype(int) - for contour in contours_rotated_clean - if len(contour) > 3] return contours_rotated_clean -def separate_lines_new2(img_crop, _, num_col, slope_region, logger=None, plotter=None): - """ - morph textline mask to cope with warped lines by independently deskewing horizontal slices - """ +def separate_lines_new2(img_path, thetha, num_col, slope_region, logger=None, plotter=None): if logger is None: logger = getLogger(__package__) - if not np.prod(img_crop.shape): - return img_crop - height, width = img_crop.shape - num_patches = max(1, width // (200 if num_col == 1 else 140)) - length_x = width // num_patches + if num_col == 1: + num_patches = int(img_path.shape[1] / 200.0) + else: + num_patches = int(img_path.shape[1] / 140.0) + # num_patches=int(img_path.shape[1]/200.) 
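
(Aside: the hunk above reinstates the tile-based layout of `separate_lines_new2`, which splits a text-region mask into overlapping vertical slices so each slice can be deskewed on its own. A minimal sketch of the window arithmetic that both the removed and the restored variants implement; `tile_bounds` is an illustrative name, not an eynollah function, and breaking after the clamped last window is a simplification:)

```python
def tile_bounds(width, num_col=1):
    # Tile width: ~200 px for single-column pages, ~140 px otherwise.
    num_patches = max(1, width // (200 if num_col == 1 else 140))
    length_x = width // num_patches
    margin = int(0.04 * length_x)        # overlap absorbed at each side
    width_mid = length_x - 2 * margin    # stride between window starts
    bounds = []
    for x_d in range(0, width, width_mid):
        x_u = x_d + length_x
        if x_u > width:                  # clamp the last window to the border
            x_u = width
            x_d = width - length_x
        bounds.append((x_d, x_u))
        if x_u == width:
            break
    return bounds

# e.g. tile_bounds(1000) ->
# [(0, 200), (184, 384), (368, 568), (552, 752), (736, 936), (800, 1000)]
```
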
+ if num_patches == 0: + num_patches = 1 + + img_patch_ineterst = img_path[:, :] # [peaks_neg_true[14]-dis_up:peaks_neg_true[15]+dis_down ,:] + + # plt.imshow(img_patch_ineterst) + # plt.show() + + length_x = int(img_path.shape[1] / float(num_patches)) # margin = int(0.04 * length_x) just recently this was changed because it break lines into 2 margin = int(0.04 * length_x) + # print(margin,'margin') # if margin<=4: # margin = int(0.08 * length_x) # margin=0 width_mid = length_x - 2 * margin + nxf = img_path.shape[1] / float(width_mid) - img_crop_revised = np.zeros_like(img_crop) - for index_x_d in range(0, width, width_mid): - index_x_u = index_x_d + length_x - if index_x_u > width: - if index_x_u >= width + width_mid: - break # already in last window - index_x_u = width - index_x_d = width - length_x + if nxf > int(nxf): + nxf = int(nxf) + 1 + else: + nxf = int(nxf) - # box = (slice(index_y_d, index_y_u), slice(index_x_d, index_x_u)) - # img_patch = img_crop[box] - box = (slice(None), slice(index_x_d, index_x_u)) - img_xline = img_crop[box] + slopes_tile_wise = [] + for i in range(nxf): + if i == 0: + index_x_d = i * width_mid + index_x_u = index_x_d + length_x + elif i > 0: + index_x_d = i * width_mid + index_x_u = index_x_d + length_x - if img_xline.any(): + if index_x_u > img_path.shape[1]: + index_x_u = img_path.shape[1] + index_x_d = img_path.shape[1] - length_x + + # img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] + img_xline = img_patch_ineterst[:, index_x_d:index_x_u] + + try: + assert img_xline.any() slope_xline = return_deskew_slop(img_xline, 2, logger=logger, plotter=plotter) - else: - continue + except: + slope_xline = 0 - if (abs(slope_region) < 25 and - abs(slope_xline) > 25): - slope_xline = slope_region + if abs(slope_region) < 25 and abs(slope_xline) > 25: + slope_xline = [slope_region][0] # if abs(slope_region)>70 and abs(slope_xline)<25: - # slope_xline = slope_region + # slope_xline=[slope_region][0] + slopes_tile_wise.append(slope_xline) + # print(slope_xline,'xlineeee') + img_line_rotated = rotate_image(img_xline, slope_xline) + img_line_rotated[:, :][img_line_rotated[:, :] != 0] = 1 - pad_above = pad_below = int(img_xline.shape[0] * 0.1) - pad_left = pad_right = img_xline.shape[1] - img_xline_padded = np.pad(img_xline, ((pad_above, pad_below), - (pad_left, pad_right))) - # plt.subplot(2, 2, 1, title="xline padded") - # plt.imshow(img_xline_padded) - img_xline_rotated = rotate_image(img_xline_padded, slope_xline) - #img_xline_rotated[img_xline_rotated != 0] = 1 - # plt.subplot(2, 2, 2, title="xline rotated") - # plt.imshow(img_xline_rotated) - img_xline_separated = separate_lines_new_inside_tiles2(img_xline_rotated, 0) - # plt.subplot(2, 2, 3, title="xline separated") - # plt.imshow(img_xline_separated) - img_xline_separated = rotate_image(img_xline_separated, -slope_xline) - #img_xline_separated[img_xline_separated != 0] = 1 - # plt.subplot(2, 2, 4, title="xline unrotated") - # plt.imshow(img_xline_separated) + # print(slopes_tile_wise,'slopes_tile_wise') + img_patch_ineterst = img_path[:, :] # [peaks_neg_true[14]-dis_up:peaks_neg_true[14]+dis_down ,:] + + img_patch_ineterst_revised = np.zeros(img_patch_ineterst.shape) + + for i in range(nxf): + if i == 0: + index_x_d = i * width_mid + index_x_u = index_x_d + length_x + elif i > 0: + index_x_d = i * width_mid + index_x_u = index_x_d + length_x + + if index_x_u > img_path.shape[1]: + index_x_u = img_path.shape[1] + index_x_d = img_path.shape[1] - length_x + + img_xline = img_patch_ineterst[:, 
index_x_d:index_x_u] + + img_int = np.zeros((img_xline.shape[0], img_xline.shape[1])) + img_int[:, :] = img_xline[:, :] # img_patch_org[:,:,0] + + img_resized = np.zeros((int(img_int.shape[0] * (1.2)), int(img_int.shape[1] * (3)))) + img_resized[int(img_int.shape[0] * (0.1)) : int(img_int.shape[0] * (0.1)) + img_int.shape[0], + int(img_int.shape[1] * (1.0)) : int(img_int.shape[1] * (1.0)) + img_int.shape[1]] = img_int[:, :] + # plt.imshow(img_xline) # plt.show() + img_line_rotated = rotate_image(img_resized, slopes_tile_wise[i]) + img_line_rotated[:, :][img_line_rotated[:, :] != 0] = 1 - # unpad - img_xline_separated = img_xline_separated[ - pad_above: -pad_below, - pad_left: -pad_right] + img_patch_separated = separate_lines_new_inside_tiles2(img_line_rotated, 0) - # window - window = (slice(None), slice(margin, -margin or None)) - img_crop_revised[box][window] = img_xline_separated[window] - # plt.subplot(1, 2, 1, title="original box") - # plt.imshow(img_crop[box]) - # plt.gca().add_patch(patches.Rectangle((margin, 0), length_x - 2 * margin, height, alpha=0.5, color='gray')) - # plt.subplot(1, 2, 2, title="revised box") - # plt.imshow(img_crop_revised[box]) - # plt.gca().add_patch(patches.Rectangle((margin, 0), length_x - 2 * margin, height, alpha=0.5, color='gray')) - # plt.show() + img_patch_separated_returned = rotate_image(img_patch_separated, -slopes_tile_wise[i]) + img_patch_separated_returned[:, :][img_patch_separated_returned[:, :] != 0] = 1 - return img_crop_revised + img_patch_separated_returned_true_size = img_patch_separated_returned[ + int(img_int.shape[0] * (0.1)) : int(img_int.shape[0] * (0.1)) + img_int.shape[0], + int(img_int.shape[1] * (1.0)) : int(img_int.shape[1] * (1.0)) + img_int.shape[1]] -def do_image_rotation(angle, img=None, sigma_des=1.0, logger=None): + img_patch_separated_returned_true_size = img_patch_separated_returned_true_size[:, margin : length_x - margin] + img_patch_ineterst_revised[:, index_x_d + margin : index_x_u - margin] = img_patch_separated_returned_true_size + + # plt.imshow(img_patch_ineterst_revised) + # plt.show() + return img_patch_ineterst_revised + +def do_image_rotation(angle, img, sigma_des, logger=None): if logger is None: logger = getLogger(__package__) img_rot = rotate_image(img, angle) @@ -1420,81 +1514,149 @@ def do_image_rotation(angle, img=None, sigma_des=1.0, logger=None): var = 0 return var -def return_deskew_slop(img, - sigma_des, - n_tot_angles=100, - main_page=False, - logger=None, - plotter=None, - name=None): +def return_deskew_slop(img_patch_org, sigma_des,n_tot_angles=100, + main_page=False, logger=None, plotter=None, map=map): if main_page and plotter: - plotter.save_plot_of_textline_density(img, name) + plotter.save_plot_of_textline_density(img_patch_org) - height, width = img.shape[:2] - max_shape = int(np.max(img.shape) * 1.1) + img_int=np.zeros((img_patch_org.shape[0],img_patch_org.shape[1])) + img_int[:,:]=img_patch_org[:,:]#img_patch_org[:,:,0] - onset_x = int(0.5 * (max_shape - width)) - onset_y = int(0.5 * (max_shape - height)) + max_shape=np.max(img_int.shape) + img_resized=np.zeros((int( max_shape*(1.1) ) , int( max_shape*(1.1) ) )) - img_resized = np.zeros((max_shape, max_shape)) - img_resized[onset_y: onset_y + height, - onset_x: onset_x + width] = img + onset_x=int((img_resized.shape[1]-img_int.shape[1])/2.) + onset_y=int((img_resized.shape[0]-img_int.shape[0])/2.) 
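
(Note: `return_deskew_slop` embeds the patch centered in a square canvas of side `1.1 * max_shape` so rotation cannot clip any content, then scores a set of candidate angles. A minimal sketch of the scoring idea, assuming the variance-of-projection criterion that `do_image_rotation` computes: at the correct deskew angle, text rows and inter-line gaps alternate sharply, which maximizes the variance of the smoothed horizontal projection profile. `rotation_variance` and `best_angle` are illustrative names, and plain `cv2.warpAffine` stands in for eynollah's `rotate_image` helper:)

```python
import numpy as np
import cv2
from scipy.ndimage import gaussian_filter1d

def rotation_variance(img, angle, sigma_des=1.0):
    # img: 2D binary textline mask, already padded so rotation cannot clip it
    h, w = img.shape[:2]
    m = cv2.getRotationMatrix2D((w // 2, h // 2), angle, 1.0)
    img_rot = cv2.warpAffine(img, m, (w, h))
    profile = img_rot.sum(axis=1)  # ink per row
    return np.var(gaussian_filter1d(profile.astype(float), sigma_des))

def best_angle(img, angles, sigma_des=1.0):
    # mirrors get_smallest_skew: keep the angle with the largest variance
    return max(angles, key=lambda a: rotation_variance(img, a, sigma_des))
```
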
- def best_angle(angles): - return get_smallest_skew(img_resized, sigma_des, angles, - logger=logger, - name=name, - plotter=plotter) + #img_resized=np.zeros((int( img_int.shape[0]*(1.8) ) , int( img_int.shape[1]*(2.6) ) )) + #img_resized[ int( img_int.shape[0]*(.4)):int( img_int.shape[0]*(.4))+img_int.shape[0] , int( img_int.shape[1]*(.8)):int( img_int.shape[1]*(.8))+img_int.shape[1] ]=img_int[:,:] + img_resized[ onset_y:onset_y+img_int.shape[0] , onset_x:onset_x+img_int.shape[1] ]=img_int[:,:] - if main_page and width > height: + #print(img_resized.shape,'img_resizedshape') + #plt.imshow(img_resized) + #plt.show() + if main_page and img_patch_org.shape[1] > img_patch_org.shape[0]: + #plt.imshow(img_resized) + #plt.show() angles = np.array([-45, 0, 45, 90,]) - angle, _ = best_angle(angles) + angle = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) angles = np.linspace(angle - 22.5, angle + 22.5, n_tot_angles) - angle, _ = best_angle(sigma_des) + angle = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) elif main_page: - #angles = np.linspace(-12, 12, n_tot_angles)#np.array([0 , 45 , 90 , -45]) - angles = np.concatenate((np.linspace(-12, -7, n_tot_angles // 4), - np.linspace(-6, 6, n_tot_angles // 2), - np.linspace(7, 12, n_tot_angles // 4))) - angle, var = best_angle(angles) + #plt.imshow(img_resized) + #plt.show() + angles = np.linspace(-12, 12, n_tot_angles)#np.array([0 , 45 , 90 , -45]) + angle = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) + early_slope_edge=11 + if abs(angle) > early_slope_edge: + if angle < 0: + angles = np.linspace(-90, -12, n_tot_angles) + else: + angles = np.linspace(90, 12, n_tot_angles) + angle = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) else: angles = np.linspace(-25, 25, int(0.5 * n_tot_angles) + 10) - angle, var = best_angle(angles) + angle = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) - # precision stage: - angles = np.linspace(angle - 2.5, angle + 2.5, n_tot_angles // 2) - angle, _ = best_angle(angles) + early_slope_edge=22 + if abs(angle) > early_slope_edge: + if angle < 0: + angles = np.linspace(-90, -25, int(0.5 * n_tot_angles) + 10) + else: + angles = np.linspace(90, 25, int(0.5 * n_tot_angles) + 10) + angle = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) return angle -def get_smallest_skew(img, sigma_des, angles, logger=None, plotter=None, name=None): +def get_smallest_skew(img, sigma_des, angles, logger=None, plotter=None, map=map): if logger is None: logger = getLogger(__package__) - results = [do_image_rotation(angle, img=img, sigma_des=sigma_des, logger=logger) - for angle in angles] + results = list(map(partial(do_image_rotation, img=img, sigma_des=sigma_des, logger=logger), angles)) if plotter: - plotter.save_plot_of_rotation_angle(angles, results, name) + plotter.save_plot_of_rotation_angle(angles, results) try: var_res = np.array(results) assert var_res.any() - idx = np.argmax(var_res) - angle = angles[idx] - var = var_res[idx] + angle = angles[np.argmax(var_res)] except: logger.exception("cannot determine best angle among %s", str(angles)) angle = 0 - var = 0 - return angle, var + return angle + +def do_work_of_slopes_new( + box_text, contour, contour_par, index_r_con, + textline_mask_tot_ea, image_page_rotated, slope_deskew, + logger=None, MAX_SLOPE=999, KERNEL=None, 
plotter=None +): + if KERNEL is None: + KERNEL = np.ones((5, 5), np.uint8) + if logger is None: + logger = getLogger(__package__) + logger.debug('enter do_work_of_slopes_new') + + x, y, w, h = box_text + _, crop_coor = crop_image_inside_box(box_text, image_page_rotated) + mask_textline = np.zeros(textline_mask_tot_ea.shape) + mask_textline = cv2.fillPoly(mask_textline, pts=[contour], color=(1,1,1)) + all_text_region_raw = textline_mask_tot_ea * mask_textline + all_text_region_raw = all_text_region_raw[y: y + h, x: x + w].astype(np.uint8) + img_int_p = all_text_region_raw[:,:] + img_int_p = cv2.erode(img_int_p, KERNEL, iterations=2) + + if img_int_p.shape[0] /img_int_p.shape[1] < 0.1: + slope = 0 + slope_for_all = slope_deskew + all_text_region_raw = textline_mask_tot_ea[y: y + h, x: x + w] + cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contour_par, box_text, 0) + else: + try: + textline_con, hierarchy = return_contours_of_image(img_int_p) + textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con, + hierarchy, + max_area=1, min_area=0.00008) + y_diff_mean = find_contours_mean_y_diff(textline_con_fil) if len(textline_con_fil) > 1 else np.NaN + if np.isnan(y_diff_mean): + slope_for_all = MAX_SLOPE + else: + sigma_des = max(1, int(y_diff_mean * (4.0 / 40.0))) + img_int_p[img_int_p > 0] = 1 + slope_for_all = return_deskew_slop(img_int_p, sigma_des, logger=logger, plotter=plotter) + if abs(slope_for_all) <= 0.5: + slope_for_all = slope_deskew + except: + logger.exception("cannot determine angle of contours") + slope_for_all = MAX_SLOPE + + if slope_for_all == MAX_SLOPE: + slope_for_all = slope_deskew + slope = slope_for_all + + mask_only_con_region = np.zeros(textline_mask_tot_ea.shape) + mask_only_con_region = cv2.fillPoly(mask_only_con_region, pts=[contour_par], color=(1, 1, 1)) + + # plt.imshow(mask_only_con_region) + # plt.show() + all_text_region_raw = textline_mask_tot_ea[y: y + h, x: x + w].copy() + mask_only_con_region = mask_only_con_region[y: y + h, x: x + w] + + ##plt.imshow(textline_mask_tot_ea) + ##plt.show() + ##plt.imshow(all_text_region_raw) + ##plt.show() + ##plt.imshow(mask_only_con_region) + ##plt.show() + + all_text_region_raw[mask_only_con_region == 0] = 0 + cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contour_par, box_text) + + return cnt_clean_rot, box_text, contour, contour_par, crop_coor, index_r_con, slope def do_work_of_slopes_new_curved( - contour_par, - textline_mask_tot_ea=None, - num_col=1, slope_deskew=0.0, - logger=None, MAX_SLOPE=999, - KERNEL=None, plotter=None, - name=None, + box_text, contour, contour_par, index_r_con, + textline_mask_tot_ea, image_page_rotated, mask_texts_only, num_col, scale_par, slope_deskew, + logger=None, MAX_SLOPE=999, KERNEL=None, plotter=None ): if KERNEL is None: KERNEL = np.ones((5, 5), np.uint8) @@ -1502,112 +1664,93 @@ def do_work_of_slopes_new_curved( logger = getLogger(__package__) logger.debug("enter do_work_of_slopes_new_curved") - x, y, w, h = cv2.boundingRect(contour_par) - - mask_parent = np.zeros((h, w), dtype=np.uint8) - mask_parent = cv2.fillPoly(mask_parent, pts=[contour_par - [x, y]], color=1) - all_text_region_raw = textline_mask_tot_ea[y: y + h, x: x + w] * mask_parent - if not np.any(all_text_region_raw): - return [], slope_deskew - img_int_p = np.copy(all_text_region_raw) - # correct for relative area - rel_area = 1.0 * textline_mask_tot_ea.size / img_int_p.size + x, y, w, h = box_text + all_text_region_raw = 
textline_mask_tot_ea[y: y + h, x: x + w].astype(np.uint8) + img_int_p = all_text_region_raw[:, :] # img_int_p=cv2.erode(img_int_p,KERNEL,iterations = 2) # plt.imshow(img_int_p) # plt.show() - slope = slope_deskew - if h >= 0.1 * w: + if img_int_p.shape[0] / img_int_p.shape[1] < 0.1: + slope = 0 + slope_for_all = slope_deskew + else: try: textline_con, hierarchy = return_contours_of_image(img_int_p) textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con, hierarchy, - min_area=0.0008 * rel_area) - if len(textline_con_fil) > 1: - cx, cy = find_center_of_contours(textline_con_fil) - y_diff_mean = np.median(np.diff(np.sort(np.array(cy)))) - x_diff_mean = np.median(np.diff(np.sort(np.array(cx)))) - if h > w and x_diff_mean / w > 2 * y_diff_mean / h: - # print(len(textline_con_fil), "transposed", x_diff_mean, y_diff_mean) - transposed = True - img_int_p = img_int_p.T - sigma = x_diff_mean - else: - transposed = False - sigma = y_diff_mean - slope = return_deskew_slop(img_int_p, max(1.0, 0.1 * sigma), - logger=logger, - name=name, - plotter=plotter) - if transposed: - slope = -90 - slope if slope < 0 else 90 - slope - if abs(slope - slope_deskew) < 0.5: - slope = slope_deskew - elif len(textline_con_fil): - if h > 3 * w: - # print(1, "transposed", h, w) - transposed = True - img_int_p = img_int_p.T - else: - transposed = False - # do linear regression on mask to get slope - img_ys, img_xs = img_int_p.nonzero() - regression = linregress(x=img_xs, y=img_ys) - slope = 180 / np.pi * np.arctan(regression.slope) - # print(regression, regression.stderr) - if transposed: - slope = 90 - slope - if regression.stderr > 0.005: - slope = slope_deskew + max_area=1, min_area=0.0008) + y_diff_mean = find_contours_mean_y_diff(textline_con_fil) if len(textline_con_fil) > 1 else np.NaN + if np.isnan(y_diff_mean): + slope_for_all = MAX_SLOPE + else: + sigma_des = max(1, int(y_diff_mean * (4.0 / 40.0))) + img_int_p[img_int_p > 0] = 1 + slope_for_all = return_deskew_slop(img_int_p, sigma_des, logger=logger, plotter=plotter) + if abs(slope_for_all) < 0.5: + slope_for_all = slope_deskew except: logger.exception("cannot determine angle of contours") - slope = slope_deskew + slope_for_all = MAX_SLOPE - # print(slope, slope_deskew) + if slope_for_all == MAX_SLOPE: + slope_for_all = slope_deskew + slope = slope_for_all - if abs(slope) < 45: - # apply horizontal tiling, deskew each patch independently - mask_textlines_separated_d = separate_lines_new2(all_text_region_raw, 0, - num_col, slope, + _, crop_coor = crop_image_inside_box(box_text, image_page_rotated) + + if abs(slope_for_all) < 45: + textline_region_in_image = np.zeros(textline_mask_tot_ea.shape) + x, y, w, h = cv2.boundingRect(contour_par) + mask_biggest = np.zeros(mask_texts_only.shape) + mask_biggest = cv2.fillPoly(mask_biggest, pts=[contour_par], color=(1, 1, 1)) + mask_region_in_patch_region = mask_biggest[y : y + h, x : x + w] + textline_biggest_region = mask_biggest * textline_mask_tot_ea + + # print(slope_for_all,'slope_for_all') + textline_rotated_separated = separate_lines_new2(textline_biggest_region[y: y+h, x: x+w], 0, + num_col, slope_for_all, logger=logger, plotter=plotter) - # plt.subplot(1, 2, 1, title="textline mask of region") - # plt.imshow(all_text_region_raw) - # plt.subplot(1, 2, 2, title="separated+deskewed") - # plt.imshow(mask_textlines_separated_d) + + # new line added + ##print(np.shape(textline_rotated_separated),np.shape(mask_biggest)) + textline_rotated_separated[mask_region_in_patch_region[:, :] != 1] = 0 + # till 
here + + textline_region_in_image[y : y + h, x : x + w] = textline_rotated_separated + + # plt.imshow(textline_region_in_image) # plt.show() - textline_contours = return_contours_of_interested_textline( - mask_textlines_separated_d, 1, min_area=3e-9 * rel_area) + pixel_img = 1 + cnt_textlines_in_image = return_contours_of_interested_textline(textline_region_in_image, pixel_img) textlines_cnt_per_region = [] - for contour in textline_contours: - mask_line = np.zeros_like(mask_parent) - mask_line = cv2.fillPoly(mask_line, pts=[contour], color=1) - mask_line = cv2.dilate(mask_line, KERNEL, iterations=5 if num_col == 0 else 4) - # plt.subplot(1, 2, 1, title="parent mask") - # plt.imshow(mask_parent) - # plt.subplot(1, 2, 2, title="single textline") - # plt.imshow(mask_line) - # plt.show() + for jjjj in range(len(cnt_textlines_in_image)): + mask_biggest2 = np.zeros(mask_texts_only.shape) + mask_biggest2 = cv2.fillPoly(mask_biggest2, pts=[cnt_textlines_in_image[jjjj]], color=(1, 1, 1)) + if num_col + 1 == 1: + mask_biggest2 = cv2.dilate(mask_biggest2, KERNEL, iterations=5) + else: + mask_biggest2 = cv2.dilate(mask_biggest2, KERNEL, iterations=4) - textline_contours2 = return_contours_of_interested_textline( - mask_line, 1, min_area=3e-9 * rel_area) - textline_areas2 = np.array(list(map(cv2.contourArea, textline_contours2))) + pixel_img = 1 + mask_biggest2 = resize_image(mask_biggest2, int(mask_biggest2.shape[0] * scale_par), int(mask_biggest2.shape[1] * scale_par)) + cnt_textlines_in_image_ind = return_contours_of_interested_textline(mask_biggest2, pixel_img) try: - contour2 = textline_contours2[np.argmax(textline_areas2)] - textlines_cnt_per_region.append(contour2 + [x, y]) + textlines_cnt_per_region.append(cnt_textlines_in_image_ind[0]) except Exception as why: logger.error(why) else: - textlines_cnt_per_region = textline_contours_postprocessing(all_text_region_raw, - slope, contour_par) + textlines_cnt_per_region = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contour_par, box_text, True) + # print(np.shape(textlines_cnt_per_region),'textlines_cnt_per_region') - return textlines_cnt_per_region[::-1], slope + return textlines_cnt_per_region[::-1], box_text, contour, contour_par, crop_coor, index_r_con, slope def do_work_of_slopes_new_light( - box_text, contour, contour_par, - textline_mask_tot_ea=None, slope_deskew=0, + box_text, contour, contour_par, index_r_con, + textline_mask_tot_ea, image_page_rotated, slope_deskew, textline_light, logger=None ): if logger is None: @@ -1615,6 +1758,7 @@ def do_work_of_slopes_new_light( logger.debug('enter do_work_of_slopes_new_light') x, y, w, h = box_text + _, crop_coor = crop_image_inside_box(box_text, image_page_rotated) mask_textline = np.zeros(textline_mask_tot_ea.shape) mask_textline = cv2.fillPoly(mask_textline, pts=[contour], color=(1,1,1)) all_text_region_raw = textline_mask_tot_ea * mask_textline @@ -1623,10 +1767,16 @@ def do_work_of_slopes_new_light( mask_only_con_region = np.zeros(textline_mask_tot_ea.shape) mask_only_con_region = cv2.fillPoly(mask_only_con_region, pts=[contour_par], color=(1, 1, 1)) - all_text_region_raw = np.copy(textline_mask_tot_ea) - all_text_region_raw[mask_only_con_region == 0] = 0 - cnt_clean_rot_raw, hir_on_cnt_clean_rot = return_contours_of_image(all_text_region_raw) - cnt_clean_rot = filter_contours_area_of_image(all_text_region_raw, cnt_clean_rot_raw, hir_on_cnt_clean_rot, - max_area=1, min_area=0.00001) + if textline_light: + all_text_region_raw = np.copy(textline_mask_tot_ea) + 
all_text_region_raw[mask_only_con_region == 0] = 0 + cnt_clean_rot_raw, hir_on_cnt_clean_rot = return_contours_of_image(all_text_region_raw) + cnt_clean_rot = filter_contours_area_of_image(all_text_region_raw, cnt_clean_rot_raw, hir_on_cnt_clean_rot, + max_area=1, min_area=0.00001) + else: + all_text_region_raw = np.copy(textline_mask_tot_ea[y: y + h, x: x + w]) + mask_only_con_region = mask_only_con_region[y: y + h, x: x + w] + all_text_region_raw[mask_only_con_region == 0] = 0 + cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_deskew, contour_par, box_text) - return cnt_clean_rot, slope_deskew + return cnt_clean_rot, box_text, contour, contour_par, crop_coor, index_r_con, slope_deskew diff --git a/src/eynollah/utils/shm.py b/src/eynollah/utils/shm.py deleted file mode 100644 index 4b51053..0000000 --- a/src/eynollah/utils/shm.py +++ /dev/null @@ -1,45 +0,0 @@ -from multiprocessing import shared_memory -from contextlib import contextmanager -from functools import wraps -import numpy as np - -@contextmanager -def share_ndarray(array: np.ndarray): - size = np.dtype(array.dtype).itemsize * np.prod(array.shape) - shm = shared_memory.SharedMemory(create=True, size=size) - try: - shared_array = np.ndarray(array.shape, dtype=array.dtype, buffer=shm.buf) - shared_array[:] = array[:] - shared_array.flags["WRITEABLE"] = False - yield dict(shape=array.shape, dtype=array.dtype, name=shm.name) - finally: - shm.close() - shm.unlink() - -@contextmanager -def ndarray_shared(array: dict): - shm = shared_memory.SharedMemory(name=array['name']) - try: - array = np.ndarray(array['shape'], dtype=array['dtype'], buffer=shm.buf) - yield array - finally: - shm.close() - -def wrap_ndarray_shared(kw=None): - def wrapper(f): - if kw is None: - @wraps(f) - def shared_func(array, *args, **kwargs): - with ndarray_shared(array) as ndarray: - return f(ndarray, *args, **kwargs) - return shared_func - else: - @wraps(f) - def shared_func(*args, **kwargs): - array = kwargs.pop(kw) - with ndarray_shared(array) as ndarray: - kwargs[kw] = ndarray - return f(*args, **kwargs) - return shared_func - return wrapper - diff --git a/src/eynollah/utils/utils_ocr.py b/src/eynollah/utils/utils_ocr.py deleted file mode 100644 index 93d1137..0000000 --- a/src/eynollah/utils/utils_ocr.py +++ /dev/null @@ -1,504 +0,0 @@ -import math -import copy - -import numpy as np -import cv2 -import tensorflow as tf -from scipy.signal import find_peaks -from scipy.ndimage import gaussian_filter1d -from PIL import Image, ImageDraw, ImageFont - -from .resize import resize_image - - -def decode_batch_predictions(pred, num_to_char, max_len = 128): - # input_len is the product of the batch size and the - # number of time steps. - input_len = np.ones(pred.shape[0]) * pred.shape[1] - - # Decode CTC predictions using greedy search. - # decoded is a tuple with 2 elements. - decoded = tf.keras.backend.ctc_decode(pred, - input_length = input_len, - beam_width = 100) - # The outputs are in the first element of the tuple. - # Additionally, the first element is actually a list, - # therefore we take the first element of that list as well. - #print(decoded,'decoded') - decoded = decoded[0][0][:, :max_len] - - #print(decoded, decoded.shape,'decoded') - - output = [] - for d in decoded: - # Convert the predicted indices to the corresponding chars. 
-        d = tf.strings.reduce_join(num_to_char(d))
-        d = d.numpy().decode("utf-8")
-        output.append(d)
-    return output
-
-
-def distortion_free_resize(image, img_size):
-    w, h = img_size
-    image = tf.image.resize(image, size=(h, w), preserve_aspect_ratio=True)
-
-    # Check tha amount of padding needed to be done.
-    pad_height = h - tf.shape(image)[0]
-    pad_width = w - tf.shape(image)[1]
-
-    # Only necessary if you want to do same amount of padding on both sides.
-    if pad_height % 2 != 0:
-        height = pad_height // 2
-        pad_height_top = height + 1
-        pad_height_bottom = height
-    else:
-        pad_height_top = pad_height_bottom = pad_height // 2
-
-    if pad_width % 2 != 0:
-        width = pad_width // 2
-        pad_width_left = width + 1
-        pad_width_right = width
-    else:
-        pad_width_left = pad_width_right = pad_width // 2
-
-    image = tf.pad(
-        image,
-        paddings=[
-            [pad_height_top, pad_height_bottom],
-            [pad_width_left, pad_width_right],
-            [0, 0],
-        ],
-    )
-
-    image = tf.transpose(image, (1, 0, 2))
-    image = tf.image.flip_left_right(image)
-    return image
-
-def return_start_and_end_of_common_text_of_textline_ocr_without_common_section(textline_image):
-    width = np.shape(textline_image)[1]
-    height = np.shape(textline_image)[0]
-    common_window = int(0.06*width)
-
-    width1 = int ( width/2. - common_window )
-    width2 = int ( width/2. + common_window )
-
-    img_sum = np.sum(textline_image[:,:,0], axis=0)
-    sum_smoothed = gaussian_filter1d(img_sum, 3)
-
-    peaks_real, _ = find_peaks(sum_smoothed, height=0)
-    if len(peaks_real)>70:
-
-        peaks_real = peaks_real[(peaks_real<width2) & (peaks_real>width1)]
-
-        arg_max = np.argmax(sum_smoothed[peaks_real])
-        peaks_final = peaks_real[arg_max]
-        return peaks_final
-    else:
-        return None
-
-# Function to fit text inside the given area
-def fit_text_single_line(draw, text, font_path, max_width, max_height):
-    initial_font_size = 50
-    font_size = initial_font_size
-    while font_size > 10:  # Minimum font size
-        font = ImageFont.truetype(font_path, font_size)
-        text_bbox = draw.textbbox((0, 0), text, font=font)  # Get text bounding box
-        text_width = text_bbox[2] - text_bbox[0]
-        text_height = text_bbox[3] - text_bbox[1]
-
-        if text_width <= max_width and text_height <= max_height:
-            return font  # Return the best-fitting font
-
-        font_size -= 2  # Reduce font size and retry
-
-    return ImageFont.truetype(font_path, 10)  # Smallest font fallback
-
-def return_textlines_split_if_needed(textline_image, textline_image_bin=None):
-
-    split_point = return_start_and_end_of_common_text_of_textline_ocr_without_common_section(textline_image)
-    if split_point:
-        image1 = textline_image[:, :split_point,:]# image.crop((0, 0, width2, height))
-        image2 = textline_image[:, split_point:,:]#image.crop((width1, 0, width, height))
-        if textline_image_bin is not None:
-            image1_bin = textline_image_bin[:, :split_point,:]# image.crop((0, 0, width2, height))
-            image2_bin = textline_image_bin[:, split_point:,:]#image.crop((width1, 0, width, height))
-            return [image1, image2], [image1_bin, image2_bin]
-        else:
-            return [image1, image2], None
-    else:
-        return None, None
-
-def preprocess_and_resize_image_for_ocrcnn_model(img, image_height, image_width):
-    if img.shape[0]==0 or img.shape[1]==0:
-        img_fin = np.ones((image_height, image_width, 3))
-    else:
-        ratio = image_height /float(img.shape[0])
-        w_ratio = int(ratio * img.shape[1])
-
-        if w_ratio <= image_width:
-            width_new = w_ratio
-        else:
-            width_new = image_width
-
-        if width_new == 0:
-            width_new = img.shape[1]
-
-
-        img = resize_image(img, image_height, width_new)
-        img_fin = 
np.ones((image_height, image_width, 3))*255 - - img_fin[:,:width_new,:] = img[:,:,:] - img_fin = img_fin / 255. - return img_fin - -def get_deskewed_contour_and_bb_and_image(contour, image, deskew_angle): - (h_in, w_in) = image.shape[:2] - center = (w_in // 2, h_in // 2) - - rotation_matrix = cv2.getRotationMatrix2D(center, deskew_angle, 1.0) - - cos_angle = abs(rotation_matrix[0, 0]) - sin_angle = abs(rotation_matrix[0, 1]) - new_w = int((h_in * sin_angle) + (w_in * cos_angle)) - new_h = int((h_in * cos_angle) + (w_in * sin_angle)) - - rotation_matrix[0, 2] += (new_w / 2) - center[0] - rotation_matrix[1, 2] += (new_h / 2) - center[1] - - deskewed_image = cv2.warpAffine(image, rotation_matrix, (new_w, new_h)) - - contour_points = np.array(contour, dtype=np.float32) - transformed_points = cv2.transform(np.array([contour_points]), rotation_matrix)[0] - - x, y, w, h = cv2.boundingRect(np.array(transformed_points, dtype=np.int32)) - cropped_textline = deskewed_image[y:y+h, x:x+w] - - return cropped_textline - -def rotate_image_with_padding(image, angle, border_value=(0,0,0)): - # Get image dimensions - (h, w) = image.shape[:2] - - # Calculate the center of the image - center = (w // 2, h // 2) - - # Get the rotation matrix - rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1.0) - - # Compute the new bounding dimensions - cos = abs(rotation_matrix[0, 0]) - sin = abs(rotation_matrix[0, 1]) - new_w = int((h * sin) + (w * cos)) - new_h = int((h * cos) + (w * sin)) - - # Adjust the rotation matrix to account for translation - rotation_matrix[0, 2] += (new_w / 2) - center[0] - rotation_matrix[1, 2] += (new_h / 2) - center[1] - - # Perform the rotation - try: - rotated_image = cv2.warpAffine(image, rotation_matrix, (new_w, new_h), borderValue=border_value) - except: - rotated_image = np.copy(image) - - return rotated_image - -def get_orientation_moments(contour): - moments = cv2.moments(contour) - if moments["mu20"] - moments["mu02"] == 0: # Avoid division by zero - return 90 if moments["mu11"] > 0 else -90 - else: - angle = 0.5 * np.arctan2(2 * moments["mu11"], moments["mu20"] - moments["mu02"]) - return np.degrees(angle) # Convert radians to degrees - - -def get_orientation_moments_of_mask(mask): - mask=mask.astype('uint8') - contours, _ = cv2.findContours(mask[:,:,0], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) - - largest_contour = max(contours, key=cv2.contourArea) if contours else None - - moments = cv2.moments(largest_contour) - if moments["mu20"] - moments["mu02"] == 0: # Avoid division by zero - return 90 if moments["mu11"] > 0 else -90 - else: - angle = 0.5 * np.arctan2(2 * moments["mu11"], moments["mu20"] - moments["mu02"]) - return np.degrees(angle) # Convert radians to degrees - -def get_contours_and_bounding_boxes(mask): - # Find contours in the binary mask - contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) - - largest_contour = max(contours, key=cv2.contourArea) if contours else None - - # Get the bounding rectangle for the contour - x, y, w, h = cv2.boundingRect(largest_contour) - #bounding_boxes.append((x, y, w, h)) - - return x, y, w, h - -def return_splitting_point_of_image(image_to_spliited): - width = np.shape(image_to_spliited)[1] - height = np.shape(image_to_spliited)[0] - common_window = int(0.03*width) - - width1 = int ( common_window) - width2 = int ( width - common_window ) - - img_sum = np.sum(image_to_spliited[:,:,0], axis=0) - sum_smoothed = gaussian_filter1d(img_sum, 1) - - peaks_real, _ = find_peaks(sum_smoothed, height=0) - 
peaks_real = peaks_real[(peaks_real<width2) & (peaks_real>width1)]
-
-    arg_sort = np.argsort(sum_smoothed[peaks_real])
-    peaks_sort_4 = peaks_real[arg_sort][::-1][:3]
-
-    return np.sort(peaks_sort_4)
-
-def break_curved_line_into_small_pieces_and_then_merge(img_curved, mask_curved, img_bin_curved=None):
-    peaks_4 = return_splitting_point_of_image(img_curved)
-    if len(peaks_4)>0:
-        imgs_tot = []
-
-        for ind in range(len(peaks_4)+1):
-            if ind==0:
-                img = img_curved[:, :peaks_4[ind], :]
-                if img_bin_curved is not None:
-                    img_bin = img_bin_curved[:, :peaks_4[ind], :]
-                mask = mask_curved[:, :peaks_4[ind], :]
-            elif ind==len(peaks_4):
-                img = img_curved[:, peaks_4[ind-1]:, :]
-                if img_bin_curved is not None:
-                    img_bin = img_bin_curved[:, peaks_4[ind-1]:, :]
-                mask = mask_curved[:, peaks_4[ind-1]:, :]
-            else:
-                img = img_curved[:, peaks_4[ind-1]:peaks_4[ind], :]
-                if img_bin_curved is not None:
-                    img_bin = img_bin_curved[:, peaks_4[ind-1]:peaks_4[ind], :]
-                mask = mask_curved[:, peaks_4[ind-1]:peaks_4[ind], :]
-
-            or_ma = get_orientation_moments_of_mask(mask)
-
-            if img_bin_curved is not None:
-                imgs_tot.append([img, mask, or_ma, img_bin] )
-            else:
-                imgs_tot.append([img, mask, or_ma] )
-
-
-        w_tot_des_list = []
-        w_tot_des = 0
-        imgs_deskewed_list = []
-        imgs_bin_deskewed_list = []
-
-        for ind in range(len(imgs_tot)):
-            img_in = imgs_tot[ind][0]
-            mask_in = imgs_tot[ind][1]
-            ori_in = imgs_tot[ind][2]
-            if img_bin_curved is not None:
-                img_bin_in = imgs_tot[ind][3]
-
-            if abs(ori_in)<45:
-                img_in_des = rotate_image_with_padding(img_in, ori_in, border_value=(255,255,255) )
-                if img_bin_curved is not None:
-                    img_bin_in_des = rotate_image_with_padding(img_bin_in, ori_in, border_value=(255,255,255) )
-                mask_in_des = rotate_image_with_padding(mask_in, ori_in)
-                mask_in_des = mask_in_des.astype('uint8')
-
-                #new bounding box
-                x_n, y_n, w_n, h_n = get_contours_and_bounding_boxes(mask_in_des[:,:,0])
-
-                if w_n==0 or h_n==0:
-                    img_in_des = np.copy(img_in)
-                    if img_bin_curved is not None:
-                        img_bin_in_des = np.copy(img_bin_in)
-                    w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) )
-                    if w_relative==0:
-                        w_relative = img_in_des.shape[1]
-                    img_in_des = resize_image(img_in_des, 32, w_relative)
-                    if img_bin_curved is not None:
-                        img_bin_in_des = resize_image(img_bin_in_des, 32, w_relative)
-                else:
-                    mask_in_des = mask_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :]
-                    img_in_des = img_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :]
-                    if img_bin_curved is not None:
-                        img_bin_in_des = img_bin_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :]
-
-                    w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) )
-                    if w_relative==0:
-                        w_relative = img_in_des.shape[1]
-                    img_in_des = resize_image(img_in_des, 32, w_relative)
-                    if img_bin_curved is not None:
-                        img_bin_in_des = resize_image(img_bin_in_des, 32, w_relative)
-
-
-            else:
-                img_in_des = np.copy(img_in)
-                if img_bin_curved is not None:
-                    img_bin_in_des = np.copy(img_bin_in)
-                w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) )
-                if w_relative==0:
-                    w_relative = img_in_des.shape[1]
-                img_in_des = resize_image(img_in_des, 32, w_relative)
-                if img_bin_curved is not None:
-                    img_bin_in_des = resize_image(img_bin_in_des, 32, w_relative)
-
-            w_tot_des+=img_in_des.shape[1]
-            w_tot_des_list.append(img_in_des.shape[1])
-            imgs_deskewed_list.append(img_in_des)
-            if img_bin_curved is not None:
-                imgs_bin_deskewed_list.append(img_bin_in_des)
-
-
-
-
-        img_final_deskewed = np.zeros((32, w_tot_des, 3))+255
-        if img_bin_curved is not None:
-            img_bin_final_deskewed = np.zeros((32, w_tot_des, 
3))+255 - else: - img_bin_final_deskewed = None - - w_indexer = 0 - for ind in range(len(w_tot_des_list)): - img_final_deskewed[:,w_indexer:w_indexer+w_tot_des_list[ind],:] = imgs_deskewed_list[ind][:,:,:] - if img_bin_curved is not None: - img_bin_final_deskewed[:,w_indexer:w_indexer+w_tot_des_list[ind],:] = imgs_bin_deskewed_list[ind][:,:,:] - w_indexer = w_indexer+w_tot_des_list[ind] - return img_final_deskewed, img_bin_final_deskewed - else: - return img_curved, img_bin_curved - -def return_textline_contour_with_added_box_coordinate(textline_contour, box_ind): - textline_contour[:,:,0] += box_ind[2] - textline_contour[:,:,1] += box_ind[0] - return textline_contour - - -def return_rnn_cnn_ocr_of_given_textlines(image, - all_found_textline_polygons, - all_box_coord, - prediction_model, - b_s_ocr, num_to_char, - curved_line=False): - max_len = 512 - padding_token = 299 - image_width = 512#max_len * 4 - image_height = 32 - ind_tot = 0 - #cv2.imwrite('./img_out.png', image_page) - ocr_all_textlines = [] - cropped_lines_region_indexer = [] - cropped_lines_meging_indexing = [] - cropped_lines = [] - indexer_text_region = 0 - - for indexing, ind_poly_first in enumerate(all_found_textline_polygons): - #ocr_textline_in_textregion = [] - if len(ind_poly_first)==0: - cropped_lines_region_indexer.append(indexer_text_region) - cropped_lines_meging_indexing.append(0) - img_fin = np.ones((image_height, image_width, 3))*1 - cropped_lines.append(img_fin) - - else: - for indexing2, ind_poly in enumerate(ind_poly_first): - cropped_lines_region_indexer.append(indexer_text_region) - if not curved_line: - ind_poly = copy.deepcopy(ind_poly) - box_ind = all_box_coord[indexing] - - ind_poly = return_textline_contour_with_added_box_coordinate(ind_poly, box_ind) - #print(ind_poly_copy) - ind_poly[ind_poly<0] = 0 - x, y, w, h = cv2.boundingRect(ind_poly) - - w_scaled = w * image_height/float(h) - - mask_poly = np.zeros(image.shape) - - img_poly_on_img = np.copy(image) - - mask_poly = cv2.fillPoly(mask_poly, pts=[ind_poly], color=(1, 1, 1)) - - - - mask_poly = mask_poly[y:y+h, x:x+w, :] - img_crop = img_poly_on_img[y:y+h, x:x+w, :] - - img_crop[mask_poly==0] = 255 - - if w_scaled < 640:#1.5*image_width: - img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(0) - else: - splited_images, splited_images_bin = return_textlines_split_if_needed(img_crop, None) - - if splited_images: - img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[0], - image_height, - image_width) - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(1) - - img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[1], - image_height, - image_width) - - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(-1) - - else: - img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, - image_height, - image_width) - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(0) - - indexer_text_region+=1 - - extracted_texts = [] - - n_iterations = math.ceil(len(cropped_lines) / b_s_ocr) - - for i in range(n_iterations): - if i==(n_iterations-1): - n_start = i*b_s_ocr - imgs = cropped_lines[n_start:] - imgs = np.array(imgs) - imgs = imgs.reshape(imgs.shape[0], image_height, image_width, 3) - - - else: - n_start = i*b_s_ocr - n_end = (i+1)*b_s_ocr - imgs = cropped_lines[n_start:n_end] - imgs = np.array(imgs).reshape(b_s_ocr, image_height, image_width, 3) - - - preds = 
prediction_model.predict(imgs, verbose=0) - - pred_texts = decode_batch_predictions(preds, num_to_char) - - for ib in range(imgs.shape[0]): - pred_texts_ib = pred_texts[ib].replace("[UNK]", "") - extracted_texts.append(pred_texts_ib) - - extracted_texts_merged = [extracted_texts[ind] - if cropped_lines_meging_indexing[ind]==0 - else extracted_texts[ind]+" "+extracted_texts[ind+1] - if cropped_lines_meging_indexing[ind]==1 - else None - for ind in range(len(cropped_lines_meging_indexing))] - - extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] - unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) - - ocr_all_textlines = [] - for ind in unique_cropped_lines_region_indexer: - ocr_textline_in_textregion = [] - extracted_texts_merged_un = np.array(extracted_texts_merged)[np.array(cropped_lines_region_indexer)==ind] - for it_ind, text_textline in enumerate(extracted_texts_merged_un): - ocr_textline_in_textregion.append(text_textline) - ocr_all_textlines.append(ocr_textline_in_textregion) - return ocr_all_textlines diff --git a/src/eynollah/utils/xml.py b/src/eynollah/utils/xml.py index ded098e..bd95702 100644 --- a/src/eynollah/utils/xml.py +++ b/src/eynollah/utils/xml.py @@ -46,26 +46,24 @@ def create_page_xml(imageFilename, height, width): )) return pcgts -def xml_reading_order(page, order_of_texts, id_of_marginalia_left, id_of_marginalia_right): +def xml_reading_order(page, order_of_texts, id_of_marginalia): region_order = ReadingOrderType() og = OrderedGroupType(id="ro357564684568544579089") page.set_ReadingOrder(region_order) region_order.set_OrderedGroup(og) region_counter = EynollahIdCounter() - - for id_marginal in id_of_marginalia_left: - og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=id_marginal)) + for idx_textregion, _ in enumerate(order_of_texts): + og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=region_counter.region_id(order_of_texts[idx_textregion] + 1))) region_counter.inc('region') - - for idx_textregion in order_of_texts: - og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=region_counter.region_id(idx_textregion + 1))) - region_counter.inc('region') - - for id_marginal in id_of_marginalia_right: + for id_marginal in id_of_marginalia: og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=id_marginal)) region_counter.inc('region') -def order_and_id_of_texts(found_polygons_text_region, found_polygons_text_region_h, indexes_sorted, index_of_types, kind_of_texts, ref_point): +def order_and_id_of_texts(found_polygons_text_region, found_polygons_text_region_h, matrix_of_orders, indexes_sorted, index_of_types, kind_of_texts, ref_point): + indexes_sorted = np.array(indexes_sorted) + index_of_types = np.array(index_of_types) + kind_of_texts = np.array(kind_of_texts) + id_of_texts = [] order_of_texts = [] @@ -88,7 +86,3 @@ def order_and_id_of_texts(found_polygons_text_region, found_polygons_text_region order_of_texts.append(interest) return order_of_texts, id_of_texts - -def etree_namespace_for_element_tag(tag: str): - right = tag.find('}') - return tag[1:right] diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index 47fc32f..92e353f 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -2,15 +2,15 @@ # pylint: disable=import-error from pathlib import Path import os.path -import logging -from typing import Optional -import 
numpy as np -from shapely import affinity, clip_by_rect +import xml.etree.ElementTree as ET +from .utils.xml import create_page_xml, xml_reading_order +from .utils.counter import EynollahIdCounter -from ocrd_utils import points_from_polygon +from ocrd_utils import getLogger from ocrd_models.ocrd_page import ( BorderType, CoordsType, + PcGtsType, TextLineType, TextEquivType, TextRegionType, @@ -19,275 +19,302 @@ from ocrd_models.ocrd_page import ( SeparatorRegionType, to_xml ) +import numpy as np -from .utils.xml import create_page_xml, xml_reading_order -from .utils.counter import EynollahIdCounter -from .utils.contour import contour2polygon, make_valid +class EynollahXmlWriter(): -class EynollahXmlWriter: - - def __init__(self, *, dir_out, image_filename, image_width, image_height, curved_line, pcgts=None): - self.logger = logging.getLogger('eynollah.writer') + def __init__(self, *, dir_out, image_filename, curved_line,textline_light, pcgts=None): + self.logger = getLogger('eynollah.writer') self.counter = EynollahIdCounter() self.dir_out = dir_out self.image_filename = image_filename self.output_filename = os.path.join(self.dir_out or "", self.image_filename_stem) + ".xml" self.curved_line = curved_line + self.textline_light = textline_light self.pcgts = pcgts - self.image_height = image_height - self.image_width = image_width - self.scale_x = 1.0 - self.scale_y = 1.0 + self.scale_x = None # XXX set outside __init__ + self.scale_y = None # XXX set outside __init__ + self.height_org = None # XXX set outside __init__ + self.width_org = None # XXX set outside __init__ @property def image_filename_stem(self): return Path(Path(self.image_filename).name).stem - def calculate_points(self, contour, offset=None): - poly = contour2polygon(contour) - if offset is not None: - poly = affinity.translate(poly, *offset) - poly = affinity.scale(poly, xfact=1 / self.scale_x, yfact=1 / self.scale_y, origin=(0, 0)) - poly = make_valid(clip_by_rect(poly, 0, 0, self.image_width, self.image_height)) - return points_from_polygon(poly.exterior.coords[:-1]) + def calculate_page_coords(self, cont_page): + self.logger.debug('enter calculate_page_coords') + points_page_print = "" + for _, contour in enumerate(cont_page[0]): + if len(contour) == 2: + points_page_print += str(int((contour[0]) / self.scale_x)) + points_page_print += ',' + points_page_print += str(int((contour[1]) / self.scale_y)) + else: + points_page_print += str(int((contour[0][0]) / self.scale_x)) + points_page_print += ',' + points_page_print += str(int((contour[0][1] ) / self.scale_y)) + points_page_print = points_page_print + ' ' + return points_page_print[:-1] - def serialize_lines_in_region(self, text_region, all_found_textline_polygons, region_idx, page_coord, slopes, counter, ocr_all_textlines_textregion): - for j, polygon_textline in enumerate(all_found_textline_polygons[region_idx]): + def serialize_lines_in_marginal(self, marginal_region, all_found_textline_polygons_marginals, marginal_idx, page_coord, all_box_coord_marginals, slopes_marginals, counter): + for j in range(len(all_found_textline_polygons_marginals[marginal_idx])): + coords = CoordsType() + textline = TextLineType(id=counter.next_line_id, Coords=coords) + marginal_region.add_TextLine(textline) + marginal_region.set_orientation(-slopes_marginals[marginal_idx]) + points_co = '' + for l in range(len(all_found_textline_polygons_marginals[marginal_idx][j])): + if not (self.curved_line or self.textline_light): + if len(all_found_textline_polygons_marginals[marginal_idx][j][l]) == 
2: + textline_x_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x) ) + textline_y_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y) ) + else: + textline_x_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x) ) + textline_y_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y) ) + points_co += str(textline_x_coord) + points_co += ',' + points_co += str(textline_y_coord) + if (self.curved_line or self.textline_light) and np.abs(slopes_marginals[marginal_idx]) <= 45: + if len(all_found_textline_polygons_marginals[marginal_idx][j][l]) == 2: + points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0] + page_coord[2]) / self.scale_x)) + points_co += ',' + points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][1] + page_coord[0]) / self.scale_y)) + else: + points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][0] + page_coord[2]) / self.scale_x)) + points_co += ',' + points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][1] + page_coord[0]) / self.scale_y)) + + elif (self.curved_line or self.textline_light) and np.abs(slopes_marginals[marginal_idx]) > 45: + if len(all_found_textline_polygons_marginals[marginal_idx][j][l]) == 2: + points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x)) + points_co += ',' + points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y)) + else: + points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x)) + points_co += ',' + points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y)) + points_co += ' ' + coords.set_points(points_co[:-1]) + + def serialize_lines_in_region(self, text_region, all_found_textline_polygons, region_idx, page_coord, all_box_coord, slopes, counter, ocr_all_textlines_textregion): + self.logger.debug('enter serialize_lines_in_region') + for j in range(len(all_found_textline_polygons[region_idx])): coords = CoordsType() textline = TextLineType(id=counter.next_line_id, Coords=coords) if ocr_all_textlines_textregion: - # FIXME: add OCR confidence - textline.set_TextEquiv([TextEquivType(Unicode=ocr_all_textlines_textregion[j])]) + textline.set_TextEquiv( [ TextEquivType(Unicode=ocr_all_textlines_textregion[j]) ] ) text_region.add_TextLine(textline) text_region.set_orientation(-slopes[region_idx]) - offset = [page_coord[2], page_coord[0]] - coords.set_points(self.calculate_points(polygon_textline, offset)) + region_bboxes = all_box_coord[region_idx] + points_co = '' + for idx_contour_textline, contour_textline in enumerate(all_found_textline_polygons[region_idx][j]): + if not (self.curved_line or self.textline_light): + if len(contour_textline) == 2: + textline_x_coord = max(0, int((contour_textline[0] + region_bboxes[2] + 
page_coord[2]) / self.scale_x)) + textline_y_coord = max(0, int((contour_textline[1] + region_bboxes[0] + page_coord[0]) / self.scale_y)) + else: + textline_x_coord = max(0, int((contour_textline[0][0] + region_bboxes[2] + page_coord[2]) / self.scale_x)) + textline_y_coord = max(0, int((contour_textline[0][1] + region_bboxes[0] + page_coord[0]) / self.scale_y)) + points_co += str(textline_x_coord) + points_co += ',' + points_co += str(textline_y_coord) + + if (self.curved_line or self.textline_light) and np.abs(slopes[region_idx]) <= 45: + if len(contour_textline) == 2: + points_co += str(int((contour_textline[0] + page_coord[2]) / self.scale_x)) + points_co += ',' + points_co += str(int((contour_textline[1] + page_coord[0]) / self.scale_y)) + else: + points_co += str(int((contour_textline[0][0] + page_coord[2]) / self.scale_x)) + points_co += ',' + points_co += str(int((contour_textline[0][1] + page_coord[0])/self.scale_y)) + elif (self.curved_line or self.textline_light) and np.abs(slopes[region_idx]) > 45: + if len(contour_textline)==2: + points_co += str(int((contour_textline[0] + region_bboxes[2] + page_coord[2])/self.scale_x)) + points_co += ',' + points_co += str(int((contour_textline[1] + region_bboxes[0] + page_coord[0])/self.scale_y)) + else: + points_co += str(int((contour_textline[0][0] + region_bboxes[2]+page_coord[2])/self.scale_x)) + points_co += ',' + points_co += str(int((contour_textline[0][1] + region_bboxes[0]+page_coord[0])/self.scale_y)) + points_co += ' ' + coords.set_points(points_co[:-1]) + + def serialize_lines_in_dropcapital(self, text_region, all_found_textline_polygons, region_idx, page_coord, all_box_coord, slopes, counter, ocr_all_textlines_textregion): + self.logger.debug('enter serialize_lines_in_region') + for j in range(1): + coords = CoordsType() + textline = TextLineType(id=counter.next_line_id, Coords=coords) + if ocr_all_textlines_textregion: + textline.set_TextEquiv( [ TextEquivType(Unicode=ocr_all_textlines_textregion[j]) ] ) + text_region.add_TextLine(textline) + #region_bboxes = all_box_coord[region_idx] + points_co = '' + for idx_contour_textline, contour_textline in enumerate(all_found_textline_polygons[j]): + if len(contour_textline) == 2: + points_co += str(int((contour_textline[0] + page_coord[2]) / self.scale_x)) + points_co += ',' + points_co += str(int((contour_textline[1] + page_coord[0]) / self.scale_y)) + else: + points_co += str(int((contour_textline[0][0] + page_coord[2]) / self.scale_x)) + points_co += ',' + points_co += str(int((contour_textline[0][1] + page_coord[0])/self.scale_y)) + + points_co += ' ' + coords.set_points(points_co[:-1]) def write_pagexml(self, pcgts): self.logger.info("output filename: '%s'", self.output_filename) with open(self.output_filename, 'w') as f: f.write(to_xml(pcgts)) - def build_pagexml_no_full_layout( - self, - *, - num_col, - found_polygons_text_region, - page_coord, - page_slope, - order_of_texts, - all_found_textline_polygons, - found_polygons_images, - found_polygons_tables, - found_polygons_marginals_left, - found_polygons_marginals_right, - all_found_textline_polygons_marginals_left, - all_found_textline_polygons_marginals_right, - slopes, - slopes_marginals_left, - slopes_marginals_right, - cont_page, - polygons_seplines, - ocr_all_textlines=None, - ocr_all_textlines_marginals_left=None, - ocr_all_textlines_marginals_right=None, - ocr_all_textlines_drop=None, - conf_textregions=None, - conf_marginals_left=None, - conf_marginals_right=None, - conf_images=None, - conf_tables=None, - ): - return 
self.build_pagexml_full_layout( - num_col=num_col, - found_polygons_text_region=found_polygons_text_region, - found_polygons_text_region_h=[], - page_coord=page_coord, - page_slope=page_slope, - order_of_texts=order_of_texts, - all_found_textline_polygons=all_found_textline_polygons, - all_found_textline_polygons_h=[], - found_polygons_images=found_polygons_images, - found_polygons_tables=found_polygons_tables, - found_polygons_drop_capitals=[], - found_polygons_marginals_left=found_polygons_marginals_left, - found_polygons_marginals_right=found_polygons_marginals_right, - all_found_textline_polygons_marginals_left=all_found_textline_polygons_marginals_left, - all_found_textline_polygons_marginals_right=all_found_textline_polygons_marginals_right, - slopes=slopes, - slopes_h=[], - slopes_marginals_left=slopes_marginals_left, - slopes_marginals_right=slopes_marginals_right, - cont_page=cont_page, - polygons_seplines=polygons_seplines, - ocr_all_textlines=ocr_all_textlines, - ocr_all_textlines_marginals_left=ocr_all_textlines_marginals_left, - ocr_all_textlines_marginals_right=ocr_all_textlines_marginals_right, - conf_textregions=conf_textregions, - conf_marginals_left=conf_marginals_left, - conf_marginals_right=conf_marginals_right, - conf_images=conf_images, - conf_tables=conf_tables, - ) - - def build_pagexml_full_layout( - self, - *, - num_col, - found_polygons_text_region, - found_polygons_text_region_h, - page_coord, - page_slope, - order_of_texts, - all_found_textline_polygons, - all_found_textline_polygons_h, - found_polygons_images, - found_polygons_tables, - found_polygons_drop_capitals, - found_polygons_marginals_left, - found_polygons_marginals_right, - all_found_textline_polygons_marginals_left, - all_found_textline_polygons_marginals_right, - slopes, - slopes_h, - slopes_marginals_left, - slopes_marginals_right, - cont_page, - polygons_seplines, - ocr_all_textlines=None, - ocr_all_textlines_h=None, - ocr_all_textlines_marginals_left=None, - ocr_all_textlines_marginals_right=None, - ocr_all_textlines_drop=None, - conf_textregions=None, - conf_textregions_h=None, - conf_marginals_left=None, - conf_marginals_right=None, - conf_images=None, - conf_tables=None, - conf_drops=None, - ): - self.logger.debug('enter build_pagexml') + def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, found_polygons_tables, ocr_all_textlines, conf_contours_textregion): + self.logger.debug('enter build_pagexml_no_full_layout') # create the file structure - pcgts = self.pcgts if self.pcgts else create_page_xml( - self.image_filename, self.image_height, self.image_width) + pcgts = self.pcgts if self.pcgts else create_page_xml(self.image_filename, self.height_org, self.width_org) page = pcgts.get_Page() - pcgts.Metadata.Comments = "num_col %d" % num_col - page.set_custom('layout {num_col:%d;} ' % num_col) - page.set_orientation(-page_slope) - if len(cont_page): - page.set_Border(BorderType(Coords=CoordsType(points=self.calculate_points(cont_page[0])))) + page.set_Border(BorderType(Coords=CoordsType(points=self.calculate_page_coords(cont_page)))) - offset = [page_coord[2], page_coord[0]] counter = EynollahIdCounter() - if len(order_of_texts): + if len(found_polygons_text_region) > 0: _counter_marginals = 
EynollahIdCounter(region_idx=len(order_of_texts)) - id_of_marginalia_left = [_counter_marginals.next_region_id - for _ in found_polygons_marginals_left] - id_of_marginalia_right = [_counter_marginals.next_region_id - for _ in found_polygons_marginals_right] - xml_reading_order(page, order_of_texts, id_of_marginalia_left, id_of_marginalia_right) + id_of_marginalia = [_counter_marginals.next_region_id for _ in found_polygons_marginals] + xml_reading_order(page, order_of_texts, id_of_marginalia) - for mm, region_contour in enumerate(found_polygons_text_region): - textregion = TextRegionType( - id=counter.next_region_id, type_='paragraph', - Coords=CoordsType(points=self.calculate_points(region_contour, offset)) - ) - assert textregion.Coords - if conf_textregions: - textregion.Coords.set_conf(conf_textregions[mm]) + for mm in range(len(found_polygons_text_region)): + textregion = TextRegionType(id=counter.next_region_id, type_='paragraph', + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord), conf=conf_contours_textregion[mm]), + ) + #textregion.set_conf(conf_contours_textregion[mm]) page.add_TextRegion(textregion) if ocr_all_textlines: ocr_textlines = ocr_all_textlines[mm] else: ocr_textlines = None - self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, - slopes, counter, ocr_textlines) + self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, all_box_coord, slopes, counter, ocr_textlines) - self.logger.debug('len(found_polygons_text_region_h) %s', len(found_polygons_text_region_h)) - for mm, region_contour in enumerate(found_polygons_text_region_h): - textregion = TextRegionType( - id=counter.next_region_id, type_='heading', - Coords=CoordsType(points=self.calculate_points(region_contour, offset)) - ) - assert textregion.Coords - if conf_textregions_h: - textregion.Coords.set_conf(conf_textregions_h[mm]) - page.add_TextRegion(textregion) - if ocr_all_textlines_h: - ocr_textlines = ocr_all_textlines_h[mm] - else: - ocr_textlines = None - self.serialize_lines_in_region(textregion, all_found_textline_polygons_h, mm, page_coord, - slopes_h, counter, ocr_textlines) - - for mm, region_contour in enumerate(found_polygons_drop_capitals): - dropcapital = TextRegionType( - id=counter.next_region_id, type_='drop-capital', - Coords=CoordsType(points=self.calculate_points(region_contour, offset)) - ) - if conf_drops: - dropcapital.Coords.set_conf(conf_drops[mm]) - page.add_TextRegion(dropcapital) - slopes_drop = [0] - if ocr_all_textlines_drop: - ocr_textlines = ocr_all_textlines_drop[mm] - else: - ocr_textlines = None - self.serialize_lines_in_region(dropcapital, [[found_polygons_drop_capitals[mm]]], 0, page_coord, - slopes_drop, counter, ocr_textlines) - - for mm, region_contour in enumerate(found_polygons_marginals_left): - marginal = TextRegionType( - id=counter.next_region_id, type_='marginalia', - Coords=CoordsType(points=self.calculate_points(region_contour, offset)) - ) - if conf_marginals_left: - marginal.Coords.set_conf(conf_marginals_left[mm]) + for mm in range(len(found_polygons_marginals)): + marginal = TextRegionType(id=counter.next_region_id, type_='marginalia', + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals[mm], page_coord))) page.add_TextRegion(marginal) - if ocr_all_textlines_marginals_left: - ocr_textlines = ocr_all_textlines_marginals_left[mm] - else: - ocr_textlines = None - self.serialize_lines_in_region(marginal, 
all_found_textline_polygons_marginals_left, mm, page_coord, - slopes_marginals_left, counter, ocr_textlines) + self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals, mm, page_coord, all_box_coord_marginals, slopes_marginals, counter) - for mm, region_contour in enumerate(found_polygons_marginals_right): - marginal = TextRegionType( - id=counter.next_region_id, type_='marginalia', - Coords=CoordsType(points=self.calculate_points(region_contour, offset)) - ) - if conf_marginals_right: - marginal.Coords.set_conf(conf_marginals_right[mm]) - page.add_TextRegion(marginal) - if ocr_all_textlines_marginals_right: - ocr_textlines = ocr_all_textlines_marginals_right[mm] - else: - ocr_textlines = None - self.serialize_lines_in_region(marginal, all_found_textline_polygons_marginals_right, mm, page_coord, - slopes_marginals_right, counter, ocr_textlines) + for mm in range(len(found_polygons_text_region_img)): + img_region = ImageRegionType(id=counter.next_region_id, Coords=CoordsType()) + page.add_ImageRegion(img_region) + points_co = '' + for lmm in range(len(found_polygons_text_region_img[mm])): + try: + points_co += str(int((found_polygons_text_region_img[mm][lmm,0,0] + page_coord[2]) / self.scale_x)) + points_co += ',' + points_co += str(int((found_polygons_text_region_img[mm][lmm,0,1] + page_coord[0]) / self.scale_y)) + points_co += ' ' + except: - for mm, region_contour in enumerate(found_polygons_images): - image = ImageRegionType( - id=counter.next_region_id, - Coords=CoordsType(points=self.calculate_points(region_contour, offset))) - if conf_images: - image.Coords.set_conf(conf_images[mm]) - page.add_ImageRegion(image) + points_co += str(int((found_polygons_text_region_img[mm][lmm][0] + page_coord[2])/ self.scale_x )) + points_co += ',' + points_co += str(int((found_polygons_text_region_img[mm][lmm][1] + page_coord[0])/ self.scale_y )) + points_co += ' ' - for region_contour in polygons_seplines: - page.add_SeparatorRegion( - SeparatorRegionType(id=counter.next_region_id, - Coords=CoordsType(points=self.calculate_points(region_contour, offset)))) + img_region.get_Coords().set_points(points_co[:-1]) - for mm, region_contour in enumerate(found_polygons_tables): - table = TableRegionType( - id=counter.next_region_id, - Coords=CoordsType(points=self.calculate_points(region_contour, offset))) - if conf_tables: - table.Coords.set_conf(conf_tables[mm]) - page.add_TableRegion(table) + for mm in range(len(polygons_lines_to_be_written_in_xml)): + sep_hor = SeparatorRegionType(id=counter.next_region_id, Coords=CoordsType()) + page.add_SeparatorRegion(sep_hor) + points_co = '' + for lmm in range(len(polygons_lines_to_be_written_in_xml[mm])): + points_co += str(int((polygons_lines_to_be_written_in_xml[mm][lmm,0,0] ) / self.scale_x)) + points_co += ',' + points_co += str(int((polygons_lines_to_be_written_in_xml[mm][lmm,0,1] ) / self.scale_y)) + points_co += ' ' + sep_hor.get_Coords().set_points(points_co[:-1]) + for mm in range(len(found_polygons_tables)): + tab_region = TableRegionType(id=counter.next_region_id, Coords=CoordsType()) + page.add_TableRegion(tab_region) + points_co = '' + for lmm in range(len(found_polygons_tables[mm])): + points_co += str(int((found_polygons_tables[mm][lmm,0,0] + page_coord[2]) / self.scale_x)) + points_co += ',' + points_co += str(int((found_polygons_tables[mm][lmm,0,1] + page_coord[0]) / self.scale_y)) + points_co += ' ' + tab_region.get_Coords().set_points(points_co[:-1]) return pcgts + def build_pagexml_full_layout(self, 
found_polygons_text_region, found_polygons_text_region_h, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, ocr_all_textlines, conf_contours_textregion, conf_contours_textregion_h): + self.logger.debug('enter build_pagexml_full_layout') + + # create the file structure + pcgts = self.pcgts if self.pcgts else create_page_xml(self.image_filename, self.height_org, self.width_org) + page = pcgts.get_Page() + page.set_Border(BorderType(Coords=CoordsType(points=self.calculate_page_coords(cont_page)))) + + counter = EynollahIdCounter() + _counter_marginals = EynollahIdCounter(region_idx=len(order_of_texts)) + id_of_marginalia = [_counter_marginals.next_region_id for _ in found_polygons_marginals] + xml_reading_order(page, order_of_texts, id_of_marginalia) + + for mm in range(len(found_polygons_text_region)): + textregion = TextRegionType(id=counter.next_region_id, type_='paragraph', + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord), conf=conf_contours_textregion[mm])) + page.add_TextRegion(textregion) + + if ocr_all_textlines: + ocr_textlines = ocr_all_textlines[mm] + else: + ocr_textlines = None + self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, all_box_coord, slopes, counter, ocr_textlines) + + self.logger.debug('len(found_polygons_text_region_h) %s', len(found_polygons_text_region_h)) + for mm in range(len(found_polygons_text_region_h)): + textregion = TextRegionType(id=counter.next_region_id, type_='header', + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_h[mm], page_coord))) + page.add_TextRegion(textregion) + + if ocr_all_textlines: + ocr_textlines = ocr_all_textlines[mm] + else: + ocr_textlines = None + self.serialize_lines_in_region(textregion, all_found_textline_polygons_h, mm, page_coord, all_box_coord_h, slopes_h, counter, ocr_textlines) + + for mm in range(len(found_polygons_marginals)): + marginal = TextRegionType(id=counter.next_region_id, type_='marginalia', + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals[mm], page_coord))) + page.add_TextRegion(marginal) + self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals, mm, page_coord, all_box_coord_marginals, slopes_marginals, counter) + + for mm in range(len(found_polygons_drop_capitals)): + dropcapital = TextRegionType(id=counter.next_region_id, type_='drop-capital', + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_drop_capitals[mm], page_coord))) + page.add_TextRegion(dropcapital) + ###all_box_coord_drop = None + ###slopes_drop = None + ###self.serialize_lines_in_dropcapital(dropcapital, [found_polygons_drop_capitals[mm]], mm, page_coord, all_box_coord_drop, slopes_drop, counter, ocr_all_textlines_textregion=None) + + for mm in range(len(found_polygons_text_region_img)): + page.add_ImageRegion(ImageRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_img[mm], page_coord)))) + + for mm in range(len(polygons_lines_to_be_written_in_xml)): + page.add_SeparatorRegion(ImageRegionType(id=counter.next_region_id, 
Coords=CoordsType(points=self.calculate_polygon_coords(polygons_lines_to_be_written_in_xml[mm], [0 , 0, 0, 0])))) + + for mm in range(len(found_polygons_tables)): + page.add_TableRegion(TableRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_tables[mm], page_coord)))) + + return pcgts + + def calculate_polygon_coords(self, contour, page_coord): + self.logger.debug('enter calculate_polygon_coords') + coords = '' + for value_bbox in contour: + if len(value_bbox) == 2: + coords += str(int((value_bbox[0] + page_coord[2]) / self.scale_x)) + coords += ',' + coords += str(int((value_bbox[1] + page_coord[0]) / self.scale_y)) + else: + coords += str(int((value_bbox[0][0] + page_coord[2]) / self.scale_x)) + coords += ',' + coords += str(int((value_bbox[0][1] + page_coord[0]) / self.scale_y)) + coords=coords + ' ' + return coords[:-1] + diff --git a/src/eynollah/training/__init__.py b/tests/__init__.py similarity index 100% rename from src/eynollah/training/__init__.py rename to tests/__init__.py diff --git a/tests/cli_tests/conftest.py b/tests/cli_tests/conftest.py deleted file mode 100644 index 601d76b..0000000 --- a/tests/cli_tests/conftest.py +++ /dev/null @@ -1,47 +0,0 @@ -from typing import List -import pytest -import logging - -from click.testing import CliRunner, Result -from eynollah.cli import main as eynollah_cli - - -@pytest.fixture -def run_eynollah_ok_and_check_logs( - pytestconfig, - caplog, - model_dir, - eynollah_subcommands, - eynollah_log_filter, -): - """ - Generates a Click Runner for `cli`, injects model_path and logging level - to `args`, runs the command and checks whether the logs generated contain - every fragment in `expected_logs` - """ - - def _run_click_ok_logs( - subcommand: 'str', - args: List[str], - expected_logs: List[str], - ) -> Result: - assert subcommand in eynollah_subcommands, f'subcommand {subcommand} must be one of {eynollah_subcommands}' - args = [ - '-m', model_dir, - subcommand, - *args - ] - if pytestconfig.getoption('verbose') > 0: - args = ['-l', 'DEBUG'] + args - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(eynollah_log_filter): - result = runner.invoke(eynollah_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - if expected_logs: - logmsgs = [logrec.message for logrec in caplog.records] - assert any(logmsg.startswith(needle) for needle in expected_logs for logmsg in logmsgs), f'{expected_logs} not in {logmsgs}' - return result - - return _run_click_ok_logs - diff --git a/tests/cli_tests/test_binarization.py b/tests/cli_tests/test_binarization.py deleted file mode 100644 index aee74a4..0000000 --- a/tests/cli_tests/test_binarization.py +++ /dev/null @@ -1,53 +0,0 @@ -import pytest -from PIL import Image - -@pytest.mark.parametrize( - "options", - [ - [], # defaults - ["--no-patches"], - ], ids=str) -def test_run_eynollah_binarization_filename( - tmp_path, - run_eynollah_ok_and_check_logs, - resources_dir, - options, -): - infile = resources_dir / '2files/kant_aufklaerung_1784_0020.tif' - outfile = tmp_path / 'kant_aufklaerung_1784_0020.png' - run_eynollah_ok_and_check_logs( - 'binarization', - [ - '-i', str(infile), - '-o', str(outfile), - ] + options, - [ - f"output filename: '{str(outfile)}'" - ] - ) - assert outfile.exists() - with Image.open(infile) as original_img: - original_size = original_img.size - with Image.open(outfile) as binarized_img: - binarized_size = binarized_img.size - assert original_size == binarized_size - 
-def test_run_eynollah_binarization_directory( - tmp_path, - run_eynollah_ok_and_check_logs, - resources_dir, - image_resources, -): - outdir = tmp_path - run_eynollah_ok_and_check_logs( - 'binarization', - [ - '-di', str(resources_dir / '2files'), - '-o', str(outdir), - ], - [ - str(image_resources[0]), - str(image_resources[1]), - ] - ) - assert len(list(outdir.iterdir())) == 2 diff --git a/tests/cli_tests/test_enhance.py b/tests/cli_tests/test_enhance.py deleted file mode 100644 index b034cfb..0000000 --- a/tests/cli_tests/test_enhance.py +++ /dev/null @@ -1,55 +0,0 @@ -import pytest -from PIL import Image - -@pytest.mark.parametrize( - "options", - [ - [], # defaults - ["-sos"], - ], ids=str) -def test_run_eynollah_enhancement_filename( - tmp_path, - resources_dir, - run_eynollah_ok_and_check_logs, - options, -): - infile = resources_dir / '2files/kant_aufklaerung_1784_0020.tif' - outfile = tmp_path / 'kant_aufklaerung_1784_0020.png' - run_eynollah_ok_and_check_logs( - 'enhancement', - [ - '-i', str(infile), - '-o', str(outfile.parent), - # force rescaling - '-ncu', 3, - ] + options, - [ - 'Enhancement applied', - ] - ) - with Image.open(infile) as original_img: - original_size = original_img.size - with Image.open(outfile) as enhanced_img: - enhanced_size = enhanced_img.size - assert (original_size == enhanced_size) == ("-sos" in options) - -def test_run_eynollah_enhancement_directory( - tmp_path, - resources_dir, - image_resources, - run_eynollah_ok_and_check_logs, -): - outdir = tmp_path - run_eynollah_ok_and_check_logs( - 'enhancement', - [ - '-di', str(resources_dir/ '2files'), - '-o', str(outdir), - # force rescaling - '-ncu', 3, - ], - [ - 'Enhancement applied', - ] - ) - assert len(list(outdir.iterdir())) == 2 diff --git a/tests/cli_tests/test_layout.py b/tests/cli_tests/test_layout.py deleted file mode 100644 index 7cbe013..0000000 --- a/tests/cli_tests/test_layout.py +++ /dev/null @@ -1,119 +0,0 @@ -import pytest -from ocrd_modelfactory import page_from_file -from ocrd_models.constants import NAMESPACES as NS - -@pytest.mark.parametrize( - "options", - [ - [], # defaults - #["--allow_scaling", "--curved-line"], - ["--allow_scaling", "--curved-line", "--full-layout"], - ["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based"], - # -ep ... - # -eoi ... 
- # --skip_layout_and_reading_order - ], ids=str) -def test_run_eynollah_layout_filename( - tmp_path, - run_eynollah_ok_and_check_logs, - resources_dir, - options, -): - infile = resources_dir / '2files/kant_aufklaerung_1784_0020.tif' - outfile = tmp_path / 'kant_aufklaerung_1784_0020.xml' - run_eynollah_ok_and_check_logs( - 'layout', - [ - '-i', str(infile), - '-o', str(outfile.parent), - ] + options, - [ - str(infile) - ] - ) - assert outfile.exists() - tree = page_from_file(str(outfile)).etree - regions = tree.xpath("//page:TextRegion", namespaces=NS) - assert len(regions) >= 2, "result is inaccurate" - regions = tree.xpath("//page:SeparatorRegion", namespaces=NS) - assert len(regions) >= 2, "result is inaccurate" - lines = tree.xpath("//page:TextLine", namespaces=NS) - assert len(lines) == 31, "result is inaccurate" # 29 paragraph lines, 1 page and 1 catch-word line - -@pytest.mark.parametrize( - "options", - [ - ["--tables"], - ["--tables", "--full-layout"], - ], ids=str) -def test_run_eynollah_layout_filename2( - tmp_path, - resources_dir, - run_eynollah_ok_and_check_logs, - options, -): - infile = resources_dir / '2files/euler_rechenkunst01_1738_0025.tif' - outfile = tmp_path / 'euler_rechenkunst01_1738_0025.xml' - run_eynollah_ok_and_check_logs( - 'layout', - [ - '-i', str(infile), - '-o', str(outfile.parent), - ] + options, - [ - str(infile) - ] - ) - assert outfile.exists() - tree = page_from_file(str(outfile)).etree - regions = tree.xpath("//page:TextRegion", namespaces=NS) - assert len(regions) >= 2, "result is inaccurate" - regions = tree.xpath("//page:TableRegion", namespaces=NS) - # model/decoding is not very precise, so (depending on mode) we can get fractures/splits/FP - assert len(regions) >= 1, "result is inaccurate" - regions = tree.xpath("//page:SeparatorRegion", namespaces=NS) - assert len(regions) >= 2, "result is inaccurate" - lines = tree.xpath("//page:TextLine", namespaces=NS) - assert len(lines) >= 2, "result is inaccurate" # mostly table (if detected correctly), but 1 page and 1 catch-word line - -def test_run_eynollah_layout_directory( - tmp_path, - resources_dir, - run_eynollah_ok_and_check_logs, -): - outdir = tmp_path - run_eynollah_ok_and_check_logs( - 'layout', - [ - '-di', str(resources_dir / '2files'), - '-o', str(outdir), - ], - [ - 'Job done in', - 'All jobs done in', - ] - ) - assert len(list(outdir.iterdir())) == 2 - -# def test_run_eynollah_layout_marginalia( -# tmp_path, -# resources_dir, -# run_eynollah_ok_and_check_logs, -# ): -# outdir = tmp_path -# outfile = outdir / 'estor_rechtsgelehrsamkeit02_1758_0880_800px.xml' -# run_eynollah_ok_and_check_logs( -# 'layout', -# [ -# '-i', str(resources_dir / 'estor_rechtsgelehrsamkeit02_1758_0880_800px.jpg'), -# '-o', str(outdir), -# ], -# [ -# 'Job done in', -# 'All jobs done in', -# ] -# ) -# assert outfile.exists() -# tree = page_from_file(str(outfile)).etree -# regions = tree.xpath('//page:TextRegion[type="marginalia"]', namespaces=NS) -# assert len(regions) == 5, "expected 5 marginalia regions" diff --git a/tests/cli_tests/test_mbreorder.py b/tests/cli_tests/test_mbreorder.py deleted file mode 100644 index e429e98..0000000 --- a/tests/cli_tests/test_mbreorder.py +++ /dev/null @@ -1,47 +0,0 @@ -from ocrd_modelfactory import page_from_file -from ocrd_models.constants import NAMESPACES as NS - -def test_run_eynollah_mbreorder_filename( - tmp_path, - resources_dir, - run_eynollah_ok_and_check_logs, -): - infile = resources_dir / '2files/kant_aufklaerung_1784_0020.xml' - outfile = tmp_path 
/'kant_aufklaerung_1784_0020.xml' - run_eynollah_ok_and_check_logs( - 'machine-based-reading-order', - [ - '-i', str(infile), - '-o', str(outfile.parent), - ], - [ - # FIXME: mbreorder has no logging! - ] - ) - assert outfile.exists() - #in_tree = page_from_file(str(infile)).etree - #in_order = in_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) - out_tree = page_from_file(str(outfile)).etree - out_order = out_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) - #assert len(out_order) >= 2, "result is inaccurate" - #assert in_order != out_order - assert out_order == ['r_1_1', 'r_2_1', 'r_2_2', 'r_2_3'] - -def test_run_eynollah_mbreorder_directory( - tmp_path, - resources_dir, - run_eynollah_ok_and_check_logs, -): - outdir = tmp_path - run_eynollah_ok_and_check_logs( - 'machine-based-reading-order', - [ - '-di', str(resources_dir / '2files'), - '-o', str(outdir), - ], - [ - # FIXME: mbreorder has no logging! - ] - ) - assert len(list(outdir.iterdir())) == 2 - diff --git a/tests/cli_tests/test_ocr.py b/tests/cli_tests/test_ocr.py deleted file mode 100644 index 6bf3080..0000000 --- a/tests/cli_tests/test_ocr.py +++ /dev/null @@ -1,64 +0,0 @@ -import pytest -from ocrd_modelfactory import page_from_file -from ocrd_models.constants import NAMESPACES as NS - -@pytest.mark.parametrize( - "options", - [ - ["-trocr"], - [], # defaults - ["-doit", #str(outrenderfile.parent)], - ], - ], ids=str) -def test_run_eynollah_ocr_filename( - tmp_path, - run_eynollah_ok_and_check_logs, - resources_dir, - options, -): - infile = resources_dir / '2files/kant_aufklaerung_1784_0020.tif' - outfile = tmp_path / 'kant_aufklaerung_1784_0020.xml' - outrenderfile = tmp_path / 'render' / 'kant_aufklaerung_1784_0020.png' - outrenderfile.parent.mkdir() - if "-doit" in options: - options.insert(options.index("-doit") + 1, str(outrenderfile.parent)) - run_eynollah_ok_and_check_logs( - 'ocr', - [ - '-i', str(infile), - '-dx', str(infile.parent), - '-o', str(outfile.parent), - ] + options, - [ - # FIXME: ocr has no logging! - ] - ) - assert outfile.exists() - if "-doit" in options: - assert outrenderfile.exists() - #in_tree = page_from_file(str(infile)).etree - #in_order = in_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) - out_tree = page_from_file(str(outfile)).etree - out_texts = out_tree.xpath("//page:TextLine/page:TextEquiv[last()]/page:Unicode/text()", namespaces=NS) - assert len(out_texts) >= 2, ("result is inaccurate", out_texts) - assert sum(map(len, out_texts)) > 100, ("result is inaccurate", out_texts) - -def test_run_eynollah_ocr_directory( - tmp_path, - run_eynollah_ok_and_check_logs, - resources_dir, -): - outdir = tmp_path - run_eynollah_ok_and_check_logs( - 'ocr', - [ - '-di', str(resources_dir / '2files'), - '-dx', str(resources_dir / '2files'), - '-o', str(outdir), - ], - [ - # FIXME: ocr has no logging! 
- ] - ) - assert len(list(outdir.iterdir())) == 2 - diff --git a/tests/conftest.py b/tests/conftest.py deleted file mode 100644 index 69f3d28..0000000 --- a/tests/conftest.py +++ /dev/null @@ -1,37 +0,0 @@ -from glob import glob -import os -import pytest -from pathlib import Path - - -@pytest.fixture() -def tests_dir(): - return Path(__file__).parent.resolve() - -@pytest.fixture() -def model_dir(tests_dir): - return os.environ.get('EYNOLLAH_MODELS_DIR', str(tests_dir.joinpath('..').resolve())) - -@pytest.fixture() -def resources_dir(tests_dir): - return tests_dir / 'resources' - -@pytest.fixture() -def image_resources(resources_dir): - return [Path(x) for x in glob(str(resources_dir / '2files/*.tif'))] - -@pytest.fixture() -def eynollah_log_filter(): - return lambda logrec: logrec.name.startswith('eynollah') - -@pytest.fixture -def eynollah_subcommands(): - return [ - 'binarization', - 'layout', - 'ocr', - 'enhancement', - 'machine-based-reading-order', - 'models', - ] - diff --git a/tests/resources/2files/euler_rechenkunst01_1738_0025.xml b/tests/resources/2files/euler_rechenkunst01_1738_0025.xml deleted file mode 100644 index 1a92f73..0000000 --- a/tests/resources/2files/euler_rechenkunst01_1738_0025.xml +++ /dev/null @@ -1,1626 +0,0 @@ [1626 deleted lines of PAGE-XML ground truth omitted; the XML markup was lost in extraction, leaving unrecoverable text residue]
\ No newline at end of file diff --git a/tests/resources/2files/kant_aufklaerung_1784_0020.xml b/tests/resources/2files/kant_aufklaerung_1784_0020.xml deleted file mode 100644 index 47484cd..0000000 --- a/tests/resources/2files/kant_aufklaerung_1784_0020.xml +++ /dev/null @@ -1,2129 +0,0 @@ [2129 deleted lines of PAGE-XML ground truth omitted; the XML markup was lost in extraction, leaving unrecoverable text residue]
\ No newline at end of file diff --git a/tests/resources/2files/euler_rechenkunst01_1738_0025.tif b/tests/resources/euler_rechenkunst01_1738_0025.tif similarity index 100% rename from tests/resources/2files/euler_rechenkunst01_1738_0025.tif rename to tests/resources/euler_rechenkunst01_1738_0025.tif diff --git a/tests/resources/2files/kant_aufklaerung_1784_0020.tif b/tests/resources/kant_aufklaerung_1784_0020.tif similarity index 100% rename from tests/resources/2files/kant_aufklaerung_1784_0020.tif rename to tests/resources/kant_aufklaerung_1784_0020.tif diff --git a/tests/resources/marginalia/estor_rechtsgelehrsamkeit02_1758_0880_800px.jpg b/tests/resources/marginalia/estor_rechtsgelehrsamkeit02_1758_0880_800px.jpg deleted file mode 100644 index 9270508..0000000 Binary files a/tests/resources/marginalia/estor_rechtsgelehrsamkeit02_1758_0880_800px.jpg and /dev/null differ diff --git a/tests/resources/marginalia/estor_rechtsgelehrsamkeit02_1758_0880_800px.xml b/tests/resources/marginalia/estor_rechtsgelehrsamkeit02_1758_0880_800px.xml deleted file mode 100644 index 45240c4..0000000 --- a/tests/resources/marginalia/estor_rechtsgelehrsamkeit02_1758_0880_800px.xml +++ /dev/null @@ -1,235 +0,0 @@ [235 deleted lines of PAGE-XML layout annotation omitted; the XML markup was lost in extraction, leaving unrecoverable residue] diff --git a/tests/test_model_zoo.py b/tests/test_model_zoo.py deleted file mode 100644 index 9d37431..0000000 --- a/tests/test_model_zoo.py +++ /dev/null @@ -1,16 +0,0 @@ -from eynollah.model_zoo import EynollahModelZoo - -def test_trocr1( - model_dir, -): - model_zoo = EynollahModelZoo(model_dir) - try: - from transformers import TrOCRProcessor, VisionEncoderDecoderModel - model_zoo.load_models('trocr_processor') - proc = model_zoo.get('trocr_processor') - assert isinstance(proc, TrOCRProcessor) - model_zoo.load_models(['ocr', 'tr']) - model = model_zoo.get('ocr') - assert isinstance(model, VisionEncoderDecoderModel) - except ImportError: - pass diff --git a/tests/test_run.py b/tests/test_run.py new file mode 100644 index 0000000..607140e --- /dev/null +++ b/tests/test_run.py @@ -0,0 +1,132 @@ +from os import environ +from pathlib import Path +import logging +from PIL import Image +from eynollah.cli import layout as layout_cli, binarization as binarization_cli +from click.testing import CliRunner +from ocrd_modelfactory import page_from_file +from ocrd_models.constants import NAMESPACES as NS + +testdir = Path(__file__).parent.resolve() + +EYNOLLAH_MODELS = environ.get('EYNOLLAH_MODELS', str(testdir.joinpath('..', 'models_eynollah').resolve())) +SBBBIN_MODELS = environ.get('SBBBIN_MODELS', str(testdir.joinpath('..', 'default-2021-03-09').resolve())) + +def test_run_eynollah_layout_filename(tmp_path, subtests, pytestconfig, caplog): + infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') + outfile = tmp_path / 
'kant_aufklaerung_1784_0020.xml' + args = [ + '-m', EYNOLLAH_MODELS, + '-i', str(infile), + '-o', str(outfile.parent), + # subtests write to same location + '--overwrite', + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + def only_eynollah(logrec): + return logrec.name == 'eynollah' + runner = CliRunner() + for options in [ + [], # defaults + ["--allow_scaling", "--curved-line"], + ["--allow_scaling", "--curved-line", "--full-layout"], + ["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based"], + ["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based", + "--textline_light", "--light_version"], + # -ep ... + # -eoi ... + # --do_ocr + # --skip_layout_and_reading_order + ]: + with subtests.test(#msg="test CLI", + options=options): + with caplog.filtering(only_eynollah): + result = runner.invoke(layout_cli, args + options, catch_exceptions=False) + print(result) + assert result.exit_code == 0 + logmsgs = [logrec.message for logrec in caplog.records] + assert str(infile) in logmsgs + assert outfile.exists() + tree = page_from_file(str(outfile)).etree + regions = tree.xpath("//page:TextRegion", namespaces=NS) + assert len(regions) >= 2, "result is inaccurate" + regions = tree.xpath("//page:SeparatorRegion", namespaces=NS) + assert len(regions) >= 2, "result is inaccurate" + lines = tree.xpath("//page:TextLine", namespaces=NS) + assert len(lines) == 31, "result is inaccurate" # 29 paragraph lines, 1 page and 1 catch-word line + +def test_run_eynollah_layout_directory(tmp_path, pytestconfig, caplog): + indir = testdir.joinpath('resources') + outdir = tmp_path + args = [ + '-m', EYNOLLAH_MODELS, + '-di', str(indir), + '-o', str(outdir), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + def only_eynollah(logrec): + return logrec.name == 'eynollah' + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(layout_cli, args) + print(result) + assert result.exit_code == 0 + logmsgs = [logrec.message for logrec in caplog.records] + assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Job done in')]) == 2 + assert any(logmsg for logmsg in logmsgs if logmsg.startswith('All jobs done in')) + assert len(list(outdir.iterdir())) == 2 + +def test_run_eynollah_binarization_filename(tmp_path, subtests, pytestconfig, caplog): + infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') + outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') + args = [ + '-m', SBBBIN_MODELS, + str(infile), + str(outfile), + ] + caplog.set_level(logging.INFO) + def only_eynollah(logrec): + return logrec.name == 'SbbBinarizer' + runner = CliRunner() + for options in [ + [], # defaults + ["--no-patches"], + ]: + with subtests.test(#msg="test CLI", + options=options): + with caplog.filtering(only_eynollah): + result = runner.invoke(binarization_cli, args + options) + print(result) + assert result.exit_code == 0 + logmsgs = [logrec.message for logrec in caplog.records] + assert any(True for logmsg in logmsgs if logmsg.startswith('Predicting')) + assert outfile.exists() + with Image.open(infile) as original_img: + original_size = original_img.size + with Image.open(outfile) as binarized_img: + binarized_size = binarized_img.size + assert original_size == binarized_size + +def test_run_eynollah_binarization_directory(tmp_path, subtests, pytestconfig, caplog): + indir = 
testdir.joinpath('resources') + outdir = tmp_path + args = [ + '-m', SBBBIN_MODELS, + '-di', str(indir), + '-do', str(outdir), + ] + caplog.set_level(logging.INFO) + def only_eynollah(logrec): + return logrec.name == 'SbbBinarizer' + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(binarization_cli, args) + print(result) + assert result.exit_code == 0 + logmsgs = [logrec.message for logrec in caplog.records] + assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Predicting')]) == 2 + assert len(list(outdir.iterdir())) == 2 diff --git a/tests/test_smoke.py b/tests/test_smoke.py index e2b323a..252213f 100644 --- a/tests/test_smoke.py +++ b/tests/test_smoke.py @@ -2,5 +2,6 @@ def test_utils_import(): import eynollah.utils import eynollah.utils.contour import eynollah.utils.drop_capitals + import eynollah.utils.drop_capitals import eynollah.utils.is_nan import eynollah.utils.rotate diff --git a/train/.gitkeep b/train/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/train/Dockerfile b/train/Dockerfile deleted file mode 100644 index 2456ea4..0000000 --- a/train/Dockerfile +++ /dev/null @@ -1,29 +0,0 @@ -# Use NVIDIA base image -FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04 - -# Set the working directory -WORKDIR /app - - -# Set environment variable for GitPython -ENV GIT_PYTHON_REFRESH=quiet - -# Install Python and pip -RUN apt-get update && apt-get install -y --fix-broken && \ - apt-get install -y \ - python3 \ - python3-pip \ - python3-distutils \ - python3-setuptools \ - python3-wheel && \ - rm -rf /var/lib/apt/lists/* - -# Copy and install Python dependencies -COPY requirements.txt . -RUN pip install --no-cache-dir -r requirements.txt - -# Copy the rest of the application -COPY . . 
-
-# Specify the entry point
-CMD ["python3", "train.py", "with", "config_params_docker.json"]
diff --git a/train/config_params.json b/train/config_params.json
deleted file mode 100644
index b01ac08..0000000
--- a/train/config_params.json
+++ /dev/null
@@ -1,83 +0,0 @@
-{
-    "backbone_type" : "transformer",
-    "task": "cnn-rnn-ocr",
-    "n_classes" : 2,
-    "max_len": 280,
-    "n_epochs" : 3,
-    "input_height" : 32,
-    "input_width" : 512,
-    "weight_decay" : 1e-6,
-    "n_batch" : 4,
-    "learning_rate": 1e-5,
-    "save_interval": 1500,
-    "patches" : false,
-    "pretraining" : true,
-    "augmentation" : true,
-    "flip_aug" : false,
-    "blur_aug" : true,
-    "scaling" : false,
-    "adding_rgb_background": true,
-    "adding_rgb_foreground": true,
-    "add_red_textlines": true,
-    "white_noise_strap": true,
-    "textline_right_in_depth": true,
-    "textline_left_in_depth": true,
-    "textline_up_in_depth": true,
-    "textline_down_in_depth": true,
-    "textline_right_in_depth_bin": true,
-    "textline_left_in_depth_bin": true,
-    "textline_up_in_depth_bin": true,
-    "textline_down_in_depth_bin": true,
-    "bin_deg": true,
-    "textline_skewing": true,
-    "textline_skewing_bin": true,
-    "channels_shuffling": true,
-    "degrading": true,
-    "brightening": true,
-    "binarization" : true,
-    "pepper_aug": true,
-    "pepper_bin_aug": true,
-    "image_inversion": true,
-    "scaling_bluring" : false,
-    "scaling_binarization" : false,
-    "scaling_flip" : false,
-    "rotation": false,
-    "color_padding_rotation": true,
-    "padding_white": true,
-    "rotation_not_90": true,
-    "transformer_num_patches_xy": [56, 56],
-    "transformer_patchsize_x": 4,
-    "transformer_patchsize_y": 4,
-    "transformer_projection_dim": 64,
-    "transformer_mlp_head_units": [128, 64],
-    "transformer_layers": 1,
-    "transformer_num_heads": 1,
-    "transformer_cnn_first": false,
-    "blur_k" : ["blur","gauss","median"],
-    "padd_colors" : ["white", "black"],
-    "scales" : [0.6, 0.7, 0.8, 0.9],
-    "brightness" : [1.3, 1.5, 1.7, 2],
-    "degrade_scales" : [0.2, 0.4],
-    "pepper_indexes": [0.01, 0.005],
-    "skewing_amplitudes" : [5, 8],
-    "flip_index" : [0, 1, -1],
-    "shuffle_indexes" : [ [0,2,1], [1,2,0], [1,0,2] , [2,1,0]],
-    "thetha" : [0.1, 0.2, -0.1, -0.2],
-    "thetha_padd": [-0.6, -1, -1.4, -1.8, 0.6, 1, 1.4, 1.8],
-    "white_padds" : [0.1, 0.3, 0.5, 0.7, 0.9],
-    "number_of_backgrounds_per_image": 2,
-    "continue_training": false,
-    "index_start" : 0,
-    "dir_of_start_model" : " ",
-    "weighted_loss": false,
-    "is_loss_soft_dice": false,
-    "data_is_provided": false,
-    "dir_train": "/home/vahid/extracted_lines/1919_bin/train",
-    "dir_eval": "/home/vahid/Documents/test/sbb_pixelwise_segmentation/test_label/pageextractor_test/eval_new",
-    "dir_output": "/home/vahid/extracted_lines/1919_bin/output",
-    "dir_rgb_backgrounds": "/home/vahid/Documents/1_2_test_eynollah/set_rgb_background",
-    "dir_rgb_foregrounds": "/home/vahid/Documents/1_2_test_eynollah/out_set_rgb_foreground",
-    "dir_img_bin": "/home/vahid/extracted_lines/1919_bin/images_bin",
-    "characters_txt_file":"/home/vahid/Downloads/models_eynollah/model_eynollah_ocr_cnnrnn_20250930/characters_org.txt"
-
-}
diff --git a/train/config_params_docker.json b/train/config_params_docker.json
deleted file mode 100644
index 45f87d3..0000000
--- a/train/config_params_docker.json
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-    "backbone_type" : "nontransformer",
-    "task": "segmentation",
-    "n_classes" : 3,
-    "n_epochs" : 1,
-    "input_height" : 672,
-    "input_width" : 448,
-    "weight_decay" : 1e-6,
-    "n_batch" : 4,
-    "learning_rate": 1e-4,
-    "patches" : false,
-    "pretraining" : true,
-    "augmentation" : false,
-    "flip_aug" : false,
-    "blur_aug" : true,
-    "scaling" : true,
-    "adding_rgb_background": false,
-    "adding_rgb_foreground": false,
-    "add_red_textlines": false,
-    "channels_shuffling": true,
-    "degrading": true,
-    "brightening": true,
-    "binarization" : false,
-    "scaling_bluring" : false,
-    "scaling_binarization" : false,
-    "scaling_flip" : false,
-    "rotation": false,
-    "rotation_not_90": true,
-    "transformer_num_patches_xy": [14, 21],
-    "transformer_patchsize_x": 1,
-    "transformer_patchsize_y": 1,
-    "transformer_projection_dim": 64,
-    "transformer_mlp_head_units": [128, 64],
-    "transformer_layers": 1,
-    "transformer_num_heads": 1,
-    "transformer_cnn_first": true,
-    "blur_k" : ["blur","gauss","median"],
-    "scales" : [0.6, 0.7, 0.8, 0.9],
-    "brightness" : [1.3, 1.5, 1.7, 2],
-    "degrade_scales" : [0.2, 0.4],
-    "flip_index" : [0, 1, -1],
-    "shuffle_indexes" : [ [0,2,1], [1,2,0], [1,0,2] , [2,1,0]],
-    "thetha" : [5, -5],
-    "number_of_backgrounds_per_image": 2,
-    "continue_training": false,
-    "index_start" : 0,
-    "dir_of_start_model" : " ",
-    "weighted_loss": false,
-    "is_loss_soft_dice": true,
-    "data_is_provided": false,
-    "dir_train": "/entry_point_dir/train",
-    "dir_eval": "/entry_point_dir/eval",
-    "dir_output": "/entry_point_dir/output"
-}
diff --git a/train/custom_config_page2label.json b/train/custom_config_page2label.json
deleted file mode 100644
index 9116ce3..0000000
--- a/train/custom_config_page2label.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-"use_case": "textline",
-"textregions":{ "rest_as_paragraph": 1, "header":2 , "heading":2 , "marginalia":3 },
-"imageregion":4,
-"separatorregion":5,
-"graphicregions" :{"rest_as_decoration":6},
-"columns_width":{"1":1000, "2":1300, "3":1600, "4":2000, "5":2300, "6":2500}
-}
diff --git a/train/requirements.txt b/train/requirements.txt
deleted file mode 100644
index 090bc50..0000000
--- a/train/requirements.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-ocrd-fork-sacred >= 0.8.7.post1
-seaborn
-numpy
-tqdm
-imutils
-scipy
-tensorflow-addons # for connected_components, depublished and only compatible with tensorflow < 2.16
-tensorflow < 2.16 # for tensorflow-addons, so only needed in training
-tf_data < 2.16 # for tensorflow-addons, so only needed in training
-protobuf < 5 # for tensorflow-addons, so only needed in training
diff --git a/train/scales_enhancement.json b/train/scales_enhancement.json
deleted file mode 100644
index 58034f0..0000000
--- a/train/scales_enhancement.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
-    "scales" : [ 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]
-}