diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 0000000..72b2c5a --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,28 @@ +version: 2 + +jobs: + + build-python36: + docker: + - image: python:3.6 + steps: + - checkout + - restore_cache: + keys: + - model-cache + - run: make models + - save_cache: + key: model-cache + paths: + models_eynollah.tar.gz + models_eynollah + - run: make install + - run: make smoke-test + +workflows: + version: 2 + build: + jobs: + - build-python36 + #- build-python37 + #- build-python38 # no tensorflow for python 3.8 diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 562fb6f..0000000 --- a/.dockerignore +++ /dev/null @@ -1,6 +0,0 @@ -tests -dist -build -env* -*.egg-info -models_eynollah* diff --git a/.github/workflows/build-docker.yml b/.github/workflows/build-docker.yml deleted file mode 100644 index d2869ed..0000000 --- a/.github/workflows/build-docker.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: CD - -on: - push: - branches: [ "main" ] - workflow_dispatch: # run manually - -jobs: - - build: - runs-on: ubuntu-latest - permissions: - packages: write - contents: read - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - # we need tags for docker version tagging - fetch-tags: true - fetch-depth: 0 - - # Activate cache export feature to reduce build time of images - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - name: Login to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Log in to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERIO_USERNAME }} - password: ${{ secrets.DOCKERIO_PASSWORD }} - - name: Build the Docker image - # build both tags at the same time - run: make docker DOCKER_TAG="docker.io/ocrd/eynollah -t ghcr.io/qurator-spk/eynollah" - - name: Test the Docker image - run: docker run --rm ocrd/eynollah ocrd-eynollah-segment -h - - name: Push to Dockerhub - run: docker push docker.io/ocrd/eynollah - - name: Push to Github Container Registry - run: docker push ghcr.io/qurator-spk/eynollah diff --git a/.github/workflows/pypi.yml b/.github/workflows/pypi.yml deleted file mode 100644 index 248f4ef..0000000 --- a/.github/workflows/pypi.yml +++ /dev/null @@ -1,24 +0,0 @@ -name: PyPI CD - -on: - release: - types: [published] - workflow_dispatch: - -jobs: - pypi-publish: - name: upload release to PyPI - runs-on: ubuntu-latest - permissions: - # IMPORTANT: this permission is mandatory for Trusted Publishing - id-token: write - steps: - - uses: actions/checkout@v4 - - name: Set up Python - uses: actions/setup-python@v5 - - name: Build package - run: make build - - name: Publish package distributions to PyPI - uses: pypa/gh-action-pypi-publish@release/v1 - with: - verbose: true diff --git a/.github/workflows/test-eynollah.yml b/.github/workflows/test-eynollah.yml index 466e690..1afd2a6 100644 --- a/.github/workflows/test-eynollah.yml +++ b/.github/workflows/test-eynollah.yml @@ -1,9 +1,9 @@ # This workflow will install Python dependencies, run tests and lint with a variety of Python versions # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions -name: Test +name: Python package -on: [push] +on: [push, pull_request] jobs: build: @@ -11,89 +11,26 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ['3.8', '3.9', '3.10', '3.11'] + python-version: 
['3.6'] # '3.7' steps: - - name: clean up - run: | - df -h - sudo rm -rf /usr/share/dotnet - sudo rm -rf /usr/local/lib/android - sudo rm -rf /opt/ghc - sudo rm -rf "/usr/local/share/boost" - sudo rm -rf "$AGENT_TOOLSDIRECTORY" - df -h - - uses: actions/checkout@v4 - - uses: actions/cache/restore@v4 - id: seg_model_cache + - uses: actions/checkout@v2 + - uses: actions/cache@v2 + id: model_cache with: - path: models_layout_v0_5_0 - key: seg-models - - uses: actions/cache/restore@v4 - id: ocr_model_cache - with: - path: models_ocr_v0_5_1 - key: ocr-models - - uses: actions/cache/restore@v4 - id: bin_model_cache - with: - path: default-2021-03-09 - key: bin-models + path: models_eynollah + key: ${{ runner.os }}-models - name: Download models - if: steps.seg_model_cache.outputs.cache-hit != 'true' || steps.bin_model_cache.outputs.cache-hit != 'true' || steps.ocr_model_cache.outputs.cache-hit != true + if: steps.model_cache.outputs.cache-hit != 'true' run: make models - - uses: actions/cache/save@v4 - if: steps.seg_model_cache.outputs.cache-hit != 'true' - with: - path: models_layout_v0_5_0 - key: seg-models - - uses: actions/cache/save@v4 - if: steps.ocr_model_cache.outputs.cache-hit != 'true' - with: - path: models_ocr_v0_5_1 - key: ocr-models - - uses: actions/cache/save@v4 - if: steps.bin_model_cache.outputs.cache-hit != 'true' - with: - path: default-2021-03-09 - key: bin-models - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 + uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | python -m pip install --upgrade pip - make install-dev EXTRAS=OCR,plotting - make deps-test EXTRAS=OCR,plotting - ls -l models_* - - name: Lint with ruff - uses: astral-sh/ruff-action@v3 - with: - src: "./src" + pip install . + pip install -r requirements-test.txt - name: Test with pytest - run: make coverage PYTEST_ARGS="-vv --junitxml=pytest.xml" - - name: Get coverage results - run: | - coverage report --format=markdown >> $GITHUB_STEP_SUMMARY - coverage html - coverage json - coverage xml - - name: Store coverage results - uses: actions/upload-artifact@v4 - with: - name: coverage-report_${{ matrix.python-version }} - path: | - htmlcov - pytest.xml - coverage.xml - coverage.json - - name: Upload coverage results - uses: codecov/codecov-action@v4 - with: - files: coverage.xml - fail_ci_if_error: false - - name: Test standalone CLI - run: make smoke-test - - name: Test OCR-D CLI - run: make ocrd-test + run: make test diff --git a/.gitignore b/.gitignore index fd64f0b..5236dde 100644 --- a/.gitignore +++ b/.gitignore @@ -2,12 +2,6 @@ __pycache__ sbb_newspapers_org_image/pylint.log models_eynollah* -models_ocr* -models_layout* -default-2021-03-09 output.html /build /dist -*.tif -*.sw? -TAGS diff --git a/CHANGELOG.md b/CHANGELOG.md index c2caaa6..c6a5193 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,264 +5,7 @@ Versioned according to [Semantic Versioning](http://semver.org/). 
## Unreleased -## [0.6.0] - 2025-10-17 - -Added: - - * `eynollah-training` CLI and docs for training the models, #187, #193, https://github.com/qurator-spk/sbb_pixelwise_segmentation/tree/unifying-training-models - -Fixed: - - * `join_polygons` always returning Polygon, not MultiPolygon, #203 - -## [0.6.0rc2] - 2025-10-14 - -Fixed: - - * Prevent OOM GPU error by avoiding loading the `region_fl` model, #199 - * XML output: encoding should be `utf-8`, not `utf8`, #196, #197 - -## [0.6.0rc1] - 2025-10-10 - -Fixed: - - * continue processing when no columns detected but text regions exist - * convert marginalia to main text if no main text is present - * reset deskewing angle to 0° when text covers <30% image area and detected angle >45° - * :fire: polygons: avoid invalid paths (use `Polygon.buffer()` instead of dilation etc.) - * `return_boxes_of_images_by_order_of_reading_new`: avoid Numpy.dtype mismatch, simplify - * `return_boxes_of_images_by_order_of_reading_new`: log any exceptions instead of ignoring - * `filter_contours_without_textline_inside`: avoid removing from duplicate lists twice - * `get_marginals`: exit early if no peaks found to avoid spurious overlap mask - * `get_smallest_skew`: after shifting search range of rotation angle, use overall best result - * Dockerfile: fix CUDA installation (cuDNN contested between Torch and TF due to extra OCR) - * OCR: re-instate missing methods and fix `utils_ocr` function calls - * mbreorder/enhancement CLIs: missing imports - * :fire: writer: `SeparatorRegion` needs `SeparatorRegionType` (not `ImageRegionType`), f458e3e - * tests: switch from `pytest-subtests` to `parametrize` so we can use `pytest-isolate` - (so CUDA memory gets freed between tests if running on GPU) - -Added: - * :fire: `layout` CLI: new option `--model_version` to override default choices - * test coverage for OCR options in `layout` - * test coverage for table detection in `layout` - * CI linting with ruff - -Changed: - - * polygons: slightly widen for regions and lines, increase for separators - * various refactorings, some code style and identifier improvements - * deskewing/multiprocessing: switch back to ProcessPoolExecutor (faster), - but use shared memory if necessary, and switch back from `loky` to stdlib, - and shutdown in `del()` instead of `atexit` - * :fire: OCR: switch CNN-RNN model to `20250930` version compatible with TF 2.12 on CPU, too - * OCR: allow running `-tr` without `-fl`, too - * :fire: writer: use `@type='heading'` instead of `'header'` for headings - * :fire: performance gains via refactoring (simplification, less copy-code, vectorization, - avoiding unused calculations, avoiding unnecessary 3-channel image operations) - * :fire: heuristic reading order detection: many improvements - - contour vs splitter box matching: - * contour must be contained in box exactly instead of heuristics - * make fallback center matching, center must be contained in box - - original vs deskewed contour matching: - * same min-area filter on both sides - * similar area score in addition to center proximity - * avoid duplicate and missing mappings by allowing N:M - matches and splitting+joining where necessary - * CI: update+improve model caching - - -## [0.5.0] - 2025-09-26 - -Fixed: - - * restoring the contour in the original image caused an error due to an empty tuple, #154 - * removed NumPy warnings calculating sigma, mean, (fixed issue #158) - * fixed bug in `separate_lines.py`, #124 - * Drop capitals are now handled separately from their corresponding textline - 
* Marginals are now divided into left and right. Their reading order is written first for left marginals, then for right marginals, and within each side from top to bottom - * Added a new page extraction model. Instead of bounding boxes, it outputs page contours in the XML file, improving results for skewed pages - * Improved reading order for cases where a textline is segmented into multiple smaller textlines - -Changed - - * CLIs: read only allowed filename suffixes (image or XML) with `--dir_in` - * CLIs: make all output option required, and `-i` / `-di` required but mutually exclusive - * ocr CLI: drop redundant `-brb` in favour of just `-dib` - * APIs: move all input/output path options from class (kwarg and attribute) ro `run` kwarg - * layout textlines: polygonal also without `-cl` - -Added: - - * `eynollah machine-based-reading-order` CLI to run reading order detection, #175 - * `eynollah enhancement` CLI to run image enhancement, #175 - * Improved models for page extraction and reading order detection, #175 - * For the lightweight version (layout and textline detection), thresholds are now assigned to the artificial class. Users can apply these thresholds to improve detection of isolated textlines and regions. To counteract the drawback of thresholding, the skeleton of the artificial class is used to keep lines as thin as possible (resolved issues #163 and #161) - * Added and integrated a trained CNN-RNN OCR models - * Added and integrated a trained TrOCR model - * Improved OCR detection to support vertical and curved textlines - * Introduced a new machine-based reading order model with rotation augmentation - * Optimized reading order speed by clustering text regions that belong to the same block, maintaining top-to-bottom order - * Implemented text merging across textlines based on hyphenation when a line ends with a hyphen - * Integrated image enhancement as a separate use case - * Added reading order functionality on the layout level as a separate use case - * CNN-RNN OCR models provide confidence scores for predictions - * Added OCR visualization: predicted OCR can be overlaid on an image of the same size as the input - * Introduced a threshold value for CNN-RNN OCR models, allowing users to filter out low-confidence textline predictions - * For OCR, users can specify a single model by name instead of always using the default model - * Under the OCR use case, if Ground Truth XMLs and images are available, textline image and corresponding text extraction can now be performed - -Merged PRs: - - * better machine based reading order + layout and textline + ocr by @vahidrezanezhad in https://github.com/qurator-spk/eynollah/pull/175 - * CI: pypi by @kba in https://github.com/qurator-spk/eynollah/pull/154 - * CI: Use most recent actions/setup-python@v5 by @kba in https://github.com/qurator-spk/eynollah/pull/157 - * update docker by @bertsky in https://github.com/qurator-spk/eynollah/pull/159 - * Ocrd fixes by @kba in https://github.com/qurator-spk/eynollah/pull/167 - * Updating readme for eynollah use cases cli by @kba in https://github.com/qurator-spk/eynollah/pull/166 - * OCR-D processor: expose reading_order_machine_based by @bertsky in https://github.com/qurator-spk/eynollah/pull/171 - * prepare release v0.5.0: fix logging by @bertsky in https://github.com/qurator-spk/eynollah/pull/180 - * mb_ro_on_layout: remove copy-pasta code not actually used by @kba in https://github.com/qurator-spk/eynollah/pull/181 - * prepare release v0.5.0: improve CLI docstring, refactor I/O path 
options from class to run kwargs, increase test coverage @bertsky in #182 - * prepare release v0.5.0: fix for OCR doit subtest by @bertsky in https://github.com/qurator-spk/eynollah/pull/183 - * Prepare release v0.5.0 by @kba in https://github.com/qurator-spk/eynollah/pull/178 - * updating eynollah README, how to use it for use cases by @vahidrezanezhad in https://github.com/qurator-spk/eynollah/pull/156 - * add feedback to command line interface by @michalbubula in https://github.com/qurator-spk/eynollah/pull/170 - -## [0.4.0] - 2025-04-07 - -Fixed: - - * allow empty imports for optional dependencies - * avoid Numpy warnings (empty slices etc.) - * remove deprecated Numpy types - * binarization CLI: make `dir_in` usable again - -Added: - - * Continuous Deployment via Dockerhub and GHCR - * CI: also test CLIs and OCR-D - * CI: measure code coverage, annotate+upload reports - * smoke-test: also check results - * smoke-test: also test sbb-binarize - * ocrd-test: analog for OCR-D CLI (segment and binarize) - * pytest: add asserts, extend coverage, use subtests for various options - * pytest: also add binarization - * pytest: add `dir_in` mode (segment and binarize) - * make install: control optional dependencies via `EXTRAS` variable - * OCR-D: expose and describe recently added parameters: - - `ignore_page_extraction` - - `allow_enhancement` - - `textline_light` - - `right_to_left` - * OCR-D: :fire: integrate ocrd-sbb-binarize - * add detection confidence in `TextRegion/Coords/@conf` - (but only in light version and not for marginalia) - -Changed: - - * Docker build: simplify, w/ `OCR`, conform to OCR-D spec - * OCR-D: :fire: migrate to core v3 - - initialize+setup only once - - restrict number of parallel page workers to 1 - (conflicts with existing multiprocessing; TF parts not mp-compatible) - - do query maximally annotated page image - (but filtering existing binarization/cropping/deskewing), - rebase (as new `@imageFilename`) if necessary - - add behavioural docstring - - * :fire: refactor `Eynollah` API: - - no more data (kw)args at init, - but kwargs `dir_in` / `image_filename` for `run()` - - no more data attributes, but function kwargs - (`pcgts`, `image_filename`, `image_pil`, `dir_in`, `override_dpi`) - - remove redundant TF session/model loaders - (only load once during init) - - factor `run_single()` out of `run()` (loop body), - expose for independent calls (like OCR-D) - - expose `cache_images()`, add `dpi` kwarg, set `self._imgs` - - single-image mode writes PAGE file result - (just as directory mode does) - - * CLI: assertions (instead of print+exit) for options checks - * light mode: fine-tune ratio to better detect a region as header - -## [0.3.1] - 2024-08-27 - -Fixed: - - * regression in OCR-D processor, #106 - * Expected Ptrcv::UMat for argument 'contour', #110 - * Memory usage explosion with very narrow images (e.g. 
book spine), #67 - -## [0.3.0] - 2023-05-13 - -Changed: - - * Eynollah light integration, #86 - * use PEP420 style qurator namespace, #97 - * set_memory_growth to all GPU devices alike, #100 - -Fixed: - - * PAGE-XML coordinates can have self-intersections, #20 - * reading order representation (XML order vs index), #22 - * allow cropping separately, #26 - * Order of regions, #51 - * error while running inference, #75 - * Eynollah crashes while processing image, #77 - * ValueError: bad marshal data, #87 - * contour extraction: inhomogeneous shape, #92 - * Confusing model dir variables, #93 - * New release?, #96 - -## [0.2.0] - 2023-03-24 - -Changed: - - * Convert default model from HDFS to TF SavedModel, #91 - -Added: - - * parmeter `tables` to toggle table detectino, #91 - * default model described in ocrd-tool.json, #91 - -## [0.1.0] - 2023-03-22 - -Fixed: - - * Do not produce spurious `TextEquiv`, #68 - * Less spammy logging, #64, #65, #71 - -Changed: - - * Upgrade to tensorflow 2.4.0, #74 - * Improved README - * CI: test for python 3.7+, #90 - -## [0.0.11] - 2022-02-02 - -Fixed: - - * `models` parameter should have `content-type`, #61, OCR-D/core#777 - -## [0.0.10] - 2021-09-27 - -Fixed: - - * call to `uild_pagexml_no_full_layout` for empty pages, #52 - -## [0.0.9] - 2021-08-16 - -Added: - - * Table detection, #48 - -Fixed: - - * Catch exception, #47 - -## [0.0.8] - 2021-07-27 +## [0.0.7] - 2021-07-27 Fixed: @@ -307,18 +50,6 @@ Fixed: Initial release -[0.6.0]: ../../compare/v0.6.0...v0.6.0rc2 -[0.6.0rc2]: ../../compare/v0.6.0rc2...v0.6.0rc1 -[0.6.0rc1]: ../../compare/v0.6.0rc1...v0.5.0 -[0.5.0]: ../../compare/v0.5.0...v0.4.0 -[0.4.0]: ../../compare/v0.4.0...v0.3.1 -[0.3.1]: ../../compare/v0.3.1...v0.3.0 -[0.3.0]: ../../compare/v0.3.0...v0.2.0 -[0.2.0]: ../../compare/v0.2.0...v0.1.0 -[0.1.0]: ../../compare/v0.1.0...v0.0.11 -[0.0.11]: ../../compare/v0.0.11...v0.0.10 -[0.0.10]: ../../compare/v0.0.10...v0.0.9 -[0.0.9]: ../../compare/v0.0.9...v0.0.8 [0.0.8]: ../../compare/v0.0.8...v0.0.7 [0.0.7]: ../../compare/v0.0.7...v0.0.6 [0.0.6]: ../../compare/v0.0.6...v0.0.5 diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index a15776e..0000000 --- a/Dockerfile +++ /dev/null @@ -1,49 +0,0 @@ -ARG DOCKER_BASE_IMAGE -FROM $DOCKER_BASE_IMAGE - -ARG VCS_REF -ARG BUILD_DATE -LABEL \ - maintainer="https://ocr-d.de/en/contact" \ - org.label-schema.vcs-ref=$VCS_REF \ - org.label-schema.vcs-url="https://github.com/qurator-spk/eynollah" \ - org.label-schema.build-date=$BUILD_DATE \ - org.opencontainers.image.vendor="DFG-Funded Initiative for Optical Character Recognition Development" \ - org.opencontainers.image.title="Eynollah" \ - org.opencontainers.image.description="" \ - org.opencontainers.image.source="https://github.com/qurator-spk/eynollah" \ - org.opencontainers.image.documentation="https://github.com/qurator-spk/eynollah/blob/${VCS_REF}/README.md" \ - org.opencontainers.image.revision=$VCS_REF \ - org.opencontainers.image.created=$BUILD_DATE \ - org.opencontainers.image.base.name=ocrd/core-cuda-tf2 - -ENV DEBIAN_FRONTEND=noninteractive -# set proper locales -ENV PYTHONIOENCODING=utf8 -ENV LANG=C.UTF-8 -ENV LC_ALL=C.UTF-8 - -# avoid HOME/.local/share (hard to predict USER here) -# so let XDG_DATA_HOME coincide with fixed system location -# (can still be overridden by derived stages) -ENV XDG_DATA_HOME /usr/local/share -# avoid the need for an extra volume for persistent resource user db -# (i.e. 
XDG_CONFIG_HOME/ocrd/resources.yml) -ENV XDG_CONFIG_HOME /usr/local/share/ocrd-resources - -WORKDIR /build/eynollah -COPY . . -COPY ocrd-tool.json . -# prepackage ocrd-tool.json as ocrd-all-tool.json -RUN ocrd ocrd-tool ocrd-tool.json dump-tools > $(dirname $(ocrd bashlib filename))/ocrd-all-tool.json -# prepackage ocrd-all-module-dir.json -RUN ocrd ocrd-tool ocrd-tool.json dump-module-dirs > $(dirname $(ocrd bashlib filename))/ocrd-all-module-dir.json -# install everything and reduce image size -RUN make install EXTRAS=OCR && rm -rf /build/eynollah -# fixup for broken cuDNN installation (Torch pulls in 8.5.0, which is incompatible with Tensorflow) -RUN pip install nvidia-cudnn-cu11==8.6.0.163 -# smoke test -RUN eynollah --help - -WORKDIR /data -VOLUME /data diff --git a/Makefile b/Makefile index 29dd877..920f15b 100644 --- a/Makefile +++ b/Makefile @@ -1,30 +1,5 @@ -PYTHON ?= python3 -PIP ?= pip3 -EXTRAS ?= - -# DOCKER_BASE_IMAGE = artefakt.dev.sbb.berlin:5000/sbb/ocrd_core:v2.68.0 -DOCKER_BASE_IMAGE ?= docker.io/ocrd/core-cuda-tf2:latest -DOCKER_TAG ?= ocrd/eynollah -DOCKER ?= docker - -#SEG_MODEL := https://qurator-data.de/eynollah/2021-04-25/models_eynollah.tar.gz -#SEG_MODEL := https://qurator-data.de/eynollah/2022-04-05/models_eynollah_renamed.tar.gz -# SEG_MODEL := https://qurator-data.de/eynollah/2022-04-05/models_eynollah.tar.gz -#SEG_MODEL := https://github.com/qurator-spk/eynollah/releases/download/v0.3.0/models_eynollah.tar.gz -#SEG_MODEL := https://github.com/qurator-spk/eynollah/releases/download/v0.3.1/models_eynollah.tar.gz -SEG_MODEL := https://zenodo.org/records/17194824/files/models_layout_v0_5_0.tar.gz?download=1 -SEG_MODELFILE = $(notdir $(patsubst %?download=1,%,$(SEG_MODEL))) -SEG_MODELNAME = $(SEG_MODELFILE:%.tar.gz=%) - -BIN_MODEL := https://github.com/qurator-spk/sbb_binarization/releases/download/v0.0.11/saved_model_2021_03_09.zip -BIN_MODELFILE = $(notdir $(BIN_MODEL)) -BIN_MODELNAME := default-2021-03-09 - -OCR_MODEL := https://zenodo.org/records/17236998/files/models_ocr_v0_5_1.tar.gz?download=1 -OCR_MODELFILE = $(notdir $(patsubst %?download=1,%,$(OCR_MODEL))) -OCR_MODELNAME = $(OCR_MODELFILE:%.tar.gz=%) - -PYTEST_ARGS ?= -vv --isolate +EYNOLLAH_MODELS ?= $(PWD)/models_eynollah +export EYNOLLAH_MODELS # BEGIN-EVAL makefile-parser --make-help Makefile @@ -32,126 +7,37 @@ help: @echo "" @echo " Targets" @echo "" - @echo " docker Build Docker image" - @echo " build Build Python source and binary distribution" - @echo " install Install package with pip" + @echo " models Download and extract models to $(PWD)/models_eynollah" + @echo " install Install with pip" @echo " install-dev Install editable with pip" - @echo " deps-test Install test dependencies with pip" - @echo " models Download and extract models to $(CURDIR):" - @echo " $(BIN_MODELNAME) $(SEG_MODELNAME) $(OCR_MODELNAME)" - @echo " smoke-test Run simple CLI check" - @echo " ocrd-test Run OCR-D CLI check" @echo " test Run unit tests" @echo "" @echo " Variables" - @echo " EXTRAS comma-separated list of features (like 'OCR,plotting') for 'install' [$(EXTRAS)]" - @echo " DOCKER_TAG Docker image tag for 'docker' [$(DOCKER_TAG)]" - @echo " PYTEST_ARGS pytest args for 'test' (Set to '-s' to see log output during test execution, '-vv' to see individual tests. 
[$(PYTEST_ARGS)]" - @echo " SEG_MODEL URL of 'models' archive to download for segmentation 'test' [$(SEG_MODEL)]" - @echo " BIN_MODEL URL of 'models' archive to download for binarization 'test' [$(BIN_MODEL)]" - @echo " OCR_MODEL URL of 'models' archive to download for binarization 'test' [$(OCR_MODEL)]" @echo "" # END-EVAL -# Download and extract models to $(PWD)/models_layout_v0_5_0 -models: $(BIN_MODELNAME) $(SEG_MODELNAME) $(OCR_MODELNAME) +# Download and extract models to $(PWD)/models_eynollah +models: models_eynollah -# do not download these files if we already have the directories -.INTERMEDIATE: $(BIN_MODELFILE) $(SEG_MODELFILE) $(OCR_MODELFILE) +models_eynollah: models_eynollah.tar.gz + tar xf models_eynollah.tar.gz -$(BIN_MODELFILE): - wget -O $@ $(BIN_MODEL) -$(SEG_MODELFILE): - wget -O $@ $(SEG_MODEL) -$(OCR_MODELFILE): - wget -O $@ $(OCR_MODEL) - -$(BIN_MODELNAME): $(BIN_MODELFILE) - mkdir $@ - unzip -d $@ $< -$(SEG_MODELNAME): $(SEG_MODELFILE) - tar zxf $< -$(OCR_MODELNAME): $(OCR_MODELFILE) - tar zxf $< - -build: - $(PIP) install build - $(PYTHON) -m build . +models_eynollah.tar.gz: + wget 'https://qurator-data.de/eynollah/models_eynollah.tar.gz' # Install with pip install: - $(PIP) install .$(and $(EXTRAS),[$(EXTRAS)]) + pip install . # Install editable with pip install-dev: - $(PIP) install -e .$(and $(EXTRAS),[$(EXTRAS)]) + pip install -e . -ifeq (OCR,$(findstring OCR, $(EXTRAS))) -deps-test: $(OCR_MODELNAME) -endif -deps-test: $(BIN_MODELNAME) $(SEG_MODELNAME) - $(PIP) install -r requirements-test.txt -ifeq (OCR,$(findstring OCR, $(EXTRAS))) - ln -rs $(OCR_MODELNAME)/* $(SEG_MODELNAME)/ -endif - -smoke-test: TMPDIR != mktemp -d -smoke-test: tests/resources/kant_aufklaerung_1784_0020.tif - # layout analysis: - eynollah layout -i $< -o $(TMPDIR) -m $(CURDIR)/$(SEG_MODELNAME) - fgrep -q http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15 $(TMPDIR)/$(basename $( Document Layout Analysis, Binarization and OCR with Deep Learning and Heuristics - -[![PyPI Version](https://img.shields.io/pypi/v/eynollah)](https://pypi.org/project/eynollah/) -[![GH Actions Test](https://github.com/qurator-spk/eynollah/actions/workflows/test-eynollah.yml/badge.svg)](https://github.com/qurator-spk/eynollah/actions/workflows/test-eynollah.yml) -[![GH Actions Deploy](https://github.com/qurator-spk/eynollah/actions/workflows/build-docker.yml/badge.svg)](https://github.com/qurator-spk/eynollah/actions/workflows/build-docker.yml) -[![License: ASL](https://img.shields.io/github/license/qurator-spk/eynollah)](https://opensource.org/license/apache-2-0/) -[![DOI](https://img.shields.io/badge/DOI-10.1145%2F3604951.3605513-red)](https://doi.org/10.1145/3604951.3605513) +> Document Layout Analysis ![](https://user-images.githubusercontent.com/952378/102350683-8a74db80-3fa5-11eb-8c7e-f743f7d6eae2.jpg) -## Features -* Support for 10 distinct segmentation classes: - * background, [page border](https://ocr-d.de/en/gt-guidelines/trans/lyRand.html), [text region](https://ocr-d.de/en/gt-guidelines/trans/lytextregion.html#textregionen__textregion_), [text line](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_TextLineType.html), [header](https://ocr-d.de/en/gt-guidelines/trans/lyUeberschrift.html), [image](https://ocr-d.de/en/gt-guidelines/trans/lyBildbereiche.html), [separator](https://ocr-d.de/en/gt-guidelines/trans/lySeparatoren.html), [marginalia](https://ocr-d.de/en/gt-guidelines/trans/lyMarginalie.html), [initial](https://ocr-d.de/en/gt-guidelines/trans/lyInitiale.html), 
[table](https://ocr-d.de/en/gt-guidelines/trans/lyTabellen.html) -* Support for various image optimization operations: - * cropping (border detection), binarization, deskewing, dewarping, scaling, enhancing, resizing -* Textline segmentation to bounding boxes or polygons (contours) including for curved lines and vertical text -* Text recognition (OCR) using either CNN-RNN or Transformer models -* Detection of reading order (left-to-right or right-to-left) using either heuristics or trainable models -* Output in [PAGE-XML](https://github.com/PRImA-Research-Lab/PAGE-XML) -* [OCR-D](https://github.com/qurator-spk/eynollah#use-as-ocr-d-processor) interface +## Introduction +This tool performs document layout analysis (segmentation) from image data and returns the results as [PAGE-XML](https://github.com/PRImA-Research-Lab/PAGE-XML). -:warning: Development is focused on achieving the best quality of results for a wide variety of historical -documents and therefore processing can be very slow. We aim to improve this, but contributions are welcome. +It can currently detect the following layout classes/elements: +* [Border](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_BorderType.html) +* [Textregion](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_TextRegionType.html) +* [Textline](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_TextLineType.html) +* [Image](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_ImageRegionType.html) +* [Separator](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_SeparatorRegionType.html) +* [Marginalia](https://ocr-d.de/en/gt-guidelines/trans/lyMarginalie.html) +* [Initial (Drop Capital)](https://ocr-d.de/en/gt-guidelines/trans/lyInitiale.html) + +In addition, the tool can be used to detect the _[ReadingOrder](https://ocr-d.de/en/gt-guidelines/trans/lyLeserichtung.html)_ of regions. The final goal is to feed the output to an OCR model. + +The tool uses a combination of various models and heuristics (see flowchart below for the different stages and how they interact): +* [Border detection](https://github.com/qurator-spk/eynollah#border-detection) +* [Layout detection](https://github.com/qurator-spk/eynollah#layout-detection) +* [Textline detection](https://github.com/qurator-spk/eynollah#textline-detection) +* [Image enhancement](https://github.com/qurator-spk/eynollah#Image_enhancement) +* [Scale classification](https://github.com/qurator-spk/eynollah#Scale_classification) +* [Heuristic methods](https://https://github.com/qurator-spk/eynollah#heuristic-methods) + +The first three stages are based on [pixel-wise segmentation](https://github.com/qurator-spk/sbb_pixelwise_segmentation). + +![](https://user-images.githubusercontent.com/952378/100619946-1936f680-331e-11eb-9297-6e8b4cab3c16.png) + +## Border detection +For the purpose of text recognition (OCR) and in order to avoid noise being introduced from texts outside the printspace, one first needs to detect the border of the printed frame. This is done by a binary pixel-wise-segmentation model trained on a dataset of 2,000 documents where about 1,200 of them come from the [dhSegment](https://github.com/dhlab-epfl/dhSegment/) project (you can download the dataset from [here](https://github.com/dhlab-epfl/dhSegment/releases/download/v0.2/pages.zip)) and the remainder having been annotated in SBB. 
For border detection, the model needs to be fed the whole image at once rather than in separate patches.
+
+## Layout detection
+As a next step, text regions need to be identified by means of layout detection. Again a pixel-wise segmentation model was trained on 131 labeled images from the SBB digital collections, including some data augmentation. Since this tool targets historical documents, we consider as main region types text regions, separators, images, tables and background - each with their own subclasses, e.g. in the case of text regions, subclasses like header/heading, drop capital, main body text etc. While it would be desirable to detect and classify each of these classes in a granular way, there are also limitations due to having a suitably large and balanced training set. Accordingly, the current version of this tool is focused on the main region types background, text region, image and separator.
+
+## Textline detection
+In a subsequent step, binary pixel-wise segmentation is used again to classify the pixels in a document that constitute textlines. For textline segmentation, a model was initially trained on documents with only one column/block of text and some augmentation with regard to scaling. By also fine-tuning the parameters for multi-column documents, additional training data was produced, which resulted in a much more robust textline detection model.
+
+## Image enhancement
+This is an image-to-image model whose input is a low-quality image and whose label is the original, high-quality image. Since no ground truth was available for this task, we artificially degraded the quality of documents from the SBB collections and fed the degraded images to the model, with the originals serving as labels.
+
+## Scale classification
+This is simply an image classifier that classifies images based on their scale or, more precisely, their number of columns.
+
+## Heuristic methods
+Some heuristic methods are also employed to further improve the model predictions:
+* After border detection, the largest contour is determined by a bounding box, and the image cropped to these coordinates.
+* For text region detection, the image is scaled up to make it easier for the model to detect background space between text regions.
+* A minimum area is defined for text regions in relation to the overall image dimensions, so that very small regions that are noise can be filtered out.
+* Deskewing is applied on the text region level (due to regions having different degrees of skew) in order to improve the textline segmentation result.
+* After deskewing, a calculation of the pixel distribution on the X-axis allows the separation of textlines (foreground) and background pixels.
+* Finally, using the derived coordinates, bounding boxes are determined for each textline.

 ## Installation
+`pip install .` or
-Python `3.8-3.11` with Tensorflow `<2.13` on Linux are currently supported.
+`pip install -e .` for editable installation
-For (limited) GPU support the CUDA toolkit needs to be installed. A known working config is CUDA `11` with cuDNN `8.6`.
+Alternatively, you can also use `make` with these targets:
-You can either install from PyPI
+`make install` or
-```
-pip install eynollah
-```
+`make install-dev` for editable installation
-or clone the repository, enter it and install (editable) with

 ### Models
-```
-git clone git@github.com:qurator-spk/eynollah.git
-cd eynollah; pip install -e .
-```
+In order to run this tool you also need trained models. You can download our pretrained models from [qurator-data.de](https://qurator-data.de/eynollah/).
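For reference, a manual equivalent of the `make models` target shown in the Makefile above would be (assuming `wget` and `tar` are available):

```sh
wget 'https://qurator-data.de/eynollah/models_eynollah.tar.gz'
tar xf models_eynollah.tar.gz
```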
-
-Alternatively, you can run `make install` or `make install-dev` for editable installation.
-
-To also install the dependencies for the OCR engines:
-
-```
-pip install "eynollah[OCR]"
-# or
-make install EXTRAS=OCR
-```
-
-## Models
-
-Pretrained models can be downloaded from [zenodo](https://zenodo.org/records/17194824) or [huggingface](https://huggingface.co/SBB?search_models=eynollah).
-
-For documentation on models, have a look at [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md).
-Model cards are also provided for our trained models.
-
-## Training
-
-In case you want to train your own model with Eynollah, see the
-documentation in [`train.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/train.md) and use the
-tools in the [`train` folder](https://github.com/qurator-spk/eynollah/tree/main/train).
+Alternatively, running `make models` will download and extract models to `$(PWD)/models_eynollah`.

 ## Usage
-Eynollah supports five use cases: layout analysis (segmentation), binarization,
-image enhancement, text recognition (OCR), and reading order detection.
-
-### Layout Analysis
-
-The layout analysis module is responsible for detecting layout elements, identifying text lines, and determining reading
-order using either heuristic methods or a [pretrained reading order detection model](https://github.com/qurator-spk/eynollah#machine-based-reading-order).
-
-Reading order detection can be performed either as part of layout analysis based on image input, or, currently under
-development, based on pre-existing layout analysis results in PAGE-XML format as input.
-
-The command-line interface for layout analysis can be called like this:
+The basic command-line interface can be called like this:

```sh
-eynollah layout \
-  -i <single image file> | -di <directory of image files> \
-  -o <output directory> \
-  -m <directory of model files> \
-  [OPTIONS]
+eynollah \
+-i <image file> \
+-o <output directory> \
+-m <model directory> \
+-fl <true|false> \
+-ae <true|false> \
+-as <true|false> \
+-cl <true|false> \
+-si <directory for cropped image regions>
```

-The following options can be used to further configure the processing:
+The tool accepts original (RGB format) images and generally works better on those than on binarized images.

-| option | description |
-|-------------------|:-------------------------------------------------------------------------------|
-| `-fl` | full layout analysis including all steps and segmentation classes |
-| `-light` | lighter and faster but simpler method for main region detection and deskewing |
-| `-tll` | light textline detection (must be used together with the light version) |
-| `-tab` | apply table detection |
-| `-ae` | apply enhancement (the resulting image is saved to the output directory) |
-| `-as` | apply scaling |
-| `-cl` | apply contour detection for curved text lines instead of bounding boxes |
-| `-ib` | apply binarization (the resulting image is saved to the output directory) |
-| `-ep` | enable plotting (MUST always be used with `-sl`, `-sd`, `-sa`, `-si` or `-ae`) |
-| `-eoi` | extract only images to output directory (other processing will not be done) |
-| `-ho` | ignore headers for reading order detection |
-| `-si <directory>` | save image regions detected to this directory |
-| `-sd <directory>` | save deskewed image to this directory |
-| `-sl <directory>` | save layout prediction as plot to this directory |
-| `-sp <directory>` | save cropped page image to this directory |
-| `-sa <directory>` | save all (plot, enhanced/binary image, layout) to this directory |

+### `--full-layout` vs `--no-full-layout`

-If no further option is set, the tool performs layout detection of main regions (background, text, images, separators
-and marginals).
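As a concrete illustration, a hypothetical full-layout run with the light models, light textline detection and curved-line contours could look like this (file and directory names are placeholders):

```sh
eynollah layout \
  -i page_0001.tif \
  -o results/ \
  -m models_eynollah/ \
  -fl -light -tll -cl
```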
-The best output quality is achieved when RGB images are used as input rather than greyscale or binarized images.

-### Binarization
+Here are the differences in elements detected depending on the `--full-layout`/`--no-full-layout` command line flags:

-The binarization module performs document image binarization using pretrained pixelwise segmentation models.
+| | `--full-layout` | `--no-full-layout` |
+| --- | --- | --- |
+| reading order | x | x |
+| header regions | x | - |
+| text regions | x | x |
+| text regions / text line | x | x |
+| drop-capitals | x | - |
+| marginals | x | x |
+| marginals / text line | x | x |
+| image region | x | x |

-The command-line interface for binarization can be called like this:
+### How to use

```sh
-eynollah binarization \
-  -i <single image file> | -di <directory of image files> \
-  -o <output directory> \
-  -m <model directory>
```

+First, this tool makes use of up to 9 trained models which are responsible for different operations like size detection, column classification, image enhancement, page extraction, main layout detection, full layout detection and textline detection. That does not mean that all 9 models are always required for every document. Based on the document characteristics and parameters specified, different scenarios can be applied.

-### OCR
+* If none of the parameters is set to `true`, the tool will perform a layout detection of main regions (background, text, images, separators and marginals). An advantage of this tool is that it tries to extract main text regions separately as much as possible.

-The OCR module performs text recognition using either a CNN-RNN model or a Transformer model.
+* If you set the `-ae` (**a**llow image **e**nhancement) parameter to `true`, the tool will first check the ppi (pixel-per-inch) of the image and when it is less than 300, the tool will resize it and only then image enhancement will occur. Image enhancement can also take place without this option, but by setting this option to `true`, the layout XML data (e.g. coordinates) will be based on the resized and enhanced image instead of the original image.

-The command-line interface for OCR can be called like this:
+* For some documents, while the quality is good, their scale is very large, and the performance of the tool decreases. In such cases you can set `-as` (**a**llow **s**caling) to `true`. With this option enabled, the tool will try to rescale the image and only then the layout detection process will begin.

```sh
-eynollah ocr \
-  -i <single image file> | -di <directory of image files> \
-  -dx <directory of xml files> \
-  -o <output directory> \
-  -m <model directory> | --model_name <specific model>
```
+* If you care about drop capitals (initials) and headings, you can set `-fl` (**f**ull **l**ayout) to `true`. With this setting, the tool can currently distinguish 7 document layout classes/elements.

-### Machine-based-reading-order
+* In cases where the document includes curved headers or curved lines, rectangular bounding boxes for textlines will not be a great option. In such cases it is strongly recommended to set the flag `-cl` (**c**urved **l**ines) to `true` to find contours of curved lines instead of rectangular bounding boxes. Be advised that enabling this option increases the processing time of the tool.

+* To crop and save image regions inside the document, set the parameter `-si` (**s**ave **i**mages) to `true` and provide a directory path to store the extracted images.

-The machine-based reading-order module employs a pretrained model to identify the reading order from layouts represented in PAGE-XML files.
-
-The command-line interface for machine-based reading order can be called like this:
-
-```sh
-eynollah machine-based-reading-order \
-  -i <single image file> | -di <directory of image files> \
-  -xml <xml file> | -dx <directory of xml files> \
-  -m <model directory> \
-  -o <output directory>
-```
-
-#### Use as OCR-D processor
-
-Eynollah ships with a CLI interface to be used as [OCR-D](https://ocr-d.de) [processor](https://ocr-d.de/en/spec/cli),
-formally described in [`ocrd-tool.json`](https://github.com/qurator-spk/eynollah/tree/main/src/eynollah/ocrd-tool.json).
-
-In this case, the source image file group with (preferably) RGB images should be used as input like this:
-
-    ocrd-eynollah-segment -I OCR-D-IMG -O OCR-D-SEG -P models eynollah_layout_v0_5_0
-
-If the input file group is PAGE-XML (from a previous OCR-D workflow step), Eynollah behaves as follows:
-- existing regions are kept and ignored (i.e. in effect they might overlap segments from Eynollah results)
-- existing annotation (and respective `AlternativeImage`s) are partially _ignored_:
-  - previous page frame detection (`cropped` images)
-  - previous derotation (`deskewed` images)
-  - previous thresholding (`binarized` images)
-- if the page-level image nevertheless deviates from the original (`@imageFilename`)
-  (because some other preprocessing step was in effect like `denoised`), then
-  the output PAGE-XML will be based on that as new top-level (`@imageFilename`)
-
-    ocrd-eynollah-segment -I OCR-D-XYZ -O OCR-D-SEG -P models eynollah_layout_v0_5_0
-
-In general, it makes more sense to add other workflow steps **after** Eynollah.
-
-There is also an OCR-D processor for binarization:
-
-    ocrd-sbb-binarize -I OCR-D-IMG -O OCR-D-BIN -P models default-2021-03-09
-
-#### Additional documentation
-
-Additional documentation is available in the [docs](https://github.com/qurator-spk/eynollah/tree/main/docs) directory.
-
-## How to cite
-
-```bibtex
-@inproceedings{hip23rezanezhad,
-  title = {Document Layout Analysis with Deep Learning and Heuristics},
-  author = {Rezanezhad, Vahid and Baierer, Konstantin and Gerber, Mike and Labusch, Kai and Neudecker, Clemens},
-  booktitle = {Proceedings of the 7th International Workshop on Historical Document Imaging and Processing {HIP} 2023,
-               San José, CA, USA, August 25-26, 2023},
-  publisher = {Association for Computing Machinery},
-  address = {New York, NY, USA},
-  year = {2023},
-  pages = {73--78},
-  url = {https://doi.org/10.1145/3604951.3605513}
-}
-```
+* This tool is actively being developed. If problems occur, or the performance does not meet your expectations, we welcome your feedback via [issues](https://github.com/qurator-spk/eynollah/issues).
diff --git a/docs/models.md b/docs/models.md
deleted file mode 100644
index 3d296d5..0000000
--- a/docs/models.md
+++ /dev/null
@@ -1,165 +0,0 @@
-# Models documentation
-
-This suite of 15 models presents a document layout analysis (DLA) system for historical documents implemented by
-pixel-wise segmentation using a combination of a ResNet50 encoder with various U-Net decoders. In addition, heuristic
-methods are applied to detect marginals and to determine the reading order of text regions.
-
-The detection and classification of multiple classes of layout elements such as headings, images, tables etc. as part of
-DLA is required in order to extract and process them in subsequent steps. Altogether, the combination of image
-detection, classification and segmentation on the wide variety that can be found in over 400 years of printed cultural
-heritage makes this a very challenging task.
Deep learning models are complemented with heuristics for the detection of -text lines, marginals, and reading order. Furthermore, an optional image enhancement step was added in case of documents -that either have insufficient pixel density and/or require scaling. Also, a column classifier for the analysis of -multi-column documents was added. With these additions, DLA performance was improved, and a high accuracy in the -prediction of the reading order is accomplished. - -Two Arabic/Persian terms form the name of the model suite: عين الله, which can be transcribed as "ain'allah" or -"eynollah"; it translates into English as "God's Eye" -- it sees (nearly) everything on the document image. - -See the flowchart below for the different stages and how they interact: - -![](https://user-images.githubusercontent.com/952378/100619946-1936f680-331e-11eb-9297-6e8b4cab3c16.png) - - -## Models - -### Image enhancement - -Model card: [Image Enhancement](https://huggingface.co/SBB/eynollah-enhancement) - -This model addresses image resolution, specifically targeting documents with suboptimal resolution. In instances where -the detection of document layout exhibits inadequate performance, the proposed enhancement aims to significantly improve -the quality and clarity of the images, thus facilitating enhanced visual interpretation and analysis. - -### Page extraction / border detection - -Model card: [Page Extraction/Border Detection](https://huggingface.co/SBB/eynollah-page-extraction) - -A problem that can negatively affect OCR are black margins around a page caused by document scanning. A deep learning -model helps to crop to the page borders by using a pixel-wise segmentation method. - -### Column classification - -Model card: [Column Classification](https://huggingface.co/SBB/eynollah-column-classifier) - -This model is a trained classifier that recognizes the number of columns in a document by use of a training set with -manual classification of all documents into six classes with either one, two, three, four, five, or six and more columns -respectively. - -### Binarization - -Model card: [Binarization](https://huggingface.co/SBB/eynollah-binarization) - -This model is designed to tackle the intricate task of document image binarization, which involves segmentation of the -image into white and black pixels. This process significantly contributes to the overall performance of the layout -models, particularly in scenarios where the documents are degraded or exhibit subpar quality. The robust binarization -capability of the model enables improved accuracy and reliability in subsequent layout analysis, thereby facilitating -enhanced document understanding and interpretation. - -### Main region detection - -Model card: [Main Region Detection](https://huggingface.co/SBB/eynollah-main-regions) - -This model has employed a different set of labels, including an artificial class specifically designed to encompass the -text regions. The inclusion of this artificial class facilitates easier isolation of text regions by the model. This -approach grants the advantage of training the model using downscaled images, which in turn leads to faster predictions -during the inference phase. By incorporating this methodology, improved efficiency is achieved without compromising the -model's ability to accurately identify and classify text regions within documents. 
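As a rough illustration of how such a pixel-wise model can be run on a downscaled page, here is a minimal sketch; the model path, input size and class handling are assumptions for illustration, not the actual eynollah code:

```python
# Minimal sketch: pixel-wise region prediction on a downscaled page image.
# Assumes a Keras model with softmax output of shape (1, H, W, n_classes);
# the model path and file names are illustrative, not the real eynollah API.
import cv2
import numpy as np
from tensorflow import keras

model = keras.models.load_model("models/main_regions", compile=False)
_, h, w, _ = model.input_shape            # e.g. (None, 448, 448, 3)

page = cv2.imread("page_0001.tif")        # BGR uint8 page image
small = cv2.resize(page, (w, h)) / 255.0  # downscale and normalize
probs = model.predict(small[np.newaxis])[0]
labels = np.argmax(probs, axis=-1).astype(np.uint8)

# scale the label map back up to the original page size
mask = cv2.resize(labels, (page.shape[1], page.shape[0]),
                  interpolation=cv2.INTER_NEAREST)
```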
- -### Main region detection (with scaling augmentation) - -Model card: [Main Region Detection (with scaling augmentation)](https://huggingface.co/SBB/eynollah-main-regions-aug-scaling) - -Utilizing scaling augmentation, this model leverages the capability to effectively segment elements of extremely high or -low scales within documents. By harnessing this technique, the tool gains a significant advantage in accurately -categorizing and isolating such elements, thereby enhancing its overall performance and enabling precise analysis of -documents with varying scale characteristics. - -### Main region detection (with rotation augmentation) - -Model card: [Main Region Detection (with rotation augmentation)](https://huggingface.co/SBB/eynollah-main-regions-aug-rotation) - -This model takes advantage of rotation augmentation. This helps the tool to segment the vertical text regions in a -robust way. - -### Main region detection (ensembled) - -Model card: [Main Region Detection (ensembled)](https://huggingface.co/SBB/eynollah-main-regions-ensembled) - -The robustness of this model is attained through an ensembling technique that combines the weights from various epochs. -By employing this approach, the model achieves a high level of resilience and stability, effectively leveraging the -strengths of multiple epochs to enhance its overall performance and deliver consistent and reliable results. - -### Full region detection (1,2-column documents) - -Model card: [Full Region Detection (1,2-column documents)](https://huggingface.co/SBB/eynollah-full-regions-1column) - -This model deals with documents comprising of one and two columns. - -### Full region detection (3,n-column documents) - -Model card: [Full Region Detection (3,n-column documents)](https://huggingface.co/SBB/eynollah-full-regions-3pluscolumn) - -This model is responsible for detecting headers and drop capitals in documents with three or more columns. - -### Textline detection - -Model card: [Textline Detection](https://huggingface.co/SBB/eynollah-textline) - -The method for textline detection combines deep learning and heuristics. In the deep learning part, an image-to-image -model performs binary segmentation of the document into the classes textline vs. background. In the heuristics part, -bounding boxes or contours are derived from binary segmentation. - -Skewed documents can heavily affect textline detection accuracy, so robust deskewing is needed. But detecting textlines -with rectangle bounding boxes cannot deal with partially curved textlines. To address this, a functionality -specifically for documents with curved textlines was included. After finding the contour of a text region and its -corresponding textline segmentation, the text region is cut into smaller vertical straps. For each strap, its textline -segmentation is first deskewed and then the textlines are separated with the same heuristic method as for finding -textline bounding boxes. Later, the strap is rotated back into its original orientation. - -### Textline detection (light) - -Model card: [Textline Detection Light (simpler but faster method)](https://huggingface.co/SBB/eynollah-textline_light) - -The method for textline detection combines deep learning and heuristics. In the deep learning part, an image-to-image -model performs binary segmentation of the document into the classes textline vs. background. In the heuristics part, -bounding boxes or contours are derived from binary segmentation. 
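To illustrate the heuristic step just described, textline bounding boxes can be derived from a binary textline mask with OpenCV roughly like this (a sketch under simplified assumptions, not the actual eynollah implementation):

```python
# Sketch: derive textline bounding boxes from a binary segmentation mask.
# `textline_mask.png` stands for any binary textline prediction image.
import cv2
import numpy as np

mask = cv2.imread("textline_mask.png", cv2.IMREAD_GRAYSCALE)
binary = (mask > 0).astype(np.uint8)
contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_SIMPLE)
# filter out tiny noise specks relative to the page size
min_area = 1e-5 * binary.shape[0] * binary.shape[1]
boxes = [cv2.boundingRect(c) for c in contours
         if cv2.contourArea(c) >= min_area]  # (x, y, w, h) per textline
```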
- -In the context of this textline model, a distinct labeling approach has been employed to ensure accurate predictions. -Specifically, an artificial bounding class has been incorporated alongside the textline classes. This strategic -inclusion effectively prevents any spurious connections between adjacent textlines during the prediction phase, thereby -enhancing the model's ability to accurately identify and delineate individual textlines within documents. This model -eliminates the need for additional heuristics in extracting textline contours. - -### Table detection - -Model card: [Table Detection](https://huggingface.co/SBB/eynollah-tables) - -The objective of this model is to perform table segmentation in historical document images. Due to the pixel-wise -segmentation approach employed and the presence of traditional tables predominantly composed of text, the detection of -tables required the incorporation of heuristics to achieve reasonable performance. These heuristics were necessary to -effectively identify and delineate tables within the historical document images, ensuring accurate segmentation and -enabling subsequent analysis and interpretation. - -### Image detection - -Model card: [Image Detection](https://huggingface.co/SBB/eynollah-image-extraction) - -This model is used for the task of illustration detection only. - -### Reading order detection - -Model card: [Reading Order Detection]() - -TODO - -## Heuristic methods - -Additionally, some heuristic methods are employed to further improve the model predictions: - -* After border detection, the largest contour is determined by a bounding box, and the image cropped to these coordinates. -* For text region detection, the image is scaled up to make it easier for the model to detect background space between text regions. -* A minimum area is defined for text regions in relation to the overall image dimensions, so that very small regions that are noise can be filtered out. -* Deskewing is applied on the text region level (due to regions having different degrees of skew) in order to improve the textline segmentation result. -* After deskewing, a calculation of the pixel distribution on the X-axis allows the separation of textlines (foreground) and background pixels. -* Finally, using the derived coordinates, bounding boxes are determined for each textline. diff --git a/docs/train.md b/docs/train.md deleted file mode 100644 index 252bead..0000000 --- a/docs/train.md +++ /dev/null @@ -1,719 +0,0 @@ -# Training documentation - -This document aims to assist users in preparing training datasets, training models, and -performing inference with trained models. We cover various use cases including -pixel-wise segmentation, image classification, image enhancement, and -machine-based reading order detection. For each use case, we provide guidance -on how to generate the corresponding training dataset. - -The following three tasks can all be accomplished using the code in the -[`train`](https://github.com/qurator-spk/eynollah/tree/main/train) directory: - -* generate training dataset -* train a model -* inference with the trained model - -## Training, evaluation and output - -The train and evaluation folders should contain subfolders of `images` and `labels`. - -The output folder should be an empty folder where the output model will be written to. - -## Generate training dataset - -The script `generate_gt_for_training.py` is used for generating training datasets. 
As the results of the following
-command demonstrate, the dataset generator provides several subcommands:
-
-```sh
-eynollah-training generate-gt --help
-```
-
-The three most important subcommands are:
-
-* image-enhancement
-* machine-based-reading-order
-* pagexml2label
-
-### image-enhancement
-
-Generating a training dataset for image enhancement is quite straightforward. All that is needed is a set of
-high-resolution images. The training dataset can then be generated using the following command:
-
-```sh
-eynollah-training image-enhancement \
-  -dis "dir of high resolution images" \
-  -dois "dir where degraded images will be written" \
-  -dols "dir where the corresponding high resolution image will be written as label" \
-  -scs "degrading scales json file"
-```
-
-The scales JSON file is a dictionary with a key named `scales` and values representing scales smaller than 1. Images are
-downscaled based on these scales and then upscaled again to their original size. This process causes the images to lose
-resolution at different scales. The degraded images are used as input images, and the original high-resolution images
-serve as labels. The enhancement model can be trained with this generated dataset. The scales JSON file looks like this:
-
-```yaml
-{
-  "scales": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]
-}
-```
-
-### machine-based-reading-order
-
-For machine-based reading order, we aim to determine the reading priority between two sets of text regions. The model's
-input is a three-channel image: the first and last channels contain information about each of the two text regions,
-while the middle channel encodes prominent layout elements necessary for reading order, such as separators and headers.
-To generate the training dataset, our script requires a page XML file that specifies the image layout with the correct
-reading order.
-
-For output images, it is necessary to specify the width and height. Additionally, a minimum text region size can be set
-to filter out regions smaller than this minimum size. This minimum size is defined as the ratio of the text region area
-to the image area, with a default value of zero. To run the dataset generator, use the following command:
-
-```shell
-eynollah-training generate-gt machine-based-reading-order \
-  -dx "dir of GT xml files" \
-  -domi "dir where output images will be written" \
-  -docl "dir where the labels will be written" \
-  -ih "height" \
-  -iw "width" \
-  -min "min area ratio"
-```
-
-### pagexml2label
-
-pagexml2label is designed to generate labels from GT page XML files for various pixel-wise segmentation use cases,
-including 'layout,' 'textline,' 'printspace,' 'glyph,' and 'word' segmentation.
-To train a pixel-wise segmentation model, we require images along with their corresponding labels. Our training script
-expects a PNG image where each pixel corresponds to a label, represented by an integer. The background is always labeled
-as zero, while other elements are assigned different integers. For instance, if we have ground truth data with four
-elements including the background, the classes would be labeled as 0, 1, 2, and 3 respectively.
-
-In binary segmentation scenarios such as textline or page extraction, the background is encoded as 0, and the desired
-element is automatically encoded as 1 in the PNG label.
-
-To specify the desired use case and the elements to be extracted in the PNG labels, a custom JSON file can be passed.
-### machine-based-reading-order
-
-For machine-based reading order, we aim to determine the reading priority between two sets of text regions. The model's
-input is a three-channel image: the first and last channels contain information about each of the two text regions,
-while the middle channel encodes prominent layout elements necessary for reading order, such as separators and headers.
-To generate the training dataset, our script requires a page XML file that specifies the image layout with the correct
-reading order.
-
-For output images, it is necessary to specify the width and height. Additionally, a minimum text region size can be set
-to filter out regions smaller than this minimum size. This minimum size is defined as the ratio of the text region area
-to the image area, with a default value of zero. To run the dataset generator, use the following command:
-
-```sh
-eynollah-training generate-gt machine-based-reading-order \
-  -dx "dir of GT xml files" \
-  -domi "dir where output images will be written" \
-  -docl "dir where the labels will be written" \
-  -ih "height" \
-  -iw "width" \
-  -min "min area ratio"
-```
-
-### pagexml2label
-
-pagexml2label is designed to generate labels from GT page XML files for various pixel-wise segmentation use cases,
-including 'layout', 'textline', 'printspace', 'glyph', and 'word' segmentation.
-To train a pixel-wise segmentation model, we require images along with their corresponding labels. Our training script
-expects a PNG image where each pixel corresponds to a label, represented by an integer. The background is always labeled
-as zero, while other elements are assigned different integers. For instance, if we have ground truth data with four
-elements including the background, the classes would be labeled as 0, 1, 2, and 3 respectively.
-
-In binary segmentation scenarios such as textline or page extraction, the background is encoded as 0, and the desired
-element is automatically encoded as 1 in the PNG label.
-
-To specify the desired use case and the elements to be extracted in the PNG labels, a custom JSON file can be passed.
-For example, in the case of 'textline' detection, the JSON file would resemble this:
-
-```yaml
-{
-    "use_case": "textline"
-}
-```
-
-In the case of layout segmentation, a custom config JSON file can look like this:
-
-```yaml
-{
-    "use_case": "layout",
-    "textregions": {"rest_as_paragraph": 1, "drop-capital": 1, "header": 2, "heading": 2, "marginalia": 3},
-    "imageregion": 4,
-    "separatorregion": 5,
-    "graphicregions": {"rest_as_decoration": 6, "stamp": 7}
-}
-```
-
-A possible custom config JSON file for layout segmentation where the "printspace" is a class:
-
-```yaml
-{
-    "use_case": "layout",
-    "textregions": {"rest_as_paragraph": 1, "drop-capital": 1, "header": 2, "heading": 2, "marginalia": 3},
-    "imageregion": 4,
-    "separatorregion": 5,
-    "graphicregions": {"rest_as_decoration": 6, "stamp": 7},
-    "printspace_as_class_in_layout": 8
-}
-```
-
-For the layout use case, it is beneficial to first understand the structure of the page XML file and its elements.
-In a given image, the annotations of elements are recorded in a page XML file, including their contours and classes.
-For an image document, the known regions are 'textregion', 'separatorregion', 'imageregion', 'graphicregion',
-'noiseregion', and 'tableregion'.
-
-Text regions and graphic regions also have their own specific types. The known types for text regions are 'paragraph',
-'header', 'heading', 'marginalia', 'drop-capital', 'footnote', 'footnote-continued', 'signature-mark', 'page-number',
-and 'catch-word'. The known types for graphic regions are 'handwritten-annotation', 'decoration', 'stamp', and
-'signature'.
-Since we don't know all types of text and graphic regions, unknown cases can arise. To handle these, we have defined
-two additional types, "rest_as_paragraph" and "rest_as_decoration", to ensure that no unknown types are missed.
-This way, users can extract all known types from the labels and be confident that no unknown types are overlooked.
-
-In the custom JSON file shown above, "header" and "heading" are extracted as the same class, while "marginalia" is shown
-as a different class. All other text region types, including "drop-capital", are grouped into the same class. For the
-graphic region, "stamp" has its own class, while all other types are classified together. "Image region" and "separator
-region" are also present in the label. However, other regions like "noise region" and "table region" will not be
-included in the label PNG file, even if they have information in the page XML files, as we chose not to include them.
-
-```sh
-eynollah-training generate-gt pagexml2label \
-  -dx "dir of GT xml files" \
-  -do "dir where output label png files will be written" \
-  -cfg "custom config json file" \
-  -to "output type which has 2d and 3d. 2d is used for training and 3d is just to visualise the labels"
-```
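-
-Conceptually, the label generation rasterizes each annotated region polygon with the class index chosen in the custom
-config. A simplified, hypothetical sketch of that step, assuming `lxml` and OpenCV; the real tool handles many more
-element types and options:
-
-```python
-import cv2
-import numpy as np
-from lxml import etree
-
-# PAGE schema namespace (the version may differ between ground truth files)
-NS = {"pc": "http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15"}
-
-def page_to_label(xml_path, class_of_type, height, width):
-    # class_of_type maps region types to class indices, e.g. {"paragraph": 1, "heading": 2}
-    label = np.zeros((height, width), dtype=np.uint8)  # background stays 0
-    tree = etree.parse(xml_path)
-    for region in tree.iterfind(".//pc:TextRegion", namespaces=NS):
-        cls = class_of_type.get(region.get("type", "paragraph"))
-        if cls is None:
-            continue  # region type not requested in the config
-        points = region.find("pc:Coords", namespaces=NS).get("points")
-        polygon = np.array([[int(v) for v in p.split(",")] for p in points.split()],
-                           dtype=np.int32)
-        cv2.fillPoly(label, [polygon], int(cls))
-    return label
-```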
-We have also defined an artificial class that can be added to the boundary of text region types or text lines. This
-key is called "artificial_class_on_boundary". If users want to apply this to certain text regions in the layout use
-case, the example JSON config file should look like this:
-
-```yaml
-{
-    "use_case": "layout",
-    "textregions": {
-        "paragraph": 1,
-        "drop-capital": 1,
-        "header": 2,
-        "heading": 2,
-        "marginalia": 3
-    },
-    "imageregion": 4,
-    "separatorregion": 5,
-    "graphicregions": {
-        "rest_as_decoration": 6
-    },
-    "artificial_class_on_boundary": ["paragraph", "header", "heading", "marginalia"],
-    "artificial_class_label": 7
-}
-```
-
-This implies that the artificial class label, denoted by 7, will be present in the PNG files and will only be added to
-the elements labeled as "paragraph", "header", "heading", and "marginalia".
-
-For "textline", "word", and "glyph", the artificial class on the boundaries will be activated only if the
-"artificial_class_label" key is specified in the config file. Its value should be set to 2, since these elements
-represent binary cases. For example, if the background and textline are denoted as 0 and 1 respectively, then the
-artificial class should be assigned the value 2. The example JSON config file for the "textline" use case should look
-like this:
-
-```yaml
-{
-    "use_case": "textline",
-    "artificial_class_label": 2
-}
-```
-
-If the coordinates of "PrintSpace" or "Border" are present in the page XML ground truth files, and the user wishes to
-crop only the print space area, this can be achieved by activating the "-ps" argument. It should be noted that in this
-scenario, since cropping will be applied to the label files, the directory of the original images must also be
-provided, so that they are cropped in sync with the labels and the correct image/label pairs required for training are
-obtained. The command should resemble the following:
-
-```sh
-eynollah-training generate-gt pagexml2label \
-  -dx "dir of GT xml files" \
-  -do "dir where output label png files will be written" \
-  -cfg "custom config json file" \
-  -to "output type which has 2d and 3d. 2d is used for training and 3d is just to visualise the labels" \
-  -ps \
-  -di "dir where the org images are located" \
-  -doi "dir where the cropped output images will be written"
-```
-
-## Train a model
-
-### classification
-
-For the classification use case, we haven't provided a ground truth generator, as it's unnecessary. For classification,
-all we require is a training directory with subdirectories, each containing images of its respective class. We need
-separate directories for training and evaluation, and the class names (subdirectories) must be consistent across both
-directories. Additionally, the class names should be specified in the config JSON file, as shown in the following
-example. If, for instance, we aim to classify "apple" and "orange", with a total of 2 classes, the
-"classification_classes_name" key in the config file should appear as follows:
-
-```yaml
-{
-    "backbone_type" : "nontransformer",
-    "task": "classification",
-    "n_classes" : 2,
-    "n_epochs" : 10,
-    "input_height" : 448,
-    "input_width" : 448,
-    "weight_decay" : 1e-6,
-    "n_batch" : 4,
-    "learning_rate": 1e-4,
-    "f1_threshold_classification": 0.8,
-    "pretraining" : true,
-    "classification_classes_name" : {"0":"apple", "1":"orange"},
-    "dir_train": "./train",
-    "dir_eval": "./eval",
-    "dir_output": "./output"
-}
-```
-
-The "dir_train" should be like this:
-
-```
-.
-└── train             # train directory
-    ├── apple         # directory of images for apple class
-    └── orange        # directory of images for orange class
-```
-
-And "dir_eval" should have the same structure as the train directory:
-
-```
-.
-└── eval              # evaluation directory
-    ├── apple         # directory of images for apple class
-    └── orange        # directory of images for orange class
-```
-
-The classification model can be trained using the following command line:
-
-```sh
-eynollah-training train with config_classification.json
-```
-
-As evident in the example JSON file above, for classification, we utilize a "f1_threshold_classification" parameter.
-This parameter is employed to gather all models with an evaluation f1 score surpassing this threshold. Subsequently,
-the weights of these models are averaged into an ensemble model, which is saved in the output directory as
-"model_ens_avg". Additionally, the weights of the best model based on the evaluation f1 score are saved as "model_best".
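-
-This weight averaging can be pictured as follows; a hypothetical sketch assuming Keras models of identical
-architecture, not the tool's actual implementation:
-
-```python
-import numpy as np
-from keras.models import load_model
-
-def average_weights(model_paths):
-    """Average the layer weights of several identically shaped Keras models."""
-    models = [load_model(p, compile=False) for p in model_paths]
-    averaged = [np.mean(layer_stack, axis=0)
-                for layer_stack in zip(*(m.get_weights() for m in models))]
-    ensemble = models[0]
-    ensemble.set_weights(averaged)
-    return ensemble
-```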
-### reading order
-
-An example config json file for machine based reading order should look like this:
-
-```yaml
-{
-    "backbone_type" : "nontransformer",
-    "task": "reading_order",
-    "n_classes" : 1,
-    "n_epochs" : 5,
-    "input_height" : 672,
-    "input_width" : 448,
-    "weight_decay" : 1e-6,
-    "n_batch" : 4,
-    "learning_rate": 1e-4,
-    "pretraining" : true,
-    "dir_train": "./train",
-    "dir_eval": "./eval",
-    "dir_output": "./output"
-}
-```
-
-The "dir_train" should be like this:
-
-```
-.
-└── train           # train directory
-    ├── images      # directory of images
-    └── labels      # directory of labels
-```
-
-And "dir_eval" should have the same structure as the train directory:
-
-```
-.
-└── eval            # evaluation directory
-    ├── images      # directory of images
-    └── labels      # directory of labels
-```
-
-The reading order model is then trained with the same command line as in the classification case.
-
-### Segmentation (Textline, Binarization, Page extraction and layout) and enhancement
-
-#### Parameter configuration for segmentation or enhancement use cases
-
-The following parameter configuration can be applied to all segmentation use cases and to enhancement. The
-augmentation, its sub-parameters, and continued training are defined only for the segmentation use cases and
-enhancement, not for classification and machine-based reading order, as you can see in their example config files.
-
-* `backbone_type`: For segmentation tasks (such as text line, binarization, and layout detection) and enhancement, we
-  offer two backbone options: a "nontransformer" and a "transformer" backbone. For the "transformer" backbone, we first
-  apply a CNN followed by a transformer. In contrast, the "nontransformer" backbone utilizes only a CNN ResNet-50.
-* `task`: The task parameter can have values such as "segmentation", "enhancement", "classification", and "reading_order".
-* `patches`: If you want to break input images into smaller patches (the input size of the model), you need to set this
-  parameter to `true`. In the case that the model should see the image at once, like in page extraction, patches should
-  be set to `false` (see the tiling sketch after this list).
-* `n_batch`: Number of batches at each iteration.
-* `n_classes`: Number of classes. In the case of binary classification this should be 2. In the case of reading_order it
-  should be set to 1. For layout detection, the number of distinct classes should be given.
-* `n_epochs`: Number of epochs.
-* `input_height`: This indicates the height of the model's input.
-* `input_width`: This indicates the width of the model's input.
-* `weight_decay`: Weight decay of the l2 regularization of the model layers.
-* `pretraining`: Set to `true` to load the pretrained weights of the ResNet50 encoder. The downloaded weights should be
-  saved in a folder named "pretrained_model" in the same directory as the "train.py" script.
-* `augmentation`: If you want to apply any kind of augmentation, this parameter should first be set to `true`.
-* `flip_aug`: If `true`, different types of flip will be applied to the image. The types of flip are given with the "flip_index" parameter.
-* `blur_aug`: If `true`, different types of blurring will be applied to the image. The types of blur are given with the "blur_k" parameter.
-* `scaling`: If `true`, scaling will be applied to the image. The scales are given with the "scales" parameter.
-* `degrading`: If `true`, degrading will be applied to the image. The amount of degrading is defined with the "degrade_scales" parameter.
-* `brightening`: If `true`, brightening will be applied to the image. The amount of brightening is defined with the "brightness" parameter.
-* `rotation_not_90`: If `true`, rotation (other than 90 degrees) will be applied to the image. The rotation angles are given with the "thetha" parameter.
-* `rotation`: If `true`, 90 degree rotation will be applied to the image.
-* `binarization`: If `true`, Otsu thresholding will be applied to augment the input data with binarized images.
-* `scaling_bluring`: If `true`, a combination of scaling and blurring will be applied to the image.
-* `scaling_binarization`: If `true`, a combination of scaling and binarization will be applied to the image.
-* `scaling_flip`: If `true`, a combination of scaling and flip will be applied to the image.
-* `flip_index`: Types of flip.
-* `blur_k`: Types of blurring.
-* `scales`: Scales for scaling.
-* `brightness`: The amounts of brightening.
-* `thetha`: Rotation angles.
-* `degrade_scales`: The amounts of degrading.
-* `continue_training`: If `true`, it means that you have already trained a model and you would like to continue the
-  training. In this case, the directory of the trained model must be provided with "dir_of_start_model", together with
-  an index for naming the models. For example, if you have already trained for 3 epochs, your last index is 2; if you
-  want to continue from model_1.h5, you can set `index_start` to 3 to start naming the new models at index 3.
-* `weighted_loss`: If `true`, weighted categorical_crossentropy will be applied as the loss function. Be careful: if
-  you set this to `true`, the parameter "is_loss_soft_dice" should be `false`.
-* `data_is_provided`: If you have already provided the input data, you can set this to `true`. Make sure that the train
-  and eval data are in "dir_output". Once training data is provided, it is resized, augmented and then written into the
-  train and eval sub-directories of "dir_output".
-* `dir_train`: This is the directory of "images" and "labels" ("dir_train" should include two sub-directories named
-  "images" and "labels") for the raw images and labels, i.e. data not yet prepared (resized and augmented) for training
-  the model. When we run this tool, these raw data will be transformed to the size needed by the model and written to
-  "dir_output" in the train and eval directories. Each of train and eval includes "images" and "labels" sub-directories.
-* `index_start`: Starting index for saved models in the case that "continue_training" is `true`.
-* `dir_of_start_model`: Directory containing the pretrained model to continue training from in the case that "continue_training" is `true`.
-* `transformer_num_patches_xy`: Number of patches for the vision transformer in x and y direction respectively.
-* `transformer_patchsize_x`: Patch size of the vision transformer patches in x direction.
-* `transformer_patchsize_y`: Patch size of the vision transformer patches in y direction.
-* `transformer_projection_dim`: Transformer projection dimension. Default value is 64.
-* `transformer_mlp_head_units`: Transformer Multilayer Perceptron (MLP) head units. Default value is [128, 64].
-* `transformer_layers`: Number of transformer layers. Default value is 8.
-* `transformer_num_heads`: Number of transformer heads. Default value is 4.
-* `transformer_cnn_first`: We have two types of vision transformers: in one type, a CNN is applied first, followed by a
-  transformer; in the other type, this order is reversed. If `transformer_cnn_first` is `true`, the CNN will be applied
-  before the transformer. Default value is true.
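-
-When `patches` is `true`, the image is covered with overlapping tiles of the model's input size; the same arithmetic
-appears in the tiling loop of `do_prediction` in the new `eynollah.py` later in this diff. A condensed, illustrative
-sketch (assuming the image is at least as large as the model input):
-
-```python
-def tile_grid(img_h, img_w, input_h, input_w, margin_percent=0.1):
-    """Yield (y0, y1, x0, x1) windows covering the image with overlap."""
-    margin = int(margin_percent * input_h)
-    stride_y = input_h - 2 * margin   # inner, non-overlapping part of each tile
-    stride_x = input_w - 2 * margin
-    ny = img_h // stride_y + (img_h % stride_y > 0)
-    nx = img_w // stride_x + (img_w % stride_x > 0)
-    for j in range(ny):
-        for i in range(nx):
-            y0, x0 = j * stride_y, i * stride_x
-            y1, x1 = y0 + input_h, x0 + input_w
-            if y1 > img_h:                # clamp the last row/column of tiles
-                y0, y1 = img_h - input_h, img_h
-            if x1 > img_w:
-                x0, x1 = img_w - input_w, img_w
-            yield y0, y1, x0, x1
-```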
-In the case of segmentation and enhancement, the train and evaluation directories should be as follows.
-
-The "dir_train" should be like this:
-
-```
-.
-└── train           # train directory
-    ├── images      # directory of images
-    └── labels      # directory of labels
-```
-
-And "dir_eval" should have the same structure as the train directory:
-
-```
-.
-└── eval            # evaluation directory
-    ├── images      # directory of images
-    └── labels      # directory of labels
-```
-
-After configuring the JSON file for segmentation or enhancement, training can be initiated by running the following
-command, similar to the process for classification and reading order:
-
-```sh
-eynollah-training train with config_segmentation.json
-```
-
-#### Binarization
-
-##### Ground truth format
-
-Labels for each pixel are identified by a number. So if you have a
-binary case, `n_classes` should be set to `2` and labels should
-be `0` and `1` for each class and pixel.
-
-In the case of multiclass, just set `n_classes` to the number of classes
-you have and produce the labels with pixel values from `0, 1, 2, ..., n_classes - 1`.
-The label format should be PNG.
-Our labels are 3-channel PNG images, but only the information of the first channel is used.
-If you have an image label with height and width of 10, for a binary case the first channel should look like this:
-
-    Label: [ [1, 0, 0, 1, 1, 0, 0, 1, 0, 0],
-             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
-             ...,
-             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
-             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ]
-
-This means that you have an image of shape `10*10*3`, and `pixel[0,0]` belongs
-to class `1` while `pixel[0,1]` belongs to class `0`.
-
-A small sample of training data for a binarization experiment can be found here:
-[Training data sample](https://qurator-data.de/~vahid.rezanezhad/binarization_training_data_sample/), which contains
-images and labels folders.
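-
-To make the label format concrete, here is a small illustrative script (assuming numpy and OpenCV) that writes such a
-label image; the class indices in the first channel are all the training script reads:
-
-```python
-import cv2
-import numpy as np
-
-# 10x10 binary label: 1 = foreground (e.g. text), 0 = background
-label = np.zeros((10, 10), dtype=np.uint8)
-label[0, [0, 3, 4, 7]] = 1
-
-# Store the class indices in a 3-channel PNG; only the first channel matters.
-label_3ch = np.repeat(label[:, :, np.newaxis], 3, axis=2)
-cv2.imwrite("example_label.png", label_3ch)
-```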
- - -An example config json file for binarization can be like this: - -```yaml -{ - "backbone_type" : "transformer", - "task": "binarization", - "n_classes" : 2, - "n_epochs" : 4, - "input_height" : 224, - "input_width" : 672, - "weight_decay" : 1e-6, - "n_batch" : 1, - "learning_rate": 1e-4, - "patches" : true, - "pretraining" : true, - "augmentation" : true, - "flip_aug" : false, - "blur_aug" : false, - "scaling" : true, - "degrading": false, - "brightening": false, - "binarization" : false, - "scaling_bluring" : false, - "scaling_binarization" : false, - "scaling_flip" : false, - "rotation": false, - "rotation_not_90": false, - "transformer_num_patches_xy": [7, 7], - "transformer_patchsize_x": 3, - "transformer_patchsize_y": 1, - "transformer_projection_dim": 192, - "transformer_mlp_head_units": [128, 64], - "transformer_layers": 8, - "transformer_num_heads": 4, - "transformer_cnn_first": true, - "blur_k" : ["blur","guass","median"], - "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], - "brightness" : [1.3, 1.5, 1.7, 2], - "degrade_scales" : [0.2, 0.4], - "flip_index" : [0, 1, -1], - "thetha" : [10, -10], - "continue_training": false, - "index_start" : 0, - "dir_of_start_model" : " ", - "weighted_loss": false, - "is_loss_soft_dice": false, - "data_is_provided": false, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" -} -``` - -#### Textline - -```yaml -{ - "backbone_type" : "nontransformer", - "task": "segmentation", - "n_classes" : 2, - "n_epochs" : 4, - "input_height" : 448, - "input_width" : 224, - "weight_decay" : 1e-6, - "n_batch" : 1, - "learning_rate": 1e-4, - "patches" : true, - "pretraining" : true, - "augmentation" : true, - "flip_aug" : false, - "blur_aug" : false, - "scaling" : true, - "degrading": false, - "brightening": false, - "binarization" : false, - "scaling_bluring" : false, - "scaling_binarization" : false, - "scaling_flip" : false, - "rotation": false, - "rotation_not_90": false, - "blur_k" : ["blur","guass","median"], - "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], - "brightness" : [1.3, 1.5, 1.7, 2], - "degrade_scales" : [0.2, 0.4], - "flip_index" : [0, 1, -1], - "thetha" : [10, -10], - "continue_training": false, - "index_start" : 0, - "dir_of_start_model" : " ", - "weighted_loss": false, - "is_loss_soft_dice": false, - "data_is_provided": false, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" -} -``` - -#### Enhancement - -```yaml -{ - "backbone_type" : "nontransformer", - "task": "enhancement", - "n_classes" : 3, - "n_epochs" : 4, - "input_height" : 448, - "input_width" : 224, - "weight_decay" : 1e-6, - "n_batch" : 4, - "learning_rate": 1e-4, - "patches" : true, - "pretraining" : true, - "augmentation" : true, - "flip_aug" : false, - "blur_aug" : false, - "scaling" : true, - "degrading": false, - "brightening": false, - "binarization" : false, - "scaling_bluring" : false, - "scaling_binarization" : false, - "scaling_flip" : false, - "rotation": false, - "rotation_not_90": false, - "blur_k" : ["blur","guass","median"], - "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], - "brightness" : [1.3, 1.5, 1.7, 2], - "degrade_scales" : [0.2, 0.4], - "flip_index" : [0, 1, -1], - "thetha" : [10, -10], - "continue_training": false, - "index_start" : 0, - "dir_of_start_model" : " ", - "weighted_loss": false, - "is_loss_soft_dice": false, - "data_is_provided": false, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" -} -``` - -It's important to mention that the value of n_classes for 
enhancement should be 3, as the model's output is a 3-channel image.
-
-#### Page extraction
-
-```yaml
-{
-    "backbone_type" : "nontransformer",
-    "task": "segmentation",
-    "n_classes" : 2,
-    "n_epochs" : 4,
-    "input_height" : 448,
-    "input_width" : 224,
-    "weight_decay" : 1e-6,
-    "n_batch" : 1,
-    "learning_rate": 1e-4,
-    "patches" : false,
-    "pretraining" : true,
-    "augmentation" : false,
-    "flip_aug" : false,
-    "blur_aug" : false,
-    "scaling" : true,
-    "degrading": false,
-    "brightening": false,
-    "binarization" : false,
-    "scaling_bluring" : false,
-    "scaling_binarization" : false,
-    "scaling_flip" : false,
-    "rotation": false,
-    "rotation_not_90": false,
-    "blur_k" : ["blur","guass","median"],
-    "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4],
-    "brightness" : [1.3, 1.5, 1.7, 2],
-    "degrade_scales" : [0.2, 0.4],
-    "flip_index" : [0, 1, -1],
-    "thetha" : [10, -10],
-    "continue_training": false,
-    "index_start" : 0,
-    "dir_of_start_model" : " ",
-    "weighted_loss": false,
-    "is_loss_soft_dice": false,
-    "data_is_provided": false,
-    "dir_train": "./train",
-    "dir_eval": "./eval",
-    "dir_output": "./output"
-}
-```
-
-For page segmentation (or print space or border segmentation), the model needs to view the input image in its
-entirety, hence the `patches` parameter should be set to `false`.
-
-#### Layout segmentation
-
-An example config json file for layout segmentation with 5 classes (including background) can look like this:
-
-```yaml
-{
-    "backbone_type" : "transformer",
-    "task": "segmentation",
-    "n_classes" : 5,
-    "n_epochs" : 4,
-    "input_height" : 448,
-    "input_width" : 224,
-    "weight_decay" : 1e-6,
-    "n_batch" : 1,
-    "learning_rate": 1e-4,
-    "patches" : true,
-    "pretraining" : true,
-    "augmentation" : true,
-    "flip_aug" : false,
-    "blur_aug" : false,
-    "scaling" : true,
-    "degrading": false,
-    "brightening": false,
-    "binarization" : false,
-    "scaling_bluring" : false,
-    "scaling_binarization" : false,
-    "scaling_flip" : false,
-    "rotation": false,
-    "rotation_not_90": false,
-    "transformer_num_patches_xy": [7, 14],
-    "transformer_patchsize_x": 1,
-    "transformer_patchsize_y": 1,
-    "transformer_projection_dim": 64,
-    "transformer_mlp_head_units": [128, 64],
-    "transformer_layers": 8,
-    "transformer_num_heads": 4,
-    "transformer_cnn_first": true,
-    "blur_k" : ["blur","guass","median"],
-    "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4],
-    "brightness" : [1.3, 1.5, 1.7, 2],
-    "degrade_scales" : [0.2, 0.4],
-    "flip_index" : [0, 1, -1],
-    "thetha" : [10, -10],
-    "continue_training": false,
-    "index_start" : 0,
-    "dir_of_start_model" : " ",
-    "weighted_loss": false,
-    "is_loss_soft_dice": false,
-    "data_is_provided": false,
-    "dir_train": "./train",
-    "dir_eval": "./eval",
-    "dir_output": "./output"
-}
-```
-
-## Inference with the trained model
-
-### classification
-
-For conducting inference with a trained model, you simply need to execute the following command line, specifying the
-directory of the model and the image on which to perform inference:
-
-```sh
-eynollah-training inference -m "model dir" -i "image"
-```
-
-This will simply return the class of the image.
-
-### machine based reading order
-
-To infer the reading order using a reading order model, we need a page XML file containing layout information but
-without the reading order. We simply need to provide the model directory, the XML file, and the output directory. The
-new XML file with the added reading order will be written to the output directory with the same name.
We need to run:
-
-```sh
-eynollah-training inference \
-  -m "model dir" \
-  -xml "page xml file" \
-  -o "output dir to write new xml with reading order"
-```
-
-### Segmentation (Textline, Binarization, Page extraction and layout) and enhancement
-
-For conducting inference with a trained model for segmentation and enhancement, you need to run the following command
-line:
-
-```sh
-eynollah-training inference \
-  -m "model dir" \
-  -i "image" \
-  -p \
-  -s "output image"
-```
-
-Note that in the case of page extraction, the `-p` flag is not needed.
-
-For segmentation or binarization tasks, if a ground truth (GT) label is available, the IoU evaluation metric can be
-calculated for the output. To do this, you need to provide the GT label using the argument `-gt`.
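-
-The IoU computed here is the standard per-class intersection-over-union. For reference, a minimal sketch of the metric
-(not the tool's own evaluation code):
-
-```python
-import numpy as np
-
-def mean_iou(prediction, gt, n_classes):
-    """Mean intersection-over-union between two integer label images."""
-    scores = []
-    for c in range(n_classes):
-        pred_c, gt_c = prediction == c, gt == c
-        union = np.logical_or(pred_c, gt_c).sum()
-        if union == 0:
-            continue  # class absent in both images
-        scores.append(np.logical_and(pred_c, gt_c).sum() / union)
-    return float(np.mean(scores))
-```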
diff --git a/docs/usage.md b/docs/usage.md
deleted file mode 100644
index da164de..0000000
--- a/docs/usage.md
+++ /dev/null
@@ -1,92 +0,0 @@
-# Usage documentation
-The command-line interface can be called like this:
-
-```sh
-eynollah \
-  -i <single image file> | -di <directory of image files> \
-  -o <output directory> \
-  -m <directory of models> \
-  [OPTIONS]
-```
-
-## Processing options
-The following options can be used to further configure the processing:
-
-| option            | description                                                                    |
-|-------------------|:-------------------------------------------------------------------------------|
-| `-fl`             | full layout analysis including all steps and segmentation classes             |
-| `-light`          | lighter and faster but simpler method for main region detection and deskewing |
-| `-tab`            | apply table detection                                                          |
-| `-ae`             | apply enhancement (the resulting image is saved to the output directory)      |
-| `-as`             | apply scaling                                                                  |
-| `-cl`             | apply contour detection for curved text lines instead of bounding boxes       |
-| `-ib`             | apply binarization (the resulting image is saved to the output directory)     |
-| `-ep`             | enable plotting (MUST always be used with `-sl`, `-sd`, `-sa`, `-si` or `-ae`) |
-| `-eoi`            | extract only images to output directory (other processing will not be done)   |
-| `-ho`             | ignore headers for reading order detection                                    |
-| `-si <directory>` | save image regions detected to this directory                                 |
-| `-sd <directory>` | save deskewed image to this directory                                          |
-| `-sl <directory>` | save layout prediction as plot to this directory                              |
-| `-sp <directory>` | save cropped page image to this directory                                     |
-| `-sa <directory>` | save all (plot, enhanced/binary image, layout) to this directory              |
-
-If no option is set, the tool performs detection of main regions (background, text, images, separators and marginals).
-
-### `--full-layout` vs `--no-full-layout`
-
-Here are the differences in elements detected depending on the `--full-layout`/`--no-full-layout` command line flags:
-
-|                          | `--full-layout` | `--no-full-layout` |
-|--------------------------|-----------------|--------------------|
-| reading order            | x               | x                  |
-| header regions           | x               | -                  |
-| text regions             | x               | x                  |
-| text regions / text line | x               | x                  |
-| drop-capitals            | x               | -                  |
-| marginals                | x               | x                  |
-| marginals / text line    | x               | x                  |
-| image region             | x               | x                  |
-
-## Use as OCR-D processor
-Eynollah ships with a CLI interface to be used as an [OCR-D](https://ocr-d.de) processor, described in
-[`ocrd-tool.json`](https://github.com/qurator-spk/eynollah/tree/main/src/eynollah/ocrd-tool.json).
-
-The source image file group with (preferably) RGB images should be used as input for Eynollah like this:
-
-```
-ocrd-eynollah-segment -I OCR-D-IMG -O SEG-LINE -P models
-```
-
-Any image referenced by `@imageFilename` in PAGE-XML is passed on directly to Eynollah as a processor, so that e.g.
-
-```
-ocrd-eynollah-segment -I OCR-D-IMG-BIN -O SEG-LINE -P models
-```
-
-uses the original (RGB) image despite any binarization that may have occurred in previous OCR-D processing steps.
-
-## Use with Docker
-TODO
-
-## Hints
-* The best output quality is produced when RGB images are used as input rather than greyscale or binarized images.
-* If none of the parameters is set to `true`, the tool will perform a layout detection of main regions (background,
-text, images, separators and marginals). An advantage of this tool is that it tries to extract main text regions
-separately as much as possible.
-* If you set the `-ae` (**a**llow image **e**nhancement) parameter to `true`, the tool will first check the ppi
-(pixel-per-inch) of the image and, when it is less than 300, resize the image; only then will image enhancement occur.
-Image enhancement can also take place without this option, but by setting this option to `true`, the layout xml data
-(e.g. coordinates) will be based on the resized and enhanced image instead of the original image.
-* For some documents, while the quality is good, their scale is very large, and the performance of the tool decreases.
-In such cases you can set `-as` (**a**llow **s**caling) to `true`. With this option enabled, the tool will try to
-rescale the image and only then will the layout detection process begin.
-* If you care about drop capitals (initials) and headings, you can set `-fl` (**f**ull **l**ayout) to `true`. With this
-setting, the tool can currently distinguish 7 document layout classes/elements.
-* In cases where the document includes curved headers or curved lines, rectangular bounding boxes for textlines will not
-be a great option. In such cases it is strongly recommended to set the flag `-cl` (**c**urved **l**ines) to `true` to
-find the contours of curved lines instead of rectangular bounding boxes. Be advised that enabling this option increases
-the processing time of the tool.
-* To crop and save image regions inside the document, set the parameter `-si` (**s**ave **i**mages) to `true` and provide
-a directory path to store the extracted images.
-* To extract only images from a document, set the parameter `-eoi` (**e**xtract **o**nly **i**mages). Choosing this
-option disables any other processing. To save the cropped images add `-ep` and `-si`.
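-
-Eynollah can also be driven from Python. A short sketch following the new `qurator/eynollah/cli.py` added later in
-this diff (constructor arguments are taken from there; the paths are placeholders):
-
-```python
-from qurator.eynollah.eynollah import Eynollah
-
-eynollah = Eynollah(
-    dir_models="models_eynollah",   # placeholder model directory
-    image_filename="page.tif",      # placeholder input image
-    dir_out="output",               # placeholder output directory
-)
-pcgts = eynollah.run()                  # returns the PAGE-XML document
-eynollah.writer.write_pagexml(pcgts)    # writes it to dir_out
-```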
diff --git a/ocrd-tool.json b/ocrd-tool.json index 711a192..5c48493 120000 --- a/ocrd-tool.json +++ b/ocrd-tool.json @@ -1 +1 @@ -src/eynollah/ocrd-tool.json \ No newline at end of file +qurator/eynollah/ocrd-tool.json \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml deleted file mode 100644 index e7744a1..0000000 --- a/pyproject.toml +++ /dev/null @@ -1,78 +0,0 @@ -[build-system] -requires = ["setuptools>=61.0", "wheel", "setuptools-ocrd"] - -[project] -name = "eynollah" -authors = [ - {name = "Vahid Rezanezhad"}, - {name = "Staatsbibliothek zu Berlin - Preußischer Kulturbesitz"}, -] -description = "Document Layout Analysis" -readme = "README.md" -license.file = "LICENSE" -requires-python = ">=3.8" -keywords = ["document layout analysis", "image segmentation"] - -dynamic = [ - "dependencies", - "optional-dependencies", - "version" -] - -classifiers = [ - "Development Status :: 4 - Beta", - "Environment :: Console", - "Intended Audience :: Science/Research", - "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3 :: Only", - "Topic :: Scientific/Engineering :: Image Processing", -] - -[project.scripts] -eynollah = "eynollah.cli:main" -eynollah-training = "eynollah.training.cli:main" -ocrd-eynollah-segment = "eynollah.ocrd_cli:main" -ocrd-sbb-binarize = "eynollah.ocrd_cli_binarization:main" - -[project.urls] -Homepage = "https://github.com/qurator-spk/eynollah" -Repository = "https://github.com/qurator-spk/eynollah.git" - -[tool.setuptools.dynamic] -dependencies = {file = ["requirements.txt"]} -optional-dependencies.test = {file = ["requirements-test.txt"]} -optional-dependencies.OCR = {file = ["requirements-ocr.txt"]} -optional-dependencies.plotting = {file = ["requirements-plotting.txt"]} -optional-dependencies.training = {file = ["requirements-training.txt"]} - -[tool.setuptools.packages.find] -where = ["src"] - -[tool.setuptools.package-data] -"*" = ["*.json", '*.yml', '*.xml', '*.xsd', '*.ttf'] - -[tool.coverage.run] -branch = true -source = ["eynollah"] - -[tool.ruff] -line-length = 120 -# TODO: Reenable and fix after release v0.6.0 -exclude = ['src/eynollah/training'] - -[tool.ruff.lint] -ignore = [ -# disable unused imports -"F401", -# disable import order -"E402", -# disable unused variables -"F841", -# disable bare except -"E722", -] - -[tool.ruff.format] -quote-style = "preserve" - diff --git a/train/.gitkeep b/qurator/.gitkeep similarity index 100% rename from train/.gitkeep rename to qurator/.gitkeep diff --git a/qurator/__init__.py b/qurator/__init__.py new file mode 100644 index 0000000..5284146 --- /dev/null +++ b/qurator/__init__.py @@ -0,0 +1 @@ +__import__("pkg_resources").declare_namespace(__name__) diff --git a/qurator/eynollah/__init__.py b/qurator/eynollah/__init__.py new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/qurator/eynollah/__init__.py @@ -0,0 +1 @@ + diff --git a/qurator/eynollah/cli.py b/qurator/eynollah/cli.py new file mode 100644 index 0000000..a895b0d --- /dev/null +++ b/qurator/eynollah/cli.py @@ -0,0 +1,146 @@ +import sys +import click +from ocrd_utils import initLogging, setOverrideLogLevel +from qurator.eynollah.eynollah import Eynollah + + +@click.command() +@click.option( + "--image", + "-i", + help="image filename", + type=click.Path(exists=True, dir_okay=False), + required=True, +) +@click.option( + "--out", + "-o", + help="directory to write output xml data", + type=click.Path(exists=True, file_okay=False), + required=True, +) 
+@click.option(
+    "--model",
+    "-m",
+    help="directory of models",
+    type=click.Path(exists=True, file_okay=False),
+)
+@click.option(
+    "--save_images",
+    "-si",
+    help="if a directory is given, images in documents will be cropped and saved there",
+    type=click.Path(exists=True, file_okay=False),
+)
+@click.option(
+    "--save_layout",
+    "-sl",
+    help="if a directory is given, plot of layout will be saved there",
+    type=click.Path(exists=True, file_okay=False),
+)
+@click.option(
+    "--save_deskewed",
+    "-sd",
+    help="if a directory is given, deskewed image will be saved there",
+    type=click.Path(exists=True, file_okay=False),
+)
+@click.option(
+    "--save_all",
+    "-sa",
+    help="if a directory is given, all plots needed for documentation will be saved there",
+    type=click.Path(exists=True, file_okay=False),
+)
+@click.option(
+    "--enable-plotting/--disable-plotting",
+    "-ep/-noep",
+    is_flag=True,
+    help="If set, will plot intermediary files and images",
+)
+@click.option(
+    "--allow-enhancement/--no-allow-enhancement",
+    "-ae/-noae",
+    is_flag=True,
+    help="if set to true, the tool checks whether the input image needs resizing and enhancement; if so, the resized and enhanced image and the corresponding layout data will be written to the output directory",
+)
+@click.option(
+    "--curved-line/--no-curvedline",
+    "-cl/-nocl",
+    is_flag=True,
+    help="if set to true, the tool will try to return the contour of each textline instead of its rectangular bounding box. Note that with this option the tool needs more processing time.",
+)
+@click.option(
+    "--full-layout/--no-full-layout",
+    "-fl/-nofl",
+    is_flag=True,
+    help="if set to true, the tool will try to return all elements of the layout.",
+)
+@click.option(
+    "--input_binary/--input-RGB",
+    "-ib/-irgb",
+    is_flag=True,
+    help="in general, eynollah uses RGB as input, but if the input document is very dark, bright or otherwise difficult, you can turn binarized input on. This does not mean that you have to provide a binary image; rather, the tool itself will binarize the RGB input document.",
+)
+@click.option(
+    "--allow_scaling/--no-allow-scaling",
+    "-as/-noas",
+    is_flag=True,
+    help="if set to true, the tool checks the scale of the image and, if needed, rescales it to perform better layout detection",
+)
+@click.option(
+    "--headers-off/--headers-on",
+    "-ho/-noho",
+    is_flag=True,
+    help="if set to true, the tool ignores the role of headers in reading order detection",
+)
+@click.option(
+    "--log-level",
+    "-l",
+    type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']),
+    help="Override log level globally to this",
+)
+def main(
+    image,
+    out,
+    model,
+    save_images,
+    save_layout,
+    save_deskewed,
+    save_all,
+    enable_plotting,
+    allow_enhancement,
+    curved_line,
+    full_layout,
+    input_binary,
+    allow_scaling,
+    headers_off,
+    log_level
+):
+    if log_level:
+        setOverrideLogLevel(log_level)
+    initLogging()
+    if not enable_plotting and (save_layout or save_deskewed or save_all or save_images):
+        print("Error: You used one of -sl, -sd, -sa or -si but did not enable plotting with -ep")
+        sys.exit(1)
+    elif enable_plotting and not (save_layout or save_deskewed or save_all or save_images):
+        print("Error: You used -ep to enable plotting but set none of -sl, -sd, -sa or -si")
+        sys.exit(1)
+    eynollah = Eynollah(
+        image_filename=image,
+        dir_out=out,
+        dir_models=model,
+        dir_of_cropped_images=save_images,
+        dir_of_layout=save_layout,
+        dir_of_deskewed=save_deskewed,
+        dir_of_all=save_all,
+        enable_plotting=enable_plotting,
+        allow_enhancement=allow_enhancement,
+        curved_line=curved_line,
+        full_layout=full_layout,
+        input_binary=input_binary,
+        allow_scaling=allow_scaling,
+        headers_off=headers_off,
+    )
+    pcgts = eynollah.run()
+    eynollah.writer.write_pagexml(pcgts)
+
+if __name__ == "__main__":
+    main()
diff --git a/qurator/eynollah/eynollah.py b/qurator/eynollah/eynollah.py
new file mode 100644
index 0000000..2b8b97e
--- /dev/null
+++ b/qurator/eynollah/eynollah.py
@@ -0,0 +1,2086 @@
+# pylint: disable=no-member,invalid-name,line-too-long,missing-function-docstring,missing-class-docstring,too-many-branches
+# pylint: disable=too-many-locals,wrong-import-position,too-many-lines,too-many-statements,chained-comparison,fixme,broad-except,c-extension-no-member
+# pylint: disable=too-many-public-methods,too-many-arguments,too-many-instance-attributes,too-many-public-methods,
+# pylint: disable=consider-using-enumerate
+"""
+tool to perform document layout analysis (segmentation) on document images
+"""
+
+import math
+import os
+import sys
+import time
+import warnings
+from pathlib import Path
+from multiprocessing import Process, Queue, cpu_count
+import gc
+from ocrd_utils import getLogger
+import cv2
+import numpy as np
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
+stderr = sys.stderr
+sys.stderr = open(os.devnull, "w")
+from keras import backend as K
+from keras.models import load_model
+sys.stderr = stderr
+import tensorflow as tf
+tf.get_logger().setLevel("ERROR")
+warnings.filterwarnings("ignore")
+
+
+from .utils.contour import (
+    filter_contours_area_of_image,
+    find_contours_mean_y_diff,
+    find_new_features_of_contours,
+    get_text_region_boxes_by_given_contours,
+    get_textregion_contours_in_org_image,
+    return_contours_of_image,
+    return_contours_of_interested_region,
+    return_contours_of_interested_region_by_min_size,
+    return_contours_of_interested_textline,
+    return_parent_contours,
+)
+from .utils.rotate import (
+
rotate_image, + rotation_not_90_func, + rotation_not_90_func_full_layout) +from .utils.separate_lines import ( + textline_contours_postprocessing, + separate_lines_new2, + return_deskew_slop) +from .utils.drop_capitals import ( + adhere_drop_capital_region_into_corresponding_textline, + filter_small_drop_capitals_from_no_patch_layout) +from .utils.marginals import get_marginals +from .utils.resize import resize_image +from .utils import ( + boosting_headers_by_longshot_region_segmentation, + crop_image_inside_box, + find_num_col, + otsu_copy_binary, + put_drop_out_from_only_drop_model, + putt_bb_of_drop_capitals_of_model_in_patches_in_layout, + check_any_text_region_in_model_one_is_main_or_header, + small_textlines_to_parent_adherence2, + order_of_regions, + find_number_of_columns_in_document, + return_boxes_of_images_by_order_of_reading_new) +from .utils.pil_cv2 import check_dpi, pil2cv +from .utils.xml import order_and_id_of_texts +from .plot import EynollahPlotter +from .writer import EynollahXmlWriter + +SLOPE_THRESHOLD = 0.13 +RATIO_OF_TWO_MODEL_THRESHOLD = 95.50 #98.45: +DPI_THRESHOLD = 298 +MAX_SLOPE = 999 +KERNEL = np.ones((5, 5), np.uint8) + +class Eynollah: + def __init__( + self, + dir_models, + image_filename, + image_pil=None, + image_filename_stem=None, + dir_out=None, + dir_of_cropped_images=None, + dir_of_layout=None, + dir_of_deskewed=None, + dir_of_all=None, + enable_plotting=False, + allow_enhancement=False, + curved_line=False, + full_layout=False, + input_binary=False, + allow_scaling=False, + headers_off=False, + override_dpi=None, + logger=None, + pcgts=None, + ): + if image_pil: + self._imgs = self._cache_images(image_pil=image_pil) + else: + self._imgs = self._cache_images(image_filename=image_filename) + if override_dpi: + self.dpi = override_dpi + self.image_filename = image_filename + self.dir_out = dir_out + self.allow_enhancement = allow_enhancement + self.curved_line = curved_line + self.full_layout = full_layout + self.input_binary = input_binary + self.allow_scaling = allow_scaling + self.headers_off = headers_off + self.plotter = None if not enable_plotting else EynollahPlotter( + dir_of_all=dir_of_all, + dir_of_deskewed=dir_of_deskewed, + dir_of_cropped_images=dir_of_cropped_images, + dir_of_layout=dir_of_layout, + image_filename_stem=Path(Path(image_filename).name).stem) + self.writer = EynollahXmlWriter( + dir_out=self.dir_out, + image_filename=self.image_filename, + curved_line=self.curved_line, + pcgts=pcgts) + self.logger = logger if logger else getLogger('eynollah') + self.dir_models = dir_models + + self.model_dir_of_enhancement = dir_models + "/model_enhancement.h5" + self.model_dir_of_binarization = dir_models + "/model_bin_sbb_ens.h5" + self.model_dir_of_col_classifier = dir_models + "/model_scale_classifier.h5" + self.model_region_dir_p = dir_models + "/model_main_covid19_lr5-5_scale_1_1_great.h5" + self.model_region_dir_p2 = dir_models + "/model_main_home_corona3_rot.h5" + self.model_region_dir_fully_np = dir_models + "/model_no_patches_class0_30eopch.h5" + self.model_region_dir_fully = dir_models + "/model_3up_new_good_no_augmentation.h5" + self.model_page_dir = dir_models + "/model_page_mixed_best.h5" + self.model_region_dir_p_ens = dir_models + "/model_ensemble_s.h5" + self.model_textline_dir = dir_models + "/model_textline_newspapers.h5" + + def _cache_images(self, image_filename=None, image_pil=None): + ret = {} + if image_filename: + ret['img'] = cv2.imread(image_filename) + self.dpi = check_dpi(image_filename) + else: + ret['img'] = 
pil2cv(image_pil) + self.dpi = check_dpi(image_pil) + ret['img_grayscale'] = cv2.cvtColor(ret['img'], cv2.COLOR_BGR2GRAY) + for prefix in ('', '_grayscale'): + ret[f'img{prefix}_uint8'] = ret[f'img{prefix}'].astype(np.uint8) + return ret + + def imread(self, grayscale=False, uint8=True): + key = 'img' + if grayscale: + key += '_grayscale' + if uint8: + key += '_uint8' + return self._imgs[key].copy() + + def isNaN(self, num): + return num != num + + + def predict_enhancement(self, img): + self.logger.debug("enter predict_enhancement") + model_enhancement, session_enhancement = self.start_new_session_and_model(self.model_dir_of_enhancement) + + img_height_model = model_enhancement.layers[len(model_enhancement.layers) - 1].output_shape[1] + img_width_model = model_enhancement.layers[len(model_enhancement.layers) - 1].output_shape[2] + if img.shape[0] < img_height_model: + img = cv2.resize(img, (img.shape[1], img_width_model), interpolation=cv2.INTER_NEAREST) + + if img.shape[1] < img_width_model: + img = cv2.resize(img, (img_height_model, img.shape[0]), interpolation=cv2.INTER_NEAREST) + margin = int(0 * img_width_model) + width_mid = img_width_model - 2 * margin + height_mid = img_height_model - 2 * margin + img = img / float(255.0) + + img_h = img.shape[0] + img_w = img.shape[1] + + prediction_true = np.zeros((img_h, img_w, 3)) + nxf = img_w / float(width_mid) + nyf = img_h / float(height_mid) + + nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf) + nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf) + + for i in range(nxf): + for j in range(nyf): + if i == 0: + index_x_d = i * width_mid + index_x_u = index_x_d + img_width_model + else: + index_x_d = i * width_mid + index_x_u = index_x_d + img_width_model + if j == 0: + index_y_d = j * height_mid + index_y_u = index_y_d + img_height_model + else: + index_y_d = j * height_mid + index_y_u = index_y_d + img_height_model + + if index_x_u > img_w: + index_x_u = img_w + index_x_d = img_w - img_width_model + if index_y_u > img_h: + index_y_u = img_h + index_y_d = img_h - img_height_model + + img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] + label_p_pred = model_enhancement.predict(img_patch.reshape(1, img_patch.shape[0], img_patch.shape[1], img_patch.shape[2])) + + seg = label_p_pred[0, :, :, :] + seg = seg * 255 + + if i == 0 and j == 0: + seg = seg[0 : seg.shape[0] - margin, 0 : seg.shape[1] - margin] + prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg + elif i == nxf - 1 and j == nyf - 1: + seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - 0] + prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - 0, :] = seg + elif i == 0 and j == nyf - 1: + seg = seg[margin : seg.shape[0] - 0, 0 : seg.shape[1] - margin] + prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + 0 : index_x_u - margin, :] = seg + elif i == nxf - 1 and j == 0: + seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - 0] + prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg + elif i == 0 and j != 0 and j != nyf - 1: + seg = seg[margin : seg.shape[0] - margin, 0 : seg.shape[1] - margin] + prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg + elif i == nxf - 1 and j != 0 and j != nyf - 1: + seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - 0] + prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg + elif i != 0 
and i != nxf - 1 and j == 0: + seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - margin] + prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg + elif i != 0 and i != nxf - 1 and j == nyf - 1: + seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - margin] + prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - margin, :] = seg + else: + seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - margin] + prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg + + prediction_true = prediction_true.astype(int) + session_enhancement.close() + del model_enhancement + del session_enhancement + gc.collect() + + return prediction_true + + def calculate_width_height_by_columns(self, img, num_col, width_early, label_p_pred): + self.logger.debug("enter calculate_width_height_by_columns") + if num_col == 1 and width_early < 1100: + img_w_new = 2000 + img_h_new = int(img.shape[0] / float(img.shape[1]) * 2000) + elif num_col == 1 and width_early >= 2500: + img_w_new = 2000 + img_h_new = int(img.shape[0] / float(img.shape[1]) * 2000) + elif num_col == 1 and width_early >= 1100 and width_early < 2500: + img_w_new = width_early + img_h_new = int(img.shape[0] / float(img.shape[1]) * width_early) + elif num_col == 2 and width_early < 2000: + img_w_new = 2400 + img_h_new = int(img.shape[0] / float(img.shape[1]) * 2400) + elif num_col == 2 and width_early >= 3500: + img_w_new = 2400 + img_h_new = int(img.shape[0] / float(img.shape[1]) * 2400) + elif num_col == 2 and width_early >= 2000 and width_early < 3500: + img_w_new = width_early + img_h_new = int(img.shape[0] / float(img.shape[1]) * width_early) + elif num_col == 3 and width_early < 2000: + img_w_new = 3000 + img_h_new = int(img.shape[0] / float(img.shape[1]) * 3000) + elif num_col == 3 and width_early >= 4000: + img_w_new = 3000 + img_h_new = int(img.shape[0] / float(img.shape[1]) * 3000) + elif num_col == 3 and width_early >= 2000 and width_early < 4000: + img_w_new = width_early + img_h_new = int(img.shape[0] / float(img.shape[1]) * width_early) + elif num_col == 4 and width_early < 2500: + img_w_new = 4000 + img_h_new = int(img.shape[0] / float(img.shape[1]) * 4000) + elif num_col == 4 and width_early >= 5000: + img_w_new = 4000 + img_h_new = int(img.shape[0] / float(img.shape[1]) * 4000) + elif num_col == 4 and width_early >= 2500 and width_early < 5000: + img_w_new = width_early + img_h_new = int(img.shape[0] / float(img.shape[1]) * width_early) + elif num_col == 5 and width_early < 3700: + img_w_new = 5000 + img_h_new = int(img.shape[0] / float(img.shape[1]) * 5000) + elif num_col == 5 and width_early >= 7000: + img_w_new = 5000 + img_h_new = int(img.shape[0] / float(img.shape[1]) * 5000) + elif num_col == 5 and width_early >= 3700 and width_early < 7000: + img_w_new = width_early + img_h_new = int(img.shape[0] / float(img.shape[1]) * width_early) + elif num_col == 6 and width_early < 4500: + img_w_new = 6500 # 5400 + img_h_new = int(img.shape[0] / float(img.shape[1]) * 6500) + else: + img_w_new = width_early + img_h_new = int(img.shape[0] / float(img.shape[1]) * width_early) + + if label_p_pred[0][int(num_col - 1)] < 0.9 and img_w_new < width_early: + img_new = np.copy(img) + num_column_is_classified = False + else: + img_new = resize_image(img, img_h_new, img_w_new) + num_column_is_classified = True + + return img_new, num_column_is_classified + + def 
resize_image_with_column_classifier(self, is_image_enhanced, img_bin): + self.logger.debug("enter resize_image_with_column_classifier") + if self.input_binary: + img = np.copy(img_bin) + else: + img = self.imread() + + _, page_coord = self.early_page_for_num_of_column_classification(img) + model_num_classifier, session_col_classifier = self.start_new_session_and_model(self.model_dir_of_col_classifier) + if self.input_binary: + img_in = np.copy(img) + img_in = img_in / 255.0 + width_early = img_in.shape[1] + img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST) + img_in = img_in.reshape(1, 448, 448, 3) + else: + img_1ch = self.imread(grayscale=True, uint8=False) + width_early = img_1ch.shape[1] + img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + + # plt.imshow(img_1ch) + # plt.show() + img_1ch = img_1ch / 255.0 + + img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST) + + img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3)) + img_in[0, :, :, 0] = img_1ch[:, :] + img_in[0, :, :, 1] = img_1ch[:, :] + img_in[0, :, :, 2] = img_1ch[:, :] + + label_p_pred = model_num_classifier.predict(img_in) + num_col = np.argmax(label_p_pred[0]) + 1 + + self.logger.info("Found %s columns (%s)", num_col, label_p_pred) + + session_col_classifier.close() + + del model_num_classifier + del session_col_classifier + + K.clear_session() + gc.collect() + + + + img_new, _ = self.calculate_width_height_by_columns(img, num_col, width_early, label_p_pred) + + if img_new.shape[1] > img.shape[1]: + img_new = self.predict_enhancement(img_new) + is_image_enhanced = True + + return img, img_new, is_image_enhanced + + def resize_and_enhance_image_with_column_classifier(self): + self.logger.debug("enter resize_and_enhance_image_with_column_classifier") + dpi = self.dpi + self.logger.info("Detected %s DPI", dpi) + if self.input_binary: + img = self.imread() + model_bin, session_bin = self.start_new_session_and_model(self.model_dir_of_binarization) + prediction_bin = self.do_prediction(True, img, model_bin) + + prediction_bin=prediction_bin[:,:,0] + prediction_bin = (prediction_bin[:,:]==0)*1 + prediction_bin = prediction_bin*255 + + prediction_bin =np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) + + session_bin.close() + del model_bin + del session_bin + gc.collect() + + prediction_bin = prediction_bin.astype(np.uint8) + img= np.copy(prediction_bin) + img_bin = np.copy(prediction_bin) + else: + img = self.imread() + img_bin = None + + _, page_coord = self.early_page_for_num_of_column_classification(img_bin) + model_num_classifier, session_col_classifier = self.start_new_session_and_model(self.model_dir_of_col_classifier) + + if self.input_binary: + img_in = np.copy(img) + width_early = img_in.shape[1] + img_in = img_in / 255.0 + img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST) + img_in = img_in.reshape(1, 448, 448, 3) + else: + img_1ch = self.imread(grayscale=True) + width_early = img_1ch.shape[1] + img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + + img_1ch = img_1ch / 255.0 + img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST) + img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3)) + img_in[0, :, :, 0] = img_1ch[:, :] + img_in[0, :, :, 1] = img_1ch[:, :] + img_in[0, :, :, 2] = img_1ch[:, :] + + + + label_p_pred = model_num_classifier.predict(img_in) + num_col = np.argmax(label_p_pred[0]) + 1 + self.logger.info("Found %s columns (%s)", num_col, 
label_p_pred) + session_col_classifier.close() + K.clear_session() + + if dpi < DPI_THRESHOLD: + img_new, num_column_is_classified = self.calculate_width_height_by_columns(img, num_col, width_early, label_p_pred) + image_res = self.predict_enhancement(img_new) + is_image_enhanced = True + else: + is_image_enhanced = False + num_column_is_classified = True + image_res = np.copy(img) + + session_col_classifier.close() + + + self.logger.debug("exit resize_and_enhance_image_with_column_classifier") + return is_image_enhanced, img, image_res, num_col, num_column_is_classified, img_bin + + # pylint: disable=attribute-defined-outside-init + def get_image_and_scales(self, img_org, img_res, scale): + self.logger.debug("enter get_image_and_scales") + self.image = np.copy(img_res) + self.image_org = np.copy(img_org) + self.height_org = self.image.shape[0] + self.width_org = self.image.shape[1] + + self.img_hight_int = int(self.image.shape[0] * scale) + self.img_width_int = int(self.image.shape[1] * scale) + self.scale_y = self.img_hight_int / float(self.image.shape[0]) + self.scale_x = self.img_width_int / float(self.image.shape[1]) + + self.image = resize_image(self.image, self.img_hight_int, self.img_width_int) + + # Also set for the plotter + if self.plotter: + self.plotter.image_org = self.image_org + self.plotter.scale_y = self.scale_y + self.plotter.scale_x = self.scale_x + # Also set for the writer + self.writer.image_org = self.image_org + self.writer.scale_y = self.scale_y + self.writer.scale_x = self.scale_x + self.writer.height_org = self.height_org + self.writer.width_org = self.width_org + + def get_image_and_scales_after_enhancing(self, img_org, img_res): + self.logger.debug("enter get_image_and_scales_after_enhancing") + self.image = np.copy(img_res) + self.image = self.image.astype(np.uint8) + self.image_org = np.copy(img_org) + self.height_org = self.image_org.shape[0] + self.width_org = self.image_org.shape[1] + + self.scale_y = img_res.shape[0] / float(self.image_org.shape[0]) + self.scale_x = img_res.shape[1] / float(self.image_org.shape[1]) + + # Also set for the plotter + if self.plotter: + self.plotter.image_org = self.image_org + self.plotter.scale_y = self.scale_y + self.plotter.scale_x = self.scale_x + # Also set for the writer + self.writer.image_org = self.image_org + self.writer.scale_y = self.scale_y + self.writer.scale_x = self.scale_x + self.writer.height_org = self.height_org + self.writer.width_org = self.width_org + + def start_new_session_and_model_old(self, model_dir): + self.logger.debug("enter start_new_session_and_model (model_dir=%s)", model_dir) + config = tf.ConfigProto() + config.gpu_options.allow_growth = True + + session = tf.InteractiveSession() + model = load_model(model_dir, compile=False) + + return model, session + + + def start_new_session_and_model(self, model_dir): + self.logger.debug("enter start_new_session_and_model (model_dir=%s)", model_dir) + gpu_options = tf.compat.v1.GPUOptions(allow_growth=True) + #gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=7.7, allow_growth=True) + session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options)) + model = load_model(model_dir, compile=False) + + return model, session + + def do_prediction(self, patches, img, model, marginal_of_patch_percent=0.1): + self.logger.debug("enter do_prediction") + + img_height_model = model.layers[len(model.layers) - 1].output_shape[1] + img_width_model = model.layers[len(model.layers) - 1].output_shape[2] + + if not patches: + 
img_h_page = img.shape[0] + img_w_page = img.shape[1] + img = img / float(255.0) + img = resize_image(img, img_height_model, img_width_model) + + label_p_pred = model.predict(img.reshape(1, img.shape[0], img.shape[1], img.shape[2])) + + seg = np.argmax(label_p_pred, axis=3)[0] + seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2) + prediction_true = resize_image(seg_color, img_h_page, img_w_page) + prediction_true = prediction_true.astype(np.uint8) + + + else: + if img.shape[0] < img_height_model: + img = resize_image(img, img_height_model, img.shape[1]) + + if img.shape[1] < img_width_model: + img = resize_image(img, img.shape[0], img_width_model) + + self.logger.info("Image dimensions: %sx%s", img_height_model, img_width_model) + margin = int(marginal_of_patch_percent * img_height_model) + width_mid = img_width_model - 2 * margin + height_mid = img_height_model - 2 * margin + img = img / float(255.0) + img = img.astype(np.float16) + img_h = img.shape[0] + img_w = img.shape[1] + prediction_true = np.zeros((img_h, img_w, 3)) + mask_true = np.zeros((img_h, img_w)) + nxf = img_w / float(width_mid) + nyf = img_h / float(height_mid) + nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf) + nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf) + + for i in range(nxf): + for j in range(nyf): + if i == 0: + index_x_d = i * width_mid + index_x_u = index_x_d + img_width_model + else: + index_x_d = i * width_mid + index_x_u = index_x_d + img_width_model + if j == 0: + index_y_d = j * height_mid + index_y_u = index_y_d + img_height_model + else: + index_y_d = j * height_mid + index_y_u = index_y_d + img_height_model + if index_x_u > img_w: + index_x_u = img_w + index_x_d = img_w - img_width_model + if index_y_u > img_h: + index_y_u = img_h + index_y_d = img_h - img_height_model + + img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] + label_p_pred = model.predict(img_patch.reshape(1, img_patch.shape[0], img_patch.shape[1], img_patch.shape[2])) + seg = np.argmax(label_p_pred, axis=3)[0] + seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2) + + if i == 0 and j == 0: + seg_color = seg_color[0 : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :] + seg = seg[0 : seg.shape[0] - margin, 0 : seg.shape[1] - margin] + mask_true[index_y_d + 0 : index_y_u - margin, index_x_d + 0 : index_x_u - margin] = seg + prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg_color + elif i == nxf - 1 and j == nyf - 1: + seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - 0, :] + seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - 0] + mask_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - 0] = seg + prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - 0, :] = seg_color + elif i == 0 and j == nyf - 1: + seg_color = seg_color[margin : seg_color.shape[0] - 0, 0 : seg_color.shape[1] - margin, :] + seg = seg[margin : seg.shape[0] - 0, 0 : seg.shape[1] - margin] + mask_true[index_y_d + margin : index_y_u - 0, index_x_d + 0 : index_x_u - margin] = seg + prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + 0 : index_x_u - margin, :] = seg_color + elif i == nxf - 1 and j == 0: + seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :] + seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - 0] + mask_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - 0] = seg + prediction_true[index_y_d + 0 : 
index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg_color + elif i == 0 and j != 0 and j != nyf - 1: + seg_color = seg_color[margin : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :] + seg = seg[margin : seg.shape[0] - margin, 0 : seg.shape[1] - margin] + mask_true[index_y_d + margin : index_y_u - margin, index_x_d + 0 : index_x_u - margin] = seg + prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg_color + elif i == nxf - 1 and j != 0 and j != nyf - 1: + seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :] + seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - 0] + mask_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - 0] = seg + prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg_color + elif i != 0 and i != nxf - 1 and j == 0: + seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :] + seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - margin] + mask_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - margin] = seg + prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg_color + elif i != 0 and i != nxf - 1 and j == nyf - 1: + seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - margin, :] + seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - margin] + mask_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - margin] = seg + prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - margin, :] = seg_color + else: + seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :] + seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - margin] + mask_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - margin] = seg + prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg_color + + prediction_true = prediction_true.astype(np.uint8) + del model + gc.collect() + return prediction_true + + def early_page_for_num_of_column_classification(self,img_bin): + self.logger.debug("enter early_page_for_num_of_column_classification") + if self.input_binary: + img =np.copy(img_bin) + img = img.astype(np.uint8) + else: + img = self.imread() + model_page, session_page = self.start_new_session_and_model(self.model_page_dir) + img = cv2.GaussianBlur(img, (5, 5), 0) + + img_page_prediction = self.do_prediction(False, img, model_page) + + imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY) + _, thresh = cv2.threshold(imgray, 0, 255, 0) + thresh = cv2.dilate(thresh, KERNEL, iterations=3) + contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + if len(contours)>0: + cnt_size = np.array([cv2.contourArea(contours[j]) for j in range(len(contours))]) + cnt = contours[np.argmax(cnt_size)] + x, y, w, h = cv2.boundingRect(cnt) + box = [x, y, w, h] + else: + box = [0, 0, img.shape[1], img.shape[0]] + croped_page, page_coord = crop_image_inside_box(box, img) + session_page.close() + del model_page + del session_page + gc.collect() + K.clear_session() + self.logger.debug("exit early_page_for_num_of_column_classification") + return croped_page, page_coord + + def extract_page(self): + self.logger.debug("enter extract_page") + cont_page = [] + 
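+        # Page detection: predict a border mask, keep the largest contour as the
+        # page region, and snap edges closer than 30px to the image border below.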
model_page, session_page = self.start_new_session_and_model(self.model_page_dir) + img = cv2.GaussianBlur(self.image, (5, 5), 0) + img_page_prediction = self.do_prediction(False, img, model_page) + imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY) + _, thresh = cv2.threshold(imgray, 0, 255, 0) + thresh = cv2.dilate(thresh, KERNEL, iterations=3) + contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + if len(contours)>0: + cnt_size = np.array([cv2.contourArea(contours[j]) for j in range(len(contours))]) + cnt = contours[np.argmax(cnt_size)] + x, y, w, h = cv2.boundingRect(cnt) + if x <= 30: + w += x + x = 0 + if (self.image.shape[1] - (x + w)) <= 30: + w = w + (self.image.shape[1] - (x + w)) + if y <= 30: + h = h + y + y = 0 + if (self.image.shape[0] - (y + h)) <= 30: + h = h + (self.image.shape[0] - (y + h)) + + box = [x, y, w, h] + else: + box = [0, 0, img.shape[1], img.shape[0]] + croped_page, page_coord = crop_image_inside_box(box, self.image) + cont_page.append(np.array([[page_coord[2], page_coord[0]], [page_coord[3], page_coord[0]], [page_coord[3], page_coord[1]], [page_coord[2], page_coord[1]]])) + session_page.close() + del model_page + del session_page + gc.collect() + K.clear_session() + self.logger.debug("exit extract_page") + return croped_page, page_coord, cont_page + + def extract_text_regions(self, img, patches, cols): + self.logger.debug("enter extract_text_regions") + img_height_h = img.shape[0] + img_width_h = img.shape[1] + + model_region, session_region = self.start_new_session_and_model(self.model_region_dir_fully if patches else self.model_region_dir_fully_np) + + if not patches: + img = otsu_copy_binary(img) + img = img.astype(np.uint8) + prediction_regions2 = None + else: + if cols == 1: + img2 = otsu_copy_binary(img) + img2 = img2.astype(np.uint8) + img2 = resize_image(img2, int(img_height_h * 0.7), int(img_width_h * 0.7)) + marginal_of_patch_percent = 0.1 + prediction_regions2 = self.do_prediction(patches, img2, model_region, marginal_of_patch_percent) + prediction_regions2 = resize_image(prediction_regions2, img_height_h, img_width_h) + + if cols == 2: + img2 = otsu_copy_binary(img) + img2 = img2.astype(np.uint8) + img2 = resize_image(img2, int(img_height_h * 0.4), int(img_width_h * 0.4)) + marginal_of_patch_percent = 0.1 + prediction_regions2 = self.do_prediction(patches, img2, model_region, marginal_of_patch_percent) + prediction_regions2 = resize_image(prediction_regions2, img_height_h, img_width_h) + + elif cols > 2: + img2 = otsu_copy_binary(img) + img2 = img2.astype(np.uint8) + img2 = resize_image(img2, int(img_height_h * 0.3), int(img_width_h * 0.3)) + marginal_of_patch_percent = 0.1 + prediction_regions2 = self.do_prediction(patches, img2, model_region, marginal_of_patch_percent) + prediction_regions2 = resize_image(prediction_regions2, img_height_h, img_width_h) + + if cols == 2: + img = otsu_copy_binary(img) + img = img.astype(np.uint8) + if img_width_h >= 2000: + img = resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)) + img = img.astype(np.uint8) + + if cols == 1: + img = otsu_copy_binary(img) + img = img.astype(np.uint8) + img = resize_image(img, int(img_height_h * 0.5), int(img_width_h * 0.5)) + img = img.astype(np.uint8) + + if cols == 3: + if (self.scale_x == 1 and img_width_h > 3000) or (self.scale_x != 1 and img_width_h > 2800): + img = otsu_copy_binary(img) + img = img.astype(np.uint8) + img = resize_image(img, int(img_height_h * 2800 / float(img_width_h)), 2800) + else: + img = 
otsu_copy_binary(img) + img = img.astype(np.uint8) + + if cols == 4: + if (self.scale_x == 1 and img_width_h > 4000) or (self.scale_x != 1 and img_width_h > 3700): + img = otsu_copy_binary(img) + img = img.astype(np.uint8) + img= resize_image(img, int(img_height_h * 3700 / float(img_width_h)), 3700) + else: + img = otsu_copy_binary(img) + img = img.astype(np.uint8) + img= resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)) + + if cols == 5: + if self.scale_x == 1 and img_width_h > 5000: + img = otsu_copy_binary(img) + img = img.astype(np.uint8) + img= resize_image(img, int(img_height_h * 0.7), int(img_width_h * 0.7)) + else: + img = otsu_copy_binary(img) + img = img.astype(np.uint8) + img= resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9) ) + + if cols >= 6: + if img_width_h > 5600: + img = otsu_copy_binary(img) + img = img.astype(np.uint8) + img= resize_image(img, int(img_height_h * 5600 / float(img_width_h)), 5600) + else: + img = otsu_copy_binary(img) + img = img.astype(np.uint8) + img= resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)) + + marginal_of_patch_percent = 0.1 + prediction_regions = self.do_prediction(patches, img, model_region, marginal_of_patch_percent) + prediction_regions = resize_image(prediction_regions, img_height_h, img_width_h) + + session_region.close() + del model_region + del session_region + gc.collect() + + self.logger.debug("exit extract_text_regions") + return prediction_regions, prediction_regions2 + + def get_slopes_and_deskew_new(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, slope_deskew): + self.logger.debug("enter get_slopes_and_deskew_new") + num_cores = cpu_count() + queue_of_all_params = Queue() + + processes = [] + nh = np.linspace(0, len(boxes), num_cores + 1) + indexes_by_text_con = np.array(range(len(contours_par))) + for i in range(num_cores): + boxes_per_process = boxes[int(nh[i]) : int(nh[i + 1])] + contours_per_process = contours[int(nh[i]) : int(nh[i + 1])] + contours_par_per_process = contours_par[int(nh[i]) : int(nh[i + 1])] + indexes_text_con_per_process = indexes_by_text_con[int(nh[i]) : int(nh[i + 1])] + + processes.append(Process(target=self.do_work_of_slopes_new, args=(queue_of_all_params, boxes_per_process, textline_mask_tot, contours_per_process, contours_par_per_process, indexes_text_con_per_process, image_page_rotated, slope_deskew))) + for i in range(num_cores): + processes[i].start() + + slopes = [] + all_found_texline_polygons = [] + all_found_text_regions = [] + all_found_text_regions_par = [] + boxes = [] + all_box_coord = [] + all_index_text_con = [] + for i in range(num_cores): + list_all_par = queue_of_all_params.get(True) + slopes_for_sub_process = list_all_par[0] + polys_for_sub_process = list_all_par[1] + boxes_for_sub_process = list_all_par[2] + contours_for_subprocess = list_all_par[3] + contours_par_for_subprocess = list_all_par[4] + boxes_coord_for_subprocess = list_all_par[5] + indexes_for_subprocess = list_all_par[6] + for j in range(len(slopes_for_sub_process)): + slopes.append(slopes_for_sub_process[j]) + all_found_texline_polygons.append(polys_for_sub_process[j]) + boxes.append(boxes_for_sub_process[j]) + all_found_text_regions.append(contours_for_subprocess[j]) + all_found_text_regions_par.append(contours_par_for_subprocess[j]) + all_box_coord.append(boxes_coord_for_subprocess[j]) + all_index_text_con.append(indexes_for_subprocess[j]) + for i in range(num_cores): + processes[i].join() + self.logger.debug('slopes %s', slopes) + 
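+        # Note: results arrive in queue order (per worker), not in box order; the
+        # original region indices are preserved in all_index_text_con for reordering.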
self.logger.debug("exit get_slopes_and_deskew_new") + return slopes, all_found_texline_polygons, boxes, all_found_text_regions, all_found_text_regions_par, all_box_coord, all_index_text_con + + def get_slopes_and_deskew_new_curved(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, mask_texts_only, num_col, scale_par, slope_deskew): + self.logger.debug("enter get_slopes_and_deskew_new_curved") + num_cores = cpu_count() + queue_of_all_params = Queue() + + processes = [] + nh = np.linspace(0, len(boxes), num_cores + 1) + indexes_by_text_con = np.array(range(len(contours_par))) + + for i in range(num_cores): + boxes_per_process = boxes[int(nh[i]) : int(nh[i + 1])] + contours_per_process = contours[int(nh[i]) : int(nh[i + 1])] + contours_par_per_process = contours_par[int(nh[i]) : int(nh[i + 1])] + indexes_text_con_per_process = indexes_by_text_con[int(nh[i]) : int(nh[i + 1])] + + processes.append(Process(target=self.do_work_of_slopes_new_curved, args=(queue_of_all_params, boxes_per_process, textline_mask_tot, contours_per_process, contours_par_per_process, image_page_rotated, mask_texts_only, num_col, scale_par, indexes_text_con_per_process, slope_deskew))) + + for i in range(num_cores): + processes[i].start() + + slopes = [] + all_found_texline_polygons = [] + all_found_text_regions = [] + all_found_text_regions_par = [] + boxes = [] + all_box_coord = [] + all_index_text_con = [] + + for i in range(num_cores): + list_all_par = queue_of_all_params.get(True) + polys_for_sub_process = list_all_par[0] + boxes_for_sub_process = list_all_par[1] + contours_for_subprocess = list_all_par[2] + contours_par_for_subprocess = list_all_par[3] + boxes_coord_for_subprocess = list_all_par[4] + indexes_for_subprocess = list_all_par[5] + slopes_for_sub_process = list_all_par[6] + for j in range(len(polys_for_sub_process)): + slopes.append(slopes_for_sub_process[j]) + all_found_texline_polygons.append(polys_for_sub_process[j][::-1]) + boxes.append(boxes_for_sub_process[j]) + all_found_text_regions.append(contours_for_subprocess[j]) + all_found_text_regions_par.append(contours_par_for_subprocess[j]) + all_box_coord.append(boxes_coord_for_subprocess[j]) + all_index_text_con.append(indexes_for_subprocess[j]) + + for i in range(num_cores): + processes[i].join() + # print(slopes,'slopes') + return all_found_texline_polygons, boxes, all_found_text_regions, all_found_text_regions_par, all_box_coord, all_index_text_con, slopes + + def do_work_of_slopes_new_curved(self, queue_of_all_params, boxes_text, textline_mask_tot_ea, contours_per_process, contours_par_per_process, image_page_rotated, mask_texts_only, num_col, scale_par, indexes_r_con_per_pro, slope_deskew): + self.logger.debug("enter do_work_of_slopes_new_curved") + slopes_per_each_subprocess = [] + bounding_box_of_textregion_per_each_subprocess = [] + textlines_rectangles_per_each_subprocess = [] + contours_textregion_per_each_subprocess = [] + contours_textregion_par_per_each_subprocess = [] + all_box_coord_per_process = [] + index_by_text_region_contours = [] + + textline_cnt_separated = np.zeros(textline_mask_tot_ea.shape) + + for mv in range(len(boxes_text)): + + all_text_region_raw = textline_mask_tot_ea[boxes_text[mv][1] : boxes_text[mv][1] + boxes_text[mv][3], boxes_text[mv][0] : boxes_text[mv][0] + boxes_text[mv][2]] + all_text_region_raw = all_text_region_raw.astype(np.uint8) + img_int_p = all_text_region_raw[:, :] + + # img_int_p=cv2.erode(img_int_p,KERNEL,iterations = 2) + # plt.imshow(img_int_p) + # plt.show() + + if 
img_int_p.shape[0] / img_int_p.shape[1] < 0.1: + slopes_per_each_subprocess.append(0) + slope_for_all = [slope_deskew][0] + else: + try: + textline_con, hierarchy = return_contours_of_image(img_int_p) + textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con, hierarchy, max_area=1, min_area=0.0008) + y_diff_mean = find_contours_mean_y_diff(textline_con_fil) + if self.isNaN(y_diff_mean): + slope_for_all = MAX_SLOPE + else: + sigma_des = max(1, int(y_diff_mean * (4.0 / 40.0))) + img_int_p[img_int_p > 0] = 1 + slope_for_all = return_deskew_slop(img_int_p, sigma_des, plotter=self.plotter) + + if abs(slope_for_all) < 0.5: + slope_for_all = [slope_deskew][0] + + except Exception as why: + self.logger.error(why) + slope_for_all = MAX_SLOPE + + if slope_for_all == MAX_SLOPE: + slope_for_all = [slope_deskew][0] + slopes_per_each_subprocess.append(slope_for_all) + + index_by_text_region_contours.append(indexes_r_con_per_pro[mv]) + _, crop_coor = crop_image_inside_box(boxes_text[mv], image_page_rotated) + + if abs(slope_for_all) < 45: + # all_box_coord.append(crop_coor) + textline_region_in_image = np.zeros(textline_mask_tot_ea.shape) + cnt_o_t_max = contours_par_per_process[mv] + x, y, w, h = cv2.boundingRect(cnt_o_t_max) + mask_biggest = np.zeros(mask_texts_only.shape) + mask_biggest = cv2.fillPoly(mask_biggest, pts=[cnt_o_t_max], color=(1, 1, 1)) + mask_region_in_patch_region = mask_biggest[y : y + h, x : x + w] + textline_biggest_region = mask_biggest * textline_mask_tot_ea + + # print(slope_for_all,'slope_for_all') + textline_rotated_separated = separate_lines_new2(textline_biggest_region[y : y + h, x : x + w], 0, num_col, slope_for_all, plotter=self.plotter) + + # new line added + ##print(np.shape(textline_rotated_separated),np.shape(mask_biggest)) + textline_rotated_separated[mask_region_in_patch_region[:, :] != 1] = 0 + # till here + + textline_cnt_separated[y : y + h, x : x + w] = textline_rotated_separated + textline_region_in_image[y : y + h, x : x + w] = textline_rotated_separated + + # plt.imshow(textline_region_in_image) + # plt.show() + # plt.imshow(textline_cnt_separated) + # plt.show() + + pixel_img = 1 + cnt_textlines_in_image = return_contours_of_interested_textline(textline_region_in_image, pixel_img) + + textlines_cnt_per_region = [] + for jjjj in range(len(cnt_textlines_in_image)): + mask_biggest2 = np.zeros(mask_texts_only.shape) + mask_biggest2 = cv2.fillPoly(mask_biggest2, pts=[cnt_textlines_in_image[jjjj]], color=(1, 1, 1)) + if num_col + 1 == 1: + mask_biggest2 = cv2.dilate(mask_biggest2, KERNEL, iterations=5) + else: + mask_biggest2 = cv2.dilate(mask_biggest2, KERNEL, iterations=4) + + pixel_img = 1 + mask_biggest2 = resize_image(mask_biggest2, int(mask_biggest2.shape[0] * scale_par), int(mask_biggest2.shape[1] * scale_par)) + cnt_textlines_in_image_ind = return_contours_of_interested_textline(mask_biggest2, pixel_img) + try: + textlines_cnt_per_region.append(cnt_textlines_in_image_ind[0]) + except Exception as why: + self.logger.error(why) + else: + add_boxes_coor_into_textlines = True + textlines_cnt_per_region = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contours_par_per_process[mv], boxes_text[mv], add_boxes_coor_into_textlines) + add_boxes_coor_into_textlines = False + # print(np.shape(textlines_cnt_per_region),'textlines_cnt_per_region') + + textlines_rectangles_per_each_subprocess.append(textlines_cnt_per_region) + bounding_box_of_textregion_per_each_subprocess.append(boxes_text[mv]) + 
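+            # Carry the region's own contours and crop coordinates along with its textlines.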
contours_textregion_per_each_subprocess.append(contours_per_process[mv]) + contours_textregion_par_per_each_subprocess.append(contours_par_per_process[mv]) + all_box_coord_per_process.append(crop_coor) + + queue_of_all_params.put([textlines_rectangles_per_each_subprocess, bounding_box_of_textregion_per_each_subprocess, contours_textregion_per_each_subprocess, contours_textregion_par_per_each_subprocess, all_box_coord_per_process, index_by_text_region_contours, slopes_per_each_subprocess]) + + def do_work_of_slopes_new(self, queue_of_all_params, boxes_text, textline_mask_tot_ea, contours_per_process, contours_par_per_process, indexes_r_con_per_pro, image_page_rotated, slope_deskew): + self.logger.debug('enter do_work_of_slopes_new') + slopes_per_each_subprocess = [] + bounding_box_of_textregion_per_each_subprocess = [] + textlines_rectangles_per_each_subprocess = [] + contours_textregion_per_each_subprocess = [] + contours_textregion_par_per_each_subprocess = [] + all_box_coord_per_process = [] + index_by_text_region_contours = [] + for mv in range(len(boxes_text)): + _, crop_coor = crop_image_inside_box(boxes_text[mv],image_page_rotated) + mask_textline = np.zeros((textline_mask_tot_ea.shape)) + mask_textline = cv2.fillPoly(mask_textline,pts=[contours_per_process[mv]],color=(1,1,1)) + all_text_region_raw = (textline_mask_tot_ea*mask_textline[:,:])[boxes_text[mv][1]:boxes_text[mv][1]+boxes_text[mv][3] , boxes_text[mv][0]:boxes_text[mv][0]+boxes_text[mv][2] ] + all_text_region_raw=all_text_region_raw.astype(np.uint8) + img_int_p=all_text_region_raw[:,:]#self.all_text_region_raw[mv] + img_int_p=cv2.erode(img_int_p,KERNEL,iterations = 2) + + if img_int_p.shape[0]/img_int_p.shape[1]<0.1: + slopes_per_each_subprocess.append(0) + slope_for_all = [slope_deskew][0] + all_text_region_raw = textline_mask_tot_ea[boxes_text[mv][1] : boxes_text[mv][1] + boxes_text[mv][3], boxes_text[mv][0] : boxes_text[mv][0] + boxes_text[mv][2]] + cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contours_par_per_process[mv], boxes_text[mv], 0) + textlines_rectangles_per_each_subprocess.append(cnt_clean_rot) + index_by_text_region_contours.append(indexes_r_con_per_pro[mv]) + bounding_box_of_textregion_per_each_subprocess.append(boxes_text[mv]) + else: + try: + textline_con, hierarchy = return_contours_of_image(img_int_p) + textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con, hierarchy, max_area=1, min_area=0.00008) + y_diff_mean = find_contours_mean_y_diff(textline_con_fil) + if self.isNaN(y_diff_mean): + slope_for_all = MAX_SLOPE + else: + sigma_des = int(y_diff_mean * (4.0 / 40.0)) + if sigma_des < 1: + sigma_des = 1 + img_int_p[img_int_p > 0] = 1 + slope_for_all = return_deskew_slop(img_int_p, sigma_des, plotter=self.plotter) + if abs(slope_for_all) <= 0.5: + slope_for_all = [slope_deskew][0] + except Exception as why: + self.logger.error(why) + slope_for_all = MAX_SLOPE + if slope_for_all == MAX_SLOPE: + slope_for_all = [slope_deskew][0] + slopes_per_each_subprocess.append(slope_for_all) + mask_only_con_region = np.zeros(textline_mask_tot_ea.shape) + mask_only_con_region = cv2.fillPoly(mask_only_con_region, pts=[contours_par_per_process[mv]], color=(1, 1, 1)) + + # plt.imshow(mask_only_con_region) + # plt.show() + all_text_region_raw = np.copy(textline_mask_tot_ea[boxes_text[mv][1] : boxes_text[mv][1] + boxes_text[mv][3], boxes_text[mv][0] : boxes_text[mv][0] + boxes_text[mv][2]]) + mask_only_con_region = mask_only_con_region[boxes_text[mv][1] : 
boxes_text[mv][1] + boxes_text[mv][3], boxes_text[mv][0] : boxes_text[mv][0] + boxes_text[mv][2]] + + ##plt.imshow(textline_mask_tot_ea) + ##plt.show() + ##plt.imshow(all_text_region_raw) + ##plt.show() + ##plt.imshow(mask_only_con_region) + ##plt.show() + + all_text_region_raw[mask_only_con_region == 0] = 0 + cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contours_par_per_process[mv], boxes_text[mv]) + + textlines_rectangles_per_each_subprocess.append(cnt_clean_rot) + index_by_text_region_contours.append(indexes_r_con_per_pro[mv]) + bounding_box_of_textregion_per_each_subprocess.append(boxes_text[mv]) + + contours_textregion_per_each_subprocess.append(contours_per_process[mv]) + contours_textregion_par_per_each_subprocess.append(contours_par_per_process[mv]) + all_box_coord_per_process.append(crop_coor) + queue_of_all_params.put([slopes_per_each_subprocess, textlines_rectangles_per_each_subprocess, bounding_box_of_textregion_per_each_subprocess, contours_textregion_per_each_subprocess, contours_textregion_par_per_each_subprocess, all_box_coord_per_process, index_by_text_region_contours]) + + def textline_contours(self, img, patches, scaler_h, scaler_w): + self.logger.debug('enter textline_contours') + + model_textline, session_textline = self.start_new_session_and_model(self.model_textline_dir if patches else self.model_textline_dir_np) + img = img.astype(np.uint8) + img_org = np.copy(img) + img_h = img_org.shape[0] + img_w = img_org.shape[1] + img = resize_image(img_org, int(img_org.shape[0] * scaler_h), int(img_org.shape[1] * scaler_w)) + prediction_textline = self.do_prediction(patches, img, model_textline) + prediction_textline = resize_image(prediction_textline, img_h, img_w) + prediction_textline_longshot = self.do_prediction(False, img, model_textline) + prediction_textline_longshot_true_size = resize_image(prediction_textline_longshot, img_h, img_w) + + session_textline.close() + + + return prediction_textline[:, :, 0], prediction_textline_longshot_true_size[:, :, 0] + + def do_work_of_slopes(self, q, poly, box_sub, boxes_per_process, textline_mask_tot, contours_per_process): + self.logger.debug('enter do_work_of_slopes') + slope_biggest = 0 + slopes_sub = [] + boxes_sub_new = [] + poly_sub = [] + for mv in range(len(boxes_per_process)): + crop_img, _ = crop_image_inside_box(boxes_per_process[mv], np.repeat(textline_mask_tot[:, :, np.newaxis], 3, axis=2)) + crop_img = crop_img[:, :, 0] + crop_img = cv2.erode(crop_img, KERNEL, iterations=2) + try: + textline_con, hierarchy = return_contours_of_image(crop_img) + textline_con_fil = filter_contours_area_of_image(crop_img, textline_con, hierarchy, max_area=1, min_area=0.0008) + y_diff_mean = find_contours_mean_y_diff(textline_con_fil) + sigma_des = max(1, int(y_diff_mean * (4.0 / 40.0))) + crop_img[crop_img > 0] = 1 + slope_corresponding_textregion = return_deskew_slop(crop_img, sigma_des, plotter=self.plotter) + except Exception as why: + self.logger.error(why) + slope_corresponding_textregion = MAX_SLOPE + + if slope_corresponding_textregion == MAX_SLOPE: + slope_corresponding_textregion = slope_biggest + slopes_sub.append(slope_corresponding_textregion) + + cnt_clean_rot = textline_contours_postprocessing(crop_img, slope_corresponding_textregion, contours_per_process[mv], boxes_per_process[mv]) + + poly_sub.append(cnt_clean_rot) + boxes_sub_new.append(boxes_per_process[mv]) + + q.put(slopes_sub) + poly.put(poly_sub) + box_sub.put(boxes_sub_new) + + def 
get_regions_from_xy_2models(self,img,is_image_enhanced, num_col_classifier): + self.logger.debug("enter get_regions_from_xy_2models") + erosion_hurts = False + img_org = np.copy(img) + img_height_h = img_org.shape[0] + img_width_h = img_org.shape[1] + + model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_ens) + + ratio_y=1.3 + ratio_x=1 + + img = resize_image(img_org, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) + + prediction_regions_org_y = self.do_prediction(True, img, model_region) + prediction_regions_org_y = resize_image(prediction_regions_org_y, img_height_h, img_width_h ) + + #plt.imshow(prediction_regions_org_y[:,:,0]) + #plt.show() + prediction_regions_org_y = prediction_regions_org_y[:,:,0] + mask_zeros_y = (prediction_regions_org_y[:,:]==0)*1 + + ##img_only_regions_with_sep = ( (prediction_regions_org_y[:,:] != 3) & (prediction_regions_org_y[:,:] != 0) )*1 + img_only_regions_with_sep = ( prediction_regions_org_y[:,:] == 1 )*1 + img_only_regions_with_sep = img_only_regions_with_sep.astype(np.uint8) + + try: + img_only_regions = cv2.erode(img_only_regions_with_sep[:,:], KERNEL, iterations=20) + + _, _ = find_num_col(img_only_regions, multiplier=6.0) + + img = resize_image(img_org, int(img_org.shape[0]), int(img_org.shape[1]*(1.2 if is_image_enhanced else 1))) + + prediction_regions_org = self.do_prediction(True, img, model_region) + prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) + + ##plt.imshow(prediction_regions_org[:,:,0]) + ##plt.show() + prediction_regions_org=prediction_regions_org[:,:,0] + prediction_regions_org[(prediction_regions_org[:,:]==1) & (mask_zeros_y[:,:]==1)]=0 + + session_region.close() + del model_region + del session_region + gc.collect() + + model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p2) + img = resize_image(img_org, int(img_org.shape[0]), int(img_org.shape[1])) + prediction_regions_org2 = self.do_prediction(True, img, model_region, 0.2) + prediction_regions_org2=resize_image(prediction_regions_org2, img_height_h, img_width_h ) + + + session_region.close() + del model_region + del session_region + gc.collect() + + mask_zeros2 = (prediction_regions_org2[:,:,0] == 0) + mask_lines2 = (prediction_regions_org2[:,:,0] == 3) + text_sume_early = (prediction_regions_org[:,:] == 1).sum() + prediction_regions_org_copy = np.copy(prediction_regions_org) + prediction_regions_org_copy[(prediction_regions_org_copy[:,:]==1) & (mask_zeros2[:,:]==1)] = 0 + text_sume_second = ((prediction_regions_org_copy[:,:]==1)*1).sum() + + rate_two_models = text_sume_second / float(text_sume_early) * 100 + + self.logger.info("ratio_of_two_models: %s", rate_two_models) + if not(is_image_enhanced and rate_two_models < RATIO_OF_TWO_MODEL_THRESHOLD): + prediction_regions_org = np.copy(prediction_regions_org_copy) + + + + prediction_regions_org[(mask_lines2[:,:]==1) & (prediction_regions_org[:,:]==0)]=3 + mask_lines_only=(prediction_regions_org[:,:]==3)*1 + prediction_regions_org = cv2.erode(prediction_regions_org[:,:], KERNEL, iterations=2) + + #plt.imshow(text_region2_1st_channel) + #plt.show() + + prediction_regions_org = cv2.dilate(prediction_regions_org[:,:], KERNEL, iterations=2) + + + if rate_two_models<=40: + if self.input_binary: + prediction_bin = np.copy(img_org) + else: + model_bin, session_bin = self.start_new_session_and_model(self.model_dir_of_binarization) + prediction_bin = self.do_prediction(True, img_org, model_bin) + prediction_bin = 
resize_image(prediction_bin, img_height_h, img_width_h ) + + prediction_bin=prediction_bin[:,:,0] + prediction_bin = (prediction_bin[:,:]==0)*1 + prediction_bin = prediction_bin*255 + + prediction_bin =np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) + + session_bin.close() + del model_bin + del session_bin + gc.collect() + + + + model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_ens) + ratio_y=1 + ratio_x=1 + + + img = resize_image(prediction_bin, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) + + prediction_regions_org = self.do_prediction(True, img, model_region) + prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) + prediction_regions_org=prediction_regions_org[:,:,0] + + mask_lines_only=(prediction_regions_org[:,:]==3)*1 + session_region.close() + del model_region + del session_region + gc.collect() + + + mask_texts_only=(prediction_regions_org[:,:]==1)*1 + mask_images_only=(prediction_regions_org[:,:]==2)*1 + + + + polygons_lines_xml, hir_lines_xml = return_contours_of_image(mask_lines_only) + polygons_lines_xml = textline_con_fil = filter_contours_area_of_image(mask_lines_only, polygons_lines_xml, hir_lines_xml, max_area=1, min_area=0.00001) + + polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only, 1, 0.00001) + polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only, 1, 0.00001) + + text_regions_p_true = np.zeros(prediction_regions_org.shape) + text_regions_p_true = cv2.fillPoly(text_regions_p_true,pts = polygons_of_only_lines, color=(3, 3, 3)) + text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 + + text_regions_p_true=cv2.fillPoly(text_regions_p_true,pts=polygons_of_only_texts, color=(1,1,1)) + + + + K.clear_session() + return text_regions_p_true, erosion_hurts, polygons_lines_xml + except: + + if self.input_binary: + prediction_bin = np.copy(img_org) + else: + session_region.close() + del model_region + del session_region + gc.collect() + + model_bin, session_bin = self.start_new_session_and_model(self.model_dir_of_binarization) + prediction_bin = self.do_prediction(True, img_org, model_bin) + prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h ) + prediction_bin=prediction_bin[:,:,0] + + prediction_bin = (prediction_bin[:,:]==0)*1 + + prediction_bin = prediction_bin*255 + + prediction_bin =np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) + + + + session_bin.close() + del model_bin + del session_bin + gc.collect() + + + + model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_ens) + ratio_y=1 + ratio_x=1 + + + img = resize_image(prediction_bin, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) + + prediction_regions_org = self.do_prediction(True, img, model_region) + prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) + prediction_regions_org=prediction_regions_org[:,:,0] + + #mask_lines_only=(prediction_regions_org[:,:]==3)*1 + session_region.close() + del model_region + del session_region + gc.collect() + + #img = resize_image(img_org, int(img_org.shape[0]*1), int(img_org.shape[1]*1)) + + #prediction_regions_org = self.do_prediction(True, img, model_region) + + #prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) + + #prediction_regions_org = prediction_regions_org[:,:,0] + + #prediction_regions_org[(prediction_regions_org[:,:] == 1) & (mask_zeros_y[:,:] == 1)]=0 + 
#session_region.close() + #del model_region + #del session_region + #gc.collect() + + + + + mask_lines_only = (prediction_regions_org[:,:] ==3)*1 + + mask_texts_only = (prediction_regions_org[:,:] ==1)*1 + + mask_images_only=(prediction_regions_org[:,:] ==2)*1 + + polygons_lines_xml, hir_lines_xml = return_contours_of_image(mask_lines_only) + polygons_lines_xml = textline_con_fil = filter_contours_area_of_image(mask_lines_only, polygons_lines_xml, hir_lines_xml, max_area=1, min_area=0.00001) + + + polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) + + polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) + + + text_regions_p_true = np.zeros(prediction_regions_org.shape) + + text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_lines, color=(3,3,3)) + + text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 + + text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_texts, color=(1,1,1)) + + erosion_hurts = True + K.clear_session() + return text_regions_p_true, erosion_hurts, polygons_lines_xml + + def do_order_of_regions_full_layout(self, contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot): + self.logger.debug("enter do_order_of_regions_full_layout") + cx_text_only, cy_text_only, x_min_text_only, _, _, _, y_cor_x_min_main = find_new_features_of_contours(contours_only_text_parent) + cx_text_only_h, cy_text_only_h, x_min_text_only_h, _, _, _, y_cor_x_min_main_h = find_new_features_of_contours(contours_only_text_parent_h) + + try: + arg_text_con = [] + for ii in range(len(cx_text_only)): + for jj in range(len(boxes)): + if (x_min_text_only[ii] + 80) >= boxes[jj][0] and (x_min_text_only[ii] + 80) < boxes[jj][1] and y_cor_x_min_main[ii] >= boxes[jj][2] and y_cor_x_min_main[ii] < boxes[jj][3]: + arg_text_con.append(jj) + break + args_contours = np.array(range(len(arg_text_con))) + arg_text_con_h = [] + for ii in range(len(cx_text_only_h)): + for jj in range(len(boxes)): + if (x_min_text_only_h[ii] + 80) >= boxes[jj][0] and (x_min_text_only_h[ii] + 80) < boxes[jj][1] and y_cor_x_min_main_h[ii] >= boxes[jj][2] and y_cor_x_min_main_h[ii] < boxes[jj][3]: + arg_text_con_h.append(jj) + break + args_contours_h = np.array(range(len(arg_text_con_h))) + + order_by_con_head = np.zeros(len(arg_text_con_h)) + order_by_con_main = np.zeros(len(arg_text_con)) + + ref_point = 0 + order_of_texts_tot = [] + id_of_texts_tot = [] + for iij in range(len(boxes)): + + args_contours_box = args_contours[np.array(arg_text_con) == iij] + args_contours_box_h = args_contours_h[np.array(arg_text_con_h) == iij] + con_inter_box = [] + con_inter_box_h = [] + + for box in args_contours_box: + con_inter_box.append(contours_only_text_parent[box]) + + for box in args_contours_box_h: + con_inter_box_h.append(contours_only_text_parent_h[box]) + + indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions(textline_mask_tot[int(boxes[iij][2]) : int(boxes[iij][3]), int(boxes[iij][0]) : int(boxes[iij][1])], con_inter_box, con_inter_box_h, boxes[iij][2]) + + order_of_texts, id_of_texts = order_and_id_of_texts(con_inter_box, con_inter_box_h, matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) + + indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1] + indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1] + indexes_sorted_head = 
np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 2] + indexes_by_type_head = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 2] + + for zahler, _ in enumerate(args_contours_box): + arg_order_v = indexes_sorted_main[zahler] + order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + + for zahler, _ in enumerate(args_contours_box_h): + arg_order_v = indexes_sorted_head[zahler] + order_by_con_head[args_contours_box_h[indexes_by_type_head[zahler]]] = np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + + for jji in range(len(id_of_texts)): + order_of_texts_tot.append(order_of_texts[jji] + ref_point) + id_of_texts_tot.append(id_of_texts[jji]) + ref_point += len(id_of_texts) + + order_of_texts_tot = [] + for tj1 in range(len(contours_only_text_parent)): + order_of_texts_tot.append(int(order_by_con_main[tj1])) + + for tj1 in range(len(contours_only_text_parent_h)): + order_of_texts_tot.append(int(order_by_con_head[tj1])) + + order_text_new = [] + for iii in range(len(order_of_texts_tot)): + order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0]) + + except Exception as why: + self.logger.error(why) + arg_text_con = [] + for ii in range(len(cx_text_only)): + for jj in range(len(boxes)): + if cx_text_only[ii] >= boxes[jj][0] and cx_text_only[ii] < boxes[jj][1] and cy_text_only[ii] >= boxes[jj][2] and cy_text_only[ii] < boxes[jj][3]: # this is valid if the center of region identify in which box it is located + arg_text_con.append(jj) + break + args_contours = np.array(range(len(arg_text_con))) + + order_by_con_main = np.zeros(len(arg_text_con)) + + ############################# head + + arg_text_con_h = [] + for ii in range(len(cx_text_only_h)): + for jj in range(len(boxes)): + if cx_text_only_h[ii] >= boxes[jj][0] and cx_text_only_h[ii] < boxes[jj][1] and cy_text_only_h[ii] >= boxes[jj][2] and cy_text_only_h[ii] < boxes[jj][3]: # this is valid if the center of region identify in which box it is located + arg_text_con_h.append(jj) + break + args_contours_h = np.array(range(len(arg_text_con_h))) + + order_by_con_head = np.zeros(len(arg_text_con_h)) + + ref_point = 0 + order_of_texts_tot = [] + id_of_texts_tot = [] + for iij, _ in enumerate(boxes): + args_contours_box = args_contours[np.array(arg_text_con) == iij] + args_contours_box_h = args_contours_h[np.array(arg_text_con_h) == iij] + con_inter_box = [] + con_inter_box_h = [] + + for box in args_contours_box: + con_inter_box.append(contours_only_text_parent[box]) + + for box in args_contours_box_h: + con_inter_box_h.append(contours_only_text_parent_h[box]) + + indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions(textline_mask_tot[int(boxes[iij][2]) : int(boxes[iij][3]), int(boxes[iij][0]) : int(boxes[iij][1])], con_inter_box, con_inter_box_h, boxes[iij][2]) + + order_of_texts, id_of_texts = order_and_id_of_texts(con_inter_box, con_inter_box_h, matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) + + indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1] + indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1] + indexes_sorted_head = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 2] + indexes_by_type_head = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 2] + + for zahler, _ in enumerate(args_contours_box): + arg_order_v = indexes_sorted_main[zahler] + 
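+                    # Translate the in-box sort position into a global reading-order
+                    # index, offset by ref_point to account for boxes already processed.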
order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + + for zahler, _ in enumerate(args_contours_box_h): + arg_order_v = indexes_sorted_head[zahler] + order_by_con_head[args_contours_box_h[indexes_by_type_head[zahler]]] = np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + + for jji, _ in enumerate(id_of_texts): + order_of_texts_tot.append(order_of_texts[jji] + ref_point) + id_of_texts_tot.append(id_of_texts[jji]) + ref_point += len(id_of_texts) + + order_of_texts_tot = [] + for tj1 in range(len(contours_only_text_parent)): + order_of_texts_tot.append(int(order_by_con_main[tj1])) + + for tj1 in range(len(contours_only_text_parent_h)): + order_of_texts_tot.append(int(order_by_con_head[tj1])) + + order_text_new = [] + for iii in range(len(order_of_texts_tot)): + order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0]) + return order_text_new, id_of_texts_tot + + def do_order_of_regions_no_full_layout(self, contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot): + self.logger.debug("enter do_order_of_regions_no_full_layout") + cx_text_only, cy_text_only, x_min_text_only, _, _, _, y_cor_x_min_main = find_new_features_of_contours(contours_only_text_parent) + + try: + arg_text_con = [] + for ii in range(len(cx_text_only)): + for jj in range(len(boxes)): + if (x_min_text_only[ii] + 80) >= boxes[jj][0] and (x_min_text_only[ii] + 80) < boxes[jj][1] and y_cor_x_min_main[ii] >= boxes[jj][2] and y_cor_x_min_main[ii] < boxes[jj][3]: + arg_text_con.append(jj) + break + args_contours = np.array(range(len(arg_text_con))) + order_by_con_main = np.zeros(len(arg_text_con)) + + ref_point = 0 + order_of_texts_tot = [] + id_of_texts_tot = [] + for iij in range(len(boxes)): + args_contours_box = args_contours[np.array(arg_text_con) == iij] + con_inter_box = [] + con_inter_box_h = [] + for i in range(len(args_contours_box)): + con_inter_box.append(contours_only_text_parent[args_contours_box[i]]) + + indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions(textline_mask_tot[int(boxes[iij][2]) : int(boxes[iij][3]), int(boxes[iij][0]) : int(boxes[iij][1])], con_inter_box, con_inter_box_h, boxes[iij][2]) + + order_of_texts, id_of_texts = order_and_id_of_texts(con_inter_box, con_inter_box_h, matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) + + indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1] + indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1] + + for zahler, _ in enumerate(args_contours_box): + arg_order_v = indexes_sorted_main[zahler] + order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + + for jji, _ in enumerate(id_of_texts): + order_of_texts_tot.append(order_of_texts[jji] + ref_point) + id_of_texts_tot.append(id_of_texts[jji]) + ref_point += len(id_of_texts) + + order_of_texts_tot = [] + for tj1 in range(len(contours_only_text_parent)): + order_of_texts_tot.append(int(order_by_con_main[tj1])) + + order_text_new = [] + for iii in range(len(order_of_texts_tot)): + order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0]) + + except Exception as why: + self.logger.error(why) + arg_text_con = [] + for ii in range(len(cx_text_only)): + for jj in range(len(boxes)): + if cx_text_only[ii] >= boxes[jj][0] and cx_text_only[ii] < boxes[jj][1] and 
cy_text_only[ii] >= boxes[jj][2] and cy_text_only[ii] < boxes[jj][3]: # this is valid if the center of region identify in which box it is located + arg_text_con.append(jj) + break + args_contours = np.array(range(len(arg_text_con))) + + order_by_con_main = np.zeros(len(arg_text_con)) + + ref_point = 0 + order_of_texts_tot = [] + id_of_texts_tot = [] + for iij in range(len(boxes)): + args_contours_box = args_contours[np.array(arg_text_con) == iij] + con_inter_box = [] + con_inter_box_h = [] + + for i in range(len(args_contours_box)): + con_inter_box.append(contours_only_text_parent[args_contours_box[i]]) + + indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions(textline_mask_tot[int(boxes[iij][2]) : int(boxes[iij][3]), int(boxes[iij][0]) : int(boxes[iij][1])], con_inter_box, con_inter_box_h, boxes[iij][2]) + + order_of_texts, id_of_texts = order_and_id_of_texts(con_inter_box, con_inter_box_h, matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) + + indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1] + indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1] + + for zahler, _ in enumerate(args_contours_box): + arg_order_v = indexes_sorted_main[zahler] + order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + + for jji, _ in enumerate(id_of_texts): + order_of_texts_tot.append(order_of_texts[jji] + ref_point) + id_of_texts_tot.append(id_of_texts[jji]) + ref_point += len(id_of_texts) + + order_of_texts_tot = [] + for tj1 in range(len(contours_only_text_parent)): + order_of_texts_tot.append(int(order_by_con_main[tj1])) + + order_text_new = [] + for iii in range(len(order_of_texts_tot)): + order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0]) + + return order_text_new, id_of_texts_tot + + def do_order_of_regions(self, *args, **kwargs): + if self.full_layout: + return self.do_order_of_regions_full_layout(*args, **kwargs) + return self.do_order_of_regions_no_full_layout(*args, **kwargs) + + def run_graphics_and_columns(self, text_regions_p_1, num_col_classifier, num_column_is_classified, erosion_hurts): + img_g = self.imread(grayscale=True, uint8=True) + + img_g3 = np.zeros((img_g.shape[0], img_g.shape[1], 3)) + img_g3 = img_g3.astype(np.uint8) + img_g3[:, :, 0] = img_g[:, :] + img_g3[:, :, 1] = img_g[:, :] + img_g3[:, :, 2] = img_g[:, :] + + image_page, page_coord, cont_page = self.extract_page() + if self.plotter: + self.plotter.save_page_image(image_page) + + text_regions_p_1 = text_regions_p_1[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + mask_images = (text_regions_p_1[:, :] == 2) * 1 + mask_images = mask_images.astype(np.uint8) + mask_images = cv2.erode(mask_images[:, :], KERNEL, iterations=10) + mask_lines = (text_regions_p_1[:, :] == 3) * 1 + mask_lines = mask_lines.astype(np.uint8) + img_only_regions_with_sep = ((text_regions_p_1[:, :] != 3) & (text_regions_p_1[:, :] != 0)) * 1 + img_only_regions_with_sep = img_only_regions_with_sep.astype(np.uint8) + + + if erosion_hurts: + img_only_regions = np.copy(img_only_regions_with_sep[:,:]) + else: + img_only_regions = cv2.erode(img_only_regions_with_sep[:,:], KERNEL, iterations=6) + + + try: + num_col, _ = find_num_col(img_only_regions, multiplier=6.0) + num_col = num_col + 1 + if not num_column_is_classified: + num_col_classifier = num_col + 1 + except Exception as why: + self.logger.error(why) 
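+            # Fall back to "no columns"; run() then emits an empty PAGE-XML for this page.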
+            num_col = None
+        return num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, text_regions_p_1, cont_page
+
+    def run_enhancement(self):
+        self.logger.info("resize and enhance image")
+        is_image_enhanced, img_org, img_res, num_col_classifier, num_column_is_classified, img_bin = self.resize_and_enhance_image_with_column_classifier()
+        self.logger.info("Image is %senhanced", '' if is_image_enhanced else 'not ')
+        K.clear_session()
+        scale = 1
+        if is_image_enhanced:
+            if self.allow_enhancement:
+                img_res = img_res.astype(np.uint8)
+                self.get_image_and_scales(img_org, img_res, scale)
+            else:
+                self.get_image_and_scales_after_enhancing(img_org, img_res)
+        else:
+            # without enhancement, scales are taken from the unmodified image either way
+            self.get_image_and_scales(img_org, img_res, scale)
+        if self.allow_scaling:
+            img_org, img_res, is_image_enhanced = self.resize_image_with_column_classifier(is_image_enhanced, img_bin)
+            self.get_image_and_scales_after_enhancing(img_org, img_res)
+        return img_res, is_image_enhanced, num_col_classifier, num_column_is_classified
+
+    def run_textline(self, image_page):
+        scaler_h_textline = 1  # alternative value tried: 1.2
+        scaler_w_textline = 1  # alternative value tried: 0.9
+        textline_mask_tot_ea, _ = self.textline_contours(image_page, True, scaler_h_textline, scaler_w_textline)
+        K.clear_session()
+        if self.plotter:
+            self.plotter.save_plot_of_textlines(textline_mask_tot_ea, image_page)
+        return textline_mask_tot_ea
+
+    def run_deskew(self, textline_mask_tot_ea):
+        sigma = 2
+        main_page_deskew = True
+        slope_deskew = return_deskew_slop(cv2.erode(textline_mask_tot_ea, KERNEL, iterations=2), sigma, main_page_deskew, plotter=self.plotter)
+        slope_first = 0
+
+        if self.plotter:
+            self.plotter.save_deskewed_image(slope_deskew)
+        self.logger.info("slope_deskew: %s", slope_deskew)
+        return slope_deskew, slope_first
+
+    def run_marginals(self, image_page, textline_mask_tot_ea, mask_images, mask_lines, num_col_classifier, slope_deskew, text_regions_p_1):
+        image_page_rotated, textline_mask_tot = image_page[:, :], textline_mask_tot_ea[:, :]
+        textline_mask_tot[mask_images[:, :] == 1] = 0
+
+        text_regions_p_1[mask_lines[:, :] == 1] = 3
+        text_regions_p = text_regions_p_1[:, :]
+        text_regions_p = np.array(text_regions_p)
+
+        if num_col_classifier in (1, 2):
+            try:
+                regions_without_separators = (text_regions_p[:, :] == 1) * 1
+                regions_without_separators = regions_without_separators.astype(np.uint8)
+                text_regions_p = get_marginals(rotate_image(regions_without_separators, slope_deskew), text_regions_p, num_col_classifier, slope_deskew, kernel=KERNEL)
+            except Exception as e:
+                self.logger.error("exception %s", e)
+
+        if self.plotter:
+            self.plotter.save_plot_of_layout_main_all(text_regions_p, image_page)
+            self.plotter.save_plot_of_layout_main(text_regions_p, image_page)
+        return textline_mask_tot, text_regions_p, image_page_rotated
+
+    def run_boxes_no_full_layout(self, image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, erosion_hurts):
+        self.logger.debug('enter run_boxes_no_full_layout')
+        if np.abs(slope_deskew) >= SLOPE_THRESHOLD:
+            _, textline_mask_tot_d, text_regions_p_1_n = rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, slope_deskew)
+            text_regions_p_1_n = resize_image(text_regions_p_1_n, text_regions_p.shape[0], text_regions_p.shape[1])
+            textline_mask_tot_d = resize_image(textline_mask_tot_d, text_regions_p.shape[0], text_regions_p.shape[1])
+            regions_without_separators_d = (text_regions_p_1_n[:,
:] == 1) * 1 + regions_without_separators = (text_regions_p[:, :] == 1) * 1 # ( (text_regions_p[:,:]==1) | (text_regions_p[:,:]==2) )*1 #self.return_regions_without_separators_new(text_regions_p[:,:,0],img_only_regions) + if np.abs(slope_deskew) < SLOPE_THRESHOLD: + text_regions_p_1_n = None + textline_mask_tot_d = None + regions_without_separators_d = None + pixel_lines = 3 + if np.abs(slope_deskew) < SLOPE_THRESHOLD: + _, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, pixel_lines) + + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: + _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, pixel_lines) + K.clear_session() + + self.logger.info("num_col_classifier: %s", num_col_classifier) + + if num_col_classifier >= 3: + if np.abs(slope_deskew) < SLOPE_THRESHOLD: + regions_without_separators = regions_without_separators.astype(np.uint8) + regions_without_separators = cv2.erode(regions_without_separators[:, :], KERNEL, iterations=6) + else: + regions_without_separators_d = regions_without_separators_d.astype(np.uint8) + regions_without_separators_d = cv2.erode(regions_without_separators_d[:, :], KERNEL, iterations=6) + t1 = time.time() + if np.abs(slope_deskew) < SLOPE_THRESHOLD: + boxes = return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_without_separators, matrix_of_lines_ch, num_col_classifier, erosion_hurts) + boxes_d = None + self.logger.debug("len(boxes): %s", len(boxes)) + else: + boxes_d = return_boxes_of_images_by_order_of_reading_new(splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, num_col_classifier, erosion_hurts) + boxes = None + self.logger.debug("len(boxes): %s", len(boxes_d)) + + self.logger.info("detecting boxes took %ss", str(time.time() - t1)) + img_revised_tab = text_regions_p[:, :] + polygons_of_images = return_contours_of_interested_region(img_revised_tab, 2) + + # plt.imshow(img_revised_tab) + # plt.show() + K.clear_session() + self.logger.debug('exit run_boxes_no_full_layout') + return polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, boxes, boxes_d + + def run_boxes_full_layout(self, image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, img_only_regions): + self.logger.debug('enter run_boxes_full_layout') + # set first model with second model + text_regions_p[:, :][text_regions_p[:, :] == 2] = 5 + text_regions_p[:, :][text_regions_p[:, :] == 3] = 6 + text_regions_p[:, :][text_regions_p[:, :] == 4] = 8 + + K.clear_session() + image_page = image_page.astype(np.uint8) + + regions_fully, regions_fully_only_drop = self.extract_text_regions(image_page, True, cols=num_col_classifier) + text_regions_p[:,:][regions_fully[:,:,0]==6]=6 + regions_fully_only_drop = put_drop_out_from_only_drop_model(regions_fully_only_drop, text_regions_p) + regions_fully[:, :, 0][regions_fully_only_drop[:, :, 0] == 4] = 4 + K.clear_session() + + # plt.imshow(regions_fully[:,:,0]) + # plt.show() + regions_fully = putt_bb_of_drop_capitals_of_model_in_patches_in_layout(regions_fully) + # plt.imshow(regions_fully[:,:,0]) + # plt.show() + K.clear_session() + regions_fully_np, _ = self.extract_text_regions(image_page, False, cols=num_col_classifier) + # plt.imshow(regions_fully_np[:,:,0]) + # plt.show() + if num_col_classifier > 2: + regions_fully_np[:, :, 
0][regions_fully_np[:, :, 0] == 4] = 0 + else: + regions_fully_np = filter_small_drop_capitals_from_no_patch_layout(regions_fully_np, text_regions_p) + + # plt.imshow(regions_fully_np[:,:,0]) + # plt.show() + K.clear_session() + # plt.imshow(regions_fully[:,:,0]) + # plt.show() + regions_fully = boosting_headers_by_longshot_region_segmentation(regions_fully, regions_fully_np, img_only_regions) + # plt.imshow(regions_fully[:,:,0]) + # plt.show() + text_regions_p[:, :][regions_fully[:, :, 0] == 4] = 4 + text_regions_p[:, :][regions_fully_np[:, :, 0] == 4] = 4 + #plt.imshow(text_regions_p) + #plt.show() + + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: + _, textline_mask_tot_d, text_regions_p_1_n, regions_fully_n = rotation_not_90_func_full_layout(image_page, textline_mask_tot, text_regions_p, regions_fully, slope_deskew) + + text_regions_p_1_n = resize_image(text_regions_p_1_n, text_regions_p.shape[0], text_regions_p.shape[1]) + textline_mask_tot_d = resize_image(textline_mask_tot_d, text_regions_p.shape[0], text_regions_p.shape[1]) + regions_fully_n = resize_image(regions_fully_n, text_regions_p.shape[0], text_regions_p.shape[1]) + regions_without_separators_d = (text_regions_p_1_n[:, :] == 1) * 1 + else: + text_regions_p_1_n = None + textline_mask_tot_d = None + regions_without_separators_d = None + + regions_without_separators = (text_regions_p[:, :] == 1) * 1 # ( (text_regions_p[:,:]==1) | (text_regions_p[:,:]==2) )*1 #self.return_regions_without_separators_new(text_regions_p[:,:,0],img_only_regions) + + K.clear_session() + img_revised_tab = np.copy(text_regions_p[:, :]) + polygons_of_images = return_contours_of_interested_region(img_revised_tab, 5) + self.logger.debug('exit run_boxes_full_layout') + return polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, regions_fully, regions_without_separators + + def run(self): + """ + Get image and scales, then extract the page of scanned image + """ + self.logger.debug("enter run") + + t0 = time.time() + img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement() + + self.logger.info("Enhancing took %ss ", str(time.time() - t0)) + + t1 = time.time() + text_regions_p_1 ,erosion_hurts, polygons_lines_xml = self.get_regions_from_xy_2models(img_res, is_image_enhanced, num_col_classifier) + self.logger.info("Textregion detection took %ss ", str(time.time() - t1)) + + t1 = time.time() + num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, text_regions_p_1, cont_page = \ + self.run_graphics_and_columns(text_regions_p_1, num_col_classifier, num_column_is_classified, erosion_hurts) + self.logger.info("Graphics detection took %ss ", str(time.time() - t1)) + self.logger.info('cont_page %s', cont_page) + + if not num_col: + self.logger.info("No columns detected, outputting an empty PAGE-XML") + pcgts = self.writer.build_pagexml_no_full_layout([], page_coord, [], [], [], [], [], [], [], [], [], [], cont_page, []) + self.logger.info("Job done in %ss", str(time.time() - t1)) + return pcgts + + t1 = time.time() + textline_mask_tot_ea = self.run_textline(image_page) + self.logger.info("textline detection took %ss", str(time.time() - t1)) + + t1 = time.time() + slope_deskew, slope_first = self.run_deskew(textline_mask_tot_ea) + self.logger.info("deskewing took %ss", str(time.time() - t1)) + t1 = time.time() + + textline_mask_tot, text_regions_p, image_page_rotated = self.run_marginals(image_page, textline_mask_tot_ea, mask_images, 
mask_lines, num_col_classifier, slope_deskew, text_regions_p_1) + self.logger.info("detection of marginals took %ss", str(time.time() - t1)) + t1 = time.time() + + if not self.full_layout: + polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, boxes, boxes_d = self.run_boxes_no_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, erosion_hurts) + + pixel_img = 4 + min_area_mar = 0.00001 + polygons_of_marginals = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar) + + if self.full_layout: + polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, regions_fully, regions_without_separators = self.run_boxes_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, img_only_regions) + + text_only = ((img_revised_tab[:, :] == 1)) * 1 + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: + text_only_d = ((text_regions_p_1_n[:, :] == 1)) * 1 + + min_con_area = 0.000005 + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: + contours_only_text, hir_on_text = return_contours_of_image(text_only) + contours_only_text_parent = return_parent_contours(contours_only_text, hir_on_text) + + if len(contours_only_text_parent) > 0: + areas_cnt_text = np.array([cv2.contourArea(contours_only_text_parent[j]) for j in range(len(contours_only_text_parent))]) + areas_cnt_text = areas_cnt_text / float(text_only.shape[0] * text_only.shape[1]) + self.logger.info('areas_cnt_text %s', areas_cnt_text) + contours_biggest = contours_only_text_parent[np.argmax(areas_cnt_text)] + contours_only_text_parent = [contours_only_text_parent[jz] for jz in range(len(contours_only_text_parent)) if areas_cnt_text[jz] > min_con_area] + areas_cnt_text_parent = [areas_cnt_text[jz] for jz in range(len(areas_cnt_text)) if areas_cnt_text[jz] > min_con_area] + + index_con_parents = np.argsort(areas_cnt_text_parent) + contours_only_text_parent = list(np.array(contours_only_text_parent)[index_con_parents]) + areas_cnt_text_parent = list(np.array(areas_cnt_text_parent)[index_con_parents]) + + cx_bigest_big, cy_biggest_big, _, _, _, _, _ = find_new_features_of_contours([contours_biggest]) + cx_bigest, cy_biggest, _, _, _, _, _ = find_new_features_of_contours(contours_only_text_parent) + + contours_only_text_d, hir_on_text_d = return_contours_of_image(text_only_d) + contours_only_text_parent_d = return_parent_contours(contours_only_text_d, hir_on_text_d) + + areas_cnt_text_d = np.array([cv2.contourArea(contours_only_text_parent_d[j]) for j in range(len(contours_only_text_parent_d))]) + areas_cnt_text_d = areas_cnt_text_d / float(text_only_d.shape[0] * text_only_d.shape[1]) + + if len(areas_cnt_text_d)>0: + contours_biggest_d = contours_only_text_parent_d[np.argmax(areas_cnt_text_d)] + index_con_parents_d=np.argsort(areas_cnt_text_d) + contours_only_text_parent_d=list(np.array(contours_only_text_parent_d)[index_con_parents_d] ) + areas_cnt_text_d=list(np.array(areas_cnt_text_d)[index_con_parents_d] ) + + cx_bigest_d_big, cy_biggest_d_big, _, _, _, _, _ = find_new_features_of_contours([contours_biggest_d]) + cx_bigest_d, cy_biggest_d, _, _, _, _, _ = find_new_features_of_contours(contours_only_text_parent_d) + try: + if len(cx_bigest_d) >= 5: + cx_bigest_d_last5 = cx_bigest_d[-5:] + cy_biggest_d_last5 = cy_biggest_d[-5:] + dists_d = [math.sqrt((cx_bigest_big[0] - cx_bigest_d_last5[j]) ** 2 + (cy_biggest_big[0] - cy_biggest_d_last5[j]) ** 2) for j in 
+                    try:
+                        if len(cx_bigest_d) >= 5:
+                            cx_bigest_d_last5 = cx_bigest_d[-5:]
+                            cy_biggest_d_last5 = cy_biggest_d[-5:]
+                            dists_d = [math.sqrt((cx_bigest_big[0] - cx_bigest_d_last5[j]) ** 2 + (cy_biggest_big[0] - cy_biggest_d_last5[j]) ** 2) for j in range(len(cy_biggest_d_last5))]
+                            ind_largest = len(cx_bigest_d) -5 + np.argmin(dists_d)
+                        else:
+                            cx_bigest_d_last5 = cx_bigest_d[-len(cx_bigest_d):]
+                            cy_biggest_d_last5 = cy_biggest_d[-len(cx_bigest_d):]
+                            dists_d = [math.sqrt((cx_bigest_big[0]-cx_bigest_d_last5[j])**2 + (cy_biggest_big[0]-cy_biggest_d_last5[j])**2) for j in range(len(cy_biggest_d_last5))]
+                            ind_largest = len(cx_bigest_d) - len(cx_bigest_d) + np.argmin(dists_d)
+
+                        cx_bigest_d_big[0] = cx_bigest_d[ind_largest]
+                        cy_biggest_d_big[0] = cy_biggest_d[ind_largest]
+                    except Exception as why:
+                        self.logger.error(why)
+
+                    (h, w) = text_only.shape[:2]
+                    center = (w // 2.0, h // 2.0)
+                    M = cv2.getRotationMatrix2D(center, slope_deskew, 1.0)
+                    M_22 = np.array(M)[:2, :2]
+                    p_big = np.dot(M_22, [cx_bigest_big, cy_biggest_big])
+                    x_diff = p_big[0] - cx_bigest_d_big
+                    y_diff = p_big[1] - cy_biggest_d_big
+
+                    contours_only_text_parent_d_ordered = []
+                    for i in range(len(contours_only_text_parent)):
+                        p = np.dot(M_22, [cx_bigest[i], cy_biggest[i]])
+                        p[0] = p[0] - x_diff[0]
+                        p[1] = p[1] - y_diff[0]
+                        dists = [math.sqrt((p[0] - cx_bigest_d[j]) ** 2 + (p[1] - cy_biggest_d[j]) ** 2) for j in range(len(cx_bigest_d))]
+                        contours_only_text_parent_d_ordered.append(contours_only_text_parent_d[np.argmin(dists)])
+                        # img2=np.zeros((text_only.shape[0],text_only.shape[1],3))
+                        # img2=cv2.fillPoly(img2,pts=[contours_only_text_parent_d[np.argmin(dists)]] ,color=(1,1,1))
+                        # plt.imshow(img2[:,:,0])
+                        # plt.show()
+                else:
+                    contours_only_text_parent_d_ordered = []
+                    contours_only_text_parent_d = []
+                    contours_only_text_parent = []
+
+            else:
+                contours_only_text_parent_d_ordered = []
+                contours_only_text_parent_d = []
+                contours_only_text_parent = []
+
+        else:
+            contours_only_text, hir_on_text = return_contours_of_image(text_only)
+            contours_only_text_parent = return_parent_contours(contours_only_text, hir_on_text)
+
+            if len(contours_only_text_parent) > 0:
+                areas_cnt_text = np.array([cv2.contourArea(contours_only_text_parent[j]) for j in range(len(contours_only_text_parent))])
+                areas_cnt_text = areas_cnt_text / float(text_only.shape[0] * text_only.shape[1])
+
+                contours_biggest = contours_only_text_parent[np.argmax(areas_cnt_text)]
+                contours_only_text_parent = [contours_only_text_parent[jz] for jz in range(len(contours_only_text_parent)) if areas_cnt_text[jz] > min_con_area]
+                areas_cnt_text_parent = [areas_cnt_text[jz] for jz in range(len(areas_cnt_text)) if areas_cnt_text[jz] > min_con_area]
+
+                index_con_parents = np.argsort(areas_cnt_text_parent)
+                contours_only_text_parent = list(np.array(contours_only_text_parent)[index_con_parents])
+                areas_cnt_text_parent = list(np.array(areas_cnt_text_parent)[index_con_parents])
+
+                cx_bigest_big, cy_biggest_big, _, _, _, _, _ = find_new_features_of_contours([contours_biggest])
+                cx_bigest, cy_biggest, _, _, _, _, _ = find_new_features_of_contours(contours_only_text_parent)
+                self.logger.debug('areas_cnt_text_parent %s', areas_cnt_text_parent)
+                # self.logger.debug('areas_cnt_text_parent_d %s', areas_cnt_text_parent_d)
+                # self.logger.debug('len(contours_only_text_parent) %s', len(contours_only_text_parent_d))
+            else:
+                pass
+        txt_con_org = get_textregion_contours_in_org_image(contours_only_text_parent, self.image, slope_first)
+        boxes_text, _ = get_text_region_boxes_by_given_contours(contours_only_text_parent)
+        boxes_marginals, _ = get_text_region_boxes_by_given_contours(polygons_of_marginals)
+
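The block above re-establishes region identity across deskewing: every original region centroid is pushed through the same rotation that produced the deskewed page (cv2.getRotationMatrix2D), corrected by the offset of the biggest region (x_diff, y_diff), and then matched to the nearest centroid among the deskewed contours. A minimal, self-contained sketch of that nearest-centroid matching; the function and variable names here are illustrative, not the ones used in run():

    import math
    import numpy as np
    import cv2

    def match_regions_after_deskew(centroids, centroids_deskewed, angle, shape):
        # Rotate original centroids the same way the page was deskewed,
        # then pair each with the closest deskewed centroid.
        h, w = shape
        M = cv2.getRotationMatrix2D((w // 2.0, h // 2.0), angle, 1.0)
        M_22 = np.array(M)[:2, :2]
        matches = []
        for cx, cy in centroids:
            px, py = np.dot(M_22, [cx, cy])
            dists = [math.hypot(px - dx, py - dy) for dx, dy in centroids_deskewed]
            matches.append(int(np.argmin(dists)))
        return matches

run() additionally anchors the mapping on the largest region before measuring distances, which compensates for the translation component that the 2x2 rotation submatrix alone cannot express.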
+        if not self.curved_line:
+            slopes, all_found_texline_polygons, boxes_text, txt_con_org, contours_only_text_parent, all_box_coord, index_by_text_par_con = self.get_slopes_and_deskew_new(txt_con_org, contours_only_text_parent, textline_mask_tot_ea, image_page_rotated, boxes_text, slope_deskew)
+            slopes_marginals, all_found_texline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, all_box_coord_marginals, _ = self.get_slopes_and_deskew_new(polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea, image_page_rotated, boxes_marginals, slope_deskew)
+        else:
+
+            scale_param = 1
+            all_found_texline_polygons, boxes_text, txt_con_org, contours_only_text_parent, all_box_coord, index_by_text_par_con, slopes = self.get_slopes_and_deskew_new_curved(txt_con_org, contours_only_text_parent, cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=1), image_page_rotated, boxes_text, text_only, num_col_classifier, scale_param, slope_deskew)
+            all_found_texline_polygons = small_textlines_to_parent_adherence2(all_found_texline_polygons, textline_mask_tot_ea, num_col_classifier)
+            all_found_texline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, all_box_coord_marginals, _, slopes_marginals = self.get_slopes_and_deskew_new_curved(polygons_of_marginals, polygons_of_marginals, cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=1), image_page_rotated, boxes_marginals, text_only, num_col_classifier, scale_param, slope_deskew)
+            all_found_texline_polygons_marginals = small_textlines_to_parent_adherence2(all_found_texline_polygons_marginals, textline_mask_tot_ea, num_col_classifier)
+        K.clear_session()
+        if self.full_layout:
+            if np.abs(slope_deskew) >= SLOPE_THRESHOLD:
+                contours_only_text_parent_d_ordered = list(np.array(contours_only_text_parent_d_ordered)[index_by_text_par_con])
+                text_regions_p, contours_only_text_parent, contours_only_text_parent_h, all_box_coord, all_box_coord_h, all_found_texline_polygons, all_found_texline_polygons_h, slopes, slopes_h, contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered = check_any_text_region_in_model_one_is_main_or_header(text_regions_p, regions_fully, contours_only_text_parent, all_box_coord, all_found_texline_polygons, slopes, contours_only_text_parent_d_ordered)
+            else:
+                contours_only_text_parent_d_ordered = None
+                text_regions_p, contours_only_text_parent, contours_only_text_parent_h, all_box_coord, all_box_coord_h, all_found_texline_polygons, all_found_texline_polygons_h, slopes, slopes_h, contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered = check_any_text_region_in_model_one_is_main_or_header(text_regions_p, regions_fully, contours_only_text_parent, all_box_coord, all_found_texline_polygons, slopes, contours_only_text_parent_d_ordered)
+
+            if self.plotter:
+                self.plotter.save_plot_of_layout(text_regions_p, image_page)
+                self.plotter.save_plot_of_layout_all(text_regions_p, image_page)
+
+            K.clear_session()
+
+            polygons_of_tabels = []
+            pixel_img = 4
+            polygons_of_drop_capitals = return_contours_of_interested_region_by_min_size(text_regions_p, pixel_img)
+            all_found_texline_polygons = adhere_drop_capital_region_into_corresponding_textline(text_regions_p, polygons_of_drop_capitals, contours_only_text_parent, contours_only_text_parent_h, all_box_coord, all_box_coord_h, all_found_texline_polygons, all_found_texline_polygons_h, kernel=KERNEL, curved_line=self.curved_line)
+
+            # print(len(contours_only_text_parent_h),len(contours_only_text_parent_h_d_ordered),'contours_only_text_parent_h')
+            pixel_lines = 6
+
+            if not self.headers_off:
+                if np.abs(slope_deskew) < SLOPE_THRESHOLD:
+                    num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, pixel_lines, contours_only_text_parent_h)
+                else:
+                    _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, pixel_lines, contours_only_text_parent_h_d_ordered)
+            elif self.headers_off:
+                if np.abs(slope_deskew) < SLOPE_THRESHOLD:
+                    num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, pixel_lines)
+                else:
+                    _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, pixel_lines)
+
+            # print(peaks_neg_fin,peaks_neg_fin_d,'num_col2')
+            # print(splitter_y_new,splitter_y_new_d,'num_col_classifier')
+            # print(matrix_of_lines_ch.shape,matrix_of_lines_ch_d.shape,'matrix_of_lines_ch')
+
+            if num_col_classifier >= 3:
+                if np.abs(slope_deskew) < SLOPE_THRESHOLD:
+                    regions_without_separators = regions_without_separators.astype(np.uint8)
+                    regions_without_separators = cv2.erode(regions_without_separators[:, :], KERNEL, iterations=6)
+                    random_pixels_for_image = np.random.randn(regions_without_separators.shape[0], regions_without_separators.shape[1])
+                    random_pixels_for_image[random_pixels_for_image < -0.5] = 0
+                    random_pixels_for_image[random_pixels_for_image != 0] = 1
+                    regions_without_separators[(random_pixels_for_image[:, :] == 1) & (text_regions_p[:, :] == 5)] = 1
+                else:
+                    regions_without_separators_d = regions_without_separators_d.astype(np.uint8)
+                    regions_without_separators_d = cv2.erode(regions_without_separators_d[:, :], KERNEL, iterations=6)
+                    random_pixels_for_image = np.random.randn(regions_without_separators_d.shape[0], regions_without_separators_d.shape[1])
+                    random_pixels_for_image[random_pixels_for_image < -0.5] = 0
+                    random_pixels_for_image[random_pixels_for_image != 0] = 1
+                    regions_without_separators_d[(random_pixels_for_image[:, :] == 1) & (text_regions_p_1_n[:, :] == 5)] = 1
+
+            if np.abs(slope_deskew) < SLOPE_THRESHOLD:
+                boxes = return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_without_separators, matrix_of_lines_ch, num_col_classifier, erosion_hurts)
+            else:
+                boxes_d = return_boxes_of_images_by_order_of_reading_new(splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, num_col_classifier, erosion_hurts)
+
+        if self.plotter:
+            self.plotter.write_images_into_directory(polygons_of_images, image_page)
+
+        if self.full_layout:
+            if np.abs(slope_deskew) < SLOPE_THRESHOLD:
+                order_text_new, id_of_texts_tot = self.do_order_of_regions(contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot)
+            else:
+                order_text_new, id_of_texts_tot = self.do_order_of_regions(contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered, boxes_d, textline_mask_tot_d)
+
+            pcgts = self.writer.build_pagexml_full_layout(contours_only_text_parent, contours_only_text_parent_h, page_coord, order_text_new, id_of_texts_tot, all_found_texline_polygons, all_found_texline_polygons_h, all_box_coord, all_box_coord_h, polygons_of_images, polygons_of_tabels, polygons_of_drop_capitals, polygons_of_marginals, all_found_texline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, cont_page, polygons_lines_xml)
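The num_col_classifier >= 3 branch above deliberately roughens the region mask before reading-order box computation: it erodes the mask, then re-adds roughly two thirds of the separator-labelled pixels (label 5), selected by thresholded Gaussian noise. A compact sketch of just that noise trick (the function name and sep_label default are illustrative):

    import numpy as np
    import cv2

    def loosen_regions(regions, labels, kernel, sep_label=5):
        # Erode the binary region mask, then randomly restore ~69% of the
        # pixels that the label image marks as separators (randn >= -0.5).
        regions = cv2.erode(regions.astype(np.uint8), kernel, iterations=6)
        noise = np.random.randn(*regions.shape)
        sprinkle = noise >= -0.5
        regions[sprinkle & (labels == sep_label)] = 1
        return regions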
+            self.logger.info("Job done in %ss", str(time.time() - t0))
+            return pcgts
+        else:
+            contours_only_text_parent_h = None
+            if np.abs(slope_deskew) < SLOPE_THRESHOLD:
+                order_text_new, id_of_texts_tot = self.do_order_of_regions(contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot)
+            else:
+                contours_only_text_parent_d_ordered = list(np.array(contours_only_text_parent_d_ordered)[index_by_text_par_con])
+                order_text_new, id_of_texts_tot = self.do_order_of_regions(contours_only_text_parent_d_ordered, contours_only_text_parent_h, boxes_d, textline_mask_tot_d)
+            pcgts = self.writer.build_pagexml_no_full_layout(txt_con_org, page_coord, order_text_new, id_of_texts_tot, all_found_texline_polygons, all_box_coord, polygons_of_images, polygons_of_marginals, all_found_texline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_xml)
+            self.logger.info("Job done in %ss", str(time.time() - t0))
+            return pcgts
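That concludes the run() method: both branches return a PcGts object ready for serialization. As a hedged sketch (the exact constructor signature is an assumption inferred from the EynollahProcessor wiring further down; the input filename is hypothetical), the method can be driven outside OCR-D like this:

    from ocrd_models.ocrd_page import to_xml
    from qurator.eynollah.eynollah import Eynollah

    eynollah = Eynollah(
        dir_models='/path/to/models',   # see https://qurator-data.de/eynollah
        image_filename='page0001.tif',  # hypothetical input image
        curved_line=False,
        full_layout=True,
        allow_scaling=False,
        headers_off=False,
    )
    pcgts = eynollah.run()
    with open('page0001.xml', 'w', encoding='utf-8') as f:
        f.write(to_xml(pcgts))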
diff --git a/qurator/eynollah/ocrd-tool.json b/qurator/eynollah/ocrd-tool.json
new file mode 100644
index 0000000..a51e77a
--- /dev/null
+++ b/qurator/eynollah/ocrd-tool.json
@@ -0,0 +1,49 @@
+{
+  "version": "0.0.8",
+  "git_url": "https://github.com/qurator-spk/eynollah",
+  "tools": {
+    "ocrd-eynollah-segment": {
+      "executable": "ocrd-eynollah-segment",
+      "categories": ["Layout analysis"],
+      "description": "Segment page into regions and lines and do reading order detection with eynollah",
+      "input_file_grp": ["OCR-D-IMG", "OCR-D-SEG-PAGE", "OCR-D-GT-SEG-PAGE"],
+      "output_file_grp": ["OCR-D-SEG-LINE"],
+      "steps": ["layout/segmentation/region", "layout/segmentation/line"],
+      "parameters": {
+        "models": {
+          "type": "string",
+          "format": "file",
+          "cacheable": true,
+          "description": "Path to directory containing models to be used (See https://qurator-data.de/eynollah)",
+          "required": true
+        },
+        "dpi": {
+          "type": "number",
+          "format": "float",
+          "description": "pixel density in dots per inch (overrides any meta-data in the images); ignored if <= 0 (with fall-back 230)",
+          "default": 0
+        },
+        "full_layout": {
+          "type": "boolean",
+          "default": true,
+          "description": "Try to detect all element subtypes, including drop-caps and headings"
+        },
+        "curved_line": {
+          "type": "boolean",
+          "default": false,
+          "description": "try to return contour of textlines instead of just rectangle bounding box. Needs more processing time"
+        },
+        "allow_scaling": {
+          "type": "boolean",
+          "default": false,
+          "description": "check the resolution against the number of detected columns and if needed, scale the image up or down during layout detection (heuristic to improve quality and performance)"
+        },
+        "headers_off": {
+          "type": "boolean",
+          "default": false,
+          "description": "ignore the special role of headings during reading order detection"
+        }
+      }
+    }
+  }
+}
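The tool description above is what the OCR-D framework reads to validate parameters and fill in defaults. A small sketch of inspecting it the same way the processor below does (via pkg_resources); the printed defaults follow directly from the JSON:

    from json import loads
    from pkg_resources import resource_string

    OCRD_TOOL = loads(resource_string('qurator.eynollah', 'ocrd-tool.json').decode('utf8'))
    params = OCRD_TOOL['tools']['ocrd-eynollah-segment']['parameters']
    defaults = {name: spec.get('default') for name, spec in params.items()}
    # {'models': None, 'dpi': 0, 'full_layout': True, 'curved_line': False,
    #  'allow_scaling': False, 'headers_off': False} -- 'models' has no
    # default because it is required.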
"_enhanced.png"), img_res) - + def save_plot_of_textline_density(self, img_patch_org): if self.dir_of_all is not None: plt.figure(figsize=(80,40)) diff --git a/qurator/eynollah/processor.py b/qurator/eynollah/processor.py new file mode 100644 index 0000000..41b12ae --- /dev/null +++ b/qurator/eynollah/processor.py @@ -0,0 +1,67 @@ +from json import loads +from pkg_resources import resource_string +from tempfile import NamedTemporaryFile +from pathlib import Path +from os.path import join + +from PIL import Image + +from ocrd import Processor +from ocrd_modelfactory import page_from_file, exif_from_filename +from ocrd_models import OcrdFile, OcrdExif +from ocrd_models.ocrd_page import to_xml +from ocrd_utils import ( + getLogger, + MIMETYPE_PAGE, + assert_file_grp_cardinality, + make_file_id +) + +from .eynollah import Eynollah +from .utils.pil_cv2 import pil2cv + +OCRD_TOOL = loads(resource_string(__name__, 'ocrd-tool.json').decode('utf8')) + +class EynollahProcessor(Processor): + + def __init__(self, *args, **kwargs): + kwargs['ocrd_tool'] = OCRD_TOOL['tools']['ocrd-eynollah-segment'] + kwargs['version'] = OCRD_TOOL['version'] + super().__init__(*args, **kwargs) + + def process(self): + LOG = getLogger('eynollah') + assert_file_grp_cardinality(self.input_file_grp, 1) + assert_file_grp_cardinality(self.output_file_grp, 1) + for n, input_file in enumerate(self.input_files): + page_id = input_file.pageId or input_file.ID + LOG.info("INPUT FILE %s (%d/%d) ", page_id, n + 1, len(self.input_files)) + pcgts = page_from_file(self.workspace.download_file(input_file)) + LOG.debug('width %s height %s', pcgts.get_Page().imageWidth, pcgts.get_Page().imageHeight) + self.add_metadata(pcgts) + page = pcgts.get_Page() + # XXX loses DPI information + # page_image, _, _ = self.workspace.image_from_page(page, page_id, feature_filter='binarized') + image_filename = self.workspace.download_file(next(self.workspace.mets.find_files(url=page.imageFilename))).local_filename + eynollah_kwargs = { + 'dir_models': self.resolve_resource(self.parameter['models']), + 'allow_enhancement': False, + 'curved_line': self.parameter['curved_line'], + 'full_layout': self.parameter['full_layout'], + 'allow_scaling': self.parameter['allow_scaling'], + 'headers_off': self.parameter['headers_off'], + 'override_dpi': self.parameter['dpi'], + 'logger': LOG, + 'pcgts': pcgts, + 'image_filename': image_filename + } + Eynollah(**eynollah_kwargs).run() + file_id = make_file_id(input_file, self.output_file_grp) + pcgts.set_pcGtsId(file_id) + self.workspace.add_file( + ID=file_id, + file_grp=self.output_file_grp, + pageId=page_id, + mimetype=MIMETYPE_PAGE, + local_filename=join(self.output_file_grp, file_id) + '.xml', + content=to_xml(pcgts)) diff --git a/src/eynollah/utils/__init__.py b/qurator/eynollah/utils/__init__.py similarity index 52% rename from src/eynollah/utils/__init__.py rename to qurator/eynollah/utils/__init__.py index 5ccb2af..fb6b476 100644 --- a/src/eynollah/utils/__init__.py +++ b/qurator/eynollah/utils/__init__.py @@ -1,38 +1,22 @@ -from typing import Tuple -from logging import getLogger -import time import math -try: - import matplotlib.pyplot as plt -except ImportError: - plt = None +import matplotlib.pyplot as plt import numpy as np from shapely import geometry import cv2 +import imutils from scipy.signal import find_peaks from scipy.ndimage import gaussian_filter1d from .is_nan import isNaN from .contour import (contours_in_same_horizon, - find_center_of_contours, find_new_features_of_contours, 
diff --git a/src/eynollah/utils/__init__.py b/qurator/eynollah/utils/__init__.py
similarity index 52%
rename from src/eynollah/utils/__init__.py
rename to qurator/eynollah/utils/__init__.py
index 5ccb2af..fb6b476 100644
--- a/src/eynollah/utils/__init__.py
+++ b/qurator/eynollah/utils/__init__.py
@@ -1,38 +1,22 @@
-from typing import Tuple
-from logging import getLogger
-import time
 import math
-try:
-    import matplotlib.pyplot as plt
-except ImportError:
-    plt = None
+import matplotlib.pyplot as plt
 import numpy as np
 from shapely import geometry
 import cv2
+import imutils
 from scipy.signal import find_peaks
 from scipy.ndimage import gaussian_filter1d

 from .is_nan import isNaN
 from .contour import (contours_in_same_horizon,
-                      find_center_of_contours,
                       find_new_features_of_contours,
                       return_contours_of_image,
                       return_parent_contours)

-def pairwise(iterable):
-    # pairwise('ABCDEFG') → AB BC CD DE EF FG
-
-    iterator = iter(iterable)
-    a = next(iterator, None)
-
-    for b in iterator:
-        yield a, b
-        a = b
-
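The pairwise() helper deleted above walks an iterable in overlapping pairs, as its docstring comment ('ABCDEFG' → AB BC CD DE EF FG) indicates. For reference, the same generator with a usage check:

    def pairwise(iterable):
        # pairwise('ABCD') -> ('A','B') ('B','C') ('C','D')
        iterator = iter(iterable)
        a = next(iterator, None)
        for b in iterator:
            yield a, b
            a = b

    assert list(pairwise('ABCD')) == [('A', 'B'), ('B', 'C'), ('C', 'D')]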
-def return_x_start_end_mothers_childs_and_type_of_reading_order(
-    x_min_hor_some, x_max_hor_some, cy_hor_some, peak_points, cy_hor_diff):
-
+def return_x_start_end_mothers_childs_and_type_of_reading_order(x_min_hor_some,x_max_hor_some,cy_hor_some,peak_points,cy_hor_diff):
+
+
     x_start=[]
     x_end=[]
     kind=[]#if covers 2 and more than 2 columns set it to 1 otherwise 0
@@ -40,43 +24,51 @@ def return_x_start_end_mothers_childs_and_type_of_reading_order(
     y_sep=[]
     y_diff=[]
     new_main_sep_y=[]
-
+
     indexer=0
     for i in range(len(x_min_hor_some)):
         starting=x_min_hor_some[i]-peak_points
         starting=starting[starting>=0]
         min_start=np.argmin(starting)
+
+
         ending=peak_points-x_max_hor_some[i]
         len_ending_neg=len(ending[ending<=0])
-
+
         ending=ending[ending>0]
         max_end=np.argmin(ending)+len_ending_neg
-
+
+
         if (max_end-min_start)>=2:
             if (max_end-min_start)==(len(peak_points)-1):
                 new_main_sep_y.append(indexer)
-
+
             #print((max_end-min_start),len(peak_points),'(max_end-min_start)')
             y_sep.append(cy_hor_some[i])
             y_diff.append(cy_hor_diff[i])
             x_end.append(max_end)
-
+
             x_start.append( min_start)
-
+
             len_sep.append(max_end-min_start)
             if max_end==min_start+1:
                 kind.append(0)
             else:
                 kind.append(1)
-
+
            indexer+=1
-
-    x_start_returned = np.array(x_start, dtype=int)
-    x_end_returned = np.array(x_end, dtype=int)
-    y_sep_returned = np.array(y_sep, dtype=int)
-    y_diff_returned = np.array(y_diff, dtype=int)
-
-    all_args_uniq = contours_in_same_horizon(y_sep_returned)
+
+
+    x_start_returned=np.copy(x_start)
+    x_end_returned=np.copy(x_end)
+    y_sep_returned=np.copy(y_sep)
+    y_diff_returned=np.copy(y_diff)
+
+
+
+
+    all_args_uniq=contours_in_same_horizon(y_sep_returned)
+
     args_to_be_unified=[]
     y_unified=[]
     y_diff_unified=[]
@@ -92,10 +84,7 @@ def return_x_start_end_mothers_childs_and_type_of_reading_order(
             y_sep_same_hor=np.array(y_sep_returned)[all_args_uniq[dd]]
             y_diff_same_hor=np.array(y_diff_returned)[all_args_uniq[dd]]
             #print('burda2')
-            if (x_s_same_hor[0]==x_e_same_hor[1]-1 or
-                x_s_same_hor[1]==x_e_same_hor[0]-1 and
-                x_s_same_hor[0]!=x_s_same_hor[1] and
-                x_e_same_hor[0]!=x_e_same_hor[1]):
+            if x_s_same_hor[0]==(x_e_same_hor[1]-1) or x_s_same_hor[1]==(x_e_same_hor[0]-1) and x_s_same_hor[0]!=x_s_same_hor[1] and x_e_same_hor[0]!=x_e_same_hor[1]:
                 #print('burda3')
                 for arg_in in all_args_uniq[dd]:
                     #print(arg_in,'arg_in')
                     args_to_be_unified.append(arg_in)
                 y_selected=np.min(y_sep_same_hor)
                 y_diff_selected=np.max(y_diff_same_hor)
                 x_s_selected=np.min(x_s_same_hor)
                 x_e_selected=np.max(x_e_same_hor)
+
                 x_s_unified.append(x_s_selected)
                 x_e_unified.append(x_e_selected)
                 y_unified.append(y_selected)
                 y_diff_unified.append(y_diff_selected)
+
+
+
             #print(x_s_same_hor,'x_s_same_hor')
             #print(x_e_same_hor[:]-1,'x_e_same_hor')
             #print('#############################')
+
     #print(x_s_unified,'y_selected')
     #print(x_e_unified,'x_s_selected')
     #print(y_unified,'x_e_same_hor')
-
+
     args_lines_not_unified=list( set(range(len(y_sep_returned)))-set(args_to_be_unified) )
+
     #print(args_lines_not_unified,'args_lines_not_unified')
-
+
     x_start_returned_not_unified=list( np.array(x_start_returned)[args_lines_not_unified] )
     x_end_returned_not_unified=list( np.array(x_end_returned)[args_lines_not_unified] )
     y_sep_returned_not_unified=list (np.array(y_sep_returned)[args_lines_not_unified] )
     y_diff_returned_not_unified=list (np.array(y_diff_returned)[args_lines_not_unified] )
-
+
     for dv in range(len(y_unified)):
         y_sep_returned_not_unified.append(y_unified[dv])
         y_diff_returned_not_unified.append(y_diff_unified[dv])
         x_start_returned_not_unified.append(x_s_unified[dv])
         x_end_returned_not_unified.append(x_e_unified[dv])
-
+
     #print(y_sep_returned,'y_sep_returned')
     #print(x_start_returned,'x_start_returned')
     #print(x_end_returned,'x_end_returned')
-
-    x_start_returned = np.array(x_start_returned_not_unified, dtype=int)
-    x_end_returned = np.array(x_end_returned_not_unified, dtype=int)
-    y_sep_returned = np.array(y_sep_returned_not_unified, dtype=int)
-    y_diff_returned = np.array(y_diff_returned_not_unified, dtype=int)
-
+
+    x_start_returned=np.copy(x_start_returned_not_unified)
+    x_end_returned=np.copy(x_end_returned_not_unified)
+    y_sep_returned=np.copy(y_sep_returned_not_unified)
+    y_diff_returned=np.copy(y_diff_returned_not_unified)
+
+
     #print(y_sep_returned,'y_sep_returned2')
     #print(x_start_returned,'x_start_returned2')
     #print(x_end_returned,'x_end_returned2')
     #print(new_main_sep_y,'new_main_sep_y')
-
+
     #print(x_start,'x_start')
     #print(x_end,'x_end')
     if len(new_main_sep_y)>0:
-
+
         min_ys=np.min(y_sep)
         max_ys=np.max(y_sep)
-
+
         y_mains=[]
         y_mains.append(min_ys)
         y_mains_sep_ohne_grenzen=[]
-
+
         for ii in range(len(new_main_sep_y)):
             y_mains.append(y_sep[new_main_sep_y[ii]])
             y_mains_sep_ohne_grenzen.append(y_sep[new_main_sep_y[ii]])
-
+
         y_mains.append(max_ys)
-
+
         y_mains_sorted=np.sort(y_mains)
         diff=np.diff(y_mains_sorted)
         argm=np.argmax(diff)
-
+
         y_min_new=y_mains_sorted[argm]
         y_max_new=y_mains_sorted[argm+1]
-
+
         #print(y_min_new,'y_min_new')
         #print(y_max_new,'y_max_new')
+
+
         #print(y_sep[new_main_sep_y[0]],y_sep,'yseps')
         x_start=np.array(x_start)
         x_end=np.array(x_end)
         kind=np.array(kind)
         y_sep=np.array(y_sep)
-        if (y_min_new in y_mains_sep_ohne_grenzen and
-            y_max_new in y_mains_sep_ohne_grenzen):
+        if (y_min_new in y_mains_sep_ohne_grenzen) and (y_max_new in y_mains_sep_ohne_grenzen):
             x_start=x_start[(y_sep>y_min_new) & (y_sep<y_max_new)]
             x_end=x_end[(y_sep>y_min_new) & (y_sep<y_max_new)]
             kind=kind[(y_sep>y_min_new) & (y_sep<y_max_new)]
             y_sep=y_sep[(y_sep>y_min_new) & (y_sep<y_max_new)]
        elif (y_min_new in y_mains_sep_ohne_grenzen) and (y_max_new not in y_mains_sep_ohne_grenzen):
             x_start=x_start[(y_sep>y_min_new) & (y_sep<=y_max_new)]
             #print('burda1')
@@ -190,8 +185,7 @@ def return_x_start_end_mothers_childs_and_type_of_reading_order(
             x_end=x_end[(y_sep>y_min_new) & (y_sep<=y_max_new)]
             #print('burda2')
             kind=kind[(y_sep>y_min_new) & (y_sep<=y_max_new)]
             y_sep=y_sep[(y_sep>y_min_new) & (y_sep<=y_max_new)]
-        elif (y_min_new not in y_mains_sep_ohne_grenzen and
-              y_max_new in y_mains_sep_ohne_grenzen):
+        elif (y_min_new not in y_mains_sep_ohne_grenzen) and (y_max_new in y_mains_sep_ohne_grenzen):
             x_start=x_start[(y_sep>=y_min_new) & (y_sep<y_max_new)]
             x_end=x_end[(y_sep>=y_min_new) & (y_sep<y_max_new)]
             kind=kind[(y_sep>=y_min_new) & (y_sep<y_max_new)]
             y_sep=y_sep[(y_sep>=y_min_new) & (y_sep<y_max_new)]

     if len(remained_sep_indexes)>1:
         #print(np.array(remained_sep_indexes),'np.array(remained_sep_indexes)')
         #print(np.array(mother),'mother')
-        remained_sep_indexes_without_mother = remained_sep_indexes[mother==0]
-        remained_sep_indexes_with_child_without_mother = remained_sep_indexes[(mother==0) & (child==1)]
+        remained_sep_indexes_without_mother=np.array(list(remained_sep_indexes))[np.array(mother)==0]
+        remained_sep_indexes_with_child_without_mother=np.array(list(remained_sep_indexes))[(np.array(mother)==0) & (np.array(child)==1)]
         #print(remained_sep_indexes_without_mother,'remained_sep_indexes_without_mother')
-        #print(remained_sep_indexes_without_mother,'remained_sep_indexes_without_mother')
-
-        x_end_with_child_without_mother = x_end[remained_sep_indexes_with_child_without_mother]
-        x_start_with_child_without_mother = x_start[remained_sep_indexes_with_child_without_mother]
-        y_lines_with_child_without_mother = y_sep[remained_sep_indexes_with_child_without_mother]
-
+
+
+
+        x_end_with_child_without_mother=np.array(x_end)[np.array(remained_sep_indexes_with_child_without_mother)]
+
+        x_start_with_child_without_mother=np.array(x_start)[np.array(remained_sep_indexes_with_child_without_mother)]
+
+        y_lines_with_child_without_mother=np.array(y_sep)[np.array(remained_sep_indexes_with_child_without_mother)]
+
+
         reading_orther_type=0
-        x_end_without_mother = x_end[remained_sep_indexes_without_mother]
-        x_start_without_mother = x_start[remained_sep_indexes_without_mother]
-        y_lines_without_mother = y_sep[remained_sep_indexes_without_mother]
-
+
+
+        x_end_without_mother=np.array(x_end)[np.array(remained_sep_indexes_without_mother)]
+        x_start_without_mother=np.array(x_start)[np.array(remained_sep_indexes_without_mother)]
+        y_lines_without_mother=np.array(y_sep)[np.array(remained_sep_indexes_without_mother)]
+
         if len(remained_sep_indexes_without_mother)>=2:
             for i in range(len(remained_sep_indexes_without_mother)-1):
-                nodes_i=set(range(x_start[remained_sep_indexes_without_mother[i]],
-                                  x_end[remained_sep_indexes_without_mother[i]]
-                                  # + 1
-                                  ))
+                ##nodes_i=set(range(x_start[remained_sep_indexes_without_mother[i]],x_end[remained_sep_indexes_without_mother[i]]+1))
+                nodes_i=set(range(x_start[remained_sep_indexes_without_mother[i]],x_end[remained_sep_indexes_without_mother[i]]))
                 for j in range(i+1,len(remained_sep_indexes_without_mother)):
-                    nodes_j=set(range(x_start[remained_sep_indexes_without_mother[j]],
-                                      x_end[remained_sep_indexes_without_mother[j]]
-                                      # + 1
-                                      ))
-                    set_diff = nodes_i - nodes_j
-                    if set_diff != nodes_i:
-                        reading_orther_type = 1
+                    #nodes_j=set(range(x_start[remained_sep_indexes_without_mother[j]],x_end[remained_sep_indexes_without_mother[j]]+1))
+                    nodes_j=set(range(x_start[remained_sep_indexes_without_mother[j]],x_end[remained_sep_indexes_without_mother[j]]))
+
+                    set_diff=nodes_i-nodes_j
+
+                    if set_diff!=nodes_i:
+                        reading_orther_type=1
                     else:
-                        reading_orther_type = 0
+                        reading_orther_type=0
         #print(reading_orther_type,'javab')
+
         #print(y_lines_with_child_without_mother,'y_lines_with_child_without_mother')
         #print(x_start_with_child_without_mother,'x_start_with_child_without_mother')
         #print(x_end_with_child_without_mother,'x_end_with_hild_without_mother')
-
-        len_sep_with_child = len(child[child==1])
-
+
+        len_sep_with_child=len(np.array(child)[np.array(child)==1])
+
         #print(len_sep_with_child,'len_sep_with_child')
-        there_is_sep_with_child = 0
-        if len_sep_with_child >= 1:
-            there_is_sep_with_child = 1
+        there_is_sep_with_child=0
+
+        if len_sep_with_child>=1:
+            there_is_sep_with_child=1
+
         #print(all_args_uniq,'all_args_uniq')
         #print(args_to_be_unified,'args_to_be_unified')
-
-    return (reading_orther_type,
-            x_start_returned,
-            x_end_returned,
-            y_sep_returned,
-            y_diff_returned,
-            y_lines_without_mother,
-            x_start_without_mother,
-            x_end_without_mother,
-            there_is_sep_with_child,
-            y_lines_with_child_without_mother,
-            x_start_with_child_without_mother,
-            x_end_with_child_without_mother,
-            new_main_sep_y)
-
-def box2rect(box: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
-    return (box[1], box[1] + box[3],
-            box[0], box[0] + box[2])
-
-def box2slice(box: Tuple[int, int, int, int]) -> Tuple[slice, slice]:
-    return (slice(box[1], box[1] + box[3]),
-            slice(box[0], box[0] + box[2]))
-
+
+
+    return reading_orther_type,x_start_returned, x_end_returned ,y_sep_returned,y_diff_returned,y_lines_without_mother,x_start_without_mother,x_end_without_mother,there_is_sep_with_child,y_lines_with_child_without_mother,x_start_with_child_without_mother,x_end_with_child_without_mother

 def crop_image_inside_box(box, img_org_copy):
-    image_box = img_org_copy[box2slice(box)]
-    return image_box, box2rect(box)
+    image_box = img_org_copy[box[1] : box[1] + box[3], box[0] : box[0] + box[2]]
+    return image_box, [box[1], box[1] + box[3], box[0], box[0] + box[2]]

 def otsu_copy_binary(img):
     img_r = np.zeros((img.shape[0], img.shape[1], 3))
     img1 = img[:, :, 0]
     retval1, threshold1 = cv2.threshold(img1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
+
     img_r[:, :, 0] = threshold1
     img_r[:, :, 1] = threshold1
     img_r[:, :, 2] = threshold1
@@ -335,7 +312,9 @@ def otsu_copy_binary(img):
     img_r = img_r / float(np.max(img_r)) * 255
     return img_r

+
 def find_features_of_lines(contours_main):
+
     areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))])
     M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))]
     cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))]
@@ -347,6 +326,7 @@ def find_features_of_lines(contours_main):
     y_max_main = np.array([np.max(contours_main[j][:, 0, 1]) for j in range(len(contours_main))])

     slope_lines = []
+
     for kk in range(len(contours_main)):
         [vx, vy, x, y] = cv2.fitLine(contours_main[kk], cv2.DIST_L2, 0, 0.01, 0.01)
         slope_lines.append(((vy / vx) / np.pi * 180)[0])
@@ -359,46 +339,29 @@ def find_features_of_lines(contours_main):
     slope_lines[(slope_lines != 0) & (slope_lines != 1)] = 2

     dis_x = np.abs(x_max_main - x_min_main)
-    return (slope_lines,
-            dis_x,
-            x_min_main,
-            x_max_main,
-            np.array(cy_main),
-            np.array(slope_lines_org),
-            y_min_main,
-            y_max_main,
-            np.array(cx_main))
+    return slope_lines, dis_x, x_min_main, x_max_main, np.array(cy_main), np.array(slope_lines_org), y_min_main, y_max_main, np.array(cx_main)

 def boosting_headers_by_longshot_region_segmentation(textregion_pre_p, textregion_pre_np, img_only_text):
     textregion_pre_p_org = np.copy(textregion_pre_p)
     # 4 is drop capitals
-    headers_in_longshot = textregion_pre_np[:, :, 0] == 2
-    #headers_in_longshot = ((textregion_pre_np[:,:,0]==2) |
-    #                       (textregion_pre_np[:,:,0]==1))
-    textregion_pre_p[:, :, 0][headers_in_longshot &
-                              (textregion_pre_p[:, :, 0] != 4)] = 2
+    headers_in_longshot = (textregion_pre_np[:, :, 0] == 2) * 1
+    # headers_in_longshot= ( (textregion_pre_np[:,:,0]==2) | (textregion_pre_np[:,:,0]==1) )*1
+    textregion_pre_p[:, :, 0][(headers_in_longshot[:, :] == 1) & (textregion_pre_p[:, :, 0] != 4)] = 2
     textregion_pre_p[:, :, 0][textregion_pre_p[:, :, 0] == 1] = 0
     # earlier it was so, but by this manner the drop capitals are also deleted
-    # textregion_pre_p[:,:,0][(img_only_text[:,:]==1) &
-    #                         (textregion_pre_p[:,:,0]!=7) &
-    #                         (textregion_pre_p[:,:,0]!=2)] = 1
-    textregion_pre_p[:, :, 0][(img_only_text[:, :] == 1) &
-                              (textregion_pre_p[:, :, 0] != 7) &
-                              (textregion_pre_p[:, :, 0] != 4) &
-                              (textregion_pre_p[:, :, 0] != 2)] = 1
+    # textregion_pre_p[:,:,0][( img_only_text[:,:]==1) & (textregion_pre_p[:,:,0]!=7) & (textregion_pre_p[:,:,0]!=2)]=1
+    textregion_pre_p[:, :, 0][(img_only_text[:, :] == 1) & (textregion_pre_p[:, :, 0] != 7) & (textregion_pre_p[:, :, 0] != 4) & (textregion_pre_p[:, :, 0] != 2)] = 1
     return textregion_pre_p

+
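crop_image_inside_box above converts between the two box conventions used throughout these utils: OpenCV-style (x, y, w, h) tuples and (y0, y1, x0, x1) index order, which the deleted box2rect/box2slice helpers made explicit. A small demonstration of the equivalence with a toy array:

    import numpy as np

    def box2rect(box):
        # (x, y, w, h) -> (y0, y1, x0, x1)
        return (box[1], box[1] + box[3], box[0], box[0] + box[2])

    def box2slice(box):
        # (x, y, w, h) -> row/column slices, so img[box2slice(box)] crops the box
        return (slice(box[1], box[1] + box[3]), slice(box[0], box[0] + box[2]))

    img = np.arange(36).reshape(6, 6)
    box = (1, 2, 3, 2)                     # x=1, y=2, w=3, h=2
    assert img[box2slice(box)].shape == (2, 3)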
 def find_num_col_deskew(regions_without_separators, sigma_, multiplier=3.8):
-    regions_without_separators_0 = regions_without_separators.sum(axis=1)
+    regions_without_separators_0 = regions_without_separators[:,:].sum(axis=1)
     z = gaussian_filter1d(regions_without_separators_0, sigma_)
     return np.std(z)
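find_num_col_deskew condenses the page into a horizontal projection profile (one sum per row), smooths it, and returns the standard deviation: on a well-deskewed page the profile alternates sharply between text lines and gaps, so the deviation is high. A hedged sketch of using it as a scoring function over candidate angles; eynollah's actual deskew search lives elsewhere, and scipy.ndimage.rotate here is only an illustrative stand-in for its rotation helper:

    import numpy as np
    from scipy.ndimage import rotate, gaussian_filter1d

    def deskew_score(textline_mask, sigma=35):
        profile = textline_mask.sum(axis=1)          # one value per image row
        return np.std(gaussian_filter1d(profile, sigma))

    def best_angle(textline_mask, candidates=np.linspace(-10, 10, 41)):
        # the rotation that maximises profile variance wins
        scores = [deskew_score(rotate(textline_mask, a, reshape=False, order=0))
                  for a in candidates]
        return candidates[int(np.argmax(scores))]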

-def find_num_col(regions_without_separators, num_col_classifier, tables, multiplier=3.8):
-    if not regions_without_separators.any():
-        return 0, []
-    #plt.imshow(regions_without_separators)
-    #plt.show()
-    regions_without_separators_0 = regions_without_separators.sum(axis=0)
+
+def find_num_col(regions_without_separators, multiplier=3.8):
+    regions_without_separators_0 = regions_without_separators[:, :].sum(axis=0)
     ##plt.plot(regions_without_separators_0)
     ##plt.show()
     sigma_ = 35  # 70#35
@@ -409,7 +372,7 @@ def find_num_col(regions_without_separators, multiplier=3.8):
     y = regions_without_separators_0  # [first_nonzero:last_nonzero]
     y_help = np.zeros(len(y) + 20)
     y_help[10 : len(y) + 10] = y
-    x = np.arange(len(y))
+    x = np.array(range(len(y)))

     zneg_rev = -y_help + np.max(y_help)
     zneg = np.zeros(len(zneg_rev) + 20)
     zneg[10 : len(zneg_rev) + 10] = zneg_rev
@@ -417,30 +380,20 @@ def find_num_col(regions_without_separators, multiplier=3.8):
     zneg = gaussian_filter1d(zneg, sigma_)

     peaks_neg, _ = find_peaks(zneg, height=0)
-    #plt.plot(zneg)
-    #plt.plot(peaks_neg, zneg[peaks_neg], 'rx')
-    #plt.show()
     peaks, _ = find_peaks(z, height=0)
     peaks_neg = peaks_neg - 10 - 10

     last_nonzero = last_nonzero - 100
     first_nonzero = first_nonzero + 200

-    peaks_neg = peaks_neg[(peaks_neg > first_nonzero) &
-                          (peaks_neg < last_nonzero)]
-    peaks = peaks[(peaks > 0.06 * regions_without_separators.shape[1]) &
-                  (peaks < 0.94 * regions_without_separators.shape[1])]
-    peaks_neg = peaks_neg[(peaks_neg > 370) &
-                          (peaks_neg < (regions_without_separators.shape[1] - 370))]
+    peaks_neg = peaks_neg[(peaks_neg > first_nonzero) & (peaks_neg < last_nonzero)]
+    peaks = peaks[(peaks > 0.06 * regions_without_separators.shape[1]) & (peaks < 0.94 * regions_without_separators.shape[1])]
+    peaks_neg = peaks_neg[(peaks_neg > 370) & (peaks_neg < (regions_without_separators.shape[1] - 370))]

     interest_pos = z[peaks]
     interest_pos = interest_pos[interest_pos > 10]
-    if not interest_pos.any():
-        return 0, []
     # plt.plot(z)
     # plt.show()
     interest_neg = z[peaks_neg]
-    if not interest_neg.any():
-        return 0, []

     min_peaks_pos = np.min(interest_pos)
     max_peaks_pos = np.max(interest_pos)
@@ -452,8 +405,7 @@ def find_num_col(regions_without_separators, multiplier=3.8):
     # print(np.min(interest_pos),np.max(interest_pos),np.max(interest_pos)/np.min(interest_pos),'minmax')

     dis_talaei = (min_peaks_pos - min_peaks_neg) / multiplier
-    grenze = min_peaks_pos - dis_talaei
-    # np.mean(y[peaks_neg[0]:peaks_neg[len(peaks_neg)-1]])-np.std(y[peaks_neg[0]:peaks_neg[len(peaks_neg)-1]])/2.0
+    grenze = min_peaks_pos - dis_talaei  # np.mean(y[peaks_neg[0]:peaks_neg[len(peaks_neg)-1]])-np.std(y[peaks_neg[0]:peaks_neg[len(peaks_neg)-1]])/2.0

     # print(interest_neg,'interest_neg')
     # print(grenze,'grenze')
@@ -465,19 +417,6 @@ def find_num_col(regions_without_separators, multiplier=3.8):
     peaks_neg_fin = peaks_neg[(interest_neg < grenze)]
     # interest_neg_fin=interest_neg[(interest_neg<grenze)]

-    if num_col_classifier - len(peaks_neg_fin) >= 3:
-        index_sort_interest_neg_fin= np.argsort(interest_neg_fin)
-        peaks_neg_sorted = np.array(peaks_neg)[index_sort_interest_neg_fin]
-        interest_neg_fin_sorted = np.array(interest_neg_fin)[index_sort_interest_neg_fin]
-
-        if len(index_sort_interest_neg_fin)>=num_col_classifier:
-            peaks_neg_fin = list( peaks_neg_sorted[:num_col_classifier] )
-            interest_neg_fin = list( interest_neg_fin_sorted[:num_col_classifier] )
-        else:
-            peaks_neg_fin = peaks_neg[:]
-            interest_neg_fin = interest_neg[:]
-
     num_col = (len(interest_neg_fin)) + 1

     # print(peaks_neg_fin,'peaks_neg_fin')
@@ -489,26 +428,19 @@ def find_num_col(regions_without_separators, multiplier=3.8):
     p_g_u = len(y) - int(len(y) / 4.0)

     if num_col == 3:
-        if ((peaks_neg_fin[0] > p_g_u and
-             peaks_neg_fin[1] > p_g_u) or
-            (peaks_neg_fin[0] < p_g_l and
-             peaks_neg_fin[1] < p_g_l) or
-            (peaks_neg_fin[0] + 200 < p_m and
-             peaks_neg_fin[1] < p_m) or
-            (peaks_neg_fin[0] - 200 > p_m and
-             peaks_neg_fin[1] > p_m)):
+        if (peaks_neg_fin[0] > p_g_u and peaks_neg_fin[1] > p_g_u) or (peaks_neg_fin[0] < p_g_l and peaks_neg_fin[1] < p_g_l) or ((peaks_neg_fin[0] + 200) < p_m and peaks_neg_fin[1] < p_m) or ((peaks_neg_fin[0] - 200) > p_m and peaks_neg_fin[1] > p_m):
             num_col = 1
             peaks_neg_fin = []

     if num_col == 2:
-        if (peaks_neg_fin[0] > p_g_u or
-            peaks_neg_fin[0] < p_g_l):
+        if (peaks_neg_fin[0] > p_g_u) or (peaks_neg_fin[0] < p_g_l):
             num_col = 1
             peaks_neg_fin = []

     ##print(len(peaks_neg_fin))

     diff_peaks = np.abs(np.diff(peaks_neg_fin))
+
     cut_off = 400
     peaks_neg_true = []
     forest = []
@@ -544,35 +476,23 @@ def find_num_col(regions_without_separators, multiplier=3.8):
     ##print(num_col,'early')
     if num_col == 3:
-        if ((peaks_neg_true[0] > p_g_u and
-             peaks_neg_true[1] > p_g_u) or
-            (peaks_neg_true[0] < p_g_l and
-             peaks_neg_true[1] < p_g_l) or
-            (peaks_neg_true[0] < p_m and
-             peaks_neg_true[1] + 200 < p_m) or
-            (peaks_neg_true[0] - 200 > p_m and
-             peaks_neg_true[1] > p_m)):
+        if (peaks_neg_true[0] > p_g_u and peaks_neg_true[1] > p_g_u) or (peaks_neg_true[0] < p_g_l and peaks_neg_true[1] < p_g_l) or (peaks_neg_true[0] < p_m and (peaks_neg_true[1] + 200) < p_m) or ((peaks_neg_true[0] - 200) > p_m and peaks_neg_true[1] > p_m):
             num_col = 1
             peaks_neg_true = []
-        elif (peaks_neg_true[0] < p_g_u and
-              peaks_neg_true[0] > p_g_l and
-              peaks_neg_true[1] > p_u_quarter):
+        elif (peaks_neg_true[0] < p_g_u and peaks_neg_true[0] > p_g_l) and (peaks_neg_true[1] > p_u_quarter):
             peaks_neg_true = [peaks_neg_true[0]]
-        elif (peaks_neg_true[1] < p_g_u and
-              peaks_neg_true[1] > p_g_l and
-              peaks_neg_true[0] < p_quarter):
+        elif (peaks_neg_true[1] < p_g_u and peaks_neg_true[1] > p_g_l) and (peaks_neg_true[0] < p_quarter):
             peaks_neg_true = [peaks_neg_true[1]]

     if num_col == 2:
-        if (peaks_neg_true[0] > p_g_u or
-            peaks_neg_true[0] < p_g_l):
+        if (peaks_neg_true[0] > p_g_u) or (peaks_neg_true[0] < p_g_l):
             num_col = 1
             peaks_neg_true = []

-    diff_peaks_abnormal = diff_peaks[diff_peaks < 360]
+    diff_peaks_annormal = diff_peaks[diff_peaks < 360]

-    if len(diff_peaks_abnormal) > 0:
-        arg_help = np.arange(len(diff_peaks))
+    if len(diff_peaks_annormal) > 0:
+        arg_help = np.array(range(len(diff_peaks)))
         arg_help_ann = arg_help[diff_peaks < 360]

         peaks_neg_fin_new = []
@@ -594,6 +514,7 @@ def find_num_col(regions_without_separators, multiplier=3.8):
     # plt.plot(peaks_neg_true,z[peaks_neg_true],'*')
     # plt.plot([0,len(y)], [grenze,grenze])
     # plt.show()
+
     ##print(len(peaks_neg_true))
     return len(peaks_neg_true), peaks_neg_true
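At its core, find_num_col above is valley detection on a vertical projection profile: sum the text mask per column, smooth with a wide Gaussian, locate minima, and keep only valleys that dip far enough below the text peaks (the grenze threshold controlled by multiplier). A stripped-down sketch of that core logic, omitting the padding and the p_g_l/p_g_u page-geometry heuristics:

    import numpy as np
    from scipy.signal import find_peaks
    from scipy.ndimage import gaussian_filter1d

    def count_columns(text_mask, sigma=35, multiplier=3.8):
        profile = text_mask.sum(axis=0)              # ink per image column
        z = gaussian_filter1d(profile.astype(float), sigma)
        valleys, _ = find_peaks(-z)                  # minima = candidate gutters
        peaks, _ = find_peaks(z)                     # maxima = column bodies
        if len(valleys) == 0 or len(peaks) == 0:
            return 1, []
        min_pos, min_neg = z[peaks].min(), z[valleys].min()
        grenze = min_pos - (min_pos - min_neg) / multiplier
        gutters = valleys[z[valleys] < grenze]       # only sufficiently deep gaps
        return len(gutters) + 1, gutters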

@@ -602,6 +523,7 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8):
     ##plt.plot(regions_without_separators_0)
     ##plt.show()
+
     sigma_ = 15

     meda_n_updown = regions_without_separators_0[len(regions_without_separators_0) :: -1]
@@ -612,24 +534,32 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8):
     last_nonzero = len(regions_without_separators_0) - last_nonzero

     y = regions_without_separators_0  # [first_nonzero:last_nonzero]
+
     y_help = np.zeros(len(y) + 20)
+
     y_help[10 : len(y) + 10] = y
-    x = np.arange(len(y))
+
+    x = np.array(range(len(y)))

     zneg_rev = -y_help + np.max(y_help)
+
     zneg = np.zeros(len(zneg_rev) + 20)
+
     zneg[10 : len(zneg_rev) + 10] = zneg_rev
+
     z = gaussian_filter1d(y, sigma_)
     zneg = gaussian_filter1d(zneg, sigma_)

     peaks_neg, _ = find_peaks(zneg, height=0)
     peaks, _ = find_peaks(z, height=0)
+
     peaks_neg = peaks_neg - 10 - 10
+
     peaks_neg_org = np.copy(peaks_neg)
-    peaks_neg = peaks_neg[(peaks_neg > first_nonzero) &
-                          (peaks_neg < last_nonzero)]
-    peaks = peaks[(peaks > 0.09 * regions_without_separators.shape[1]) &
-                  (peaks < 0.91 * regions_without_separators.shape[1])]
+
+    peaks_neg = peaks_neg[(peaks_neg > first_nonzero) & (peaks_neg < last_nonzero)]
+
+    peaks = peaks[(peaks > 0.09 * regions_without_separators.shape[1]) & (peaks < 0.91 * regions_without_separators.shape[1])]

     peaks_neg = peaks_neg[(peaks_neg > 500) & (peaks_neg < (regions_without_separators.shape[1] - 500))]
     # print(peaks)
@@ -644,8 +574,7 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8):
     # $print(min_peaks_pos)
     dis_talaei = (min_peaks_pos - min_peaks_neg) / multiplier
     # print(interest_pos)
-    grenze = min_peaks_pos - dis_talaei
-    # np.mean(y[peaks_neg[0]:peaks_neg[len(peaks_neg)-1]])-np.std(y[peaks_neg[0]:peaks_neg[len(peaks_neg)-1]])/2.0
+    grenze = min_peaks_pos - dis_talaei  # np.mean(y[peaks_neg[0]:peaks_neg[len(peaks_neg)-1]])-np.std(y[peaks_neg[0]:peaks_neg[len(peaks_neg)-1]])/2.0

     interest_neg_fin = interest_neg[(interest_neg < grenze)]
     peaks_neg_fin = peaks_neg[(interest_neg < grenze)]
@@ -659,21 +588,13 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8):
     p_g_u = len(y) - int(len(y) / 3.0)

     if num_col == 3:
-        if ((peaks_neg_fin[0] > p_g_u and
-             peaks_neg_fin[1] > p_g_u) or
-            (peaks_neg_fin[0] < p_g_l and
-             peaks_neg_fin[1] < p_g_l) or
-            (peaks_neg_fin[0] < p_m and
-             peaks_neg_fin[1] < p_m) or
-            (peaks_neg_fin[0] > p_m and
-             peaks_neg_fin[1] > p_m)):
+        if (peaks_neg_fin[0] > p_g_u and peaks_neg_fin[1] > p_g_u) or (peaks_neg_fin[0] < p_g_l and peaks_neg_fin[1] < p_g_l) or (peaks_neg_fin[0] < p_m and peaks_neg_fin[1] < p_m) or (peaks_neg_fin[0] > p_m and peaks_neg_fin[1] > p_m):
             num_col = 1
         else:
             pass

     if num_col == 2:
-        if (peaks_neg_fin[0] > p_g_u or
-            peaks_neg_fin[0] < p_g_l):
+        if (peaks_neg_fin[0] > p_g_u) or (peaks_neg_fin[0] < p_g_l):
             num_col = 1
         else:
             pass
@@ -712,36 +633,23 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8):
     p_u_quarter = len(y) - p_quarter

     if num_col == 3:
-        if ((peaks_neg_true[0] > p_g_u and
-             peaks_neg_true[1] > p_g_u) or
-            (peaks_neg_true[0] < p_g_l and
-             peaks_neg_true[1] < p_g_l) or
-            (peaks_neg_true[0] < p_m and
-             peaks_neg_true[1] < p_m) or
-            (peaks_neg_true[0] > p_m and
-             peaks_neg_true[1] > p_m)):
+        if (peaks_neg_true[0] > p_g_u and peaks_neg_true[1] > p_g_u) or (peaks_neg_true[0] < p_g_l and peaks_neg_true[1] < p_g_l) or (peaks_neg_true[0] < p_m and peaks_neg_true[1] < p_m) or (peaks_neg_true[0] > p_m and peaks_neg_true[1] > p_m):
             num_col = 1
             peaks_neg_true = []
-        elif (peaks_neg_true[0] < p_g_u and
-              peaks_neg_true[0] > p_g_l and
-              peaks_neg_true[1] > p_u_quarter):
+        elif (peaks_neg_true[0] < p_g_u and peaks_neg_true[0] > p_g_l) and (peaks_neg_true[1] > p_u_quarter):
             peaks_neg_true = [peaks_neg_true[0]]
-        elif (peaks_neg_true[1] < p_g_u and
-              peaks_neg_true[1] > p_g_l and
-              peaks_neg_true[0] < p_quarter):
+        elif (peaks_neg_true[1] < p_g_u and peaks_neg_true[1] > p_g_l) and (peaks_neg_true[0] < p_quarter):
             peaks_neg_true = [peaks_neg_true[1]]
         else:
             pass

     if num_col == 2:
-        if (peaks_neg_true[0] > p_g_u or
-            peaks_neg_true[0] < p_g_l):
+        if (peaks_neg_true[0] > p_g_u) or (peaks_neg_true[0] < p_g_l):
             num_col = 1
             peaks_neg_true = []

     if num_col == 4:
-        if (len(np.array(peaks_neg_true)[np.array(peaks_neg_true) < p_g_l]) == 2 or
-            len(np.array(peaks_neg_true)[np.array(peaks_neg_true) > (len(y) - p_g_l)]) == 2):
+        if len(np.array(peaks_neg_true)[np.array(peaks_neg_true) < p_g_l]) == 2 or len(np.array(peaks_neg_true)[np.array(peaks_neg_true) > (len(y) - p_g_l)]) == 2:
             num_col = 1
             peaks_neg_true = []
         else:
@@ -753,10 +661,7 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8):
     for i in range(len(peaks_neg_true)):
         hill_main = peaks_neg_true[i]
         # deep_depth=z[peaks_neg]
-        hills_around = peaks_neg_org[((peaks_neg_org > hill_main) &
-                                      (peaks_neg_org <= hill_main + 400)) |
-                                     ((peaks_neg_org < hill_main) &
-                                      (peaks_neg_org >= hill_main - 400))]
+        hills_around = peaks_neg_org[((peaks_neg_org > hill_main) & (peaks_neg_org <= hill_main + 400)) | ((peaks_neg_org < hill_main) & (peaks_neg_org >= hill_main - 400))]
         deep_depth_around = z[hills_around]

     # print(hill_main,z[hill_main],hills_around,deep_depth_around,'manoooo')
@@ -769,11 +674,13 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8):
         pass

     diff_peaks_annormal = diff_peaks[diff_peaks < 360]
+
     if len(diff_peaks_annormal) > 0:
-        arg_help = np.arange(len(diff_peaks))
+        arg_help = np.array(range(len(diff_peaks)))
         arg_help_ann = arg_help[diff_peaks < 360]

         peaks_neg_fin_new = []
+
         for ii in range(len(peaks_neg_fin)):
             if ii in arg_help_ann:
                 arg_min = np.argmin([interest_neg_fin[ii], interest_neg_fin[ii + 1]])
@@ -781,6 +688,7 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8):
                     peaks_neg_fin_new.append(peaks_neg_fin[ii])
                 else:
                     peaks_neg_fin_new.append(peaks_neg_fin[ii + 1])
+
             elif (ii - 1) in arg_help_ann:
                 pass
             else:
@@ -790,19 +698,22 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8):
     # sometime pages with one columns gives also some negative peaks. delete those peaks
     param = z[peaks_neg_true] / float(min_peaks_pos) * 100
+
     if len(param[param <= 41]) == 0:
         peaks_neg_true = []

     return len(peaks_fin_true), peaks_fin_true

 def find_num_col_by_vertical_lines(regions_without_separators, multiplier=3.8):
-    regions_without_separators_0 = regions_without_separators.sum(axis=0)
+    regions_without_separators_0 = regions_without_separators[:, :, 0].sum(axis=0)

     ##plt.plot(regions_without_separators_0)
     ##plt.show()
+
     sigma_ = 35  # 70#35

     z = gaussian_filter1d(regions_without_separators_0, sigma_)
+
     peaks, _ = find_peaks(z, height=0)

     # print(peaks,'peaksnew')
@@ -810,107 +721,78 @@ def find_num_col_by_vertical_lines(regions_without_separators, multiplier=3.8):

 def return_regions_without_separators(regions_pre):
     kernel = np.ones((5, 5), np.uint8)
-    regions_without_separators = ((regions_pre[:, :] != 6) &
-                                  (regions_pre[:, :] != 0))
-    # regions_without_separators=( (image_regions_eraly_p[:,:,:]!=6) &
-    #                              (image_regions_eraly_p[:,:,:]!=0) &
-    #                              (image_regions_eraly_p[:,:,:]!=5) &
-    #                              (image_regions_eraly_p[:,:,:]!=8) &
-    #                              (image_regions_eraly_p[:,:,:]!=7))
+    regions_without_separators = ((regions_pre[:, :] != 6) & (regions_pre[:, :] != 0)) * 1
+    # regions_without_separators=( (image_regions_eraly_p[:,:,:]!=6) & (image_regions_eraly_p[:,:,:]!=0) & (image_regions_eraly_p[:,:,:]!=5) & (image_regions_eraly_p[:,:,:]!=8) & (image_regions_eraly_p[:,:,:]!=7))*1

-    regions_without_separators = cv2.erode(regions_without_separators.astype(np.uint8), kernel, iterations=6)
+    regions_without_separators = regions_without_separators.astype(np.uint8)
+
+    regions_without_separators = cv2.erode(regions_without_separators, kernel, iterations=6)

     return regions_without_separators

-def put_drop_out_from_only_drop_model(layout_no_patch, layout1):
-    if layout_no_patch.ndim == 3:
-        layout_no_patch = layout_no_patch[:, :, 0]

-    drop_only = (layout_no_patch[:, :] == 4) * 1
+def put_drop_out_from_only_drop_model(layout_no_patch, layout1):
+
+    drop_only = (layout_no_patch[:, :, 0] == 4) * 1
     contours_drop, hir_on_drop = return_contours_of_image(drop_only)
     contours_drop_parent = return_parent_contours(contours_drop, hir_on_drop)

-    areas_cnt_text = np.array([cv2.contourArea(contours_drop_parent[j])
-                               for j in range(len(contours_drop_parent))])
+    areas_cnt_text = np.array([cv2.contourArea(contours_drop_parent[j]) for j in range(len(contours_drop_parent))])
     areas_cnt_text = areas_cnt_text / float(drop_only.shape[0] * drop_only.shape[1])
-    contours_drop_parent = [contours_drop_parent[jz]
-                            for jz in range(len(contours_drop_parent))
-                            if areas_cnt_text[jz] > 0.00001]
-    areas_cnt_text = [areas_cnt_text[jz]
-                      for jz in range(len(areas_cnt_text))
-                      if areas_cnt_text[jz] > 0.00001]
+
+    contours_drop_parent = [contours_drop_parent[jz] for jz in range(len(contours_drop_parent)) if areas_cnt_text[jz] > 0.00001]
+
+    areas_cnt_text = [areas_cnt_text[jz] for jz in range(len(areas_cnt_text)) if areas_cnt_text[jz] > 0.00001]

     contours_drop_parent_final = []
+
     for jj in range(len(contours_drop_parent)):
         x, y, w, h = cv2.boundingRect(contours_drop_parent[jj])
         # boxes.append([int(x), int(y), int(w), int(h)])
         map_of_drop_contour_bb = np.zeros((layout1.shape[0], layout1.shape[1]))
         map_of_drop_contour_bb[y : y + h, x : x + w] = layout1[y : y + h, x : x + w]
-        if (100. *
-            (map_of_drop_contour_bb == 1).sum() /
-            (map_of_drop_contour_bb == 5).sum()) >= 15:
+
+        if (((map_of_drop_contour_bb == 1) * 1).sum() / float(((map_of_drop_contour_bb == 5) * 1).sum()) * 100) >= 15:
             contours_drop_parent_final.append(contours_drop_parent[jj])

-    layout_no_patch[:, :][layout_no_patch[:, :] == 4] = 0
-    layout_no_patch = cv2.fillPoly(layout_no_patch, pts=contours_drop_parent_final, color=4)
+    layout_no_patch[:, :, 0][layout_no_patch[:, :, 0] == 4] = 0
+
+    layout_no_patch = cv2.fillPoly(layout_no_patch, pts=contours_drop_parent_final, color=(4, 4, 4))

     return layout_no_patch

-def putt_bb_of_drop_capitals_of_model_in_patches_in_layout(layout_in_patch, drop_capital_label, text_regions_p):
-    drop_only = (layout_in_patch[:, :, 0] == drop_capital_label) * 1
+
+def putt_bb_of_drop_capitals_of_model_in_patches_in_layout(layout_in_patch):
+
+    drop_only = (layout_in_patch[:, :, 0] == 4) * 1
     contours_drop, hir_on_drop = return_contours_of_image(drop_only)
     contours_drop_parent = return_parent_contours(contours_drop, hir_on_drop)

-    areas_cnt_text = np.array([cv2.contourArea(contours_drop_parent[j])
-                               for j in range(len(contours_drop_parent))])
+    areas_cnt_text = np.array([cv2.contourArea(contours_drop_parent[j]) for j in range(len(contours_drop_parent))])
     areas_cnt_text = areas_cnt_text / float(drop_only.shape[0] * drop_only.shape[1])
-    contours_drop_parent = [contours_drop_parent[jz]
-                            for jz in range(len(contours_drop_parent))
-                            if areas_cnt_text[jz] > 0.00001]
-    areas_cnt_text = [areas_cnt_text[jz]
-                      for jz in range(len(areas_cnt_text))
-                      if areas_cnt_text[jz] > 0.00001]
+
+    contours_drop_parent = [contours_drop_parent[jz] for jz in range(len(contours_drop_parent)) if areas_cnt_text[jz] > 0.00001]
+
+    areas_cnt_text = [areas_cnt_text[jz] for jz in range(len(areas_cnt_text)) if areas_cnt_text[jz] > 0.001]

     contours_drop_parent_final = []
+
     for jj in range(len(contours_drop_parent)):
         x, y, w, h = cv2.boundingRect(contours_drop_parent[jj])
-        box = slice(y, y + h), slice(x, x + w)
-        box0 = box + (0,)
-        mask_of_drop_cpaital_in_early_layout = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1]))
-        mask_of_drop_cpaital_in_early_layout[box] = text_regions_p[box]
-
-        all_drop_capital_pixels_which_is_text_in_early_lo = np.sum(mask_of_drop_cpaital_in_early_layout[box]==1)
-        mask_of_drop_cpaital_in_early_layout[box] = 1
-        all_drop_capital_pixels = np.sum(mask_of_drop_cpaital_in_early_layout==1)
-
-        percent_text_to_all_in_drop = all_drop_capital_pixels_which_is_text_in_early_lo / float(all_drop_capital_pixels)
-        if (areas_cnt_text[jj] * float(drop_only.shape[0] * drop_only.shape[1]) / float(w * h) > 0.6 and
-            percent_text_to_all_in_drop >= 0.3):
-            layout_in_patch[box0] = drop_capital_label
-        else:
-            layout_in_patch[box0][layout_in_patch[box0] == drop_capital_label] = drop_capital_label
-            layout_in_patch[box0][layout_in_patch[box0] == 0] = drop_capital_label
-            layout_in_patch[box0][layout_in_patch[box0] == 4] = drop_capital_label# images
-            #layout_in_patch[box0][layout_in_patch[box0] == drop_capital_label] = 1#drop_capital_label
+        layout_in_patch[y : y + h, x : x + w, 0] = 4

     return layout_in_patch

-def check_any_text_region_in_model_one_is_main_or_header(
-        regions_model_1, regions_model_full,
-        contours_only_text_parent,
-        all_box_coord, all_found_textline_polygons,
-        slopes,
-        contours_only_text_parent_d_ordered, conf_contours):
-
-    cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin = \
-        find_new_features_of_contours(contours_only_text_parent)
+def check_any_text_region_in_model_one_is_main_or_header(regions_model_1,regions_model_full,contours_only_text_parent,all_box_coord,all_found_texline_polygons,slopes,contours_only_text_parent_d_ordered):
+    cx_main,cy_main ,x_min_main , x_max_main, y_min_main ,y_max_main,y_corr_x_min_from_argmin=find_new_features_of_contours(contours_only_text_parent)

     length_con=x_max_main-x_min_main
     height_con=y_max_main-y_min_main

-    all_found_textline_polygons_main=[]
-    all_found_textline_polygons_head=[]
+
+
+    all_found_texline_polygons_main=[]
+    all_found_texline_polygons_head=[]

     all_box_coord_main=[]
     all_box_coord_head=[]
@@ -921,159 +803,51 @@ def check_any_text_region_in_model_one_is_main_or_header(
     contours_only_text_parent_main=[]
     contours_only_text_parent_head=[]

-    conf_contours_main=[]
-    conf_contours_head=[]
-
     contours_only_text_parent_main_d=[]
     contours_only_text_parent_head_d=[]

-    for ii, con in enumerate(contours_only_text_parent):
-        img = np.zeros(regions_model_1.shape[:2])
-        img = cv2.fillPoly(img, pts=[con], color=255)
+    for ii in range(len(contours_only_text_parent)):
+        con=contours_only_text_parent[ii]
+        img=np.zeros((regions_model_1.shape[0],regions_model_1.shape[1],3))
+        img = cv2.fillPoly(img, pts=[con], color=(255, 255, 255))

-        all_pixels=((img == 255)*1).sum()
-        pixels_header=( ( (img == 255) & (regions_model_full[:,:,0]==2) )*1 ).sum()
+
+
+        all_pixels=((img[:,:,0]==255)*1).sum()
+
+        pixels_header=( ( (img[:,:,0]==255) & (regions_model_full[:,:,0]==2) )*1 ).sum()
         pixels_main=all_pixels-pixels_header
+
         if (pixels_header>=pixels_main) and ( (length_con[ii]/float(height_con[ii]) )>=1.3 ):
-            regions_model_1[:,:][(regions_model_1[:,:]==1) & (img == 255) ]=2
+            regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=2
             contours_only_text_parent_head.append(con)
-            if len(contours_only_text_parent_d_ordered):
+            if contours_only_text_parent_d_ordered is not None:
                 contours_only_text_parent_head_d.append(contours_only_text_parent_d_ordered[ii])
             all_box_coord_head.append(all_box_coord[ii])
             slopes_head.append(slopes[ii])
-            all_found_textline_polygons_head.append(all_found_textline_polygons[ii])
-            conf_contours_head.append(None)
+            all_found_texline_polygons_head.append(all_found_texline_polygons[ii])
         else:
-            regions_model_1[:,:][(regions_model_1[:,:]==1) & (img == 255) ]=1
+            regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=1
             contours_only_text_parent_main.append(con)
-            conf_contours_main.append(conf_contours[ii])
-            if len(contours_only_text_parent_d_ordered):
+            if contours_only_text_parent_d_ordered is not None:
                 contours_only_text_parent_main_d.append(contours_only_text_parent_d_ordered[ii])
             all_box_coord_main.append(all_box_coord[ii])
             slopes_main.append(slopes[ii])
-            all_found_textline_polygons_main.append(all_found_textline_polygons[ii])
+            all_found_texline_polygons_main.append(all_found_texline_polygons[ii])

         #print(all_pixels,pixels_main,pixels_header)

-    return (regions_model_1,
-            contours_only_text_parent_main,
-            contours_only_text_parent_head,
-            all_box_coord_main,
-            all_box_coord_head,
-            all_found_textline_polygons_main,
-            all_found_textline_polygons_head,
-            slopes_main,
-            slopes_head,
-            contours_only_text_parent_main_d,
-            contours_only_text_parent_head_d,
-            conf_contours_main,
-            conf_contours_head)
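The function above decides header vs. main text by a per-region pixel vote: each contour is rasterised, overlaid on the full-layout prediction, and reclassified as a heading when heading pixels (label 2) dominate and the region is clearly wider than tall. A minimal sketch of that rule for a single contour; cv2.boundingRect stands in for the contour-feature helper used in the real code:

    import numpy as np
    import cv2

    def is_header(contour, full_layout_labels, aspect_min=1.3, header_label=2):
        # contour: int32 point array; full_layout_labels: 2D label image
        mask = np.zeros(full_layout_labels.shape[:2], dtype=np.uint8)
        cv2.fillPoly(mask, pts=[contour], color=1)
        inside = mask == 1
        header_px = int((inside & (full_layout_labels == header_label)).sum())
        main_px = int(inside.sum()) - header_px
        x, y, w, h = cv2.boundingRect(contour)
        return header_px >= main_px and w / float(h) >= aspect_min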
faster - h_o = regions_model_1.shape[0] - w_o = regions_model_1.shape[1] - zoom = 3 - regions_model_1 = cv2.resize(regions_model_1, (regions_model_1.shape[1] // zoom, - regions_model_1.shape[0] // zoom), - interpolation=cv2.INTER_NEAREST) - regions_model_full = cv2.resize(regions_model_full, (regions_model_full.shape[1] // zoom, - regions_model_full.shape[0] // zoom), - interpolation=cv2.INTER_NEAREST) - contours_only_text_parent_z = [(cnt / zoom).astype(int) for cnt in contours_only_text_parent] - - ### - cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin = \ - find_new_features_of_contours(contours_only_text_parent_z) - - length_con=x_max_main-x_min_main - height_con=y_max_main-y_min_main - - all_found_textline_polygons_main=[] - all_found_textline_polygons_head=[] - - all_box_coord_main=[] - all_box_coord_head=[] - - slopes_main=[] - slopes_head=[] - - contours_only_text_parent_main=[] - contours_only_text_parent_head=[] - - conf_contours_main=[] - conf_contours_head=[] - - contours_only_text_parent_main_d=[] - contours_only_text_parent_head_d=[] - - for ii, con in enumerate(contours_only_text_parent_z): - img = np.zeros(regions_model_1.shape[:2]) - img = cv2.fillPoly(img, pts=[con], color=255) - - all_pixels = (img == 255).sum() - pixels_header=((img == 255) & - (regions_model_full[:,:,0]==2)).sum() - pixels_main = all_pixels - pixels_header - - if (( pixels_header / float(pixels_main) >= 0.6 and - length_con[ii] / float(height_con[ii]) >= 1.3 and - length_con[ii] / float(height_con[ii]) <= 3 ) or - ( pixels_header / float(pixels_main) >= 0.3 and - length_con[ii] / float(height_con[ii]) >=3 )): - - regions_model_1[:,:][(regions_model_1[:,:]==1) & (img == 255) ] = 2 - contours_only_text_parent_head.append(contours_only_text_parent[ii]) - conf_contours_head.append(None) # why not conf_contours[ii], too? 
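
Note on the `_light` variant removed above: it gets its speed from a simple pattern — classify on a 3x nearest-neighbour downscale of the label maps (with the region contours scaled by the same factor), then resize the relabelled map back to full size at the end. A minimal, self-contained sketch of that downsample-classify-upsample idea follows; the helper name and the 2-D `label_map`/`full_map` arguments are illustrative assumptions, not eynollah API, and the thresholds simply mirror the hunk above:

    import cv2
    import numpy as np

    def classify_regions_downsampled(label_map, full_map, contours, zoom=3):
        """Sketch only: relabel main-text regions (label 1) as headings (label 2)
        on a zoom-times smaller copy, then upsample the result again."""
        h, w = label_map.shape[:2]
        small = cv2.resize(label_map, (w // zoom, h // zoom), interpolation=cv2.INTER_NEAREST)
        small_full = cv2.resize(full_map, (w // zoom, h // zoom), interpolation=cv2.INTER_NEAREST)
        heads, mains = [], []
        for cnt in contours:
            cnt_small = (np.asarray(cnt) / zoom).astype(np.int32)  # scale the polygon too
            mask = np.zeros(small.shape[:2], dtype=np.uint8)
            cv2.fillPoly(mask, pts=[cnt_small], color=1)
            inside = mask == 1
            all_px = int(inside.sum())
            head_px = int((inside & (small_full == 2)).sum())
            main_px = max(all_px - head_px, 1)                     # avoid div-by-zero
            _, _, cw, ch = cv2.boundingRect(cnt_small)
            aspect = cw / float(max(ch, 1))
            ratio = head_px / float(main_px)
            # same shape of rule as above: mostly header pixels plus a wide bounding box
            if (ratio >= 0.6 and 1.3 <= aspect <= 3) or (ratio >= 0.3 and aspect >= 3):
                small[inside & (small == 1)] = 2                   # relabel as heading
                heads.append(cnt)
            else:
                mains.append(cnt)
        # labels survive INTER_NEAREST unchanged on the way back up
        return cv2.resize(small, (w, h), interpolation=cv2.INTER_NEAREST), mains, heads
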
- if len(contours_only_text_parent_d_ordered): - contours_only_text_parent_head_d.append(contours_only_text_parent_d_ordered[ii]) - all_box_coord_head.append(all_box_coord[ii]) - slopes_head.append(slopes[ii]) - all_found_textline_polygons_head.append(all_found_textline_polygons[ii]) - - else: - regions_model_1[:,:][(regions_model_1[:,:]==1) & (img == 255) ] = 1 - contours_only_text_parent_main.append(contours_only_text_parent[ii]) - conf_contours_main.append(conf_contours[ii]) - if len(contours_only_text_parent_d_ordered): - contours_only_text_parent_main_d.append(contours_only_text_parent_d_ordered[ii]) - all_box_coord_main.append(all_box_coord[ii]) - slopes_main.append(slopes[ii]) - all_found_textline_polygons_main.append(all_found_textline_polygons[ii]) - #print(all_pixels,pixels_main,pixels_header) - - ### to make it faster - regions_model_1 = cv2.resize(regions_model_1, (w_o, h_o), interpolation=cv2.INTER_NEAREST) - # regions_model_full = cv2.resize(img, (regions_model_full.shape[1] // zoom, - # regions_model_full.shape[0] // zoom), - # interpolation=cv2.INTER_NEAREST) - ### - - return (regions_model_1, - contours_only_text_parent_main, - contours_only_text_parent_head, - all_box_coord_main, - all_box_coord_head, - all_found_textline_polygons_main, - all_found_textline_polygons_head, - slopes_main, - slopes_head, - contours_only_text_parent_main_d, - contours_only_text_parent_head_d, - conf_contours_main, - conf_contours_head) + #plt.imshow(img[:,:,0]) + #plt.show() + return regions_model_1,contours_only_text_parent_main,contours_only_text_parent_head,all_box_coord_main,all_box_coord_head,all_found_texline_polygons_main,all_found_texline_polygons_head,slopes_main,slopes_head,contours_only_text_parent_main_d,contours_only_text_parent_head_d def small_textlines_to_parent_adherence2(textlines_con, textline_iamge, num_col): # print(textlines_con) # textlines_con=textlines_con.astype(np.uint32) + textlines_con_changed = [] for m1 in range(len(textlines_con)): @@ -1092,10 +866,9 @@ def small_textlines_to_parent_adherence2(textlines_con, textline_iamge, num_col) ##plt.imshow(img_text_all) ##plt.show() - areas_cnt_text = np.array([cv2.contourArea(textlines_tot[j]) - for j in range(len(textlines_tot))]) + areas_cnt_text = np.array([cv2.contourArea(textlines_tot[j]) for j in range(len(textlines_tot))]) areas_cnt_text = areas_cnt_text / float(textline_iamge.shape[0] * textline_iamge.shape[1]) - indexes_textlines = np.arange(len(textlines_tot)) + indexes_textlines = np.array(range(len(textlines_tot))) # print(areas_cnt_text,np.min(areas_cnt_text),np.max(areas_cnt_text)) if num_col == 0: @@ -1120,65 +893,75 @@ def small_textlines_to_parent_adherence2(textlines_con, textline_iamge, num_col) textlines_big.append(textlines_tot[i]) textlines_big_org_form.append(textlines_tot_org_form[i]) - img_textline_s = np.zeros(textline_iamge.shape[:2]) - img_textline_s = cv2.fillPoly(img_textline_s, pts=textlines_small, color=1) + img_textline_s = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1])) + img_textline_s = cv2.fillPoly(img_textline_s, pts=textlines_small, color=(1, 1, 1)) - img_textline_b = np.zeros(textline_iamge.shape[:2]) - img_textline_b = cv2.fillPoly(img_textline_b, pts=textlines_big, color=1) + img_textline_b = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1])) + img_textline_b = cv2.fillPoly(img_textline_b, pts=textlines_big, color=(1, 1, 1)) sum_small_big_all = img_textline_s + img_textline_b sum_small_big_all2 = (sum_small_big_all[:, :] == 2) * 1 sum_intersection_sb = 
sum_small_big_all2.sum(axis=1).sum() + if sum_intersection_sb > 0: + dis_small_from_bigs_tot = [] for z1 in range(len(textlines_small)): # print(len(textlines_small),'small') intersections = [] for z2 in range(len(textlines_big)): - img_text = np.zeros(textline_iamge.shape[:2]) - img_text = cv2.fillPoly(img_text, pts=[textlines_small[z1]], color=1) + img_text = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1])) + img_text = cv2.fillPoly(img_text, pts=[textlines_small[z1]], color=(1, 1, 1)) - img_text2 = np.zeros(textline_iamge.shape[:2]) - img_text2 = cv2.fillPoly(img_text2, pts=[textlines_big[z2]], color=1) + img_text2 = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1])) + img_text2 = cv2.fillPoly(img_text2, pts=[textlines_big[z2]], color=(1, 1, 1)) sum_small_big = img_text2 + img_text sum_small_big_2 = (sum_small_big[:, :] == 2) * 1 sum_intersection = sum_small_big_2.sum(axis=1).sum() + # print(sum_intersection) + intersections.append(sum_intersection) if len(np.array(intersections)[np.array(intersections) > 0]) == 0: intersections = [] + try: dis_small_from_bigs_tot.append(np.argmax(intersections)) except: dis_small_from_bigs_tot.append(-1) smalls_list = np.array(dis_small_from_bigs_tot)[np.array(dis_small_from_bigs_tot) >= 0] + # index_small_textlines_rest=list( set(indexes_textlines_small)-set(smalls_list) ) textlines_big_with_change = [] textlines_big_with_change_con = [] textlines_small_with_change = [] + for z in list(set(smalls_list)): index_small_textlines = list(np.where(np.array(dis_small_from_bigs_tot) == z)[0]) # print(z,index_small_textlines) - img_text2 = np.zeros(textline_iamge.shape[:2], dtype=np.uint8) - img_text2 = cv2.fillPoly(img_text2, pts=[textlines_big[z]], color=255) + img_text2 = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1], 3)) + img_text2 = cv2.fillPoly(img_text2, pts=[textlines_big[z]], color=(255, 255, 255)) textlines_big_with_change.append(z) for k in index_small_textlines: - img_text2 = cv2.fillPoly(img_text2, pts=[textlines_small[k]], color=255) + img_text2 = cv2.fillPoly(img_text2, pts=[textlines_small[k]], color=(255, 255, 255)) textlines_small_with_change.append(k) - _, thresh = cv2.threshold(img_text2, 0, 255, 0) - cont, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + img_text2 = img_text2.astype(np.uint8) + imgray = cv2.cvtColor(img_text2, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + cont, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # print(cont[0],type(cont)) + textlines_big_with_change_con.append(cont) textlines_big_org_form[z] = cont[0] @@ -1188,104 +971,212 @@ def small_textlines_to_parent_adherence2(textlines_con, textline_iamge, num_col) # print(textlines_big_with_change,'textlines_big_with_change') # print(textlines_small_with_change,'textlines_small_with_change') # print(textlines_big) + textlines_con_changed.append(textlines_big_org_form) - textlines_con_changed.append(textlines_big_org_form) + else: + textlines_con_changed.append(textlines_big_org_form) return textlines_con_changed -def order_of_regions(textline_mask, contours_main, contours_head, y_ref): +def order_of_regions(textline_mask, contours_main, contours_header, y_ref): + ##plt.imshow(textline_mask) ##plt.show() - y = textline_mask.sum(axis=1) # horizontal projection profile + """ + print(len(contours_main),'contours_main') + mada_n=textline_mask.sum(axis=1) + y=mada_n[:] + + y_help=np.zeros(len(y)+40) + y_help[20:len(y)+20]=y + x=np.array( 
range(len(y)) ) + + + peaks_real, _ = find_peaks(gaussian_filter1d(y, 3), height=0) + + ##plt.imshow(textline_mask[:,:]) + ##plt.show() + + + sigma_gaus=8 + + z= gaussian_filter1d(y_help, sigma_gaus) + zneg_rev=-y_help+np.max(y_help) + + zneg=np.zeros(len(zneg_rev)+40) + zneg[20:len(zneg_rev)+20]=zneg_rev + zneg= gaussian_filter1d(zneg, sigma_gaus) + + + peaks, _ = find_peaks(z, height=0) + peaks_neg, _ = find_peaks(zneg, height=0) + + peaks_neg=peaks_neg-20-20 + peaks=peaks-20 + """ + + textline_sum_along_width = textline_mask.sum(axis=1) + + y = textline_sum_along_width[:] y_padded = np.zeros(len(y) + 40) y_padded[20 : len(y) + 20] = y + x = np.array(range(len(y))) + + peaks_real, _ = find_peaks(gaussian_filter1d(y, 3), height=0) sigma_gaus = 8 - #z = gaussian_filter1d(y_padded, sigma_gaus) - #peaks, _ = find_peaks(z, height=0) - #peaks = peaks - 20 - zneg_rev = np.max(y_padded) - y_padded + + z = gaussian_filter1d(y_padded, sigma_gaus) + zneg_rev = -y_padded + np.max(y_padded) + zneg = np.zeros(len(zneg_rev) + 40) zneg[20 : len(zneg_rev) + 20] = zneg_rev zneg = gaussian_filter1d(zneg, sigma_gaus) + peaks, _ = find_peaks(z, height=0) peaks_neg, _ = find_peaks(zneg, height=0) + peaks_neg = peaks_neg - 20 - 20 + peaks = peaks - 20 ##plt.plot(z) ##plt.show() - cx_main, cy_main = find_center_of_contours(contours_main) - cx_head, cy_head = find_center_of_contours(contours_head) - peaks_neg_new = np.append(np.insert(peaks_neg, 0, 0), textline_mask.shape[0]) - # offset from bbox of mask - peaks_neg_new += y_ref + if contours_main != None: + areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))]) + M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] + cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] + cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] + x_min_main = np.array([np.min(contours_main[j][:, 0, 0]) for j in range(len(contours_main))]) + x_max_main = np.array([np.max(contours_main[j][:, 0, 0]) for j in range(len(contours_main))]) - # assert not len(cy_main) or np.min(peaks_neg_new) <= np.min(cy_main) and np.max(cy_main) <= np.max(peaks_neg_new) - # assert not len(cy_head) or np.min(peaks_neg_new) <= np.min(cy_head) and np.max(cy_head) <= np.max(peaks_neg_new) + y_min_main = np.array([np.min(contours_main[j][:, 0, 1]) for j in range(len(contours_main))]) + y_max_main = np.array([np.max(contours_main[j][:, 0, 1]) for j in range(len(contours_main))]) + + if len(contours_header) != None: + areas_header = np.array([cv2.contourArea(contours_header[j]) for j in range(len(contours_header))]) + M_header = [cv2.moments(contours_header[j]) for j in range(len(contours_header))] + cx_header = [(M_header[j]["m10"] / (M_header[j]["m00"] + 1e-32)) for j in range(len(M_header))] + cy_header = [(M_header[j]["m01"] / (M_header[j]["m00"] + 1e-32)) for j in range(len(M_header))] + + x_min_header = np.array([np.min(contours_header[j][:, 0, 0]) for j in range(len(contours_header))]) + x_max_header = np.array([np.max(contours_header[j][:, 0, 0]) for j in range(len(contours_header))]) + + y_min_header = np.array([np.min(contours_header[j][:, 0, 1]) for j in range(len(contours_header))]) + y_max_header = np.array([np.max(contours_header[j][:, 0, 1]) for j in range(len(contours_header))]) + # print(cy_main,'mainy') + + peaks_neg_new = [] + + peaks_neg_new.append(0 + y_ref) + for iii in range(len(peaks_neg)): + peaks_neg_new.append(peaks_neg[iii] + y_ref) + + 
peaks_neg_new.append(textline_mask.shape[0] + y_ref) + + if len(cy_main) > 0 and np.max(cy_main) > np.max(peaks_neg_new): + cy_main = np.array(cy_main) * (np.max(peaks_neg_new) / np.max(cy_main)) - 10 + + if contours_main != None: + indexer_main = np.array(range(len(contours_main))) + + if contours_main != None: + len_main = len(contours_main) + else: + len_main = 0 + + matrix_of_orders = np.zeros((len(contours_main) + len(contours_header), 5)) + + matrix_of_orders[:, 0] = np.array(range(len(contours_main) + len(contours_header))) - matrix_of_orders = np.zeros((len(contours_main) + len(contours_head), 5), dtype=int) - matrix_of_orders[:, 0] = np.arange(len(contours_main) + len(contours_head)) matrix_of_orders[: len(contours_main), 1] = 1 matrix_of_orders[len(contours_main) :, 1] = 2 + matrix_of_orders[: len(contours_main), 2] = cx_main - matrix_of_orders[len(contours_main) :, 2] = cx_head + matrix_of_orders[len(contours_main) :, 2] = cx_header + matrix_of_orders[: len(contours_main), 3] = cy_main - matrix_of_orders[len(contours_main) :, 3] = cy_head - matrix_of_orders[: len(contours_main), 4] = np.arange(len(contours_main)) - matrix_of_orders[len(contours_main) :, 4] = np.arange(len(contours_head)) + matrix_of_orders[len(contours_main) :, 3] = cy_header + + matrix_of_orders[: len(contours_main), 4] = np.array(range(len(contours_main))) + matrix_of_orders[len(contours_main) :, 4] = np.array(range(len(contours_header))) # print(peaks_neg_new,'peaks_neg_new') + # print(matrix_of_orders,'matrix_of_orders') # print(peaks_neg_new,np.max(peaks_neg_new)) final_indexers_sorted = [] final_types = [] final_index_type = [] - for top, bot in pairwise(peaks_neg_new): - indexes_in, types_in, cxs_in, cys_in, typed_indexes_in = \ - matrix_of_orders[(matrix_of_orders[:, 3] >= top) & - (matrix_of_orders[:, 3] < bot)].T + for i in range(len(peaks_neg_new) - 1): + top = peaks_neg_new[i] + down = peaks_neg_new[i + 1] + + # print(top,down,'topdown') + + indexes_in = matrix_of_orders[:, 0][(matrix_of_orders[:, 3] >= top) & ((matrix_of_orders[:, 3] < down))] + cxs_in = matrix_of_orders[:, 2][(matrix_of_orders[:, 3] >= top) & ((matrix_of_orders[:, 3] < down))] + cys_in = matrix_of_orders[:, 3][(matrix_of_orders[:, 3] >= top) & ((matrix_of_orders[:, 3] < down))] + types_of_text = matrix_of_orders[:, 1][(matrix_of_orders[:, 3] >= top) & ((matrix_of_orders[:, 3] < down))] + index_types_of_text = matrix_of_orders[:, 4][(matrix_of_orders[:, 3] >= top) & ((matrix_of_orders[:, 3] < down))] + + # print(top,down) + # print(cys_in,'cyyyins') + # print(indexes_in,'indexes') sorted_inside = np.argsort(cxs_in) - final_indexers_sorted.extend(indexes_in[sorted_inside]) - final_types.extend(types_in[sorted_inside]) - final_index_type.extend(typed_indexes_in[sorted_inside]) + + ind_in_int = indexes_in[sorted_inside] + ind_in_type = types_of_text[sorted_inside] + ind_ind_type = index_types_of_text[sorted_inside] + + for j in range(len(ind_in_int)): + final_indexers_sorted.append(int(ind_in_int[j])) + final_types.append(int(ind_in_type[j])) + final_index_type.append(int(ind_ind_type[j])) ##matrix_of_orders[:len_main,4]=final_indexers_sorted[:] - # assert len(final_indexers_sorted) == len(contours_main) + len(contours_head) - # assert not len(final_indexers_sorted) or max(final_index_type) == max(len(contours_main) + # print(peaks_neg_new,'peaks') + # print(final_indexers_sorted,'indexsorted') + # print(final_types,'types') + # print(final_index_type,'final_index_type') - return np.array(final_indexers_sorted), np.array(final_types), 
np.array(final_index_type) - -def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new( - img_p_in_ver, img_in_hor,num_col_classifier): + return final_indexers_sorted, matrix_of_orders, final_types, final_index_type +def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new(img_p_in_ver, img_in_hor,num_col_classifier): #img_p_in_ver = cv2.erode(img_p_in_ver, self.kernel, iterations=2) - _, thresh = cv2.threshold(img_p_in_ver, 0, 255, 0) - contours_lines_ver, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - slope_lines_ver, _, x_min_main_ver, _, _, _, y_min_main_ver, y_max_main_ver, cx_main_ver = \ - find_features_of_lines(contours_lines_ver) + img_p_in_ver=img_p_in_ver.astype(np.uint8) + img_p_in_ver=np.repeat(img_p_in_ver[:, :, np.newaxis], 3, axis=2) + imgray = cv2.cvtColor(img_p_in_ver, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + + contours_lines_ver,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + + slope_lines_ver,dist_x_ver, x_min_main_ver ,x_max_main_ver ,cy_main_ver,slope_lines_org_ver,y_min_main_ver, y_max_main_ver, cx_main_ver=find_features_of_lines(contours_lines_ver) + for i in range(len(x_min_main_ver)): - img_p_in_ver[int(y_min_main_ver[i]): - int(y_min_main_ver[i])+30, - int(cx_main_ver[i])-25: - int(cx_main_ver[i])+25] = 0 - img_p_in_ver[int(y_max_main_ver[i])-30: - int(y_max_main_ver[i]), - int(cx_main_ver[i])-25: - int(cx_main_ver[i])+25] = 0 + img_p_in_ver[int(y_min_main_ver[i]):int(y_min_main_ver[i])+30,int(cx_main_ver[i])-25:int(cx_main_ver[i])+25,0]=0 + img_p_in_ver[int(y_max_main_ver[i])-30:int(y_max_main_ver[i]),int(cx_main_ver[i])-25:int(cx_main_ver[i])+25,0]=0 + + + img_in_hor=img_in_hor.astype(np.uint8) + img_in_hor=np.repeat(img_in_hor[:, :, np.newaxis], 3, axis=2) + imgray = cv2.cvtColor(img_in_hor, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) - _, thresh = cv2.threshold(img_in_hor, 0, 255, 0) - contours_lines_hor, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - - slope_lines_hor, dist_x_hor, x_min_main_hor, x_max_main_hor, cy_main_hor, _, _, _, _ = \ - find_features_of_lines(contours_lines_hor) + contours_lines_hor,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + + slope_lines_hor,dist_x_hor, x_min_main_hor ,x_max_main_hor ,cy_main_hor,slope_lines_org_hor,y_min_main_hor, y_max_main_hor, cx_main_hor=find_features_of_lines(contours_lines_hor) + + x_width_smaller_than_acolumn_width=img_in_hor.shape[1]/float(num_col_classifier+1.) 
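
For context on the hunk below: the line above fixes the narrowest believable full separator at page_width / (num_col + 1); the code that follows then groups horizontal segments lying on the same horizon (via contours_in_same_horizon) and accepts a group as one separator only when its pieces nearly tile a near-page-wide span (the 0.85 coverage checks). A rough, self-contained sketch of that merging heuristic — merge_collinear_separators and the plain y-distance grouping are hypothetical stand-ins, not the actual helpers:

    import numpy as np

    def merge_collinear_separators(cy, x_min, x_max, page_width, num_col, y_tol=20):
        """Sketch only: chain segments whose centres are within y_tol vertically,
        then keep groups whose pieces nearly tile a near-page-wide span."""
        cy, x_min, x_max = map(np.asarray, (cy, x_min, x_max))
        if len(cy) == 0:
            return []
        order = np.argsort(cy)
        groups, current = [], [order[0]]
        for prev, nxt in zip(order[:-1], order[1:]):
            if cy[nxt] - cy[prev] <= y_tol:
                current.append(nxt)
            else:
                groups.append(current)
                current = [nxt]
        groups.append(current)

        min_len = page_width / float(num_col + 1)   # one column width, as above
        separators = []
        for g in groups:
            span = x_max[g].max() - x_min[g].min()
            covered = (x_max[g] - x_min[g]).sum()
            # the real code additionally rejects groups whose segment widths vary a lot
            if span >= min_len and span / float(page_width) > 0.85 and covered / float(span) > 0.85:
                separators.append(float(cy[g].mean()))
        return separators
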
- + len_lines_bigger_than_x_width_smaller_than_acolumn_width=len( dist_x_hor[dist_x_hor>=x_width_smaller_than_acolumn_width] ) - len_lines_bigger_than_x_width_smaller_than_acolumn_width_per_column=int(len_lines_bigger_than_x_width_smaller_than_acolumn_width / - float(num_col_classifier)) - if len_lines_bigger_than_x_width_smaller_than_acolumn_width_per_column < 10: - args_hor=np.arange(len(slope_lines_hor)) + + len_lines_bigger_than_x_width_smaller_than_acolumn_width_per_column=int( len_lines_bigger_than_x_width_smaller_than_acolumn_width/float(num_col_classifier) ) + + + if len_lines_bigger_than_x_width_smaller_than_acolumn_width_per_column<10: + args_hor=np.array( range(len(slope_lines_hor) )) all_args_uniq=contours_in_same_horizon(cy_main_hor) #print(all_args_uniq,'all_args_uniq') if len(all_args_uniq)>0: @@ -1298,32 +1189,26 @@ def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new( some_cy=cy_main_hor[all_args_uniq[dd]] some_x_min=x_min_main_hor[all_args_uniq[dd]] some_x_max=x_max_main_hor[all_args_uniq[dd]] - + #img_in=np.zeros(separators_closeup_n[:,:,2].shape) #print(img_p_in_ver.shape[1],some_x_max-some_x_min,'xdiff') diff_x_some=some_x_max-some_x_min for jv in range(len(some_args)): - img_p_in=cv2.fillPoly(img_in_hor, pts=[contours_lines_hor[some_args[jv]]], color=(1,1,1)) + + img_p_in=cv2.fillPoly(img_in_hor, pts =[contours_lines_hor[some_args[jv]]], color=(1,1,1)) + if any(i_diff>(img_p_in_ver.shape[1]/float(3.3)) for i_diff in diff_x_some): - img_p_in[int(np.mean(some_cy))-5: - int(np.mean(some_cy))+5, - int(np.min(some_x_min)): - int(np.max(some_x_max)) ]=1 + img_p_in[int(np.mean(some_cy))-5:int(np.mean(some_cy))+5, int(np.min(some_x_min)):int(np.max(some_x_max)) ]=1 + sum_dis=dist_x_hor[some_args].sum() diff_max_min_uniques=np.max(x_max_main_hor[some_args])-np.min(x_min_main_hor[some_args]) - - if (diff_max_min_uniques > sum_dis and - sum_dis / float(diff_max_min_uniques) > 0.85 and - diff_max_min_uniques / float(img_p_in_ver.shape[1]) > 0.85 and - np.std(dist_x_hor[some_args]) < 0.55 * np.mean(dist_x_hor[some_args])): - # print(dist_x_hor[some_args], - # dist_x_hor[some_args].sum(), - # np.min(x_min_main_hor[some_args]), - # np.max(x_max_main_hor[some_args]),'jalibdi') - # print(np.mean( dist_x_hor[some_args] ), - # np.std( dist_x_hor[some_args] ), - # np.var( dist_x_hor[some_args] ),'jalibdiha') + + + if diff_max_min_uniques>sum_dis and ( (sum_dis/float(diff_max_min_uniques) ) >0.85 ) and ( (diff_max_min_uniques/float(img_p_in_ver.shape[1]))>0.85 ) and np.std( dist_x_hor[some_args] )<(0.55*np.mean( dist_x_hor[some_args] )): + #print(dist_x_hor[some_args],dist_x_hor[some_args].sum(),np.min(x_min_main_hor[some_args]) ,np.max(x_max_main_hor[some_args]),'jalibdi') + #print(np.mean( dist_x_hor[some_args] ),np.std( dist_x_hor[some_args] ),np.var( dist_x_hor[some_args] ),'jalibdiha') special_separators.append(np.mean(cy_main_hor[some_args])) + else: img_p_in=img_in_hor special_separators=[] @@ -1331,19 +1216,29 @@ def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new( img_p_in=img_in_hor special_separators=[] - img_p_in_ver[img_p_in_ver == 255] = 1 - sep_ver_hor = img_p_in + img_p_in_ver - sep_ver_hor_cross = (sep_ver_hor == 2) * 1 - _, thresh = cv2.threshold(sep_ver_hor_cross.astype(np.uint8), 0, 255, 0) - contours_cross, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - center_cross = np.array(find_center_of_contours(contours_cross), dtype=int) - for cx, cy in center_cross.T: - img_p_in[cy - 
30: cy + 30, cx + 5: cx + 40] = 0 - img_p_in[cy - 30: cy + 30, cx - 40: cx - 4] = 0 + + img_p_in_ver[:,:,0][img_p_in_ver[:,:,0]==255]=1 + sep_ver_hor=img_p_in+img_p_in_ver + + + sep_ver_hor_cross=(sep_ver_hor[:,:,0]==2)*1 + + sep_ver_hor_cross=np.repeat(sep_ver_hor_cross[:, :, np.newaxis], 3, axis=2) + sep_ver_hor_cross=sep_ver_hor_cross.astype(np.uint8) + imgray = cv2.cvtColor(sep_ver_hor_cross, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + contours_cross,_=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + + cx_cross,cy_cross ,_ , _, _ ,_,_=find_new_features_of_contours(contours_cross) + + for ii in range(len(cx_cross)): + img_p_in[int(cy_cross[ii])-30:int(cy_cross[ii])+30,int(cx_cross[ii])+5:int(cx_cross[ii])+40,0]=0 + img_p_in[int(cy_cross[ii])-30:int(cy_cross[ii])+30,int(cx_cross[ii])-40:int(cx_cross[ii])-4,0]=0 + else: img_p_in=np.copy(img_in_hor) special_separators=[] - return img_p_in, special_separators + return img_p_in[:,:,0],special_separators def return_points_with_boundies(peaks_neg_fin, first_point, last_point): peaks_neg_tot = [] @@ -1353,46 +1248,102 @@ def return_points_with_boundies(peaks_neg_fin, first_point, last_point): peaks_neg_tot.append(last_point) return peaks_neg_tot -def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, label_lines, contours_h=None): - t_ins_c0 = time.time() - separators_closeup=( (region_pre_p[:,:]==label_lines))*1 - separators_closeup[0:110,:]=0 - separators_closeup[separators_closeup.shape[0]-150:,:]=0 +def find_number_of_columns_in_document(region_pre_p, num_col_classifier, pixel_lines, contours_h=None): + separators_closeup=( (region_pre_p[:,:,:]==pixel_lines))*1 + + separators_closeup[0:110,:,:]=0 + separators_closeup[separators_closeup.shape[0]-150:,:,:]=0 + kernel = np.ones((5,5),np.uint8) + separators_closeup=separators_closeup.astype(np.uint8) separators_closeup = cv2.dilate(separators_closeup,kernel,iterations = 1) separators_closeup = cv2.erode(separators_closeup,kernel,iterations = 1) + separators_closeup_new=np.zeros((separators_closeup.shape[0] ,separators_closeup.shape[1] )) + + + + ##_,separators_closeup_n=self.combine_hor_lines_and_delete_cross_points_and_get_lines_features_back(region_pre_p[:,:,0]) separators_closeup_n=np.copy(separators_closeup) + separators_closeup_n=separators_closeup_n.astype(np.uint8) - + ##plt.imshow(separators_closeup_n[:,:,0]) + ##plt.show() + separators_closeup_n_binary=np.zeros(( separators_closeup_n.shape[0],separators_closeup_n.shape[1]) ) - separators_closeup_n_binary[:,:]=separators_closeup_n[:,:] + separators_closeup_n_binary[:,:]=separators_closeup_n[:,:,0] + separators_closeup_n_binary[:,:][separators_closeup_n_binary[:,:]!=0]=1 - - _, thresh_e = cv2.threshold(separators_closeup_n_binary, 0, 255, 0) - contours_line_e, _ = cv2.findContours(thresh_e.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - _, dist_xe, _, _, _, _, y_min_main, y_max_main, _ = \ - find_features_of_lines(contours_line_e) - dist_ye = y_max_main - y_min_main - args_e=np.arange(len(contours_line_e)) - args_hor_e=args_e[(dist_ye<=50) & - (dist_xe>=3*dist_ye)] + #separators_closeup_n_binary[:,:][separators_closeup_n_binary[:,:]==0]=255 + #separators_closeup_n_binary[:,:][separators_closeup_n_binary[:,:]==-255]=0 + + + #separators_closeup_n_binary=(separators_closeup_n_binary[:,:]==2)*1 + + #gray = cv2.cvtColor(separators_closeup_n, cv2.COLOR_BGR2GRAY) + + ### + + #print(separators_closeup_n_binary.shape) + 
gray_early=np.repeat(separators_closeup_n_binary[:, :, np.newaxis], 3, axis=2) + gray_early=gray_early.astype(np.uint8) + + #print(gray_early.shape,'burda') + imgray_e = cv2.cvtColor(gray_early, cv2.COLOR_BGR2GRAY) + #print('burda2') + ret_e, thresh_e = cv2.threshold(imgray_e, 0, 255, 0) + + #print('burda3') + contours_line_e,hierarchy_e=cv2.findContours(thresh_e,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + + #slope_lines_e,dist_x_e, x_min_main_e ,x_max_main_e ,cy_main_e,slope_lines_org_e,y_min_main_e, y_max_main_e, cx_main_e=self.find_features_of_lines(contours_line_e) + + slope_linese,dist_xe, x_min_maine ,x_max_maine ,cy_maine,slope_lines_orge,y_min_maine, y_max_maine, cx_maine=find_features_of_lines(contours_line_e) + + dist_ye=y_max_maine-y_min_maine + #print(y_max_maine-y_min_maine,'y') + #print(dist_xe,'x') + + + args_e=np.array(range(len(contours_line_e))) + args_hor_e=args_e[(dist_ye<=50) & (dist_xe>=3*dist_ye)] + + #print(args_hor_e,'jidi',len(args_hor_e),'jilva') + cnts_hor_e=[] for ce in args_hor_e: cnts_hor_e.append(contours_line_e[ce]) - - separators_closeup_n_binary=cv2.fillPoly(separators_closeup_n_binary, pts=cnts_hor_e, color=0) + #print(len(slope_linese),'lieee') + + figs_e=np.zeros(thresh_e.shape) + figs_e=cv2.fillPoly(figs_e,pts=cnts_hor_e,color=(1,1,1)) + + #plt.imshow(figs_e) + #plt.show() + + ### + + separators_closeup_n_binary=cv2.fillPoly(separators_closeup_n_binary,pts=cnts_hor_e,color=(0,0,0)) + gray = cv2.bitwise_not(separators_closeup_n_binary) gray=gray.astype(np.uint8) + + #plt.imshow(gray) + #plt.show() + + bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \ - cv2.THRESH_BINARY, 15, -2) + cv2.THRESH_BINARY, 15, -2) + ##plt.imshow(bw[:,:]) + ##plt.show() + horizontal = np.copy(bw) vertical = np.copy(bw) - + cols = horizontal.shape[1] horizontal_size = cols // 30 # Create structure element for extracting horizontal lines through morphology operations @@ -1402,10 +1353,22 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, horizontal = cv2.dilate(horizontal, horizontalStructure) kernel = np.ones((5,5),np.uint8) + + horizontal = cv2.dilate(horizontal,kernel,iterations = 2) horizontal = cv2.erode(horizontal,kernel,iterations = 2) - horizontal = cv2.fillPoly(horizontal, pts=cnts_hor_e, color=255) - + + + ### + #print(np.unique(horizontal),'uni') + horizontal=cv2.fillPoly(horizontal,pts=cnts_hor_e,color=(255,255,255)) + ### + + + + #plt.imshow(horizontal) + #plt.show() + rows = vertical.shape[0] verticalsize = rows // 30 # Create structure element for extracting vertical lines through morphology operations @@ -1413,21 +1376,38 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, # Apply morphology operations vertical = cv2.erode(vertical, verticalStructure) vertical = cv2.dilate(vertical, verticalStructure) + vertical = cv2.dilate(vertical,kernel,iterations = 1) + # Show extracted vertical lines - horizontal, special_separators = \ - combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new( - vertical, horizontal, num_col_classifier) - + horizontal,special_separators=combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new(vertical,horizontal,num_col_classifier) + + + #plt.imshow(horizontal) + #plt.show() + #print(vertical.shape,np.unique(vertical),'verticalvertical') separators_closeup_new[:,:][vertical[:,:]!=0]=1 separators_closeup_new[:,:][horizontal[:,:]!=0]=1 + + ##plt.imshow(separators_closeup_new) + ##plt.show() + ##separators_closeup_n + 
vertical=np.repeat(vertical[:, :, np.newaxis], 3, axis=2) + vertical=vertical.astype(np.uint8) + + ##plt.plot(vertical[:,:,0].sum(axis=0)) + ##plt.show() + + #plt.plot(vertical[:,:,0].sum(axis=1)) + #plt.show() - _, thresh = cv2.threshold(vertical, 0, 255, 0) - contours_line_vers, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - slope_lines, dist_x, x_min_main, x_max_main, cy_main, slope_lines_org, y_min_main, y_max_main, cx_main = \ - find_features_of_lines(contours_line_vers) - - args=np.arange(len(slope_lines)) + imgray = cv2.cvtColor(vertical, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + + contours_line_vers,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + slope_lines,dist_x, x_min_main ,x_max_main ,cy_main,slope_lines_org,y_min_main, y_max_main, cx_main=find_features_of_lines(contours_line_vers) + #print(slope_lines,'vertical') + args=np.array( range(len(slope_lines) )) args_ver=args[slope_lines==1] dist_x_ver=dist_x[slope_lines==1] y_min_main_ver=y_min_main[slope_lines==1] @@ -1437,17 +1417,25 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, cx_main_ver=cx_main[slope_lines==1] dist_y_ver=y_max_main_ver-y_min_main_ver len_y=separators_closeup.shape[0]/3.0 - - _, thresh = cv2.threshold(horizontal, 0, 255, 0) - contours_line_hors, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - slope_lines, dist_x, x_min_main, x_max_main, cy_main, slope_lines_org, y_min_main, y_max_main, cx_main = \ - find_features_of_lines(contours_line_hors) - + + + #plt.imshow(horizontal) + #plt.show() + + horizontal=np.repeat(horizontal[:, :, np.newaxis], 3, axis=2) + horizontal=horizontal.astype(np.uint8) + imgray = cv2.cvtColor(horizontal, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + + contours_line_hors,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + slope_lines,dist_x, x_min_main ,x_max_main ,cy_main,slope_lines_org,y_min_main, y_max_main, cx_main=find_features_of_lines(contours_line_hors) + slope_lines_org_hor=slope_lines_org[slope_lines==0] - args=np.arange(len(slope_lines)) + args=np.array( range(len(slope_lines) )) len_x=separators_closeup.shape[1]/5.0 - dist_y=np.abs(y_max_main-y_min_main) + dist_y=np.abs(y_max_main-y_min_main) + args_hor=args[slope_lines==0] dist_x_hor=dist_x[slope_lines==0] y_min_main_hor=y_min_main[slope_lines==0] @@ -1464,347 +1452,413 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, y_min_main_hor=y_min_main_hor[dist_x_hor>=len_x/2.0] y_max_main_hor=y_max_main_hor[dist_x_hor>=len_x/2.0] dist_y_hor=dist_y_hor[dist_x_hor>=len_x/2.0] + slope_lines_org_hor=slope_lines_org_hor[dist_x_hor>=len_x/2.0] dist_x_hor=dist_x_hor[dist_x_hor>=len_x/2.0] - + + matrix_of_lines_ch=np.zeros((len(cy_main_hor)+len(cx_main_ver),10)) + matrix_of_lines_ch[:len(cy_main_hor),0]=args_hor matrix_of_lines_ch[len(cy_main_hor):,0]=args_ver + + matrix_of_lines_ch[len(cy_main_hor):,1]=cx_main_ver + matrix_of_lines_ch[:len(cy_main_hor),2]=x_min_main_hor+50#x_min_main_hor+150 matrix_of_lines_ch[len(cy_main_hor):,2]=x_min_main_ver + matrix_of_lines_ch[:len(cy_main_hor),3]=x_max_main_hor-50#x_max_main_hor-150 matrix_of_lines_ch[len(cy_main_hor):,3]=x_max_main_ver + matrix_of_lines_ch[:len(cy_main_hor),4]=dist_x_hor matrix_of_lines_ch[len(cy_main_hor):,4]=dist_x_ver + matrix_of_lines_ch[:len(cy_main_hor),5]=cy_main_hor + + 
matrix_of_lines_ch[:len(cy_main_hor),6]=y_min_main_hor matrix_of_lines_ch[len(cy_main_hor):,6]=y_min_main_ver + matrix_of_lines_ch[:len(cy_main_hor),7]=y_max_main_hor matrix_of_lines_ch[len(cy_main_hor):,7]=y_max_main_ver + matrix_of_lines_ch[:len(cy_main_hor),8]=dist_y_hor matrix_of_lines_ch[len(cy_main_hor):,8]=dist_y_ver - matrix_of_lines_ch[len(cy_main_hor):,9]=1 + + matrix_of_lines_ch[len(cy_main_hor):,9]=1 + + + if contours_h is not None: - _, dist_x_head, x_min_main_head, x_max_main_head, cy_main_head, _, y_min_main_head, y_max_main_head, _ = \ - find_features_of_lines(contours_h) + slope_lines_head,dist_x_head, x_min_main_head ,x_max_main_head ,cy_main_head,slope_lines_org_head,y_min_main_head, y_max_main_head, cx_main_head=find_features_of_lines(contours_h) matrix_l_n=np.zeros((matrix_of_lines_ch.shape[0]+len(cy_main_head),matrix_of_lines_ch.shape[1])) matrix_l_n[:matrix_of_lines_ch.shape[0],:]=np.copy(matrix_of_lines_ch[:,:]) - args_head=np.arange(len(cy_main_head)) + len(cy_main_hor) - + args_head=np.array(range(len(cy_main_head)))+len(cy_main_hor) + matrix_l_n[matrix_of_lines_ch.shape[0]:,0]=args_head matrix_l_n[matrix_of_lines_ch.shape[0]:,2]=x_min_main_head+30 matrix_l_n[matrix_of_lines_ch.shape[0]:,3]=x_max_main_head-30 + matrix_l_n[matrix_of_lines_ch.shape[0]:,4]=dist_x_head + matrix_l_n[matrix_of_lines_ch.shape[0]:,5]=y_min_main_head-3-8 matrix_l_n[matrix_of_lines_ch.shape[0]:,6]=y_min_main_head-5-8 matrix_l_n[matrix_of_lines_ch.shape[0]:,7]=y_max_main_head#y_min_main_head+1-8 matrix_l_n[matrix_of_lines_ch.shape[0]:,8]=4 + matrix_of_lines_ch=np.copy(matrix_l_n) - - cy_main_splitters=cy_main_hor[(x_min_main_hor<=.16*region_pre_p.shape[1]) & - (x_max_main_hor>=.84*region_pre_p.shape[1])] + + + cy_main_splitters=cy_main_hor[ (x_min_main_hor<=.16*region_pre_p.shape[1]) & (x_max_main_hor>=.84*region_pre_p.shape[1] )] + cy_main_splitters=np.array( list(cy_main_splitters)+list(special_separators)) + if contours_h is not None: try: - cy_main_splitters_head=cy_main_head[(x_min_main_head<=.16*region_pre_p.shape[1]) & - (x_max_main_head>=.84*region_pre_p.shape[1])] + cy_main_splitters_head=cy_main_head[ (x_min_main_head<=.16*region_pre_p.shape[1]) & (x_max_main_head>=.84*region_pre_p.shape[1] )] cy_main_splitters=np.array( list(cy_main_splitters)+list(cy_main_splitters_head)) except: pass args_cy_splitter=np.argsort(cy_main_splitters) + cy_main_splitters_sort=cy_main_splitters[args_cy_splitter] - + splitter_y_new=[] splitter_y_new.append(0) for i in range(len(cy_main_splitters_sort)): - splitter_y_new.append( cy_main_splitters_sort[i] ) + splitter_y_new.append( cy_main_splitters_sort[i] ) + splitter_y_new.append(region_pre_p.shape[0]) + splitter_y_new_diff=np.diff(splitter_y_new)/float(region_pre_p.shape[0])*100 - - args_big_parts=np.arange(len(splitter_y_new_diff))[ splitter_y_new_diff>22 ] - + + args_big_parts=np.array(range(len(splitter_y_new_diff))) [ splitter_y_new_diff>22 ] + + + regions_without_separators=return_regions_without_separators(region_pre_p) + + length_y_threshold=regions_without_separators.shape[0]/4.0 - + num_col_fin=0 peaks_neg_fin_fin=[] + for itiles in args_big_parts: - regions_without_separators_tile=regions_without_separators[int(splitter_y_new[itiles]): - int(splitter_y_new[itiles+1]),:] + + + regions_without_separators_tile=regions_without_separators[int(splitter_y_new[itiles]):int(splitter_y_new[itiles+1]),:,0] + #image_page_background_zero_tile=image_page_background_zero[int(splitter_y_new[itiles]):int(splitter_y_new[itiles+1]),:] + + 
#print(regions_without_separators_tile.shape)
+            ##plt.imshow(regions_without_separators_tile)
+            ##plt.show()
+
+            #num_col, peaks_neg_fin=self.find_num_col(regions_without_separators_tile,multiplier=6.0)
+
+            #regions_without_separators_tile=cv2.erode(regions_without_separators_tile,kernel,iterations = 3)
+            #
         try:
-            num_col, peaks_neg_fin = find_num_col(regions_without_separators_tile,
-                                                  num_col_classifier, tables, multiplier=7.0)
+            num_col, peaks_neg_fin = find_num_col(regions_without_separators_tile,multiplier=7.0)
         except:
             num_col = 0
             peaks_neg_fin = []
+
         if num_col>num_col_fin:
             num_col_fin=num_col
             peaks_neg_fin_fin=peaks_neg_fin
+
     if len(args_big_parts)==1 and (len(peaks_neg_fin_fin)+1)<num_col_classifier:
         peaks_neg_fin=find_num_col_by_vertical_lines(vertical)
         peaks_neg_fin=peaks_neg_fin[peaks_neg_fin>=500]
         peaks_neg_fin=peaks_neg_fin[peaks_neg_fin<=(vertical.shape[1]-500)]
         peaks_neg_fin_fin=peaks_neg_fin[:]
-
+
+    #print(peaks_neg_fin_fin,'peaks_neg_fin_fintaza')
+
     return num_col_fin, peaks_neg_fin_fin,matrix_of_lines_ch,splitter_y_new,separators_closeup_n
+
-def return_boxes_of_images_by_order_of_reading_new(
-        splitter_y_new, regions_without_separators,
-        matrix_of_lines_ch,
-        num_col_classifier, erosion_hurts, tables,
-        right2left_readingorder,
-        logger=None):
-
-    if right2left_readingorder:
-        regions_without_separators = cv2.flip(regions_without_separators,1)
-    if logger is None:
-        logger = getLogger(__package__)
-    logger.debug('enter return_boxes_of_images_by_order_of_reading_new')
-
+def return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_without_separators, matrix_of_lines_ch, num_col_classifier, erosion_hurts):
     boxes=[]
-    peaks_neg_tot_tables = []
-    splitter_y_new = np.array(splitter_y_new, dtype=int)
+
+
     for i in range(len(splitter_y_new)-1):
         #print(splitter_y_new[i],splitter_y_new[i+1])
-        matrix_new = matrix_of_lines_ch[:,:][(matrix_of_lines_ch[:,6]> splitter_y_new[i] ) &
-                                             (matrix_of_lines_ch[:,7]< splitter_y_new[i+1] )]
+        matrix_new=matrix_of_lines_ch[:,:][ (matrix_of_lines_ch[:,6]> splitter_y_new[i] ) & (matrix_of_lines_ch[:,7]< splitter_y_new[i+1] ) ]
         #print(len( matrix_new[:,9][matrix_new[:,9]==1] ))
+        #print(matrix_new[:,8][matrix_new[:,9]==1],'gaddaaa')
+
         # check to see is there any vertical separator to find holes.
-        #if (len(matrix_new[:,9][matrix_new[:,9]==1]) > 0 and
-        #    np.max(matrix_new[:,8][matrix_new[:,9]==1]) >=
-        #    0.1 * (np.abs(splitter_y_new[i+1]-splitter_y_new[i]))):
-        if True:
+        if 1>0:#len( matrix_new[:,9][matrix_new[:,9]==1] )>0 and np.max(matrix_new[:,8][matrix_new[:,9]==1])>=0.1*(np.abs(splitter_y_new[i+1]-splitter_y_new[i] )):
+
             try:
-                num_col, peaks_neg_fin = find_num_col(
-                    regions_without_separators[splitter_y_new[i]:splitter_y_new[i+1], :],
-                    num_col_classifier, tables, multiplier=6. if erosion_hurts else 7.)
+                if erosion_hurts:
+                    num_col, peaks_neg_fin=find_num_col(regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:],multiplier=6.)
+                else:
+                    num_col, peaks_neg_fin=find_num_col(regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:],multiplier=7.)
except: peaks_neg_fin=[] - num_col = 0 + + try: - if (len(peaks_neg_fin)+1)=len(peaks_neg_fin2): peaks_neg_fin=list(np.copy(peaks_neg_fin1)) else: peaks_neg_fin=list(np.copy(peaks_neg_fin2)) + + + peaks_neg_fin=list(np.array(peaks_neg_fin)+peaks_neg_fin_early[i_n]) - + if i_n!=(len(peaks_neg_fin_early)-2): peaks_neg_fin_rev.append(peaks_neg_fin_early[i_n+1]) #print(peaks_neg_fin,'peaks_neg_fin') peaks_neg_fin_rev=peaks_neg_fin_rev+peaks_neg_fin - if len(peaks_neg_fin_rev)>=len(peaks_neg_fin_org): + + + + + if len(peaks_neg_fin_rev)>=len(peaks_neg_fin_org): peaks_neg_fin=list(np.sort(peaks_neg_fin_rev)) num_col=len(peaks_neg_fin) else: peaks_neg_fin=list(np.copy(peaks_neg_fin_org)) num_col=len(peaks_neg_fin) - + #print(peaks_neg_fin,'peaks_neg_fin') except: - logger.exception("cannot find peaks consistent with columns") - #num_col, peaks_neg_fin = find_num_col( - # regions_without_separators[splitter_y_new[i]:splitter_y_new[i+1],:], - # multiplier=7.0) + pass + #num_col, peaks_neg_fin=find_num_col(regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:],multiplier=7.0) x_min_hor_some=matrix_new[:,2][ (matrix_new[:,9]==0) ] x_max_hor_some=matrix_new[:,3][ (matrix_new[:,9]==0) ] cy_hor_some=matrix_new[:,5][ (matrix_new[:,9]==0) ] cy_hor_diff=matrix_new[:,7][ (matrix_new[:,9]==0) ] arg_org_hor_some=matrix_new[:,0][ (matrix_new[:,9]==0) ] - - if right2left_readingorder: - x_max_hor_some_new = regions_without_separators.shape[1] - x_min_hor_some - x_min_hor_some_new = regions_without_separators.shape[1] - x_max_hor_some - x_min_hor_some =list(np.copy(x_min_hor_some_new)) - x_max_hor_some =list(np.copy(x_max_hor_some_new)) + + + + peaks_neg_tot=return_points_with_boundies(peaks_neg_fin,0, regions_without_separators[:,:].shape[1]) - peaks_neg_tot_tables.append(peaks_neg_tot) + + reading_order_type,x_starting,x_ending,y_type_2,y_diff_type_2,y_lines_without_mother,x_start_without_mother,x_end_without_mother,there_is_sep_with_child,y_lines_with_child_without_mother,x_start_with_child_without_mother,x_end_with_child_without_mother=return_x_start_end_mothers_childs_and_type_of_reading_order(x_min_hor_some,x_max_hor_some,cy_hor_some,peaks_neg_tot,cy_hor_diff) + - reading_order_type, x_starting, x_ending, y_type_2, y_diff_type_2, \ - y_lines_without_mother, x_start_without_mother, x_end_without_mother, there_is_sep_with_child, \ - y_lines_with_child_without_mother, x_start_with_child_without_mother, x_end_with_child_without_mother, \ - new_main_sep_y = return_x_start_end_mothers_childs_and_type_of_reading_order( - x_min_hor_some, x_max_hor_some, cy_hor_some, peaks_neg_tot, cy_hor_diff) + + if (reading_order_type==1) or (reading_order_type==0 and (len(y_lines_without_mother)>=2 or there_is_sep_with_child==1)): - all_columns = set(range(len(peaks_neg_tot) - 1)) - if ((reading_order_type==1) or - (reading_order_type==0 and - (len(y_lines_without_mother)>=2 or there_is_sep_with_child==1))): + try: - y_grenze = splitter_y_new[i] + 300 + y_grenze=int(splitter_y_new[i])+300 + + + #check if there is a big separator in this y_mains_sep_ohne_grenzen - - args_early_ys=np.arange(len(y_type_2)) + + args_early_ys=np.array(range(len(y_type_2))) + #print(args_early_ys,'args_early_ys') - #print(splitter_y_new[i], splitter_y_new[i+1]) - - x_starting_up = x_starting[(y_type_2 > splitter_y_new[i]) & - (y_type_2 <= y_grenze)] - x_ending_up = x_ending[(y_type_2 > splitter_y_new[i]) & - (y_type_2 <= y_grenze)] - y_type_2_up = y_type_2[(y_type_2 > splitter_y_new[i]) & - (y_type_2 <= y_grenze)] - 
y_diff_type_2_up = y_diff_type_2[(y_type_2 > splitter_y_new[i]) & - (y_type_2 <= y_grenze)] - args_up = args_early_ys[(y_type_2 > splitter_y_new[i]) & - (y_type_2 <= y_grenze)] - if len(y_type_2_up) > 0: - y_main_separator_up = y_type_2_up [(x_starting_up==0) & - (x_ending_up==(len(peaks_neg_tot)-1) )] - y_diff_main_separator_up = y_diff_type_2_up[(x_starting_up==0) & - (x_ending_up==(len(peaks_neg_tot)-1) )] - args_main_to_deleted = args_up[(x_starting_up==0) & - (x_ending_up==(len(peaks_neg_tot)-1) )] + #print(int(splitter_y_new[i]),int(splitter_y_new[i+1])) + + y_type_2_up=np.array(y_type_2)[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] + x_starting_up=np.array(x_starting)[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] + x_ending_up=np.array(x_ending)[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] + y_diff_type_2_up=np.array(y_diff_type_2)[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] + args_up=args_early_ys[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] + + + + if len(y_type_2_up)>0: + y_main_separator_up=y_type_2_up[(x_starting_up==0) & (x_ending_up==(len(peaks_neg_tot)-1) )] + y_diff_main_separator_up=y_diff_type_2_up[(x_starting_up==0) & (x_ending_up==(len(peaks_neg_tot)-1) )] + args_main_to_deleted=args_up[(x_starting_up==0) & (x_ending_up==(len(peaks_neg_tot)-1) )] #print(y_main_separator_up,y_diff_main_separator_up,args_main_to_deleted,'fffffjammmm') - if len(y_diff_main_separator_up) > 0: - args_to_be_kept = np.array(list( set(args_early_ys) - set(args_main_to_deleted) )) + + if len(y_diff_main_separator_up)>0: + args_to_be_kept=np.array( list( set(args_early_ys)-set(args_main_to_deleted) ) ) #print(args_to_be_kept,'args_to_be_kept') - boxes.append([0, peaks_neg_tot[len(peaks_neg_tot)-1], - splitter_y_new[i], y_diff_main_separator_up.max()]) - splitter_y_new[i] = y_diff_main_separator_up.max() - + boxes.append([0,peaks_neg_tot[len(peaks_neg_tot)-1],int(splitter_y_new[i]),int( np.max(y_diff_main_separator_up))]) + splitter_y_new[i]=[ np.max(y_diff_main_separator_up) ][0] + #print(splitter_y_new[i],'splitter_y_new[i]') - y_type_2 = y_type_2[args_to_be_kept] - x_starting = x_starting[args_to_be_kept] - x_ending = x_ending[args_to_be_kept] - y_diff_type_2 = y_diff_type_2[args_to_be_kept] - + y_type_2=np.array(y_type_2)[args_to_be_kept] + x_starting=np.array(x_starting)[args_to_be_kept] + x_ending=np.array(x_ending)[args_to_be_kept] + y_diff_type_2=np.array(y_diff_type_2)[args_to_be_kept] + #print('galdiha') - y_grenze = splitter_y_new[i] + 200 - args_early_ys2=np.arange(len(y_type_2)) - y_type_2_up=y_type_2[(y_type_2 > splitter_y_new[i]) & - (y_type_2 <= y_grenze)] - x_starting_up=x_starting[(y_type_2 > splitter_y_new[i]) & - (y_type_2 <= y_grenze)] - x_ending_up=x_ending[(y_type_2 > splitter_y_new[i]) & - (y_type_2 <= y_grenze)] - y_diff_type_2_up=y_diff_type_2[(y_type_2 > splitter_y_new[i]) & - (y_type_2 <= y_grenze)] - args_up2=args_early_ys2[(y_type_2 > splitter_y_new[i]) & - (y_type_2 <= y_grenze)] + y_grenze=int(splitter_y_new[i])+200 + + + args_early_ys2=np.array(range(len(y_type_2))) + y_type_2_up=np.array(y_type_2)[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] + x_starting_up=np.array(x_starting)[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] + x_ending_up=np.array(x_ending)[( np.array(y_type_2)>int(splitter_y_new[i]) ) & 
(np.array(y_type_2)<=y_grenze)] + y_diff_type_2_up=np.array(y_diff_type_2)[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] + args_up2=args_early_ys2[( np.array(y_type_2)>int(splitter_y_new[i]) ) & (np.array(y_type_2)<=y_grenze)] + + #print(y_type_2_up,x_starting_up,x_ending_up,'didid') - nodes_in = set() + + nodes_in=[] for ij in range(len(x_starting_up)): - nodes_in.update(range(x_starting_up[ij], - x_ending_up[ij])) - #print(nodes_in,'nodes_in') - - if nodes_in == set(range(len(peaks_neg_tot)-1)): + nodes_in=nodes_in+list(np.array(range(x_starting_up[ij],x_ending_up[ij]))) + + #print(np.unique(nodes_in),'nodes_in') + + if set(np.unique(nodes_in))==set(np.array(range(len(peaks_neg_tot)-1)) ): pass - elif nodes_in == set(range(1, len(peaks_neg_tot)-1)): + elif set( np.unique(nodes_in) )==set( np.array(range(1,len(peaks_neg_tot)-1)) ): pass else: #print('burdaydikh') - args_to_be_kept2=np.array(list( set(args_early_ys2)-set(args_up2) )) - + args_to_be_kept2=np.array( list( set(args_early_ys2)-set(args_up2) ) ) + if len(args_to_be_kept2)>0: - y_type_2 = y_type_2[args_to_be_kept2] - x_starting = x_starting[args_to_be_kept2] - x_ending = x_ending[args_to_be_kept2] - y_diff_type_2 = y_diff_type_2[args_to_be_kept2] + y_type_2=np.array(y_type_2)[args_to_be_kept2] + x_starting=np.array(x_starting)[args_to_be_kept2] + x_ending=np.array(x_ending)[args_to_be_kept2] + y_diff_type_2=np.array(y_diff_type_2)[args_to_be_kept2] else: pass + #print('burdaydikh2') + + + elif len(y_diff_main_separator_up)==0: - nodes_in = set() + nodes_in=[] for ij in range(len(x_starting_up)): - nodes_in.update(range(x_starting_up[ij], - x_ending_up[ij])) - #print(nodes_in,'nodes_in2') + nodes_in=nodes_in+list(np.array(range(x_starting_up[ij],x_ending_up[ij]))) + + #print(np.unique(nodes_in),'nodes_in2') #print(np.array(range(len(peaks_neg_tot)-1)),'np.array(range(len(peaks_neg_tot)-1))') - - if nodes_in == set(range(len(peaks_neg_tot)-1)): + + + + if set(np.unique(nodes_in))==set(np.array(range(len(peaks_neg_tot)-1)) ): pass - elif nodes_in == set(range(1,len(peaks_neg_tot)-1)): + elif set(np.unique(nodes_in) )==set( np.array(range(1,len(peaks_neg_tot)-1)) ): pass else: #print('burdaydikh') #print(args_early_ys,'args_early_ys') #print(args_up,'args_up') - args_to_be_kept2=np.array(list( set(args_early_ys) - set(args_up) )) - + args_to_be_kept2=np.array( list( set(args_early_ys)-set(args_up) ) ) + #print(args_to_be_kept2,'args_to_be_kept2') + #print(len(y_type_2),len(x_starting),len(x_ending),len(y_diff_type_2)) + if len(args_to_be_kept2)>0: - y_type_2 = y_type_2[args_to_be_kept2] - x_starting = x_starting[args_to_be_kept2] - x_ending = x_ending[args_to_be_kept2] - y_diff_type_2 = y_diff_type_2[args_to_be_kept2] + y_type_2=np.array(y_type_2)[args_to_be_kept2] + x_starting=np.array(x_starting)[args_to_be_kept2] + x_ending=np.array(x_ending)[args_to_be_kept2] + y_diff_type_2=np.array(y_diff_type_2)[args_to_be_kept2] else: pass + #print('burdaydikh2') - + + + + + + + x_starting=np.array(x_starting) + x_ending=np.array(x_ending) + y_type_2=np.array(y_type_2) + y_diff_type_2_up=np.array(y_diff_type_2_up) + #int(splitter_y_new[i]) + y_lines_by_order=[] x_start_by_order=[] x_end_by_order=[] + if (len(x_end_with_child_without_mother)==0 and reading_order_type==0) or reading_order_type==1: + + if reading_order_type==1: - y_lines_by_order.append(splitter_y_new[i]) + y_lines_by_order.append(int(splitter_y_new[i])) x_start_by_order.append(0) x_end_by_order.append(len(peaks_neg_tot)-2) else: 
#print(x_start_without_mother,x_end_without_mother,peaks_neg_tot,'dodo') - columns_covered_by_mothers = set() + + columns_covered_by_mothers=[] + for dj in range(len(x_start_without_mother)): - columns_covered_by_mothers.update( - range(x_start_without_mother[dj], - x_end_without_mother[dj])) - columns_not_covered = list(all_columns - columns_covered_by_mothers) - y_type_2 = np.append(y_type_2, np.ones(len(columns_not_covered) + - len(x_start_without_mother), - dtype=int) * splitter_y_new[i]) - ##y_lines_by_order = np.append(y_lines_by_order, [splitter_y_new[i]] * len(columns_not_covered)) - ##x_start_by_order = np.append(x_start_by_order, [0] * len(columns_not_covered)) - x_starting = np.append(x_starting, np.array(columns_not_covered, int)) - x_starting = np.append(x_starting, x_start_without_mother) - x_ending = np.append(x_ending, np.array(columns_not_covered, int) + 1) - x_ending = np.append(x_ending, x_end_without_mother) + columns_covered_by_mothers=columns_covered_by_mothers+list(np.array(range(x_start_without_mother[dj],x_end_without_mother[dj])) ) + columns_covered_by_mothers=list(set(columns_covered_by_mothers)) + + all_columns=np.array(range(len(peaks_neg_tot)-1)) + + columns_not_covered=list( set(all_columns)-set(columns_covered_by_mothers) ) + + + y_type_2=list(y_type_2) + x_starting=list(x_starting) + x_ending=list(x_ending) + + for lj in columns_not_covered: + y_type_2.append(int(splitter_y_new[i])) + x_starting.append(lj) + x_ending.append(lj+1) + ##y_lines_by_order.append(int(splitter_y_new[i])) + ##x_start_by_order.append(0) + for lk in range(len(x_start_without_mother)): + y_type_2.append(int(splitter_y_new[i])) + x_starting.append(x_start_without_mother[lk]) + x_ending.append(x_end_without_mother[lk]) + + + y_type_2=np.array(y_type_2) + x_starting=np.array(x_starting) + x_ending=np.array(x_ending) + + + - ind_args=np.arange(len(y_type_2)) + ind_args=np.array(range(len(y_type_2))) #ind_args=np.array(ind_args) #print(ind_args,'ind_args') for column in range(len(peaks_neg_tot)-1): @@ -1828,113 +1882,158 @@ def return_boxes_of_images_by_order_of_reading_new( y_lines_by_order.append(y_col_sort[ii]) x_start_by_order.append(x_start_column_sort[ii]) x_end_by_order.append(x_end_column_sort[ii]-1) + else: - #print(x_start_without_mother,x_end_without_mother,peaks_neg_tot,'dodo') - columns_covered_by_mothers = set() - for dj in range(len(x_start_without_mother)): - columns_covered_by_mothers.update( - range(x_start_without_mother[dj], - x_end_without_mother[dj])) - columns_not_covered = list(all_columns - columns_covered_by_mothers) - y_type_2 = np.append(y_type_2, np.ones(len(columns_not_covered) + len(x_start_without_mother), - dtype=int) * splitter_y_new[i]) - ##y_lines_by_order = np.append(y_lines_by_order, [splitter_y_new[i]] * len(columns_not_covered)) - ##x_start_by_order = np.append(x_start_by_order, [0] * len(columns_not_covered)) - x_starting = np.append(x_starting, np.array(columns_not_covered, int)) - x_starting = np.append(x_starting, x_start_without_mother) - x_ending = np.append(x_ending, np.array(columns_not_covered, int) + 1) - x_ending = np.append(x_ending, x_end_without_mother) - columns_covered_by_with_child_no_mothers = set() + #print(x_start_without_mother,x_end_without_mother,peaks_neg_tot,'dodo') + + columns_covered_by_mothers=[] + + for dj in range(len(x_start_without_mother)): + columns_covered_by_mothers=columns_covered_by_mothers+list(np.array(range(x_start_without_mother[dj],x_end_without_mother[dj])) ) + 
columns_covered_by_mothers=list(set(columns_covered_by_mothers)) + + all_columns=np.array(range(len(peaks_neg_tot)-1)) + + columns_not_covered=list( set(all_columns)-set(columns_covered_by_mothers) ) + + + y_type_2=list(y_type_2) + x_starting=list(x_starting) + x_ending=list(x_ending) + + for lj in columns_not_covered: + y_type_2.append(int(splitter_y_new[i])) + x_starting.append(lj) + x_ending.append(lj+1) + ##y_lines_by_order.append(int(splitter_y_new[i])) + ##x_start_by_order.append(0) + for lk in range(len(x_start_without_mother)): + y_type_2.append(int(splitter_y_new[i])) + x_starting.append(x_start_without_mother[lk]) + x_ending.append(x_end_without_mother[lk]) + + + y_type_2=np.array(y_type_2) + x_starting=np.array(x_starting) + x_ending=np.array(x_ending) + + columns_covered_by_with_child_no_mothers=[] + for dj in range(len(x_end_with_child_without_mother)): - columns_covered_by_with_child_no_mothers.update( - range(x_start_with_child_without_mother[dj], - x_end_with_child_without_mother[dj])) - columns_not_covered_child_no_mother = list( - all_columns - columns_covered_by_with_child_no_mothers) + columns_covered_by_with_child_no_mothers=columns_covered_by_with_child_no_mothers+list(np.array(range(x_start_with_child_without_mother[dj],x_end_with_child_without_mother[dj])) ) + columns_covered_by_with_child_no_mothers=list(set(columns_covered_by_with_child_no_mothers)) + + all_columns=np.array(range(len(peaks_neg_tot)-1)) + + columns_not_covered_child_no_mother=list( set(all_columns)-set(columns_covered_by_with_child_no_mothers) ) #indexes_to_be_spanned=[] - for i_s in range(len(x_end_with_child_without_mother)): + for i_s in range( len(x_end_with_child_without_mother) ): columns_not_covered_child_no_mother.append(x_start_with_child_without_mother[i_s]) - columns_not_covered_child_no_mother = np.sort(columns_not_covered_child_no_mother) - ind_args = np.arange(len(y_type_2)) - x_end_with_child_without_mother = np.array(x_end_with_child_without_mother, int) - x_start_with_child_without_mother = np.array(x_start_with_child_without_mother, int) + + + + columns_not_covered_child_no_mother=np.sort(columns_not_covered_child_no_mother) + + + ind_args=np.array(range(len(y_type_2))) + + + for i_s_nc in columns_not_covered_child_no_mother: if i_s_nc in x_start_with_child_without_mother: - x_end_biggest_column = \ - x_end_with_child_without_mother[x_start_with_child_without_mother==i_s_nc][0] - args_all_biggest_lines = ind_args[(x_starting==i_s_nc) & - (x_ending==x_end_biggest_column)] - y_column_nc = y_type_2[args_all_biggest_lines] - x_start_column_nc = x_starting[args_all_biggest_lines] - x_end_column_nc = x_ending[args_all_biggest_lines] - y_column_nc = np.sort(y_column_nc) + x_end_biggest_column=np.array(x_end_with_child_without_mother)[np.array(x_start_with_child_without_mother)==i_s_nc][0] + args_all_biggest_lines=ind_args[(x_starting==i_s_nc) & (x_ending==x_end_biggest_column)] + + args_all_biggest_lines=np.array(args_all_biggest_lines) + y_column_nc=y_type_2[args_all_biggest_lines] + x_start_column_nc=x_starting[args_all_biggest_lines] + x_end_column_nc=x_ending[args_all_biggest_lines] + + y_column_nc=np.sort(y_column_nc) + for i_c in range(len(y_column_nc)): if i_c==(len(y_column_nc)-1): - ind_all_lines_between_nm_wc=ind_args[(y_type_2>y_column_nc[i_c]) & - (y_type_2<splitter_y_new[i+1]) & - (x_starting>=i_s_nc) & - (x_ending<=x_end_biggest_column)] + ind_all_lines_betweeen_nm_wc=ind_args[(y_type_2>y_column_nc[i_c]) & (y_type_2<splitter_y_new[i+1]) & (x_starting>=i_s_nc) & (x_ending<=x_end_biggest_column)] else: - 
ind_all_lines_between_nm_wc=ind_args[(y_type_2>y_column_nc[i_c]) & - (y_type_2<y_column_nc[i_c+1]) & - (x_starting>=i_s_nc) & - (x_ending<=x_end_biggest_column)] - y_all_between_nm_wc = y_type_2[ind_all_lines_between_nm_wc] - x_starting_all_between_nm_wc = x_starting[ind_all_lines_between_nm_wc] - x_ending_all_between_nm_wc = x_ending[ind_all_lines_between_nm_wc] + ind_all_lines_betweeen_nm_wc=ind_args[(y_type_2>y_column_nc[i_c]) & (y_type_2<y_column_nc[i_c+1]) & (x_starting>=i_s_nc) & (x_ending<=x_end_biggest_column)] + + y_all_between_nm_wc=y_type_2[ind_all_lines_betweeen_nm_wc] + x_starting_all_between_nm_wc=x_starting[ind_all_lines_betweeen_nm_wc] + x_ending_all_between_nm_wc=x_ending[ind_all_lines_betweeen_nm_wc] + + x_diff_all_between_nm_wc=x_ending_all_between_nm_wc-x_starting_all_between_nm_wc + - x_diff_all_between_nm_wc = x_ending_all_between_nm_wc - x_starting_all_between_nm_wc if len(x_diff_all_between_nm_wc)>0: biggest=np.argmax(x_diff_all_between_nm_wc) - - columns_covered_by_mothers = set() + + + columns_covered_by_mothers=[] + for dj in range(len(x_starting_all_between_nm_wc)): - columns_covered_by_mothers.update( - range(x_starting_all_between_nm_wc[dj], - x_ending_all_between_nm_wc[dj])) - child_columns = set(range(i_s_nc, x_end_biggest_column)) - columns_not_covered = list(child_columns - columns_covered_by_mothers) + columns_covered_by_mothers=columns_covered_by_mothers+list(np.array(range(x_starting_all_between_nm_wc[dj],x_ending_all_between_nm_wc[dj])) ) + columns_covered_by_mothers=list(set(columns_covered_by_mothers)) + + all_columns=np.array(range(i_s_nc,x_end_biggest_column)) + + columns_not_covered=list( set(all_columns)-set(columns_covered_by_mothers) ) + should_longest_line_be_extended=0 - if (len(x_diff_all_between_nm_wc) > 0 and - set(list(range(x_starting_all_between_nm_wc[biggest], - x_ending_all_between_nm_wc[biggest])) + - list(columns_not_covered)) != child_columns): + if len(x_diff_all_between_nm_wc)>0 and set( list( np.array(range(x_starting_all_between_nm_wc[biggest],x_ending_all_between_nm_wc[biggest])) )+list(columns_not_covered) ) !=set(all_columns): should_longest_line_be_extended=1 - index_lines_so_close_to_top_separator = \ - np.arange(len(y_all_between_nm_wc))[(y_all_between_nm_wc>y_column_nc[i_c]) & - (y_all_between_nm_wc<=(y_column_nc[i_c]+500))] - if len(index_lines_so_close_to_top_separator) > 0: - indexes_remained_after_deleting_closed_lines= \ - np.array(list(set(list(range(len(y_all_between_nm_wc)))) - - set(list(index_lines_so_close_to_top_separator)))) - if len(indexes_remained_after_deleting_closed_lines) > 0: - y_all_between_nm_wc = \ - y_all_between_nm_wc[indexes_remained_after_deleting_closed_lines] - x_starting_all_between_nm_wc = \ - x_starting_all_between_nm_wc[indexes_remained_after_deleting_closed_lines] - x_ending_all_between_nm_wc = \ - x_ending_all_between_nm_wc[indexes_remained_after_deleting_closed_lines] + + index_lines_so_close_to_top_separator=np.array(range(len(y_all_between_nm_wc)))[(y_all_between_nm_wc>y_column_nc[i_c]) & (y_all_between_nm_wc<=(y_column_nc[i_c]+500))] + + + if len(index_lines_so_close_to_top_separator)>0: + indexes_remained_after_deleting_closed_lines= np.array( list ( set( list( np.array(range(len(y_all_between_nm_wc))) ) ) -set(list( index_lines_so_close_to_top_separator) ) ) ) + + if len(indexes_remained_after_deleting_closed_lines)>0: + y_all_between_nm_wc=y_all_between_nm_wc[indexes_remained_after_deleting_closed_lines] + x_starting_all_between_nm_wc=x_starting_all_between_nm_wc[indexes_remained_after_deleting_closed_lines] + 
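# Illustrative sketch (not part of the patch): the condition restored above
# selects separator segments strictly between two y-levels and inside one
# column span via boolean masks. All values below are made up.
import numpy as np

y_type_2 = np.array([100, 250, 400, 900])   # separator y-positions
x_starting = np.array([0, 0, 1, 0])         # start columns of the separators
x_ending = np.array([2, 1, 2, 2])           # end columns (exclusive)
ind_args = np.arange(len(y_type_2))

y_top, y_bottom = 100, 900                  # e.g. y_column_nc[i_c], splitter_y_new[i+1]
col_start, col_end = 0, 2                   # e.g. i_s_nc, x_end_biggest_column

between = ind_args[(y_type_2 > y_top) & (y_type_2 < y_bottom) &
                   (x_starting >= col_start) & (x_ending <= col_end)]
print(between)                              # -> [1 2]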
x_ending_all_between_nm_wc=x_ending_all_between_nm_wc[indexes_remained_after_deleting_closed_lines] + + + y_all_between_nm_wc=list(y_all_between_nm_wc) + x_starting_all_between_nm_wc=list(x_starting_all_between_nm_wc) + x_ending_all_between_nm_wc=list(x_ending_all_between_nm_wc) + + + y_all_between_nm_wc.append(y_column_nc[i_c] ) + x_starting_all_between_nm_wc.append(i_s_nc) + x_ending_all_between_nm_wc.append(x_end_biggest_column) + - y_all_between_nm_wc = np.append(y_all_between_nm_wc, y_column_nc[i_c]) - x_starting_all_between_nm_wc = np.append(x_starting_all_between_nm_wc, i_s_nc) - x_ending_all_between_nm_wc = np.append(x_ending_all_between_nm_wc, x_end_biggest_column) - - if len(x_diff_all_between_nm_wc) > 0: + + + y_all_between_nm_wc=list(y_all_between_nm_wc) + x_starting_all_between_nm_wc=list(x_starting_all_between_nm_wc) + x_ending_all_between_nm_wc=list(x_ending_all_between_nm_wc) + + if len(x_diff_all_between_nm_wc)>0: try: - y_all_between_nm_wc = np.append(y_all_between_nm_wc, y_column_nc[i_c]) - x_starting_all_between_nm_wc = np.append(x_starting_all_between_nm_wc, x_starting_all_between_nm_wc[biggest]) - x_ending_all_between_nm_wc = np.append(x_ending_all_between_nm_wc, x_ending_all_between_nm_wc[biggest]) + x_starting_all_between_nm_wc.append(x_starting_all_between_nm_wc[biggest]) + x_ending_all_between_nm_wc.append(x_ending_all_between_nm_wc[biggest]) + y_all_between_nm_wc.append(y_column_nc[i_c]) except: - logger.exception("cannot append") + pass - y_all_between_nm_wc = np.append(y_all_between_nm_wc, [y_column_nc[i_c]] * len(columns_not_covered)) - x_starting_all_between_nm_wc = np.append(x_starting_all_between_nm_wc, np.array(columns_not_covered, int)) - x_ending_all_between_nm_wc = np.append(x_ending_all_between_nm_wc, np.array(columns_not_covered, int) + 1) - - ind_args_between=np.arange(len(x_ending_all_between_nm_wc)) - for column in range(int(i_s_nc), int(x_end_biggest_column)): + + + for c_n_c in columns_not_covered: + y_all_between_nm_wc.append(y_column_nc[i_c]) + x_starting_all_between_nm_wc.append(c_n_c) + x_ending_all_between_nm_wc.append(c_n_c+1) + + y_all_between_nm_wc=np.array(y_all_between_nm_wc) + x_starting_all_between_nm_wc=np.array(x_starting_all_between_nm_wc) + x_ending_all_between_nm_wc=np.array(x_ending_all_between_nm_wc) + + ind_args_between=np.array(range(len(x_ending_all_between_nm_wc))) + + for column in range(i_s_nc,x_end_biggest_column): ind_args_in_col=ind_args_between[x_starting_all_between_nm_wc==column] #print('babali2') #print(ind_args_in_col,'ind_args_in_col') @@ -1954,7 +2053,14 @@ def return_boxes_of_images_by_order_of_reading_new( y_lines_by_order.append(y_col_sort[ii]) x_start_by_order.append(x_start_column_sort[ii]) x_end_by_order.append(x_end_column_sort[ii]-1) + + + + + + else: + #print(column,'column') ind_args_in_col=ind_args[x_starting==i_s_nc] #print('babali2') @@ -1975,76 +2081,106 @@ def return_boxes_of_images_by_order_of_reading_new( x_start_by_order.append(x_start_column_sort[ii]) x_end_by_order.append(x_end_column_sort[ii]-1) + + for il in range(len(y_lines_by_order)): - y_copy = list(y_lines_by_order) - x_start_copy = list(x_start_by_order) - x_end_copy = list(x_end_by_order) - + + + y_copy=list( np.copy(y_lines_by_order) ) + x_start_copy=list( np.copy(x_start_by_order) ) + x_end_copy=list ( np.copy(x_end_by_order) ) + #print(y_copy,'y_copy') y_itself=y_copy.pop(il) x_start_itself=x_start_copy.pop(il) x_end_itself=x_end_copy.pop(il) - + #print(y_copy,'y_copy2') - for column in range(int(x_start_itself), 
int(x_end_itself)+1): + + for column in range(x_start_itself,x_end_itself+1): #print(column,'cols') y_in_cols=[] for yic in range(len(y_copy)): #print('burda') - if (y_copy[yic]>y_itself and - column>=x_start_copy[yic] and - column<=x_end_copy[yic]): + if y_copy[yic]>y_itself and column>=x_start_copy[yic] and column<=x_end_copy[yic]: y_in_cols.append(y_copy[yic]) #print('burda2') #print(y_in_cols,'y_in_cols') if len(y_in_cols)>0: y_down=np.min(y_in_cols) else: - y_down=splitter_y_new[i+1] - #print(y_itself,'y_itself') - boxes.append([peaks_neg_tot[column], - peaks_neg_tot[column+1], - y_itself, - y_down]) + y_down=[int(splitter_y_new[i+1])][0] + #print(y_itself,'y_itself') + boxes.append([peaks_neg_tot[column],peaks_neg_tot[column+1],y_itself,y_down]) except: - logger.exception("cannot assign boxes") - boxes.append([0, peaks_neg_tot[len(peaks_neg_tot)-1], - splitter_y_new[i], splitter_y_new[i+1]]) + boxes.append([0,peaks_neg_tot[len(peaks_neg_tot)-1],int(splitter_y_new[i]),int(splitter_y_new[i+1])]) + + + else: y_lines_by_order=[] x_start_by_order=[] x_end_by_order=[] if len(x_starting)>0: - columns_covered_by_lines_covered_more_than_2col = set() + all_columns = np.array(range(len(peaks_neg_tot)-1)) + columns_covered_by_lines_covered_more_than_2col=[] + for dj in range(len(x_starting)): - if set(range(x_starting[dj], x_ending[dj])) != all_columns: - columns_covered_by_lines_covered_more_than_2col.update( - range(x_starting[dj], x_ending[dj])) - columns_not_covered = list(all_columns - columns_covered_by_lines_covered_more_than_2col) - - y_type_2 = np.append(y_type_2, np.ones(len(columns_not_covered) + 1, - dtype=int) * splitter_y_new[i]) - ##y_lines_by_order = np.append(y_lines_by_order, [splitter_y_new[i]] * len(columns_not_covered)) - ##x_start_by_order = np.append(x_start_by_order, [0] * len(columns_not_covered)) - x_starting = np.append(x_starting, np.array(columns_not_covered, x_starting.dtype)) - x_ending = np.append(x_ending, np.array(columns_not_covered, x_ending.dtype) + 1) - if len(new_main_sep_y) > 0: - x_starting = np.append(x_starting, 0) - x_ending = np.append(x_ending, len(peaks_neg_tot) - 1) - else: - x_starting = np.append(x_starting, x_starting[0]) - x_ending = np.append(x_ending, x_ending[0]) + if set( list(np.array(range(x_starting[dj],x_ending[dj])) ) ) == set(all_columns): + pass + else: + columns_covered_by_lines_covered_more_than_2col=columns_covered_by_lines_covered_more_than_2col+list(np.array(range(x_starting[dj],x_ending[dj])) ) + columns_covered_by_lines_covered_more_than_2col=list(set(columns_covered_by_lines_covered_more_than_2col)) + + + + columns_not_covered=list( set(all_columns)-set(columns_covered_by_lines_covered_more_than_2col) ) + + + y_type_2=list(y_type_2) + x_starting=list(x_starting) + x_ending=list(x_ending) + + for lj in columns_not_covered: + y_type_2.append(int(splitter_y_new[i])) + x_starting.append(lj) + x_ending.append(lj+1) + ##y_lines_by_order.append(int(splitter_y_new[i])) + ##x_start_by_order.append(0) + + y_type_2.append(int(splitter_y_new[i])) + x_starting.append(x_starting[0]) + x_ending.append(x_ending[0]) + + + y_type_2=np.array(y_type_2) + x_starting=np.array(x_starting) + x_ending=np.array(x_ending) else: - columns_not_covered = list(all_columns) - y_type_2 = np.append(y_type_2, np.ones(len(columns_not_covered), - dtype=int) * splitter_y_new[i]) - ##y_lines_by_order = np.append(y_lines_by_order, [splitter_y_new[i]] * len(columns_not_covered)) - ##x_start_by_order = np.append(x_start_by_order, [0] * len(columns_not_covered)) - 
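# Illustrative sketch (not part of the patch): the loop above turns ordered
# separators into reading-order boxes. The bottom of each box is the nearest
# lower separator overlapping the same column, or the next page splitter if
# none exists. Simplified to a single column; all values are made up.
peaks_neg_tot = [0, 500]      # column borders -> one column
lines_y = [100, 300]          # separator y-positions, already in reading order
page_bottom = 800             # stands in for splitter_y_new[i+1]

boxes = []
for il, y_itself in enumerate(lines_y):
    lower = [y for j, y in enumerate(lines_y) if j != il and y > y_itself]
    y_down = min(lower) if lower else page_bottom
    boxes.append([peaks_neg_tot[0], peaks_neg_tot[1], y_itself, y_down])
print(boxes)                  # -> [[0, 500, 100, 300], [0, 500, 300, 800]]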
x_starting = np.append(x_starting, np.array(columns_not_covered, x_starting.dtype)) - x_ending = np.append(x_ending, np.array(columns_not_covered, x_ending.dtype) + 1) - - ind_args = np.arange(len(y_type_2)) - + all_columns=np.array(range(len(peaks_neg_tot)-1)) + columns_not_covered=list( set(all_columns) ) + + + y_type_2=list(y_type_2) + x_starting=list(x_starting) + x_ending=list(x_ending) + + for lj in columns_not_covered: + y_type_2.append(int(splitter_y_new[i])) + x_starting.append(lj) + x_ending.append(lj+1) + ##y_lines_by_order.append(int(splitter_y_new[i])) + ##x_start_by_order.append(0) + + + + y_type_2=np.array(y_type_2) + x_starting=np.array(x_starting) + x_ending=np.array(x_ending) + + ind_args=np.array(range(len(y_type_2))) + #ind_args=np.array(ind_args) + #print(ind_args,'ind_args') for column in range(len(peaks_neg_tot)-1): #print(column,'column') ind_args_in_col=ind_args[x_starting==column] @@ -2064,65 +2200,41 @@ def return_boxes_of_images_by_order_of_reading_new( y_lines_by_order.append(y_col_sort[ii]) x_start_by_order.append(x_start_column_sort[ii]) x_end_by_order.append(x_end_column_sort[ii]-1) - + + for il in range(len(y_lines_by_order)): - y_copy = list(y_lines_by_order) - x_start_copy = list(x_start_by_order) - x_end_copy = list(x_end_by_order) - + + + y_copy=list( np.copy(y_lines_by_order) ) + x_start_copy=list( np.copy(x_start_by_order) ) + x_end_copy=list ( np.copy(x_end_by_order) ) + #print(y_copy,'y_copy') y_itself=y_copy.pop(il) x_start_itself=x_start_copy.pop(il) x_end_itself=x_end_copy.pop(il) - - for column in range(x_start_itself, x_end_itself+1): + + #print(y_copy,'y_copy2') + + for column in range(x_start_itself,x_end_itself+1): #print(column,'cols') y_in_cols=[] for yic in range(len(y_copy)): #print('burda') - if (y_copy[yic]>y_itself and - column>=x_start_copy[yic] and - column<=x_end_copy[yic]): + if y_copy[yic]>y_itself and column>=x_start_copy[yic] and column<=x_end_copy[yic]: y_in_cols.append(y_copy[yic]) #print('burda2') #print(y_in_cols,'y_in_cols') if len(y_in_cols)>0: y_down=np.min(y_in_cols) else: - y_down=splitter_y_new[i+1] - #print(y_itself,'y_itself') - boxes.append([peaks_neg_tot[column], - peaks_neg_tot[column+1], - y_itself, - y_down]) + y_down=[int(splitter_y_new[i+1])][0] + #print(y_itself,'y_itself') + boxes.append([peaks_neg_tot[column],peaks_neg_tot[column+1],y_itself,y_down]) + + + #else: #boxes.append([ 0, regions_without_separators[:,:].shape[1] ,splitter_y_new[i],splitter_y_new[i+1]]) - if right2left_readingorder: - peaks_neg_tot_tables_new = [] - if len(peaks_neg_tot_tables)>=1: - for peaks_tab_ind in peaks_neg_tot_tables: - peaks_neg_tot_tables_ind = regions_without_separators.shape[1] - np.array(peaks_tab_ind) - peaks_neg_tot_tables_ind = list(peaks_neg_tot_tables_ind[::-1]) - peaks_neg_tot_tables_new.append(peaks_neg_tot_tables_ind) - - for i in range(len(boxes)): - x_start_new = regions_without_separators.shape[1] - boxes[i][1] - x_end_new = regions_without_separators.shape[1] - boxes[i][0] - boxes[i][0] = x_start_new - boxes[i][1] = x_end_new - peaks_neg_tot_tables = peaks_neg_tot_tables_new - - logger.debug('exit return_boxes_of_images_by_order_of_reading_new') - return boxes, peaks_neg_tot_tables - -def is_image_filename(fname: str) -> bool: - return fname.lower().endswith(('.jpg', - '.jpeg', - '.png', - '.tif', - '.tiff', - )) - -def is_xml_filename(fname: str) -> bool: - return fname.lower().endswith('.xml') + return boxes diff --git a/qurator/eynollah/utils/contour.py b/qurator/eynollah/utils/contour.py new file mode 
100644 index 0000000..3209731 --- /dev/null +++ b/qurator/eynollah/utils/contour.py @@ -0,0 +1,238 @@ +import cv2 +import numpy as np +from shapely import geometry + +from .rotate import rotate_image, rotation_image_new + +def contours_in_same_horizon(cy_main_hor): + X1 = np.zeros((len(cy_main_hor), len(cy_main_hor))) + X2 = np.zeros((len(cy_main_hor), len(cy_main_hor))) + + X1[0::1, :] = cy_main_hor[:] + X2 = X1.T + + X_dif = np.abs(X2 - X1) + args_help = np.array(range(len(cy_main_hor))) + all_args = [] + for i in range(len(cy_main_hor)): + list_h = list(args_help[X_dif[i, :] <= 20]) + list_h.append(i) + if len(list_h) > 1: + all_args.append(list(set(list_h))) + return np.unique(all_args) + +def find_contours_mean_y_diff(contours_main): + M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] + cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] + return np.mean(np.diff(np.sort(np.array(cy_main)))) + + +def get_text_region_boxes_by_given_contours(contours): + + kernel = np.ones((5, 5), np.uint8) + boxes = [] + contours_new = [] + for jj in range(len(contours)): + x, y, w, h = cv2.boundingRect(contours[jj]) + + boxes.append([x, y, w, h]) + contours_new.append(contours[jj]) + + del contours + return boxes, contours_new + +def filter_contours_area_of_image(image, contours, hierarchy, max_area, min_area): + found_polygons_early = list() + jv = 0 + for c in contours: + if len(c) < 3: # A polygon cannot have less than 3 points + continue + + polygon = geometry.Polygon([point[0] for point in c]) + area = polygon.area + if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]) and hierarchy[0][jv][3] == -1: # and hierarchy[0][jv][3]==-1 : + found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.uint)) + jv += 1 + return found_polygons_early + +def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, min_area): + found_polygons_early = list() + + jv = 0 + for c in contours: + if len(c) < 3: # A polygon cannot have less than 3 points + continue + + polygon = geometry.Polygon([point[0] for point in c]) + # area = cv2.contourArea(c) + area = polygon.area + ##print(np.prod(thresh.shape[:2])) + # Check that polygon has area greater than minimal area + # print(hierarchy[0][jv][3],hierarchy ) + if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 : + # print(c[0][0][1]) + found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.int32)) + jv += 1 + return found_polygons_early + +def find_new_features_of_contours(contours_main): + + areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))]) + M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] + cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] + cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] + try: + x_min_main = np.array([np.min(contours_main[j][:, 0, 0]) for j in range(len(contours_main))]) + + argmin_x_main = np.array([np.argmin(contours_main[j][:, 0, 0]) for j in range(len(contours_main))]) + + x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0, 0] for j in range(len(contours_main))]) + y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0, 1] for j in range(len(contours_main))]) + + x_max_main = 
np.array([np.max(contours_main[j][:, 0, 0]) for j in range(len(contours_main))]) + + y_min_main = np.array([np.min(contours_main[j][:, 0, 1]) for j in range(len(contours_main))]) + y_max_main = np.array([np.max(contours_main[j][:, 0, 1]) for j in range(len(contours_main))]) + except: + x_min_main = np.array([np.min(contours_main[j][:, 0]) for j in range(len(contours_main))]) + + argmin_x_main = np.array([np.argmin(contours_main[j][:, 0]) for j in range(len(contours_main))]) + + x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0] for j in range(len(contours_main))]) + y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 1] for j in range(len(contours_main))]) + + x_max_main = np.array([np.max(contours_main[j][:, 0]) for j in range(len(contours_main))]) + + y_min_main = np.array([np.min(contours_main[j][:, 1]) for j in range(len(contours_main))]) + y_max_main = np.array([np.max(contours_main[j][:, 1]) for j in range(len(contours_main))]) + + # dis_x=np.abs(x_max_main-x_min_main) + + return cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin + +def return_parent_contours(contours, hierarchy): + contours_parent = [contours[i] for i in range(len(contours)) if hierarchy[0][i][3] == -1] + return contours_parent + +def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002): + + # pixels of images are identified by 5 + if len(region_pre_p.shape) == 3: + cnts_images = (region_pre_p[:, :, 0] == pixel) * 1 + else: + cnts_images = (region_pre_p[:, :] == pixel) * 1 + cnts_images = cnts_images.astype(np.uint8) + cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) + imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + + contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + contours_imgs = return_parent_contours(contours_imgs, hierarchy) + contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=min_area) + + return contours_imgs + +def get_textregion_contours_in_org_image(cnts, img, slope_first): + + cnts_org = [] + # print(cnts,'cnts') + for i in range(len(cnts)): + img_copy = np.zeros(img.shape) + img_copy = cv2.fillPoly(img_copy, pts=[cnts[i]], color=(1, 1, 1)) + + # plt.imshow(img_copy) + # plt.show() + + # print(img.shape,'img') + img_copy = rotation_image_new(img_copy, -slope_first) + ##print(img_copy.shape,'img_copy') + # plt.imshow(img_copy) + # plt.show() + + img_copy = img_copy.astype(np.uint8) + imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + + cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1]) + cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0]) + # print(np.shape(cont_int[0])) + cnts_org.append(cont_int[0]) + + # print(cnts_org,'cnts_org') + + # sys.exit() + # self.y_shift = np.abs(img_copy.shape[0] - img.shape[0]) + # self.x_shift = np.abs(img_copy.shape[1] - img.shape[1]) + return cnts_org + +def return_contours_of_interested_textline(region_pre_p, pixel): + + # pixels of images are identified by 5 + if len(region_pre_p.shape) == 3: + cnts_images = (region_pre_p[:, :, 0] == pixel) * 1 + else: + cnts_images = (region_pre_p[:, :] == pixel) * 1 + cnts_images = cnts_images.astype(np.uint8) + cnts_images = np.repeat(cnts_images[:, :, 
np.newaxis], 3, axis=2) + imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + contours_imgs = return_parent_contours(contours_imgs, hierarchy) + contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=0.000000003) + return contours_imgs + +def return_contours_of_image(image): + + if len(image.shape) == 2: + image = np.repeat(image[:, :, np.newaxis], 3, axis=2) + image = image.astype(np.uint8) + else: + image = image.astype(np.uint8) + imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + return contours, hierarchy + +def return_contours_of_interested_region_by_min_size(region_pre_p, pixel, min_size=0.00003): + + # pixels of images are identified by 5 + if len(region_pre_p.shape) == 3: + cnts_images = (region_pre_p[:, :, 0] == pixel) * 1 + else: + cnts_images = (region_pre_p[:, :] == pixel) * 1 + cnts_images = cnts_images.astype(np.uint8) + cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) + imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + + contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + contours_imgs = return_parent_contours(contours_imgs, hierarchy) + contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=min_size) + + return contours_imgs + +def return_contours_of_interested_region_by_size(region_pre_p, pixel, min_area, max_area): + + # pixels of images are identified by 5 + if len(region_pre_p.shape) == 3: + cnts_images = (region_pre_p[:, :, 0] == pixel) * 1 + else: + cnts_images = (region_pre_p[:, :] == pixel) * 1 + cnts_images = cnts_images.astype(np.uint8) + cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) + imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + contours_imgs = return_parent_contours(contours_imgs, hierarchy) + contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=max_area, min_area=min_area) + + img_ret = np.zeros((region_pre_p.shape[0], region_pre_p.shape[1], 3)) + img_ret = cv2.fillPoly(img_ret, pts=contours_imgs, color=(1, 1, 1)) + return img_ret[:, :, 0] + diff --git a/src/eynollah/utils/counter.py b/qurator/eynollah/utils/counter.py similarity index 83% rename from src/eynollah/utils/counter.py rename to qurator/eynollah/utils/counter.py index e6205c8..bc1d765 100644 --- a/src/eynollah/utils/counter.py +++ b/qurator/eynollah/utils/counter.py @@ -3,17 +3,17 @@ from collections import Counter REGION_ID_TEMPLATE = 'region_%04d' LINE_ID_TEMPLATE = 'region_%04d_line_%04d' -class EynollahIdCounter: +class EynollahIdCounter(): def __init__(self, region_idx=0, line_idx=0): self._counter = Counter() - self._initial_region_idx = region_idx - self._initial_line_idx = line_idx + self._inital_region_idx = region_idx + self._inital_line_idx = line_idx self.reset() def reset(self): - self.set('region', self._initial_region_idx) - self.set('line', self._initial_line_idx) + self.set('region', self._inital_region_idx) + self.set('line', self._inital_line_idx) def inc(self, 
name, val=1): self._counter.update({name: val}) diff --git a/src/eynollah/utils/drop_capitals.py b/qurator/eynollah/utils/drop_capitals.py similarity index 62% rename from src/eynollah/utils/drop_capitals.py rename to qurator/eynollah/utils/drop_capitals.py index 9f82fac..a69e9f5 100644 --- a/src/eynollah/utils/drop_capitals.py +++ b/qurator/eynollah/utils/drop_capitals.py @@ -1,11 +1,9 @@ import numpy as np import cv2 from .contour import ( - find_center_of_contours, find_new_features_of_contours, return_contours_of_image, return_parent_contours, - return_contours_of_interested_region, ) def adhere_drop_capital_region_into_corresponding_textline( @@ -15,16 +13,15 @@ def adhere_drop_capital_region_into_corresponding_textline( contours_only_text_parent_h, all_box_coord, all_box_coord_h, - all_found_textline_polygons, - all_found_textline_polygons_h, + all_found_texline_polygons, + all_found_texline_polygons_h, kernel=None, curved_line=False, - textline_light=False, ): - # print(np.shape(all_found_textline_polygons),np.shape(all_found_textline_polygons[3]),'all_found_textline_polygonsshape') - # print(all_found_textline_polygons[3]) - cx_m, cy_m = find_center_of_contours(contours_only_text_parent) - cx_h, cy_h = find_center_of_contours(contours_only_text_parent_h) + # print(np.shape(all_found_texline_polygons),np.shape(all_found_texline_polygons[3]),'all_found_texline_polygonsshape') + # print(all_found_texline_polygons[3]) + cx_m, cy_m, _, _, _, _, _ = find_new_features_of_contours(contours_only_text_parent) + cx_h, cy_h, _, _, _, _, _ = find_new_features_of_contours(contours_only_text_parent_h) cx_d, cy_d, _, _, y_min_d, y_max_d, _ = find_new_features_of_contours(polygons_of_drop_capitals) img_con_all = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1], 3)) @@ -79,7 +76,7 @@ def adhere_drop_capital_region_into_corresponding_textline( # region_with_intersected_drop=region_with_intersected_drop/3 region_with_intersected_drop = region_with_intersected_drop.astype(np.uint8) # print(np.unique(img_con_all_copy[:,:,0])) - if curved_line or textline_light: + if curved_line: if len(region_with_intersected_drop) > 1: sum_pixels_of_intersection = [] @@ -90,9 +87,9 @@ def adhere_drop_capital_region_into_corresponding_textline( region_final = region_with_intersected_drop[np.argmax(sum_pixels_of_intersection)] - 1 # print(region_final,'region_final') - # cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) + # cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_texline_polygons[int(region_final)]) try: - cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) + cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_texline_polygons[int(region_final)]) # print(all_box_coord[j_cont]) # print(cx_t) # print(cy_t) @@ -108,26 +105,21 @@ def adhere_drop_capital_region_into_corresponding_textline( arg_min = np.argmin(np.abs(y_lines - y_min_d[i_drop])) # print(arg_min) - cnt_nearest = np.copy(all_found_textline_polygons[int(region_final)][arg_min]) - cnt_nearest[:, 0, 0] = all_found_textline_polygons[int(region_final)][arg_min][:, 0, 0] # +all_box_coord[int(region_final)][2] - cnt_nearest[:, 0, 1] = all_found_textline_polygons[int(region_final)][arg_min][:, 0, 1] # +all_box_coord[int(region_final)][0] + cnt_nearest = np.copy(all_found_texline_polygons[int(region_final)][arg_min]) + cnt_nearest[:, 0, 0] = all_found_texline_polygons[int(region_final)][arg_min][:, 0, 0] # +all_box_coord[int(region_final)][2] + 
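# Illustrative sketch (not part of the patch): the drop-capital handling above
# merges a drop capital into its nearest textline by painting both polygons
# into one binary mask and re-extracting the fused contour. Shapes are made up.
import cv2
import numpy as np

canvas = np.zeros((100, 100, 3), np.uint8)
textline = np.array([[[30, 40]], [[90, 40]], [[90, 60]], [[30, 60]]])
drop_cap = np.array([[[10, 35]], [[32, 35]], [[32, 65]], [[10, 65]]])

canvas = cv2.fillPoly(canvas, pts=[textline], color=(255, 255, 255))
canvas = cv2.fillPoly(canvas, pts=[drop_cap], color=(255, 255, 255))
imgray = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(imgray, 0, 255, 0)
contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
merged = max(contours, key=cv2.contourArea)   # overlapping polygons fuse into one
print(len(contours), cv2.contourArea(merged))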
cnt_nearest[:, 0, 1] = all_found_texline_polygons[int(region_final)][arg_min][:, 0, 1] # +all_box_coord[int(region_final)][0] img_textlines = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1], 3)) img_textlines = cv2.fillPoly(img_textlines, pts=[cnt_nearest], color=(255, 255, 255)) img_textlines = cv2.fillPoly(img_textlines, pts=[polygons_of_drop_capitals[i_drop]], color=(255, 255, 255)) img_textlines = img_textlines.astype(np.uint8) - - contours_combined = return_contours_of_interested_region(img_textlines, 255, 0) - - #plt.imshow(img_textlines) - #plt.show() - - #imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY) - #ret, thresh = cv2.threshold(imgray, 0, 255, 0) + imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) - #contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + # print(len(contours_combined),'len textlines mixed') areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))]) contours_biggest = contours_combined[np.argmax(areas_cnt_text)] @@ -138,13 +130,8 @@ def adhere_drop_capital_region_into_corresponding_textline( # contours_biggest[:,0,1]=contours_biggest[:,0,1]#-all_box_coord[int(region_final)][0] # contours_biggest=contours_biggest.reshape(np.shape(contours_biggest)[0],np.shape(contours_biggest)[2]) - - if len(contours_combined)==1: - all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest - elif len(contours_combined)==2: - all_found_textline_polygons[int(region_final)].insert(arg_min, polygons_of_drop_capitals[i_drop] ) - else: - pass + + all_found_texline_polygons[int(region_final)][arg_min] = contours_biggest except: # print('gordun1') @@ -152,64 +139,59 @@ def adhere_drop_capital_region_into_corresponding_textline( elif len(region_with_intersected_drop) == 1: region_final = region_with_intersected_drop[0] - 1 - # areas_main=np.array([cv2.contourArea(all_found_textline_polygons[int(region_final)][0][j] ) for j in range(len(all_found_textline_polygons[int(region_final)]))]) + # areas_main=np.array([cv2.contourArea(all_found_texline_polygons[int(region_final)][0][j] ) for j in range(len(all_found_texline_polygons[int(region_final)]))]) - # cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) + # cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_texline_polygons[int(region_final)]) + + cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_texline_polygons[int(region_final)]) + # print(all_box_coord[j_cont]) + # print(cx_t) + # print(cy_t) + # print(cx_d[i_drop]) + # print(cy_d[i_drop]) + y_lines = np.array(cy_t) # all_box_coord[int(region_final)][0]+np.array(cy_t) + + y_lines[y_lines < y_min_d[i_drop]] = 0 + # print(y_lines) + + arg_min = np.argmin(np.abs(y_lines - y_min_d[i_drop])) + # print(arg_min) + + cnt_nearest = np.copy(all_found_texline_polygons[int(region_final)][arg_min]) + cnt_nearest[:, 0, 0] = all_found_texline_polygons[int(region_final)][arg_min][:, 0, 0] # +all_box_coord[int(region_final)][2] + cnt_nearest[:, 0, 1] = all_found_texline_polygons[int(region_final)][arg_min][:, 0, 1] # +all_box_coord[int(region_final)][0] + + img_textlines = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1], 3)) + img_textlines = cv2.fillPoly(img_textlines, pts=[cnt_nearest], color=(255, 255, 255)) + img_textlines = 
cv2.fillPoly(img_textlines, pts=[polygons_of_drop_capitals[i_drop]], color=(255, 255, 255)) + + img_textlines = img_textlines.astype(np.uint8) + + # plt.imshow(img_textlines) + # plt.show() + imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + + contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + # print(len(contours_combined),'len textlines mixed') + areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))]) + + contours_biggest = contours_combined[np.argmax(areas_cnt_text)] + + # print(np.shape(contours_biggest)) + # print(contours_biggest[:]) + # contours_biggest[:,0,0]=contours_biggest[:,0,0]#-all_box_coord[int(region_final)][2] + # contours_biggest[:,0,1]=contours_biggest[:,0,1]#-all_box_coord[int(region_final)][0] + # print(np.shape(contours_biggest),'contours_biggest') + # print(np.shape(all_found_texline_polygons[int(region_final)][arg_min])) + ##contours_biggest=contours_biggest.reshape(np.shape(contours_biggest)[0],np.shape(contours_biggest)[2]) + all_found_texline_polygons[int(region_final)][arg_min] = contours_biggest + + # print(cx_t,'print') try: - cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) - # print(all_box_coord[j_cont]) - # print(cx_t) - # print(cy_t) - # print(cx_d[i_drop]) - # print(cy_d[i_drop]) - y_lines = np.array(cy_t) # all_box_coord[int(region_final)][0]+np.array(cy_t) - - y_lines[y_lines < y_min_d[i_drop]] = 0 - # print(y_lines) - - arg_min = np.argmin(np.abs(y_lines - y_min_d[i_drop])) - # print(arg_min) - - cnt_nearest = np.copy(all_found_textline_polygons[int(region_final)][arg_min]) - cnt_nearest[:, 0, 0] = all_found_textline_polygons[int(region_final)][arg_min][:, 0, 0] # +all_box_coord[int(region_final)][2] - cnt_nearest[:, 0, 1] = all_found_textline_polygons[int(region_final)][arg_min][:, 0, 1] # +all_box_coord[int(region_final)][0] - - img_textlines = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1], 3)) - img_textlines = cv2.fillPoly(img_textlines, pts=[cnt_nearest], color=(255, 255, 255)) - img_textlines = cv2.fillPoly(img_textlines, pts=[polygons_of_drop_capitals[i_drop]], color=(255, 255, 255)) - - img_textlines = img_textlines.astype(np.uint8) - - - contours_combined = return_contours_of_interested_region(img_textlines, 255, 0) - ##imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY) - ##ret, thresh = cv2.threshold(imgray, 0, 255, 0) - - ##contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - - areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))]) - - contours_biggest = contours_combined[np.argmax(areas_cnt_text)] - - # print(np.shape(contours_biggest)) - # print(contours_biggest[:]) - # contours_biggest[:,0,0]=contours_biggest[:,0,0]#-all_box_coord[int(region_final)][2] - # contours_biggest[:,0,1]=contours_biggest[:,0,1]#-all_box_coord[int(region_final)][0] - # print(np.shape(contours_biggest),'contours_biggest') - # print(np.shape(all_found_textline_polygons[int(region_final)][arg_min])) - ##contours_biggest=contours_biggest.reshape(np.shape(contours_biggest)[0],np.shape(contours_biggest)[2]) - if len(contours_combined)==1: - all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest - elif len(contours_combined)==2: - all_found_textline_polygons[int(region_final)].insert(arg_min, polygons_of_drop_capitals[i_drop] ) - else: - pass - 
except: - pass - - try: - # print(all_found_textline_polygons[j_cont][0]) - cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) + # print(all_found_texline_polygons[j_cont][0]) + cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_texline_polygons[int(region_final)]) # print(all_box_coord[j_cont]) # print(cx_t) # print(cy_t) @@ -223,20 +205,19 @@ def adhere_drop_capital_region_into_corresponding_textline( arg_min = np.argmin(np.abs(y_lines - y_min_d[i_drop])) # print(arg_min) - cnt_nearest = np.copy(all_found_textline_polygons[int(region_final)][arg_min]) - cnt_nearest[:, 0, 0] = all_found_textline_polygons[int(region_final)][arg_min][:, 0, 0] # +all_box_coord[int(region_final)][2] - cnt_nearest[:, 0, 1] = all_found_textline_polygons[int(region_final)][arg_min][:, 0, 1] # +all_box_coord[int(region_final)][0] + cnt_nearest = np.copy(all_found_texline_polygons[int(region_final)][arg_min]) + cnt_nearest[:, 0, 0] = all_found_texline_polygons[int(region_final)][arg_min][:, 0, 0] # +all_box_coord[int(region_final)][2] + cnt_nearest[:, 0, 1] = all_found_texline_polygons[int(region_final)][arg_min][:, 0, 1] # +all_box_coord[int(region_final)][0] img_textlines = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1], 3)) img_textlines = cv2.fillPoly(img_textlines, pts=[cnt_nearest], color=(255, 255, 255)) img_textlines = cv2.fillPoly(img_textlines, pts=[polygons_of_drop_capitals[i_drop]], color=(255, 255, 255)) img_textlines = img_textlines.astype(np.uint8) - contours_combined = return_contours_of_interested_region(img_textlines, 255, 0) - #imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY) - #ret, thresh = cv2.threshold(imgray, 0, 255, 0) + imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) - #contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # print(len(contours_combined),'len textlines mixed') areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))]) @@ -249,20 +230,15 @@ def adhere_drop_capital_region_into_corresponding_textline( contours_biggest[:, 0, 1] = contours_biggest[:, 0, 1] # -all_box_coord[int(region_final)][0] ##contours_biggest=contours_biggest.reshape(np.shape(contours_biggest)[0],np.shape(contours_biggest)[2]) - if len(contours_combined)==1: - all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest - elif len(contours_combined)==2: - all_found_textline_polygons[int(region_final)].insert(arg_min, polygons_of_drop_capitals[i_drop] ) - else: - pass - # all_found_textline_polygons[int(region_final)][arg_min]=contours_biggest + all_found_texline_polygons[int(region_final)][arg_min] = contours_biggest + # all_found_texline_polygons[int(region_final)][arg_min]=contours_biggest except: pass else: pass - ##cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) + ##cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_texline_polygons[int(region_final)]) ###print(all_box_coord[j_cont]) ###print(cx_t) ###print(cy_t) @@ -276,9 +252,9 @@ def adhere_drop_capital_region_into_corresponding_textline( ##arg_min=np.argmin(np.abs(y_lines-y_min_d[i_drop]) ) ###print(arg_min) - ##cnt_nearest=np.copy(all_found_textline_polygons[int(region_final)][arg_min]) - 
##cnt_nearest[:,0,0]=all_found_textline_polygons[int(region_final)][arg_min][:,0,0]#+all_box_coord[int(region_final)][2] - ##cnt_nearest[:,0,1]=all_found_textline_polygons[int(region_final)][arg_min][:,0,1]#+all_box_coord[int(region_final)][0] + ##cnt_nearest=np.copy(all_found_texline_polygons[int(region_final)][arg_min]) + ##cnt_nearest[:,0,0]=all_found_texline_polygons[int(region_final)][arg_min][:,0,0]#+all_box_coord[int(region_final)][2] + ##cnt_nearest[:,0,1]=all_found_texline_polygons[int(region_final)][arg_min][:,0,1]#+all_box_coord[int(region_final)][0] ##img_textlines=np.zeros((text_regions_p.shape[0],text_regions_p.shape[1],3)) ##img_textlines=cv2.fillPoly(img_textlines,pts=[cnt_nearest],color=(255,255,255)) @@ -304,7 +280,7 @@ def adhere_drop_capital_region_into_corresponding_textline( ##contours_biggest[:,0,1]=contours_biggest[:,0,1]#-all_box_coord[int(region_final)][0] ##contours_biggest=contours_biggest.reshape(np.shape(contours_biggest)[0],np.shape(contours_biggest)[2]) - ##all_found_textline_polygons[int(region_final)][arg_min]=contours_biggest + ##all_found_texline_polygons[int(region_final)][arg_min]=contours_biggest else: if len(region_with_intersected_drop) > 1: @@ -316,9 +292,9 @@ def adhere_drop_capital_region_into_corresponding_textline( region_final = region_with_intersected_drop[np.argmax(sum_pixels_of_intersection)] - 1 # print(region_final,'region_final') - # cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) + # cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_texline_polygons[int(region_final)]) try: - cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) + cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_texline_polygons[int(region_final)]) # print(all_box_coord[j_cont]) # print(cx_t) # print(cy_t) @@ -334,21 +310,19 @@ def adhere_drop_capital_region_into_corresponding_textline( arg_min = np.argmin(np.abs(y_lines - y_min_d[i_drop])) # print(arg_min) - cnt_nearest = np.copy(all_found_textline_polygons[int(region_final)][arg_min]) - cnt_nearest[:, 0] = all_found_textline_polygons[int(region_final)][arg_min][:, 0] + all_box_coord[int(region_final)][2] - cnt_nearest[:, 1] = all_found_textline_polygons[int(region_final)][arg_min][:, 1] + all_box_coord[int(region_final)][0] + cnt_nearest = np.copy(all_found_texline_polygons[int(region_final)][arg_min]) + cnt_nearest[:, 0] = all_found_texline_polygons[int(region_final)][arg_min][:, 0] + all_box_coord[int(region_final)][2] + cnt_nearest[:, 1] = all_found_texline_polygons[int(region_final)][arg_min][:, 1] + all_box_coord[int(region_final)][0] img_textlines = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1], 3)) img_textlines = cv2.fillPoly(img_textlines, pts=[cnt_nearest], color=(255, 255, 255)) img_textlines = cv2.fillPoly(img_textlines, pts=[polygons_of_drop_capitals[i_drop]], color=(255, 255, 255)) img_textlines = img_textlines.astype(np.uint8) - contours_combined = return_contours_of_interested_region(img_textlines, 255, 0) - - #imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY) - #ret, thresh = cv2.threshold(imgray, 0, 255, 0) + imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) - #contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # print(len(contours_combined),'len textlines mixed') areas_cnt_text = 
np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))]) @@ -361,12 +335,8 @@ def adhere_drop_capital_region_into_corresponding_textline( contours_biggest[:, 0, 1] = contours_biggest[:, 0, 1] - all_box_coord[int(region_final)][0] contours_biggest = contours_biggest.reshape(np.shape(contours_biggest)[0], np.shape(contours_biggest)[2]) - if len(contours_combined)==1: - all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest - elif len(contours_combined)==2: - all_found_textline_polygons[int(region_final)].insert(arg_min, polygons_of_drop_capitals[i_drop] ) - else: - pass + + all_found_texline_polygons[int(region_final)][arg_min] = contours_biggest except: # print('gordun1') @@ -374,14 +344,14 @@ def adhere_drop_capital_region_into_corresponding_textline( elif len(region_with_intersected_drop) == 1: region_final = region_with_intersected_drop[0] - 1 - # areas_main=np.array([cv2.contourArea(all_found_textline_polygons[int(region_final)][0][j] ) for j in range(len(all_found_textline_polygons[int(region_final)]))]) + # areas_main=np.array([cv2.contourArea(all_found_texline_polygons[int(region_final)][0][j] ) for j in range(len(all_found_texline_polygons[int(region_final)]))]) - # cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) + # cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_texline_polygons[int(region_final)]) # print(cx_t,'print') try: - # print(all_found_textline_polygons[j_cont][0]) - cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) + # print(all_found_texline_polygons[j_cont][0]) + cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_texline_polygons[int(region_final)]) # print(all_box_coord[j_cont]) # print(cx_t) # print(cy_t) @@ -395,21 +365,19 @@ def adhere_drop_capital_region_into_corresponding_textline( arg_min = np.argmin(np.abs(y_lines - y_min_d[i_drop])) # print(arg_min) - cnt_nearest = np.copy(all_found_textline_polygons[int(region_final)][arg_min]) - cnt_nearest[:, 0] = all_found_textline_polygons[int(region_final)][arg_min][:, 0] + all_box_coord[int(region_final)][2] - cnt_nearest[:, 1] = all_found_textline_polygons[int(region_final)][arg_min][:, 1] + all_box_coord[int(region_final)][0] + cnt_nearest = np.copy(all_found_texline_polygons[int(region_final)][arg_min]) + cnt_nearest[:, 0] = all_found_texline_polygons[int(region_final)][arg_min][:, 0] + all_box_coord[int(region_final)][2] + cnt_nearest[:, 1] = all_found_texline_polygons[int(region_final)][arg_min][:, 1] + all_box_coord[int(region_final)][0] img_textlines = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1], 3)) img_textlines = cv2.fillPoly(img_textlines, pts=[cnt_nearest], color=(255, 255, 255)) img_textlines = cv2.fillPoly(img_textlines, pts=[polygons_of_drop_capitals[i_drop]], color=(255, 255, 255)) img_textlines = img_textlines.astype(np.uint8) - contours_combined = return_contours_of_interested_region(img_textlines, 255, 0) - - #imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY) - #ret, thresh = cv2.threshold(imgray, 0, 255, 0) + imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) - #contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # print(len(contours_combined),'len textlines mixed') areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) 
for j in range(len(contours_combined))]) @@ -422,13 +390,8 @@ def adhere_drop_capital_region_into_corresponding_textline( contours_biggest[:, 0, 1] = contours_biggest[:, 0, 1] - all_box_coord[int(region_final)][0] contours_biggest = contours_biggest.reshape(np.shape(contours_biggest)[0], np.shape(contours_biggest)[2]) - if len(contours_combined)==1: - all_found_textline_polygons[int(region_final)][arg_min] = contours_biggest - elif len(contours_combined)==2: - all_found_textline_polygons[int(region_final)].insert(arg_min, polygons_of_drop_capitals[i_drop] ) - else: - pass - # all_found_textline_polygons[int(region_final)][arg_min]=contours_biggest + all_found_texline_polygons[int(region_final)][arg_min] = contours_biggest + # all_found_texline_polygons[int(region_final)][arg_min]=contours_biggest except: pass @@ -453,8 +416,8 @@ def adhere_drop_capital_region_into_corresponding_textline( ######plt.show() #####try: #####if len(contours_new_parent)==1: - ######print(all_found_textline_polygons[j_cont][0]) - #####cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[j_cont]) + ######print(all_found_texline_polygons[j_cont][0]) + #####cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_texline_polygons[j_cont]) ######print(all_box_coord[j_cont]) ######print(cx_t) ######print(cy_t) @@ -467,9 +430,9 @@ def adhere_drop_capital_region_into_corresponding_textline( #####arg_min=np.argmin(np.abs(y_lines-y_min_d[i_drop]) ) ######print(arg_min) - #####cnt_nearest=np.copy(all_found_textline_polygons[j_cont][arg_min]) - #####cnt_nearest[:,0]=all_found_textline_polygons[j_cont][arg_min][:,0]+all_box_coord[j_cont][2] - #####cnt_nearest[:,1]=all_found_textline_polygons[j_cont][arg_min][:,1]+all_box_coord[j_cont][0] + #####cnt_nearest=np.copy(all_found_texline_polygons[j_cont][arg_min]) + #####cnt_nearest[:,0]=all_found_texline_polygons[j_cont][arg_min][:,0]+all_box_coord[j_cont][2] + #####cnt_nearest[:,1]=all_found_texline_polygons[j_cont][arg_min][:,1]+all_box_coord[j_cont][0] #####img_textlines=np.zeros((text_regions_p.shape[0],text_regions_p.shape[1],3)) #####img_textlines=cv2.fillPoly(img_textlines,pts=[cnt_nearest],color=(255,255,255)) @@ -490,7 +453,7 @@ def adhere_drop_capital_region_into_corresponding_textline( #####contours_biggest[:,0,0]=contours_biggest[:,0,0]-all_box_coord[j_cont][2] #####contours_biggest[:,0,1]=contours_biggest[:,0,1]-all_box_coord[j_cont][0] - #####all_found_textline_polygons[j_cont][arg_min]=contours_biggest + #####all_found_texline_polygons[j_cont][arg_min]=contours_biggest ######print(contours_biggest) ######plt.imshow(img_textlines[:,:,0]) ######plt.show() @@ -498,7 +461,7 @@ def adhere_drop_capital_region_into_corresponding_textline( #####pass #####except: #####pass - return all_found_textline_polygons + return all_found_texline_polygons def filter_small_drop_capitals_from_no_patch_layout(layout_no_patch, layout1): diff --git a/src/eynollah/utils/is_nan.py b/qurator/eynollah/utils/is_nan.py similarity index 100% rename from src/eynollah/utils/is_nan.py rename to qurator/eynollah/utils/is_nan.py diff --git a/src/eynollah/utils/marginals.py b/qurator/eynollah/utils/marginals.py similarity index 51% rename from src/eynollah/utils/marginals.py rename to qurator/eynollah/utils/marginals.py index eaf0048..7c43de6 100644 --- a/src/eynollah/utils/marginals.py +++ b/qurator/eynollah/utils/marginals.py @@ -2,14 +2,17 @@ import numpy as np import cv2 from scipy.signal import find_peaks from scipy.ndimage import gaussian_filter1d + + from .contour import 
find_new_features_of_contours, return_contours_of_interested_region from .resize import resize_image from .rotate import rotate_image -def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_version=False, kernel=None): +def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, kernel=None): mask_marginals=np.zeros((text_with_lines.shape[0],text_with_lines.shape[1])) mask_marginals=mask_marginals.astype(np.uint8) + text_with_lines=text_with_lines.astype(np.uint8) ##text_with_lines=cv2.erode(text_with_lines,self.kernel,iterations=3) @@ -25,12 +28,8 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_ve text_with_lines=resize_image(text_with_lines,int(text_with_lines.shape[0]*1.8),text_with_lines.shape[1]) text_with_lines=cv2.erode(text_with_lines,kernel,iterations=7) text_with_lines=resize_image(text_with_lines,text_with_lines_eroded.shape[0],text_with_lines_eroded.shape[1]) - - - if light_version: - kernel_hor = np.ones((1, 5), dtype=np.uint8) - text_with_lines = cv2.erode(text_with_lines,kernel_hor,iterations=6) - + + text_with_lines_y=text_with_lines.sum(axis=0) text_with_lines_y_eroded=text_with_lines_eroded.sum(axis=0) @@ -43,23 +42,34 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_ve elif thickness_along_y_percent>=30 and thickness_along_y_percent<50: min_textline_thickness=20 else: - if light_version: - min_textline_thickness=45 - else: - min_textline_thickness=40 + min_textline_thickness=40 + if thickness_along_y_percent>=14: text_with_lines_y_rev=-1*text_with_lines_y[:] + #print(text_with_lines_y) + #print(text_with_lines_y_rev) + + + + + #plt.plot(text_with_lines_y) + #plt.show() + text_with_lines_y_rev=text_with_lines_y_rev-np.min(text_with_lines_y_rev) + #plt.plot(text_with_lines_y_rev) + #plt.show() sigma_gaus=1 region_sum_0= gaussian_filter1d(text_with_lines_y, sigma_gaus) region_sum_0_rev=gaussian_filter1d(text_with_lines_y_rev, sigma_gaus) + #plt.plot(region_sum_0_rev) + #plt.show() region_sum_0_updown=region_sum_0[len(region_sum_0)::-1] first_nonzero=(next((i for i, x in enumerate(region_sum_0) if x), None)) @@ -68,18 +78,44 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_ve last_nonzero=len(region_sum_0)-last_nonzero + ##img_sum_0_smooth_rev=-region_sum_0 + + mid_point=(last_nonzero+first_nonzero)/2. 
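# Illustrative sketch (not part of the patch): get_marginals finds margin gaps
# by inverting the per-column pixel sums so that valleys of text become peaks
# for scipy's find_peaks. The profile below is made up.
import numpy as np
from scipy.signal import find_peaks

profile = np.array([0, 0, 5, 40, 42, 3, 41, 39, 0, 0], dtype=float)  # column sums
inverted = -1 * profile
inverted = inverted - np.min(inverted)       # same shift as in the code above
peaks, _ = find_peaks(inverted, height=0)    # valleys of the original profile
peaks = peaks[profile[peaks] < 20]           # keep only thin gaps between columns
print(peaks)                                 # -> [5]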
one_third_right=(last_nonzero-mid_point)/3.0 one_third_left=(mid_point-first_nonzero)/3.0 - peaks, _ = find_peaks(text_with_lines_y_rev, height=0) - peaks=np.array(peaks) - peaks=peaks[(peaks>first_nonzero) & (peaks < last_nonzero)] - peaks=peaks[region_sum_0[peaks]<min_textline_thickness] + peaks, _ = find_peaks(text_with_lines_y_rev, height=0) + + peaks=np.array(peaks) + peaks=peaks[(peaks>first_nonzero) & ((peaks<last_nonzero))] + peaks=peaks[region_sum_0[peaks]<min_textline_thickness ] peaks_right=peaks[peaks>mid_point] peaks_left=peaks[peaks<mid_point] try: point_right=np.min(peaks_right) except: point_right=last_nonzero try: point_left=np.max(peaks_left) except: point_left=first_nonzero if point_right>=mask_marginals.shape[1]: point_right=mask_marginals.shape[1]-1 @@ -111,8 +148,10 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_ve except: mask_marginals[:,:]=1 + #print(mask_marginals.shape,point_left,point_right,'nadosh') mask_marginals_rotated=rotate_image(mask_marginals,-slope_deskew) + #print(mask_marginals_rotated.shape,'nadosh') mask_marginals_rotated_sum=mask_marginals_rotated.sum(axis=0) mask_marginals_rotated_sum[mask_marginals_rotated_sum!=0]=1 @@ -128,92 +167,73 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_ve if max_point_of_right_marginal>=text_regions.shape[1]: max_point_of_right_marginal=text_regions.shape[1]-1 - if light_version: - text_regions_org = np.copy(text_regions) - text_regions[text_regions[:,:]==1]=4 - - pixel_img=4 - min_area_text=0.00001 - - polygon_mask_marginals_rotated = return_contours_of_interested_region(mask_marginals,1,min_area_text) - - polygon_mask_marginals_rotated = polygon_mask_marginals_rotated[0] - polygons_of_marginals=return_contours_of_interested_region(text_regions,pixel_img,min_area_text) + #print(np.min(index_x_interest) ,np.max(index_x_interest),'minmaxnew') + #print(mask_marginals_rotated.shape,text_regions.shape,'mask_marginals_rotated') + #plt.imshow(mask_marginals) + #plt.show() - cx_text_only,cy_text_only ,x_min_text_only,x_max_text_only, y_min_text_only ,y_max_text_only,y_cor_x_min_main=find_new_features_of_contours(polygons_of_marginals) + #plt.imshow(mask_marginals_rotated) + #plt.show() - text_regions[(text_regions[:,:]==4)]=1 + text_regions[(mask_marginals_rotated[:,:]!=1) & (text_regions[:,:]==1)]=4 - marginlas_should_be_main_text=[] + #plt.imshow(text_regions) + #plt.show() - x_min_marginals_left=[] - x_min_marginals_right=[] + pixel_img=4 + min_area_text=0.00001 + polygons_of_marginals=return_contours_of_interested_region(text_regions,pixel_img,min_area_text) - for i in range(len(cx_text_only)): - results = cv2.pointPolygonTest(polygon_mask_marginals_rotated, (cx_text_only[i], cy_text_only[i]), False) + cx_text_only,cy_text_only ,x_min_text_only,x_max_text_only, y_min_text_only ,y_max_text_only,y_cor_x_min_main=find_new_features_of_contours(polygons_of_marginals) - if results == -1: - marginlas_should_be_main_text.append(polygons_of_marginals[i]) + text_regions[(text_regions[:,:]==4)]=1 + marginlas_should_be_main_text=[] + x_min_marginals_left=[] + x_min_marginals_right=[] - text_regions_org=cv2.fillPoly(text_regions_org, pts =marginlas_should_be_main_text, color=(4,4)) - text_regions = np.copy(text_regions_org) - + for i in range(len(cx_text_only)): - else: - - text_regions[(mask_marginals_rotated[:,:]!=1) & (text_regions[:,:]==1)]=4 - - pixel_img=4 - min_area_text=0.00001 - - polygons_of_marginals=return_contours_of_interested_region(text_regions,pixel_img,min_area_text) - - cx_text_only,cy_text_only ,x_min_text_only,x_max_text_only, y_min_text_only ,y_max_text_only,y_cor_x_min_main=find_new_features_of_contours(polygons_of_marginals) - - text_regions[(text_regions[:,:]==4)]=1 - - marginlas_should_be_main_text=[] - - x_min_marginals_left=[] - x_min_marginals_right=[] - - for i in range(len(cx_text_only)): - 
x_width_mar=abs(x_min_text_only[i]-x_max_text_only[i]) - y_height_mar=abs(y_min_text_only[i]-y_max_text_only[i]) - - if x_width_mar>16 and y_height_mar/x_width_mar<18: - marginlas_should_be_main_text.append(polygons_of_marginals[i]) - if x_min_text_only[i]<(mid_point-one_third_left): - x_min_marginals_left_new=x_min_text_only[i] - if len(x_min_marginals_left)==0: - x_min_marginals_left.append(x_min_marginals_left_new) - else: - x_min_marginals_left[0]=min(x_min_marginals_left[0],x_min_marginals_left_new) + x_width_mar=abs(x_min_text_only[i]-x_max_text_only[i]) + y_height_mar=abs(y_min_text_only[i]-y_max_text_only[i]) + #print(x_width_mar,y_height_mar,y_height_mar/x_width_mar,'y_height_mar') + if x_width_mar>16 and y_height_mar/x_width_mar<18: + marginlas_should_be_main_text.append(polygons_of_marginals[i]) + if x_min_text_only[i]<(mid_point-one_third_left): + x_min_marginals_left_new=x_min_text_only[i] + if len(x_min_marginals_left)==0: + x_min_marginals_left.append(x_min_marginals_left_new) else: - x_min_marginals_right_new=x_min_text_only[i] - if len(x_min_marginals_right)==0: - x_min_marginals_right.append(x_min_marginals_right_new) - else: - x_min_marginals_right[0]=min(x_min_marginals_right[0],x_min_marginals_right_new) + x_min_marginals_left[0]=min(x_min_marginals_left[0],x_min_marginals_left_new) + else: + x_min_marginals_right_new=x_min_text_only[i] + if len(x_min_marginals_right)==0: + x_min_marginals_right.append(x_min_marginals_right_new) + else: + x_min_marginals_right[0]=min(x_min_marginals_right[0],x_min_marginals_right_new) - if len(x_min_marginals_left)==0: - x_min_marginals_left=[0] - if len(x_min_marginals_right)==0: - x_min_marginals_right=[text_regions.shape[1]-1] + if len(x_min_marginals_left)==0: + x_min_marginals_left=[0] + if len(x_min_marginals_right)==0: + x_min_marginals_right=[text_regions.shape[1]-1] - text_regions=cv2.fillPoly(text_regions, pts =marginlas_should_be_main_text, color=(4,4)) - #text_regions[:,:int(x_min_marginals_left[0])][text_regions[:,:int(x_min_marginals_left[0])]==1]=0 - #text_regions[:,int(x_min_marginals_right[0]):][text_regions[:,int(x_min_marginals_right[0]):]==1]=0 - - - text_regions[:,:int(min_point_of_left_marginal)][text_regions[:,:int(min_point_of_left_marginal)]==1]=0 - text_regions[:,int(max_point_of_right_marginal):][text_regions[:,int(max_point_of_right_marginal):]==1]=0 + #print(x_min_marginals_left[0],x_min_marginals_right[0],'margo') + + #print(marginlas_should_be_main_text,'marginlas_should_be_main_text') + text_regions=cv2.fillPoly(text_regions, pts =marginlas_should_be_main_text, color=(4,4)) + + #print(np.unique(text_regions)) + + #text_regions[:,:int(x_min_marginals_left[0])][text_regions[:,:int(x_min_marginals_left[0])]==1]=0 + #text_regions[:,int(x_min_marginals_right[0]):][text_regions[:,int(x_min_marginals_right[0]):]==1]=0 + + text_regions[:,:int(min_point_of_left_marginal)][text_regions[:,:int(min_point_of_left_marginal)]==1]=0 + text_regions[:,int(max_point_of_right_marginal):][text_regions[:,int(max_point_of_right_marginal):]==1]=0 ###text_regions[:,0:point_left][text_regions[:,0:point_left]==1]=4 diff --git a/src/eynollah/utils/pil_cv2.py b/qurator/eynollah/utils/pil_cv2.py similarity index 80% rename from src/eynollah/utils/pil_cv2.py rename to qurator/eynollah/utils/pil_cv2.py index 9f6913e..20dc22f 100644 --- a/src/eynollah/utils/pil_cv2.py +++ b/qurator/eynollah/utils/pil_cv2.py @@ -1,4 +1,3 @@ -from contextlib import nullcontext from PIL import Image import numpy as np from ocrd_models import OcrdExif @@ 
-17,14 +16,13 @@ def pil2cv(img): def check_dpi(img): try: - if isinstance(img, Image.Image): - pil_image = nullcontext(img) + if isinstance(img, Image.__class__): + pil_image = img elif isinstance(img, str): pil_image = Image.open(img) else: - pil_image = nullcontext(cv2pil(img)) - with pil_image: - exif = OcrdExif(pil_image) + pil_image = cv2pil(img) + exif = OcrdExif(pil_image) resolution = exif.resolution if resolution == 1: raise Exception() diff --git a/src/eynollah/utils/resize.py b/qurator/eynollah/utils/resize.py similarity index 100% rename from src/eynollah/utils/resize.py rename to qurator/eynollah/utils/resize.py diff --git a/src/eynollah/utils/rotate.py b/qurator/eynollah/utils/rotate.py similarity index 79% rename from src/eynollah/utils/rotate.py rename to qurator/eynollah/utils/rotate.py index 189693d..9cadd4b 100644 --- a/src/eynollah/utils/rotate.py +++ b/qurator/eynollah/utils/rotate.py @@ -1,4 +1,6 @@ import math + +import imutils import cv2 def rotatedRectWithMaxArea(w, h, angle): @@ -33,14 +35,14 @@ def rotate_max_area_new(image, rotated, angle): return rotated[y1:y2, x1:x2] def rotation_image_new(img, thetha): - rotated = rotate_image(img, thetha) + rotated = imutils.rotate(img, thetha) return rotate_max_area_new(img, rotated, thetha) def rotate_image(img_patch, slope): (h, w) = img_patch.shape[:2] center = (w // 2, h // 2) M = cv2.getRotationMatrix2D(center, slope, 1.0) - return cv2.warpAffine(img_patch, M, (w, h) ) + return cv2.warpAffine(img_patch, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE) def rotate_image_different( img, slope): # img = cv2.imread('images/input.jpg') @@ -50,27 +52,26 @@ def rotate_image_different( img, slope): img_rotation = cv2.warpAffine(img, rotation_matrix, (num_cols, num_rows)) return img_rotation -def rotate_max_area(image, rotated, rotated_textline, rotated_layout, rotated_table_prediction, angle): +def rotate_max_area(image, rotated, rotated_textline, rotated_layout, angle): wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], math.radians(angle)) h, w, _ = rotated.shape y1 = h // 2 - int(hr / 2) y2 = y1 + int(hr) x1 = w // 2 - int(wr / 2) x2 = x1 + int(wr) - return rotated[y1:y2, x1:x2], rotated_textline[y1:y2, x1:x2], rotated_layout[y1:y2, x1:x2], rotated_table_prediction[y1:y2, x1:x2] + return rotated[y1:y2, x1:x2], rotated_textline[y1:y2, x1:x2], rotated_layout[y1:y2, x1:x2] -def rotation_not_90_func(img, textline, text_regions_p_1, table_prediction, thetha): - rotated = rotate_image(img, thetha) - rotated_textline = rotate_image(textline, thetha) - rotated_layout = rotate_image(text_regions_p_1, thetha) - rotated_table_prediction = rotate_image(table_prediction, thetha) - return rotate_max_area(img, rotated, rotated_textline, rotated_layout, rotated_table_prediction, thetha) +def rotation_not_90_func(img, textline, text_regions_p_1, thetha): + rotated = imutils.rotate(img, thetha) + rotated_textline = imutils.rotate(textline, thetha) + rotated_layout = imutils.rotate(text_regions_p_1, thetha) + return rotate_max_area(img, rotated, rotated_textline, rotated_layout, thetha) def rotation_not_90_func_full_layout(img, textline, text_regions_p_1, text_regions_p_fully, thetha): - rotated = rotate_image(img, thetha) - rotated_textline = rotate_image(textline, thetha) - rotated_layout = rotate_image(text_regions_p_1, thetha) - rotated_layout_full = rotate_image(text_regions_p_fully, thetha) + rotated = imutils.rotate(img, thetha) + rotated_textline = imutils.rotate(textline, thetha) + rotated_layout = 
imutils.rotate(text_regions_p_1, thetha) + rotated_layout_full = imutils.rotate(text_regions_p_fully, thetha) return rotate_max_area_full_layout(img, rotated, rotated_textline, rotated_layout, rotated_layout_full, thetha) def rotate_max_area_full_layout(image, rotated, rotated_textline, rotated_layout, rotated_layout_full, angle): diff --git a/src/eynollah/utils/separate_lines.py b/qurator/eynollah/utils/separate_lines.py similarity index 62% rename from src/eynollah/utils/separate_lines.py rename to qurator/eynollah/utils/separate_lines.py index 22ef00d..acdc2e9 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/qurator/eynollah/utils/separate_lines.py @@ -1,28 +1,20 @@ -import os -from logging import getLogger -from functools import partial import numpy as np import cv2 from scipy.signal import find_peaks from scipy.ndimage import gaussian_filter1d -from multiprocessing import Process, Queue, cpu_count -from multiprocessing import Pool +import os + from .rotate import rotate_image -from .resize import resize_image from .contour import ( return_parent_contours, filter_contours_area_of_image_tables, return_contours_of_image, - filter_contours_area_of_image, - return_contours_of_interested_textline, - find_contours_mean_y_diff, + filter_contours_area_of_image ) -from .shm import share_ndarray, wrap_ndarray_shared +from .is_nan import isNaN from . import ( find_num_col_deskew, - crop_image_inside_box, - box2rect, - box2slice, + isNaN, ) def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): @@ -46,7 +38,9 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): y_max_cont = img_patch.shape[0] xv = np.linspace(x_min_cont, x_max_cont, 1000) + textline_patch_sum_along_width = img_patch.sum(axis=axis) + first_nonzero = 0 # (next((i for i, x in enumerate(mada_n) if x), None)) y = textline_patch_sum_along_width[:] # [first_nonzero:last_nonzero] @@ -55,8 +49,11 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): x = np.array(range(len(y))) peaks_real, _ = find_peaks(gaussian_filter1d(y, 3), height=0) + if 1 > 0: + try: + y_padded_smoothed_e = gaussian_filter1d(y_padded, 2) y_padded_up_to_down_e = -y_padded + np.max(y_padded) y_padded_up_to_down_padded_e = np.zeros(len(y_padded_up_to_down_e) + 40) @@ -67,8 +64,7 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): peaks_neg_e, _ = find_peaks(y_padded_up_to_down_padded_e, height=0) neg_peaks_max = np.max(y_padded_up_to_down_padded_e[peaks_neg_e]) - arg_neg_must_be_deleted = np.arange(len(peaks_neg_e))[ - y_padded_up_to_down_padded_e[peaks_neg_e] / float(neg_peaks_max) < 0.3] + arg_neg_must_be_deleted = np.array(range(len(peaks_neg_e)))[y_padded_up_to_down_padded_e[peaks_neg_e] / float(neg_peaks_max) < 0.3] diff_arg_neg_must_be_deleted = np.diff(arg_neg_must_be_deleted) arg_diff = np.array(range(len(diff_arg_neg_must_be_deleted))) @@ -79,14 +75,12 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): clusters_to_be_deleted = [] if len(arg_diff_cluster) > 0: - clusters_to_be_deleted.append( - arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) + + clusters_to_be_deleted.append(arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) for i in range(len(arg_diff_cluster) - 1): - clusters_to_be_deleted.append( - arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : - arg_diff_cluster[i + 1] + 1]) - clusters_to_be_deleted.append( - arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) + 
clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : arg_diff_cluster[i + 1] + 1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) + if len(clusters_to_be_deleted) > 0: peaks_new_extra = [] for m in range(len(clusters_to_be_deleted)): @@ -96,6 +90,7 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): for m1 in range(len(clusters_to_be_deleted[m])): peaks_new = peaks_new[peaks_new != peaks_e[clusters_to_be_deleted[m][m1] - 1]] peaks_new = peaks_new[peaks_new != peaks_e[clusters_to_be_deleted[m][m1]]] + peaks_neg_new = peaks_neg_new[peaks_neg_new != peaks_neg_e[clusters_to_be_deleted[m][m1]]] peaks_new_tot = [] for i1 in peaks_new: @@ -108,18 +103,16 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): peaks_new_tot = peaks_e[:] textline_con, hierarchy = return_contours_of_image(img_patch) - textline_con_fil = filter_contours_area_of_image(img_patch, - textline_con, hierarchy, - max_area=1, min_area=0.0008) - if len(np.diff(peaks_new_tot))>1: - y_diff_mean = np.mean(np.diff(peaks_new_tot)) # self.find_contours_mean_y_diff(textline_con_fil) - sigma_gaus = int(y_diff_mean * (7.0 / 40.0)) - else: - sigma_gaus = 12 + textline_con_fil = filter_contours_area_of_image(img_patch, textline_con, hierarchy, max_area=1, min_area=0.0008) + y_diff_mean = np.mean(np.diff(peaks_new_tot)) # self.find_contours_mean_y_diff(textline_con_fil) + + sigma_gaus = int(y_diff_mean * (7.0 / 40.0)) + # print(sigma_gaus,'sigma_gaus') except: sigma_gaus = 12 if sigma_gaus < 3: sigma_gaus = 3 + # print(sigma_gaus,'sigma') y_padded_smoothed = gaussian_filter1d(y_padded, sigma_gaus) y_padded_up_to_down = -y_padded + np.max(y_padded) @@ -130,24 +123,18 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): peaks, _ = find_peaks(y_padded_smoothed, height=0) peaks_neg, _ = find_peaks(y_padded_up_to_down_padded, height=0) - return (x, y, - x_d, y_d, - xv, - x_min_cont, y_min_cont, - x_max_cont, y_max_cont, - first_nonzero, - y_padded_up_to_down_padded, - y_padded_smoothed, - peaks, peaks_neg, - rotation_matrix) + return x, y, x_d, y_d, xv, x_min_cont, y_min_cont, x_max_cont, y_max_cont, first_nonzero, y_padded_up_to_down_padded, y_padded_smoothed, peaks, peaks_neg, rotation_matrix def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): - h, w = img_patch.shape[:2] + + (h, w) = img_patch.shape[:2] center = (w // 2, h // 2) M = cv2.getRotationMatrix2D(center, -thetha, 1.0) x_d = M[0, 2] y_d = M[1, 2] - rotation_matrix = M[:2, :2] + + thetha = thetha / 180. 
* np.pi + rotation_matrix = np.array([[np.cos(thetha), -np.sin(thetha)], [np.sin(thetha), np.cos(thetha)]]) contour_text_interest_copy = contour_text_interest.copy() x_cont = contour_text_interest[:, 0, 0] @@ -161,7 +148,9 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): y_max_cont = img_patch.shape[0] xv = np.linspace(x_min_cont, x_max_cont, 1000) + textline_patch_sum_along_width = img_patch.sum(axis=1) + first_nonzero = 0 # (next((i for i, x in enumerate(mada_n) if x), None)) y = textline_patch_sum_along_width[:] # [first_nonzero:last_nonzero] @@ -171,103 +160,131 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): peaks_real, _ = find_peaks(gaussian_filter1d(y, 3), height=0) - try: - y_padded_smoothed_e= gaussian_filter1d(y_padded, 2) - y_padded_up_to_down_e=-y_padded+np.max(y_padded) - y_padded_up_to_down_padded_e=np.zeros(len(y_padded_up_to_down_e)+40) - y_padded_up_to_down_padded_e[20:len(y_padded_up_to_down_e)+20]=y_padded_up_to_down_e - y_padded_up_to_down_padded_e= gaussian_filter1d(y_padded_up_to_down_padded_e, 2) - - peaks_e, _ = find_peaks(y_padded_smoothed_e, height=0) - peaks_neg_e, _ = find_peaks(y_padded_up_to_down_padded_e, height=0) - neg_peaks_max=np.max(y_padded_up_to_down_padded_e[peaks_neg_e]) + if 1>0: - arg_neg_must_be_deleted = np.arange(len(peaks_neg_e))[ - y_padded_up_to_down_padded_e[peaks_neg_e]/float(neg_peaks_max)<0.3] - diff_arg_neg_must_be_deleted=np.diff(arg_neg_must_be_deleted) - - arg_diff=np.array(range(len(diff_arg_neg_must_be_deleted))) - arg_diff_cluster=arg_diff[diff_arg_neg_must_be_deleted>1] - peaks_new=peaks_e[:] - peaks_neg_new=peaks_neg_e[:] + try: - clusters_to_be_deleted=[] - if len(arg_diff_cluster)>0: - clusters_to_be_deleted.append(arg_neg_must_be_deleted[0:arg_diff_cluster[0]+1]) - for i in range(len(arg_diff_cluster)-1): - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i]+1: - arg_diff_cluster[i+1]+1]) - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster)-1]+1:]) - if len(clusters_to_be_deleted)>0: - peaks_new_extra=[] - for m in range(len(clusters_to_be_deleted)): - min_cluster=np.min(peaks_e[clusters_to_be_deleted[m]]) - max_cluster=np.max(peaks_e[clusters_to_be_deleted[m]]) - peaks_new_extra.append( int( (min_cluster+max_cluster)/2.0) ) - for m1 in range(len(clusters_to_be_deleted[m])): - peaks_new=peaks_new[peaks_new!=peaks_e[clusters_to_be_deleted[m][m1]-1]] - peaks_new=peaks_new[peaks_new!=peaks_e[clusters_to_be_deleted[m][m1]]] - peaks_neg_new=peaks_neg_new[peaks_neg_new!=peaks_neg_e[clusters_to_be_deleted[m][m1]]] - peaks_new_tot=[] - for i1 in peaks_new: - peaks_new_tot.append(i1) - for i1 in peaks_new_extra: - peaks_new_tot.append(i1) - peaks_new_tot=np.sort(peaks_new_tot) - else: - peaks_new_tot=peaks_e[:] - - textline_con,hierarchy=return_contours_of_image(img_patch) - textline_con_fil=filter_contours_area_of_image(img_patch, - textline_con, hierarchy, - max_area=1, min_area=0.0008) - - if len(np.diff(peaks_new_tot))>0: - y_diff_mean=np.mean(np.diff(peaks_new_tot))#self.find_contours_mean_y_diff(textline_con_fil) - sigma_gaus=int( y_diff_mean * (7./40.0) ) - else: - sigma_gaus=12 + y_padded_smoothed_e= gaussian_filter1d(y_padded, 2) + y_padded_up_to_down_e=-y_padded+np.max(y_padded) + y_padded_up_to_down_padded_e=np.zeros(len(y_padded_up_to_down_e)+40) + y_padded_up_to_down_padded_e[20:len(y_padded_up_to_down_e)+20]=y_padded_up_to_down_e + y_padded_up_to_down_padded_e= 
gaussian_filter1d(y_padded_up_to_down_padded_e, 2) - except: - sigma_gaus=12 - if sigma_gaus<3: - sigma_gaus=3 + + peaks_e, _ = find_peaks(y_padded_smoothed_e, height=0) + peaks_neg_e, _ = find_peaks(y_padded_up_to_down_padded_e, height=0) + neg_peaks_max=np.max(y_padded_up_to_down_padded_e[peaks_neg_e]) + + arg_neg_must_be_deleted= np.array(range(len(peaks_neg_e)))[y_padded_up_to_down_padded_e[peaks_neg_e]/float(neg_peaks_max)<0.3 ] + diff_arg_neg_must_be_deleted=np.diff(arg_neg_must_be_deleted) + + + + arg_diff=np.array(range(len(diff_arg_neg_must_be_deleted))) + arg_diff_cluster=arg_diff[diff_arg_neg_must_be_deleted>1] + + + peaks_new=peaks_e[:] + peaks_neg_new=peaks_neg_e[:] + + clusters_to_be_deleted=[] + if len(arg_diff_cluster)>0: + + clusters_to_be_deleted.append(arg_neg_must_be_deleted[0:arg_diff_cluster[0]+1]) + for i in range(len(arg_diff_cluster)-1): + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i]+1:arg_diff_cluster[i+1]+1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster)-1]+1:]) + + + if len(clusters_to_be_deleted)>0: + peaks_new_extra=[] + for m in range(len(clusters_to_be_deleted)): + min_cluster=np.min(peaks_e[clusters_to_be_deleted[m]]) + max_cluster=np.max(peaks_e[clusters_to_be_deleted[m]]) + peaks_new_extra.append( int( (min_cluster+max_cluster)/2.0) ) + for m1 in range(len(clusters_to_be_deleted[m])): + peaks_new=peaks_new[peaks_new!=peaks_e[clusters_to_be_deleted[m][m1]-1]] + peaks_new=peaks_new[peaks_new!=peaks_e[clusters_to_be_deleted[m][m1]]] + + peaks_neg_new=peaks_neg_new[peaks_neg_new!=peaks_neg_e[clusters_to_be_deleted[m][m1]]] + peaks_new_tot=[] + for i1 in peaks_new: + peaks_new_tot.append(i1) + for i1 in peaks_new_extra: + peaks_new_tot.append(i1) + peaks_new_tot=np.sort(peaks_new_tot) + + + else: + peaks_new_tot=peaks_e[:] + + + textline_con,hierarchy=return_contours_of_image(img_patch) + textline_con_fil=filter_contours_area_of_image(img_patch,textline_con,hierarchy,max_area=1,min_area=0.0008) + y_diff_mean=np.mean(np.diff(peaks_new_tot))#self.find_contours_mean_y_diff(textline_con_fil) + + sigma_gaus=int( y_diff_mean * (7./40.0) ) + #print(sigma_gaus,'sigma_gaus') + except: + sigma_gaus=12 + if sigma_gaus<3: + sigma_gaus=3 + #print(sigma_gaus,'sigma') + y_padded_smoothed= gaussian_filter1d(y_padded, sigma_gaus) y_padded_up_to_down=-y_padded+np.max(y_padded) y_padded_up_to_down_padded=np.zeros(len(y_padded_up_to_down)+40) y_padded_up_to_down_padded[20:len(y_padded_up_to_down)+20]=y_padded_up_to_down y_padded_up_to_down_padded= gaussian_filter1d(y_padded_up_to_down_padded, sigma_gaus) + + peaks, _ = find_peaks(y_padded_smoothed, height=0) peaks_neg, _ = find_peaks(y_padded_up_to_down_padded, height=0) + + + + try: neg_peaks_max=np.max(y_padded_smoothed[peaks]) - arg_neg_must_be_deleted = np.arange(len(peaks_neg))[ - y_padded_up_to_down_padded[peaks_neg]/float(neg_peaks_max)<0.42] + + + arg_neg_must_be_deleted= np.array(range(len(peaks_neg)))[y_padded_up_to_down_padded[peaks_neg]/float(neg_peaks_max)<0.42 ] + + diff_arg_neg_must_be_deleted=np.diff(arg_neg_must_be_deleted) + + arg_diff=np.array(range(len(diff_arg_neg_must_be_deleted))) arg_diff_cluster=arg_diff[diff_arg_neg_must_be_deleted>1] - except: arg_neg_must_be_deleted=[] arg_diff_cluster=[] + + try: peaks_new=peaks[:] peaks_neg_new=peaks_neg[:] clusters_to_be_deleted=[] + + if len(arg_diff_cluster)>=2 and len(arg_diff_cluster)>0: + clusters_to_be_deleted.append(arg_neg_must_be_deleted[0:arg_diff_cluster[0]+1]) for i in 
range(len(arg_diff_cluster)-1): - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i]+1: - arg_diff_cluster[i+1]+1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i]+1:arg_diff_cluster[i+1]+1]) clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster)-1]+1:]) elif len(arg_neg_must_be_deleted)>=2 and len(arg_diff_cluster)==0: clusters_to_be_deleted.append(arg_neg_must_be_deleted[:]) + + if len(arg_neg_must_be_deleted)==1: clusters_to_be_deleted.append(arg_neg_must_be_deleted) + + if len(clusters_to_be_deleted)>0: peaks_new_extra=[] for m in range(len(clusters_to_be_deleted)): @@ -277,6 +294,7 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): for m1 in range(len(clusters_to_be_deleted[m])): peaks_new=peaks_new[peaks_new!=peaks[clusters_to_be_deleted[m][m1]-1]] peaks_new=peaks_new[peaks_new!=peaks[clusters_to_be_deleted[m][m1]]] + peaks_neg_new=peaks_neg_new[peaks_neg_new!=peaks_neg[clusters_to_be_deleted[m][m1]]] peaks_new_tot=[] for i1 in peaks_new: @@ -285,29 +303,51 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): peaks_new_tot.append(i1) peaks_new_tot=np.sort(peaks_new_tot) + ##plt.plot(y_padded_up_to_down_padded) + ##plt.plot(peaks_neg,y_padded_up_to_down_padded[peaks_neg],'*') + ##plt.show() + + ##plt.plot(y_padded_up_to_down_padded) + ##plt.plot(peaks_neg_new,y_padded_up_to_down_padded[peaks_neg_new],'*') + ##plt.show() + + ##plt.plot(y_padded_smoothed) + ##plt.plot(peaks,y_padded_smoothed[peaks],'*') + ##plt.show() + + ##plt.plot(y_padded_smoothed) + ##plt.plot(peaks_new_tot,y_padded_smoothed[peaks_new_tot],'*') + ##plt.show() + peaks=peaks_new_tot[:] peaks_neg=peaks_neg_new[:] + + else: peaks_new_tot=peaks[:] peaks=peaks_new_tot[:] peaks_neg=peaks_neg_new[:] except: pass - if len(y_padded_smoothed[peaks]) > 1: - mean_value_of_peaks=np.mean(y_padded_smoothed[peaks]) - std_value_of_peaks=np.std(y_padded_smoothed[peaks]) - else: - mean_value_of_peaks = np.nan - std_value_of_peaks = np.nan + + + mean_value_of_peaks=np.mean(y_padded_smoothed[peaks]) + std_value_of_peaks=np.std(y_padded_smoothed[peaks]) peaks_values=y_padded_smoothed[peaks] + + peaks_neg = peaks_neg - 20 - 20 peaks = peaks - 20 + for jj in range(len(peaks_neg)): if peaks_neg[jj] > len(x) - 1: peaks_neg[jj] = len(x) - 1 + for jj in range(len(peaks)): if peaks[jj] > len(x) - 1: peaks[jj] = len(x) - 1 + + textline_boxes = [] textline_boxes_rot = [] @@ -321,47 +361,34 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): if peaks_values[jj]>mean_value_of_peaks-std_value_of_peaks/2.: point_up = peaks[jj] + first_nonzero - int(1.3 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down =y_max_cont-1 - ##peaks[jj] + first_nonzero + int(1.3 * dis_to_next_down) - #point_up - # np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) - ###-int(dis_to_next_down*1./4.0) + point_down =y_max_cont-1##peaks[jj] + first_nonzero + int(1.3 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) else: point_up = peaks[jj] + first_nonzero - int(1.4 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down =y_max_cont-1 - ##peaks[jj] + first_nonzero + int(1.6 * dis_to_next_down) - #point_up - # np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) - ###-int(dis_to_next_down*1./4.0) + point_down =y_max_cont-1##peaks[jj] + first_nonzero + int(1.6 * 
dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) point_down_narrow = peaks[jj] + first_nonzero + int( - 1.4 * dis_to_next_down) - ###-int(dis_to_next_down*1./2) + 1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./2) else: dis_to_next_up = abs(peaks[jj] - peaks_neg[jj]) dis_to_next_down = abs(peaks[jj] - peaks_neg[jj + 1]) if peaks_values[jj]>mean_value_of_peaks-std_value_of_peaks/2.: - point_up = peaks[jj] + first_nonzero - int(1.1 * dis_to_next_up) - ##+int(dis_to_next_up*1./4.0) - point_down = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) - ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.1 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) + point_down = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) else: - point_up = peaks[jj] + first_nonzero - int(1.23 * dis_to_next_up) - ##+int(dis_to_next_up*1./4.0) - point_down = peaks[jj] + first_nonzero + int(1.33 * dis_to_next_down) - ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.23 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) + point_down = peaks[jj] + first_nonzero + int(1.33 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) point_down_narrow = peaks[jj] + first_nonzero + int( 1.1 * dis_to_next_down) ###-int(dis_to_next_down*1./2) + + if point_down_narrow >= img_patch.shape[0]: point_down_narrow = img_patch.shape[0] - 2 - distances = [cv2.pointPolygonTest(contour_text_interest_copy, - tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), - True) + distances = [cv2.pointPolygonTest(contour_text_interest_copy, tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) for mj in range(len(xv))] distances = np.array(distances) @@ -393,6 +420,8 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): if point_up_rot2<0: point_up_rot2=0 + + x_min_rot1=x_min_rot1-x_help x_max_rot2=x_max_rot2-x_help x_max_rot3=x_max_rot3-x_help @@ -403,24 +432,29 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): point_down_rot3=point_down_rot3-y_help point_down_rot4=point_down_rot4-y_help + + + textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], [int(x_max_rot3), int(point_down_rot3)], [int(x_min_rot4), int(point_down_rot4)]])) + textline_boxes.append(np.array([[int(x_min), int(point_up)], [int(x_max), int(point_up)], [int(x_max), int(point_down)], [int(x_min), int(point_down)]])) + elif len(peaks) < 1: pass elif len(peaks) == 1: - distances = [cv2.pointPolygonTest(contour_text_interest_copy, - tuple(int(x) for x in np.array([xv[mj], peaks[0] + first_nonzero])), True) - for mj in range(len(xv))] + distances = [cv2.pointPolygonTest(contour_text_interest_copy, tuple(int(x) for x in np.array([xv[mj], peaks[0] + first_nonzero])), True) + for mj in range(len(xv))] distances = np.array(distances) xvinside = xv[distances >= 0] + if len(xvinside) == 0: x_min = x_min_cont x_max = x_max_cont @@ -443,6 +477,7 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): x_max_rot3, point_down_rot3 = p3[0] + x_d, p3[1] + y_d x_min_rot4, point_down_rot4 = p4[0] + x_d, p4[1] + y_d + if x_min_rot1<0: x_min_rot1=0 if x_min_rot4<0: @@ -451,6 +486,7 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): point_up_rot1=0 if point_up_rot2<0: point_up_rot2=0 + x_min_rot1=x_min_rot1-x_help 
x_max_rot2=x_max_rot2-x_help @@ -461,15 +497,22 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): point_up_rot2=point_up_rot2-y_help point_down_rot3=point_down_rot3-y_help point_down_rot4=point_down_rot4-y_help + + + textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], [int(x_max_rot3), int(point_down_rot3)], [int(x_min_rot4), int(point_down_rot4)]])) + textline_boxes.append(np.array([[int(x_min), int(y_min)], [int(x_max), int(y_min)], [int(x_max), int(y_max)], [int(x_min), int(y_max)]])) + + + elif len(peaks) == 2: dis_to_next = np.abs(peaks[1] - peaks[0]) for jj in range(len(peaks)): @@ -487,13 +530,12 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): except: point_up =peaks[jj] + first_nonzero - int(1. / 1.8 * dis_to_next) - distances = [cv2.pointPolygonTest(contour_text_interest_copy, - tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), - True) - for mj in range(len(xv))] + distances = [cv2.pointPolygonTest(contour_text_interest_copy, tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) + for mj in range(len(xv))] distances = np.array(distances) xvinside = xv[distances >= 0] + if len(xvinside) == 0: x_min = x_min_cont x_max = x_max_cont @@ -511,6 +553,8 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): x_max_rot3, point_down_rot3 = p3[0] + x_d, p3[1] + y_d x_min_rot4, point_down_rot4 = p4[0] + x_d, p4[1] + y_d + + if x_min_rot1<0: x_min_rot1=0 if x_min_rot4<0: @@ -530,16 +574,21 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): point_down_rot3=point_down_rot3-y_help point_down_rot4=point_down_rot4-y_help + + + textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], [int(x_max_rot3), int(point_down_rot3)], [int(x_min_rot4), int(point_down_rot4)]])) + textline_boxes.append(np.array([[int(x_min), int(point_up)], [int(x_max), int(point_up)], [int(x_max), int(point_down)], [int(x_min), int(point_down)]])) else: for jj in range(len(peaks)): + if jj == 0: dis_to_next = peaks[jj + 1] - peaks[jj] # point_up=peaks[jj]+first_nonzero-int(1./3*dis_to_next) @@ -563,13 +612,12 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): point_up = peaks[jj] + first_nonzero - int(1. / 1.9 * dis_to_next_up) point_down = peaks[jj] + first_nonzero + int(1. 
/ 1.9 * dis_to_next_down) - distances = [cv2.pointPolygonTest(contour_text_interest_copy, - tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), - True) - for mj in range(len(xv))] + distances = [cv2.pointPolygonTest(contour_text_interest_copy, tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) + for mj in range(len(xv))] distances = np.array(distances) xvinside = xv[distances >= 0] + if len(xvinside) == 0: x_min = x_min_cont x_max = x_max_cont @@ -595,6 +643,7 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): point_up_rot1=0 if point_up_rot2<0: point_up_rot2=0 + x_min_rot1=x_min_rot1-x_help x_max_rot2=x_max_rot2-x_help @@ -605,23 +654,29 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): point_up_rot2=point_up_rot2-y_help point_down_rot3=point_down_rot3-y_help point_down_rot4=point_down_rot4-y_help + + + textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], [int(x_max_rot3), int(point_down_rot3)], [int(x_min_rot4), int(point_down_rot4)]])) + textline_boxes.append(np.array([[int(x_min), int(point_up)], [int(x_max), int(point_up)], [int(x_max), int(point_down)], [int(x_min), int(point_down)]])) + + return peaks, textline_boxes_rot def separate_lines_vertical(img_patch, contour_text_interest, thetha): + thetha = thetha + 90 contour_text_interest_copy = contour_text_interest.copy() - x, y, x_d, y_d, xv, x_min_cont, y_min_cont, x_max_cont, y_max_cont, \ - first_nonzero, y_padded_up_to_down_padded, y_padded_smoothed, \ - peaks, peaks_neg, rotation_matrix = dedup_separate_lines(img_patch, contour_text_interest, thetha, 0) + x, y, x_d, y_d, xv, x_min_cont, y_min_cont, x_max_cont, y_max_cont, first_nonzero, y_padded_up_to_down_padded, y_padded_smoothed, peaks, peaks_neg, rotation_matrix = dedup_separate_lines(img_patch, contour_text_interest, thetha, 0) + # plt.plot(y_padded_up_to_down_padded) # plt.plot(peaks_neg,y_padded_up_to_down_padded[peaks_neg],'*') @@ -635,8 +690,8 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): neg_peaks_max = np.max(y_padded_up_to_down_padded[peaks_neg]) - arg_neg_must_be_deleted = np.arange(len(peaks_neg))[ - y_padded_up_to_down_padded[peaks_neg] / float(neg_peaks_max) < 0.42] + arg_neg_must_be_deleted = np.array(range(len(peaks_neg)))[y_padded_up_to_down_padded[peaks_neg] / float(neg_peaks_max) < 0.42] + diff_arg_neg_must_be_deleted = np.diff(arg_neg_must_be_deleted) arg_diff = np.array(range(len(diff_arg_neg_must_be_deleted))) @@ -646,16 +701,18 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): peaks_neg_new = peaks_neg[:] clusters_to_be_deleted = [] - if len(arg_neg_must_be_deleted) >= 2 and len(arg_diff_cluster) >= 2: + if len(arg_diff_cluster) >= 2 and len(arg_diff_cluster) > 0: + clusters_to_be_deleted.append(arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) for i in range(len(arg_diff_cluster) - 1): - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : - arg_diff_cluster[i + 1] + 1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : arg_diff_cluster[i + 1] + 1]) clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) elif len(arg_neg_must_be_deleted) >= 2 and len(arg_diff_cluster) == 0: clusters_to_be_deleted.append(arg_neg_must_be_deleted[:]) - else: + + if len(arg_neg_must_be_deleted) == 1: 
clusters_to_be_deleted.append(arg_neg_must_be_deleted) + if len(clusters_to_be_deleted) > 0: peaks_new_extra = [] for m in range(len(clusters_to_be_deleted)): @@ -665,6 +722,7 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): for m1 in range(len(clusters_to_be_deleted[m])): peaks_new = peaks_new[peaks_new != peaks[clusters_to_be_deleted[m][m1] - 1]] peaks_new = peaks_new[peaks_new != peaks[clusters_to_be_deleted[m][m1]]] + peaks_neg_new = peaks_neg_new[peaks_neg_new != peaks_neg[clusters_to_be_deleted[m][m1]]] peaks_new_tot = [] for i1 in peaks_new: @@ -680,14 +738,9 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): peaks_new_tot = peaks[:] peaks = peaks_new_tot[:] peaks_neg = peaks_neg_new[:] - - if len(y_padded_smoothed[peaks])>1: - mean_value_of_peaks = np.mean(y_padded_smoothed[peaks]) - std_value_of_peaks = np.std(y_padded_smoothed[peaks]) - else: - mean_value_of_peaks = np.nan - std_value_of_peaks = np.nan - + + mean_value_of_peaks = np.mean(y_padded_smoothed[peaks]) + std_value_of_peaks = np.std(y_padded_smoothed[peaks]) peaks_values = y_padded_smoothed[peaks] peaks_neg = peaks_neg - 20 - 20 @@ -705,6 +758,7 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): textline_boxes_rot = [] if len(peaks_neg) == len(peaks) + 1 and len(peaks) >= 3: + # print('11') for jj in range(len(peaks)): if jj == (len(peaks) - 1): @@ -712,53 +766,34 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): dis_to_next_down = abs(peaks[jj] - peaks_neg[jj + 1]) if peaks_values[jj] > mean_value_of_peaks - std_value_of_peaks / 2.0: - point_up = peaks[jj] + first_nonzero - int(1.3 * dis_to_next_up) - ##+int(dis_to_next_up*1./4.0) - point_down = x_max_cont - 1 - ##peaks[jj] + first_nonzero + int(1.3 * dis_to_next_down) - #point_up - # np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) - ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.3 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) + point_down = x_max_cont - 1 ##peaks[jj] + first_nonzero + int(1.3 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) else: - point_up = peaks[jj] + first_nonzero - int(1.4 * dis_to_next_up) - ##+int(dis_to_next_up*1./4.0) - point_down = x_max_cont - 1 - ##peaks[jj] + first_nonzero + int(1.6 * dis_to_next_down) - #point_up - # np.max(y_cont) - #peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) - ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.4 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) + point_down = x_max_cont - 1 ##peaks[jj] + first_nonzero + int(1.6 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) - point_down_narrow = peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) - ###-int(dis_to_next_down*1./2) + point_down_narrow = peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./2) else: dis_to_next_up = abs(peaks[jj] - peaks_neg[jj]) dis_to_next_down = abs(peaks[jj] - peaks_neg[jj + 1]) if peaks_values[jj] > mean_value_of_peaks - std_value_of_peaks / 2.0: - point_up = peaks[jj] + first_nonzero - int(1.1 * dis_to_next_up) - ##+int(dis_to_next_up*1./4.0) - point_down = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) - ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.1 * dis_to_next_up) 
##+int(dis_to_next_up*1./4.0) + point_down = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) else: - point_up = peaks[jj] + first_nonzero - int(1.23 * dis_to_next_up) - ##+int(dis_to_next_up*1./4.0) - point_down = peaks[jj] + first_nonzero + int(1.33 * dis_to_next_down) - ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.23 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) + point_down = peaks[jj] + first_nonzero + int(1.33 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) - point_down_narrow = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) - ###-int(dis_to_next_down*1./2) + point_down_narrow = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) ###-int(dis_to_next_down*1./2) if point_down_narrow >= img_patch.shape[0]: point_down_narrow = img_patch.shape[0] - 2 - distances = [cv2.pointPolygonTest(contour_text_interest_copy, - tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), - True) - for mj in range(len(xv))] + distances = [cv2.pointPolygonTest(contour_text_interest_copy, tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) for mj in range(len(xv))] distances = np.array(distances) xvinside = xv[distances >= 0] + if len(xvinside) == 0: x_min = x_min_cont x_max = x_max_cont @@ -785,16 +820,13 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): if point_up_rot2 < 0: point_up_rot2 = 0 - textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], - [int(x_max_rot2), int(point_up_rot2)], - [int(x_max_rot3), int(point_down_rot3)], - [int(x_min_rot4), int(point_down_rot4)]])) - textline_boxes.append(np.array([[int(x_min), int(point_up)], - [int(x_max), int(point_up)], - [int(x_max), int(point_down)], - [int(x_min), int(point_down)]])) + textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], [int(x_max_rot3), int(point_down_rot3)], [int(x_min_rot4), int(point_down_rot4)]])) + + textline_boxes.append(np.array([[int(x_min), int(point_up)], [int(x_max), int(point_up)], [int(x_max), int(point_down)], [int(x_min), int(point_down)]])) + elif len(peaks) < 1: pass + elif len(peaks) == 1: x_min = x_min_cont x_max = x_max_cont @@ -821,14 +853,10 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): if point_up_rot2 < 0: point_up_rot2 = 0 - textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], - [int(x_max_rot2), int(point_up_rot2)], - [int(x_max_rot3), int(point_down_rot3)], - [int(x_min_rot4), int(point_down_rot4)]])) - textline_boxes.append(np.array([[int(x_min), int(y_min)], - [int(x_max), int(y_min)], - [int(x_max), int(y_max)], - [int(x_min), int(y_max)]])) + textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], [int(x_max_rot3), int(point_down_rot3)], [int(x_min_rot4), int(point_down_rot4)]])) + + textline_boxes.append(np.array([[int(x_min), int(y_min)], [int(x_max), int(y_min)], [int(x_max), int(y_max)], [int(x_min), int(y_max)]])) + elif len(peaks) == 2: dis_to_next = np.abs(peaks[1] - peaks[0]) for jj in range(len(peaks)): @@ -843,13 +871,11 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): point_down = img_patch.shape[0] - 2 point_up = peaks[jj] + first_nonzero - int(1.0 / 1.8 * dis_to_next) - distances = [cv2.pointPolygonTest(contour_text_interest_copy, - tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), - True) - for mj in 
range(len(xv))] + distances = [cv2.pointPolygonTest(contour_text_interest_copy, tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) for mj in range(len(xv))] distances = np.array(distances) xvinside = xv[distances >= 0] + if len(xvinside) == 0: x_min = x_min_cont x_max = x_max_cont @@ -876,16 +902,12 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): if point_up_rot2 < 0: point_up_rot2 = 0 - textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], - [int(x_max_rot2), int(point_up_rot2)], - [int(x_max_rot3), int(point_down_rot3)], - [int(x_min_rot4), int(point_down_rot4)]])) - textline_boxes.append(np.array([[int(x_min), int(point_up)], - [int(x_max), int(point_up)], - [int(x_max), int(point_down)], - [int(x_min), int(point_down)]])) + textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], [int(x_max_rot3), int(point_down_rot3)], [int(x_min_rot4), int(point_down_rot4)]])) + + textline_boxes.append(np.array([[int(x_min), int(point_up)], [int(x_max), int(point_up)], [int(x_max), int(point_down)], [int(x_min), int(point_down)]])) else: for jj in range(len(peaks)): + if jj == 0: dis_to_next = peaks[jj + 1] - peaks[jj] # point_up=peaks[jj]+first_nonzero-int(1./3*dis_to_next) @@ -909,13 +931,11 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): point_up = peaks[jj] + first_nonzero - int(1.0 / 1.9 * dis_to_next_up) point_down = peaks[jj] + first_nonzero + int(1.0 / 1.9 * dis_to_next_down) - distances = [cv2.pointPolygonTest(contour_text_interest_copy, - tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), - True) - for mj in range(len(xv))] + distances = [cv2.pointPolygonTest(contour_text_interest_copy, tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) for mj in range(len(xv))] distances = np.array(distances) xvinside = xv[distances >= 0] + if len(xvinside) == 0: x_min = x_min_cont x_max = x_max_cont @@ -942,17 +962,14 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): if point_up_rot2 < 0: point_up_rot2 = 0 - textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], - [int(x_max_rot2), int(point_up_rot2)], - [int(x_max_rot3), int(point_down_rot3)], - [int(x_min_rot4), int(point_down_rot4)]])) - textline_boxes.append(np.array([[int(x_min), int(point_up)], - [int(x_max), int(point_up)], - [int(x_max), int(point_down)], - [int(x_min), int(point_down)]])) + textline_boxes_rot.append(np.array([[int(x_min_rot1), int(point_up_rot1)], [int(x_max_rot2), int(point_up_rot2)], [int(x_max_rot3), int(point_down_rot3)], [int(x_min_rot4), int(point_down_rot4)]])) + + textline_boxes.append(np.array([[int(x_min), int(point_up)], [int(x_max), int(point_up)], [int(x_max), int(point_down)], [int(x_min), int(point_down)]])) + return peaks, textline_boxes_rot def separate_lines_new_inside_tiles2(img_patch, thetha): + (h, w) = img_patch.shape[:2] center = (w // 2, h // 2) M = cv2.getRotationMatrix2D(center, -thetha, 1.0) @@ -974,7 +991,9 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): y_max_cont = img_patch.shape[0] xv = np.linspace(x_min_cont, x_max_cont, 1000) + textline_patch_sum_along_width = img_patch.sum(axis=1) + first_nonzero = 0 # (next((i for i, x in enumerate(mada_n) if x), None)) y = textline_patch_sum_along_width[:] # [first_nonzero:last_nonzero] @@ -984,7 +1003,9 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): peaks_real, _ = find_peaks(gaussian_filter1d(y, 
3), height=0) if 1 > 0: + try: + y_padded_smoothed_e = gaussian_filter1d(y_padded, 2) y_padded_up_to_down_e = -y_padded + np.max(y_padded) y_padded_up_to_down_padded_e = np.zeros(len(y_padded_up_to_down_e) + 40) @@ -995,8 +1016,7 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): peaks_neg_e, _ = find_peaks(y_padded_up_to_down_padded_e, height=0) neg_peaks_max = np.max(y_padded_up_to_down_padded_e[peaks_neg_e]) - arg_neg_must_be_deleted = np.arange(len(peaks_neg_e))[ - y_padded_up_to_down_padded_e[peaks_neg_e] / float(neg_peaks_max) < 0.3] + arg_neg_must_be_deleted = np.array(range(len(peaks_neg_e)))[y_padded_up_to_down_padded_e[peaks_neg_e] / float(neg_peaks_max) < 0.3] diff_arg_neg_must_be_deleted = np.diff(arg_neg_must_be_deleted) arg_diff = np.array(range(len(diff_arg_neg_must_be_deleted))) @@ -1007,13 +1027,12 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): clusters_to_be_deleted = [] if len(arg_diff_cluster) > 0: + clusters_to_be_deleted.append(arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) for i in range(len(arg_diff_cluster) - 1): - clusters_to_be_deleted.append( - arg_neg_must_be_deleted[arg_diff_cluster[i] + 1: - arg_diff_cluster[i + 1] + 1]) - clusters_to_be_deleted.append( - arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : arg_diff_cluster[i + 1] + 1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) + if len(clusters_to_be_deleted) > 0: peaks_new_extra = [] for m in range(len(clusters_to_be_deleted)): @@ -1023,6 +1042,7 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): for m1 in range(len(clusters_to_be_deleted[m])): peaks_new = peaks_new[peaks_new != peaks_e[clusters_to_be_deleted[m][m1] - 1]] peaks_new = peaks_new[peaks_new != peaks_e[clusters_to_be_deleted[m][m1]]] + peaks_neg_new = peaks_neg_new[peaks_neg_new != peaks_neg_e[clusters_to_be_deleted[m][m1]]] peaks_new_tot = [] for i1 in peaks_new: @@ -1030,23 +1050,21 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): for i1 in peaks_new_extra: peaks_new_tot.append(i1) peaks_new_tot = np.sort(peaks_new_tot) + else: peaks_new_tot = peaks_e[:] textline_con, hierarchy = return_contours_of_image(img_patch) - textline_con_fil = filter_contours_area_of_image(img_patch, - textline_con, hierarchy, - max_area=1, min_area=0.0008) - if len(np.diff(peaks_new_tot)): - y_diff_mean = np.mean(np.diff(peaks_new_tot)) # self.find_contours_mean_y_diff(textline_con_fil) - sigma_gaus = int(y_diff_mean * (7.0 / 40.0)) - else: - sigma_gaus = 12 + textline_con_fil = filter_contours_area_of_image(img_patch, textline_con, hierarchy, max_area=1, min_area=0.0008) + y_diff_mean = np.mean(np.diff(peaks_new_tot)) # self.find_contours_mean_y_diff(textline_con_fil) + sigma_gaus = int(y_diff_mean * (7.0 / 40.0)) + # print(sigma_gaus,'sigma_gaus') except: sigma_gaus = 12 if sigma_gaus < 3: sigma_gaus = 3 + # print(sigma_gaus,'sigma') y_padded_smoothed = gaussian_filter1d(y_padded, sigma_gaus) y_padded_up_to_down = -y_padded + np.max(y_padded) @@ -1063,24 +1081,27 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): try: neg_peaks_max = np.max(y_padded_smoothed[peaks]) - arg_neg_must_be_deleted = np.arange(len(peaks_neg))[ - y_padded_up_to_down_padded[peaks_neg] / float(neg_peaks_max) < 0.24] + arg_neg_must_be_deleted = np.array(range(len(peaks_neg)))[y_padded_up_to_down_padded[peaks_neg] / float(neg_peaks_max) < 0.24] + 
diff_arg_neg_must_be_deleted = np.diff(arg_neg_must_be_deleted) arg_diff = np.array(range(len(diff_arg_neg_must_be_deleted))) arg_diff_cluster = arg_diff[diff_arg_neg_must_be_deleted > 1] clusters_to_be_deleted = [] - if len(arg_neg_must_be_deleted) >= 2 and len(arg_diff_cluster) >= 2: + + if len(arg_diff_cluster) >= 2 and len(arg_diff_cluster) > 0: + clusters_to_be_deleted.append(arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) for i in range(len(arg_diff_cluster) - 1): - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : - arg_diff_cluster[i + 1] + 1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : arg_diff_cluster[i + 1] + 1]) clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) elif len(arg_neg_must_be_deleted) >= 2 and len(arg_diff_cluster) == 0: clusters_to_be_deleted.append(arg_neg_must_be_deleted[:]) - else: + + if len(arg_neg_must_be_deleted) == 1: clusters_to_be_deleted.append(arg_neg_must_be_deleted) + if len(clusters_to_be_deleted) > 0: peaks_new_extra = [] for m in range(len(clusters_to_be_deleted)): @@ -1090,6 +1111,7 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): for m1 in range(len(clusters_to_be_deleted[m])): peaks_new = peaks_new[peaks_new != peaks[clusters_to_be_deleted[m][m1] - 1]] peaks_new = peaks_new[peaks_new != peaks[clusters_to_be_deleted[m][m1]]] + peaks_neg_new = peaks_neg_new[peaks_neg_new != peaks_neg[clusters_to_be_deleted[m][m1]]] peaks_new_tot = [] for i1 in peaks_new: @@ -1113,6 +1135,7 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): # plt.plot(y_padded_smoothed) # plt.plot(peaks_new_tot,y_padded_smoothed[peaks_new_tot],'*') # plt.show() + peaks = peaks_new_tot[:] peaks_neg = peaks_neg_new[:] except: @@ -1122,14 +1145,9 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): peaks_new_tot = peaks[:] peaks = peaks_new_tot[:] peaks_neg = peaks_neg_new[:] - - if len(y_padded_smoothed[peaks]) > 1: - mean_value_of_peaks = np.mean(y_padded_smoothed[peaks]) - std_value_of_peaks = np.std(y_padded_smoothed[peaks]) - else: - mean_value_of_peaks = np.nan - std_value_of_peaks = np.nan - + + mean_value_of_peaks = np.mean(y_padded_smoothed[peaks]) + std_value_of_peaks = np.std(y_padded_smoothed[peaks]) peaks_values = y_padded_smoothed[peaks] ###peaks_neg = peaks_neg - 20 - 20 @@ -1139,10 +1157,13 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): if len(peaks_neg_true) > 0: peaks_neg_true = np.array(peaks_neg_true) + peaks_neg_true = peaks_neg_true - 20 - 20 + # print(peaks_neg_true) for i in range(len(peaks_neg_true)): img_patch[peaks_neg_true[i] - 6 : peaks_neg_true[i] + 6, :] = 0 + else: pass @@ -1225,12 +1246,14 @@ def separate_lines_new_inside_tiles(img_path, thetha): if diff_peaks[i] <= cut_off: forest.append(peaks_neg[i + 1]) if diff_peaks[i] > cut_off: - if not np.isnan(forest[np.argmin(z[forest])]): + # print(forest[np.argmin(z[forest]) ] ) + if not isNaN(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) forest = [] forest.append(peaks_neg[i + 1]) if i == (len(peaks_neg) - 1): - if not np.isnan(forest[np.argmin(z[forest])]): + # print(print(forest[np.argmin(z[forest]) ] )) + if not isNaN(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) diff_peaks_pos = np.abs(np.diff(peaks)) @@ -1246,14 +1269,17 @@ def separate_lines_new_inside_tiles(img_path, thetha): if diff_peaks_pos[i] <= cut_off: forest.append(peaks[i + 1]) if diff_peaks_pos[i] > 
cut_off: - if not np.isnan(forest[np.argmax(z[forest])]): + # print(forest[np.argmin(z[forest]) ] ) + if not isNaN(forest[np.argmax(z[forest])]): peaks_pos_true.append(forest[np.argmax(z[forest])]) forest = [] forest.append(peaks[i + 1]) if i == (len(peaks) - 1): - if not np.isnan(forest[np.argmax(z[forest])]): + # print(print(forest[np.argmin(z[forest]) ] )) + if not isNaN(forest[np.argmax(z[forest])]): peaks_pos_true.append(forest[np.argmax(z[forest])]) + # print(len(peaks_neg_true) ,len(peaks_pos_true) ,'lensss') if len(peaks_neg_true) > 0: peaks_neg_true = np.array(peaks_neg_true) @@ -1279,6 +1305,7 @@ def separate_lines_new_inside_tiles(img_path, thetha): """ peaks_neg_true = peaks_neg_true - 20 - 20 + # print(peaks_neg_true) for i in range(len(peaks_neg_true)): img_path[peaks_neg_true[i] - 6 : peaks_neg_true[i] + 6, :] = 0 @@ -1301,123 +1328,166 @@ def separate_lines_new_inside_tiles(img_path, thetha): def separate_lines_vertical_cont(img_patch, contour_text_interest, thetha, box_ind, add_boxes_coor_into_textlines): kernel = np.ones((5, 5), np.uint8) - label = 255 + pixel = 255 min_area = 0 max_area = 1 - if img_patch.ndim == 3: - cnts_images = (img_patch[:, :, 0] == label) * 1 + if len(img_patch.shape) == 3: + cnts_images = (img_patch[:, :, 0] == pixel) * 1 else: - cnts_images = (img_patch[:, :] == label) * 1 - _, thresh = cv2.threshold(cnts_images.astype(np.uint8), 0, 255, 0) - contours_imgs, hierarchy = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + cnts_images = (img_patch[:, :] == pixel) * 1 + cnts_images = cnts_images.astype(np.uint8) + cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) + imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contours_imgs = return_parent_contours(contours_imgs, hierarchy) - contours_imgs = filter_contours_area_of_image_tables(thresh, - contours_imgs, hierarchy, - max_area=max_area, min_area=min_area) + contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=max_area, min_area=min_area) + cont_final = [] + ###print(add_boxes_coor_into_textlines,'ikki') for i in range(len(contours_imgs)): - img_contour = np.zeros(cnts_images.shape[:2], dtype=np.uint8) - img_contour = cv2.fillPoly(img_contour, pts=[contours_imgs[i]], color=255) + img_contour = np.zeros((cnts_images.shape[0], cnts_images.shape[1], 3)) + img_contour = cv2.fillPoly(img_contour, pts=[contours_imgs[i]], color=(255, 255, 255)) + + img_contour = img_contour.astype(np.uint8) img_contour = cv2.dilate(img_contour, kernel, iterations=4) - _, threshrot = cv2.threshold(img_contour, 0, 255, 0) - contours_text_rot, _ = cv2.findContours(threshrot.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + imgrayrot = cv2.cvtColor(img_contour, cv2.COLOR_BGR2GRAY) + _, threshrot = cv2.threshold(imgrayrot, 0, 255, 0) + contours_text_rot, _ = cv2.findContours(threshrot.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) ##contour_text_copy[:, 0, 0] = contour_text_copy[:, 0, 0] - box_ind[ ##0] ##contour_text_copy[:, 0, 1] = contour_text_copy[:, 0, 1] - box_ind[1] ##if add_boxes_coor_into_textlines: + ##print(np.shape(contours_text_rot[0]),'sjppo') ##contours_text_rot[0][:, 0, 0]=contours_text_rot[0][:, 0, 0] + box_ind[0] ##contours_text_rot[0][:, 0, 1]=contours_text_rot[0][:, 0, 1] + box_ind[1] cont_final.append(contours_text_rot[0]) + ##print(cont_final,'nadizzzz') return 
None, cont_final -def textline_contours_postprocessing(textline_mask, slope, - contour_text_interest, box_ind, - add_boxes_coor_into_textlines=False): - textline_mask = textline_mask * 255 + +def textline_contours_postprocessing(textline_mask, slope, contour_text_interest, box_ind, add_boxes_coor_into_textlines=False): + + textline_mask = np.repeat(textline_mask[:, :, np.newaxis], 3, axis=2) * 255 + textline_mask = textline_mask.astype(np.uint8) kernel = np.ones((5, 5), np.uint8) textline_mask = cv2.morphologyEx(textline_mask, cv2.MORPH_OPEN, kernel) textline_mask = cv2.morphologyEx(textline_mask, cv2.MORPH_CLOSE, kernel) textline_mask = cv2.erode(textline_mask, kernel, iterations=2) # textline_mask = cv2.erode(textline_mask, kernel, iterations=1) - x_help = 30 - y_help = 2 + # print(textline_mask.shape[0]/float(textline_mask.shape[1]),'miz') + try: + # if np.abs(slope)>.5 and textline_mask.shape[0]/float(textline_mask.shape[1])>3: + # plt.imshow(textline_mask) + # plt.show() - textline_mask_help = np.zeros((textline_mask.shape[0] + int(2 * y_help), - textline_mask.shape[1] + int(2 * x_help))) - textline_mask_help[y_help : y_help + textline_mask.shape[0], - x_help : x_help + textline_mask.shape[1]] = np.copy(textline_mask[:, :]) + # if abs(slope)>1: + # x_help=30 + # y_help=2 + # else: + # x_help=2 + # y_help=2 - dst = rotate_image(textline_mask_help, slope) - dst[dst != 0] = 1 + x_help = 30 + y_help = 2 - # if np.abs(slope)>.5 and textline_mask.shape[0]/float(textline_mask.shape[1])>3: - # plt.imshow(dst) - # plt.show() + textline_mask_help = np.zeros((textline_mask.shape[0] + int(2 * y_help), textline_mask.shape[1] + int(2 * x_help), 3)) + textline_mask_help[y_help : y_help + textline_mask.shape[0], x_help : x_help + textline_mask.shape[1], :] = np.copy(textline_mask[:, :, :]) - contour_text_copy = contour_text_interest.copy() - contour_text_copy[:, 0, 0] = contour_text_copy[:, 0, 0] - box_ind[0] - contour_text_copy[:, 0, 1] = contour_text_copy[:, 0, 1] - box_ind[1] + dst = rotate_image(textline_mask_help, slope) + dst = dst[:, :, 0] + dst[dst != 0] = 1 - img_contour = np.zeros((box_ind[3], box_ind[2])) - img_contour = cv2.fillPoly(img_contour, pts=[contour_text_copy], color=255) + # if np.abs(slope)>.5 and textline_mask.shape[0]/float(textline_mask.shape[1])>3: + # plt.imshow(dst) + # plt.show() - img_contour_help = np.zeros((img_contour.shape[0] + int(2 * y_help), - img_contour.shape[1] + int(2 * x_help))) - img_contour_help[y_help : y_help + img_contour.shape[0], - x_help : x_help + img_contour.shape[1]] = np.copy(img_contour[:, :]) + contour_text_copy = contour_text_interest.copy() - img_contour_rot = rotate_image(img_contour_help, slope) + contour_text_copy[:, 0, 0] = contour_text_copy[:, 0, 0] - box_ind[0] + contour_text_copy[:, 0, 1] = contour_text_copy[:, 0, 1] - box_ind[1] - _, threshrot = cv2.threshold(img_contour_rot, 0, 255, 0) - contours_text_rot, _ = cv2.findContours(threshrot.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + img_contour = np.zeros((box_ind[3], box_ind[2], 3)) + img_contour = cv2.fillPoly(img_contour, pts=[contour_text_copy], color=(255, 255, 255)) - len_con_text_rot = [len(contours_text_rot[ib]) for ib in range(len(contours_text_rot))] - ind_big_con = np.argmax(len_con_text_rot) + # if np.abs(slope)>.5 and textline_mask.shape[0]/float(textline_mask.shape[1])>3: + # plt.imshow(img_contour) + # plt.show() - if abs(slope) > 45: - _, contours_rotated_clean = separate_lines_vertical_cont( - textline_mask, contours_text_rot[ind_big_con], box_ind, 
slope, - add_boxes_coor_into_textlines=add_boxes_coor_into_textlines) - else: - _, contours_rotated_clean = separate_lines( - dst, contours_text_rot[ind_big_con], slope, x_help, y_help) + img_contour_help = np.zeros((img_contour.shape[0] + int(2 * y_help), img_contour.shape[1] + int(2 * x_help), 3)) + + img_contour_help[y_help : y_help + img_contour.shape[0], x_help : x_help + img_contour.shape[1], :] = np.copy(img_contour[:, :, :]) + + img_contour_rot = rotate_image(img_contour_help, slope) + + # plt.imshow(img_contour_rot_help) + # plt.show() + + # plt.imshow(dst_help) + # plt.show() + + # if np.abs(slope)>.5 and textline_mask.shape[0]/float(textline_mask.shape[1])>3: + # plt.imshow(img_contour_rot_help) + # plt.show() + + # plt.imshow(dst_help) + # plt.show() + + img_contour_rot = img_contour_rot.astype(np.uint8) + # dst_help = dst_help.astype(np.uint8) + imgrayrot = cv2.cvtColor(img_contour_rot, cv2.COLOR_BGR2GRAY) + _, threshrot = cv2.threshold(imgrayrot, 0, 255, 0) + contours_text_rot, _ = cv2.findContours(threshrot.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + len_con_text_rot = [len(contours_text_rot[ib]) for ib in range(len(contours_text_rot))] + ind_big_con = np.argmax(len_con_text_rot) + + # print('juzaa') + if abs(slope) > 45: + # print(add_boxes_coor_into_textlines,'avval') + _, contours_rotated_clean = separate_lines_vertical_cont(textline_mask, contours_text_rot[ind_big_con], box_ind, slope, add_boxes_coor_into_textlines=add_boxes_coor_into_textlines) + else: + _, contours_rotated_clean = separate_lines(dst, contours_text_rot[ind_big_con], slope, x_help, y_help) + + except: + + contours_rotated_clean = [] return contours_rotated_clean -def separate_lines_new2(img_crop, thetha, num_col, slope_region, logger=None, plotter=None): - if logger is None: - logger = getLogger(__package__) - if not np.prod(img_crop.shape): - return img_crop +def separate_lines_new2(img_path, thetha, num_col, slope_region, plotter=None): if num_col == 1: - num_patches = int(img_crop.shape[1] / 200.0) + num_patches = int(img_path.shape[1] / 200.0) else: - num_patches = int(img_crop.shape[1] / 140.0) - # num_patches=int(img_crop.shape[1]/200.) + num_patches = int(img_path.shape[1] / 140.0) + # num_patches=int(img_path.shape[1]/200.) 
if num_patches == 0: num_patches = 1 - img_patch_interest = img_crop[:, :] # [peaks_neg_true[14]-dis_up:peaks_neg_true[15]+dis_down ,:] + img_patch_ineterst = img_path[:, :] # [peaks_neg_true[14]-dis_up:peaks_neg_true[15]+dis_down ,:] - # plt.imshow(img_patch_interest) + # plt.imshow(img_patch_ineterst) # plt.show() - length_x = int(img_crop.shape[1] / float(num_patches)) + length_x = int(img_path.shape[1] / float(num_patches)) # margin = int(0.04 * length_x) just recently this was changed because it break lines into 2 margin = int(0.04 * length_x) + # print(margin,'margin') # if margin<=4: # margin = int(0.08 * length_x) + # margin=0 width_mid = length_x - 2 * margin - nxf = img_crop.shape[1] / float(width_mid) + + nxf = img_path.shape[1] / float(width_mid) if nxf > int(nxf): nxf = int(nxf) + 1 @@ -1433,16 +1503,16 @@ def separate_lines_new2(img_crop, thetha, num_col, slope_region, logger=None, pl index_x_d = i * width_mid index_x_u = index_x_d + length_x - if index_x_u > img_crop.shape[1]: - index_x_u = img_crop.shape[1] - index_x_d = img_crop.shape[1] - length_x + if index_x_u > img_path.shape[1]: + index_x_u = img_path.shape[1] + index_x_d = img_path.shape[1] - length_x # img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] - img_xline = img_patch_interest[:, index_x_d:index_x_u] + img_xline = img_patch_ineterst[:, index_x_d:index_x_u] + sigma = 2 try: - assert img_xline.any() - slope_xline = return_deskew_slop(img_xline, 2, logger=logger, plotter=plotter) + slope_xline = return_deskew_slop(img_xline, sigma, plotter=plotter) except: slope_xline = 0 @@ -1451,12 +1521,14 @@ def separate_lines_new2(img_crop, thetha, num_col, slope_region, logger=None, pl # if abs(slope_region)>70 and abs(slope_xline)<25: # slope_xline=[slope_region][0] slopes_tile_wise.append(slope_xline) + # print(slope_xline,'xlineeee') img_line_rotated = rotate_image(img_xline, slope_xline) img_line_rotated[:, :][img_line_rotated[:, :] != 0] = 1 - - img_patch_interest = img_crop[:, :] # [peaks_neg_true[14]-dis_up:peaks_neg_true[14]+dis_down ,:] - img_patch_interest_revised = np.zeros(img_patch_interest.shape) + # print(slopes_tile_wise,'slopes_tile_wise') + img_patch_ineterst = img_path[:, :] # [peaks_neg_true[14]-dis_up:peaks_neg_true[14]+dis_down ,:] + + img_patch_ineterst_revised = np.zeros(img_patch_ineterst.shape) for i in range(nxf): if i == 0: @@ -1466,18 +1538,18 @@ def separate_lines_new2(img_crop, thetha, num_col, slope_region, logger=None, pl index_x_d = i * width_mid index_x_u = index_x_d + length_x - if index_x_u > img_crop.shape[1]: - index_x_u = img_crop.shape[1] - index_x_d = img_crop.shape[1] - length_x + if index_x_u > img_path.shape[1]: + index_x_u = img_path.shape[1] + index_x_d = img_path.shape[1] - length_x - img_xline = img_patch_interest[:, index_x_d:index_x_u] + img_xline = img_patch_ineterst[:, index_x_d:index_x_u] img_int = np.zeros((img_xline.shape[0], img_xline.shape[1])) img_int[:, :] = img_xline[:, :] # img_patch_org[:,:,0] img_resized = np.zeros((int(img_int.shape[0] * (1.2)), int(img_int.shape[1] * (3)))) - img_resized[int(img_int.shape[0] * (0.1)) : int(img_int.shape[0] * (0.1)) + img_int.shape[0], - int(img_int.shape[1] * (1.0)) : int(img_int.shape[1] * (1.0)) + img_int.shape[1]] = img_int[:, :] + + img_resized[int(img_int.shape[0] * (0.1)) : int(img_int.shape[0] * (0.1)) + img_int.shape[0], int(img_int.shape[1] * (1)) : int(img_int.shape[1] * (1)) + img_int.shape[1]] = img_int[:, :] # plt.imshow(img_xline) # plt.show() img_line_rotated = rotate_image(img_resized, 
slopes_tile_wise[i]) @@ -1488,293 +1560,240 @@ def separate_lines_new2(img_crop, thetha, num_col, slope_region, logger=None, pl img_patch_separated_returned = rotate_image(img_patch_separated, -slopes_tile_wise[i]) img_patch_separated_returned[:, :][img_patch_separated_returned[:, :] != 0] = 1 - img_patch_separated_returned_true_size = img_patch_separated_returned[ - int(img_int.shape[0] * (0.1)) : int(img_int.shape[0] * (0.1)) + img_int.shape[0], - int(img_int.shape[1] * (1.0)) : int(img_int.shape[1] * (1.0)) + img_int.shape[1]] + img_patch_separated_returned_true_size = img_patch_separated_returned[int(img_int.shape[0] * (0.1)) : int(img_int.shape[0] * (0.1)) + img_int.shape[0], int(img_int.shape[1] * (1)) : int(img_int.shape[1] * (1)) + img_int.shape[1]] img_patch_separated_returned_true_size = img_patch_separated_returned_true_size[:, margin : length_x - margin] - img_patch_interest_revised[:, index_x_d + margin : index_x_u - margin] = img_patch_separated_returned_true_size + img_patch_ineterst_revised[:, index_x_d + margin : index_x_u - margin] = img_patch_separated_returned_true_size - return img_patch_interest_revised + # plt.imshow(img_patch_ineterst_revised) + # plt.show() + return img_patch_ineterst_revised -@wrap_ndarray_shared(kw='img') -def do_image_rotation(angle, img=None, sigma_des=1.0, logger=None): - if logger is None: - logger = getLogger(__package__) - img_rot = rotate_image(img, angle) - img_rot[img_rot!=0] = 1 - try: - var = find_num_col_deskew(img_rot, sigma_des, 20.3) - except: - logger.exception("cannot determine variance for angle %.2f°", angle) - var = 0 - return var +def return_deskew_slop(img_patch_org, sigma_des, main_page=False, plotter=None): -def return_deskew_slop(img_patch_org, sigma_des,n_tot_angles=100, - main_page=False, logger=None, plotter=None, map=None): if main_page and plotter: plotter.save_plot_of_textline_density(img_patch_org) - + img_int=np.zeros((img_patch_org.shape[0],img_patch_org.shape[1])) img_int[:,:]=img_patch_org[:,:]#img_patch_org[:,:,0] + + max_shape=np.max(img_int.shape) img_resized=np.zeros((int( max_shape*(1.1) ) , int( max_shape*(1.1) ) )) + onset_x=int((img_resized.shape[1]-img_int.shape[1])/2.) onset_y=int((img_resized.shape[0]-img_int.shape[0])/2.) 
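Both the helpers removed above (`do_image_rotation`/`get_smallest_skew`) and the restored inline loops below implement the same idea: rotate the binarized patch by each candidate angle and keep the angle whose horizontal projection profile is most sharply peaked. A hedged sketch of that scoring loop, using scipy in place of eynollah's `rotate_image` and a Gaussian-smoothed row projection as a stand-in for `find_num_col_deskew` (whose exact statistic differs):

```python
# Sketch under stated assumptions; not the project's actual implementation.
import numpy as np
from scipy.ndimage import rotate, gaussian_filter1d

def projection_variance(binary_img, sigma):
    # crisp horizontal text lines yield a strongly peaked row profile -> high variance
    profile = binary_img.sum(axis=1).astype(float)
    return np.var(gaussian_filter1d(profile, sigma))

def best_skew_angle(binary_img, angles, sigma=2):
    scores = []
    for angle in angles:
        rot = rotate(binary_img, angle, reshape=False, order=0)
        rot = (rot != 0).astype(np.uint8)   # re-binarize after interpolation
        scores.append(projection_variance(rot, sigma))
    return angles[int(np.argmax(scores))]

# Coarse-to-fine search as in the code below: probe [-45, 0, 45, 90] first for
# wide pages, then refine with np.linspace(angle - 22.5, angle + 22.5, 100).
```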
+ #img_resized=np.zeros((int( img_int.shape[0]*(1.8) ) , int( img_int.shape[1]*(2.6) ) )) - #img_resized[ int( img_int.shape[0]*(.4)):int( img_int.shape[0]*(.4))+img_int.shape[0], - # int( img_int.shape[1]*(.8)):int( img_int.shape[1]*(.8))+img_int.shape[1] ]=img_int[:,:] + + + + #img_resized[ int( img_int.shape[0]*(.4)):int( img_int.shape[0]*(.4))+img_int.shape[0] , int( img_int.shape[1]*(.8)):int( img_int.shape[1]*(.8))+img_int.shape[1] ]=img_int[:,:] img_resized[ onset_y:onset_y+img_int.shape[0] , onset_x:onset_x+img_int.shape[1] ]=img_int[:,:] - if main_page and img_patch_org.shape[1] > img_patch_org.shape[0]: - angles = np.array([-45, 0, 45, 90,]) - angle, _ = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) + #print(img_resized.shape,'img_resizedshape') + #plt.imshow(img_resized) + #plt.show() - angles = np.linspace(angle - 22.5, angle + 22.5, n_tot_angles) - angle, _ = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) - elif main_page: - #angles = np.linspace(-12, 12, n_tot_angles)#np.array([0 , 45 , 90 , -45]) - angles = np.concatenate((np.linspace(-12, -7, n_tot_angles // 4), - np.linspace(-6, 6, n_tot_angles // 2), - np.linspace(7, 12, n_tot_angles // 4))) - angle, var = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) + if main_page and img_patch_org.shape[1]>img_patch_org.shape[0]: + + #plt.imshow(img_resized) + #plt.show() + angels=np.array([-45, 0 , 45 , 90 , ])#np.linspace(-12,12,100)#np.array([0 , 45 , 90 , -45]) + + var_res=[] + + for rot in angels: + img_rot=rotate_image(img_resized,rot) + #plt.imshow(img_rot) + #plt.show() + img_rot[img_rot!=0]=1 + #neg_peaks,var_spectrum=self.find_num_col_deskew(img_rot,sigma_des,20.3 ) + #print(var_spectrum,'var_spectrum') + try: + var_spectrum=find_num_col_deskew(img_rot,sigma_des,20.3 ) + ##print(rot,var_spectrum,'var_spectrum') + except: + var_spectrum=0 + var_res.append(var_spectrum) + try: + var_res=np.array(var_res) + ang_int=angels[np.argmax(var_res)]#angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] + except: + ang_int=0 + + + angels=np.linspace(ang_int-22.5,ang_int+22.5,100) + + var_res=[] + for rot in angels: + img_rot=rotate_image(img_resized,rot) + ##plt.imshow(img_rot) + ##plt.show() + img_rot[img_rot!=0]=1 + try: + var_spectrum=find_num_col_deskew(img_rot,sigma_des,20.3 ) + except: + var_spectrum=0 + var_res.append(var_spectrum) + try: + var_res=np.array(var_res) + ang_int=angels[np.argmax(var_res)]#angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] + except: + ang_int=0 + + elif main_page and img_patch_org.shape[1]<=img_patch_org.shape[0]: + + #plt.imshow(img_resized) + #plt.show() + angels=np.linspace(-12,12,100)#np.array([0 , 45 , 90 , -45]) + + + var_res=[] + + for rot in angels: + img_rot=rotate_image(img_resized,rot) + #plt.imshow(img_rot) + #plt.show() + img_rot[img_rot!=0]=1 + #neg_peaks,var_spectrum=self.find_num_col_deskew(img_rot,sigma_des,20.3 ) + #print(var_spectrum,'var_spectrum') + try: + var_spectrum=find_num_col_deskew(img_rot,sigma_des,20.3 ) + + except: + var_spectrum=0 + + var_res.append(var_spectrum) + + + if plotter: + plotter.save_plot_of_rotation_angle(angels, var_res) + try: + var_res=np.array(var_res) + ang_int=angels[np.argmax(var_res)]#angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] + except: + ang_int=0 early_slope_edge=11 - if abs(angle) > early_slope_edge: - if 
angle < 0: - angles2 = np.linspace(-90, -12, n_tot_angles) - else: - angles2 = np.linspace(90, 12, n_tot_angles) - angle2, var2 = get_smallest_skew(img_resized, sigma_des, angles2, map=map, logger=logger, plotter=plotter) - if var2 > var: - angle = angle2 + if abs(ang_int)>early_slope_edge and ang_int<0: + angels=np.linspace(-90,-12,100) + var_res=[] + for rot in angels: + img_rot=rotate_image(img_resized,rot) + ##plt.imshow(img_rot) + ##plt.show() + img_rot[img_rot!=0]=1 + try: + var_spectrum=find_num_col_deskew(img_rot,sigma_des,20.3 ) + except: + var_spectrum=0 + var_res.append(var_spectrum) + try: + var_res=np.array(var_res) + ang_int=angels[np.argmax(var_res)]#angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] + except: + ang_int=0 + + elif abs(ang_int)>early_slope_edge and ang_int>0: + + angels=np.linspace(90,12,100) + var_res=[] + for rot in angels: + img_rot=rotate_image(img_resized,rot) + ##plt.imshow(img_rot) + ##plt.show() + img_rot[img_rot!=0]=1 + try: + var_spectrum=find_num_col_deskew(img_rot,sigma_des,20.3 ) + #print(indexer,'indexer') + except: + var_spectrum=0 + var_res.append(var_spectrum) + try: + var_res=np.array(var_res) + ang_int=angels[np.argmax(var_res)]#angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] + except: + ang_int=0 else: - angles = np.linspace(-25, 25, int(0.5 * n_tot_angles) + 10) - angle, var = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) + angels=np.linspace(-25,25,60) + var_res=[] + indexer=0 + for rot in angels: + img_rot=rotate_image(img_resized,rot) + #plt.imshow(img_rot) + #plt.show() + img_rot[img_rot!=0]=1 + #neg_peaks,var_spectrum=self.find_num_col_deskew(img_rot,sigma_des,20.3 ) + #print(var_spectrum,'var_spectrum') + try: + var_spectrum=find_num_col_deskew(img_rot,sigma_des,20.3 ) + except: + var_spectrum=0 + var_res.append(var_spectrum) + try: + var_res=np.array(var_res) + ang_int=angels[np.argmax(var_res)]#angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] + except: + ang_int=0 + + #plt.plot(var_res) + #plt.show() + ##plt.plot(mom3_res) + ##plt.show() + #print(ang_int,'ang_int111') early_slope_edge=22 - if abs(angle) > early_slope_edge: - if angle < 0: - angles2 = np.linspace(-90, -25, int(0.5 * n_tot_angles) + 10) - else: - angles2 = np.linspace(90, 25, int(0.5 * n_tot_angles) + 10) - angle2, var2 = get_smallest_skew(img_resized, sigma_des, angles2, map=map, logger=logger, plotter=plotter) - if var2 > var: - angle = angle2 - return angle + if abs(ang_int)>early_slope_edge and ang_int<0: -def get_smallest_skew(img, sigma_des, angles, logger=None, plotter=None, map=map): - if logger is None: - logger = getLogger(__package__) - if map is None: - results = [do_image_rotation.__wrapped__(angle, img=img, sigma_des=sigma_des, logger=logger) - for angle in angles] - else: - with share_ndarray(img) as img_shared: - results = list(map(partial(do_image_rotation, img=img_shared, sigma_des=sigma_des, logger=None), - angles)) - if plotter: - plotter.save_plot_of_rotation_angle(angles, results) - try: - var_res = np.array(results) - assert var_res.any() - idx = np.argmax(var_res) - angle = angles[idx] - var = var_res[idx] - except: - logger.exception("cannot determine best angle among %s", str(angles)) - angle = 0 - var = 0 - return angle, var + angels=np.linspace(-90,-25,60) -@wrap_ndarray_shared(kw='textline_mask_tot_ea') -def do_work_of_slopes_new( - box_text, contour, contour_par, - 
textline_mask_tot_ea=None, slope_deskew=0.0, - logger=None, MAX_SLOPE=999, KERNEL=None, plotter=None -): - if KERNEL is None: - KERNEL = np.ones((5, 5), np.uint8) - if logger is None: - logger = getLogger(__package__) - logger.debug('enter do_work_of_slopes_new') + var_res=[] - x, y, w, h = box_text - crop_coor = box2rect(box_text) - mask_textline = np.zeros(textline_mask_tot_ea.shape) - mask_textline = cv2.fillPoly(mask_textline, pts=[contour], color=(1,1,1)) - all_text_region_raw = textline_mask_tot_ea * mask_textline - all_text_region_raw = all_text_region_raw[y: y + h, x: x + w].astype(np.uint8) - img_int_p = all_text_region_raw[:,:] - img_int_p = cv2.erode(img_int_p, KERNEL, iterations=2) + for rot in angels: + img_rot=rotate_image(img_resized,rot) + ##plt.imshow(img_rot) + ##plt.show() + img_rot[img_rot!=0]=1 + try: + var_spectrum=find_num_col_deskew(img_rot,sigma_des,20.3 ) + except: + var_spectrum=0 + var_res.append(var_spectrum) - if not np.prod(img_int_p.shape) or img_int_p.shape[0] /img_int_p.shape[1] < 0.1: - slope = 0 - slope_for_all = slope_deskew - all_text_region_raw = textline_mask_tot_ea[y: y + h, x: x + w] - cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contour_par, box_text, 0) - else: - try: - textline_con, hierarchy = return_contours_of_image(img_int_p) - textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con, - hierarchy, - max_area=1, min_area=0.00008) - y_diff_mean = find_contours_mean_y_diff(textline_con_fil) if len(textline_con_fil) > 1 else np.NaN - if np.isnan(y_diff_mean): - slope_for_all = MAX_SLOPE - else: - sigma_des = max(1, int(y_diff_mean * (4.0 / 40.0))) - img_int_p[img_int_p > 0] = 1 - slope_for_all = return_deskew_slop(img_int_p, sigma_des, logger=logger, plotter=plotter) - if abs(slope_for_all) <= 0.5: - slope_for_all = slope_deskew - except: - logger.exception("cannot determine angle of contours") - slope_for_all = MAX_SLOPE - - if slope_for_all == MAX_SLOPE: - slope_for_all = slope_deskew - slope = slope_for_all - mask_only_con_region = np.zeros(textline_mask_tot_ea.shape) - mask_only_con_region = cv2.fillPoly(mask_only_con_region, pts=[contour_par], color=(1, 1, 1)) - - all_text_region_raw = textline_mask_tot_ea[y: y + h, x: x + w].copy() - mask_only_con_region = mask_only_con_region[y: y + h, x: x + w] - - all_text_region_raw[mask_only_con_region == 0] = 0 - cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contour_par, box_text) - - return cnt_clean_rot, crop_coor, slope - -@wrap_ndarray_shared(kw='textline_mask_tot_ea') -@wrap_ndarray_shared(kw='mask_texts_only') -def do_work_of_slopes_new_curved( - box_text, contour_par, - textline_mask_tot_ea=None, mask_texts_only=None, - num_col=1, scale_par=1.0, slope_deskew=0.0, - logger=None, MAX_SLOPE=999, KERNEL=None, plotter=None -): - if KERNEL is None: - KERNEL = np.ones((5, 5), np.uint8) - if logger is None: - logger = getLogger(__package__) - logger.debug("enter do_work_of_slopes_new_curved") - - x, y, w, h = box_text - all_text_region_raw = textline_mask_tot_ea[y: y + h, x: x + w].astype(np.uint8) - img_int_p = all_text_region_raw[:, :] - - # img_int_p=cv2.erode(img_int_p,KERNEL,iterations = 2) - # plt.imshow(img_int_p) - # plt.show() - - if not np.prod(img_int_p.shape) or img_int_p.shape[0] / img_int_p.shape[1] < 0.1: - slope = 0 - slope_for_all = slope_deskew - else: - try: - textline_con, hierarchy = return_contours_of_image(img_int_p) - textline_con_fil = filter_contours_area_of_image(img_int_p, 
textline_con, - hierarchy, - max_area=1, min_area=0.0008) - y_diff_mean = find_contours_mean_y_diff(textline_con_fil) if len(textline_con_fil) > 1 else np.NaN - if np.isnan(y_diff_mean): - slope_for_all = MAX_SLOPE - else: - sigma_des = max(1, int(y_diff_mean * (4.0 / 40.0))) - img_int_p[img_int_p > 0] = 1 - slope_for_all = return_deskew_slop(img_int_p, sigma_des, logger=logger, plotter=plotter) - if abs(slope_for_all) < 0.5: - slope_for_all = slope_deskew - except: - logger.exception("cannot determine angle of contours") - slope_for_all = MAX_SLOPE - - if slope_for_all == MAX_SLOPE: - slope_for_all = slope_deskew - slope = slope_for_all - - crop_coor = box2rect(box_text) - - if abs(slope_for_all) < 45: - textline_region_in_image = np.zeros(textline_mask_tot_ea.shape) - x, y, w, h = cv2.boundingRect(contour_par) - mask_biggest = np.zeros(mask_texts_only.shape) - mask_biggest = cv2.fillPoly(mask_biggest, pts=[contour_par], color=(1, 1, 1)) - mask_region_in_patch_region = mask_biggest[y : y + h, x : x + w] - textline_biggest_region = mask_biggest * textline_mask_tot_ea - - textline_rotated_separated = separate_lines_new2(textline_biggest_region[y: y+h, x: x+w], 0, - num_col, slope_for_all, - logger=logger, plotter=plotter) - - - textline_rotated_separated[mask_region_in_patch_region[:, :] != 1] = 0 - - textline_region_in_image[y : y + h, x : x + w] = textline_rotated_separated - - - pixel_img = 1 - cnt_textlines_in_image = return_contours_of_interested_textline(textline_region_in_image, pixel_img) - - textlines_cnt_per_region = [] - for jjjj in range(len(cnt_textlines_in_image)): - mask_biggest2 = np.zeros(mask_texts_only.shape) - mask_biggest2 = cv2.fillPoly(mask_biggest2, pts=[cnt_textlines_in_image[jjjj]], color=(1, 1, 1)) - if num_col + 1 == 1: - mask_biggest2 = cv2.dilate(mask_biggest2, KERNEL, iterations=5) - else: - mask_biggest2 = cv2.dilate(mask_biggest2, KERNEL, iterations=4) - - pixel_img = 1 - mask_biggest2 = resize_image(mask_biggest2, - int(mask_biggest2.shape[0] * scale_par), - int(mask_biggest2.shape[1] * scale_par)) - cnt_textlines_in_image_ind = return_contours_of_interested_textline(mask_biggest2, pixel_img) try: - textlines_cnt_per_region.append(cnt_textlines_in_image_ind[0]) - except Exception as why: - logger.error(why) - else: - textlines_cnt_per_region = textline_contours_postprocessing(all_text_region_raw, - slope_for_all, contour_par, - box_text, True) + var_res=np.array(var_res) + ang_int=angels[np.argmax(var_res)]#angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] + except: + ang_int=0 - return textlines_cnt_per_region[::-1], crop_coor, slope + elif abs(ang_int)>early_slope_edge and ang_int>0: -@wrap_ndarray_shared(kw='textline_mask_tot_ea') -def do_work_of_slopes_new_light( - box_text, contour, contour_par, - textline_mask_tot_ea=None, slope_deskew=0, textline_light=True, - logger=None -): - if logger is None: - logger = getLogger(__package__) - logger.debug('enter do_work_of_slopes_new_light') + angels=np.linspace(90,25,60) - x, y, w, h = box_text - crop_coor = box2rect(box_text) - mask_textline = np.zeros(textline_mask_tot_ea.shape) - mask_textline = cv2.fillPoly(mask_textline, pts=[contour], color=(1,1,1)) - all_text_region_raw = textline_mask_tot_ea * mask_textline - all_text_region_raw = all_text_region_raw[y: y + h, x: x + w].astype(np.uint8) + var_res=[] - mask_only_con_region = np.zeros(textline_mask_tot_ea.shape) - mask_only_con_region = cv2.fillPoly(mask_only_con_region, pts=[contour_par], color=(1, 1, 1)) + indexer=0 + 
for rot in angels: + img_rot=rotate_image(img_resized,rot) + ##plt.imshow(img_rot) + ##plt.show() + img_rot[img_rot!=0]=1 + try: + var_spectrum=find_num_col_deskew(img_rot,sigma_des,20.3 ) + #print(indexer,'indexer') + except: + var_spectrum=0 - if textline_light: - all_text_region_raw = np.copy(textline_mask_tot_ea) - all_text_region_raw[mask_only_con_region == 0] = 0 - cnt_clean_rot_raw, hir_on_cnt_clean_rot = return_contours_of_image(all_text_region_raw) - cnt_clean_rot = filter_contours_area_of_image(all_text_region_raw, cnt_clean_rot_raw, hir_on_cnt_clean_rot, - max_area=1, min_area=0.00001) - else: - all_text_region_raw = np.copy(textline_mask_tot_ea[y: y + h, x: x + w]) - mask_only_con_region = mask_only_con_region[y: y + h, x: x + w] - all_text_region_raw[mask_only_con_region == 0] = 0 - cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_deskew, contour_par, box_text) + var_res.append(var_spectrum) + try: + var_res=np.array(var_res) + ang_int=angels[np.argmax(var_res)]#angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] + except: + ang_int=0 + + return ang_int - return cnt_clean_rot, crop_coor, slope_deskew diff --git a/src/eynollah/utils/xml.py b/qurator/eynollah/utils/xml.py similarity index 84% rename from src/eynollah/utils/xml.py rename to qurator/eynollah/utils/xml.py index 88d1df8..ac02190 100644 --- a/src/eynollah/utils/xml.py +++ b/qurator/eynollah/utils/xml.py @@ -21,6 +21,7 @@ from ocrd_models.ocrd_page import ( RegionRefType, SeparatorRegionType, TableRegionType, + TextEquivType, TextLineType, TextRegionType, UnorderedGroupIndexedType, @@ -46,26 +47,24 @@ def create_page_xml(imageFilename, height, width): )) return pcgts -def xml_reading_order(page, order_of_texts, id_of_marginalia_left, id_of_marginalia_right): +def xml_reading_order(page, order_of_texts, id_of_marginalia): region_order = ReadingOrderType() og = OrderedGroupType(id="ro357564684568544579089") page.set_ReadingOrder(region_order) region_order.set_OrderedGroup(og) region_counter = EynollahIdCounter() - - for id_marginal in id_of_marginalia_left: - og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=id_marginal)) + for idx_textregion, _ in enumerate(order_of_texts): + og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=region_counter.region_id(order_of_texts[idx_textregion] + 1))) region_counter.inc('region') - - for idx_textregion in order_of_texts: - og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=region_counter.region_id(idx_textregion + 1))) - region_counter.inc('region') - - for id_marginal in id_of_marginalia_right: + for id_marginal in id_of_marginalia: og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=id_marginal)) region_counter.inc('region') -def order_and_id_of_texts(found_polygons_text_region, found_polygons_text_region_h, indexes_sorted, index_of_types, kind_of_texts, ref_point): +def order_and_id_of_texts(found_polygons_text_region, found_polygons_text_region_h, matrix_of_orders, indexes_sorted, index_of_types, kind_of_texts, ref_point): + indexes_sorted = np.array(indexes_sorted) + index_of_types = np.array(index_of_types) + kind_of_texts = np.array(kind_of_texts) + id_of_texts = [] order_of_texts = [] @@ -74,7 +73,7 @@ def order_and_id_of_texts(found_polygons_text_region, found_polygons_text_region index_of_types_2 = index_of_types[kind_of_texts == 2] 
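The `xml_reading_order` hunk above rebuilds the PAGE-XML ReadingOrder element as a single OrderedGroup of indexed region references. A compact sketch of the underlying ocrd_models calls (the same types the code above uses), stripped of eynollah's id bookkeeping; the group id and region ids here are assumed example values:

```python
from ocrd_models.ocrd_page import (
    OrderedGroupType,
    ReadingOrderType,
    RegionRefIndexedType,
)

def set_reading_order(page, region_ids):
    # attach a ReadingOrder with one OrderedGroup to the page
    region_order = ReadingOrderType()
    og = OrderedGroupType(id="ro_example")
    page.set_ReadingOrder(region_order)
    region_order.set_OrderedGroup(og)
    # one RegionRefIndexed per region, in reading order
    for index, region_id in enumerate(region_ids):
        og.add_RegionRefIndexed(
            RegionRefIndexedType(index=str(index), regionRef=region_id))

# set_reading_order(page, ["region_0001", "region_0002", "region_0003"])
```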
indexes_sorted_2 = indexes_sorted[kind_of_texts == 2] - + counter = EynollahIdCounter(region_idx=ref_point) for idx_textregion, _ in enumerate(found_polygons_text_region): id_of_texts.append(counter.next_region_id) diff --git a/qurator/eynollah/writer.py b/qurator/eynollah/writer.py new file mode 100644 index 0000000..8dfd2b2 --- /dev/null +++ b/qurator/eynollah/writer.py @@ -0,0 +1,260 @@ +# pylint: disable=too-many-locals,wrong-import-position,too-many-lines,too-many-statements,chained-comparison,fixme,broad-except,c-extension-no-member +# pylint: disable=import-error +from pathlib import Path +import os.path + +from .utils.xml import create_page_xml, xml_reading_order +from .utils.counter import EynollahIdCounter + +from ocrd_utils import getLogger +from ocrd_models.ocrd_page import ( + BorderType, + CoordsType, + TextEquivType, + PcGtsType, + TextLineType, + TextRegionType, + ImageRegionType, + TableRegionType, + SeparatorRegionType, + to_xml + ) +import numpy as np + +class EynollahXmlWriter(): + + def __init__(self, *, dir_out, image_filename, curved_line, pcgts=None): + self.logger = getLogger('eynollah.writer') + self.counter = EynollahIdCounter() + self.dir_out = dir_out + self.image_filename = image_filename + self.curved_line = curved_line + self.pcgts = pcgts + self.scale_x = None # XXX set outside __init__ + self.scale_y = None # XXX set outside __init__ + self.height_org = None # XXX set outside __init__ + self.width_org = None # XXX set outside __init__ + + @property + def image_filename_stem(self): + return Path(Path(self.image_filename).name).stem + + def calculate_page_coords(self, cont_page): + self.logger.debug('enter calculate_page_coords') + points_page_print = "" + for _, contour in enumerate(cont_page[0]): + if len(contour) == 2: + points_page_print += str(int((contour[0]) / self.scale_x)) + points_page_print += ',' + points_page_print += str(int((contour[1]) / self.scale_y)) + else: + points_page_print += str(int((contour[0][0]) / self.scale_x)) + points_page_print += ',' + points_page_print += str(int((contour[0][1] ) / self.scale_y)) + points_page_print = points_page_print + ' ' + return points_page_print[:-1] + + def serialize_lines_in_marginal(self, marginal_region, all_found_texline_polygons_marginals, marginal_idx, page_coord, all_box_coord_marginals, slopes_marginals, counter): + for j in range(len(all_found_texline_polygons_marginals[marginal_idx])): + coords = CoordsType() + textline = TextLineType(id=counter.next_line_id, Coords=coords) + marginal_region.add_TextLine(textline) + textline.add_TextEquiv(TextEquivType(Unicode='')) + points_co = '' + for l in range(len(all_found_texline_polygons_marginals[marginal_idx][j])): + if not self.curved_line: + if len(all_found_texline_polygons_marginals[marginal_idx][j][l]) == 2: + textline_x_coord = max(0, int((all_found_texline_polygons_marginals[marginal_idx][j][l][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x) ) + textline_y_coord = max(0, int((all_found_texline_polygons_marginals[marginal_idx][j][l][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y) ) + else: + textline_x_coord = max(0, int((all_found_texline_polygons_marginals[marginal_idx][j][l][0][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x) ) + textline_y_coord = max(0, int((all_found_texline_polygons_marginals[marginal_idx][j][l][0][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y) ) + points_co += str(textline_x_coord) + points_co += 
',' + points_co += str(textline_y_coord) + if self.curved_line and np.abs(slopes_marginals[marginal_idx]) <= 45: + if len(all_found_texline_polygons_marginals[marginal_idx][j][l]) == 2: + points_co += str(int((all_found_texline_polygons_marginals[marginal_idx][j][l][0] + page_coord[2]) / self.scale_x)) + points_co += ',' + points_co += str(int((all_found_texline_polygons_marginals[marginal_idx][j][l][1] + page_coord[0]) / self.scale_y)) + else: + points_co += str(int((all_found_texline_polygons_marginals[marginal_idx][j][l][0][0] + page_coord[2]) / self.scale_x)) + points_co += ',' + points_co += str(int((all_found_texline_polygons_marginals[marginal_idx][j][l][0][1] + page_coord[0]) / self.scale_y)) + + elif self.curved_line and np.abs(slopes_marginals[marginal_idx]) > 45: + if len(all_found_texline_polygons_marginals[marginal_idx][j][l]) == 2: + points_co += str(int((all_found_texline_polygons_marginals[marginal_idx][j][l][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x)) + points_co += ',' + points_co += str(int((all_found_texline_polygons_marginals[marginal_idx][j][l][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y)) + else: + points_co += str(int((all_found_texline_polygons_marginals[marginal_idx][j][l][0][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x)) + points_co += ',' + points_co += str(int((all_found_texline_polygons_marginals[marginal_idx][j][l][0][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y)) + points_co += ' ' + coords.set_points(points_co[:-1]) + + def serialize_lines_in_region(self, text_region, all_found_texline_polygons, region_idx, page_coord, all_box_coord, slopes, counter): + self.logger.debug('enter serialize_lines_in_region') + for j in range(len(all_found_texline_polygons[region_idx])): + coords = CoordsType() + textline = TextLineType(id=counter.next_line_id, Coords=coords, TextEquiv=[TextEquivType(index=0, Unicode='')]) + text_region.add_TextLine(textline) + region_bboxes = all_box_coord[region_idx] + points_co = '' + for idx_contour_textline, contour_textline in enumerate(all_found_texline_polygons[region_idx][j]): + if not self.curved_line: + if len(contour_textline) == 2: + textline_x_coord = max(0, int((contour_textline[0] + region_bboxes[2] + page_coord[2]) / self.scale_x)) + textline_y_coord = max(0, int((contour_textline[1] + region_bboxes[0] + page_coord[0]) / self.scale_y)) + else: + textline_x_coord = max(0, int((contour_textline[0][0] + region_bboxes[2] + page_coord[2]) / self.scale_x)) + textline_y_coord = max(0, int((contour_textline[0][1] + region_bboxes[0] + page_coord[0]) / self.scale_y)) + points_co += str(textline_x_coord) + points_co += ',' + points_co += str(textline_y_coord) + + if self.curved_line and np.abs(slopes[region_idx]) <= 45: + if len(contour_textline) == 2: + points_co += str(int((contour_textline[0] + page_coord[2]) / self.scale_x)) + points_co += ',' + points_co += str(int((contour_textline[1] + page_coord[0]) / self.scale_y)) + else: + points_co += str(int((contour_textline[0][0] + page_coord[2]) / self.scale_x)) + points_co += ',' + points_co += str(int((contour_textline[0][1] + page_coord[0])/self.scale_y)) + elif self.curved_line and np.abs(slopes[region_idx]) > 45: + if len(contour_textline)==2: + points_co += str(int((contour_textline[0] + region_bboxes[2] + page_coord[2])/self.scale_x)) + points_co += ',' + points_co += str(int((contour_textline[1] + region_bboxes[0] + page_coord[0])/self.scale_y)) + 
else: + points_co += str(int((contour_textline[0][0] + region_bboxes[2]+page_coord[2])/self.scale_x)) + points_co += ',' + points_co += str(int((contour_textline[0][1] + region_bboxes[0]+page_coord[0])/self.scale_y)) + points_co += ' ' + coords.set_points(points_co[:-1]) + + def write_pagexml(self, pcgts): + out_fname = os.path.join(self.dir_out, self.image_filename_stem) + ".xml" + self.logger.info("output filename: '%s'", out_fname) + with open(out_fname, 'w') as f: + f.write(to_xml(pcgts)) + + def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_texline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals, all_found_texline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml): + self.logger.debug('enter build_pagexml_no_full_layout') + + # create the file structure + pcgts = self.pcgts if self.pcgts else create_page_xml(self.image_filename, self.height_org, self.width_org) + page = pcgts.get_Page() + page.set_Border(BorderType(Coords=CoordsType(points=self.calculate_page_coords(cont_page)))) + + counter = EynollahIdCounter() + if len(found_polygons_text_region) > 0: + _counter_marginals = EynollahIdCounter(region_idx=len(order_of_texts)) + id_of_marginalia = [_counter_marginals.next_region_id for _ in found_polygons_marginals] + xml_reading_order(page, order_of_texts, id_of_marginalia) + + for mm in range(len(found_polygons_text_region)): + textregion = TextRegionType(id=counter.next_region_id, type_='paragraph', + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord)), + TextEquiv=[TextEquivType(index=0, Unicode='')]) + page.add_TextRegion(textregion) + self.serialize_lines_in_region(textregion, all_found_texline_polygons, mm, page_coord, all_box_coord, slopes, counter) + + for mm in range(len(found_polygons_marginals)): + marginal = TextRegionType(id=counter.next_region_id, type_='marginalia', + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals[mm], page_coord))) + page.add_TextRegion(marginal) + self.serialize_lines_in_marginal(marginal, all_found_texline_polygons_marginals, mm, page_coord, all_box_coord_marginals, slopes_marginals, counter) + + for mm in range(len(found_polygons_text_region_img)): + img_region = ImageRegionType(id=counter.next_region_id, Coords=CoordsType()) + page.add_ImageRegion(img_region) + points_co = '' + for lmm in range(len(found_polygons_text_region_img[mm])): + points_co += str(int((found_polygons_text_region_img[mm][lmm,0,0] + page_coord[2]) / self.scale_x)) + points_co += ',' + points_co += str(int((found_polygons_text_region_img[mm][lmm,0,1] + page_coord[0]) / self.scale_y)) + points_co += ' ' + img_region.get_Coords().set_points(points_co[:-1]) + + for mm in range(len(polygons_lines_to_be_written_in_xml)): + sep_hor = SeparatorRegionType(id=counter.next_region_id, Coords=CoordsType()) + page.add_SeparatorRegion(sep_hor) + points_co = '' + for lmm in range(len(polygons_lines_to_be_written_in_xml[mm])): + points_co += str(int((polygons_lines_to_be_written_in_xml[mm][lmm,0,0] ) / self.scale_x)) + points_co += ',' + points_co += str(int((polygons_lines_to_be_written_in_xml[mm][lmm,0,1] ) / self.scale_y)) + points_co += ' ' + sep_hor.get_Coords().set_points(points_co[:-1]) + + return pcgts + + def build_pagexml_full_layout(self, found_polygons_text_region, found_polygons_text_region_h, page_coord, order_of_texts, id_of_texts, 
all_found_texline_polygons, all_found_texline_polygons_h, all_box_coord, all_box_coord_h, found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, found_polygons_marginals, all_found_texline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml): + self.logger.debug('enter build_pagexml_full_layout') + + # create the file structure + pcgts = self.pcgts if self.pcgts else create_page_xml(self.image_filename, self.height_org, self.width_org) + page = pcgts.get_Page() + page.set_Border(BorderType(Coords=CoordsType(points=self.calculate_page_coords(cont_page)))) + + counter = EynollahIdCounter() + _counter_marginals = EynollahIdCounter(region_idx=len(order_of_texts)) + id_of_marginalia = [_counter_marginals.next_region_id for _ in found_polygons_marginals] + xml_reading_order(page, order_of_texts, id_of_marginalia) + + for mm in range(len(found_polygons_text_region)): + textregion = TextRegionType(id=counter.next_region_id, type_='paragraph', + TextEquiv=[TextEquivType(index=0, Unicode='')], + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord))) + page.add_TextRegion(textregion) + self.serialize_lines_in_region(textregion, all_found_texline_polygons, mm, page_coord, all_box_coord, slopes, counter) + + self.logger.debug('len(found_polygons_text_region_h) %s', len(found_polygons_text_region_h)) + for mm in range(len(found_polygons_text_region_h)): + textregion = TextRegionType(id=counter.next_region_id, type_='header', + TextEquiv=[TextEquivType(index=0, Unicode='')], + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_h[mm], page_coord))) + page.add_TextRegion(textregion) + self.serialize_lines_in_region(textregion, all_found_texline_polygons_h, mm, page_coord, all_box_coord_h, slopes_h, counter) + + for mm in range(len(found_polygons_marginals)): + marginal = TextRegionType(id=counter.next_region_id, type_='marginalia', + TextEquiv=[TextEquivType(index=0, Unicode='')], + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals[mm], page_coord))) + page.add_TextRegion(marginal) + self.serialize_lines_in_marginal(marginal, all_found_texline_polygons_marginals, mm, page_coord, all_box_coord_marginals, slopes_marginals, counter) + + for mm in range(len(found_polygons_drop_capitals)): + page.add_TextRegion(TextRegionType(id=counter.next_region_id, type_='drop-capital', + TextEquiv=[TextEquivType(index=0, Unicode='')], + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_drop_capitals[mm], page_coord)))) + + for mm in range(len(found_polygons_text_region_img)): + page.add_ImageRegion(ImageRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_img[mm], page_coord)))) + + for mm in range(len(polygons_lines_to_be_written_in_xml)): + page.add_SeparatorRegion(ImageRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(polygons_lines_to_be_written_in_xml[mm], [0 , 0, 0, 0])))) + + for mm in range(len(found_polygons_tables)): + page.add_TableRegion(TableRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_tables[mm], page_coord)))) + + return pcgts + + def calculate_polygon_coords(self, contour, page_coord): + self.logger.debug('enter calculate_polygon_coords') + coords = '' + for value_bbox in contour: + if len(value_bbox) == 2: + 
coords += str(int((value_bbox[0] + page_coord[2]) / self.scale_x)) + coords += ',' + coords += str(int((value_bbox[1] + page_coord[0]) / self.scale_y)) + else: + coords += str(int((value_bbox[0][0] + page_coord[2]) / self.scale_x)) + coords += ',' + coords += str(int((value_bbox[0][1] + page_coord[0]) / self.scale_y)) + coords=coords + ' ' + return coords[:-1] + diff --git a/requirements-ocr.txt b/requirements-ocr.txt deleted file mode 100644 index 9f31ebb..0000000 --- a/requirements-ocr.txt +++ /dev/null @@ -1,2 +0,0 @@ -torch <= 2.0.1 -transformers <= 4.30.2 diff --git a/requirements-plotting.txt b/requirements-plotting.txt deleted file mode 100644 index 6ccafc3..0000000 --- a/requirements-plotting.txt +++ /dev/null @@ -1 +0,0 @@ -matplotlib diff --git a/requirements-test.txt b/requirements-test.txt index 3ebcf71..9bb2a15 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -1,4 +1,2 @@ pytest -pytest-isolate -coverage[toml] black diff --git a/requirements-training.txt b/requirements-training.txt deleted file mode 120000 index e1bc9c3..0000000 --- a/requirements-training.txt +++ /dev/null @@ -1 +0,0 @@ -train/requirements.txt \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index db1d7df..8520780 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1,8 @@ # ocrd includes opencv, numpy, shapely, click -ocrd >= 3.3.0 -numpy <1.24.0 +ocrd >= 2.23.3 +keras >= 2.3.1, < 2.4 scikit-learn >= 0.23.2 -tensorflow < 2.13 -numba <= 0.58.1 -scikit-image -biopython +tensorflow-gpu >= 1.15, < 2 +imutils >= 0.5.3 +matplotlib +setuptools >= 50 diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..9abf158 --- /dev/null +++ b/setup.py @@ -0,0 +1,28 @@ +from setuptools import setup, find_packages +from json import load + +install_requires = open('requirements.txt').read().split('\n') +with open('ocrd-tool.json', 'r', encoding='utf-8') as f: + version = load(f)['version'] + +setup( + name='eynollah', + version=version, + long_description=open('README.md').read(), + long_description_content_type='text/markdown', + author='Vahid Rezanezhad', + url='https://github.com/qurator-spk/eynollah', + license='Apache License 2.0', + namespace_packages=['qurator'], + packages=find_packages(exclude=['tests']), + install_requires=install_requires, + package_data={ + '': ['*.json'] + }, + entry_points={ + 'console_scripts': [ + 'eynollah=qurator.eynollah.cli:main', + 'ocrd-eynollah-segment=qurator.eynollah.ocrd_cli:main', + ] + }, +) diff --git a/src/eynollah/Charis-Regular.ttf b/src/eynollah/Charis-Regular.ttf deleted file mode 100644 index a4e75a4..0000000 Binary files a/src/eynollah/Charis-Regular.ttf and /dev/null differ diff --git a/src/eynollah/__init__.py b/src/eynollah/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py deleted file mode 100644 index c9bad52..0000000 --- a/src/eynollah/cli.py +++ /dev/null @@ -1,579 +0,0 @@ -import sys -import click -import logging -from ocrd_utils import initLogging, getLevelName, getLogger -from eynollah.eynollah import Eynollah, Eynollah_ocr -from eynollah.sbb_binarize import SbbBinarizer -from eynollah.image_enhancer import Enhancer -from eynollah.mb_ro_on_layout import machine_based_reading_order_on_layout - -@click.group() -def main(): - pass - -@main.command() -@click.option( - "--input", - "-i", - help="PAGE-XML input filename", - type=click.Path(exists=True, dir_okay=False), -) -@click.option( - "--dir_in", - "-di", - help="directory of PAGE-XML 
input files (instead of --input)", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--out", - "-o", - help="directory for output images", - type=click.Path(exists=True, file_okay=False), - required=True, -) -@click.option( - "--model", - "-m", - help="directory of models", - type=click.Path(exists=True, file_okay=False), - required=True, -) -@click.option( - "--log_level", - "-l", - type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), - help="Override log level globally to this", -) - -def machine_based_reading_order(input, dir_in, out, model, log_level): - assert bool(input) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." - orderer = machine_based_reading_order_on_layout(model) - if log_level: - orderer.logger.setLevel(getLevelName(log_level)) - - orderer.run(xml_filename=input, - dir_in=dir_in, - dir_out=out, - ) - - -@main.command() -@click.option('--patches/--no-patches', default=True, help='by enabling this parameter you let the model to see the image in patches.') -@click.option('--model_dir', '-m', type=click.Path(exists=True, file_okay=False), required=True, help='directory containing models for prediction') -@click.option( - "--input-image", "--image", - "-i", - help="input image filename", - type=click.Path(exists=True, dir_okay=False) -) -@click.option( - "--dir_in", - "-di", - help="directory of input images (instead of --image)", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--output", - "-o", - help="output image (if using -i) or output image directory (if using -di)", - type=click.Path(file_okay=True, dir_okay=True), - required=True, -) -@click.option( - "--log_level", - "-l", - type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), - help="Override log level globally to this", -) -def binarization(patches, model_dir, input_image, dir_in, output, log_level): - assert bool(input_image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." 
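Each of the deleted commands repeats this input convention: exactly one of `-i` (single file) or `-di` (directory), enforced with an XOR assert over the two options. A self-contained sketch of the pattern with click; the `process` command and its body are hypothetical, not part of eynollah:

```python
import click
from pathlib import Path

@click.command()
@click.option("--input", "-i", type=click.Path(exists=True, dir_okay=False))
@click.option("--dir_in", "-di", type=click.Path(exists=True, file_okay=False))
def process(input, dir_in):
    # exactly one of the two input sources must be given
    assert bool(input) != bool(dir_in), \
        "Either -i (single input) or -di (directory) must be provided, but not both."
    paths = [Path(input)] if input else sorted(Path(dir_in).iterdir())
    for path in paths:
        click.echo(path)

if __name__ == "__main__":
    process()
```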
- binarizer = SbbBinarizer(model_dir) - if log_level: - binarizer.log.setLevel(getLevelName(log_level)) - binarizer.run(image_path=input_image, use_patches=patches, output=output, dir_in=dir_in) - - -@main.command() -@click.option( - "--image", - "-i", - help="input image filename", - type=click.Path(exists=True, dir_okay=False), -) - -@click.option( - "--out", - "-o", - help="directory for output PAGE-XML files", - type=click.Path(exists=True, file_okay=False), - required=True, -) -@click.option( - "--overwrite", - "-O", - help="overwrite (instead of skipping) if output xml exists", - is_flag=True, -) -@click.option( - "--dir_in", - "-di", - help="directory of input images (instead of --image)", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--model", - "-m", - help="directory of models", - type=click.Path(exists=True, file_okay=False), - required=True, -) - -@click.option( - "--num_col_upper", - "-ncu", - help="lower limit of columns in document image", -) -@click.option( - "--num_col_lower", - "-ncl", - help="upper limit of columns in document image", -) -@click.option( - "--save_org_scale/--no_save_org_scale", - "-sos/-nosos", - is_flag=True, - help="if this parameter set to true, this tool will save the enhanced image in org scale.", -) -@click.option( - "--log_level", - "-l", - type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), - help="Override log level globally to this", -) - -def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_lower, save_org_scale, log_level): - assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." - initLogging() - enhancer = Enhancer( - model, - num_col_upper=num_col_upper, - num_col_lower=num_col_lower, - save_org_scale=save_org_scale, - ) - if log_level: - enhancer.logger.setLevel(getLevelName(log_level)) - enhancer.run(overwrite=overwrite, - dir_in=dir_in, - image_filename=image, - dir_out=out, - ) - -@main.command() -@click.option( - "--image", - "-i", - help="input image filename", - type=click.Path(exists=True, dir_okay=False), -) - -@click.option( - "--out", - "-o", - help="directory for output PAGE-XML files", - type=click.Path(exists=True, file_okay=False), - required=True, -) -@click.option( - "--overwrite", - "-O", - help="overwrite (instead of skipping) if output xml exists", - is_flag=True, -) -@click.option( - "--dir_in", - "-di", - help="directory of input images (instead of --image)", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--model", - "-m", - help="directory of models", - type=click.Path(exists=True, file_okay=False), - required=True, -) -@click.option( - "--model_version", - "-mv", - help="override default versions of model categories", - type=(str, str), - multiple=True, -) -@click.option( - "--save_images", - "-si", - help="if a directory is given, images in documents will be cropped and saved there", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--save_layout", - "-sl", - help="if a directory is given, plot of layout will be saved there", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--save_deskewed", - "-sd", - help="if a directory is given, deskewed image will be saved there", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--save_all", - "-sa", - help="if a directory is given, all plots needed for documentation will be saved there", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--save_page", - 
"-sp", - help="if a directory is given, page crop of image will be saved there", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--enable-plotting/--disable-plotting", - "-ep/-noep", - is_flag=True, - help="If set, will plot intermediary files and images", -) -@click.option( - "--extract_only_images/--disable-extracting_only_images", - "-eoi/-noeoi", - is_flag=True, - help="If a directory is given, only images in documents will be cropped and saved there and the other processing will not be done", -) -@click.option( - "--allow-enhancement/--no-allow-enhancement", - "-ae/-noae", - is_flag=True, - help="if this parameter set to true, this tool would check that input image need resizing and enhancement or not. If so output of resized and enhanced image and corresponding layout data will be written in out directory", -) -@click.option( - "--curved-line/--no-curvedline", - "-cl/-nocl", - is_flag=True, - help="if this parameter set to true, this tool will try to return contoure of textlines instead of rectangle bounding box of textline. This should be taken into account that with this option the tool need more time to do process.", -) -@click.option( - "--textline_light/--no-textline_light", - "-tll/-notll", - is_flag=True, - help="if this parameter set to true, this tool will try to return contoure of textlines instead of rectangle bounding box of textline with a faster method.", -) -@click.option( - "--full-layout/--no-full-layout", - "-fl/-nofl", - is_flag=True, - help="if this parameter set to true, this tool will try to return all elements of layout.", -) -@click.option( - "--tables/--no-tables", - "-tab/-notab", - is_flag=True, - help="if this parameter set to true, this tool will try to detect tables.", -) -@click.option( - "--right2left/--left2right", - "-r2l/-l2r", - is_flag=True, - help="if this parameter set to true, this tool will extract right-to-left reading order.", -) -@click.option( - "--input_binary/--input-RGB", - "-ib/-irgb", - is_flag=True, - help="in general, eynollah uses RGB as input but if the input document is strongly dark, bright or for any other reason you can turn binarized input on. 
This option does not mean that you have to provide a binary image, otherwise this means that the tool itself will binarized the RGB input document.", -) -@click.option( - "--allow_scaling/--no-allow-scaling", - "-as/-noas", - is_flag=True, - help="if this parameter set to true, this tool would check the scale and if needed it will scale it to perform better layout detection", -) -@click.option( - "--headers_off/--headers-on", - "-ho/-noho", - is_flag=True, - help="if this parameter set to true, this tool would ignore headers role in reading order", -) -@click.option( - "--light_version/--original", - "-light/-org", - is_flag=True, - help="if this parameter set to true, this tool would use lighter version", -) -@click.option( - "--ignore_page_extraction/--extract_page_included", - "-ipe/-epi", - is_flag=True, - help="if this parameter set to true, this tool would ignore page extraction", -) -@click.option( - "--reading_order_machine_based/--heuristic_reading_order", - "-romb/-hro", - is_flag=True, - help="if this parameter set to true, this tool would apply machine based reading order detection", -) -@click.option( - "--do_ocr", - "-ocr/-noocr", - is_flag=True, - help="if this parameter set to true, this tool will try to do ocr", -) -@click.option( - "--transformer_ocr", - "-tr/-notr", - is_flag=True, - help="if this parameter set to true, this tool will apply transformer ocr", -) -@click.option( - "--batch_size_ocr", - "-bs_ocr", - help="number of inference batch size of ocr model. Default b_s for trocr and cnn_rnn models are 2 and 8 respectively", -) -@click.option( - "--num_col_upper", - "-ncu", - help="lower limit of columns in document image", -) -@click.option( - "--num_col_lower", - "-ncl", - help="upper limit of columns in document image", -) -@click.option( - "--threshold_art_class_layout", - "-tharl", - help="threshold of artifical class in the case of layout detection. The default value is 0.1", -) -@click.option( - "--threshold_art_class_textline", - "-thart", - help="threshold of artifical class in the case of textline detection. The default value is 0.1", -) -@click.option( - "--skip_layout_and_reading_order", - "-slro/-noslro", - is_flag=True, - help="if this parameter set to true, this tool will ignore layout detection and reading order. 
It means that textline detection will be done within printspace and contours of textline will be written in xml output file.", -) -# TODO move to top-level CLI context -@click.option( - "--log_level", - "-l", - type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), - help="Override 'eynollah' log level globally to this", -) -# -@click.option( - "--setup-logging", - is_flag=True, - help="Setup a basic console logger", -) - -def layout(image, out, overwrite, dir_in, model, model_version, save_images, save_layout, save_deskewed, save_all, extract_only_images, save_page, enable_plotting, allow_enhancement, curved_line, textline_light, full_layout, tables, right2left, input_binary, allow_scaling, headers_off, light_version, reading_order_machine_based, do_ocr, transformer_ocr, batch_size_ocr, num_col_upper, num_col_lower, threshold_art_class_textline, threshold_art_class_layout, skip_layout_and_reading_order, ignore_page_extraction, log_level, setup_logging): - if setup_logging: - console_handler = logging.StreamHandler(sys.stdout) - console_handler.setLevel(logging.INFO) - formatter = logging.Formatter('%(message)s') - console_handler.setFormatter(formatter) - getLogger('eynollah').addHandler(console_handler) - getLogger('eynollah').setLevel(logging.INFO) - else: - initLogging() - assert enable_plotting or not save_layout, "Plotting with -sl also requires -ep" - assert enable_plotting or not save_deskewed, "Plotting with -sd also requires -ep" - assert enable_plotting or not save_all, "Plotting with -sa also requires -ep" - assert enable_plotting or not save_page, "Plotting with -sp also requires -ep" - assert enable_plotting or not save_images, "Plotting with -si also requires -ep" - assert enable_plotting or not allow_enhancement, "Plotting with -ae also requires -ep" - assert not enable_plotting or save_layout or save_deskewed or save_all or save_page or save_images or allow_enhancement, \ - "Plotting with -ep also requires -sl, -sd, -sa, -sp, -si or -ae" - assert textline_light == light_version, "Both light textline detection -tll and light version -light must be set or unset equally" - assert not extract_only_images or not allow_enhancement, "Image extraction -eoi can not be set alongside allow_enhancement -ae" - assert not extract_only_images or not allow_scaling, "Image extraction -eoi can not be set alongside allow_scaling -as" - assert not extract_only_images or not light_version, "Image extraction -eoi can not be set alongside light_version -light" - assert not extract_only_images or not curved_line, "Image extraction -eoi can not be set alongside curved_line -cl" - assert not extract_only_images or not textline_light, "Image extraction -eoi can not be set alongside textline_light -tll" - assert not extract_only_images or not full_layout, "Image extraction -eoi can not be set alongside full_layout -fl" - assert not extract_only_images or not tables, "Image extraction -eoi can not be set alongside tables -tab" - assert not extract_only_images or not right2left, "Image extraction -eoi can not be set alongside right2left -r2l" - assert not extract_only_images or not headers_off, "Image extraction -eoi can not be set alongside headers_off -ho" - assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." 
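The assert ladder above mixes two rule families: options that additionally require `--enable-plotting`, and options excluded by `--extract_only_images`. One way to express such constraints as data rather than as a wall of asserts — a refactoring sketch only, not the project's code, with `opts` assumed to be a dict of the parsed flags:

```python
def validate_flags(opts):
    # options that only make sense when plotting is enabled
    needs_plotting = ("save_images", "save_layout", "save_deskewed",
                      "save_all", "save_page", "allow_enhancement")
    for name in needs_plotting:
        assert opts["enable_plotting"] or not opts[name], \
            f"{name} also requires enable_plotting"
    # flags that cannot be combined with extract_only_images
    excluded_by_eoi = ("allow_enhancement", "allow_scaling", "light_version",
                       "curved_line", "textline_light", "full_layout",
                       "tables", "right2left", "headers_off")
    for name in excluded_by_eoi:
        assert not (opts["extract_only_images"] and opts[name]), \
            f"extract_only_images can not be set alongside {name}"
    # the two light-mode switches must agree
    assert opts["textline_light"] == opts["light_version"], \
        "textline_light and light_version must be set or unset equally"
```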
- eynollah = Eynollah( - model, - model_versions=model_version, - extract_only_images=extract_only_images, - enable_plotting=enable_plotting, - allow_enhancement=allow_enhancement, - curved_line=curved_line, - textline_light=textline_light, - full_layout=full_layout, - tables=tables, - right2left=right2left, - input_binary=input_binary, - allow_scaling=allow_scaling, - headers_off=headers_off, - light_version=light_version, - ignore_page_extraction=ignore_page_extraction, - reading_order_machine_based=reading_order_machine_based, - do_ocr=do_ocr, - transformer_ocr=transformer_ocr, - batch_size_ocr=batch_size_ocr, - num_col_upper=num_col_upper, - num_col_lower=num_col_lower, - skip_layout_and_reading_order=skip_layout_and_reading_order, - threshold_art_class_textline=threshold_art_class_textline, - threshold_art_class_layout=threshold_art_class_layout, - ) - if log_level: - eynollah.logger.setLevel(getLevelName(log_level)) - eynollah.run(overwrite=overwrite, - image_filename=image, - dir_in=dir_in, - dir_out=out, - dir_of_cropped_images=save_images, - dir_of_layout=save_layout, - dir_of_deskewed=save_deskewed, - dir_of_all=save_all, - dir_save_page=save_page, - ) - -@main.command() -@click.option( - "--image", - "-i", - help="input image filename", - type=click.Path(exists=True, dir_okay=False), -) -@click.option( - "--dir_in", - "-di", - help="directory of input images (instead of --image)", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--dir_in_bin", - "-dib", - help="directory of binarized images (in addition to --dir_in for RGB images; filename stems must match the RGB image files, with '.png' suffix).\nPerform prediction using both RGB and binary images. (This does not necessarily improve results, however it may be beneficial for certain document images.)", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--dir_xmls", - "-dx", - help="directory of input PAGE-XML files (in addition to --dir_in; filename stems must match the image files, with '.xml' suffix).", - type=click.Path(exists=True, file_okay=False), - required=True, -) -@click.option( - "--out", - "-o", - help="directory for output PAGE-XML files", - type=click.Path(exists=True, file_okay=False), - required=True, -) -@click.option( - "--dir_out_image_text", - "-doit", - help="directory for output images, newly rendered with predicted text", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--overwrite", - "-O", - help="overwrite (instead of skipping) if output xml exists", - is_flag=True, -) -@click.option( - "--model", - "-m", - help="directory of models", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--model_name", - help="Specific model file path to use for OCR", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--tr_ocr", - "-trocr/-notrocr", - is_flag=True, - help="if this parameter set to true, transformer ocr will be applied, otherwise cnn_rnn model.", -) -@click.option( - "--export_textline_images_and_text", - "-etit/-noetit", - is_flag=True, - help="if this parameter set to true, images and text in xml will be exported into output dir. This files can be used for training a OCR engine.", -) -@click.option( - "--do_not_mask_with_textline_contour", - "-nmtc/-mtc", - is_flag=True, - help="if this parameter set to true, cropped textline images will not be masked with textline contour.", -) -@click.option( - "--batch_size", - "-bs", - help="number of inference batch size. 
Default b_s for trocr and cnn_rnn models are 2 and 8 respectively", -) -@click.option( - "--dataset_abbrevation", - "-ds_pref", - help="in the case of extracting textline and text from a xml GT file user can add an abbrevation of dataset name to generated dataset", -) -@click.option( - "--min_conf_value_of_textline_text", - "-min_conf", - help="minimum OCR confidence value. Text lines with a confidence value lower than this threshold will not be included in the output XML file.", -) -@click.option( - "--log_level", - "-l", - type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), - help="Override log level globally to this", -) - -def ocr(image, dir_in, dir_in_bin, dir_xmls, out, dir_out_image_text, overwrite, model, model_name, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, batch_size, dataset_abbrevation, min_conf_value_of_textline_text, log_level): - initLogging() - - assert bool(model) != bool(model_name), "Either -m (model directory) or --model_name (specific model name) must be provided." - assert not export_textline_images_and_text or not tr_ocr, "Exporting textline and text -etit can not be set alongside transformer ocr -tr_ocr" - assert not export_textline_images_and_text or not model, "Exporting textline and text -etit can not be set alongside model -m" - assert not export_textline_images_and_text or not batch_size, "Exporting textline and text -etit can not be set alongside batch size -bs" - assert not export_textline_images_and_text or not dir_in_bin, "Exporting textline and text -etit can not be set alongside directory of bin images -dib" - assert not export_textline_images_and_text or not dir_out_image_text, "Exporting textline and text -etit can not be set alongside directory of images with predicted text -doit" - assert bool(image) != bool(dir_in), "Either -i (single image) or -di (directory) must be provided, but not both." 
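
Several of the options above encode an implicit directory layout: `--dir_xmls` and `--dir_in_bin` are joined to `--dir_in` by filename stem, with `.xml` and `.png` suffixes respectively. A small self-contained sketch of that pairing, under the assumption that unmatched images are simply skipped (`pair_inputs` and its suffix list are illustrative, not the eynollah implementation):

```python
# Sketch of the stem-based pairing implied by --dir_in / --dir_xmls / --dir_in_bin.
from pathlib import Path
from typing import Iterator, Optional, Tuple

IMAGE_SUFFIXES = ('.png', '.jpg', '.jpeg', '.tif', '.tiff')  # assumed, not exhaustive

def pair_inputs(dir_in: str, dir_xmls: str,
                dir_in_bin: Optional[str] = None,
                ) -> Iterator[Tuple[Path, Path, Optional[Path]]]:
    for img in sorted(Path(dir_in).iterdir()):
        if img.suffix.lower() not in IMAGE_SUFFIXES:
            continue
        # PAGE-XML must share the image's stem, with '.xml' suffix
        xml = Path(dir_xmls) / (img.stem + '.xml')
        if not xml.exists():
            continue  # a real CLI would warn or fail here
        # optional binarized companion, stem + '.png'
        binimg = Path(dir_in_bin) / (img.stem + '.png') if dir_in_bin else None
        yield img, xml, binimg
```
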
- eynollah_ocr = Eynollah_ocr( - dir_models=model, - model_name=model_name, - tr_ocr=tr_ocr, - export_textline_images_and_text=export_textline_images_and_text, - do_not_mask_with_textline_contour=do_not_mask_with_textline_contour, - batch_size=batch_size, - pref_of_dataset=dataset_abbrevation, - min_conf_value_of_textline_text=min_conf_value_of_textline_text, - ) - if log_level: - eynollah_ocr.logger.setLevel(getLevelName(log_level)) - eynollah_ocr.run(overwrite=overwrite, - dir_in=dir_in, - dir_in_bin=dir_in_bin, - image_filename=image, - dir_xmls=dir_xmls, - dir_out_image_text=dir_out_image_text, - dir_out=out, - ) - -if __name__ == "__main__": - main() diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py deleted file mode 100644 index 13acba6..0000000 --- a/src/eynollah/eynollah.py +++ /dev/null @@ -1,5839 +0,0 @@ -# pylint: disable=no-member,invalid-name,line-too-long,missing-function-docstring,missing-class-docstring,too-many-branches -# pylint: disable=too-many-locals,wrong-import-position,too-many-lines,too-many-statements,chained-comparison,fixme,broad-except,c-extension-no-member -# pylint: disable=too-many-public-methods,too-many-arguments,too-many-instance-attributes,too-many-public-methods, -# pylint: disable=consider-using-enumerate -""" -document layout analysis (segmentation) with output in PAGE-XML -""" - -# cannot use importlib.resources until we move to 3.9+ forimportlib.resources.files -import sys -if sys.version_info < (3, 10): - import importlib_resources -else: - import importlib.resources as importlib_resources - -from difflib import SequenceMatcher as sq -from PIL import Image, ImageDraw, ImageFont -import math -import os -import sys -import time -from typing import Dict, List, Optional, Tuple -import atexit -import warnings -from functools import partial -from pathlib import Path -from multiprocessing import cpu_count -import gc -import copy -import json - -from concurrent.futures import ProcessPoolExecutor -import xml.etree.ElementTree as ET -import cv2 -import numpy as np -import shapely.affinity -from scipy.signal import find_peaks -from scipy.ndimage import gaussian_filter1d -from numba import cuda -from skimage.morphology import skeletonize -from ocrd import OcrdPage -from ocrd_utils import getLogger, tf_disable_interactive_logs -import statistics - -try: - import torch -except ImportError: - torch = None -try: - import matplotlib.pyplot as plt -except ImportError: - plt = None -try: - from transformers import TrOCRProcessor, VisionEncoderDecoderModel -except ImportError: - TrOCRProcessor = VisionEncoderDecoderModel = None - -#os.environ['CUDA_VISIBLE_DEVICES'] = '-1' -tf_disable_interactive_logs() -import tensorflow as tf -from tensorflow.python.keras import backend as K -from tensorflow.keras.models import load_model -tf.get_logger().setLevel("ERROR") -warnings.filterwarnings("ignore") -# use tf1 compatibility for keras backend -from tensorflow.compat.v1.keras.backend import set_session -from tensorflow.keras import layers -from tensorflow.keras.layers import StringLookup - -from .utils.contour import ( - filter_contours_area_of_image, - filter_contours_area_of_image_tables, - find_contours_mean_y_diff, - find_center_of_contours, - find_new_features_of_contours, - find_features_of_contours, - get_text_region_boxes_by_given_contours, - get_textregion_contours_in_org_image, - get_textregion_contours_in_org_image_light, - return_contours_of_image, - return_contours_of_interested_region, - return_contours_of_interested_textline, - 
return_parent_contours, - dilate_textregion_contours, - dilate_textline_contours, - polygon2contour, - contour2polygon, - join_polygons, - make_intersection, -) -from .utils.rotate import ( - rotate_image, - rotation_not_90_func, - rotation_not_90_func_full_layout, - rotation_image_new -) -from .utils.utils_ocr import ( - return_start_and_end_of_common_text_of_textline_ocr_without_common_section, - return_textline_contour_with_added_box_coordinate, - preprocess_and_resize_image_for_ocrcnn_model, - return_textlines_split_if_needed, - decode_batch_predictions, - return_rnn_cnn_ocr_of_given_textlines, - fit_text_single_line, - break_curved_line_into_small_pieces_and_then_merge, - get_orientation_moments, - rotate_image_with_padding, - get_contours_and_bounding_boxes -) -from .utils.separate_lines import ( - separate_lines_new2, - return_deskew_slop, - do_work_of_slopes_new, - do_work_of_slopes_new_curved, - do_work_of_slopes_new_light, -) -from .utils.drop_capitals import ( - adhere_drop_capital_region_into_corresponding_textline, - filter_small_drop_capitals_from_no_patch_layout -) -from .utils.marginals import get_marginals -from .utils.resize import resize_image -from .utils.shm import share_ndarray -from .utils import ( - is_image_filename, - boosting_headers_by_longshot_region_segmentation, - crop_image_inside_box, - box2rect, - box2slice, - find_num_col, - otsu_copy_binary, - put_drop_out_from_only_drop_model, - putt_bb_of_drop_capitals_of_model_in_patches_in_layout, - check_any_text_region_in_model_one_is_main_or_header, - check_any_text_region_in_model_one_is_main_or_header_light, - small_textlines_to_parent_adherence2, - order_of_regions, - find_number_of_columns_in_document, - return_boxes_of_images_by_order_of_reading_new -) -from .utils.pil_cv2 import check_dpi, pil2cv -from .utils.xml import order_and_id_of_texts -from .plot import EynollahPlotter -from .writer import EynollahXmlWriter - -MIN_AREA_REGION = 0.000001 -SLOPE_THRESHOLD = 0.13 -RATIO_OF_TWO_MODEL_THRESHOLD = 95.50 #98.45: -DPI_THRESHOLD = 298 -MAX_SLOPE = 999 -KERNEL = np.ones((5, 5), np.uint8) - -projection_dim = 64 -patch_size = 1 -num_patches =21*21#14*14#28*28#14*14#28*28 - - -class Patches(layers.Layer): - def __init__(self, **kwargs): - super(Patches, self).__init__() - self.patch_size = patch_size - - def call(self, images): - batch_size = tf.shape(images)[0] - patches = tf.image.extract_patches( - images=images, - sizes=[1, self.patch_size, self.patch_size, 1], - strides=[1, self.patch_size, self.patch_size, 1], - rates=[1, 1, 1, 1], - padding="VALID", - ) - patch_dims = patches.shape[-1] - patches = tf.reshape(patches, [batch_size, -1, patch_dims]) - return patches - def get_config(self): - - config = super().get_config().copy() - config.update({ - 'patch_size': self.patch_size, - }) - return config - -class PatchEncoder(layers.Layer): - def __init__(self, **kwargs): - super(PatchEncoder, self).__init__() - self.num_patches = num_patches - self.projection = layers.Dense(units=projection_dim) - self.position_embedding = layers.Embedding( - input_dim=num_patches, output_dim=projection_dim - ) - - def call(self, patch): - positions = tf.range(start=0, limit=self.num_patches, delta=1) - encoded = self.projection(patch) + self.position_embedding(positions) - return encoded - def get_config(self): - - config = super().get_config().copy() - config.update({ - 'num_patches': self.num_patches, - 'projection': self.projection, - 'position_embedding': self.position_embedding, - }) - return config - -class Eynollah: - def 
__init__( - self, - dir_models : str, - model_versions: List[Tuple[str, str]] = [], - extract_only_images : bool =False, - enable_plotting : bool = False, - allow_enhancement : bool = False, - curved_line : bool = False, - textline_light : bool = False, - full_layout : bool = False, - tables : bool = False, - right2left : bool = False, - input_binary : bool = False, - allow_scaling : bool = False, - headers_off : bool = False, - light_version : bool = False, - ignore_page_extraction : bool = False, - reading_order_machine_based : bool = False, - do_ocr : bool = False, - transformer_ocr: bool = False, - batch_size_ocr: Optional[int] = None, - num_col_upper : Optional[int] = None, - num_col_lower : Optional[int] = None, - threshold_art_class_layout: Optional[float] = None, - threshold_art_class_textline: Optional[float] = None, - skip_layout_and_reading_order : bool = False, - ): - self.logger = getLogger('eynollah') - self.plotter = None - - if skip_layout_and_reading_order: - textline_light = True - self.light_version = light_version - self.reading_order_machine_based = reading_order_machine_based - self.enable_plotting = enable_plotting - self.allow_enhancement = allow_enhancement - self.curved_line = curved_line - self.textline_light = textline_light - self.full_layout = full_layout - self.tables = tables - self.right2left = right2left - self.input_binary = input_binary - self.allow_scaling = allow_scaling - self.headers_off = headers_off - self.light_version = light_version - self.extract_only_images = extract_only_images - self.ignore_page_extraction = ignore_page_extraction - self.skip_layout_and_reading_order = skip_layout_and_reading_order - self.ocr = do_ocr - self.tr = transformer_ocr - if not batch_size_ocr: - self.b_s_ocr = 8 - else: - self.b_s_ocr = int(batch_size_ocr) - if num_col_upper: - self.num_col_upper = int(num_col_upper) - else: - self.num_col_upper = num_col_upper - if num_col_lower: - self.num_col_lower = int(num_col_lower) - else: - self.num_col_lower = num_col_lower - - # for parallelization of CPU-intensive tasks: - self.executor = ProcessPoolExecutor(max_workers=cpu_count()) - - if threshold_art_class_layout: - self.threshold_art_class_layout = float(threshold_art_class_layout) - else: - self.threshold_art_class_layout = 0.1 - - if threshold_art_class_textline: - self.threshold_art_class_textline = float(threshold_art_class_textline) - else: - self.threshold_art_class_textline = 0.1 - - t_start = time.time() - - # #gpu_options = tf.compat.v1.GPUOptions(allow_growth=True) - # #gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=7.7, allow_growth=True) - # #session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options)) - # config = tf.compat.v1.ConfigProto() - # config.gpu_options.allow_growth = True - # #session = tf.InteractiveSession() - # session = tf.compat.v1.Session(config=config) - # set_session(session) - try: - for device in tf.config.list_physical_devices('GPU'): - tf.config.experimental.set_memory_growth(device, True) - except: - self.logger.warning("no GPU device available") - - self.logger.info("Loading models...") - self.setup_models(dir_models, model_versions) - self.logger.info(f"Model initialization complete ({time.time() - t_start:.1f}s)") - - @staticmethod - def our_load_model(model_file, basedir=""): - if basedir: - model_file = os.path.join(basedir, model_file) - if model_file.endswith('.h5') and Path(model_file[:-3]).exists(): - # prefer SavedModel over HDF5 format if it exists - model_file = 
model_file[:-3] - try: - model = load_model(model_file, compile=False) - except: - model = load_model(model_file, compile=False, custom_objects={ - "PatchEncoder": PatchEncoder, "Patches": Patches}) - return model - - def setup_models(self, basedir: Path, model_versions: List[Tuple[str, str]] = []): - self.model_versions = { - "enhancement": "eynollah-enhancement_20210425", - "binarization": "eynollah-binarization_20210425", - "col_classifier": "eynollah-column-classifier_20210425", - "page": "model_eynollah_page_extraction_20250915", - #?: "eynollah-main-regions-aug-scaling_20210425", - "region": ( # early layout - "eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18" if self.extract_only_images else - "eynollah-main-regions_20220314" if self.light_version else - "eynollah-main-regions-ensembled_20210425"), - "region_p2": ( # early layout, non-light, 2nd part - "eynollah-main-regions-aug-rotation_20210425"), - "region_1_2": ( # early layout, light, 1-or-2-column - #"modelens_12sp_elay_0_3_4__3_6_n" - #"modelens_earlylayout_12spaltige_2_3_5_6_7_8" - #"modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18" - #"modelens_1_2_4_5_early_lay_1_2_spaltige" - #"model_3_eraly_layout_no_patches_1_2_spaltige" - "modelens_e_l_all_sp_0_1_2_3_4_171024"), - "region_fl_np": ( # full layout / no patches - #"modelens_full_lay_1_3_031124" - #"modelens_full_lay_13__3_19_241024" - #"model_full_lay_13_241024" - #"modelens_full_lay_13_17_231024" - #"modelens_full_lay_1_2_221024" - #"eynollah-full-regions-1column_20210425" - "modelens_full_lay_1__4_3_091124"), - "region_fl": ( # full layout / with patches - #"eynollah-full-regions-3+column_20210425" - ##"model_2_full_layout_new_trans" - #"modelens_full_lay_1_3_031124" - #"modelens_full_lay_13__3_19_241024" - #"model_full_lay_13_241024" - #"modelens_full_lay_13_17_231024" - #"modelens_full_lay_1_2_221024" - #"modelens_full_layout_24_till_28" - #"model_2_full_layout_new_trans" - "modelens_full_lay_1__4_3_091124"), - "reading_order": ( - #"model_mb_ro_aug_ens_11" - #"model_step_3200000_mb_ro" - #"model_ens_reading_order_machine_based" - #"model_mb_ro_aug_ens_8" - #"model_ens_reading_order_machine_based" - "model_eynollah_reading_order_20250824"), - "textline": ( - #"modelens_textline_1_4_16092024" - #"model_textline_ens_3_4_5_6_artificial" - #"modelens_textline_1_3_4_20240915" - #"model_textline_ens_3_4_5_6_artificial" - #"modelens_textline_9_12_13_14_15" - #"eynollah-textline_light_20210425" - "modelens_textline_0_1__2_4_16092024" if self.textline_light else - #"eynollah-textline_20210425" - "modelens_textline_0_1__2_4_16092024"), - "table": ( - None if not self.tables else - "modelens_table_0t4_201124" if self.light_version else - "eynollah-tables_20210319"), - "ocr": ( - None if not self.ocr else - "model_eynollah_ocr_trocr_20250919" if self.tr else - "model_eynollah_ocr_cnnrnn_20250930") - } - # override defaults from CLI - for key, val in model_versions: - assert key in self.model_versions, "unknown model category '%s'" % key - self.logger.warning("overriding default model %s version %s to %s", key, self.model_versions[key], val) - self.model_versions[key] = val - # load models, depending on modes - # (note: loading too many models can cause OOM on GPU/CUDA, - # thus, we try set up the minimal configuration for the current mode) - loadable = [ - "col_classifier", - "binarization", - "page", - "region" - ] - if not self.extract_only_images: - loadable.append("textline") - if self.light_version: - loadable.append("region_1_2") - else: - 
loadable.append("region_p2") - # if self.allow_enhancement:? - loadable.append("enhancement") - if self.full_layout: - loadable.append("region_fl_np") - #loadable.append("region_fl") - if self.reading_order_machine_based: - loadable.append("reading_order") - if self.tables: - loadable.append("table") - - self.models = {name: self.our_load_model(self.model_versions[name], basedir) - for name in loadable - } - - if self.ocr: - ocr_model_dir = os.path.join(basedir, self.model_versions["ocr"]) - if self.tr: - self.models["ocr"] = VisionEncoderDecoderModel.from_pretrained(ocr_model_dir) - if torch.cuda.is_available(): - self.logger.info("Using GPU acceleration") - self.device = torch.device("cuda:0") - else: - self.logger.info("Using CPU processing") - self.device = torch.device("cpu") - #self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten") - self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed") - else: - ocr_model = load_model(ocr_model_dir, compile=False) - self.models["ocr"] = tf.keras.models.Model( - ocr_model.get_layer(name = "image").input, - ocr_model.get_layer(name = "dense2").output) - - with open(os.path.join(ocr_model_dir, "characters_org.txt"), "r") as config_file: - characters = json.load(config_file) - # Mapping characters to integers. - char_to_num = StringLookup(vocabulary=list(characters), mask_token=None) - # Mapping integers back to original characters. - self.num_to_char = StringLookup( - vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True - ) - - def __del__(self): - if hasattr(self, 'executor') and getattr(self, 'executor'): - self.executor.shutdown() - self.executor = None - if hasattr(self, 'models') and getattr(self, 'models'): - for model_name in list(self.models): - if self.models[model_name]: - del self.models[model_name] - - def cache_images(self, image_filename=None, image_pil=None, dpi=None): - ret = {} - t_c0 = time.time() - if image_filename: - ret['img'] = cv2.imread(image_filename) - if self.light_version: - self.dpi = 100 - else: - self.dpi = check_dpi(image_filename) - else: - ret['img'] = pil2cv(image_pil) - if self.light_version: - self.dpi = 100 - else: - self.dpi = check_dpi(image_pil) - ret['img_grayscale'] = cv2.cvtColor(ret['img'], cv2.COLOR_BGR2GRAY) - for prefix in ('', '_grayscale'): - ret[f'img{prefix}_uint8'] = ret[f'img{prefix}'].astype(np.uint8) - self._imgs = ret - if dpi is not None: - self.dpi = dpi - - def reset_file_name_dir(self, image_filename, dir_out): - t_c = time.time() - self.cache_images(image_filename=image_filename) - self.writer = EynollahXmlWriter( - dir_out=dir_out, - image_filename=image_filename, - curved_line=self.curved_line, - textline_light = self.textline_light) - - def imread(self, grayscale=False, uint8=True): - key = 'img' - if grayscale: - key += '_grayscale' - if uint8: - key += '_uint8' - return self._imgs[key].copy() - - def isNaN(self, num): - return num != num - - def predict_enhancement(self, img): - self.logger.debug("enter predict_enhancement") - - img_height_model = self.models["enhancement"].layers[-1].output_shape[1] - img_width_model = self.models["enhancement"].layers[-1].output_shape[2] - if img.shape[0] < img_height_model: - img = cv2.resize(img, (img.shape[1], img_width_model), interpolation=cv2.INTER_NEAREST) - if img.shape[1] < img_width_model: - img = cv2.resize(img, (img_height_model, img.shape[0]), interpolation=cv2.INTER_NEAREST) - margin = int(0 * img_width_model) - width_mid = img_width_model - 2 * margin - height_mid 
= img_height_model - 2 * margin - img = img / 255. - img_h = img.shape[0] - img_w = img.shape[1] - - prediction_true = np.zeros((img_h, img_w, 3)) - nxf = img_w / float(width_mid) - nyf = img_h / float(height_mid) - nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf) - nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf) - - for i in range(nxf): - for j in range(nyf): - if i == 0: - index_x_d = i * width_mid - index_x_u = index_x_d + img_width_model - else: - index_x_d = i * width_mid - index_x_u = index_x_d + img_width_model - if j == 0: - index_y_d = j * height_mid - index_y_u = index_y_d + img_height_model - else: - index_y_d = j * height_mid - index_y_u = index_y_d + img_height_model - - if index_x_u > img_w: - index_x_u = img_w - index_x_d = img_w - img_width_model - if index_y_u > img_h: - index_y_u = img_h - index_y_d = img_h - img_height_model - - img_patch = img[np.newaxis, index_y_d:index_y_u, index_x_d:index_x_u, :] - label_p_pred = self.models["enhancement"].predict(img_patch, verbose=0) - seg = label_p_pred[0, :, :, :] * 255 - - if i == 0 and j == 0: - prediction_true[index_y_d + 0:index_y_u - margin, - index_x_d + 0:index_x_u - margin] = \ - seg[0:-margin or None, - 0:-margin or None] - elif i == nxf - 1 and j == nyf - 1: - prediction_true[index_y_d + margin:index_y_u - 0, - index_x_d + margin:index_x_u - 0] = \ - seg[margin:, - margin:] - elif i == 0 and j == nyf - 1: - prediction_true[index_y_d + margin:index_y_u - 0, - index_x_d + 0:index_x_u - margin] = \ - seg[margin:, - 0:-margin or None] - elif i == nxf - 1 and j == 0: - prediction_true[index_y_d + 0:index_y_u - margin, - index_x_d + margin:index_x_u - 0] = \ - seg[0:-margin or None, - margin:] - elif i == 0 and j != 0 and j != nyf - 1: - prediction_true[index_y_d + margin:index_y_u - margin, - index_x_d + 0:index_x_u - margin] = \ - seg[margin:-margin or None, - 0:-margin or None] - elif i == nxf - 1 and j != 0 and j != nyf - 1: - prediction_true[index_y_d + margin:index_y_u - margin, - index_x_d + margin:index_x_u - 0] = \ - seg[margin:-margin or None, - margin:] - elif i != 0 and i != nxf - 1 and j == 0: - prediction_true[index_y_d + 0:index_y_u - margin, - index_x_d + margin:index_x_u - margin] = \ - seg[0:-margin or None, - margin:-margin or None] - elif i != 0 and i != nxf - 1 and j == nyf - 1: - prediction_true[index_y_d + margin:index_y_u - 0, - index_x_d + margin:index_x_u - margin] = \ - seg[margin:, - margin:-margin or None] - else: - prediction_true[index_y_d + margin:index_y_u - margin, - index_x_d + margin:index_x_u - margin] = \ - seg[margin:-margin or None, - margin:-margin or None] - - prediction_true = prediction_true.astype(int) - return prediction_true - - def calculate_width_height_by_columns(self, img, num_col, width_early, label_p_pred): - self.logger.debug("enter calculate_width_height_by_columns") - if num_col == 1 and width_early < 1100: - img_w_new = 2000 - elif num_col == 1 and width_early >= 2500: - img_w_new = 2000 - elif num_col == 1 and width_early >= 1100 and width_early < 2500: - img_w_new = width_early - elif num_col == 2 and width_early < 2000: - img_w_new = 2400 - elif num_col == 2 and width_early >= 3500: - img_w_new = 2400 - elif num_col == 2 and width_early >= 2000 and width_early < 3500: - img_w_new = width_early - elif num_col == 3 and width_early < 2000: - img_w_new = 3000 - elif num_col == 3 and width_early >= 4000: - img_w_new = 3000 - elif num_col == 3 and width_early >= 2000 and width_early < 4000: - img_w_new = width_early - elif num_col == 4 and width_early < 2500: - 
img_w_new = 4000 - elif num_col == 4 and width_early >= 5000: - img_w_new = 4000 - elif num_col == 4 and width_early >= 2500 and width_early < 5000: - img_w_new = width_early - elif num_col == 5 and width_early < 3700: - img_w_new = 5000 - elif num_col == 5 and width_early >= 7000: - img_w_new = 5000 - elif num_col == 5 and width_early >= 3700 and width_early < 7000: - img_w_new = width_early - elif num_col == 6 and width_early < 4500: - img_w_new = 6500 # 5400 - else: - img_w_new = width_early - img_h_new = img_w_new * img.shape[0] // img.shape[1] - - if label_p_pred[0][int(num_col - 1)] < 0.9 and img_w_new < width_early: - img_new = np.copy(img) - num_column_is_classified = False - #elif label_p_pred[0][int(num_col - 1)] < 0.8 and img_h_new >= 8000: - elif img_h_new >= 8000: - img_new = np.copy(img) - num_column_is_classified = False - else: - img_new = resize_image(img, img_h_new, img_w_new) - num_column_is_classified = True - - return img_new, num_column_is_classified - - def calculate_width_height_by_columns_1_2(self, img, num_col, width_early, label_p_pred): - self.logger.debug("enter calculate_width_height_by_columns") - if num_col == 1: - img_w_new = 1000 - else: - img_w_new = 1300 - img_h_new = img_w_new * img.shape[0] // img.shape[1] - - if label_p_pred[0][int(num_col - 1)] < 0.9 and img_w_new < width_early: - img_new = np.copy(img) - num_column_is_classified = False - #elif label_p_pred[0][int(num_col - 1)] < 0.8 and img_h_new >= 8000: - elif img_h_new >= 8000: - img_new = np.copy(img) - num_column_is_classified = False - else: - img_new = resize_image(img, img_h_new, img_w_new) - num_column_is_classified = True - - return img_new, num_column_is_classified - - def calculate_width_height_by_columns_extract_only_images(self, img, num_col, width_early, label_p_pred): - self.logger.debug("enter calculate_width_height_by_columns") - if num_col == 1: - img_w_new = 700 - elif num_col == 2: - img_w_new = 900 - elif num_col == 3: - img_w_new = 1500 - elif num_col == 4: - img_w_new = 1800 - elif num_col == 5: - img_w_new = 2200 - elif num_col == 6: - img_w_new = 2500 - img_h_new = img_w_new * img.shape[0] // img.shape[1] - - img_new = resize_image(img, img_h_new, img_w_new) - num_column_is_classified = True - - return img_new, num_column_is_classified - - def resize_image_with_column_classifier(self, is_image_enhanced, img_bin): - self.logger.debug("enter resize_image_with_column_classifier") - if self.input_binary: - img = np.copy(img_bin) - else: - img = self.imread() - - _, page_coord = self.early_page_for_num_of_column_classification(img) - - if self.input_binary: - img_in = np.copy(img) - img_in = img_in / 255.0 - width_early = img_in.shape[1] - img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST) - img_in = img_in.reshape(1, 448, 448, 3) - else: - img_1ch = self.imread(grayscale=True, uint8=False) - width_early = img_1ch.shape[1] - img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - - # plt.imshow(img_1ch) - # plt.show() - img_1ch = img_1ch / 255.0 - img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST) - - img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3)) - img_in[0, :, :, 0] = img_1ch[:, :] - img_in[0, :, :, 1] = img_1ch[:, :] - img_in[0, :, :, 2] = img_1ch[:, :] - - label_p_pred = self.models["col_classifier"].predict(img_in, verbose=0) - num_col = np.argmax(label_p_pred[0]) + 1 - - self.logger.info("Found %s columns (%s)", num_col, label_p_pred) - img_new, _ = 
self.calculate_width_height_by_columns(img, num_col, width_early, label_p_pred) - - if img_new.shape[1] > img.shape[1]: - img_new = self.predict_enhancement(img_new) - is_image_enhanced = True - - return img, img_new, is_image_enhanced - - def resize_and_enhance_image_with_column_classifier(self, light_version): - self.logger.debug("enter resize_and_enhance_image_with_column_classifier") - dpi = self.dpi - self.logger.info("Detected %s DPI", dpi) - if self.input_binary: - img = self.imread() - prediction_bin = self.do_prediction(True, img, self.models["binarization"], n_batch_inference=5) - prediction_bin = 255 * (prediction_bin[:,:,0] == 0) - prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2).astype(np.uint8) - img= np.copy(prediction_bin) - img_bin = prediction_bin - else: - img = self.imread() - img_bin = None - - width_early = img.shape[1] - t1 = time.time() - _, page_coord = self.early_page_for_num_of_column_classification(img_bin) - - self.image_page_org_size = img[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3], :] - self.page_coord = page_coord - - if self.num_col_upper and not self.num_col_lower: - num_col = self.num_col_upper - label_p_pred = [np.ones(6)] - elif self.num_col_lower and not self.num_col_upper: - num_col = self.num_col_lower - label_p_pred = [np.ones(6)] - elif not self.num_col_upper and not self.num_col_lower: - if self.input_binary: - img_in = np.copy(img) - img_in = img_in / 255.0 - img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST) - img_in = img_in.reshape(1, 448, 448, 3) - else: - img_1ch = self.imread(grayscale=True) - width_early = img_1ch.shape[1] - img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - - img_1ch = img_1ch / 255.0 - img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST) - img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3)) - img_in[0, :, :, 0] = img_1ch[:, :] - img_in[0, :, :, 1] = img_1ch[:, :] - img_in[0, :, :, 2] = img_1ch[:, :] - - label_p_pred = self.models["col_classifier"].predict(img_in, verbose=0) - num_col = np.argmax(label_p_pred[0]) + 1 - - elif (self.num_col_upper and self.num_col_lower) and (self.num_col_upper!=self.num_col_lower): - if self.input_binary: - img_in = np.copy(img) - img_in = img_in / 255.0 - img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST) - img_in = img_in.reshape(1, 448, 448, 3) - else: - img_1ch = self.imread(grayscale=True) - width_early = img_1ch.shape[1] - img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - - img_1ch = img_1ch / 255.0 - img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST) - img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3)) - img_in[0, :, :, 0] = img_1ch[:, :] - img_in[0, :, :, 1] = img_1ch[:, :] - img_in[0, :, :, 2] = img_1ch[:, :] - - label_p_pred = self.models["col_classifier"].predict(img_in, verbose=0) - num_col = np.argmax(label_p_pred[0]) + 1 - - if num_col > self.num_col_upper: - num_col = self.num_col_upper - label_p_pred = [np.ones(6)] - if num_col < self.num_col_lower: - num_col = self.num_col_lower - label_p_pred = [np.ones(6)] - else: - num_col = self.num_col_upper - label_p_pred = [np.ones(6)] - - self.logger.info("Found %d columns (%s)", num_col, np.around(label_p_pred, decimals=5)) - if not self.extract_only_images: - if dpi < DPI_THRESHOLD: - if light_version and num_col in (1,2): - img_new, num_column_is_classified = self.calculate_width_height_by_columns_1_2( - img, num_col, 
width_early, label_p_pred) - else: - img_new, num_column_is_classified = self.calculate_width_height_by_columns( - img, num_col, width_early, label_p_pred) - if light_version: - image_res = np.copy(img_new) - else: - image_res = self.predict_enhancement(img_new) - is_image_enhanced = True - else: - if light_version and num_col in (1,2): - img_new, num_column_is_classified = self.calculate_width_height_by_columns_1_2( - img, num_col, width_early, label_p_pred) - image_res = np.copy(img_new) - is_image_enhanced = True - else: - num_column_is_classified = True - image_res = np.copy(img) - is_image_enhanced = False - else: - num_column_is_classified = True - image_res = np.copy(img) - is_image_enhanced = False - - self.logger.debug("exit resize_and_enhance_image_with_column_classifier") - return is_image_enhanced, img, image_res, num_col, num_column_is_classified, img_bin - - # pylint: disable=attribute-defined-outside-init - def get_image_and_scales(self, img_org, img_res, scale): - self.logger.debug("enter get_image_and_scales") - self.image = np.copy(img_res) - self.image_org = np.copy(img_org) - self.height_org = self.image.shape[0] - self.width_org = self.image.shape[1] - - self.img_hight_int = int(self.image.shape[0] * scale) - self.img_width_int = int(self.image.shape[1] * scale) - self.scale_y = self.img_hight_int / float(self.image.shape[0]) - self.scale_x = self.img_width_int / float(self.image.shape[1]) - - self.image = resize_image(self.image, self.img_hight_int, self.img_width_int) - - # Also set for the plotter - if self.plotter: - self.plotter.image_org = self.image_org - self.plotter.scale_y = self.scale_y - self.plotter.scale_x = self.scale_x - # Also set for the writer - self.writer.image_org = self.image_org - self.writer.scale_y = self.scale_y - self.writer.scale_x = self.scale_x - self.writer.height_org = self.height_org - self.writer.width_org = self.width_org - - def get_image_and_scales_after_enhancing(self, img_org, img_res): - self.logger.debug("enter get_image_and_scales_after_enhancing") - self.image = np.copy(img_res) - self.image = self.image.astype(np.uint8) - self.image_org = np.copy(img_org) - self.height_org = self.image_org.shape[0] - self.width_org = self.image_org.shape[1] - - self.scale_y = img_res.shape[0] / float(self.image_org.shape[0]) - self.scale_x = img_res.shape[1] / float(self.image_org.shape[1]) - - # Also set for the plotter - if self.plotter: - self.plotter.image_org = self.image_org - self.plotter.scale_y = self.scale_y - self.plotter.scale_x = self.scale_x - # Also set for the writer - self.writer.image_org = self.image_org - self.writer.scale_y = self.scale_y - self.writer.scale_x = self.scale_x - self.writer.height_org = self.height_org - self.writer.width_org = self.width_org - - def do_prediction( - self, patches, img, model, - n_batch_inference=1, marginal_of_patch_percent=0.1, - thresholding_for_some_classes_in_light_version=False, - thresholding_for_artificial_class_in_light_version=False, - thresholding_for_fl_light_version=False, - threshold_art_class_textline=0.1): - - self.logger.debug("enter do_prediction (patches=%d)", patches) - img_height_model = model.layers[-1].output_shape[1] - img_width_model = model.layers[-1].output_shape[2] - - if not patches: - img_h_page = img.shape[0] - img_w_page = img.shape[1] - img = img / float(255.0) - img = resize_image(img, img_height_model, img_width_model) - - label_p_pred = model.predict(img[np.newaxis], verbose=0) - seg = np.argmax(label_p_pred, axis=3)[0] - - if 
thresholding_for_artificial_class_in_light_version:
-                seg_art = label_p_pred[0,:,:,2]
-
-                seg_art[seg_art<threshold_art_class_textline] = 0
-                seg_art[seg_art>0] =1
-
-                skeleton_art = skeletonize(seg_art)
-                skeleton_art = skeleton_art*1
-
-                seg[skeleton_art==1]=2
-
-            if thresholding_for_fl_light_version:
-                seg_header = label_p_pred[0,:,:,2]
-
-                seg_header[seg_header<0.2] = 0
-                seg_header[seg_header>0] =1
-
-                seg[seg_header==1]=2
-
-            seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2)
-            prediction_true = resize_image(seg_color, img_h_page, img_w_page).astype(np.uint8)
-            return prediction_true
-
-        if img.shape[0] < img_height_model:
-            img = resize_image(img, img_height_model, img.shape[1])
-        if img.shape[1] < img_width_model:
-            img = resize_image(img, img.shape[0], img_width_model)
-
-        self.logger.debug("Patch size: %sx%s", img_height_model, img_width_model)
-        margin = int(marginal_of_patch_percent * img_height_model)
-        width_mid = img_width_model - 2 * margin
-        height_mid = img_height_model - 2 * margin
-        img = img / 255.
-        #img = img.astype(np.float16)
-        img_h = img.shape[0]
-        img_w = img.shape[1]
-        prediction_true = np.zeros((img_h, img_w, 3))
-        mask_true = np.zeros((img_h, img_w))
-        nxf = math.ceil(img_w / float(width_mid))
-        nyf = math.ceil(img_h / float(height_mid))
-
-        list_i_s = []
-        list_j_s = []
-        list_x_u = []
-        list_x_d = []
-        list_y_u = []
-        list_y_d = []
-
-        batch_indexer = 0
-        img_patch = np.zeros((n_batch_inference, img_height_model, img_width_model, 3))
-        for i in range(nxf):
-            for j in range(nyf):
-                index_x_d = i * width_mid
-                index_x_u = index_x_d + img_width_model
-                index_y_d = j * height_mid
-                index_y_u = index_y_d + img_height_model
-                if index_x_u > img_w:
-                    index_x_u = img_w
-                    index_x_d = img_w - img_width_model
-                if index_y_u > img_h:
-                    index_y_u = img_h
-                    index_y_d = img_h - img_height_model
-
-                list_i_s.append(i)
-                list_j_s.append(j)
-                list_x_u.append(index_x_u)
-                list_x_d.append(index_x_d)
-                list_y_d.append(index_y_d)
-                list_y_u.append(index_y_u)
-
-                img_patch[batch_indexer,:,:,:] = img[index_y_d:index_y_u, index_x_d:index_x_u, :]
-                batch_indexer += 1
-
-                if (batch_indexer == n_batch_inference or
-                    # last batch
-                    i == nxf - 1 and j == nyf - 1):
-                    self.logger.debug("predicting patches on %s", str(img_patch.shape))
-                    label_p_pred = model.predict(img_patch, verbose=0)
-                    seg = np.argmax(label_p_pred, axis=3)
-
-                    if thresholding_for_some_classes_in_light_version:
-                        seg_not_base = label_p_pred[:,:,:,4]
-                        seg_not_base[seg_not_base>0.03] =1
-                        seg_not_base[seg_not_base<1] =0
-
-                        seg_line = label_p_pred[:,:,:,3]
-                        seg_line[seg_line>0.1] =1
-                        seg_line[seg_line<1] =0
-
-                        seg_background = label_p_pred[:,:,:,0]
-                        seg_background[seg_background>0.25] =1
-                        seg_background[seg_background<1] =0
-
-                        seg[seg_not_base==1]=4
-                        seg[seg_background==1]=0
-                        seg[(seg_line==1) & (seg==0)]=3
-                    if thresholding_for_artificial_class_in_light_version:
-                        seg_art = label_p_pred[:,:,:,2]
-
-                        seg_art[seg_art<threshold_art_class_textline] = 0
-                        seg_art[seg_art>0] =1
-
-                        ##seg[seg_art==1]=2
-
-                    indexer_inside_batch = 0
-                    for i_batch, j_batch in zip(list_i_s, list_j_s):
-                        seg_in = seg[indexer_inside_batch]
-
-                        if thresholding_for_artificial_class_in_light_version:
-                            seg_in_art = seg_art[indexer_inside_batch]
-
-                        index_y_u_in = list_y_u[indexer_inside_batch]
-                        index_y_d_in = list_y_d[indexer_inside_batch]
-
-                        index_x_u_in = list_x_u[indexer_inside_batch]
-                        index_x_d_in = list_x_d[indexer_inside_batch]
-
-                        if i_batch == 0 and j_batch == 0:
-                            prediction_true[index_y_d_in + 0:index_y_u_in - margin,
-                                            index_x_d_in + 0:index_x_u_in - margin] = \
-                                                seg_in[0:-margin or None,
-                                                       0:-margin or None,
-                                                       np.newaxis]
-                            if 
thresholding_for_artificial_class_in_light_version: - prediction_true[index_y_d_in + 0:index_y_u_in - margin, - index_x_d_in + 0:index_x_u_in - margin, 1] = \ - seg_in_art[0:-margin or None, - 0:-margin or None] - - elif i_batch == nxf - 1 and j_batch == nyf - 1: - prediction_true[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + margin:index_x_u_in - 0] = \ - seg_in[margin:, - margin:, - np.newaxis] - if thresholding_for_artificial_class_in_light_version: - prediction_true[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + margin:index_x_u_in - 0, 1] = \ - seg_in_art[margin:, - margin:] - - elif i_batch == 0 and j_batch == nyf - 1: - prediction_true[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + 0:index_x_u_in - margin] = \ - seg_in[margin:, - 0:-margin or None, - np.newaxis] - if thresholding_for_artificial_class_in_light_version: - prediction_true[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + 0:index_x_u_in - margin, 1] = \ - seg_in_art[margin:, - 0:-margin or None] - - elif i_batch == nxf - 1 and j_batch == 0: - prediction_true[index_y_d_in + 0:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - 0] = \ - seg_in[0:-margin or None, - margin:, - np.newaxis] - if thresholding_for_artificial_class_in_light_version: - prediction_true[index_y_d_in + 0:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - 0, 1] = \ - seg_in_art[0:-margin or None, - margin:] - - elif i_batch == 0 and j_batch != 0 and j_batch != nyf - 1: - prediction_true[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + 0:index_x_u_in - margin] = \ - seg_in[margin:-margin or None, - 0:-margin or None, - np.newaxis] - if thresholding_for_artificial_class_in_light_version: - prediction_true[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + 0:index_x_u_in - margin, 1] = \ - seg_in_art[margin:-margin or None, - 0:-margin or None] - - elif i_batch == nxf - 1 and j_batch != 0 and j_batch != nyf - 1: - prediction_true[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - 0] = \ - seg_in[margin:-margin or None, - margin:, - np.newaxis] - if thresholding_for_artificial_class_in_light_version: - prediction_true[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - 0, 1] = \ - seg_in_art[margin:-margin or None, - margin:] - - elif i_batch != 0 and i_batch != nxf - 1 and j_batch == 0: - prediction_true[index_y_d_in + 0:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - margin] = \ - seg_in[0:-margin or None, - margin:-margin or None, - np.newaxis] - if thresholding_for_artificial_class_in_light_version: - prediction_true[index_y_d_in + 0:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - margin, 1] = \ - seg_in_art[0:-margin or None, - margin:-margin or None] - - elif i_batch != 0 and i_batch != nxf - 1 and j_batch == nyf - 1: - prediction_true[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + margin:index_x_u_in - margin] = \ - seg_in[margin:, - margin:-margin or None, - np.newaxis] - if thresholding_for_artificial_class_in_light_version: - prediction_true[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + margin:index_x_u_in - margin, 1] = \ - seg_in_art[margin:, - margin:-margin or None] - - else: - prediction_true[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - margin] = \ - seg_in[margin:-margin or None, - margin:-margin or None, - np.newaxis] - if thresholding_for_artificial_class_in_light_version: - prediction_true[index_y_d_in + 
margin:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - margin, 1] = \ - seg_in_art[margin:-margin or None, - margin:-margin or None] - indexer_inside_batch += 1 - - - list_i_s = [] - list_j_s = [] - list_x_u = [] - list_x_d = [] - list_y_u = [] - list_y_d = [] - - batch_indexer = 0 - img_patch[:] = 0 - - prediction_true = prediction_true.astype(np.uint8) - - if thresholding_for_artificial_class_in_light_version: - kernel_min = np.ones((3, 3), np.uint8) - prediction_true[:,:,0][prediction_true[:,:,0]==2] = 0 - - skeleton_art = skeletonize(prediction_true[:,:,1]) - skeleton_art = skeleton_art*1 - - skeleton_art = skeleton_art.astype('uint8') - - skeleton_art = cv2.dilate(skeleton_art, kernel_min, iterations=1) - - prediction_true[:,:,0][skeleton_art==1]=2 - #del model - gc.collect() - return prediction_true - - def do_padding_with_scale(self, img, scale): - h_n = int(img.shape[0]*scale) - w_n = int(img.shape[1]*scale) - - channel0_avg = int( np.mean(img[:,:,0]) ) - channel1_avg = int( np.mean(img[:,:,1]) ) - channel2_avg = int( np.mean(img[:,:,2]) ) - - h_diff = img.shape[0] - h_n - w_diff = img.shape[1] - w_n - - h_start = int(0.5 * h_diff) - w_start = int(0.5 * w_diff) - - img_res = resize_image(img, h_n, w_n) - #label_res = resize_image(label, h_n, w_n) - - img_scaled_padded = np.copy(img) - - #label_scaled_padded = np.zeros(label.shape) - - img_scaled_padded[:,:,0] = channel0_avg - img_scaled_padded[:,:,1] = channel1_avg - img_scaled_padded[:,:,2] = channel2_avg - - img_scaled_padded[h_start:h_start+h_n, w_start:w_start+w_n,:] = img_res[:,:,:] - #label_scaled_padded[h_start:h_start+h_n, w_start:w_start+w_n,:] = label_res[:,:,:] - - return img_scaled_padded#, label_scaled_padded - - def do_prediction_new_concept_scatter_nd( - self, patches, img, model, - n_batch_inference=1, marginal_of_patch_percent=0.1, - thresholding_for_some_classes_in_light_version=False, - thresholding_for_artificial_class_in_light_version=False): - - self.logger.debug("enter do_prediction_new_concept") - img_height_model = model.layers[-1].output_shape[1] - img_width_model = model.layers[-1].output_shape[2] - - if not patches: - img_h_page = img.shape[0] - img_w_page = img.shape[1] - img = img / 255.0 - img = resize_image(img, img_height_model, img_width_model) - - label_p_pred = model.predict(img[np.newaxis], verbose=0) - seg = np.argmax(label_p_pred, axis=3)[0] - - if thresholding_for_artificial_class_in_light_version: - #seg_text = label_p_pred[0,:,:,1] - #seg_text[seg_text<0.2] =0 - #seg_text[seg_text>0] =1 - #seg[seg_text==1]=1 - - seg_art = label_p_pred[0,:,:,4] - seg_art[seg_art<0.2] =0 - seg_art[seg_art>0] =1 - seg[seg_art==1]=4 - - seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2) - prediction_true = resize_image(seg_color, img_h_page, img_w_page).astype(np.uint8) - return prediction_true - - if img.shape[0] < img_height_model: - img = resize_image(img, img_height_model, img.shape[1]) - if img.shape[1] < img_width_model: - img = resize_image(img, img.shape[0], img_width_model) - - self.logger.debug("Patch size: %sx%s", img_height_model, img_width_model) - ##margin = int(marginal_of_patch_percent * img_height_model) - #width_mid = img_width_model - 2 * margin - #height_mid = img_height_model - 2 * margin - img = img / 255.0 - img = img.astype(np.float16) - img_h = img.shape[0] - img_w = img.shape[1] - - stride_x = img_width_model - 100 - stride_y = img_height_model - 100 - - one_tensor = tf.ones_like(img) - img_patches, one_patches = tf.image.extract_patches( - images=[img, one_tensor], 
-            sizes=[1, img_height_model, img_width_model, 1],
-            strides=[1, stride_y, stride_x, 1],
-            rates=[1, 1, 1, 1],
-            padding='SAME')
-        img_patches = tf.squeeze(img_patches)
-        one_patches = tf.squeeze(one_patches)
-        img_patches_resh = tf.reshape(img_patches, shape=(img_patches.shape[0] * img_patches.shape[1],
-                                                          img_height_model, img_width_model, 3))
-        pred_patches = model.predict(img_patches_resh, batch_size=n_batch_inference)
-        one_patches = tf.reshape(one_patches, shape=(img_patches.shape[0] * img_patches.shape[1],
-                                                     img_height_model, img_width_model, 3))
-        x = tf.range(img.shape[1])
-        y = tf.range(img.shape[0])
-        x, y = tf.meshgrid(x, y)
-        indices = tf.stack([y, x], axis=-1)
-
-        indices_patches = tf.image.extract_patches(
-            images=tf.expand_dims(indices, axis=0),
-            sizes=[1, img_height_model, img_width_model, 1],
-            strides=[1, stride_y, stride_x, 1],
-            rates=[1, 1, 1, 1],
-            padding='SAME')
-        indices_patches = tf.squeeze(indices_patches)
-        indices_patches = tf.reshape(indices_patches, shape=(img_patches.shape[0] * img_patches.shape[1],
-                                                             img_height_model, img_width_model, 2))
-        margin_y = int( 0.5 * (img_height_model - stride_y) )
-        margin_x = int( 0.5 * (img_width_model - stride_x) )
-
-        mask_margin = np.zeros((img_height_model, img_width_model))
-        mask_margin[margin_y:img_height_model - margin_y,
-                    margin_x:img_width_model - margin_x] = 1
-
-        indices_patches_array = indices_patches.numpy()
-        for i in range(indices_patches_array.shape[0]):
-            indices_patches_array[i,:,:,0] = indices_patches_array[i,:,:,0]*mask_margin
-            indices_patches_array[i,:,:,1] = indices_patches_array[i,:,:,1]*mask_margin
-
-        reconstructed = tf.scatter_nd(
-            indices=indices_patches_array,
-            updates=pred_patches,
-            shape=(img.shape[0], img.shape[1], pred_patches.shape[-1])).numpy()
-
-        prediction_true = np.argmax(reconstructed, axis=2).astype(np.uint8)
-        gc.collect()
-        return np.repeat(prediction_true[:, :, np.newaxis], 3, axis=2)
-
-    def do_prediction_new_concept(
-            self, patches, img, model,
-            n_batch_inference=1, marginal_of_patch_percent=0.1,
-            thresholding_for_some_classes_in_light_version=False,
-            thresholding_for_artificial_class_in_light_version=False,
-            threshold_art_class_textline=0.1,
-            threshold_art_class_layout=0.1):
-
-        self.logger.debug("enter do_prediction_new_concept")
-        img_height_model = model.layers[-1].output_shape[1]
-        img_width_model = model.layers[-1].output_shape[2]
-
-        if not patches:
-            img_h_page = img.shape[0]
-            img_w_page = img.shape[1]
-            img = img / 255.0
-            img = resize_image(img, img_height_model, img_width_model)
-
-            label_p_pred = model.predict(img[np.newaxis], verbose=0)
-            seg = np.argmax(label_p_pred, axis=3)[0]
-
-            seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2)
-            prediction_true = resize_image(seg_color, img_h_page, img_w_page).astype(np.uint8)
-
-            if thresholding_for_artificial_class_in_light_version:
-                kernel_min = np.ones((3, 3), np.uint8)
-                seg_art = label_p_pred[0,:,:,4]
-                seg_art[seg_art<threshold_art_class_layout] = 0
-                seg_art[seg_art>0] =1
-                #seg[seg_art==1]=4
-                seg_art = resize_image(seg_art, img_h_page, img_w_page).astype(np.uint8)
-
-                prediction_true[:,:,0][prediction_true[:,:,0]==4] = 0
-
-                skeleton_art = skeletonize(seg_art)
-                skeleton_art = skeleton_art*1
-
-                skeleton_art = skeleton_art.astype('uint8')
-
-                skeleton_art = cv2.dilate(skeleton_art, kernel_min, iterations=1)
-
-                prediction_true[:,:,0][skeleton_art==1] = 4
-
-            return prediction_true , resize_image(label_p_pred[0, :, :, 1] , img_h_page, img_w_page)
-
-        if img.shape[0] < img_height_model:
-            img = resize_image(img, img_height_model, img.shape[1])
-        if 
img.shape[1] < img_width_model:
-            img = resize_image(img, img.shape[0], img_width_model)
-
-        self.logger.debug("Patch size: %sx%s", img_height_model, img_width_model)
-        margin = int(marginal_of_patch_percent * img_height_model)
-        width_mid = img_width_model - 2 * margin
-        height_mid = img_height_model - 2 * margin
-        img = img / 255.0
-        img = img.astype(np.float16)
-        img_h = img.shape[0]
-        img_w = img.shape[1]
-        prediction_true = np.zeros((img_h, img_w, 3))
-        confidence_matrix = np.zeros((img_h, img_w))
-        mask_true = np.zeros((img_h, img_w))
-        nxf = img_w / float(width_mid)
-        nyf = img_h / float(height_mid)
-        nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf)
-        nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf)
-
-        list_i_s = []
-        list_j_s = []
-        list_x_u = []
-        list_x_d = []
-        list_y_u = []
-        list_y_d = []
-
-        batch_indexer = 0
-        img_patch = np.zeros((n_batch_inference, img_height_model, img_width_model, 3))
-        for i in range(nxf):
-            for j in range(nyf):
-                if i == 0:
-                    index_x_d = i * width_mid
-                    index_x_u = index_x_d + img_width_model
-                else:
-                    index_x_d = i * width_mid
-                    index_x_u = index_x_d + img_width_model
-                if j == 0:
-                    index_y_d = j * height_mid
-                    index_y_u = index_y_d + img_height_model
-                else:
-                    index_y_d = j * height_mid
-                    index_y_u = index_y_d + img_height_model
-                if index_x_u > img_w:
-                    index_x_u = img_w
-                    index_x_d = img_w - img_width_model
-                if index_y_u > img_h:
-                    index_y_u = img_h
-                    index_y_d = img_h - img_height_model
-
-                list_i_s.append(i)
-                list_j_s.append(j)
-                list_x_u.append(index_x_u)
-                list_x_d.append(index_x_d)
-                list_y_d.append(index_y_d)
-                list_y_u.append(index_y_u)
-
-                img_patch[batch_indexer] = img[index_y_d:index_y_u, index_x_d:index_x_u]
-                batch_indexer += 1
-
-                if (batch_indexer == n_batch_inference or
-                    # last batch
-                    i == nxf - 1 and j == nyf - 1):
-                    self.logger.debug("predicting patches on %s", str(img_patch.shape))
-                    label_p_pred = model.predict(img_patch,verbose=0)
-                    seg = np.argmax(label_p_pred, axis=3)
-
-                    if thresholding_for_some_classes_in_light_version:
-                        seg_art = label_p_pred[:,:,:,4]
-                        seg_art[seg_art<threshold_art_class_layout] = 0
-                        seg_art[seg_art>0] =1
-
-                        seg_line = label_p_pred[:,:,:,3]
-                        seg_line[seg_line>0.4] =1#seg_line[seg_line>0.5] =1#seg_line[seg_line>0.1] =1
-                        seg_line[seg_line<1] =0
-
-                        ##seg[seg_art==1]=4
-                        #seg[(seg_line==1) & (seg==0)]=3
-                    if thresholding_for_artificial_class_in_light_version:
-                        seg_art = label_p_pred[:,:,:,2]
-
-                        seg_art[seg_art<threshold_art_class_textline] = 0
-                        seg_art[seg_art>0] =1
-
-                        ##seg[seg_art==1]=2
-
-                    indexer_inside_batch = 0
-                    for i_batch, j_batch in zip(list_i_s, list_j_s):
-                        seg_in = seg[indexer_inside_batch]
-
-                        if (thresholding_for_artificial_class_in_light_version or
-                            thresholding_for_some_classes_in_light_version):
-                            seg_in_art = seg_art[indexer_inside_batch]
-
-                        index_y_u_in = list_y_u[indexer_inside_batch]
-                        index_y_d_in = list_y_d[indexer_inside_batch]
-
-                        index_x_u_in = list_x_u[indexer_inside_batch]
-                        index_x_d_in = list_x_d[indexer_inside_batch]
-
-                        if i_batch == 0 and j_batch == 0:
-                            prediction_true[index_y_d_in + 0:index_y_u_in - margin,
-                                            index_x_d_in + 0:index_x_u_in - margin] = \
-                                                seg_in[0:-margin or None,
-                                                       0:-margin or None,
-                                                       np.newaxis]
-                            confidence_matrix[index_y_d_in + 0:index_y_u_in - margin,
-                                              index_x_d_in + 0:index_x_u_in - margin] = \
-                                                  label_p_pred[0, 0:-margin or None,
-                                                               0:-margin or None,
-                                                               1]
-                            if (thresholding_for_artificial_class_in_light_version or
-                                thresholding_for_some_classes_in_light_version):
-                                prediction_true[index_y_d_in + 0:index_y_u_in - margin,
-                                                index_x_d_in + 0:index_x_u_in - margin, 1] = \
-                                                    seg_in_art[0:-margin or None,
-                                                               0:-margin or None]
-
-                        elif 
i_batch == nxf - 1 and j_batch == nyf - 1: - prediction_true[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + margin:index_x_u_in - 0] = \ - seg_in[margin:, - margin:, - np.newaxis] - confidence_matrix[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + margin:index_x_u_in - 0] = \ - label_p_pred[0, margin:, - margin:, - 1] - if (thresholding_for_artificial_class_in_light_version or - thresholding_for_some_classes_in_light_version): - prediction_true[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + margin:index_x_u_in - 0, 1] = \ - seg_in_art[margin:, - margin:] - - elif i_batch == 0 and j_batch == nyf - 1: - prediction_true[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + 0:index_x_u_in - margin] = \ - seg_in[margin:, - 0:-margin or None, - np.newaxis] - confidence_matrix[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + 0:index_x_u_in - margin] = \ - label_p_pred[0, margin:, - 0:-margin or None, - 1] - - if (thresholding_for_artificial_class_in_light_version or - thresholding_for_some_classes_in_light_version): - prediction_true[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + 0:index_x_u_in - margin, 1] = \ - seg_in_art[margin:, - 0:-margin or None] - - elif i_batch == nxf - 1 and j_batch == 0: - prediction_true[index_y_d_in + 0:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - 0] = \ - seg_in[0:-margin or None, - margin:, - np.newaxis] - confidence_matrix[index_y_d_in + 0:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - 0] = \ - label_p_pred[0, 0:-margin or None, - margin:, - 1] - if (thresholding_for_artificial_class_in_light_version or - thresholding_for_some_classes_in_light_version): - prediction_true[index_y_d_in + 0:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - 0, 1] = \ - seg_in_art[0:-margin or None, - margin:] - - elif i_batch == 0 and j_batch != 0 and j_batch != nyf - 1: - prediction_true[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + 0:index_x_u_in - margin] = \ - seg_in[margin:-margin or None, - 0:-margin or None, - np.newaxis] - confidence_matrix[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + 0:index_x_u_in - margin] = \ - label_p_pred[0, margin:-margin or None, - 0:-margin or None, - 1] - if (thresholding_for_artificial_class_in_light_version or - thresholding_for_some_classes_in_light_version): - prediction_true[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + 0:index_x_u_in - margin, 1] = \ - seg_in_art[margin:-margin or None, - 0:-margin or None] - elif i_batch == nxf - 1 and j_batch != 0 and j_batch != nyf - 1: - prediction_true[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - 0] = \ - seg_in[margin:-margin or None, - margin:, - np.newaxis] - confidence_matrix[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - 0] = \ - label_p_pred[0, margin:-margin or None, - margin:, - 1] - if (thresholding_for_artificial_class_in_light_version or - thresholding_for_some_classes_in_light_version): - prediction_true[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - 0, 1] = \ - seg_in_art[margin:-margin or None, - margin:] - elif i_batch != 0 and i_batch != nxf - 1 and j_batch == 0: - prediction_true[index_y_d_in + 0:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - margin] = \ - seg_in[0:-margin or None, - margin:-margin or None, - np.newaxis] - confidence_matrix[index_y_d_in + 0:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - 
margin] = \ - label_p_pred[0, 0:-margin or None, - margin:-margin or None, - 1] - if (thresholding_for_artificial_class_in_light_version or - thresholding_for_some_classes_in_light_version): - prediction_true[index_y_d_in + 0:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - margin, 1] = \ - seg_in_art[0:-margin or None, - margin:-margin or None] - elif i_batch != 0 and i_batch != nxf - 1 and j_batch == nyf - 1: - prediction_true[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + margin:index_x_u_in - margin] = \ - seg_in[margin:, - margin:-margin or None, - np.newaxis] - confidence_matrix[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + margin:index_x_u_in - margin] = \ - label_p_pred[0, margin:, - margin:-margin or None, - 1] - if (thresholding_for_artificial_class_in_light_version or - thresholding_for_some_classes_in_light_version): - prediction_true[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + margin:index_x_u_in - margin, 1] = \ - seg_in_art[margin:, - margin:-margin or None] - else: - prediction_true[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - margin] = \ - seg_in[margin:-margin or None, - margin:-margin or None, - np.newaxis] - confidence_matrix[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - margin] = \ - label_p_pred[0, margin:-margin or None, - margin:-margin or None, - 1] - if (thresholding_for_artificial_class_in_light_version or - thresholding_for_some_classes_in_light_version): - prediction_true[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - margin, 1] = \ - seg_in_art[margin:-margin or None, - margin:-margin or None] - indexer_inside_batch += 1 - - list_i_s = [] - list_j_s = [] - list_x_u = [] - list_x_d = [] - list_y_u = [] - list_y_d = [] - - batch_indexer = 0 - img_patch[:] = 0 - - prediction_true = prediction_true.astype(np.uint8) - - if thresholding_for_artificial_class_in_light_version: - kernel_min = np.ones((3, 3), np.uint8) - prediction_true[:,:,0][prediction_true[:,:,0]==2] = 0 - - skeleton_art = skeletonize(prediction_true[:,:,1]) - skeleton_art = skeleton_art*1 - - skeleton_art = skeleton_art.astype('uint8') - - skeleton_art = cv2.dilate(skeleton_art, kernel_min, iterations=1) - - prediction_true[:,:,0][skeleton_art==1]=2 - - if thresholding_for_some_classes_in_light_version: - kernel_min = np.ones((3, 3), np.uint8) - prediction_true[:,:,0][prediction_true[:,:,0]==4] = 0 - - skeleton_art = skeletonize(prediction_true[:,:,1]) - skeleton_art = skeleton_art*1 - - skeleton_art = skeleton_art.astype('uint8') - - skeleton_art = cv2.dilate(skeleton_art, kernel_min, iterations=1) - - prediction_true[:,:,0][skeleton_art==1]=4 - gc.collect() - return prediction_true, confidence_matrix - - def extract_page(self): - self.logger.debug("enter extract_page") - cont_page = [] - if not self.ignore_page_extraction: - img = np.copy(self.image)#cv2.GaussianBlur(self.image, (5, 5), 0) - img_page_prediction = self.do_prediction(False, img, self.models["page"]) - imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY) - _, thresh = cv2.threshold(imgray, 0, 255, 0) - ##thresh = cv2.dilate(thresh, KERNEL, iterations=3) - contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - - if len(contours)>0: - cnt_size = np.array([cv2.contourArea(contours[j]) - for j in range(len(contours))]) - cnt = contours[np.argmax(cnt_size)] - x, y, w, h = cv2.boundingRect(cnt) - #if x <= 30: - #w += x - #x = 0 - #if 
(self.image.shape[1] - (x + w)) <= 30:
-                #w = w + (self.image.shape[1] - (x + w))
-                #if y <= 30:
-                #h = h + y
-                #y = 0
-                #if (self.image.shape[0] - (y + h)) <= 30:
-                #h = h + (self.image.shape[0] - (y + h))
-                box = [x, y, w, h]
-            else:
-                box = [0, 0, img.shape[1], img.shape[0]]
-                # guard against the empty-contours case: cont_page below needs a
-                # contour, so fall back to the full-image rectangle
-                cnt = np.array([[[0, 0]],
-                                [[img.shape[1], 0]],
-                                [[img.shape[1], img.shape[0]]],
-                                [[0, img.shape[0]]]])
-            cropped_page, page_coord = crop_image_inside_box(box, self.image)
-            cont_page = [cnt]
-            #cont_page.append(np.array([[page_coord[2], page_coord[0]],
-            #[page_coord[3], page_coord[0]],
-            #[page_coord[3], page_coord[1]],
-            #[page_coord[2], page_coord[1]]]))
-            self.logger.debug("exit extract_page")
-        else:
-            box = [0, 0, self.image.shape[1], self.image.shape[0]]
-            cropped_page, page_coord = crop_image_inside_box(box, self.image)
-            cont_page.append(np.array([[page_coord[2], page_coord[0]],
-                                       [page_coord[3], page_coord[0]],
-                                       [page_coord[3], page_coord[1]],
-                                       [page_coord[2], page_coord[1]]]))
-        return cropped_page, page_coord, cont_page
-
-    def early_page_for_num_of_column_classification(self,img_bin):
-        if not self.ignore_page_extraction:
-            self.logger.debug("enter early_page_for_num_of_column_classification")
-            if self.input_binary:
-                img = np.copy(img_bin).astype(np.uint8)
-            else:
-                img = self.imread()
-            img = cv2.GaussianBlur(img, (5, 5), 0)
-            img_page_prediction = self.do_prediction(False, img, self.models["page"])
-
-            imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY)
-            _, thresh = cv2.threshold(imgray, 0, 255, 0)
-            thresh = cv2.dilate(thresh, KERNEL, iterations=3)
-            contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
-            if len(contours)>0:
-                cnt_size = np.array([cv2.contourArea(contours[j])
-                                     for j in range(len(contours))])
-                cnt = contours[np.argmax(cnt_size)]
-                box = cv2.boundingRect(cnt)
-            else:
-                box = [0, 0, img.shape[1], img.shape[0]]
-            cropped_page, page_coord = crop_image_inside_box(box, img)
-
-            self.logger.debug("exit early_page_for_num_of_column_classification")
-        else:
-            img = self.imread()
-            box = [0, 0, img.shape[1], img.shape[0]]
-            cropped_page, page_coord = crop_image_inside_box(box, img)
-        return cropped_page, page_coord
-
-    def extract_text_regions_new(self, img, patches, cols):
-        self.logger.debug("enter extract_text_regions")
-        img_height_h = img.shape[0]
-        img_width_h = img.shape[1]
-        model_region = self.models["region_fl"] if patches else self.models["region_fl_np"]
-
-        # default initialisation: without it, the patches-with-columns path below
-        # would pass an undefined name to do_prediction
-        thresholding_for_fl_light_version = False
-        if self.light_version:
-            thresholding_for_fl_light_version = True
-        elif not patches:
-            img = otsu_copy_binary(img).astype(np.uint8)
-            prediction_regions = None
-            thresholding_for_fl_light_version = False
-        elif cols:
-            img = otsu_copy_binary(img).astype(np.uint8)
-            if cols == 1:
-                img = resize_image(img, int(img_height_h * 1000 / float(img_width_h)), 1000).astype(np.uint8)
-            elif cols == 2:
-                img = resize_image(img, int(img_height_h * 1300 / float(img_width_h)), 1300).astype(np.uint8)
-            elif cols == 3:
-                img = resize_image(img, int(img_height_h * 1600 / float(img_width_h)), 1600).astype(np.uint8)
-            elif cols == 4:
-                img = resize_image(img, int(img_height_h * 1900 / float(img_width_h)), 1900).astype(np.uint8)
-            elif cols == 5:
-                img = resize_image(img, int(img_height_h * 2200 / float(img_width_h)), 2200).astype(np.uint8)
-            else:
-                img = resize_image(img, int(img_height_h * 2500 / float(img_width_h)), 2500).astype(np.uint8)
-
-        prediction_regions = self.do_prediction(patches, img, model_region,
-                                                marginal_of_patch_percent=0.1,
-                                                n_batch_inference=3,
-                                                thresholding_for_fl_light_version=thresholding_for_fl_light_version)
-        prediction_regions = resize_image(prediction_regions, img_height_h,
img_width_h) - self.logger.debug("exit extract_text_regions") - return prediction_regions, prediction_regions - - def extract_text_regions(self, img, patches, cols): - self.logger.debug("enter extract_text_regions") - img_height_h = img.shape[0] - img_width_h = img.shape[1] - model_region = self.models["region_fl"] if patches else self.models["region_fl_np"] - - if not patches: - img = otsu_copy_binary(img) - img = img.astype(np.uint8) - prediction_regions2 = None - elif cols: - if cols == 1: - img_height_new = int(img_height_h * 0.7) - img_width_new = int(img_width_h * 0.7) - elif cols == 2: - img_height_new = int(img_height_h * 0.4) - img_width_new = int(img_width_h * 0.4) - else: - img_height_new = int(img_height_h * 0.3) - img_width_new = int(img_width_h * 0.3) - img2 = otsu_copy_binary(img) - img2 = img2.astype(np.uint8) - img2 = resize_image(img2, img_height_new, img_width_new) - prediction_regions2 = self.do_prediction(patches, img2, model_region, marginal_of_patch_percent=0.1) - prediction_regions2 = resize_image(prediction_regions2, img_height_h, img_width_h) - - img = otsu_copy_binary(img).astype(np.uint8) - if cols == 1: - img = resize_image(img, int(img_height_h * 0.5), int(img_width_h * 0.5)).astype(np.uint8) - elif cols == 2 and img_width_h >= 2000: - img = resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)).astype(np.uint8) - elif cols == 3 and ((self.scale_x == 1 and img_width_h > 3000) or - (self.scale_x != 1 and img_width_h > 2800)): - img = resize_image(img, 2800 * img_height_h // img_width_h, 2800).astype(np.uint8) - elif cols == 4 and ((self.scale_x == 1 and img_width_h > 4000) or - (self.scale_x != 1 and img_width_h > 3700)): - img = resize_image(img, 3700 * img_height_h // img_width_h, 3700).astype(np.uint8) - elif cols == 4: - img = resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)).astype(np.uint8) - elif cols == 5 and self.scale_x == 1 and img_width_h > 5000: - img = resize_image(img, int(img_height_h * 0.7), int(img_width_h * 0.7)).astype(np.uint8) - elif cols == 5: - img = resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)).astype(np.uint8) - elif img_width_h > 5600: - img = resize_image(img, 5600 * img_height_h // img_width_h, 5600).astype(np.uint8) - else: - img = resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)).astype(np.uint8) - - prediction_regions = self.do_prediction(patches, img, model_region, marginal_of_patch_percent=0.1) - prediction_regions = resize_image(prediction_regions, img_height_h, img_width_h) - self.logger.debug("exit extract_text_regions") - return prediction_regions, prediction_regions2 - - def get_textlines_of_a_textregion_sorted(self, textlines_textregion, cx_textline, cy_textline, w_h_textline): - N = len(cy_textline) - if N==0: - return [] - - diff_cy = np.abs( np.diff(sorted(cy_textline)) ) - diff_cx = np.abs(np.diff(sorted(cx_textline)) ) - - - if len(diff_cy)>0: - mean_y_diff = np.mean(diff_cy) - mean_x_diff = np.mean(diff_cx) - count_hor = np.count_nonzero(np.array(w_h_textline) > 1) - count_ver = len(w_h_textline) - count_hor - - else: - mean_y_diff = 0 - mean_x_diff = 0 - count_hor = 1 - count_ver = 0 - - - if count_hor >= count_ver: - row_threshold = mean_y_diff / 1.5 if mean_y_diff > 0 else 10 - - indices_sorted_by_y = sorted(range(N), key=lambda i: cy_textline[i]) - - rows = [] - current_row = [indices_sorted_by_y[0]] - for i in range(1, N): - current_idx = indices_sorted_by_y[i] - prev_idx = current_row[0] - if abs(cy_textline[current_idx] - 
cy_textline[prev_idx]) <= row_threshold: - current_row.append(current_idx) - else: - rows.append(current_row) - current_row = [current_idx] - rows.append(current_row) - - sorted_textlines = [] - for row in rows: - row_sorted = sorted(row, key=lambda i: cx_textline[i]) - for idx in row_sorted: - sorted_textlines.append(textlines_textregion[idx]) - - else: - row_threshold = mean_x_diff / 1.5 if mean_x_diff > 0 else 10 - indices_sorted_by_x = sorted(range(N), key=lambda i: cx_textline[i]) - - rows = [] - current_row = [indices_sorted_by_x[0]] - - for i in range(1, N): - current_idy = indices_sorted_by_x[i] - prev_idy = current_row[0] - if abs(cx_textline[current_idy] - cx_textline[prev_idy] ) <= row_threshold: - current_row.append(current_idy) - else: - rows.append(current_row) - current_row = [current_idy] - rows.append(current_row) - - sorted_textlines = [] - for row in rows: - row_sorted = sorted(row , key=lambda i: cy_textline[i]) - for idy in row_sorted: - sorted_textlines.append(textlines_textregion[idy]) - - return sorted_textlines - - def get_slopes_and_deskew_new_light2(self, contours_par, textline_mask_tot, boxes, slope_deskew): - - polygons_of_textlines = return_contours_of_interested_region(textline_mask_tot,1,0.00001) - cx_main_tot, cy_main_tot = find_center_of_contours(polygons_of_textlines) - w_h_textlines = [cv2.boundingRect(polygon)[2:] for polygon in polygons_of_textlines] - - args_textlines = np.arange(len(polygons_of_textlines)) - all_found_textline_polygons = [] - slopes = [] - all_box_coord =[] - - for index, con_region_ind in enumerate(contours_par): - results = [cv2.pointPolygonTest(con_region_ind, (cx_main_tot[ind], cy_main_tot[ind]), False) - for ind in args_textlines ] - results = np.array(results) - indexes_in = args_textlines[results==1] - textlines_ins = [polygons_of_textlines[ind] for ind in indexes_in] - cx_textline_in = [cx_main_tot[ind] for ind in indexes_in] - cy_textline_in = [cy_main_tot[ind] for ind in indexes_in] - w_h_textlines_in = [w_h_textlines[ind][0] / float(w_h_textlines[ind][1]) for ind in indexes_in] - - textlines_ins = self.get_textlines_of_a_textregion_sorted(textlines_ins, - cx_textline_in, - cy_textline_in, - w_h_textlines_in) - - all_found_textline_polygons.append(textlines_ins)#[::-1]) - slopes.append(slope_deskew) - - crop_coor = box2rect(boxes[index]) - all_box_coord.append(crop_coor) - - return (all_found_textline_polygons, - all_box_coord, - slopes) - - def get_slopes_and_deskew_new_light(self, contours, contours_par, textline_mask_tot, boxes, slope_deskew): - if not len(contours): - return [], [], [] - self.logger.debug("enter get_slopes_and_deskew_new_light") - with share_ndarray(textline_mask_tot) as textline_mask_tot_shared: - results = self.executor.map(partial(do_work_of_slopes_new_light, - textline_mask_tot_ea=textline_mask_tot_shared, - slope_deskew=slope_deskew, - textline_light=self.textline_light, - logger=self.logger,), - boxes, contours, contours_par) - results = list(results) # exhaust prior to release - #textline_polygons, box_coord, slopes = zip(*results) - self.logger.debug("exit get_slopes_and_deskew_new_light") - return tuple(zip(*results)) - - def get_slopes_and_deskew_new(self, contours, contours_par, textline_mask_tot, boxes, slope_deskew): - if not len(contours): - return [], [], [] - self.logger.debug("enter get_slopes_and_deskew_new") - with share_ndarray(textline_mask_tot) as textline_mask_tot_shared: - results = self.executor.map(partial(do_work_of_slopes_new, - textline_mask_tot_ea=textline_mask_tot_shared, 
- slope_deskew=slope_deskew, - MAX_SLOPE=MAX_SLOPE, - KERNEL=KERNEL, - logger=self.logger, - plotter=self.plotter,), - boxes, contours, contours_par) - results = list(results) # exhaust prior to release - #textline_polygons, box_coord, slopes = zip(*results) - self.logger.debug("exit get_slopes_and_deskew_new") - return tuple(zip(*results)) - - def get_slopes_and_deskew_new_curved(self, contours_par, textline_mask_tot, boxes, - mask_texts_only, num_col, scale_par, slope_deskew): - if not len(contours_par): - return [], [], [] - self.logger.debug("enter get_slopes_and_deskew_new_curved") - with share_ndarray(textline_mask_tot) as textline_mask_tot_shared: - with share_ndarray(mask_texts_only) as mask_texts_only_shared: - results = self.executor.map(partial(do_work_of_slopes_new_curved, - textline_mask_tot_ea=textline_mask_tot_shared, - mask_texts_only=mask_texts_only_shared, - num_col=num_col, - scale_par=scale_par, - slope_deskew=slope_deskew, - MAX_SLOPE=MAX_SLOPE, - KERNEL=KERNEL, - logger=self.logger, - plotter=self.plotter,), - boxes, contours_par) - results = list(results) # exhaust prior to release - #textline_polygons, box_coord, slopes = zip(*results) - self.logger.debug("exit get_slopes_and_deskew_new_curved") - return tuple(zip(*results)) - - def textline_contours(self, img, use_patches, scaler_h, scaler_w, num_col_classifier=None): - self.logger.debug('enter textline_contours') - - #img = img.astype(np.uint8) - img_org = np.copy(img) - img_h = img_org.shape[0] - img_w = img_org.shape[1] - img = resize_image(img_org, int(img_org.shape[0] * scaler_h), int(img_org.shape[1] * scaler_w)) - - prediction_textline = self.do_prediction(use_patches, img, self.models["textline"], - marginal_of_patch_percent=0.15, - n_batch_inference=3, - thresholding_for_artificial_class_in_light_version=self.textline_light, - threshold_art_class_textline=self.threshold_art_class_textline) - #if not self.textline_light: - #if num_col_classifier==1: - #prediction_textline_nopatch = self.do_prediction(False, img, self.models["textline"]) - #prediction_textline[:,:][prediction_textline_nopatch[:,:]==0] = 0 - - prediction_textline = resize_image(prediction_textline, img_h, img_w) - textline_mask_tot_ea_art = (prediction_textline[:,:]==2)*1 - - old_art = np.copy(textline_mask_tot_ea_art) - if not self.textline_light: - textline_mask_tot_ea_art = textline_mask_tot_ea_art.astype('uint8') - #textline_mask_tot_ea_art = cv2.dilate(textline_mask_tot_ea_art, KERNEL, iterations=1) - prediction_textline[:,:][textline_mask_tot_ea_art[:,:]==1]=2 - """ - else: - textline_mask_tot_ea_art = textline_mask_tot_ea_art.astype('uint8') - hor_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 1)) - - kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)) - ##cv2.imwrite('textline_mask_tot_ea_art.png', textline_mask_tot_ea_art) - textline_mask_tot_ea_art = cv2.dilate(textline_mask_tot_ea_art, hor_kernel, iterations=1) - - ###cv2.imwrite('dil_textline_mask_tot_ea_art.png', dil_textline_mask_tot_ea_art) - - textline_mask_tot_ea_art = textline_mask_tot_ea_art.astype('uint8') - - #print(np.shape(dil_textline_mask_tot_ea_art), np.unique(dil_textline_mask_tot_ea_art), 'dil_textline_mask_tot_ea_art') - tsk = time.time() - skeleton_art_textline = skeletonize(textline_mask_tot_ea_art[:,:,0]) - - skeleton_art_textline = skeleton_art_textline*1 - - skeleton_art_textline = skeleton_art_textline.astype('uint8') - - skeleton_art_textline = cv2.dilate(skeleton_art_textline, kernel, iterations=1) - - 
#print(np.unique(skeleton_art_textline), np.shape(skeleton_art_textline)) - - #print(skeleton_art_textline, np.unique(skeleton_art_textline)) - - #cv2.imwrite('skeleton_art_textline.png', skeleton_art_textline) - - - prediction_textline[:,:,0][skeleton_art_textline[:,:]==1]=2 - - #cv2.imwrite('prediction_textline1.png', prediction_textline[:,:,0]) - - ##hor_kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 1)) - ##ver_kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 3)) - ##textline_mask_tot_ea_main = (prediction_textline[:,:]==1)*1 - ##textline_mask_tot_ea_main = textline_mask_tot_ea_main.astype('uint8') - - ##dil_textline_mask_tot_ea_main = cv2.erode(textline_mask_tot_ea_main, ver_kernel2, iterations=1) - - ##dil_textline_mask_tot_ea_main = cv2.dilate(textline_mask_tot_ea_main, hor_kernel2, iterations=1) - - ##dil_textline_mask_tot_ea_main = cv2.dilate(textline_mask_tot_ea_main, ver_kernel2, iterations=1) - - ##prediction_textline[:,:][dil_textline_mask_tot_ea_main[:,:]==1]=1 - - """ - - textline_mask_tot_ea_lines = (prediction_textline[:,:]==1)*1 - textline_mask_tot_ea_lines = textline_mask_tot_ea_lines.astype('uint8') - if not self.textline_light: - textline_mask_tot_ea_lines = cv2.dilate(textline_mask_tot_ea_lines, KERNEL, iterations=1) - - prediction_textline[:,:][textline_mask_tot_ea_lines[:,:]==1]=1 - if not self.textline_light: - prediction_textline[:,:][old_art[:,:]==1]=2 - - #cv2.imwrite('prediction_textline2.png', prediction_textline[:,:,0]) - - prediction_textline_longshot = self.do_prediction(False, img, self.models["textline"]) - prediction_textline_longshot_true_size = resize_image(prediction_textline_longshot, img_h, img_w) - - - #cv2.imwrite('prediction_textline.png', prediction_textline[:,:,0]) - #sys.exit() - self.logger.debug('exit textline_contours') - return ((prediction_textline[:, :, 0]==1).astype(np.uint8), - (prediction_textline_longshot_true_size[:, :, 0]==1).astype(np.uint8)) - - - def get_regions_light_v_extract_only_images(self,img,is_image_enhanced, num_col_classifier): - self.logger.debug("enter get_regions_extract_images_only") - erosion_hurts = False - img_org = np.copy(img) - img_height_h = img_org.shape[0] - img_width_h = img_org.shape[1] - - if num_col_classifier == 1: - img_w_new = 700 - elif num_col_classifier == 2: - img_w_new = 900 - elif num_col_classifier == 3: - img_w_new = 1500 - elif num_col_classifier == 4: - img_w_new = 1800 - elif num_col_classifier == 5: - img_w_new = 2200 - elif num_col_classifier == 6: - img_w_new = 2500 - img_h_new = int(img.shape[0] / float(img.shape[1]) * img_w_new) - img_resized = resize_image(img,img_h_new, img_w_new ) - - prediction_regions_org, _ = self.do_prediction_new_concept(True, img_resized, self.models["region"]) - - prediction_regions_org = resize_image(prediction_regions_org,img_height_h, img_width_h ) - image_page, page_coord, cont_page = self.extract_page() - - prediction_regions_org = prediction_regions_org[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - prediction_regions_org=prediction_regions_org[:,:,0] - - mask_lines_only = (prediction_regions_org[:,:] ==3)*1 - mask_texts_only = (prediction_regions_org[:,:] ==1)*1 - mask_images_only=(prediction_regions_org[:,:] ==2)*1 - - polygons_seplines, hir_seplines = return_contours_of_image(mask_lines_only) - polygons_seplines = filter_contours_area_of_image( - mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001, dilate=1) - - polygons_of_only_texts = 
return_contours_of_interested_region(mask_texts_only,1,0.00001) - polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) - - text_regions_p_true = np.zeros(prediction_regions_org.shape) - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_lines, color=(3,3,3)) - - text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts=polygons_of_only_texts, color=(1,1,1)) - - text_regions_p_true[text_regions_p_true.shape[0]-15:text_regions_p_true.shape[0], :] = 0 - text_regions_p_true[:, text_regions_p_true.shape[1]-15:text_regions_p_true.shape[1]] = 0 - - ##polygons_of_images = return_contours_of_interested_region(text_regions_p_true, 2, 0.0001) - polygons_of_images = return_contours_of_interested_region(text_regions_p_true, 2, 0.001) - image_boundary_of_doc = np.zeros((text_regions_p_true.shape[0], text_regions_p_true.shape[1])) - - ###image_boundary_of_doc[:6, :] = 1 - ###image_boundary_of_doc[text_regions_p_true.shape[0]-6:text_regions_p_true.shape[0], :] = 1 - - ###image_boundary_of_doc[:, :6] = 1 - ###image_boundary_of_doc[:, text_regions_p_true.shape[1]-6:text_regions_p_true.shape[1]] = 1 - - polygons_of_images_fin = [] - for ploy_img_ind in polygons_of_images: - """ - test_poly_image = np.zeros((text_regions_p_true.shape[0], text_regions_p_true.shape[1])) - test_poly_image = cv2.fillPoly(test_poly_image, pts=[ploy_img_ind], color=(1,1,1)) - - test_poly_image = test_poly_image + image_boundary_of_doc - test_poly_image_intersected_area = ( test_poly_image[:,:]==2 )*1 - - test_poly_image_intersected_area = test_poly_image_intersected_area.sum() - - if test_poly_image_intersected_area==0: - ##polygons_of_images_fin.append(ploy_img_ind) - - box = cv2.boundingRect(ploy_img_ind) - page_coord_img = box2rect(box) - # cont_page.append(np.array([[page_coord[2], page_coord[0]], - # [page_coord[3], page_coord[0]], - # [page_coord[3], page_coord[1]], - # [page_coord[2], page_coord[1]]])) - polygons_of_images_fin.append(np.array([[page_coord_img[2], page_coord_img[0]], - [page_coord_img[3], page_coord_img[0]], - [page_coord_img[3], page_coord_img[1]], - [page_coord_img[2], page_coord_img[1]]]) ) - """ - box = x, y, w, h = cv2.boundingRect(ploy_img_ind) - if h < 150 or w < 150: - pass - else: - page_coord_img = box2rect(box) - # cont_page.append(np.array([[page_coord[2], page_coord[0]], - # [page_coord[3], page_coord[0]], - # [page_coord[3], page_coord[1]], - # [page_coord[2], page_coord[1]]])) - polygons_of_images_fin.append(np.array([[page_coord_img[2], page_coord_img[0]], - [page_coord_img[3], page_coord_img[0]], - [page_coord_img[3], page_coord_img[1]], - [page_coord_img[2], page_coord_img[1]]])) - - self.logger.debug("exit get_regions_extract_images_only") - return (text_regions_p_true, - erosion_hurts, - polygons_seplines, - polygons_of_images_fin, - image_page, - page_coord, - cont_page) - - def get_regions_light_v(self,img,is_image_enhanced, num_col_classifier): - self.logger.debug("enter get_regions_light_v") - t_in = time.time() - erosion_hurts = False - img_org = np.copy(img) - img_height_h = img_org.shape[0] - img_width_h = img_org.shape[1] - - #print(num_col_classifier,'num_col_classifier') - - if num_col_classifier == 1: - img_w_new = 1000 - elif num_col_classifier == 2: - img_w_new = 1500#1500 - elif num_col_classifier == 3: - img_w_new = 2000 - elif num_col_classifier == 4: - img_w_new = 2500 - elif num_col_classifier == 5: - img_w_new = 3000 - else: - img_w_new = 4000 - 
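-        # the widths above are canonical values per column count (more columns
-        # need more horizontal resolution); the height is derived next so the
-        # page keeps its original aspect ratio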
-        img_h_new = img_w_new * img_org.shape[0] // img_org.shape[1]
-        img_resized = resize_image(img,img_h_new, img_w_new )
-
-        t_bin = time.time()
-        #if (not self.input_binary) or self.full_layout:
-        #if self.input_binary:
-        #img_bin = np.copy(img_resized)
-        ###if (not self.input_binary and self.full_layout) or (not self.input_binary and num_col_classifier >= 30):
-        ###prediction_bin = self.do_prediction(True, img_resized, self.models["binarization"], n_batch_inference=5)
-
-        ####print("inside bin ", time.time()-t_bin)
-        ###prediction_bin=prediction_bin[:,:,0]
-        ###prediction_bin = (prediction_bin[:,:]==0)*1
-        ###prediction_bin = prediction_bin*255
-
-        ###prediction_bin =np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2)
-
-        ###prediction_bin = prediction_bin.astype(np.uint16)
-        ####img= np.copy(prediction_bin)
-        ###img_bin = np.copy(prediction_bin)
-        ###else:
-        ###img_bin = np.copy(img_resized)
-        if (self.ocr and self.tr) and not self.input_binary:
-            prediction_bin = self.do_prediction(True, img_resized, self.models["binarization"], n_batch_inference=5)
-            prediction_bin = 255 * (prediction_bin[:,:,0] == 0)
-            prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2)
-            prediction_bin = prediction_bin.astype(np.uint16)
-            #img= np.copy(prediction_bin)
-            img_bin = np.copy(prediction_bin)
-        else:
-            img_bin = np.copy(img_resized)
-        #print("inside 1 ", time.time()-t_in)
-
-        ###textline_mask_tot_ea = self.run_textline(img_bin)
-        self.logger.debug("detecting textlines on %s with %d colors",
-                          str(img_resized.shape), len(np.unique(img_resized)))
-        textline_mask_tot_ea = self.run_textline(img_resized, num_col_classifier)
-        textline_mask_tot_ea = resize_image(textline_mask_tot_ea,img_height_h, img_width_h )
-
-        #print(self.image_org.shape)
-        #cv2.imwrite('textline.png', textline_mask_tot_ea)
-
-        #plt.imshow(self.image_page_org_size)
-        #plt.show()
-        if self.skip_layout_and_reading_order:
-            img_bin = resize_image(img_bin,img_height_h, img_width_h )
-            self.logger.debug("exit get_regions_light_v")
-            return None, erosion_hurts, None, None, textline_mask_tot_ea, img_bin, None
-
-        #print("inside 2 ", time.time()-t_in)
-        if num_col_classifier == 1 or num_col_classifier == 2:
-            if self.image_org.shape[0]/self.image_org.shape[1] > 2.5:
-                self.logger.debug("resized to %dx%d for %d cols",
-                                  img_resized.shape[1], img_resized.shape[0], num_col_classifier)
-                prediction_regions_org, confidence_matrix = self.do_prediction_new_concept(
-                    True, img_resized, self.models["region_1_2"], n_batch_inference=1,
-                    thresholding_for_some_classes_in_light_version=True,
-                    threshold_art_class_layout=self.threshold_art_class_layout)
-            else:
-                prediction_regions_org = np.zeros((self.image_org.shape[0], self.image_org.shape[1], 3))
-                confidence_matrix = np.zeros((self.image_org.shape[0], self.image_org.shape[1]))
-                prediction_regions_page, confidence_matrix_page = self.do_prediction_new_concept(
-                    False, self.image_page_org_size, self.models["region_1_2"], n_batch_inference=1,
-                    thresholding_for_artificial_class_in_light_version=True,
-                    threshold_art_class_layout=self.threshold_art_class_layout)
-                ys = slice(*self.page_coord[0:2])
-                xs = slice(*self.page_coord[2:4])
-                prediction_regions_org[ys, xs] = prediction_regions_page
-                confidence_matrix[ys, xs] = confidence_matrix_page
-
-        else:
-            new_h = (900+ (num_col_classifier-3)*100)
-            img_resized = resize_image(img_bin, int(new_h * img_bin.shape[0] /img_bin.shape[1]), new_h)
-            self.logger.debug("resized to %dx%d (new_h=%d) for %d cols",
-                              img_resized.shape[1],
img_resized.shape[0], new_h, num_col_classifier) - prediction_regions_org, confidence_matrix = self.do_prediction_new_concept( - True, img_resized, self.models["region_1_2"], n_batch_inference=2, - thresholding_for_some_classes_in_light_version=True, - threshold_art_class_layout=self.threshold_art_class_layout) - ###prediction_regions_org = self.do_prediction(True, img_bin, self.models["region"], - ###n_batch_inference=3, - ###thresholding_for_some_classes_in_light_version=True) - #print("inside 3 ", time.time()-t_in) - #plt.imshow(prediction_regions_org[:,:,0]) - #plt.show() - - prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) - confidence_matrix = resize_image(confidence_matrix, img_height_h, img_width_h ) - img_bin = resize_image(img_bin, img_height_h, img_width_h ) - prediction_regions_org=prediction_regions_org[:,:,0] - - mask_lines_only = (prediction_regions_org[:,:] ==3)*1 - mask_texts_only = (prediction_regions_org[:,:] ==1)*1 - mask_texts_only = mask_texts_only.astype('uint8') - - ##if num_col_classifier == 1 or num_col_classifier == 2: - ###mask_texts_only = cv2.erode(mask_texts_only, KERNEL, iterations=1) - ##mask_texts_only = cv2.dilate(mask_texts_only, KERNEL, iterations=1) - - mask_texts_only = cv2.dilate(mask_texts_only, kernel=np.ones((2,2), np.uint8), iterations=1) - mask_images_only=(prediction_regions_org[:,:] ==2)*1 - - polygons_seplines, hir_seplines = return_contours_of_image(mask_lines_only) - test_khat = np.zeros(prediction_regions_org.shape) - test_khat = cv2.fillPoly(test_khat, pts=polygons_seplines, color=(1,1,1)) - - #plt.imshow(test_khat[:,:]) - #plt.show() - #for jv in range(1): - #print(jv, hir_seplines[0][232][3]) - #test_khat = np.zeros(prediction_regions_org.shape) - #test_khat = cv2.fillPoly(test_khat, pts = [polygons_seplines[232]], color=(1,1,1)) - #plt.imshow(test_khat[:,:]) - #plt.show() - - polygons_seplines = filter_contours_area_of_image( - mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001, dilate=1) - - test_khat = np.zeros(prediction_regions_org.shape) - test_khat = cv2.fillPoly(test_khat, pts = polygons_seplines, color=(1,1,1)) - - #plt.imshow(test_khat[:,:]) - #plt.show() - #sys.exit() - - polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) - ##polygons_of_only_texts = dilate_textregion_contours(polygons_of_only_texts) - polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) - - text_regions_p_true = np.zeros(prediction_regions_org.shape) - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts=polygons_of_only_lines, color=(3,3,3)) - - text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_texts, color=(1,1,1)) - - textline_mask_tot_ea[(text_regions_p_true==0) | (text_regions_p_true==4) ] = 0 - #plt.imshow(textline_mask_tot_ea) - #plt.show() - #print("inside 4 ", time.time()-t_in) - self.logger.debug("exit get_regions_light_v") - return (text_regions_p_true, - erosion_hurts, - polygons_seplines, - polygons_of_only_texts, - textline_mask_tot_ea, - img_bin, - confidence_matrix) - - def get_regions_from_xy_2models(self,img,is_image_enhanced, num_col_classifier): - self.logger.debug("enter get_regions_from_xy_2models") - erosion_hurts = False - img_org = np.copy(img) - img_height_h = img_org.shape[0] - img_width_h = img_org.shape[1] - - ratio_y=1.3 - ratio_x=1 - - img = resize_image(img_org, int(img_org.shape[0]*ratio_y), 
int(img_org.shape[1]*ratio_x)) - prediction_regions_org_y = self.do_prediction(True, img, self.models["region"]) - prediction_regions_org_y = resize_image(prediction_regions_org_y, img_height_h, img_width_h ) - - #plt.imshow(prediction_regions_org_y[:,:,0]) - #plt.show() - prediction_regions_org_y = prediction_regions_org_y[:,:,0] - mask_zeros_y = (prediction_regions_org_y[:,:]==0)*1 - - ##img_only_regions_with_sep = ( (prediction_regions_org_y[:,:] != 3) & (prediction_regions_org_y[:,:] != 0) )*1 - img_only_regions_with_sep = (prediction_regions_org_y == 1).astype(np.uint8) - try: - img_only_regions = cv2.erode(img_only_regions_with_sep[:,:], KERNEL, iterations=20) - _, _ = find_num_col(img_only_regions, num_col_classifier, self.tables, multiplier=6.0) - img = resize_image(img_org, int(img_org.shape[0]), int(img_org.shape[1]*(1.2 if is_image_enhanced else 1))) - - prediction_regions_org = self.do_prediction(True, img, self.models["region"]) - prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) - - prediction_regions_org=prediction_regions_org[:,:,0] - prediction_regions_org[(prediction_regions_org[:,:]==1) & (mask_zeros_y[:,:]==1)]=0 - - img = resize_image(img_org, int(img_org.shape[0]), int(img_org.shape[1])) - - prediction_regions_org2 = self.do_prediction(True, img, self.models["region_p2"], marginal_of_patch_percent=0.2) - prediction_regions_org2=resize_image(prediction_regions_org2, img_height_h, img_width_h ) - - mask_zeros2 = (prediction_regions_org2[:,:,0] == 0) - mask_lines2 = (prediction_regions_org2[:,:,0] == 3) - text_sume_early = (prediction_regions_org[:,:] == 1).sum() - prediction_regions_org_copy = np.copy(prediction_regions_org) - prediction_regions_org_copy[(prediction_regions_org_copy[:,:]==1) & (mask_zeros2[:,:]==1)] = 0 - text_sume_second = ((prediction_regions_org_copy[:,:]==1)*1).sum() - rate_two_models = 100. 
* text_sume_second / text_sume_early - - self.logger.info("ratio_of_two_models: %s", rate_two_models) - if not(is_image_enhanced and rate_two_models < RATIO_OF_TWO_MODEL_THRESHOLD): - prediction_regions_org = np.copy(prediction_regions_org_copy) - - prediction_regions_org[(mask_lines2[:,:]==1) & (prediction_regions_org[:,:]==0)]=3 - mask_lines_only=(prediction_regions_org[:,:]==3)*1 - prediction_regions_org = cv2.erode(prediction_regions_org[:,:], KERNEL, iterations=2) - prediction_regions_org = cv2.dilate(prediction_regions_org[:,:], KERNEL, iterations=2) - - if rate_two_models<=40: - if self.input_binary: - prediction_bin = np.copy(img_org) - else: - prediction_bin = self.do_prediction(True, img_org, self.models["binarization"], n_batch_inference=5) - prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h ) - prediction_bin = 255 * (prediction_bin[:,:,0]==0) - prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) - - ratio_y=1 - ratio_x=1 - - img = resize_image(prediction_bin, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) - - prediction_regions_org = self.do_prediction(True, img, self.models["region"]) - prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) - prediction_regions_org=prediction_regions_org[:,:,0] - - mask_lines_only=(prediction_regions_org[:,:]==3)*1 - - mask_texts_only=(prediction_regions_org[:,:]==1)*1 - mask_images_only=(prediction_regions_org[:,:]==2)*1 - - polygons_seplines, hir_seplines = return_contours_of_image(mask_lines_only) - polygons_seplines = filter_contours_area_of_image( - mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001, dilate=1) - - polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only, 1, 0.00001) - polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only, 1, 0.00001) - - text_regions_p_true = np.zeros(prediction_regions_org.shape) - text_regions_p_true = cv2.fillPoly(text_regions_p_true,pts = polygons_of_only_lines, color=(3, 3, 3)) - text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 - - text_regions_p_true=cv2.fillPoly(text_regions_p_true,pts=polygons_of_only_texts, color=(1,1,1)) - - self.logger.debug("exit get_regions_from_xy_2models") - return text_regions_p_true, erosion_hurts, polygons_seplines, polygons_of_only_texts - except: - if self.input_binary: - prediction_bin = np.copy(img_org) - prediction_bin = self.do_prediction(True, img_org, self.models["binarization"], n_batch_inference=5) - prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h ) - prediction_bin = 255 * (prediction_bin[:,:,0]==0) - prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) - else: - prediction_bin = np.copy(img_org) - ratio_y=1 - ratio_x=1 - - - img = resize_image(prediction_bin, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) - prediction_regions_org = self.do_prediction(True, img, self.models["region"]) - prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) - prediction_regions_org=prediction_regions_org[:,:,0] - - #mask_lines_only=(prediction_regions_org[:,:]==3)*1 - #img = resize_image(img_org, int(img_org.shape[0]*1), int(img_org.shape[1]*1)) - - #prediction_regions_org = self.do_prediction(True, img, self.models["region"]) - #prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) - #prediction_regions_org = prediction_regions_org[:,:,0] - 
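-            # below, the separator-line, text and image masks are rebuilt from
-            # the fallback prediction, mirroring the try branch above;
-            # erosion_hurts is set to True so that later stages skip the
-            # erosion step on these regions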
#prediction_regions_org[(prediction_regions_org[:,:] == 1) & (mask_zeros_y[:,:] == 1)]=0 - - mask_lines_only = (prediction_regions_org == 3)*1 - mask_texts_only = (prediction_regions_org == 1)*1 - mask_images_only= (prediction_regions_org == 2)*1 - - polygons_seplines, hir_seplines = return_contours_of_image(mask_lines_only) - polygons_seplines = filter_contours_area_of_image( - mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001, dilate=1) - - polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) - polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) - - text_regions_p_true = np.zeros(prediction_regions_org.shape) - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_lines, color=(3,3,3)) - - text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_texts, color=(1,1,1)) - - erosion_hurts = True - self.logger.debug("exit get_regions_from_xy_2models") - return text_regions_p_true, erosion_hurts, polygons_seplines, polygons_of_only_texts - - def do_order_of_regions( - self, contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot): - - self.logger.debug("enter do_order_of_regions") - contours_only_text_parent = np.array(contours_only_text_parent) - contours_only_text_parent_h = np.array(contours_only_text_parent_h) - boxes = np.array(boxes, dtype=int) # to be on the safe side - c_boxes = np.stack((0.5 * boxes[:, 2:4].sum(axis=1), - 0.5 * boxes[:, 0:2].sum(axis=1))) - cx_main, cy_main, mx_main, Mx_main, my_main, My_main, mxy_main = find_new_features_of_contours( - contours_only_text_parent) - cx_head, cy_head, mx_head, Mx_head, my_head, My_head, mxy_head = find_new_features_of_contours( - contours_only_text_parent_h) - - def match_boxes(only_centers: bool): - arg_text_con_main = np.zeros(len(contours_only_text_parent), dtype=int) - for ii in range(len(contours_only_text_parent)): - check_if_textregion_located_in_a_box = False - for jj, box in enumerate(boxes): - if ((cx_main[ii] >= box[0] and - cx_main[ii] < box[1] and - cy_main[ii] >= box[2] and - cy_main[ii] < box[3]) if only_centers else - (mx_main[ii] >= box[0] and - Mx_main[ii] < box[1] and - my_main[ii] >= box[2] and - My_main[ii] < box[3])): - arg_text_con_main[ii] = jj - check_if_textregion_located_in_a_box = True - break - if not check_if_textregion_located_in_a_box: - dists_tr_from_box = np.linalg.norm(c_boxes - np.array([[cy_main[ii]], [cx_main[ii]]]), axis=0) - pcontained_in_box = ((boxes[:, 2] <= cy_main[ii]) & (cy_main[ii] < boxes[:, 3]) & - (boxes[:, 0] <= cx_main[ii]) & (cx_main[ii] < boxes[:, 1])) - ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) - arg_text_con_main[ii] = ind_min - args_contours_main = np.arange(len(contours_only_text_parent)) - order_by_con_main = np.zeros_like(arg_text_con_main) - - arg_text_con_head = np.zeros(len(contours_only_text_parent_h), dtype=int) - for ii in range(len(contours_only_text_parent_h)): - check_if_textregion_located_in_a_box = False - for jj, box in enumerate(boxes): - if ((cx_head[ii] >= box[0] and - cx_head[ii] < box[1] and - cy_head[ii] >= box[2] and - cy_head[ii] < box[3]) if only_centers else - (mx_head[ii] >= box[0] and - Mx_head[ii] < box[1] and - my_head[ii] >= box[2] and - My_head[ii] < box[3])): - arg_text_con_head[ii] = jj - check_if_textregion_located_in_a_box = True - break - if not 
check_if_textregion_located_in_a_box: - dists_tr_from_box = np.linalg.norm(c_boxes - np.array([[cy_head[ii]], [cx_head[ii]]]), axis=0) - pcontained_in_box = ((boxes[:, 2] <= cy_head[ii]) & (cy_head[ii] < boxes[:, 3]) & - (boxes[:, 0] <= cx_head[ii]) & (cx_head[ii] < boxes[:, 1])) - ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) - arg_text_con_head[ii] = ind_min - args_contours_head = np.arange(len(contours_only_text_parent_h)) - order_by_con_head = np.zeros_like(arg_text_con_head) - - ref_point = 0 - order_of_texts_tot = [] - id_of_texts_tot = [] - for iij, box in enumerate(boxes): - ys = slice(*box[2:4]) - xs = slice(*box[0:2]) - args_contours_box_main = args_contours_main[arg_text_con_main == iij] - args_contours_box_head = args_contours_head[arg_text_con_head == iij] - con_inter_box = contours_only_text_parent[args_contours_box_main] - con_inter_box_h = contours_only_text_parent_h[args_contours_box_head] - - indexes_sorted, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( - textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, box[2]) - - order_of_texts, id_of_texts = order_and_id_of_texts( - con_inter_box, con_inter_box_h, - indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) - - indexes_sorted_main = indexes_sorted[kind_of_texts_sorted == 1] - indexes_by_type_main = index_by_kind_sorted[kind_of_texts_sorted == 1] - indexes_sorted_head = indexes_sorted[kind_of_texts_sorted == 2] - indexes_by_type_head = index_by_kind_sorted[kind_of_texts_sorted == 2] - - for zahler, _ in enumerate(args_contours_box_main): - arg_order_v = indexes_sorted_main[zahler] - order_by_con_main[args_contours_box_main[indexes_by_type_main[zahler]]] = \ - np.flatnonzero(indexes_sorted == arg_order_v) + ref_point - - for zahler, _ in enumerate(args_contours_box_head): - arg_order_v = indexes_sorted_head[zahler] - order_by_con_head[args_contours_box_head[indexes_by_type_head[zahler]]] = \ - np.flatnonzero(indexes_sorted == arg_order_v) + ref_point - - for jji in range(len(id_of_texts)): - order_of_texts_tot.append(order_of_texts[jji] + ref_point) - id_of_texts_tot.append(id_of_texts[jji]) - ref_point += len(id_of_texts) - - order_of_texts_tot = np.concatenate((order_by_con_main, - order_by_con_head)) - order_text_new = np.argsort(order_of_texts_tot) - return order_text_new, id_of_texts_tot - - try: - results = match_boxes(False) - except Exception as why: - self.logger.error(why) - results = match_boxes(True) - - self.logger.debug("exit do_order_of_regions") - return results - - def check_iou_of_bounding_box_and_contour_for_tables( - self, layout, table_prediction_early, pixel_table, num_col_classifier): - - layout_org = np.copy(layout) - layout_org[layout_org == pixel_table] = 0 - layout = (layout == pixel_table).astype(np.uint8) * 1 - _, thresh = cv2.threshold(layout, 0, 255, 0) - - contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - cnt_size = np.array([cv2.contourArea(cnt) for cnt in contours]) - - contours_new = [] - for i, contour in enumerate(contours): - x, y, w, h = cv2.boundingRect(contour) - iou = cnt_size[i] /float(w*h) *100 - if iou<80: - layout_contour = np.zeros(layout_org.shape[:2]) - layout_contour = cv2.fillPoly(layout_contour, pts=[contour] ,color=1) - - layout_contour_sum = layout_contour.sum(axis=0) - layout_contour_sum_diff = np.diff(layout_contour_sum) - layout_contour_sum_diff= np.abs(layout_contour_sum_diff) - layout_contour_sum_diff_smoothed= gaussian_filter1d(layout_contour_sum_diff, 10) - - 
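-                # peaks in the smoothed, per-column derivative of the filled
-                # contour mark abrupt vertical edges; the mask is cut at those
-                # columns below, splitting accidentally merged tables apart
-                # before the contours are extracted again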
peaks, _ = find_peaks(layout_contour_sum_diff_smoothed, height=0) - peaks= peaks[layout_contour_sum_diff_smoothed[peaks]>4] - - for j in range(len(peaks)): - layout_contour[:,peaks[j]-3+1:peaks[j]+1+3] = 0 - - layout_contour=cv2.erode(layout_contour[:,:], KERNEL, iterations=5) - layout_contour=cv2.dilate(layout_contour[:,:], KERNEL, iterations=5) - - layout_contour = layout_contour.astype(np.uint8) - _, thresh = cv2.threshold(layout_contour, 0, 255, 0) - - contours_sep, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - - for ji in range(len(contours_sep) ): - contours_new.append(contours_sep[ji]) - if num_col_classifier>=2: - only_recent_contour_image = np.zeros(layout.shape[:2]) - only_recent_contour_image = cv2.fillPoly(only_recent_contour_image, - pts=[contours_sep[ji]], color=1) - table_pixels_masked_from_early_pre = only_recent_contour_image * table_prediction_early - iou_in = 100. * table_pixels_masked_from_early_pre.sum() / only_recent_contour_image.sum() - #print(iou_in,'iou_in_in1') - - if iou_in>30: - layout_org = cv2.fillPoly(layout_org, pts=[contours_sep[ji]], color=pixel_table) - else: - pass - else: - layout_org= cv2.fillPoly(layout_org, pts=[contours_sep[ji]], color=pixel_table) - else: - contours_new.append(contour) - if num_col_classifier>=2: - only_recent_contour_image = np.zeros(layout.shape[:2]) - only_recent_contour_image = cv2.fillPoly(only_recent_contour_image, pts=[contour],color=1) - - table_pixels_masked_from_early_pre = only_recent_contour_image * table_prediction_early - iou_in = 100. * table_pixels_masked_from_early_pre.sum() / only_recent_contour_image.sum() - #print(iou_in,'iou_in') - if iou_in>30: - layout_org = cv2.fillPoly(layout_org, pts=[contour], color=pixel_table) - else: - pass - else: - layout_org = cv2.fillPoly(layout_org, pts=[contour], color=pixel_table) - - return layout_org, contours_new - - def delete_separator_around(self, spliter_y,peaks_neg,image_by_region, pixel_line, pixel_table): - # format of subboxes: box=[x1, x2 , y1, y2] - pix_del = 100 - if len(image_by_region.shape)==3: - for i in range(len(spliter_y)-1): - for j in range(1,len(peaks_neg[i])-1): - ys = slice(int(spliter_y[i]), - int(spliter_y[i+1])) - xs = slice(peaks_neg[i][j] - pix_del, - peaks_neg[i][j] + pix_del) - image_by_region[ys,xs,0][image_by_region[ys,xs,0]==pixel_line] = 0 - image_by_region[ys,xs,0][image_by_region[ys,xs,1]==pixel_line] = 0 - image_by_region[ys,xs,0][image_by_region[ys,xs,2]==pixel_line] = 0 - - image_by_region[ys,xs,0][image_by_region[ys,xs,0]==pixel_table] = 0 - image_by_region[ys,xs,0][image_by_region[ys,xs,1]==pixel_table] = 0 - image_by_region[ys,xs,0][image_by_region[ys,xs,2]==pixel_table] = 0 - else: - for i in range(len(spliter_y)-1): - for j in range(1,len(peaks_neg[i])-1): - ys = slice(int(spliter_y[i]), - int(spliter_y[i+1])) - xs = slice(peaks_neg[i][j] - pix_del, - peaks_neg[i][j] + pix_del) - image_by_region[ys,xs][image_by_region[ys,xs]==pixel_line] = 0 - image_by_region[ys,xs][image_by_region[ys,xs]==pixel_table] = 0 - return image_by_region - - def add_tables_heuristic_to_layout( - self, image_regions_eraly_p, boxes, - slope_mean_hor, spliter_y, peaks_neg_tot, image_revised, - num_col_classifier, min_area, pixel_line): - - pixel_table =10 - image_revised_1 = self.delete_separator_around(spliter_y, peaks_neg_tot, image_revised, pixel_line, pixel_table) - - try: - image_revised_1[:,:30][image_revised_1[:,:30]==pixel_line] = 0 - image_revised_1[:,-30:][image_revised_1[:,-30:]==pixel_line] = 0 - except: - pass - boxes = 
np.array(boxes, dtype=int) # to be on the safe side - - img_comm = np.zeros(image_revised_1.shape, dtype=np.uint8) - for indiv in np.unique(image_revised_1): - image_col = (image_revised_1 == indiv).astype(np.uint8) * 255 - _, thresh = cv2.threshold(image_col, 0, 255, 0) - contours,hirarchy=cv2.findContours(thresh.copy(), cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) - - if indiv==pixel_table: - main_contours = filter_contours_area_of_image_tables(thresh, contours, hirarchy, - max_area=1, min_area=0.001) - else: - main_contours = filter_contours_area_of_image_tables(thresh, contours, hirarchy, - max_area=1, min_area=min_area) - - img_comm = cv2.fillPoly(img_comm, pts=main_contours, color=indiv) - - if not self.isNaN(slope_mean_hor): - image_revised_last = np.zeros(image_regions_eraly_p.shape[:2]) - for i in range(len(boxes)): - box_ys = slice(*boxes[i][2:4]) - box_xs = slice(*boxes[i][0:2]) - image_box = img_comm[box_ys, box_xs] - try: - image_box_tabels_1 = (image_box == pixel_table) * 1 - contours_tab,_=return_contours_of_image(image_box_tabels_1) - contours_tab=filter_contours_area_of_image_tables(image_box_tabels_1,contours_tab,_,1,0.003) - image_box_tabels_1 = (image_box == pixel_line).astype(np.uint8) * 1 - image_box_tabels_and_m_text = ( (image_box == pixel_table) | - (image_box == 1) ).astype(np.uint8) * 1 - - image_box_tabels_1 = cv2.dilate(image_box_tabels_1, KERNEL, iterations=5) - - contours_table_m_text, _ = return_contours_of_image(image_box_tabels_and_m_text) - _, thresh = cv2.threshold(image_box_tabels_1, 0, 255, 0) - contours_line, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - - y_min_main_line ,y_max_main_line=find_features_of_contours(contours_line) - y_min_main_tab ,y_max_main_tab=find_features_of_contours(contours_tab) - - (cx_tab_m_text, cy_tab_m_text, - x_min_tab_m_text, x_max_tab_m_text, - y_min_tab_m_text, y_max_tab_m_text, - _) = find_new_features_of_contours(contours_table_m_text) - (cx_tabl, cy_tabl, - x_min_tabl, x_max_tabl, - y_min_tabl, y_max_tabl, - _) = find_new_features_of_contours(contours_tab) - - if len(y_min_main_tab )>0: - y_down_tabs=[] - y_up_tabs=[] - - for i_t in range(len(y_min_main_tab )): - y_down_tab=[] - y_up_tab=[] - for i_l in range(len(y_min_main_line)): - if (y_min_main_tab[i_t] > y_min_main_line[i_l] and - y_max_main_tab[i_t] > y_min_main_line[i_l] and - y_min_main_tab[i_t] > y_max_main_line[i_l] and - y_max_main_tab[i_t] > y_min_main_line[i_l]): - pass - elif (y_min_main_tab[i_t] < y_max_main_line[i_l] and - y_max_main_tab[i_t] < y_max_main_line[i_l] and - y_max_main_tab[i_t] < y_min_main_line[i_l] and - y_min_main_tab[i_t] < y_min_main_line[i_l]): - pass - elif abs(y_max_main_line[i_l] - y_min_main_line[i_l]) < 100: - pass - else: - y_up_tab.append(min([y_min_main_line[i_l], - y_min_main_tab[i_t]])) - y_down_tab.append(max([y_max_main_line[i_l], - y_max_main_tab[i_t]])) - - if len(y_up_tab)==0: - y_up_tabs.append(y_min_main_tab[i_t]) - y_down_tabs.append(y_max_main_tab[i_t]) - else: - y_up_tabs.append(min(y_up_tab)) - y_down_tabs.append(max(y_down_tab)) - else: - y_down_tabs=[] - y_up_tabs=[] - pass - except: - y_down_tabs=[] - y_up_tabs=[] - - for ii in range(len(y_up_tabs)): - image_box[y_up_tabs[ii]:y_down_tabs[ii]] = pixel_table - - image_revised_last[box_ys, box_xs] = image_box - else: - for i in range(len(boxes)): - box_ys = slice(*boxes[i][2:4]) - box_xs = slice(*boxes[i][0:2]) - image_box = img_comm[box_ys, box_xs] - image_revised_last[box_ys, box_xs] = image_box - - if num_col_classifier==1: - 
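-            # single-column layout: each detected table is stretched across the
-            # full page width by filling the complete row span of its contour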
img_tables_col_1 = (image_revised_last == pixel_table).astype(np.uint8) - contours_table_col1, _ = return_contours_of_image(img_tables_col_1) - - _,_ ,_ , _, y_min_tab_col1 ,y_max_tab_col1, _= find_new_features_of_contours(contours_table_col1) - - if len(y_min_tab_col1)>0: - for ijv in range(len(y_min_tab_col1)): - image_revised_last[int(y_min_tab_col1[ijv]):int(y_max_tab_col1[ijv])] = pixel_table - return image_revised_last - - def get_tables_from_model(self, img, num_col_classifier): - img_org = np.copy(img) - img_height_h = img_org.shape[0] - img_width_h = img_org.shape[1] - patches = False - if self.light_version: - prediction_table, _ = self.do_prediction_new_concept(patches, img, self.models["table"]) - prediction_table = prediction_table.astype(np.int16) - return prediction_table[:,:,0] - else: - if num_col_classifier < 4 and num_col_classifier > 2: - prediction_table = self.do_prediction(patches, img, self.models["table"]) - pre_updown = self.do_prediction(patches, cv2.flip(img[:,:,:], -1), self.models["table"]) - pre_updown = cv2.flip(pre_updown, -1) - - prediction_table[:,:,0][pre_updown[:,:,0]==1]=1 - prediction_table = prediction_table.astype(np.int16) - - elif num_col_classifier ==2: - height_ext = 0 # img.shape[0] // 4 - h_start = height_ext // 2 - width_ext = img.shape[1] // 8 - w_start = width_ext // 2 - - img_new = np.zeros((img.shape[0] + height_ext, - img.shape[1] + width_ext, - img.shape[2])).astype(float) - ys = slice(h_start, h_start + img.shape[0]) - xs = slice(w_start, w_start + img.shape[1]) - img_new[ys, xs] = img - - prediction_ext = self.do_prediction(patches, img_new, self.models["table"]) - pre_updown = self.do_prediction(patches, cv2.flip(img_new[:,:,:], -1), self.models["table"]) - pre_updown = cv2.flip(pre_updown, -1) - - prediction_table = prediction_ext[ys, xs] - prediction_table_updown = pre_updown[ys, xs] - - prediction_table[:,:,0][prediction_table_updown[:,:,0]==1]=1 - prediction_table = prediction_table.astype(np.int16) - elif num_col_classifier ==1: - height_ext = 0 # img.shape[0] // 4 - h_start = height_ext // 2 - width_ext = img.shape[1] // 4 - w_start = width_ext // 2 - - img_new =np.zeros((img.shape[0] + height_ext, - img.shape[1] + width_ext, - img.shape[2])).astype(float) - ys = slice(h_start, h_start + img.shape[0]) - xs = slice(w_start, w_start + img.shape[1]) - img_new[ys, xs] = img - - prediction_ext = self.do_prediction(patches, img_new, self.models["table"]) - pre_updown = self.do_prediction(patches, cv2.flip(img_new[:,:,:], -1), self.models["table"]) - pre_updown = cv2.flip(pre_updown, -1) - - prediction_table = prediction_ext[ys, xs] - prediction_table_updown = pre_updown[ys, xs] - - prediction_table[:,:,0][prediction_table_updown[:,:,0]==1]=1 - prediction_table = prediction_table.astype(np.int16) - else: - prediction_table = np.zeros(img.shape) - img_w_half = img.shape[1] // 2 - - pre1 = self.do_prediction(patches, img[:,0:img_w_half,:], self.models["table"]) - pre2 = self.do_prediction(patches, img[:,img_w_half:,:], self.models["table"]) - pre_full = self.do_prediction(patches, img[:,:,:], self.models["table"]) - pre_updown = self.do_prediction(patches, cv2.flip(img[:,:,:], -1), self.models["table"]) - pre_updown = cv2.flip(pre_updown, -1) - - prediction_table_full_erode = cv2.erode(pre_full[:,:,0], KERNEL, iterations=4) - prediction_table_full_erode = cv2.dilate(prediction_table_full_erode, KERNEL, iterations=4) - - prediction_table_full_updown_erode = cv2.erode(pre_updown[:,:,0], KERNEL, iterations=4) - 
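-                # erode-then-dilate with the same kernel and iteration count is
-                # a morphological opening: it removes thin spurious table
-                # responses while preserving the extent of larger table blobs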
prediction_table_full_updown_erode = cv2.dilate(prediction_table_full_updown_erode, KERNEL, iterations=4) - - prediction_table[:,0:img_w_half,:] = pre1[:,:,:] - prediction_table[:,img_w_half:,:] = pre2[:,:,:] - - prediction_table[:,:,0][prediction_table_full_erode[:,:]==1]=1 - prediction_table[:,:,0][prediction_table_full_updown_erode[:,:]==1]=1 - prediction_table = prediction_table.astype(np.int16) - - #prediction_table_erode = cv2.erode(prediction_table[:,:,0], self.kernel, iterations=6) - #prediction_table_erode = cv2.dilate(prediction_table_erode, self.kernel, iterations=6) - - prediction_table_erode = cv2.erode(prediction_table[:,:,0], KERNEL, iterations=20) - prediction_table_erode = cv2.dilate(prediction_table_erode, KERNEL, iterations=20) - return prediction_table_erode.astype(np.int16) - - def run_graphics_and_columns_light( - self, text_regions_p_1, textline_mask_tot_ea, - num_col_classifier, num_column_is_classified, erosion_hurts, img_bin_light): - - #print(text_regions_p_1.shape, 'text_regions_p_1 shape run graphics') - #print(erosion_hurts, 'erosion_hurts') - t_in_gr = time.time() - img_g = self.imread(grayscale=True, uint8=True) - - img_g3 = np.zeros((img_g.shape[0], img_g.shape[1], 3)) - img_g3 = img_g3.astype(np.uint8) - img_g3[:, :, 0] = img_g[:, :] - img_g3[:, :, 1] = img_g[:, :] - img_g3[:, :, 2] = img_g[:, :] - - image_page, page_coord, cont_page = self.extract_page() - #print("inside graphics 1 ", time.time() - t_in_gr) - if self.tables: - table_prediction = self.get_tables_from_model(image_page, num_col_classifier) - else: - table_prediction = np.zeros((image_page.shape[0], image_page.shape[1])).astype(np.int16) - - if self.plotter: - self.plotter.save_page_image(image_page) - - if not self.ignore_page_extraction: - mask_page = np.zeros((text_regions_p_1.shape[0], text_regions_p_1.shape[1])).astype(np.int8) - mask_page = cv2.fillPoly(mask_page, pts=[cont_page[0]], color=(1,)) - - text_regions_p_1[mask_page==0] = 0 - textline_mask_tot_ea[mask_page==0] = 0 - - text_regions_p_1 = text_regions_p_1[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - textline_mask_tot_ea = textline_mask_tot_ea[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - img_bin_light = img_bin_light[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - - ###text_regions_p_1 = text_regions_p_1[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - ###textline_mask_tot_ea = textline_mask_tot_ea[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - ###img_bin_light = img_bin_light[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - - mask_images = (text_regions_p_1[:, :] == 2) * 1 - mask_images = mask_images.astype(np.uint8) - mask_images = cv2.erode(mask_images[:, :], KERNEL, iterations=10) - mask_lines = (text_regions_p_1[:, :] == 3) * 1 - mask_lines = mask_lines.astype(np.uint8) - img_only_regions_with_sep = ((text_regions_p_1[:, :] != 3) & (text_regions_p_1[:, :] != 0)) * 1 - img_only_regions_with_sep = img_only_regions_with_sep.astype(np.uint8) - - #print("inside graphics 2 ", time.time() - t_in_gr) - if erosion_hurts: - img_only_regions = np.copy(img_only_regions_with_sep[:,:]) - else: - img_only_regions = cv2.erode(img_only_regions_with_sep[:,:], KERNEL, iterations=6) - - ##print(img_only_regions.shape,'img_only_regions') - ##plt.imshow(img_only_regions[:,:]) - ##plt.show() - ##num_col, _ = find_num_col(img_only_regions, num_col_classifier, self.tables, multiplier=6.0) - try: - num_col, _ = find_num_col(img_only_regions, 
num_col_classifier, self.tables, multiplier=6.0) - num_col = num_col + 1 - if not num_column_is_classified: - num_col_classifier = num_col + 1 - num_col_classifier = min(self.num_col_upper or num_col_classifier, - max(self.num_col_lower or num_col_classifier, - num_col_classifier)) - except Exception as why: - self.logger.error(why) - num_col = None - #print("inside graphics 3 ", time.time() - t_in_gr) - return (num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, - text_regions_p_1, cont_page, table_prediction, textline_mask_tot_ea, img_bin_light) - - def run_graphics_and_columns_without_layout(self, textline_mask_tot_ea, img_bin_light): - #print(text_regions_p_1.shape, 'text_regions_p_1 shape run graphics') - #print(erosion_hurts, 'erosion_hurts') - t_in_gr = time.time() - img_g = self.imread(grayscale=True, uint8=True) - - img_g3 = np.zeros((img_g.shape[0], img_g.shape[1], 3)) - img_g3 = img_g3.astype(np.uint8) - img_g3[:, :, 0] = img_g[:, :] - img_g3[:, :, 1] = img_g[:, :] - img_g3[:, :, 2] = img_g[:, :] - - image_page, page_coord, cont_page = self.extract_page() - #print("inside graphics 1 ", time.time() - t_in_gr) - - textline_mask_tot_ea = textline_mask_tot_ea[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - img_bin_light = img_bin_light[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - - return page_coord, image_page, textline_mask_tot_ea, img_bin_light, cont_page - - def run_graphics_and_columns( - self, text_regions_p_1, - num_col_classifier, num_column_is_classified, erosion_hurts): - - t_in_gr = time.time() - img_g = self.imread(grayscale=True, uint8=True) - - img_g3 = np.zeros((img_g.shape[0], img_g.shape[1], 3)) - img_g3 = img_g3.astype(np.uint8) - img_g3[:, :, 0] = img_g[:, :] - img_g3[:, :, 1] = img_g[:, :] - img_g3[:, :, 2] = img_g[:, :] - - image_page, page_coord, cont_page = self.extract_page() - - if self.tables: - table_prediction = self.get_tables_from_model(image_page, num_col_classifier) - else: - table_prediction = np.zeros((image_page.shape[0], image_page.shape[1])).astype(np.int16) - - if self.plotter: - self.plotter.save_page_image(image_page) - - text_regions_p_1 = text_regions_p_1[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - mask_images = (text_regions_p_1[:, :] == 2) * 1 - mask_images = mask_images.astype(np.uint8) - mask_images = cv2.erode(mask_images[:, :], KERNEL, iterations=10) - mask_lines = (text_regions_p_1[:, :] == 3) * 1 - mask_lines = mask_lines.astype(np.uint8) - img_only_regions_with_sep = ((text_regions_p_1[:, :] != 3) & (text_regions_p_1[:, :] != 0)) * 1 - img_only_regions_with_sep = img_only_regions_with_sep.astype(np.uint8) - - if erosion_hurts: - img_only_regions = np.copy(img_only_regions_with_sep[:,:]) - else: - img_only_regions = cv2.erode(img_only_regions_with_sep[:,:], KERNEL, iterations=6) - try: - num_col, _ = find_num_col(img_only_regions, num_col_classifier, self.tables, multiplier=6.0) - num_col = num_col + 1 - if not num_column_is_classified: - num_col_classifier = num_col + 1 - except Exception as why: - self.logger.error(why) - num_col = None - return (num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, - text_regions_p_1, cont_page, table_prediction) - - def run_enhancement(self, light_version): - t_in = time.time() - self.logger.info("Resizing and enhancing image...") - is_image_enhanced, img_org, img_res, num_col_classifier, num_column_is_classified, img_bin = \ - 
self.resize_and_enhance_image_with_column_classifier(light_version) - self.logger.info("Image was %senhanced.", '' if is_image_enhanced else 'not ') - scale = 1 - if is_image_enhanced: - if self.allow_enhancement: - #img_res = img_res.astype(np.uint8) - self.get_image_and_scales(img_org, img_res, scale) - if self.plotter: - self.plotter.save_enhanced_image(img_res) - else: - self.get_image_and_scales_after_enhancing(img_org, img_res) - else: - if self.allow_enhancement: - self.get_image_and_scales(img_org, img_res, scale) - else: - self.get_image_and_scales(img_org, img_res, scale) - if self.allow_scaling: - img_org, img_res, is_image_enhanced = \ - self.resize_image_with_column_classifier(is_image_enhanced, img_bin) - self.get_image_and_scales_after_enhancing(img_org, img_res) - #print("enhancement in ", time.time()-t_in) - return img_res, is_image_enhanced, num_col_classifier, num_column_is_classified - - def run_textline(self, image_page, num_col_classifier=None): - scaler_h_textline = 1#1.3 # 1.2#1.2 - scaler_w_textline = 1#1.3 # 0.9#1 - #print(image_page.shape) - textline_mask_tot_ea, _ = self.textline_contours(image_page, True, - scaler_h_textline, - scaler_w_textline, - num_col_classifier) - if self.textline_light: - textline_mask_tot_ea = textline_mask_tot_ea.astype(np.int16) - - if self.plotter: - self.plotter.save_plot_of_textlines(textline_mask_tot_ea, image_page) - return textline_mask_tot_ea - - def run_deskew(self, textline_mask_tot_ea): - #print(textline_mask_tot_ea.shape, 'textline_mask_tot_ea deskew') - slope_deskew = return_deskew_slop(cv2.erode(textline_mask_tot_ea, KERNEL, iterations=2), 2, 30, True, - map=self.executor.map, logger=self.logger, plotter=self.plotter) - if self.plotter: - self.plotter.save_deskewed_image(slope_deskew) - self.logger.info("slope_deskew: %.2f°", slope_deskew) - return slope_deskew - - def run_marginals( - self, textline_mask_tot_ea, mask_images, mask_lines, - num_col_classifier, slope_deskew, text_regions_p_1, table_prediction): - - textline_mask_tot = textline_mask_tot_ea[:, :] - textline_mask_tot[mask_images[:, :] == 1] = 0 - - text_regions_p_1[mask_lines[:, :] == 1] = 3 - text_regions_p = text_regions_p_1[:, :] - text_regions_p = np.array(text_regions_p) - if num_col_classifier in (1, 2): - try: - regions_without_separators = (text_regions_p[:, :] == 1) * 1 - if self.tables: - regions_without_separators[table_prediction==1] = 1 - regions_without_separators = regions_without_separators.astype(np.uint8) - text_regions_p = get_marginals( - rotate_image(regions_without_separators, slope_deskew), text_regions_p, - num_col_classifier, slope_deskew, light_version=self.light_version, kernel=KERNEL) - except Exception as e: - self.logger.error("exception %s", e) - - return textline_mask_tot, text_regions_p - - def run_boxes_no_full_layout( - self, image_page, textline_mask_tot, text_regions_p, - slope_deskew, num_col_classifier, table_prediction, erosion_hurts): - - self.logger.debug('enter run_boxes_no_full_layout') - t_0_box = time.time() - if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - _, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = rotation_not_90_func( - image_page, textline_mask_tot, text_regions_p, table_prediction, slope_deskew) - text_regions_p_1_n = resize_image(text_regions_p_1_n, text_regions_p.shape[0], text_regions_p.shape[1]) - textline_mask_tot_d = resize_image(textline_mask_tot_d, text_regions_p.shape[0], text_regions_p.shape[1]) - table_prediction_n = resize_image(table_prediction_n, text_regions_p.shape[0], 
text_regions_p.shape[1]) - regions_without_separators_d = (text_regions_p_1_n[:, :] == 1) * 1 - if self.tables: - regions_without_separators_d[table_prediction_n[:,:] == 1] = 1 - regions_without_separators = (text_regions_p[:, :] == 1) * 1 - # ( (text_regions_p[:,:]==1) | (text_regions_p[:,:]==2) )*1 - #self.return_regions_without_separators_new(text_regions_p[:,:,0],img_only_regions) - #print(time.time()-t_0_box,'time box in 1') - if self.tables: - regions_without_separators[table_prediction ==1 ] = 1 - if np.abs(slope_deskew) < SLOPE_THRESHOLD: - text_regions_p_1_n = None - textline_mask_tot_d = None - regions_without_separators_d = None - pixel_lines = 3 - if np.abs(slope_deskew) < SLOPE_THRESHOLD: - _, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document( - text_regions_p, num_col_classifier, self.tables, pixel_lines) - - if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document( - text_regions_p_1_n, num_col_classifier, self.tables, pixel_lines) - #print(time.time()-t_0_box,'time box in 2') - self.logger.info("num_col_classifier: %s", num_col_classifier) - - if num_col_classifier >= 3: - if np.abs(slope_deskew) < SLOPE_THRESHOLD: - regions_without_separators = regions_without_separators.astype(np.uint8) - regions_without_separators = cv2.erode(regions_without_separators[:, :], KERNEL, iterations=6) - else: - regions_without_separators_d = regions_without_separators_d.astype(np.uint8) - regions_without_separators_d = cv2.erode(regions_without_separators_d[:, :], KERNEL, iterations=6) - #print(time.time()-t_0_box,'time box in 3') - t1 = time.time() - if np.abs(slope_deskew) < SLOPE_THRESHOLD: - boxes, peaks_neg_tot_tables = return_boxes_of_images_by_order_of_reading_new( - splitter_y_new, regions_without_separators, matrix_of_lines_ch, - num_col_classifier, erosion_hurts, self.tables, self.right2left) - boxes_d = None - self.logger.debug("len(boxes): %s", len(boxes)) - #print(time.time()-t_0_box,'time box in 3.1') - - if self.tables: - if self.light_version: - pass - else: - text_regions_p_tables = np.copy(text_regions_p) - text_regions_p_tables[(table_prediction == 1)] = 10 - pixel_line = 3 - img_revised_tab2 = self.add_tables_heuristic_to_layout( - text_regions_p_tables, boxes, 0, splitter_y_new, peaks_neg_tot_tables, text_regions_p_tables, - num_col_classifier , 0.000005, pixel_line) - #print(time.time()-t_0_box,'time box in 3.2') - img_revised_tab2, contoures_tables = self.check_iou_of_bounding_box_and_contour_for_tables( - img_revised_tab2, table_prediction, 10, num_col_classifier) - #print(time.time()-t_0_box,'time box in 3.3') - else: - boxes_d, peaks_neg_tot_tables_d = return_boxes_of_images_by_order_of_reading_new( - splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, - num_col_classifier, erosion_hurts, self.tables, self.right2left) - boxes = None - self.logger.debug("len(boxes): %s", len(boxes_d)) - - if self.tables: - if self.light_version: - pass - else: - text_regions_p_tables = np.copy(text_regions_p_1_n) - text_regions_p_tables = np.round(text_regions_p_tables) - text_regions_p_tables[(text_regions_p_tables != 3) & (table_prediction_n == 1)] = 10 - - pixel_line = 3 - img_revised_tab2 = self.add_tables_heuristic_to_layout( - text_regions_p_tables, boxes_d, 0, splitter_y_new_d, - peaks_neg_tot_tables_d, text_regions_p_tables, - num_col_classifier, 0.000005, pixel_line) - img_revised_tab2_d,_ = self.check_iou_of_bounding_box_and_contour_for_tables( - 
img_revised_tab2, table_prediction_n, 10, num_col_classifier) - - img_revised_tab2_d_rotated = rotate_image(img_revised_tab2_d, -slope_deskew) - img_revised_tab2_d_rotated = np.round(img_revised_tab2_d_rotated) - img_revised_tab2_d_rotated = img_revised_tab2_d_rotated.astype(np.int8) - img_revised_tab2_d_rotated = resize_image(img_revised_tab2_d_rotated, - text_regions_p.shape[0], text_regions_p.shape[1]) - #print(time.time()-t_0_box,'time box in 4') - self.logger.info("detecting boxes took %.1fs", time.time() - t1) - - if self.tables: - if self.light_version: - text_regions_p[table_prediction == 1] = 10 - img_revised_tab = text_regions_p[:,:] - else: - if np.abs(slope_deskew) < SLOPE_THRESHOLD: - img_revised_tab = np.copy(img_revised_tab2) - img_revised_tab[(text_regions_p == 1) & (img_revised_tab != 10)] = 1 - else: - img_revised_tab = np.copy(text_regions_p) - img_revised_tab[img_revised_tab == 10] = 0 - img_revised_tab[img_revised_tab2_d_rotated == 10] = 10 - - text_regions_p[text_regions_p == 10] = 0 - text_regions_p[img_revised_tab == 10] = 10 - else: - img_revised_tab = text_regions_p[:,:] - #img_revised_tab = text_regions_p[:, :] - if self.light_version: - polygons_of_images = return_contours_of_interested_region(text_regions_p, 2) - else: - polygons_of_images = return_contours_of_interested_region(img_revised_tab, 2) - - pixel_img = 4 - min_area_mar = 0.00001 - if self.light_version: - marginal_mask = (text_regions_p[:,:]==pixel_img)*1 - marginal_mask = marginal_mask.astype('uint8') - marginal_mask = cv2.dilate(marginal_mask, KERNEL, iterations=2) - - polygons_of_marginals = return_contours_of_interested_region(marginal_mask, 1, min_area_mar) - else: - polygons_of_marginals = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar) - - pixel_img = 10 - contours_tables = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar) - #print(time.time()-t_0_box,'time box in 5') - self.logger.debug('exit run_boxes_no_full_layout') - return (polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, - regions_without_separators_d, boxes, boxes_d, - polygons_of_marginals, contours_tables) - - def run_boxes_full_layout( - self, image_page, textline_mask_tot, text_regions_p, - slope_deskew, num_col_classifier, img_only_regions, - table_prediction, erosion_hurts, img_bin_light): - - self.logger.debug('enter run_boxes_full_layout') - t_full0 = time.time() - if self.tables: - if self.light_version: - text_regions_p[:,:][table_prediction[:,:]==1] = 10 - img_revised_tab = text_regions_p[:,:] - if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - _, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = \ - rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, - table_prediction, slope_deskew) - - text_regions_p_1_n = resize_image(text_regions_p_1_n, - text_regions_p.shape[0], - text_regions_p.shape[1]) - textline_mask_tot_d = resize_image(textline_mask_tot_d, - text_regions_p.shape[0], - text_regions_p.shape[1]) - table_prediction_n = resize_image(table_prediction_n, - text_regions_p.shape[0], - text_regions_p.shape[1]) - - regions_without_separators_d = (text_regions_p_1_n[:,:] == 1)*1 - regions_without_separators_d[table_prediction_n[:,:] == 1] = 1 - else: - text_regions_p_1_n = None - textline_mask_tot_d = None - regions_without_separators_d = None - # regions_without_separators = ( text_regions_p[:,:]==1 | text_regions_p[:,:]==2 )*1 - #self.return_regions_without_separators_new(text_regions_p[:,:,0],img_only_regions) - 
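# [editor's sketch, not part of the original diff] The statements below build a
# binary mask of main-text pixels (label 1), merge table pixels into it, and --
# for pages classified with three or more columns -- erode it so loosely touching
# regions separate before column counting. A minimal standalone version of the
# same idea; the function name and the 5x5 kernel are assumptions:
import numpy as np
import cv2

def text_mask_without_separators(layout, table_mask, num_cols):
    mask = (layout == 1).astype(np.uint8)  # keep only main-text pixels
    mask[table_mask == 1] = 1              # treat table pixels as text area
    if num_cols >= 3:
        # erode so that loosely joined regions split apart on wide pages
        kernel = np.ones((5, 5), np.uint8)  # assumption: KERNEL is a 5x5 ones kernel
        mask = cv2.erode(mask, kernel, iterations=6)
    return mask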
regions_without_separators = (text_regions_p[:,:] == 1)*1 - regions_without_separators[table_prediction == 1] = 1 - - else: - if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - _, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = \ - rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, - table_prediction, slope_deskew) - - text_regions_p_1_n = resize_image(text_regions_p_1_n, - text_regions_p.shape[0], - text_regions_p.shape[1]) - textline_mask_tot_d = resize_image(textline_mask_tot_d, - text_regions_p.shape[0], - text_regions_p.shape[1]) - table_prediction_n = resize_image(table_prediction_n, - text_regions_p.shape[0], - text_regions_p.shape[1]) - - regions_without_separators_d = (text_regions_p_1_n[:,:] == 1)*1 - regions_without_separators_d[table_prediction_n[:,:] == 1] = 1 - else: - text_regions_p_1_n = None - textline_mask_tot_d = None - regions_without_separators_d = None - - # regions_without_separators = ( text_regions_p[:,:]==1 | text_regions_p[:,:]==2 )*1 - #self.return_regions_without_separators_new(text_regions_p[:,:,0],img_only_regions) - regions_without_separators = (text_regions_p[:,:] == 1)*1 - regions_without_separators[table_prediction == 1] = 1 - - pixel_lines=3 - if np.abs(slope_deskew) < SLOPE_THRESHOLD: - num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document( - text_regions_p, num_col_classifier, self.tables, pixel_lines) - - if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - num_col_d, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document( - text_regions_p_1_n, num_col_classifier, self.tables, pixel_lines) - - if num_col_classifier>=3: - if np.abs(slope_deskew) < SLOPE_THRESHOLD: - regions_without_separators = regions_without_separators.astype(np.uint8) - regions_without_separators = cv2.erode(regions_without_separators[:,:], KERNEL, iterations=6) - - if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - regions_without_separators_d = regions_without_separators_d.astype(np.uint8) - regions_without_separators_d = cv2.erode(regions_without_separators_d[:,:], KERNEL, iterations=6) - else: - pass - - if np.abs(slope_deskew) < SLOPE_THRESHOLD: - boxes, peaks_neg_tot_tables = return_boxes_of_images_by_order_of_reading_new( - splitter_y_new, regions_without_separators, matrix_of_lines_ch, - num_col_classifier, erosion_hurts, self.tables, self.right2left) - text_regions_p_tables = np.copy(text_regions_p) - text_regions_p_tables[:,:][(table_prediction[:,:]==1)] = 10 - pixel_line = 3 - img_revised_tab2 = self.add_tables_heuristic_to_layout( - text_regions_p_tables, boxes, 0, splitter_y_new, peaks_neg_tot_tables, text_regions_p_tables, - num_col_classifier , 0.000005, pixel_line) - - img_revised_tab2,contoures_tables = self.check_iou_of_bounding_box_and_contour_for_tables( - img_revised_tab2, table_prediction, 10, num_col_classifier) - else: - boxes_d, peaks_neg_tot_tables_d = return_boxes_of_images_by_order_of_reading_new( - splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, - num_col_classifier, erosion_hurts, self.tables, self.right2left) - text_regions_p_tables = np.copy(text_regions_p_1_n) - text_regions_p_tables = np.round(text_regions_p_tables) - text_regions_p_tables[(text_regions_p_tables != 3) & (table_prediction_n == 1)] = 10 - - pixel_line = 3 - img_revised_tab2 = self.add_tables_heuristic_to_layout( - text_regions_p_tables, boxes_d, 0, splitter_y_new_d, - peaks_neg_tot_tables_d, text_regions_p_tables, - num_col_classifier, 0.000005, pixel_line) - - img_revised_tab2_d,_ = 
self.check_iou_of_bounding_box_and_contour_for_tables( - img_revised_tab2, table_prediction_n, 10, num_col_classifier) - img_revised_tab2_d_rotated = rotate_image(img_revised_tab2_d, -slope_deskew) - - img_revised_tab2_d_rotated = np.round(img_revised_tab2_d_rotated) - img_revised_tab2_d_rotated = img_revised_tab2_d_rotated.astype(np.int8) - img_revised_tab2_d_rotated = resize_image(img_revised_tab2_d_rotated, - text_regions_p.shape[0], - text_regions_p.shape[1]) - - if np.abs(slope_deskew) < 0.13: - img_revised_tab = np.copy(img_revised_tab2) - else: - img_revised_tab = np.copy(text_regions_p) - img_revised_tab[img_revised_tab == 10] = 0 - img_revised_tab[img_revised_tab2_d_rotated == 10] = 10 - - ##img_revised_tab = img_revised_tab2[:,:] - #img_revised_tab = text_regions_p[:,:] - text_regions_p[text_regions_p == 10] = 0 - text_regions_p[img_revised_tab == 10] = 10 - #img_revised_tab[img_revised_tab2 == 10] = 10 - - pixel_img = 4 - min_area_mar = 0.00001 - - if self.light_version: - marginal_mask = (text_regions_p[:,:]==pixel_img)*1 - marginal_mask = marginal_mask.astype('uint8') - marginal_mask = cv2.dilate(marginal_mask, KERNEL, iterations=2) - - polygons_of_marginals = return_contours_of_interested_region(marginal_mask, 1, min_area_mar) - else: - polygons_of_marginals = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar) - - pixel_img = 10 - contours_tables = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar) - - # align the first model's labels with the second (full layout) model's label scheme - text_regions_p[:, :][text_regions_p[:, :] == 2] = 5 - text_regions_p[:, :][text_regions_p[:, :] == 3] = 6 - text_regions_p[:, :][text_regions_p[:, :] == 4] = 8 - - image_page = image_page.astype(np.uint8) - #print("full inside 1", time.time()- t_full0) - regions_fully, regions_fully_only_drop = self.extract_text_regions_new( - img_bin_light if self.light_version else image_page, - False, cols=num_col_classifier) - #print("full inside 2", time.time()- t_full0) - # 6 is the separators label in the old full layout model - # 4 is the drop capital class in the old full layout model - # in the new full layout model, drop capital is 3 and separators are 5 - - # the separators in full layout will not be written onto the layout - if not self.reading_order_machine_based: - text_regions_p[:,:][regions_fully[:,:,0]==5]=6 - ###regions_fully[:, :, 0][regions_fully_only_drop[:, :, 0] == 3] = 4 - - #text_regions_p[:,:][regions_fully[:,:,0]==6]=6 - ##regions_fully_only_drop = put_drop_out_from_only_drop_model(regions_fully_only_drop, text_regions_p) - ##regions_fully[:, :, 0][regions_fully_only_drop[:, :] == 4] = 4 - drop_capital_label_in_full_layout_model = 3 - - drops = (regions_fully[:,:,0]==drop_capital_label_in_full_layout_model)*1 - drops = drops.astype(np.uint8) - - regions_fully[:,:,0][regions_fully[:,:,0]==drop_capital_label_in_full_layout_model] = 1 - - drops = cv2.erode(drops[:,:], KERNEL, iterations=1) - regions_fully[:,:,0][drops[:,:]==1] = drop_capital_label_in_full_layout_model - - regions_fully = putt_bb_of_drop_capitals_of_model_in_patches_in_layout( - regions_fully, drop_capital_label_in_full_layout_model, text_regions_p) - ##regions_fully_np, _ = self.extract_text_regions(image_page, False, cols=num_col_classifier) - ##if num_col_classifier > 2: - ##regions_fully_np[:, :, 0][regions_fully_np[:, :, 0] == 4] = 0 - ##else: - ##regions_fully_np = filter_small_drop_capitals_from_no_patch_layout(regions_fully_np, text_regions_p) - - ###regions_fully =
boosting_headers_by_longshot_region_segmentation(regions_fully, - ### regions_fully_np, img_only_regions) - # plt.imshow(regions_fully[:,:,0]) - # plt.show() - text_regions_p[:, :][regions_fully[:, :, 0] == drop_capital_label_in_full_layout_model] = 4 - ####text_regions_p[:, :][regions_fully_np[:, :, 0] == 4] = 4 - #plt.imshow(text_regions_p) - #plt.show() - ####if not self.tables: - if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - _, textline_mask_tot_d, text_regions_p_1_n, regions_fully_n = rotation_not_90_func_full_layout( - image_page, textline_mask_tot, text_regions_p, regions_fully, slope_deskew) - - text_regions_p_1_n = resize_image(text_regions_p_1_n, text_regions_p.shape[0], text_regions_p.shape[1]) - textline_mask_tot_d = resize_image(textline_mask_tot_d, text_regions_p.shape[0], text_regions_p.shape[1]) - regions_fully_n = resize_image(regions_fully_n, text_regions_p.shape[0], text_regions_p.shape[1]) - if not self.tables: - regions_without_separators_d = (text_regions_p_1_n[:, :] == 1) * 1 - else: - text_regions_p_1_n = None - textline_mask_tot_d = None - regions_without_separators_d = None - if not self.tables: - regions_without_separators = (text_regions_p[:, :] == 1) * 1 - img_revised_tab = np.copy(text_regions_p[:, :]) - polygons_of_images = return_contours_of_interested_region(img_revised_tab, 5) - - self.logger.debug('exit run_boxes_full_layout') - #print("full inside 3", time.time()- t_full0) - return (polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, - regions_without_separators_d, regions_fully, regions_without_separators, - polygons_of_marginals, contours_tables) - - def do_order_of_regions_with_model(self, contours_only_text_parent, contours_only_text_parent_h, text_regions_p): - - height1 =672#448 - width1 = 448#224 - - height2 =672#448 - width2= 448#224 - - height3 =672#448 - width3 = 448#224 - - inference_bs = 3 - - ver_kernel = np.ones((5, 1), dtype=np.uint8) - hor_kernel = np.ones((1, 5), dtype=np.uint8) - - - min_cont_size_to_be_dilated = 10 - if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: - (cx_conts, cy_conts, - x_min_conts, x_max_conts, - y_min_conts, y_max_conts, - _) = find_new_features_of_contours(contours_only_text_parent) - args_cont_located = np.array(range(len(contours_only_text_parent))) - - diff_y_conts = np.abs(y_max_conts[:]-y_min_conts) - diff_x_conts = np.abs(x_max_conts[:]-x_min_conts) - - mean_x = statistics.mean(diff_x_conts) - median_x = statistics.median(diff_x_conts) - - - diff_x_ratio= diff_x_conts/mean_x - - args_cont_located_excluded = args_cont_located[diff_x_ratio>=1.3] - args_cont_located_included = args_cont_located[diff_x_ratio<1.3] - - contours_only_text_parent_excluded = [contours_only_text_parent[ind] - #contours_only_text_parent[diff_x_ratio>=1.3] - for ind in range(len(contours_only_text_parent)) - if diff_x_ratio[ind]>=1.3] - contours_only_text_parent_included = [contours_only_text_parent[ind] - #contours_only_text_parent[diff_x_ratio<1.3] - for ind in range(len(contours_only_text_parent)) - if diff_x_ratio[ind]<1.3] - - cx_conts_excluded = [cx_conts[ind] - #cx_conts[diff_x_ratio>=1.3] - for ind in range(len(cx_conts)) - if diff_x_ratio[ind]>=1.3] - cx_conts_included = [cx_conts[ind] - #cx_conts[diff_x_ratio<1.3] - for ind in range(len(cx_conts)) - if diff_x_ratio[ind]<1.3] - cy_conts_excluded = [cy_conts[ind] - #cy_conts[diff_x_ratio>=1.3] - for ind in range(len(cy_conts)) - if diff_x_ratio[ind]>=1.3] - cy_conts_included = [cy_conts[ind] - 
#cy_conts[diff_x_ratio<1.3] - for ind in range(len(cy_conts)) - if diff_x_ratio[ind]<1.3] - - #print(diff_x_ratio, 'ratio') - text_regions_p = text_regions_p.astype('uint8') - - if len(contours_only_text_parent_excluded)>0: - textregion_par = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1])).astype('uint8') - textregion_par = cv2.fillPoly(textregion_par, pts=contours_only_text_parent_included, color=(1,1)) - else: - textregion_par = (text_regions_p[:,:]==1)*1 - textregion_par = textregion_par.astype('uint8') - - text_regions_p_textregions_dilated = cv2.erode(textregion_par , hor_kernel, iterations=2) - text_regions_p_textregions_dilated = cv2.dilate(text_regions_p_textregions_dilated , ver_kernel, iterations=4) - text_regions_p_textregions_dilated = cv2.erode(text_regions_p_textregions_dilated , hor_kernel, iterations=1) - text_regions_p_textregions_dilated = cv2.dilate(text_regions_p_textregions_dilated , ver_kernel, iterations=5) - text_regions_p_textregions_dilated[text_regions_p[:,:]>1] = 0 - - - contours_only_dilated, hir_on_text_dilated = return_contours_of_image(text_regions_p_textregions_dilated) - contours_only_dilated = return_parent_contours(contours_only_dilated, hir_on_text_dilated) - - indexes_of_located_cont, center_x_coordinates_of_located, center_y_coordinates_of_located = \ - self.return_indexes_of_contours_located_inside_another_list_of_contours( - contours_only_dilated, contours_only_text_parent_included, - cx_conts_included, cy_conts_included, args_cont_located_included) - - - if len(args_cont_located_excluded)>0: - for ind in args_cont_located_excluded: - indexes_of_located_cont.append(np.array([ind])) - contours_only_dilated.append(contours_only_text_parent[ind]) - center_y_coordinates_of_located.append(0) - - array_list = [np.array([elem]) if isinstance(elem, int) else elem for elem in indexes_of_located_cont] - flattened_array = np.concatenate([arr.ravel() for arr in array_list]) - #print(len( np.unique(flattened_array)), 'indexes_of_located_cont uniques') - - missing_textregions = list( set(range(len(contours_only_text_parent))) - set(flattened_array) ) - #print(missing_textregions, 'missing_textregions') - - for ind in missing_textregions: - indexes_of_located_cont.append(np.array([ind])) - contours_only_dilated.append(contours_only_text_parent[ind]) - center_y_coordinates_of_located.append(0) - - - if contours_only_text_parent_h: - for vi in range(len(contours_only_text_parent_h)): - indexes_of_located_cont.append(int(vi+len(contours_only_text_parent))) - - array_list = [np.array([elem]) if isinstance(elem, int) else elem for elem in indexes_of_located_cont] - flattened_array = np.concatenate([arr.ravel() for arr in array_list]) - - y_len = text_regions_p.shape[0] - x_len = text_regions_p.shape[1] - - img_poly = np.zeros((y_len,x_len), dtype='uint8') - img_poly[text_regions_p[:,:]==1] = 1 - img_poly[text_regions_p[:,:]==2] = 2 - img_poly[text_regions_p[:,:]==3] = 4 - img_poly[text_regions_p[:,:]==6] = 5 - - img_header_and_sep = np.zeros((y_len,x_len), dtype='uint8') - if contours_only_text_parent_h: - _, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, _ = find_new_features_of_contours( - contours_only_text_parent_h) - for j in range(len(cy_main)): - img_header_and_sep[int(y_max_main[j]):int(y_max_main[j])+12, - int(x_min_main[j]):int(x_max_main[j])] = 1 - co_text_all_org = contours_only_text_parent + contours_only_text_parent_h - if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: - co_text_all = 
contours_only_dilated + contours_only_text_parent_h - else: - co_text_all = contours_only_text_parent + contours_only_text_parent_h - else: - co_text_all_org = contours_only_text_parent - if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: - co_text_all = contours_only_dilated - else: - co_text_all = contours_only_text_parent - - if not len(co_text_all): - return [], [] - - labels_con = np.zeros((int(y_len /6.), int(x_len/6.), len(co_text_all)), dtype=bool) - co_text_all = [(i/6).astype(int) for i in co_text_all] - for i in range(len(co_text_all)): - img = labels_con[:,:,i].astype(np.uint8) - - #img = cv2.resize(img, (int(img.shape[1]/6), int(img.shape[0]/6)), interpolation=cv2.INTER_NEAREST) - - cv2.fillPoly(img, pts=[co_text_all[i]], color=(1,)) - labels_con[:,:,i] = img - - - labels_con = resize_image(labels_con.astype(np.uint8), height1, width1).astype(bool) - img_header_and_sep = resize_image(img_header_and_sep, height1, width1) - img_poly = resize_image(img_poly, height3, width3) - - - - input_1 = np.zeros((inference_bs, height1, width1, 3)) - ordered = [list(range(len(co_text_all)))] - index_update = 0 - #print(labels_con.shape[2],"number of regions for reading order") - while index_update>=0: - ij_list = ordered.pop(index_update) - i = ij_list.pop(0) - - ante_list = [] - post_list = [] - tot_counter = 0 - batch = [] - for j in ij_list: - img1 = labels_con[:,:,i].astype(float) - img2 = labels_con[:,:,j].astype(float) - img1[img_poly==5] = 2 - img2[img_poly==5] = 2 - img1[img_header_and_sep==1] = 3 - img2[img_header_and_sep==1] = 3 - - input_1[len(batch), :, :, 0] = img1 / 3. - input_1[len(batch), :, :, 2] = img2 / 3. - input_1[len(batch), :, :, 1] = img_poly / 5. - - tot_counter += 1 - batch.append(j) - if tot_counter % inference_bs == 0 or tot_counter == len(ij_list): - y_pr = self.models["reading_order"].predict(input_1 , verbose=0) - for jb, j in enumerate(batch): - if y_pr[jb][0]>=0.5: - post_list.append(j) - else: - ante_list.append(j) - batch = [] - - if len(ante_list): - ordered.insert(index_update, ante_list) - index_update += 1 - ordered.insert(index_update, [i]) - if len(post_list): - ordered.insert(index_update + 1, post_list) - - index_update = -1 - for index_next, ij_list in enumerate(ordered): - if len(ij_list) > 1: - index_update = index_next - break - - ordered = [i[0] for i in ordered] - - if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: - org_contours_indexes = [] - for ind in range(len(ordered)): - region_with_curr_order = ordered[ind] - if region_with_curr_order < len(contours_only_dilated): - if np.isscalar(indexes_of_located_cont[region_with_curr_order]): - org_contours_indexes.extend([indexes_of_located_cont[region_with_curr_order]]) - else: - arg_sort_located_cont = np.argsort(center_y_coordinates_of_located[region_with_curr_order]) - org_contours_indexes.extend( - np.array(indexes_of_located_cont[region_with_curr_order])[arg_sort_located_cont]) - else: - org_contours_indexes.extend([indexes_of_located_cont[region_with_curr_order]]) - - region_ids = ['region_%04d' % i for i in range(len(co_text_all_org))] - return org_contours_indexes, region_ids - else: - region_ids = ['region_%04d' % i for i in range(len(co_text_all_org))] - return ordered, region_ids - - def return_start_and_end_of_common_text_of_textline_ocr(self,textline_image, ind_tot): - width = np.shape(textline_image)[1] - height = np.shape(textline_image)[0] - common_window = int(0.2*width) - - width1 = int ( width/2. 
- common_window ) - width2 = int ( width/2. + common_window ) - - img_sum = np.sum(textline_image[:,:,0], axis=0) - sum_smoothed = gaussian_filter1d(img_sum, 3) - - peaks_real, _ = find_peaks(sum_smoothed, height=0) - - if len(peaks_real)>70: - print(len(peaks_real), 'len(peaks_real)') - peaks_real = peaks_real[(peaks_real<width2) & (peaks_real>width1)] - - arg_sort = np.argsort(sum_smoothed[peaks_real]) - arg_sort4 = arg_sort[::-1][:4] - peaks_sort_4 = peaks_real[arg_sort][::-1][:4] - - argsort_sorted = np.argsort(peaks_sort_4) - first_4_sorted = peaks_sort_4[argsort_sorted] - y_4_sorted = sum_smoothed[peaks_real][arg_sort4[argsort_sorted]] - #print(first_4_sorted,'first_4_sorted') - - arg_sortnew = np.argsort(y_4_sorted) - peaks_final = np.sort( first_4_sorted[arg_sortnew][2:] ) - - #plt.figure(ind_tot) - #plt.imshow(textline_image) - #plt.plot([peaks_final[0], peaks_final[0]], [0, height-1]) - #plt.plot([peaks_final[1], peaks_final[1]], [0, height-1]) - #plt.savefig('./'+str(ind_tot)+'.png') - - return peaks_final[0], peaks_final[1] - else: - pass - - def return_start_and_end_of_common_text_of_textline_ocr_new_splitted( - self, peaks_real, sum_smoothed, start_split, end_split): - - peaks_real = peaks_real[(peaks_real<end_split) & (peaks_real>start_split)] - - arg_sort = np.argsort(sum_smoothed[peaks_real]) - arg_sort4 = arg_sort[::-1][:4] - peaks_sort_4 = peaks_real[arg_sort][::-1][:4] - argsort_sorted = np.argsort(peaks_sort_4) - - first_4_sorted = peaks_sort_4[argsort_sorted] - y_4_sorted = sum_smoothed[peaks_real][arg_sort4[argsort_sorted]] - #print(first_4_sorted,'first_4_sorted') - - arg_sortnew = np.argsort(y_4_sorted) - peaks_final = np.sort( first_4_sorted[arg_sortnew][3:] ) - return peaks_final[0] - - def return_start_and_end_of_common_text_of_textline_ocr_new(self, textline_image, ind_tot): - width = np.shape(textline_image)[1] - height = np.shape(textline_image)[0] - common_window = int(0.15*width) - - width1 = int ( width/2. - common_window ) - width2 = int ( width/2. + common_window ) - mid = int(width/2.) - - img_sum = np.sum(textline_image[:,:,0], axis=0) - sum_smoothed = gaussian_filter1d(img_sum, 3) - - peaks_real, _ = find_peaks(sum_smoothed, height=0) - if len(peaks_real)>70: - peak_start = self.return_start_and_end_of_common_text_of_textline_ocr_new_splitted( - peaks_real, sum_smoothed, width1, mid+2) - peak_end = self.return_start_and_end_of_common_text_of_textline_ocr_new_splitted( - peaks_real, sum_smoothed, mid-2, width2) - - #plt.figure(ind_tot) - #plt.imshow(textline_image) - #plt.plot([peak_start, peak_start], [0, height-1]) - #plt.plot([peak_end, peak_end], [0, height-1]) - #plt.savefig('./'+str(ind_tot)+'.png') - - return peak_start, peak_end - else: - pass - - def return_ocr_of_textline_without_common_section( - self, textline_image, model_ocr, processor, device, width_textline, h2w_ratio,ind_tot): - - if h2w_ratio > 0.05: - pixel_values = processor(textline_image, return_tensors="pt").pixel_values - generated_ids = model_ocr.generate(pixel_values.to(device)) - generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - else: - #width = np.shape(textline_image)[1] - #height = np.shape(textline_image)[0] - #common_window = int(0.3*width) - #width1 = int ( width/2. - common_window ) - #width2 = int ( width/2.
+ common_window ) - - split_point = return_start_and_end_of_common_text_of_textline_ocr_without_common_section(textline_image) - if split_point: - image1 = textline_image[:, :split_point,:]# image.crop((0, 0, width2, height)) - image2 = textline_image[:, split_point:,:]#image.crop((width1, 0, width, height)) - - #pixel_values1 = processor(image1, return_tensors="pt").pixel_values - #pixel_values2 = processor(image2, return_tensors="pt").pixel_values - - pixel_values_merged = processor([image1,image2], return_tensors="pt").pixel_values - generated_ids_merged = model_ocr.generate(pixel_values_merged.to(device)) - generated_text_merged = processor.batch_decode(generated_ids_merged, skip_special_tokens=True) - - #print(generated_text_merged,'generated_text_merged') - - #generated_ids1 = model_ocr.generate(pixel_values1.to(device)) - #generated_ids2 = model_ocr.generate(pixel_values2.to(device)) - - #generated_text1 = processor.batch_decode(generated_ids1, skip_special_tokens=True)[0] - #generated_text2 = processor.batch_decode(generated_ids2, skip_special_tokens=True)[0] - - #generated_text = generated_text1 + ' ' + generated_text2 - generated_text = generated_text_merged[0] + ' ' + generated_text_merged[1] - - #print(generated_text1,'generated_text1') - #print(generated_text2, 'generated_text2') - #print('########################################') - else: - pixel_values = processor(textline_image, return_tensors="pt").pixel_values - generated_ids = model_ocr.generate(pixel_values.to(device)) - generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - - #print(generated_text,'generated_text') - #print('########################################') - return generated_text - - def return_ocr_of_textline( - self, textline_image, model_ocr, processor, device, width_textline, h2w_ratio,ind_tot): - - if h2w_ratio > 0.05: - pixel_values = processor(textline_image, return_tensors="pt").pixel_values - generated_ids = model_ocr.generate(pixel_values.to(device)) - generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - else: - #width = np.shape(textline_image)[1] - #height = np.shape(textline_image)[0] - #common_window = int(0.3*width) - #width1 = int ( width/2. - common_window ) - #width2 = int ( width/2. 
+ common_window ) - - try: - width1, width2 = self.return_start_and_end_of_common_text_of_textline_ocr_new(textline_image, ind_tot) - - image1 = textline_image[:, :width2,:]# image.crop((0, 0, width2, height)) - image2 = textline_image[:, width1:,:]#image.crop((width1, 0, width, height)) - - pixel_values1 = processor(image1, return_tensors="pt").pixel_values - pixel_values2 = processor(image2, return_tensors="pt").pixel_values - - generated_ids1 = model_ocr.generate(pixel_values1.to(device)) - generated_ids2 = model_ocr.generate(pixel_values2.to(device)) - - generated_text1 = processor.batch_decode(generated_ids1, skip_special_tokens=True)[0] - generated_text2 = processor.batch_decode(generated_ids2, skip_special_tokens=True)[0] - #print(generated_text1,'generated_text1') - #print(generated_text2, 'generated_text2') - #print('########################################') - - match = sq(None, generated_text1, generated_text2).find_longest_match( - 0, len(generated_text1), 0, len(generated_text2)) - generated_text = generated_text1 + generated_text2[match.b+match.size:] - except: - pixel_values = processor(textline_image, return_tensors="pt").pixel_values - generated_ids = model_ocr.generate(pixel_values.to(device)) - generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - - return generated_text - - def return_list_of_contours_with_desired_order(self, ls_cons, sorted_indexes): - return list(np.array(ls_cons)[np.array(sorted_indexes)]) - - def return_it_in_two_groups(self, x_differential): - split = [ind if x_differential[ind]!=x_differential[ind+1] else -1 - for ind in range(len(x_differential)-1)] - split_masked = list( np.array(split[:])[np.array(split[:])!=-1] ) - if 0 not in split_masked: - split_masked.insert(0, -1) - split_masked.append(len(x_differential)-1) - - split_masked = np.array(split_masked) +1 - - sums = [np.sum(x_differential[split_masked[ind]:split_masked[ind+1]]) - for ind in range(len(split_masked)-1)] - - indexes_to_bec_changed = [ind if (np.abs(sums[ind-1]) > np.abs(sums[ind]) and - np.abs(sums[ind+1]) > np.abs(sums[ind])) else -1 - for ind in range(1,len(sums)-1)] - indexes_to_bec_changed_filtered = np.array(indexes_to_bec_changed)[np.array(indexes_to_bec_changed)!=-1] - - x_differential_new = np.copy(x_differential) - for i in indexes_to_bec_changed_filtered: - i_slice = slice(split_masked[i], split_masked[i+1]) - x_differential_new[i_slice] = -1 * np.array(x_differential)[i_slice] - - return x_differential_new - - def filter_contours_inside_a_bigger_one(self, contours, contours_d_ordered, image, - marginal_cnts=None, type_contour="textregion"): - if type_contour == "textregion": - areas = np.array(list(map(cv2.contourArea, contours))) - area_tot = image.shape[0]*image.shape[1] - areas_ratio = areas / area_tot - cx_main, cy_main = find_center_of_contours(contours) - - contours_index_small = np.flatnonzero(areas_ratio < 1e-3) - contours_index_large = np.flatnonzero(areas_ratio >= 1e-3) - - #contours_> = [contours[ind] for ind in contours_index_large] - indexes_to_be_removed = [] - for ind_small in contours_index_small: - results = [cv2.pointPolygonTest(contours[ind_large], (cx_main[ind_small], - cy_main[ind_small]), - False) - for ind_large in contours_index_large] - results = np.array(results) - if np.any(results==1): - indexes_to_be_removed.append(ind_small) - elif marginal_cnts: - results_marginal = [cv2.pointPolygonTest(marginal_cnt, - (cx_main[ind_small], - cy_main[ind_small]), - False) - for marginal_cnt in marginal_cnts] - 
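# [editor's sketch, not part of the original diff] The point-in-polygon tests in
# this method use cv2.pointPolygonTest, which with measureDist=False returns +1
# if the point lies inside the contour, 0 on its edge and -1 outside. A
# self-contained illustration of the same "drop a contour whose center falls
# inside a bigger one" rule; names here are hypothetical:
import cv2
import numpy as np

def drop_nested_contours(contours):
    areas = [cv2.contourArea(c) for c in contours]
    centers = [c.reshape(-1, 2).mean(axis=0) for c in contours]  # (x, y) per contour
    kept = []
    for i, (cx, cy) in enumerate(centers):
        # drop contour i if its center lies inside (or on) a larger contour
        swallowed = any(
            j != i and areas[j] > areas[i] and
            cv2.pointPolygonTest(contours[j], (float(cx), float(cy)), False) >= 0
            for j in range(len(contours)))
        if not swallowed:
            kept.append(contours[i])
    return kept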
results_marginal = np.array(results_marginal) - if np.any(results_marginal==1): - indexes_to_be_removed.append(ind_small) - - contours = np.delete(contours, indexes_to_be_removed, axis=0) - if len(contours_d_ordered): - contours_d_ordered = np.delete(contours_d_ordered, indexes_to_be_removed, axis=0) - - return contours, contours_d_ordered - - else: - contours_txtline_of_all_textregions = [] - indexes_of_textline_tot = [] - index_textline_inside_textregion = [] - for ind_region, textlines in enumerate(contours): - contours_txtline_of_all_textregions.extend(textlines) - index_textline_inside_textregion.extend(list(range(len(textlines)))) - indexes_of_textline_tot.extend([ind_region] * len(textlines)) - - areas_tot = np.array(list(map(cv2.contourArea, contours_txtline_of_all_textregions))) - area_tot_tot = image.shape[0]*image.shape[1] - cx_main_tot, cy_main_tot = find_center_of_contours(contours_txtline_of_all_textregions) - - textline_in_textregion_index_to_del = {} - for ij in range(len(contours_txtline_of_all_textregions)): - area_of_con_interest = areas_tot[ij] - args_without = np.delete(np.arange(len(contours_txtline_of_all_textregions)), ij) - areas_without = areas_tot[args_without] - args_with_bigger_area = args_without[areas_without > 1.5*area_of_con_interest] - - if len(args_with_bigger_area)>0: - results = [cv2.pointPolygonTest(contours_txtline_of_all_textregions[ind], - (cx_main_tot[ij], - cy_main_tot[ij]), - False) - for ind in args_with_bigger_area ] - results = np.array(results) - if np.any(results==1): - #print(indexes_of_textline_tot[ij], index_textline_inside_textregion[ij]) - textline_in_textregion_index_to_del.setdefault( - indexes_of_textline_tot[ij], list()).append( - index_textline_inside_textregion[ij]) - #contours[indexes_of_textline_tot[ij]].pop(index_textline_inside_textregion[ij]) - - for textregion_index_to_del in textline_in_textregion_index_to_del: - contours[textregion_index_to_del] = list(np.delete( - contours[textregion_index_to_del], - textline_in_textregion_index_to_del[textregion_index_to_del], - # needed so numpy does not flatten the entire result when 0 left - axis=0)) - - return contours - - def return_indexes_of_contours_located_inside_another_list_of_contours( - self, contours, contours_loc, cx_main_loc, cy_main_loc, indexes_loc): - indexes_of_located_cont = [] - center_x_coordinates_of_located = [] - center_y_coordinates_of_located = [] - #M_main_tot = [cv2.moments(contours_loc[j]) - #for j in range(len(contours_loc))] - #cx_main_loc = [(M_main_tot[j]["m10"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] - #cy_main_loc = [(M_main_tot[j]["m01"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] - - for ij in range(len(contours)): - results = [cv2.pointPolygonTest(contours[ij], (cx_main_loc[ind], cy_main_loc[ind]), False) - for ind in range(len(cy_main_loc)) ] - results = np.array(results) - indexes_in = np.where((results == 0) | (results == 1)) - # [(results == 0) | (results == 1)]#np.where((results == 0) | (results == 1)) - indexes = indexes_loc[indexes_in] - - indexes_of_located_cont.append(indexes) - center_x_coordinates_of_located.append(np.array(cx_main_loc)[indexes_in] ) - center_y_coordinates_of_located.append(np.array(cy_main_loc)[indexes_in] ) - - return indexes_of_located_cont, center_x_coordinates_of_located, center_y_coordinates_of_located - - - def filter_contours_without_textline_inside( - self, contours_par, contours_textline, - contours_only_text_parent_d_ordered, - conf_contours_textregions): - - 
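# [editor's sketch, not part of the original diff] The body below drops every
# text region that no longer contains any textline and applies the same
# surviving indices to all parallel per-region lists, so that regions,
# textlines, ordering and confidence values stay aligned. A minimal standalone
# version of that filtering; names here are hypothetical:
def keep_regions_with_textlines(regions, textlines, *parallel_lists):
    # indices of regions that still hold at least one textline
    keep = [i for i, lines in enumerate(textlines) if len(lines) > 0]
    def pick(lis):
        return [lis[i] for i in keep] if len(lis) else []
    return tuple(pick(lis) for lis in (regions, textlines, *parallel_lists))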
assert len(contours_par) == len(contours_textline) - indices = np.arange(len(contours_textline)) - indices = np.delete(indices, np.flatnonzero([len(lines) == 0 for lines in contours_textline])) - def filterfun(lis): - if len(lis) == 0: - return [] - return list(np.array(lis)[indices]) - - return (filterfun(contours_par), - filterfun(contours_textline), - filterfun(contours_only_text_parent_d_ordered), - filterfun(conf_contours_textregions), - # indices - ) - - def separate_marginals_to_left_and_right_and_order_from_top_to_down( - self, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, - slopes_marginals, mid_point_of_page_width): - cx_marg, cy_marg = find_center_of_contours(polygons_of_marginals) - cx_marg = np.array(cx_marg) - cy_marg = np.array(cy_marg) - - def split(lis): - array = np.array(lis) - return (list(array[cx_marg < mid_point_of_page_width]), - list(array[cx_marg >= mid_point_of_page_width])) - - (poly_marg_left, - poly_marg_right) = \ - split(polygons_of_marginals) - - (all_found_textline_polygons_marginals_left, - all_found_textline_polygons_marginals_right) = \ - split(all_found_textline_polygons_marginals) - - (all_box_coord_marginals_left, - all_box_coord_marginals_right) = \ - split(all_box_coord_marginals) - - (slopes_marg_left, - slopes_marg_right) = \ - split(slopes_marginals) - - (cy_marg_left, - cy_marg_right) = \ - split(cy_marg) - - order_left = np.argsort(cy_marg_left) - order_right = np.argsort(cy_marg_right) - def sort_left(lis): - return list(np.array(lis)[order_left]) - def sort_right(lis): - return list(np.array(lis)[order_right]) - - ordered_left_marginals = sort_left(poly_marg_left) - ordered_right_marginals = sort_right(poly_marg_right) - - ordered_left_marginals_textline = sort_left(all_found_textline_polygons_marginals_left) - ordered_right_marginals_textline = sort_right(all_found_textline_polygons_marginals_right) - - ordered_left_marginals_bbox = sort_left(all_box_coord_marginals_left) - ordered_right_marginals_bbox = sort_right(all_box_coord_marginals_right) - - ordered_left_slopes_marginals = sort_left(slopes_marg_left) - ordered_right_slopes_marginals = sort_right(slopes_marg_right) - - return (ordered_left_marginals, - ordered_right_marginals, - ordered_left_marginals_textline, - ordered_right_marginals_textline, - ordered_left_marginals_bbox, - ordered_right_marginals_bbox, - ordered_left_slopes_marginals, - ordered_right_slopes_marginals) - - def run(self, - overwrite: bool = False, - image_filename: Optional[str] = None, - dir_in: Optional[str] = None, - dir_out: Optional[str] = None, - dir_of_cropped_images: Optional[str] = None, - dir_of_layout: Optional[str] = None, - dir_of_deskewed: Optional[str] = None, - dir_of_all: Optional[str] = None, - dir_save_page: Optional[str] = None, - ): - """ - Get image and scales, then extract the page of scanned image - """ - self.logger.debug("enter run") - t0_tot = time.time() - - # Log enabled features directly - enabled_modes = [] - if self.light_version: - enabled_modes.append("Light version") - if self.textline_light: - enabled_modes.append("Light textline detection") - if self.full_layout: - enabled_modes.append("Full layout analysis") - if self.ocr: - enabled_modes.append("OCR") - if self.tables: - enabled_modes.append("Table detection") - if enabled_modes: - self.logger.info("Enabled modes: " + ", ".join(enabled_modes)) - if self.enable_plotting: - self.logger.info("Saving debug plots") - if dir_of_cropped_images: - self.logger.info(f"Saving cropped images to: 
{dir_of_cropped_images}") - if dir_of_layout: - self.logger.info(f"Saving layout plots to: {dir_of_layout}") - if dir_of_deskewed: - self.logger.info(f"Saving deskewed images to: {dir_of_deskewed}") - - if dir_in: - ls_imgs = [os.path.join(dir_in, image_filename) - for image_filename in filter(is_image_filename, - os.listdir(dir_in))] - elif image_filename: - ls_imgs = [image_filename] - else: - raise ValueError("run requires either a single image filename or a directory") - - for img_filename in ls_imgs: - self.logger.info(img_filename) - t0 = time.time() - - self.reset_file_name_dir(img_filename, dir_out) - if self.enable_plotting: - self.plotter = EynollahPlotter(dir_out=dir_out, - dir_of_all=dir_of_all, - dir_save_page=dir_save_page, - dir_of_deskewed=dir_of_deskewed, - dir_of_cropped_images=dir_of_cropped_images, - dir_of_layout=dir_of_layout, - image_filename_stem=Path(image_filename).stem) - #print("text region early -11 in %.1fs", time.time() - t0) - if os.path.exists(self.writer.output_filename): - if overwrite: - self.logger.warning("will overwrite existing output file '%s'", self.writer.output_filename) - else: - self.logger.warning("will skip input for existing output file '%s'", self.writer.output_filename) - continue - - pcgts = self.run_single() - self.logger.info("Job done in %.1fs", time.time() - t0) - self.writer.write_pagexml(pcgts) - - if dir_in: - self.logger.info("All jobs done in %.1fs", time.time() - t0_tot) - - def run_single(self): - t0 = time.time() - - self.logger.info(f"Processing file: {self.writer.image_filename}") - self.logger.info("Step 1/5: Image Enhancement") - - img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = \ - self.run_enhancement(self.light_version) - - self.logger.info(f"Image: {self.image.shape[1]}x{self.image.shape[0]}, " - f"{self.dpi} DPI, {num_col_classifier} columns") - if is_image_enhanced: - self.logger.info("Enhancement applied") - - self.logger.info(f"Enhancement complete ({time.time() - t0:.1f}s)") - - - # Image Extraction Mode - if self.extract_only_images: - self.logger.info("Step 2/5: Image Extraction Mode") - - text_regions_p_1, erosion_hurts, polygons_seplines, polygons_of_images, \ - image_page, page_coord, cont_page = \ - self.get_regions_light_v_extract_only_images(img_res, is_image_enhanced, num_col_classifier) - pcgts = self.writer.build_pagexml_no_full_layout( - [], page_coord, [], [], [], [], - polygons_of_images, [], [], [], [], [], [], [], [], [], - cont_page, [], []) - if self.plotter: - self.plotter.write_images_into_directory(polygons_of_images, image_page) - - self.logger.info("Image extraction complete") - return pcgts - - # Basic Processing Mode - if self.skip_layout_and_reading_order: - self.logger.info("Step 2/5: Basic Processing Mode") - self.logger.info("Skipping layout analysis and reading order detection") - - _ ,_, _, _, textline_mask_tot_ea, img_bin_light, _ = \ - self.get_regions_light_v(img_res, is_image_enhanced, num_col_classifier,) - - page_coord, image_page, textline_mask_tot_ea, img_bin_light, cont_page = \ - self.run_graphics_and_columns_without_layout(textline_mask_tot_ea, img_bin_light) - - ##all_found_textline_polygons =self.scale_contours_new(textline_mask_tot_ea) - - cnt_clean_rot_raw, hir_on_cnt_clean_rot = return_contours_of_image(textline_mask_tot_ea) - all_found_textline_polygons = filter_contours_area_of_image( - textline_mask_tot_ea, cnt_clean_rot_raw, hir_on_cnt_clean_rot, max_area=1, min_area=0.00001) - - cx_main_tot, cy_main_tot = 
find_center_of_contours(all_found_textline_polygons) - w_h_textlines = [cv2.boundingRect(polygon)[2:] - for polygon in all_found_textline_polygons] - w_h_textlines = [w / float(h) for w, h in w_h_textlines] - - all_found_textline_polygons = self.get_textlines_of_a_textregion_sorted( - #all_found_textline_polygons[::-1] - all_found_textline_polygons, cx_main_tot, cy_main_tot, w_h_textlines) - all_found_textline_polygons = [ all_found_textline_polygons ] - all_found_textline_polygons = dilate_textline_contours(all_found_textline_polygons) - all_found_textline_polygons = self.filter_contours_inside_a_bigger_one( - all_found_textline_polygons, None, textline_mask_tot_ea, type_contour="textline") - - order_text_new = [0] - slopes =[0] - id_of_texts_tot =['region_0001'] - conf_contours_textregions =[0] - - if self.ocr and not self.tr: - gc.collect() - ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines( - image_page, all_found_textline_polygons, np.zeros((len(all_found_textline_polygons), 4)), - self.models["ocr"], self.b_s_ocr, self.num_to_char, textline_light=True) - else: - ocr_all_textlines = None - - pcgts = self.writer.build_pagexml_no_full_layout( - cont_page, page_coord, order_text_new, id_of_texts_tot, - all_found_textline_polygons, page_coord, [], - [], [], [], [], [], [], - slopes, [], [], - cont_page, [], [], - ocr_all_textlines=ocr_all_textlines, - conf_contours_textregion=conf_contours_textregions, - skip_layout_reading_order=True) - self.logger.info("Basic processing complete") - return pcgts - - #print("text region early -1 in %.1fs", time.time() - t0) - t1 = time.time() - self.logger.info("Step 2/5: Layout Analysis") - - if self.light_version: - self.logger.info("Using light version processing") - text_regions_p_1 ,erosion_hurts, polygons_seplines, polygons_text_early, \ - textline_mask_tot_ea, img_bin_light, confidence_matrix = \ - self.get_regions_light_v(img_res, is_image_enhanced, num_col_classifier) - #print("text region early -2 in %.1fs", time.time() - t0) - - if num_col_classifier == 1 or num_col_classifier ==2: - if num_col_classifier == 1: - img_w_new = 1000 - else: - img_w_new = 1300 - img_h_new = img_w_new * textline_mask_tot_ea.shape[0] // textline_mask_tot_ea.shape[1] - - textline_mask_tot_ea_deskew = resize_image(textline_mask_tot_ea,img_h_new, img_w_new ) - slope_deskew = self.run_deskew(textline_mask_tot_ea_deskew) - else: - slope_deskew = self.run_deskew(textline_mask_tot_ea) - #print("text region early -2,5 in %.1fs", time.time() - t0) - #self.logger.info("Textregion detection took %.1fs ", time.time() - t1t) - num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, \ - text_regions_p_1, cont_page, table_prediction, textline_mask_tot_ea, img_bin_light = \ - self.run_graphics_and_columns_light(text_regions_p_1, textline_mask_tot_ea, - num_col_classifier, num_column_is_classified, - erosion_hurts, img_bin_light) - #self.logger.info("run graphics %.1fs ", time.time() - t1t) - #print("text region early -3 in %.1fs", time.time() - t0) - textline_mask_tot_ea_org = np.copy(textline_mask_tot_ea) - - else: - text_regions_p_1, erosion_hurts, polygons_seplines, polygons_text_early = \ - self.get_regions_from_xy_2models(img_res, is_image_enhanced, - num_col_classifier) - self.logger.info(f"Textregion detection took {time.time() - t1:.1f}s") - confidence_matrix = np.zeros((text_regions_p_1.shape[:2])) - - t1 = time.time() - num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, \ - 
text_regions_p_1, cont_page, table_prediction = \ - self.run_graphics_and_columns(text_regions_p_1, num_col_classifier, num_column_is_classified, - erosion_hurts) - self.logger.info(f"Graphics detection took {time.time() - t1:.1f}s") - #self.logger.info('cont_page %s', cont_page) - #plt.imshow(table_prediction) - #plt.show() - self.logger.info(f"Layout analysis complete ({time.time() - t1:.1f}s)") - - if not num_col and len(polygons_text_early) == 0: - self.logger.info("No columns detected - generating empty PAGE-XML") - - pcgts = self.writer.build_pagexml_no_full_layout( - [], page_coord, [], [], [], [], [], [], [], [], [], [], [], [], [], [], - cont_page, [], []) - return pcgts - - #print("text region early in %.1fs", time.time() - t0) - t1 = time.time() - if not self.light_version: - textline_mask_tot_ea = self.run_textline(image_page) - self.logger.info(f"Textline detection took {time.time() - t1:.1f}s") - t1 = time.time() - slope_deskew = self.run_deskew(textline_mask_tot_ea) - self.logger.info(f"Deskewing took {time.time() - t1:.1f}s") - elif num_col_classifier in (1,2): - org_h_l_m = textline_mask_tot_ea.shape[0] - org_w_l_m = textline_mask_tot_ea.shape[1] - if num_col_classifier == 1: - img_w_new = 2000 - else: - img_w_new = 2400 - img_h_new = img_w_new * textline_mask_tot_ea.shape[0] // textline_mask_tot_ea.shape[1] - - image_page = resize_image(image_page,img_h_new, img_w_new ) - textline_mask_tot_ea = resize_image(textline_mask_tot_ea,img_h_new, img_w_new ) - mask_images = resize_image(mask_images,img_h_new, img_w_new ) - mask_lines = resize_image(mask_lines,img_h_new, img_w_new ) - text_regions_p_1 = resize_image(text_regions_p_1,img_h_new, img_w_new ) - table_prediction = resize_image(table_prediction,img_h_new, img_w_new ) - - textline_mask_tot, text_regions_p = \ - self.run_marginals(textline_mask_tot_ea, mask_images, mask_lines, - num_col_classifier, slope_deskew, text_regions_p_1, table_prediction) - if self.plotter: - self.plotter.save_plot_of_layout_main_all(text_regions_p, image_page) - self.plotter.save_plot_of_layout_main(text_regions_p, image_page) - - if image_page.size: - # if the ratio of text regions to page area is smaller than 30%, - # then the deskew angle will not be allowed to exceed 45° - if (abs(slope_deskew) > 45 and - ((text_regions_p == 1).sum() + - (text_regions_p == 4).sum()) / float(image_page.size) <= 0.3): - slope_deskew = 0 - - # if there is no main text, then relabel marginalia as main - if (text_regions_p == 1).sum() == 0: - text_regions_p[text_regions_p == 4] = 1 - - self.logger.info("Step 3/5: Text Line Detection") - - if self.curved_line: - self.logger.info("Mode: Curved line detection") - elif self.textline_light: - self.logger.info("Mode: Light detection") - - if self.light_version and num_col_classifier in (1,2): - image_page = resize_image(image_page,org_h_l_m, org_w_l_m ) - textline_mask_tot_ea = resize_image(textline_mask_tot_ea,org_h_l_m, org_w_l_m ) - text_regions_p = resize_image(text_regions_p,org_h_l_m, org_w_l_m ) - textline_mask_tot = resize_image(textline_mask_tot,org_h_l_m, org_w_l_m ) - text_regions_p_1 = resize_image(text_regions_p_1,org_h_l_m, org_w_l_m ) - table_prediction = resize_image(table_prediction,org_h_l_m, org_w_l_m ) - - self.logger.info(f"Detection of marginals took {time.time() - t1:.1f}s") - ## from here on, this takes very long - t1 = time.time() - if not self.full_layout: - polygons_of_images, img_revised_tab, text_regions_p_1_n, \ - textline_mask_tot_d, regions_without_separators_d, \ - boxes, boxes_d, polygons_of_marginals,
contours_tables = \ - self.run_boxes_no_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, - num_col_classifier, table_prediction, erosion_hurts) - ###polygons_of_marginals = dilate_textregion_contours(polygons_of_marginals) - else: - polygons_of_images, img_revised_tab, text_regions_p_1_n, \ - textline_mask_tot_d, regions_without_separators_d, \ - regions_fully, regions_without_separators, polygons_of_marginals, contours_tables = \ - self.run_boxes_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, - num_col_classifier, img_only_regions, table_prediction, erosion_hurts, - img_bin_light if self.light_version else None) - ###polygons_of_marginals = dilate_textregion_contours(polygons_of_marginals) - if self.light_version: - drop_label_in_full_layout = 4 - textline_mask_tot_ea_org[img_revised_tab==drop_label_in_full_layout] = 0 - - - text_only = (img_revised_tab[:, :] == 1) * 1 - if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - text_only_d = (text_regions_p_1_n[:, :] == 1) * 1 - - #print("text region early 2 in %.1fs", time.time() - t0) - ###min_con_area = 0.000005 - contours_only_text, hir_on_text = return_contours_of_image(text_only) - contours_only_text_parent = return_parent_contours(contours_only_text, hir_on_text) - contours_only_text_parent_d_ordered = [] - contours_only_text_parent_d = [] - - if len(contours_only_text_parent) > 0: - areas_tot_text = np.prod(text_only.shape) - areas_cnt_text = np.array([cv2.contourArea(c) for c in contours_only_text_parent]) - areas_cnt_text = areas_cnt_text / float(areas_tot_text) - #self.logger.info('areas_cnt_text %s', areas_cnt_text) - contours_only_text_parent = np.array(contours_only_text_parent)[areas_cnt_text > MIN_AREA_REGION] - areas_cnt_text_parent = areas_cnt_text[areas_cnt_text > MIN_AREA_REGION] - - index_con_parents = np.argsort(areas_cnt_text_parent) - contours_only_text_parent = contours_only_text_parent[index_con_parents] - areas_cnt_text_parent = areas_cnt_text_parent[index_con_parents] - - centers = np.stack(find_center_of_contours(contours_only_text_parent)) # [2, N] - - center0 = centers[:, -1:] # [2, 1] - - if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - contours_only_text_d, hir_on_text_d = return_contours_of_image(text_only_d) - contours_only_text_parent_d = return_parent_contours(contours_only_text_d, hir_on_text_d) - - areas_tot_text_d = np.prod(text_only_d.shape) - areas_cnt_text_d = np.array([cv2.contourArea(c) for c in contours_only_text_parent_d]) - areas_cnt_text_d = areas_cnt_text_d / float(areas_tot_text_d) - - contours_only_text_parent_d = np.array(contours_only_text_parent_d)[areas_cnt_text_d > MIN_AREA_REGION] - areas_cnt_text_d = areas_cnt_text_d[areas_cnt_text_d > MIN_AREA_REGION] - - if len(contours_only_text_parent_d): - index_con_parents_d = np.argsort(areas_cnt_text_d) - contours_only_text_parent_d = np.array(contours_only_text_parent_d)[index_con_parents_d] - areas_cnt_text_d = areas_cnt_text_d[index_con_parents_d] - - centers_d = np.stack(find_center_of_contours(contours_only_text_parent_d)) # [2, N] - - center0_d = centers_d[:, -1:].copy() # [2, 1] - - # find the largest among the largest 5 deskewed contours - # that is also closest to the largest original contour - last5_centers_d = centers_d[:, -5:] - dists_d = np.linalg.norm(center0 - last5_centers_d, axis=0) - ind_largest = len(contours_only_text_parent_d) - last5_centers_d.shape[1] + np.argmin(dists_d) - center0_d[:, 0] = centers_d[:, ind_largest] - - # order new contours the same way as the undeskewed contours - 
# (by calculating the offset of the largest contours, respectively, - # of the new and undeskewed image; then for each contour, - # finding the closest new contour, with proximity calculated - # as distance of their centers modulo offset vector) - (h, w) = text_only.shape[:2] - center = (w // 2.0, h // 2.0) - M = cv2.getRotationMatrix2D(center, slope_deskew, 1.0) - M_22 = np.array(M)[:2, :2] - center0 = np.dot(M_22, center0) # [2, 1] - offset = center0 - center0_d # [2, 1] - - centers = np.dot(M_22, centers) - offset # [2,N] - # add dimension for area (so only contours of similar size will be considered close) - centers = np.append(centers, areas_cnt_text_parent[np.newaxis], axis=0) - centers_d = np.append(centers_d, areas_cnt_text_d[np.newaxis], axis=0) - - dists = np.zeros((len(contours_only_text_parent), len(contours_only_text_parent_d))) - for i in range(len(contours_only_text_parent)): - dists[i] = np.linalg.norm(centers[:, i:i + 1] - centers_d, axis=0) - corresp = np.zeros(dists.shape, dtype=bool) - # keep searching next-closest until at least one correspondence on each side - while not np.all(corresp.sum(axis=1)) and not np.all(corresp.sum(axis=0)): - idx = np.nanargmin(dists) - i, j = np.unravel_index(idx, dists.shape) - dists[i, j] = np.nan - corresp[i, j] = True - #print("original/deskewed adjacency", corresp.nonzero()) - contours_only_text_parent_d_ordered = np.zeros_like(contours_only_text_parent) - contours_only_text_parent_d_ordered = contours_only_text_parent_d[np.argmax(corresp, axis=1)] - # img1 = np.zeros(text_only_d.shape[:2], dtype=np.uint8) - # for i in range(len(contours_only_text_parent)): - # cv2.fillPoly(img1, pts=[contours_only_text_parent_d_ordered[i]], color=i + 1) - # plt.subplot(2, 2, 1, title="direct corresp contours") - # plt.imshow(img1) - # img2 = np.zeros(text_only_d.shape[:2], dtype=np.uint8) - # join deskewed regions mapping to single original ones - for i in range(len(contours_only_text_parent)): - if np.count_nonzero(corresp[i]) > 1: - indices = np.flatnonzero(corresp[i]) - #print("joining", indices) - polygons_d = [contour2polygon(contour) - for contour in contours_only_text_parent_d[indices]] - contour_d = polygon2contour(join_polygons(polygons_d)) - contours_only_text_parent_d_ordered[i] = contour_d - # cv2.fillPoly(img2, pts=[contour_d], color=i + 1) - # plt.subplot(2, 2, 3, title="joined contours") - # plt.imshow(img2) - # img3 = np.zeros(text_only_d.shape[:2], dtype=np.uint8) - # split deskewed regions mapping to multiple original ones - def deskew(polygon): - polygon = shapely.affinity.rotate(polygon, -slope_deskew, origin=center) - polygon = shapely.affinity.translate(polygon, *offset.squeeze()) - return polygon - for j in range(len(contours_only_text_parent_d)): - if np.count_nonzero(corresp[:, j]) > 1: - indices = np.flatnonzero(corresp[:, j]) - #print("splitting along", indices) - polygons = [deskew(contour2polygon(contour)) - for contour in contours_only_text_parent[indices]] - polygon_d = contour2polygon(contours_only_text_parent_d[j]) - polygons_d = [make_intersection(polygon_d, polygon) - for polygon in polygons] - # ignore where there is no actual overlap - indices = indices[np.flatnonzero(polygons_d)] - contours_d = [polygon2contour(polygon_d) - for polygon_d in polygons_d - if polygon_d] - contours_only_text_parent_d_ordered[indices] = contours_d - # cv2.fillPoly(img3, pts=contours_d, color=j + 1) - # plt.subplot(2, 2, 4, title="split contours") - # plt.imshow(img3) - # img4 = np.zeros(text_only_d.shape[:2], dtype=np.uint8) - # for i 
in range(len(contours_only_text_parent)):
-        #     cv2.fillPoly(img4, pts=[contours_only_text_parent_d_ordered[i]], color=i + 1)
-        # plt.subplot(2, 2, 2, title="result contours")
-        # plt.imshow(img4)
-        # plt.show()
-
-        if not len(contours_only_text_parent):
-            # stop early
-            empty_marginals = [[]] * len(polygons_of_marginals)
-            if self.full_layout:
-                pcgts = self.writer.build_pagexml_full_layout(
-                    [], [], page_coord, [], [], [], [], [], [],
-                    polygons_of_images, contours_tables, [],
-                    polygons_of_marginals, polygons_of_marginals,
-                    empty_marginals, empty_marginals,
-                    empty_marginals, empty_marginals,
-                    [], [], [], [],
-                    cont_page, polygons_seplines)
-            else:
-                pcgts = self.writer.build_pagexml_no_full_layout(
-                    [], page_coord, [], [], [], [],
-                    polygons_of_images,
-                    polygons_of_marginals, polygons_of_marginals,
-                    empty_marginals, empty_marginals,
-                    empty_marginals, empty_marginals,
-                    [], [], [],
-                    cont_page, polygons_seplines, contours_tables)
-            return pcgts
-
-
-        #print("text region early 3 in %.1fs", time.time() - t0)
-        if self.light_version:
-            contours_only_text_parent = dilate_textregion_contours(contours_only_text_parent)
-            contours_only_text_parent, contours_only_text_parent_d_ordered = self.filter_contours_inside_a_bigger_one(
-                contours_only_text_parent, contours_only_text_parent_d_ordered, text_only,
-                marginal_cnts=polygons_of_marginals)
-            #print("text region early 3.5 in %.1fs", time.time() - t0)
-            conf_contours_textregions = get_textregion_contours_in_org_image_light(
-                contours_only_text_parent, self.image, confidence_matrix)
-            #contours_only_text_parent = dilate_textregion_contours(contours_only_text_parent)
-        else:
-            conf_contours_textregions = get_textregion_contours_in_org_image_light(
-                contours_only_text_parent, self.image, confidence_matrix)
-        #print("text region early 4 in %.1fs", time.time() - t0)
-        boxes_text = get_text_region_boxes_by_given_contours(contours_only_text_parent)
-        boxes_marginals = get_text_region_boxes_by_given_contours(polygons_of_marginals)
-        #print("text region early 5 in %.1fs", time.time() - t0)
-        ## after this, a lot gets called
-        if not self.curved_line:
-            if self.light_version:
-                if self.textline_light:
-                    all_found_textline_polygons, \
-                        all_box_coord, slopes = self.get_slopes_and_deskew_new_light2(
-                            contours_only_text_parent, textline_mask_tot_ea_org,
-                            boxes_text, slope_deskew)
-                    all_found_textline_polygons_marginals, \
-                        all_box_coord_marginals, slopes_marginals = self.get_slopes_and_deskew_new_light2(
-                            polygons_of_marginals, textline_mask_tot_ea_org,
-                            boxes_marginals, slope_deskew)
-
-                    all_found_textline_polygons = dilate_textline_contours(
-                        all_found_textline_polygons)
-                    all_found_textline_polygons = self.filter_contours_inside_a_bigger_one(
-                        all_found_textline_polygons, None, textline_mask_tot_ea_org, type_contour="textline")
-                    all_found_textline_polygons_marginals = dilate_textline_contours(
-                        all_found_textline_polygons_marginals)
-                    contours_only_text_parent, all_found_textline_polygons, \
-                        contours_only_text_parent_d_ordered, conf_contours_textregions = \
-                            self.filter_contours_without_textline_inside(
-                                contours_only_text_parent, all_found_textline_polygons,
-                                contours_only_text_parent_d_ordered, conf_contours_textregions)
-                else:
-                    textline_mask_tot_ea = cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=1)
-                    all_found_textline_polygons, \
-                        all_box_coord, slopes = self.get_slopes_and_deskew_new_light(
-                            contours_only_text_parent, contours_only_text_parent, textline_mask_tot_ea,
-                            boxes_text, slope_deskew)
-
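# The get_slopes_and_deskew_new_light* calls above and below return one deskew
# angle per region. A hedged sketch of the usual projection-profile approach
# (in the spirit of this codebase's separate_lines helpers, not their exact
# implementation): rotate the binarized crop over candidate angles and keep
# the angle whose horizontal projection profile is peakiest. `estimate_slope`
# is illustrative only, not a function of this module:

import numpy as np
from scipy.ndimage import rotate

def estimate_slope(mask: np.ndarray, angles=np.linspace(-10, 10, 41)) -> float:
    """Angle (degrees) whose rotation maximizes row-profile variance."""
    def score(angle):
        rotated = rotate(mask, angle, reshape=False, order=0)
        return rotated.sum(axis=1).var()  # sharp peaks = horizontally aligned rows
    return float(max(angles, key=score))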
all_found_textline_polygons_marginals, \ - all_box_coord_marginals, slopes_marginals = self.get_slopes_and_deskew_new_light( - polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea, - boxes_marginals, slope_deskew) - #all_found_textline_polygons = self.filter_contours_inside_a_bigger_one( - # all_found_textline_polygons, textline_mask_tot_ea_org, type_contour="textline") - else: - textline_mask_tot_ea = cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=1) - all_found_textline_polygons, \ - all_box_coord, slopes = self.get_slopes_and_deskew_new( - contours_only_text_parent, contours_only_text_parent, textline_mask_tot_ea, - boxes_text, slope_deskew) - all_found_textline_polygons_marginals, \ - all_box_coord_marginals, slopes_marginals = self.get_slopes_and_deskew_new( - polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea, - boxes_marginals, slope_deskew) - else: - scale_param = 1 - textline_mask_tot_ea_erode = cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=2) - all_found_textline_polygons, \ - all_box_coord, slopes = self.get_slopes_and_deskew_new_curved( - contours_only_text_parent, textline_mask_tot_ea_erode, - boxes_text, text_only, - num_col_classifier, scale_param, slope_deskew) - all_found_textline_polygons = small_textlines_to_parent_adherence2( - all_found_textline_polygons, textline_mask_tot_ea, num_col_classifier) - all_found_textline_polygons_marginals, \ - all_box_coord_marginals, slopes_marginals = self.get_slopes_and_deskew_new_curved( - polygons_of_marginals, textline_mask_tot_ea_erode, - boxes_marginals, text_only, - num_col_classifier, scale_param, slope_deskew) - all_found_textline_polygons_marginals = small_textlines_to_parent_adherence2( - all_found_textline_polygons_marginals, textline_mask_tot_ea, num_col_classifier) - - mid_point_of_page_width = text_regions_p.shape[1] / 2. 
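# The midpoint just computed feeds the marginalia split below, which (judging
# by its name and arguments) assigns each marginal region to the left or right
# of the page midline and orders each side top to bottom. A hedged sketch of
# that idea; `split_and_order_marginals` is hypothetical, not the real method:

import cv2

def split_and_order_marginals(polys, mid_x):
    def center_x(poly):
        x, _, w, _ = cv2.boundingRect(poly)
        return x + w / 2.
    top_y = lambda poly: cv2.boundingRect(poly)[1]  # vertical reading order
    left = sorted((p for p in polys if center_x(p) < mid_x), key=top_y)
    right = sorted((p for p in polys if center_x(p) >= mid_x), key=top_y)
    return left, right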
- (polygons_of_marginals_left, polygons_of_marginals_right,
-  all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right,
-  all_box_coord_marginals_left, all_box_coord_marginals_right,
-  slopes_marginals_left, slopes_marginals_right) = \
-      self.separate_marginals_to_left_and_right_and_order_from_top_to_down(
-          polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals,
-          slopes_marginals, mid_point_of_page_width)
-
- #print(len(polygons_of_marginals), len(ordered_left_marginals), len(ordered_right_marginals), 'marginals ordered')
-
- if self.full_layout:
-     if self.light_version:
-         fun = check_any_text_region_in_model_one_is_main_or_header_light
-     else:
-         fun = check_any_text_region_in_model_one_is_main_or_header
-     text_regions_p, contours_only_text_parent, contours_only_text_parent_h, all_box_coord, all_box_coord_h, \
-         all_found_textline_polygons, all_found_textline_polygons_h, slopes, slopes_h, \
-         contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered, \
-         conf_contours_textregions, conf_contours_textregions_h = fun(
-             text_regions_p, regions_fully, contours_only_text_parent,
-             all_box_coord, all_found_textline_polygons,
-             slopes, contours_only_text_parent_d_ordered, conf_contours_textregions)
-
-     if self.plotter:
-         self.plotter.save_plot_of_layout(text_regions_p, image_page)
-         self.plotter.save_plot_of_layout_all(text_regions_p, image_page)
-
-     label_img = 4
-     polygons_of_drop_capitals = return_contours_of_interested_region(text_regions_p, label_img,
-                                                                      min_area=0.00003)
-     ##all_found_textline_polygons = adhere_drop_capital_region_into_corresponding_textline(
-         ##text_regions_p, polygons_of_drop_capitals, contours_only_text_parent, contours_only_text_parent_h,
-         ##all_box_coord, all_box_coord_h, all_found_textline_polygons, all_found_textline_polygons_h,
-         ##kernel=KERNEL, curved_line=self.curved_line, textline_light=self.textline_light)
-
-     if not self.reading_order_machine_based:
-         label_seps = 6
-         if not self.headers_off:
-             if np.abs(slope_deskew) < SLOPE_THRESHOLD:
-                 num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(
-                     text_regions_p, num_col_classifier, self.tables, label_seps, contours_only_text_parent_h)
-             else:
-                 _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(
-                     text_regions_p_1_n, num_col_classifier, self.tables, label_seps, contours_only_text_parent_h_d_ordered)
-         elif self.headers_off:
-             if np.abs(slope_deskew) < SLOPE_THRESHOLD:
-                 num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(
-                     text_regions_p, num_col_classifier, self.tables, label_seps)
-             else:
-                 _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(
-                     text_regions_p_1_n, num_col_classifier, self.tables, label_seps)
-
-         if num_col_classifier >= 3:
-             if np.abs(slope_deskew) < SLOPE_THRESHOLD:
-                 regions_without_separators = regions_without_separators.astype(np.uint8)
-                 regions_without_separators = cv2.erode(regions_without_separators[:, :], KERNEL, iterations=6)
-             else:
-                 regions_without_separators_d = regions_without_separators_d.astype(np.uint8)
-                 regions_without_separators_d = cv2.erode(regions_without_separators_d[:, :], KERNEL, iterations=6)
-
-         if np.abs(slope_deskew) < SLOPE_THRESHOLD:
-             boxes, peaks_neg_tot_tables = return_boxes_of_images_by_order_of_reading_new(
-                 splitter_y_new, regions_without_separators, matrix_of_lines_ch,
-                 num_col_classifier, erosion_hurts, self.tables, self.right2left,
-
logger=self.logger) - else: - boxes_d, peaks_neg_tot_tables_d = return_boxes_of_images_by_order_of_reading_new( - splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, - num_col_classifier, erosion_hurts, self.tables, self.right2left, - logger=self.logger) - else: - contours_only_text_parent_h = [] - contours_only_text_parent_h_d_ordered = [] - - if self.plotter: - self.plotter.write_images_into_directory(polygons_of_images, image_page) - t_order = time.time() - - self.logger.info("Step 4/5: Reading Order Detection") - - if self.reading_order_machine_based: - self.logger.info("Using machine-based detection") - if self.right2left: - self.logger.info("Right-to-left mode enabled") - if self.headers_off: - self.logger.info("Headers ignored in reading order") - - if self.reading_order_machine_based: - order_text_new, id_of_texts_tot = self.do_order_of_regions_with_model( - contours_only_text_parent, contours_only_text_parent_h, text_regions_p) - else: - if np.abs(slope_deskew) < SLOPE_THRESHOLD: - order_text_new, id_of_texts_tot = self.do_order_of_regions( - contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot) - else: - order_text_new, id_of_texts_tot = self.do_order_of_regions( - contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered, - boxes_d, textline_mask_tot_d) - self.logger.info(f"Detection of reading order took {time.time() - t_order:.1f}s") - - ocr_all_textlines = None - ocr_all_textlines_marginals_left = None - ocr_all_textlines_marginals_right = None - ocr_all_textlines_h = None - ocr_all_textlines_drop = None - if self.ocr: - self.logger.info("Step 4.5/5: OCR Processing") - - if not self.tr: - gc.collect() - - if len(all_found_textline_polygons): - ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines( - image_page, all_found_textline_polygons, all_box_coord, - self.models["ocr"], self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) - - if len(all_found_textline_polygons_marginals_left): - ocr_all_textlines_marginals_left = return_rnn_cnn_ocr_of_given_textlines( - image_page, all_found_textline_polygons_marginals_left, all_box_coord_marginals_left, - self.models["ocr"], self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) - - if len(all_found_textline_polygons_marginals_right): - ocr_all_textlines_marginals_right = return_rnn_cnn_ocr_of_given_textlines( - image_page, all_found_textline_polygons_marginals_right, all_box_coord_marginals_right, - self.models["ocr"], self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) - - if self.full_layout and len(all_found_textline_polygons): - ocr_all_textlines_h = return_rnn_cnn_ocr_of_given_textlines( - image_page, all_found_textline_polygons_h, all_box_coord_h, - self.models["ocr"], self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) - - if self.full_layout and len(polygons_of_drop_capitals): - ocr_all_textlines_drop = return_rnn_cnn_ocr_of_given_textlines( - image_page, polygons_of_drop_capitals, np.zeros((len(polygons_of_drop_capitals), 4)), - self.models["ocr"], self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) - - else: - if self.light_version: - self.logger.info("Using light version OCR") - if self.textline_light: - self.logger.info("Using light text line detection for OCR") - self.logger.info("Processing text lines...") - - gc.collect() - - torch.cuda.empty_cache() - self.models["ocr"].to(self.device) - - ind_tot = 0 - #cv2.imwrite('./img_out.png', image_page) - ocr_all_textlines 
= [] - # FIXME: what about lines in marginals / headings / drop-capitals here? - for indexing, ind_poly_first in enumerate(all_found_textline_polygons): - ocr_textline_in_textregion = [] - for indexing2, ind_poly in enumerate(ind_poly_first): - if not (self.textline_light or self.curved_line): - ind_poly = copy.deepcopy(ind_poly) - box_ind = all_box_coord[indexing] - #print(ind_poly,np.shape(ind_poly), 'ind_poly') - #print(box_ind) - ind_poly = return_textline_contour_with_added_box_coordinate(ind_poly, box_ind) - #print(ind_poly_copy) - ind_poly[ind_poly<0] = 0 - x, y, w, h = cv2.boundingRect(ind_poly) - #print(ind_poly_copy, np.shape(ind_poly_copy)) - #print(x, y, w, h, h/float(w),'ratio') - h2w_ratio = h/float(w) - mask_poly = np.zeros(image_page.shape) - if not self.light_version: - img_poly_on_img = np.copy(image_page) - else: - img_poly_on_img = np.copy(img_bin_light) - mask_poly = cv2.fillPoly(mask_poly, pts=[ind_poly], color=(1, 1, 1)) - - if self.textline_light: - mask_poly = cv2.dilate(mask_poly, KERNEL, iterations=1) - img_poly_on_img[:,:,0][mask_poly[:,:,0] ==0] = 255 - img_poly_on_img[:,:,1][mask_poly[:,:,0] ==0] = 255 - img_poly_on_img[:,:,2][mask_poly[:,:,0] ==0] = 255 - - img_croped = img_poly_on_img[y:y+h, x:x+w, :] - #cv2.imwrite('./extracted_lines/'+str(ind_tot)+'.jpg', img_croped) - text_ocr = self.return_ocr_of_textline_without_common_section( - img_croped, self.models["ocr"], self.processor, self.device, w, h2w_ratio, ind_tot) - ocr_textline_in_textregion.append(text_ocr) - ind_tot = ind_tot +1 - ocr_all_textlines.append(ocr_textline_in_textregion) - - self.logger.info("Step 5/5: Output Generation") - - if self.full_layout: - pcgts = self.writer.build_pagexml_full_layout( - contours_only_text_parent, contours_only_text_parent_h, page_coord, order_text_new, id_of_texts_tot, - all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, - polygons_of_images, contours_tables, polygons_of_drop_capitals, - polygons_of_marginals_left, polygons_of_marginals_right, - all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, - all_box_coord_marginals_left, all_box_coord_marginals_right, - slopes, slopes_h, slopes_marginals_left, slopes_marginals_right, - cont_page, polygons_seplines, ocr_all_textlines, ocr_all_textlines_h, - ocr_all_textlines_marginals_left, ocr_all_textlines_marginals_right, - ocr_all_textlines_drop, - conf_contours_textregions, conf_contours_textregions_h) - else: - pcgts = self.writer.build_pagexml_no_full_layout( - contours_only_text_parent, page_coord, order_text_new, id_of_texts_tot, - all_found_textline_polygons, all_box_coord, polygons_of_images, - polygons_of_marginals_left, polygons_of_marginals_right, - all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, - all_box_coord_marginals_left, all_box_coord_marginals_right, - slopes, slopes_marginals_left, slopes_marginals_right, - cont_page, polygons_seplines, contours_tables, - ocr_all_textlines=ocr_all_textlines, - ocr_all_textlines_marginals_left=ocr_all_textlines_marginals_left, - ocr_all_textlines_marginals_right=ocr_all_textlines_marginals_right, - conf_contours_textregions=conf_contours_textregions) - - return pcgts - - -class Eynollah_ocr: - def __init__( - self, - dir_models, - model_name=None, - dir_xmls=None, - tr_ocr=False, - batch_size=None, - export_textline_images_and_text=False, - do_not_mask_with_textline_contour=False, - pref_of_dataset=None, - min_conf_value_of_textline_text : 
Optional[float]=None, - logger=None, - ): - self.model_name = model_name - self.tr_ocr = tr_ocr - self.export_textline_images_and_text = export_textline_images_and_text - self.do_not_mask_with_textline_contour = do_not_mask_with_textline_contour - self.pref_of_dataset = pref_of_dataset - self.logger = logger if logger else getLogger('eynollah') - - if not export_textline_images_and_text: - if min_conf_value_of_textline_text: - self.min_conf_value_of_textline_text = float(min_conf_value_of_textline_text) - else: - self.min_conf_value_of_textline_text = 0.3 - if tr_ocr: - self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed") - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - if self.model_name: - self.model_ocr_dir = self.model_name - else: - self.model_ocr_dir = dir_models + "/model_eynollah_ocr_trocr_20250919" - self.model_ocr = VisionEncoderDecoderModel.from_pretrained(self.model_ocr_dir) - self.model_ocr.to(self.device) - if not batch_size: - self.b_s = 2 - else: - self.b_s = int(batch_size) - - else: - if self.model_name: - self.model_ocr_dir = self.model_name - else: - self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250930" - model_ocr = load_model(self.model_ocr_dir , compile=False) - - self.prediction_model = tf.keras.models.Model( - model_ocr.get_layer(name = "image").input, - model_ocr.get_layer(name = "dense2").output) - if not batch_size: - self.b_s = 8 - else: - self.b_s = int(batch_size) - - with open(os.path.join(self.model_ocr_dir, "characters_org.txt"),"r") as config_file: - characters = json.load(config_file) - - AUTOTUNE = tf.data.AUTOTUNE - - # Mapping characters to integers. - char_to_num = StringLookup(vocabulary=list(characters), mask_token=None) - - # Mapping integers back to original characters. 
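# (With Keras defaults, index 0 of this lookup is the OOV token "[UNK]",
# which decoded texts are stripped of further below; `end_character`,
# defined next as len(characters) + 2, is the index masked out as the
# blank/end class when line confidences are computed.)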
- self.num_to_char = StringLookup( - vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True - ) - self.end_character = len(characters) + 2 - - def run(self, overwrite: bool = False, - dir_in: Optional[str] = None, - dir_in_bin: Optional[str] = None, - image_filename: Optional[str] = None, - dir_xmls: Optional[str] = None, - dir_out_image_text: Optional[str] = None, - dir_out: Optional[str] = None, - ): - if dir_in: - ls_imgs = [os.path.join(dir_in, image_filename) - for image_filename in filter(is_image_filename, - os.listdir(dir_in))] - else: - ls_imgs = [image_filename] - - if self.tr_ocr: - tr_ocr_input_height_and_width = 384 - for dir_img in ls_imgs: - file_name = Path(dir_img).stem - dir_xml = os.path.join(dir_xmls, file_name+'.xml') - out_file_ocr = os.path.join(dir_out, file_name+'.xml') - - if os.path.exists(out_file_ocr): - if overwrite: - self.logger.warning("will overwrite existing output file '%s'", out_file_ocr) - else: - self.logger.warning("will skip input for existing output file '%s'", out_file_ocr) - continue - - img = cv2.imread(dir_img) - - if dir_out_image_text: - out_image_with_text = os.path.join(dir_out_image_text, file_name+'.png') - image_text = Image.new("RGB", (img.shape[1], img.shape[0]), "white") - draw = ImageDraw.Draw(image_text) - total_bb_coordinates = [] - - ##file_name = Path(dir_xmls).stem - tree1 = ET.parse(dir_xml, parser = ET.XMLParser(encoding="utf-8")) - root1=tree1.getroot() - alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' - - name_space = alltags[0].split('}')[0] - name_space = name_space.split('{')[1] - - region_tags=np.unique([x for x in alltags if x.endswith('TextRegion')]) - - - - cropped_lines = [] - cropped_lines_region_indexer = [] - cropped_lines_meging_indexing = [] - - extracted_texts = [] - - indexer_text_region = 0 - indexer_b_s = 0 - - for nn in root1.iter(region_tags): - for child_textregion in nn: - if child_textregion.tag.endswith("TextLine"): - - for child_textlines in child_textregion: - if child_textlines.tag.endswith("Coords"): - cropped_lines_region_indexer.append(indexer_text_region) - p_h=child_textlines.attrib['points'].split(' ') - textline_coords = np.array( [ [int(x.split(',')[0]), - int(x.split(',')[1]) ] - for x in p_h] ) - x,y,w,h = cv2.boundingRect(textline_coords) - - if dir_out_image_text: - total_bb_coordinates.append([x,y,w,h]) - - h2w_ratio = h/float(w) - - img_poly_on_img = np.copy(img) - mask_poly = np.zeros(img.shape) - mask_poly = cv2.fillPoly(mask_poly, pts=[textline_coords], color=(1, 1, 1)) - - mask_poly = mask_poly[y:y+h, x:x+w, :] - img_crop = img_poly_on_img[y:y+h, x:x+w, :] - img_crop[mask_poly==0] = 255 - - self.logger.debug("processing %d lines for '%s'", - len(cropped_lines), nn.attrib['id']) - if h2w_ratio > 0.1: - cropped_lines.append(resize_image(img_crop, - tr_ocr_input_height_and_width, - tr_ocr_input_height_and_width) ) - cropped_lines_meging_indexing.append(0) - indexer_b_s+=1 - if indexer_b_s==self.b_s: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate( - pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode( - generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - else: - splited_images, _ = return_textlines_split_if_needed(img_crop, None) - #print(splited_images) - if splited_images: - 
cropped_lines.append(resize_image(splited_images[0], - tr_ocr_input_height_and_width, - tr_ocr_input_height_and_width)) - cropped_lines_meging_indexing.append(1) - indexer_b_s+=1 - - if indexer_b_s==self.b_s: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate( - pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode( - generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - - cropped_lines.append(resize_image(splited_images[1], - tr_ocr_input_height_and_width, - tr_ocr_input_height_and_width)) - cropped_lines_meging_indexing.append(-1) - indexer_b_s+=1 - - if indexer_b_s==self.b_s: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate( - pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode( - generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - else: - cropped_lines.append(img_crop) - cropped_lines_meging_indexing.append(0) - indexer_b_s+=1 - - if indexer_b_s==self.b_s: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate( - pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode( - generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - - - indexer_text_region = indexer_text_region +1 - - if indexer_b_s!=0: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate(pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode(generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - ####extracted_texts = [] - ####n_iterations = math.ceil(len(cropped_lines) / self.b_s) - - ####for i in range(n_iterations): - ####if i==(n_iterations-1): - ####n_start = i*self.b_s - ####imgs = cropped_lines[n_start:] - ####else: - ####n_start = i*self.b_s - ####n_end = (i+1)*self.b_s - ####imgs = cropped_lines[n_start:n_end] - ####pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values - ####generated_ids_merged = self.model_ocr.generate( - #### pixel_values_merged.to(self.device)) - ####generated_text_merged = self.processor.batch_decode( - #### generated_ids_merged, skip_special_tokens=True) - - ####extracted_texts = extracted_texts + generated_text_merged - - del cropped_lines - gc.collect() - - extracted_texts_merged = [extracted_texts[ind] - if cropped_lines_meging_indexing[ind]==0 - else extracted_texts[ind]+" "+extracted_texts[ind+1] - if cropped_lines_meging_indexing[ind]==1 - else None - for ind in range(len(cropped_lines_meging_indexing))] - - extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] - #print(extracted_texts_merged, len(extracted_texts_merged)) - - unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) - - if dir_out_image_text: - - #font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this 
file exists! - font = importlib_resources.files(__package__) / "Charis-Regular.ttf" - with importlib_resources.as_file(font) as font: - font = ImageFont.truetype(font=font, size=40) - - for indexer_text, bb_ind in enumerate(total_bb_coordinates): - - - x_bb = bb_ind[0] - y_bb = bb_ind[1] - w_bb = bb_ind[2] - h_bb = bb_ind[3] - - font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], - font.path, w_bb, int(h_bb*0.4) ) - - ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) - - text_bbox = draw.textbbox((0, 0), extracted_texts_merged[indexer_text], font=font) - text_width = text_bbox[2] - text_bbox[0] - text_height = text_bbox[3] - text_bbox[1] - - text_x = x_bb + (w_bb - text_width) // 2 # Center horizontally - text_y = y_bb + (h_bb - text_height) // 2 # Center vertically - - # Draw the text - draw.text((text_x, text_y), extracted_texts_merged[indexer_text], fill="black", font=font) - image_text.save(out_image_with_text) - - #print(len(unique_cropped_lines_region_indexer), 'unique_cropped_lines_region_indexer') - #######text_by_textregion = [] - #######for ind in unique_cropped_lines_region_indexer: - #######ind = np.array(cropped_lines_region_indexer)==ind - #######extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] - #######text_by_textregion.append(" ".join(extracted_texts_merged_un)) - - text_by_textregion = [] - for ind in unique_cropped_lines_region_indexer: - ind = np.array(cropped_lines_region_indexer) == ind - extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] - if len(extracted_texts_merged_un)>1: - text_by_textregion_ind = "" - next_glue = "" - for indt in range(len(extracted_texts_merged_un)): - if (extracted_texts_merged_un[indt].endswith('⸗') or - extracted_texts_merged_un[indt].endswith('-') or - extracted_texts_merged_un[indt].endswith('¬')): - text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt][:-1] - next_glue = "" - else: - text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt] - next_glue = " " - text_by_textregion.append(text_by_textregion_ind) - else: - text_by_textregion.append(" ".join(extracted_texts_merged_un)) - - - indexer = 0 - indexer_textregion = 0 - for nn in root1.iter(region_tags): - #id_textregion = nn.attrib['id'] - #id_textregions.append(id_textregion) - #textregions_by_existing_ids.append(text_by_textregion[indexer_textregion]) - - is_textregion_text = False - for childtest in nn: - if childtest.tag.endswith("TextEquiv"): - is_textregion_text = True - - if not is_textregion_text: - text_subelement_textregion = ET.SubElement(nn, 'TextEquiv') - unicode_textregion = ET.SubElement(text_subelement_textregion, 'Unicode') - - - has_textline = False - for child_textregion in nn: - if child_textregion.tag.endswith("TextLine"): - - is_textline_text = False - for childtest2 in child_textregion: - if childtest2.tag.endswith("TextEquiv"): - is_textline_text = True - - - if not is_textline_text: - text_subelement = ET.SubElement(child_textregion, 'TextEquiv') - ##text_subelement.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") - unicode_textline = ET.SubElement(text_subelement, 'Unicode') - unicode_textline.text = extracted_texts_merged[indexer] - else: - for childtest3 in child_textregion: - if childtest3.tag.endswith("TextEquiv"): - for child_uc in childtest3: - if child_uc.tag.endswith("Unicode"): - ##childtest3.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") - child_uc.text = extracted_texts_merged[indexer] - - indexer = indexer + 1 - 
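# `indexer` walks the flat list of merged line texts in document order, while
# `has_textline` records that this region has at least one line and therefore
# needs its region-level TextEquiv filled in below.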
has_textline = True - if has_textline: - if is_textregion_text: - for child4 in nn: - if child4.tag.endswith("TextEquiv"): - for childtr_uc in child4: - if childtr_uc.tag.endswith("Unicode"): - childtr_uc.text = text_by_textregion[indexer_textregion] - else: - unicode_textregion.text = text_by_textregion[indexer_textregion] - indexer_textregion = indexer_textregion + 1 - - ###sample_order = [(id_to_order[tid], text) - ### for tid, text in zip(id_textregions, textregions_by_existing_ids) - ### if tid in id_to_order] - - ##ordered_texts_sample = [text for _, text in sorted(sample_order)] - ##tot_page_text = ' '.join(ordered_texts_sample) - - ##for page_element in root1.iter(link+'Page'): - ##text_page = ET.SubElement(page_element, 'TextEquiv') - ##unicode_textpage = ET.SubElement(text_page, 'Unicode') - ##unicode_textpage.text = tot_page_text - - ET.register_namespace("",name_space) - tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf-8",default_namespace=None) - else: - ###max_len = 280#512#280#512 - ###padding_token = 1500#299#1500#299 - image_width = 512#max_len * 4 - image_height = 32 - - - img_size=(image_width, image_height) - - for dir_img in ls_imgs: - file_name = Path(dir_img).stem - dir_xml = os.path.join(dir_xmls, file_name+'.xml') - out_file_ocr = os.path.join(dir_out, file_name+'.xml') - - if os.path.exists(out_file_ocr): - if overwrite: - self.logger.warning("will overwrite existing output file '%s'", out_file_ocr) - else: - self.logger.warning("will skip input for existing output file '%s'", out_file_ocr) - continue - - img = cv2.imread(dir_img) - if dir_in_bin is not None: - cropped_lines_bin = [] - dir_img_bin = os.path.join(dir_in_bin, file_name+'.png') - img_bin = cv2.imread(dir_img_bin) - - if dir_out_image_text: - out_image_with_text = os.path.join(dir_out_image_text, file_name+'.png') - image_text = Image.new("RGB", (img.shape[1], img.shape[0]), "white") - draw = ImageDraw.Draw(image_text) - total_bb_coordinates = [] - - tree1 = ET.parse(dir_xml, parser = ET.XMLParser(encoding="utf-8")) - root1=tree1.getroot() - alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' - - name_space = alltags[0].split('}')[0] - name_space = name_space.split('{')[1] - - region_tags=np.unique([x for x in alltags if x.endswith('TextRegion')]) - - cropped_lines = [] - cropped_lines_ver_index = [] - cropped_lines_region_indexer = [] - cropped_lines_meging_indexing = [] - - tinl = time.time() - indexer_text_region = 0 - indexer_textlines = 0 - for nn in root1.iter(region_tags): - try: - type_textregion = nn.attrib['type'] - except: - type_textregion = 'paragraph' - for child_textregion in nn: - if child_textregion.tag.endswith("TextLine"): - for child_textlines in child_textregion: - if child_textlines.tag.endswith("Coords"): - cropped_lines_region_indexer.append(indexer_text_region) - p_h=child_textlines.attrib['points'].split(' ') - textline_coords = np.array( [ [int(x.split(',')[0]), - int(x.split(',')[1]) ] - for x in p_h] ) - - x,y,w,h = cv2.boundingRect(textline_coords) - - angle_radians = math.atan2(h, w) - # Convert to degrees - angle_degrees = math.degrees(angle_radians) - if type_textregion=='drop-capital': - angle_degrees = 0 - - if dir_out_image_text: - total_bb_coordinates.append([x,y,w,h]) - - w_scaled = w * image_height/float(h) - - img_poly_on_img = np.copy(img) - if dir_in_bin is not None: - img_poly_on_img_bin = np.copy(img_bin) - img_crop_bin = img_poly_on_img_bin[y:y+h, x:x+w, :] - - mask_poly = np.zeros(img.shape) - mask_poly = 
cv2.fillPoly(mask_poly, pts=[textline_coords], color=(1, 1, 1)) - - - mask_poly = mask_poly[y:y+h, x:x+w, :] - img_crop = img_poly_on_img[y:y+h, x:x+w, :] - - if self.export_textline_images_and_text: - if not self.do_not_mask_with_textline_contour: - img_crop[mask_poly==0] = 255 - - else: - # print(file_name, angle_degrees, w*h, - # mask_poly[:,:,0].sum(), - # mask_poly[:,:,0].sum() /float(w*h) , - # 'didi') - - if angle_degrees > 3: - better_des_slope = get_orientation_moments(textline_coords) - - img_crop = rotate_image_with_padding(img_crop, better_des_slope) - if dir_in_bin is not None: - img_crop_bin = rotate_image_with_padding(img_crop_bin, better_des_slope) - - mask_poly = rotate_image_with_padding(mask_poly, better_des_slope) - mask_poly = mask_poly.astype('uint8') - - #new bounding box - x_n, y_n, w_n, h_n = get_contours_and_bounding_boxes(mask_poly[:,:,0]) - - mask_poly = mask_poly[y_n:y_n+h_n, x_n:x_n+w_n, :] - img_crop = img_crop[y_n:y_n+h_n, x_n:x_n+w_n, :] - - if not self.do_not_mask_with_textline_contour: - img_crop[mask_poly==0] = 255 - if dir_in_bin is not None: - img_crop_bin = img_crop_bin[y_n:y_n+h_n, x_n:x_n+w_n, :] - if not self.do_not_mask_with_textline_contour: - img_crop_bin[mask_poly==0] = 255 - - if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 90: - if dir_in_bin is not None: - img_crop, img_crop_bin = \ - break_curved_line_into_small_pieces_and_then_merge( - img_crop, mask_poly, img_crop_bin) - else: - img_crop, _ = \ - break_curved_line_into_small_pieces_and_then_merge( - img_crop, mask_poly) - - else: - better_des_slope = 0 - if not self.do_not_mask_with_textline_contour: - img_crop[mask_poly==0] = 255 - if dir_in_bin is not None: - if not self.do_not_mask_with_textline_contour: - img_crop_bin[mask_poly==0] = 255 - if type_textregion=='drop-capital': - pass - else: - if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 90: - if dir_in_bin is not None: - img_crop, img_crop_bin = \ - break_curved_line_into_small_pieces_and_then_merge( - img_crop, mask_poly, img_crop_bin) - else: - img_crop, _ = \ - break_curved_line_into_small_pieces_and_then_merge( - img_crop, mask_poly) - - if not self.export_textline_images_and_text: - if w_scaled < 750:#1.5*image_width: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop, image_height, image_width) - cropped_lines.append(img_fin) - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - cropped_lines_meging_indexing.append(0) - if dir_in_bin is not None: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop_bin, image_height, image_width) - cropped_lines_bin.append(img_fin) - else: - splited_images, splited_images_bin = return_textlines_split_if_needed( - img_crop, img_crop_bin if dir_in_bin is not None else None) - if splited_images: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - splited_images[0], image_height, image_width) - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(1) - - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - splited_images[1], image_height, image_width) - - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(-1) - - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - if dir_in_bin is not None: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - 
splited_images_bin[0], image_height, image_width) - cropped_lines_bin.append(img_fin) - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - splited_images_bin[1], image_height, image_width) - cropped_lines_bin.append(img_fin) - - else: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop, image_height, image_width) - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(0) - - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - if dir_in_bin is not None: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop_bin, image_height, image_width) - cropped_lines_bin.append(img_fin) - - if self.export_textline_images_and_text: - if img_crop.shape[0]==0 or img_crop.shape[1]==0: - pass - else: - if child_textlines.tag.endswith("TextEquiv"): - for cheild_text in child_textlines: - if cheild_text.tag.endswith("Unicode"): - textline_text = cheild_text.text - if textline_text: - base_name = os.path.join( - dir_out, file_name + '_line_' + str(indexer_textlines)) - if self.pref_of_dataset: - base_name += '_' + self.pref_of_dataset - if not self.do_not_mask_with_textline_contour: - base_name += '_masked' - - with open(base_name + '.txt', 'w') as text_file: - text_file.write(textline_text) - cv2.imwrite(base_name + '.png', img_crop) - indexer_textlines+=1 - - if not self.export_textline_images_and_text: - indexer_text_region = indexer_text_region +1 - - if not self.export_textline_images_and_text: - extracted_texts = [] - extracted_conf_value = [] - - n_iterations = math.ceil(len(cropped_lines) / self.b_s) - - for i in range(n_iterations): - if i==(n_iterations-1): - n_start = i*self.b_s - imgs = cropped_lines[n_start:] - imgs = np.array(imgs) - imgs = imgs.reshape(imgs.shape[0], image_height, image_width, 3) - - ver_imgs = np.array( cropped_lines_ver_index[n_start:] ) - indices_ver = np.where(ver_imgs == 1)[0] - - #print(indices_ver, 'indices_ver') - if len(indices_ver)>0: - imgs_ver_flipped = imgs[indices_ver, : ,: ,:] - imgs_ver_flipped = imgs_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - - else: - imgs_ver_flipped = None - - if dir_in_bin is not None: - imgs_bin = cropped_lines_bin[n_start:] - imgs_bin = np.array(imgs_bin) - imgs_bin = imgs_bin.reshape(imgs_bin.shape[0], image_height, image_width, 3) - - if len(indices_ver)>0: - imgs_bin_ver_flipped = imgs_bin[indices_ver, : ,: ,:] - imgs_bin_ver_flipped = imgs_bin_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - - else: - imgs_bin_ver_flipped = None - else: - n_start = i*self.b_s - n_end = (i+1)*self.b_s - imgs = cropped_lines[n_start:n_end] - imgs = np.array(imgs).reshape(self.b_s, image_height, image_width, 3) - - ver_imgs = np.array( cropped_lines_ver_index[n_start:n_end] ) - indices_ver = np.where(ver_imgs == 1)[0] - #print(indices_ver, 'indices_ver') - - if len(indices_ver)>0: - imgs_ver_flipped = imgs[indices_ver, : ,: ,:] - imgs_ver_flipped = imgs_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - else: - imgs_ver_flipped = None - - - if dir_in_bin is not None: - imgs_bin = cropped_lines_bin[n_start:n_end] - imgs_bin = np.array(imgs_bin).reshape(self.b_s, image_height, image_width, 3) - - - if len(indices_ver)>0: - imgs_bin_ver_flipped = imgs_bin[indices_ver, : ,: ,:] - imgs_bin_ver_flipped = imgs_bin_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - else: - imgs_bin_ver_flipped = None - - - self.logger.debug("processing next 
%d lines", len(imgs)) - preds = self.prediction_model.predict(imgs, verbose=0) - - if len(indices_ver)>0: - preds_flipped = self.prediction_model.predict(imgs_ver_flipped, verbose=0) - preds_max_fliped = np.max(preds_flipped, axis=2 ) - preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) - pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character - masked_means_flipped = \ - np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) - masked_means_flipped[np.isnan(masked_means_flipped)] = 0 - - preds_max = np.max(preds, axis=2 ) - preds_max_args = np.argmax(preds, axis=2 ) - pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character - - masked_means = \ - np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool, axis=1) - masked_means[np.isnan(masked_means)] = 0 - - masked_means_ver = masked_means[indices_ver] - #print(masked_means_ver, 'pred_max_not_unk') - - indices_where_flipped_conf_value_is_higher = \ - np.where(masked_means_flipped > masked_means_ver)[0] - - #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') - if len(indices_where_flipped_conf_value_is_higher)>0: - indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] - preds[indices_to_be_replaced,:,:] = \ - preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] - if dir_in_bin is not None: - preds_bin = self.prediction_model.predict(imgs_bin, verbose=0) - - if len(indices_ver)>0: - preds_flipped = self.prediction_model.predict(imgs_bin_ver_flipped, verbose=0) - preds_max_fliped = np.max(preds_flipped, axis=2 ) - preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) - pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character - masked_means_flipped = \ - np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) - masked_means_flipped[np.isnan(masked_means_flipped)] = 0 - - preds_max = np.max(preds, axis=2 ) - preds_max_args = np.argmax(preds, axis=2 ) - pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character - - masked_means = \ - np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool, axis=1) - masked_means[np.isnan(masked_means)] = 0 - - masked_means_ver = masked_means[indices_ver] - #print(masked_means_ver, 'pred_max_not_unk') - - indices_where_flipped_conf_value_is_higher = \ - np.where(masked_means_flipped > masked_means_ver)[0] - - #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') - if len(indices_where_flipped_conf_value_is_higher)>0: - indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] - preds_bin[indices_to_be_replaced,:,:] = \ - preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] - - preds = (preds + preds_bin) / 2. 
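# Summarizing the two measures feeding the average above (hedged): lines
# flagged near-vertical are also predicted 180°-rotated, and the rotated
# predictions replace the originals wherever their confidence is higher;
# when a binarized copy exists, RGB and binarized softmax outputs are
# averaged before decoding. The confidence is the masked mean recomputed
# after decoding; a standalone sketch (`ctc_confidence` is a hypothetical
# name, not part of this module):

import numpy as np

def ctc_confidence(preds: np.ndarray, blank_id: int) -> np.ndarray:
    """Mean per-timestep max probability, ignoring steps whose argmax is the
    blank/end class. preds: (batch, timesteps, classes) softmax output."""
    best = preds.max(axis=2)
    not_blank = preds.argmax(axis=2) != blank_id
    # guard the denominator instead of patching NaNs afterwards
    return (best * not_blank).sum(axis=1) / np.maximum(not_blank.sum(axis=1), 1)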
- - pred_texts = decode_batch_predictions(preds, self.num_to_char) - - preds_max = np.max(preds, axis=2 ) - preds_max_args = np.argmax(preds, axis=2 ) - pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character - masked_means = \ - np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool, axis=1) - - for ib in range(imgs.shape[0]): - pred_texts_ib = pred_texts[ib].replace("[UNK]", "") - if masked_means[ib] >= self.min_conf_value_of_textline_text: - extracted_texts.append(pred_texts_ib) - extracted_conf_value.append(masked_means[ib]) - else: - extracted_texts.append("") - extracted_conf_value.append(0) - del cropped_lines - if dir_in_bin is not None: - del cropped_lines_bin - gc.collect() - - extracted_texts_merged = [extracted_texts[ind] - if cropped_lines_meging_indexing[ind]==0 - else extracted_texts[ind]+" "+extracted_texts[ind+1] - if cropped_lines_meging_indexing[ind]==1 - else None - for ind in range(len(cropped_lines_meging_indexing))] - - extracted_conf_value_merged = [extracted_conf_value[ind] - if cropped_lines_meging_indexing[ind]==0 - else (extracted_conf_value[ind]+extracted_conf_value[ind+1])/2. - if cropped_lines_meging_indexing[ind]==1 - else None - for ind in range(len(cropped_lines_meging_indexing))] - - extracted_conf_value_merged = [extracted_conf_value_merged[ind_cfm] - for ind_cfm in range(len(extracted_texts_merged)) - if extracted_texts_merged[ind_cfm] is not None] - extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] - unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) - - if dir_out_image_text: - #font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! - font = importlib_resources.files(__package__) / "Charis-Regular.ttf" - with importlib_resources.as_file(font) as font: - font = ImageFont.truetype(font=font, size=40) - - for indexer_text, bb_ind in enumerate(total_bb_coordinates): - x_bb = bb_ind[0] - y_bb = bb_ind[1] - w_bb = bb_ind[2] - h_bb = bb_ind[3] - - font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], - font.path, w_bb, int(h_bb*0.4) ) - - ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) - - text_bbox = draw.textbbox((0, 0), extracted_texts_merged[indexer_text], font=font) - text_width = text_bbox[2] - text_bbox[0] - text_height = text_bbox[3] - text_bbox[1] - - text_x = x_bb + (w_bb - text_width) // 2 # Center horizontally - text_y = y_bb + (h_bb - text_height) // 2 # Center vertically - - # Draw the text - draw.text((text_x, text_y), extracted_texts_merged[indexer_text], fill="black", font=font) - image_text.save(out_image_with_text) - - text_by_textregion = [] - for ind in unique_cropped_lines_region_indexer: - ind = np.array(cropped_lines_region_indexer)==ind - extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] - if len(extracted_texts_merged_un)>1: - text_by_textregion_ind = "" - next_glue = "" - for indt in range(len(extracted_texts_merged_un)): - if (extracted_texts_merged_un[indt].endswith('⸗') or - extracted_texts_merged_un[indt].endswith('-') or - extracted_texts_merged_un[indt].endswith('¬')): - text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt][:-1] - next_glue = "" - else: - text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt] - next_glue = " " - text_by_textregion.append(text_by_textregion_ind) - else: - text_by_textregion.append(" ".join(extracted_texts_merged_un)) - #print(text_by_textregion, 
'text_by_textregiontext_by_textregiontext_by_textregiontext_by_textregiontext_by_textregion') - - ###index_tot_regions = [] - ###tot_region_ref = [] - - ###for jj in root1.iter(link+'RegionRefIndexed'): - ###index_tot_regions.append(jj.attrib['index']) - ###tot_region_ref.append(jj.attrib['regionRef']) - - ###id_to_order = {tid: ro for tid, ro in zip(tot_region_ref, index_tot_regions)} - - #id_textregions = [] - #textregions_by_existing_ids = [] - indexer = 0 - indexer_textregion = 0 - for nn in root1.iter(region_tags): - #id_textregion = nn.attrib['id'] - #id_textregions.append(id_textregion) - #textregions_by_existing_ids.append(text_by_textregion[indexer_textregion]) - - is_textregion_text = False - for childtest in nn: - if childtest.tag.endswith("TextEquiv"): - is_textregion_text = True - - if not is_textregion_text: - text_subelement_textregion = ET.SubElement(nn, 'TextEquiv') - unicode_textregion = ET.SubElement(text_subelement_textregion, 'Unicode') - - - has_textline = False - for child_textregion in nn: - if child_textregion.tag.endswith("TextLine"): - - is_textline_text = False - for childtest2 in child_textregion: - if childtest2.tag.endswith("TextEquiv"): - is_textline_text = True - - - if not is_textline_text: - text_subelement = ET.SubElement(child_textregion, 'TextEquiv') - text_subelement.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") - unicode_textline = ET.SubElement(text_subelement, 'Unicode') - unicode_textline.text = extracted_texts_merged[indexer] - else: - for childtest3 in child_textregion: - if childtest3.tag.endswith("TextEquiv"): - for child_uc in childtest3: - if child_uc.tag.endswith("Unicode"): - childtest3.set('conf', - f"{extracted_conf_value_merged[indexer]:.2f}") - child_uc.text = extracted_texts_merged[indexer] - - indexer = indexer + 1 - has_textline = True - if has_textline: - if is_textregion_text: - for child4 in nn: - if child4.tag.endswith("TextEquiv"): - for childtr_uc in child4: - if childtr_uc.tag.endswith("Unicode"): - childtr_uc.text = text_by_textregion[indexer_textregion] - else: - unicode_textregion.text = text_by_textregion[indexer_textregion] - indexer_textregion = indexer_textregion + 1 - - ###sample_order = [(id_to_order[tid], text) - ### for tid, text in zip(id_textregions, textregions_by_existing_ids) - ### if tid in id_to_order] - - ##ordered_texts_sample = [text for _, text in sorted(sample_order)] - ##tot_page_text = ' '.join(ordered_texts_sample) - - ##for page_element in root1.iter(link+'Page'): - ##text_page = ET.SubElement(page_element, 'TextEquiv') - ##unicode_textpage = ET.SubElement(text_page, 'Unicode') - ##unicode_textpage.text = tot_page_text - - ET.register_namespace("",name_space) - tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf-8",default_namespace=None) - #print("Job done in %.1fs", time.time() - t0) diff --git a/src/eynollah/image_enhancer.py b/src/eynollah/image_enhancer.py deleted file mode 100644 index 9247efe..0000000 --- a/src/eynollah/image_enhancer.py +++ /dev/null @@ -1,731 +0,0 @@ -""" -Image enhancer. The output can be written as same scale of input or in new predicted scale. 
-""" - -from logging import Logger -import os -import time -from typing import Optional -from pathlib import Path -import gc - -import cv2 -import numpy as np -from ocrd_utils import getLogger, tf_disable_interactive_logs -import tensorflow as tf -from skimage.morphology import skeletonize -from tensorflow.keras.models import load_model - -from .utils.resize import resize_image -from .utils.pil_cv2 import pil2cv -from .utils import ( - is_image_filename, - crop_image_inside_box -) -from .eynollah import PatchEncoder, Patches - -DPI_THRESHOLD = 298 -KERNEL = np.ones((5, 5), np.uint8) - - -class Enhancer: - def __init__( - self, - dir_models : str, - num_col_upper : Optional[int] = None, - num_col_lower : Optional[int] = None, - save_org_scale : bool = False, - logger : Optional[Logger] = None, - ): - self.input_binary = False - self.light_version = False - self.save_org_scale = save_org_scale - if num_col_upper: - self.num_col_upper = int(num_col_upper) - else: - self.num_col_upper = num_col_upper - if num_col_lower: - self.num_col_lower = int(num_col_lower) - else: - self.num_col_lower = num_col_lower - - self.logger = logger if logger else getLogger('enhancement') - self.dir_models = dir_models - self.model_dir_of_binarization = dir_models + "/eynollah-binarization_20210425" - self.model_dir_of_enhancement = dir_models + "/eynollah-enhancement_20210425" - self.model_dir_of_col_classifier = dir_models + "/eynollah-column-classifier_20210425" - self.model_page_dir = dir_models + "/model_eynollah_page_extraction_20250915" - - try: - for device in tf.config.list_physical_devices('GPU'): - tf.config.experimental.set_memory_growth(device, True) - except: - self.logger.warning("no GPU device available") - - self.model_page = self.our_load_model(self.model_page_dir) - self.model_classifier = self.our_load_model(self.model_dir_of_col_classifier) - self.model_enhancement = self.our_load_model(self.model_dir_of_enhancement) - self.model_bin = self.our_load_model(self.model_dir_of_binarization) - - def cache_images(self, image_filename=None, image_pil=None, dpi=None): - ret = {} - if image_filename: - ret['img'] = cv2.imread(image_filename) - if self.light_version: - self.dpi = 100 - else: - self.dpi = 0#check_dpi(image_filename) - else: - ret['img'] = pil2cv(image_pil) - if self.light_version: - self.dpi = 100 - else: - self.dpi = 0#check_dpi(image_pil) - ret['img_grayscale'] = cv2.cvtColor(ret['img'], cv2.COLOR_BGR2GRAY) - for prefix in ('', '_grayscale'): - ret[f'img{prefix}_uint8'] = ret[f'img{prefix}'].astype(np.uint8) - self._imgs = ret - if dpi is not None: - self.dpi = dpi - - def reset_file_name_dir(self, image_filename, dir_out): - self.cache_images(image_filename=image_filename) - self.output_filename = os.path.join(dir_out, Path(image_filename).stem +'.png') - - def imread(self, grayscale=False, uint8=True): - key = 'img' - if grayscale: - key += '_grayscale' - if uint8: - key += '_uint8' - return self._imgs[key].copy() - - def isNaN(self, num): - return num != num - - @staticmethod - def our_load_model(model_file): - if model_file.endswith('.h5') and Path(model_file[:-3]).exists(): - # prefer SavedModel over HDF5 format if it exists - model_file = model_file[:-3] - try: - model = load_model(model_file, compile=False) - except: - model = load_model(model_file, compile=False, custom_objects={ - "PatchEncoder": PatchEncoder, "Patches": Patches}) - return model - - def predict_enhancement(self, img): - self.logger.debug("enter predict_enhancement") - - img_height_model = 
self.model_enhancement.layers[-1].output_shape[1] - img_width_model = self.model_enhancement.layers[-1].output_shape[2] - if img.shape[0] < img_height_model: - img = cv2.resize(img, (img.shape[1], img_width_model), interpolation=cv2.INTER_NEAREST) - if img.shape[1] < img_width_model: - img = cv2.resize(img, (img_height_model, img.shape[0]), interpolation=cv2.INTER_NEAREST) - margin = int(0.1 * img_width_model) - width_mid = img_width_model - 2 * margin - height_mid = img_height_model - 2 * margin - img = img / 255. - img_h = img.shape[0] - img_w = img.shape[1] - - prediction_true = np.zeros((img_h, img_w, 3)) - nxf = img_w / float(width_mid) - nyf = img_h / float(height_mid) - nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf) - nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf) - - for i in range(nxf): - for j in range(nyf): - if i == 0: - index_x_d = i * width_mid - index_x_u = index_x_d + img_width_model - else: - index_x_d = i * width_mid - index_x_u = index_x_d + img_width_model - if j == 0: - index_y_d = j * height_mid - index_y_u = index_y_d + img_height_model - else: - index_y_d = j * height_mid - index_y_u = index_y_d + img_height_model - - if index_x_u > img_w: - index_x_u = img_w - index_x_d = img_w - img_width_model - if index_y_u > img_h: - index_y_u = img_h - index_y_d = img_h - img_height_model - - img_patch = img[np.newaxis, index_y_d:index_y_u, index_x_d:index_x_u, :] - label_p_pred = self.model_enhancement.predict(img_patch, verbose=0) - seg = label_p_pred[0, :, :, :] * 255 - - if i == 0 and j == 0: - prediction_true[index_y_d + 0:index_y_u - margin, - index_x_d + 0:index_x_u - margin] = \ - seg[0:-margin or None, - 0:-margin or None] - elif i == nxf - 1 and j == nyf - 1: - prediction_true[index_y_d + margin:index_y_u - 0, - index_x_d + margin:index_x_u - 0] = \ - seg[margin:, - margin:] - elif i == 0 and j == nyf - 1: - prediction_true[index_y_d + margin:index_y_u - 0, - index_x_d + 0:index_x_u - margin] = \ - seg[margin:, - 0:-margin or None] - elif i == nxf - 1 and j == 0: - prediction_true[index_y_d + 0:index_y_u - margin, - index_x_d + margin:index_x_u - 0] = \ - seg[0:-margin or None, - margin:] - elif i == 0 and j != 0 and j != nyf - 1: - prediction_true[index_y_d + margin:index_y_u - margin, - index_x_d + 0:index_x_u - margin] = \ - seg[margin:-margin or None, - 0:-margin or None] - elif i == nxf - 1 and j != 0 and j != nyf - 1: - prediction_true[index_y_d + margin:index_y_u - margin, - index_x_d + margin:index_x_u - 0] = \ - seg[margin:-margin or None, - margin:] - elif i != 0 and i != nxf - 1 and j == 0: - prediction_true[index_y_d + 0:index_y_u - margin, - index_x_d + margin:index_x_u - margin] = \ - seg[0:-margin or None, - margin:-margin or None] - elif i != 0 and i != nxf - 1 and j == nyf - 1: - prediction_true[index_y_d + margin:index_y_u - 0, - index_x_d + margin:index_x_u - margin] = \ - seg[margin:, - margin:-margin or None] - else: - prediction_true[index_y_d + margin:index_y_u - margin, - index_x_d + margin:index_x_u - margin] = \ - seg[margin:-margin or None, - margin:-margin or None] - - prediction_true = prediction_true.astype(int) - return prediction_true - - def calculate_width_height_by_columns(self, img, num_col, width_early, label_p_pred): - self.logger.debug("enter calculate_width_height_by_columns") - if num_col == 1: - img_w_new = 2000 - elif num_col == 2: - img_w_new = 2400 - elif num_col == 3: - img_w_new = 3000 - elif num_col == 4: - img_w_new = 4000 - elif num_col == 5: - img_w_new = 5000 - elif num_col == 6: - img_w_new = 6500 - else: - 
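# more than six detected columns: fall back to the width measured earlier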
img_w_new = width_early - img_h_new = img_w_new * img.shape[0] // img.shape[1] - - if img_h_new >= 8000: - img_new = np.copy(img) - num_column_is_classified = False - else: - img_new = resize_image(img, img_h_new, img_w_new) - num_column_is_classified = True - - return img_new, num_column_is_classified - - def early_page_for_num_of_column_classification(self,img_bin): - self.logger.debug("enter early_page_for_num_of_column_classification") - if self.input_binary: - img = np.copy(img_bin).astype(np.uint8) - else: - img = self.imread() - img = cv2.GaussianBlur(img, (5, 5), 0) - img_page_prediction = self.do_prediction(False, img, self.model_page) - - imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY) - _, thresh = cv2.threshold(imgray, 0, 255, 0) - thresh = cv2.dilate(thresh, KERNEL, iterations=3) - contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - if len(contours)>0: - cnt_size = np.array([cv2.contourArea(contours[j]) - for j in range(len(contours))]) - cnt = contours[np.argmax(cnt_size)] - box = cv2.boundingRect(cnt) - else: - box = [0, 0, img.shape[1], img.shape[0]] - cropped_page, page_coord = crop_image_inside_box(box, img) - - self.logger.debug("exit early_page_for_num_of_column_classification") - return cropped_page, page_coord - - def calculate_width_height_by_columns_1_2(self, img, num_col, width_early, label_p_pred): - self.logger.debug("enter calculate_width_height_by_columns") - if num_col == 1: - img_w_new = 1000 - else: - img_w_new = 1300 - img_h_new = img_w_new * img.shape[0] // img.shape[1] - - if label_p_pred[0][int(num_col - 1)] < 0.9 and img_w_new < width_early: - img_new = np.copy(img) - num_column_is_classified = False - #elif label_p_pred[0][int(num_col - 1)] < 0.8 and img_h_new >= 8000: - elif img_h_new >= 8000: - img_new = np.copy(img) - num_column_is_classified = False - else: - img_new = resize_image(img, img_h_new, img_w_new) - num_column_is_classified = True - - return img_new, num_column_is_classified - - def resize_and_enhance_image_with_column_classifier(self, light_version): - self.logger.debug("enter resize_and_enhance_image_with_column_classifier") - dpi = 0#self.dpi - self.logger.info("Detected %s DPI", dpi) - if self.input_binary: - img = self.imread() - prediction_bin = self.do_prediction(True, img, self.model_bin, n_batch_inference=5) - prediction_bin = 255 * (prediction_bin[:,:,0]==0) - prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2).astype(np.uint8) - img= np.copy(prediction_bin) - img_bin = prediction_bin - else: - img = self.imread() - self.h_org, self.w_org = img.shape[:2] - img_bin = None - - width_early = img.shape[1] - t1 = time.time() - _, page_coord = self.early_page_for_num_of_column_classification(img_bin) - - self.image_page_org_size = img[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3], :] - self.page_coord = page_coord - - if self.num_col_upper and not self.num_col_lower: - num_col = self.num_col_upper - label_p_pred = [np.ones(6)] - elif self.num_col_lower and not self.num_col_upper: - num_col = self.num_col_lower - label_p_pred = [np.ones(6)] - elif not self.num_col_upper and not self.num_col_lower: - if self.input_binary: - img_in = np.copy(img) - img_in = img_in / 255.0 - img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST) - img_in = img_in.reshape(1, 448, 448, 3) - else: - img_1ch = self.imread(grayscale=True) - width_early = img_1ch.shape[1] - img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - - img_1ch 
= img_1ch / 255.0
-                img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST)
-                img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3))
-                img_in[0, :, :, 0] = img_1ch[:, :]
-                img_in[0, :, :, 1] = img_1ch[:, :]
-                img_in[0, :, :, 2] = img_1ch[:, :]
-
-            label_p_pred = self.model_classifier.predict(img_in, verbose=0)
-            num_col = np.argmax(label_p_pred[0]) + 1
-        elif (self.num_col_upper and self.num_col_lower) and (self.num_col_upper!=self.num_col_lower):
-            if self.input_binary:
-                img_in = np.copy(img)
-                img_in = img_in / 255.0
-                img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST)
-                img_in = img_in.reshape(1, 448, 448, 3)
-            else:
-                img_1ch = self.imread(grayscale=True)
-                width_early = img_1ch.shape[1]
-                img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]]
-
-                img_1ch = img_1ch / 255.0
-                img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST)
-                img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3))
-                img_in[0, :, :, 0] = img_1ch[:, :]
-                img_in[0, :, :, 1] = img_1ch[:, :]
-                img_in[0, :, :, 2] = img_1ch[:, :]
-
-            label_p_pred = self.model_classifier.predict(img_in, verbose=0)
-            num_col = np.argmax(label_p_pred[0]) + 1
-
-            if num_col > self.num_col_upper:
-                num_col = self.num_col_upper
-                label_p_pred = [np.ones(6)]
-            if num_col < self.num_col_lower:
-                num_col = self.num_col_lower
-                label_p_pred = [np.ones(6)]
-        else:
-            num_col = self.num_col_upper
-            label_p_pred = [np.ones(6)]
-
-        self.logger.info("Found %d columns (%s)", num_col, np.around(label_p_pred, decimals=5))
-
-        if dpi < DPI_THRESHOLD:
-            if light_version and num_col in (1,2):
-                img_new, num_column_is_classified = self.calculate_width_height_by_columns_1_2(
-                    img, num_col, width_early, label_p_pred)
-            else:
-                img_new, num_column_is_classified = self.calculate_width_height_by_columns(
-                    img, num_col, width_early, label_p_pred)
-            if light_version:
-                image_res = np.copy(img_new)
-            else:
-                image_res = self.predict_enhancement(img_new)
-            is_image_enhanced = True
-
-        else:
-            num_column_is_classified = True
-            image_res = np.copy(img)
-            is_image_enhanced = False
-
-        self.logger.debug("exit resize_and_enhance_image_with_column_classifier")
-        return is_image_enhanced, img, image_res, num_col, num_column_is_classified, img_bin
-    def do_prediction(
-            self, patches, img, model,
-            n_batch_inference=1, marginal_of_patch_percent=0.1,
-            thresholding_for_some_classes_in_light_version=False,
-            thresholding_for_artificial_class_in_light_version=False, thresholding_for_fl_light_version=False, threshold_art_class_textline=0.1):
-
-        self.logger.debug("enter do_prediction")
-        img_height_model = model.layers[-1].output_shape[1]
-        img_width_model = model.layers[-1].output_shape[2]
-
-        if not patches:
-            img_h_page = img.shape[0]
-            img_w_page = img.shape[1]
-            img = img / float(255.0)
-            img = resize_image(img, img_height_model, img_width_model)
-
-            label_p_pred = model.predict(img[np.newaxis], verbose=0)
-            seg = np.argmax(label_p_pred, axis=3)[0]
-
-            if thresholding_for_artificial_class_in_light_version:
-                seg_art = label_p_pred[0,:,:,2]
-
-                seg_art[seg_art<threshold_art_class_textline] = 0
-                seg_art[seg_art>0] =1
-
-                skeleton_art = skeletonize(seg_art)
-                skeleton_art = skeleton_art*1
-
-                seg[skeleton_art==1]=2
-
-            if thresholding_for_fl_light_version:
-                seg_header = label_p_pred[0,:,:,2]
-
-                seg_header[seg_header<0.2] = 0
-                seg_header[seg_header>0] =1
-
-                seg[seg_header==1]=2
-
-            seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2)
-            prediction_true = resize_image(seg_color, img_h_page, img_w_page).astype(np.uint8)
-            return prediction_true
-
-        if img.shape[0] < img_height_model:
-            img = resize_image(img, img_height_model, img.shape[1])
-        if img.shape[1] < img_width_model:
-            img = resize_image(img, img.shape[0], img_width_model)
-
-        self.logger.debug("Patch size: %sx%s", img_height_model, img_width_model)
-        margin = int(marginal_of_patch_percent * img_height_model)
-        width_mid = img_width_model - 2 * margin
-        height_mid = img_height_model - 2 * margin
-        img = img / 255.
-        #img = img.astype(np.float16)
-        img_h = img.shape[0]
-        img_w = img.shape[1]
-        prediction_true = np.zeros((img_h, img_w, 3))
-        mask_true = np.zeros((img_h, img_w))
-        nxf = img_w / float(width_mid)
-        nyf = img_h / float(height_mid)
-        nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf)
-        nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf)
-
-        list_i_s = []
-        list_j_s = []
-        list_x_u = []
-        list_x_d = []
-        list_y_u = []
-        list_y_d = []
-
-        batch_indexer = 0
-        img_patch = np.zeros((n_batch_inference, img_height_model, img_width_model, 3))
-        for i in range(nxf):
-            for j in range(nyf):
-                if i == 0:
-                    index_x_d = i * width_mid
-                    index_x_u = index_x_d + img_width_model
-                else:
-                    index_x_d = i * width_mid
-                    index_x_u = index_x_d + img_width_model
-                if j == 0:
-                    index_y_d = j * height_mid
-                    index_y_u = index_y_d + img_height_model
-                else:
-                    index_y_d = j * height_mid
-                    index_y_u = index_y_d + img_height_model
-                if index_x_u > img_w:
-                    index_x_u = img_w
-                    index_x_d = img_w - img_width_model
-                if index_y_u > img_h:
-                    index_y_u = img_h
-                    index_y_d = img_h - img_height_model
-
-                list_i_s.append(i)
-                list_j_s.append(j)
-                list_x_u.append(index_x_u)
-                list_x_d.append(index_x_d)
-                list_y_d.append(index_y_d)
-                list_y_u.append(index_y_u)
-
-                img_patch[batch_indexer,:,:,:] = img[index_y_d:index_y_u, index_x_d:index_x_u, :]
-                batch_indexer += 1
-
-                if (batch_indexer == n_batch_inference or
-                    # last batch
-                    i == nxf - 1 and j == nyf - 1):
-                    self.logger.debug("predicting patches on %s", str(img_patch.shape))
-                    label_p_pred = model.predict(img_patch, verbose=0)
-                    seg = np.argmax(label_p_pred, axis=3)
-
-                    if thresholding_for_some_classes_in_light_version:
-                        seg_not_base = label_p_pred[:,:,:,4]
-                        seg_not_base[seg_not_base>0.03] =1
-                        seg_not_base[seg_not_base<1] =0
-
-                        seg_line = label_p_pred[:,:,:,3]
-                        seg_line[seg_line>0.1] =1
-                        seg_line[seg_line<1] =0
-
-                        seg_background = label_p_pred[:,:,:,0]
-                        seg_background[seg_background>0.25] =1
-                        seg_background[seg_background<1] =0
-
-                        seg[seg_not_base==1]=4
-                        seg[seg_background==1]=0
-                        seg[(seg_line==1) & (seg==0)]=3
-                    if thresholding_for_artificial_class_in_light_version:
-                        seg_art = label_p_pred[:,:,:,2]
-
-                        seg_art[seg_art<threshold_art_class_textline] = 0
-                        seg_art[seg_art>0] =1
-
-                        ##seg[seg_art==1]=2
-
-                    indexer_inside_batch = 0
-                    for i_batch, j_batch in zip(list_i_s, list_j_s):
-                        seg_in = seg[indexer_inside_batch]
-
-                        if thresholding_for_artificial_class_in_light_version:
-                            seg_in_art = seg_art[indexer_inside_batch]
-
-                        index_y_u_in = list_y_u[indexer_inside_batch]
-                        index_y_d_in = list_y_d[indexer_inside_batch]
-
-                        index_x_u_in = list_x_u[indexer_inside_batch]
-                        index_x_d_in = list_x_d[indexer_inside_batch]
-
-                        if i_batch == 0 and j_batch == 0:
-                            prediction_true[index_y_d_in + 0:index_y_u_in - margin,
-                                            index_x_d_in + 0:index_x_u_in - margin] = \
-                                                seg_in[0:-margin or None,
-                                                       0:-margin or None,
-                                                       np.newaxis]
-                            if thresholding_for_artificial_class_in_light_version:
-                                prediction_true[index_y_d_in + 0:index_y_u_in - margin,
-                                                index_x_d_in + 0:index_x_u_in - margin, 1] = \
-                                                    seg_in_art[0:-margin or None,
-                                                               0:-margin or None]
-
-                        elif i_batch == nxf - 1 and j_batch == nyf
- 1: - prediction_true[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + margin:index_x_u_in - 0] = \ - seg_in[margin:, - margin:, - np.newaxis] - if thresholding_for_artificial_class_in_light_version: - prediction_true[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + margin:index_x_u_in - 0, 1] = \ - seg_in_art[margin:, - margin:] - - elif i_batch == 0 and j_batch == nyf - 1: - prediction_true[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + 0:index_x_u_in - margin] = \ - seg_in[margin:, - 0:-margin or None, - np.newaxis] - if thresholding_for_artificial_class_in_light_version: - prediction_true[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + 0:index_x_u_in - margin, 1] = \ - seg_in_art[margin:, - 0:-margin or None] - - elif i_batch == nxf - 1 and j_batch == 0: - prediction_true[index_y_d_in + 0:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - 0] = \ - seg_in[0:-margin or None, - margin:, - np.newaxis] - if thresholding_for_artificial_class_in_light_version: - prediction_true[index_y_d_in + 0:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - 0, 1] = \ - seg_in_art[0:-margin or None, - margin:] - - elif i_batch == 0 and j_batch != 0 and j_batch != nyf - 1: - prediction_true[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + 0:index_x_u_in - margin] = \ - seg_in[margin:-margin or None, - 0:-margin or None, - np.newaxis] - if thresholding_for_artificial_class_in_light_version: - prediction_true[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + 0:index_x_u_in - margin, 1] = \ - seg_in_art[margin:-margin or None, - 0:-margin or None] - - elif i_batch == nxf - 1 and j_batch != 0 and j_batch != nyf - 1: - prediction_true[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - 0] = \ - seg_in[margin:-margin or None, - margin:, - np.newaxis] - if thresholding_for_artificial_class_in_light_version: - prediction_true[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - 0, 1] = \ - seg_in_art[margin:-margin or None, - margin:] - - elif i_batch != 0 and i_batch != nxf - 1 and j_batch == 0: - prediction_true[index_y_d_in + 0:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - margin] = \ - seg_in[0:-margin or None, - margin:-margin or None, - np.newaxis] - if thresholding_for_artificial_class_in_light_version: - prediction_true[index_y_d_in + 0:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - margin, 1] = \ - seg_in_art[0:-margin or None, - margin:-margin or None] - - elif i_batch != 0 and i_batch != nxf - 1 and j_batch == nyf - 1: - prediction_true[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + margin:index_x_u_in - margin] = \ - seg_in[margin:, - margin:-margin or None, - np.newaxis] - if thresholding_for_artificial_class_in_light_version: - prediction_true[index_y_d_in + margin:index_y_u_in - 0, - index_x_d_in + margin:index_x_u_in - margin, 1] = \ - seg_in_art[margin:, - margin:-margin or None] - - else: - prediction_true[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - margin] = \ - seg_in[margin:-margin or None, - margin:-margin or None, - np.newaxis] - if thresholding_for_artificial_class_in_light_version: - prediction_true[index_y_d_in + margin:index_y_u_in - margin, - index_x_d_in + margin:index_x_u_in - margin, 1] = \ - seg_in_art[margin:-margin or None, - margin:-margin or None] - indexer_inside_batch += 1 - - - list_i_s = [] - list_j_s = [] - list_x_u = [] - list_x_d = [] - list_y_u = [] 
- list_y_d = [] - - batch_indexer = 0 - img_patch[:] = 0 - - prediction_true = prediction_true.astype(np.uint8) - - if thresholding_for_artificial_class_in_light_version: - kernel_min = np.ones((3, 3), np.uint8) - prediction_true[:,:,0][prediction_true[:,:,0]==2] = 0 - - skeleton_art = skeletonize(prediction_true[:,:,1]) - skeleton_art = skeleton_art*1 - - skeleton_art = skeleton_art.astype('uint8') - - skeleton_art = cv2.dilate(skeleton_art, kernel_min, iterations=1) - - prediction_true[:,:,0][skeleton_art==1]=2 - #del model - gc.collect() - return prediction_true - - def run_enhancement(self, light_version): - t_in = time.time() - self.logger.info("Resizing and enhancing image...") - is_image_enhanced, img_org, img_res, num_col_classifier, num_column_is_classified, img_bin = \ - self.resize_and_enhance_image_with_column_classifier(light_version) - - self.logger.info("Image was %senhanced.", '' if is_image_enhanced else 'not ') - return img_res, is_image_enhanced, num_col_classifier, num_column_is_classified - - - def run_single(self): - t0 = time.time() - img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement(light_version=False) - - return img_res - - - def run(self, - overwrite: bool = False, - image_filename: Optional[str] = None, - dir_in: Optional[str] = None, - dir_out: Optional[str] = None, - ): - """ - Get image and scales, then extract the page of scanned image - """ - self.logger.debug("enter run") - t0_tot = time.time() - - if dir_in: - ls_imgs = [os.path.join(dir_in, image_filename) - for image_filename in filter(is_image_filename, - os.listdir(dir_in))] - elif image_filename: - ls_imgs = [image_filename] - else: - raise ValueError("run requires either a single image filename or a directory") - - for img_filename in ls_imgs: - self.logger.info(img_filename) - t0 = time.time() - - self.reset_file_name_dir(img_filename, dir_out) - #print("text region early -11 in %.1fs", time.time() - t0) - - if os.path.exists(self.output_filename): - if overwrite: - self.logger.warning("will overwrite existing output file '%s'", self.output_filename) - else: - self.logger.warning("will skip input for existing output file '%s'", self.output_filename) - continue - - image_enhanced = self.run_single() - if self.save_org_scale: - image_enhanced = resize_image(image_enhanced, self.h_org, self.w_org) - - cv2.imwrite(self.output_filename, image_enhanced) - diff --git a/src/eynollah/mb_ro_on_layout.py b/src/eynollah/mb_ro_on_layout.py deleted file mode 100644 index 1b991ae..0000000 --- a/src/eynollah/mb_ro_on_layout.py +++ /dev/null @@ -1,812 +0,0 @@ -""" -Image enhancer. The output can be written as same scale of input or in new predicted scale. 
-""" - -from logging import Logger -import os -import time -from typing import Optional -from pathlib import Path -import xml.etree.ElementTree as ET - -import cv2 -import numpy as np -from ocrd_utils import getLogger -import statistics -import tensorflow as tf -from tensorflow.keras.models import load_model - -from .utils.resize import resize_image -from .utils.contour import ( - find_new_features_of_contours, - return_contours_of_image, - return_parent_contours, -) -from .utils import is_xml_filename -from .eynollah import PatchEncoder, Patches - -DPI_THRESHOLD = 298 -KERNEL = np.ones((5, 5), np.uint8) - - -class machine_based_reading_order_on_layout: - def __init__( - self, - dir_models : str, - logger : Optional[Logger] = None, - ): - self.logger = logger if logger else getLogger('mbreorder') - self.dir_models = dir_models - self.model_reading_order_dir = dir_models + "/model_eynollah_reading_order_20250824" - - try: - for device in tf.config.list_physical_devices('GPU'): - tf.config.experimental.set_memory_growth(device, True) - except: - self.logger.warning("no GPU device available") - - self.model_reading_order = self.our_load_model(self.model_reading_order_dir) - self.light_version = True - - @staticmethod - def our_load_model(model_file): - if model_file.endswith('.h5') and Path(model_file[:-3]).exists(): - # prefer SavedModel over HDF5 format if it exists - model_file = model_file[:-3] - try: - model = load_model(model_file, compile=False) - except: - model = load_model(model_file, compile=False, custom_objects={ - "PatchEncoder": PatchEncoder, "Patches": Patches}) - return model - - def read_xml(self, xml_file): - tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) - root1=tree1.getroot() - alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' - - index_tot_regions = [] - tot_region_ref = [] - - for jj in root1.iter(link+'Page'): - y_len=int(jj.attrib['imageHeight']) - x_len=int(jj.attrib['imageWidth']) - - for jj in root1.iter(link+'RegionRefIndexed'): - index_tot_regions.append(jj.attrib['index']) - tot_region_ref.append(jj.attrib['regionRef']) - - if (link+'PrintSpace' in alltags) or (link+'Border' in alltags): - co_printspace = [] - if link+'PrintSpace' in alltags: - region_tags_printspace = np.unique([x for x in alltags if x.endswith('PrintSpace')]) - elif link+'Border' in alltags: - region_tags_printspace = np.unique([x for x in alltags if x.endswith('Border')]) - - for tag in region_tags_printspace: - if link+'PrintSpace' in alltags: - tag_endings_printspace = ['}PrintSpace','}printspace'] - elif link+'Border' in alltags: - tag_endings_printspace = ['}Border','}border'] - - if tag.endswith(tag_endings_printspace[0]) or tag.endswith(tag_endings_printspace[1]): - for nn in root1.iter(tag): - c_t_in = [] - sumi = 0 - for vv in nn.iter(): - # check the format of coords - if vv.tag == link + 'Coords': - coords = bool(vv.attrib) - if coords: - p_h = vv.attrib['points'].split(' ') - c_t_in.append( - np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) - break - else: - pass - - if vv.tag == link + 'Point': - c_t_in.append([int(float(vv.attrib['x'])), int(float(vv.attrib['y']))]) - sumi += 1 - elif vv.tag != link + 'Point' and sumi >= 1: - break - co_printspace.append(np.array(c_t_in)) - img_printspace = np.zeros( (y_len,x_len,3) ) - img_printspace=cv2.fillPoly(img_printspace, pts =co_printspace, color=(1,1,1)) - img_printspace = img_printspace.astype(np.uint8) - - imgray = cv2.cvtColor(img_printspace, 
cv2.COLOR_BGR2GRAY) - _, thresh = cv2.threshold(imgray, 0, 255, 0) - contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - cnt_size = np.array([cv2.contourArea(contours[j]) for j in range(len(contours))]) - cnt = contours[np.argmax(cnt_size)] - x, y, w, h = cv2.boundingRect(cnt) - - bb_coord_printspace = [x, y, w, h] - - else: - bb_coord_printspace = None - - - region_tags=np.unique([x for x in alltags if x.endswith('Region')]) - co_text_paragraph=[] - co_text_drop=[] - co_text_heading=[] - co_text_header=[] - co_text_marginalia=[] - co_text_catch=[] - co_text_page_number=[] - co_text_signature_mark=[] - co_sep=[] - co_img=[] - co_table=[] - co_graphic=[] - co_graphic_text_annotation=[] - co_graphic_decoration=[] - co_noise=[] - - co_text_paragraph_text=[] - co_text_drop_text=[] - co_text_heading_text=[] - co_text_header_text=[] - co_text_marginalia_text=[] - co_text_catch_text=[] - co_text_page_number_text=[] - co_text_signature_mark_text=[] - co_sep_text=[] - co_img_text=[] - co_table_text=[] - co_graphic_text=[] - co_graphic_text_annotation_text=[] - co_graphic_decoration_text=[] - co_noise_text=[] - - id_paragraph = [] - id_header = [] - id_heading = [] - id_marginalia = [] - - for tag in region_tags: - if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): - for nn in root1.iter(tag): - for child2 in nn: - tag2 = child2.tag - if tag2.endswith('}TextEquiv') or tag2.endswith('}TextEquiv'): - for childtext2 in child2: - if childtext2.tag.endswith('}Unicode') or childtext2.tag.endswith('}Unicode'): - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - co_text_drop_text.append(childtext2.text) - elif "type" in nn.attrib and nn.attrib['type']=='heading': - co_text_heading_text.append(childtext2.text) - elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': - co_text_signature_mark_text.append(childtext2.text) - elif "type" in nn.attrib and nn.attrib['type']=='header': - co_text_header_text.append(childtext2.text) - ###elif "type" in nn.attrib and nn.attrib['type']=='catch-word': - ###co_text_catch_text.append(childtext2.text) - ###elif "type" in nn.attrib and nn.attrib['type']=='page-number': - ###co_text_page_number_text.append(childtext2.text) - elif "type" in nn.attrib and nn.attrib['type']=='marginalia': - co_text_marginalia_text.append(childtext2.text) - else: - co_text_paragraph_text.append(childtext2.text) - c_t_in_drop=[] - c_t_in_paragraph=[] - c_t_in_heading=[] - c_t_in_header=[] - c_t_in_page_number=[] - c_t_in_signature_mark=[] - c_t_in_catch=[] - c_t_in_marginalia=[] - - - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - - coords=bool(vv.attrib) - if coords: - #print('birda1') - p_h=vv.attrib['points'].split(' ') - - - - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - - c_t_in_drop.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - elif "type" in nn.attrib and nn.attrib['type']=='heading': - ##id_heading.append(nn.attrib['id']) - c_t_in_heading.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': - - c_t_in_signature_mark.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - #print(c_t_in_paragraph) - elif "type" in nn.attrib and nn.attrib['type']=='header': - #id_header.append(nn.attrib['id']) - c_t_in_header.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in 
p_h] ) ) - - - ###elif "type" in nn.attrib and nn.attrib['type']=='catch-word': - ###c_t_in_catch.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - ###elif "type" in nn.attrib and nn.attrib['type']=='page-number': - - ###c_t_in_page_number.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - elif "type" in nn.attrib and nn.attrib['type']=='marginalia': - #id_marginalia.append(nn.attrib['id']) - - c_t_in_marginalia.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - else: - #id_paragraph.append(nn.attrib['id']) - - c_t_in_paragraph.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - break - else: - pass - - - if vv.tag==link+'Point': - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - - c_t_in_drop.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif "type" in nn.attrib and nn.attrib['type']=='heading': - #id_heading.append(nn.attrib['id']) - c_t_in_heading.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - - elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': - - c_t_in_signature_mark.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - elif "type" in nn.attrib and nn.attrib['type']=='header': - #id_header.append(nn.attrib['id']) - c_t_in_header.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - - ###elif "type" in nn.attrib and nn.attrib['type']=='catch-word': - ###c_t_in_catch.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - ###sumi+=1 - - ###elif "type" in nn.attrib and nn.attrib['type']=='page-number': - - ###c_t_in_page_number.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - ###sumi+=1 - - elif "type" in nn.attrib and nn.attrib['type']=='marginalia': - #id_marginalia.append(nn.attrib['id']) - - c_t_in_marginalia.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - else: - #id_paragraph.append(nn.attrib['id']) - c_t_in_paragraph.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - - if len(c_t_in_drop)>0: - co_text_drop.append(np.array(c_t_in_drop)) - if len(c_t_in_paragraph)>0: - co_text_paragraph.append(np.array(c_t_in_paragraph)) - id_paragraph.append(nn.attrib['id']) - if len(c_t_in_heading)>0: - co_text_heading.append(np.array(c_t_in_heading)) - id_heading.append(nn.attrib['id']) - - if len(c_t_in_header)>0: - co_text_header.append(np.array(c_t_in_header)) - id_header.append(nn.attrib['id']) - if len(c_t_in_page_number)>0: - co_text_page_number.append(np.array(c_t_in_page_number)) - if len(c_t_in_catch)>0: - co_text_catch.append(np.array(c_t_in_catch)) - - if len(c_t_in_signature_mark)>0: - co_text_signature_mark.append(np.array(c_t_in_signature_mark)) - - if len(c_t_in_marginalia)>0: - co_text_marginalia.append(np.array(c_t_in_marginalia)) - id_marginalia.append(nn.attrib['id']) - - - elif tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): - for nn in root1.iter(tag): - c_t_in=[] - c_t_in_text_annotation=[] - c_t_in_decoration=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - - if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': - c_t_in_text_annotation.append( np.array( [ [ 
int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - elif "type" in nn.attrib and nn.attrib['type']=='decoration': - c_t_in_decoration.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - else: - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - break - else: - pass - - - if vv.tag==link+'Point': - if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': - c_t_in_text_annotation.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif "type" in nn.attrib and nn.attrib['type']=='decoration': - c_t_in_decoration.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - else: - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - if len(c_t_in_text_annotation)>0: - co_graphic_text_annotation.append(np.array(c_t_in_text_annotation)) - if len(c_t_in_decoration)>0: - co_graphic_decoration.append(np.array(c_t_in_decoration)) - if len(c_t_in)>0: - co_graphic.append(np.array(c_t_in)) - - - - elif tag.endswith('}ImageRegion') or tag.endswith('}imageregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - elif vv.tag!=link+'Point' and sumi>=1: - break - co_img.append(np.array(c_t_in)) - co_img_text.append(' ') - - - elif tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - elif vv.tag!=link+'Point' and sumi>=1: - break - co_sep.append(np.array(c_t_in)) - - - - elif tag.endswith('}TableRegion') or tag.endswith('}tableregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - co_table.append(np.array(c_t_in)) - co_table_text.append(' ') - - elif tag.endswith('}NoiseRegion') or tag.endswith('}noiseregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - 
co_noise.append(np.array(c_t_in)) - co_noise_text.append(' ') - - img = np.zeros( (y_len,x_len,3) ) - img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=(1,1,1)) - - img_poly=cv2.fillPoly(img, pts =co_text_heading, color=(2,2,2)) - img_poly=cv2.fillPoly(img, pts =co_text_header, color=(2,2,2)) - img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=(3,3,3)) - img_poly=cv2.fillPoly(img, pts =co_img, color=(4,4,4)) - img_poly=cv2.fillPoly(img, pts =co_sep, color=(5,5,5)) - - return tree1, root1, bb_coord_printspace, id_paragraph, id_header+id_heading, co_text_paragraph, co_text_header+co_text_heading,\ - tot_region_ref,x_len, y_len,index_tot_regions, img_poly - - def return_indexes_of_contours_loctaed_inside_another_list_of_contours(self, contours, contours_loc, cx_main_loc, cy_main_loc, indexes_loc): - indexes_of_located_cont = [] - center_x_coordinates_of_located = [] - center_y_coordinates_of_located = [] - #M_main_tot = [cv2.moments(contours_loc[j]) - #for j in range(len(contours_loc))] - #cx_main_loc = [(M_main_tot[j]["m10"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] - #cy_main_loc = [(M_main_tot[j]["m01"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] - - for ij in range(len(contours)): - results = [cv2.pointPolygonTest(contours[ij], (cx_main_loc[ind], cy_main_loc[ind]), False) - for ind in range(len(cy_main_loc)) ] - results = np.array(results) - indexes_in = np.where((results == 0) | (results == 1)) - indexes = indexes_loc[indexes_in]# [(results == 0) | (results == 1)]#np.where((results == 0) | (results == 1)) - - indexes_of_located_cont.append(indexes) - center_x_coordinates_of_located.append(np.array(cx_main_loc)[indexes_in] ) - center_y_coordinates_of_located.append(np.array(cy_main_loc)[indexes_in] ) - - return indexes_of_located_cont, center_x_coordinates_of_located, center_y_coordinates_of_located - - def do_order_of_regions_with_model(self, contours_only_text_parent, contours_only_text_parent_h, text_regions_p): - height1 =672#448 - width1 = 448#224 - - height2 =672#448 - width2= 448#224 - - height3 =672#448 - width3 = 448#224 - - inference_bs = 3 - - ver_kernel = np.ones((5, 1), dtype=np.uint8) - hor_kernel = np.ones((1, 5), dtype=np.uint8) - - - min_cont_size_to_be_dilated = 10 - if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: - cx_conts, cy_conts, x_min_conts, x_max_conts, y_min_conts, y_max_conts, _ = find_new_features_of_contours(contours_only_text_parent) - args_cont_located = np.array(range(len(contours_only_text_parent))) - - diff_y_conts = np.abs(y_max_conts[:]-y_min_conts) - diff_x_conts = np.abs(x_max_conts[:]-x_min_conts) - - mean_x = statistics.mean(diff_x_conts) - median_x = statistics.median(diff_x_conts) - - - diff_x_ratio= diff_x_conts/mean_x - - args_cont_located_excluded = args_cont_located[diff_x_ratio>=1.3] - args_cont_located_included = args_cont_located[diff_x_ratio<1.3] - - contours_only_text_parent_excluded = [contours_only_text_parent[ind] for ind in range(len(contours_only_text_parent)) if diff_x_ratio[ind]>=1.3]#contours_only_text_parent[diff_x_ratio>=1.3] - contours_only_text_parent_included = [contours_only_text_parent[ind] for ind in range(len(contours_only_text_parent)) if diff_x_ratio[ind]<1.3]#contours_only_text_parent[diff_x_ratio<1.3] - - - cx_conts_excluded = [cx_conts[ind] for ind in range(len(cx_conts)) if diff_x_ratio[ind]>=1.3]#cx_conts[diff_x_ratio>=1.3] - cx_conts_included = [cx_conts[ind] for ind in range(len(cx_conts)) if 
diff_x_ratio[ind]<1.3]#cx_conts[diff_x_ratio<1.3] - - cy_conts_excluded = [cy_conts[ind] for ind in range(len(cy_conts)) if diff_x_ratio[ind]>=1.3]#cy_conts[diff_x_ratio>=1.3] - cy_conts_included = [cy_conts[ind] for ind in range(len(cy_conts)) if diff_x_ratio[ind]<1.3]#cy_conts[diff_x_ratio<1.3] - - #print(diff_x_ratio, 'ratio') - text_regions_p = text_regions_p.astype('uint8') - - if len(contours_only_text_parent_excluded)>0: - textregion_par = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1])).astype('uint8') - textregion_par = cv2.fillPoly(textregion_par, pts=contours_only_text_parent_included, color=(1,1)) - else: - textregion_par = (text_regions_p[:,:]==1)*1 - textregion_par = textregion_par.astype('uint8') - - text_regions_p_textregions_dilated = cv2.erode(textregion_par , hor_kernel, iterations=2) - text_regions_p_textregions_dilated = cv2.dilate(text_regions_p_textregions_dilated , ver_kernel, iterations=4) - text_regions_p_textregions_dilated = cv2.erode(text_regions_p_textregions_dilated , hor_kernel, iterations=1) - text_regions_p_textregions_dilated = cv2.dilate(text_regions_p_textregions_dilated , ver_kernel, iterations=5) - text_regions_p_textregions_dilated[text_regions_p[:,:]>1] = 0 - - - contours_only_dilated, hir_on_text_dilated = return_contours_of_image(text_regions_p_textregions_dilated) - contours_only_dilated = return_parent_contours(contours_only_dilated, hir_on_text_dilated) - - indexes_of_located_cont, center_x_coordinates_of_located, center_y_coordinates_of_located = self.return_indexes_of_contours_loctaed_inside_another_list_of_contours(contours_only_dilated, contours_only_text_parent_included, cx_conts_included, cy_conts_included, args_cont_located_included) - - - if len(args_cont_located_excluded)>0: - for ind in args_cont_located_excluded: - indexes_of_located_cont.append(np.array([ind])) - contours_only_dilated.append(contours_only_text_parent[ind]) - center_y_coordinates_of_located.append(0) - - array_list = [np.array([elem]) if isinstance(elem, int) else elem for elem in indexes_of_located_cont] - flattened_array = np.concatenate([arr.ravel() for arr in array_list]) - #print(len( np.unique(flattened_array)), 'indexes_of_located_cont uniques') - - missing_textregions = list( set(np.array(range(len(contours_only_text_parent))) ) - set(np.unique(flattened_array)) ) - #print(missing_textregions, 'missing_textregions') - - for ind in missing_textregions: - indexes_of_located_cont.append(np.array([ind])) - contours_only_dilated.append(contours_only_text_parent[ind]) - center_y_coordinates_of_located.append(0) - - - if contours_only_text_parent_h: - for vi in range(len(contours_only_text_parent_h)): - indexes_of_located_cont.append(int(vi+len(contours_only_text_parent))) - - array_list = [np.array([elem]) if isinstance(elem, int) else elem for elem in indexes_of_located_cont] - flattened_array = np.concatenate([arr.ravel() for arr in array_list]) - - y_len = text_regions_p.shape[0] - x_len = text_regions_p.shape[1] - - img_poly = np.zeros((y_len,x_len), dtype='uint8') - ###img_poly[text_regions_p[:,:]==1] = 1 - ###img_poly[text_regions_p[:,:]==2] = 2 - ###img_poly[text_regions_p[:,:]==3] = 4 - ###img_poly[text_regions_p[:,:]==6] = 5 - - ##img_poly[text_regions_p[:,:]==1] = 1 - ##img_poly[text_regions_p[:,:]==2] = 2 - ##img_poly[text_regions_p[:,:]==3] = 3 - ##img_poly[text_regions_p[:,:]==4] = 4 - ##img_poly[text_regions_p[:,:]==5] = 5 - - img_poly = np.copy(text_regions_p) - - img_header_and_sep = np.zeros((y_len,x_len), dtype='uint8') - if 
contours_only_text_parent_h: - _, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, _ = find_new_features_of_contours( - contours_only_text_parent_h) - for j in range(len(cy_main)): - img_header_and_sep[int(y_max_main[j]):int(y_max_main[j])+12, - int(x_min_main[j]):int(x_max_main[j])] = 1 - co_text_all_org = contours_only_text_parent + contours_only_text_parent_h - if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: - co_text_all = contours_only_dilated + contours_only_text_parent_h - else: - co_text_all = contours_only_text_parent + contours_only_text_parent_h - else: - co_text_all_org = contours_only_text_parent - if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: - co_text_all = contours_only_dilated - else: - co_text_all = contours_only_text_parent - - if not len(co_text_all): - return [], [] - - labels_con = np.zeros((int(y_len /6.), int(x_len/6.), len(co_text_all)), dtype=bool) - - co_text_all = [(i/6).astype(int) for i in co_text_all] - for i in range(len(co_text_all)): - img = labels_con[:,:,i].astype(np.uint8) - - #img = cv2.resize(img, (int(img.shape[1]/6), int(img.shape[0]/6)), interpolation=cv2.INTER_NEAREST) - - cv2.fillPoly(img, pts=[co_text_all[i]], color=(1,)) - labels_con[:,:,i] = img - - - labels_con = resize_image(labels_con.astype(np.uint8), height1, width1).astype(bool) - img_header_and_sep = resize_image(img_header_and_sep, height1, width1) - img_poly = resize_image(img_poly, height3, width3) - - - - input_1 = np.zeros((inference_bs, height1, width1, 3)) - ordered = [list(range(len(co_text_all)))] - index_update = 0 - #print(labels_con.shape[2],"number of regions for reading order") - while index_update>=0: - ij_list = ordered.pop(index_update) - i = ij_list.pop(0) - - ante_list = [] - post_list = [] - tot_counter = 0 - batch = [] - for j in ij_list: - img1 = labels_con[:,:,i].astype(float) - img2 = labels_con[:,:,j].astype(float) - img1[img_poly==5] = 2 - img2[img_poly==5] = 2 - img1[img_header_and_sep==1] = 3 - img2[img_header_and_sep==1] = 3 - - input_1[len(batch), :, :, 0] = img1 / 3. - input_1[len(batch), :, :, 2] = img2 / 3. - input_1[len(batch), :, :, 1] = img_poly / 5. 
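-                # each filled batch slot now encodes one candidate pair: channel 0 is
-                # region i and channel 2 is region j (masks with separators set to 2
-                # and header strips to 3, scaled into [0,1]), while channel 1 carries
-                # the full layout map scaled by its maximum label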
- - tot_counter += 1 - batch.append(j) - if tot_counter % inference_bs == 0 or tot_counter == len(ij_list): - y_pr = self.model_reading_order.predict(input_1 , verbose=0) - for jb, j in enumerate(batch): - if y_pr[jb][0]>=0.5: - post_list.append(j) - else: - ante_list.append(j) - batch = [] - - if len(ante_list): - ordered.insert(index_update, ante_list) - index_update += 1 - ordered.insert(index_update, [i]) - if len(post_list): - ordered.insert(index_update + 1, post_list) - - index_update = -1 - for index_next, ij_list in enumerate(ordered): - if len(ij_list) > 1: - index_update = index_next - break - - ordered = [i[0] for i in ordered] - - ##id_all_text = np.array(id_all_text)[index_sort] - - - if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: - org_contours_indexes = [] - for ind in range(len(ordered)): - region_with_curr_order = ordered[ind] - if region_with_curr_order < len(contours_only_dilated): - if np.isscalar(indexes_of_located_cont[region_with_curr_order]): - org_contours_indexes = org_contours_indexes + [indexes_of_located_cont[region_with_curr_order]] - else: - arg_sort_located_cont = np.argsort(center_y_coordinates_of_located[region_with_curr_order]) - org_contours_indexes = org_contours_indexes + list(np.array(indexes_of_located_cont[region_with_curr_order])[arg_sort_located_cont]) ##org_contours_indexes + list ( - else: - org_contours_indexes = org_contours_indexes + [indexes_of_located_cont[region_with_curr_order]] - - region_ids = ['region_%04d' % i for i in range(len(co_text_all_org))] - return org_contours_indexes, region_ids - else: - region_ids = ['region_%04d' % i for i in range(len(co_text_all_org))] - return ordered, region_ids - - - - - def run(self, - overwrite: bool = False, - xml_filename: Optional[str] = None, - dir_in: Optional[str] = None, - dir_out: Optional[str] = None, - ): - """ - Get image and scales, then extract the page of scanned image - """ - self.logger.debug("enter run") - t0_tot = time.time() - - if dir_in: - ls_xmls = [os.path.join(dir_in, xml_filename) - for xml_filename in filter(is_xml_filename, - os.listdir(dir_in))] - elif xml_filename: - ls_xmls = [xml_filename] - else: - raise ValueError("run requires either a single image filename or a directory") - - for xml_filename in ls_xmls: - self.logger.info(xml_filename) - t0 = time.time() - - file_name = Path(xml_filename).stem - (tree_xml, root_xml, bb_coord_printspace, id_paragraph, id_header, - co_text_paragraph, co_text_header, tot_region_ref, - x_len, y_len, index_tot_regions, img_poly) = self.read_xml(xml_filename) - - id_all_text = id_paragraph + id_header - - order_text_new, id_of_texts_tot = self.do_order_of_regions_with_model(co_text_paragraph, co_text_header, img_poly[:,:,0]) - - id_all_text = np.array(id_all_text)[order_text_new] - - alltags=[elem.tag for elem in root_xml.iter()] - - - - link=alltags[0].split('}')[0]+'}' - name_space = alltags[0].split('}')[0] - name_space = name_space.split('{')[1] - - page_element = root_xml.find(link+'Page') - - - old_ro = root_xml.find(".//{*}ReadingOrder") - - if old_ro is not None: - page_element.remove(old_ro) - - #print(old_ro, 'old_ro') - ro_subelement = ET.Element('ReadingOrder') - - ro_subelement2 = ET.SubElement(ro_subelement, 'OrderedGroup') - ro_subelement2.set('id', "ro357564684568544579089") - - for index, id_text in enumerate(id_all_text): - new_element_2 = ET.SubElement(ro_subelement2, 'RegionRefIndexed') - new_element_2.set('regionRef', id_all_text[index]) - new_element_2.set('index', 
str(index))
-
-            if (link+'PrintSpace' in alltags) or (link+'Border' in alltags):
-                page_element.insert(1, ro_subelement)
-            else:
-                page_element.insert(0, ro_subelement)
-
-            alltags=[elem.tag for elem in root_xml.iter()]
-
-            ET.register_namespace("",name_space)
-            tree_xml.write(os.path.join(dir_out, file_name+'.xml'),
-                           xml_declaration=True,
-                           method='xml',
-                           encoding="utf-8",
-                           default_namespace=None)
-
-            #sys.exit()
-
diff --git a/src/eynollah/ocrd-tool.json b/src/eynollah/ocrd-tool.json
deleted file mode 100644
index dbbdc3b..0000000
--- a/src/eynollah/ocrd-tool.json
+++ /dev/null
@@ -1,147 +0,0 @@
-{
-  "version": "0.6.0",
-  "git_url": "https://github.com/qurator-spk/eynollah",
-  "dockerhub": "ocrd/eynollah",
-  "tools": {
-    "ocrd-eynollah-segment": {
-      "executable": "ocrd-eynollah-segment",
-      "categories": ["Layout analysis"],
-      "description": "Segment page into regions and lines and do reading order detection with eynollah",
-      "input_file_grp_cardinality": 1,
-      "output_file_grp_cardinality": 1,
-      "steps": ["layout/segmentation/region", "layout/segmentation/line"],
-      "parameters": {
-        "models": {
-          "type": "string",
-          "format": "uri",
-          "content-type": "text/directory",
-          "cacheable": true,
-          "description": "Directory containing models to be used (See https://qurator-data.de/eynollah)",
-          "required": true
-        },
-        "dpi": {
-          "type": "number",
-          "format": "float",
-          "description": "pixel density in dots per inch (overrides any meta-data in the images); ignored if <= 0 (with fall-back 230)",
-          "default": 0
-        },
-        "full_layout": {
-          "type": "boolean",
-          "default": true,
-          "description": "Try to detect all element subtypes, including drop-caps and headings"
-        },
-        "light_version": {
-          "type": "boolean",
-          "default": true,
-          "description": "Try to detect all element subtypes in light version (faster+simpler method for main region detection and deskewing)"
-        },
-        "textline_light": {
-          "type": "boolean",
-          "default": true,
-          "description": "Light version requires textline_light. If this parameter is set to true, this tool will try to return contours of textlines instead of rectangular textline bounding boxes, using a faster method."
-        },
-        "tables": {
-          "type": "boolean",
-          "default": false,
-          "description": "Try to detect table regions"
-        },
-        "curved_line": {
-          "type": "boolean",
-          "default": false,
-          "description": "try to return contours of textlines instead of just rectangular bounding boxes. Needs more processing time"
-        },
-        "ignore_page_extraction": {
-          "type": "boolean",
-          "default": false,
-          "description": "if this parameter is set to true, this tool will ignore page extraction"
-        },
-        "allow_scaling": {
-          "type": "boolean",
-          "default": false,
-          "description": "check the resolution against the number of detected columns and if needed, scale the image up or down during layout detection (heuristic to improve quality and performance)"
-        },
-        "allow_enhancement": {
-          "type": "boolean",
-          "default": false,
-          "description": "if this parameter is set to true, this tool will check whether the input image needs resizing and enhancement."
-        },
-        "right_to_left": {
-          "type": "boolean",
-          "default": false,
-          "description": "if this parameter is set to true, this tool will extract right-to-left reading order."
- }, - "headers_off": { - "type": "boolean", - "default": false, - "description": "ignore the special role of headings during reading order detection" - }, - "reading_order_machine_based": { - "type": "boolean", - "default": false, - "description": "use data-driven (rather than rule-based) reading order detection" - } - }, - "resources": [ - { - "url": "https://zenodo.org/records/17194824/files/models_layout_v0_5_0.tar.gz?download=1", - "name": "models_layout_v0_5_0", - "type": "archive", - "path_in_archive": "models_layout_v0_5_0", - "size": 3525684179, - "description": "Models for layout detection, reading order detection, textline detection, page extraction, column classification, table detection, binarization, image enhancement", - "version_range": ">= v0.5.0" - }, - { - "description": "models for eynollah (TensorFlow SavedModel format)", - "url": "https://github.com/qurator-spk/eynollah/releases/download/v0.3.1/models_eynollah.tar.gz", - "name": "default", - "size": 1894627041, - "type": "archive", - "path_in_archive": "models_eynollah", - "version_range": ">= v0.3.0, < v0.5.0" - } - ] - }, - "ocrd-sbb-binarize": { - "executable": "ocrd-sbb-binarize", - "description": "Pixelwise binarization with selectional auto-encoders in Keras", - "categories": ["Image preprocessing"], - "steps": ["preprocessing/optimization/binarization"], - "input_file_grp_cardinality": 1, - "output_file_grp_cardinality": 1, - "parameters": { - "operation_level": { - "type": "string", - "enum": ["page", "region"], - "default": "page", - "description": "PAGE XML hierarchy level to operate on" - }, - "model": { - "description": "Directory containing HDF5 or SavedModel/ProtoBuf models. Can be an absolute path or a path relative to the OCR-D resource location, the current working directory or the $SBB_BINARIZE_DATA environment variable (if set)", - "type": "string", - "format": "uri", - "content-type": "text/directory", - "required": true - } - }, - "resources": [ - { - "url": "https://github.com/qurator-spk/sbb_binarization/releases/download/v0.0.11/saved_model_2020_01_16.zip", - "name": "default", - "type": "archive", - "path_in_archive": "saved_model_2020_01_16", - "size": 563147331, - "description": "default models provided by github.com/qurator-spk (SavedModel format)" - }, - { - "url": "https://github.com/qurator-spk/sbb_binarization/releases/download/v0.0.11/saved_model_2021_03_09.zip", - "name": "default-2021-03-09", - "type": "archive", - "path_in_archive": ".", - "size": 133230419, - "description": "updated default models provided by github.com/qurator-spk (SavedModel format)" - } - ] - } - } -} diff --git a/src/eynollah/ocrd_cli_binarization.py b/src/eynollah/ocrd_cli_binarization.py deleted file mode 100644 index 848bbac..0000000 --- a/src/eynollah/ocrd_cli_binarization.py +++ /dev/null @@ -1,109 +0,0 @@ -from typing import Optional - -from PIL import Image -import numpy as np -import cv2 -from click import command - -from ocrd import Processor, OcrdPageResult, OcrdPageResultImage -from ocrd_models.ocrd_page import OcrdPage, AlternativeImageType -from ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor - -from .sbb_binarize import SbbBinarizer - - -def cv2pil(img): - return Image.fromarray(img.astype('uint8')) - -def pil2cv(img): - # from ocrd/workspace.py - color_conversion = cv2.COLOR_GRAY2BGR if img.mode in ('1', 'L') else cv2.COLOR_RGB2BGR - pil_as_np_array = np.array(img).astype('uint8') if img.mode == '1' else np.array(img) - return cv2.cvtColor(pil_as_np_array, color_conversion) - 
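The two helpers above are the whole bridge between OCR-D's PIL-based page images and the OpenCV arrays that SbbBinarizer consumes. A minimal round-trip sketch, using only the two functions just defined (the input file name is hypothetical):

    from PIL import Image

    page_image = Image.open("page.tif").convert("RGB")   # hypothetical input file
    cv_img = pil2cv(page_image)    # H x W x 3 uint8 ndarray in BGR channel order
    bin_pil = cv2pil(cv_img)       # wrap an ndarray back into a PIL image; note
                                   # there is no BGR-to-RGB swap on the way back

The missing swap in cv2pil is harmless in practice, because the binarizer returns bilevel images whose channel order does not matter.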
-class SbbBinarizeProcessor(Processor):
-    # already employs GPU (without singleton process atm)
-    max_workers = 1
-
-    @property
-    def executable(self):
-        return 'ocrd-sbb-binarize'
-
-    def setup(self):
-        """
-        Set up the model prior to processing.
-        """
-        # resolve relative path via OCR-D ResourceManager
-        model_path = self.resolve_resource(self.parameter['model'])
-        self.binarizer = SbbBinarizer(model_dir=model_path, logger=self.logger)
-
-    def process_page_pcgts(self, *input_pcgts: Optional[OcrdPage], page_id: Optional[str] = None) -> OcrdPageResult:
-        """
-        Binarize images with sbb_binarization (based on selectional auto-encoders).
-
-        For each page of the input file group, open and deserialize input PAGE-XML
-        and its respective images. Then iterate over the element hierarchy down to
-        the requested ``operation_level``.
-
-        For each segment element, retrieve a raw (non-binarized) segment image
-        according to the layout annotation (from an existing ``AlternativeImage``,
-        or by cropping into the higher-level images, and deskewing when applicable).
-
-        Pass the image to the binarizer (which runs in fixed-size windows/patches
-        across the image and stitches the results together).
-
-        Serialize the resulting bilevel image as PNG file and add it to the output
-        file group (with file ID suffix ``.IMG-BIN``) along with the output PAGE-XML
-        (referencing it as new ``AlternativeImage`` for the segment element).
-
-        Produce a new PAGE output file by serialising the resulting hierarchy.
-        """
-        assert input_pcgts
-        assert input_pcgts[0]
-        assert self.parameter
-        oplevel = self.parameter['operation_level']
-        pcgts = input_pcgts[0]
-        result = OcrdPageResult(pcgts)
-        page = pcgts.get_Page()
-        page_image, page_xywh, _ = self.workspace.image_from_page(
-            page, page_id, feature_filter='binarized')
-
-        if oplevel == 'page':
-            self.logger.info("Binarizing on 'page' level in page '%s'", page_id)
-            page_image_bin = cv2pil(self.binarizer.run(image=pil2cv(page_image), use_patches=True))
-            # update PAGE (reference the image file):
-            page_image_ref = AlternativeImageType(comments=page_xywh['features'] + ',binarized,clipped')
-            page.add_AlternativeImage(page_image_ref)
-            result.images.append(OcrdPageResultImage(page_image_bin, '.IMG-BIN', page_image_ref))
-
-        elif oplevel == 'region':
-            regions = page.get_AllRegions(['Text', 'Table'], depth=1)
-            if not regions:
-                self.logger.warning("Page '%s' contains no text/table regions", page_id)
-            for region in regions:
-                region_image, region_xywh = self.workspace.image_from_segment(
-                    region, page_image, page_xywh, feature_filter='binarized')
-                region_image_bin = cv2pil(self.binarizer.run(image=pil2cv(region_image), use_patches=True))
-                # update PAGE (reference the image file):
-                region_image_ref = AlternativeImageType(comments=region_xywh['features'] + ',binarized')
-                region.add_AlternativeImage(region_image_ref)
-                result.images.append(OcrdPageResultImage(region_image_bin, region.id + '.IMG-BIN', region_image_ref))
-
-        elif oplevel == 'line':
-            lines = page.get_AllTextLines()
-            if not lines:
-                self.logger.warning("Page '%s' contains no text lines", page_id)
-            for line in lines:
-                line_image, line_xywh = self.workspace.image_from_segment(line, page_image, page_xywh, feature_filter='binarized')
-                line_image_bin = cv2pil(self.binarizer.run(image=pil2cv(line_image), use_patches=True))
-                # update PAGE (reference the image file):
-                line_image_ref = AlternativeImageType(comments=line_xywh['features'] + ',binarized')
-                line.add_AlternativeImage(line_image_ref)
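-                # queue the bilevel line image for output: it will be serialized as
-                # PNG (file ID suffix .IMG-BIN) and referenced from the output PAGE-XML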
result.images.append(OcrdPageResultImage(line_image_bin, line.id + '.IMG-BIN', line_image_ref)) - - return result - -@command() -@ocrd_cli_options -def main(*args, **kwargs): - return ocrd_cli_wrap_processor(SbbBinarizeProcessor, *args, **kwargs) diff --git a/src/eynollah/processor.py b/src/eynollah/processor.py deleted file mode 100644 index 12c7356..0000000 --- a/src/eynollah/processor.py +++ /dev/null @@ -1,96 +0,0 @@ -from functools import cached_property -from typing import Optional -from ocrd_models import OcrdPage -from ocrd import OcrdPageResultImage, Processor, OcrdPageResult - -from .eynollah import Eynollah, EynollahXmlWriter - -class EynollahProcessor(Processor): - # already employs background CPU multiprocessing per page - # already employs GPU (without singleton process atm) - max_workers = 1 - - @cached_property - def executable(self) -> str: - return 'ocrd-eynollah-segment' - - def setup(self) -> None: - assert self.parameter - if self.parameter['textline_light'] != self.parameter['light_version']: - raise ValueError("Error: You must set or unset both parameter 'textline_light' (to enable light textline detection), " - "and parameter 'light_version' (faster+simpler method for main region detection and deskewing)") - self.eynollah = Eynollah( - self.resolve_resource(self.parameter['models']), - allow_enhancement=self.parameter['allow_enhancement'], - curved_line=self.parameter['curved_line'], - right2left=self.parameter['right_to_left'], - reading_order_machine_based=self.parameter['reading_order_machine_based'], - ignore_page_extraction=self.parameter['ignore_page_extraction'], - light_version=self.parameter['light_version'], - textline_light=self.parameter['textline_light'], - full_layout=self.parameter['full_layout'], - allow_scaling=self.parameter['allow_scaling'], - headers_off=self.parameter['headers_off'], - tables=self.parameter['tables'], - ) - self.eynollah.logger = self.logger - self.eynollah.plotter = None - - def shutdown(self): - if hasattr(self, 'eynollah'): - del self.eynollah - - def process_page_pcgts(self, *input_pcgts: Optional[OcrdPage], page_id: Optional[str] = None) -> OcrdPageResult: - """ - Performs cropping, region and line segmentation with Eynollah. - - For each page, open and deserialize PAGE input file (from existing - PAGE file in the input fileGrp, or generated from image file). - Retrieve its respective page-level image (ignoring annotation that - already added `binarized`, `cropped` or `deskewed` features). - - Set up Eynollah to detect regions and lines, and add each one to the - page, respectively. - - \b - - If ``tables``, try to detect table blocks and add them as TableRegion. - - If ``full_layout``, then in addition to paragraphs and marginals, also - try to detect drop capitals and headings. - - If ``ignore_page_extraction``, then attempt no cropping of the page. - - If ``curved_line``, then compute contour polygons for text lines - instead of simple bounding boxes. - - If ``reading_order_machine_based``, then detect reading order via - data-driven model instead of geometrical heuristics. - - Produce a new output file by serialising the resulting hierarchy. 
- """ - assert input_pcgts - assert input_pcgts[0] - assert self.parameter - pcgts = input_pcgts[0] - result = OcrdPageResult(pcgts) - page = pcgts.get_Page() - page_image, _, _ = self.workspace.image_from_page( - page, page_id, - # avoid any features that would change the coordinate system: cropped,deskewed - # (the PAGE builder merely adds regions, so afterwards we would not know which to transform) - # also avoid binarization as models usually fare better on grayscale/RGB - feature_filter='cropped,deskewed,binarized') - if hasattr(page_image, 'filename'): - image_filename = page_image.filename - else: - image_filename = "dummy" # will be replaced by ocrd.Processor.process_page_file - result.images.append(OcrdPageResultImage(page_image, '.IMG', page)) # mark as new original - # FIXME: mask out already existing regions (incremental segmentation) - self.eynollah.cache_images( - image_pil=page_image, - dpi=self.parameter['dpi'], - ) - self.eynollah.writer = EynollahXmlWriter( - dir_out=None, - image_filename=image_filename, - curved_line=self.eynollah.curved_line, - textline_light=self.eynollah.textline_light, - pcgts=pcgts) - self.eynollah.run_single() - return result diff --git a/src/eynollah/sbb_binarize.py b/src/eynollah/sbb_binarize.py deleted file mode 100644 index 3716987..0000000 --- a/src/eynollah/sbb_binarize.py +++ /dev/null @@ -1,378 +0,0 @@ -""" -Tool to load model and binarize a given image. -""" - -import sys -from glob import glob -import os -import logging - -import numpy as np -from PIL import Image -import cv2 -from ocrd_utils import tf_disable_interactive_logs -tf_disable_interactive_logs() -import tensorflow as tf -from tensorflow.keras.models import load_model -from tensorflow.python.keras import backend as tensorflow_backend - -from .utils import is_image_filename - -def resize_image(img_in, input_height, input_width): - return cv2.resize(img_in, (input_width, input_height), interpolation=cv2.INTER_NEAREST) - -class SbbBinarizer: - - def __init__(self, model_dir, logger=None): - self.model_dir = model_dir - self.log = logger if logger else logging.getLogger('SbbBinarizer') - - self.start_new_session() - - self.model_files = glob(self.model_dir+"/*/", recursive = True) - - self.models = [] - for model_file in self.model_files: - self.models.append(self.load_model(model_file)) - - def start_new_session(self): - config = tf.compat.v1.ConfigProto() - config.gpu_options.allow_growth = True - - self.session = tf.compat.v1.Session(config=config) # tf.InteractiveSession() - tensorflow_backend.set_session(self.session) - - def end_session(self): - tensorflow_backend.clear_session() - self.session.close() - del self.session - - def load_model(self, model_name): - model = load_model(os.path.join(self.model_dir, model_name), compile=False) - model_height = model.layers[len(model.layers)-1].output_shape[1] - model_width = model.layers[len(model.layers)-1].output_shape[2] - n_classes = model.layers[len(model.layers)-1].output_shape[3] - return model, model_height, model_width, n_classes - - def predict(self, model_in, img, use_patches, n_batch_inference=5): - tensorflow_backend.set_session(self.session) - model, model_height, model_width, n_classes = model_in - - img_org_h = img.shape[0] - img_org_w = img.shape[1] - - if img.shape[0] < model_height and img.shape[1] >= model_width: - img_padded = np.zeros(( model_height, img.shape[1], img.shape[2] )) - - index_start_h = int( abs( img.shape[0] - model_height) /2.) 
- index_start_w = 0 - - img_padded [ index_start_h: index_start_h+img.shape[0], :, : ] = img[:,:,:] - - elif img.shape[0] >= model_height and img.shape[1] < model_width: - img_padded = np.zeros(( img.shape[0], model_width, img.shape[2] )) - - index_start_h = 0 - index_start_w = int( abs( img.shape[1] - model_width) /2.) - - img_padded [ :, index_start_w: index_start_w+img.shape[1], : ] = img[:,:,:] - - - elif img.shape[0] < model_height and img.shape[1] < model_width: - img_padded = np.zeros(( model_height, model_width, img.shape[2] )) - - index_start_h = int( abs( img.shape[0] - model_height) /2.) - index_start_w = int( abs( img.shape[1] - model_width) /2.) - - img_padded [ index_start_h: index_start_h+img.shape[0], index_start_w: index_start_w+img.shape[1], : ] = img[:,:,:] - - else: - index_start_h = 0 - index_start_w = 0 - img_padded = np.copy(img) - - - img = np.copy(img_padded) - - - - if use_patches: - - margin = int(0.1 * model_width) - - width_mid = model_width - 2 * margin - height_mid = model_height - 2 * margin - - - img = img / float(255.0) - - img_h = img.shape[0] - img_w = img.shape[1] - - prediction_true = np.zeros((img_h, img_w, 3)) - mask_true = np.zeros((img_h, img_w)) - nxf = img_w / float(width_mid) - nyf = img_h / float(height_mid) - - if nxf > int(nxf): - nxf = int(nxf) + 1 - else: - nxf = int(nxf) - - if nyf > int(nyf): - nyf = int(nyf) + 1 - else: - nyf = int(nyf) - - - list_i_s = [] - list_j_s = [] - list_x_u = [] - list_x_d = [] - list_y_u = [] - list_y_d = [] - - batch_indexer = 0 - - img_patch = np.zeros((n_batch_inference, model_height, model_width,3)) - - for i in range(nxf): - for j in range(nyf): - - if i == 0: - index_x_d = i * width_mid - index_x_u = index_x_d + model_width - elif i > 0: - index_x_d = i * width_mid - index_x_u = index_x_d + model_width - - if j == 0: - index_y_d = j * height_mid - index_y_u = index_y_d + model_height - elif j > 0: - index_y_d = j * height_mid - index_y_u = index_y_d + model_height - - if index_x_u > img_w: - index_x_u = img_w - index_x_d = img_w - model_width - if index_y_u > img_h: - index_y_u = img_h - index_y_d = img_h - model_height - - - list_i_s.append(i) - list_j_s.append(j) - list_x_u.append(index_x_u) - list_x_d.append(index_x_d) - list_y_d.append(index_y_d) - list_y_u.append(index_y_u) - - - img_patch[batch_indexer,:,:,:] = img[index_y_d:index_y_u, index_x_d:index_x_u, :] - - batch_indexer = batch_indexer + 1 - - - - if batch_indexer == n_batch_inference: - - label_p_pred = model.predict(img_patch,verbose=0) - - seg = np.argmax(label_p_pred, axis=3) - - #print(seg.shape, len(seg), len(list_i_s)) - - indexer_inside_batch = 0 - for i_batch, j_batch in zip(list_i_s, list_j_s): - seg_in = seg[indexer_inside_batch,:,:] - seg_color = np.repeat(seg_in[:, :, np.newaxis], 3, axis=2) - - index_y_u_in = list_y_u[indexer_inside_batch] - index_y_d_in = list_y_d[indexer_inside_batch] - - index_x_u_in = list_x_u[indexer_inside_batch] - index_x_d_in = list_x_d[indexer_inside_batch] - - if i_batch == 0 and j_batch == 0: - seg_color = seg_color[0 : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :] - prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color - elif i_batch == nxf - 1 and j_batch == nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - 0, :] - prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color - elif i_batch == 0 and j_batch == nyf - 1: - 
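-                            # Sliding windows advance by model_width - 2*margin
-                            # horizontally and model_height - 2*margin vertically
-                            # (margin = 10% of the model width), so neighbouring
-                            # patches overlap; when stitching, each patch keeps only
-                            # its central part, except along the image border, where
-                            # its outer edge is kept too. The elif chain enumerates
-                            # the four corners, the four edges and the interior case.
-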
seg_color = seg_color[margin : seg_color.shape[0] - 0, 0 : seg_color.shape[1] - margin, :] - prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color - elif i_batch == nxf - 1 and j_batch == 0: - seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :] - prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color - elif i_batch == 0 and j_batch != 0 and j_batch != nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :] - prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color - elif i_batch == nxf - 1 and j_batch != 0 and j_batch != nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :] - prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color - elif i_batch != 0 and i_batch != nxf - 1 and j_batch == 0: - seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :] - prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color - elif i_batch != 0 and i_batch != nxf - 1 and j_batch == nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - margin, :] - prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color - else: - seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :] - prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color - - indexer_inside_batch = indexer_inside_batch +1 - - - list_i_s = [] - list_j_s = [] - list_x_u = [] - list_x_d = [] - list_y_u = [] - list_y_d = [] - - batch_indexer = 0 - - img_patch = np.zeros((n_batch_inference, model_height, model_width,3)) - - elif i==(nxf-1) and j==(nyf-1): - label_p_pred = model.predict(img_patch,verbose=0) - - seg = np.argmax(label_p_pred, axis=3) - - #print(seg.shape, len(seg), len(list_i_s)) - - indexer_inside_batch = 0 - for i_batch, j_batch in zip(list_i_s, list_j_s): - seg_in = seg[indexer_inside_batch,:,:] - seg_color = np.repeat(seg_in[:, :, np.newaxis], 3, axis=2) - - index_y_u_in = list_y_u[indexer_inside_batch] - index_y_d_in = list_y_d[indexer_inside_batch] - - index_x_u_in = list_x_u[indexer_inside_batch] - index_x_d_in = list_x_d[indexer_inside_batch] - - if i_batch == 0 and j_batch == 0: - seg_color = seg_color[0 : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :] - prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color - elif i_batch == nxf - 1 and j_batch == nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - 0, :] - prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color - elif i_batch == 0 and j_batch == nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - 0, 0 : seg_color.shape[1] - margin, :] - prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color - elif i_batch == nxf - 1 and j_batch == 0: - seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :] - 
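-                        # This branch repeats the stitching logic above for the final,
-                        # possibly only partially filled batch, i.e. when the loops end
-                        # at i == nxf-1 and j == nyf-1 before n_batch_inference patches
-                        # have accumulated.
-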
prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color - elif i_batch == 0 and j_batch != 0 and j_batch != nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :] - prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color - elif i_batch == nxf - 1 and j_batch != 0 and j_batch != nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :] - prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color - elif i_batch != 0 and i_batch != nxf - 1 and j_batch == 0: - seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :] - prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color - elif i_batch != 0 and i_batch != nxf - 1 and j_batch == nyf - 1: - seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - margin, :] - prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color - else: - seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :] - prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color - - indexer_inside_batch = indexer_inside_batch +1 - - - list_i_s = [] - list_j_s = [] - list_x_u = [] - list_x_d = [] - list_y_u = [] - list_y_d = [] - - batch_indexer = 0 - - img_patch = np.zeros((n_batch_inference, model_height, model_width,3)) - - - - prediction_true = prediction_true[index_start_h: index_start_h+img_org_h, index_start_w: index_start_w+img_org_w,:] - prediction_true = prediction_true.astype(np.uint8) - - else: - img_h_page = img.shape[0] - img_w_page = img.shape[1] - img = img / float(255.0) - img = resize_image(img, model_height, model_width) - - label_p_pred = model.predict(img.reshape(1, img.shape[0], img.shape[1], img.shape[2])) - - seg = np.argmax(label_p_pred, axis=3)[0] - seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2) - prediction_true = resize_image(seg_color, img_h_page, img_w_page) - prediction_true = prediction_true.astype(np.uint8) - return prediction_true[:,:,0] - - def run(self, image=None, image_path=None, output=None, use_patches=False, dir_in=None): - # print(dir_in,'dir_in') - if not dir_in: - if (image is not None and image_path is not None) or \ - (image is None and image_path is None): - raise ValueError("Must pass either a opencv2 image or an image_path") - if image_path is not None: - image = cv2.imread(image_path) - img_last = 0 - for n, (model, model_file) in enumerate(zip(self.models, self.model_files)): - self.log.info('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.model_files))) - - res = self.predict(model, image, use_patches) - - img_fin = np.zeros((res.shape[0], res.shape[1], 3)) - res[:, :][res[:, :] == 0] = 2 - res = res - 1 - res = res * 255 - img_fin[:, :, 0] = res - img_fin[:, :, 1] = res - img_fin[:, :, 2] = res - - img_fin = img_fin.astype(np.uint8) - img_fin = (res[:, :] == 0) * 255 - img_last = img_last + img_fin - - kernel = np.ones((5, 5), np.uint8) - img_last[:, :][img_last[:, :] > 0] = 255 - img_last = (img_last[:, :] == 0) * 255 - if output: - cv2.imwrite(output, img_last) - return img_last - else: - ls_imgs = 
list(filter(is_image_filename, os.listdir(dir_in))) - for image_name in ls_imgs: - image_stem = image_name.split('.')[0] - print(image_name,'image_name') - image = cv2.imread(os.path.join(dir_in,image_name) ) - img_last = 0 - for n, (model, model_file) in enumerate(zip(self.models, self.model_files)): - self.log.info('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.model_files))) - - res = self.predict(model, image, use_patches) - - img_fin = np.zeros((res.shape[0], res.shape[1], 3)) - res[:, :][res[:, :] == 0] = 2 - res = res - 1 - res = res * 255 - img_fin[:, :, 0] = res - img_fin[:, :, 1] = res - img_fin[:, :, 2] = res - - img_fin = img_fin.astype(np.uint8) - img_fin = (res[:, :] == 0) * 255 - img_last = img_last + img_fin - - kernel = np.ones((5, 5), np.uint8) - img_last[:, :][img_last[:, :] > 0] = 255 - img_last = (img_last[:, :] == 0) * 255 - - cv2.imwrite(os.path.join(output, image_stem + '.png'), img_last) diff --git a/src/eynollah/training/__init__.py b/src/eynollah/training/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/eynollah/training/build_model_load_pretrained_weights_and_save.py b/src/eynollah/training/build_model_load_pretrained_weights_and_save.py deleted file mode 100644 index 40fc1fe..0000000 --- a/src/eynollah/training/build_model_load_pretrained_weights_and_save.py +++ /dev/null @@ -1,24 +0,0 @@ -import click -import tensorflow as tf - -from .models import resnet50_unet - - -def configuration(): - gpu_options = tf.compat.v1.GPUOptions(allow_growth=True) - session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options)) - -@click.command() -def build_model_load_pretrained_weights_and_save(): - n_classes = 2 - input_height = 224 - input_width = 448 - weight_decay = 1e-6 - pretraining = False - dir_of_weights = 'model_bin_sbb_ens.h5' - - # configuration() - - model = resnet50_unet(n_classes, input_height, input_width, weight_decay, pretraining) - model.load_weights(dir_of_weights) - model.save('./name_in_another_python_version.h5') diff --git a/src/eynollah/training/cli.py b/src/eynollah/training/cli.py deleted file mode 100644 index 8ab754d..0000000 --- a/src/eynollah/training/cli.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' - -import click -import sys - -from .build_model_load_pretrained_weights_and_save import build_model_load_pretrained_weights_and_save -from .generate_gt_for_training import main as generate_gt_cli -from .inference import main as inference_cli -from .train import ex - -@click.command(context_settings=dict( - ignore_unknown_options=True, -)) -@click.argument('SACRED_ARGS', nargs=-1, type=click.UNPROCESSED) -def train_cli(sacred_args): - ex.run_commandline([sys.argv[0]] + list(sacred_args)) - -@click.group('training') -def main(): - pass - -main.add_command(build_model_load_pretrained_weights_and_save) -main.add_command(generate_gt_cli, 'generate-gt') -main.add_command(inference_cli, 'inference') -main.add_command(train_cli, 'train') diff --git a/src/eynollah/training/generate_gt_for_training.py b/src/eynollah/training/generate_gt_for_training.py deleted file mode 100644 index 693cab8..0000000 --- a/src/eynollah/training/generate_gt_for_training.py +++ /dev/null @@ -1,583 +0,0 @@ -import click -import json -import os -from tqdm import tqdm -from pathlib import Path -from PIL import Image, ImageDraw, ImageFont -import cv2 -import numpy as np - -from eynollah.training.gt_gen_utils import ( - filter_contours_area_of_image, - 
find_format_of_given_filename_in_dir,
-    find_new_features_of_contours,
-    fit_text_single_line,
-    get_content_of_dir,
-    get_images_of_ground_truth,
-    get_layout_contours_for_visualization,
-    get_textline_contours_and_ocr_text,
-    get_textline_contours_for_visualization,
-    overlay_layout_on_image,
-    read_xml,
-    resize_image,
-    visualize_image_from_contours,
-    visualize_image_from_contours_layout
-)
-
-@click.group()
-def main():
-    pass
-
-@main.command()
-@click.option(
-    "--dir_xml",
-    "-dx",
-    help="directory of GT page-xml files",
-    type=click.Path(exists=True, file_okay=False),
-)
-@click.option(
-    "--dir_images",
-    "-di",
-    help="directory of original images. If print space cropping or scaling is applied to the labels, provide the original images here so that the same transformation can be applied to them. If -ps is not set and the config file contains no columns_width key, this argument can be ignored. File stems in this directory should match those in dir_xml.",
-    type=click.Path(exists=True, file_okay=False),
-)
-@click.option(
-    "--dir_out_images",
-    "-doi",
-    help="directory where the processed original images (after print space cropping or scaling) will be written.",
-    type=click.Path(exists=True, file_okay=False),
-)
-@click.option(
-    "--dir_out",
-    "-do",
-    help="directory where ground truth label images will be written",
-    type=click.Path(exists=True, file_okay=False),
-)
-
-@click.option(
-    "--config",
-    "-cfg",
-    help="config file of the preferred layout or use case.",
-    type=click.Path(exists=True, dir_okay=False),
-)
-
-@click.option(
-    "--type_output",
-    "-to",
-    help="this defines how the output should be encoded. A 2d image array or a 3d image array encoded with RGB color. Just pass 2d or 3d. The file will be saved one directory up. 
2D image array is 3d but only information of one channel would be enough since all channels have the same values.", -) -@click.option( - "--printspace", - "-ps", - is_flag=True, - help="if this parameter set to true, generated labels and in the case of provided org images cropping will be imposed and cropped labels and images will be written in output directories.", -) - -def pagexml2label(dir_xml,dir_out,type_output,config, printspace, dir_images, dir_out_images): - if config: - with open(config) as f: - config_params = json.load(f) - else: - print("passed") - config_params = None - gt_list = get_content_of_dir(dir_xml) - get_images_of_ground_truth(gt_list,dir_xml,dir_out,type_output, config, config_params, printspace, dir_images, dir_out_images) - -@main.command() -@click.option( - "--dir_imgs", - "-dis", - help="directory of images with high resolution.", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--dir_out_images", - "-dois", - help="directory where degraded images will be written.", - type=click.Path(exists=True, file_okay=False), -) - -@click.option( - "--dir_out_labels", - "-dols", - help="directory where original images will be written as labels.", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--scales", - "-scs", - help="json dictionary where the scales are written.", - type=click.Path(exists=True, dir_okay=False), -) -def image_enhancement(dir_imgs, dir_out_images, dir_out_labels, scales): - ls_imgs = os.listdir(dir_imgs) - with open(scales) as f: - scale_dict = json.load(f) - ls_scales = scale_dict['scales'] - - for img in tqdm(ls_imgs): - img_name = img.split('.')[0] - img_type = img.split('.')[1] - image = cv2.imread(os.path.join(dir_imgs, img)) - for i, scale in enumerate(ls_scales): - height_sc = int(image.shape[0]*scale) - width_sc = int(image.shape[1]*scale) - - image_down_scaled = resize_image(image, height_sc, width_sc) - image_back_to_org_scale = resize_image(image_down_scaled, image.shape[0], image.shape[1]) - - cv2.imwrite(os.path.join(dir_out_images, img_name+'_'+str(i)+'.'+img_type), image_back_to_org_scale) - cv2.imwrite(os.path.join(dir_out_labels, img_name+'_'+str(i)+'.'+img_type), image) - - -@main.command() -@click.option( - "--dir_xml", - "-dx", - help="directory of GT page-xml files", - type=click.Path(exists=True, file_okay=False), -) - -@click.option( - "--dir_out_modal_image", - "-domi", - help="directory where ground truth images would be written", - type=click.Path(exists=True, file_okay=False), -) - -@click.option( - "--dir_out_classes", - "-docl", - help="directory where ground truth classes would be written", - type=click.Path(exists=True, file_okay=False), -) - -@click.option( - "--input_height", - "-ih", - help="input height", -) -@click.option( - "--input_width", - "-iw", - help="input width", -) -@click.option( - "--min_area_size", - "-min", - help="min area size of regions considered for reading order training.", -) - -@click.option( - "--min_area_early", - "-min_early", - help="If you have already generated a training dataset using a specific minimum area value and now wish to create a dataset with a smaller minimum area value, you can avoid regenerating the previous dataset by providing the earlier minimum area value. 
This will ensure that only the missing data is generated.", -) - -def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, input_height, input_width, min_area_size, min_area_early): - xml_files_ind = os.listdir(dir_xml) - xml_files_ind = [ind_xml for ind_xml in xml_files_ind if ind_xml.endswith('.xml')] - input_height = int(input_height) - input_width = int(input_width) - min_area = float(min_area_size) - if min_area_early: - min_area_early = float(min_area_early) - - - indexer_start= 0#55166 - max_area = 1 - #min_area = 0.0001 - - for ind_xml in tqdm(xml_files_ind): - indexer = 0 - #print(ind_xml) - #print('########################') - xml_file = os.path.join(dir_xml,ind_xml ) - f_name = ind_xml.split('.')[0] - _, _, _, file_name, id_paragraph, id_header,co_text_paragraph,co_text_header,tot_region_ref,x_len, y_len,index_tot_regions,img_poly = read_xml(xml_file) - - id_all_text = id_paragraph + id_header - co_text_all = co_text_paragraph + co_text_header - - - _, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, _ = find_new_features_of_contours(co_text_header) - - img_header_and_sep = np.zeros((y_len,x_len), dtype='uint8') - - for j in range(len(cy_main)): - img_header_and_sep[int(y_max_main[j]):int(y_max_main[j])+12,int(x_min_main[j]):int(x_max_main[j]) ] = 1 - - - texts_corr_order_index = [index_tot_regions[tot_region_ref.index(i)] for i in id_all_text ] - texts_corr_order_index_int = [int(x) for x in texts_corr_order_index] - - - co_text_all, texts_corr_order_index_int, regions_ar_less_than_early_min = filter_contours_area_of_image(img_poly, co_text_all, texts_corr_order_index_int, max_area, min_area, min_area_early) - - - arg_array = np.array(range(len(texts_corr_order_index_int))) - - labels_con = np.zeros((y_len,x_len,len(arg_array)),dtype='uint8') - for i in range(len(co_text_all)): - img_label = np.zeros((y_len,x_len,3),dtype='uint8') - img_label=cv2.fillPoly(img_label, pts =[co_text_all[i]], color=(1,1,1)) - - img_label[:,:,0][img_poly[:,:,0]==5] = 2 - img_label[:,:,0][img_header_and_sep[:,:]==1] = 3 - - labels_con[:,:,i] = img_label[:,:,0] - - labels_con = resize_image(labels_con, input_height, input_width) - img_poly = resize_image(img_poly, input_height, input_width) - - - for i in range(len(texts_corr_order_index_int)): - for j in range(len(texts_corr_order_index_int)): - if i!=j: - if regions_ar_less_than_early_min: - if regions_ar_less_than_early_min[i]==1: - input_multi_visual_modal = np.zeros((input_height,input_width,3)).astype(np.int8) - final_f_name = f_name+'_'+str(indexer+indexer_start) - order_class_condition = texts_corr_order_index_int[i]-texts_corr_order_index_int[j] - if order_class_condition<0: - class_type = 1 - else: - class_type = 0 - - input_multi_visual_modal[:,:,0] = labels_con[:,:,i] - input_multi_visual_modal[:,:,1] = img_poly[:,:,0] - input_multi_visual_modal[:,:,2] = labels_con[:,:,j] - - np.save(os.path.join(dir_out_classes,final_f_name+'_missed.npy' ), class_type) - - cv2.imwrite(os.path.join(dir_out_modal_image,final_f_name+'_missed.png' ), input_multi_visual_modal) - indexer = indexer+1 - - else: - input_multi_visual_modal = np.zeros((input_height,input_width,3)).astype(np.int8) - final_f_name = f_name+'_'+str(indexer+indexer_start) - order_class_condition = texts_corr_order_index_int[i]-texts_corr_order_index_int[j] - if order_class_condition<0: - class_type = 1 - else: - class_type = 0 - - input_multi_visual_modal[:,:,0] = labels_con[:,:,i] - input_multi_visual_modal[:,:,1] = img_poly[:,:,0] - 
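-                        # Each training sample is a 3-channel image: channel 0 holds
-                        # the mask of region i (plus marker pixels, apparently for
-                        # separators and header bottom strips), channel 1 the full
-                        # layout image, and channel 2 the mask of region j; the saved
-                        # class is 1 if region i precedes region j in the ground truth
-                        # reading order, else 0. In this branch the file names get a
-                        # '_missed' suffix, since region i fell below the earlier
-                        # minimum-area threshold and was missed by a previous run.
-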
input_multi_visual_modal[:,:,2] = labels_con[:,:,j] - - np.save(os.path.join(dir_out_classes,final_f_name+'.npy' ), class_type) - - cv2.imwrite(os.path.join(dir_out_modal_image,final_f_name+'.png' ), input_multi_visual_modal) - indexer = indexer+1 - - -@main.command() -@click.option( - "--xml_file", - "-xml", - help="xml filename", - type=click.Path(exists=True, dir_okay=False), -) -@click.option( - "--dir_xml", - "-dx", - help="directory of GT page-xml files", - type=click.Path(exists=True, file_okay=False), -) - -@click.option( - "--dir_out", - "-o", - help="directory where plots will be written", - type=click.Path(exists=True, file_okay=False), -) - -@click.option( - "--dir_imgs", - "-di", - help="directory where the overlayed plots will be written", ) - -def visualize_reading_order(xml_file, dir_xml, dir_out, dir_imgs): - assert xml_file or dir_xml, "A single xml file -xml or a dir of xml files -dx is required not both of them" - - if dir_xml: - xml_files_ind = os.listdir(dir_xml) - xml_files_ind = [ind_xml for ind_xml in xml_files_ind if ind_xml.endswith('.xml')] - else: - xml_files_ind = [xml_file] - - indexer_start= 0#55166 - #min_area = 0.0001 - - for ind_xml in tqdm(xml_files_ind): - indexer = 0 - #print(ind_xml) - #print('########################') - #xml_file = os.path.join(dir_xml,ind_xml ) - - if dir_xml: - xml_file = os.path.join(dir_xml,ind_xml ) - f_name = Path(ind_xml).stem - else: - xml_file = os.path.join(ind_xml ) - f_name = Path(ind_xml).stem - print(f_name, 'f_name') - - #f_name = ind_xml.split('.')[0] - _, _, _, file_name, id_paragraph, id_header,co_text_paragraph,co_text_header,tot_region_ref,x_len, y_len,index_tot_regions,img_poly = read_xml(xml_file) - - id_all_text = id_paragraph + id_header - co_text_all = co_text_paragraph + co_text_header - - - cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, _ = find_new_features_of_contours(co_text_all) - - texts_corr_order_index = [int(index_tot_regions[tot_region_ref.index(i)]) for i in id_all_text ] - #texts_corr_order_index_int = [int(x) for x in texts_corr_order_index] - - - #cx_ordered = np.array(cx_main)[np.array(texts_corr_order_index)] - #cx_ordered = cx_ordered.astype(np.int32) - - cx_ordered = [int(val) for (_, val) in sorted(zip(texts_corr_order_index, cx_main), key=lambda x: \ - x[0], reverse=False)] - #cx_ordered = cx_ordered.astype(np.int32) - - cy_ordered = [int(val) for (_, val) in sorted(zip(texts_corr_order_index, cy_main), key=lambda x: \ - x[0], reverse=False)] - #cy_ordered = cy_ordered.astype(np.int32) - - - color = (0, 0, 255) - thickness = 20 - if dir_imgs: - layout = np.zeros( (y_len,x_len,3) ) - layout = cv2.fillPoly(layout, pts =co_text_all, color=(1,1,1)) - - img_file_name_with_format = find_format_of_given_filename_in_dir(dir_imgs, f_name) - img = cv2.imread(os.path.join(dir_imgs, img_file_name_with_format)) - - overlayed = overlay_layout_on_image(layout, img, cx_ordered, cy_ordered, color, thickness) - cv2.imwrite(os.path.join(dir_out, f_name+'.png'), overlayed) - - else: - img = np.zeros( (y_len,x_len,3) ) - img = cv2.fillPoly(img, pts =co_text_all, color=(255,0,0)) - for i in range(len(cx_ordered)-1): - start_point = (int(cx_ordered[i]), int(cy_ordered[i])) - end_point = (int(cx_ordered[i+1]), int(cy_ordered[i+1])) - img = cv2.arrowedLine(img, start_point, end_point, - color, thickness, tipLength = 0.03) - - cv2.imwrite(os.path.join(dir_out, f_name+'.png'), img) - - -@main.command() -@click.option( - "--xml_file", - "-xml", - help="xml filename", - 
type=click.Path(exists=True, dir_okay=False),
-)
-@click.option(
-    "--dir_xml",
-    "-dx",
-    help="directory of GT page-xml files",
-    type=click.Path(exists=True, file_okay=False),
-)
-
-@click.option(
-    "--dir_out",
-    "-o",
-    help="directory where plots will be written",
-    type=click.Path(exists=True, file_okay=False),
-)
-
-@click.option(
-    "--dir_imgs",
-    "-di",
-    help="directory of images on which the textline segmentation will be overlaid", )
-
-def visualize_textline_segmentation(xml_file, dir_xml, dir_out, dir_imgs):
-    assert xml_file or dir_xml, "Either a single xml file (-xml) or a directory of xml files (-dx) is required"
-    if dir_xml:
-        xml_files_ind = os.listdir(dir_xml)
-        xml_files_ind = [ind_xml for ind_xml in xml_files_ind if ind_xml.endswith('.xml')]
-    else:
-        xml_files_ind = [xml_file]
-
-    for ind_xml in tqdm(xml_files_ind):
-        indexer = 0
-        #print(ind_xml)
-        #print('########################')
-        if dir_xml:
-            xml_file = os.path.join(dir_xml, ind_xml)
-        f_name = Path(ind_xml).stem
-
-        img_file_name_with_format = find_format_of_given_filename_in_dir(dir_imgs, f_name)
-        img = cv2.imread(os.path.join(dir_imgs, img_file_name_with_format))
-
-        co_tetxlines, y_len, x_len = get_textline_contours_for_visualization(xml_file)
-
-        added_image = visualize_image_from_contours(co_tetxlines, img)
-
-        cv2.imwrite(os.path.join(dir_out, f_name+'.png'), added_image)
-
-
-
-@main.command()
-@click.option(
-    "--xml_file",
-    "-xml",
-    help="xml filename",
-    type=click.Path(exists=True, dir_okay=False),
-)
-@click.option(
-    "--dir_xml",
-    "-dx",
-    help="directory of GT page-xml files",
-    type=click.Path(exists=True, file_okay=False),
-)
-
-@click.option(
-    "--dir_out",
-    "-o",
-    help="directory where plots will be written",
-    type=click.Path(exists=True, file_okay=False),
-)
-
-@click.option(
-    "--dir_imgs",
-    "-di",
-    help="directory of images on which the layout segmentation will be overlaid", )
-
-def visualize_layout_segmentation(xml_file, dir_xml, dir_out, dir_imgs):
-    assert xml_file or dir_xml, "Either a single xml file (-xml) or a directory of xml files (-dx) is required"
-    if dir_xml:
-        xml_files_ind = os.listdir(dir_xml)
-        xml_files_ind = [ind_xml for ind_xml in xml_files_ind if ind_xml.endswith('.xml')]
-    else:
-        xml_files_ind = [xml_file]
-
-    for ind_xml in tqdm(xml_files_ind):
-        indexer = 0
-        #print(ind_xml)
-        #print('########################')
-        if dir_xml:
-            xml_file = os.path.join(dir_xml, ind_xml)
-            f_name = Path(ind_xml).stem
-        else:
-            xml_file = os.path.join(ind_xml)
-            f_name = Path(ind_xml).stem
-        print(f_name, 'f_name')
-
-        img_file_name_with_format = find_format_of_given_filename_in_dir(dir_imgs, f_name)
-        img = cv2.imread(os.path.join(dir_imgs, img_file_name_with_format))
-
-        co_text, co_graphic, co_sep, co_img, co_table, co_noise, y_len, x_len = get_layout_contours_for_visualization(xml_file)
-
-        added_image = visualize_image_from_contours_layout(co_text['paragraph'], co_text['header']+co_text['heading'], co_text['drop-capital'], co_sep, co_img, co_text['marginalia'], co_table, img)
-
-        cv2.imwrite(os.path.join(dir_out, f_name+'.png'), added_image)
-
-
-
-
-@main.command()
-@click.option(
-    "--xml_file",
-    "-xml",
-    help="xml filename",
-    type=click.Path(exists=True, dir_okay=False),
-)
-@click.option(
-    "--dir_xml",
-    "-dx",
-    help="directory of GT page-xml files",
-    type=click.Path(exists=True, file_okay=False),
-)
-
-@click.option(
-    "--dir_out",
-    "-o",
-    help="directory where plots will be written",
-    type=click.Path(exists=True, file_okay=False),
-)
-
-
-def visualize_ocr_text(xml_file,
dir_xml, dir_out): - assert xml_file or dir_xml, "A single xml file -xml or a dir of xml files -dx is required not both of them" - if dir_xml: - xml_files_ind = os.listdir(dir_xml) - xml_files_ind = [ind_xml for ind_xml in xml_files_ind if ind_xml.endswith('.xml')] - else: - xml_files_ind = [xml_file] - - font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! - font = ImageFont.truetype(font_path, 40) - - for ind_xml in tqdm(xml_files_ind): - indexer = 0 - #print(ind_xml) - #print('########################') - if dir_xml: - xml_file = os.path.join(dir_xml,ind_xml ) - f_name = Path(ind_xml).stem - else: - xml_file = os.path.join(ind_xml ) - f_name = Path(ind_xml).stem - print(f_name, 'f_name') - - co_tetxlines, y_len, x_len, ocr_texts = get_textline_contours_and_ocr_text(xml_file) - - total_bb_coordinates = [] - - image_text = Image.new("RGB", (x_len, y_len), "white") - draw = ImageDraw.Draw(image_text) - - - - for index, cnt in enumerate(co_tetxlines): - x,y,w,h = cv2.boundingRect(cnt) - #total_bb_coordinates.append([x,y,w,h]) - - #fit_text_single_line - - #x_bb = bb_ind[0] - #y_bb = bb_ind[1] - #w_bb = bb_ind[2] - #h_bb = bb_ind[3] - if ocr_texts[index]: - - - is_vertical = h > 2*w # Check orientation - font = fit_text_single_line(draw, ocr_texts[index], font_path, w, int(h*0.4) ) - - if is_vertical: - - vertical_font = fit_text_single_line(draw, ocr_texts[index], font_path, h, int(w * 0.8)) - - text_img = Image.new("RGBA", (h, w), (255, 255, 255, 0)) # Note: dimensions are swapped - text_draw = ImageDraw.Draw(text_img) - text_draw.text((0, 0), ocr_texts[index], font=vertical_font, fill="black") - - # Rotate text image by 90 degrees - rotated_text = text_img.rotate(90, expand=1) - - # Calculate paste position (centered in bbox) - paste_x = x + (w - rotated_text.width) // 2 - paste_y = y + (h - rotated_text.height) // 2 - - image_text.paste(rotated_text, (paste_x, paste_y), rotated_text) # Use rotated image as mask - else: - text_bbox = draw.textbbox((0, 0), ocr_texts[index], font=font) - text_width = text_bbox[2] - text_bbox[0] - text_height = text_bbox[3] - text_bbox[1] - - text_x = x + (w - text_width) // 2 # Center horizontally - text_y = y + (h - text_height) // 2 # Center vertically - - # Draw the text - draw.text((text_x, text_y), ocr_texts[index], fill="black", font=font) - image_text.save(os.path.join(dir_out, f_name+'.png')) diff --git a/src/eynollah/training/gt_gen_utils.py b/src/eynollah/training/gt_gen_utils.py deleted file mode 100644 index 2e3428b..0000000 --- a/src/eynollah/training/gt_gen_utils.py +++ /dev/null @@ -1,1835 +0,0 @@ -import os -import numpy as np -import warnings -import xml.etree.ElementTree as ET -from tqdm import tqdm -import cv2 -from shapely import geometry -from pathlib import Path -from PIL import ImageFont - - -KERNEL = np.ones((5, 5), np.uint8) - -with warnings.catch_warnings(): - warnings.simplefilter("ignore") - - -def visualize_image_from_contours_layout(co_par, co_header, co_drop, co_sep, co_image, co_marginal, co_table, img): - alpha = 0.5 - - blank_image = np.ones( (img.shape[:]), dtype=np.uint8) * 255 - - col_header = (173, 216, 230) - col_drop = (0, 191, 255) - boundary_color = (143, 216, 200)#(0, 0, 255) # Dark gray for the boundary - col_par = (0, 0, 139) # Lighter gray for the filled area - col_image = (0, 100, 0) - col_sep = (255, 0, 0) - col_marginal = (106, 90, 205) - col_table = (0, 90, 205) - - if len(co_image)>0: - cv2.drawContours(blank_image, co_image, -1, col_image, thickness=cv2.FILLED) # Fill the contour - - 
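-    # Categories are painted in a fixed order (images, separators, headers,
-    # paragraphs, drop capitals, marginalia, tables), so later categories
-    # overwrite earlier ones where they overlap; the result is alpha-blended
-    # with the original page image at the end.
-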
if len(co_sep)>0: - cv2.drawContours(blank_image, co_sep, -1, col_sep, thickness=cv2.FILLED) # Fill the contour - - - if len(co_header)>0: - cv2.drawContours(blank_image, co_header, -1, col_header, thickness=cv2.FILLED) # Fill the contour - - if len(co_par)>0: - cv2.drawContours(blank_image, co_par, -1, col_par, thickness=cv2.FILLED) # Fill the contour - - cv2.drawContours(blank_image, co_par, -1, boundary_color, thickness=1) # Draw the boundary - - if len(co_drop)>0: - cv2.drawContours(blank_image, co_drop, -1, col_drop, thickness=cv2.FILLED) # Fill the contour - - if len(co_marginal)>0: - cv2.drawContours(blank_image, co_marginal, -1, col_marginal, thickness=cv2.FILLED) # Fill the contour - - if len(co_table)>0: - cv2.drawContours(blank_image, co_table, -1, col_table, thickness=cv2.FILLED) # Fill the contour - - img_final =cv2.cvtColor(blank_image, cv2.COLOR_BGR2RGB) - - added_image = cv2.addWeighted(img,alpha,img_final,1- alpha,0) - return added_image - - -def visualize_image_from_contours(contours, img): - alpha = 0.5 - - blank_image = np.ones( (img.shape[:]), dtype=np.uint8) * 255 - - boundary_color = (0, 0, 255) # Dark gray for the boundary - fill_color = (173, 216, 230) # Lighter gray for the filled area - - cv2.drawContours(blank_image, contours, -1, fill_color, thickness=cv2.FILLED) # Fill the contour - cv2.drawContours(blank_image, contours, -1, boundary_color, thickness=1) # Draw the boundary - - img_final =cv2.cvtColor(blank_image, cv2.COLOR_BGR2RGB) - - added_image = cv2.addWeighted(img,alpha,img_final,1- alpha,0) - return added_image - -def visualize_model_output(prediction, img, task): - if task == "binarization": - prediction = prediction * -1 - prediction = prediction + 1 - added_image = prediction * 255 - layout_only = None - else: - unique_classes = np.unique(prediction[:,:,0]) - rgb_colors = {'0' : [255, 255, 255], - '1' : [255, 0, 0], - '2' : [255, 125, 0], - '3' : [255, 0, 125], - '4' : [125, 125, 125], - '5' : [125, 125, 0], - '6' : [0, 125, 255], - '7' : [0, 125, 0], - '8' : [125, 125, 125], - '9' : [0, 125, 255], - '10' : [125, 0, 125], - '11' : [0, 255, 0], - '12' : [0, 0, 255], - '13' : [0, 255, 255], - '14' : [255, 125, 125], - '15' : [255, 0, 255]} - - layout_only = np.zeros(prediction.shape) - - for unq_class in unique_classes: - rgb_class_unique = rgb_colors[str(int(unq_class))] - layout_only[:,:,0][prediction[:,:,0]==unq_class] = rgb_class_unique[0] - layout_only[:,:,1][prediction[:,:,0]==unq_class] = rgb_class_unique[1] - layout_only[:,:,2][prediction[:,:,0]==unq_class] = rgb_class_unique[2] - - - - img = resize_image(img, layout_only.shape[0], layout_only.shape[1]) - - layout_only = layout_only.astype(np.int32) - img = img.astype(np.int32) - - - - added_image = cv2.addWeighted(img,0.5,layout_only,0.1,0) - - return added_image, layout_only - -def get_content_of_dir(dir_in): - """ - Listing all ground truth page xml files. All files are needed to have xml format. 
- """ - - gt_all=os.listdir(dir_in) - gt_list = [file for file in gt_all if os.path.splitext(file)[1] == '.xml'] - return gt_list - -def return_parent_contours(contours, hierarchy): - contours_parent = [contours[i] for i in range(len(contours)) if hierarchy[0][i][3] == -1] - return contours_parent -def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, min_area): - found_polygons_early = list() - - jv = 0 - for c in contours: - if len(np.shape(c)) == 3: - c = c[0] - elif len(np.shape(c)) == 2: - pass - #c = c[0] - if len(c) < 3: # A polygon cannot have less than 3 points - continue - - c_e = [point for point in c] - polygon = geometry.Polygon(c_e) - # area = cv2.contourArea(c) - area = polygon.area - # Check that polygon has area greater than minimal area - if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 : - found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.int32)) - jv += 1 - return found_polygons_early - -def filter_contours_area_of_image(image, contours, order_index, max_area, min_area, min_early=None): - found_polygons_early = list() - order_index_filtered = list() - regions_ar_less_than_early_min = list() - #jv = 0 - for jv, c in enumerate(contours): - if len(np.shape(c)) == 3: - c = c[0] - elif len(np.shape(c)) == 2: - pass - if len(c) < 3: # A polygon cannot have less than 3 points - continue - c_e = [point for point in c] - polygon = geometry.Polygon(c_e) - area = polygon.area - if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 : - found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.uint)) - order_index_filtered.append(order_index[jv]) - if min_early: - if area < min_early * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 : - regions_ar_less_than_early_min.append(1) - else: - regions_ar_less_than_early_min.append(0) - else: - regions_ar_less_than_early_min = None - - #jv += 1 - return found_polygons_early, order_index_filtered, regions_ar_less_than_early_min - -def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002): - - # pixels of images are identified by 5 - if len(region_pre_p.shape) == 3: - cnts_images = (region_pre_p[:, :, 0] == pixel) * 1 - else: - cnts_images = (region_pre_p[:, :] == pixel) * 1 - cnts_images = cnts_images.astype(np.uint8) - cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) - imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) - - contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - - #print(len(contours_imgs), hierarchy) - - contours_imgs = return_parent_contours(contours_imgs, hierarchy) - - #print(len(contours_imgs), "iki") - #contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=min_area) - - return contours_imgs -def update_region_contours(co_text, img_boundary, erosion_rate, dilation_rate, y_len, x_len, dilation_early=None, erosion_early=None): - co_text_eroded = [] - for con in co_text: - img_boundary_in = np.zeros( (y_len,x_len) ) - img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) - - if dilation_early: - img_boundary_in = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=dilation_early) - - if erosion_early: - 
img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=erosion_early) - - #img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=7)#asiatica - if erosion_rate > 0: - img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=erosion_rate) - - pixel = 1 - min_size = 0 - - img_boundary_in = img_boundary_in.astype("uint8") - - con_eroded = return_contours_of_interested_region(img_boundary_in,pixel, min_size ) - - try: - co_text_eroded.append(con_eroded[0]) - except: - co_text_eroded.append(con) - - - img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=dilation_rate) - #img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=5) - - boundary = img_boundary_in_dilated[:,:] - img_boundary_in[:,:] - - img_boundary[:,:][boundary[:,:]==1] =1 - return co_text_eroded, img_boundary - -def get_textline_contours_for_visualization(xml_file): - tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) - root1=tree1.getroot() - alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' - - - - for jj in root1.iter(link+'Page'): - y_len=int(jj.attrib['imageHeight']) - x_len=int(jj.attrib['imageWidth']) - - region_tags = np.unique([x for x in alltags if x.endswith('TextLine')]) - tag_endings = ['}TextLine','}textline'] - co_use_case = [] - - for tag in region_tags: - if tag.endswith(tag_endings[0]) or tag.endswith(tag_endings[1]): - for nn in root1.iter(tag): - c_t_in = [] - sumi = 0 - for vv in nn.iter(): - if vv.tag == link + 'Coords': - coords = bool(vv.attrib) - if coords: - p_h = vv.attrib['points'].split(' ') - c_t_in.append( - np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) - break - else: - pass - - if vv.tag == link + 'Point': - c_t_in.append([int(float(vv.attrib['x'])), int(float(vv.attrib['y']))]) - sumi += 1 - elif vv.tag != link + 'Point' and sumi >= 1: - break - co_use_case.append(np.array(c_t_in)) - return co_use_case, y_len, x_len - - -def get_textline_contours_and_ocr_text(xml_file): - tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) - root1=tree1.getroot() - alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' - - - - for jj in root1.iter(link+'Page'): - y_len=int(jj.attrib['imageHeight']) - x_len=int(jj.attrib['imageWidth']) - - region_tags = np.unique([x for x in alltags if x.endswith('TextLine')]) - tag_endings = ['}TextLine','}textline'] - co_use_case = [] - ocr_textlines = [] - - for tag in region_tags: - if tag.endswith(tag_endings[0]) or tag.endswith(tag_endings[1]): - for nn in root1.iter(tag): - c_t_in = [] - ocr_text_in = [''] - sumi = 0 - for vv in nn.iter(): - if vv.tag == link + 'Coords': - for childtest2 in nn: - if childtest2.tag.endswith("TextEquiv"): - for child_uc in childtest2: - if child_uc.tag.endswith("Unicode"): - text = child_uc.text - ocr_text_in[0]= text - - coords = bool(vv.attrib) - if coords: - p_h = vv.attrib['points'].split(' ') - c_t_in.append( - np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) - break - else: - pass - - - - if vv.tag == link + 'Point': - c_t_in.append([int(float(vv.attrib['x'])), int(float(vv.attrib['y']))]) - sumi += 1 - elif vv.tag != link + 'Point' and sumi >= 1: - break - - - co_use_case.append(np.array(c_t_in)) - ocr_textlines.append(ocr_text_in[0]) - return co_use_case, y_len, x_len, ocr_textlines - -def fit_text_single_line(draw, text, font_path, max_width, max_height): - initial_font_size = 50 - font_size = initial_font_size - 
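-    # Shrink-to-fit search: starting at size 50, decrease the font size in steps
-    # of 2 until the rendered text's bounding box fits within max_width x
-    # max_height, falling back to size 10 if nothing fits.
-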
while font_size > 10: # Minimum font size - font = ImageFont.truetype(font_path, font_size) - text_bbox = draw.textbbox((0, 0), text, font=font) # Get text bounding box - text_width = text_bbox[2] - text_bbox[0] - text_height = text_bbox[3] - text_bbox[1] - - if text_width <= max_width and text_height <= max_height: - return font # Return the best-fitting font - - font_size -= 2 # Reduce font size and retry - - return ImageFont.truetype(font_path, 10) # Smallest font fallback - -def get_layout_contours_for_visualization(xml_file): - tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) - root1=tree1.getroot() - alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' - - - - for jj in root1.iter(link+'Page'): - y_len=int(jj.attrib['imageHeight']) - x_len=int(jj.attrib['imageWidth']) - - region_tags=np.unique([x for x in alltags if x.endswith('Region')]) - co_text = {'drop-capital':[], "footnote":[], "footnote-continued":[], "heading":[], "signature-mark":[], "header":[], "catch-word":[], "page-number":[], "marginalia":[], "paragraph":[]} - all_defined_textregion_types = list(co_text.keys()) - co_graphic = {"handwritten-annotation":[], "decoration":[], "stamp":[], "signature":[]} - all_defined_graphic_types = list(co_graphic.keys()) - co_sep=[] - co_img=[] - co_table=[] - co_noise=[] - - types_text = [] - types_graphic = [] - - for tag in region_tags: - if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): - for nn in root1.iter(tag): - c_t_in = {'drop-capital':[], "footnote":[], "footnote-continued":[], "heading":[], "signature-mark":[], "header":[], "catch-word":[], "page-number":[], "marginalia":[], "paragraph":[]} - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - - if "rest_as_paragraph" in types_text: - types_text_without_paragraph = [element for element in types_text if element!='rest_as_paragraph' and element!='paragraph'] - if len(types_text_without_paragraph) == 0: - if "type" in nn.attrib: - c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - else: - c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - elif len(types_text_without_paragraph) >= 1: - if "type" in nn.attrib: - if nn.attrib['type'] in types_text_without_paragraph: - c_t_in[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - else: - c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - else: - c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - else: - if "type" in nn.attrib: - if nn.attrib['type'] in all_defined_textregion_types: - c_t_in[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - else: - c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - break - else: - pass - - - if vv.tag==link+'Point': - if "rest_as_paragraph" in types_text: - types_text_without_paragraph = [element for element in types_text if element!='rest_as_paragraph' and element!='paragraph'] - if len(types_text_without_paragraph) == 0: - if "type" in nn.attrib: - c_t_in['paragraph'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - elif 
len(types_text_without_paragraph) >= 1: - if "type" in nn.attrib: - if nn.attrib['type'] in types_text_without_paragraph: - c_t_in[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - else: - c_t_in['paragraph'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - - else: - if "type" in nn.attrib: - if nn.attrib['type'] in all_defined_textregion_types: - c_t_in[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - - - elif vv.tag!=link+'Point' and sumi>=1: - break - - for element_text in list(c_t_in.keys()): - if len(c_t_in[element_text])>0: - co_text[element_text].append(np.array(c_t_in[element_text])) - - - if tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in_graphic = {"handwritten-annotation":[], "decoration":[], "stamp":[], "signature":[]} - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - - if "rest_as_decoration" in types_graphic: - types_graphic_without_decoration = [element for element in types_graphic if element!='rest_as_decoration' and element!='decoration'] - if len(types_graphic_without_decoration) == 0: - if "type" in nn.attrib: - c_t_in_graphic['decoration'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - elif len(types_graphic_without_decoration) >= 1: - if "type" in nn.attrib: - if nn.attrib['type'] in types_graphic_without_decoration: - c_t_in_graphic[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - else: - c_t_in_graphic['decoration'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - else: - if "type" in nn.attrib: - if nn.attrib['type'] in all_defined_graphic_types: - c_t_in_graphic[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - break - else: - pass - - - if vv.tag==link+'Point': - if "rest_as_decoration" in types_graphic: - types_graphic_without_decoration = [element for element in types_graphic if element!='rest_as_decoration' and element!='decoration'] - if len(types_graphic_without_decoration) == 0: - if "type" in nn.attrib: - c_t_in_graphic['decoration'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - elif len(types_graphic_without_decoration) >= 1: - if "type" in nn.attrib: - if nn.attrib['type'] in types_graphic_without_decoration: - c_t_in_graphic[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - else: - c_t_in_graphic['decoration'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - - else: - if "type" in nn.attrib: - if nn.attrib['type'] in all_defined_graphic_types: - c_t_in_graphic[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - - for element_graphic in list(c_t_in_graphic.keys()): - if len(c_t_in_graphic[element_graphic])>0: - co_graphic[element_graphic].append(np.array(c_t_in_graphic[element_graphic])) - - - if tag.endswith('}ImageRegion') or tag.endswith('}imageregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - 
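-                        # PAGE-XML stores polygons as a Coords/@points attribute,
-                        # e.g. points="10,20 110,20 110,80 10,80" (illustrative
-                        # values); it is split on whitespace and each "x,y" token
-                        # parsed into an integer [x, y] pair below.
-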
p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - co_img.append(np.array(c_t_in)) - - - if tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - co_sep.append(np.array(c_t_in)) - - - if tag.endswith('}TableRegion') or tag.endswith('}tableregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_table.append(np.array(c_t_in)) - - - if tag.endswith('}NoiseRegion') or tag.endswith('}noiseregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_noise.append(np.array(c_t_in)) - return co_text, co_graphic, co_sep, co_img, co_table, co_noise, y_len, x_len - -def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_file, config_params, printspace, dir_images, dir_out_images): - """ - Reading the page xml files and write the ground truth images into given output directory. 
- """ - ## to do: add footnote to text regions - - if dir_images: - ls_org_imgs = os.listdir(dir_images) - ls_org_imgs_stem = [os.path.splitext(item)[0] for item in ls_org_imgs] - for index in tqdm(range(len(gt_list))): - #try: - print(gt_list[index]) - tree1 = ET.parse(dir_in+'/'+gt_list[index], parser = ET.XMLParser(encoding='utf-8')) - root1=tree1.getroot() - alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' - - - - for jj in root1.iter(link+'Page'): - y_len=int(jj.attrib['imageHeight']) - x_len=int(jj.attrib['imageWidth']) - - if 'columns_width' in list(config_params.keys()): - columns_width_dict = config_params['columns_width'] - metadata_element = root1.find(link+'Metadata') - comment_is_sub_element = False - for child in metadata_element: - tag2 = child.tag - if tag2.endswith('}Comments') or tag2.endswith('}comments'): - text_comments = child.text - num_col = int(text_comments.split('num_col')[1]) - comment_is_sub_element = True - if not comment_is_sub_element: - num_col = None - - if num_col: - x_new = columns_width_dict[str(num_col)] - y_new = int ( x_new * (y_len / float(x_len)) ) - - if printspace or "printspace_as_class_in_layout" in list(config_params.keys()): - region_tags = np.unique([x for x in alltags if x.endswith('PrintSpace') or x.endswith('Border')]) - co_use_case = [] - - for tag in region_tags: - tag_endings = ['}PrintSpace','}Border'] - - if tag.endswith(tag_endings[0]) or tag.endswith(tag_endings[1]): - for nn in root1.iter(tag): - c_t_in = [] - sumi = 0 - for vv in nn.iter(): - # check the format of coords - if vv.tag == link + 'Coords': - coords = bool(vv.attrib) - if coords: - p_h = vv.attrib['points'].split(' ') - c_t_in.append( - np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) - break - else: - pass - - if vv.tag == link + 'Point': - c_t_in.append([int(float(vv.attrib['x'])), int(float(vv.attrib['y']))]) - sumi += 1 - elif vv.tag != link + 'Point' and sumi >= 1: - break - co_use_case.append(np.array(c_t_in)) - - img = np.zeros((y_len, x_len, 3)) - - img_poly = cv2.fillPoly(img, pts=co_use_case, color=(1, 1, 1)) - - img_poly = img_poly.astype(np.uint8) - - imgray = cv2.cvtColor(img_poly, cv2.COLOR_BGR2GRAY) - _, thresh = cv2.threshold(imgray, 0, 255, 0) - - contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - - cnt_size = np.array([cv2.contourArea(contours[j]) for j in range(len(contours))]) - - cnt = contours[np.argmax(cnt_size)] - - x, y, w, h = cv2.boundingRect(cnt) - bb_xywh = [x, y, w, h] - - - if config_file and (config_params['use_case']=='textline' or config_params['use_case']=='word' or config_params['use_case']=='glyph' or config_params['use_case']=='printspace'): - keys = list(config_params.keys()) - if "artificial_class_label" in keys: - artificial_class_rgb_color = (255,255,0) - artificial_class_label = config_params['artificial_class_label'] - - textline_rgb_color = (255, 0, 0) - - if config_params['use_case']=='textline': - region_tags = np.unique([x for x in alltags if x.endswith('TextLine')]) - elif config_params['use_case']=='word': - region_tags = np.unique([x for x in alltags if x.endswith('Word')]) - elif config_params['use_case']=='glyph': - region_tags = np.unique([x for x in alltags if x.endswith('Glyph')]) - elif config_params['use_case']=='printspace': - region_tags = np.unique([x for x in alltags if x.endswith('PrintSpace')]) - - co_use_case = [] - - for tag in region_tags: - if config_params['use_case']=='textline': - tag_endings = 
['}TextLine','}textline'] - elif config_params['use_case']=='word': - tag_endings = ['}Word','}word'] - elif config_params['use_case']=='glyph': - tag_endings = ['}Glyph','}glyph'] - elif config_params['use_case']=='printspace': - tag_endings = ['}PrintSpace','}printspace'] - - if tag.endswith(tag_endings[0]) or tag.endswith(tag_endings[1]): - for nn in root1.iter(tag): - c_t_in = [] - sumi = 0 - for vv in nn.iter(): - # check the format of coords - if vv.tag == link + 'Coords': - coords = bool(vv.attrib) - if coords: - p_h = vv.attrib['points'].split(' ') - c_t_in.append( - np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) - break - else: - pass - - if vv.tag == link + 'Point': - c_t_in.append([int(float(vv.attrib['x'])), int(float(vv.attrib['y']))]) - sumi += 1 - elif vv.tag != link + 'Point' and sumi >= 1: - break - co_use_case.append(np.array(c_t_in)) - - - if "artificial_class_label" in keys: - img_boundary = np.zeros((y_len, x_len)) - erosion_rate = 0#1 - dilation_rate = 2 - dilation_early = 0 - erosion_early = 2 - co_use_case, img_boundary = update_region_contours(co_use_case, img_boundary, erosion_rate, dilation_rate, y_len, x_len, dilation_early=dilation_early, erosion_early=erosion_early) - - - img = np.zeros((y_len, x_len, 3)) - if output_type == '2d': - img_poly = cv2.fillPoly(img, pts=co_use_case, color=(1, 1, 1)) - if "artificial_class_label" in keys: - img_mask = np.copy(img_poly) - ##img_poly[:,:][(img_boundary[:,:]==1) & (img_mask[:,:,0]!=1)] = artificial_class_label - img_poly[:,:][img_boundary[:,:]==1] = artificial_class_label - elif output_type == '3d': - img_poly = cv2.fillPoly(img, pts=co_use_case, color=textline_rgb_color) - if "artificial_class_label" in keys: - img_mask = np.copy(img_poly) - img_poly[:,:,0][(img_boundary[:,:]==1) & (img_mask[:,:,0]!=255)] = artificial_class_rgb_color[0] - img_poly[:,:,1][(img_boundary[:,:]==1) & (img_mask[:,:,0]!=255)] = artificial_class_rgb_color[1] - img_poly[:,:,2][(img_boundary[:,:]==1) & (img_mask[:,:,0]!=255)] = artificial_class_rgb_color[2] - - - if printspace and config_params['use_case']!='printspace': - img_poly = img_poly[bb_xywh[1]:bb_xywh[1]+bb_xywh[3], bb_xywh[0]:bb_xywh[0]+bb_xywh[2], :] - - - if 'columns_width' in list(config_params.keys()) and num_col and config_params['use_case']!='printspace': - img_poly = resize_image(img_poly, y_new, x_new) - - try: - xml_file_stem = os.path.splitext(gt_list[index])[0] - cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly) - except: - xml_file_stem = os.path.splitext(gt_list[index])[0] - cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly) - - if dir_images: - org_image_name = ls_org_imgs[ls_org_imgs_stem.index(xml_file_stem)] - img_org = cv2.imread(os.path.join(dir_images, org_image_name)) - - if printspace and config_params['use_case']!='printspace': - img_org = img_org[bb_xywh[1]:bb_xywh[1]+bb_xywh[3], bb_xywh[0]:bb_xywh[0]+bb_xywh[2], :] - - if 'columns_width' in list(config_params.keys()) and num_col and config_params['use_case']!='printspace': - img_org = resize_image(img_org, y_new, x_new) - - cv2.imwrite(os.path.join(dir_out_images, org_image_name), img_org) - - - if config_file and config_params['use_case']=='layout': - keys = list(config_params.keys()) - - if "artificial_class_on_boundary" in keys: - elements_with_artificial_class = list(config_params['artificial_class_on_boundary']) - artificial_class_rgb_color = (255,255,0) - artificial_class_label = config_params['artificial_class_label'] - #values = 
config_params.values() - - if "printspace_as_class_in_layout" in list(config_params.keys()): - printspace_class_rgb_color = (125,125,255) - printspace_class_label = config_params['printspace_as_class_in_layout'] - - if 'textregions' in keys: - types_text_dict = config_params['textregions'] - types_text = list(types_text_dict.keys()) - types_text_label = list(types_text_dict.values()) - if 'graphicregions' in keys: - types_graphic_dict = config_params['graphicregions'] - types_graphic = list(types_graphic_dict.keys()) - types_graphic_label = list(types_graphic_dict.values()) - - - labels_rgb_color = [ (0,0,0), (255,0,0), (255,125,0), (255,0,125), (125,255,125), (125,125,0), (0,125,255), (0,125,0), (125,125,125), (255,0,255), (125,0,125), (0,255,0),(0,0,255), (0,255,255), (255,125,125), (0,125,125), (0,255,125), (255,125,255), (125,255,0)] - - - region_tags=np.unique([x for x in alltags if x.endswith('Region')]) - co_text = {'drop-capital':[], "footnote":[], "footnote-continued":[], "heading":[], "signature-mark":[], "header":[], "catch-word":[], "page-number":[], "marginalia":[], "paragraph":[]} - all_defined_textregion_types = list(co_text.keys()) - co_graphic = {"handwritten-annotation":[], "decoration":[], "stamp":[], "signature":[]} - all_defined_graphic_types = list(co_graphic.keys()) - co_sep=[] - co_img=[] - co_table=[] - co_noise=[] - - for tag in region_tags: - if 'textregions' in keys: - if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): - for nn in root1.iter(tag): - c_t_in = {'drop-capital':[], "footnote":[], "footnote-continued":[], "heading":[], "signature-mark":[], "header":[], "catch-word":[], "page-number":[], "marginalia":[], "paragraph":[]} - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - - if "rest_as_paragraph" in types_text: - types_text_without_paragraph = [element for element in types_text if element!='rest_as_paragraph' and element!='paragraph'] - if len(types_text_without_paragraph) == 0: - if "type" in nn.attrib: - c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - elif len(types_text_without_paragraph) >= 1: - if "type" in nn.attrib: - if nn.attrib['type'] in types_text_without_paragraph: - c_t_in[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - else: - c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - else: - if "type" in nn.attrib: - if nn.attrib['type'] in all_defined_textregion_types: - c_t_in[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - break - else: - pass - - - if vv.tag==link+'Point': - if "rest_as_paragraph" in types_text: - types_text_without_paragraph = [element for element in types_text if element!='rest_as_paragraph' and element!='paragraph'] - if len(types_text_without_paragraph) == 0: - if "type" in nn.attrib: - c_t_in['paragraph'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - elif len(types_text_without_paragraph) >= 1: - if "type" in nn.attrib: - if nn.attrib['type'] in types_text_without_paragraph: - c_t_in[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - else: - c_t_in['paragraph'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - - else: - if "type" in 
nn.attrib: - if nn.attrib['type'] in all_defined_textregion_types: - c_t_in[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - - - elif vv.tag!=link+'Point' and sumi>=1: - break - - for element_text in list(c_t_in.keys()): - if len(c_t_in[element_text])>0: - co_text[element_text].append(np.array(c_t_in[element_text])) - - if 'graphicregions' in keys: - if tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in_graphic = {"handwritten-annotation":[], "decoration":[], "stamp":[], "signature":[]} - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - - if "rest_as_decoration" in types_graphic: - types_graphic_without_decoration = [element for element in types_graphic if element!='rest_as_decoration' and element!='decoration'] - if len(types_graphic_without_decoration) == 0: - if "type" in nn.attrib: - c_t_in_graphic['decoration'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - elif len(types_graphic_without_decoration) >= 1: - if "type" in nn.attrib: - if nn.attrib['type'] in types_graphic_without_decoration: - c_t_in_graphic[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - else: - c_t_in_graphic['decoration'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - else: - if "type" in nn.attrib: - if nn.attrib['type'] in all_defined_graphic_types: - c_t_in_graphic[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - break - else: - pass - - - if vv.tag==link+'Point': - if "rest_as_decoration" in types_graphic: - types_graphic_without_decoration = [element for element in types_graphic if element!='rest_as_decoration' and element!='decoration'] - if len(types_graphic_without_decoration) == 0: - if "type" in nn.attrib: - c_t_in_graphic['decoration'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - elif len(types_graphic_without_decoration) >= 1: - if "type" in nn.attrib: - if nn.attrib['type'] in types_graphic_without_decoration: - c_t_in_graphic[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - else: - c_t_in_graphic['decoration'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - - else: - if "type" in nn.attrib: - if nn.attrib['type'] in all_defined_graphic_types: - c_t_in_graphic[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - - for element_graphic in list(c_t_in_graphic.keys()): - if len(c_t_in_graphic[element_graphic])>0: - co_graphic[element_graphic].append(np.array(c_t_in_graphic[element_graphic])) - - - if 'imageregion' in keys: - if tag.endswith('}ImageRegion') or tag.endswith('}imageregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - 
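-                     # Region outlines arrive in two encodings: either a Coords element
-                     # carrying a 'points' attribute ("x1,y1 x2,y2 ..."), or a Coords
-                     # element wrapping individual Point children. The loop above
-                     # collects both forms into a single array of (x, y) vertices.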
co_img.append(np.array(c_t_in)) - - - if 'separatorregion' in keys: - if tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - co_sep.append(np.array(c_t_in)) - - - - if 'tableregion' in keys: - if tag.endswith('}TableRegion') or tag.endswith('}tableregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_table.append(np.array(c_t_in)) - - if 'noiseregion' in keys: - if tag.endswith('}NoiseRegion') or tag.endswith('}noiseregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_noise.append(np.array(c_t_in)) - - if "artificial_class_on_boundary" in keys: - img_boundary = np.zeros( (y_len,x_len) ) - if "paragraph" in elements_with_artificial_class: - erosion_rate = 2 - dilation_rate = 4 - co_text['paragraph'], img_boundary = update_region_contours(co_text['paragraph'], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "drop-capital" in elements_with_artificial_class: - erosion_rate = 1 - dilation_rate = 3 - co_text["drop-capital"], img_boundary = update_region_contours(co_text["drop-capital"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "catch-word" in elements_with_artificial_class: - erosion_rate = 0 - dilation_rate = 3#4 - co_text["catch-word"], img_boundary = update_region_contours(co_text["catch-word"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "page-number" in elements_with_artificial_class: - erosion_rate = 0 - dilation_rate = 3#4 - co_text["page-number"], img_boundary = update_region_contours(co_text["page-number"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "header" in elements_with_artificial_class: - erosion_rate = 1 - dilation_rate = 4 - co_text["header"], img_boundary = update_region_contours(co_text["header"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "heading" in elements_with_artificial_class: - erosion_rate = 1 - dilation_rate = 4 - co_text["heading"], img_boundary = update_region_contours(co_text["heading"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "signature-mark" in elements_with_artificial_class: - 
erosion_rate = 1 - dilation_rate = 4 - co_text["signature-mark"], img_boundary = update_region_contours(co_text["signature-mark"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "marginalia" in elements_with_artificial_class: - erosion_rate = 2 - dilation_rate = 4 - co_text["marginalia"], img_boundary = update_region_contours(co_text["marginalia"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "footnote" in elements_with_artificial_class: - erosion_rate = 0#2 - dilation_rate = 2#4 - co_text["footnote"], img_boundary = update_region_contours(co_text["footnote"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "footnote-continued" in elements_with_artificial_class: - erosion_rate = 0#2 - dilation_rate = 2#4 - co_text["footnote-continued"], img_boundary = update_region_contours(co_text["footnote-continued"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "tableregion" in elements_with_artificial_class: - erosion_rate = 0#2 - dilation_rate = 3#4 - co_table, img_boundary = update_region_contours(co_table, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - - - - img = np.zeros( (y_len,x_len,3) ) - - if output_type == '3d': - if 'graphicregions' in keys: - if 'rest_as_decoration' in types_graphic: - types_graphic[types_graphic=='rest_as_decoration'] = 'decoration' - for element_graphic in types_graphic: - if element_graphic == 'decoration': - color_label = labels_rgb_color[ config_params['graphicregions']['rest_as_decoration']] - else: - color_label = labels_rgb_color[ config_params['graphicregions'][element_graphic]] - img_poly=cv2.fillPoly(img, pts =co_graphic[element_graphic], color=color_label) - else: - for element_graphic in types_graphic: - color_label = labels_rgb_color[ config_params['graphicregions'][element_graphic]] - img_poly=cv2.fillPoly(img, pts =co_graphic[element_graphic], color=color_label) - - - if 'imageregion' in keys: - img_poly=cv2.fillPoly(img, pts =co_img, color=labels_rgb_color[ config_params['imageregion']]) - if 'tableregion' in keys: - img_poly=cv2.fillPoly(img, pts =co_table, color=labels_rgb_color[ config_params['tableregion']]) - if 'noiseregion' in keys: - img_poly=cv2.fillPoly(img, pts =co_noise, color=labels_rgb_color[ config_params['noiseregion']]) - - if 'textregions' in keys: - if 'rest_as_paragraph' in types_text: - types_text = ['paragraph'if ttind=='rest_as_paragraph' else ttind for ttind in types_text] - for element_text in types_text: - if element_text == 'paragraph': - color_label = labels_rgb_color[ config_params['textregions']['rest_as_paragraph']] - else: - color_label = labels_rgb_color[ config_params['textregions'][element_text]] - img_poly=cv2.fillPoly(img, pts =co_text[element_text], color=color_label) - else: - for element_text in types_text: - color_label = labels_rgb_color[ config_params['textregions'][element_text]] - img_poly=cv2.fillPoly(img, pts =co_text[element_text], color=color_label) - - - if "artificial_class_on_boundary" in keys: - img_poly[:,:,0][img_boundary[:,:]==1] = artificial_class_rgb_color[0] - img_poly[:,:,1][img_boundary[:,:]==1] = artificial_class_rgb_color[1] - img_poly[:,:,2][img_boundary[:,:]==1] = artificial_class_rgb_color[2] - - if 'separatorregion' in keys: - img_poly=cv2.fillPoly(img, pts =co_sep, color=labels_rgb_color[ config_params['separatorregion']]) - - - if "printspace_as_class_in_layout" in list(config_params.keys()): - printspace_mask = np.zeros((img_poly.shape[0], img_poly.shape[1])) - 
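-                     # printspace_mask is set to 1 inside the PrintSpace/Border bounding
-                     # box below; every pixel left at 0 is then recoloured with the
-                     # dedicated printspace class colour.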
printspace_mask[bb_xywh[1]:bb_xywh[1]+bb_xywh[3], bb_xywh[0]:bb_xywh[0]+bb_xywh[2]] = 1 - - img_poly[:,:,0][printspace_mask[:,:] == 0] = printspace_class_rgb_color[0] - img_poly[:,:,1][printspace_mask[:,:] == 0] = printspace_class_rgb_color[1] - img_poly[:,:,2][printspace_mask[:,:] == 0] = printspace_class_rgb_color[2] - - - - - elif output_type == '2d': - if 'graphicregions' in keys: - if 'rest_as_decoration' in types_graphic: - types_graphic[types_graphic=='rest_as_decoration'] = 'decoration' - for element_graphic in types_graphic: - if element_graphic == 'decoration': - color_label = config_params['graphicregions']['rest_as_decoration'] - else: - color_label = config_params['graphicregions'][element_graphic] - img_poly=cv2.fillPoly(img, pts =co_graphic[element_graphic], color=color_label) - else: - for element_graphic in types_graphic: - color_label = config_params['graphicregions'][element_graphic] - img_poly=cv2.fillPoly(img, pts =co_graphic[element_graphic], color=color_label) - - - if 'imageregion' in keys: - color_label = config_params['imageregion'] - img_poly=cv2.fillPoly(img, pts =co_img, color=(color_label,color_label,color_label)) - if 'tableregion' in keys: - color_label = config_params['tableregion'] - img_poly=cv2.fillPoly(img, pts =co_table, color=(color_label,color_label,color_label)) - if 'noiseregion' in keys: - color_label = config_params['noiseregion'] - img_poly=cv2.fillPoly(img, pts =co_noise, color=(color_label,color_label,color_label)) - - if 'textregions' in keys: - if 'rest_as_paragraph' in types_text: - types_text = ['paragraph'if ttind=='rest_as_paragraph' else ttind for ttind in types_text] - for element_text in types_text: - if element_text == 'paragraph': - color_label = config_params['textregions']['rest_as_paragraph'] - else: - color_label = config_params['textregions'][element_text] - img_poly=cv2.fillPoly(img, pts =co_text[element_text], color=color_label) - else: - for element_text in types_text: - color_label = config_params['textregions'][element_text] - img_poly=cv2.fillPoly(img, pts =co_text[element_text], color=color_label) - - if "artificial_class_on_boundary" in keys: - img_poly[:,:][img_boundary[:,:]==1] = artificial_class_label - - if 'separatorregion' in keys: - color_label = config_params['separatorregion'] - img_poly=cv2.fillPoly(img, pts =co_sep, color=(color_label,color_label,color_label)) - - if "printspace_as_class_in_layout" in list(config_params.keys()): - printspace_mask = np.zeros((img_poly.shape[0], img_poly.shape[1])) - printspace_mask[bb_xywh[1]:bb_xywh[1]+bb_xywh[3], bb_xywh[0]:bb_xywh[0]+bb_xywh[2]] = 1 - - img_poly[:,:,0][printspace_mask[:,:] == 0] = printspace_class_label - img_poly[:,:,1][printspace_mask[:,:] == 0] = printspace_class_label - img_poly[:,:,2][printspace_mask[:,:] == 0] = printspace_class_label - - - - if printspace: - img_poly = img_poly[bb_xywh[1]:bb_xywh[1]+bb_xywh[3], bb_xywh[0]:bb_xywh[0]+bb_xywh[2], :] - - if 'columns_width' in list(config_params.keys()) and num_col: - img_poly = resize_image(img_poly, y_new, x_new) - - try: - xml_file_stem = os.path.splitext(gt_list[index])[0] - cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly) - except: - xml_file_stem = os.path.splitext(gt_list[index])[0] - cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly) - - - if dir_images: - org_image_name = ls_org_imgs[ls_org_imgs_stem.index(xml_file_stem)] - img_org = cv2.imread(os.path.join(dir_images, org_image_name)) - - if printspace: - img_org = 
img_org[bb_xywh[1]:bb_xywh[1]+bb_xywh[3], bb_xywh[0]:bb_xywh[0]+bb_xywh[2], :] - - if 'columns_width' in list(config_params.keys()) and num_col: - img_org = resize_image(img_org, y_new, x_new) - - cv2.imwrite(os.path.join(dir_out_images, org_image_name), img_org) - - - -def find_new_features_of_contours(contours_main): - - areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))]) - M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] - cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] - cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] - try: - x_min_main = np.array([np.min(contours_main[j][0][:, 0]) for j in range(len(contours_main))]) - - argmin_x_main = np.array([np.argmin(contours_main[j][0][:, 0]) for j in range(len(contours_main))]) - - x_min_from_argmin = np.array([contours_main[j][0][argmin_x_main[j], 0] for j in range(len(contours_main))]) - y_corr_x_min_from_argmin = np.array([contours_main[j][0][argmin_x_main[j], 1] for j in range(len(contours_main))]) - - x_max_main = np.array([np.max(contours_main[j][0][:, 0]) for j in range(len(contours_main))]) - - y_min_main = np.array([np.min(contours_main[j][0][:, 1]) for j in range(len(contours_main))]) - y_max_main = np.array([np.max(contours_main[j][0][:, 1]) for j in range(len(contours_main))]) - except: - x_min_main = np.array([np.min(contours_main[j][:, 0]) for j in range(len(contours_main))]) - - argmin_x_main = np.array([np.argmin(contours_main[j][:, 0]) for j in range(len(contours_main))]) - - x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0] for j in range(len(contours_main))]) - y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 1] for j in range(len(contours_main))]) - - x_max_main = np.array([np.max(contours_main[j][:, 0]) for j in range(len(contours_main))]) - - y_min_main = np.array([np.min(contours_main[j][:, 1]) for j in range(len(contours_main))]) - y_max_main = np.array([np.max(contours_main[j][:, 1]) for j in range(len(contours_main))]) - - return cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin -def read_xml(xml_file): - file_name = Path(xml_file).stem - tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) - root1=tree1.getroot() - alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' - - index_tot_regions = [] - tot_region_ref = [] - - for jj in root1.iter(link+'Page'): - y_len=int(jj.attrib['imageHeight']) - x_len=int(jj.attrib['imageWidth']) - - for jj in root1.iter(link+'RegionRefIndexed'): - index_tot_regions.append(jj.attrib['index']) - tot_region_ref.append(jj.attrib['regionRef']) - - if (link+'PrintSpace' in alltags) or (link+'Border' in alltags): - co_printspace = [] - if link+'PrintSpace' in alltags: - region_tags_printspace = np.unique([x for x in alltags if x.endswith('PrintSpace')]) - elif link+'Border' in alltags: - region_tags_printspace = np.unique([x for x in alltags if x.endswith('Border')]) - - for tag in region_tags_printspace: - if link+'PrintSpace' in alltags: - tag_endings_printspace = ['}PrintSpace','}printspace'] - elif link+'Border' in alltags: - tag_endings_printspace = ['}Border','}border'] - - if tag.endswith(tag_endings_printspace[0]) or tag.endswith(tag_endings_printspace[1]): - for nn in root1.iter(tag): - c_t_in = [] - sumi = 0 - for vv in nn.iter(): - # check the format of coords - if vv.tag == link + 'Coords': - coords = 
bool(vv.attrib) - if coords: - p_h = vv.attrib['points'].split(' ') - c_t_in.append( - np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) - break - else: - pass - - if vv.tag == link + 'Point': - c_t_in.append([int(float(vv.attrib['x'])), int(float(vv.attrib['y']))]) - sumi += 1 - elif vv.tag != link + 'Point' and sumi >= 1: - break - co_printspace.append(np.array(c_t_in)) - img_printspace = np.zeros( (y_len,x_len,3) ) - img_printspace=cv2.fillPoly(img_printspace, pts =co_printspace, color=(1,1,1)) - img_printspace = img_printspace.astype(np.uint8) - - imgray = cv2.cvtColor(img_printspace, cv2.COLOR_BGR2GRAY) - _, thresh = cv2.threshold(imgray, 0, 255, 0) - contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - cnt_size = np.array([cv2.contourArea(contours[j]) for j in range(len(contours))]) - cnt = contours[np.argmax(cnt_size)] - x, y, w, h = cv2.boundingRect(cnt) - - bb_coord_printspace = [x, y, w, h] - - else: - bb_coord_printspace = None - - - region_tags=np.unique([x for x in alltags if x.endswith('Region')]) - co_text_paragraph=[] - co_text_drop=[] - co_text_heading=[] - co_text_header=[] - co_text_marginalia=[] - co_text_catch=[] - co_text_page_number=[] - co_text_signature_mark=[] - co_sep=[] - co_img=[] - co_table=[] - co_graphic=[] - co_graphic_text_annotation=[] - co_graphic_decoration=[] - co_noise=[] - - co_text_paragraph_text=[] - co_text_drop_text=[] - co_text_heading_text=[] - co_text_header_text=[] - co_text_marginalia_text=[] - co_text_catch_text=[] - co_text_page_number_text=[] - co_text_signature_mark_text=[] - co_sep_text=[] - co_img_text=[] - co_table_text=[] - co_graphic_text=[] - co_graphic_text_annotation_text=[] - co_graphic_decoration_text=[] - co_noise_text=[] - - id_paragraph = [] - id_header = [] - id_heading = [] - id_marginalia = [] - - for tag in region_tags: - if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): - for nn in root1.iter(tag): - for child2 in nn: - tag2 = child2.tag - if tag2.endswith('}TextEquiv') or tag2.endswith('}TextEquiv'): - for childtext2 in child2: - if childtext2.tag.endswith('}Unicode') or childtext2.tag.endswith('}Unicode'): - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - co_text_drop_text.append(childtext2.text) - elif "type" in nn.attrib and nn.attrib['type']=='heading': - co_text_heading_text.append(childtext2.text) - elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': - co_text_signature_mark_text.append(childtext2.text) - elif "type" in nn.attrib and nn.attrib['type']=='header': - co_text_header_text.append(childtext2.text) - ###elif "type" in nn.attrib and nn.attrib['type']=='catch-word': - ###co_text_catch_text.append(childtext2.text) - ###elif "type" in nn.attrib and nn.attrib['type']=='page-number': - ###co_text_page_number_text.append(childtext2.text) - elif "type" in nn.attrib and nn.attrib['type']=='marginalia': - co_text_marginalia_text.append(childtext2.text) - else: - co_text_paragraph_text.append(childtext2.text) - c_t_in_drop=[] - c_t_in_paragraph=[] - c_t_in_heading=[] - c_t_in_header=[] - c_t_in_page_number=[] - c_t_in_signature_mark=[] - c_t_in_catch=[] - c_t_in_marginalia=[] - - - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - - coords=bool(vv.attrib) - if coords: - #print('birda1') - p_h=vv.attrib['points'].split(' ') - - - - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - - c_t_in_drop.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in 
p_h] ) ) - - elif "type" in nn.attrib and nn.attrib['type']=='heading': - ##id_heading.append(nn.attrib['id']) - c_t_in_heading.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': - - c_t_in_signature_mark.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - #print(c_t_in_paragraph) - elif "type" in nn.attrib and nn.attrib['type']=='header': - #id_header.append(nn.attrib['id']) - c_t_in_header.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - ###elif "type" in nn.attrib and nn.attrib['type']=='catch-word': - ###c_t_in_catch.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - ###elif "type" in nn.attrib and nn.attrib['type']=='page-number': - - ###c_t_in_page_number.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - elif "type" in nn.attrib and nn.attrib['type']=='marginalia': - #id_marginalia.append(nn.attrib['id']) - - c_t_in_marginalia.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - else: - #id_paragraph.append(nn.attrib['id']) - - c_t_in_paragraph.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - break - else: - pass - - - if vv.tag==link+'Point': - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - - c_t_in_drop.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif "type" in nn.attrib and nn.attrib['type']=='heading': - #id_heading.append(nn.attrib['id']) - c_t_in_heading.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - - elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': - - c_t_in_signature_mark.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - elif "type" in nn.attrib and nn.attrib['type']=='header': - #id_header.append(nn.attrib['id']) - c_t_in_header.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - - ###elif "type" in nn.attrib and nn.attrib['type']=='catch-word': - ###c_t_in_catch.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - ###sumi+=1 - - ###elif "type" in nn.attrib and nn.attrib['type']=='page-number': - - ###c_t_in_page_number.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - ###sumi+=1 - - elif "type" in nn.attrib and nn.attrib['type']=='marginalia': - #id_marginalia.append(nn.attrib['id']) - - c_t_in_marginalia.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - else: - #id_paragraph.append(nn.attrib['id']) - c_t_in_paragraph.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - - if len(c_t_in_drop)>0: - co_text_drop.append(np.array(c_t_in_drop)) - if len(c_t_in_paragraph)>0: - co_text_paragraph.append(np.array(c_t_in_paragraph)) - id_paragraph.append(nn.attrib['id']) - if len(c_t_in_heading)>0: - co_text_heading.append(np.array(c_t_in_heading)) - id_heading.append(nn.attrib['id']) - - if len(c_t_in_header)>0: - co_text_header.append(np.array(c_t_in_header)) - id_header.append(nn.attrib['id']) - if len(c_t_in_page_number)>0: - co_text_page_number.append(np.array(c_t_in_page_number)) - if len(c_t_in_catch)>0: - co_text_catch.append(np.array(c_t_in_catch)) - - if len(c_t_in_signature_mark)>0: - 
co_text_signature_mark.append(np.array(c_t_in_signature_mark)) - - if len(c_t_in_marginalia)>0: - co_text_marginalia.append(np.array(c_t_in_marginalia)) - id_marginalia.append(nn.attrib['id']) - - - elif tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): - for nn in root1.iter(tag): - c_t_in=[] - c_t_in_text_annotation=[] - c_t_in_decoration=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - - if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': - c_t_in_text_annotation.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - elif "type" in nn.attrib and nn.attrib['type']=='decoration': - c_t_in_decoration.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - else: - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - break - else: - pass - - - if vv.tag==link+'Point': - if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': - c_t_in_text_annotation.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif "type" in nn.attrib and nn.attrib['type']=='decoration': - c_t_in_decoration.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - else: - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - if len(c_t_in_text_annotation)>0: - co_graphic_text_annotation.append(np.array(c_t_in_text_annotation)) - if len(c_t_in_decoration)>0: - co_graphic_decoration.append(np.array(c_t_in_decoration)) - if len(c_t_in)>0: - co_graphic.append(np.array(c_t_in)) - - - - elif tag.endswith('}ImageRegion') or tag.endswith('}imageregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - elif vv.tag!=link+'Point' and sumi>=1: - break - co_img.append(np.array(c_t_in)) - co_img_text.append(' ') - - - elif tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - elif vv.tag!=link+'Point' and sumi>=1: - break - co_sep.append(np.array(c_t_in)) - - - - elif tag.endswith('}TableRegion') or tag.endswith('}tableregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - 
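-                 # Non-text regions (images, tables, noise) get a single-space
-                 # placeholder appended to their *_text lists so those lists stay
-                 # index-aligned with the corresponding contour lists.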
co_table.append(np.array(c_t_in)) - co_table_text.append(' ') - - elif tag.endswith('}NoiseRegion') or tag.endswith('}noiseregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - co_noise.append(np.array(c_t_in)) - co_noise_text.append(' ') - - img = np.zeros( (y_len,x_len,3) ) - img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=(1,1,1)) - - img_poly=cv2.fillPoly(img, pts =co_text_heading, color=(2,2,2)) - img_poly=cv2.fillPoly(img, pts =co_text_header, color=(2,2,2)) - img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=(3,3,3)) - img_poly=cv2.fillPoly(img, pts =co_img, color=(4,4,4)) - img_poly=cv2.fillPoly(img, pts =co_sep, color=(5,5,5)) - - return tree1, root1, bb_coord_printspace, file_name, id_paragraph, id_header+id_heading, co_text_paragraph, co_text_header+co_text_heading,\ -tot_region_ref,x_len, y_len,index_tot_regions, img_poly - - - - -def bounding_box(cnt,color, corr_order_index ): - x, y, w, h = cv2.boundingRect(cnt) - x = int(x*scale_w) - y = int(y*scale_h) - - w = int(w*scale_w) - h = int(h*scale_h) - - return [x,y,w,h,int(color), int(corr_order_index)+1] - -def resize_image(seg_in,input_height,input_width): - return cv2.resize(seg_in,(input_width,input_height),interpolation=cv2.INTER_NEAREST) - -def make_image_from_bb(width_l, height_l, bb_all): - bb_all =np.array(bb_all) - img_remade = np.zeros((height_l,width_l )) - - for i in range(bb_all.shape[0]): - img_remade[bb_all[i,1]:bb_all[i,1]+bb_all[i,3],bb_all[i,0]:bb_all[i,0]+bb_all[i,2] ] = 1 - return img_remade - -def update_list_and_return_first_with_length_bigger_than_one(index_element_to_be_updated, innner_index_pr_pos, pr_list, pos_list,list_inp): - list_inp.pop(index_element_to_be_updated) - if len(pr_list)>0: - list_inp.insert(index_element_to_be_updated, pr_list) - else: - index_element_to_be_updated = index_element_to_be_updated -1 - - list_inp.insert(index_element_to_be_updated+1, [innner_index_pr_pos]) - if len(pos_list)>0: - list_inp.insert(index_element_to_be_updated+2, pos_list) - - len_all_elements = [len(i) for i in list_inp] - list_len_bigger_1 = np.where(np.array(len_all_elements)>1) - list_len_bigger_1 = list_len_bigger_1[0] - - if len(list_len_bigger_1)>0: - early_list_bigger_than_one = list_len_bigger_1[0] - else: - early_list_bigger_than_one = -20 - return list_inp, early_list_bigger_than_one - -def overlay_layout_on_image(prediction, img, cx_ordered, cy_ordered, color, thickness): - - unique_classes = np.unique(prediction[:,:,0]) - rgb_colors = {'0' : [255, 255, 255], - '1' : [255, 0, 0], - '2' : [0, 0, 255], - '3' : [255, 0, 125], - '4' : [125, 125, 125], - '5' : [125, 125, 0], - '6' : [0, 125, 255], - '7' : [0, 125, 0], - '8' : [125, 125, 125], - '9' : [0, 125, 255], - '10' : [125, 0, 125], - '11' : [0, 255, 0], - '12' : [255, 125, 0], - '13' : [0, 255, 255], - '14' : [255, 125, 125], - '15' : [255, 0, 255]} - - layout_only = np.zeros(prediction.shape) - - for unq_class in unique_classes: - rgb_class_unique = rgb_colors[str(int(unq_class))] - layout_only[:,:,0][prediction[:,:,0]==unq_class] = rgb_class_unique[0] - 
layout_only[:,:,1][prediction[:,:,0]==unq_class] = rgb_class_unique[1] - layout_only[:,:,2][prediction[:,:,0]==unq_class] = rgb_class_unique[2] - - - - #img = self.resize_image(img, layout_only.shape[0], layout_only.shape[1]) - - layout_only = layout_only.astype(np.int32) - - for i in range(len(cx_ordered)-1): - start_point = (int(cx_ordered[i]), int(cy_ordered[i])) - end_point = (int(cx_ordered[i+1]), int(cy_ordered[i+1])) - layout_only = cv2.arrowedLine(layout_only, start_point, end_point, - color, thickness, tipLength = 0.03) - - img = img.astype(np.int32) - - - - added_image = cv2.addWeighted(img,0.5,layout_only,0.1,0) - - return added_image - -def find_format_of_given_filename_in_dir(dir_imgs, f_name): - ls_imgs = os.listdir(dir_imgs) - file_interested = [ind for ind in ls_imgs if ind.startswith(f_name+'.')] - return file_interested[0] diff --git a/src/eynollah/training/inference.py b/src/eynollah/training/inference.py deleted file mode 100644 index 3fa8fd6..0000000 --- a/src/eynollah/training/inference.py +++ /dev/null @@ -1,680 +0,0 @@ -import sys -import os -import warnings -import json - -import numpy as np -import cv2 -from tensorflow.keras.models import load_model -import tensorflow as tf -from tensorflow.keras import backend as K -from tensorflow.keras.layers import * -import click -from tensorflow.python.keras import backend as tensorflow_backend -import xml.etree.ElementTree as ET - -from .gt_gen_utils import ( - filter_contours_area_of_image, - find_new_features_of_contours, - read_xml, - resize_image, - update_list_and_return_first_with_length_bigger_than_one -) -from .models import ( - PatchEncoder, - Patches -) - -with warnings.catch_warnings(): - warnings.simplefilter("ignore") - -__doc__=\ -""" -Tool to load model and predict for given image. 
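- 
- A hypothetical invocation (file and directory names are placeholders; the flags
- correspond to the click options defined at the bottom of this module):
- 
-     python -m eynollah.training.inference --model ./model_dir --image page.png \
-         -p --save pred.png --save_layout layout.png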
-""" - -class sbb_predict: - def __init__(self,image, dir_in, model, task, config_params_model, patches, save, save_layout, ground_truth, xml_file, out, min_area): - self.image=image - self.dir_in=dir_in - self.patches=patches - self.save=save - self.save_layout=save_layout - self.model_dir=model - self.ground_truth=ground_truth - self.task=task - self.config_params_model=config_params_model - self.xml_file = xml_file - self.out = out - if min_area: - self.min_area = float(min_area) - else: - self.min_area = 0 - - def resize_image(self,img_in,input_height,input_width): - return cv2.resize( img_in, ( input_width,input_height) ,interpolation=cv2.INTER_NEAREST) - - - def color_images(self,seg): - ann_u=range(self.n_classes) - if len(np.shape(seg))==3: - seg=seg[:,:,0] - - seg_img=np.zeros((np.shape(seg)[0],np.shape(seg)[1],3)).astype(np.uint8) - - for c in ann_u: - c=int(c) - seg_img[:,:,0][seg==c]=c - seg_img[:,:,1][seg==c]=c - seg_img[:,:,2][seg==c]=c - return seg_img - - def otsu_copy_binary(self,img): - img_r=np.zeros((img.shape[0],img.shape[1],3)) - img1=img[:,:,0] - - #print(img.min()) - #print(img[:,:,0].min()) - #blur = cv2.GaussianBlur(img,(5,5)) - #ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) - retval1, threshold1 = cv2.threshold(img1, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) - - - - img_r[:,:,0]=threshold1 - img_r[:,:,1]=threshold1 - img_r[:,:,2]=threshold1 - #img_r=img_r/float(np.max(img_r))*255 - return img_r - - def otsu_copy(self,img): - img_r=np.zeros((img.shape[0],img.shape[1],3)) - #img1=img[:,:,0] - - #print(img.min()) - #print(img[:,:,0].min()) - #blur = cv2.GaussianBlur(img,(5,5)) - #ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) - _, threshold1 = cv2.threshold(img[:,:,0], 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) - _, threshold2 = cv2.threshold(img[:,:,1], 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) - _, threshold3 = cv2.threshold(img[:,:,2], 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) - - - - img_r[:,:,0]=threshold1 - img_r[:,:,1]=threshold2 - img_r[:,:,2]=threshold3 - ###img_r=img_r/float(np.max(img_r))*255 - return img_r - - def soft_dice_loss(self,y_true, y_pred, epsilon=1e-6): - - axes = tuple(range(1, len(y_pred.shape)-1)) - - numerator = 2. 
* K.sum(y_pred * y_true, axes) - - denominator = K.sum(K.square(y_pred) + K.square(y_true), axes) - return 1.00 - K.mean(numerator / (denominator + epsilon)) # average over classes and batch - - def weighted_categorical_crossentropy(self,weights=None): - - def loss(y_true, y_pred): - labels_floats = tf.cast(y_true, tf.float32) - per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats,logits=y_pred) - - if weights is not None: - weight_mask = tf.maximum(tf.reduce_max(tf.constant( - np.array(weights, dtype=np.float32)[None, None, None]) - * labels_floats, axis=-1), 1.0) - per_pixel_loss = per_pixel_loss * weight_mask[:, :, :, None] - return tf.reduce_mean(per_pixel_loss) - return self.loss - - - def IoU(self,Yi,y_predi): - ## mean Intersection over Union - ## Mean IoU = TP/(FN + TP + FP) - - IoUs = [] - Nclass = np.unique(Yi) - for c in Nclass: - TP = np.sum( (Yi == c)&(y_predi==c) ) - FP = np.sum( (Yi != c)&(y_predi==c) ) - FN = np.sum( (Yi == c)&(y_predi != c)) - IoU = TP/float(TP + FP + FN) - if self.n_classes>2: - print("class {:02.0f}: #TP={:6.0f}, #FP={:6.0f}, #FN={:5.0f}, IoU={:4.3f}".format(c,TP,FP,FN,IoU)) - IoUs.append(IoU) - if self.n_classes>2: - mIoU = np.mean(IoUs) - print("_________________") - print("Mean IoU: {:4.3f}".format(mIoU)) - return mIoU - elif self.n_classes==2: - mIoU = IoUs[1] - print("_________________") - print("IoU: {:4.3f}".format(mIoU)) - return mIoU - - def start_new_session_and_model(self): - - config = tf.compat.v1.ConfigProto() - config.gpu_options.allow_growth = True - - session = tf.compat.v1.Session(config=config) # tf.InteractiveSession() - tensorflow_backend.set_session(session) - #tensorflow.keras.layers.custom_layer = PatchEncoder - #tensorflow.keras.layers.custom_layer = Patches - self.model = load_model(self.model_dir , compile=False,custom_objects = {"PatchEncoder": PatchEncoder, "Patches": Patches}) - #config = tf.ConfigProto() - #config.gpu_options.allow_growth=True - - #self.session = tf.InteractiveSession() - #keras.losses.custom_loss = self.weighted_categorical_crossentropy - #self.model = load_model(self.model_dir , compile=False) - - - ##if self.weights_dir!=None: - ##self.model.load_weights(self.weights_dir) - - if self.task != 'classification' and self.task != 'reading_order': - self.img_height=self.model.layers[len(self.model.layers)-1].output_shape[1] - self.img_width=self.model.layers[len(self.model.layers)-1].output_shape[2] - self.n_classes=self.model.layers[len(self.model.layers)-1].output_shape[3] - - def visualize_model_output(self, prediction, img, task): - if task == "binarization": - prediction = prediction * -1 - prediction = prediction + 1 - added_image = prediction * 255 - layout_only = None - else: - unique_classes = np.unique(prediction[:,:,0]) - rgb_colors = {'0' : [255, 255, 255], - '1' : [255, 0, 0], - '2' : [255, 125, 0], - '3' : [255, 0, 125], - '4' : [125, 125, 125], - '5' : [125, 125, 0], - '6' : [0, 125, 255], - '7' : [0, 125, 0], - '8' : [125, 125, 125], - '9' : [0, 125, 255], - '10' : [125, 0, 125], - '11' : [0, 255, 0], - '12' : [0, 0, 255], - '13' : [0, 255, 255], - '14' : [255, 125, 125], - '15' : [255, 0, 255]} - - layout_only = np.zeros(prediction.shape) - - for unq_class in unique_classes: - rgb_class_unique = rgb_colors[str(int(unq_class))] - layout_only[:,:,0][prediction[:,:,0]==unq_class] = rgb_class_unique[0] - layout_only[:,:,1][prediction[:,:,0]==unq_class] = rgb_class_unique[1] - layout_only[:,:,2][prediction[:,:,0]==unq_class] = rgb_class_unique[2] - - - - img = 
self.resize_image(img, layout_only.shape[0], layout_only.shape[1]) - - layout_only = layout_only.astype(np.int32) - img = img.astype(np.int32) - - - - added_image = cv2.addWeighted(img,0.5,layout_only,0.1,0) - - return added_image, layout_only - - def predict(self, image_dir): - if self.task == 'classification': - classes_names = self.config_params_model['classification_classes_name'] - img_1ch = img=cv2.imread(image_dir, 0) - - img_1ch = img_1ch / 255.0 - img_1ch = cv2.resize(img_1ch, (self.config_params_model['input_height'], self.config_params_model['input_width']), interpolation=cv2.INTER_NEAREST) - img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3)) - img_in[0, :, :, 0] = img_1ch[:, :] - img_in[0, :, :, 1] = img_1ch[:, :] - img_in[0, :, :, 2] = img_1ch[:, :] - - label_p_pred = self.model.predict(img_in, verbose=0) - index_class = np.argmax(label_p_pred[0]) - - print("Predicted Class: {}".format(classes_names[str(int(index_class))])) - elif self.task == 'reading_order': - img_height = self.config_params_model['input_height'] - img_width = self.config_params_model['input_width'] - - tree_xml, root_xml, bb_coord_printspace, file_name, id_paragraph, id_header, co_text_paragraph, co_text_header, tot_region_ref, x_len, y_len, index_tot_regions, img_poly = read_xml(self.xml_file) - _, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, _ = find_new_features_of_contours(co_text_header) - - img_header_and_sep = np.zeros((y_len,x_len), dtype='uint8') - - - for j in range(len(cy_main)): - img_header_and_sep[int(y_max_main[j]):int(y_max_main[j])+12,int(x_min_main[j]):int(x_max_main[j]) ] = 1 - - co_text_all = co_text_paragraph + co_text_header - id_all_text = id_paragraph + id_header - - - ##texts_corr_order_index = [index_tot_regions[tot_region_ref.index(i)] for i in id_all_text ] - ##texts_corr_order_index_int = [int(x) for x in texts_corr_order_index] - texts_corr_order_index_int = list(np.array(range(len(co_text_all)))) - - #print(texts_corr_order_index_int) - - max_area = 1 - #print(np.shape(co_text_all[0]), len( np.shape(co_text_all[0]) ),'co_text_all') - #co_text_all = filter_contours_area_of_image_tables(img_poly, co_text_all, _, max_area, min_area) - #print(co_text_all,'co_text_all') - co_text_all, texts_corr_order_index_int, _ = filter_contours_area_of_image(img_poly, co_text_all, texts_corr_order_index_int, max_area, self.min_area) - - #print(texts_corr_order_index_int) - - #co_text_all = [co_text_all[index] for index in texts_corr_order_index_int] - id_all_text = [id_all_text[index] for index in texts_corr_order_index_int] - - labels_con = np.zeros((y_len,x_len,len(co_text_all)),dtype='uint8') - for i in range(len(co_text_all)): - img_label = np.zeros((y_len,x_len,3),dtype='uint8') - img_label=cv2.fillPoly(img_label, pts =[co_text_all[i]], color=(1,1,1)) - labels_con[:,:,i] = img_label[:,:,0] - - if bb_coord_printspace: - #bb_coord_printspace[x,y,w,h,_,_] - x = bb_coord_printspace[0] - y = bb_coord_printspace[1] - w = bb_coord_printspace[2] - h = bb_coord_printspace[3] - labels_con = labels_con[y:y+h, x:x+w, :] - img_poly = img_poly[y:y+h, x:x+w, :] - img_header_and_sep = img_header_and_sep[y:y+h, x:x+w] - - - - img3= np.copy(img_poly) - labels_con = resize_image(labels_con, img_height, img_width) - - img_header_and_sep = resize_image(img_header_and_sep, img_height, img_width) - - img3= resize_image (img3, img_height, img_width) - img3 = img3.astype(np.uint16) - - inference_bs = 1#4 - - input_1= np.zeros( (inference_bs, img_height, img_width,3)) - - - 
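-             # The loop below orders the regions by using the model as a pairwise
-             # comparator: for a pivot region i, each remaining region j is encoded as
-             # a 3-channel input (pivot map / 3. in channel 0, full layout map / 5. in
-             # channel 1, candidate map / 3. in channel 2, with separator pixels set to
-             # 2 and header strips to 3 on the two region maps). The model predicts
-             # whether j reads after i; j is routed into post_list or pr_list
-             # accordingly, and the resulting sublists are re-queued until every
-             # sublist has been reduced to a single index.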
starting_list_of_regions = [list(range(labels_con.shape[2]))] - - index_update = 0 - index_selected = starting_list_of_regions[0] - - scalibility_num = 0 - while index_update>=0: - ij_list = starting_list_of_regions[index_update] - i = ij_list[0] - ij_list.pop(0) - - - pr_list = [] - post_list = [] - - batch_counter = 0 - tot_counter = 1 - - tot_iteration = len(ij_list) - full_bs_ite= tot_iteration//inference_bs - last_bs = tot_iteration % inference_bs - - jbatch_indexer =[] - for j in ij_list: - img1= np.repeat(labels_con[:,:,i][:, :, np.newaxis], 3, axis=2) - img2 = np.repeat(labels_con[:,:,j][:, :, np.newaxis], 3, axis=2) - - - img2[:,:,0][img3[:,:,0]==5] = 2 - img2[:,:,0][img_header_and_sep[:,:]==1] = 3 - - - - img1[:,:,0][img3[:,:,0]==5] = 2 - img1[:,:,0][img_header_and_sep[:,:]==1] = 3 - - #input_1= np.zeros( (height1, width1,3)) - - - jbatch_indexer.append(j) - - input_1[batch_counter,:,:,0] = img1[:,:,0]/3. - input_1[batch_counter,:,:,2] = img2[:,:,0]/3. - input_1[batch_counter,:,:,1] = img3[:,:,0]/5. - #input_1[batch_counter,:,:,:]= np.zeros( (batch_counter, height1, width1,3)) - batch_counter = batch_counter+1 - - #input_1[:,:,0] = img1[:,:,0]/3. - #input_1[:,:,2] = img2[:,:,0]/3. - #input_1[:,:,1] = img3[:,:,0]/5. - - if batch_counter==inference_bs or ( (tot_counter//inference_bs)==full_bs_ite and tot_counter%inference_bs==last_bs): - y_pr = self.model.predict(input_1 , verbose=0) - scalibility_num = scalibility_num+1 - - if batch_counter==inference_bs: - iteration_batches = inference_bs - else: - iteration_batches = last_bs - for jb in range(iteration_batches): - if y_pr[jb][0]>=0.5: - post_list.append(jbatch_indexer[jb]) - else: - pr_list.append(jbatch_indexer[jb]) - - batch_counter = 0 - jbatch_indexer = [] - - tot_counter = tot_counter+1 - - starting_list_of_regions, index_update = update_list_and_return_first_with_length_bigger_than_one(index_update, i, pr_list, post_list,starting_list_of_regions) - - - index_sort = [i[0] for i in starting_list_of_regions ] - - id_all_text = np.array(id_all_text)[index_sort] - - alltags=[elem.tag for elem in root_xml.iter()] - - - - link=alltags[0].split('}')[0]+'}' - name_space = alltags[0].split('}')[0] - name_space = name_space.split('{')[1] - - page_element = root_xml.find(link+'Page') - - """ - ro_subelement = ET.SubElement(page_element, 'ReadingOrder') - #print(page_element, 'page_element') - - #new_element = ET.Element('ReadingOrder') - - new_element_element = ET.Element('OrderedGroup') - new_element_element.set('id', "ro357564684568544579089") - - for index, id_text in enumerate(id_all_text): - new_element_2 = ET.Element('RegionRefIndexed') - new_element_2.set('regionRef', id_all_text[index]) - new_element_2.set('index', str(index_sort[index])) - - new_element_element.append(new_element_2) - - ro_subelement.append(new_element_element) - """ - ##ro_subelement = ET.SubElement(page_element, 'ReadingOrder') - - ro_subelement = ET.Element('ReadingOrder') - - ro_subelement2 = ET.SubElement(ro_subelement, 'OrderedGroup') - ro_subelement2.set('id', "ro357564684568544579089") - - for index, id_text in enumerate(id_all_text): - new_element_2 = ET.SubElement(ro_subelement2, 'RegionRefIndexed') - new_element_2.set('regionRef', id_all_text[index]) - new_element_2.set('index', str(index)) - - if (link+'PrintSpace' in alltags) or (link+'Border' in alltags): - page_element.insert(1, ro_subelement) - else: - page_element.insert(0, ro_subelement) - - alltags=[elem.tag for elem in root_xml.iter()] - - ET.register_namespace("",name_space) - 
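-             # Registering the PAGE namespace as the default prevents ElementTree from
-             # serialising every element with an "ns0:" prefix when the tree is
-             # written back out below.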
tree_xml.write(os.path.join(self.out, file_name+'.xml'),xml_declaration=True,method='xml',encoding="utf8",default_namespace=None) - #tree_xml.write('library2.xml') - - else: - if self.patches: - #def textline_contours(img,input_width,input_height,n_classes,model): - - img=cv2.imread(image_dir) - self.img_org = np.copy(img) - - if img.shape[0] < self.img_height: - img = self.resize_image(img, self.img_height, img.shape[1]) - - if img.shape[1] < self.img_width: - img = self.resize_image(img, img.shape[0], self.img_width) - - margin = int(0.1 * self.img_width) - width_mid = self.img_width - 2 * margin - height_mid = self.img_height - 2 * margin - img = img / float(255.0) - - img_h = img.shape[0] - img_w = img.shape[1] - - prediction_true = np.zeros((img_h, img_w, 3)) - nxf = img_w / float(width_mid) - nyf = img_h / float(height_mid) - - nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf) - nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf) - - for i in range(nxf): - for j in range(nyf): - if i == 0: - index_x_d = i * width_mid - index_x_u = index_x_d + self.img_width - else: - index_x_d = i * width_mid - index_x_u = index_x_d + self.img_width - if j == 0: - index_y_d = j * height_mid - index_y_u = index_y_d + self.img_height - else: - index_y_d = j * height_mid - index_y_u = index_y_d + self.img_height - - if index_x_u > img_w: - index_x_u = img_w - index_x_d = img_w - self.img_width - if index_y_u > img_h: - index_y_u = img_h - index_y_d = img_h - self.img_height - - img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] - label_p_pred = self.model.predict(img_patch.reshape(1, img_patch.shape[0], img_patch.shape[1], img_patch.shape[2]), - verbose=0) - - if self.task == 'enhancement': - seg = label_p_pred[0, :, :, :] - seg = seg * 255 - elif self.task == 'segmentation' or self.task == 'binarization': - seg = np.argmax(label_p_pred, axis=3)[0] - seg = np.repeat(seg[:, :, np.newaxis], 3, axis=2) - - - if i == 0 and j == 0: - seg = seg[0 : seg.shape[0] - margin, 0 : seg.shape[1] - margin] - prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg - elif i == nxf - 1 and j == nyf - 1: - seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - 0] - prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - 0, :] = seg - elif i == 0 and j == nyf - 1: - seg = seg[margin : seg.shape[0] - 0, 0 : seg.shape[1] - margin] - prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + 0 : index_x_u - margin, :] = seg - elif i == nxf - 1 and j == 0: - seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - 0] - prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg - elif i == 0 and j != 0 and j != nyf - 1: - seg = seg[margin : seg.shape[0] - margin, 0 : seg.shape[1] - margin] - prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg - elif i == nxf - 1 and j != 0 and j != nyf - 1: - seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - 0] - prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg - elif i != 0 and i != nxf - 1 and j == 0: - seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - margin] - prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg - elif i != 0 and i != nxf - 1 and j == nyf - 1: - seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - margin] - prediction_true[index_y_d + margin : index_y_u - 0, 
index_x_d + margin : index_x_u - margin, :] = seg
-                    else:
-                        seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - margin]
-                        prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg
-            prediction_true = prediction_true.astype(int)
-            prediction_true = cv2.resize(prediction_true, (self.img_org.shape[1], self.img_org.shape[0]), interpolation=cv2.INTER_NEAREST)
-            return prediction_true
-
-        else:
-            img = cv2.imread(image_dir)
-            self.img_org = np.copy(img)
-
-            width = self.img_width
-            height = self.img_height
-
-            img = img / 255.0
-            img = self.resize_image(img, self.img_height, self.img_width)
-
-            label_p_pred = self.model.predict(
-                img.reshape(1, img.shape[0], img.shape[1], img.shape[2]))
-
-            if self.task == 'enhancement':
-                seg = label_p_pred[0, :, :, :]
-                seg = seg * 255
-            elif self.task == 'segmentation' or self.task == 'binarization':
-                seg = np.argmax(label_p_pred, axis=3)[0]
-                seg = np.repeat(seg[:, :, np.newaxis], 3, axis=2)
-
-            prediction_true = seg.astype(int)
-            prediction_true = cv2.resize(prediction_true, (self.img_org.shape[1], self.img_org.shape[0]), interpolation=cv2.INTER_NEAREST)
-            return prediction_true
-
-    def run(self):
-        self.start_new_session_and_model()
-        if self.image:
-            res = self.predict(image_dir=self.image)
-
-            if self.task == 'classification' or self.task == 'reading_order':
-                pass
-            elif self.task == 'enhancement':
-                if self.save:
-                    cv2.imwrite(self.save, res)
-            else:
-                img_seg_overlayed, only_layout = self.visualize_model_output(res, self.img_org, self.task)
-                if self.save:
-                    cv2.imwrite(self.save, img_seg_overlayed)
-                if self.save_layout:
-                    cv2.imwrite(self.save_layout, only_layout)
-
-            if self.ground_truth:
-                gt_img = cv2.imread(self.ground_truth)
-                self.IoU(gt_img[:, :, 0], res[:, :, 0])
-
-        else:
-            ls_images = os.listdir(self.dir_in)
-            for ind_image in ls_images:
-                f_name = ind_image.split('.')[0]
-                image_dir = os.path.join(self.dir_in, ind_image)
-                res = self.predict(image_dir)
-
-                if self.task == 'classification' or self.task == 'reading_order':
-                    pass
-                elif self.task == 'enhancement':
-                    self.save = os.path.join(self.out, f_name + '.png')
-                    cv2.imwrite(self.save, res)
-                else:
-                    img_seg_overlayed, only_layout = self.visualize_model_output(res, self.img_org, self.task)
-                    self.save = os.path.join(self.out, f_name + '_overlayed.png')
-                    cv2.imwrite(self.save, img_seg_overlayed)
-                    self.save_layout = os.path.join(self.out, f_name + '_layout.png')
-                    cv2.imwrite(self.save_layout, only_layout)
-
-                if self.ground_truth:
-                    gt_img = cv2.imread(self.ground_truth)
-                    self.IoU(gt_img[:, :, 0], res[:, :, 0])
-
-
-@click.command()
-@click.option(
-    "--image",
-    "-i",
-    help="Image filename.",
-    type=click.Path(exists=True, dir_okay=False),
-)
-@click.option(
-    "--dir_in",
-    "-di",
-    help="Directory of input images.",
-    type=click.Path(exists=True, file_okay=False),
-)
-@click.option(
-    "--out",
-    "-o",
-    help="Output directory where the XML with the detected reading order will be written.",
-    type=click.Path(exists=True, file_okay=False),
-)
-@click.option(
-    "--patches/--no-patches",
-    "-p/-nop",
-    is_flag=True,
-    help="If set to true, the tool will run inference on patches of the input image.",
-)
-@click.option(
-    "--save",
-    "-s",
-    help="Save the prediction as a PNG file in the current folder.",
-)
-@click.option(
-    "--save_layout",
-    "-sl",
-    help="Save only the layout prediction as a PNG file in the current folder.",
-)
-@click.option(
-    "--model",
-    "-m",
-    help="Directory of models.",
-    type=click.Path(exists=True, file_okay=False),
-    required=True,
-)
-@click.option(
"--ground_truth", - "-gt", - help="ground truth directory if you want to see the iou of prediction.", -) -@click.option( - "--xml_file", - "-xml", - help="xml file with layout coordinates that reading order detection will be implemented on. The result will be written in the same xml file.", -) - -@click.option( - "--min_area", - "-min", - help="min area size of regions considered for reading order detection. The default value is zero and means that all text regions are considered for reading order.", -) -def main(image, dir_in, model, patches, save, save_layout, ground_truth, xml_file, out, min_area): - assert image or dir_in, "Either a single image -i or a dir_in -di is required" - with open(os.path.join(model,'config.json')) as f: - config_params_model = json.load(f) - task = config_params_model['task'] - if task != 'classification' and task != 'reading_order': - if image and not save: - print("Error: You used one of segmentation or binarization task with image input but not set -s, you need a filename to save visualized output with -s") - sys.exit(1) - if dir_in and not out: - print("Error: You used one of segmentation or binarization task with dir_in but not set -out") - sys.exit(1) - x=sbb_predict(image, dir_in, model, task, config_params_model, patches, save, save_layout, ground_truth, xml_file, out, min_area) - x.run() - diff --git a/src/eynollah/training/metrics.py b/src/eynollah/training/metrics.py deleted file mode 100644 index cd30b02..0000000 --- a/src/eynollah/training/metrics.py +++ /dev/null @@ -1,357 +0,0 @@ -from tensorflow.keras import backend as K -import tensorflow as tf -import numpy as np - - -def focal_loss(gamma=2., alpha=4.): - gamma = float(gamma) - alpha = float(alpha) - - def focal_loss_fixed(y_true, y_pred): - """Focal loss for multi-classification - FL(p_t)=-alpha(1-p_t)^{gamma}ln(p_t) - Notice: y_pred is probability after softmax - gradient is d(Fl)/d(p_t) not d(Fl)/d(x) as described in paper - d(Fl)/d(p_t) * [p_t(1-p_t)] = d(Fl)/d(x) - Focal Loss for Dense Object Detection - https://arxiv.org/abs/1708.02002 - - Arguments: - y_true {tensor} -- ground truth labels, shape of [batch_size, num_cls] - y_pred {tensor} -- model's output, shape of [batch_size, num_cls] - - Keyword Arguments: - gamma {float} -- (default: {2.0}) - alpha {float} -- (default: {4.0}) - - Returns: - [tensor] -- loss. 
- """ - epsilon = 1.e-9 - y_true = tf.convert_to_tensor(y_true, tf.float32) - y_pred = tf.convert_to_tensor(y_pred, tf.float32) - - model_out = tf.add(y_pred, epsilon) - ce = tf.multiply(y_true, -tf.log(model_out)) - weight = tf.multiply(y_true, tf.pow(tf.subtract(1., model_out), gamma)) - fl = tf.multiply(alpha, tf.multiply(weight, ce)) - reduced_fl = tf.reduce_max(fl, axis=1) - return tf.reduce_mean(reduced_fl) - - return focal_loss_fixed - - -def weighted_categorical_crossentropy(weights=None): - """ weighted_categorical_crossentropy - - Args: - * weights: crossentropy weights - Returns: - * weighted categorical crossentropy function - """ - - def loss(y_true, y_pred): - labels_floats = tf.cast(y_true, tf.float32) - per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats, logits=y_pred) - - if weights is not None: - weight_mask = tf.maximum(tf.reduce_max(tf.constant( - np.array(weights, dtype=np.float32)[None, None, None]) - * labels_floats, axis=-1), 1.0) - per_pixel_loss = per_pixel_loss * weight_mask[:, :, :, None] - return tf.reduce_mean(per_pixel_loss) - - return loss - - -def image_categorical_cross_entropy(y_true, y_pred, weights=None): - """ - :param y_true: tensor of shape (batch_size, height, width) representing the ground truth. - :param y_pred: tensor of shape (batch_size, height, width) representing the prediction. - :return: The mean cross-entropy on softmaxed tensors. - """ - - labels_floats = tf.cast(y_true, tf.float32) - per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats, logits=y_pred) - - if weights is not None: - weight_mask = tf.maximum( - tf.reduce_max(tf.constant( - np.array(weights, dtype=np.float32)[None, None, None]) - * labels_floats, axis=-1), 1.0) - per_pixel_loss = per_pixel_loss * weight_mask[:, :, :, None] - - return tf.reduce_mean(per_pixel_loss) - - -def class_tversky(y_true, y_pred): - smooth = 1.0 # 1.00 - - y_true = K.permute_dimensions(y_true, (3, 1, 2, 0)) - y_pred = K.permute_dimensions(y_pred, (3, 1, 2, 0)) - - y_true_pos = K.batch_flatten(y_true) - y_pred_pos = K.batch_flatten(y_pred) - true_pos = K.sum(y_true_pos * y_pred_pos, 1) - false_neg = K.sum(y_true_pos * (1 - y_pred_pos), 1) - false_pos = K.sum((1 - y_true_pos) * y_pred_pos, 1) - alpha = 0.2 # 0.5 - beta = 0.8 - return (true_pos + smooth) / (true_pos + alpha * false_neg + beta * false_pos + smooth) - - -def focal_tversky_loss(y_true, y_pred): - pt_1 = class_tversky(y_true, y_pred) - gamma = 1.3 # 4./3.0#1.3#4.0/3.00# 0.75 - return K.sum(K.pow((1 - pt_1), gamma)) - - -def generalized_dice_coeff2(y_true, y_pred): - n_el = 1 - for dim in y_true.shape: - n_el *= int(dim) - n_cl = y_true.shape[-1] - w = K.zeros(shape=(n_cl,)) - w = (K.sum(y_true, axis=(0, 1, 2))) / n_el - w = 1 / (w ** 2 + 0.000001) - numerator = y_true * y_pred - numerator = w * K.sum(numerator, (0, 1, 2)) - numerator = K.sum(numerator) - denominator = y_true + y_pred - denominator = w * K.sum(denominator, (0, 1, 2)) - denominator = K.sum(denominator) - return 2 * numerator / denominator - - -def generalized_dice_coeff(y_true, y_pred): - axes = tuple(range(1, len(y_pred.shape) - 1)) - Ncl = y_pred.shape[-1] - w = K.zeros(shape=(Ncl,)) - w = K.sum(y_true, axis=axes) - w = 1 / (w ** 2 + 0.000001) - # Compute gen dice coef: - numerator = y_true * y_pred - numerator = w * K.sum(numerator, axes) - numerator = K.sum(numerator) - - denominator = y_true + y_pred - denominator = w * K.sum(denominator, axes) - denominator = K.sum(denominator) - - gen_dice_coef = 2 * numerator / 
denominator - - return gen_dice_coef - - -def generalized_dice_loss(y_true, y_pred): - return 1 - generalized_dice_coeff2(y_true, y_pred) - - -def soft_dice_loss(y_true, y_pred, epsilon=1e-6): - """ - Soft dice loss calculation for arbitrary batch size, number of classes, and number of spatial dimensions. - Assumes the `channels_last` format. - - # Arguments - y_true: b x X x Y( x Z...) x c One hot encoding of ground truth - y_pred: b x X x Y( x Z...) x c Network output, must sum to 1 over c channel (such as after softmax) - epsilon: Used for numerical stability to avoid divide by zero errors - - # References - V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation - https://arxiv.org/abs/1606.04797 - More details on Dice loss formulation - https://mediatum.ub.tum.de/doc/1395260/1395260.pdf (page 72) - - Adapted from https://github.com/Lasagne/Recipes/issues/99#issuecomment-347775022 - """ - - # skip the batch and class axis for calculating Dice score - axes = tuple(range(1, len(y_pred.shape) - 1)) - - numerator = 2. * K.sum(y_pred * y_true, axes) - - denominator = K.sum(K.square(y_pred) + K.square(y_true), axes) - return 1.00 - K.mean(numerator / (denominator + epsilon)) # average over classes and batch - - -def seg_metrics(y_true, y_pred, metric_name, metric_type='standard', drop_last=True, mean_per_class=False, - verbose=False): - """ - Compute mean metrics of two segmentation masks, via Keras. - - IoU(A,B) = |A & B| / (| A U B|) - Dice(A,B) = 2*|A & B| / (|A| + |B|) - - Args: - y_true: true masks, one-hot encoded. - y_pred: predicted masks, either softmax outputs, or one-hot encoded. - metric_name: metric to be computed, either 'iou' or 'dice'. - metric_type: one of 'standard' (default), 'soft', 'naive'. - In the standard version, y_pred is one-hot encoded and the mean - is taken only over classes that are present (in y_true or y_pred). - The 'soft' version of the metrics are computed without one-hot - encoding y_pred. - The 'naive' version return mean metrics where absent classes contribute - to the class mean as 1.0 (instead of being dropped from the mean). - drop_last = True: boolean flag to drop last class (usually reserved - for background class in semantic segmentation) - mean_per_class = False: return mean along batch axis for each class. - verbose = False: print intermediate results such as intersection, union - (as number of pixels). - Returns: - IoU/Dice of y_true and y_pred, as a float, unless mean_per_class == True - in which case it returns the per-class metric, averaged over the batch. 
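-    Example (illustrative):
-        For binary masks with |A| = |B| = 4 and |A & B| = 2,
-        IoU = 2/6 (about 0.33) and Dice = 4/8 = 0.5.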
-
-    Inputs are B*W*H*N tensors, with
-        B = batch size,
-        W = width,
-        H = height,
-        N = number of classes
-    """
-
-    flag_soft = (metric_type == 'soft')
-    flag_naive_mean = (metric_type == 'naive')
-
-    # always assume one or more classes
-    num_classes = K.shape(y_true)[-1]
-
-    if not flag_soft:
-        # get one-hot encoded masks from y_pred (true masks should already be one-hot)
-        y_pred = K.one_hot(K.argmax(y_pred), num_classes)
-        y_true = K.one_hot(K.argmax(y_true), num_classes)
-
-    # if already one-hot, could have skipped above command
-    # keras uses float32 instead of float64, would give error down (but numpy arrays or keras.to_categorical gives float64)
-    y_true = K.cast(y_true, 'float32')
-    y_pred = K.cast(y_pred, 'float32')
-
-    # intersection and union shapes are batch_size * n_classes (values = area in pixels)
-    axes = (1, 2)  # W,H axes of each image
-    intersection = K.sum(K.abs(y_true * y_pred), axis=axes)
-    mask_sum = K.sum(K.abs(y_true), axis=axes) + K.sum(K.abs(y_pred), axis=axes)
-    union = mask_sum - intersection  # or, np.logical_or(y_pred, y_true) for one-hot
-
-    smooth = .001
-    iou = (intersection + smooth) / (union + smooth)
-    dice = 2 * (intersection + smooth) / (mask_sum + smooth)
-
-    metric = {'iou': iou, 'dice': dice}[metric_name]
-
-    # define mask to be 0 when no pixels are present in either y_true or y_pred, 1 otherwise
-    mask = K.cast(K.not_equal(union, 0), 'float32')
-
-    if drop_last:
-        metric = metric[:, :-1]
-        mask = mask[:, :-1]
-
-    if verbose:
-        print('intersection, union')
-        print(K.eval(intersection), K.eval(union))
-        print(K.eval(intersection / union))
-
-    # return mean metrics: remaining axes are (batch, classes)
-    if flag_naive_mean:
-        return K.mean(metric)
-
-    # take mean only over non-absent classes
-    class_count = K.sum(mask, axis=0)
-    non_zero = tf.greater(class_count, 0)
-    non_zero_sum = tf.boolean_mask(K.sum(metric * mask, axis=0), non_zero)
-    non_zero_count = tf.boolean_mask(class_count, non_zero)
-
-    if verbose:
-        print('Counts of inputs with class present, metrics for non-absent classes')
-        print(K.eval(class_count), K.eval(non_zero_sum / non_zero_count))
-
-    return K.mean(non_zero_sum / non_zero_count)
-
-
-def mean_iou(y_true, y_pred, **kwargs):
-    """
-    Compute mean Intersection over Union of two segmentation masks, via Keras.
-
-    Calls seg_metrics(y_true, y_pred, metric_name='iou'), see there for allowed kwargs.
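-    Example (illustrative): mean_iou(y_true, y_pred, metric_type='naive')
-    returns a single scalar averaged over batch and classes.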
- """ - return seg_metrics(y_true, y_pred, metric_name='iou', **kwargs) - - -def Mean_IOU(y_true, y_pred): - nb_classes = K.int_shape(y_pred)[-1] - iou = [] - true_pixels = K.argmax(y_true, axis=-1) - pred_pixels = K.argmax(y_pred, axis=-1) - void_labels = K.equal(K.sum(y_true, axis=-1), 0) - for i in range(0, nb_classes): # exclude first label (background) and last label (void) - true_labels = K.equal(true_pixels, i) # & ~void_labels - pred_labels = K.equal(pred_pixels, i) # & ~void_labels - inter = tf.to_int32(true_labels & pred_labels) - union = tf.to_int32(true_labels | pred_labels) - legal_batches = K.sum(tf.to_int32(true_labels), axis=1) > 0 - ious = K.sum(inter, axis=1) / K.sum(union, axis=1) - iou.append(K.mean(tf.gather(ious, indices=tf.where(legal_batches)))) # returns average IoU of the same objects - iou = tf.stack(iou) - legal_labels = ~tf.debugging.is_nan(iou) - iou = tf.gather(iou, indices=tf.where(legal_labels)) - return K.mean(iou) - - -def iou_vahid(y_true, y_pred): - nb_classes = tf.shape(y_true)[-1] + tf.to_int32(1) - true_pixels = K.argmax(y_true, axis=-1) - pred_pixels = K.argmax(y_pred, axis=-1) - iou = [] - - for i in tf.range(nb_classes): - tp = K.sum(tf.to_int32(K.equal(true_pixels, i) & K.equal(pred_pixels, i))) - fp = K.sum(tf.to_int32(K.not_equal(true_pixels, i) & K.equal(pred_pixels, i))) - fn = K.sum(tf.to_int32(K.equal(true_pixels, i) & K.not_equal(pred_pixels, i))) - iouh = tp / (tp + fp + fn) - iou.append(iouh) - return K.mean(iou) - - -def IoU_metric(Yi, y_predi): - # mean Intersection over Union - # Mean IoU = TP/(FN + TP + FP) - y_predi = np.argmax(y_predi, axis=3) - y_testi = np.argmax(Yi, axis=3) - IoUs = [] - Nclass = int(np.max(Yi)) + 1 - for c in range(Nclass): - TP = np.sum((Yi == c) & (y_predi == c)) - FP = np.sum((Yi != c) & (y_predi == c)) - FN = np.sum((Yi == c) & (y_predi != c)) - IoU = TP / float(TP + FP + FN) - IoUs.append(IoU) - return K.cast(np.mean(IoUs), dtype='float32') - - -def IoU_metric_keras(y_true, y_pred): - # mean Intersection over Union - # Mean IoU = TP/(FN + TP + FP) - init = tf.global_variables_initializer() - sess = tf.Session() - sess.run(init) - - return IoU_metric(y_true.eval(session=sess), y_pred.eval(session=sess)) - - -def jaccard_distance_loss(y_true, y_pred, smooth=100): - """ - Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|) - = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|)) - - The jaccard distance loss is usefull for unbalanced datasets. This has been - shifted so it converges on 0 and is smoothed to avoid exploding or disapearing - gradient. 
- - Ref: https://en.wikipedia.org/wiki/Jaccard_index - - @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96 - @author: wassname - """ - intersection = K.sum(K.abs(y_true * y_pred), axis=-1) - sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1) - jac = (intersection + smooth) / (sum_ - intersection + smooth) - return (1 - jac) * smooth diff --git a/src/eynollah/training/models.py b/src/eynollah/training/models.py deleted file mode 100644 index fdc5437..0000000 --- a/src/eynollah/training/models.py +++ /dev/null @@ -1,760 +0,0 @@ -import tensorflow as tf -from tensorflow import keras -from tensorflow.keras.models import * -from tensorflow.keras.layers import * -from tensorflow.keras import layers -from tensorflow.keras.regularizers import l2 - -##mlp_head_units = [512, 256]#[2048, 1024] -###projection_dim = 64 -##transformer_layers = 2#8 -##num_heads = 1#4 -resnet50_Weights_path = './pretrained_model/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5' -IMAGE_ORDERING = 'channels_last' -MERGE_AXIS = -1 - -def mlp(x, hidden_units, dropout_rate): - for units in hidden_units: - x = layers.Dense(units, activation=tf.nn.gelu)(x) - x = layers.Dropout(dropout_rate)(x) - return x - -class Patches(layers.Layer): - def __init__(self, patch_size_x, patch_size_y):#__init__(self, **kwargs):#:__init__(self, patch_size):#__init__(self, **kwargs): - super(Patches, self).__init__() - self.patch_size_x = patch_size_x - self.patch_size_y = patch_size_y - - def call(self, images): - #print(tf.shape(images)[1],'images') - #print(self.patch_size,'self.patch_size') - batch_size = tf.shape(images)[0] - patches = tf.image.extract_patches( - images=images, - sizes=[1, self.patch_size_y, self.patch_size_x, 1], - strides=[1, self.patch_size_y, self.patch_size_x, 1], - rates=[1, 1, 1, 1], - padding="VALID", - ) - #patch_dims = patches.shape[-1] - patch_dims = tf.shape(patches)[-1] - patches = tf.reshape(patches, [batch_size, -1, patch_dims]) - return patches - def get_config(self): - - config = super().get_config().copy() - config.update({ - 'patch_size_x': self.patch_size_x, - 'patch_size_y': self.patch_size_y, - }) - return config - -class Patches_old(layers.Layer): - def __init__(self, patch_size):#__init__(self, **kwargs):#:__init__(self, patch_size):#__init__(self, **kwargs): - super(Patches, self).__init__() - self.patch_size = patch_size - - def call(self, images): - #print(tf.shape(images)[1],'images') - #print(self.patch_size,'self.patch_size') - batch_size = tf.shape(images)[0] - patches = tf.image.extract_patches( - images=images, - sizes=[1, self.patch_size, self.patch_size, 1], - strides=[1, self.patch_size, self.patch_size, 1], - rates=[1, 1, 1, 1], - padding="VALID", - ) - patch_dims = patches.shape[-1] - #print(patches.shape,patch_dims,'patch_dims') - patches = tf.reshape(patches, [batch_size, -1, patch_dims]) - return patches - def get_config(self): - - config = super().get_config().copy() - config.update({ - 'patch_size': self.patch_size, - }) - return config - - -class PatchEncoder(layers.Layer): - def __init__(self, num_patches, projection_dim): - super(PatchEncoder, self).__init__() - self.num_patches = num_patches - self.projection = layers.Dense(units=projection_dim) - self.position_embedding = layers.Embedding( - input_dim=num_patches, output_dim=projection_dim - ) - - def call(self, patch): - positions = tf.range(start=0, limit=self.num_patches, delta=1) - encoded = self.projection(patch) + self.position_embedding(positions) - return encoded - def get_config(self): - - 
config = super().get_config().copy()
-        config.update({
-            'num_patches': self.num_patches,
-            'projection': self.projection,
-            'position_embedding': self.position_embedding,
-        })
-        return config
-
-
-def one_side_pad(x):
-    x = ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING)(x)
-    if IMAGE_ORDERING == 'channels_first':
-        x = Lambda(lambda x: x[:, :, :-1, :-1])(x)
-    elif IMAGE_ORDERING == 'channels_last':
-        x = Lambda(lambda x: x[:, :-1, :-1, :])(x)
-    return x
-
-
-def identity_block(input_tensor, kernel_size, filters, stage, block):
-    """The identity block is the block that has no conv layer at shortcut.
-    # Arguments
-        input_tensor: input tensor
-        kernel_size: default 3, the kernel size of middle conv layer at main path
-        filters: list of integers, the filters of 3 conv layer at main path
-        stage: integer, current stage label, used for generating layer names
-        block: 'a','b'..., current block label, used for generating layer names
-    # Returns
-        Output tensor for the block.
-    """
-    filters1, filters2, filters3 = filters
-
-    if IMAGE_ORDERING == 'channels_last':
-        bn_axis = 3
-    else:
-        bn_axis = 1
-
-    conv_name_base = 'res' + str(stage) + block + '_branch'
-    bn_name_base = 'bn' + str(stage) + block + '_branch'
-
-    x = Conv2D(filters1, (1, 1), data_format=IMAGE_ORDERING, name=conv_name_base + '2a')(input_tensor)
-    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
-    x = Activation('relu')(x)
-
-    x = Conv2D(filters2, kernel_size, data_format=IMAGE_ORDERING,
-               padding='same', name=conv_name_base + '2b')(x)
-    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
-    x = Activation('relu')(x)
-
-    x = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING, name=conv_name_base + '2c')(x)
-    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
-
-    x = layers.add([x, input_tensor])
-    x = Activation('relu')(x)
-    return x
-
-
-def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
-    """conv_block is the block that has a conv layer at shortcut
-    # Arguments
-        input_tensor: input tensor
-        kernel_size: default 3, the kernel size of middle conv layer at main path
-        filters: list of integers, the filters of 3 conv layer at main path
-        stage: integer, current stage label, used for generating layer names
-        block: 'a','b'..., current block label, used for generating layer names
-    # Returns
-        Output tensor for the block.
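-    # Example (illustrative)
-        conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
-        keeps the spatial size and outputs 256 channels; with the default
-        strides=(2, 2) the spatial size is halved.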
-        Note that from stage 3, the first conv layer at main path is with strides=(2,2)
-        And the shortcut should have strides=(2,2) as well
-    """
-    filters1, filters2, filters3 = filters
-
-    if IMAGE_ORDERING == 'channels_last':
-        bn_axis = 3
-    else:
-        bn_axis = 1
-
-    conv_name_base = 'res' + str(stage) + block + '_branch'
-    bn_name_base = 'bn' + str(stage) + block + '_branch'
-
-    x = Conv2D(filters1, (1, 1), data_format=IMAGE_ORDERING, strides=strides,
-               name=conv_name_base + '2a')(input_tensor)
-    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
-    x = Activation('relu')(x)
-
-    x = Conv2D(filters2, kernel_size, data_format=IMAGE_ORDERING, padding='same',
-               name=conv_name_base + '2b')(x)
-    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
-    x = Activation('relu')(x)
-
-    x = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING, name=conv_name_base + '2c')(x)
-    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
-
-    shortcut = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING, strides=strides,
-                      name=conv_name_base + '1')(input_tensor)
-    shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)
-
-    x = layers.add([x, shortcut])
-    x = Activation('relu')(x)
-    return x
-
-
-def resnet50_unet_light(n_classes, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False):
-    assert input_height % 32 == 0
-    assert input_width % 32 == 0
-
-    img_input = Input(shape=(input_height, input_width, 3))
-
-    if IMAGE_ORDERING == 'channels_last':
-        bn_axis = 3
-    else:
-        bn_axis = 1
-
-    x = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(img_input)
-    x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2), kernel_regularizer=l2(weight_decay),
-               name='conv1')(x)
-    f1 = x
-
-    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
-    x = Activation('relu')(x)
-    x = MaxPooling2D((3, 3), data_format=IMAGE_ORDERING, strides=(2, 2))(x)
-
-    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
-    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
-    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
-    f2 = one_side_pad(x)
-
-    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
-    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
-    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
-    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
-    f3 = x
-
-    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
-    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
-    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
-    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
-    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
-    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
-    f4 = x
-
-    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
-    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
-    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
-    f5 = x
-
-    if pretraining:
-        Model(img_input, x).load_weights(resnet50_Weights_path)
-
-    v512_2048 = Conv2D(512, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(f5)
-    v512_2048 = (BatchNormalization(axis=bn_axis))(v512_2048)
-    v512_2048 = Activation('relu')(v512_2048)
-
-    v512_1024 = Conv2D(512, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(f4)
-    v512_1024 = (BatchNormalization(axis=bn_axis))(v512_1024)
-    v512_1024 =
Activation('relu')(v512_1024) - - o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(v512_2048) - o = (concatenate([o, v512_1024], axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(512, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) - o = (concatenate([o, f3], axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(256, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) - o = (concatenate([o, f2], axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(128, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) - o = (concatenate([o, f1], axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(64, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) - o = (concatenate([o, img_input], axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(32, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = Conv2D(n_classes, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(o) - if task == "segmentation": - o = (BatchNormalization(axis=bn_axis))(o) - o = (Activation('softmax'))(o) - else: - o = (Activation('sigmoid'))(o) - - model = Model(img_input, o) - return model - - -def resnet50_unet(n_classes, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): - assert input_height % 32 == 0 - assert input_width % 32 == 0 - - img_input = Input(shape=(input_height, input_width, 3)) - - if IMAGE_ORDERING == 'channels_last': - bn_axis = 3 - else: - bn_axis = 1 - - x = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(img_input) - x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2), kernel_regularizer=l2(weight_decay), - name='conv1')(x) - f1 = x - - x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x) - x = Activation('relu')(x) - x = MaxPooling2D((3, 3), data_format=IMAGE_ORDERING, strides=(2, 2))(x) - - x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) - x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') - x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') - f2 = one_side_pad(x) - - x = conv_block(x, 3, [128, 128, 512], stage=3, block='a') - x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') - x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') - x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') - f3 = x - - x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c') - x = identity_block(x, 3, [256, 256, 1024], 
stage=4, block='d') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f') - f4 = x - - x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a') - x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b') - x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c') - f5 = x - - if pretraining: - Model(img_input, x).load_weights(resnet50_Weights_path) - - v1024_2048 = Conv2D(1024, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))( - f5) - v1024_2048 = (BatchNormalization(axis=bn_axis))(v1024_2048) - v1024_2048 = Activation('relu')(v1024_2048) - - o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(v1024_2048) - o = (concatenate([o, f4], axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(512, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) - o = (concatenate([o, f3], axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(256, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) - o = (concatenate([o, f2], axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(128, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) - o = (concatenate([o, f1], axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(64, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) - o = (concatenate([o, img_input], axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(32, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = Conv2D(n_classes, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(o) - if task == "segmentation": - o = (BatchNormalization(axis=bn_axis))(o) - o = (Activation('softmax'))(o) - else: - o = (Activation('sigmoid'))(o) - - model = Model(img_input, o) - - return model - - -def vit_resnet50_unet(n_classes, patch_size_x, patch_size_y, num_patches, mlp_head_units=None, transformer_layers=8, num_heads =4, projection_dim = 64, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): - if mlp_head_units is None: - mlp_head_units = [128, 64] - inputs = layers.Input(shape=(input_height, input_width, 3)) - - #transformer_units = [ - #projection_dim * 2, - #projection_dim, - #] # Size of the transformer layers - IMAGE_ORDERING = 'channels_last' - bn_axis=3 - - x = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(inputs) - x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2),kernel_regularizer=l2(weight_decay), name='conv1')(x) - f1 = x - - x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x) 
- x = Activation('relu')(x) - x = MaxPooling2D((3, 3), data_format=IMAGE_ORDERING, strides=(2, 2))(x) - - x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) - x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') - x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') - f2 = one_side_pad(x) - - x = conv_block(x, 3, [128, 128, 512], stage=3, block='a') - x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') - x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') - x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') - f3 = x - - x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f') - f4 = x - - x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a') - x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b') - x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c') - f5 = x - - if pretraining: - model = Model(inputs, x).load_weights(resnet50_Weights_path) - - #num_patches = x.shape[1]*x.shape[2] - - #patch_size_y = input_height / x.shape[1] - #patch_size_x = input_width / x.shape[2] - #patch_size = patch_size_x * patch_size_y - patches = Patches(patch_size_x, patch_size_y)(x) - # Encode patches. - encoded_patches = PatchEncoder(num_patches, projection_dim)(patches) - - for _ in range(transformer_layers): - # Layer normalization 1. - x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches) - # Create a multi-head attention layer. - attention_output = layers.MultiHeadAttention( - num_heads=num_heads, key_dim=projection_dim, dropout=0.1 - )(x1, x1) - # Skip connection 1. - x2 = layers.Add()([attention_output, encoded_patches]) - # Layer normalization 2. - x3 = layers.LayerNormalization(epsilon=1e-6)(x2) - # MLP. - x3 = mlp(x3, hidden_units=mlp_head_units, dropout_rate=0.1) - # Skip connection 2. 
- encoded_patches = layers.Add()([x3, x2]) - - encoded_patches = tf.reshape(encoded_patches, [-1, x.shape[1], x.shape[2] , int( projection_dim / (patch_size_x * patch_size_y) )]) - - v1024_2048 = Conv2D( 1024 , (1, 1), padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay))(encoded_patches) - v1024_2048 = (BatchNormalization(axis=bn_axis))(v1024_2048) - v1024_2048 = Activation('relu')(v1024_2048) - - o = (UpSampling2D( (2, 2), data_format=IMAGE_ORDERING))(v1024_2048) - o = (concatenate([o, f4],axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(512, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) - o = (concatenate([o ,f3], axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(256, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) - o = (concatenate([o, f2], axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(128, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) - o = (concatenate([o, f1], axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(64, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) - o = (concatenate([o, inputs],axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(32, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = Conv2D(n_classes, (1, 1), padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay))(o) - if task == "segmentation": - o = (BatchNormalization(axis=bn_axis))(o) - o = (Activation('softmax'))(o) - else: - o = (Activation('sigmoid'))(o) - - model = Model(inputs=inputs, outputs=o) - - return model - -def vit_resnet50_unet_transformer_before_cnn(n_classes, patch_size_x, patch_size_y, num_patches, mlp_head_units=None, transformer_layers=8, num_heads =4, projection_dim = 64, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): - if mlp_head_units is None: - mlp_head_units = [128, 64] - inputs = layers.Input(shape=(input_height, input_width, 3)) - - ##transformer_units = [ - ##projection_dim * 2, - ##projection_dim, - ##] # Size of the transformer layers - IMAGE_ORDERING = 'channels_last' - bn_axis=3 - - patches = Patches(patch_size_x, patch_size_y)(inputs) - # Encode patches. - encoded_patches = PatchEncoder(num_patches, projection_dim)(patches) - - for _ in range(transformer_layers): - # Layer normalization 1. - x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches) - # Create a multi-head attention layer. - attention_output = layers.MultiHeadAttention( - num_heads=num_heads, key_dim=projection_dim, dropout=0.1 - )(x1, x1) - # Skip connection 1. 
- x2 = layers.Add()([attention_output, encoded_patches]) - # Layer normalization 2. - x3 = layers.LayerNormalization(epsilon=1e-6)(x2) - # MLP. - x3 = mlp(x3, hidden_units=mlp_head_units, dropout_rate=0.1) - # Skip connection 2. - encoded_patches = layers.Add()([x3, x2]) - - encoded_patches = tf.reshape(encoded_patches, [-1, input_height, input_width , int( projection_dim / (patch_size_x * patch_size_y) )]) - - encoded_patches = Conv2D(3, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay), name='convinput')(encoded_patches) - - x = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(encoded_patches) - x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2),kernel_regularizer=l2(weight_decay), name='conv1')(x) - f1 = x - - x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x) - x = Activation('relu')(x) - x = MaxPooling2D((3, 3), data_format=IMAGE_ORDERING, strides=(2, 2))(x) - - x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) - x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') - x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') - f2 = one_side_pad(x) - - x = conv_block(x, 3, [128, 128, 512], stage=3, block='a') - x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') - x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') - x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') - f3 = x - - x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f') - f4 = x - - x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a') - x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b') - x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c') - f5 = x - - if pretraining: - model = Model(encoded_patches, x).load_weights(resnet50_Weights_path) - - v1024_2048 = Conv2D( 1024 , (1, 1), padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay))(x) - v1024_2048 = (BatchNormalization(axis=bn_axis))(v1024_2048) - v1024_2048 = Activation('relu')(v1024_2048) - - o = (UpSampling2D( (2, 2), data_format=IMAGE_ORDERING))(v1024_2048) - o = (concatenate([o, f4],axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(512, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) - o = (concatenate([o ,f3], axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(256, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) - o = (concatenate([o, f2], axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(128, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) - o = (concatenate([o, f1], axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), 
data_format=IMAGE_ORDERING))(o) - o = (Conv2D(64, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) - o = (concatenate([o, inputs],axis=MERGE_AXIS)) - o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) - o = (Conv2D(32, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = Activation('relu')(o) - - o = Conv2D(n_classes, (1, 1), padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay))(o) - if task == "segmentation": - o = (BatchNormalization(axis=bn_axis))(o) - o = (Activation('softmax'))(o) - else: - o = (Activation('sigmoid'))(o) - - model = Model(inputs=inputs, outputs=o) - - return model - -def resnet50_classifier(n_classes,input_height=224,input_width=224,weight_decay=1e-6,pretraining=False): - include_top=True - assert input_height%32 == 0 - assert input_width%32 == 0 - - - img_input = Input(shape=(input_height,input_width , 3 )) - - if IMAGE_ORDERING == 'channels_last': - bn_axis = 3 - else: - bn_axis = 1 - - x = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(img_input) - x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2),kernel_regularizer=l2(weight_decay), name='conv1')(x) - f1 = x - - x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x) - x = Activation('relu')(x) - x = MaxPooling2D((3, 3) , data_format=IMAGE_ORDERING , strides=(2, 2))(x) - - - x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) - x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') - x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') - f2 = one_side_pad(x ) - - - x = conv_block(x, 3, [128, 128, 512], stage=3, block='a') - x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') - x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') - x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') - f3 = x - - x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') - x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f') - f4 = x - - x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a') - x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b') - x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c') - f5 = x - - if pretraining: - Model(img_input, x).load_weights(resnet50_Weights_path) - - x = AveragePooling2D((7, 7), name='avg_pool')(x) - x = Flatten()(x) - - ## - x = Dense(256, activation='relu', name='fc512')(x) - x=Dropout(0.2)(x) - ## - x = Dense(n_classes, activation='softmax', name='fc1000')(x) - model = Model(img_input, x) - - - - - return model - -def machine_based_reading_order_model(n_classes,input_height=224,input_width=224,weight_decay=1e-6,pretraining=False): - assert input_height%32 == 0 - assert input_width%32 == 0 - - img_input = Input(shape=(input_height,input_width , 3 )) - - if IMAGE_ORDERING == 'channels_last': - bn_axis = 3 - else: - bn_axis = 1 - - x1 = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(img_input) - x1 = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2),kernel_regularizer=l2(weight_decay), name='conv1')(x1) - - x1 = 
BatchNormalization(axis=bn_axis, name='bn_conv1')(x1) - x1 = Activation('relu')(x1) - x1 = MaxPooling2D((3, 3) , data_format=IMAGE_ORDERING , strides=(2, 2))(x1) - - x1 = conv_block(x1, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) - x1 = identity_block(x1, 3, [64, 64, 256], stage=2, block='b') - x1 = identity_block(x1, 3, [64, 64, 256], stage=2, block='c') - - x1 = conv_block(x1, 3, [128, 128, 512], stage=3, block='a') - x1 = identity_block(x1, 3, [128, 128, 512], stage=3, block='b') - x1 = identity_block(x1, 3, [128, 128, 512], stage=3, block='c') - x1 = identity_block(x1, 3, [128, 128, 512], stage=3, block='d') - - x1 = conv_block(x1, 3, [256, 256, 1024], stage=4, block='a') - x1 = identity_block(x1, 3, [256, 256, 1024], stage=4, block='b') - x1 = identity_block(x1, 3, [256, 256, 1024], stage=4, block='c') - x1 = identity_block(x1, 3, [256, 256, 1024], stage=4, block='d') - x1 = identity_block(x1, 3, [256, 256, 1024], stage=4, block='e') - x1 = identity_block(x1, 3, [256, 256, 1024], stage=4, block='f') - - x1 = conv_block(x1, 3, [512, 512, 2048], stage=5, block='a') - x1 = identity_block(x1, 3, [512, 512, 2048], stage=5, block='b') - x1 = identity_block(x1, 3, [512, 512, 2048], stage=5, block='c') - - if pretraining: - Model(img_input , x1).load_weights(resnet50_Weights_path) - - x1 = AveragePooling2D((7, 7), name='avg_pool1')(x1) - flattened = Flatten()(x1) - - o = Dense(256, activation='relu', name='fc512')(flattened) - o=Dropout(0.2)(o) - - o = Dense(256, activation='relu', name='fc512a')(o) - o=Dropout(0.2)(o) - - o = Dense(n_classes, activation='sigmoid', name='fc1000')(o) - model = Model(img_input , o) - - return model diff --git a/src/eynollah/training/train.py b/src/eynollah/training/train.py deleted file mode 100644 index 97736e0..0000000 --- a/src/eynollah/training/train.py +++ /dev/null @@ -1,474 +0,0 @@ -import os -import sys -import json - -import click - -from eynollah.training.metrics import ( - soft_dice_loss, - weighted_categorical_crossentropy -) -from eynollah.training.models import ( - PatchEncoder, - Patches, - machine_based_reading_order_model, - resnet50_classifier, - resnet50_unet, - vit_resnet50_unet, - vit_resnet50_unet_transformer_before_cnn -) -from eynollah.training.utils import ( - data_gen, - generate_arrays_from_folder_reading_order, - generate_data_from_folder_evaluation, - generate_data_from_folder_training, - get_one_hot, - provide_patches, - return_number_of_total_training_data -) - -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' -import tensorflow as tf -from tensorflow.compat.v1.keras.backend import set_session -from tensorflow.keras.optimizers import SGD, Adam -from sacred import Experiment -from tensorflow.keras.models import load_model -from tqdm import tqdm -from sklearn.metrics import f1_score -from tensorflow.keras.callbacks import Callback - -import numpy as np -import cv2 - -class SaveWeightsAfterSteps(Callback): - def __init__(self, save_interval, save_path, _config): - super(SaveWeightsAfterSteps, self).__init__() - self.save_interval = save_interval - self.save_path = save_path - self.step_count = 0 - self._config = _config - - def on_train_batch_end(self, batch, logs=None): - self.step_count += 1 - - if self.step_count % self.save_interval ==0: - save_file = f"{self.save_path}/model_step_{self.step_count}" - #os.system('mkdir '+save_file) - - self.model.save(save_file) - - with open(os.path.join(os.path.join(self.save_path, f"model_step_{self.step_count}"),"config.json"), "w") as fp: - json.dump(self._config, fp) # encode dict into 
JSON
-            print(f"saved model at step {self.step_count} to {save_file}")
-
-
-def configuration():
-    config = tf.compat.v1.ConfigProto()
-    config.gpu_options.allow_growth = True
-    session = tf.compat.v1.Session(config=config)
-    set_session(session)
-
-
-def get_dirs_or_files(input_data):
-    image_input, labels_input = os.path.join(input_data, 'images/'), os.path.join(input_data, 'labels/')
-    if os.path.isdir(input_data):
-        # Check if training dir exists
-        assert os.path.isdir(image_input), "{} is not a directory".format(image_input)
-        assert os.path.isdir(labels_input), "{} is not a directory".format(labels_input)
-    return image_input, labels_input
-
-
-ex = Experiment(save_git_info=False)
-
-
-@ex.config
-def config_params():
-    n_classes = None  # Number of classes. In the case of binary classification this should be 2.
-    n_epochs = 1  # Number of epochs.
-    input_height = 224 * 1  # Height of model's input in pixels.
-    input_width = 224 * 1  # Width of model's input in pixels.
-    weight_decay = 1e-6  # Weight decay of l2 regularization of model layers.
-    n_batch = 1  # Number of batches at each iteration.
-    learning_rate = 1e-4  # Set the learning rate.
-    patches = False  # Divides the input image into smaller patches (input size of the model) when set to true. For the model to see the full image, as in page extraction, set this to false.
-    augmentation = False  # To apply any kind of augmentation, this parameter must be set to true.
-    flip_aug = False  # If true, different types of flipping will be applied to the image. Types of flips are defined with "flip_index" in config_params.json.
-    blur_aug = False  # If true, different types of blurring will be applied to the image. Types of blur are defined with "blur_k" in config_params.json.
-    padding_white = False  # If true, white padding will be applied to the image.
-    padding_black = False  # If true, black padding will be applied to the image.
-    scaling = False  # If true, scaling will be applied to the image. The amount of scaling is defined with "scales" in config_params.json.
-    shifting = False
-    degrading = False  # If true, degrading will be applied to the image. The amount of degrading is defined with "degrade_scales" in config_params.json.
-    brightening = False  # If true, brightening will be applied to the image. The amount of brightening is defined with "brightness" in config_params.json.
-    binarization = False  # If true, Otsu thresholding will be applied to augment the input with binarized images.
-    adding_rgb_background = False
-    adding_rgb_foreground = False
-    add_red_textlines = False
-    channels_shuffling = False
-    dir_train = None  # Directory of training dataset with subdirectories having the names "images" and "labels".
-    dir_eval = None  # Directory of validation dataset with subdirectories having the names "images" and "labels".
-    dir_output = None  # Directory where the output model will be saved.
-    pretraining = False  # Set to true to load pretrained weights of the ResNet50 encoder.
-    scaling_bluring = False  # If true, a combination of scaling and blurring will be applied to the image.
-    scaling_binarization = False  # If true, a combination of scaling and binarization will be applied to the image.
-    rotation = False  # If true, a 90 degree rotation will be implemented.
-    rotation_not_90 = False  # If true, rotation based on the angles provided with "thetha" will be implemented.
-    scaling_brightness = False  # If true, a combination of scaling and brightening will be applied to the image.
-    scaling_flip = False  # If true, a combination of scaling and flipping will be applied to the image.
-    thetha = None  # Rotate image by these angles for augmentation.
-    shuffle_indexes = None
-    blur_k = None  # Blur image for augmentation.
-    scales = None  # Scale patches for augmentation.
-    degrade_scales = None  # Degrade image for augmentation.
-    brightness = None  # Brighten image for augmentation.
-    flip_index = None  # Flip image for augmentation.
-    continue_training = False  # Set to true if you would like to continue training an already trained model.
-    transformer_patchsize_x = None  # Patch size of vision transformer patches in x direction.
-    transformer_patchsize_y = None  # Patch size of vision transformer patches in y direction.
-    transformer_num_patches_xy = None  # Number of patches for vision transformer in x and y direction respectively.
-    transformer_projection_dim = 64  # Transformer projection dimension. Default value is 64.
-    transformer_mlp_head_units = [128, 64]  # Transformer Multilayer Perceptron (MLP) head units. Default value is [128, 64].
-    transformer_layers = 8  # Number of transformer layers. Default value is 8.
-    transformer_num_heads = 4  # Transformer number of heads. Default value is 4.
-    transformer_cnn_first = True  # We have two types of vision transformers. In one type, a CNN is applied first, followed by a transformer. In the other type, this order is reversed. If transformer_cnn_first is true, the CNN will be applied before the transformer. Default value is true.
-    index_start = 0  # Index of model to continue training from. E.g. if you trained for 3 epochs and the last index is 2, to continue from model_1.h5, set "index_start" to 3 to start naming models with index 3.
-    dir_of_start_model = ''  # Directory containing the pretrained encoder to continue training the model.
-    is_loss_soft_dice = False  # Use soft dice as loss function. When set to true, "weighted_loss" must be false.
-    weighted_loss = False  # Use weighted categorical cross entropy as loss function. When set to true, "is_loss_soft_dice" must be false.
-    data_is_provided = False  # Only set this to true when you have already provided the input data and the train and eval data are in "dir_output".
-    task = "segmentation"  # This parameter defines the task of the model, which can be segmentation, enhancement or classification.
-    f1_threshold_classification = None  # This threshold is used to select models with an evaluation f1 score bigger than it. The weights of the selected models undergo weights ensembling, and the averaged ensembled model is written to the output.
-    classification_classes_name = None  # Dictionary of classification class names.
-    backbone_type = None  # There are 2 types of backbones: a vision transformer alongside a CNN, called "transformer", and a CNN only, called "nontransformer".
-    save_interval = None
-    dir_img_bin = None
-    number_of_backgrounds_per_image = 1
-    dir_rgb_backgrounds = None
-    dir_rgb_foregrounds = None
-
-@ex.automain
-def run(_config, n_classes, n_epochs, input_height,
-        input_width, weight_decay, weighted_loss,
-        index_start, dir_of_start_model, is_loss_soft_dice,
-        n_batch, patches, augmentation, flip_aug,
-        blur_aug, padding_white, padding_black, scaling, shifting, degrading, channels_shuffling,
-        brightening, binarization, adding_rgb_background, adding_rgb_foreground, add_red_textlines, blur_k, scales, degrade_scales, shuffle_indexes,
-        brightness, dir_train, data_is_provided, scaling_bluring,
-        scaling_brightness, scaling_binarization, rotation, rotation_not_90,
-        thetha, scaling_flip, continue_training, transformer_projection_dim,
-        transformer_mlp_head_units, transformer_layers, transformer_num_heads, transformer_cnn_first,
-        transformer_patchsize_x, transformer_patchsize_y,
-        transformer_num_patches_xy, backbone_type, save_interval, flip_index, dir_eval, dir_output,
-        pretraining, learning_rate, task, f1_threshold_classification, classification_classes_name, dir_img_bin, number_of_backgrounds_per_image, dir_rgb_backgrounds, dir_rgb_foregrounds):
-
-    if dir_rgb_backgrounds:
-        list_all_possible_background_images = os.listdir(dir_rgb_backgrounds)
-    else:
-        list_all_possible_background_images = None
-
-    if dir_rgb_foregrounds:
-        list_all_possible_foreground_rgbs = os.listdir(dir_rgb_foregrounds)
-    else:
-        list_all_possible_foreground_rgbs = None
-
-    if task == "segmentation" or task == "enhancement" or task == "binarization":
-        if data_is_provided:
-            dir_train_flowing = os.path.join(dir_output, 'train')
-            dir_eval_flowing = os.path.join(dir_output, 'eval')
-
-            dir_flow_train_imgs = os.path.join(dir_train_flowing, 'images')
-            dir_flow_train_labels = os.path.join(dir_train_flowing, 'labels')
-
-            dir_flow_eval_imgs = os.path.join(dir_eval_flowing, 'images')
-            dir_flow_eval_labels = os.path.join(dir_eval_flowing, 'labels')
-
-            configuration()
-
-        else:
-            dir_img, dir_seg = get_dirs_or_files(dir_train)
-            dir_img_val, dir_seg_val = get_dirs_or_files(dir_eval)
-
-            # first make a directory in the output for both training and evaluation in order to flow data from these directories.
-            dir_train_flowing = os.path.join(dir_output, 'train')
-            dir_eval_flowing = os.path.join(dir_output, 'eval')
-
-            dir_flow_train_imgs = os.path.join(dir_train_flowing, 'images/')
-            dir_flow_train_labels = os.path.join(dir_train_flowing, 'labels/')
-
-            dir_flow_eval_imgs = os.path.join(dir_eval_flowing, 'images/')
-            dir_flow_eval_labels = os.path.join(dir_eval_flowing, 'labels/')
-
-            if os.path.isdir(dir_train_flowing):
-                os.system('rm -rf ' + dir_train_flowing)
-                os.makedirs(dir_train_flowing)
-            else:
-                os.makedirs(dir_train_flowing)
-
-            if os.path.isdir(dir_eval_flowing):
-                os.system('rm -rf ' + dir_eval_flowing)
-                os.makedirs(dir_eval_flowing)
-            else:
-                os.makedirs(dir_eval_flowing)
-
-            os.mkdir(dir_flow_train_imgs)
-            os.mkdir(dir_flow_train_labels)
-
-            os.mkdir(dir_flow_eval_imgs)
-            os.mkdir(dir_flow_eval_labels)
-
-            # set the gpu configuration
-            configuration()
-
-            imgs_list = np.array(os.listdir(dir_img))
-            segs_list = np.array(os.listdir(dir_seg))
-
-            imgs_list_test = np.array(os.listdir(dir_img_val))
-            segs_list_test = np.array(os.listdir(dir_seg_val))
-
-            # writing patches into a sub-folder in order to be flowed from directory.
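-            # The two calls below populate dir_output/train/{images,labels} and
-            # dir_output/eval/{images,labels} with fixed-size (input_height x
-            # input_width) inputs; only the training set is augmented.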
- provide_patches(imgs_list, segs_list, dir_img, dir_seg, dir_flow_train_imgs, - dir_flow_train_labels, input_height, input_width, blur_k, - blur_aug, padding_white, padding_black, flip_aug, binarization, adding_rgb_background,adding_rgb_foreground, add_red_textlines, channels_shuffling, - scaling, shifting, degrading, brightening, scales, degrade_scales, brightness, - flip_index,shuffle_indexes, scaling_bluring, scaling_brightness, scaling_binarization, - rotation, rotation_not_90, thetha, scaling_flip, task, augmentation=augmentation, - patches=patches, dir_img_bin=dir_img_bin,number_of_backgrounds_per_image=number_of_backgrounds_per_image,list_all_possible_background_images=list_all_possible_background_images, dir_rgb_backgrounds=dir_rgb_backgrounds, dir_rgb_foregrounds=dir_rgb_foregrounds,list_all_possible_foreground_rgbs=list_all_possible_foreground_rgbs) - - provide_patches(imgs_list_test, segs_list_test, dir_img_val, dir_seg_val, - dir_flow_eval_imgs, dir_flow_eval_labels, input_height, input_width, - blur_k, blur_aug, padding_white, padding_black, flip_aug, binarization, adding_rgb_background, adding_rgb_foreground, add_red_textlines, channels_shuffling, - scaling, shifting, degrading, brightening, scales, degrade_scales, brightness, - flip_index, shuffle_indexes, scaling_bluring, scaling_brightness, scaling_binarization, - rotation, rotation_not_90, thetha, scaling_flip, task, augmentation=False, patches=patches,dir_img_bin=dir_img_bin,number_of_backgrounds_per_image=number_of_backgrounds_per_image,list_all_possible_background_images=list_all_possible_background_images, dir_rgb_backgrounds=dir_rgb_backgrounds,dir_rgb_foregrounds=dir_rgb_foregrounds,list_all_possible_foreground_rgbs=list_all_possible_foreground_rgbs ) - - if weighted_loss: - weights = np.zeros(n_classes) - if data_is_provided: - for obj in os.listdir(dir_flow_train_labels): - try: - label_obj = cv2.imread(dir_flow_train_labels + '/' + obj) - label_obj_one_hot = get_one_hot(label_obj, label_obj.shape[0], label_obj.shape[1], n_classes) - weights += (label_obj_one_hot.sum(axis=0)).sum(axis=0) - except: - pass - else: - - for obj in os.listdir(dir_seg): - try: - label_obj = cv2.imread(dir_seg + '/' + obj) - label_obj_one_hot = get_one_hot(label_obj, label_obj.shape[0], label_obj.shape[1], n_classes) - weights += (label_obj_one_hot.sum(axis=0)).sum(axis=0) - except: - pass - - weights = 1.00 / weights - - weights = weights / float(np.sum(weights)) - weights = weights / float(np.min(weights)) - weights = weights / float(np.sum(weights)) - - if continue_training: - if backbone_type=='nontransformer': - if is_loss_soft_dice and (task == "segmentation" or task == "binarization"): - model = load_model(dir_of_start_model, compile=True, custom_objects={'soft_dice_loss': soft_dice_loss}) - if weighted_loss and (task == "segmentation" or task == "binarization"): - model = load_model(dir_of_start_model, compile=True, custom_objects={'loss': weighted_categorical_crossentropy(weights)}) - if not is_loss_soft_dice and not weighted_loss: - model = load_model(dir_of_start_model , compile=True) - elif backbone_type=='transformer': - if is_loss_soft_dice and (task == "segmentation" or task == "binarization"): - model = load_model(dir_of_start_model, compile=True, custom_objects={"PatchEncoder": PatchEncoder, "Patches": Patches,'soft_dice_loss': soft_dice_loss}) - if weighted_loss and (task == "segmentation" or task == "binarization"): - model = load_model(dir_of_start_model, compile=True, custom_objects={'loss': 
weighted_categorical_crossentropy(weights)})
- if not is_loss_soft_dice and not weighted_loss:
- model = load_model(dir_of_start_model, compile=True, custom_objects={"PatchEncoder": PatchEncoder, "Patches": Patches})
- else:
- index_start = 0
- if backbone_type=='nontransformer':
- model = resnet50_unet(n_classes, input_height, input_width, task, weight_decay, pretraining)
- elif backbone_type=='transformer':
- num_patches_x = transformer_num_patches_xy[0]
- num_patches_y = transformer_num_patches_xy[1]
- num_patches = num_patches_x * num_patches_y
- 
- if transformer_cnn_first:
- if input_height != (num_patches_y * transformer_patchsize_y * 32):
- print("Error: input_height must equal transformer_num_patches_xy height value * transformer_patchsize_y * 32; check transformer_patchsize_y and transformer_num_patches_xy.")
- sys.exit(1)
- if input_width != (num_patches_x * transformer_patchsize_x * 32):
- print("Error: input_width must equal transformer_num_patches_xy width value * transformer_patchsize_x * 32; check transformer_patchsize_x and transformer_num_patches_xy.")
- sys.exit(1)
- if (transformer_projection_dim % (transformer_patchsize_y * transformer_patchsize_x)) != 0:
- print("Error: transformer_projection_dim must be divisible by (transformer_patchsize_y * transformer_patchsize_x).")
- sys.exit(1)
- 
- model = vit_resnet50_unet(n_classes, transformer_patchsize_x, transformer_patchsize_y, num_patches, transformer_mlp_head_units, transformer_layers, transformer_num_heads, transformer_projection_dim, input_height, input_width, task, weight_decay, pretraining)
- else:
- if input_height != (num_patches_y * transformer_patchsize_y):
- print("Error: input_height must equal transformer_num_patches_xy height value * transformer_patchsize_y; check transformer_patchsize_y and transformer_num_patches_xy.")
- sys.exit(1)
- if input_width != (num_patches_x * transformer_patchsize_x):
- print("Error: input_width must equal transformer_num_patches_xy width value * transformer_patchsize_x; check transformer_patchsize_x and transformer_num_patches_xy.")
- sys.exit(1)
- if (transformer_projection_dim % (transformer_patchsize_y * transformer_patchsize_x)) != 0:
- print("Error: transformer_projection_dim must be divisible by (transformer_patchsize_y * transformer_patchsize_x).")
- sys.exit(1)
- model = vit_resnet50_unet_transformer_before_cnn(n_classes, transformer_patchsize_x, transformer_patchsize_y, num_patches, transformer_mlp_head_units, transformer_layers, transformer_num_heads, transformer_projection_dim, input_height, input_width, task, weight_decay, pretraining)
- 
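- # Worked example (hypothetical values) for the CNN-first shape constraints
- # above: with transformer_patchsize_x = transformer_patchsize_y = 1 and
- # transformer_num_patches_xy = [14, 14], the input must be
- # 14 * 1 * 32 = 448 pixels on each axis, and the default
- # transformer_projection_dim = 64 trivially satisfies 64 % (1 * 1) == 0.
- # Print the model structure.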
- model.summary() - - - if task == "segmentation" or task == "binarization": - if not is_loss_soft_dice and not weighted_loss: - model.compile(loss='categorical_crossentropy', - optimizer=Adam(learning_rate=learning_rate), metrics=['accuracy']) - if is_loss_soft_dice: - model.compile(loss=soft_dice_loss, - optimizer=Adam(learning_rate=learning_rate), metrics=['accuracy']) - if weighted_loss: - model.compile(loss=weighted_categorical_crossentropy(weights), - optimizer=Adam(learning_rate=learning_rate), metrics=['accuracy']) - elif task == "enhancement": - model.compile(loss='mean_squared_error', - optimizer=Adam(learning_rate=learning_rate), metrics=['accuracy']) - - - # generating train and evaluation data - train_gen = data_gen(dir_flow_train_imgs, dir_flow_train_labels, batch_size=n_batch, - input_height=input_height, input_width=input_width, n_classes=n_classes, task=task) - val_gen = data_gen(dir_flow_eval_imgs, dir_flow_eval_labels, batch_size=n_batch, - input_height=input_height, input_width=input_width, n_classes=n_classes, task=task) - - ##img_validation_patches = os.listdir(dir_flow_eval_imgs) - ##score_best=[] - ##score_best.append(0) - - if save_interval: - save_weights_callback = SaveWeightsAfterSteps(save_interval, dir_output, _config) - - - for i in tqdm(range(index_start, n_epochs + index_start)): - if save_interval: - model.fit( - train_gen, - steps_per_epoch=int(len(os.listdir(dir_flow_train_imgs)) / n_batch) - 1, - validation_data=val_gen, - validation_steps=1, - epochs=1, callbacks=[save_weights_callback]) - else: - model.fit( - train_gen, - steps_per_epoch=int(len(os.listdir(dir_flow_train_imgs)) / n_batch) - 1, - validation_data=val_gen, - validation_steps=1, - epochs=1) - - model.save(os.path.join(dir_output,'model_'+str(i))) - - with open(os.path.join(os.path.join(dir_output,'model_'+str(i)),"config.json"), "w") as fp: - json.dump(_config, fp) # encode dict into JSON - - #os.system('rm -rf '+dir_train_flowing) - #os.system('rm -rf '+dir_eval_flowing) - - #model.save(dir_output+'/'+'model'+'.h5') - elif task=='classification': - configuration() - model = resnet50_classifier(n_classes, input_height, input_width, weight_decay, pretraining) - - opt_adam = Adam(learning_rate=0.001) - model.compile(loss='categorical_crossentropy', - optimizer = opt_adam,metrics=['accuracy']) - - - list_classes = list(classification_classes_name.values()) - testX, testY = generate_data_from_folder_evaluation(dir_eval, input_height, input_width, n_classes, list_classes) - - y_tot=np.zeros((testX.shape[0],n_classes)) - - score_best= [0] - - num_rows = return_number_of_total_training_data(dir_train) - weights=[] - - for i in range(n_epochs): - history = model.fit( generate_data_from_folder_training(dir_train, n_batch , input_height, input_width, n_classes, list_classes), steps_per_epoch=num_rows / n_batch, verbose=1)#,class_weight=weights) - - y_pr_class = [] - for jj in range(testY.shape[0]): - y_pr=model.predict(testX[jj,:,:,:].reshape(1,input_height,input_width,3), verbose=0) - y_pr_ind= np.argmax(y_pr,axis=1) - y_pr_class.append(y_pr_ind) - - y_pr_class = np.array(y_pr_class) - f1score=f1_score(np.argmax(testY,axis=1), y_pr_class, average='macro') - print(i,f1score) - - if f1score>score_best[0]: - score_best[0]=f1score - model.save(os.path.join(dir_output,'model_best')) - - if f1score > f1_threshold_classification: - weights.append(model.get_weights() ) - - - if len(weights) >= 1: - new_weights=list() - for weights_list_tuple in zip(*weights): - new_weights.append( 
[np.array(weights_).mean(axis=0) for weights_ in zip(*weights_list_tuple)] ) - - new_weights = [np.array(x) for x in new_weights] - model_weight_averaged=tf.keras.models.clone_model(model) - model_weight_averaged.set_weights(new_weights) - - model_weight_averaged.save(os.path.join(dir_output,'model_ens_avg')) - with open(os.path.join( os.path.join(dir_output,'model_ens_avg'), "config.json"), "w") as fp: - json.dump(_config, fp) # encode dict into JSON - - with open(os.path.join( os.path.join(dir_output,'model_best'), "config.json"), "w") as fp: - json.dump(_config, fp) # encode dict into JSON - - elif task=='reading_order': - configuration() - model = machine_based_reading_order_model(n_classes,input_height,input_width,weight_decay,pretraining) - - dir_flow_train_imgs = os.path.join(dir_train, 'images') - dir_flow_train_labels = os.path.join(dir_train, 'labels') - - classes = os.listdir(dir_flow_train_labels) - if augmentation: - num_rows = len(classes)*(len(thetha) + 1) - else: - num_rows = len(classes) - #ls_test = os.listdir(dir_flow_train_labels) - - #f1score_tot = [0] - indexer_start = 0 - # opt = SGD(learning_rate=0.01, momentum=0.9) - opt_adam = tf.keras.optimizers.Adam(learning_rate=0.0001) - model.compile(loss="binary_crossentropy", - optimizer = opt_adam,metrics=['accuracy']) - - if save_interval: - save_weights_callback = SaveWeightsAfterSteps(save_interval, dir_output, _config) - - for i in range(n_epochs): - if save_interval: - history = model.fit(generate_arrays_from_folder_reading_order(dir_flow_train_labels, dir_flow_train_imgs, n_batch, input_height, input_width, n_classes, thetha, augmentation), steps_per_epoch=num_rows / n_batch, verbose=1, callbacks=[save_weights_callback]) - else: - history = model.fit(generate_arrays_from_folder_reading_order(dir_flow_train_labels, dir_flow_train_imgs, n_batch, input_height, input_width, n_classes, thetha, augmentation), steps_per_epoch=num_rows / n_batch, verbose=1) - model.save( os.path.join(dir_output,'model_'+str(i+indexer_start) )) - - with open(os.path.join(os.path.join(dir_output,'model_'+str(i)),"config.json"), "w") as fp: - json.dump(_config, fp) # encode dict into JSON - ''' - if f1score>f1score_tot[0]: - f1score_tot[0] = f1score - model_dir = os.path.join(dir_out,'model_best') - model.save(model_dir) - ''' - - diff --git a/src/eynollah/training/utils.py b/src/eynollah/training/utils.py deleted file mode 100644 index 1278be5..0000000 --- a/src/eynollah/training/utils.py +++ /dev/null @@ -1,1057 +0,0 @@ -import os -import math -import random - -import cv2 -import numpy as np -import seaborn as sns -from scipy.ndimage.interpolation import map_coordinates -from scipy.ndimage.filters import gaussian_filter -from tqdm import tqdm -import imutils -from tensorflow.keras.utils import to_categorical -from PIL import Image, ImageEnhance - - -def return_shuffled_channels(img, channels_order): - """ - channels order in ordinary case is like this [0, 1, 2]. In the case of shuffling the order should be provided. 
- """ - img_sh = np.copy(img) - - img_sh[:,:,0]= img[:,:,channels_order[0]] - img_sh[:,:,1]= img[:,:,channels_order[1]] - img_sh[:,:,2]= img[:,:,channels_order[2]] - return img_sh - -def return_binary_image_with_red_textlines(img_bin): - img_red = np.copy(img_bin) - - img_red[:,:,0][img_bin[:,:,0] == 0] = 255 - return img_red - -def return_binary_image_with_given_rgb_background(img_bin, img_rgb_background): - img_rgb_background = resize_image(img_rgb_background ,img_bin.shape[0], img_bin.shape[1]) - - img_final = np.copy(img_bin) - - img_final[:,:,0][img_bin[:,:,0] != 0] = img_rgb_background[:,:,0][img_bin[:,:,0] != 0] - img_final[:,:,1][img_bin[:,:,1] != 0] = img_rgb_background[:,:,1][img_bin[:,:,1] != 0] - img_final[:,:,2][img_bin[:,:,2] != 0] = img_rgb_background[:,:,2][img_bin[:,:,2] != 0] - - return img_final - -def return_binary_image_with_given_rgb_background_and_given_foreground_rgb(img_bin, img_rgb_background, rgb_foreground): - img_rgb_background = resize_image(img_rgb_background ,img_bin.shape[0], img_bin.shape[1]) - - img_final = np.copy(img_bin) - img_foreground = np.zeros(img_bin.shape) - - - img_foreground[:,:,0][img_bin[:,:,0] == 0] = rgb_foreground[0] - img_foreground[:,:,1][img_bin[:,:,0] == 0] = rgb_foreground[1] - img_foreground[:,:,2][img_bin[:,:,0] == 0] = rgb_foreground[2] - - - img_final[:,:,0][img_bin[:,:,0] != 0] = img_rgb_background[:,:,0][img_bin[:,:,0] != 0] - img_final[:,:,1][img_bin[:,:,1] != 0] = img_rgb_background[:,:,1][img_bin[:,:,1] != 0] - img_final[:,:,2][img_bin[:,:,2] != 0] = img_rgb_background[:,:,2][img_bin[:,:,2] != 0] - - img_final = img_final + img_foreground - return img_final - -def return_binary_image_with_given_rgb_background_red_textlines(img_bin, img_rgb_background, img_color): - img_rgb_background = resize_image(img_rgb_background ,img_bin.shape[0], img_bin.shape[1]) - - img_final = np.copy(img_color) - - img_final[:,:,0][img_bin[:,:,0] != 0] = img_rgb_background[:,:,0][img_bin[:,:,0] != 0] - img_final[:,:,1][img_bin[:,:,1] != 0] = img_rgb_background[:,:,1][img_bin[:,:,1] != 0] - img_final[:,:,2][img_bin[:,:,2] != 0] = img_rgb_background[:,:,2][img_bin[:,:,2] != 0] - - return img_final - -def return_image_with_red_elements(img, img_bin): - img_final = np.copy(img) - - img_final[:,:,0][img_bin[:,:,0]==0] = 0 - img_final[:,:,1][img_bin[:,:,0]==0] = 0 - img_final[:,:,2][img_bin[:,:,0]==0] = 255 - return img_final - -def shift_image_and_label(img, label, type_shift): - h_n = int(img.shape[0]*1.06) - w_n = int(img.shape[1]*1.06) - - channel0_avg = int( np.mean(img[:,:,0]) ) - channel1_avg = int( np.mean(img[:,:,1]) ) - channel2_avg = int( np.mean(img[:,:,2]) ) - - h_diff = abs( img.shape[0] - h_n ) - w_diff = abs( img.shape[1] - w_n ) - - h_start = int(h_diff / 2.) - w_start = int(w_diff / 2.) 
- - img_scaled_padded = np.zeros((h_n, w_n, 3)) - label_scaled_padded = np.zeros((h_n, w_n, 3)) - - img_scaled_padded[:,:,0] = channel0_avg - img_scaled_padded[:,:,1] = channel1_avg - img_scaled_padded[:,:,2] = channel2_avg - - img_scaled_padded[h_start:h_start+img.shape[0], w_start:w_start+img.shape[1],:] = img[:,:,:] - label_scaled_padded[h_start:h_start+img.shape[0], w_start:w_start+img.shape[1],:] = label[:,:,:] - - - if type_shift=="xpos": - img_dis = img_scaled_padded[h_start:h_start+img.shape[0],2*w_start:2*w_start+img.shape[1],:] - label_dis = label_scaled_padded[h_start:h_start+img.shape[0],2*w_start:2*w_start+img.shape[1],:] - elif type_shift=="xmin": - img_dis = img_scaled_padded[h_start:h_start+img.shape[0],:img.shape[1],:] - label_dis = label_scaled_padded[h_start:h_start+img.shape[0],:img.shape[1],:] - elif type_shift=="ypos": - img_dis = img_scaled_padded[2*h_start:2*h_start+img.shape[0],w_start:w_start+img.shape[1],:] - label_dis = label_scaled_padded[2*h_start:2*h_start+img.shape[0],w_start:w_start+img.shape[1],:] - elif type_shift=="ymin": - img_dis = img_scaled_padded[:img.shape[0],w_start:w_start+img.shape[1],:] - label_dis = label_scaled_padded[:img.shape[0],w_start:w_start+img.shape[1],:] - elif type_shift=="xypos": - img_dis = img_scaled_padded[2*h_start:2*h_start+img.shape[0],2*w_start:2*w_start+img.shape[1],:] - label_dis = label_scaled_padded[2*h_start:2*h_start+img.shape[0],2*w_start:2*w_start+img.shape[1],:] - elif type_shift=="xymin": - img_dis = img_scaled_padded[:img.shape[0],:img.shape[1],:] - label_dis = label_scaled_padded[:img.shape[0],:img.shape[1],:] - return img_dis, label_dis - -def scale_image_for_no_patch(img, label, scale): - h_n = int(img.shape[0]*scale) - w_n = int(img.shape[1]*scale) - - channel0_avg = int( np.mean(img[:,:,0]) ) - channel1_avg = int( np.mean(img[:,:,1]) ) - channel2_avg = int( np.mean(img[:,:,2]) ) - - h_diff = img.shape[0] - h_n - w_diff = img.shape[1] - w_n - - h_start = int(h_diff / 2.) - w_start = int(w_diff / 2.) 
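- # Resize image and label by the given scale, then paste them centred on a
- # mean-colour canvas of the original dimensions, so the sample keeps its
- # original height and width.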
- - img_res = resize_image(img, h_n, w_n) - label_res = resize_image(label, h_n, w_n) - - img_scaled_padded = np.copy(img) - - label_scaled_padded = np.zeros(label.shape) - - img_scaled_padded[:,:,0] = channel0_avg - img_scaled_padded[:,:,1] = channel1_avg - img_scaled_padded[:,:,2] = channel2_avg - - img_scaled_padded[h_start:h_start+h_n, w_start:w_start+w_n,:] = img_res[:,:,:] - label_scaled_padded[h_start:h_start+h_n, w_start:w_start+w_n,:] = label_res[:,:,:] - - return img_scaled_padded, label_scaled_padded - - -def return_number_of_total_training_data(path_classes): - sub_classes = os.listdir(path_classes) - n_tot = 0 - for sub_c in sub_classes: - sub_files = os.listdir(os.path.join(path_classes,sub_c)) - n_tot = n_tot + len(sub_files) - return n_tot - - - -def generate_data_from_folder_evaluation(path_classes, height, width, n_classes, list_classes): - #sub_classes = os.listdir(path_classes) - #n_classes = len(sub_classes) - all_imgs = [] - labels = [] - #dicts =dict() - #indexer= 0 - for indexer, sub_c in enumerate(list_classes): - sub_files = os.listdir(os.path.join(path_classes,sub_c )) - sub_files = [os.path.join(path_classes,sub_c )+'/' + x for x in sub_files] - #print( os.listdir(os.path.join(path_classes,sub_c )) ) - all_imgs = all_imgs + sub_files - sub_labels = list( np.zeros( len(sub_files) ) +indexer ) - - #print( len(sub_labels) ) - labels = labels + sub_labels - #dicts[sub_c] = indexer - #indexer +=1 - - - categories = to_categorical(range(n_classes)).astype(np.int16)#[ [1 , 0, 0 , 0 , 0 , 0] , [0 , 1, 0 , 0 , 0 , 0] , [0 , 0, 1 , 0 , 0 , 0] , [0 , 0, 0 , 1 , 0 , 0] , [0 , 0, 0 , 0 , 1 , 0] , [0 , 0, 0 , 0 , 0 , 1] ] - ret_x= np.zeros((len(labels), height,width, 3)).astype(np.int16) - ret_y= np.zeros((len(labels), n_classes)).astype(np.int16) - - #print(all_imgs) - for i in range(len(all_imgs)): - row = all_imgs[i] - #####img = cv2.imread(row, 0) - #####img= resize_image (img, height, width) - #####img = img.astype(np.uint16) - #####ret_x[i, :,:,0] = img[:,:] - #####ret_x[i, :,:,1] = img[:,:] - #####ret_x[i, :,:,2] = img[:,:] - - img = cv2.imread(row) - img= resize_image (img, height, width) - img = img.astype(np.uint16) - ret_x[i, :,:] = img[:,:,:] - - ret_y[i, :] = categories[ int( labels[i] ) ][:] - - return ret_x/255., ret_y - -def generate_data_from_folder_training(path_classes, batchsize, height, width, n_classes, list_classes): - #sub_classes = os.listdir(path_classes) - #n_classes = len(sub_classes) - - all_imgs = [] - labels = [] - #dicts =dict() - #indexer= 0 - for indexer, sub_c in enumerate(list_classes): - sub_files = os.listdir(os.path.join(path_classes,sub_c )) - sub_files = [os.path.join(path_classes,sub_c )+'/' + x for x in sub_files] - #print( os.listdir(os.path.join(path_classes,sub_c )) ) - all_imgs = all_imgs + sub_files - sub_labels = list( np.zeros( len(sub_files) ) +indexer ) - - #print( len(sub_labels) ) - labels = labels + sub_labels - #dicts[sub_c] = indexer - #indexer +=1 - - ids = np.array(range(len(labels))) - random.shuffle(ids) - - shuffled_labels = np.array(labels)[ids] - shuffled_files = np.array(all_imgs)[ids] - categories = to_categorical(range(n_classes)).astype(np.int16)#[ [1 , 0, 0 , 0 , 0 , 0] , [0 , 1, 0 , 0 , 0 , 0] , [0 , 0, 1 , 0 , 0 , 0] , [0 , 0, 0 , 1 , 0 , 0] , [0 , 0, 0 , 0 , 1 , 0] , [0 , 0, 0 , 0 , 0 , 1] ] - ret_x= np.zeros((batchsize, height,width, 3)).astype(np.int16) - ret_y= np.zeros((batchsize, n_classes)).astype(np.int16) - batchcount = 0 - while True: - for i in range(len(shuffled_files)): - row = 
shuffled_files[i] - #print(row) - ###img = cv2.imread(row, 0) - ###img= resize_image (img, height, width) - ###img = img.astype(np.uint16) - ###ret_x[batchcount, :,:,0] = img[:,:] - ###ret_x[batchcount, :,:,1] = img[:,:] - ###ret_x[batchcount, :,:,2] = img[:,:] - - img = cv2.imread(row) - img= resize_image (img, height, width) - img = img.astype(np.uint16) - ret_x[batchcount, :,:,:] = img[:,:,:] - - #print(int(shuffled_labels[i]) ) - #print( categories[int(shuffled_labels[i])] ) - ret_y[batchcount, :] = categories[ int( shuffled_labels[i] ) ][:] - - batchcount+=1 - - if batchcount>=batchsize: - ret_x = ret_x/255. - yield ret_x, ret_y - ret_x= np.zeros((batchsize, height,width, 3)).astype(np.int16) - ret_y= np.zeros((batchsize, n_classes)).astype(np.int16) - batchcount = 0 - -def do_brightening(img_in_dir, factor): - im = Image.open(img_in_dir) - enhancer = ImageEnhance.Brightness(im) - out_img = enhancer.enhance(factor) - out_img = out_img.convert('RGB') - opencv_img = np.array(out_img) - opencv_img = opencv_img[:,:,::-1].copy() - return opencv_img - - -def bluring(img_in, kind): - if kind == 'gauss': - img_blur = cv2.GaussianBlur(img_in, (5, 5), 0) - elif kind == "median": - img_blur = cv2.medianBlur(img_in, 5) - elif kind == 'blur': - img_blur = cv2.blur(img_in, (5, 5)) - return img_blur - - -def elastic_transform(image, alpha, sigma, seedj, random_state=None): - """Elastic deformation of images as described in [Simard2003]_. - .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for - Convolutional Neural Networks applied to Visual Document Analysis", in - Proc. of the International Conference on Document Analysis and - Recognition, 2003. - """ - if random_state is None: - random_state = np.random.RandomState(seedj) - - shape = image.shape - dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha - dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha - dz = np.zeros_like(dx) - - x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2])) - indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1)) - - distored_image = map_coordinates(image, indices, order=1, mode='reflect') - return distored_image.reshape(image.shape) - - -def rotation_90(img): - img_rot = np.zeros((img.shape[1], img.shape[0], img.shape[2])) - img_rot[:, :, 0] = img[:, :, 0].T - img_rot[:, :, 1] = img[:, :, 1].T - img_rot[:, :, 2] = img[:, :, 2].T - return img_rot - - -def rotatedRectWithMaxArea(w, h, angle): - """ - Given a rectangle of size wxh that has been rotated by 'angle' (in - radians), computes the width and height of the largest possible - axis-aligned rectangle (maximal area) within the rotated rectangle. - """ - if w <= 0 or h <= 0: - return 0, 0 - - width_is_longer = w >= h - side_long, side_short = (w, h) if width_is_longer else (h, w) - - # since the solutions for angle, -angle and 180-angle are all the same, - # if suffices to look at the first quadrant and the absolute values of sin,cos: - sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle)) - if side_short <= 2. 
* sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10: - # half constrained case: two crop corners touch the longer side, - # the other two corners are on the mid-line parallel to the longer line - x = 0.5 * side_short - wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a) - else: - # fully constrained case: crop touches all 4 sides - cos_2a = cos_a * cos_a - sin_a * sin_a - wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a - - return wr, hr - - -def rotate_max_area(image, rotated, rotated_label, angle): - """ image: cv2 image matrix object - angle: in degree - """ - wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], - math.radians(angle)) - h, w, _ = rotated.shape - y1 = h // 2 - int(hr / 2) - y2 = y1 + int(hr) - x1 = w // 2 - int(wr / 2) - x2 = x1 + int(wr) - return rotated[y1:y2, x1:x2], rotated_label[y1:y2, x1:x2] - -def rotate_max_area_single_image(image, rotated, angle): - """ image: cv2 image matrix object - angle: in degree - """ - wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], - math.radians(angle)) - h, w, _ = rotated.shape - y1 = h // 2 - int(hr / 2) - y2 = y1 + int(hr) - x1 = w // 2 - int(wr / 2) - x2 = x1 + int(wr) - return rotated[y1:y2, x1:x2] - -def rotation_not_90_func(img, label, thetha): - rotated = imutils.rotate(img, thetha) - rotated_label = imutils.rotate(label, thetha) - return rotate_max_area(img, rotated, rotated_label, thetha) - - -def rotation_not_90_func_single_image(img, thetha): - rotated = imutils.rotate(img, thetha) - return rotate_max_area_single_image(img, rotated, thetha) - - -def color_images(seg, n_classes): - ann_u = range(n_classes) - if len(np.shape(seg)) == 3: - seg = seg[:, :, 0] - - seg_img = np.zeros((np.shape(seg)[0], np.shape(seg)[1], 3)).astype(float) - colors = sns.color_palette("hls", n_classes) - - for c in ann_u: - c = int(c) - segl = (seg == c) - seg_img[:, :, 0] += segl * (colors[c][0]) - seg_img[:, :, 1] += segl * (colors[c][1]) - seg_img[:, :, 2] += segl * (colors[c][2]) - return seg_img - - -def resize_image(seg_in, input_height, input_width): - return cv2.resize(seg_in, (input_width, input_height), interpolation=cv2.INTER_NEAREST) - - -def get_one_hot(seg, input_height, input_width, n_classes): - seg = seg[:, :, 0] - seg_f = np.zeros((input_height, input_width, n_classes)) - for j in range(n_classes): - seg_f[:, :, j] = (seg == j).astype(int) - return seg_f - - -def IoU(Yi, y_predi): - ## mean Intersection over Union - ## Mean IoU = TP/(FN + TP + FP) - - IoUs = [] - classes_true = np.unique(Yi) - for c in classes_true: - TP = np.sum((Yi == c) & (y_predi == c)) - FP = np.sum((Yi != c) & (y_predi == c)) - FN = np.sum((Yi == c) & (y_predi != c)) - IoU = TP / float(TP + FP + FN) - #print("class {:02.0f}: #TP={:6.0f}, #FP={:6.0f}, #FN={:5.0f}, IoU={:4.3f}".format(c, TP, FP, FN, IoU)) - IoUs.append(IoU) - mIoU = np.mean(IoUs) - #print("_________________") - #print("Mean IoU: {:4.3f}".format(mIoU)) - return mIoU - -def generate_arrays_from_folder_reading_order(classes_file_dir, modal_dir, batchsize, height, width, n_classes, thetha, augmentation=False): - all_labels_files = os.listdir(classes_file_dir) - ret_x= np.zeros((batchsize, height, width, 3))#.astype(np.int16) - ret_y= np.zeros((batchsize, n_classes)).astype(np.int16) - batchcount = 0 - while True: - for i in all_labels_files: - file_name = os.path.splitext(i)[0] - img = cv2.imread(os.path.join(modal_dir,file_name+'.png')) - - label_class = int( np.load(os.path.join(classes_file_dir,i)) ) - - 
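- # The three input channels are attenuated before being fed to the model
- # (blue and red are divided by 3, green by 5), matching the scaling used for
- # the augmented samples further below.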
ret_x[batchcount, :,:,0] = img[:,:,0]/3.0 - ret_x[batchcount, :,:,2] = img[:,:,2]/3.0 - ret_x[batchcount, :,:,1] = img[:,:,1]/5.0 - - ret_y[batchcount, :] = label_class - batchcount+=1 - if batchcount>=batchsize: - yield ret_x, ret_y - ret_x= np.zeros((batchsize, height, width, 3))#.astype(np.int16) - ret_y= np.zeros((batchsize, n_classes)).astype(np.int16) - batchcount = 0 - - if augmentation: - for thetha_i in thetha: - img_rot = rotation_not_90_func_single_image(img, thetha_i) - - img_rot = resize_image(img_rot, height, width) - - ret_x[batchcount, :,:,0] = img_rot[:,:,0]/3.0 - ret_x[batchcount, :,:,2] = img_rot[:,:,2]/3.0 - ret_x[batchcount, :,:,1] = img_rot[:,:,1]/5.0 - - ret_y[batchcount, :] = label_class - batchcount+=1 - if batchcount>=batchsize: - yield ret_x, ret_y - ret_x= np.zeros((batchsize, height, width, 3))#.astype(np.int16) - ret_y= np.zeros((batchsize, n_classes)).astype(np.int16) - batchcount = 0 - -def data_gen(img_folder, mask_folder, batch_size, input_height, input_width, n_classes, task='segmentation'): - c = 0 - n = [f for f in os.listdir(img_folder) if not f.startswith('.')] # os.listdir(img_folder) #List of training images - random.shuffle(n) - while True: - img = np.zeros((batch_size, input_height, input_width, 3)).astype('float') - mask = np.zeros((batch_size, input_height, input_width, n_classes)).astype('float') - - for i in range(c, c + batch_size): # initially from 0 to 16, c = 0. - try: - filename = os.path.splitext(n[i])[0] - - train_img = cv2.imread(img_folder + '/' + n[i]) / 255. - train_img = cv2.resize(train_img, (input_width, input_height), - interpolation=cv2.INTER_NEAREST) # Read an image from folder and resize - - img[i - c] = train_img # add to array - img[0], img[1], and so on. - if task == "segmentation" or task=="binarization": - train_mask = cv2.imread(mask_folder + '/' + filename + '.png') - train_mask = get_one_hot(resize_image(train_mask, input_height, input_width), input_height, input_width, - n_classes) - elif task == "enhancement": - train_mask = cv2.imread(mask_folder + '/' + filename + '.png')/255. 
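- # For the enhancement task the label is itself an image, so it is only
- # normalised to [0, 1] and resized; no one-hot encoding is applied.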
- train_mask = resize_image(train_mask, input_height, input_width) - - # train_mask = train_mask.reshape(224, 224, 1) # Add extra dimension for parity with train_img size [512 * 512 * 3] - - mask[i - c] = train_mask - except: - img[i - c] = np.ones((input_height, input_width, 3)).astype('float') - mask[i - c] = np.zeros((input_height, input_width, n_classes)).astype('float') - - c += batch_size - if c + batch_size >= len(os.listdir(img_folder)): - c = 0 - random.shuffle(n) - yield img, mask - - -def otsu_copy(img): - img_r = np.zeros(img.shape) - img1 = img[:, :, 0] - img2 = img[:, :, 1] - img3 = img[:, :, 2] - _, threshold1 = cv2.threshold(img1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) - _, threshold2 = cv2.threshold(img2, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) - _, threshold3 = cv2.threshold(img3, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) - img_r[:, :, 0] = threshold1 - img_r[:, :, 1] = threshold1 - img_r[:, :, 2] = threshold1 - return img_r - - -def get_patches(dir_img_f, dir_seg_f, img, label, height, width, indexer): - if img.shape[0] < height or img.shape[1] < width: - img, label = do_padding(img, label, height, width) - - img_h = img.shape[0] - img_w = img.shape[1] - - nxf = img_w / float(width) - nyf = img_h / float(height) - - if nxf > int(nxf): - nxf = int(nxf) + 1 - if nyf > int(nyf): - nyf = int(nyf) + 1 - - nxf = int(nxf) - nyf = int(nyf) - - for i in range(nxf): - for j in range(nyf): - index_x_d = i * width - index_x_u = (i + 1) * width - - index_y_d = j * height - index_y_u = (j + 1) * height - - if index_x_u > img_w: - index_x_u = img_w - index_x_d = img_w - width - if index_y_u > img_h: - index_y_u = img_h - index_y_d = img_h - height - - img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] - label_patch = label[index_y_d:index_y_u, index_x_d:index_x_u, :] - - cv2.imwrite(dir_img_f + '/img_' + str(indexer) + '.png', img_patch) - cv2.imwrite(dir_seg_f + '/img_' + str(indexer) + '.png', label_patch) - indexer += 1 - - return indexer - - -def do_padding_white(img): - img_org_h = img.shape[0] - img_org_w = img.shape[1] - - index_start_h = 4 - index_start_w = 4 - - img_padded = np.zeros((img.shape[0] + 2*index_start_h, img.shape[1]+ 2*index_start_w, img.shape[2])) + 255 - img_padded[index_start_h: index_start_h + img.shape[0], index_start_w: index_start_w + img.shape[1], :] = img[:, :, :] - - return img_padded.astype(float) - - -def do_degrading(img, scale): - img_org_h = img.shape[0] - img_org_w = img.shape[1] - - img_res = resize_image(img, int(img_org_h * scale), int(img_org_w * scale)) - - return resize_image(img_res, img_org_h, img_org_w) - - -def do_padding_black(img): - img_org_h = img.shape[0] - img_org_w = img.shape[1] - - index_start_h = 4 - index_start_w = 4 - - img_padded = np.zeros((img.shape[0] + 2*index_start_h, img.shape[1] + 2*index_start_w, img.shape[2])) - img_padded[index_start_h: index_start_h + img.shape[0], index_start_w: index_start_w + img.shape[1], :] = img[:, :, :] - - return img_padded.astype(float) - - -def do_padding_label(img): - img_org_h = img.shape[0] - img_org_w = img.shape[1] - - index_start_h = 4 - index_start_w = 4 - - img_padded = np.zeros((img.shape[0] + 2*index_start_h, img.shape[1] + 2*index_start_w, img.shape[2])) - img_padded[index_start_h: index_start_h + img.shape[0], index_start_w: index_start_w + img.shape[1], :] = img[:, :, :] - - return img_padded.astype(np.int16) - -def do_padding(img, label, height, width): - height_new=img.shape[0] - width_new=img.shape[1] - - h_start = 0 - w_start = 0 - - if 
img.shape[0] < height: - h_start = int(abs(height - img.shape[0]) / 2.) - height_new = height - - if img.shape[1] < width: - w_start = int(abs(width - img.shape[1]) / 2.) - width_new = width - - img_new = np.ones((height_new, width_new, img.shape[2])).astype(float) * 255 - label_new = np.zeros((height_new, width_new, label.shape[2])).astype(float) - - img_new[h_start:h_start + img.shape[0], w_start:w_start + img.shape[1], :] = np.copy(img[:, :, :]) - label_new[h_start:h_start + label.shape[0], w_start:w_start + label.shape[1], :] = np.copy(label[:, :, :]) - - return img_new,label_new - - -def get_patches_num_scale(dir_img_f, dir_seg_f, img, label, height, width, indexer, n_patches, scaler): - if img.shape[0] < height or img.shape[1] < width: - img, label = do_padding(img, label, height, width) - - img_h = img.shape[0] - img_w = img.shape[1] - - height_scale = int(height * scaler) - width_scale = int(width * scaler) - - - nxf = img_w / float(width_scale) - nyf = img_h / float(height_scale) - - if nxf > int(nxf): - nxf = int(nxf) + 1 - if nyf > int(nyf): - nyf = int(nyf) + 1 - - nxf = int(nxf) - nyf = int(nyf) - - for i in range(nxf): - for j in range(nyf): - index_x_d = i * width_scale - index_x_u = (i + 1) * width_scale - - index_y_d = j * height_scale - index_y_u = (j + 1) * height_scale - - if index_x_u > img_w: - index_x_u = img_w - index_x_d = img_w - width_scale - if index_y_u > img_h: - index_y_u = img_h - index_y_d = img_h - height_scale - - - img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] - label_patch = label[index_y_d:index_y_u, index_x_d:index_x_u, :] - - img_patch = resize_image(img_patch, height, width) - label_patch = resize_image(label_patch, height, width) - - cv2.imwrite(dir_img_f + '/img_' + str(indexer) + '.png', img_patch) - cv2.imwrite(dir_seg_f + '/img_' + str(indexer) + '.png', label_patch) - indexer += 1 - - return indexer - - -def get_patches_num_scale_new(dir_img_f, dir_seg_f, img, label, height, width, indexer, scaler): - img = resize_image(img, int(img.shape[0] * scaler), int(img.shape[1] * scaler)) - label = resize_image(label, int(label.shape[0] * scaler), int(label.shape[1] * scaler)) - - if img.shape[0] < height or img.shape[1] < width: - img, label = do_padding(img, label, height, width) - - img_h = img.shape[0] - img_w = img.shape[1] - - height_scale = int(height * 1) - width_scale = int(width * 1) - - nxf = img_w / float(width_scale) - nyf = img_h / float(height_scale) - - if nxf > int(nxf): - nxf = int(nxf) + 1 - if nyf > int(nyf): - nyf = int(nyf) + 1 - - nxf = int(nxf) - nyf = int(nyf) - - for i in range(nxf): - for j in range(nyf): - index_x_d = i * width_scale - index_x_u = (i + 1) * width_scale - - index_y_d = j * height_scale - index_y_u = (j + 1) * height_scale - - if index_x_u > img_w: - index_x_u = img_w - index_x_d = img_w - width_scale - if index_y_u > img_h: - index_y_u = img_h - index_y_d = img_h - height_scale - - img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] - label_patch = label[index_y_d:index_y_u, index_x_d:index_x_u, :] - - cv2.imwrite(dir_img_f + '/img_' + str(indexer) + '.png', img_patch) - cv2.imwrite(dir_seg_f + '/img_' + str(indexer) + '.png', label_patch) - indexer += 1 - - return indexer - - -def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow_train_imgs, - dir_flow_train_labels, input_height, input_width, blur_k, blur_aug, - padding_white, padding_black, flip_aug, binarization, adding_rgb_background, adding_rgb_foreground, add_red_textlines, channels_shuffling, scaling, 
shifting, degrading, - brightening, scales, degrade_scales, brightness, flip_index, shuffle_indexes, - scaling_bluring, scaling_brightness, scaling_binarization, rotation, - rotation_not_90, thetha, scaling_flip, task, augmentation=False, patches=False, dir_img_bin=None,number_of_backgrounds_per_image=None,list_all_possible_background_images=None, dir_rgb_backgrounds=None, dir_rgb_foregrounds=None, list_all_possible_foreground_rgbs=None): - - indexer = 0 - for im, seg_i in tqdm(zip(imgs_list_train, segs_list_train)): - img_name = os.path.splitext(im)[0] - if task == "segmentation" or task == "binarization": - dir_of_label_file = os.path.join(dir_seg, img_name + '.png') - elif task=="enhancement": - dir_of_label_file = os.path.join(dir_seg, im) - - if not patches: - cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', resize_image(cv2.imread(dir_img + '/' + im), input_height, input_width)) - cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) - indexer += 1 - - if augmentation: - if flip_aug: - for f_i in flip_index: - cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', - resize_image(cv2.flip(cv2.imread(dir_img+'/'+im),f_i),input_height,input_width) ) - - cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', - resize_image(cv2.flip(cv2.imread(dir_of_label_file), f_i), input_height, input_width)) - indexer += 1 - - if blur_aug: - for blur_i in blur_k: - cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', - (resize_image(bluring(cv2.imread(dir_img + '/' + im), blur_i), input_height, input_width))) - - cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', - resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) - indexer += 1 - if brightening: - for factor in brightness: - try: - cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', - (resize_image(do_brightening(dir_img + '/' +im, factor), input_height, input_width))) - - cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', - resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) - indexer += 1 - except: - pass - - if binarization: - - if dir_img_bin: - img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') - - cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', - resize_image(img_bin_corr, input_height, input_width)) - else: - cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', - resize_image(otsu_copy(cv2.imread(dir_img + '/' + im)), input_height, input_width)) - - cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', - resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) - indexer += 1 - - if degrading: - for degrade_scale_ind in degrade_scales: - cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', - (resize_image(do_degrading(cv2.imread(dir_img + '/' + im), degrade_scale_ind), input_height, input_width))) - - cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', - resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) - indexer += 1 - - if rotation_not_90: - for thetha_i in thetha: - img_max_rotated, label_max_rotated = rotation_not_90_func(cv2.imread(dir_img + '/'+im), - cv2.imread(dir_of_label_file), thetha_i) - - cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', resize_image(img_max_rotated, input_height, input_width)) - - cv2.imwrite(dir_flow_train_labels + '/img_' + 
str(indexer) + '.png', resize_image(label_max_rotated, input_height, input_width)) - indexer += 1 - - if channels_shuffling: - for shuffle_index in shuffle_indexes: - cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', - (resize_image(return_shuffled_channels(cv2.imread(dir_img + '/' + im), shuffle_index), input_height, input_width))) - - cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', - resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) - indexer += 1 - - if scaling: - for sc_ind in scales: - img_scaled, label_scaled = scale_image_for_no_patch(cv2.imread(dir_img + '/'+im), - cv2.imread(dir_of_label_file), sc_ind) - - cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', resize_image(img_scaled, input_height, input_width)) - cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', resize_image(label_scaled, input_height, input_width)) - indexer += 1 - if shifting: - shift_types = ['xpos', 'xmin', 'ypos', 'ymin', 'xypos', 'xymin'] - for st_ind in shift_types: - img_shifted, label_shifted = shift_image_and_label(cv2.imread(dir_img + '/'+im), - cv2.imread(dir_of_label_file), st_ind) - - cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', resize_image(img_shifted, input_height, input_width)) - cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', resize_image(label_shifted, input_height, input_width)) - indexer += 1 - - - if adding_rgb_background: - img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') - for i_n in range(number_of_backgrounds_per_image): - background_image_chosen_name = random.choice(list_all_possible_background_images) - img_rgb_background_chosen = cv2.imread(dir_rgb_backgrounds + '/' + background_image_chosen_name) - img_with_overlayed_background = return_binary_image_with_given_rgb_background(img_bin_corr, img_rgb_background_chosen) - - cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', resize_image(img_with_overlayed_background, input_height, input_width)) - cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', - resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) - - indexer += 1 - - if adding_rgb_foreground: - img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') - for i_n in range(number_of_backgrounds_per_image): - background_image_chosen_name = random.choice(list_all_possible_background_images) - foreground_rgb_chosen_name = random.choice(list_all_possible_foreground_rgbs) - - img_rgb_background_chosen = cv2.imread(dir_rgb_backgrounds + '/' + background_image_chosen_name) - foreground_rgb_chosen = np.load(dir_rgb_foregrounds + '/' + foreground_rgb_chosen_name) - - img_with_overlayed_background = return_binary_image_with_given_rgb_background_and_given_foreground_rgb(img_bin_corr, img_rgb_background_chosen, foreground_rgb_chosen) - - cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', resize_image(img_with_overlayed_background, input_height, input_width)) - cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', - resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) - - indexer += 1 - - if add_red_textlines: - img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') - img_red_context = return_image_with_red_elements(cv2.imread(dir_img + '/'+im), img_bin_corr) - - cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', resize_image(img_red_context, input_height, input_width)) - cv2.imwrite(dir_flow_train_labels + '/img_' + 
str(indexer) + '.png', - resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) - - indexer += 1 - - - - - if patches: - indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, - cv2.imread(dir_img + '/' + im), cv2.imread(dir_of_label_file), - input_height, input_width, indexer=indexer) - - if augmentation: - if rotation: - indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, - rotation_90(cv2.imread(dir_img + '/' + im)), - rotation_90(cv2.imread(dir_of_label_file)), - input_height, input_width, indexer=indexer) - - if rotation_not_90: - for thetha_i in thetha: - img_max_rotated, label_max_rotated = rotation_not_90_func(cv2.imread(dir_img + '/'+im), - cv2.imread(dir_of_label_file), thetha_i) - indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, - img_max_rotated, - label_max_rotated, - input_height, input_width, indexer=indexer) - - if channels_shuffling: - for shuffle_index in shuffle_indexes: - indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, - return_shuffled_channels(cv2.imread(dir_img + '/' + im), shuffle_index), - cv2.imread(dir_of_label_file), - input_height, input_width, indexer=indexer) - - if adding_rgb_background: - img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') - for i_n in range(number_of_backgrounds_per_image): - background_image_chosen_name = random.choice(list_all_possible_background_images) - img_rgb_background_chosen = cv2.imread(dir_rgb_backgrounds + '/' + background_image_chosen_name) - img_with_overlayed_background = return_binary_image_with_given_rgb_background(img_bin_corr, img_rgb_background_chosen) - - indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, - img_with_overlayed_background, - cv2.imread(dir_of_label_file), - input_height, input_width, indexer=indexer) - - - if adding_rgb_foreground: - img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') - for i_n in range(number_of_backgrounds_per_image): - background_image_chosen_name = random.choice(list_all_possible_background_images) - foreground_rgb_chosen_name = random.choice(list_all_possible_foreground_rgbs) - - img_rgb_background_chosen = cv2.imread(dir_rgb_backgrounds + '/' + background_image_chosen_name) - foreground_rgb_chosen = np.load(dir_rgb_foregrounds + '/' + foreground_rgb_chosen_name) - - img_with_overlayed_background = return_binary_image_with_given_rgb_background_and_given_foreground_rgb(img_bin_corr, img_rgb_background_chosen, foreground_rgb_chosen) - - indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, - img_with_overlayed_background, - cv2.imread(dir_of_label_file), - input_height, input_width, indexer=indexer) - - - if add_red_textlines: - img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') - img_red_context = return_image_with_red_elements(cv2.imread(dir_img + '/'+im), img_bin_corr) - - indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, - img_red_context, - cv2.imread(dir_of_label_file), - input_height, input_width, indexer=indexer) - - if flip_aug: - for f_i in flip_index: - indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, - cv2.flip(cv2.imread(dir_img + '/' + im), f_i), - cv2.flip(cv2.imread(dir_of_label_file), f_i), - input_height, input_width, indexer=indexer) - if blur_aug: - for blur_i in blur_k: - indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, - bluring(cv2.imread(dir_img + '/' + im), blur_i), - cv2.imread(dir_of_label_file), - input_height, input_width, indexer=indexer) - if padding_black: - indexer = 
get_patches(dir_flow_train_imgs, dir_flow_train_labels, - do_padding_black(cv2.imread(dir_img + '/' + im)), - do_padding_label(cv2.imread(dir_of_label_file)), - input_height, input_width, indexer=indexer) - - if padding_white: - indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, - do_padding_white(cv2.imread(dir_img + '/'+im)), - do_padding_label(cv2.imread(dir_of_label_file)), - input_height, input_width, indexer=indexer) - - if brightening: - for factor in brightness: - try: - indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, - do_brightening(dir_img + '/' +im, factor), - cv2.imread(dir_of_label_file), - input_height, input_width, indexer=indexer) - except: - pass - if scaling: - for sc_ind in scales: - indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, - cv2.imread(dir_img + '/' + im) , - cv2.imread(dir_of_label_file), - input_height, input_width, indexer=indexer, scaler=sc_ind) - - if degrading: - for degrade_scale_ind in degrade_scales: - indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, - do_degrading(cv2.imread(dir_img + '/' + im), degrade_scale_ind), - cv2.imread(dir_of_label_file), - input_height, input_width, indexer=indexer) - - if binarization: - if dir_img_bin: - img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') - - indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, - img_bin_corr, - cv2.imread(dir_of_label_file), - input_height, input_width, indexer=indexer) - - else: - indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, - otsu_copy(cv2.imread(dir_img + '/' + im)), - cv2.imread(dir_of_label_file), - input_height, input_width, indexer=indexer) - - if scaling_brightness: - for sc_ind in scales: - for factor in brightness: - try: - indexer = get_patches_num_scale_new(dir_flow_train_imgs, - dir_flow_train_labels, - do_brightening(dir_img + '/' + im, factor) - ,cv2.imread(dir_of_label_file) - ,input_height, input_width, indexer=indexer, scaler=sc_ind) - except: - pass - - if scaling_bluring: - for sc_ind in scales: - for blur_i in blur_k: - indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, - bluring(cv2.imread(dir_img + '/' + im), blur_i), - cv2.imread(dir_of_label_file), - input_height, input_width, indexer=indexer, scaler=sc_ind) - - if scaling_binarization: - for sc_ind in scales: - indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, - otsu_copy(cv2.imread(dir_img + '/' + im)), - cv2.imread(dir_of_label_file), - input_height, input_width, indexer=indexer, scaler=sc_ind) - - if scaling_flip: - for sc_ind in scales: - for f_i in flip_index: - indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, - cv2.flip( cv2.imread(dir_img + '/' + im), f_i), - cv2.flip(cv2.imread(dir_of_label_file), f_i), - input_height, input_width, indexer=indexer, scaler=sc_ind) diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py deleted file mode 100644 index f304db2..0000000 --- a/src/eynollah/utils/contour.py +++ /dev/null @@ -1,365 +0,0 @@ -from typing import Sequence, Union -from numbers import Number -from functools import partial -import itertools - -import cv2 -import numpy as np -from scipy.sparse.csgraph import minimum_spanning_tree -from shapely.geometry import Polygon, LineString -from shapely.geometry.polygon import orient -from shapely import set_precision -from shapely.ops import unary_union, nearest_points - -from .rotate import rotate_image, rotation_image_new - -def 
contours_in_same_horizon(cy_main_hor): - X1 = np.zeros((len(cy_main_hor), len(cy_main_hor))) - X2 = np.zeros((len(cy_main_hor), len(cy_main_hor))) - - X1[0::1, :] = cy_main_hor[:] - X2 = X1.T - - X_dif = np.abs(X2 - X1) - args_help = np.array(range(len(cy_main_hor))) - all_args = [] - for i in range(len(cy_main_hor)): - list_h = list(args_help[X_dif[i, :] <= 20]) - list_h.append(i) - if len(list_h) > 1: - all_args.append(list(set(list_h))) - return np.unique(np.array(all_args, dtype=object)) - -def find_contours_mean_y_diff(contours_main): - M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] - cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] - return np.mean(np.diff(np.sort(np.array(cy_main)))) - -def get_text_region_boxes_by_given_contours(contours): - return [cv2.boundingRect(contour) - for contour in contours] - -def filter_contours_area_of_image(image, contours, hierarchy, max_area=1.0, min_area=0.0, dilate=0): - found_polygons_early = [] - for jv, contour in enumerate(contours): - if len(contour) < 3: # A polygon cannot have less than 3 points - continue - - polygon = contour2polygon(contour, dilate=dilate) - area = polygon.area - if (area >= min_area * np.prod(image.shape[:2]) and - area <= max_area * np.prod(image.shape[:2]) and - hierarchy[0][jv][3] == -1): - found_polygons_early.append(polygon2contour(polygon)) - return found_polygons_early - -def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area=1.0, min_area=0.0, dilate=0): - found_polygons_early = [] - for jv, contour in enumerate(contours): - if len(contour) < 3: # A polygon cannot have less than 3 points - continue - - polygon = contour2polygon(contour, dilate=dilate) - # area = cv2.contourArea(contour) - area = polygon.area - ##print(np.prod(thresh.shape[:2])) - # Check that polygon has area greater than minimal area - # print(hierarchy[0][jv][3],hierarchy ) - if (area >= min_area * np.prod(image.shape[:2]) and - area <= max_area * np.prod(image.shape[:2]) and - # hierarchy[0][jv][3]==-1 - True): - # print(contour[0][0][1]) - found_polygons_early.append(polygon2contour(polygon)) - return found_polygons_early - -def find_center_of_contours(contours): - moments = [cv2.moments(contour) for contour in contours] - cx = [feat["m10"] / (feat["m00"] + 1e-32) - for feat in moments] - cy = [feat["m01"] / (feat["m00"] + 1e-32) - for feat in moments] - return cx, cy - -def find_new_features_of_contours(contours): - # areas = np.array([cv2.contourArea(contour) for contour in contours]) - cx, cy = find_center_of_contours(contours) - slice_x = np.index_exp[:, 0, 0] - slice_y = np.index_exp[:, 0, 1] - if any(contour.ndim < 3 for contour in contours): - slice_x = np.index_exp[:, 0] - slice_y = np.index_exp[:, 1] - x_min = np.array([np.min(contour[slice_x]) for contour in contours]) - x_max = np.array([np.max(contour[slice_x]) for contour in contours]) - y_min = np.array([np.min(contour[slice_y]) for contour in contours]) - y_max = np.array([np.max(contour[slice_y]) for contour in contours]) - # dis_x=np.abs(x_max-x_min) - y_corr_x_min = np.array([contour[np.argmin(contour[slice_x])][slice_y[1:]] - for contour in contours]) - - return cx, cy, x_min, x_max, y_min, y_max, y_corr_x_min - -def find_features_of_contours(contours): - y_min = np.array([np.min(contour[:,0,1]) for contour in contours]) - y_max = np.array([np.max(contour[:,0,1]) for contour in contours]) - - return y_min, y_max - -def return_parent_contours(contours, hierarchy): - contours_parent = 
[contours[i] - for i in range(len(contours)) - if hierarchy[0][i][3] == -1] - return contours_parent - -def return_contours_of_interested_region(region_pre_p, label, min_area=0.0002): - # pixels of images are identified by 5 - if region_pre_p.ndim == 3: - cnts_images = (region_pre_p[:, :, 0] == label) * 1 - else: - cnts_images = (region_pre_p[:, :] == label) * 1 - _, thresh = cv2.threshold(cnts_images.astype(np.uint8), 0, 255, 0) - - contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - contours_imgs = return_parent_contours(contours_imgs, hierarchy) - contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, - max_area=1, min_area=min_area) - return contours_imgs - -def do_work_of_contours_in_image(contour, index_r_con, img, slope_first): - img_copy = np.zeros(img.shape[:2], dtype=np.uint8) - img_copy = cv2.fillPoly(img_copy, pts=[contour], color=1) - - img_copy = rotation_image_new(img_copy, -slope_first) - _, thresh = cv2.threshold(img_copy, 0, 255, 0) - - cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - - cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1]) - cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0]) - - return cont_int[0], index_r_con - -def get_textregion_contours_in_org_image_multi(cnts, img, slope_first, map=map): - if not len(cnts): - return [], [] - results = map(partial(do_work_of_contours_in_image, - img=img, - slope_first=slope_first, - ), - cnts, range(len(cnts))) - return tuple(zip(*results)) - -def get_textregion_contours_in_org_image(cnts, img, slope_first): - cnts_org = [] - # print(cnts,'cnts') - for i in range(len(cnts)): - img_copy = np.zeros(img.shape[:2], dtype=np.uint8) - img_copy = cv2.fillPoly(img_copy, pts=[cnts[i]], color=1) - - # plt.imshow(img_copy) - # plt.show() - - # print(img.shape,'img') - img_copy = rotation_image_new(img_copy, -slope_first) - ##print(img_copy.shape,'img_copy') - # plt.imshow(img_copy) - # plt.show() - - _, thresh = cv2.threshold(img_copy, 0, 255, 0) - - cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1]) - cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0]) - # print(np.shape(cont_int[0])) - cnts_org.append(cont_int[0]) - - return cnts_org - -def get_textregion_contours_in_org_image_light_old(cnts, img, slope_first): - zoom = 3 - img = cv2.resize(img, (img.shape[1] // zoom, - img.shape[0] // zoom), - interpolation=cv2.INTER_NEAREST) - cnts_org = [] - for cnt in cnts: - img_copy = np.zeros(img.shape[:2], dtype=np.uint8) - img_copy = cv2.fillPoly(img_copy, pts=[cnt // zoom], color=1) - - img_copy = rotation_image_new(img_copy, -slope_first).astype(np.uint8) - _, thresh = cv2.threshold(img_copy, 0, 255, 0) - - cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1]) - cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0]) - cnts_org.append(cont_int[0] * zoom) - - return cnts_org - -def do_back_rotation_and_get_cnt_back(contour_par, index_r_con, img, slope_first, confidence_matrix): - img_copy = np.zeros(img.shape[:2], dtype=np.uint8) - img_copy = cv2.fillPoly(img_copy, pts=[contour_par], color=1) - confidence_matrix_mapped_with_contour = confidence_matrix * img_copy - 
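- # Mean confidence inside the contour: the sum of the masked confidence map
- # divided by the number of pixels in the contour mask.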
confidence_contour = np.sum(confidence_matrix_mapped_with_contour) / float(np.sum(img_copy)) - - img_copy = rotation_image_new(img_copy, -slope_first).astype(np.uint8) - _, thresh = cv2.threshold(img_copy, 0, 255, 0) - - cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - if len(cont_int)==0: - cont_int = [contour_par] - confidence_contour = 0 - else: - cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1]) - cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0]) - return cont_int[0], index_r_con, confidence_contour - -def get_textregion_contours_in_org_image_light(cnts, img, confidence_matrix): - if not len(cnts): - return [] - - confidence_matrix = cv2.resize(confidence_matrix, - (img.shape[1] // 6, img.shape[0] // 6), - interpolation=cv2.INTER_NEAREST) - confs = [] - for cnt in cnts: - cnt_mask = np.zeros(confidence_matrix.shape) - cnt_mask = cv2.fillPoly(cnt_mask, pts=[cnt // 6], color=1.0) - confs.append(np.sum(confidence_matrix * cnt_mask) / np.sum(cnt_mask)) - return confs - -def return_contours_of_interested_textline(region_pre_p, label): - # pixels of images are identified by 5 - if region_pre_p.ndim == 3: - cnts_images = (region_pre_p[:, :, 0] == label) * 1 - else: - cnts_images = (region_pre_p[:, :] == label) * 1 - _, thresh = cv2.threshold(cnts_images.astype(np.uint8), 0, 255, 0) - contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - - contours_imgs = return_parent_contours(contours_imgs, hierarchy) - contours_imgs = filter_contours_area_of_image_tables( - thresh, contours_imgs, hierarchy, max_area=1, min_area=0.000000003) - return contours_imgs - -def return_contours_of_image(image): - if len(image.shape) == 2: - image = image.astype(np.uint8) - imgray = image - else: - image = image.astype(np.uint8) - imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) - _, thresh = cv2.threshold(imgray, 0, 255, 0) - contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - return contours, hierarchy - -def dilate_textline_contours(all_found_textline_polygons): - return [[polygon2contour(contour2polygon(contour, dilate=6)) - for contour in region] - for region in all_found_textline_polygons] - -def dilate_textregion_contours(all_found_textline_polygons): - return [polygon2contour(contour2polygon(contour, dilate=6)) - for contour in all_found_textline_polygons] - -def contour2polygon(contour: Union[np.ndarray, Sequence[Sequence[Sequence[Number]]]], dilate=0): - polygon = Polygon([point[0] for point in contour]) - if dilate: - polygon = polygon.buffer(dilate) - if polygon.geom_type == 'GeometryCollection': - # heterogeneous result: filter zero-area shapes (LineString, Point) - polygon = unary_union([geom for geom in polygon.geoms if geom.area > 0]) - if polygon.geom_type == 'MultiPolygon': - # homogeneous result: construct convex hull to connect - polygon = join_polygons(polygon.geoms) - return make_valid(polygon) - -def polygon2contour(polygon: Polygon) -> np.ndarray: - polygon = np.array(polygon.exterior.coords[:-1], dtype=int) - return np.maximum(0, polygon).astype(int)[:, np.newaxis] - -def make_intersection(poly1, poly2): - interp = poly1.intersection(poly2) - # post-process - if interp.is_empty or interp.area == 0.0: - return None - if interp.geom_type == 'GeometryCollection': - # heterogeneous result: filter zero-area shapes (LineString, Point) - interp = unary_union([geom for geom in interp.geoms if geom.area > 0]) - if 
interp.geom_type == 'MultiPolygon': - # homogeneous result: construct convex hull to connect - interp = join_polygons(interp.geoms) - assert interp.geom_type == 'Polygon', interp.wkt - interp = make_valid(interp) - return interp - -def make_valid(polygon: Polygon) -> Polygon: - """Ensures shapely.geometry.Polygon object is valid by repeated rearrangement/simplification/enlargement.""" - def isint(x): - return isinstance(x, int) or int(x) == x - # make sure rounding does not invalidate - if not all(map(isint, np.array(polygon.exterior.coords).flat)) and polygon.minimum_clearance < 1.0: - polygon = Polygon(np.round(polygon.exterior.coords)) - points = list(polygon.exterior.coords[:-1]) - # try by re-arranging points - for split in range(1, len(points)): - if polygon.is_valid or polygon.simplify(polygon.area).is_valid: - break - # simplification may not be possible (at all) due to ordering - # in that case, try another starting point - polygon = Polygon(points[-split:]+points[:-split]) - # try by simplification - for tolerance in range(int(polygon.area + 1.5)): - if polygon.is_valid: - break - # simplification may require a larger tolerance - polygon = polygon.simplify(tolerance + 1) - # try by enlarging - for tolerance in range(1, int(polygon.area + 2.5)): - if polygon.is_valid: - break - # enlargement may require a larger tolerance - polygon = polygon.buffer(tolerance) - assert polygon.is_valid, polygon.wkt - return polygon - -def join_polygons(polygons: Sequence[Polygon], scale=20) -> Polygon: - """construct concave hull (alpha shape) from input polygons by connecting their pairwise nearest points""" - # ensure input polygons are simply typed and all oriented equally - polygons = [orient(poly) - for poly in itertools.chain.from_iterable( - [poly.geoms - if poly.geom_type in ['MultiPolygon', 'GeometryCollection'] - else [poly] - for poly in polygons])] - npoly = len(polygons) - if npoly == 1: - return polygons[0] - # find min-dist path through all polygons (travelling salesman) - pairs = itertools.combinations(range(npoly), 2) - dists = np.zeros((npoly, npoly), dtype=float) - for i, j in pairs: - dist = polygons[i].distance(polygons[j]) - if dist < 1e-5: - dist = 1e-5 # if pair merely touches, we still need to get an edge - dists[i, j] = dist - dists[j, i] = dist - dists = minimum_spanning_tree(dists, overwrite=True) - # add bridge polygons (where necessary) - for prevp, nextp in zip(*dists.nonzero()): - prevp = polygons[prevp] - nextp = polygons[nextp] - nearest = nearest_points(prevp, nextp) - bridgep = orient(LineString(nearest).buffer(max(1, scale/5), resolution=1), -1) - polygons.append(bridgep) - jointp = unary_union(polygons) - if jointp.geom_type == 'MultiPolygon': - jointp = unary_union(jointp.geoms) - assert jointp.geom_type == 'Polygon', jointp.wkt - # follow-up calculations will necessarily be integer; - # so anticipate rounding here and then ensure validity - jointp2 = set_precision(jointp, 1.0) - if jointp2.geom_type != 'Polygon' or not jointp2.is_valid: - jointp2 = Polygon(np.round(jointp.exterior.coords)) - jointp2 = make_valid(jointp2) - assert jointp2.geom_type == 'Polygon', jointp2.wkt - return jointp2 diff --git a/src/eynollah/utils/shm.py b/src/eynollah/utils/shm.py deleted file mode 100644 index 4b51053..0000000 --- a/src/eynollah/utils/shm.py +++ /dev/null @@ -1,45 +0,0 @@ -from multiprocessing import shared_memory -from contextlib import contextmanager -from functools import wraps -import numpy as np - -@contextmanager -def share_ndarray(array: np.ndarray): - size = 
np.dtype(array.dtype).itemsize * np.prod(array.shape)
-    shm = shared_memory.SharedMemory(create=True, size=size)
-    try:
-        shared_array = np.ndarray(array.shape, dtype=array.dtype, buffer=shm.buf)
-        shared_array[:] = array[:]
-        shared_array.flags["WRITEABLE"] = False
-        yield dict(shape=array.shape, dtype=array.dtype, name=shm.name)
-    finally:
-        shm.close()
-        shm.unlink()
-
-@contextmanager
-def ndarray_shared(array: dict):
-    shm = shared_memory.SharedMemory(name=array['name'])
-    try:
-        array = np.ndarray(array['shape'], dtype=array['dtype'], buffer=shm.buf)
-        yield array
-    finally:
-        shm.close()
-
-def wrap_ndarray_shared(kw=None):
-    def wrapper(f):
-        if kw is None:
-            @wraps(f)
-            def shared_func(array, *args, **kwargs):
-                with ndarray_shared(array) as ndarray:
-                    return f(ndarray, *args, **kwargs)
-            return shared_func
-        else:
-            @wraps(f)
-            def shared_func(*args, **kwargs):
-                array = kwargs.pop(kw)
-                with ndarray_shared(array) as ndarray:
-                    kwargs[kw] = ndarray
-                    return f(*args, **kwargs)
-            return shared_func
-    return wrapper
-
diff --git a/src/eynollah/utils/utils_ocr.py b/src/eynollah/utils/utils_ocr.py
deleted file mode 100644
index 6e71b0f..0000000
--- a/src/eynollah/utils/utils_ocr.py
+++ /dev/null
@@ -1,510 +0,0 @@
-import math
-import copy
-
-import numpy as np
-import cv2
-import tensorflow as tf
-from scipy.signal import find_peaks
-from scipy.ndimage import gaussian_filter1d
-from PIL import Image, ImageDraw, ImageFont
-from Bio import pairwise2
-
-from .resize import resize_image
-
-
-def decode_batch_predictions(pred, num_to_char, max_len = 128):
-    # input_len is the product of the batch size and the
-    # number of time steps.
-    input_len = np.ones(pred.shape[0]) * pred.shape[1]
-
-    # Decode CTC predictions using greedy search.
-    # decoded is a tuple with 2 elements.
-    decoded = tf.keras.backend.ctc_decode(pred,
-                                          input_length = input_len,
-                                          beam_width = 100)
-    # The outputs are in the first element of the tuple.
-    # Additionally, the first element is actually a list,
-    # therefore we take the first element of that list as well.
-    #print(decoded,'decoded')
-    decoded = decoded[0][0][:, :max_len]
-
-    #print(decoded, decoded.shape,'decoded')
-
-    output = []
-    for d in decoded:
-        # Convert the predicted indices to the corresponding chars.
-        d = tf.strings.reduce_join(num_to_char(d))
-        d = d.numpy().decode("utf-8")
-        output.append(d)
-    return output
-
-
-def distortion_free_resize(image, img_size):
-    w, h = img_size
-    image = tf.image.resize(image, size=(h, w), preserve_aspect_ratio=True)
-
-    # Check the amount of padding needed to be done.
-    pad_height = h - tf.shape(image)[0]
-    pad_width = w - tf.shape(image)[1]
-
-    # Only necessary if you want to do same amount of padding on both sides.
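# [Editor's sketch, not part of the original file] The odd/even split below is
# easiest to see with concrete numbers (illustrative values only):
#
#     pad_width = 27                 # e.g. target w=128, resized width 101
#     width = pad_width // 2         # 13
#     pad_width_left = width + 1     # 14 -- the extra pixel goes to the left
#     pad_width_right = width        # 13
#     assert pad_width_left + pad_width_right == pad_width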
-    if pad_height % 2 != 0:
-        height = pad_height // 2
-        pad_height_top = height + 1
-        pad_height_bottom = height
-    else:
-        pad_height_top = pad_height_bottom = pad_height // 2
-
-    if pad_width % 2 != 0:
-        width = pad_width // 2
-        pad_width_left = width + 1
-        pad_width_right = width
-    else:
-        pad_width_left = pad_width_right = pad_width // 2
-
-    image = tf.pad(
-        image,
-        paddings=[
-            [pad_height_top, pad_height_bottom],
-            [pad_width_left, pad_width_right],
-            [0, 0],
-        ],
-    )
-
-    image = tf.transpose(image, (1, 0, 2))
-    image = tf.image.flip_left_right(image)
-    return image
-
-def return_start_and_end_of_common_text_of_textline_ocr_without_common_section(textline_image):
-    width = np.shape(textline_image)[1]
-    height = np.shape(textline_image)[0]
-    common_window = int(0.06*width)
-
-    width1 = int ( width/2. - common_window )
-    width2 = int ( width/2. + common_window )
-
-    img_sum = np.sum(textline_image[:,:,0], axis=0)
-    sum_smoothed = gaussian_filter1d(img_sum, 3)
-
-    peaks_real, _ = find_peaks(sum_smoothed, height=0)
-    if len(peaks_real)>70:
-
-        peaks_real = peaks_real[(peaks_real < width2) & (peaks_real > width1)]
-
-        arg_max = np.argmax(sum_smoothed[peaks_real])
-        peaks_final = peaks_real[arg_max]
-        return peaks_final
-    else:
-        return None
-
-# Function to fit text inside the given area
-def fit_text_single_line(draw, text, font_path, max_width, max_height):
-    initial_font_size = 50
-    font_size = initial_font_size
-    while font_size > 10:  # Minimum font size
-        font = ImageFont.truetype(font_path, font_size)
-        text_bbox = draw.textbbox((0, 0), text, font=font)  # Get text bounding box
-        text_width = text_bbox[2] - text_bbox[0]
-        text_height = text_bbox[3] - text_bbox[1]
-
-        if text_width <= max_width and text_height <= max_height:
-            return font  # Return the best-fitting font
-
-        font_size -= 2  # Reduce font size and retry
-
-    return ImageFont.truetype(font_path, 10)  # Smallest font fallback
-
-def return_textlines_split_if_needed(textline_image, textline_image_bin=None):
-
-    split_point = return_start_and_end_of_common_text_of_textline_ocr_without_common_section(textline_image)
-    if split_point:
-        image1 = textline_image[:, :split_point,:]  # image.crop((0, 0, width2, height))
-        image2 = textline_image[:, split_point:,:]  # image.crop((width1, 0, width, height))
-        if textline_image_bin is not None:
-            image1_bin = textline_image_bin[:, :split_point,:]  # image.crop((0, 0, width2, height))
-            image2_bin = textline_image_bin[:, split_point:,:]  # image.crop((width1, 0, width, height))
-            return [image1, image2], [image1_bin, image2_bin]
-        else:
-            return [image1, image2], None
-    else:
-        return None, None
-
-def preprocess_and_resize_image_for_ocrcnn_model(img, image_height, image_width):
-    if img.shape[0]==0 or img.shape[1]==0:
-        img_fin = np.ones((image_height, image_width, 3))
-    else:
-        ratio = image_height /float(img.shape[0])
-        w_ratio = int(ratio * img.shape[1])
-
-        if w_ratio <= image_width:
-            width_new = w_ratio
-        else:
-            width_new = image_width
-
-        if width_new == 0:
-            width_new = img.shape[1]
-
-        img = resize_image(img, image_height, width_new)
-        img_fin = np.ones((image_height, image_width, 3))*255
-
-        img_fin[:,:width_new,:] = img[:,:,:]
-        img_fin = img_fin / 255.
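# [Editor's sketch, not part of the original file] End-to-end, the function
# returns a fixed-size float canvas in [0, 1]. With hypothetical sizes: a
# 64x200 crop and image_height=32, image_width=512 gives ratio 0.5 and
# width_new=100, so the crop is resized to 32x100, pasted into a white
# 32x512 canvas, and columns 100..511 stay white (1.0 after the division):
#
#     img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, 32, 512)
#     # img_fin.shape == (32, 512, 3) and img_fin.max() <= 1.0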
-    return img_fin
-
-def get_deskewed_contour_and_bb_and_image(contour, image, deskew_angle):
-    (h_in, w_in) = image.shape[:2]
-    center = (w_in // 2, h_in // 2)
-
-    rotation_matrix = cv2.getRotationMatrix2D(center, deskew_angle, 1.0)
-
-    cos_angle = abs(rotation_matrix[0, 0])
-    sin_angle = abs(rotation_matrix[0, 1])
-    new_w = int((h_in * sin_angle) + (w_in * cos_angle))
-    new_h = int((h_in * cos_angle) + (w_in * sin_angle))
-
-    rotation_matrix[0, 2] += (new_w / 2) - center[0]
-    rotation_matrix[1, 2] += (new_h / 2) - center[1]
-
-    deskewed_image = cv2.warpAffine(image, rotation_matrix, (new_w, new_h))
-
-    contour_points = np.array(contour, dtype=np.float32)
-    transformed_points = cv2.transform(np.array([contour_points]), rotation_matrix)[0]
-
-    x, y, w, h = cv2.boundingRect(np.array(transformed_points, dtype=np.int32))
-    cropped_textline = deskewed_image[y:y+h, x:x+w]
-
-    return cropped_textline
-
-def rotate_image_with_padding(image, angle, border_value=(0,0,0)):
-    # Get image dimensions
-    (h, w) = image.shape[:2]
-
-    # Calculate the center of the image
-    center = (w // 2, h // 2)
-
-    # Get the rotation matrix
-    rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
-
-    # Compute the new bounding dimensions
-    cos = abs(rotation_matrix[0, 0])
-    sin = abs(rotation_matrix[0, 1])
-    new_w = int((h * sin) + (w * cos))
-    new_h = int((h * cos) + (w * sin))
-
-    # Adjust the rotation matrix to account for translation
-    rotation_matrix[0, 2] += (new_w / 2) - center[0]
-    rotation_matrix[1, 2] += (new_h / 2) - center[1]
-
-    # Perform the rotation
-    try:
-        rotated_image = cv2.warpAffine(image, rotation_matrix, (new_w, new_h), borderValue=border_value)
-    except:
-        rotated_image = np.copy(image)
-
-    return rotated_image
-
-def get_orientation_moments(contour):
-    moments = cv2.moments(contour)
-    if moments["mu20"] - moments["mu02"] == 0:  # Avoid division by zero
-        return 90 if moments["mu11"] > 0 else -90
-    else:
-        angle = 0.5 * np.arctan2(2 * moments["mu11"], moments["mu20"] - moments["mu02"])
-        return np.degrees(angle)  # Convert radians to degrees
-
-
-def get_orientation_moments_of_mask(mask):
-    mask = mask.astype('uint8')
-    contours, _ = cv2.findContours(mask[:,:,0], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-
-    largest_contour = max(contours, key=cv2.contourArea) if contours else None
-
-    moments = cv2.moments(largest_contour)
-    if moments["mu20"] - moments["mu02"] == 0:  # Avoid division by zero
-        return 90 if moments["mu11"] > 0 else -90
-    else:
-        angle = 0.5 * np.arctan2(2 * moments["mu11"], moments["mu20"] - moments["mu02"])
-        return np.degrees(angle)  # Convert radians to degrees
-
-def get_contours_and_bounding_boxes(mask):
-    # Find contours in the binary mask
-    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-
-    largest_contour = max(contours, key=cv2.contourArea) if contours else None
-
-    # Get the bounding rectangle for the contour
-    x, y, w, h = cv2.boundingRect(largest_contour)
-    #bounding_boxes.append((x, y, w, h))
-
-    return x, y, w, h
-
-def return_splitting_point_of_image(image_to_spliited):
-    width = np.shape(image_to_spliited)[1]
-    height = np.shape(image_to_spliited)[0]
-    common_window = int(0.03*width)
-
-    width1 = int ( common_window)
-    width2 = int ( width - common_window )
-
-    img_sum = np.sum(image_to_spliited[:,:,0], axis=0)
-    sum_smoothed = gaussian_filter1d(img_sum, 1)
-
-    peaks_real, _ = find_peaks(sum_smoothed, height=0)
-    peaks_real = peaks_real[(peaks_real < width2) & (peaks_real > width1)]
-
-    arg_sort = np.argsort(sum_smoothed[peaks_real])
-    peaks_sort_4 =
peaks_real[arg_sort][::-1][:3] - - return np.sort(peaks_sort_4) - -def break_curved_line_into_small_pieces_and_then_merge(img_curved, mask_curved, img_bin_curved=None): - peaks_4 = return_splitting_point_of_image(img_curved) - if len(peaks_4)>0: - imgs_tot = [] - - for ind in range(len(peaks_4)+1): - if ind==0: - img = img_curved[:, :peaks_4[ind], :] - if img_bin_curved is not None: - img_bin = img_bin_curved[:, :peaks_4[ind], :] - mask = mask_curved[:, :peaks_4[ind], :] - elif ind==len(peaks_4): - img = img_curved[:, peaks_4[ind-1]:, :] - if img_bin_curved is not None: - img_bin = img_bin_curved[:, peaks_4[ind-1]:, :] - mask = mask_curved[:, peaks_4[ind-1]:, :] - else: - img = img_curved[:, peaks_4[ind-1]:peaks_4[ind], :] - if img_bin_curved is not None: - img_bin = img_bin_curved[:, peaks_4[ind-1]:peaks_4[ind], :] - mask = mask_curved[:, peaks_4[ind-1]:peaks_4[ind], :] - - or_ma = get_orientation_moments_of_mask(mask) - - if img_bin_curved is not None: - imgs_tot.append([img, mask, or_ma, img_bin] ) - else: - imgs_tot.append([img, mask, or_ma] ) - - - w_tot_des_list = [] - w_tot_des = 0 - imgs_deskewed_list = [] - imgs_bin_deskewed_list = [] - - for ind in range(len(imgs_tot)): - img_in = imgs_tot[ind][0] - mask_in = imgs_tot[ind][1] - ori_in = imgs_tot[ind][2] - if img_bin_curved is not None: - img_bin_in = imgs_tot[ind][3] - - if abs(ori_in)<45: - img_in_des = rotate_image_with_padding(img_in, ori_in, border_value=(255,255,255) ) - if img_bin_curved is not None: - img_bin_in_des = rotate_image_with_padding(img_bin_in, ori_in, border_value=(255,255,255) ) - mask_in_des = rotate_image_with_padding(mask_in, ori_in) - mask_in_des = mask_in_des.astype('uint8') - - #new bounding box - x_n, y_n, w_n, h_n = get_contours_and_bounding_boxes(mask_in_des[:,:,0]) - - if w_n==0 or h_n==0: - img_in_des = np.copy(img_in) - if img_bin_curved is not None: - img_bin_in_des = np.copy(img_bin_in) - w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) - if w_relative==0: - w_relative = img_in_des.shape[1] - img_in_des = resize_image(img_in_des, 32, w_relative) - if img_bin_curved is not None: - img_bin_in_des = resize_image(img_bin_in_des, 32, w_relative) - else: - mask_in_des = mask_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] - img_in_des = img_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] - if img_bin_curved is not None: - img_bin_in_des = img_bin_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] - - w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) - if w_relative==0: - w_relative = img_in_des.shape[1] - img_in_des = resize_image(img_in_des, 32, w_relative) - if img_bin_curved is not None: - img_bin_in_des = resize_image(img_bin_in_des, 32, w_relative) - - - else: - img_in_des = np.copy(img_in) - if img_bin_curved is not None: - img_bin_in_des = np.copy(img_bin_in) - w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) - if w_relative==0: - w_relative = img_in_des.shape[1] - img_in_des = resize_image(img_in_des, 32, w_relative) - if img_bin_curved is not None: - img_bin_in_des = resize_image(img_bin_in_des, 32, w_relative) - - w_tot_des+=img_in_des.shape[1] - w_tot_des_list.append(img_in_des.shape[1]) - imgs_deskewed_list.append(img_in_des) - if img_bin_curved is not None: - imgs_bin_deskewed_list.append(img_bin_in_des) - - - - - img_final_deskewed = np.zeros((32, w_tot_des, 3))+255 - if img_bin_curved is not None: - img_bin_final_deskewed = np.zeros((32, w_tot_des, 3))+255 - else: - img_bin_final_deskewed = None - - w_indexer = 0 - for ind in range(len(w_tot_des_list)): - 
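# [Editor's note, not part of the original file] This loop stitches the
# deskewed pieces back together side by side at the running offset w_indexer:
# e.g. piece widths [40, 70, 55] fill columns 0:40, 40:110 and 110:165 of the
# 32-pixel-high white canvas allocated above.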
img_final_deskewed[:,w_indexer:w_indexer+w_tot_des_list[ind],:] = imgs_deskewed_list[ind][:,:,:] - if img_bin_curved is not None: - img_bin_final_deskewed[:,w_indexer:w_indexer+w_tot_des_list[ind],:] = imgs_bin_deskewed_list[ind][:,:,:] - w_indexer = w_indexer+w_tot_des_list[ind] - return img_final_deskewed, img_bin_final_deskewed - else: - return img_curved, img_bin_curved - -def return_textline_contour_with_added_box_coordinate(textline_contour, box_ind): - textline_contour[:,0] = textline_contour[:,0] + box_ind[2] - textline_contour[:,1] = textline_contour[:,1] + box_ind[0] - return textline_contour - - -def return_rnn_cnn_ocr_of_given_textlines(image, - all_found_textline_polygons, - all_box_coord, - prediction_model, - b_s_ocr, num_to_char, - textline_light=False, - curved_line=False): - max_len = 512 - padding_token = 299 - image_width = 512#max_len * 4 - image_height = 32 - ind_tot = 0 - #cv2.imwrite('./img_out.png', image_page) - ocr_all_textlines = [] - cropped_lines_region_indexer = [] - cropped_lines_meging_indexing = [] - cropped_lines = [] - indexer_text_region = 0 - - for indexing, ind_poly_first in enumerate(all_found_textline_polygons): - #ocr_textline_in_textregion = [] - if len(ind_poly_first)==0: - cropped_lines_region_indexer.append(indexer_text_region) - cropped_lines_meging_indexing.append(0) - img_fin = np.ones((image_height, image_width, 3))*1 - cropped_lines.append(img_fin) - - else: - for indexing2, ind_poly in enumerate(ind_poly_first): - cropped_lines_region_indexer.append(indexer_text_region) - if not (textline_light or curved_line): - ind_poly = copy.deepcopy(ind_poly) - box_ind = all_box_coord[indexing] - - ind_poly = return_textline_contour_with_added_box_coordinate(ind_poly, box_ind) - #print(ind_poly_copy) - ind_poly[ind_poly<0] = 0 - x, y, w, h = cv2.boundingRect(ind_poly) - - w_scaled = w * image_height/float(h) - - mask_poly = np.zeros(image.shape) - - img_poly_on_img = np.copy(image) - - mask_poly = cv2.fillPoly(mask_poly, pts=[ind_poly], color=(1, 1, 1)) - - - - mask_poly = mask_poly[y:y+h, x:x+w, :] - img_crop = img_poly_on_img[y:y+h, x:x+w, :] - - img_crop[mask_poly==0] = 255 - - if w_scaled < 640:#1.5*image_width: - img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(0) - else: - splited_images, splited_images_bin = return_textlines_split_if_needed(img_crop, None) - - if splited_images: - img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[0], - image_height, - image_width) - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(1) - - img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[1], - image_height, - image_width) - - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(-1) - - else: - img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, - image_height, - image_width) - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(0) - - indexer_text_region+=1 - - extracted_texts = [] - - n_iterations = math.ceil(len(cropped_lines) / b_s_ocr) - - for i in range(n_iterations): - if i==(n_iterations-1): - n_start = i*b_s_ocr - imgs = cropped_lines[n_start:] - imgs = np.array(imgs) - imgs = imgs.reshape(imgs.shape[0], image_height, image_width, 3) - - - else: - n_start = i*b_s_ocr - n_end = (i+1)*b_s_ocr - imgs = cropped_lines[n_start:n_end] - imgs = np.array(imgs).reshape(b_s_ocr, image_height, image_width, 3) - - - preds = 
prediction_model.predict(imgs, verbose=0) - - pred_texts = decode_batch_predictions(preds, num_to_char) - - for ib in range(imgs.shape[0]): - pred_texts_ib = pred_texts[ib].replace("[UNK]", "") - extracted_texts.append(pred_texts_ib) - - extracted_texts_merged = [extracted_texts[ind] - if cropped_lines_meging_indexing[ind]==0 - else extracted_texts[ind]+" "+extracted_texts[ind+1] - if cropped_lines_meging_indexing[ind]==1 - else None - for ind in range(len(cropped_lines_meging_indexing))] - - extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] - unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) - - ocr_all_textlines = [] - for ind in unique_cropped_lines_region_indexer: - ocr_textline_in_textregion = [] - extracted_texts_merged_un = np.array(extracted_texts_merged)[np.array(cropped_lines_region_indexer)==ind] - for it_ind, text_textline in enumerate(extracted_texts_merged_un): - ocr_textline_in_textregion.append(text_textline) - ocr_all_textlines.append(ocr_textline_in_textregion) - return ocr_all_textlines - -def biopython_align(str1, str2): - alignments = pairwise2.align.globalms(str1, str2, 2, -1, -2, -2) - best_alignment = alignments[0] # Get the best alignment - return best_alignment.seqA, best_alignment.seqB diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py deleted file mode 100644 index 9c3456a..0000000 --- a/src/eynollah/writer.py +++ /dev/null @@ -1,253 +0,0 @@ -# pylint: disable=too-many-locals,wrong-import-position,too-many-lines,too-many-statements,chained-comparison,fixme,broad-except,c-extension-no-member -# pylint: disable=import-error -from pathlib import Path -import os.path -import xml.etree.ElementTree as ET -from .utils.xml import create_page_xml, xml_reading_order -from .utils.counter import EynollahIdCounter - -from ocrd_utils import getLogger -from ocrd_models.ocrd_page import ( - BorderType, - CoordsType, - PcGtsType, - TextLineType, - TextEquivType, - TextRegionType, - ImageRegionType, - TableRegionType, - SeparatorRegionType, - to_xml - ) -import numpy as np - -class EynollahXmlWriter: - - def __init__(self, *, dir_out, image_filename, curved_line,textline_light, pcgts=None): - self.logger = getLogger('eynollah.writer') - self.counter = EynollahIdCounter() - self.dir_out = dir_out - self.image_filename = image_filename - self.output_filename = os.path.join(self.dir_out or "", self.image_filename_stem) + ".xml" - self.curved_line = curved_line - self.textline_light = textline_light - self.pcgts = pcgts - self.scale_x = None # XXX set outside __init__ - self.scale_y = None # XXX set outside __init__ - self.height_org = None # XXX set outside __init__ - self.width_org = None # XXX set outside __init__ - - @property - def image_filename_stem(self): - return Path(Path(self.image_filename).name).stem - - def calculate_page_coords(self, cont_page): - self.logger.debug('enter calculate_page_coords') - points_page_print = "" - for _, contour in enumerate(cont_page[0]): - if len(contour) == 2: - points_page_print += str(int((contour[0]) / self.scale_x)) - points_page_print += ',' - points_page_print += str(int((contour[1]) / self.scale_y)) - else: - points_page_print += str(int((contour[0][0]) / self.scale_x)) - points_page_print += ',' - points_page_print += str(int((contour[0][1] ) / self.scale_y)) - points_page_print = points_page_print + ' ' - return points_page_print[:-1] - - def serialize_lines_in_region(self, text_region, all_found_textline_polygons, region_idx, page_coord, all_box_coord, slopes, 
counter, ocr_all_textlines_textregion): - self.logger.debug('enter serialize_lines_in_region') - for j, polygon_textline in enumerate(all_found_textline_polygons[region_idx]): - coords = CoordsType() - textline = TextLineType(id=counter.next_line_id, Coords=coords) - if ocr_all_textlines_textregion: - # FIXME: add OCR confidence - textline.set_TextEquiv([TextEquivType(Unicode=ocr_all_textlines_textregion[j])]) - text_region.add_TextLine(textline) - text_region.set_orientation(-slopes[region_idx]) - region_bboxes = all_box_coord[region_idx] - points_co = '' - for point in polygon_textline: - if len(point) != 2: - point = point[0] - point_x = point[0] + page_coord[2] - point_y = point[1] + page_coord[0] - # FIXME: or actually... not self.textline_light and not self.curved_line or np.abs(slopes[region_idx]) > 45? - if not self.textline_light and not (self.curved_line and np.abs(slopes[region_idx]) <= 45): - point_x += region_bboxes[2] - point_y += region_bboxes[0] - point_x = max(0, int(point_x / self.scale_x)) - point_y = max(0, int(point_y / self.scale_y)) - points_co += str(point_x) + ',' + str(point_y) + ' ' - coords.set_points(points_co[:-1]) - - def write_pagexml(self, pcgts): - self.logger.info("output filename: '%s'", self.output_filename) - with open(self.output_filename, 'w') as f: - f.write(to_xml(pcgts)) - - def build_pagexml_no_full_layout( - self, found_polygons_text_region, - page_coord, order_of_texts, id_of_texts, - all_found_textline_polygons, - all_box_coord, - found_polygons_text_region_img, - found_polygons_marginals_left, found_polygons_marginals_right, - all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, - all_box_coord_marginals_left, all_box_coord_marginals_right, - slopes, slopes_marginals_left, slopes_marginals_right, - cont_page, polygons_seplines, - found_polygons_tables, - **kwargs): - return self.build_pagexml_full_layout( - found_polygons_text_region, [], - page_coord, order_of_texts, id_of_texts, - all_found_textline_polygons, [], - all_box_coord, [], - found_polygons_text_region_img, found_polygons_tables, [], - found_polygons_marginals_left, found_polygons_marginals_right, - all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, - all_box_coord_marginals_left, all_box_coord_marginals_right, - slopes, [], slopes_marginals_left, slopes_marginals_right, - cont_page, polygons_seplines, - **kwargs) - - def build_pagexml_full_layout( - self, - found_polygons_text_region, found_polygons_text_region_h, - page_coord, order_of_texts, id_of_texts, - all_found_textline_polygons, all_found_textline_polygons_h, - all_box_coord, all_box_coord_h, - found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, - found_polygons_marginals_left,found_polygons_marginals_right, - all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, - all_box_coord_marginals_left, all_box_coord_marginals_right, - slopes, slopes_h, slopes_marginals_left, slopes_marginals_right, - cont_page, polygons_seplines, - ocr_all_textlines=None, ocr_all_textlines_h=None, - ocr_all_textlines_marginals_left=None, ocr_all_textlines_marginals_right=None, - ocr_all_textlines_drop=None, - conf_contours_textregions=None, conf_contours_textregions_h=None, - skip_layout_reading_order=False): - self.logger.debug('enter build_pagexml') - - # create the file structure - pcgts = self.pcgts if self.pcgts else create_page_xml(self.image_filename, self.height_org, self.width_org) - page = 
pcgts.get_Page() - page.set_Border(BorderType(Coords=CoordsType(points=self.calculate_page_coords(cont_page)))) - - counter = EynollahIdCounter() - if len(order_of_texts): - _counter_marginals = EynollahIdCounter(region_idx=len(order_of_texts)) - id_of_marginalia_left = [_counter_marginals.next_region_id - for _ in found_polygons_marginals_left] - id_of_marginalia_right = [_counter_marginals.next_region_id - for _ in found_polygons_marginals_right] - xml_reading_order(page, order_of_texts, id_of_marginalia_left, id_of_marginalia_right) - - for mm, region_contour in enumerate(found_polygons_text_region): - textregion = TextRegionType( - id=counter.next_region_id, type_='paragraph', - Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord, - skip_layout_reading_order)) - ) - if conf_contours_textregions: - textregion.Coords.set_conf(conf_contours_textregions[mm]) - page.add_TextRegion(textregion) - if ocr_all_textlines: - ocr_textlines = ocr_all_textlines[mm] - else: - ocr_textlines = None - self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, - all_box_coord, slopes, counter, ocr_textlines) - - self.logger.debug('len(found_polygons_text_region_h) %s', len(found_polygons_text_region_h)) - for mm, region_contour in enumerate(found_polygons_text_region_h): - textregion = TextRegionType( - id=counter.next_region_id, type_='heading', - Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) - ) - if conf_contours_textregions_h: - textregion.Coords.set_conf(conf_contours_textregions_h[mm]) - page.add_TextRegion(textregion) - if ocr_all_textlines_h: - ocr_textlines = ocr_all_textlines_h[mm] - else: - ocr_textlines = None - self.serialize_lines_in_region(textregion, all_found_textline_polygons_h, mm, page_coord, - all_box_coord_h, slopes_h, counter, ocr_textlines) - - for mm, region_contour in enumerate(found_polygons_marginals_left): - marginal = TextRegionType( - id=counter.next_region_id, type_='marginalia', - Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) - ) - page.add_TextRegion(marginal) - if ocr_all_textlines_marginals_left: - ocr_textlines = ocr_all_textlines_marginals_left[mm] - else: - ocr_textlines = None - self.serialize_lines_in_region(marginal, all_found_textline_polygons_marginals_left, mm, page_coord, all_box_coord_marginals_left, slopes_marginals_left, counter, ocr_textlines) - - for mm, region_contour in enumerate(found_polygons_marginals_right): - marginal = TextRegionType( - id=counter.next_region_id, type_='marginalia', - Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) - ) - page.add_TextRegion(marginal) - if ocr_all_textlines_marginals_right: - ocr_textlines = ocr_all_textlines_marginals_right[mm] - else: - ocr_textlines = None - self.serialize_lines_in_region(marginal, all_found_textline_polygons_marginals_right, mm, page_coord, - all_box_coord_marginals_right, slopes_marginals_right, counter, ocr_textlines) - - for mm, region_contour in enumerate(found_polygons_drop_capitals): - dropcapital = TextRegionType( - id=counter.next_region_id, type_='drop-capital', - Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) - ) - page.add_TextRegion(dropcapital) - all_box_coord_drop = [[0, 0, 0, 0]] - slopes_drop = [0] - if ocr_all_textlines_drop: - ocr_textlines = ocr_all_textlines_drop[mm] - else: - ocr_textlines = None - self.serialize_lines_in_region(dropcapital, 
[[found_polygons_drop_capitals[mm]]], 0, page_coord,
-                                          all_box_coord_drop, slopes_drop, counter, ocr_textlines)
-
-        for region_contour in found_polygons_text_region_img:
-            page.add_ImageRegion(
-                ImageRegionType(id=counter.next_region_id,
-                                Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord))))
-
-        for region_contour in polygons_seplines:
-            page.add_SeparatorRegion(
-                SeparatorRegionType(id=counter.next_region_id,
-                                    Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, [0, 0, 0, 0]))))
-
-        for region_contour in found_polygons_tables:
-            page.add_TableRegion(
-                TableRegionType(id=counter.next_region_id,
-                                Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord))))
-
-        return pcgts
-
-    def calculate_polygon_coords(self, contour, page_coord, skip_layout_reading_order=False):
-        self.logger.debug('enter calculate_polygon_coords')
-        coords = ''
-        for point in contour:
-            if len(point) != 2:
-                point = point[0]
-            point_x = point[0]
-            point_y = point[1]
-            if not skip_layout_reading_order:
-                point_x += page_coord[2]
-                point_y += page_coord[0]
-            point_x = int(point_x / self.scale_x)
-            point_y = int(point_y / self.scale_y)
-            coords += str(point_x) + ',' + str(point_y) + ' '
-        return coords[:-1]
-
diff --git a/tests/base.py b/tests/base.py
new file mode 100644
index 0000000..9de35ef
--- /dev/null
+++ b/tests/base.py
@@ -0,0 +1,54 @@
+# pylint: disable=unused-import
+
+from os.path import dirname, realpath
+from os import chdir
+import sys
+import logging
+import io
+import collections
+from unittest import TestCase as VanillaTestCase, skip, main as unittests_main
+import pytest
+from ocrd_utils import disableLogging, initLogging
+
+def main(fn=None):
+    if fn:
+        sys.exit(pytest.main([fn]))
+    else:
+        unittests_main()
+
+class TestCase(VanillaTestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        chdir(dirname(realpath(__file__)) + '/..')
+
+    def setUp(self):
+        disableLogging()
+        initLogging()
+
+class CapturingTestCase(TestCase):
+    """
+    A TestCase that needs to capture stderr/stdout and invoke click CLI.
+    """
+
+    @pytest.fixture(autouse=True)
+    def _setup_pytest_capfd(self, capfd):
+        self.capfd = capfd
+
+    def invoke_cli(self, cli, args):
+        """
+        Substitution for click.CliRunner.invoke that works together nicely
+        with unittest/pytest capturing stdout/stderr.
+ """ + self.capture_out_err() # XXX snapshot just before executing the CLI + code = 0 + sys.argv[1:] = args # XXX necessary because sys.argv reflects pytest args not cli args + try: + cli.main(args=args) + except SystemExit as e: + code = e.code + out, err = self.capture_out_err() + return code, out, err + + def capture_out_err(self): + return self.capfd.readouterr() diff --git a/tests/resources/euler_rechenkunst01_1738_0025.tif b/tests/resources/euler_rechenkunst01_1738_0025.tif deleted file mode 100644 index db6bae1..0000000 Binary files a/tests/resources/euler_rechenkunst01_1738_0025.tif and /dev/null differ diff --git a/tests/resources/euler_rechenkunst01_1738_0025.xml b/tests/resources/euler_rechenkunst01_1738_0025.xml deleted file mode 100644 index 1a92f73..0000000 --- a/tests/resources/euler_rechenkunst01_1738_0025.xml +++ /dev/null @@ -1,1626 +0,0 @@ - - - OCR-D - 2016-09-29T14:32:09 - 2018-04-25T08:56:33 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 9 - - - 9 - - - 9 - - - - - - - - - der - - - - - rechten - - - - - gegen - - - - - der - - - - - lincken - - - - - Hand - - - - - bedeutet - - - der rechten gegen der lincken Hand bedeutet - - - - - - - - wie - - - - - folget: - - - wie folget: - - - der rechten gegen der lincken Hand bedeutet -wie folget: - - - - - - - - - I. - - - I. - - - I. - - - - - - - - - 0 - - - - - - - - - - - nichts - - - 0 - nichts - - - - - - - - 1 - - - - - - - - - - - eins - - - 1 - eins - - - - - - - - 2 - - - - - - - - - - - zwey - - - 2 - zwey - - - - - - - - 3 - - - - - - - - - - - drey - - - 3 - drey - - - - - - - - 4 - - - - - - - - - - - vier - - - 4 - vier - - - 0 - nichts -1 - eins -2 - zwey -3 - drey -4 - vier - - - - - - - - - 5 - - - - - - - - - - - fuͤnf - - - 5 - fuͤnf - - - - - - - - 6 - - - - - - - - - - - ſechs - - - 6 - ſechs - - - - - - - 7 - - - - - - - - - - - ſieben - - - 7 - ſieben - - - - - - - - 8 - - - - - - - - - - - acht - - - 8 - acht - - - - - - - - 9 - - - - - - - - - - - neun - - - 9 - neun - - - 5 - fuͤnf -6 - ſechs -7 - ſieben -8 - acht -9 - neun - - - - - - - - - Auf - - - - - der - - - - - zweyten - - - - - Stelle - - - - - aber - - - - - bedeutet. - - - Auf der zweyten Stelle aber bedeutet. - - - Auf der zweyten Stelle aber bedeutet. - - - - - - - - - II. - - - II. - - - II. - - - - - - - - - 0 - - - - - - - - - - - nichts - - - 0 - nichts - - - - - - - - 1 - - - - - - - - - - - zehen - - - 1 - zehen - - - - - - - - 2 - - - - - - - - - - - zwanzig - - - 2 - zwanzig - - - - - - - 3 - - - - - - - - - - - dreyßig - - - 3 - dreyßig - - - - - - - 4 - - - - - - - - - - - vierzig - - - 4 - vierzig - - 0 - nichts -1 - zehen -2 - zwanzig -3 - dreyßig -4 - vierzig - - - - - - - - - 5 - - - - - - - - - - - fuͤnfzig - - - 5 - fuͤnfzig - - - - - - - - 6 - - - - - - - - - - - ſechzig - - - 6 - ſechzig - - - - - - - 7 - - - - - - - - - - - ſiebenzig - - - 7 - ſiebenzig - - - - - - - 8 - - - - - - - - - - - achtzig - - - 8 - achtzig - - - - - - - 9 - - - - - - - - - - - neunzig - - - 9 - neunzig - - 5 - fuͤnfzig -6 - ſechzig -7 - ſiebenzig -8 - achtzig -9 - neunzig - - - - - - - - - Auf - - - - - der - - - - - dritten - - - - - Stelle - - - - - bedeutet. - - - Auf der dritten Stelle bedeutet. - - - Auf der dritten Stelle bedeutet. - - - - - - - - - III. - - - III. - - - III. 
- - - - - - - - - 0 - - - - - - - - - - - nichts - - - 0 - nichts - - - - - - - - 1 - - - - - - - - - - - hundert - - - 1 - hundert - - - - - - - - 2 - - - - - - - - - - - zwey - - - - - hundert - - - 2 - zwey hundert - - - - - - - - 3 - - - - - - - - - - - drey - - - - - hundert - - - 3 - drey hundert - - - - - - - - 4 - - - - - - - - - - - vier - - - - - hundert - - - 4 - vier hundert - - - 0 - nichts -1 - hundert -2 - zwey hundert -3 - drey hundert -4 - vier hundert - - - - - - - - - 5 - - - - - - - - - - - fuͤnf - - - - - hundert - - - 5 - fuͤnf hundert - - - - - - - - 6 - - - - - - - - - - - ſechs - - - - - hundert - - - 6 - ſechs hundert - - - - - - - 7 - - - - - - - - - - - ſieben - - - - - hundert - - - 7 - ſieben hundert - - - - - - - - 8 - - - - - - - - - - - acht - - - - - hundert - - - 8 - acht hundert - - - - - - - 9 - - - - - - - - - - - neun - - - - - hundert - - - 9 - neun hundert - - - 5 - fuͤnf hundert -6 - ſechs hundert -7 - ſieben hundert -8 - acht hundert -9 - neun hundert - - - - - - - - - Auf - - - - - der - - - - - vierten - - - - - Stelle - - - - - bedeutet. - - - Auf der vierten Stelle bedeutet. - - - Auf der vierten Stelle bedeutet. - - - - - - - - - IV. - - - IV. - - - IV. - - - - - - - - - 0 - - - - - - - - - - - nichts - - - 0 - nichts - - - - - - - - 1 - - - - - - - - - - - tauſend - - - 1 - tauſend - - - - - - - - 2 - - - - - - - - - - - zwey - - - - - tauſend - - - 2 - zwey tauſend - - - - - - - - 3 - - - - - - - - - - - drey - - - - - tauſend - - - 3 - drey tauſend - - - - - - - - 4 - - - - - - - - - - - vier - - - - - tauſend - - - 4 - vier tauſend - - - 0 - nichts -1 - tauſend -2 - zwey tauſend -3 - drey tauſend -4 - vier tauſend - - - - - - - - - 5 - - - - - - - - - - - fuͤnf - - - - - tauſend - - - 5 - fuͤnf tauſend - - - - - - - - 6 - - - - - - - - - - - ſechs - - - - - tauſend - - - 6 - ſechs tauſend - - - - - - - 7 - - - - - - - - - - - ſieben - - - - - tauſend - - - 7 - ſieben tauſend - - - - - - - - 8 - - - - - - - - - - - acht - - - - - tauſend - - - 8 - acht tauſend - - - - - - - 9 - - - - - - - - - - - neun - - - - - tauſend - - - 9 - neun tauſend - - 5 - fuͤnf tauſend -6 - ſechs tauſend -7 - ſieben tauſend -8 - acht tauſend -9 - neun tauſend - - - - - - - - - Auf - - - - - der - - - - - fuͤnften - - - - - Stelle - - - - - bedeutet. - - - Auf der fuͤnften Stelle bedeutet. - - - Auf der fuͤnften Stelle bedeutet. - - - - - - - - - V. - - - V. - - - V. 
- - - - - - - - - 0 - - - - - - - - - - - nichts - - - 0 - nichts - - - - - - - - 1 - - - - - - - - - - - zehen - - - - - tauſend - - - 1 - zehen tauſend - - - - - - - - 2 - - - - - - - - - - - zwanzig - - - - - tauſend - - - 2 - zwanzig tauſend - - - - - - - - 3 - - - - - - - - - - - dreyßig - - - - - tauſend - - - 3 - dreyßig tauſend - - - - - - - - 4 - - - - - - - - - - - vierzig - - - - - tauſend - - - 4 - vierzig tauſend - - - 0 - nichts -1 - zehen tauſend -2 - zwanzig tauſend -3 - dreyßig tauſend -4 - vierzig tauſend - - - - - - - - - 5 - - - - - - - - - - - fuͤnfzig - - - - - tauſend - - - 5 - fuͤnfzig tauſend - - - - - - - - 6 - - - - - - - - - - - ſechzig - - - - - tauſend - - - 6 - ſechzig tauſend - - - - - - - 7 - - - - - - - - - - - ſiebenzig - - - - - tauſend - - - 7 - ſiebenzig tauſend - - - - - - - - 8 - - - - - - - - - - - achtzig - - - - - tauſend - - - 8 - achtzig tauſend - - - - - - - 9 - - - - - - - - - - - neunzig - - - - - tauſend - - - 9 - neunzig tauſend - - - 5 - fuͤnfzig tauſend -6 - ſechzig tauſend -7 - ſiebenzig tauſend -8 - achtzig tauſend -9 - neunzig tauſend - - - - - - - - A - - - - - 5 - - - A 5 - - A 5 - - - - - - - - - Anf - - - Anf - - Anf - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/tests/resources/kant_aufklaerung_1784_0020.xml b/tests/resources/kant_aufklaerung_1784_0020.xml deleted file mode 100644 index 47484cd..0000000 --- a/tests/resources/kant_aufklaerung_1784_0020.xml +++ /dev/null @@ -1,2129 +0,0 @@ - - - OCR-D - 2016-09-20T11:09:27.431+02:00 - 2018-04-24T17:44:49.605+01:00 - - - - - - - - - - - - - - - - - - - - - - - ( - - - - - - - 484 - - - - - - - ) - - - - - ( 484 ) - - - - ( 484 ) - - - - - - - - - - - gewiegelt - - - - - - - worden - - - - - - - ; - - - - - - - ſo - - - - - - - ſchaͤdlich - - - - - - - iſt - - - - - - - es - - - - - - - Vorurtheile - - - - - - - zu - - - - - gewiegelt worden; ſo ſchaͤdlich iſt es Vorurtheile zu - - - - - - - - - - pflanzen - - - - - - - , - - - - - - - weil - - - - - - - ſie - - - - - - - ſich - - - - - - - zuletzt - - - - - - - an - - - - - - - denen - - - - - - - ſelbſt - - - - - - - raͤchen - - - - - - - , - - - - - pflanzen, weil ſie ſich zuletzt an denen ſelbſt raͤchen, - - - - - - - - - - die - - - - - - - , - - - - - - - oder - - - - - - - deren - - - - - - - Vorgaͤnger - - - - - - - , - - - - - - - ihre - - - - - - - Urheber - - - - - - - geweſen - - - - - die, oder deren Vorgaͤnger, ihre Urheber geweſen - - - - - - - - - - ſind - - - - - - - . - - - - - - - Daher - - - - - - - kann - - - - - - - ein - - - - - - - Publikum - - - - - - - nur - - - - - - - langſam - - - - - - - zur - - - - - ſind. Daher kann ein Publikum nur langſam zur - - - - - - - - - - Aufklaͤrung - - - - - - - gelangen - - - - - - - . - - - - - - - Durch - - - - - - - eine - - - - - - - Revolution - - - - - - - wird - - - - - Aufklaͤrung gelangen. 
Durch eine Revolution wird - - - - - - - - - - vielleicht - - - - - - - wohl - - - - - - - ein - - - - - - - Abfall - - - - - - - von - - - - - - - perſoͤnlichem - - - - - - - Despo- - - - - - vielleicht wohl ein Abfall von perſoͤnlichem Despo- - - - - - - - - - - tism - - - - - - - und - - - - - - - gewinnſuͤchtiger - - - - - - - oder - - - - - - - herrſchſüchtiger - - - - - - - Be - - - - - - - - - - - - - tism und gewinnſuͤchtiger oder herrſchſüchtiger Be- - - - - - - - - - - druͤkkung - - - - - - - , - - - - - - - aber - - - - - - - niemals - - - - - - - wahre - - - - - - - Reform - - - - - - - der - - - - - - - Den - - - - - - - - - - - - - druͤkkung, aber niemals wahre Reform der Den- - - - - - - - - - - kungsart - - - - - - - zu - - - - - - - Stande - - - - - - - kommen - - - - - - - ; - - - - - - - ſondern - - - - - - - neue - - - - - - - Vor - - - - - - - - - - - - - kungsart zu Stande kommen; ſondern neue Vor- - - - - - - - - - - urtheile - - - - - - - werden - - - - - - - , - - - - - - - eben - - - - - - - ſowohl - - - - - - - als - - - - - - - die - - - - - - - alten - - - - - - - , - - - - - - - zum - - - - - urtheile werden, eben ſowohl als die alten, zum - - - - - - - - - - Leitbande - - - - - - - des - - - - - - - gedankenloſen - - - - - - - großen - - - - - - - Haufens - - - - - Leitbande des gedankenloſen großen Haufens - - - - - - - - - - dienen - - - - - - - . - - - - - dienen. - - - - gewiegelt worden; ſo ſchaͤdlich iſt es Vorurtheile zu -pflanzen, weil ſie ſich zuletzt an denen ſelbſt raͤchen, -die, oder deren Vorgaͤnger, ihre Urheber geweſen -ſind. Daher kann ein Publikum nur langſam zur -Aufklaͤrung gelangen. Durch eine Revolution wird -vielleicht wohl ein Abfall von perſoͤnlichem Despo- -tism und gewinnſuͤchtiger oder herrſchſüchtiger Be- -druͤkkung, aber niemals wahre Reform der Den- -kungsart zu Stande kommen; ſondern neue Vor- -urtheile werden, eben ſowohl als die alten, zum -Leitbande des gedankenloſen großen Haufens -dienen. - - - - - - - - - - - Zu - - - - - - - dieſer - - - - - - - Aufklaͤrung - - - - - - - aber - - - - - - - wird - - - - - - - nichts - - - - - - - erfordert - - - - - Zu dieſer Aufklaͤrung aber wird nichts erfordert - - - - - - - - - - als - - - - - - - Freiheit - - - - - - - ; - - - - - - - und - - - - - - - zwar - - - - - - - die - - - - - - - unſchaͤdlichſte - - - - - - - unter - - - - - als Freiheit; und zwar die unſchaͤdlichſte unter - - - - - - - - - allem - - - - - - - , - - - - - - - was - - - - - - - nur - - - - - - - Freiheit - - - - - - - heißen - - - - - - - mag - - - - - - - , - - - - - - - naͤmlich - - - - - - - die - - - - - - - : - - - - - allem, was nur Freiheit heißen mag, naͤmlich die: - - - - - - - - - - von - - - - - - - ſeiner - - - - - - - Vernunft - - - - - - - in - - - - - - - allen - - - - - - - Stuͤkken - - - - - - - oͤffentlichen - - - - - von ſeiner Vernunft in allen Stuͤkken oͤffentlichen - - - - - - - - - - Gebrauch - - - - - - - zu - - - - - - - machen - - - - - - - . - - - - - - - Nun - - - - - - - hoͤre - - - - - - - ich - - - - - - - aber - - - - - - - von - - - - - - - al - - - - - - - - - - - - - Gebrauch zu machen. Nun hoͤre ich aber von al- - - - - - - - - - - len - - - - - - - Seiten - - - - - - - rufen - - - - - - - : - - - - - - - raͤſonnirt - - - - - - - nicht - - - - - - - ! - - - - - - - Der - - - - - - - Offi - - - - - - - - - - - - - len Seiten rufen: raͤſonnirt nicht! 
Der Offi- - - - - - - - - - - zier - - - - - - - ſagt - - - - - - - : - - - - - - - raͤſonnirt - - - - - - - nicht - - - - - - - , - - - - - - - ſondern - - - - - - - exercirt - - - - - - - ! - - - - - - - Der - - - - - zier ſagt: raͤſonnirt nicht, ſondern exercirt! Der - - - - - - - - - - Finanzrath - - - - - - - : - - - - - - - raͤſonnirt - - - - - - - nicht - - - - - - - , - - - - - - - ſondern - - - - - - - bezahlt - - - - - - - ! - - - - - - - Der - - - - - Finanzrath: raͤſonnirt nicht, ſondern bezahlt! Der - - - - - - - - - - Geiſtliche - - - - - - - : - - - - - - - raͤſonnirt - - - - - - - nicht - - - - - - - , - - - - - - - ſondern - - - - - - - glaubt - - - - - - - ! - - - - - - - ( - - - - - - - Nur - - - - - Geiſtliche: raͤſonnirt nicht, ſondern glaubt! (Nur - - - - - - - - - - ein - - - - - - - einziger - - - - - - - Herr - - - - - - - in - - - - - - - der - - - - - - - Welt - - - - - - - ſagt - - - - - - - : - - - - - - - raͤſonnirt - - - - - - - , - - - - - - - ſo - - - - - ein einziger Herr in der Welt ſagt: raͤſonnirt, ſo - - - - - - - - - - viel - - - - - - - ihr - - - - - - - wollt - - - - - - - , - - - - - - - und - - - - - - - woruͤber - - - - - - - ihr - - - - - - - wollt - - - - - - - ; - - - - - - - aber - - - - - - - ge - - - - - - - - - - - - - viel ihr wollt, und woruͤber ihr wollt; aber ge- - - - - - - - - - - horcht - - - - - - - ! - - - - - - - ) - - - - - - - Hier - - - - - - - iſt - - - - - - - uͤberall - - - - - - - Einſchraͤnkung - - - - - - - der - - - - - - - Frei - - - - - - - - - - - - - horcht!) Hier iſt uͤberall Einſchraͤnkung der Frei- - - - - - - - - - - heit - - - - - - - . - - - - - - - Welche - - - - - - - Einſchraͤnkung - - - - - - - aber - - - - - - - iſt - - - - - - - der - - - - - - - Aufklaͤ - - - - - - - - - - - - - heit. Welche Einſchraͤnkung aber iſt der Aufklaͤ- - - - - - - - - - - rung - - - - - - - hinderlich - - - - - - - ? - - - - - - - welche - - - - - - - nicht - - - - - - - , - - - - - - - ſondern - - - - - - - ihr - - - - - - - wohl - - - - - - - gar - - - - - rung hinderlich? welche nicht, ſondern ihr wohl gar - - - - - - - - - - befoͤrderlich - - - - - - - ? - - - - - - - - - - - - - - Ich - - - - - - - antworte - - - - - - - : - - - - - - - der - - - - - - - oͤffentliche - - - - - befoͤrderlich? — Ich antworte: der oͤffentliche - - - - - - - - - - Gebrauch - - - - - - - ſeiner - - - - - - - Vernunft - - - - - - - muß - - - - - - - jederzeit - - - - - - - frei - - - - - - - ſein - - - - - - - , - - - - - Gebrauch ſeiner Vernunft muß jederzeit frei ſein, - - - - - - - - - - und - - - - - - - der - - - - - - - allein - - - - - - - kann - - - - - - - Aufklaͤrung - - - - - - - unter - - - - - - - Menſchen - - - - - - - zu - - - - - und der allein kann Aufklaͤrung unter Menſchen zu - - - - - Zu dieſer Aufklaͤrung aber wird nichts erfordert -als Freiheit; und zwar die unſchaͤdlichſte unter -allem, was nur Freiheit heißen mag, naͤmlich die: -von ſeiner Vernunft in allen Stuͤkken oͤffentlichen -Gebrauch zu machen. Nun hoͤre ich aber von al- -len Seiten rufen: raͤſonnirt nicht! Der Offi- -zier ſagt: raͤſonnirt nicht, ſondern exercirt! Der -Finanzrath: raͤſonnirt nicht, ſondern bezahlt! Der -Geiſtliche: raͤſonnirt nicht, ſondern glaubt! (Nur -ein einziger Herr in der Welt ſagt: raͤſonnirt, ſo -viel ihr wollt, und woruͤber ihr wollt; aber ge- -horcht!) Hier iſt uͤberall Einſchraͤnkung der Frei- -heit. Welche Einſchraͤnkung aber iſt der Aufklaͤ- -rung hinderlich? welche nicht, ſondern ihr wohl gar -befoͤrderlich? 
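[Editor's note] The two deleted resources above are PAGE XML ground truth whose
markup did not survive extraction into this document. For orientation, such
files are read in the (likewise deleted) tests below via ocrd_modelfactory; a
minimal sketch reusing only imports that appear in the old tests/test_run.py
(illustrative, not project API documentation):

    from ocrd_modelfactory import page_from_file
    from ocrd_models.constants import NAMESPACES as NS

    tree = page_from_file('tests/resources/kant_aufklaerung_1784_0020.xml').etree
    lines = tree.xpath("//page:TextLine", namespaces=NS)
    # the deleted layout test expected 31 TextLines for this page
    # (29 paragraph lines, 1 page number and 1 catch-word line)
    print(len(lines))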
diff --git a/tests/test_counter.py b/tests/test_counter.py
index 4f1abe2..8ef0756 100644
--- a/tests/test_counter.py
+++ b/tests/test_counter.py
@@ -1,4 +1,5 @@
-from eynollah.utils.counter import EynollahIdCounter
+from tests.base import main
+from qurator.eynollah.utils.counter import EynollahIdCounter
 
 def test_counter_string():
     c = EynollahIdCounter()
@@ -28,3 +29,5 @@
     c.inc('region', -9)
     assert c.get('region') == 1
 
+if __name__ == '__main__':
+    main(__file__)
diff --git a/tests/test_dpi.py b/tests/test_dpi.py
index abe48ce..510ffc5 100644
--- a/tests/test_dpi.py
+++ b/tests/test_dpi.py
@@ -1,8 +1,11 @@
 import cv2
 from pathlib import Path
-from eynollah.utils.pil_cv2 import check_dpi
+from qurator.eynollah.utils.pil_cv2 import check_dpi
+from tests.base import main
 
 def test_dpi():
     fpath = str(Path(__file__).parent.joinpath('resources', 'kant_aufklaerung_1784_0020.tif'))
     assert 230 == check_dpi(cv2.imread(fpath))
+
+if __name__ == '__main__':
+    main(__file__)
diff --git a/tests/test_run.py b/tests/test_run.py
index 79c64c2..b1137e7 100644
--- a/tests/test_run.py
+++ b/tests/test_run.py
@@ -1,351 +1,24 @@
 from os import environ
 from pathlib import Path
-import pytest
-import logging
-from PIL import Image
-from eynollah.cli import (
-    layout as layout_cli,
-    binarization as binarization_cli,
-    enhancement as enhancement_cli,
-    machine_based_reading_order as mbreorder_cli,
-    ocr as ocr_cli,
-)
-from click.testing import CliRunner
-from ocrd_modelfactory import page_from_file
-from ocrd_models.constants import NAMESPACES as NS
+from ocrd_utils import pushd_popd
+from tests.base import CapturingTestCase as TestCase, main
+from qurator.eynollah.cli import main as eynollah_cli
 
 testdir = Path(__file__).parent.resolve()
 
-MODELS_LAYOUT = environ.get('MODELS_LAYOUT', str(testdir.joinpath('..', 'models_layout_v0_5_0').resolve()))
-MODELS_OCR = environ.get('MODELS_OCR', str(testdir.joinpath('..', 'models_ocr_v0_5_1').resolve()))
-MODELS_BIN = environ.get('MODELS_BIN', str(testdir.joinpath('..', 'default-2021-03-09').resolve()))
+EYNOLLAH_MODELS = environ.get('EYNOLLAH_MODELS', str(testdir.joinpath('..', 'models_eynollah').resolve()))
 
-@pytest.mark.parametrize(
-    "options",
-    [
-        [],  # defaults
-        #["--allow_scaling", "--curved-line"],
-        ["--allow_scaling", "--curved-line", "--full-layout"],
-        ["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based"],
-        ["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based",
-         "--textline_light", "--light_version"],
-        # -ep ...
-        # -eoi ...
- # FIXME: find out whether OCR extra was installed, otherwise skip these - ["--do_ocr"], - ["--do_ocr", "--light_version", "--textline_light"], - ["--do_ocr", "--transformer_ocr"], - #["--do_ocr", "--transformer_ocr", "--light_version", "--textline_light"], - ["--do_ocr", "--transformer_ocr", "--light_version", "--textline_light", "--full-layout"], - # --skip_layout_and_reading_order - ], ids=str) -def test_run_eynollah_layout_filename(tmp_path, pytestconfig, caplog, options): - infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') - outfile = tmp_path / 'kant_aufklaerung_1784_0020.xml' - args = [ - '-m', MODELS_LAYOUT, - '-i', str(infile), - '-o', str(outfile.parent), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'eynollah' - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(layout_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert str(infile) in logmsgs - assert outfile.exists() - tree = page_from_file(str(outfile)).etree - regions = tree.xpath("//page:TextRegion", namespaces=NS) - assert len(regions) >= 2, "result is inaccurate" - regions = tree.xpath("//page:SeparatorRegion", namespaces=NS) - assert len(regions) >= 2, "result is inaccurate" - lines = tree.xpath("//page:TextLine", namespaces=NS) - assert len(lines) == 31, "result is inaccurate" # 29 paragraph lines, 1 page and 1 catch-word line +class TestEynollahRun(TestCase): -@pytest.mark.parametrize( - "options", - [ - ["--tables"], - ["--tables", "--full-layout"], - ["--tables", "--full-layout", "--textline_light", "--light_version"], - ], ids=str) -def test_run_eynollah_layout_filename2(tmp_path, pytestconfig, caplog, options): - infile = testdir.joinpath('resources/euler_rechenkunst01_1738_0025.tif') - outfile = tmp_path / 'euler_rechenkunst01_1738_0025.xml' - args = [ - '-m', MODELS_LAYOUT, - '-i', str(infile), - '-o', str(outfile.parent), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'eynollah' - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(layout_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert str(infile) in logmsgs - assert outfile.exists() - tree = page_from_file(str(outfile)).etree - regions = tree.xpath("//page:TextRegion", namespaces=NS) - assert len(regions) >= 2, "result is inaccurate" - regions = tree.xpath("//page:TableRegion", namespaces=NS) - # model/decoding is not very precise, so (depending on mode) we can get fractures/splits/FP - assert len(regions) >= 1, "result is inaccurate" - regions = tree.xpath("//page:SeparatorRegion", namespaces=NS) - assert len(regions) >= 2, "result is inaccurate" - lines = tree.xpath("//page:TextLine", namespaces=NS) - assert len(lines) >= 2, "result is inaccurate" # mostly table (if detected correctly), but 1 page and 1 catch-word line + def test_full_run(self): + with pushd_popd(tempdir=True) as tempdir: + code, out, err = self.invoke_cli(eynollah_cli, [ + '-m', EYNOLLAH_MODELS, + '-i', str(testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif')), + '-o', tempdir + ]) + print(code, out, err) + assert not code -def 
test_run_eynollah_layout_directory(tmp_path, pytestconfig, caplog): - indir = testdir.joinpath('resources') - outdir = tmp_path - args = [ - '-m', MODELS_LAYOUT, - '-di', str(indir), - '-o', str(outdir), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'eynollah' - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(layout_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Job done in')]) == 2 - assert any(logmsg for logmsg in logmsgs if logmsg.startswith('All jobs done in')) - assert len(list(outdir.iterdir())) == 2 - -@pytest.mark.parametrize( - "options", - [ - [], # defaults - ["--no-patches"], - ], ids=str) -def test_run_eynollah_binarization_filename(tmp_path, pytestconfig, caplog, options): - infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') - outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') - args = [ - '-m', MODELS_BIN, - '-i', str(infile), - '-o', str(outfile), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'SbbBinarizer' - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(binarization_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert any(True for logmsg in logmsgs if logmsg.startswith('Predicting')) - assert outfile.exists() - with Image.open(infile) as original_img: - original_size = original_img.size - with Image.open(outfile) as binarized_img: - binarized_size = binarized_img.size - assert original_size == binarized_size - -def test_run_eynollah_binarization_directory(tmp_path, pytestconfig, caplog): - indir = testdir.joinpath('resources') - outdir = tmp_path - args = [ - '-m', MODELS_BIN, - '-di', str(indir), - '-o', str(outdir), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'SbbBinarizer' - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(binarization_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Predicting')]) == 2 - assert len(list(outdir.iterdir())) == 2 - -@pytest.mark.parametrize( - "options", - [ - [], # defaults - ["-sos"], - ], ids=str) -def test_run_eynollah_enhancement_filename(tmp_path, pytestconfig, caplog, options): - infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') - outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') - args = [ - '-m', MODELS_LAYOUT, - '-i', str(infile), - '-o', str(outfile.parent), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'enhancement' - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(enhancement_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert 
any(True for logmsg in logmsgs if logmsg.startswith('Image was enhanced')), logmsgs - assert outfile.exists() - with Image.open(infile) as original_img: - original_size = original_img.size - with Image.open(outfile) as enhanced_img: - enhanced_size = enhanced_img.size - assert (original_size == enhanced_size) == ("-sos" in options) - -def test_run_eynollah_enhancement_directory(tmp_path, pytestconfig, caplog): - indir = testdir.joinpath('resources') - outdir = tmp_path - args = [ - '-m', MODELS_LAYOUT, - '-di', str(indir), - '-o', str(outdir), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'enhancement' - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(enhancement_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Image was enhanced')]) == 2 - assert len(list(outdir.iterdir())) == 2 - -def test_run_eynollah_mbreorder_filename(tmp_path, pytestconfig, caplog): - infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.xml') - outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') - args = [ - '-m', MODELS_LAYOUT, - '-i', str(infile), - '-o', str(outfile.parent), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'mbreorder' - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(mbreorder_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - # FIXME: mbreorder has no logging! - #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs - assert outfile.exists() - #in_tree = page_from_file(str(infile)).etree - #in_order = in_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) - out_tree = page_from_file(str(outfile)).etree - out_order = out_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) - #assert len(out_order) >= 2, "result is inaccurate" - #assert in_order != out_order - assert out_order == ['r_1_1', 'r_2_1', 'r_2_2', 'r_2_3'] - -def test_run_eynollah_mbreorder_directory(tmp_path, pytestconfig, caplog): - indir = testdir.joinpath('resources') - outdir = tmp_path - args = [ - '-m', MODELS_LAYOUT, - '-di', str(indir), - '-o', str(outdir), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'mbreorder' - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(mbreorder_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - # FIXME: mbreorder has no logging! 
- #assert len([logmsg for logmsg in logmsgs if logmsg.startswith('???')]) == 2 - assert len(list(outdir.iterdir())) == 2 - -@pytest.mark.parametrize( - "options", - [ - [], # defaults - ["-doit", #str(outrenderfile.parent)], - ], - ["-trocr"], - ], ids=str) -def test_run_eynollah_ocr_filename(tmp_path, pytestconfig, caplog, options): - infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') - outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') - outrenderfile = tmp_path.joinpath('render').joinpath('kant_aufklaerung_1784_0020.png') - outrenderfile.parent.mkdir() - args = [ - '-m', MODELS_OCR, - '-i', str(infile), - '-dx', str(infile.parent), - '-o', str(outfile.parent), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.DEBUG) - def only_eynollah(logrec): - return logrec.name == 'eynollah' - runner = CliRunner() - if "-doit" in options: - options.insert(options.index("-doit") + 1, str(outrenderfile.parent)) - with caplog.filtering(only_eynollah): - result = runner.invoke(ocr_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - # FIXME: ocr has no logging! - #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs - assert outfile.exists() - if "-doit" in options: - assert outrenderfile.exists() - #in_tree = page_from_file(str(infile)).etree - #in_order = in_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) - out_tree = page_from_file(str(outfile)).etree - out_texts = out_tree.xpath("//page:TextLine/page:TextEquiv[last()]/page:Unicode/text()", namespaces=NS) - assert len(out_texts) >= 2, ("result is inaccurate", out_texts) - assert sum(map(len, out_texts)) > 100, ("result is inaccurate", out_texts) - -def test_run_eynollah_ocr_directory(tmp_path, pytestconfig, caplog): - indir = testdir.joinpath('resources') - outdir = tmp_path - args = [ - '-m', MODELS_OCR, - '-di', str(indir), - '-dx', str(indir), - '-o', str(outdir), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'eynollah' - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(ocr_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - # FIXME: ocr has no logging! 
-    #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs
-    assert len(list(outdir.iterdir())) == 2
+if __name__ == '__main__':
+    main(__file__)
diff --git a/tests/test_smoke.py b/tests/test_smoke.py
index e2b323a..d069479 100644
--- a/tests/test_smoke.py
+++ b/tests/test_smoke.py
@@ -1,6 +1,6 @@
 def test_utils_import():
-    import eynollah.utils
-    import eynollah.utils.contour
-    import eynollah.utils.drop_capitals
-    import eynollah.utils.is_nan
-    import eynollah.utils.rotate
+    import qurator.eynollah.utils
+    import qurator.eynollah.utils.contour
+    import qurator.eynollah.utils.drop_capitals
+    import qurator.eynollah.utils.is_nan
+    import qurator.eynollah.utils.rotate
diff --git a/tests/test_xml.py b/tests/test_xml.py
index 5dffc94..8422fd1 100644
--- a/tests/test_xml.py
+++ b/tests/test_xml.py
@@ -1,4 +1,5 @@
-from eynollah.utils.xml import create_page_xml
+from pytest import main
+from qurator.eynollah.utils.xml import create_page_xml
 from ocrd_models.ocrd_page import to_xml
 
 PAGE_2019 = 'http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15'
@@ -8,3 +9,6 @@ def test_create_xml():
     xmlstr = to_xml(pcgts)
     assert 'xmlns:pc="%s"' % PAGE_2019 in xmlstr
     assert 'Metadata' in xmlstr
+
+if __name__ == '__main__':
+    main([__file__])
diff --git a/train/Dockerfile b/train/Dockerfile
deleted file mode 100644
index 2456ea4..0000000
--- a/train/Dockerfile
+++ /dev/null
@@ -1,29 +0,0 @@
-# Use NVIDIA base image
-FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04
-
-# Set the working directory
-WORKDIR /app
-
-
-# Set environment variable for GitPython
-ENV GIT_PYTHON_REFRESH=quiet
-
-# Install Python and pip
-RUN apt-get update && apt-get install -y --fix-broken && \
-    apt-get install -y \
-    python3 \
-    python3-pip \
-    python3-distutils \
-    python3-setuptools \
-    python3-wheel && \
-    rm -rf /var/lib/apt/lists/*
-
-# Copy and install Python dependencies
-COPY requirements.txt .
-RUN pip install --no-cache-dir -r requirements.txt
-
-# Copy the rest of the application
-COPY . .
-
-# Specify the entry point
-CMD ["python3", "train.py", "with", "config_params_docker.json"]
diff --git a/train/README.md b/train/README.md
deleted file mode 100644
index 5f6d326..0000000
--- a/train/README.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# Training eynollah
-
-This README explains the technical details of how to set up and run training; for detailed information on parameterization, see [`docs/train.md`](../docs/train.md).
-
-## Introduction
-
-This folder contains the source code for training an encoder model for document image segmentation.
-
-## Installation
-
-Clone the repository and install eynollah along with the dependencies necessary for training:
-
-```sh
-git clone https://github.com/qurator-spk/eynollah
-cd eynollah
-pip install '.[training]'
-```
-
-### Pretrained encoder
-
-Download our pretrained weights and add them to a `train/pretrained_model` folder:
-
-```sh
-cd train
-wget -O pretrained_model.tar.gz https://zenodo.org/records/17243320/files/pretrained_model_v0_5_1.tar.gz?download=1
-tar xf pretrained_model.tar.gz
-```
-
-### Binarization training data
-
-A small sample of training data for a binarization experiment can be found [on
-zenodo](https://zenodo.org/records/17243320/files/training_data_sample_binarization_v0_5_1.tar.gz?download=1),
-which contains `images` and `labels` folders.
-
-### Helpful tools
-
-* [`pagexml2img`](https://github.com/qurator-spk/page2img) -> Tool to extract 2-D or 3-D RGB images from PAGE-XML data. In the former case, the output is a single 2-D image array in which each class is filled with a distinct pixel value. In the case of a 3-D RGB image,
-each class is assigned an RGB value, and alongside the images a text file listing the classes is also produced.
-* [`cocoSegmentationToPng`](https://github.com/nightrome/cocostuffapi/blob/17acf33aef3c6cc2d6aca46dcf084266c2778cf0/PythonAPI/pycocotools/cocostuffhelper.py#L130) -> Convert COCO GT or results for a single image to a segmentation map and write it to disk.
-* [`ocrd-segment-extract-pages`](https://github.com/OCR-D/ocrd_segment/blob/master/ocrd_segment/extract_pages.py) -> Extract region classes and their colours in mask (pseg) images. Allows the color map as free dict parameter, and comes with a default that mimics PageViewer's coloring for quick debugging; it also warns when regions do overlap.
-
-### Train using Docker
-
-Build the Docker image:
-
-```bash
-cd train
-docker build -t model-training .
-```
-
-Run the Docker image:
-
-```bash
-cd train
-docker run --gpus all -v $PWD:/entry_point_dir model-training
-```
diff --git a/train/config_params.json b/train/config_params.json
deleted file mode 100644
index 1db8026..0000000
--- a/train/config_params.json
+++ /dev/null
@@ -1,58 +0,0 @@
-{
-    "backbone_type" : "transformer",
-    "task": "segmentation",
-    "n_classes" : 2,
-    "n_epochs" : 0,
-    "input_height" : 448,
-    "input_width" : 448,
-    "weight_decay" : 1e-6,
-    "n_batch" : 1,
-    "learning_rate": 1e-4,
-    "patches" : false,
-    "pretraining" : true,
-    "augmentation" : true,
-    "flip_aug" : false,
-    "blur_aug" : false,
-    "scaling" : false,
-    "adding_rgb_background": true,
-    "adding_rgb_foreground": true,
-    "add_red_textlines": false,
-    "channels_shuffling": false,
-    "degrading": false,
-    "brightening": false,
-    "binarization" : true,
-    "scaling_bluring" : false,
-    "scaling_binarization" : false,
-    "scaling_flip" : false,
-    "rotation": false,
-    "rotation_not_90": false,
-    "transformer_num_patches_xy": [56, 56],
-    "transformer_patchsize_x": 4,
-    "transformer_patchsize_y": 4,
-    "transformer_projection_dim": 64,
-    "transformer_mlp_head_units": [128, 64],
-    "transformer_layers": 1,
-    "transformer_num_heads": 1,
-    "transformer_cnn_first": false,
-    "blur_k" : ["blur","guass","median"],
-    "scales" : [0.6, 0.7, 0.8, 0.9],
-    "brightness" : [1.3, 1.5, 1.7, 2],
-    "degrade_scales" : [0.2, 0.4],
-    "flip_index" : [0, 1, -1],
-    "shuffle_indexes" : [ [0,2,1], [1,2,0], [1,0,2] , [2,1,0]],
-    "thetha" : [5, -5],
-    "number_of_backgrounds_per_image": 2,
-    "continue_training": false,
-    "index_start" : 0,
-    "dir_of_start_model" : " ",
-    "weighted_loss": false,
-    "is_loss_soft_dice": false,
-    "data_is_provided": false,
-    "dir_train": "/home/vahid/Documents/test/sbb_pixelwise_segmentation/test_label/pageextractor_test/train_new",
-    "dir_eval": "/home/vahid/Documents/test/sbb_pixelwise_segmentation/test_label/pageextractor_test/eval_new",
-    "dir_output": "/home/vahid/Documents/test/sbb_pixelwise_segmentation/test_label/pageextractor_test/output_new",
-    "dir_rgb_backgrounds": "/home/vahid/Documents/1_2_test_eynollah/set_rgb_background",
-    "dir_rgb_foregrounds": "/home/vahid/Documents/1_2_test_eynollah/out_set_rgb_foreground",
-    "dir_img_bin": "/home/vahid/Documents/test/sbb_pixelwise_segmentation/test_label/pageextractor_test/train_new/images_bin"
-
-}
diff --git a/train/config_params_docker.json b/train/config_params_docker.json
deleted file mode 100644
index 45f87d3..0000000
--- a/train/config_params_docker.json
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-    "backbone_type" : "nontransformer",
"task": "segmentation", - "n_classes" : 3, - "n_epochs" : 1, - "input_height" : 672, - "input_width" : 448, - "weight_decay" : 1e-6, - "n_batch" : 4, - "learning_rate": 1e-4, - "patches" : false, - "pretraining" : true, - "augmentation" : false, - "flip_aug" : false, - "blur_aug" : true, - "scaling" : true, - "adding_rgb_background": false, - "adding_rgb_foreground": false, - "add_red_textlines": false, - "channels_shuffling": true, - "degrading": true, - "brightening": true, - "binarization" : false, - "scaling_bluring" : false, - "scaling_binarization" : false, - "scaling_flip" : false, - "rotation": false, - "rotation_not_90": true, - "transformer_num_patches_xy": [14, 21], - "transformer_patchsize_x": 1, - "transformer_patchsize_y": 1, - "transformer_projection_dim": 64, - "transformer_mlp_head_units": [128, 64], - "transformer_layers": 1, - "transformer_num_heads": 1, - "transformer_cnn_first": true, - "blur_k" : ["blur","gauss","median"], - "scales" : [0.6, 0.7, 0.8, 0.9], - "brightness" : [1.3, 1.5, 1.7, 2], - "degrade_scales" : [0.2, 0.4], - "flip_index" : [0, 1, -1], - "shuffle_indexes" : [ [0,2,1], [1,2,0], [1,0,2] , [2,1,0]], - "thetha" : [5, -5], - "number_of_backgrounds_per_image": 2, - "continue_training": false, - "index_start" : 0, - "dir_of_start_model" : " ", - "weighted_loss": false, - "is_loss_soft_dice": true, - "data_is_provided": false, - "dir_train": "/entry_point_dir/train", - "dir_eval": "/entry_point_dir/eval", - "dir_output": "/entry_point_dir/output" -} diff --git a/train/custom_config_page2label.json b/train/custom_config_page2label.json deleted file mode 100644 index 9116ce3..0000000 --- a/train/custom_config_page2label.json +++ /dev/null @@ -1,8 +0,0 @@ -{ -"use_case": "textline", -"textregions":{ "rest_as_paragraph": 1, "header":2 , "heading":2 , "marginalia":3 }, -"imageregion":4, -"separatorregion":5, -"graphicregions" :{"rest_as_decoration":6}, -"columns_width":{"1":1000, "2":1300, "3":1600, "4":2000, "5":2300, "6":2500} -} diff --git a/train/requirements.txt b/train/requirements.txt deleted file mode 100644 index 63f3813..0000000 --- a/train/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -sacred -seaborn -numpy <1.24.0 -tqdm -imutils -scipy diff --git a/train/scales_enhancement.json b/train/scales_enhancement.json deleted file mode 100644 index 58034f0..0000000 --- a/train/scales_enhancement.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "scales" : [ 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9] -}