Compare commits


No commits in common. 'master' and 'v0.9.1' have entirely different histories.

@@ -0,0 +1,20 @@
version: 2.1
jobs:
black:
parameters:
python-version:
type: string
docker:
- image: cimg/python:<< parameters.python-version >>
steps:
- checkout
- run: pip3 install --upgrade pip
- run: pip3 install black
- run: black .
workflows:
black:
jobs:
- black:
python-version: "3.11"

@@ -1,5 +0,0 @@
src/dinglehopper/tests
dist
build
*.egg-info
.git

@@ -15,7 +15,7 @@ indent_size = 2
[*.json] [*.json]
indent_size = 2 indent_size = 2
insert_final_newline = true insert_final_newline = false
# trailing spaces in markdown indicate word wrap # trailing spaces in markdown indicate word wrap
[*.md] [*.md]

@@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
- name: Upgrade pip - name: Upgrade pip
run: python3 -m pip install --upgrade pip run: python3 -m pip install --upgrade pip
- name: Install setuptools - name: Install setuptools
@@ -32,7 +32,7 @@ jobs:
- name: Build package - name: Build package
run: python3 -m pip install --upgrade build && python3 -m build run: python3 -m pip install --upgrade build && python3 -m build
- name: Upload dist - name: Upload dist
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v3
with: with:
name: dist name: dist
path: dist/ path: dist/
@@ -42,7 +42,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Download dist - name: Download dist
uses: actions/download-artifact@v4 uses: actions/download-artifact@v3
with: with:
name: dist name: dist
path: dist/ path: dist/
@@ -61,7 +61,7 @@ jobs:
id-token: write # IMPORTANT: this permission is mandatory for trusted publishing id-token: write # IMPORTANT: this permission is mandatory for trusted publishing
steps: steps:
- name: Download dist - name: Download dist
uses: actions/download-artifact@v4 uses: actions/download-artifact@v3
with: with:
name: dist name: dist
path: dist/ path: dist/

@@ -1,4 +1,4 @@
name: Test name: test
on: on:
@@ -6,10 +6,6 @@ on:
branches: branches:
- master - master
pull_request:
branches:
- master
schedule: schedule:
- cron: "00 16 07 * *" # = monthly - cron: "00 16 07 * *" # = monthly
@@ -25,27 +21,30 @@ jobs:
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
python-version: [ "3.9", "3.10", "3.11", "3.12", "3.13" ] python-version: [ "3.6", "3.7", "3.8", "3.9", "3.10", "3.11" ]
runs-on: "ubuntu-latest" # For Python 3.6, we need to fall back to Ubuntu 20.04
runs-on: ${{ matrix.python-version == '3.6' && 'ubuntu-20.04' || 'ubuntu-latest' }}
env:
test_results_dir: test-results-${{ matrix.python-version }}
steps: steps:
- name: Set up Python - name: Set up Python
uses: actions/setup-python@v5 uses: actions/setup-python@v4
with: with:
python-version: ${{ matrix.python-version }} python-version: ${{ matrix.python-version }}
allow-prereleases: true
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
- name: Install possible lxml build requirements (if building from source)
run: sudo apt-get install -y libxml2-dev libxslt-dev python3-dev
- name: Install possible shapely build requirements (if building from source)
run: sudo apt-get install -y libgeos-dev
- name: Update pip - name: Update pip
run: python3 -m pip install -U pip run: python3 -m pip install -U pip
- name: Avoid compiling OpenCV and NumPy on Python 3.6
run: |
if python3 --version | grep -q "Python 3.6"; then
pip install --prefer-binary -U opencv-python-headless numpy
fi
- name: Install requirements*.txt - name: Install requirements*.txt
run: | run: |
for requirements_txt in requirements*.txt; do for requirements_txt in requirements*.txt; do
@@ -55,10 +54,19 @@ jobs:
- name: Test - name: Test
run: | run: |
cd src cd src
python3 -m pytest --junitxml=../${{matrix.python-version}}-junit.xml -o junit_family=legacy mkdir -p ../$test_results_dir
python3 -m pytest --junitxml=../$test_results_dir/junit.xml -o junit_family=legacy
- name: Upload test results - name: Upload test results
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v3
if: success() || failure()
with:
name: ${{ env.test_results_dir }}
path: ${{ env.test_results_dir }}
- name: Report tests
uses: dorny/test-reporter@v1
if: success() || failure() if: success() || failure()
with: with:
name: test-results-${{matrix.python-version}} name: Results on Python ${{ matrix.python-version }}
path: ${{matrix.python-version}}-junit.xml path: "${{env.test_results_dir }}/junit.xml"
reporter: java-junit

@@ -1,20 +0,0 @@
name: 'Test Report'
on:
workflow_run:
workflows: ['test']
types:
- completed
permissions:
contents: read
actions: read
checks: write
jobs:
report:
runs-on: ubuntu-latest
steps:
- uses: dorny/test-reporter@v1
with:
artifact: /test-results-(.*)/
name: 'Tests Results - $1'
path: '*junit.xml'
reporter: java-junit

.gitignore

@@ -28,4 +28,3 @@ dmypy.json
# Build artifacts # Build artifacts
/build /build
/dist

@@ -1,16 +0,0 @@
variables:
http_proxy: "http://http-proxy.sbb.spk-berlin.de:3128/"
https_proxy: "http://http-proxy.sbb.spk-berlin.de:3128/"
HTTP_PROXY: "http://http-proxy.sbb.spk-berlin.de:3128/"
HTTPS_PROXY: "http://http-proxy.sbb.spk-berlin.de:3128/"
stages:
- triggers
mirror:
stage: triggers
trigger:
include: .gitlab/mirror.yml
strategy: depend
rules:
- if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH

@@ -1,47 +0,0 @@
stages:
- check
- pull
- push
default:
image: debian
check:
stage: check
script:
- whoami; env
- if [ -z "$CI_COMMIT_BRANCH" ]; then echo "Not on a branch" >&2; exit 3; fi
pull-gitlab:
stage: pull
script:
- echo "This is redundant"
pull-github:
stage: pull
before_script:
- apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/*
script:
- git remote remove github 2>/dev/null || true
- git remote add github https://github.com/qurator-spk/dinglehopper.git
- git remote -v
- git pull github "$CI_COMMIT_BRANCH"
push-gitlab:
stage: push
before_script:
- apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/*
script:
- git push origin "$CI_COMMIT_SHA":"$CI_COMMIT_BRANCH"
push-github:
stage: push
before_script:
- apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/*
script:
- git push github "$CI_COMMIT_SHA":"$CI_COMMIT_BRANCH"

@@ -1,6 +1,8 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos: repos:
- repo: https://github.com/pre-commit/pre-commit-hooks - repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0 rev: v3.2.0
hooks: hooks:
- id: trailing-whitespace - id: trailing-whitespace
- id: end-of-file-fixer - id: end-of-file-fixer
@@ -11,37 +13,17 @@ repos:
- id: check-ast - id: check-ast
- repo: https://github.com/psf/black - repo: https://github.com/psf/black
rev: 25.1.0 rev: 22.10.0
hooks: hooks:
- id: black - id: black
- repo: https://github.com/astral-sh/ruff-pre-commit - repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.11.5 rev: v0.0.280
hooks: hooks:
- args: - id: ruff
- --fix args: [--fix, --exit-non-zero-on-fix]
- --exit-non-zero-on-fix
id: ruff
- repo: https://github.com/pre-commit/mirrors-mypy - repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.15.0 rev: v1.4.1
hooks: hooks:
- additional_dependencies: - id: mypy
- types-setuptools
- types-lxml
- numpy # for numpy plugin
- attrs
- multimethod
- rapidfuzz
id: mypy
- repo: https://gitlab.com/vojko.pribudic.foss/pre-commit-update
rev: v0.6.1
hooks:
- id: pre-commit-update
- repo: https://github.com/dhatim/python-license-check
rev: 0.9.2
hooks:
- id: liccheck
language: system

@@ -1,38 +0,0 @@
ARG DOCKER_BASE_IMAGE
FROM $DOCKER_BASE_IMAGE
ARG VCS_REF
ARG BUILD_DATE
LABEL \
maintainer="https://github.com/qurator-spk/dinglehopper/issues" \
org.label-schema.vcs-ref=$VCS_REF \
org.label-schema.vcs-url="https://github.com/qurator-spk/dinglehopper" \
org.label-schema.build-date=$BUILD_DATE \
org.opencontainers.image.vendor="qurator" \
org.opencontainers.image.title="dinglehopper" \
org.opencontainers.image.description="An OCR evaluation tool" \
org.opencontainers.image.source="https://github.com/qurator-spk/dinglehopper" \
org.opencontainers.image.documentation="https://github.com/qurator-spk/dinglehopper/blob/${VCS_REF}/README.md" \
org.opencontainers.image.revision=$VCS_REF \
org.opencontainers.image.created=$BUILD_DATE \
org.opencontainers.image.base.name=ocrd/core
ENV LANG=C.UTF-8
ENV LC_ALL=C.UTF-8
# avoid HOME/.local/share (hard to predict USER here)
# so let XDG_DATA_HOME coincide with fixed system location
# (can still be overridden by derived stages)
ENV XDG_DATA_HOME /usr/local/share
# avoid the need for an extra volume for persistent resource user db
# (i.e. XDG_CONFIG_HOME/ocrd/resources.yml)
ENV XDG_CONFIG_HOME /usr/local/share/ocrd-resources
WORKDIR /build/dinglehopper
COPY . .
COPY ocrd-tool.json .
# prepackage ocrd-tool.json as ocrd-all-tool.json
RUN ocrd ocrd-tool ocrd-tool.json dump-tools > $(dirname $(ocrd bashlib filename))/ocrd-all-tool.json
RUN make install && rm -rf /build/dinglehopper
WORKDIR /data
VOLUME /data

@@ -1,33 +0,0 @@
PYTHON = python3
PIP = pip3
PYTHONIOENCODING=utf8
PYTEST_ARGS = -vv
DOCKER_BASE_IMAGE = docker.io/ocrd/core:v3.3.0
DOCKER_TAG = ocrd/dinglehopper
help:
@echo
@echo " Targets"
@echo
@echo " install Install full Python package via pip"
@echo " docker Build the ocrd/dinglehopper docker image"
# Install Python package via pip
install:
$(PIP) install .
install-dev:
$(PIP) install -e .
test:
pytest $(PYTEST_ARGS)
docker:
docker build \
--build-arg DOCKER_BASE_IMAGE=$(DOCKER_BASE_IMAGE) \
--build-arg VCS_REF=$$(git rev-parse --short HEAD) \
--build-arg BUILD_DATE=$$(date -u +"%Y-%m-%dT%H:%M:%SZ") \
-t $(DOCKER_TAG) .
.PHONY: help install install-dev test docker

@@ -10,7 +10,6 @@ pytest
``` ```
## Test running examples ## Test running examples
Only unit tests: Only unit tests:
```bash ```bash
pytest -m "not integration" pytest -m "not integration"
@@ -37,21 +36,9 @@ pytest -k "not test" --mypy
pytest -k "not test" --ruff pytest -k "not test" --ruff
``` ```
# How to use pre-commit ## How to use pre-commit
This project optionally uses [pre-commit](https://pre-commit.com) to check commits. To use it: This project optionally uses [pre-commit](https://pre-commit.com) to check commits. To use it:
- Install pre-commit, e.g. `pip install -r requirements-dev.txt` - Install pre-commit, e.g. `pip install -r requirements-dev.txt`
- Install the repo-local git hooks: `pre-commit install` - Install the repo-local git hooks: `pre-commit install`
# Releasing a new version
- Update `ocrd-tool.json`
- `git commit`
- `git tag vx.y.z`
- `git push && git push --tags`
- The GitHub Actions workflow `release` will now create
a. a new release on GitHub and
b. a new release on PyPI
- Currently requires a review for PYPI?

@@ -8,7 +8,7 @@ compares a ground truth (GT) document page with a OCR result page to compute
metrics and a word/character differences report. It also supports batch processing by metrics and a word/character differences report. It also supports batch processing by
generating, aggregating and summarizing multiple reports. generating, aggregating and summarizing multiple reports.
[![Tests](https://github.com/qurator-spk/dinglehopper/actions/workflows/test.yml/badge.svg)](https://github.com/qurator-spk/dinglehopper/actions?query=workflow:"test") [![Tests](https://github.com/qurator-spk/dinglehopper/workflows/test/badge.svg)](https://github.com/qurator-spk/dinglehopper/actions?query=workflow:"test")
[![GitHub tag](https://img.shields.io/github/tag/qurator-spk/dinglehopper?include_prereleases=&sort=semver&color=blue)](https://github.com/qurator-spk/dinglehopper/releases/) [![GitHub tag](https://img.shields.io/github/tag/qurator-spk/dinglehopper?include_prereleases=&sort=semver&color=blue)](https://github.com/qurator-spk/dinglehopper/releases/)
[![License](https://img.shields.io/badge/License-Apache-blue)](#license) [![License](https://img.shields.io/badge/License-Apache-blue)](#license)
[![issues - dinglehopper](https://img.shields.io/github/issues/qurator-spk/dinglehopper)](https://github.com/qurator-spk/dinglehopper/issues) [![issues - dinglehopper](https://img.shields.io/github/issues/qurator-spk/dinglehopper)](https://github.com/qurator-spk/dinglehopper/issues)
@@ -23,11 +23,10 @@ Goals
Installation Installation
------------ ------------
It's best to use pip, e.g.:
It's best to use pip to install the package from PyPI, e.g.: ~~~
``` sudo pip install .
pip install dinglehopper ~~~
```
Usage Usage
----- -----
@@ -100,11 +99,11 @@ This generates `summary.html` and `summary.json` in the same `output_folder`.
If you are summarizing many reports and have used the `--differences` flag while If you are summarizing many reports and have used the `--differences` flag while
generating them, it may be useful to limit the number of differences reported by using generating them, it may be useful to limit the number of differences reported by using
the `--occurrences-threshold` parameter. This will reduce the size of the generated HTML the `--occurences-threshold` parameter. This will reduce the size of the generated HTML
report, making it easier to open and navigate. Note that the JSON report will still report, making it easier to open and navigate. Note that the JSON report will still
contain all differences. Example: contain all differences. Example:
~~~ ~~~
dinglehopper-summarize output_folder/ --occurrences-threshold 10 dinglehopper-summarize output_folder/ --occurences-threshold 10
~~~ ~~~
### dinglehopper-line-dirs ### dinglehopper-line-dirs

@@ -7,10 +7,9 @@ authors = [
{name = "Mike Gerber", email = "mike.gerber@sbb.spk-berlin.de"}, {name = "Mike Gerber", email = "mike.gerber@sbb.spk-berlin.de"},
{name = "The QURATOR SPK Team", email = "qurator@sbb.spk-berlin.de"}, {name = "The QURATOR SPK Team", email = "qurator@sbb.spk-berlin.de"},
] ]
description = "An OCR evaluation tool" description = "The OCR evaluation tool"
readme = "README.md" readme = "README.md"
license.file = "LICENSE" requires-python = ">=3.6"
requires-python = ">=3.9"
keywords = ["qurator", "ocr", "evaluation", "ocr-d"] keywords = ["qurator", "ocr", "evaluation", "ocr-d"]
dynamic = ["version", "dependencies", "optional-dependencies"] dynamic = ["version", "dependencies", "optional-dependencies"]
@@ -49,7 +48,7 @@ optional-dependencies.dev = {file = ["requirements-dev.txt"]}
where = ["src"] where = ["src"]
[tool.setuptools.package-data] [tool.setuptools.package-data]
dinglehopper = ["templates/*", "*.json"] dinglehopper = ["*.json", "templates/*"]
[tool.pytest.ini_options] [tool.pytest.ini_options]
@@ -61,54 +60,11 @@ markers = [
[tool.mypy] [tool.mypy]
plugins = ["numpy.typing.mypy_plugin"]
ignore_missing_imports = true ignore_missing_imports = true
strict = true [tool.ruff]
disallow_subclassing_any = false
# ❗ error: Class cannot subclass "Processor" (has type "Any")
disallow_any_generics = false
disallow_untyped_defs = false
disallow_untyped_calls = false
[tool.ruff.lint]
select = ["E", "F", "I"] select = ["E", "F", "I"]
ignore = [
"F811", # multimethods are considered redefinitions by ruff
[tool.liccheck]
authorized_licenses = [
"bsd",
"new bsd",
"bsd license",
"new bsd license",
"simplified bsd",
"apache",
"apache 2.0",
"apache software license",
"apache software",
"apache license 2.0",
"gnu lgpl",
"lgpl with exceptions or zpl",
"GNU Library or Lesser General Public License (LGPL)",
"GNU Lesser General Public License v3 (LGPLv3)",
"GNU Lesser General Public License v2 or later (LGPLv2+)",
"mit",
"mit license",
"mit-cmu",
"python software foundation",
"psf",
"psf-2.0",
"Historical Permission Notice and Disclaimer (HPND)",
"public domain",
'The Unlicense (Unlicense)',
"isc",
"ISC License (ISCL)",
'Mozilla Public License 2.0 (MPL 2.0)',
]
unauthorized_licenses = [
"gpl v3",
] ]

@@ -1,14 +1,8 @@
pytest pytest
pytest-cov pytest-cov
pytest-mypy
black black
pre-commit pre-commit
ruff ruff ; python_version >= "3.7"
pytest-ruff pytest-ruff ; python_version >= "3.7"
mypy
types-lxml
types-setuptools
pytest-mypy
liccheck

@@ -1,14 +1,14 @@
click click
jinja2 jinja2
lxml lxml
uniseg >= 0.9.1 uniseg
numpy numpy
colorama colorama
MarkupSafe MarkupSafe
ocrd >= 3.3.0 ocrd >= 2.20.1
attrs attrs
multimethod >= 1.3 multimethod == 1.3 # latest version to officially support Python 3.5
tqdm tqdm
rapidfuzz >= 2.7.0 rapidfuzz >= 2.4.2
six # XXX workaround OCR-D/core#730
chardet chardet
importlib_resources

@@ -1,4 +1,4 @@
from .align import align, score_hint, seq_align from .align import align, seq_align
from .character_error_rate import character_error_rate, character_error_rate_n from .character_error_rate import character_error_rate, character_error_rate_n
from .edit_distance import distance, editops from .edit_distance import distance, editops
from .extracted_text import ExtractedText from .extracted_text import ExtractedText
@@ -16,7 +16,6 @@ __all__ = [
"editops", "editops",
"distance", "distance",
"align", "align",
"score_hint",
"seq_align", "seq_align",
"character_error_rate", "character_error_rate",
"character_error_rate_n", "character_error_rate_n",

@@ -1,10 +1,8 @@
import math
import unicodedata import unicodedata
from math import ceil
from typing import Optional
from rapidfuzz.distance import Levenshtein from rapidfuzz.distance import Levenshtein
from uniseg.graphemecluster import grapheme_clusters
from .edit_distance import grapheme_clusters
def align(t1, t2): def align(t1, t2):
@@ -14,27 +12,11 @@ def align(t1, t2):
return seq_align(s1, s2) return seq_align(s1, s2)
def score_hint(er: float, n: int) -> Optional[int]: def seq_align(s1, s2):
"""Calculate RapidFuzz score hint for a given error rate and count.
Gives the score hint for the distance functions (= expected distance) or None if
the error rate is inf.
"""
assert not math.isnan(er)
try:
score_hint = int(ceil(er * n))
except (OverflowError, ValueError):
# ceil(er * n) can be inf or NaN (for n == 0), so int() can throw an
# OverflowError and a ValueError.
score_hint = None
return score_hint
def seq_align(s1, s2, score_hint=None):
"""Align general sequences.""" """Align general sequences."""
s1 = list(s1) s1 = list(s1)
s2 = list(s2) s2 = list(s2)
ops = Levenshtein.editops(s1, s2, score_hint=score_hint) ops = Levenshtein.editops(s1, s2)
i = 0 i = 0
j = 0 j = 0

@@ -1,5 +1,7 @@
from __future__ import division
import unicodedata import unicodedata
from typing import List, Tuple, TypeVar from typing import Tuple
from multimethod import multimethod from multimethod import multimethod
from uniseg.graphemecluster import grapheme_clusters from uniseg.graphemecluster import grapheme_clusters
@@ -7,13 +9,9 @@ from uniseg.graphemecluster import grapheme_clusters
from .edit_distance import distance from .edit_distance import distance
from .extracted_text import ExtractedText from .extracted_text import ExtractedText
T = TypeVar("T")
@multimethod @multimethod
def character_error_rate_n( def character_error_rate_n(reference: str, compared: str) -> Tuple[float, int]:
reference: List[str], compared: List[str]
) -> Tuple[float, int]:
""" """
Compute character error rate. Compute character error rate.
@@ -21,7 +19,7 @@ def character_error_rate_n(
""" """
d = distance(reference, compared) d = distance(reference, compared)
n = len(reference) n = len(list(grapheme_clusters(unicodedata.normalize("NFC", reference))))
if d == 0: if d == 0:
return 0, n return 0, n
@@ -32,28 +30,18 @@ def character_error_rate_n(
# XXX Should we really count newlines here? # XXX Should we really count newlines here?
@character_error_rate_n.register @multimethod
def _(reference: str, compared: str) -> Tuple[float, int]: def character_error_rate_n(
seq1 = list(grapheme_clusters(unicodedata.normalize("NFC", reference))) reference: ExtractedText, compared: ExtractedText
seq2 = list(grapheme_clusters(unicodedata.normalize("NFC", compared))) ) -> Tuple[float, int]:
cer, n = character_error_rate_n(seq1, seq2) return character_error_rate_n(reference.text, compared.text)
return cer, n
@character_error_rate_n.register
def _(reference: ExtractedText, compared: ExtractedText) -> Tuple[float, int]:
cer, n = character_error_rate_n(
reference.grapheme_clusters, compared.grapheme_clusters
)
return cer, n
def character_error_rate(reference: T, compared: T) -> float: def character_error_rate(reference, compared) -> float:
""" """
Compute character error rate. Compute character error rate.
:return: character error rate :return: character error rate
""" """
cer: float
cer, _ = character_error_rate_n(reference, compared) cer, _ = character_error_rate_n(reference, compared)
return cer return cer

@@ -1,13 +1,13 @@
import os import os
from collections import Counter from collections import Counter
from typing import List
import click import click
from jinja2 import Environment, FileSystemLoader from jinja2 import Environment, FileSystemLoader
from markupsafe import escape from markupsafe import escape
from ocrd_utils import initLogging from ocrd_utils import initLogging
from uniseg.graphemecluster import grapheme_clusters
from dinglehopper.align import score_hint, seq_align from dinglehopper.align import seq_align
from dinglehopper.character_error_rate import character_error_rate_n from dinglehopper.character_error_rate import character_error_rate_n
from dinglehopper.config import Config from dinglehopper.config import Config
from dinglehopper.extracted_text import ExtractedText from dinglehopper.extracted_text import ExtractedText
@@ -15,9 +15,7 @@ from dinglehopper.ocr_files import extract
from dinglehopper.word_error_rate import word_error_rate_n, words_normalized from dinglehopper.word_error_rate import word_error_rate_n, words_normalized
def gen_diff_report( def gen_diff_report(gt_in, ocr_in, css_prefix, joiner, none, differences=False):
gt_in, ocr_in, css_prefix, joiner, none, *, differences=False, score_hint=None
):
gtx = "" gtx = ""
ocrx = "" ocrx = ""
@@ -44,8 +42,9 @@ def gen_diff_report(
if isinstance(gt_in, ExtractedText): if isinstance(gt_in, ExtractedText):
if not isinstance(ocr_in, ExtractedText): if not isinstance(ocr_in, ExtractedText):
raise TypeError() raise TypeError()
gt_things = gt_in.grapheme_clusters # XXX splitting should be done in ExtractedText
ocr_things = ocr_in.grapheme_clusters gt_things = list(grapheme_clusters(gt_in.text))
ocr_things = list(grapheme_clusters(ocr_in.text))
else: else:
gt_things = gt_in gt_things = gt_in
ocr_things = ocr_in ocr_things = ocr_in
@@ -54,7 +53,7 @@ def gen_diff_report(
o_pos = 0 o_pos = 0
found_differences = [] found_differences = []
for k, (g, o) in enumerate(seq_align(gt_things, ocr_things, score_hint)): for k, (g, o) in enumerate(seq_align(gt_things, ocr_things)):
css_classes = None css_classes = None
gt_id = None gt_id = None
ocr_id = None ocr_id = None
@@ -77,7 +76,7 @@ def gen_diff_report(
if o is not None: if o is not None:
o_pos += len(o) o_pos += len(o)
counted_differences = dict(Counter(elem for elem in found_differences)) found_differences = dict(Counter(elem for elem in found_differences))
return ( return (
""" """
@@ -88,7 +87,7 @@ def gen_diff_report(
""".format( """.format(
gtx, ocrx gtx, ocrx
), ),
counted_differences, found_differences,
) )
@@ -106,15 +105,15 @@ def json_float(value):
def process( def process(
gt: str, gt,
ocr: str, ocr,
report_prefix: str, report_prefix,
reports_folder: str = ".", reports_folder=".",
*, *,
metrics: bool = True, metrics=True,
differences: bool = False, differences=False,
textequiv_level: str = "region", textequiv_level="region",
) -> None: ):
"""Check OCR result against GT. """Check OCR result against GT.
The @click decorators change the signature of the decorated functions, so we keep The @click decorators change the signature of the decorated functions, so we keep
@@ -123,34 +122,22 @@ def process(
gt_text = extract(gt, textequiv_level=textequiv_level) gt_text = extract(gt, textequiv_level=textequiv_level)
ocr_text = extract(ocr, textequiv_level=textequiv_level) ocr_text = extract(ocr, textequiv_level=textequiv_level)
gt_words: List[str] = list(words_normalized(gt_text))
ocr_words: List[str] = list(words_normalized(ocr_text))
assert isinstance(gt_text, ExtractedText)
assert isinstance(ocr_text, ExtractedText)
cer, n_characters = character_error_rate_n(gt_text, ocr_text) cer, n_characters = character_error_rate_n(gt_text, ocr_text)
wer, n_words = word_error_rate_n(gt_text, ocr_text)
char_diff_report, diff_c = gen_diff_report( char_diff_report, diff_c = gen_diff_report(
gt_text, gt_text, ocr_text, css_prefix="c", joiner="", none="·", differences=differences
ocr_text,
css_prefix="c",
joiner="",
none="·",
score_hint=score_hint(cer, n_characters),
differences=differences,
) )
# {gt,ocr}_words must not be a generator, so we don't drain it for the differences gt_words = words_normalized(gt_text)
# report. ocr_words = words_normalized(ocr_text)
assert isinstance(gt_words, list)
assert isinstance(ocr_words, list)
wer, n_words = word_error_rate_n(gt_words, ocr_words)
word_diff_report, diff_w = gen_diff_report( word_diff_report, diff_w = gen_diff_report(
gt_words, gt_words,
ocr_words, ocr_words,
css_prefix="w", css_prefix="w",
joiner=" ", joiner=" ",
none="", none="",
score_hint=score_hint(wer, n_words),
differences=differences, differences=differences,
) )
@@ -187,15 +174,8 @@ def process(
def process_dir( def process_dir(
gt: str, gt, ocr, report_prefix, reports_folder, metrics, differences, textequiv_level
ocr: str, ):
report_prefix: str,
reports_folder: str = ".",
*,
metrics: bool = True,
differences: bool = False,
textequiv_level: str = "region",
) -> None:
for gt_file in os.listdir(gt): for gt_file in os.listdir(gt):
gt_file_path = os.path.join(gt, gt_file) gt_file_path = os.path.join(gt, gt_file)
ocr_file_path = os.path.join(ocr, gt_file) ocr_file_path = os.path.join(ocr, gt_file)
@@ -234,7 +214,6 @@ def process_dir(
metavar="LEVEL", metavar="LEVEL",
) )
@click.option("--progress", default=False, is_flag=True, help="Show progress bar") @click.option("--progress", default=False, is_flag=True, help="Show progress bar")
@click.version_option()
def main( def main(
gt, gt,
ocr, ocr,
@@ -277,9 +256,9 @@ def main(
ocr, ocr,
report_prefix, report_prefix,
reports_folder, reports_folder,
metrics=metrics, metrics,
differences=differences, differences,
textequiv_level=textequiv_level, textequiv_level,
) )
else: else:
process( process(

@@ -5,7 +5,6 @@ import click
from jinja2 import Environment, FileSystemLoader from jinja2 import Environment, FileSystemLoader
from ocrd_utils import initLogging from ocrd_utils import initLogging
from .align import score_hint
from .character_error_rate import character_error_rate_n from .character_error_rate import character_error_rate_n
from .cli import gen_diff_report, json_float from .cli import gen_diff_report, json_float
from .ocr_files import plain_extract from .ocr_files import plain_extract
@@ -50,8 +49,6 @@ def process(gt_dir, ocr_dir, report_prefix, *, metrics=True):
ocr_text = plain_extract( ocr_text = plain_extract(
os.path.join(ocr_dir, ocr), include_filename_in_id=True os.path.join(ocr_dir, ocr), include_filename_in_id=True
) )
gt_words = words_normalized(gt_text)
ocr_words = words_normalized(ocr_text)
# Compute CER # Compute CER
l_cer, l_n_characters = character_error_rate_n(gt_text, ocr_text) l_cer, l_n_characters = character_error_rate_n(gt_text, ocr_text)
@@ -65,7 +62,7 @@ def process(gt_dir, ocr_dir, report_prefix, *, metrics=True):
n_characters = n_characters + l_n_characters n_characters = n_characters + l_n_characters
# Compute WER # Compute WER
l_wer, l_n_words = word_error_rate_n(gt_words, ocr_words) l_wer, l_n_words = word_error_rate_n(gt_text, ocr_text)
if wer is None: if wer is None:
wer, n_words = l_wer, l_n_words wer, n_words = l_wer, l_n_words
else: else:
@@ -75,20 +72,12 @@ def process(gt_dir, ocr_dir, report_prefix, *, metrics=True):
# Generate diff reports # Generate diff reports
char_diff_report += gen_diff_report( char_diff_report += gen_diff_report(
gt_text, gt_text, ocr_text, css_prefix="l{0}-c".format(k), joiner="", none="·"
ocr_text,
css_prefix="l{0}-c".format(k),
joiner="",
none="·",
score_hint=score_hint(l_cer, l_n_characters),
) )
gt_words = words_normalized(gt_text)
ocr_words = words_normalized(ocr_text)
word_diff_report += gen_diff_report( word_diff_report += gen_diff_report(
gt_words, gt_words, ocr_words, css_prefix="l{0}-w".format(k), joiner=" ", none=""
ocr_words,
css_prefix="l{0}-w".format(k),
joiner=" ",
none="",
score_hint=score_hint(l_wer, l_n_words),
) )
env = Environment( env = Environment(

@@ -1,6 +1,5 @@
import json import json
import os import os
from typing import Dict
import click import click
from jinja2 import Environment, FileSystemLoader from jinja2 import Environment, FileSystemLoader
@@ -14,8 +13,8 @@ def process(reports_folder, occurrences_threshold=1):
wer_list = [] wer_list = []
cer_sum = 0 cer_sum = 0
wer_sum = 0 wer_sum = 0
diff_c: Dict[str, int] = {} diff_c = {}
diff_w: Dict[str, int] = {} diff_w = {}
for report in os.listdir(reports_folder): for report in os.listdir(reports_folder):
if report.endswith(".json"): if report.endswith(".json"):
@@ -35,15 +34,10 @@ def process(reports_folder, occurrences_threshold=1):
cer_sum += cer cer_sum += cer
wer_sum += wer wer_sum += wer
try: for key, value in report_data["differences"]["character_level"].items():
for key, value in report_data["differences"][ diff_c[key] = diff_c.get(key, 0) + value
"character_level" for key, value in report_data["differences"]["word_level"].items():
].items(): diff_w[key] = diff_w.get(key, 0) + value
diff_c[key] = diff_c.get(key, 0) + value
for key, value in report_data["differences"]["word_level"].items():
diff_w[key] = diff_w.get(key, 0) + value
except KeyError:
pass
if len(cer_list) == 0: if len(cer_list) == 0:
click.echo(f"No reports found in folder '{os.path.abspath(reports_folder)}'") click.echo(f"No reports found in folder '{os.path.abspath(reports_folder)}'")

@@ -1,5 +1,6 @@
from __future__ import division, print_function
import unicodedata import unicodedata
from typing import List
from multimethod import multimethod from multimethod import multimethod
from rapidfuzz.distance import Levenshtein from rapidfuzz.distance import Levenshtein
@@ -9,18 +10,7 @@ from .extracted_text import ExtractedText
@multimethod @multimethod
def distance(seq1: List[str], seq2: List[str]) -> int: def distance(s1: str, s2: str):
"""Compute the Levenshtein edit distance between two lists of grapheme clusters.
This assumes that the grapheme clusters are already normalized.
Use distance(str, str) instead if you need to compare two Unicode strings.
"""
return Levenshtein.distance(seq1, seq2)
@distance.register
def _(s1: str, s2: str) -> int:
"""Compute the Levenshtein edit distance between two Unicode strings """Compute the Levenshtein edit distance between two Unicode strings
Note that this is different from levenshtein() as this function knows about Unicode Note that this is different from levenshtein() as this function knows about Unicode
@@ -32,9 +22,9 @@ def _(s1: str, s2: str) -> int:
return Levenshtein.distance(seq1, seq2) return Levenshtein.distance(seq1, seq2)
@distance.register @multimethod
def _(s1: ExtractedText, s2: ExtractedText) -> int: def distance(s1: ExtractedText, s2: ExtractedText):
return Levenshtein.distance(s1.grapheme_clusters, s2.grapheme_clusters) return distance(s1.text, s2.text)
def editops(word1, word2): def editops(word1, word2):

@@ -1,16 +1,14 @@
import enum import enum
import functools
import re import re
import unicodedata import unicodedata
from contextlib import suppress from contextlib import suppress
from itertools import repeat from itertools import repeat
from typing import Any, Dict, List, Optional from typing import Optional
import attr import attr
import numpy as np import numpy as np
from lxml import etree as ET from lxml import etree as ET
from ocrd_utils import getLogger from ocrd_utils import getLogger
from uniseg.graphemecluster import grapheme_clusters
class Normalization(enum.Enum): class Normalization(enum.Enum):
@@ -122,7 +120,7 @@ class ExtractedText:
segment_id = attr.ib(type=Optional[str]) segment_id = attr.ib(type=Optional[str])
@segment_id.validator @segment_id.validator
def is_valid_segment_id(self, _, value): def check(self, _, value):
if value is None: if value is None:
return return
if not re.match(r"[\w\d_-]+", value): if not re.match(r"[\w\d_-]+", value):
@@ -132,85 +130,33 @@ class ExtractedText:
# a. _text itself # a. _text itself
# b. or segments (ExtractedText) and a joiner # b. or segments (ExtractedText) and a joiner
segments = attr.ib(type=Optional[List["ExtractedText"]]) segments = attr.ib(type=Optional[list], converter=attr.converters.optional(list))
joiner = attr.ib(type=Optional[str]) joiner = attr.ib(type=Optional[str])
_text = attr.ib(type=Optional[str]) _text = attr.ib(type=Optional[str])
_grapheme_clusters = attr.ib(type=Optional[List[str]])
@segments.validator @segments.validator
def cant_set_both_segments_and_text(self, _, value): def check(self, _, value):
if value is not None and self._text is not None: if value is not None and self._text is not None:
raise ValueError("Can't have both segments and text") raise ValueError("Can't have both segments and text")
@joiner.validator
def is_valid_joiner(self, _, value):
if self.segments is None:
if value is not None:
raise ValueError("Can't have joiner without segments to join")
if self.segments is not None:
if value not in ("", " ", "\n"):
raise ValueError(f"Unexpected segment joiner value {repr(value)}")
@_text.validator @_text.validator
def is_valid_text(self, _, value): def check(self, _, value):
if value is None: if value is not None and self.segments is not None:
return
if self.segments is not None:
raise ValueError("Can't have both segments and text") raise ValueError("Can't have both segments and text")
if unicodedata.normalize("NFC", value) != value: if value is not None and unicodedata.normalize("NFC", value) != value:
raise ValueError('String "{}" is not in NFC.'.format(value)) raise ValueError('String "{}" is not in NFC.'.format(value))
if normalize(value, self.normalization) != value: if value is not None and normalize(value, self.normalization) != value:
raise ValueError('String "{}" is not normalized.'.format(value)) raise ValueError('String "{}" is not normalized.'.format(value))
if self._grapheme_clusters is None:
raise ValueError("Requires both text and grapheme clusters to be set")
@_grapheme_clusters.validator
def are_valid_grapheme_clusters(self, _, value):
if value is not None and self._text is None:
raise ValueError("Requires both text and grapheme clusters to be set")
normalization = attr.ib(converter=Normalization, default=Normalization.NFC_SBB) normalization = attr.ib(converter=Normalization, default=Normalization.NFC_SBB)
@property @property
def text(self) -> str: def text(self):
if self._text is not None: if self._text is not None:
return self._text return self._text
else: else:
assert self.joiner is not None and self.segments is not None
return self.joiner.join(s.text for s in self.segments) return self.joiner.join(s.text for s in self.segments)
@functools.cached_property
def _joiner_grapheme_cluster(self):
"""We need the joiner as a list of 0 or 1 grapheme clusters.
This property is cached.
"""
assert self.joiner is not None
if len(self.joiner) > 0:
joiner_grapheme_cluster = list(grapheme_clusters(self.joiner))
assert len(joiner_grapheme_cluster) == 1 # see joiner's check above
elif len(self.joiner) == 0:
joiner_grapheme_cluster = []
else:
joiner_grapheme_cluster = None
return joiner_grapheme_cluster
@property
def grapheme_clusters(self):
if self._text is not None:
return self._grapheme_clusters
else:
# TODO Test with text extracted at glyph level (joiner == "")
clusters = []
assert self.segments is not None
for seg in self.segments:
clusters += seg.grapheme_clusters + self._joiner_grapheme_cluster
clusters = clusters[:-1]
return clusters
_segment_id_for_pos = None _segment_id_for_pos = None
def segment_id_for_pos(self, pos): def segment_id_for_pos(self, pos):
@@ -221,7 +167,6 @@ class ExtractedText:
else: else:
# Recurse # Recurse
segment_id_for_pos = [] segment_id_for_pos = []
assert self.joiner is not None and self.segments is not None
for s in self.segments: for s in self.segments:
seg_ids = [s.segment_id_for_pos(i) for i in range(len(s.text))] seg_ids = [s.segment_id_for_pos(i) for i in range(len(s.text))]
segment_id_for_pos.extend(seg_ids) segment_id_for_pos.extend(seg_ids)
@@ -235,7 +180,7 @@ class ExtractedText:
return self._segment_id_for_pos[pos] return self._segment_id_for_pos[pos]
@classmethod @classmethod
def from_text_segment(cls, text_segment, nsmap, *, textequiv_level="region"): def from_text_segment(cls, text_segment, nsmap, textequiv_level="region"):
"""Build an ExtractedText from a PAGE content text element""" """Build an ExtractedText from a PAGE content text element"""
localname_for_textequiv_level = {"region": "TextRegion", "line": "TextLine"} localname_for_textequiv_level = {"region": "TextRegion", "line": "TextLine"}
@@ -252,8 +197,7 @@ class ExtractedText:
# FIXME hardcoded SBB normalization # FIXME hardcoded SBB normalization
segment_text = normalize_sbb(segment_text) segment_text = normalize_sbb(segment_text)
segment_text = segment_text or "" segment_text = segment_text or ""
clusters = list(grapheme_clusters(segment_text)) return cls(segment_id, None, None, segment_text)
return cls(segment_id, None, None, segment_text, clusters)
else: else:
# Recurse # Recurse
sub_localname = children_for_localname[localname] sub_localname = children_for_localname[localname]
@@ -268,15 +212,12 @@ class ExtractedText:
) )
) )
joiner = joiner_for_textequiv_level[sub_textequiv_level] joiner = joiner_for_textequiv_level[sub_textequiv_level]
return cls(segment_id, segments, joiner, None, None) return cls(segment_id, segments, joiner, None)
@classmethod @classmethod
def from_str(cls, text, normalization=Normalization.NFC_SBB): def from_str(cls, text, normalization=Normalization.NFC_SBB):
normalized_text = normalize(text, normalization) normalized_text = normalize(text, normalization)
clusters = list(grapheme_clusters(normalized_text)) return cls(None, None, None, normalized_text, normalization=normalization)
return cls(
None, None, None, normalized_text, clusters, normalization=normalization
)
def invert_dict(d): def invert_dict(d):
@@ -284,7 +225,7 @@ def invert_dict(d):
return {v: k for k, v in d.items()} return {v: k for k, v in d.items()}
def get_textequiv_unicode(text_segment: Any, nsmap: Dict[str, str]) -> str: def get_textequiv_unicode(text_segment, nsmap) -> str:
"""Get the TextEquiv/Unicode text of the given PAGE text element.""" """Get the TextEquiv/Unicode text of the given PAGE text element."""
segment_id = text_segment.attrib["id"] segment_id = text_segment.attrib["id"]
textequivs = text_segment.findall("./page:TextEquiv", namespaces=nsmap) textequivs = text_segment.findall("./page:TextEquiv", namespaces=nsmap)
@@ -308,7 +249,7 @@ def get_first_textequiv(textequivs, segment_id):
if np.any(~nan_mask): if np.any(~nan_mask):
if np.any(nan_mask): if np.any(nan_mask):
log.warning("TextEquiv without index in %s.", segment_id) log.warning("TextEquiv without index in %s.", segment_id)
index = int(np.nanargmin(indices)) index = np.nanargmin(indices)
else: else:
# try ordering by conf # try ordering by conf
confidences = np.array([get_attr(te, "conf") for te in textequivs], dtype=float) confidences = np.array([get_attr(te, "conf") for te in textequivs], dtype=float)
@@ -317,7 +258,7 @@ def get_first_textequiv(textequivs, segment_id):
"No index attributes, use 'conf' attribute to sort TextEquiv in %s.", "No index attributes, use 'conf' attribute to sort TextEquiv in %s.",
segment_id, segment_id,
) )
index = int(np.nanargmax(confidences)) index = np.nanargmax(confidences)
else: else:
# fallback to first entry in case of neither index or conf present # fallback to first entry in case of neither index or conf present
log.warning("No index attributes, use first TextEquiv in %s.", segment_id) log.warning("No index attributes, use first TextEquiv in %s.", segment_id)
@@ -325,11 +266,11 @@ def get_first_textequiv(textequivs, segment_id):
return textequivs[index] return textequivs[index]
def get_attr(te: Any, attr_name: str) -> float: def get_attr(te, attr_name) -> float:
"""Extract the attribute for the given name. """Extract the attribute for the given name.
Note: currently only handles numeric values! Note: currently only handles numeric values!
Other or non existent values are encoded as np.nan. Other or non existend values are encoded as np.nan.
""" """
attr_value = te.attrib.get(attr_name) attr_value = te.attrib.get(attr_name)
try: try:

@@ -22,7 +22,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"dinglehopper used to have its own (very inefficient) Levenshtein edit distance implementation, but now uses RapidFuzz." "dinglehopper uses to have its own (very inefficient) Levenshtein edit distance implementation, but now uses RapidFuzz."
] ]
}, },
{ {
@@ -391,7 +391,7 @@
"\\text{CER} = \\frac{i + s + d}{n}\n", "\\text{CER} = \\frac{i + s + d}{n}\n",
"$$\n", "$$\n",
"\n", "\n",
"where $i$ is the number of inserts, $s$ the number of substitutions, $d$ the number of deletions and $n$ is the number of characters in the reference text. (The text is not super clear about $n$ being the number of characters in the reference text, but it seems appropriate as they *are* clear about this when computing the word error rate.)" "where $i$ is the number of inserts, $s$ the number of substitutions, $d$ the number of deletions and $n$ is the number of characters in the reference text. (The text is not super clear about $n$ being the number of characters in the reference text, but it seems appropiate as they *are* clear about this when computing the word error rate.)"
] ]
}, },
{ {
@@ -680,7 +680,7 @@
" return cat in unwanted_categories or subcat in unwanted_subcategories\n", " return cat in unwanted_categories or subcat in unwanted_subcategories\n",
"\n", "\n",
" # We follow Unicode Standard Annex #29 on Unicode Text Segmentation here: Split on word boundaries using\n", " # We follow Unicode Standard Annex #29 on Unicode Text Segmentation here: Split on word boundaries using\n",
" # uniseg.wordbreak.words() and ignore all \"words\" that contain only whitespace, punctuation \"or similar characters.\"\n", " # uniseg.wordbreak.words() and ignore all \"words\" that contain only whitespace, punctation \"or similar characters.\"\n",
" for word in uniseg.wordbreak.words(s):\n", " for word in uniseg.wordbreak.words(s):\n",
" if all(unwanted(c) for c in word):\n", " if all(unwanted(c) for c in word):\n",
" pass\n", " pass\n",

@@ -1,53 +1,44 @@
from __future__ import division, print_function
import os import os
import sys import sys
from typing import Dict, Iterator, Optional from typing import Iterator
import chardet import chardet
from lxml import etree as ET from lxml import etree as ET
from lxml.etree import XMLSyntaxError from lxml.etree import XMLSyntaxError
from uniseg.graphemecluster import grapheme_clusters
from .extracted_text import ExtractedText, normalize_sbb from .extracted_text import ExtractedText, normalize_sbb
def alto_namespace(tree: ET._ElementTree) -> Optional[str]: def alto_namespace(tree: ET.ElementTree) -> str:
"""Return the ALTO namespace used in the given ElementTree. """Return the ALTO namespace used in the given ElementTree.
This relies on the assumption that, in any given ALTO file, the root element has the This relies on the assumption that, in any given ALTO file, the root element has the
local name "alto". We do not check if the file uses any valid ALTO namespace. local name "alto". We do not check if the files uses any valid ALTO namespace.
""" """
root_name = ET.QName(tree.getroot().tag) root_name = ET.QName(tree.getroot().tag)
if root_name.localname == "alto": if root_name.localname == "alto":
assert isinstance(root_name.namespace, str)
return root_name.namespace return root_name.namespace
else: else:
raise ValueError("Not an ALTO tree") raise ValueError("Not an ALTO tree")
def alto_nsmap(tree: ET._ElementTree) -> Dict[str, str]: def alto_extract_lines(tree: ET.ElementTree) -> Iterator[ExtractedText]:
alto_ns = alto_namespace(tree) nsmap = {"alto": alto_namespace(tree)}
if alto_ns is None:
raise ValueError("Could not determine ALTO namespace")
return {"alto": alto_ns}
def alto_extract_lines(tree: ET._ElementTree) -> Iterator[ExtractedText]:
nsmap = alto_nsmap(tree)
for line in tree.iterfind(".//alto:TextLine", namespaces=nsmap): for line in tree.iterfind(".//alto:TextLine", namespaces=nsmap):
line_id = line.attrib.get("ID") line_id = line.attrib.get("ID")
line_text = " ".join( line_text = " ".join(
string.attrib.get("CONTENT", "") string.attrib.get("CONTENT")
for string in line.iterfind("alto:String", namespaces=nsmap) for string in line.iterfind("alto:String", namespaces=nsmap)
) )
normalized_text = normalize_sbb(line_text) yield ExtractedText(line_id, None, None, normalize_sbb(line_text))
clusters = list(grapheme_clusters(normalized_text))
yield ExtractedText(line_id, None, None, normalized_text, clusters)
# FIXME hardcoded SBB normalization # FIXME hardcoded SBB normalization
def alto_extract(tree: ET._ElementTree) -> ExtractedText: def alto_extract(tree: ET.ElementTree) -> ExtractedText:
"""Extract text from the given ALTO ElementTree.""" """Extract text from the given ALTO ElementTree."""
return ExtractedText(None, list(alto_extract_lines(tree)), "\n", None, None) return ExtractedText(None, list(alto_extract_lines(tree)), "\n", None)
def alto_text(tree): def alto_text(tree):
@@ -96,7 +87,7 @@ def page_extract(tree, *, textequiv_level="region"):
# Filter empty region texts # Filter empty region texts
regions = [r for r in regions if r.text != ""] regions = [r for r in regions if r.text != ""]
return ExtractedText(None, regions, "\n", None, None) return ExtractedText(None, regions, "\n", None)
def extract_texts_from_reading_order_group(group, tree, nsmap, textequiv_level): def extract_texts_from_reading_order_group(group, tree, nsmap, textequiv_level):
@@ -106,7 +97,7 @@ def extract_texts_from_reading_order_group(group, tree, nsmap, textequiv_level):
if ET.QName(group.tag).localname in ["OrderedGroup", "OrderedGroupIndexed"]: if ET.QName(group.tag).localname in ["OrderedGroup", "OrderedGroupIndexed"]:
ro_children = list(group) ro_children = list(group)
ro_children = [child for child in ro_children if "index" in child.attrib.keys()] ro_children = filter(lambda child: "index" in child.attrib.keys(), ro_children)
ro_children = sorted(ro_children, key=lambda child: int(child.attrib["index"])) ro_children = sorted(ro_children, key=lambda child: int(child.attrib["index"]))
elif ET.QName(group.tag).localname in ["UnorderedGroup", "UnorderedGroupIndexed"]: elif ET.QName(group.tag).localname in ["UnorderedGroup", "UnorderedGroupIndexed"]:
ro_children = list(group) ro_children = list(group)
@@ -152,25 +143,21 @@ def detect_encoding(filename):
def plain_extract(filename, include_filename_in_id=False): def plain_extract(filename, include_filename_in_id=False):
id_template = "{filename} - line {no}" if include_filename_in_id else "line {no}" id_template = "{filename} - line {no}" if include_filename_in_id else "line {no}"
def make_segment(no, line):
normalized_text = normalize_sbb(line)
clusters = list(grapheme_clusters(normalized_text))
return ExtractedText(
id_template.format(filename=os.path.basename(filename), no=no),
None,
None,
normalized_text,
clusters,
)
fileencoding = detect_encoding(filename) fileencoding = detect_encoding(filename)
with open(filename, "r", encoding=fileencoding) as f: with open(filename, "r", encoding=fileencoding) as f:
return ExtractedText( return ExtractedText(
None, None,
[make_segment(no, line.strip()) for no, line in enumerate(f.readlines())], [
ExtractedText(
id_template.format(filename=os.path.basename(filename), no=no),
None,
None,
normalize_sbb(line),
)
for no, line in enumerate(f.readlines())
],
"\n", "\n",
None, None,
None,
) )
# XXX hardcoded SBB normalization # XXX hardcoded SBB normalization

@@ -1,13 +1,17 @@
{ {
"version": "0.10.0", "version": "0.9.1",
"git_url": "https://github.com/qurator-spk/dinglehopper", "git_url": "https://github.com/qurator-spk/dinglehopper",
"dockerhub": "ocrd/dinglehopper",
"tools": { "tools": {
"ocrd-dinglehopper": { "ocrd-dinglehopper": {
"executable": "ocrd-dinglehopper", "executable": "ocrd-dinglehopper",
"input_file_grp_cardinality": 2,
"output_file_grp_cardinality": 1,
"description": "Evaluate OCR text against ground truth with dinglehopper", "description": "Evaluate OCR text against ground truth with dinglehopper",
"input_file_grp": [
"OCR-D-GT-PAGE",
"OCR-D-OCR"
],
"output_file_grp": [
"OCR-D-OCR-EVAL"
],
"categories": [ "categories": [
"Quality assurance" "Quality assurance"
], ],

@@ -1,76 +1,78 @@
from functools import cached_property import json
import os import os
from typing import Optional
import click import click
from ocrd_models import OcrdFileType
from ocrd import Processor from ocrd import Processor
from ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor from ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor
from ocrd_utils import make_file_id from ocrd_utils import assert_file_grp_cardinality, getLogger, make_file_id
from pkg_resources import resource_string
from .cli import process as cli_process from .cli import process as cli_process
OCRD_TOOL = json.loads(resource_string(__name__, "ocrd-tool.json").decode("utf8"))
@click.command() @click.command()
@ocrd_cli_options @ocrd_cli_options
def ocrd_dinglehopper(*args, **kwargs): def ocrd_dinglehopper(*args, **kwargs):
return ocrd_cli_wrap_processor(OcrdDinglehopperEvaluate, *args, **kwargs) return ocrd_cli_wrap_processor(OcrdDinglehopperEvaluate, *args, **kwargs)
class OcrdDinglehopperEvaluate(Processor): class OcrdDinglehopperEvaluate(Processor):
def __init__(self, *args, **kwargs):
kwargs["ocrd_tool"] = OCRD_TOOL["tools"]["ocrd-dinglehopper"]
super(OcrdDinglehopperEvaluate, self).__init__(*args, **kwargs)
@cached_property def process(self):
def executable(self): assert_file_grp_cardinality(self.input_file_grp, 2, "GT and OCR")
return 'ocrd-dinglehopper' assert_file_grp_cardinality(self.output_file_grp, 1)
def process_page_file(self, *input_files: Optional[OcrdFileType]) -> None: log = getLogger("processor.OcrdDinglehopperEvaluate")
assert self.parameter
metrics = self.parameter["metrics"] metrics = self.parameter["metrics"]
textequiv_level = self.parameter["textequiv_level"] textequiv_level = self.parameter["textequiv_level"]
gt_grp, ocr_grp = self.input_file_grp.split(",")
input_file_tuples = self.zip_input_files(on_error="abort")
for n, (gt_file, ocr_file) in enumerate(input_file_tuples):
if not gt_file or not ocr_file:
# file/page was not found in this group
continue
gt_file = self.workspace.download_file(gt_file)
ocr_file = self.workspace.download_file(ocr_file)
page_id = gt_file.pageId
# wrong number of inputs: let fail log.info("INPUT FILES %i / %s%s", n, gt_file, ocr_file)
gt_file, ocr_file = input_files
# missing on either side: skip (zip_input_files already warned) file_id = make_file_id(ocr_file, self.output_file_grp)
if not gt_file or not ocr_file: report_prefix = os.path.join(self.output_file_grp, file_id)
return
# missing download (i.e. OCRD_DOWNLOAD_INPUT=false): # Process the files
if not gt_file.local_filename: try:
if config.OCRD_MISSING_INPUT == 'ABORT': os.mkdir(self.output_file_grp)
raise MissingInputFile(gt_file.fileGrp, gt_file.pageId, gt_file.mimetype) except FileExistsError:
return pass
if not ocr_file.local_filename: cli_process(
if config.OCRD_MISSING_INPUT == 'ABORT': gt_file.local_filename,
raise MissingInputFile(ocr_file.fileGrp, ocr_file.pageId, ocr_file.mimetype) ocr_file.local_filename,
return report_prefix,
metrics=metrics,
page_id = gt_file.pageId textequiv_level=textequiv_level,
file_id = make_file_id(ocr_file, self.output_file_grp)
cli_process(
gt_file.local_filename,
ocr_file.local_filename,
file_id,
self.output_file_grp,
metrics=metrics,
textequiv_level=textequiv_level,
)
# Add reports to the workspace
for report_suffix, mimetype in [
[".html", "text/html"],
[".json", "application/json"],
]:
output_file_id = file_id + report_suffix
output_file = next(self.workspace.mets.find_files(ID=output_file_id), None)
if output_file and config.OCRD_EXISTING_OUTPUT != 'OVERWRITE':
raise FileExistsError(f"A file with ID=={output_file_id} already exists {output_file} and neither force nor ignore are set")
self.workspace.add_file(
file_id=output_file_id,
file_grp=self.output_file_grp,
page_id=page_id,
mimetype=mimetype,
local_filename=file_id + report_suffix,
) )
# Add reports to the workspace
for report_suffix, mimetype in [
[".html", "text/html"],
[".json", "application/json"],
]:
self.workspace.add_file(
file_id=file_id + report_suffix,
file_grp=self.output_file_grp,
page_id=page_id,
mimetype=mimetype,
local_filename=report_prefix + report_suffix,
)
if __name__ == "__main__": if __name__ == "__main__":
ocrd_dinglehopper() ocrd_dinglehopper()

@@ -138,17 +138,17 @@
<mets:fileSec> <mets:fileSec>
<mets:fileGrp USE="OCR-D-GT-PAGE"> <mets:fileGrp USE="OCR-D-GT-PAGE">
<mets:file MIMETYPE="application/xml" ID="OCR-D-GT-PAGE_00000024"> <mets:file MIMETYPE="application/xml" ID="OCR-D-GT-PAGE_00000024">
<mets:FLocat xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="OCR-D-GT-PAGE/00000024.page.xml" LOCTYPE="OTHER" OTHERLOCTYPE="FILE"/> <mets:FLocat xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="OCR-D-GT-PAGE/00000024.page.xml"/>
</mets:file> </mets:file>
</mets:fileGrp> </mets:fileGrp>
<mets:fileGrp USE="OCR-D-OCR-CALAMARI"> <mets:fileGrp USE="OCR-D-OCR-CALAMARI">
<mets:file MIMETYPE="application/vnd.prima.page+xml" ID="OCR-D-OCR-CALAMARI_0001"> <mets:file MIMETYPE="application/vnd.prima.page+xml" ID="OCR-D-OCR-CALAMARI_0001">
<mets:FLocat xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="OCR-D-OCR-CALAMARI/OCR-D-OCR-CALAMARI_0001.xml" LOCTYPE="OTHER" OTHERLOCTYPE="FILE"/> <mets:FLocat xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="OCR-D-OCR-CALAMARI/OCR-D-OCR-CALAMARI_0001.xml"/>
</mets:file> </mets:file>
</mets:fileGrp> </mets:fileGrp>
<mets:fileGrp USE="OCR-D-OCR-TESS"> <mets:fileGrp USE="OCR-D-OCR-TESS">
<mets:file MIMETYPE="application/vnd.prima.page+xml" ID="OCR-D-OCR-TESS_0001"> <mets:file MIMETYPE="application/vnd.prima.page+xml" ID="OCR-D-OCR-TESS_0001">
<mets:FLocat xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="OCR-D-OCR-TESS/OCR-D-OCR-TESS_0001.xml" LOCTYPE="OTHER" OTHERLOCTYPE="FILE"/> <mets:FLocat xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="OCR-D-OCR-TESS/OCR-D-OCR-TESS_0001.xml"/>
</mets:file> </mets:file>
</mets:fileGrp> </mets:fileGrp>
</mets:fileSec> </mets:fileSec>

@@ -13,13 +13,12 @@ def test_text():
test1 = ExtractedText( test1 = ExtractedText(
None, None,
[ [
ExtractedText("s0", None, None, "foo", grapheme_clusters("foo")), ExtractedText("s0", None, None, "foo"),
ExtractedText("s1", None, None, "bar", grapheme_clusters("bar")), ExtractedText("s1", None, None, "bar"),
ExtractedText("s2", None, None, "bazinga", grapheme_clusters("bazinga")), ExtractedText("s2", None, None, "bazinga"),
], ],
" ", " ",
None, None,
None,
) )
assert test1.text == "foo bar bazinga" assert test1.text == "foo bar bazinga"
@@ -30,20 +29,8 @@ def test_text():
def test_normalization_check(): def test_normalization_check():
with pytest.raises(ValueError, match=r".*is not in NFC.*"): with pytest.raises(ValueError, match=r".*is not in NFC.*"):
ExtractedText( ExtractedText("foo", None, None, unicodedata.normalize("NFD", "Schlyñ"))
"foo", assert ExtractedText("foo", None, None, unicodedata.normalize("NFC", "Schlyñ"))
None,
None,
unicodedata.normalize("NFD", "Schlyñ"),
grapheme_clusters(unicodedata.normalize("NFD", "Schlyñ")),
)
assert ExtractedText(
"foo",
None,
None,
unicodedata.normalize("NFC", "Schlyñ"),
grapheme_clusters(unicodedata.normalize("NFC", "Schlyñ")),
)
AlignmentElement = namedtuple("AlignmentElement", "left right left_id right_id") AlignmentElement = namedtuple("AlignmentElement", "left right left_id right_id")
@@ -60,27 +47,25 @@ def test_align():
test1 = ExtractedText( test1 = ExtractedText(
None, None,
[ [
ExtractedText("s0", None, None, "foo", grapheme_clusters("foo")), ExtractedText("s0", None, None, "foo"),
ExtractedText("s1", None, None, "bar", grapheme_clusters("bar")), ExtractedText("s1", None, None, "bar"),
ExtractedText("s2", None, None, "batzinga", grapheme_clusters("batzinga")), ExtractedText("s2", None, None, "batzinga"),
], ],
" ", " ",
None, None,
None,
) )
test2 = ExtractedText( test2 = ExtractedText(
None, None,
[ [
ExtractedText("x0", None, None, "foo", grapheme_clusters("foo")), ExtractedText("x0", None, None, "foo"),
ExtractedText("x1", None, None, "bar", grapheme_clusters("bar")), ExtractedText("x1", None, None, "bar"),
# extra . # extra .
ExtractedText("x2", None, None, ".", grapheme_clusters(".")), ExtractedText("x2", None, None, "."),
# deletion + different grapheme cluster, m̃ also is two Python characters # deletion + different grapheme cluster, m̃ also is two Python characters
ExtractedText("x3", None, None, "bazim̃ga", grapheme_clusters("bazim̃ga")), ExtractedText("x3", None, None, "bazim̃ga"),
], ],
" ", " ",
None, None,
None,
) )
left_pos = 0 left_pos = 0

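The extra constructor argument appearing in these tests carries the precomputed grapheme clusters of each segment's text, and ExtractedText rejects text that is not in NFC. A minimal sketch of both points, assuming uniseg is installed (the exact ExtractedText field names are not visible in this diff):

    import unicodedata
    from uniseg.graphemecluster import grapheme_clusters

    text = "bazim̃ga"  # "m̃" is two code points (m + combining tilde) but one grapheme cluster
    assert unicodedata.normalize("NFC", text) == text  # NFC has no precomposed "m with tilde"
    clusters = list(grapheme_clusters(text))
    print(len(text), len(clusters))  # 8 code points, 7 grapheme clusters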
@ -1,8 +1,6 @@
import math
import pytest import pytest
from .. import align, distance, score_hint, seq_align from .. import align, distance, seq_align
from .util import unzip from .util import unzip
@ -185,8 +183,3 @@ def test_lines_similar():
# Test __eq__ (i.e. is it a substitution or a similar string?) # Test __eq__ (i.e. is it a substitution or a similar string?)
assert list(left)[0] == list(right)[0] assert list(left)[0] == list(right)[0]
def test_score_hint():
assert score_hint(0.5, 23) == 12 # int(ceil())
assert score_hint(math.inf, 12345) is None

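The test_score_hint() assertions above pin down the behaviour of the score_hint() helper: an expected error rate and a sequence length become an integer cutoff, and a non-finite rate yields None. A sketch consistent with those two assertions (the actual implementation may differ in details):

    import math
    from typing import Optional

    def score_hint(expected_score: float, n: int) -> Optional[int]:
        if not math.isfinite(expected_score):
            return None
        return int(math.ceil(expected_score * n))  # int(ceil(0.5 * 23)) == 12

    assert score_hint(0.5, 23) == 12
    assert score_hint(math.inf, 12345) is None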
@ -21,9 +21,9 @@ def test_cli_directory(tmp_path):
os.path.join(data_dir, "directory-test", "ocr"), os.path.join(data_dir, "directory-test", "ocr"),
"report", "report",
str(tmp_path / "reports"), str(tmp_path / "reports"),
metrics=False, False,
differences=True, True,
textequiv_level="line", "line",
) )
assert os.path.exists(tmp_path / "reports/1.xml-report.json") assert os.path.exists(tmp_path / "reports/1.xml-report.json")
@ -45,9 +45,9 @@ def test_cli_fail_without_gt(tmp_path):
os.path.join(data_dir, "directory-test", "ocr"), os.path.join(data_dir, "directory-test", "ocr"),
"report", "report",
str(tmp_path / "reports"), str(tmp_path / "reports"),
metrics=False, False,
differences=True, True,
textequiv_level="line", "line",
) )
assert len(os.listdir(tmp_path / "reports")) == 2 * 2 assert len(os.listdir(tmp_path / "reports")) == 2 * 2

@ -1,35 +0,0 @@
from __future__ import division, print_function
import math
import pytest
from .. import character_error_rate, plain_text
from .util import working_directory
@pytest.mark.integration
@pytest.mark.parametrize(
"gt_file_content,ocr_file_content,cer_expected",
[
("", "Lorem ipsum", math.inf),
("Lorem ipsum", "", 1.0),
("\ufeff", "Lorem ipsum", math.inf),
("Lorem ipsum", "\ufeff", 1.0),
("", "", 0.0),
("\ufeff", "", 0.0),
("", "\ufeff", 0.0),
],
)
def test_empty_files(tmp_path, gt_file_content, ocr_file_content, cer_expected):
with working_directory(tmp_path):
with open("gt.txt", "w") as gtf:
gtf.write(gt_file_content)
with open("ocr.txt", "w") as ocrf:
ocrf.write(ocr_file_content)
gt_text = plain_text("gt.txt")
ocr_text = plain_text("ocr.txt")
assert character_error_rate(gt_text, ocr_text) == cer_expected

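The integration test above (removed in this comparison) fixes how character_error_rate() treats empty or BOM-only inputs: a non-empty OCR result against an empty ground truth gives an infinite CER, since the error count is divided by the number of reference characters. A rough sketch of that edge case, using rapidfuzz for the edit distance; the real code works on grapheme clusters and presumably handles the BOM in plain_text() rather than here:

    import math
    from rapidfuzz.distance import Levenshtein

    def character_error_rate_sketch(reference: str, compared: str) -> float:
        reference = reference.lstrip("\ufeff")  # folded in here only for the sketch
        compared = compared.lstrip("\ufeff")
        d = Levenshtein.distance(reference, compared)
        if len(reference) == 0:
            return math.inf if d > 0 else 0.0
        return d / len(reference)

    assert character_error_rate_sketch("", "Lorem ipsum") == math.inf
    assert character_error_rate_sketch("Lorem ipsum", "") == 1.0
    assert character_error_rate_sketch("\ufeff", "") == 0.0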
@ -34,8 +34,9 @@ def test_ocrd_cli(tmp_path):
"-O", "-O",
"OCR-D-OCR-CALAMARI-EVAL", "OCR-D-OCR-CALAMARI-EVAL",
] ]
# Hack to satisfy ocrd_cli_wrap_processor() check for arguments sys.argv[
sys.argv[1:] = args 1:
] = args # XXX Hack to satisfy ocrd_cli_wrap_processor() check for arguments
result = runner.invoke(ocrd_dinglehopper, args) result = runner.invoke(ocrd_dinglehopper, args)
assert result.exit_code == 0 assert result.exit_code == 0
result_json = list((test_workspace_dir / "OCR-D-OCR-CALAMARI-EVAL").glob("*.json")) result_json = list((test_workspace_dir / "OCR-D-OCR-CALAMARI-EVAL").glob("*.json"))

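The sys.argv assignment above works around wrappers that inspect sys.argv directly instead of the arguments Click passes in. The pattern in isolation, with the argument list elided because it is assembled earlier in the test:

    import sys
    from click.testing import CliRunner

    args = [...]  # as assembled earlier in the test, ending in "-O", "OCR-D-OCR-CALAMARI-EVAL"
    sys.argv[1:] = args  # satisfy a wrapper that checks sys.argv itself
    runner = CliRunner()
    # result = runner.invoke(ocrd_dinglehopper, args), as in the test above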
@ -177,8 +177,8 @@ def test_text():
def test_plain(tmp_path): def test_plain(tmp_path):
with working_directory(tmp_path): with working_directory(tmp_path):
with open("ocr.txt", "w") as ocrf: with open("ocr.txt", "w") as ocrf:
ocrf.write("First, a line.\nAnd a second line.\n") ocrf.write("AAAAB")
result = plain_text("ocr.txt") result = plain_text("ocr.txt")
expected = "First, a line.\nAnd a second line." expected = "AAAAB"
assert result == expected assert result == expected

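The multi-line variant of test_plain() above expects plain_text() to return the file's contents minus the trailing newline. A minimal stand-in with that behaviour (the real function may also normalize the text):

    def plain_text_sketch(path: str) -> str:
        with open(path, encoding="utf-8") as f:
            return f.read().rstrip("\n")

    # "First, a line.\nAnd a second line.\n" on disk
    # -> "First, a line.\nAnd a second line." returned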
@ -1,5 +1,7 @@
from __future__ import division
import unicodedata import unicodedata
from typing import Generator, Iterable, Tuple, TypeVar from typing import Iterable, Tuple
import uniseg.wordbreak import uniseg.wordbreak
from multimethod import multimethod from multimethod import multimethod
@ -7,8 +9,6 @@ from rapidfuzz.distance import Levenshtein
from .extracted_text import ExtractedText from .extracted_text import ExtractedText
T = TypeVar("T")
# Did we patch uniseg.wordbreak.word_break already? # Did we patch uniseg.wordbreak.word_break already?
word_break_patched = False word_break_patched = False
@ -22,11 +22,11 @@ def patch_word_break():
""" """
old_word_break = uniseg.wordbreak.word_break old_word_break = uniseg.wordbreak.word_break
def new_word_break(c): def new_word_break(c, index=0):
if 0xE000 <= ord(c) <= 0xF8FF: # Private Use Area if 0xE000 <= ord(c) <= 0xF8FF: # Private Use Area
return uniseg.wordbreak.Word_Break.ALetter return "ALetter"
else: else:
return old_word_break(c) return old_word_break(c, index)
uniseg.wordbreak.word_break = new_word_break uniseg.wordbreak.word_break = new_word_break
global word_break_patched global word_break_patched
@ -34,7 +34,7 @@ def patch_word_break():
@multimethod @multimethod
def words(s: str) -> Generator[str, None, None]: def words(s: str):
"""Extract words from a string""" """Extract words from a string"""
global word_break_patched global word_break_patched
@ -54,7 +54,7 @@ def words(s: str) -> Generator[str, None, None]:
# We follow Unicode Standard Annex #29 on Unicode Text Segmentation here: Split on # We follow Unicode Standard Annex #29 on Unicode Text Segmentation here: Split on
# word boundaries using uniseg.wordbreak.words() and ignore all "words" that contain # word boundaries using uniseg.wordbreak.words() and ignore all "words" that contain
# only whitespace, punctuation "or similar characters." # only whitespace, punctation "or similar characters."
for word in uniseg.wordbreak.words(s): for word in uniseg.wordbreak.words(s):
if all(unwanted(c) for c in word): if all(unwanted(c) for c in word):
pass pass
@ -62,37 +62,37 @@ def words(s: str) -> Generator[str, None, None]:
yield word yield word
@words.register @multimethod
def _(s: ExtractedText) -> Generator[str, None, None]: def words(s: ExtractedText):
yield from words(s.text) return words(s.text)
@multimethod @multimethod
def words_normalized(s: str) -> Generator[str, None, None]: def words_normalized(s: str):
yield from words(unicodedata.normalize("NFC", s)) return words(unicodedata.normalize("NFC", s))
@words_normalized.register @multimethod
def _(s: ExtractedText) -> Generator[str, None, None]: def words_normalized(s: ExtractedText):
yield from words_normalized(s.text) return words_normalized(s.text)
@multimethod @multimethod
def word_error_rate_n(reference: str, compared: str) -> Tuple[float, int]: def word_error_rate_n(reference: str, compared: str) -> Tuple[float, int]:
reference_seq = list(words_normalized(reference)) reference_seq = list(words_normalized(reference))
compared_seq = list(words_normalized(compared)) compared_seq = list(words_normalized(compared))
wer, n = word_error_rate_n(reference_seq, compared_seq) return word_error_rate_n(reference_seq, compared_seq)
return wer, n
@word_error_rate_n.register @multimethod
def _(reference: ExtractedText, compared: ExtractedText) -> Tuple[float, int]: def word_error_rate_n(
wer, n = word_error_rate_n(reference.text, compared.text) reference: ExtractedText, compared: ExtractedText
return wer, n ) -> Tuple[float, int]:
return word_error_rate_n(reference.text, compared.text)
@word_error_rate_n.register @multimethod
def _(reference: Iterable[T], compared: Iterable[T]) -> Tuple[float, int]: def word_error_rate_n(reference: Iterable, compared: Iterable) -> Tuple[float, int]:
reference_seq = list(reference) reference_seq = list(reference)
compared_seq = list(compared) compared_seq = list(compared)
@ -106,7 +106,6 @@ def _(reference: Iterable[T], compared: Iterable[T]) -> Tuple[float, int]:
return d / n, n return d / n, n
def word_error_rate(reference: T, compared: T) -> float: def word_error_rate(reference, compared) -> float:
wer: float
wer, _ = word_error_rate_n(reference, compared) wer, _ = word_error_rate_n(reference, compared)
return wer return wer

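As the comments in words() state, word segmentation follows Unicode Standard Annex #29 and then drops "words" consisting only of whitespace, punctuation or similar characters. An illustrative re-implementation of that filtering, assuming uniseg is available; the category test is a guess at what the real unwanted() check covers:

    import unicodedata
    import uniseg.wordbreak

    def words_sketch(s: str):
        def unwanted(c: str) -> bool:
            # whitespace (Z*), punctuation (P*) "or similar characters" (here: C*)
            return unicodedata.category(c)[0] in ("Z", "P", "C")
        for word in uniseg.wordbreak.words(s):
            if not all(unwanted(c) for c in word):
                yield word

    print(list(words_sketch("Dies ist ein Beispielsatz!")))
    # -> ['Dies', 'ist', 'ein', 'Beispielsatz']  (spaces and "!" are dropped)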