Merge pull request #83 from INL/feat/batch-processing
Add batch processing and report summaries
commit 35be58cb94
@@ -0,0 +1,101 @@
import json
import os

import click
from jinja2 import Environment, FileSystemLoader
from ocrd_utils import initLogging

from dinglehopper.cli import json_float


def process(reports_folder, occurrences_threshold=1):
    """Aggregate all dinglehopper JSON reports in reports_folder into a summary."""
    cer_list = []
    wer_list = []
    cer_sum = 0
    wer_sum = 0
    diff_c = {}
    diff_w = {}

    for report in os.listdir(reports_folder):
        if report.endswith(".json"):
            with open(os.path.join(reports_folder, report), "r") as f:
                report_data = json.load(f)

            if "cer" not in report_data or "wer" not in report_data:
                click.echo(
                    f"Skipping {report} because it does not contain CER and WER")
                continue

            cer = report_data["cer"]
            wer = report_data["wer"]
            cer_list.append(cer)
            wer_list.append(wer)
            cer_sum += cer
            wer_sum += wer

            # Accumulate the per-report difference counts into overall totals.
            for key, value in report_data["differences"]["character_level"].items():
                diff_c[key] = diff_c.get(key, 0) + value
            for key, value in report_data["differences"]["word_level"].items():
                diff_w[key] = diff_w.get(key, 0) + value

    if len(cer_list) == 0:
        click.echo(f"No reports found in folder '{os.path.abspath(reports_folder)}'")
        return

    cer_avg = cer_sum / len(cer_list)
    wer_avg = wer_sum / len(wer_list)

    click.echo(f"Number of reports: {len(cer_list)}")
    click.echo(f"Average CER: {cer_avg}")
    click.echo(f"Average WER: {wer_avg}")
    click.echo(f"Sum of CER: {cer_sum}")
    click.echo(f"Sum of WER: {wer_sum}")

    # Render the HTML and JSON summary templates into the reports folder.
    env = Environment(
        loader=FileSystemLoader(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "templates")
        )
    )
    env.filters["json_float"] = json_float
    for report_suffix in (".html", ".json"):
        template_fn = "summary" + report_suffix + ".j2"
        out_fn = os.path.join(reports_folder, "summary" + report_suffix)
        template = env.get_template(template_fn)
        template.stream(
            num_reports=len(cer_list),
            cer_avg=cer_avg,
            wer_avg=wer_avg,
            diff_c=diff_c,
            diff_w=diff_w,
            occurrences_threshold=occurrences_threshold,
        ).dump(out_fn)


@click.command()
@click.argument("reports_folder",
                type=click.Path(exists=True),
                default="./reports")
@click.option("--occurrences-threshold",
              type=int,
              default=1,
              help="Only show differences that occur at least this many times.")
def main(reports_folder, occurrences_threshold):
    """
    Summarize the results from multiple reports generated earlier by dinglehopper.

    It calculates the average CER and WER, as well as a sum of common mistakes.
    Reports include lists of mistakes and their occurrences.

    You may use a threshold to reduce the file size of the HTML report by only
    showing mistakes whose number of occurrences is at or above the threshold.
    The JSON report will always contain all mistakes.

    All JSON files in the provided folder will be gathered and summarized.
    """
    initLogging()
    process(reports_folder, occurrences_threshold)


if __name__ == "__main__":
    main()
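For reference, a minimal sketch of driving the summarizer from Python instead of through the click CLI; the `./reports` path and the threshold value are illustrative assumptions, while the module path follows the `from .. import cli_summarize` import used in the tests below.

# Minimal sketch, assuming ./reports already contains dinglehopper
# JSON reports. process() prints the aggregate numbers and writes
# summary.html / summary.json next to the input reports.
from dinglehopper.cli_summarize import process

process("./reports", occurrences_threshold=2)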
@@ -0,0 +1,136 @@
<!doctype html>
<html lang="en">
<head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">

    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css" integrity="sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T" crossorigin="anonymous">
    <style type="text/css">
        {% if metrics %}
        .gt .diff {
            color: green;
        }
        .ocr .diff {
            color: red;
        }
        {% else %}
        .gt .diff, .ocr .diff {
            color: blue;
        }
        {% endif %}
        .ellipsis {
            opacity: 0.5;
            font-style: italic;
        }
        .diff-highlight {
            border: 2px solid;
            border-radius: 5px;
        }

        .row {
            margin-bottom: 20px;
        }

        table {
            width: 100%;
        }

        .cer {
            flex-direction: column;
        }

        tr:hover {
            background-color: #f5f5f5;
        }

        th {
            cursor: pointer;
        }

        th:hover {
            background-color: #eee;
        }

        td {
            min-width: 100px;
        }

        td:hover {
            background-color: #eee;
        }
    </style>
</head>
<body>

<div class="container">

    <div class="row">
        <h1>Summary of all reports</h1>
    </div>

    <div class="row">
        <p>Number of reports: {{ num_reports }}</p>
    </div>

    {% if cer_avg and wer_avg -%}
    <div class="row">
        <h2>Metrics</h2>
    </div>

    <div class="row cer">
        <p>Average CER: {{ cer_avg|round(4) }}</p>
        <p>Average WER: {{ wer_avg|round(4) }}</p>
    </div>
    {% endif %}

    {%- if diff_c and diff_w %}
    {%- set sections = [{'title': 'Found differences (character)', 'data': diff_c},
                        {'title': 'Found differences (word)', 'data': diff_w}] %}

    <div class="row">
        {%- for section in sections %}
        <div class="col-md-6">
            <h2>{{ section['title'] }}</h2>
            <table>
                <thead>
                <tr><th>GT</th><th>OCR</th><th>Occurrences</th></tr>
                </thead>
                {%- set num_omitted = namespace(value=0) -%}
                {% for gt_ocr, occurrences in section['data'].items() -%}
                {% if occurrences < occurrences_threshold -%}
                {%- set num_omitted.value = num_omitted.value + 1 %}
                {%- else -%}
                {%- set gt = gt_ocr.split(" :: ")[0] %}
                {%- set ocr = gt_ocr.split(" :: ")[1] %}
                <tr>
                    <td title="{{ gt|urlencode }}">{{ gt }}</td>{# display the unicode character #}
                    <td title="{{ ocr|urlencode }}">{{ ocr }}</td>
                    <td>{{ occurrences }}</td>
                </tr>
                {%- endif %}
                {%- endfor %}
            </table>

            {% if num_omitted.value > 0 and occurrences_threshold > 1 -%}
            <p>Skipped {{ num_omitted.value }} diffs with fewer than {{ occurrences_threshold }} occurrences. The complete list of diffs is available in the accompanying JSON file.</p>
            {%- set num_omitted.value = 0 %}
            {%- endif %}
        </div>
        {%- endfor %}
    </div>
    {%- endif %}

</div>


<script src="https://code.jquery.com/jquery-3.3.1.slim.min.js" integrity="sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo" crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js" integrity="sha384-UO2eT0CpHqdSJQ6hJty5KVphtPhzWj9WO1clHTMGa3JDZwrnQq4sF86dIHNDz0W1" crossorigin="anonymous"></script>
<script src="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js" integrity="sha384-JjSmVgyd0p3pXB1rRibZUAYoIIy6OrQ6VrjIEaFf/nJGzIxFDsf4x0xIM+B07jRM" crossorigin="anonymous"></script>

<script>
    {% include 'report.html.js' %}
</script>


</body>
</html>
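A note on the template above: the `namespace(value=0)` object is needed because plain `{% set %}` assignments inside a Jinja2 `for` loop do not persist across iterations; the namespace carries the omission count out of the loop. A hedged way to check the threshold behaviour by rendering the template directly with mock data (the templates path and the mock keys/values are assumptions for illustration):

# Sketch: render summary.html.j2 with mock data and confirm that a
# difference seen only once is omitted when the threshold is 2.
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("dinglehopper/templates"))  # assumed location
html = env.get_template("summary.html.j2").render(
    num_reports=1,
    cer_avg=0.05,
    wer_avg=0.15,
    diff_c={"a :: b": 5, "c :: d": 1},  # keys use the "gt :: ocr" format split above
    diff_w={"foo :: bar": 3},
    occurrences_threshold=2,
)
assert ">5<" in html        # "a :: b" survives the threshold
assert "Skipped 1" in html  # "c :: d" is counted as omitted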
@@ -0,0 +1,15 @@
{
    "num_reports": {{ num_reports }}
    {%- if cer_avg and wer_avg %}
    ,
    "cer_avg": {{ cer_avg|json_float }},
    "wer_avg": {{ wer_avg|json_float }}
    {%- endif %}
    {%- if diff_c and diff_w %}
    ,
    "differences": {
        "character_level": {{ diff_c|tojson }},
        "word_level": {{ diff_w|tojson }}
    }
    {%- endif %}
}
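The averages go through the custom `json_float` filter that `process()` registers (imported from `dinglehopper.cli`) rather than `tojson`, so that an infinite rate does not serialize as Python's `inf`, which `json.load()` rejects. A sketch of the behaviour such a filter needs to provide, not necessarily the exact implementation (see `dinglehopper.cli.json_float` for the real one):

def json_float(value):
    # Python's str(float("inf")) is "inf", which json.load() rejects;
    # "Infinity" is the spelling Python's json module accepts on load.
    # Sketch only.
    if value == float("inf"):
        return "Infinity"
    if value == float("-inf"):
        return "-Infinity"
    return str(value)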
(5 file diffs suppressed because they are too large)
@@ -0,0 +1,41 @@
import os

import pytest
from ocrd_utils import initLogging

from dinglehopper.cli import process_dir

data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")


@pytest.mark.integration
def test_cli_directory(tmp_path):
    """
    Test that cli/process_dir() processes a directory of files and
    yields JSON and HTML reports.
    """

    initLogging()
    process_dir(os.path.join(data_dir, "directory-test", "gt"),
                os.path.join(data_dir, "directory-test", "ocr"),
                "report", str(tmp_path / "reports"), False, True,
                "line")

    assert os.path.exists(tmp_path / "reports/1.xml-report.json")
    assert os.path.exists(tmp_path / "reports/1.xml-report.html")
    assert os.path.exists(tmp_path / "reports/2.xml-report.json")
    assert os.path.exists(tmp_path / "reports/2.xml-report.html")


@pytest.mark.integration
def test_cli_fail_without_gt(tmp_path):
    """
    Test that cli/process_dir() skips a file if there is no corresponding file
    in the other directory.
    """

    initLogging()
    process_dir(os.path.join(data_dir, "directory-test", "gt"),
                os.path.join(data_dir, "directory-test", "ocr"),
                "report", str(tmp_path / "reports"), False, True,
                "line")

    # Two matched file pairs, each yielding a JSON and an HTML report;
    # the unmatched file produces nothing.
    assert len(os.listdir(tmp_path / "reports")) == 2 * 2
@@ -0,0 +1,101 @@
import json
import os

import pytest

from .util import working_directory
from .. import cli_summarize

expected_cer_avg = (0.05 + 0.10) / 2
expected_wer_avg = (0.15 + 0.20) / 2
expected_diff_c = {"a": 30, "b": 50}
expected_diff_w = {"c": 70, "d": 90}


@pytest.fixture
def create_summaries(tmp_path):
    """Create two dinglehopper reports with mock data"""
    reports_dirname = tmp_path / "reports"
    reports_dirname.mkdir()

    report1 = {"cer": 0.05, "wer": 0.15,
               "differences": {
                   "character_level": {"a": 10, "b": 20},
                   "word_level": {"c": 30, "d": 40}
               }}
    report2 = {"cer": 0.10, "wer": 0.20,
               "differences": {
                   "character_level": {"a": 20, "b": 30},
                   "word_level": {"c": 40, "d": 50}
               }}

    with open(os.path.join(reports_dirname, "report1.json"), "w") as f:
        json.dump(report1, f)
    with open(os.path.join(reports_dirname, "report2.json"), "w") as f:
        json.dump(report2, f)

    return str(reports_dirname)


@pytest.mark.integration
def test_cli_summarize_json(tmp_path, create_summaries):
    """Test that cli_summarize.process() yields a summarized JSON report"""
    with working_directory(tmp_path):
        reports_dirname = create_summaries
        cli_summarize.process(reports_dirname)

        with open(os.path.join(reports_dirname, "summary.json"), "r") as f:
            summary_data = json.load(f)

        assert summary_data["num_reports"] == 2
        assert summary_data["cer_avg"] == expected_cer_avg
        assert summary_data["wer_avg"] == expected_wer_avg
        assert summary_data["differences"]["character_level"] == expected_diff_c
        assert summary_data["differences"]["word_level"] == expected_diff_w


@pytest.mark.integration
def test_cli_summarize_html(tmp_path, create_summaries):
    """Test that cli_summarize.process() yields an HTML report"""
    with working_directory(tmp_path):
        reports_dirname = create_summaries
        cli_summarize.process(reports_dirname)

        html_file = os.path.join(reports_dirname, "summary.html")
        assert os.path.isfile(html_file)

        with open(html_file, "r") as f:
            contents = f.read()

        assert len(contents) > 0
        assert "Number of reports: 2" in contents
        assert f"Average CER: {round(expected_cer_avg, 4)}" in contents
        assert f"Average WER: {round(expected_wer_avg, 4)}" in contents


@pytest.mark.integration
def test_cli_summarize_html_skip_invalid(tmp_path, create_summaries):
    """
    Test that cli_summarize.process() does not include reports that are missing
    a WER value.
    """
    with working_directory(tmp_path):
        reports_dirname = create_summaries

        # This third report has no WER value and should not be included
        # in the summary.
        report3 = {"cer": 0.10,
                   "differences": {
                       "character_level": {"a": 20, "b": 30},
                       "word_level": {"c": 40, "d": 50}
                   }}

        with open(os.path.join(reports_dirname, "report3-missing-wer.json"), "w") as f:
            json.dump(report3, f)

        cli_summarize.process(reports_dirname)

        html_file = os.path.join(reports_dirname, "summary.html")
        assert os.path.isfile(html_file)

        with open(html_file, "r") as f:
            contents = f.read()

        assert "Number of reports: 2" in contents  # report3 is not included