🚧 WIP: Migrate to using ocrd:all image - Move extra scripts to their own sub-directory

This commit is contained in:
Gerber, Mike 2024-04-25 20:29:29 +02:00
parent 70c0da0cec
commit 5dffd843aa
5 changed files with 0 additions and 0 deletions

94
extra/my_ocrd_workflow Executable file
View file

@ -0,0 +1,94 @@
#!/bin/bash
# OCR-D workflow: binarize → segment lines → recognize (Calamari + Tesseract) → evaluate/convert.
set -e  # Abort on error

# Configuration
export LOG_LEVEL=${LOG_LEVEL:-INFO}  # /etc/ocrd_logging.py uses this to set level for all OCR-D modules
export TEXTEQUIV_LEVEL=glyph

# Command line parameters
# NOTE: relies on util-linux "enhanced" getopt for long-option support.
OPTS=$(getopt -o I: --long input-file-grp:,skip-validation -- "$@")
eval set -- "$OPTS"
INPUT_FILE_GRP=OCR-D-IMG
SKIP_VALIDATION=false
while true; do
  case "$1" in
    -I|--input-file-grp) INPUT_FILE_GRP=$2; shift 2;;
    --skip-validation) SKIP_VALIDATION=true; shift;;
    --) shift; break;;
    *) break;;
  esac
done

# Set up logging
if [[ "$LOG_LEVEL" = "DEBUG" || "$LOG_LEVEL" = "TRACE" ]]; then
  set -x
fi
do_validate() {
  # Validate the workspace (unless --skip-validation was given).
  #
  # Both ocrd_tesserocr + ocrd_calamari produce segment coordinates that are not strictly within their parent's
  # coordinates:
  #
  #   INCONSISTENCY in [...] coords [...] not within parent coords
  #
  # → --page-coordinate-consistency off
  #
  # ocrd_tesserocr sometimes produces segment text results that aren't concatenating as expected by the validator:
  #
  #   INCONSISTENCY in [...]: text results '[...]' != concatenated '[...]'
  #
  # → --page-strictness lax

  # An array (rather than an unquoted string relying on word splitting) keeps
  # the option list robust and quotable.
  local validate_options=(
    --skip dimension
    --skip pixel_density
    --page-strictness lax
    --page-coordinate-consistency off
  )
  if [[ "$SKIP_VALIDATION" = false ]]; then
    ocrd workspace validate "${validate_options[@]}"
  fi
}
main() {
  do_validate

  # Binarization (ocrd-sbb-binarize kept as a commented-out alternative)
  #ocrd-sbb-binarize --overwrite -I "$INPUT_FILE_GRP" -O OCR-D-IMG-BIN -P model "/var/lib/sbb_binarization"
  ocrd-olena-binarize --overwrite -I "$INPUT_FILE_GRP" -O OCR-D-IMG-BIN -P impl "sauvola-ms-split"
  do_validate

  # Line segmentation (alternative segmenters kept as comments for reference)
  #ocrd-tesserocr-segment-region --overwrite -I OCR-D-IMG-BIN -O OCR-D-SEG-REGION
  #ocrd-tesserocr-segment-line --overwrite -I OCR-D-SEG-REGION -O OCR-D-SEG-LINE
  #ocrd-cis-ocropy-segment --overwrite -I OCR-D-IMG-BIN -O OCR-D-SEG-LINE -P level-of-operation page
  #ocrd-eynollah-segment -I OCR-D-IMG-BIN -O OCR-D-SEG-LINE -P models /var/lib/eynollah
  ocrd-sbb-textline-detector --overwrite -I OCR-D-IMG-BIN -O OCR-D-SEG-LINE -P model "/var/lib/textline_detection"
  do_validate

  # OCR with both Calamari and Tesseract
  ocrd-calamari-recognize --overwrite -I OCR-D-SEG-LINE -O OCR-D-OCR-CALAMARI -P checkpoint_dir "/var/lib/calamari-models/GT4HistOCR/2019-12-11T11_10+0100/" -P textequiv_level "$TEXTEQUIV_LEVEL"
  ocrd-tesserocr-recognize --overwrite -I OCR-D-SEG-LINE -O OCR-D-OCR-TESS -P model "GT4HistOCR_2000000" -P textequiv_level "$TEXTEQUIV_LEVEL"
  do_validate

  # Evaluate against ground truth (if a GT file group exists) and convert results to ALTO
  for ocr_filegrp in OCR-D-OCR-CALAMARI OCR-D-OCR-TESS; do
    if ocrd workspace list-group | grep -q OCR-D-GT-PAGE; then
      ocrd-dinglehopper --overwrite -I "OCR-D-GT-PAGE,$ocr_filegrp" -O "$ocr_filegrp-EVAL"
    fi
    ocrd-fileformat-transform --overwrite -I "$ocr_filegrp" -O "${ocr_filegrp}-ALTO"
  done
}
# In debug mode, log the installed Python packages for reproducibility
if [[ "$LOG_LEVEL" = "DEBUG" || "$LOG_LEVEL" = "TRACE" ]]; then
  pip list || true
fi

main

# vim:tw=120:

84
extra/my_ocrd_workflow-sbb Executable file
View file

@ -0,0 +1,84 @@
#!/bin/bash
# OCR-D workflow (SBB variant): sbb binarization → sbb textline detection → Calamari OCR → ALTO.
set -e  # Abort on error

# Configuration
export LOG_LEVEL=${LOG_LEVEL:-INFO}  # /etc/ocrd_logging.py uses this to set level for all OCR-D modules
export TEXTEQUIV_LEVEL=word

# Command line parameters
# NOTE: relies on util-linux "enhanced" getopt for long-option support.
OPTS=$(getopt -o I: --long input-file-grp:,skip-validation -- "$@")
eval set -- "$OPTS"
INPUT_FILE_GRP=OCR-D-IMG
SKIP_VALIDATION=false
while true; do
  case "$1" in
    -I|--input-file-grp) INPUT_FILE_GRP=$2; shift 2;;
    --skip-validation) SKIP_VALIDATION=true; shift;;
    --) shift; break;;
    *) break;;
  esac
done

# Set up logging
if [[ "$LOG_LEVEL" = "DEBUG" || "$LOG_LEVEL" = "TRACE" ]]; then
  set -x
fi
do_validate() {
  # Validate the workspace (unless --skip-validation was given).
  #
  # Both ocrd_tesserocr + ocrd_calamari produce segment coordinates that are not strictly within their parent's
  # coordinates:
  #
  #   INCONSISTENCY in [...] coords [...] not within parent coords
  #
  # → --page-coordinate-consistency off
  #
  # ocrd_tesserocr sometimes produces segment text results that aren't concatenating as expected by the validator:
  #
  #   INCONSISTENCY in [...]: text results '[...]' != concatenated '[...]'
  #
  # → --page-strictness lax

  # An array (rather than an unquoted string relying on word splitting) keeps
  # the option list robust and quotable.
  local validate_options=(
    --skip dimension
    --skip pixel_density
    --page-strictness lax
    --page-coordinate-consistency off
  )
  if [[ "$SKIP_VALIDATION" = false ]]; then
    ocrd workspace validate "${validate_options[@]}"
  fi
}
main() {
  do_validate

  # Binarization with sbb_binarization
  ocrd-sbb-binarize --overwrite -I "$INPUT_FILE_GRP" -O OCR-D-IMG-BIN -P model "/var/lib/sbb_binarization"
  do_validate

  # Textline detection
  ocrd-sbb-textline-detector --overwrite -I OCR-D-IMG-BIN -O OCR-D-SEG-LINE -P model "/var/lib/textline_detection"
  do_validate

  # OCR with Calamari
  ocrd-calamari-recognize --overwrite -I OCR-D-SEG-LINE -O OCR-D-OCR-CALAMARI -P checkpoint_dir "/var/lib/calamari-models/GT4HistOCR/2019-12-11T11_10+0100/" -P textequiv_level "$TEXTEQUIV_LEVEL"
  do_validate

  # Convert results to ALTO
  ocrd-fileformat-transform --overwrite -I OCR-D-OCR-CALAMARI -O OCR-D-OCR-CALAMARI-ALTO
  do_validate
}
# In debug mode, log the installed Python packages for reproducibility
if [[ "$LOG_LEVEL" = "DEBUG" || "$LOG_LEVEL" = "TRACE" ]]; then
  pip list || true
fi

main

# vim:tw=120:

View file

@ -0,0 +1,38 @@
#!/bin/bash
# Create an OCR-D workspace from images
#
#   ocrd-workspace-from-images *.png
#
# In order to produce a workspace that validates, this script makes best effort
# to generate random IDs and to create the necessary structures like the
# physical page sequence.
set -e  # Abort on error — a half-initialized workspace is useless

workspace_dir=$(mktemp -d "workspace-XXXXX")
workspace_id=$(basename "$workspace_dir")
ocrd workspace -d "$workspace_dir" init
ocrd workspace -d "$workspace_dir" set-id "$workspace_id"
make_file_id_from_filename() {
  # Derive a METS-safe file ID from an image filename: strip a common image
  # extension, then replace every character not valid in an ID with '_'.
  local filename="$1"
  local file_id="$filename"
  # BUGFIX: the old basic-regex pattern '(.png|.tif|.jpe?g)$' treated the
  # parentheses/pipes literally and never matched; -E enables alternation,
  # '\.' anchors on a real dot, and the trailing I makes it case-insensitive
  # (GNU sed) so ".PNG" etc. are stripped too.
  file_id=$(printf '%s' "$file_id" | sed -E 's#\.(png|tif|jpe?g)$##I')
  file_id=$(printf '%s' "$file_id" | sed 's#[^A-Za-z0-9_-]#_#g')
  printf '%s\n' "$file_id"
}
mkdir "$workspace_dir/OCR-D-IMG"

# Copy each image into the workspace and register it, one physical page per image
page_count=0
for img_orig in "$@"; do
  page_count=$((page_count + 1))
  img="$workspace_dir/OCR-D-IMG/$(basename "$img_orig")"
  cp -L "$img_orig" "$img"  # -L: dereference symlinks so the workspace is self-contained
  file_id=$(make_file_id_from_filename "$img")
  mime_type=$(file -b --mime-type "$img")
  page_id=$(printf "P%05d" "$page_count")
  ocrd workspace -d "$workspace_dir" add -G OCR-D-IMG "$img" --file-id "$file_id" --page-id "$page_id" --mimetype "$mime_type"
done

ocrd workspace -d "$workspace_dir" validate
# Print the workspace directory so callers can pick it up
echo "$workspace_dir"

217
extra/ppn2ocr Executable file
View file

@ -0,0 +1,217 @@
#!/usr/bin/env python3
"""Get OCR results as a OCR-D workspace for a given PPN"""
import os
import requests
import sys
import lxml.etree as ET
import re
import subprocess
import click
from copy import deepcopy
# XML namespaces used for all METS parsing and XPath queries below
XMLNS = {
    'mets': 'http://www.loc.gov/METS/',
    'xlink': 'http://www.w3.org/1999/xlink'
}

# OAI-PMH endpoint of the Staatsbibliothek zu Berlin
API_URL = 'https://oai.sbb.berlin'
# OAI identifier pattern; %s is filled with the document PPN
IDENTIFIER_TEMPLATE = 'oai:digital.staatsbibliothek-berlin.de:%s'

# Register prefixes so the serialized mets.xml keeps readable namespace prefixes
for prefix, uri in XMLNS.items():
    ET.register_namespace(prefix, uri)
def oai_mets(ppn):
    """Retrieve METS metadata for a given PPN via OAI-PMH GetRecord.

    :param ppn: document PPN, e.g. "PPN699887615"
    :return: lxml ElementTree rooted at the mets:mets element
    :raises requests.HTTPError: on HTTP-level failure
    :raises ValueError: if the OAI response contains no METS record
    """
    params = {
        'verb': 'GetRecord',
        'metadataPrefix': 'mets',
        'identifier': IDENTIFIER_TEMPLATE % ppn
    }
    s = requests.Session()
    r = s.get(API_URL, params=params, timeout=60)
    # Fail early on HTTP errors instead of trying to parse an error page
    r.raise_for_status()
    mets = ET.XML(r.content).find(f".//{{{XMLNS['mets']}}}mets")
    if mets is None:
        # e.g. unknown PPN — the OAI response then carries an error element
        raise ValueError(f"OAI response for {ppn} contains no METS record")
    mets = ET.ElementTree(mets)
    return mets
def iiif_url_for_sbb_url(sbb_url, ppn, size, format):
    """
    Construct an IIIF URL from a dms or an IIIF URL.

    This function exists as long as dms URL exist (or as long as we may need to
    rewrite IIIF URLs for a different size)
    """
    # Dispatch on the URL flavor; both builders share the same signature.
    builder = iiif_url_for_dms_url if "/dms/" in sbb_url else iiif_url_for_iiif_url
    return builder(sbb_url, ppn, size, format)
def iiif_url_for_dms_url(dms_url, ppn, size, format):
    """
    Construct an IIIF URL from a dms URL.

    This function exists to encapsulate the hack of rewriting the URL to get IIIF.
    """
    # Sanity check: the URL must belong to the requested document
    if ppn not in dms_url:
        raise ValueError(f"Unexpected URL {dms_url}")
    match = re.search(r'/dms/.*/([0-9]+)\.jpg$', dms_url)
    if match is None:
        raise ValueError(f"Unexpected URL {dms_url}")
    page_num = match.group(1)
    iiif_identifier = f'{ppn}-{page_num}'
    iiif_quality = 'default'
    return f'https://content.staatsbibliothek-berlin.de/dc/{iiif_identifier}/full/{size}/0/{iiif_quality}.{format}'
def iiif_url_for_iiif_url(iiif_url, ppn, size, format):
    """
    Construct an IIIF URL from an already existing IIIF URL.
    """
    # Sanity check: the URL must belong to the requested document
    if ppn not in iiif_url:
        raise ValueError(f"Unexpected URL {iiif_url}")
    match = re.search(rf'/dc/{ppn}-([0-9]+)/', iiif_url)
    if match is None:
        raise ValueError(f"Unexpected URL {iiif_url}")
    page_num = match.group(1)
    iiif_identifier = f'{ppn}-{page_num}'
    iiif_quality = 'default'
    return f'https://content.staatsbibliothek-berlin.de/dc/{iiif_identifier}/full/{size}/0/{iiif_quality}.{format}'
def remove_file_grp(mets, use):
    """Remove the mets:fileGrp with the given USE attribute, including every
    mets:fptr in the structural maps that references one of its files."""
    file_ids = mets.xpath(f'//mets:fileGrp[@USE="{use}"]/mets:file/@ID', namespaces=XMLNS)
    for file_id in file_ids:
        for fptr in mets.xpath(f'//mets:fptr[@FILEID="{file_id}"]', namespaces=XMLNS):
            fptr.getparent().remove(fptr)
    for file_grp in mets.xpath(f'//mets:fileGrp[@USE="{use}"]', namespaces=XMLNS):
        file_grp.getparent().remove(file_grp)
def mime_type_for_format(format_):
    """Map an image format shorthand ('tif' or 'jpg') to its IANA MIME type.

    :raises ValueError: for any unsupported format
    """
    if format_ == 'tif':
        mime_type = 'image/tiff'
    elif format_ == 'jpg':
        # BUGFIX: 'image/jpg' is not a registered MIME type; 'image/jpeg' is.
        mime_type = 'image/jpeg'
    else:
        raise ValueError(f"Unsupported format {format_!r}")
    return mime_type
def prune_file_grps(mets):
    """
    Prune unwanted file groups.

    We only want to keep the MAX file group (we created it ourselves) and
    possibly ABBYY full texts in FULLTEXT.

    The PRESENTATION + LOCAL file groups we definitely want to delete because
    they contain local file:/// or file:/ links, which are not handled well by
    "ocrd workspace". They are not explicitly mentioned, as we only keep a
    whitelist.
    """
    wanted = {"MAX", "FULLTEXT"}
    for use in mets.xpath('//mets:fileGrp/@USE', namespaces=XMLNS):
        if use in wanted:
            continue
        remove_file_grp(mets, use)
def make_workspace(ppn, workspace):
    """Build an OCR-D workspace directory for the given PPN.

    Fetches the document's METS via OAI, clones the DEFAULT file group into a
    new MAX file group whose images point to full-size IIIF URLs, prunes all
    other file groups (except FULLTEXT) and writes the result as mets.xml.

    NOTE(review): this chdir()s into the workspace and never changes back —
    callers relying on the working directory should be aware.
    """
    # Make workspace directory
    os.mkdir(workspace)
    os.chdir(workspace)

    mets = oai_mets(ppn)

    # Delete MAX file group - we assume that, if it exists, it is not as
    # we expect it, e.g. IIIF full URLs
    remove_file_grp(mets, 'MAX')

    # Duplicate DEFAULT file group into a new file group MAX
    format_ = 'tif'
    file_grp_default = mets.find('//mets:fileGrp[@USE="DEFAULT"]', namespaces=XMLNS)
    if file_grp_default is None:
        raise ValueError("This document has no DEFAULT file group, could be a multi-volume work")
    file_grp_best = deepcopy(file_grp_default)
    file_grp_best.attrib['USE'] = 'MAX'
    for f in file_grp_best.findall('./mets:file', namespaces=XMLNS):
        # Rename the copied file IDs DEFAULT… → MAX… and fix their MIME type
        old_id = f.attrib['ID']
        new_id = re.sub('DEFAULT', 'MAX', old_id)
        f.attrib['ID'] = new_id
        f.attrib['MIMETYPE'] = mime_type_for_format(format_)
        # Add a structMap pointer for the new file next to each pointer of the
        # original file (the originals go away when DEFAULT is pruned below)
        for fptr in mets.findall(f'//mets:fptr[@FILEID="{old_id}"]', namespaces=XMLNS):
            new_fptr = deepcopy(fptr)
            new_fptr.attrib['FILEID'] = new_id
            fptr.getparent().append(new_fptr)
        # XXX Need to fumble around with the URL for now
        flocat = f.find(f".//{{{XMLNS['mets']}}}FLocat")
        old_url = flocat.attrib[f"{{{XMLNS['xlink']}}}href"]
        url_iiif_full = iiif_url_for_sbb_url(old_url, ppn, 'full', format_)
        flocat.attrib[f"{{{XMLNS['xlink']}}}href"] = url_iiif_full
    mets.find('//mets:fileSec', namespaces=XMLNS).append(file_grp_best)

    prune_file_grps(mets)

    # Write mets.xml
    mets.write('mets.xml', pretty_print=True)

    # TODO
    # Validate workspace
    #ocrd workspace validate mets.xml | grep -v "<notice>Won't download remote image"
def validate_ppn(ctx, param, value):
    """Validate a PPN argument"""
    # Accept only identifiers with the canonical 'PPN' prefix
    if value.startswith('PPN'):
        return value
    raise click.BadParameter('PPN must be in format PPNxxxxxxxx')
@click.command()
@click.argument('ppn', callback=validate_ppn)
def ppn2ocr(ppn):
    """
    Get METS with best images for a document PPN

    For example, to get the document "PROPOSITIONES PHILOSOPHICAE: [...]" use this:

    \b
       ppn2ocr PPN699887615
       ls PPN699887615
    """
    # NOTE(review): self_dir is only used by the commented-out docker
    # invocation below; kept so it can be re-enabled easily.
    self_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
    # Create a workspace directory named after the PPN itself
    make_workspace(ppn, ppn)

    # XXX Run the OCR workflow in Docker on the new workspace (disabled for now)
    # subprocess.run([
    #     os.path.join(self_dir, 'run-docker-hub'),
    #     '-I', 'MAX',
    #     '--skip-validation'
    # ])


if __name__ == '__main__':
    ppn2ocr()

31
extra/zdb2ocr Executable file
View file

@ -0,0 +1,31 @@
#!/bin/sh
# Fetch the METS of one ZEFYS newspaper issue and run the OCR workflow on it.
set -e

# ZDB id + issue date (YYYYMMDD) together identify the newspaper issue
zdb=27974534
yyyymmdd=19010712

self_dir=$(dirname "$0")
self_dir=$(realpath "$self_dir")

workspace="$zdb-$yyyymmdd"
mkdir "$workspace"
cd "$workspace"
pwd

zefys_url="https://content.staatsbibliothek-berlin.de/zefys/SNP$zdb-$yyyymmdd-0-0-0-0.xml"
echo "$zefys_url"
# -f: fail on HTTP errors so we do not save an HTML error page as mets.xml
curl -fsS "$zefys_url" > mets.xml

ocrd workspace validate mets.xml | grep -v "<notice>Won't download remote image"

"$self_dir"/run-docker-hub -I MAX --skip-validation

# * TODO: Error on invocation
# * TODO: Check out options to get better image resolutions
# * TODO: Are input images already grayscale? Further binarization makes them
#         worse than before
# * TODO: Does this lose the image URLs for the MAX filegroup?
# * TODO: Lots of text problems with ocrd_calamari "not the same as Calamari"