Mirror of https://github.com/qurator-spk/modstool.git, synced 2025-06-25 19:49:54 +02:00
✔ Test if dtypes are as expected in produced Parquet files

parent 215bfbb11f
commit ac8740c33f

5 changed files with 130 additions and 92 deletions
@@ -1,87 +0,0 @@
-import re
-import warnings
-import os
-
-with warnings.catch_warnings():
-    # Filter warnings on WSL
-    if "Microsoft" in os.uname().release:
-        warnings.simplefilter("ignore")
-    import pandas as pd
-
-
-mods_info = pd.read_parquet("mods_info_df.parquet")
-page_info = pd.read_parquet("page_info_df.parquet")
-alto_info = pd.read_parquet("alto_info_df.parquet")
-
-# Check
-EXPECTED_TYPES = {
-
-    # mods_info
-
-    r"mets_file": ("object", ["str"]),
-    r"titleInfo_title": ("object", ["str"]),
-    r"titleInfo_subTitle": ("object", ["str", "NoneType"]),
-    r"titleInfo_partName": ("object", ["str", "NoneType"]),
-    r"identifier-.*": ("object", ["str", "NoneType"]),
-    r"location_.*": ("object", ["str", "NoneType"]),
-    r"name\d+_.*roleTerm": ("object", ["ndarray", "NoneType"]),
-    r"name\d+_.*": ("object", ["str", "NoneType"]),
-    r"relatedItem-.*_recordInfo_recordIdentifier": ("object", ["str", "NoneType"]),
-    r"typeOfResource": ("object", ["str", "NoneType"]),
-    r"accessCondition-.*": ("object", ["str", "NoneType"]),
-    r"originInfo-.*": ("object", ["str", "NoneType"]),
-
-    r".*-count": ("Int64", None),
-
-    r"genre-.*": ("object", ["ndarray", "NoneType"]),
-    r"subject-.*": ("object", ["ndarray", "NoneType"]),
-    r"language_.*Term": ("object", ["ndarray", "NoneType"]),
-    r"classification-.*": ("object", ["ndarray", "NoneType"]),
-
-    # page_info
-
-    r"fileGrp_.*_file_FLocat_href": ("object", ["str", "NoneType"]),
-    r"structMap-LOGICAL_TYPE_.*": ("boolean", None),
-
-    # alto_info
-
-    r"Description_.*": ("object", ["str", "NoneType"]),
-    r"Layout_Page_ID": ("object", ["str", "NoneType"]),
-    r"Layout_Page_PHYSICAL_(IMG|IMAGE)_NR": ("object", ["str", "NoneType"]),
-    r"Layout_Page_PROCESSING": ("object", ["str", "NoneType"]),
-    r"Layout_Page_QUALITY": ("object", ["str", "NoneType"]),
-    r"Layout_Page_//alto:String/@WC-.*": ("Float64", None),
-    r"alto_xmlns": ("object", ["str", "NoneType"]),
-
-    r"Layout_Page_(WIDTH|HEIGHT)": ("Int64", None),
-}
-
-def expected_types(c):
-    for r, types in EXPECTED_TYPES.items():
-        if re.fullmatch(r, c):
-            edt = types[0]
-            einner_types = types[1]
-            if einner_types:
-                einner_types = set(einner_types)
-            return edt, einner_types
-    return None, None
-
-def check_types(df):
-    for c in df.columns:
-        dt = df.dtypes[c]
-        edt, einner_types = expected_types(c)
-
-        if edt is None:
-            print(f"No expected dtype known for column {c} (got {dt})")
-        elif dt != edt:
-            print(f"Unexpected dtype {dt} for column {c} (expected {edt})")
-
-        if edt == "object":
-            inner_types = set(type(v).__name__ for v in df[c])
-            if any(it not in einner_types for it in inner_types):
-                print(f"Unexpected inner types {inner_types} for column {c} (expected {einner_types})")
-
-check_types(mods_info)
-check_types(page_info)
-check_types(alto_info)
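The deleted script's core idea survives unchanged in the new tests: look a column name up against a dict of regex patterns, first full match wins. Because dicts preserve insertion order (Python 3.7+), the specific `name\d+_.*roleTerm` pattern must come before the catch-all `name\d+_.*`. A minimal, self-contained sketch of that lookup; the column names below are illustrative, not taken from the fixtures:

```python
import re

# First full match wins, so more specific patterns precede catch-alls.
EXPECTED_TYPES = {
    r"name\d+_.*roleTerm": ("object", ["ndarray", "NoneType"]),
    r"name\d+_.*": ("object", ["str", "NoneType"]),
    r".*-count": ("Int64", None),
}

def expected_types(c):
    for r, (edt, inner) in EXPECTED_TYPES.items():
        if re.fullmatch(r, c):
            return edt, set(inner) if inner else None
    return None, None

print(expected_types("name1_roleTerm"))            # ('object', {'ndarray', 'NoneType'}), set order may vary
print(expected_types("name1_displayForm"))         # ('object', {'str', 'NoneType'})
print(expected_types("Layout_Page_String-count"))  # ('Int64', None)
print(expected_types("unknown_column"))            # (None, None)
```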
@@ -138,7 +138,7 @@ def walk(m):
 @click.argument('alto_files', type=click.Path(exists=True), required=True, nargs=-1)
 @click.option('--output', '-o', 'output_file', type=click.Path(), help='Output Parquet file',
               default='alto_info_df.parquet', show_default=True)
-def process(alto_files: List[str], output_file: str):
+def process_command(alto_files: List[str], output_file: str):
     """
     A tool to convert the ALTO metadata in INPUT to a pandas DataFrame.
 
@@ -151,6 +151,9 @@ def process(alto_files: List[str], output_file: str):
     - and a CSV file with all conversion warnings.
     """
+    process(alto_files, output_file)
+
 
+def process(alto_files: List[str], output_file: str):
     # Extend file list if directories are given
     alto_files_real = []
     for m in alto_files:
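This hunk and the mods4pandas one below apply the same refactoring: the click-decorated entry point becomes a thin `process_command` wrapper, and the actual work moves into a plain `process` function that the new tests import and call directly. A minimal sketch of the pattern, with reduced, hypothetical arguments for illustration:

```python
import click

@click.command()
@click.argument("input_files", type=click.Path(exists=True), required=True, nargs=-1)
@click.option("--output", "-o", "output_file", type=click.Path(),
              default="info_df.parquet", show_default=True)
def process_command(input_files, output_file):
    """Thin CLI wrapper: click parses the arguments, then delegates."""
    process(input_files, output_file)


def process(input_files, output_file):
    # The real work lives in a plain function, so tests can import and
    # call it directly instead of going through click's CliRunner.
    print(f"Would process {len(input_files)} file(s) into {output_file}")


if __name__ == "__main__":
    process_command()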
@@ -382,7 +382,7 @@ def pages_to_dict(mets, raise_errors=True) -> List[Dict]:
 @click.option('--output', '-o', 'output_file', type=click.Path(), help='Output Parquet file',
               default='mods_info_df.parquet', show_default=True)
 @click.option('--output-page-info', type=click.Path(), help='Output page info Parquet file')
-def process(mets_files: list[str], output_file: str, output_page_info: str):
+def process_command(mets_files: list[str], output_file: str, output_page_info: str):
     """
     A tool to convert the MODS metadata in INPUT to a pandas DataFrame.
 
@@ -393,7 +393,9 @@ def process(mets_files: list[str], output_file: str, output_page_info: str):
 
     Per-page information (e.g. structure information) can be output to a separate Parquet file.
     """
+    process(mets_files, output_file, output_page_info)
 
+def process(mets_files: list[str], output_file: str, output_page_info: str):
     # Extend file list if directories are given
     mets_files_real: list[str] = []
     for m in mets_files:
@@ -476,7 +478,7 @@ def main():
     for prefix, uri in ns.items():
         ET.register_namespace(prefix, uri)
 
-    process()
+    process_command()
 
 
 if __name__ == '__main__':
@@ -1,9 +1,13 @@
+from pathlib import Path
+import re
 from lxml import etree as ET
+import pandas as pd
 
 
-from mods4pandas.alto4pandas import alto_to_dict
+from mods4pandas.alto4pandas import alto_to_dict, process
 from mods4pandas.lib import flatten
 
+TESTS_DATA_DIR = Path(__file__).parent / "data"
 
 def dict_fromstring(x):
     return flatten(alto_to_dict(ET.fromstring(x)))
@@ -79,3 +83,50 @@ def test_String_TAGREF_counts():
 """)
     assert d['Layout_Page_//alto:String[@TAGREFS]-count'] == 3
     assert d['Layout_Page_String-count'] == 4
+
+
+def test_dtypes(tmp_path):
+    alto_dir = (TESTS_DATA_DIR / "alto").absolute().as_posix()
+    alto_info_df_parquet = (tmp_path / "test_dtypes_alto_info.parquet").as_posix()
+    process([alto_dir], alto_info_df_parquet)
+    alto_info_df = pd.read_parquet(alto_info_df_parquet)
+
+    EXPECTED_TYPES = {
+        r"Description_.*": ("object", ["str", "NoneType"]),
+        r"Layout_Page_ID": ("object", ["str", "NoneType"]),
+        r"Layout_Page_PHYSICAL_(IMG|IMAGE)_NR": ("object", ["str", "NoneType"]),
+        r"Layout_Page_PROCESSING": ("object", ["str", "NoneType"]),
+        r"Layout_Page_QUALITY": ("object", ["str", "NoneType"]),
+        r"Layout_Page_//alto:String/@WC-.*": ("Float64", None),
+        r".*-count": ("Int64", None),
+        r"alto_xmlns": ("object", ["str", "NoneType"]),
+
+        r"Layout_Page_(WIDTH|HEIGHT)": ("Int64", None),
+    }
+    def expected_types(c):
+        """Return the expected types for column c."""
+        for r, types in EXPECTED_TYPES.items():
+            if re.fullmatch(r, c):
+                edt = types[0]
+                einner_types = types[1]
+                if einner_types:
+                    einner_types = set(einner_types)
+                return edt, einner_types
+        return None, None
+
+    def check_types(df):
+        """Check the types of the DataFrame df."""
+        for c in df.columns:
+            dt = df.dtypes[c]
+            edt, einner_types = expected_types(c)
+            print(c, dt, edt)
+
+            assert edt is not None, f"No expected dtype known for column {c} (got {dt})"
+            assert dt == edt, f"Unexpected dtype {dt} for column {c} (expected {edt})"
+
+            if edt == "object":
+                inner_types = set(type(v).__name__ for v in df[c])
+                assert all(it in einner_types for it in inner_types), \
+                    f"Unexpected inner types {inner_types} for column {c} (expected {einner_types})"
+
+    check_types(alto_info_df)
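Comparing `df.dtypes[c]` against strings works because pandas dtypes compare equal to their string names; the capitalized `Int64`, `Float64` and `boolean` are pandas' nullable extension dtypes, while a plain `object` dtype says nothing about the values, which is why the test additionally inspects the Python type of each value. A small illustration with toy data (not from the test fixtures):

```python
import pandas as pd

df = pd.DataFrame({
    "string_count": pd.array([1, None, 3], dtype="Int64"),     # nullable integer
    "wc_mean": pd.array([0.92, None, 0.87], dtype="Float64"),  # nullable float
    "title": ["a", None, "c"],                                 # plain object column
})

# pandas dtypes compare equal to their string names:
assert df.dtypes["string_count"] == "Int64"
assert df.dtypes["wc_mean"] == "Float64"
assert df.dtypes["title"] == "object"

# object columns need the inner-type check:
print({type(v).__name__ for v in df["title"]})  # {'str', 'NoneType'}
```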
@@ -1,10 +1,14 @@
+from pathlib import Path
+import re
 from lxml import etree as ET
+import pandas as pd
 import pytest
 
 
-from mods4pandas.mods4pandas import mods_to_dict
+from mods4pandas.mods4pandas import mods_to_dict, process
 from mods4pandas.lib import flatten
 
+TESTS_DATA_DIR = Path(__file__).parent / "data"
 
 def dict_fromstring(x):
     """Helper function to parse a MODS XML string to a flattened dict"""
@@ -151,3 +155,68 @@ def test_relatedItem():
 """)
 
     assert d['relatedItem-original_recordInfo_recordIdentifier-dnb-ppn'] == '1236513355'
+
+def test_dtypes(tmp_path):
+    mets_files = [p.absolute().as_posix() for p in (TESTS_DATA_DIR / "mets-mods").glob("*.xml")]
+    mods_info_df_parquet = (tmp_path / "test_dtypes_mods_info.parquet").as_posix()
+    page_info_df_parquet = (tmp_path / "test_dtypes_page_info.parquet").as_posix()
+    process(mets_files, mods_info_df_parquet, page_info_df_parquet)
+    mods_info_df = pd.read_parquet(mods_info_df_parquet)
+    page_info_df = pd.read_parquet(page_info_df_parquet)
+
+    EXPECTED_TYPES = {
+        # mods_info
+
+        r"mets_file": ("object", ["str"]),
+        r"titleInfo_title": ("object", ["str"]),
+        r"titleInfo_subTitle": ("object", ["str", "NoneType"]),
+        r"titleInfo_partName": ("object", ["str", "NoneType"]),
+        r"identifier-.*": ("object", ["str", "NoneType"]),
+        r"location_.*": ("object", ["str", "NoneType"]),
+        r"name\d+_.*roleTerm": ("object", ["ndarray", "NoneType"]),
+        r"name\d+_.*": ("object", ["str", "NoneType"]),
+        r"relatedItem-.*_recordInfo_recordIdentifier": ("object", ["str", "NoneType"]),
+        r"typeOfResource": ("object", ["str", "NoneType"]),
+        r"accessCondition-.*": ("object", ["str", "NoneType"]),
+        r"originInfo-.*": ("object", ["str", "NoneType"]),
+
+        r".*-count": ("Int64", None),
+
+        r"genre-.*": ("object", ["ndarray", "NoneType"]),
+        r"subject-.*": ("object", ["ndarray", "NoneType"]),
+        r"language_.*Term": ("object", ["ndarray", "NoneType"]),
+        r"classification-.*": ("object", ["ndarray", "NoneType"]),
+
+        # page_info
+
+        r"fileGrp_.*_file_FLocat_href": ("object", ["str", "NoneType"]),
+        r"structMap-LOGICAL_TYPE_.*": ("boolean", None),
+    }
+    def expected_types(c):
+        """Return the expected types for column c."""
+        for r, types in EXPECTED_TYPES.items():
+            if re.fullmatch(r, c):
+                edt = types[0]
+                einner_types = types[1]
+                if einner_types:
+                    einner_types = set(einner_types)
+                return edt, einner_types
+        return None, None
+
+    def check_types(df):
+        """Check the types of the DataFrame df."""
+        for c in df.columns:
+            dt = df.dtypes[c]
+            edt, einner_types = expected_types(c)
+            print(c, dt, edt)
+
+            assert edt is not None, f"No expected dtype known for column {c} (got {dt})"
+            assert dt == edt, f"Unexpected dtype {dt} for column {c} (expected {edt})"
+
+            if edt == "object":
+                inner_types = set(type(v).__name__ for v in df[c])
+                assert all(it in einner_types for it in inner_types), \
+                    f"Unexpected inner types {inner_types} for column {c} (expected {einner_types})"
+
+    check_types(mods_info_df)
+    check_types(page_info_df)
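With the checks living in ordinary test functions, they run under pytest like the rest of the suite. A minimal way to select just them programmatically, as a sketch; the `-k` filter expression is an assumption about how you would want to narrow the run:

```python
import pytest

# -k filters tests by name; -s lets the print(c, dt, edt) diagnostics through.
raise SystemExit(pytest.main(["-k", "test_dtypes", "-s"]))
```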