remove ner/ned code from page2tsv package
parent
ed90193c45
commit
568e1cd104
@ -1,3 +1,4 @@
|
||||
ocrd >= 2.23.2
|
||||
pandas
|
||||
matplotlib
|
||||
qurator-sbb-tools
|
@ -1,88 +0,0 @@
|
||||
import os
|
||||
import requests
|
||||
import json
|
||||
|
||||
|
||||
def ned(tsv, ner_result, ned_rest_endpoint, json_file=None, threshold=None, priority=None):
    """Attach named-entity-disambiguation (NED) results to a page TSV.

    Obtains a NED result either from a cached JSON file (if ``json_file``
    exists) or by POSTing ``ner_result`` to a NED REST service, then walks
    the rows of ``tsv`` grouping consecutive B-/I- tagged tokens into
    entities and writes candidate IDs/confidences back into the frame.

    :param tsv: pandas DataFrame with at least 'NE-TAG' and 'TOKEN' columns;
                gains/overwrites 'ID' and 'conf' columns (default '-').
    :param ner_result: NER output to be parsed by the service
                       (sent as JSON to ``<endpoint>/parse``).
    :param ned_rest_endpoint: base URL of the NED REST service.
    :param json_file: optional path of a cached NED result; if it exists it
                      is loaded instead of calling the service.
    :param threshold: optional minimum 'proba_1' for a ranking candidate to
                      be kept.
    :param priority: optional int forwarded as '&priority=' query parameter.
    :return: tuple ``(tsv, ned_result)`` — the (mutated) frame and the raw
             NED result dict.
    """

    if json_file is not None and os.path.exists(json_file):

        print('Loading {}'.format(json_file))

        with open(json_file, "r") as fp:
            ned_result = json.load(fp)

    else:
        # First let the service parse the raw NER output ...
        resp = requests.post(url=ned_rest_endpoint + '/parse', json=ner_result)

        resp.raise_for_status()

        ner_parsed = json.loads(resp.content)

        # ... then request the actual disambiguation. return_full is '1'
        # when a cache file was requested (full result worth caching).
        # NOTE(review): str(int(...)).lower() yields '0'/'1'; .lower() is a
        # no-op on digits — possibly str(bool).lower() ('true'/'false') was
        # intended. Verify against the service's expected query values.
        ned_rest_endpoint = ned_rest_endpoint + '/ned?return_full=' + str(int(json_file is not None)).lower()

        if priority is not None:
            ned_rest_endpoint += "&priority=" + str(int(priority))

        # Very large timeout (requests uses seconds) — effectively unbounded.
        resp = requests.post(url=ned_rest_endpoint, json=ner_parsed, timeout=3600000)

        resp.raise_for_status()

        ned_result = json.loads(resp.content)

    # Accumulator state for the entity currently being collected:
    # rids: row indices belonging to it, entity: joined surface string,
    # entity_type: 'PER'/'LOC'/'ORG'.
    rids = []
    entity = ""
    entity_type = None
    tsv['ID'] = '-'
    tsv['conf'] = '-'

    def check_entity(tag):
        # Flush the accumulated entity if `tag` ends it (outside tag, a new
        # B- tag, or a tag of a different type), writing its candidate IDs
        # and confidences into all of its rows.
        nonlocal entity, entity_type, rids

        if (entity != "") and ((tag == 'O') or tag.startswith('B-') or (tag[2:] != entity_type)):

            # Keys of ned_result are "<surface>-<TYPE>".
            eid = entity + "-" + entity_type

            if eid in ned_result:
                if 'ranking' in ned_result[eid]:
                    ranking = ned_result[eid]['ranking']

                    # tsv.loc[rids, 'ID'] = ranking[0][1]['wikidata']
                    # if threshold is None or ranking[0][1]['proba_1'] >= threshold else ''

                    # '|'-joined wikidata IDs of all candidates above threshold.
                    tmp = "|".join([ranking[i][1]['wikidata']
                                    for i in range(len(ranking))
                                    if threshold is None or ranking[i][1]['proba_1'] >= threshold])
                    tsv.loc[rids, 'ID'] = tmp if len(tmp) > 0 else '-'

                    # ','-joined confidences, same filtering and order.
                    tmp = ",".join([str(ranking[i][1]['proba_1'])
                                    for i in range(len(ranking))
                                    if threshold is None or ranking[i][1]['proba_1'] >= threshold])

                    tsv.loc[rids, 'conf'] = tmp if len(tmp) > 0 else '-'

            # Reset the accumulator for the next entity.
            rids = []
            entity = ""
            entity_type = None

    # Work on a copy so unknown tags can be squashed to 'O' without
    # touching the caller's frame.
    ner_tmp = tsv.copy()
    ner_tmp.loc[~ner_tmp['NE-TAG'].isin(['O', 'B-PER', 'B-LOC', 'B-ORG', 'I-PER', 'I-LOC', 'I-ORG']), 'NE-TAG'] = 'O'

    for rid, row in ner_tmp.iterrows():

        # Flush first in case this tag terminates the running entity.
        check_entity(row['NE-TAG'])

        if row['NE-TAG'] != 'O':

            entity_type = row['NE-TAG'][2:]

            # Space-join multi-token surface forms.
            entity += " " if entity != "" else ""

            entity += str(row['TOKEN'])

            rids.append(rid)

    # Flush a trailing entity at end of input.
    check_entity('O')

    return tsv, ned_result
|
@ -1,49 +0,0 @@
|
||||
import pandas as pd
|
||||
import requests
|
||||
import unicodedata
|
||||
import json
|
||||
|
||||
|
||||
def ner(tsv, ner_rest_endpoint):
    """Run a NER REST service over the tokens of a page TSV.

    Sends the space-joined TOKEN column to the service, then re-aligns the
    service's (possibly re-tokenized) output with the original rows: NER
    sub-tokens are consumed until their concatenation equals the original
    token (whitespace- and NFC-normalized), emitting one result row per
    sub-token plus an empty marker row at each sentence break.

    :param tsv: pandas DataFrame with columns TOKEN, url_id, left, right,
                top, bottom (coordinate columns are copied through).
    :param ner_rest_endpoint: URL of the NER service; expects JSON
                {'text': ...} and returns a list of sentences, each a list
                of {'word': ..., 'prediction': ...} tokens.
    :return: tuple ``(tsv_result, ner_result)`` — a new DataFrame with
             columns No., TOKEN, NE-TAG, NE-EMB, ID, url_id, left, right,
             top, bottom, and the raw parsed service response.
    :raises requests.HTTPError: if the service answers with an error status.
    :raises AssertionError: if service tokens overrun an original token
            (alignment failure).
    """

    resp = requests.post(url=ner_rest_endpoint, json={'text': " ".join(tsv.TOKEN.astype(str).tolist())})

    resp.raise_for_status()

    def iterate_ner_results(result_sentences):
        # Flatten sentences into (word, tag, is_sentence_break) triples;
        # a ('', '', True) sentinel marks each sentence end.

        for sen in result_sentences:

            for token in sen:

                yield unicodedata.normalize('NFC', token['word']), token['prediction'], False

            yield '', '', True

    ner_result = json.loads(resp.content)

    result_sequence = iterate_ner_results(ner_result)

    tsv_result = []
    for idx, row in tsv.iterrows():

        # Normalize the original token the same way as the NER output so
        # the concatenation comparison below is exact.
        row_token = unicodedata.normalize('NFC', str(row.TOKEN).replace(' ', ''))

        ner_token_concat = ''
        while row_token != ner_token_concat:

            # Consume NER sub-tokens until they rebuild the original token.
            ner_token, ner_tag, sentence_break = next(result_sequence)
            ner_token_concat += ner_token

            # Sub-tokens must never overrun the original token.
            assert len(row_token) >= len(ner_token_concat)

            if sentence_break:
                # Empty marker row carrying the coordinates of the current row.
                tsv_result.append((0, '', 'O', 'O', '-', row.url_id, row.left, row.right, row.top, row.bottom))
            else:
                tsv_result.append((0, ner_token, ner_tag, 'O', '-', row.url_id, row.left, row.right, row.top,
                                   row.bottom))

    return pd.DataFrame(tsv_result, columns=['No.', 'TOKEN', 'NE-TAG', 'NE-EMB', 'ID', 'url_id',
                                             'left', 'right', 'top', 'bottom']), ner_result
|
||||
|
||||
|
@ -1,87 +0,0 @@
|
||||
import pandas as pd
|
||||
import re
|
||||
|
||||
|
||||
def read_tsv(tsv_file):
    """Read a page TSV file together with its embedded document URLs.

    :param tsv_file: path to a tab-separated file; '#' comment lines carry
                     the per-document URLs.
    :return: tuple ``(tsv, urls)`` — the DataFrame (with 'GND-ID' renamed
             to 'ID') and the list of URLs found in the comment lines.
    """
    frame = pd.read_csv(tsv_file, sep='\t', comment='#', quoting=3)
    frame = frame.rename(columns={'GND-ID': 'ID'})

    doc_parts = extract_doc_links(tsv_file)
    url_list = [doc_part['url'] for doc_part in doc_parts]

    return frame, url_list
|
||||
|
||||
|
||||
def write_tsv(tsv, urls, tsv_out_file):
    """Write a page TSV file, optionally with per-document URL comments.

    With an empty ``urls`` list the frame is written as-is. Otherwise an
    empty header is written first and each url_id group is appended,
    preceded by a '# <url>' comment line.

    :param tsv: pandas DataFrame of token rows (grouped by 'url_id' when
                URLs are present).
    :param urls: list of document URLs, indexed by url_id.
    :param tsv_out_file: output path (overwritten).
    """
    out_columns = ['No.', 'TOKEN', 'NE-TAG', 'NE-EMB', 'ID', 'url_id', 'left', 'right', 'top', 'bottom']
    if 'conf' in tsv.columns:
        out_columns = out_columns + ['conf']

    if len(urls) == 0:
        print('Writing to {}...'.format(tsv_out_file))

        tsv.to_csv(tsv_out_file, sep="\t", quoting=3, index=False)
        return

    # Header-only frame first, then one commented section per document.
    pd.DataFrame([], columns=out_columns).to_csv(tsv_out_file, sep="\t", quoting=3, index=False)

    for url_id, section in tsv.groupby('url_id'):
        with open(tsv_out_file, 'a') as out_fh:
            out_fh.write('# ' + urls[int(url_id)] + '\n')

        section.to_csv(tsv_out_file, sep="\t", quoting=3, index=False, mode='a', header=False)
|
||||
|
||||
|
||||
def extract_doc_links(tsv_file):
    """Split a multi-document TSV file into per-URL parts.

    The first line of the file is taken as the column header. Every line
    containing a URL starts a new document; subsequent well-formed rows
    (>= 3 tabs after whitespace normalization) are collected as that
    document's text. Malformed rows are reported to stdout.

    :param tsv_file: path of the TSV file to scan.
    :return: list of dicts {'url': ..., 'header': ..., 'text': ...}, one
             per URL found (empty list if the file has no URLs).
    """
    # Compile once instead of re-looking the pattern up per line.
    # NOTE: [$-_] is a character *range* ($..._ in ASCII) that also covers
    # '/', digits and uppercase letters.
    url_pattern = re.compile(
        r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')

    parts = []
    header = None
    text = []
    url = None

    with open(tsv_file, 'r') as f:

        for line in f:

            if header is None:
                # First file line: normalize whitespace runs to single tabs.
                header = "\t".join(line.split()) + '\n'
                continue

            # Original wrapped this in a redundant [url for url in ...]
            # copy that also shadowed the outer `url` state variable.
            urls = url_pattern.findall(line)

            if len(urls) > 0:
                # A new document starts: flush the previous one, if any.
                if url is not None:
                    parts.append({"url": url, 'header': header, 'text': "".join(text)})
                    text = []

                url = urls[-1]
            else:
                if url is None:
                    # Content before the first URL belongs to no document.
                    continue

                line = '\t'.join(line.split())

                if line.count('\t') == 2:
                    # Row lost its leading field; restore column alignment.
                    line = "\t" + line

                if line.count('\t') >= 3:
                    text.append(line + '\n')
                    continue

                if line.startswith('#'):
                    continue

                if len(line) == 0:
                    continue

                print('Line error: |', line, '|Number of Tabs: ', line.count('\t'))

    # Flush the trailing document.
    if url is not None:
        parts.append({"url": url, 'header': header, 'text': "".join(text)})

    return parts
|
Loading…
Reference in New Issue