Mirror of https://github.com/qurator-spk/eynollah.git, synced 2025-06-09 20:29:55 +02:00
Merge remote-tracking branch 'bertsky/machine_based_reading_order_integration_fixes' into machine_based_reading_order_integration

Commit 54040c1db4

9 changed files with 2952 additions and 4048 deletions
.github/workflows/test-eynollah.yml (vendored): 2 changes
@@ -36,7 +36,7 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install .
+          pip install .[OCR,plotting]
           pip install -r requirements-test.txt
       - name: Test with pytest
         run: make test
@@ -25,6 +25,10 @@ classifiers = [
     "Topic :: Scientific/Engineering :: Image Processing",
 ]
 
+[project.optional-dependencies]
+OCR = ["torch <= 2.0.1", "transformers <= 4.30.2"]
+plotting = ["matplotlib"]
+
 [project.scripts]
 eynollah = "eynollah.cli:main"
 ocrd-eynollah-segment = "eynollah.ocrd_cli:main"
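The new [project.optional-dependencies] table moves the OCR stack (torch, transformers) and matplotlib out of the core install; they are pulled in via extras, as the pip install .[OCR,plotting] change in the workflow above shows. A minimal sketch of how code typically guards such optional imports; the helper below is illustrative and not part of eynollah:

# Hypothetical guard for the optional OCR extra (installed via: pip install eynollah[OCR]).
try:
    import torch          # provided by the "OCR" extra
    import transformers   # provided by the "OCR" extra
    OCR_AVAILABLE = True
except ImportError:
    OCR_AVAILABLE = False

def require_ocr():
    # Fail early with an actionable message instead of a bare ImportError later on.
    if not OCR_AVAILABLE:
        raise RuntimeError("OCR support is not installed; run: pip install eynollah[OCR]")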
@@ -4,8 +4,5 @@ numpy <1.24.0
 scikit-learn >= 0.23.2
 tensorflow < 2.13
 imutils >= 0.5.3
-matplotlib
-setuptools >= 50
-transformers <= 4.30.2
-torch <= 2.0.1
 numba <= 0.58.1
+loky
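loky joins the core requirements while matplotlib, setuptools, torch and transformers drop out (the heavy dependencies now live behind the extras above). loky provides a reusable process-pool executor whose map, like the builtin map, accepts several iterables, which is the shape expected by the map= parameter introduced in the contour helpers further down. A minimal sketch, assuming only loky's documented get_reusable_executor:

from loky import get_reusable_executor

executor = get_reusable_executor(max_workers=4)
# Executor.map mirrors the builtin map() and accepts multiple iterables.
squares = list(executor.map(pow, range(8), [2] * 8))
print(squares)  # [0, 1, 4, 9, 16, 25, 36, 49]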
@@ -97,6 +97,12 @@ def binarization(patches, model_dir, input_image, output_image, dir_in, dir_out)
     type=click.Path(exists=True, file_okay=False),
     required=True,
 )
+@click.option(
+    "--overwrite",
+    "-O",
+    help="overwrite (instead of skipping) if output xml exists",
+    is_flag=True,
+)
 @click.option(
     "--dir_in",
     "-di",
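The new -O/--overwrite option is declared with is_flag=True, so click passes it to the command function as a plain boolean (False unless the flag is given). A standalone sketch of that behaviour; the demo command itself is illustrative, not eynollah code:

import click

@click.command()
@click.option(
    "--overwrite",
    "-O",
    help="overwrite (instead of skipping) if output xml exists",
    is_flag=True,
)
def demo(overwrite):
    # click injects the flag as a boolean keyword argument, just like the
    # 'overwrite' parameter added to layout() in the next hunk.
    click.echo("overwrite=%r" % overwrite)

if __name__ == "__main__":
    demo()   # e.g. `python demo.py -O` prints overwrite=True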
@@ -253,10 +259,10 @@ def binarization(patches, model_dir, input_image, output_image, dir_in, dir_out)
     help="Override log level globally to this",
 )
 
-def layout(image, out, dir_in, model, save_images, save_layout, save_deskewed, save_all, extract_only_images, save_page, enable_plotting, allow_enhancement, curved_line, textline_light, full_layout, tables, right2left, input_binary, allow_scaling, headers_off, light_version, reading_order_machine_based, do_ocr, num_col_upper, num_col_lower, skip_layout_and_reading_order, ignore_page_extraction, log_level):
-    if log_level:
-        setOverrideLogLevel(log_level)
+def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_deskewed, save_all, extract_only_images, save_page, enable_plotting, allow_enhancement, curved_line, textline_light, full_layout, tables, right2left, input_binary, allow_scaling, headers_off, light_version, reading_order_machine_based, do_ocr, num_col_upper, num_col_lower, skip_layout_and_reading_order, ignore_page_extraction, log_level):
     initLogging()
+    if log_level:
+        getLogger('eynollah').setLevel(getLevelName(log_level))
     if not enable_plotting and (save_layout or save_deskewed or save_all or save_page or save_images or allow_enhancement):
         print("Error: You used one of -sl, -sd, -sa, -sp, -si or -ae but did not enable plotting with -ep")
         sys.exit(1)
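Instead of overriding the log level globally, the CLI now raises the level only on the 'eynollah' logger, and only after initLogging(). A small sketch of the underlying standard-library calls (getLevelName maps a level name such as "DEBUG" to its numeric value, which setLevel accepts):

import logging
from logging import getLogger, getLevelName

logging.basicConfig()                      # stand-in for initLogging()
log_level = "DEBUG"                        # e.g. taken from --log_level
getLogger('eynollah').setLevel(getLevelName(log_level))

getLogger('eynollah').debug("now visible")          # emitted
getLogger('some.other.lib').debug("still hidden")   # root level is unchanged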
@@ -273,6 +279,7 @@ def layout(image, out, dir_in, model, save_images, save_layout, save_deskewed, s
         sys.exit(1)
     eynollah = Eynollah(
         image_filename=image,
+        overwrite=overwrite,
         dir_out=out,
         dir_in=dir_in,
         dir_models=model,
File diff suppressed because it is too large.
@@ -1,10 +1,10 @@
+from functools import partial
 import cv2
 import numpy as np
 from shapely import geometry
 
 from .rotate import rotate_image, rotation_image_new
-from multiprocessing import Process, Queue, cpu_count
-from multiprocessing import Pool
+
 def contours_in_same_horizon(cy_main_hor):
     X1 = np.zeros((len(cy_main_hor), len(cy_main_hor)))
     X2 = np.zeros((len(cy_main_hor), len(cy_main_hor)))
@@ -27,37 +27,33 @@ def find_contours_mean_y_diff(contours_main):
     cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))]
     return np.mean(np.diff(np.sort(np.array(cy_main))))
 
 
 def get_text_region_boxes_by_given_contours(contours):
-    kernel = np.ones((5, 5), np.uint8)
     boxes = []
     contours_new = []
     for jj in range(len(contours)):
-        x, y, w, h = cv2.boundingRect(contours[jj])
-        boxes.append([x, y, w, h])
+        box = cv2.boundingRect(contours[jj])
+        boxes.append(box)
         contours_new.append(contours[jj])
 
-    del contours
     return boxes, contours_new
 
 def filter_contours_area_of_image(image, contours, hierarchy, max_area, min_area):
-    found_polygons_early = list()
+    found_polygons_early = []
     for jv,c in enumerate(contours):
         if len(c) < 3:  # A polygon cannot have less than 3 points
             continue
 
         polygon = geometry.Polygon([point[0] for point in c])
         area = polygon.area
-        if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]) and hierarchy[0][jv][3] == -1:  # and hierarchy[0][jv][3]==-1 :
-            found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.uint))
+        if (area >= min_area * np.prod(image.shape[:2]) and
+            area <= max_area * np.prod(image.shape[:2]) and
+            hierarchy[0][jv][3] == -1):
+            found_polygons_early.append(np.array([[point]
+                                                  for point in polygon.exterior.coords], dtype=np.uint))
     return found_polygons_early
 
 def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, min_area):
-    found_polygons_early = list()
+    found_polygons_early = []
     for jv,c in enumerate(contours):
         if len(c) < 3:  # A polygon cannot have less than 3 points
             continue
@@ -68,48 +64,59 @@ def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, m
         ##print(np.prod(thresh.shape[:2]))
         # Check that polygon has area greater than minimal area
         # print(hierarchy[0][jv][3],hierarchy )
-        if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]):  # and hierarchy[0][jv][3]==-1 :
+        if (area >= min_area * np.prod(image.shape[:2]) and
+            area <= max_area * np.prod(image.shape[:2]) and
+            # hierarchy[0][jv][3]==-1
+            True):
             # print(c[0][0][1])
-            found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.int32))
+            found_polygons_early.append(np.array([[point]
+                                                  for point in polygon.exterior.coords], dtype=np.int32))
     return found_polygons_early
 
 def find_new_features_of_contours(contours_main):
-    areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))])
-    M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))]
-    cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))]
-    cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))]
+    areas_main = np.array([cv2.contourArea(contours_main[j])
+                           for j in range(len(contours_main))])
+    M_main = [cv2.moments(contours_main[j])
+              for j in range(len(contours_main))]
+    cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32))
+               for j in range(len(M_main))]
+    cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32))
+               for j in range(len(M_main))]
     try:
-        x_min_main = np.array([np.min(contours_main[j][:, 0, 0]) for j in range(len(contours_main))])
-        argmin_x_main = np.array([np.argmin(contours_main[j][:, 0, 0]) for j in range(len(contours_main))])
-        x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0, 0] for j in range(len(contours_main))])
-        y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0, 1] for j in range(len(contours_main))])
-        x_max_main = np.array([np.max(contours_main[j][:, 0, 0]) for j in range(len(contours_main))])
-        y_min_main = np.array([np.min(contours_main[j][:, 0, 1]) for j in range(len(contours_main))])
-        y_max_main = np.array([np.max(contours_main[j][:, 0, 1]) for j in range(len(contours_main))])
+        x_min_main = np.array([np.min(contours_main[j][:, 0, 0])
+                               for j in range(len(contours_main))])
+        argmin_x_main = np.array([np.argmin(contours_main[j][:, 0, 0])
+                                  for j in range(len(contours_main))])
+        x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0, 0]
+                                      for j in range(len(contours_main))])
+        y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0, 1]
+                                             for j in range(len(contours_main))])
+        x_max_main = np.array([np.max(contours_main[j][:, 0, 0])
+                               for j in range(len(contours_main))])
+        y_min_main = np.array([np.min(contours_main[j][:, 0, 1])
+                               for j in range(len(contours_main))])
+        y_max_main = np.array([np.max(contours_main[j][:, 0, 1])
+                               for j in range(len(contours_main))])
     except:
-        x_min_main = np.array([np.min(contours_main[j][:, 0]) for j in range(len(contours_main))])
-        argmin_x_main = np.array([np.argmin(contours_main[j][:, 0]) for j in range(len(contours_main))])
-        x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0] for j in range(len(contours_main))])
-        y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 1] for j in range(len(contours_main))])
-        x_max_main = np.array([np.max(contours_main[j][:, 0]) for j in range(len(contours_main))])
-        y_min_main = np.array([np.min(contours_main[j][:, 1]) for j in range(len(contours_main))])
-        y_max_main = np.array([np.max(contours_main[j][:, 1]) for j in range(len(contours_main))])
+        x_min_main = np.array([np.min(contours_main[j][:, 0])
+                               for j in range(len(contours_main))])
+        argmin_x_main = np.array([np.argmin(contours_main[j][:, 0])
+                                  for j in range(len(contours_main))])
+        x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0]
+                                      for j in range(len(contours_main))])
+        y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 1]
+                                             for j in range(len(contours_main))])
+        x_max_main = np.array([np.max(contours_main[j][:, 0])
+                               for j in range(len(contours_main))])
+        y_min_main = np.array([np.min(contours_main[j][:, 1])
+                               for j in range(len(contours_main))])
+        y_max_main = np.array([np.max(contours_main[j][:, 1])
+                               for j in range(len(contours_main))])
     # dis_x=np.abs(x_max_main-x_min_main)
 
     return cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin
-def find_features_of_contours(contours_main):
+
+
+def find_features_of_contours(contours_main):
     areas_main=np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))])
     M_main=[cv2.moments(contours_main[j]) for j in range(len(contours_main))]
     cx_main=[(M_main[j]['m10']/(M_main[j]['m00']+1e-32)) for j in range(len(M_main))]
@@ -120,14 +127,15 @@ def find_features_of_contours(contours_main):
     y_min_main=np.array([np.min(contours_main[j][:,0,1]) for j in range(len(contours_main))])
     y_max_main=np.array([np.max(contours_main[j][:,0,1]) for j in range(len(contours_main))])
 
     return y_min_main, y_max_main
 
 def return_parent_contours(contours, hierarchy):
-    contours_parent = [contours[i] for i in range(len(contours)) if hierarchy[0][i][3] == -1]
+    contours_parent = [contours[i]
+                       for i in range(len(contours))
+                       if hierarchy[0][i][3] == -1]
     return contours_parent
 
 def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002):
 
     # pixels of images are identified by 5
     if len(region_pre_p.shape) == 3:
         cnts_images = (region_pre_p[:, :, 0] == pixel) * 1
@@ -139,80 +147,16 @@ def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002):
     ret, thresh = cv2.threshold(imgray, 0, 255, 0)
 
     contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 
     contours_imgs = return_parent_contours(contours_imgs, hierarchy)
-    contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=min_area)
+    contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy,
+                                                         max_area=1, min_area=min_area)
     return contours_imgs
 
-def do_work_of_contours_in_image(queue_of_all_params, contours_per_process, indexes_r_con_per_pro, img, slope_first):
-    cnts_org_per_each_subprocess = []
-    index_by_text_region_contours = []
-    for mv in range(len(contours_per_process)):
-        index_by_text_region_contours.append(indexes_r_con_per_pro[mv])
-
-        img_copy = np.zeros(img.shape)
-        img_copy = cv2.fillPoly(img_copy, pts=[contours_per_process[mv]], color=(1, 1, 1))
-
-        img_copy = rotation_image_new(img_copy, -slope_first)
-
-        img_copy = img_copy.astype(np.uint8)
-        imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
-        ret, thresh = cv2.threshold(imgray, 0, 255, 0)
-
-        cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
-
-        cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
-        cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
-
-        cnts_org_per_each_subprocess.append(cont_int[0])
-
-    queue_of_all_params.put([ cnts_org_per_each_subprocess, index_by_text_region_contours])
-
-def get_textregion_contours_in_org_image_multi(cnts, img, slope_first):
-    num_cores = cpu_count()
-    queue_of_all_params = Queue()
-    processes = []
-    nh = np.linspace(0, len(cnts), num_cores + 1)
-    indexes_by_text_con = np.array(range(len(cnts)))
-    for i in range(num_cores):
-        contours_per_process = cnts[int(nh[i]) : int(nh[i + 1])]
-        indexes_text_con_per_process = indexes_by_text_con[int(nh[i]) : int(nh[i + 1])]
-
-        processes.append(Process(target=do_work_of_contours_in_image, args=(queue_of_all_params, contours_per_process, indexes_text_con_per_process, img,slope_first )))
-    for i in range(num_cores):
-        processes[i].start()
-    cnts_org = []
-    all_index_text_con = []
-    for i in range(num_cores):
-        list_all_par = queue_of_all_params.get(True)
-        contours_for_sub_process = list_all_par[0]
-        indexes_for_sub_process = list_all_par[1]
-        for j in range(len(contours_for_sub_process)):
-            cnts_org.append(contours_for_sub_process[j])
-            all_index_text_con.append(indexes_for_sub_process[j])
-    for i in range(num_cores):
-        processes[i].join()
-
-    print(all_index_text_con)
-    return cnts_org
-
-def loop_contour_image(index_l, cnts,img, slope_first):
+def do_work_of_contours_in_image(contour, index_r_con, img, slope_first):
     img_copy = np.zeros(img.shape)
-    img_copy = cv2.fillPoly(img_copy, pts=[cnts[index_l]], color=(1, 1, 1))
-
-    # plt.imshow(img_copy)
-    # plt.show()
-
-    # print(img.shape,'img')
+    img_copy = cv2.fillPoly(img_copy, pts=[contour], color=(1, 1, 1))
     img_copy = rotation_image_new(img_copy, -slope_first)
-    ##print(img_copy.shape,'img_copy')
-    # plt.imshow(img_copy)
-    # plt.show()
-
     img_copy = img_copy.astype(np.uint8)
     imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
     ret, thresh = cv2.threshold(imgray, 0, 255, 0)
@@ -221,20 +165,20 @@ def loop_contour_image(index_l, cnts,img, slope_first):
 
     cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
     cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
-    # print(np.shape(cont_int[0]))
-    return cont_int[0]
-
-def get_textregion_contours_in_org_image_multi2(cnts, img, slope_first):
-    cnts_org = []
-    # print(cnts,'cnts')
-    with Pool(cpu_count()) as p:
-        cnts_org = p.starmap(loop_contour_image, [(index_l,cnts, img,slope_first) for index_l in range(len(cnts))])
-    return cnts_org
+    return cont_int[0], index_r_con
+
+def get_textregion_contours_in_org_image_multi(cnts, img, slope_first, map=map):
+    if not len(cnts):
+        return [], []
+    results = map(partial(do_work_of_contours_in_image,
+                          img=img,
+                          slope_first=slope_first,
+                          ),
+                  cnts, range(len(cnts)))
+    return tuple(zip(*results))
 
 def get_textregion_contours_in_org_image(cnts, img, slope_first):
 
     cnts_org = []
     # print(cnts,'cnts')
     for i in range(len(cnts)):
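The explicit Process/Queue fan-out is gone: the worker now handles one contour and returns (contour, index), and get_textregion_contours_in_org_image_multi takes an injectable map= callable, so the caller decides whether the work runs serially or in a pool. A usage sketch; the module path, the executor choice and the toy inputs are assumptions, not taken from the diff:

import numpy as np
from loky import get_reusable_executor
# Module path assumed; the function signature is the one introduced above.
from eynollah.utils.contour import get_textregion_contours_in_org_image_multi

# Toy stand-ins for real page data: one square contour on a small RGB image.
img = np.zeros((100, 100, 3), dtype=np.uint8)
cnts = [np.array([[[10, 10]], [[10, 40]], [[40, 40]], [[40, 10]]], dtype=np.int32)]

# Serial execution via the default map=map.
contours, indexes = get_textregion_contours_in_org_image_multi(cnts, img, slope_first=0)

# Parallel execution: any map that accepts two iterables works, e.g. a loky executor.
executor = get_reusable_executor(max_workers=4)
contours, indexes = get_textregion_contours_in_org_image_multi(
    cnts, img, slope_first=0, map=executor.map)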
@@ -255,7 +199,6 @@ def get_textregion_contours_in_org_image(cnts, img, slope_first):
         ret, thresh = cv2.threshold(imgray, 0, 255, 0)
 
         cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
-
         cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
         cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
         # print(np.shape(cont_int[0]))
@@ -264,101 +207,56 @@ def get_textregion_contours_in_org_image(cnts, img, slope_first):
     return cnts_org
 
 def get_textregion_contours_in_org_image_light_old(cnts, img, slope_first):
-    h_o = img.shape[0]
-    w_o = img.shape[1]
-
-    img = cv2.resize(img, (int(img.shape[1]/3.), int(img.shape[0]/3.)), interpolation=cv2.INTER_NEAREST)
-    ##cnts = list( (np.array(cnts)/2).astype(np.int16) )
-    #cnts = cnts/2
-    cnts = [(i/ 3).astype(np.int32) for i in cnts]
+    zoom = 3
+    img = cv2.resize(img, (img.shape[1] // zoom,
+                           img.shape[0] // zoom),
+                     interpolation=cv2.INTER_NEAREST)
     cnts_org = []
-    #print(cnts,'cnts')
-    for i in range(len(cnts)):
+    for cnt in cnts:
         img_copy = np.zeros(img.shape)
-        img_copy = cv2.fillPoly(img_copy, pts=[cnts[i]], color=(1, 1, 1))
-
-        img_copy = rotation_image_new(img_copy, -slope_first)
-
-        img_copy = img_copy.astype(np.uint8)
+        img_copy = cv2.fillPoly(img_copy, pts=[(cnt / zoom).astype(int)], color=(1, 1, 1))
+        img_copy = rotation_image_new(img_copy, -slope_first).astype(np.uint8)
         imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
         ret, thresh = cv2.threshold(imgray, 0, 255, 0)
 
         cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 
         cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
         cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
-        # print(np.shape(cont_int[0]))
-        cnts_org.append(cont_int[0]*3)
+        cnts_org.append(cont_int[0] * zoom)
 
     return cnts_org
 
-def return_list_of_contours_with_desired_order(ls_cons, sorted_indexes):
-    return [ls_cons[sorted_indexes[index]] for index in range(len(sorted_indexes))]
-
-def do_back_rotation_and_get_cnt_back(queue_of_all_params, contours_par_per_process,indexes_r_con_per_pro, img, slope_first):
-    contours_textregion_per_each_subprocess = []
-    index_by_text_region_contours = []
-    for mv in range(len(contours_par_per_process)):
-        img_copy = np.zeros(img.shape)
-        img_copy = cv2.fillPoly(img_copy, pts=[contours_par_per_process[mv]], color=(1, 1, 1))
-
-        img_copy = rotation_image_new(img_copy, -slope_first)
-
-        img_copy = img_copy.astype(np.uint8)
-        imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
-        ret, thresh = cv2.threshold(imgray, 0, 255, 0)
-
-        cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
-
-        cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
-        cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
-        # print(np.shape(cont_int[0]))
-        contours_textregion_per_each_subprocess.append(cont_int[0]*6)
-        index_by_text_region_contours.append(indexes_r_con_per_pro[mv])
-
-    queue_of_all_params.put([contours_textregion_per_each_subprocess, index_by_text_region_contours])
-
-def get_textregion_contours_in_org_image_light(cnts, img, slope_first):
-    num_cores = cpu_count()
-    queue_of_all_params = Queue()
-    processes = []
-    nh = np.linspace(0, len(cnts), num_cores + 1)
-    indexes_by_text_con = np.array(range(len(cnts)))
-
-    h_o = img.shape[0]
-    w_o = img.shape[1]
-
-    img = cv2.resize(img, (int(img.shape[1]/6.), int(img.shape[0]/6.)), interpolation=cv2.INTER_NEAREST)
+def do_back_rotation_and_get_cnt_back(contour_par, index_r_con, img, slope_first):
+    img_copy = np.zeros(img.shape)
+    img_copy = cv2.fillPoly(img_copy, pts=[contour_par], color=(1, 1, 1))
+    img_copy = rotation_image_new(img_copy, -slope_first).astype(np.uint8)
+    imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
+    ret, thresh = cv2.threshold(imgray, 0, 255, 0)
+    cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+    cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
+    cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
+    # print(np.shape(cont_int[0]))
+    return cont_int[0], index_r_con
+
+def get_textregion_contours_in_org_image_light(cnts, img, slope_first, map=map):
+    if not len(cnts):
+        return []
+    img = cv2.resize(img, (int(img.shape[1]/6), int(img.shape[0]/6)), interpolation=cv2.INTER_NEAREST)
     ##cnts = list( (np.array(cnts)/2).astype(np.int16) )
     #cnts = cnts/2
-    cnts = [(i/ 6).astype(np.int32) for i in cnts]
-
-    for i in range(num_cores):
-        contours_par_per_process = cnts[int(nh[i]) : int(nh[i + 1])]
-        indexes_text_con_per_process = indexes_by_text_con[int(nh[i]) : int(nh[i + 1])]
-        processes.append(Process(target=do_back_rotation_and_get_cnt_back, args=(queue_of_all_params, contours_par_per_process, indexes_text_con_per_process, img, slope_first)))
-
-    for i in range(num_cores):
-        processes[i].start()
-
-    cnts_org = []
-    all_index_text_con = []
-    for i in range(num_cores):
-        list_all_par = queue_of_all_params.get(True)
-        contours_for_subprocess = list_all_par[0]
-        indexes_for_subprocess = list_all_par[1]
-        for j in range(len(contours_for_subprocess)):
-            cnts_org.append(contours_for_subprocess[j])
-            all_index_text_con.append(indexes_for_subprocess[j])
-    for i in range(num_cores):
-        processes[i].join()
-
-    cnts_org = return_list_of_contours_with_desired_order(cnts_org, all_index_text_con)
-
-    return cnts_org
+    cnts = [(i/6).astype(np.int) for i in cnts]
+    results = map(partial(do_back_rotation_and_get_cnt_back,
+                          img=img,
+                          slope_first=slope_first,
+                          ),
+                  cnts, range(len(cnts)))
+    contours, indexes = tuple(zip(*results))
+    return [i*6 for i in contours]
 
 def return_contours_of_interested_textline(region_pre_p, pixel):
 
     # pixels of images are identified by 5
     if len(region_pre_p.shape) == 3:
         cnts_images = (region_pre_p[:, :, 0] == pixel) * 1
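Both light variants keep their speed trick: image and contours are shrunk by a zoom factor (3 or 6), the back-rotation runs on the small image, and the resulting contours are multiplied back up. The price is that coordinates come back quantized to multiples of the zoom factor, as this small sketch with made-up numbers shows:

import numpy as np

zoom = 6
pts = np.array([[101, 37], [250, 249]])
# Same round trip as in get_textregion_contours_in_org_image_light:
# divide, truncate to int, multiply back.
roundtrip = (pts / zoom).astype(int) * zoom
print(roundtrip)  # [[ 96  36]
                  #  [246 246]]  -> at most zoom-1 pixels of error per coordinate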
@@ -371,11 +269,11 @@ def return_contours_of_interested_textline(region_pre_p, pixel):
     contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 
     contours_imgs = return_parent_contours(contours_imgs, hierarchy)
-    contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=0.000000003)
+    contours_imgs = filter_contours_area_of_image_tables(
+        thresh, contours_imgs, hierarchy, max_area=1, min_area=0.000000003)
     return contours_imgs
 
 def return_contours_of_image(image):
     if len(image.shape) == 2:
         image = np.repeat(image[:, :, np.newaxis], 3, axis=2)
         image = image.astype(np.uint8)
@@ -387,7 +285,6 @@ def return_contours_of_image(image):
     return contours, hierarchy
 
 def return_contours_of_interested_region_by_min_size(region_pre_p, pixel, min_size=0.00003):
-
     # pixels of images are identified by 5
     if len(region_pre_p.shape) == 3:
         cnts_images = (region_pre_p[:, :, 0] == pixel) * 1
@@ -399,14 +296,13 @@ def return_contours_of_interested_region_by_min_si
     ret, thresh = cv2.threshold(imgray, 0, 255, 0)
 
     contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
-
     contours_imgs = return_parent_contours(contours_imgs, hierarchy)
-    contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=min_size)
+    contours_imgs = filter_contours_area_of_image_tables(
+        thresh, contours_imgs, hierarchy, max_area=1, min_area=min_size)
 
     return contours_imgs
 
 def return_contours_of_interested_region_by_size(region_pre_p, pixel, min_area, max_area):
-
     # pixels of images are identified by 5
     if len(region_pre_p.shape) == 3:
         cnts_images = (region_pre_p[:, :, 0] == pixel) * 1
@@ -419,9 +315,11 @@ def return_contours_of_interested_region_by_size(region_pre_p, pixel, min_area,
     contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 
     contours_imgs = return_parent_contours(contours_imgs, hierarchy)
-    contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=max_area, min_area=min_area)
+    contours_imgs = filter_contours_area_of_image_tables(
+        thresh, contours_imgs, hierarchy, max_area=max_area, min_area=min_area)
+
 
     img_ret = np.zeros((region_pre_p.shape[0], region_pre_p.shape[1], 3))
     img_ret = cv2.fillPoly(img_ret, pts=contours_imgs, color=(1, 1, 1))
 
     return img_ret[:, :, 0]
File diff suppressed because it is too large.
@@ -28,6 +28,7 @@ class EynollahXmlWriter():
         self.counter = EynollahIdCounter()
         self.dir_out = dir_out
         self.image_filename = image_filename
+        self.output_filename = os.path.join(self.dir_out, self.image_filename_stem) + ".xml"
         self.curved_line = curved_line
         self.textline_light = textline_light
         self.pcgts = pcgts
@@ -60,6 +61,7 @@ class EynollahXmlWriter():
             coords = CoordsType()
             textline = TextLineType(id=counter.next_line_id, Coords=coords)
             marginal_region.add_TextLine(textline)
+            marginal_region.set_orientation(-slopes_marginals[marginal_idx])
             points_co = ''
             for l in range(len(all_found_textline_polygons_marginals[marginal_idx][j])):
                 if not (self.curved_line or self.textline_light):
@@ -102,6 +104,7 @@ class EynollahXmlWriter():
             if ocr_all_textlines_textregion:
                 textline.set_TextEquiv( [ TextEquivType(Unicode=ocr_all_textlines_textregion[j]) ] )
             text_region.add_TextLine(textline)
+            text_region.set_orientation(-slopes[region_idx])
             region_bboxes = all_box_coord[region_idx]
             points_co = ''
             for idx_contour_textline, contour_textline in enumerate(all_found_textline_polygons[region_idx][j]):
@@ -161,9 +164,8 @@ class EynollahXmlWriter():
         coords.set_points(points_co[:-1])
 
     def write_pagexml(self, pcgts):
-        out_fname = os.path.join(self.dir_out, self.image_filename_stem) + ".xml"
-        self.logger.info("output filename: '%s'", out_fname)
-        with open(out_fname, 'w') as f:
+        self.logger.info("output filename: '%s'", self.output_filename)
+        with open(self.output_filename, 'w') as f:
             f.write(to_xml(pcgts))
 
     def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, found_polygons_tables, ocr_all_textlines):
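With self.output_filename computed once in the writer's constructor, the target PAGE-XML path is known before any processing starts, which is what an overwrite-or-skip decision needs. The driver below is a hypothetical sketch of that interplay; only the output_filename attribute and write_pagexml come from the diff:

import os

def process_page(eynollah, writer, overwrite=False):
    # Hypothetical caller: consult the precomputed target path up front.
    if os.path.exists(writer.output_filename) and not overwrite:
        print("skipping %s (exists; pass -O/--overwrite to replace)" % writer.output_filename)
        return
    pcgts = eynollah.run()       # assumed to return the PAGE-XML document object
    writer.write_pagexml(pcgts)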