Mirror of https://github.com/qurator-spk/eynollah.git, synced 2025-07-04 16:39:56 +02:00
Commit ca6090fef1 (parent 482511adc0): attempting partial revert of dir_in mode from c606391c31
3 changed files with 270 additions and 268 deletions
--- changed file 1 of 3 ---

@@ -19,12 +19,13 @@ from eynollah.eynollah.eynollah import Eynollah
     type=click.Path(exists=True, file_okay=False),
     required=True,
 )
-@click.option(
-    "--dir_in",
-    "-di",
-    help="directory of images",
-    type=click.Path(exists=True, file_okay=False),
-)
+# temporary disable directory input
+# @click.option(
+#     "--dir_in",
+#     "-di",
+#     help="directory of images",
+#     type=click.Path(exists=True, file_okay=False),
+# )
 @click.option(
     "--model",
     "-m",
@@ -143,7 +144,7 @@ from eynollah.eynollah.eynollah import Eynollah
 def main(
     image,
     out,
-    dir_in,
+    # dir_in,
     model,
     save_images,
     save_layout,
@@ -179,7 +180,7 @@ def main(
     eynollah = Eynollah(
         image_filename=image,
         dir_out=out,
-        dir_in=dir_in,
+        # dir_in=dir_in,
         dir_models=model,
         dir_of_cropped_images=save_images,
         dir_of_layout=save_layout,
@@ -199,9 +200,9 @@ def main(
         light_version=light_version,
         ignore_page_extraction=ignore_page_extraction,
     )
-    eynollah.run()
-    # pcgts = eynollah.run()
-    # eynollah.writer.write_pagexml(pcgts)
+    # eynollah.run()
+    pcgts = eynollah.run()
+    eynollah.writer.write_pagexml(pcgts)


 if __name__ == "__main__":
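Aside: the net effect of this first file is that --dir_in/-di is disabled and main() once again collects the result of run() and serializes it itself. A minimal sketch of the restored single-image flow, assuming placeholder paths (the constructor keywords are the ones visible in the hunks above):

    from eynollah.eynollah.eynollah import Eynollah

    eynollah = Eynollah(
        dir_models="/path/to/models",        # --model / -m
        image_filename="/path/to/page.tif",  # the input page image
        dir_out="/path/to/output",
    )
    pcgts = eynollah.run()                  # run() returns the PAGE document again
    eynollah.writer.write_pagexml(pcgts)    # and the caller writes the PAGE-XML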
--- changed file 2 of 3 ---

@@ -145,11 +145,11 @@ class Eynollah:
     def __init__(
         self,
         dir_models,
-        image_filename=None,
+        image_filename,
         image_pil=None,
         image_filename_stem=None,
         dir_out=None,
-        dir_in=None,
+        # dir_in=None,
         dir_of_cropped_images=None,
         dir_of_layout=None,
         dir_of_deskewed=None,
@@ -171,16 +171,16 @@ class Eynollah:
         logger=None,
         pcgts=None,
     ):
-        if not dir_in:
-            if image_pil:
-                self._imgs = self._cache_images(image_pil=image_pil)
-            else:
-                self._imgs = self._cache_images(image_filename=image_filename)
-            if override_dpi:
-                self.dpi = override_dpi
-            self.image_filename = image_filename
+        # if not dir_in:
+        # if image_pil:
+        # self._imgs = self._cache_images(image_pil=image_pil)
+        # else:
+        # self._imgs = self._cache_images(image_filename=image_filename)
+        # if override_dpi:
+        # self.dpi = override_dpi
+        # self.image_filename = image_filename
         self.dir_out = dir_out
-        self.dir_in = dir_in
+        # self.dir_in = dir_in
         self.dir_of_all = dir_of_all
         self.dir_save_page = dir_save_page
         self.dir_of_deskewed = dir_of_deskewed
@@ -200,7 +200,7 @@ class Eynollah:
         self.light_version = light_version
         self.ignore_page_extraction = ignore_page_extraction
         self.pcgts = pcgts
-        if not dir_in:
+        # if not dir_in:
             self.plotter = None if not enable_plotting else EynollahPlotter(
                 dir_out=self.dir_out,
                 dir_of_all=dir_of_all,
@@ -236,39 +236,39 @@ class Eynollah:

         self.models = {}

-        if dir_in and light_version:
-            config = tf.compat.v1.ConfigProto()
-            config.gpu_options.allow_growth = True
-            session = tf.compat.v1.Session(config=config)
-            set_session(session)
+        # if dir_in and light_version:
+        # config = tf.compat.v1.ConfigProto()
+        # config.gpu_options.allow_growth = True
+        # session = tf.compat.v1.Session(config=config)
+        # set_session(session)

-            self.model_page = self.our_load_model(self.model_page_dir)
-            self.model_classifier = self.our_load_model(self.model_dir_of_col_classifier)
-            self.model_bin = self.our_load_model(self.model_dir_of_binarization)
-            self.model_textline = self.our_load_model(self.model_textline_dir)
-            self.model_region = self.our_load_model(self.model_region_dir_p_ens_light)
-            self.model_region_fl_np = self.our_load_model(self.model_region_dir_fully_np)
-            self.model_region_fl = self.our_load_model(self.model_region_dir_fully)
+        # self.model_page = self.our_load_model(self.model_page_dir)
+        # self.model_classifier = self.our_load_model(self.model_dir_of_col_classifier)
+        # self.model_bin = self.our_load_model(self.model_dir_of_binarization)
+        # self.model_textline = self.our_load_model(self.model_textline_dir)
+        # self.model_region = self.our_load_model(self.model_region_dir_p_ens_light)
+        # self.model_region_fl_np = self.our_load_model(self.model_region_dir_fully_np)
+        # self.model_region_fl = self.our_load_model(self.model_region_dir_fully)

-            self.ls_imgs = os.listdir(self.dir_in)
+        # self.ls_imgs = os.listdir(self.dir_in)

-        if dir_in and not light_version:
-            config = tf.compat.v1.ConfigProto()
-            config.gpu_options.allow_growth = True
-            session = tf.compat.v1.Session(config=config)
-            set_session(session)
+        # if dir_in and not light_version:
+        # config = tf.compat.v1.ConfigProto()
+        # config.gpu_options.allow_growth = True
+        # session = tf.compat.v1.Session(config=config)
+        # set_session(session)

-            self.model_page = self.our_load_model(self.model_page_dir)
-            self.model_classifier = self.our_load_model(self.model_dir_of_col_classifier)
-            self.model_bin = self.our_load_model(self.model_dir_of_binarization)
-            self.model_textline = self.our_load_model(self.model_textline_dir)
-            self.model_region = self.our_load_model(self.model_region_dir_p_ens)
-            self.model_region_p2 = self.our_load_model(self.model_region_dir_p2)
-            self.model_region_fl_np = self.our_load_model(self.model_region_dir_fully_np)
-            self.model_region_fl = self.our_load_model(self.model_region_dir_fully)
-            self.model_enhancement = self.our_load_model(self.model_dir_of_enhancement)
+        # self.model_page = self.our_load_model(self.model_page_dir)
+        # self.model_classifier = self.our_load_model(self.model_dir_of_col_classifier)
+        # self.model_bin = self.our_load_model(self.model_dir_of_binarization)
+        # self.model_textline = self.our_load_model(self.model_textline_dir)
+        # self.model_region = self.our_load_model(self.model_region_dir_p_ens)
+        # self.model_region_p2 = self.our_load_model(self.model_region_dir_p2)
+        # self.model_region_fl_np = self.our_load_model(self.model_region_dir_fully_np)
+        # self.model_region_fl = self.our_load_model(self.model_region_dir_fully)
+        # self.model_enhancement = self.our_load_model(self.model_dir_of_enhancement)

-            self.ls_imgs = os.listdir(self.dir_in)
+        # self.ls_imgs = os.listdir(self.dir_in)

     def _cache_images(self, image_filename=None, image_pil=None):
         ret = {}
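Aside: the __init__ hunks above toggle between two model-loading strategies. In dir_in mode every model was loaded once up front (self.model_page, self.model_bin, and so on) and reused across a whole directory; the restored behavior loads a model inside each processing step via start_new_session_and_model. A self-contained sketch of that trade-off, with a dummy loader and hypothetical names (purely illustrative, not the eynollah API):

    import time

    def load_model(path):
        time.sleep(0.01)  # stand-in for an expensive TensorFlow model load
        return ("model", path)

    # Restored strategy: load per call; cheap startup, repeated load cost per page.
    def predict_per_call(img, path="page-model"):
        model = load_model(path)
        return (model, img)

    # dir_in strategy now commented out: preload once, reuse for many pages.
    class PreloadedPredictor:
        def __init__(self, path="page-model"):
            self.model = load_model(path)  # paid once, in __init__
        def predict(self, img):
            return (self.model, img)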
@@ -283,9 +283,9 @@ class Eynollah:
         ret[f'img{prefix}_uint8'] = ret[f'img{prefix}'].astype(np.uint8)
         return ret

-    def reset_file_name_dir(self, image_filename):
-        self._imgs = self._cache_images(image_filename=image_filename)
-        self.image_filename = image_filename
+    # def reset_file_name_dir(self, image_filename):
+    # self._imgs = self._cache_images(image_filename=image_filename)
+    # self.image_filename = image_filename

         self.plotter = None if not self.enable_plotting else EynollahPlotter(
             dir_out=self.dir_out,
@@ -476,7 +476,7 @@ class Eynollah:
         img = self.imread()

         _, page_coord = self.early_page_for_num_of_column_classification(img)
-        if not self.dir_in:
+        # if not self.dir_in:
             model_num_classifier, session_col_classifier = self.start_new_session_and_model(
                 self.model_dir_of_col_classifier)
         if self.input_binary:
@@ -501,10 +501,10 @@ class Eynollah:
             img_in[0, :, :, 1] = img_1ch[:, :]
             img_in[0, :, :, 2] = img_1ch[:, :]

-            if not self.dir_in:
+            # if not self.dir_in:
                 label_p_pred = model_num_classifier.predict(img_in, verbose=0)
-            else:
-                label_p_pred = self.model_classifier.predict(img_in, verbose=0)
+            # else:
+            # label_p_pred = self.model_classifier.predict(img_in, verbose=0)

             num_col = np.argmax(label_p_pred[0]) + 1

@@ -524,9 +524,9 @@ class Eynollah:
         self.logger.info("Detected %s DPI", dpi)
         if self.input_binary:
             img = self.imread()
-            if self.dir_in:
-                prediction_bin = self.do_prediction(True, img, self.model_bin)
-            else:
+            # if self.dir_in:
+            # prediction_bin = self.do_prediction(True, img, self.model_bin)
+            # else:

                 model_bin, session_bin = self.start_new_session_and_model(self.model_dir_of_binarization)
                 prediction_bin = self.do_prediction(True, img, model_bin)
@@ -546,7 +546,7 @@ class Eynollah:

         t1 = time.time()
         _, page_coord = self.early_page_for_num_of_column_classification(img_bin)
-        if not self.dir_in:
+        # if not self.dir_in:
             model_num_classifier, session_col_classifier = self.start_new_session_and_model(
                 self.model_dir_of_col_classifier)

@@ -568,9 +568,10 @@ class Eynollah:
             img_in[0, :, :, 1] = img_1ch[:, :]
             img_in[0, :, :, 2] = img_1ch[:, :]

-            if self.dir_in:
-                label_p_pred = self.model_classifier.predict(img_in, verbose=0)
-            else:
+            # if self.dir_in:
+            # label_p_pred = self.model_classifier.predict(img_in, verbose=0)
+            # else:
+            # label_p_pred = model_num_classifier.predict(img_in, verbose=0)
             label_p_pred = model_num_classifier.predict(img_in, verbose=0)
             num_col = np.argmax(label_p_pred[0]) + 1

@@ -984,13 +985,13 @@ class Eynollah:
         if not self.ignore_page_extraction:
             img = cv2.GaussianBlur(self.image, (5, 5), 0)

-            if not self.dir_in:
+            # if not self.dir_in:
                 model_page, session_page = self.start_new_session_and_model(self.model_page_dir)

-            if not self.dir_in:
+            # if not self.dir_in:
                 img_page_prediction = self.do_prediction(False, img, model_page)
-            else:
-                img_page_prediction = self.do_prediction(False, img, self.model_page)
+            # else:
+            # img_page_prediction = self.do_prediction(False, img, self.model_page)
             imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY)
             _, thresh = cv2.threshold(imgray, 0, 255, 0)
             thresh = cv2.dilate(thresh, KERNEL, iterations=3)
@@ -1036,13 +1037,13 @@ class Eynollah:
                 img = img.astype(np.uint8)
             else:
                 img = self.imread()
-            if not self.dir_in:
+            # if not self.dir_in:
                 model_page, session_page = self.start_new_session_and_model(self.model_page_dir)
             img = cv2.GaussianBlur(img, (5, 5), 0)

-            if self.dir_in:
-                img_page_prediction = self.do_prediction(False, img, self.model_page)
-            else:
+            # if self.dir_in:
+            # img_page_prediction = self.do_prediction(False, img, self.model_page)
+            # else:
                 img_page_prediction = self.do_prediction(False, img, model_page)

             imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY)
@@ -1069,11 +1070,11 @@ class Eynollah:
         self.logger.debug("enter extract_text_regions")
         img_height_h = img.shape[0]
         img_width_h = img.shape[1]
-        if not self.dir_in:
+        # if not self.dir_in:
             model_region, session_region = self.start_new_session_and_model(
                 self.model_region_dir_fully if patches else self.model_region_dir_fully_np)
-        else:
-            model_region = self.model_region_fl if patches else self.model_region_fl_np
+        # else:
+        # model_region = self.model_region_fl if patches else self.model_region_fl_np

         if not patches:
             img = otsu_copy_binary(img)
@@ -1588,7 +1589,7 @@ class Eynollah:

     def textline_contours(self, img, patches, scaler_h, scaler_w):
         self.logger.debug('enter textline_contours')
-        if not self.dir_in:
+        # if not self.dir_in:
             model_textline, session_textline = self.start_new_session_and_model(
                 self.model_textline_dir if patches else self.model_textline_dir_np)
         img = img.astype(np.uint8)
@@ -1596,15 +1597,15 @@ class Eynollah:
         img_h = img_org.shape[0]
         img_w = img_org.shape[1]
         img = resize_image(img_org, int(img_org.shape[0] * scaler_h), int(img_org.shape[1] * scaler_w))
-        if not self.dir_in:
+        # if not self.dir_in:
             prediction_textline = self.do_prediction(patches, img, model_textline)
-        else:
-            prediction_textline = self.do_prediction(patches, img, self.model_textline)
+        # else:
+        # prediction_textline = self.do_prediction(patches, img, self.model_textline)
         prediction_textline = resize_image(prediction_textline, img_h, img_w)
-        if not self.dir_in:
+        # if not self.dir_in:
             prediction_textline_longshot = self.do_prediction(False, img, model_textline)
-        else:
-            prediction_textline_longshot = self.do_prediction(False, img, self.model_textline)
+        # else:
+        # prediction_textline_longshot = self.do_prediction(False, img, self.model_textline)
         prediction_textline_longshot_true_size = resize_image(prediction_textline_longshot, img_h, img_w)

         if self.textline_light:
@@ -1681,11 +1682,11 @@ class Eynollah:
         img_h_new = int(img_org.shape[0] / float(img_org.shape[1]) * img_w_new)
         img_resized = resize_image(img, img_h_new, img_w_new)

-        if not self.dir_in:
+        # if not self.dir_in:
             model_bin, session_bin = self.start_new_session_and_model(self.model_dir_of_binarization)
             prediction_bin = self.do_prediction(True, img_resized, model_bin)
-        else:
-            prediction_bin = self.do_prediction(True, img_resized, self.model_bin)
+        # else:
+        # prediction_bin = self.do_prediction(True, img_resized, self.model_bin)
         prediction_bin = prediction_bin[:, :, 0]
         prediction_bin = (prediction_bin[:, :] == 0) * 1
         prediction_bin = prediction_bin * 255
@@ -1698,11 +1699,11 @@ class Eynollah:

         textline_mask_tot_ea = self.run_textline(img_bin)

-        if not self.dir_in:
+        # if not self.dir_in:
             model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_ens_light)
             prediction_regions_org = self.do_prediction_new_concept(True, img_bin, model_region)
-        else:
-            prediction_regions_org = self.do_prediction_new_concept(True, img_bin, self.model_region)
+        # else:
+        # prediction_regions_org = self.do_prediction_new_concept(True, img_bin, self.model_region)

         # plt.imshow(prediction_regions_org[:,:,0])
         # plt.show()
@@ -1744,17 +1745,17 @@ class Eynollah:
         img_height_h = img_org.shape[0]
         img_width_h = img_org.shape[1]

-        if not self.dir_in:
+        # if not self.dir_in:
             model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_ens)

         ratio_y = 1.3
         ratio_x = 1

         img = resize_image(img_org, int(img_org.shape[0] * ratio_y), int(img_org.shape[1] * ratio_x))
-        if not self.dir_in:
+        # if not self.dir_in:
             prediction_regions_org_y = self.do_prediction(True, img, model_region)
-        else:
-            prediction_regions_org_y = self.do_prediction(True, img, self.model_region)
+        # else:
+        # prediction_regions_org_y = self.do_prediction(True, img, self.model_region)
         prediction_regions_org_y = resize_image(prediction_regions_org_y, img_height_h, img_width_h)

         # plt.imshow(prediction_regions_org_y[:,:,0])
@@ -1774,23 +1775,23 @@ class Eynollah:
             img = resize_image(img_org, int(img_org.shape[0]),
                                int(img_org.shape[1] * (1.2 if is_image_enhanced else 1)))

-            if self.dir_in:
-                prediction_regions_org = self.do_prediction(True, img, self.model_region)
-            else:
+            # if self.dir_in:
+            # prediction_regions_org = self.do_prediction(True, img, self.model_region)
+            # else:
                 prediction_regions_org = self.do_prediction(True, img, model_region)
             prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h)

             prediction_regions_org = prediction_regions_org[:, :, 0]
             prediction_regions_org[(prediction_regions_org[:, :] == 1) & (mask_zeros_y[:, :] == 1)] = 0

-            if not self.dir_in:
+            # if not self.dir_in:
                 model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p2)

             img = resize_image(img_org, int(img_org.shape[0]), int(img_org.shape[1]))

-            if self.dir_in:
-                prediction_regions_org2 = self.do_prediction(True, img, self.model_region_p2, 0.2)
-            else:
+            # if self.dir_in:
+            # prediction_regions_org2 = self.do_prediction(True, img, self.model_region_p2, 0.2)
+            # else:
                 prediction_regions_org2 = self.do_prediction(True, img, model_region, 0.2)
             prediction_regions_org2 = resize_image(prediction_regions_org2, img_height_h, img_width_h)

@@ -1817,11 +1818,11 @@ class Eynollah:
         if self.input_binary:
             prediction_bin = np.copy(img_org)
         else:
-            if not self.dir_in:
+            # if not self.dir_in:
                 model_bin, session_bin = self.start_new_session_and_model(self.model_dir_of_binarization)
                 prediction_bin = self.do_prediction(True, img_org, model_bin)
-            else:
-                prediction_bin = self.do_prediction(True, img_org, self.model_bin)
+            # else:
+            # prediction_bin = self.do_prediction(True, img_org, self.model_bin)
             prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h)

             prediction_bin = prediction_bin[:, :, 0]
@@ -1830,17 +1831,17 @@ class Eynollah:

             prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2)

-            if not self.dir_in:
+            # if not self.dir_in:
                 model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_ens)
             ratio_y = 1
             ratio_x = 1

             img = resize_image(prediction_bin, int(img_org.shape[0] * ratio_y), int(img_org.shape[1] * ratio_x))

-            if not self.dir_in:
+            # if not self.dir_in:
                 prediction_regions_org = self.do_prediction(True, img, model_region)
-            else:
-                prediction_regions_org = self.do_prediction(True, img, self.model_region)
+            # else:
+            # prediction_regions_org = self.do_prediction(True, img, self.model_region)
             prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h)
             prediction_regions_org = prediction_regions_org[:, :, 0]

@@ -1869,11 +1870,11 @@ class Eynollah:
         if self.input_binary:
             prediction_bin = np.copy(img_org)

-            if not self.dir_in:
+            # if not self.dir_in:
                 model_bin, session_bin = self.start_new_session_and_model(self.model_dir_of_binarization)
                 prediction_bin = self.do_prediction(True, img_org, model_bin)
-            else:
-                prediction_bin = self.do_prediction(True, img_org, self.model_bin)
+            # else:
+            # prediction_bin = self.do_prediction(True, img_org, self.model_bin)
             prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h)
             prediction_bin = prediction_bin[:, :, 0]

@@ -1883,7 +1884,7 @@ class Eynollah:

             prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2)

-            if not self.dir_in:
+            # if not self.dir_in:
                 model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_ens)

         else:
@@ -1892,10 +1893,10 @@ class Eynollah:
             ratio_x = 1

             img = resize_image(prediction_bin, int(img_org.shape[0] * ratio_y), int(img_org.shape[1] * ratio_x))
-            if not self.dir_in:
+            # if not self.dir_in:
                 prediction_regions_org = self.do_prediction(True, img, model_region)
-            else:
-                prediction_regions_org = self.do_prediction(True, img, self.model_region)
+            # else:
+            # prediction_regions_org = self.do_prediction(True, img, self.model_region)
             prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h)
             prediction_regions_org = prediction_regions_org[:, :, 0]

@@ -3036,13 +3037,13 @@ class Eynollah:

         t0_tot = time.time()

-        if not self.dir_in:
+        # if not self.dir_in:
             self.ls_imgs = [1]

         for img_name in self.ls_imgs:
             t0 = time.time()
-            if self.dir_in:
-                self.reset_file_name_dir(os.path.join(self.dir_in, img_name))
+            # if self.dir_in:
+            # self.reset_file_name_dir(os.path.join(self.dir_in, img_name))

             img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement(
                 self.light_version)
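Aside: the run() hunk above is the heart of the revert. With dir_in, self.ls_imgs came from os.listdir(self.dir_in) and run() looped over it, re-pointing the instance at each file via reset_file_name_dir; the ls_imgs = [1] assignment made single-image mode a one-pass special case of that loop. A stripped-down sketch of that control flow (hypothetical shape, not the actual method):

    import os

    def run(dir_in=None):
        # dir_in mode: iterate a directory; single mode: one dummy pass
        ls_imgs = os.listdir(dir_in) if dir_in else [1]
        for img_name in ls_imgs:
            if dir_in:
                pass  # re-point state at os.path.join(dir_in, img_name)
            pcgts = "..."  # process one page, build the PAGE document
            if not dir_in:
                return pcgts  # single-image mode: hand the document back
            # batch mode would write the PAGE-XML here and continue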
@@ -3077,10 +3078,10 @@ class Eynollah:
                 pcgts = self.writer.build_pagexml_no_full_layout([], page_coord, [], [], [], [], [], [], [], [], [], [],
                                                                  cont_page, [], [])
                 self.logger.info("Job done in %.1fs", time.time() - t1)
-                if self.dir_in:
-                    self.writer.write_pagexml(pcgts)
-                    continue
-                else:
+                # if self.dir_in:
+                # self.writer.write_pagexml(pcgts)
+                # continue
+                # else:
                     return pcgts

             t1 = time.time()
@@ -3419,5 +3420,5 @@ class Eynollah:
         # return pcgts
         self.writer.write_pagexml(pcgts)
         # self.logger.info("Job done in %.1fs", time.time() - t0)
-        if self.dir_in:
-            self.logger.info("All jobs done in %.1fs", time.time() - t0_tot)
+        # if self.dir_in:
+        # self.logger.info("All jobs done in %.1fs", time.time() - t0_tot)
--- changed file 3 of 3 ---

@@ -3,8 +3,8 @@ import numpy as np
 from shapely import geometry

 from .rotate import rotate_image, rotation_image_new
-from multiprocessing import Process, Queue, cpu_count
-from multiprocessing import Pool
+# from multiprocessing import Process, Queue, cpu_count
+# from multiprocessing import Pool


 def contours_in_same_horizon(cy_main_hor):
@@ -154,95 +154,95 @@ def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002):
     return contours_imgs


-def do_work_of_contours_in_image(queue_of_all_params, contours_per_process, indexes_r_con_per_pro, img, slope_first):
-    cnts_org_per_each_subprocess = []
-    index_by_text_region_contours = []
-    for mv in range(len(contours_per_process)):
-        index_by_text_region_contours.append(indexes_r_con_per_pro[mv])
+# def do_work_of_contours_in_image(queue_of_all_params, contours_per_process, indexes_r_con_per_pro, img, slope_first):
+# cnts_org_per_each_subprocess = []
+# index_by_text_region_contours = []
+# for mv in range(len(contours_per_process)):
+# index_by_text_region_contours.append(indexes_r_con_per_pro[mv])

-        img_copy = np.zeros(img.shape)
-        img_copy = cv2.fillPoly(img_copy, pts=[contours_per_process[mv]], color=(1, 1, 1))
+# img_copy = np.zeros(img.shape)
+# img_copy = cv2.fillPoly(img_copy, pts=[contours_per_process[mv]], color=(1, 1, 1))

-        img_copy = rotation_image_new(img_copy, -slope_first)
+# img_copy = rotation_image_new(img_copy, -slope_first)

-        img_copy = img_copy.astype(np.uint8)
-        imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
-        ret, thresh = cv2.threshold(imgray, 0, 255, 0)
+# img_copy = img_copy.astype(np.uint8)
+# imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
+# ret, thresh = cv2.threshold(imgray, 0, 255, 0)

-        cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+# cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

-        cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
-        cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
+# cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
+# cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])

-        cnts_org_per_each_subprocess.append(cont_int[0])
+# cnts_org_per_each_subprocess.append(cont_int[0])

-    queue_of_all_params.put([cnts_org_per_each_subprocess, index_by_text_region_contours])
+# queue_of_all_params.put([cnts_org_per_each_subprocess, index_by_text_region_contours])


-def get_textregion_contours_in_org_image_multi(cnts, img, slope_first):
+# def get_textregion_contours_in_org_image_multi(cnts, img, slope_first):

-    num_cores = cpu_count()
-    queue_of_all_params = Queue()
+# num_cores = cpu_count()
+# queue_of_all_params = Queue()

-    processes = []
-    nh = np.linspace(0, len(cnts), num_cores + 1)
-    indexes_by_text_con = np.array(range(len(cnts)))
-    for i in range(num_cores):
-        contours_per_process = cnts[int(nh[i]): int(nh[i + 1])]
-        indexes_text_con_per_process = indexes_by_text_con[int(nh[i]): int(nh[i + 1])]
+# processes = []
+# nh = np.linspace(0, len(cnts), num_cores + 1)
+# indexes_by_text_con = np.array(range(len(cnts)))
+# for i in range(num_cores):
+# contours_per_process = cnts[int(nh[i]): int(nh[i + 1])]
+# indexes_text_con_per_process = indexes_by_text_con[int(nh[i]): int(nh[i + 1])]

-        processes.append(Process(target=do_work_of_contours_in_image, args=(queue_of_all_params, contours_per_process, indexes_text_con_per_process, img, slope_first)))
-    for i in range(num_cores):
-        processes[i].start()
-    cnts_org = []
-    all_index_text_con = []
-    for i in range(num_cores):
-        list_all_par = queue_of_all_params.get(True)
-        contours_for_sub_process = list_all_par[0]
-        indexes_for_sub_process = list_all_par[1]
-        for j in range(len(contours_for_sub_process)):
-            cnts_org.append(contours_for_sub_process[j])
-            all_index_text_con.append(indexes_for_sub_process[j])
-    for i in range(num_cores):
-        processes[i].join()
+# processes.append(Process(target=do_work_of_contours_in_image, args=(queue_of_all_params, contours_per_process, indexes_text_con_per_process, img, slope_first)))
+# for i in range(num_cores):
+# processes[i].start()
+# cnts_org = []
+# all_index_text_con = []
+# for i in range(num_cores):
+# list_all_par = queue_of_all_params.get(True)
+# contours_for_sub_process = list_all_par[0]
+# indexes_for_sub_process = list_all_par[1]
+# for j in range(len(contours_for_sub_process)):
+# cnts_org.append(contours_for_sub_process[j])
+# all_index_text_con.append(indexes_for_sub_process[j])
+# for i in range(num_cores):
+# processes[i].join()

-    print(all_index_text_con)
-    return cnts_org
+# print(all_index_text_con)
+# return cnts_org


-def loop_contour_image(index_l, cnts, img, slope_first):
-    img_copy = np.zeros(img.shape)
-    img_copy = cv2.fillPoly(img_copy, pts=[cnts[index_l]], color=(1, 1, 1))
+# def loop_contour_image(index_l, cnts, img, slope_first):
+# img_copy = np.zeros(img.shape)
+# img_copy = cv2.fillPoly(img_copy, pts=[cnts[index_l]], color=(1, 1, 1))

-    # plt.imshow(img_copy)
-    # plt.show()
+# # plt.imshow(img_copy)
+# # plt.show()

-    # print(img.shape,'img')
-    img_copy = rotation_image_new(img_copy, -slope_first)
-    # print(img_copy.shape,'img_copy')
-    # plt.imshow(img_copy)
-    # plt.show()
+# # print(img.shape,'img')
+# img_copy = rotation_image_new(img_copy, -slope_first)
+# # print(img_copy.shape,'img_copy')
+# # plt.imshow(img_copy)
+# # plt.show()

-    img_copy = img_copy.astype(np.uint8)
-    imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
-    ret, thresh = cv2.threshold(imgray, 0, 255, 0)
+# img_copy = img_copy.astype(np.uint8)
+# imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
+# ret, thresh = cv2.threshold(imgray, 0, 255, 0)

-    cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+# cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

-    cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
-    cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
-    # print(np.shape(cont_int[0]))
-    return cont_int[0]
+# cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
+# cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
+# # print(np.shape(cont_int[0]))
+# return cont_int[0]


-def get_textregion_contours_in_org_image_multi2(cnts, img, slope_first):
+# def get_textregion_contours_in_org_image_multi2(cnts, img, slope_first):

-    cnts_org = []
-    # print(cnts,'cnts')
-    with Pool(cpu_count()) as p:
-        cnts_org = p.starmap(loop_contour_image, [(index_l, cnts, img, slope_first) for index_l in range(len(cnts))])
+# cnts_org = []
+# # print(cnts,'cnts')
+# with Pool(cpu_count()) as p:
+# cnts_org = p.starmap(loop_contour_image, [(index_l, cnts, img, slope_first) for index_l in range(len(cnts))])

-    return cnts_org
+# return cnts_org


 def get_textregion_contours_in_org_image(cnts, img, slope_first):
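Aside: the functions commented out above parallelized the contour re-mapping, first with explicit Process/Queue workers and then with Pool.starmap. For reference, a minimal self-contained example of the Pool.starmap pattern that get_textregion_contours_in_org_image_multi2 used (toy work function, not the real contour code):

    from multiprocessing import Pool, cpu_count

    def loop_item(index_l, items, offset):
        # toy stand-in for loop_contour_image: handle one element per call
        return items[index_l] + offset

    if __name__ == "__main__":
        items = [10, 20, 30, 40]
        with Pool(cpu_count()) as p:
            # starmap unpacks each tuple into loop_item's positional arguments
            results = p.starmap(loop_item, [(i, items, 1) for i in range(len(items))])
        print(results)  # [11, 21, 31, 41]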
@@ -276,42 +276,42 @@ def get_textregion_contours_in_org_image(cnts, img, slope_first):
     return cnts_org


-def get_textregion_contours_in_org_image_light(cnts, img, slope_first):
+# def get_textregion_contours_in_org_image_light(cnts, img, slope_first):

-    h_o = img.shape[0]
-    w_o = img.shape[1]
+# h_o = img.shape[0]
+# w_o = img.shape[1]

-    img = cv2.resize(img, (int(img.shape[1]/3.), int(img.shape[0]/3.)), interpolation=cv2.INTER_NEAREST)
-    # cnts = list( (np.array(cnts)/2).astype(np.int16) )
-    # cnts = cnts/2
-    cnts = [(i / 3).astype(np.int32) for i in cnts]
-    cnts_org = []
-    # print(cnts,'cnts')
-    for i in range(len(cnts)):
-        img_copy = np.zeros(img.shape)
-        img_copy = cv2.fillPoly(img_copy, pts=[cnts[i]], color=(1, 1, 1))
+# img = cv2.resize(img, (int(img.shape[1]/3.), int(img.shape[0]/3.)), interpolation=cv2.INTER_NEAREST)
+# # cnts = list( (np.array(cnts)/2).astype(np.int16) )
+# # cnts = cnts/2
+# cnts = [(i / 3).astype(np.int32) for i in cnts]
+# cnts_org = []
+# # print(cnts,'cnts')
+# for i in range(len(cnts)):
+# img_copy = np.zeros(img.shape)
+# img_copy = cv2.fillPoly(img_copy, pts=[cnts[i]], color=(1, 1, 1))

-        # plt.imshow(img_copy)
-        # plt.show()
+# # plt.imshow(img_copy)
+# # plt.show()

-        # print(img.shape,'img')
-        img_copy = rotation_image_new(img_copy, -slope_first)
-        # print(img_copy.shape,'img_copy')
-        # plt.imshow(img_copy)
-        # plt.show()
+# # print(img.shape,'img')
+# img_copy = rotation_image_new(img_copy, -slope_first)
+# # print(img_copy.shape,'img_copy')
+# # plt.imshow(img_copy)
+# # plt.show()

-        img_copy = img_copy.astype(np.uint8)
-        imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
-        ret, thresh = cv2.threshold(imgray, 0, 255, 0)
+# img_copy = img_copy.astype(np.uint8)
+# imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
+# ret, thresh = cv2.threshold(imgray, 0, 255, 0)

-        cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+# cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

-        cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
-        cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
-        # print(np.shape(cont_int[0]))
-        cnts_org.append(cont_int[0]*3)
+# cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
+# cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0])
+# # print(np.shape(cont_int[0]))
+# cnts_org.append(cont_int[0]*3)

-    return cnts_org
+# return cnts_org


 def return_contours_of_interested_textline(region_pre_p, pixel):