avoid unnecessary 3-channel conversions

Robert Sachunsky 2025-10-06 13:11:03 +02:00
parent 155b8f68b8
commit fe603188f4
4 changed files with 132 additions and 203 deletions
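The pattern removed throughout this commit is the detour through a 3-channel image just to threshold a binary mask and run cv2.findContours on it. A minimal sketch of the old and new variants (using a hypothetical `mask` array for illustration, not code taken from the repository):

    import numpy as np
    import cv2

    # hypothetical single-channel label mask (values 0/1), e.g. a separator or table mask
    mask = np.zeros((100, 100), dtype=np.uint8)
    mask[20:40, 10:90] = 1

    # old pattern: replicate to 3 channels only to convert back to gray before thresholding
    mask_3ch = np.repeat(mask[:, :, np.newaxis], 3, axis=2).astype(np.uint8)
    imgray = cv2.cvtColor(mask_3ch, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(imgray, 0, 255, 0)
    contours_old, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # new pattern: threshold the single-channel mask directly
    _, thresh = cv2.threshold(mask.astype(np.uint8), 0, 255, 0)
    contours_new, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # likewise, cv2.fillPoly accepts a scalar color on a 2-D array, so
    # np.zeros(shape[:2]) with color=1 replaces the 3-channel color=(1, 1, 1) variant
    canvas = np.zeros(mask.shape[:2], dtype=np.uint8)
    canvas = cv2.fillPoly(canvas, pts=contours_new, color=1)

Since cv2.threshold and cv2.findContours accept single-channel 8-bit input directly, the replicate-and-convert round trip should be droppable without changing the resulting contours, while saving two full-image copies per call.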

View file

@@ -712,7 +712,7 @@ class Eynollah:
 if self.input_binary:
 img = self.imread()
 prediction_bin = self.do_prediction(True, img, self.model_bin, n_batch_inference=5)
-prediction_bin = 255 * (prediction_bin[:,:,0]==0)
+prediction_bin = 255 * (prediction_bin[:,:,0] == 0)
 prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2).astype(np.uint8)
 img= np.copy(prediction_bin)
 img_bin = prediction_bin
@@ -2064,9 +2064,7 @@ class Eynollah:
 boxes_sub_new = []
 poly_sub = []
 for mv in range(len(boxes_per_process)):
-crop_img, _ = crop_image_inside_box(boxes_per_process[mv],
-np.repeat(textline_mask_tot[:, :, np.newaxis], 3, axis=2))
-crop_img = crop_img[:, :, 0]
+crop_img, _ = crop_image_inside_box(boxes_per_process[mv], textline_mask_tot)
 crop_img = cv2.erode(crop_img, KERNEL, iterations=2)
 try:
 textline_con, hierarchy = return_contours_of_image(crop_img)
@@ -2638,10 +2636,8 @@ class Eynollah:
 layout_org[:,:,0][layout_org[:,:,0]==pixel_table] = 0
 layout = (layout[:,:,0]==pixel_table)*1
-layout =np.repeat(layout[:, :, np.newaxis], 3, axis=2)
 layout = layout.astype(np.uint8)
-imgray = cv2.cvtColor(layout, cv2.COLOR_BGR2GRAY )
-_, thresh = cv2.threshold(imgray, 0, 255, 0)
+_, thresh = cv2.threshold(layout, 0, 255, 0)
 contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 cnt_size = np.array([cv2.contourArea(contours[j])
@@ -2652,8 +2648,8 @@ class Eynollah:
 x, y, w, h = cv2.boundingRect(contours[i])
 iou = cnt_size[i] /float(w*h) *100
 if iou<80:
-layout_contour = np.zeros((layout_org.shape[0], layout_org.shape[1]))
-layout_contour= cv2.fillPoly(layout_contour,pts=[contours[i]] ,color=(1,1,1))
+layout_contour = np.zeros(layout_org.shape[:2])
+layout_contour = cv2.fillPoly(layout_contour, pts=[contours[i]] ,color=1)
 layout_contour_sum = layout_contour.sum(axis=0)
 layout_contour_sum_diff = np.diff(layout_contour_sum)
@@ -2669,20 +2665,17 @@ class Eynollah:
 layout_contour=cv2.erode(layout_contour[:,:], KERNEL, iterations=5)
 layout_contour=cv2.dilate(layout_contour[:,:], KERNEL, iterations=5)
-layout_contour =np.repeat(layout_contour[:, :, np.newaxis], 3, axis=2)
 layout_contour = layout_contour.astype(np.uint8)
-imgray = cv2.cvtColor(layout_contour, cv2.COLOR_BGR2GRAY )
-_, thresh = cv2.threshold(imgray, 0, 255, 0)
+_, thresh = cv2.threshold(layout_contour, 0, 255, 0)
 contours_sep, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 for ji in range(len(contours_sep) ):
 contours_new.append(contours_sep[ji])
 if num_col_classifier>=2:
-only_recent_contour_image = np.zeros((layout.shape[0],layout.shape[1]))
-only_recent_contour_image= cv2.fillPoly(only_recent_contour_image,
-pts=[contours_sep[ji]], color=(1,1,1))
+only_recent_contour_image = np.zeros(layout.shape[:2])
+only_recent_contour_image = cv2.fillPoly(only_recent_contour_image,
+pts=[contours_sep[ji]], color=1)
 table_pixels_masked_from_early_pre = only_recent_contour_image * table_prediction_early
 iou_in = 100. * table_pixels_masked_from_early_pre.sum() / only_recent_contour_image.sum()
 #print(iou_in,'iou_in_in1')
@@ -3210,13 +3203,11 @@ class Eynollah:
 pixel_lines = 3
 if np.abs(slope_deskew) < SLOPE_THRESHOLD:
 _, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(
-np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2),
-num_col_classifier, self.tables, pixel_lines)
+text_regions_p, num_col_classifier, self.tables, pixel_lines)
 if np.abs(slope_deskew) >= SLOPE_THRESHOLD:
 _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(
-np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2),
-num_col_classifier, self.tables, pixel_lines)
+text_regions_p_1_n, num_col_classifier, self.tables, pixel_lines)
 #print(time.time()-t_0_box,'time box in 2')
 self.logger.info("num_col_classifier: %s", num_col_classifier)
@@ -3392,13 +3383,11 @@ class Eynollah:
 pixel_lines=3
 if np.abs(slope_deskew) < SLOPE_THRESHOLD:
 num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(
-np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2),
-num_col_classifier, self.tables, pixel_lines)
+text_regions_p, num_col_classifier, self.tables, pixel_lines)
 if np.abs(slope_deskew) >= SLOPE_THRESHOLD:
 num_col_d, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(
-np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2),
-num_col_classifier, self.tables, pixel_lines)
+text_regions_p_1_n, num_col_classifier, self.tables, pixel_lines)
 if num_col_classifier>=3:
 if np.abs(slope_deskew) < SLOPE_THRESHOLD:
@@ -3498,7 +3487,7 @@ class Eynollah:
 #text_regions_p[:,:][regions_fully[:,:,0]==6]=6
 ##regions_fully_only_drop = put_drop_out_from_only_drop_model(regions_fully_only_drop, text_regions_p)
-##regions_fully[:, :, 0][regions_fully_only_drop[:, :, 0] == 4] = 4
+##regions_fully[:, :, 0][regions_fully_only_drop[:, :] == 4] = 4
 drop_capital_label_in_full_layout_model = 3
 drops = (regions_fully[:,:,0]==drop_capital_label_in_full_layout_model)*1
@@ -4715,7 +4704,6 @@ class Eynollah:
 return pcgts
 #print("text region early 3 in %.1fs", time.time() - t0)
 if self.light_version:
 contours_only_text_parent = dilate_textregion_contours(contours_only_text_parent)
@@ -4851,21 +4839,17 @@ class Eynollah:
 if not self.headers_off:
 if np.abs(slope_deskew) < SLOPE_THRESHOLD:
 num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(
-np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2),
-num_col_classifier, self.tables, label_seps, contours_only_text_parent_h)
+text_regions_p, num_col_classifier, self.tables, label_seps, contours_only_text_parent_h)
 else:
 _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(
-np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2),
-num_col_classifier, self.tables, label_seps, contours_only_text_parent_h_d_ordered)
+text_regions_p_1_n, num_col_classifier, self.tables, label_seps, contours_only_text_parent_h_d_ordered)
 elif self.headers_off:
 if np.abs(slope_deskew) < SLOPE_THRESHOLD:
 num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(
-np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2),
-num_col_classifier, self.tables, label_seps)
+text_regions_p, num_col_classifier, self.tables, label_seps)
 else:
 _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(
-np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2),
-num_col_classifier, self.tables, label_seps)
+text_regions_p_1_n, num_col_classifier, self.tables, label_seps)
 if num_col_classifier >= 3:
 if np.abs(slope_deskew) < SLOPE_THRESHOLD:

View file

@@ -796,7 +796,7 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8):
 return len(peaks_fin_true), peaks_fin_true
 def find_num_col_by_vertical_lines(regions_without_separators, multiplier=3.8):
-regions_without_separators_0 = regions_without_separators[:, :, 0].sum(axis=0)
+regions_without_separators_0 = regions_without_separators.sum(axis=0)
 ##plt.plot(regions_without_separators_0)
 ##plt.show()
@@ -823,7 +823,10 @@ def return_regions_without_separators(regions_pre):
 return regions_without_separators
 def put_drop_out_from_only_drop_model(layout_no_patch, layout1):
-drop_only = (layout_no_patch[:, :, 0] == 4) * 1
+if layout_no_patch.ndim == 3:
+layout_no_patch = layout_no_patch[:, :, 0]
+drop_only = (layout_no_patch[:, :] == 4) * 1
 contours_drop, hir_on_drop = return_contours_of_image(drop_only)
 contours_drop_parent = return_parent_contours(contours_drop, hir_on_drop)
@@ -849,9 +852,8 @@ def put_drop_out_from_only_drop_model(layout_no_patch, layout1):
 (map_of_drop_contour_bb == 5).sum()) >= 15:
 contours_drop_parent_final.append(contours_drop_parent[jj])
-layout_no_patch[:, :, 0][layout_no_patch[:, :, 0] == 4] = 0
-layout_no_patch = cv2.fillPoly(layout_no_patch, pts=contours_drop_parent_final, color=(4, 4, 4))
+layout_no_patch[:, :][layout_no_patch[:, :] == 4] = 0
+layout_no_patch = cv2.fillPoly(layout_no_patch, pts=contours_drop_parent_final, color=4)
 return layout_no_patch
@@ -925,17 +927,16 @@ def check_any_text_region_in_model_one_is_main_or_header(
 contours_only_text_parent_main_d=[]
 contours_only_text_parent_head_d=[]
-for ii in range(len(contours_only_text_parent)):
-con=contours_only_text_parent[ii]
-img=np.zeros((regions_model_1.shape[0],regions_model_1.shape[1],3))
-img = cv2.fillPoly(img, pts=[con], color=(255, 255, 255))
-all_pixels=((img[:,:,0]==255)*1).sum()
-pixels_header=( ( (img[:,:,0]==255) & (regions_model_full[:,:,0]==2) )*1 ).sum()
+for ii, con in enumerate(contours_only_text_parent):
+img = np.zeros(regions_model_1.shape[:2])
+img = cv2.fillPoly(img, pts=[con], color=255)
+all_pixels=((img == 255)*1).sum()
+pixels_header=( ( (img == 255) & (regions_model_full[:,:,0]==2) )*1 ).sum()
 pixels_main=all_pixels-pixels_header
 if (pixels_header>=pixels_main) and ( (length_con[ii]/float(height_con[ii]) )>=1.3 ):
-regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=2
+regions_model_1[:,:][(regions_model_1[:,:]==1) & (img == 255) ]=2
 contours_only_text_parent_head.append(con)
 if contours_only_text_parent_d_ordered is not None:
 contours_only_text_parent_head_d.append(contours_only_text_parent_d_ordered[ii])
@@ -944,7 +945,7 @@ def check_any_text_region_in_model_one_is_main_or_header(
 all_found_textline_polygons_head.append(all_found_textline_polygons[ii])
 conf_contours_head.append(None)
 else:
-regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=1
+regions_model_1[:,:][(regions_model_1[:,:]==1) & (img == 255) ]=1
 contours_only_text_parent_main.append(con)
 conf_contours_main.append(conf_contours[ii])
 if contours_only_text_parent_d_ordered is not None:
@@ -1015,11 +1016,11 @@ def check_any_text_region_in_model_one_is_main_or_header_light(
 contours_only_text_parent_head_d=[]
 for ii, con in enumerate(contours_only_text_parent_z):
-img=np.zeros((regions_model_1.shape[0], regions_model_1.shape[1], 3))
-img = cv2.fillPoly(img, pts=[con], color=(255, 255, 255))
-all_pixels = (img[:,:,0]==255).sum()
-pixels_header=((img[:,:,0]==255) &
+img = np.zeros(regions_model_1.shape[:2])
+img = cv2.fillPoly(img, pts=[con], color=255)
+all_pixels = (img == 255).sum()
+pixels_header=((img == 255) &
 (regions_model_full[:,:,0]==2)).sum()
 pixels_main = all_pixels - pixels_header
@@ -1029,7 +1030,7 @@ def check_any_text_region_in_model_one_is_main_or_header_light(
 ( pixels_header / float(pixels_main) >= 0.3 and
 length_con[ii] / float(height_con[ii]) >=3 )):
-regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ] = 2
+regions_model_1[:,:][(regions_model_1[:,:]==1) & (img == 255) ] = 2
 contours_only_text_parent_head.append(contours_only_text_parent[ii])
 conf_contours_head.append(None) # why not conf_contours[ii], too?
 if contours_only_text_parent_d_ordered is not None:
@@ -1039,7 +1040,7 @@ def check_any_text_region_in_model_one_is_main_or_header_light(
 all_found_textline_polygons_head.append(all_found_textline_polygons[ii])
 else:
-regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ] = 1
+regions_model_1[:,:][(regions_model_1[:,:]==1) & (img == 255) ] = 1
 contours_only_text_parent_main.append(contours_only_text_parent[ii])
 conf_contours_main.append(conf_contours[ii])
 if contours_only_text_parent_d_ordered is not None:
@@ -1119,11 +1120,11 @@ def small_textlines_to_parent_adherence2(textlines_con, textline_iamge, num_col)
 textlines_big.append(textlines_tot[i])
 textlines_big_org_form.append(textlines_tot_org_form[i])
-img_textline_s = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1]))
-img_textline_s = cv2.fillPoly(img_textline_s, pts=textlines_small, color=(1, 1, 1))
-img_textline_b = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1]))
-img_textline_b = cv2.fillPoly(img_textline_b, pts=textlines_big, color=(1, 1, 1))
+img_textline_s = np.zeros(textline_iamge.shape[:2])
+img_textline_s = cv2.fillPoly(img_textline_s, pts=textlines_small, color=1)
+img_textline_b = np.zeros(textline_iamge.shape[:2])
+img_textline_b = cv2.fillPoly(img_textline_b, pts=textlines_big, color=1)
 sum_small_big_all = img_textline_s + img_textline_b
 sum_small_big_all2 = (sum_small_big_all[:, :] == 2) * 1
@@ -1135,11 +1136,11 @@ def small_textlines_to_parent_adherence2(textlines_con, textline_iamge, num_col)
 # print(len(textlines_small),'small')
 intersections = []
 for z2 in range(len(textlines_big)):
-img_text = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1]))
-img_text = cv2.fillPoly(img_text, pts=[textlines_small[z1]], color=(1, 1, 1))
-img_text2 = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1]))
-img_text2 = cv2.fillPoly(img_text2, pts=[textlines_big[z2]], color=(1, 1, 1))
+img_text = np.zeros(textline_iamge.shape[:2])
+img_text = cv2.fillPoly(img_text, pts=[textlines_small[z1]], color=1)
+img_text2 = np.zeros(textline_iamge.shape[:2])
+img_text2 = cv2.fillPoly(img_text2, pts=[textlines_big[z2]], color=1)
 sum_small_big = img_text2 + img_text
 sum_small_big_2 = (sum_small_big[:, :] == 2) * 1
@@ -1165,19 +1166,17 @@ def small_textlines_to_parent_adherence2(textlines_con, textline_iamge, num_col)
 index_small_textlines = list(np.where(np.array(dis_small_from_bigs_tot) == z)[0])
 # print(z,index_small_textlines)
-img_text2 = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1], 3))
-img_text2 = cv2.fillPoly(img_text2, pts=[textlines_big[z]], color=(255, 255, 255))
+img_text2 = np.zeros(textline_iamge.shape[:2], dtype=np.uint8)
+img_text2 = cv2.fillPoly(img_text2, pts=[textlines_big[z]], color=255)
 textlines_big_with_change.append(z)
 for k in index_small_textlines:
-img_text2 = cv2.fillPoly(img_text2, pts=[textlines_small[k]], color=(255, 255, 255))
+img_text2 = cv2.fillPoly(img_text2, pts=[textlines_small[k]], color=255)
 textlines_small_with_change.append(k)
-img_text2 = img_text2.astype(np.uint8)
-imgray = cv2.cvtColor(img_text2, cv2.COLOR_BGR2GRAY)
-ret, thresh = cv2.threshold(imgray, 0, 255, 0)
-cont, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+_, thresh = cv2.threshold(img_text2, 0, 255, 0)
+cont, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 # print(cont[0],type(cont))
 textlines_big_with_change_con.append(cont)
@@ -1189,8 +1188,7 @@ def small_textlines_to_parent_adherence2(textlines_con, textline_iamge, num_col)
 # print(textlines_big_with_change,'textlines_big_with_change')
 # print(textlines_small_with_change,'textlines_small_with_change')
 # print(textlines_big)
-textlines_con_changed.append(textlines_big_org_form)
-else:
 textlines_con_changed.append(textlines_big_org_form)
 return textlines_con_changed
@@ -1262,29 +1260,22 @@ def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new(
 img_p_in_ver, img_in_hor,num_col_classifier):
 #img_p_in_ver = cv2.erode(img_p_in_ver, self.kernel, iterations=2)
-img_p_in_ver=img_p_in_ver.astype(np.uint8)
-img_p_in_ver=np.repeat(img_p_in_ver[:, :, np.newaxis], 3, axis=2)
-imgray = cv2.cvtColor(img_p_in_ver, cv2.COLOR_BGR2GRAY)
-ret, thresh = cv2.threshold(imgray, 0, 255, 0)
-contours_lines_ver,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+_, thresh = cv2.threshold(img_p_in_ver, 0, 255, 0)
+contours_lines_ver, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 slope_lines_ver, _, x_min_main_ver, _, _, _, y_min_main_ver, y_max_main_ver, cx_main_ver = \
 find_features_of_lines(contours_lines_ver)
 for i in range(len(x_min_main_ver)):
 img_p_in_ver[int(y_min_main_ver[i]):
 int(y_min_main_ver[i])+30,
 int(cx_main_ver[i])-25:
-int(cx_main_ver[i])+25, 0] = 0
+int(cx_main_ver[i])+25] = 0
 img_p_in_ver[int(y_max_main_ver[i])-30:
 int(y_max_main_ver[i]),
 int(cx_main_ver[i])-25:
-int(cx_main_ver[i])+25, 0] = 0
+int(cx_main_ver[i])+25] = 0
-img_in_hor=img_in_hor.astype(np.uint8)
-img_in_hor=np.repeat(img_in_hor[:, :, np.newaxis], 3, axis=2)
-imgray = cv2.cvtColor(img_in_hor, cv2.COLOR_BGR2GRAY)
-ret, thresh = cv2.threshold(imgray, 0, 255, 0)
-contours_lines_hor,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+_, thresh = cv2.threshold(img_in_hor, 0, 255, 0)
+contours_lines_hor, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 slope_lines_hor, dist_x_hor, x_min_main_hor, x_max_main_hor, cy_main_hor, _, _, _, _ = \
 find_features_of_lines(contours_lines_hor)
@@ -1340,22 +1331,19 @@ def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new(
 img_p_in=img_in_hor
 special_separators=[]
-img_p_in_ver[:,:,0][img_p_in_ver[:,:,0]==255]=1
-sep_ver_hor=img_p_in+img_p_in_ver
-sep_ver_hor_cross=(sep_ver_hor[:,:,0]==2)*1
-sep_ver_hor_cross=np.repeat(sep_ver_hor_cross[:, :, np.newaxis], 3, axis=2)
-sep_ver_hor_cross=sep_ver_hor_cross.astype(np.uint8)
-imgray = cv2.cvtColor(sep_ver_hor_cross, cv2.COLOR_BGR2GRAY)
-ret, thresh = cv2.threshold(imgray, 0, 255, 0)
-contours_cross,_=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
-cx_cross, cy_cross = find_center_of_contours(contours_cross)
-for ii in range(len(cx_cross)):
-img_p_in[int(cy_cross[ii])-30:int(cy_cross[ii])+30,int(cx_cross[ii])+5:int(cx_cross[ii])+40,0]=0
-img_p_in[int(cy_cross[ii])-30:int(cy_cross[ii])+30,int(cx_cross[ii])-40:int(cx_cross[ii])-4,0]=0
+img_p_in_ver[img_p_in_ver == 255] = 1
+sep_ver_hor = img_p_in + img_p_in_ver
+sep_ver_hor_cross = (sep_ver_hor == 2) * 1
+_, thresh = cv2.threshold(sep_ver_hor_cross.astype(np.uint8), 0, 255, 0)
+contours_cross, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+center_cross = np.array(find_center_of_contours(contours_cross), dtype=int)
+for cx, cy in center_cross.T:
+img_p_in[cy - 30: cy + 30, cx + 5: cx + 40] = 0
+img_p_in[cy - 30: cy + 30, cx - 40: cx - 4] = 0
 else:
 img_p_in=np.copy(img_in_hor)
 special_separators=[]
-return img_p_in[:,:,0], special_separators
+return img_p_in, special_separators
 def return_points_with_boundies(peaks_neg_fin, first_point, last_point):
 peaks_neg_tot = []
@@ -1365,11 +1353,11 @@ def return_points_with_boundies(peaks_neg_fin, first_point, last_point):
 peaks_neg_tot.append(last_point)
 return peaks_neg_tot
-def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, pixel_lines, contours_h=None):
+def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, label_lines, contours_h=None):
 t_ins_c0 = time.time()
-separators_closeup=( (region_pre_p[:,:,:]==pixel_lines))*1
-separators_closeup[0:110,:,:]=0
-separators_closeup[separators_closeup.shape[0]-150:,:,:]=0
+separators_closeup=( (region_pre_p[:,:]==label_lines))*1
+separators_closeup[0:110,:]=0
+separators_closeup[separators_closeup.shape[0]-150:,:]=0
 kernel = np.ones((5,5),np.uint8)
 separators_closeup=separators_closeup.astype(np.uint8)
@@ -1381,15 +1369,11 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables,
 separators_closeup_n=separators_closeup_n.astype(np.uint8)
 separators_closeup_n_binary=np.zeros(( separators_closeup_n.shape[0],separators_closeup_n.shape[1]) )
-separators_closeup_n_binary[:,:]=separators_closeup_n[:,:,0]
+separators_closeup_n_binary[:,:]=separators_closeup_n[:,:]
 separators_closeup_n_binary[:,:][separators_closeup_n_binary[:,:]!=0]=1
-gray_early=np.repeat(separators_closeup_n_binary[:, :, np.newaxis], 3, axis=2)
-gray_early=gray_early.astype(np.uint8)
-imgray_e = cv2.cvtColor(gray_early, cv2.COLOR_BGR2GRAY)
-ret_e, thresh_e = cv2.threshold(imgray_e, 0, 255, 0)
-contours_line_e,hierarchy_e=cv2.findContours(thresh_e,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+_, thresh_e = cv2.threshold(separators_closeup_n_binary, 0, 255, 0)
+contours_line_e, _ = cv2.findContours(thresh_e.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 _, dist_xe, _, _, _, _, y_min_main, y_max_main, _ = \
 find_features_of_lines(contours_line_e)
 dist_ye = y_max_main - y_min_main
@@ -1399,10 +1383,8 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables,
 cnts_hor_e=[]
 for ce in args_hor_e:
 cnts_hor_e.append(contours_line_e[ce])
-figs_e=np.zeros(thresh_e.shape)
-figs_e=cv2.fillPoly(figs_e,pts=cnts_hor_e,color=(1,1,1))
-separators_closeup_n_binary=cv2.fillPoly(separators_closeup_n_binary, pts=cnts_hor_e, color=(0,0,0))
+separators_closeup_n_binary=cv2.fillPoly(separators_closeup_n_binary, pts=cnts_hor_e, color=0)
 gray = cv2.bitwise_not(separators_closeup_n_binary)
 gray=gray.astype(np.uint8)
@@ -1422,7 +1404,7 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables,
 kernel = np.ones((5,5),np.uint8)
 horizontal = cv2.dilate(horizontal,kernel,iterations = 2)
 horizontal = cv2.erode(horizontal,kernel,iterations = 2)
-horizontal = cv2.fillPoly(horizontal, pts=cnts_hor_e, color=(255,255,255))
+horizontal = cv2.fillPoly(horizontal, pts=cnts_hor_e, color=255)
 rows = vertical.shape[0]
 verticalsize = rows // 30
@@ -1440,13 +1422,8 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables,
 separators_closeup_new[:,:][vertical[:,:]!=0]=1
 separators_closeup_new[:,:][horizontal[:,:]!=0]=1
-vertical=np.repeat(vertical[:, :, np.newaxis], 3, axis=2)
-vertical=vertical.astype(np.uint8)
-imgray = cv2.cvtColor(vertical, cv2.COLOR_BGR2GRAY)
-ret, thresh = cv2.threshold(imgray, 0, 255, 0)
-contours_line_vers,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+_, thresh = cv2.threshold(vertical, 0, 255, 0)
+contours_line_vers, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 slope_lines, dist_x, x_min_main, x_max_main, cy_main, slope_lines_org, y_min_main, y_max_main, cx_main = \
 find_features_of_lines(contours_line_vers)
@@ -1461,11 +1438,8 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables,
 dist_y_ver=y_max_main_ver-y_min_main_ver
 len_y=separators_closeup.shape[0]/3.0
-horizontal=np.repeat(horizontal[:, :, np.newaxis], 3, axis=2)
-horizontal=horizontal.astype(np.uint8)
-imgray = cv2.cvtColor(horizontal, cv2.COLOR_BGR2GRAY)
-ret, thresh = cv2.threshold(imgray, 0, 255, 0)
-contours_line_hors,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+_, thresh = cv2.threshold(horizontal, 0, 255, 0)
+contours_line_hors, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 slope_lines, dist_x, x_min_main, x_max_main, cy_main, slope_lines_org, y_min_main, y_max_main, cx_main = \
 find_features_of_lines(contours_line_hors)
@@ -1558,7 +1532,7 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables,
 peaks_neg_fin_fin=[]
 for itiles in args_big_parts:
 regions_without_separators_tile=regions_without_separators[int(splitter_y_new[itiles]):
-int(splitter_y_new[itiles+1]),:,0]
+int(splitter_y_new[itiles+1]),:]
 try:
 num_col, peaks_neg_fin = find_num_col(regions_without_separators_tile,
 num_col_classifier, tables, multiplier=7.0)

View file

@@ -119,14 +119,11 @@ def return_parent_contours(contours, hierarchy):
 def return_contours_of_interested_region(region_pre_p, label, min_area=0.0002):
 # pixels of images are identified by 5
-if len(region_pre_p.shape) == 3:
+if region_pre_p.ndim == 3:
 cnts_images = (region_pre_p[:, :, 0] == label) * 1
 else:
 cnts_images = (region_pre_p[:, :] == label) * 1
-cnts_images = cnts_images.astype(np.uint8)
-cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2)
-imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY)
-ret, thresh = cv2.threshold(imgray, 0, 255, 0)
+_, thresh = cv2.threshold(cnts_images.astype(np.uint8), 0, 255, 0)
 contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 contours_imgs = return_parent_contours(contours_imgs, hierarchy)
@@ -135,13 +132,11 @@ def return_contours_of_interested_region(region_pre_p, label, min_area=0.0002):
 return contours_imgs
 def do_work_of_contours_in_image(contour, index_r_con, img, slope_first):
-img_copy = np.zeros(img.shape)
-img_copy = cv2.fillPoly(img_copy, pts=[contour], color=(1, 1, 1))
+img_copy = np.zeros(img.shape[:2], dtype=np.uint8)
+img_copy = cv2.fillPoly(img_copy, pts=[contour], color=1)
 img_copy = rotation_image_new(img_copy, -slope_first)
-img_copy = img_copy.astype(np.uint8)
-imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
-ret, thresh = cv2.threshold(imgray, 0, 255, 0)
+_, thresh = cv2.threshold(img_copy, 0, 255, 0)
 cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
@@ -164,8 +159,8 @@ def get_textregion_contours_in_org_image(cnts, img, slope_first):
 cnts_org = []
 # print(cnts,'cnts')
 for i in range(len(cnts)):
-img_copy = np.zeros(img.shape)
-img_copy = cv2.fillPoly(img_copy, pts=[cnts[i]], color=(1, 1, 1))
+img_copy = np.zeros(img.shape[:2], dtype=np.uint8)
+img_copy = cv2.fillPoly(img_copy, pts=[cnts[i]], color=1)
 # plt.imshow(img_copy)
 # plt.show()
@@ -176,9 +171,7 @@ def get_textregion_contours_in_org_image(cnts, img, slope_first):
 # plt.imshow(img_copy)
 # plt.show()
-img_copy = img_copy.astype(np.uint8)
-imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
-ret, thresh = cv2.threshold(imgray, 0, 255, 0)
+_, thresh = cv2.threshold(img_copy, 0, 255, 0)
 cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
@@ -195,12 +188,11 @@ def get_textregion_contours_in_org_image_light_old(cnts, img, slope_first):
 interpolation=cv2.INTER_NEAREST)
 cnts_org = []
 for cnt in cnts:
-img_copy = np.zeros(img.shape)
-img_copy = cv2.fillPoly(img_copy, pts=[(cnt / zoom).astype(int)], color=(1, 1, 1))
+img_copy = np.zeros(img.shape[:2], dtype=np.uint8)
+img_copy = cv2.fillPoly(img_copy, pts=[cnt // zoom], color=1)
 img_copy = rotation_image_new(img_copy, -slope_first).astype(np.uint8)
-imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
-ret, thresh = cv2.threshold(imgray, 0, 255, 0)
+_, thresh = cv2.threshold(img_copy, 0, 255, 0)
 cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1])
@@ -210,14 +202,13 @@ def get_textregion_contours_in_org_image_light_old(cnts, img, slope_first):
 return cnts_org
 def do_back_rotation_and_get_cnt_back(contour_par, index_r_con, img, slope_first, confidence_matrix):
-img_copy = np.zeros(img.shape)
-img_copy = cv2.fillPoly(img_copy, pts=[contour_par], color=(1, 1, 1))
-confidence_matrix_mapped_with_contour = confidence_matrix * img_copy[:,:,0]
-confidence_contour = np.sum(confidence_matrix_mapped_with_contour) / float(np.sum(img_copy[:,:,0]))
+img_copy = np.zeros(img.shape[:2], dtype=np.uint8)
+img_copy = cv2.fillPoly(img_copy, pts=[contour_par], color=1)
+confidence_matrix_mapped_with_contour = confidence_matrix * img_copy
+confidence_contour = np.sum(confidence_matrix_mapped_with_contour) / float(np.sum(img_copy))
 img_copy = rotation_image_new(img_copy, -slope_first).astype(np.uint8)
-imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
-ret, thresh = cv2.threshold(imgray, 0, 255, 0)
+_, thresh = cv2.threshold(img_copy, 0, 255, 0)
 cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 if len(cont_int)==0:
@@ -245,14 +236,11 @@ def get_textregion_contours_in_org_image_light(cnts, img, confidence_matrix):
 def return_contours_of_interested_textline(region_pre_p, label):
 # pixels of images are identified by 5
-if len(region_pre_p.shape) == 3:
+if region_pre_p.ndim == 3:
 cnts_images = (region_pre_p[:, :, 0] == label) * 1
 else:
 cnts_images = (region_pre_p[:, :] == label) * 1
-cnts_images = cnts_images.astype(np.uint8)
-cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2)
-imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY)
-ret, thresh = cv2.threshold(imgray, 0, 255, 0)
+_, thresh = cv2.threshold(cnts_images.astype(np.uint8), 0, 255, 0)
 contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 contours_imgs = return_parent_contours(contours_imgs, hierarchy)
@@ -262,25 +250,22 @@ def return_contours_of_interested_textline(region_pre_p, label):
 def return_contours_of_image(image):
 if len(image.shape) == 2:
-image = np.repeat(image[:, :, np.newaxis], 3, axis=2)
 image = image.astype(np.uint8)
+imgray = image
 else:
 image = image.astype(np.uint8)
 imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-ret, thresh = cv2.threshold(imgray, 0, 255, 0)
+_, thresh = cv2.threshold(imgray, 0, 255, 0)
 contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 return contours, hierarchy
 def return_contours_of_interested_region_by_min_size(region_pre_p, label, min_size=0.00003):
 # pixels of images are identified by 5
-if len(region_pre_p.shape) == 3:
+if region_pre_p.ndim == 3:
 cnts_images = (region_pre_p[:, :, 0] == label) * 1
 else:
 cnts_images = (region_pre_p[:, :] == label) * 1
-cnts_images = cnts_images.astype(np.uint8)
-cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2)
-imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY)
-ret, thresh = cv2.threshold(imgray, 0, 255, 0)
+_, thresh = cv2.threshold(cnts_images.astype(np.uint8), 0, 255, 0)
 contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 contours_imgs = return_parent_contours(contours_imgs, hierarchy)
@@ -291,24 +276,21 @@ def return_contours_of_interested_region_by_min_size(region_pre_p, label, min_si
 def return_contours_of_interested_region_by_size(region_pre_p, label, min_area, max_area):
 # pixels of images are identified by 5
-if len(region_pre_p.shape) == 3:
+if region_pre_p.ndim == 3:
 cnts_images = (region_pre_p[:, :, 0] == label) * 1
 else:
 cnts_images = (region_pre_p[:, :] == label) * 1
-cnts_images = cnts_images.astype(np.uint8)
-cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2)
-imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY)
-ret, thresh = cv2.threshold(imgray, 0, 255, 0)
+_, thresh = cv2.threshold(cnts_images.astype(np.uint8), 0, 255, 0)
 contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 contours_imgs = return_parent_contours(contours_imgs, hierarchy)
 contours_imgs = filter_contours_area_of_image_tables(
 thresh, contours_imgs, hierarchy, max_area=max_area, min_area=min_area)
-img_ret = np.zeros((region_pre_p.shape[0], region_pre_p.shape[1], 3))
-img_ret = cv2.fillPoly(img_ret, pts=contours_imgs, color=(1, 1, 1))
-return img_ret[:, :, 0]
+img_ret = np.zeros((region_pre_p.shape[0], region_pre_p.shape[1]))
+img_ret = cv2.fillPoly(img_ret, pts=contours_imgs, color=1)
+return img_ret
 def dilate_textline_contours(all_found_textline_polygons):
 return [[polygon2contour(contour2polygon(contour, dilate=6))

View file

@@ -142,13 +142,12 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis):
 rotation_matrix)
 def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help):
-(h, w) = img_patch.shape[:2]
+h, w = img_patch.shape[:2]
 center = (w // 2, h // 2)
 M = cv2.getRotationMatrix2D(center, -thetha, 1.0)
 x_d = M[0, 2]
 y_d = M[1, 2]
-thetha = thetha / 180. * np.pi
-rotation_matrix = np.array([[np.cos(thetha), -np.sin(thetha)], [np.sin(thetha), np.cos(thetha)]])
+rotation_matrix = M[:2, :2]
 contour_text_interest_copy = contour_text_interest.copy()
 x_cont = contour_text_interest[:, 0, 0]
@@ -1302,19 +1301,16 @@ def separate_lines_new_inside_tiles(img_path, thetha):
 def separate_lines_vertical_cont(img_patch, contour_text_interest, thetha, box_ind, add_boxes_coor_into_textlines):
 kernel = np.ones((5, 5), np.uint8)
-pixel = 255
+label = 255
 min_area = 0
 max_area = 1
-if len(img_patch.shape) == 3:
-cnts_images = (img_patch[:, :, 0] == pixel) * 1
+if img_patch.ndim == 3:
+cnts_images = (img_patch[:, :, 0] == label) * 1
 else:
-cnts_images = (img_patch[:, :] == pixel) * 1
-cnts_images = cnts_images.astype(np.uint8)
-cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2)
-imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY)
-ret, thresh = cv2.threshold(imgray, 0, 255, 0)
-contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+cnts_images = (img_patch[:, :] == label) * 1
+_, thresh = cv2.threshold(cnts_images.astype(np.uint8), 0, 255, 0)
+contours_imgs, hierarchy = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 contours_imgs = return_parent_contours(contours_imgs, hierarchy)
 contours_imgs = filter_contours_area_of_image_tables(thresh,
@@ -1322,14 +1318,12 @@ def separate_lines_vertical_cont(img_patch, contour_text_interest, thetha, box_i
 max_area=max_area, min_area=min_area)
 cont_final = []
 for i in range(len(contours_imgs)):
-img_contour = np.zeros((cnts_images.shape[0], cnts_images.shape[1], 3))
-img_contour = cv2.fillPoly(img_contour, pts=[contours_imgs[i]], color=(255, 255, 255))
-img_contour = img_contour.astype(np.uint8)
+img_contour = np.zeros(cnts_images.shape[:2], dtype=np.uint8)
+img_contour = cv2.fillPoly(img_contour, pts=[contours_imgs[i]], color=255)
 img_contour = cv2.dilate(img_contour, kernel, iterations=4)
-imgrayrot = cv2.cvtColor(img_contour, cv2.COLOR_BGR2GRAY)
-_, threshrot = cv2.threshold(imgrayrot, 0, 255, 0)
-contours_text_rot, _ = cv2.findContours(threshrot.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+_, threshrot = cv2.threshold(img_contour, 0, 255, 0)
+contours_text_rot, _ = cv2.findContours(threshrot.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 ##contour_text_copy[:, 0, 0] = contour_text_copy[:, 0, 0] - box_ind[
 ##0]
@@ -1344,8 +1338,7 @@ def separate_lines_vertical_cont(img_patch, contour_text_interest, thetha, box_i
 def textline_contours_postprocessing(textline_mask, slope,
 contour_text_interest, box_ind,
 add_boxes_coor_into_textlines=False):
-textline_mask = np.repeat(textline_mask[:, :, np.newaxis], 3, axis=2) * 255
-textline_mask = textline_mask.astype(np.uint8)
+textline_mask = textline_mask * 255
 kernel = np.ones((5, 5), np.uint8)
 textline_mask = cv2.morphologyEx(textline_mask, cv2.MORPH_OPEN, kernel)
 textline_mask = cv2.morphologyEx(textline_mask, cv2.MORPH_CLOSE, kernel)
@@ -1356,12 +1349,11 @@ def textline_contours_postprocessing(textline_mask, slope,
 y_help = 2
 textline_mask_help = np.zeros((textline_mask.shape[0] + int(2 * y_help),
-textline_mask.shape[1] + int(2 * x_help), 3))
+textline_mask.shape[1] + int(2 * x_help)))
 textline_mask_help[y_help : y_help + textline_mask.shape[0],
-x_help : x_help + textline_mask.shape[1], :] = np.copy(textline_mask[:, :, :])
+x_help : x_help + textline_mask.shape[1]] = np.copy(textline_mask[:, :])
 dst = rotate_image(textline_mask_help, slope)
-dst = dst[:, :, 0]
 dst[dst != 0] = 1
 # if np.abs(slope)>.5 and textline_mask.shape[0]/float(textline_mask.shape[1])>3:
@@ -1372,21 +1364,18 @@ def textline_contours_postprocessing(textline_mask, slope,
 contour_text_copy[:, 0, 0] = contour_text_copy[:, 0, 0] - box_ind[0]
 contour_text_copy[:, 0, 1] = contour_text_copy[:, 0, 1] - box_ind[1]
-img_contour = np.zeros((box_ind[3], box_ind[2], 3))
-img_contour = cv2.fillPoly(img_contour, pts=[contour_text_copy], color=(255, 255, 255))
+img_contour = np.zeros((box_ind[3], box_ind[2]))
+img_contour = cv2.fillPoly(img_contour, pts=[contour_text_copy], color=255)
 img_contour_help = np.zeros((img_contour.shape[0] + int(2 * y_help),
-img_contour.shape[1] + int(2 * x_help), 3))
+img_contour.shape[1] + int(2 * x_help)))
 img_contour_help[y_help : y_help + img_contour.shape[0],
-x_help : x_help + img_contour.shape[1], :] = np.copy(img_contour[:, :, :])
+x_help : x_help + img_contour.shape[1]] = np.copy(img_contour[:, :])
 img_contour_rot = rotate_image(img_contour_help, slope)
-img_contour_rot = img_contour_rot.astype(np.uint8)
-# dst_help = dst_help.astype(np.uint8)
-imgrayrot = cv2.cvtColor(img_contour_rot, cv2.COLOR_BGR2GRAY)
-_, threshrot = cv2.threshold(imgrayrot, 0, 255, 0)
-contours_text_rot, _ = cv2.findContours(threshrot.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+_, threshrot = cv2.threshold(img_contour_rot, 0, 255, 0)
+contours_text_rot, _ = cv2.findContours(threshrot.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
 len_con_text_rot = [len(contours_text_rot[ib]) for ib in range(len(contours_text_rot))]
 ind_big_con = np.argmax(len_con_text_rot)