From ec9939c3c77f4d903ca6970bc6ebb2f8a65564c8 Mon Sep 17 00:00:00 2001
From: Konstantin Baierer
Date: Mon, 1 Mar 2021 17:53:47 +0100
Subject: [PATCH] typo: s,hierachy,hierarchy,

---
 qurator/eynollah/eynollah.py             | 12 ++++++------
 qurator/eynollah/utils/__init__.py       | 12 ++++++------
 qurator/eynollah/utils/contour.py        |  4 ++--
 qurator/eynollah/utils/drop_capitals.py  | 16 ++++++++--------
 qurator/eynollah/utils/separate_lines.py | 12 ++++++------
 5 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/qurator/eynollah/eynollah.py b/qurator/eynollah/eynollah.py
index 2b30f9f..b0d6a5c 100644
--- a/qurator/eynollah/eynollah.py
+++ b/qurator/eynollah/eynollah.py
@@ -833,8 +833,8 @@ class Eynollah:
                 slope_for_all = [slope_deskew][0]
             else:
                 try:
-                    textline_con, hierachy = return_contours_of_image(img_int_p)
-                    textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con, hierachy, max_area=1, min_area=0.0008)
+                    textline_con, hierarchy = return_contours_of_image(img_int_p)
+                    textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con, hierarchy, max_area=1, min_area=0.0008)
                     y_diff_mean = find_contours_mean_y_diff(textline_con_fil)

                     sigma_des = max(1, int(y_diff_mean * (4.0 / 40.0)))
@@ -944,8 +944,8 @@ class Eynollah:
                 bounding_box_of_textregion_per_each_subprocess.append(boxes_text[mv])
             else:
                 try:
-                    textline_con, hierachy = return_contours_of_image(img_int_p)
-                    textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con, hierachy, max_area=1, min_area=0.00008)
+                    textline_con, hierarchy = return_contours_of_image(img_int_p)
+                    textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con, hierarchy, max_area=1, min_area=0.00008)
                     y_diff_mean = find_contours_mean_y_diff(textline_con_fil)
                     sigma_des = int(y_diff_mean * (4.0 / 40.0))
                     if sigma_des < 1:
@@ -1018,8 +1018,8 @@ class Eynollah:
             crop_img = crop_img[:, :, 0]
             crop_img = cv2.erode(crop_img, KERNEL, iterations=2)
             try:
-                textline_con, hierachy = return_contours_of_image(crop_img)
-                textline_con_fil = filter_contours_area_of_image(crop_img, textline_con, hierachy, max_area=1, min_area=0.0008)
+                textline_con, hierarchy = return_contours_of_image(crop_img)
+                textline_con_fil = filter_contours_area_of_image(crop_img, textline_con, hierarchy, max_area=1, min_area=0.0008)
                 y_diff_mean = find_contours_mean_y_diff(textline_con_fil)
                 sigma_des = max(1, int(y_diff_mean * (4.0 / 40.0)))
                 crop_img[crop_img > 0] = 1
diff --git a/qurator/eynollah/utils/__init__.py b/qurator/eynollah/utils/__init__.py
index b92151c..e5a76f9 100644
--- a/qurator/eynollah/utils/__init__.py
+++ b/qurator/eynollah/utils/__init__.py
@@ -957,7 +957,7 @@ def small_textlines_to_parent_adherence2(textlines_con, textline_iamge, num_col)
         img_text2 = img_text2.astype(np.uint8)
         imgray = cv2.cvtColor(img_text2, cv2.COLOR_BGR2GRAY)
         ret, thresh = cv2.threshold(imgray, 0, 255, 0)
-        cont, hierachy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+        cont, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        # print(cont[0],type(cont))

@@ -1187,7 +1187,7 @@ def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new(im
        imgray = cv2.cvtColor(img_p_in_ver, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(imgray, 0, 255, 0)

-        contours_lines_ver,hierachy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+        contours_lines_ver,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

        slope_lines_ver,dist_x_ver, x_min_main_ver ,x_max_main_ver ,cy_main_ver,slope_lines_org_ver,y_min_main_ver, y_max_main_ver, cx_main_ver=find_features_of_lines(contours_lines_ver)

@@ -1201,7 +1201,7 @@ def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new(im
    imgray = cv2.cvtColor(img_in_hor, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(imgray, 0, 255, 0)

-    contours_lines_hor,hierachy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+    contours_lines_hor,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

    slope_lines_hor,dist_x_hor, x_min_main_hor ,x_max_main_hor ,cy_main_hor,slope_lines_org_hor,y_min_main_hor, y_max_main_hor, cx_main_hor=find_features_of_lines(contours_lines_hor)

@@ -1335,7 +1335,7 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, pixel_l
            ret_e, thresh_e = cv2.threshold(imgray_e, 0, 255, 0)
            #print('burda3')
-            contours_line_e,hierachy_e=cv2.findContours(thresh_e,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+            contours_line_e,hierarchy_e=cv2.findContours(thresh_e,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

            #slope_lines_e,dist_x_e, x_min_main_e ,x_max_main_e ,cy_main_e,slope_lines_org_e,y_min_main_e, y_max_main_e, cx_main_e=self.find_features_of_lines(contours_line_e)

@@ -1442,7 +1442,7 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, pixel_l
    imgray = cv2.cvtColor(vertical, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(imgray, 0, 255, 0)

-    contours_line_vers,hierachy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+    contours_line_vers,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    slope_lines,dist_x, x_min_main ,x_max_main ,cy_main,slope_lines_org,y_min_main, y_max_main, cx_main=find_features_of_lines(contours_line_vers)
    #print(slope_lines,'vertical')
    args=np.array( range(len(slope_lines) ))
@@ -1465,7 +1465,7 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, pixel_l
    imgray = cv2.cvtColor(horizontal, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(imgray, 0, 255, 0)

-    contours_line_hors,hierachy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+    contours_line_hors,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    slope_lines,dist_x, x_min_main ,x_max_main ,cy_main,slope_lines_org,y_min_main, y_max_main, cx_main=find_features_of_lines(contours_line_hors)

    slope_lines_org_hor=slope_lines_org[slope_lines==0]
diff --git a/qurator/eynollah/utils/contour.py b/qurator/eynollah/utils/contour.py
index 9143e00..bf53fcd 100644
--- a/qurator/eynollah/utils/contour.py
+++ b/qurator/eynollah/utils/contour.py
@@ -194,8 +194,8 @@ def return_contours_of_image(image):
     image = image.astype(np.uint8)
     imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     ret, thresh = cv2.threshold(imgray, 0, 255, 0)
-    contours, hierachy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
-    return contours, hierachy
+    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+    return contours, hierarchy


 def return_contours_of_interested_region_by_min_size(region_pre_p, pixel, min_size=0.00003):
diff --git a/qurator/eynollah/utils/drop_capitals.py b/qurator/eynollah/utils/drop_capitals.py
index 8a94f90..a69e9f5 100644
--- a/qurator/eynollah/utils/drop_capitals.py
+++ b/qurator/eynollah/utils/drop_capitals.py
@@ -117,7 +117,7 @@ def adhere_drop_capital_region_into_corresponding_textline(
                    imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
                    ret, thresh = cv2.threshold(imgray, 0, 255, 0)

-                    contours_combined, hierachy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+                    contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

                    # print(len(contours_combined),'len textlines mixed')
                    areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))])
@@ -172,7 +172,7 @@ def adhere_drop_capital_region_into_corresponding_textline(
                    imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
                    ret, thresh = cv2.threshold(imgray, 0, 255, 0)

-                    contours_combined, hierachy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+                    contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

                    # print(len(contours_combined),'len textlines mixed')
                    areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))])
@@ -217,7 +217,7 @@ def adhere_drop_capital_region_into_corresponding_textline(
                    imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
                    ret, thresh = cv2.threshold(imgray, 0, 255, 0)

-                    contours_combined, hierachy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+                    contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

                    # print(len(contours_combined),'len textlines mixed')
                    areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))])
@@ -267,7 +267,7 @@ def adhere_drop_capital_region_into_corresponding_textline(
                    ##imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
                    ##ret, thresh = cv2.threshold(imgray, 0, 255, 0)

-                    ##contours_combined,hierachy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+                    ##contours_combined,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

                    ##print(len(contours_combined),'len textlines mixed')
                    ##areas_cnt_text=np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))])
@@ -322,7 +322,7 @@ def adhere_drop_capital_region_into_corresponding_textline(
                    imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
                    ret, thresh = cv2.threshold(imgray, 0, 255, 0)

-                    contours_combined, hierachy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+                    contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

                    # print(len(contours_combined),'len textlines mixed')
                    areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))])
@@ -377,7 +377,7 @@ def adhere_drop_capital_region_into_corresponding_textline(
                    imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
                    ret, thresh = cv2.threshold(imgray, 0, 255, 0)

-                    contours_combined, hierachy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+                    contours_combined, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

                    # print(len(contours_combined),'len textlines mixed')
                    areas_cnt_text = np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))])
@@ -408,7 +408,7 @@ def adhere_drop_capital_region_into_corresponding_textline(
            ######imgray = cv2.cvtColor(img_con, cv2.COLOR_BGR2GRAY)
            ######ret, thresh = cv2.threshold(imgray, 0, 255, 0)

-            ######contours_new,hierachy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+            ######contours_new,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

            #####contours_new,hir_new=return_contours_of_image(img_con)
            #####contours_new_parent=return_parent_contours( contours_new,hir_new)
@@ -442,7 +442,7 @@ def adhere_drop_capital_region_into_corresponding_textline(
            #####imgray = cv2.cvtColor(img_textlines, cv2.COLOR_BGR2GRAY)
            #####ret, thresh = cv2.threshold(imgray, 0, 255, 0)

-            #####contours_combined,hierachy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+            #####contours_combined,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

            #####areas_cnt_text=np.array([cv2.contourArea(contours_combined[j]) for j in range(len(contours_combined))])
diff --git a/qurator/eynollah/utils/separate_lines.py b/qurator/eynollah/utils/separate_lines.py
index e5f1f8b..baf0fda 100644
--- a/qurator/eynollah/utils/separate_lines.py
+++ b/qurator/eynollah/utils/separate_lines.py
@@ -102,8 +102,8 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis):
     else:
         peaks_new_tot = peaks_e[:]

-    textline_con, hierachy = return_contours_of_image(img_patch)
-    textline_con_fil = filter_contours_area_of_image(img_patch, textline_con, hierachy, max_area=1, min_area=0.0008)
+    textline_con, hierarchy = return_contours_of_image(img_patch)
+    textline_con_fil = filter_contours_area_of_image(img_patch, textline_con, hierarchy, max_area=1, min_area=0.0008)
     y_diff_mean = np.mean(np.diff(peaks_new_tot)) # self.find_contours_mean_y_diff(textline_con_fil)

     sigma_gaus = int(y_diff_mean * (7.0 / 40.0))
@@ -219,8 +219,8 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help):
        peaks_new_tot=peaks_e[:]


-    textline_con,hierachy=return_contours_of_image(img_patch)
-    textline_con_fil=filter_contours_area_of_image(img_patch,textline_con,hierachy,max_area=1,min_area=0.0008)
+    textline_con,hierarchy=return_contours_of_image(img_patch)
+    textline_con_fil=filter_contours_area_of_image(img_patch,textline_con,hierarchy,max_area=1,min_area=0.0008)

    y_diff_mean=np.mean(np.diff(peaks_new_tot))#self.find_contours_mean_y_diff(textline_con_fil)
    sigma_gaus=int( y_diff_mean * (7./40.0) )
@@ -1054,8 +1054,8 @@ def separate_lines_new_inside_tiles2(img_patch, thetha):
     else:
         peaks_new_tot = peaks_e[:]

-    textline_con, hierachy = return_contours_of_image(img_patch)
-    textline_con_fil = filter_contours_area_of_image(img_patch, textline_con, hierachy, max_area=1, min_area=0.0008)
+    textline_con, hierarchy = return_contours_of_image(img_patch)
+    textline_con_fil = filter_contours_area_of_image(img_patch, textline_con, hierarchy, max_area=1, min_area=0.0008)
     y_diff_mean = np.mean(np.diff(peaks_new_tot)) # self.find_contours_mean_y_diff(textline_con_fil)

     sigma_gaus = int(y_diff_mean * (7.0 / 40.0))