From 5a1900e6642be6ec4234f8b31c61c7121611a50f Mon Sep 17 00:00:00 2001
From: kba
Date: Wed, 26 Nov 2025 15:34:36 +0100
Subject: [PATCH 01/15] :fire: remove OCR option from eynollah layout

---
 README.md                      |   3 -
 src/eynollah/cli.py            |  35 +---
 src/eynollah/eynollah.py       | 329 +--------------------------------
 src/eynollah/eynollah_ocr.py   |   3 +-
 tests/cli_tests/test_layout.py |   6 -
 5 files changed, 16 insertions(+), 360 deletions(-)

diff --git a/README.md b/README.md
index 0283fe9..59da45a 100644
--- a/README.md
+++ b/README.md
@@ -120,9 +120,6 @@ The following options can be used to further configure the processing:
 | `-sa ` | save all (plot, enhanced/binary image, layout) to this directory |
 | `-thart` | threshold of artifical class in the case of textline detection. The default value is 0.1 |
 | `-tharl` | threshold of artifical class in the case of layout detection. The default value is 0.1 |
-| `-ocr` | do ocr |
-| `-tr` | apply transformer ocr. Default model is a CNN-RNN model |
-| `-bs_ocr` | ocr inference batch size. Default bs for trocr and cnn_rnn models are 2 and 8 respectively |
 | `-ncu` | upper limit of columns in document image |
 | `-ncl` | lower limit of columns in document image |
 | `-slro` | skip layout detection and reading order |
diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py
index c37864a..8a4cb3a 100644
--- a/src/eynollah/cli.py
+++ b/src/eynollah/cli.py
@@ -321,7 +321,7 @@ def enhancement(ctx, image, out, overwrite, dir_in, num_col_upper, num_col_lower
     "--input_binary/--input-RGB",
     "-ib/-irgb",
     is_flag=True,
-    help="in general, eynollah uses RGB as input but if the input document is strongly dark, bright or for any other reason you can turn binarized input on. This option does not mean that you have to provide a binary image, otherwise this means that the tool itself will binarized the RGB input document.",
+    help="In general, eynollah uses RGB as input but if the input document is very dark, very bright or for any other reason you can turn on input binarization. When this flag is set, eynollah will binarize the RGB input document, you should always provide RGB images to eynollah.",
 )
 @click.option(
     "--allow_scaling/--no-allow-scaling",
@@ -353,23 +353,6 @@ def enhancement(ctx, image, out, overwrite, dir_in, num_col_upper, num_col_lower
     is_flag=True,
     help="if this parameter set to true, this tool would apply machine based reading order detection",
 )
-@click.option(
-    "--do_ocr",
-    "-ocr/-noocr",
-    is_flag=True,
-    help="if this parameter set to true, this tool will try to do ocr",
-)
-@click.option(
-    "--transformer_ocr",
-    "-tr/-notr",
-    is_flag=True,
-    help="if this parameter set to true, this tool will apply transformer ocr",
-)
-@click.option(
-    "--batch_size_ocr",
-    "-bs_ocr",
-    help="number of inference batch size of ocr model. 
Default b_s for trocr and cnn_rnn models are 2 and 8 respectively", -) @click.option( "--num_col_upper", "-ncu", @@ -421,9 +404,6 @@ def layout( headers_off, light_version, reading_order_machine_based, - do_ocr, - transformer_ocr, - batch_size_ocr, num_col_upper, num_col_lower, threshold_art_class_textline, @@ -470,9 +450,6 @@ def layout( light_version=light_version, ignore_page_extraction=ignore_page_extraction, reading_order_machine_based=reading_order_machine_based, - do_ocr=do_ocr, - transformer_ocr=transformer_ocr, - batch_size_ocr=batch_size_ocr, num_col_upper=num_col_upper, num_col_lower=num_col_lower, skip_layout_and_reading_order=skip_layout_and_reading_order, @@ -506,7 +483,15 @@ def layout( @click.option( "--dir_in_bin", "-dib", - help="directory of binarized images (in addition to --dir_in for RGB images; filename stems must match the RGB image files, with '.png' suffix).\nPerform prediction using both RGB and binary images. (This does not necessarily improve results, however it may be beneficial for certain document images.)", + help=(""" + directory of binarized images (in addition to --dir_in for RGB + images; filename stems must match the RGB image files, with '.png' + \n + Perform prediction using both RGB and binary images. + (This does not necessarily improve results, however it may be beneficial + for certain document images. +"""), + type=click.Path(exists=True, file_okay=False), ) @click.option( diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index dc90f1d..45fabd1 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -95,19 +95,6 @@ from .utils.rotate import ( rotation_not_90_func_full_layout, rotation_image_new ) -from .utils.utils_ocr import ( - return_start_and_end_of_common_text_of_textline_ocr_without_common_section, - return_textline_contour_with_added_box_coordinate, - preprocess_and_resize_image_for_ocrcnn_model, - return_textlines_split_if_needed, - decode_batch_predictions, - return_rnn_cnn_ocr_of_given_textlines, - fit_text_single_line, - break_curved_line_into_small_pieces_and_then_merge, - get_orientation_moments, - rotate_image_with_padding, - get_contours_and_bounding_boxes -) from .utils.separate_lines import ( separate_lines_new2, return_deskew_slop, @@ -176,9 +163,6 @@ class Eynollah: light_version : bool = False, ignore_page_extraction : bool = False, reading_order_machine_based : bool = False, - do_ocr : bool = False, - transformer_ocr: bool = False, - batch_size_ocr: Optional[int] = None, num_col_upper : Optional[int] = None, num_col_lower : Optional[int] = None, threshold_art_class_layout: Optional[float] = None, @@ -209,12 +193,6 @@ class Eynollah: self.extract_only_images = extract_only_images self.ignore_page_extraction = ignore_page_extraction self.skip_layout_and_reading_order = skip_layout_and_reading_order - self.ocr = do_ocr - self.tr = transformer_ocr - if not batch_size_ocr: - self.b_s_ocr = 8 - else: - self.b_s_ocr = int(batch_size_ocr) if num_col_upper: self.num_col_upper = int(num_col_upper) else: @@ -284,14 +262,6 @@ class Eynollah: if self.tables: loadable.append(("table", 'light' if self.light_version else '')) - if self.ocr: - if self.tr: - loadable.append(('ocr', 'tr')) - loadable.append(('trocr_processor', '')) - else: - loadable.append('ocr') - loadable.append('num_to_char') - self.model_zoo.load_models(*loadable) def __del__(self): @@ -2078,15 +2048,7 @@ class Eynollah: ###img_bin = np.copy(prediction_bin) ###else: ###img_bin = np.copy(img_resized) - if (self.ocr and self.tr) and not 
self.input_binary: - prediction_bin = self.do_prediction(True, img_resized, self.model_zoo.get("binarization"), n_batch_inference=5) - prediction_bin = 255 * (prediction_bin[:,:,0] == 0) - prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) - prediction_bin = prediction_bin.astype(np.uint16) - #img= np.copy(prediction_bin) - img_bin = np.copy(prediction_bin) - else: - img_bin = np.copy(img_resized) + img_bin = np.copy(img_resized) #print("inside 1 ", time.time()-t_in) ###textline_mask_tot_ea = self.run_textline(img_bin) @@ -3586,190 +3548,13 @@ class Eynollah: region_ids = ['region_%04d' % i for i in range(len(co_text_all_org))] return ordered, region_ids - def return_start_and_end_of_common_text_of_textline_ocr(self,textline_image, ind_tot): - width = np.shape(textline_image)[1] - height = np.shape(textline_image)[0] - common_window = int(0.2*width) - width1 = int ( width/2. - common_window ) - width2 = int ( width/2. + common_window ) - - img_sum = np.sum(textline_image[:,:,0], axis=0) - sum_smoothed = gaussian_filter1d(img_sum, 3) - - peaks_real, _ = find_peaks(sum_smoothed, height=0) - - if len(peaks_real)>70: - print(len(peaks_real), 'len(peaks_real)') - peaks_real = peaks_real[(peaks_realwidth1)] + - arg_sort = np.argsort(sum_smoothed[peaks_real]) - arg_sort4 =arg_sort[::-1][:4] - peaks_sort_4 = peaks_real[arg_sort][::-1][:4] - argsort_sorted = np.argsort(peaks_sort_4) - first_4_sorted = peaks_sort_4[argsort_sorted] - y_4_sorted = sum_smoothed[peaks_real][arg_sort4[argsort_sorted]] - #print(first_4_sorted,'first_4_sorted') - - arg_sortnew = np.argsort(y_4_sorted) - peaks_final =np.sort( first_4_sorted[arg_sortnew][2:] ) + - #plt.figure(ind_tot) - #plt.imshow(textline_image) - #plt.plot([peaks_final[0], peaks_final[0]], [0, height-1]) - #plt.plot([peaks_final[1], peaks_final[1]], [0, height-1]) - #plt.savefig('./'+str(ind_tot)+'.png') - - return peaks_final[0], peaks_final[1] - else: - pass - - def return_start_and_end_of_common_text_of_textline_ocr_new_splitted( - self, peaks_real, sum_smoothed, start_split, end_split): - - peaks_real = peaks_real[(peaks_realstart_split)] - - arg_sort = np.argsort(sum_smoothed[peaks_real]) - arg_sort4 =arg_sort[::-1][:4] - peaks_sort_4 = peaks_real[arg_sort][::-1][:4] - argsort_sorted = np.argsort(peaks_sort_4) - - first_4_sorted = peaks_sort_4[argsort_sorted] - y_4_sorted = sum_smoothed[peaks_real][arg_sort4[argsort_sorted]] - #print(first_4_sorted,'first_4_sorted') - - arg_sortnew = np.argsort(y_4_sorted) - peaks_final =np.sort( first_4_sorted[arg_sortnew][3:] ) - return peaks_final[0] - - def return_start_and_end_of_common_text_of_textline_ocr_new(self, textline_image, ind_tot): - width = np.shape(textline_image)[1] - height = np.shape(textline_image)[0] - common_window = int(0.15*width) - - width1 = int ( width/2. - common_window ) - width2 = int ( width/2. + common_window ) - mid = int(width/2.) 
- - img_sum = np.sum(textline_image[:,:,0], axis=0) - sum_smoothed = gaussian_filter1d(img_sum, 3) - - peaks_real, _ = find_peaks(sum_smoothed, height=0) - if len(peaks_real)>70: - peak_start = self.return_start_and_end_of_common_text_of_textline_ocr_new_splitted( - peaks_real, sum_smoothed, width1, mid+2) - peak_end = self.return_start_and_end_of_common_text_of_textline_ocr_new_splitted( - peaks_real, sum_smoothed, mid-2, width2) - - #plt.figure(ind_tot) - #plt.imshow(textline_image) - #plt.plot([peak_start, peak_start], [0, height-1]) - #plt.plot([peak_end, peak_end], [0, height-1]) - #plt.savefig('./'+str(ind_tot)+'.png') - - return peak_start, peak_end - else: - pass - - def return_ocr_of_textline_without_common_section( - self, - textline_image, - model_ocr, - processor, - device, - width_textline, - h2w_ratio, - ind_tot, - ): - - if h2w_ratio > 0.05: - pixel_values = processor(textline_image, return_tensors="pt").pixel_values - generated_ids = model_ocr.generate(pixel_values.to(device)) - generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - else: - #width = np.shape(textline_image)[1] - #height = np.shape(textline_image)[0] - #common_window = int(0.3*width) - #width1 = int ( width/2. - common_window ) - #width2 = int ( width/2. + common_window ) - - split_point = return_start_and_end_of_common_text_of_textline_ocr_without_common_section(textline_image) - if split_point: - image1 = textline_image[:, :split_point,:]# image.crop((0, 0, width2, height)) - image2 = textline_image[:, split_point:,:]#image.crop((width1, 0, width, height)) - - #pixel_values1 = processor(image1, return_tensors="pt").pixel_values - #pixel_values2 = processor(image2, return_tensors="pt").pixel_values - - pixel_values_merged = processor([image1,image2], return_tensors="pt").pixel_values - generated_ids_merged = model_ocr.generate(pixel_values_merged.to(device)) - generated_text_merged = processor.batch_decode(generated_ids_merged, skip_special_tokens=True) - - #print(generated_text_merged,'generated_text_merged') - - #generated_ids1 = model_ocr.generate(pixel_values1.to(device)) - #generated_ids2 = model_ocr.generate(pixel_values2.to(device)) - - #generated_text1 = processor.batch_decode(generated_ids1, skip_special_tokens=True)[0] - #generated_text2 = processor.batch_decode(generated_ids2, skip_special_tokens=True)[0] - - #generated_text = generated_text1 + ' ' + generated_text2 - generated_text = generated_text_merged[0] + ' ' + generated_text_merged[1] - - #print(generated_text1,'generated_text1') - #print(generated_text2, 'generated_text2') - #print('########################################') - else: - pixel_values = processor(textline_image, return_tensors="pt").pixel_values - generated_ids = model_ocr.generate(pixel_values.to(device)) - generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - - #print(generated_text,'generated_text') - #print('########################################') - return generated_text - - def return_ocr_of_textline( - self, textline_image, model_ocr, processor, device, width_textline, h2w_ratio,ind_tot): - - if h2w_ratio > 0.05: - pixel_values = processor(textline_image, return_tensors="pt").pixel_values - generated_ids = model_ocr.generate(pixel_values.to(device)) - generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - else: - #width = np.shape(textline_image)[1] - #height = np.shape(textline_image)[0] - #common_window = int(0.3*width) - #width1 = int ( width/2. 
- common_window ) - #width2 = int ( width/2. + common_window ) - - try: - width1, width2 = self.return_start_and_end_of_common_text_of_textline_ocr_new(textline_image, ind_tot) - - image1 = textline_image[:, :width2,:]# image.crop((0, 0, width2, height)) - image2 = textline_image[:, width1:,:]#image.crop((width1, 0, width, height)) - - pixel_values1 = processor(image1, return_tensors="pt").pixel_values - pixel_values2 = processor(image2, return_tensors="pt").pixel_values - - generated_ids1 = model_ocr.generate(pixel_values1.to(device)) - generated_ids2 = model_ocr.generate(pixel_values2.to(device)) - - generated_text1 = processor.batch_decode(generated_ids1, skip_special_tokens=True)[0] - generated_text2 = processor.batch_decode(generated_ids2, skip_special_tokens=True)[0] - #print(generated_text1,'generated_text1') - #print(generated_text2, 'generated_text2') - #print('########################################') - - match = sq(None, generated_text1, generated_text2).find_longest_match( - 0, len(generated_text1), 0, len(generated_text2)) - generated_text = generated_text1 + generated_text2[match.b+match.size:] - except: - pixel_values = processor(textline_image, return_tensors="pt").pixel_values - generated_ids = model_ocr.generate(pixel_values.to(device)) - generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - - return generated_text + def return_list_of_contours_with_desired_order(self, ls_cons, sorted_indexes): return list(np.array(ls_cons)[np.array(sorted_indexes)]) @@ -4009,8 +3794,6 @@ class Eynollah: enabled_modes.append("Light textline detection") if self.full_layout: enabled_modes.append("Full layout analysis") - if self.ocr: - enabled_modes.append("OCR") if self.tables: enabled_modes.append("Table detection") if enabled_modes: @@ -4130,21 +3913,12 @@ class Eynollah: id_of_texts_tot =['region_0001'] conf_contours_textregions =[0] - if self.ocr and not self.tr: - gc.collect() - ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines( - image_page, all_found_textline_polygons, np.zeros((len(all_found_textline_polygons), 4)), - self.model_zoo.get("ocr"), self.b_s_ocr, self.model_zoo.get("num_to_char"), textline_light=True) - else: - ocr_all_textlines = None - pcgts = self.writer.build_pagexml_no_full_layout( cont_page, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, page_coord, [], [], [], [], [], [], [], slopes, [], [], cont_page, [], [], - ocr_all_textlines=ocr_all_textlines, conf_contours_textregion=conf_contours_textregions, skip_layout_reading_order=True) self.logger.info("Basic processing complete") @@ -4629,94 +4403,6 @@ class Eynollah: boxes_d, textline_mask_tot_d) self.logger.info(f"Detection of reading order took {time.time() - t_order:.1f}s") - ocr_all_textlines = None - ocr_all_textlines_marginals_left = None - ocr_all_textlines_marginals_right = None - ocr_all_textlines_h = None - ocr_all_textlines_drop = None - if self.ocr: - self.logger.info("Step 4.5/5: OCR Processing") - - if not self.tr: - gc.collect() - - if len(all_found_textline_polygons): - ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines( - image_page, all_found_textline_polygons, all_box_coord, - self.model_zoo.get("ocr"), self.b_s_ocr, self.model_zoo.get("num_to_char"), self.textline_light, self.curved_line) - - if len(all_found_textline_polygons_marginals_left): - ocr_all_textlines_marginals_left = return_rnn_cnn_ocr_of_given_textlines( - image_page, all_found_textline_polygons_marginals_left, all_box_coord_marginals_left, - 
self.model_zoo.get("ocr"), self.b_s_ocr, self.model_zoo.get("num_to_char"), self.textline_light, self.curved_line) - - if len(all_found_textline_polygons_marginals_right): - ocr_all_textlines_marginals_right = return_rnn_cnn_ocr_of_given_textlines( - image_page, all_found_textline_polygons_marginals_right, all_box_coord_marginals_right, - self.model_zoo.get("ocr"), self.b_s_ocr, self.model_zoo.get("num_to_char"), self.textline_light, self.curved_line) - - if self.full_layout and len(all_found_textline_polygons): - ocr_all_textlines_h = return_rnn_cnn_ocr_of_given_textlines( - image_page, all_found_textline_polygons_h, all_box_coord_h, - self.model_zoo.get("ocr"), self.b_s_ocr, self.model_zoo.get("num_to_char"), self.textline_light, self.curved_line) - - if self.full_layout and len(polygons_of_drop_capitals): - ocr_all_textlines_drop = return_rnn_cnn_ocr_of_given_textlines( - image_page, polygons_of_drop_capitals, np.zeros((len(polygons_of_drop_capitals), 4)), - self.model_zoo.get("ocr"), self.b_s_ocr, self.model_zoo.get("num_to_char"), self.textline_light, self.curved_line) - - else: - if self.light_version: - self.logger.info("Using light version OCR") - if self.textline_light: - self.logger.info("Using light text line detection for OCR") - self.logger.info("Processing text lines...") - - gc.collect() - - torch.cuda.empty_cache() - self.model_zoo.get("ocr").to(self.device) - - ind_tot = 0 - #cv2.imwrite('./img_out.png', image_page) - ocr_all_textlines = [] - # FIXME: what about lines in marginals / headings / drop-capitals here? - for indexing, ind_poly_first in enumerate(all_found_textline_polygons): - ocr_textline_in_textregion = [] - for indexing2, ind_poly in enumerate(ind_poly_first): - if not (self.textline_light or self.curved_line): - ind_poly = copy.deepcopy(ind_poly) - box_ind = all_box_coord[indexing] - #print(ind_poly,np.shape(ind_poly), 'ind_poly') - #print(box_ind) - ind_poly = return_textline_contour_with_added_box_coordinate(ind_poly, box_ind) - #print(ind_poly_copy) - ind_poly[ind_poly<0] = 0 - x, y, w, h = cv2.boundingRect(ind_poly) - #print(ind_poly_copy, np.shape(ind_poly_copy)) - #print(x, y, w, h, h/float(w),'ratio') - h2w_ratio = h/float(w) - mask_poly = np.zeros(image_page.shape) - if not self.light_version: - img_poly_on_img = np.copy(image_page) - else: - img_poly_on_img = np.copy(img_bin_light) - mask_poly = cv2.fillPoly(mask_poly, pts=[ind_poly], color=(1, 1, 1)) - - if self.textline_light: - mask_poly = cv2.dilate(mask_poly, KERNEL, iterations=1) - img_poly_on_img[:,:,0][mask_poly[:,:,0] ==0] = 255 - img_poly_on_img[:,:,1][mask_poly[:,:,0] ==0] = 255 - img_poly_on_img[:,:,2][mask_poly[:,:,0] ==0] = 255 - - img_croped = img_poly_on_img[y:y+h, x:x+w, :] - #cv2.imwrite('./extracted_lines/'+str(ind_tot)+'.jpg', img_croped) - text_ocr = self.return_ocr_of_textline_without_common_section( - img_croped, self.model_zoo.get("ocr"), self.model_zoo.get("trocr_processor"), self.device, w, h2w_ratio, ind_tot) - ocr_textline_in_textregion.append(text_ocr) - ind_tot = ind_tot +1 - ocr_all_textlines.append(ocr_textline_in_textregion) - self.logger.info("Step 5/5: Output Generation") if self.full_layout: @@ -4728,9 +4414,7 @@ class Eynollah: all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes, slopes_h, slopes_marginals_left, slopes_marginals_right, - cont_page, polygons_seplines, ocr_all_textlines, ocr_all_textlines_h, - ocr_all_textlines_marginals_left, 
ocr_all_textlines_marginals_right, - ocr_all_textlines_drop, + cont_page, polygons_seplines, conf_contours_textregions, conf_contours_textregions_h) else: pcgts = self.writer.build_pagexml_no_full_layout( @@ -4741,9 +4425,6 @@ class Eynollah: all_box_coord_marginals_left, all_box_coord_marginals_right, slopes, slopes_marginals_left, slopes_marginals_right, cont_page, polygons_seplines, contours_tables, - ocr_all_textlines=ocr_all_textlines, - ocr_all_textlines_marginals_left=ocr_all_textlines_marginals_left, - ocr_all_textlines_marginals_right=ocr_all_textlines_marginals_right, conf_contours_textregions=conf_contours_textregions) return pcgts diff --git a/src/eynollah/eynollah_ocr.py b/src/eynollah/eynollah_ocr.py index 0f3eda6..61de12c 100644 --- a/src/eynollah/eynollah_ocr.py +++ b/src/eynollah/eynollah_ocr.py @@ -473,8 +473,7 @@ class Eynollah_ocr: img = cv2.imread(dir_img) if dir_in_bin is not None: cropped_lines_bin = [] - dir_img_bin = os.path.join(dir_in_bin, file_name+'.png') - img_bin = cv2.imread(dir_img_bin) + img_bin = cv2.imread(os.path.join(dir_in_bin, file_name+'.png')) if dir_out_image_text: out_image_with_text = os.path.join(dir_out_image_text, file_name+'.png') diff --git a/tests/cli_tests/test_layout.py b/tests/cli_tests/test_layout.py index a34514e..bc354c8 100644 --- a/tests/cli_tests/test_layout.py +++ b/tests/cli_tests/test_layout.py @@ -13,12 +13,6 @@ from ocrd_models.constants import NAMESPACES as NS "--textline_light", "--light_version"], # -ep ... # -eoi ... - # FIXME: find out whether OCR extra was installed, otherwise skip these - ["--do_ocr"], - ["--do_ocr", "--light_version", "--textline_light"], - ["--do_ocr", "--transformer_ocr"], - #["--do_ocr", "--transformer_ocr", "--light_version", "--textline_light"], - ["--do_ocr", "--transformer_ocr", "--light_version", "--textline_light", "--full-layout"], # --skip_layout_and_reading_order ], ids=str) def test_run_eynollah_layout_filename( From 82266f8234711e13e8112fdae8670d189074a97e Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 26 Nov 2025 18:42:39 +0100 Subject: [PATCH 02/15] reorganize cli --- src/eynollah/cli.py | 602 --------------------------- src/eynollah/cli/__init__.py | 21 + src/eynollah/cli/cli.py | 66 +++ src/eynollah/cli/cli_binarize.py | 44 ++ src/eynollah/cli/cli_enhance.py | 63 +++ src/eynollah/cli/cli_layout.py | 257 ++++++++++++ src/eynollah/{ => cli}/cli_models.py | 0 src/eynollah/cli/cli_ocr.py | 132 ++++++ src/eynollah/cli/cli_readingorder.py | 35 ++ src/eynollah/ocrd_cli.py | 3 + 10 files changed, 621 insertions(+), 602 deletions(-) delete mode 100644 src/eynollah/cli.py create mode 100644 src/eynollah/cli/__init__.py create mode 100644 src/eynollah/cli/cli.py create mode 100644 src/eynollah/cli/cli_binarize.py create mode 100644 src/eynollah/cli/cli_enhance.py create mode 100644 src/eynollah/cli/cli_layout.py rename src/eynollah/{ => cli}/cli_models.py (100%) create mode 100644 src/eynollah/cli/cli_ocr.py create mode 100644 src/eynollah/cli/cli_readingorder.py diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py deleted file mode 100644 index 8a4cb3a..0000000 --- a/src/eynollah/cli.py +++ /dev/null @@ -1,602 +0,0 @@ -from dataclasses import dataclass -import logging -import sys -import os -from typing import Union - -import click - -# NOTE: For debugging/predictable order of imports -from .eynollah_imports import imported_libs -from .model_zoo import EynollahModelZoo -from .cli_models import models_cli - -@dataclass() -class EynollahCliCtx: - """ - Holds options relevant for all eynollah 
subcommands - """ - model_zoo: EynollahModelZoo - log_level : Union[str, None] = 'INFO' - - -@click.group() -@click.option( - "--model-basedir", - "-m", - help="directory of models", - # NOTE: not mandatory to exist so --help for subcommands works but will log a warning - # and raise exception when trying to load models in the CLI - # type=click.Path(exists=True), - default=f'{os.getcwd()}/models_eynollah', -) -@click.option( - "--model-overrides", - "-mv", - help="override default versions of model categories, syntax is 'CATEGORY VARIANT PATH', e.g 'region light /path/to/model'. See eynollah list-models for the full list", - type=(str, str, str), - multiple=True, -) -@click.option( - "--log_level", - "-l", - type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), - help="Override log level globally to this", -) -@click.pass_context -def main(ctx, model_basedir, model_overrides, log_level): - """ - eynollah - Document Layout Analysis, Image Enhancement, OCR - """ - # Initialize logging - console_handler = logging.StreamHandler(sys.stdout) - console_handler.setLevel(logging.NOTSET) - formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s %(name)s - %(message)s', datefmt='%H:%M:%S') - console_handler.setFormatter(formatter) - logging.getLogger('eynollah').addHandler(console_handler) - logging.getLogger('eynollah').setLevel(log_level or logging.INFO) - # Initialize model zoo - model_zoo = EynollahModelZoo(basedir=model_basedir, model_overrides=model_overrides) - # Initialize CLI context - ctx.obj = EynollahCliCtx( - model_zoo=model_zoo, - log_level=log_level, - ) - -main.add_command(models_cli, 'models') - -@main.command() -@click.option( - "--input", - "-i", - help="PAGE-XML input filename", - type=click.Path(exists=True, dir_okay=False), -) -@click.option( - "--dir_in", - "-di", - help="directory of PAGE-XML input files (instead of --input)", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--out", - "-o", - help="directory for output images", - type=click.Path(exists=True, file_okay=False), - required=True, -) -@click.pass_context -def machine_based_reading_order(ctx, input, dir_in, out): - """ - Generate ReadingOrder with a ML model - """ - from eynollah.mb_ro_on_layout import machine_based_reading_order_on_layout - assert bool(input) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." 
- orderer = machine_based_reading_order_on_layout(model_zoo=ctx.obj.model_zoo) - orderer.run(xml_filename=input, - dir_in=dir_in, - dir_out=out, - ) - - -@main.command() -@click.option('--patches/--no-patches', default=True, help='by enabling this parameter you let the model to see the image in patches.') -@click.option( - "--input-image", "--image", - "-i", - help="input image filename", - type=click.Path(exists=True, dir_okay=False) -) -@click.option( - "--dir_in", - "-di", - help="directory of input images (instead of --image)", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--output", - "-o", - help="output image (if using -i) or output image directory (if using -di)", - type=click.Path(file_okay=True, dir_okay=True), - required=True, -) -@click.option( - '-M', - '--mode', - type=click.Choice(['single', 'multi']), - default='single', - help="Whether to use the (newer and faster) single-model binarization or the (slightly better) multi-model binarization" -) -@click.pass_context -def binarization( - ctx, - patches, - input_image, - mode, - dir_in, - output, -): - """ - Binarize images with a ML model - """ - from eynollah.sbb_binarize import SbbBinarizer - assert bool(input_image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." - binarizer = SbbBinarizer(model_zoo=ctx.obj.model_zoo, mode=mode) - binarizer.run( - image_path=input_image, - use_patches=patches, - output=output, - dir_in=dir_in - ) - - -@main.command() -@click.option( - "--image", - "-i", - help="input image filename", - type=click.Path(exists=True, dir_okay=False), -) - -@click.option( - "--out", - "-o", - help="directory for output PAGE-XML files", - type=click.Path(exists=True, file_okay=False), - required=True, -) -@click.option( - "--overwrite", - "-O", - help="overwrite (instead of skipping) if output xml exists", - is_flag=True, -) -@click.option( - "--dir_in", - "-di", - help="directory of input images (instead of --image)", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--num_col_upper", - "-ncu", - help="lower limit of columns in document image", -) -@click.option( - "--num_col_lower", - "-ncl", - help="upper limit of columns in document image", -) -@click.option( - "--save_org_scale/--no_save_org_scale", - "-sos/-nosos", - is_flag=True, - help="if this parameter set to true, this tool will save the enhanced image in org scale.", -) -@click.pass_context -def enhancement(ctx, image, out, overwrite, dir_in, num_col_upper, num_col_lower, save_org_scale): - """ - Enhance image - """ - assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." 
- from .image_enhancer import Enhancer - enhancer = Enhancer( - model_zoo=ctx.obj.model_zoo, - num_col_upper=num_col_upper, - num_col_lower=num_col_lower, - save_org_scale=save_org_scale, - ) - enhancer.run(overwrite=overwrite, - dir_in=dir_in, - image_filename=image, - dir_out=out, - ) - -@main.command() -@click.option( - "--image", - "-i", - help="input image filename", - type=click.Path(exists=True, dir_okay=False), -) - -@click.option( - "--out", - "-o", - help="directory for output PAGE-XML files", - type=click.Path(exists=True, file_okay=False), - required=True, -) -@click.option( - "--overwrite", - "-O", - help="overwrite (instead of skipping) if output xml exists", - is_flag=True, -) -@click.option( - "--dir_in", - "-di", - help="directory of input images (instead of --image)", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--save_images", - "-si", - help="if a directory is given, images in documents will be cropped and saved there", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--save_layout", - "-sl", - help="if a directory is given, plot of layout will be saved there", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--save_deskewed", - "-sd", - help="if a directory is given, deskewed image will be saved there", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--save_all", - "-sa", - help="if a directory is given, all plots needed for documentation will be saved there", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--save_page", - "-sp", - help="if a directory is given, page crop of image will be saved there", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--enable-plotting/--disable-plotting", - "-ep/-noep", - is_flag=True, - help="If set, will plot intermediary files and images", -) -@click.option( - "--extract_only_images/--disable-extracting_only_images", - "-eoi/-noeoi", - is_flag=True, - help="If a directory is given, only images in documents will be cropped and saved there and the other processing will not be done", -) -@click.option( - "--allow-enhancement/--no-allow-enhancement", - "-ae/-noae", - is_flag=True, - help="if this parameter set to true, this tool would check that input image need resizing and enhancement or not. If so output of resized and enhanced image and corresponding layout data will be written in out directory", -) -@click.option( - "--curved-line/--no-curvedline", - "-cl/-nocl", - is_flag=True, - help="if this parameter set to true, this tool will try to return contoure of textlines instead of rectangle bounding box of textline. 
This should be taken into account that with this option the tool need more time to do process.", -) -@click.option( - "--textline_light/--no-textline_light", - "-tll/-notll", - is_flag=True, - help="if this parameter set to true, this tool will try to return contoure of textlines instead of rectangle bounding box of textline with a faster method.", -) -@click.option( - "--full-layout/--no-full-layout", - "-fl/-nofl", - is_flag=True, - help="if this parameter set to true, this tool will try to return all elements of layout.", -) -@click.option( - "--tables/--no-tables", - "-tab/-notab", - is_flag=True, - help="if this parameter set to true, this tool will try to detect tables.", -) -@click.option( - "--right2left/--left2right", - "-r2l/-l2r", - is_flag=True, - help="if this parameter set to true, this tool will extract right-to-left reading order.", -) -@click.option( - "--input_binary/--input-RGB", - "-ib/-irgb", - is_flag=True, - help="In general, eynollah uses RGB as input but if the input document is very dark, very bright or for any other reason you can turn on input binarization. When this flag is set, eynollah will binarize the RGB input document, you should always provide RGB images to eynollah.", -) -@click.option( - "--allow_scaling/--no-allow-scaling", - "-as/-noas", - is_flag=True, - help="if this parameter set to true, this tool would check the scale and if needed it will scale it to perform better layout detection", -) -@click.option( - "--headers_off/--headers-on", - "-ho/-noho", - is_flag=True, - help="if this parameter set to true, this tool would ignore headers role in reading order", -) -@click.option( - "--light_version/--original", - "-light/-org", - is_flag=True, - help="if this parameter set to true, this tool would use lighter version", -) -@click.option( - "--ignore_page_extraction/--extract_page_included", - "-ipe/-epi", - is_flag=True, - help="if this parameter set to true, this tool would ignore page extraction", -) -@click.option( - "--reading_order_machine_based/--heuristic_reading_order", - "-romb/-hro", - is_flag=True, - help="if this parameter set to true, this tool would apply machine based reading order detection", -) -@click.option( - "--num_col_upper", - "-ncu", - help="lower limit of columns in document image", -) -@click.option( - "--num_col_lower", - "-ncl", - help="upper limit of columns in document image", -) -@click.option( - "--threshold_art_class_layout", - "-tharl", - help="threshold of artifical class in the case of layout detection. The default value is 0.1", -) -@click.option( - "--threshold_art_class_textline", - "-thart", - help="threshold of artifical class in the case of textline detection. The default value is 0.1", -) -@click.option( - "--skip_layout_and_reading_order", - "-slro/-noslro", - is_flag=True, - help="if this parameter set to true, this tool will ignore layout detection and reading order. 
It means that textline detection will be done within printspace and contours of textline will be written in xml output file.", -) -@click.pass_context -def layout( - ctx, - image, - out, - overwrite, - dir_in, - save_images, - save_layout, - save_deskewed, - save_all, - extract_only_images, - save_page, - enable_plotting, - allow_enhancement, - curved_line, - textline_light, - full_layout, - tables, - right2left, - input_binary, - allow_scaling, - headers_off, - light_version, - reading_order_machine_based, - num_col_upper, - num_col_lower, - threshold_art_class_textline, - threshold_art_class_layout, - skip_layout_and_reading_order, - ignore_page_extraction, -): - """ - Detect Layout (with optional image enhancement and reading order detection) - """ - from .eynollah import Eynollah - assert enable_plotting or not save_layout, "Plotting with -sl also requires -ep" - assert enable_plotting or not save_deskewed, "Plotting with -sd also requires -ep" - assert enable_plotting or not save_all, "Plotting with -sa also requires -ep" - assert enable_plotting or not save_page, "Plotting with -sp also requires -ep" - assert enable_plotting or not save_images, "Plotting with -si also requires -ep" - assert enable_plotting or not allow_enhancement, "Plotting with -ae also requires -ep" - assert not enable_plotting or save_layout or save_deskewed or save_all or save_page or save_images or allow_enhancement, \ - "Plotting with -ep also requires -sl, -sd, -sa, -sp, -si or -ae" - assert textline_light == light_version, "Both light textline detection -tll and light version -light must be set or unset equally" - assert not extract_only_images or not allow_enhancement, "Image extraction -eoi can not be set alongside allow_enhancement -ae" - assert not extract_only_images or not allow_scaling, "Image extraction -eoi can not be set alongside allow_scaling -as" - assert not extract_only_images or not light_version, "Image extraction -eoi can not be set alongside light_version -light" - assert not extract_only_images or not curved_line, "Image extraction -eoi can not be set alongside curved_line -cl" - assert not extract_only_images or not textline_light, "Image extraction -eoi can not be set alongside textline_light -tll" - assert not extract_only_images or not full_layout, "Image extraction -eoi can not be set alongside full_layout -fl" - assert not extract_only_images or not tables, "Image extraction -eoi can not be set alongside tables -tab" - assert not extract_only_images or not right2left, "Image extraction -eoi can not be set alongside right2left -r2l" - assert not extract_only_images or not headers_off, "Image extraction -eoi can not be set alongside headers_off -ho" - assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." 
- eynollah = Eynollah( - model_zoo=ctx.obj.model_zoo, - extract_only_images=extract_only_images, - enable_plotting=enable_plotting, - allow_enhancement=allow_enhancement, - curved_line=curved_line, - textline_light=textline_light, - full_layout=full_layout, - tables=tables, - right2left=right2left, - input_binary=input_binary, - allow_scaling=allow_scaling, - headers_off=headers_off, - light_version=light_version, - ignore_page_extraction=ignore_page_extraction, - reading_order_machine_based=reading_order_machine_based, - num_col_upper=num_col_upper, - num_col_lower=num_col_lower, - skip_layout_and_reading_order=skip_layout_and_reading_order, - threshold_art_class_textline=threshold_art_class_textline, - threshold_art_class_layout=threshold_art_class_layout, - ) - eynollah.run(overwrite=overwrite, - image_filename=image, - dir_in=dir_in, - dir_out=out, - dir_of_cropped_images=save_images, - dir_of_layout=save_layout, - dir_of_deskewed=save_deskewed, - dir_of_all=save_all, - dir_save_page=save_page, - ) - -@main.command() -@click.option( - "--image", - "-i", - help="input image filename", - type=click.Path(exists=True, dir_okay=False), -) -@click.option( - "--dir_in", - "-di", - help="directory of input images (instead of --image)", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--dir_in_bin", - "-dib", - help=(""" - directory of binarized images (in addition to --dir_in for RGB - images; filename stems must match the RGB image files, with '.png' - \n - Perform prediction using both RGB and binary images. - (This does not necessarily improve results, however it may be beneficial - for certain document images. -"""), - - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--dir_xmls", - "-dx", - help="directory of input PAGE-XML files (in addition to --dir_in; filename stems must match the image files, with '.xml' suffix).", - type=click.Path(exists=True, file_okay=False), - required=True, -) -@click.option( - "--out", - "-o", - help="directory for output PAGE-XML files", - type=click.Path(exists=True, file_okay=False), - required=True, -) -@click.option( - "--dir_out_image_text", - "-doit", - help="directory for output images, newly rendered with predicted text", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--overwrite", - "-O", - help="overwrite (instead of skipping) if output xml exists", - is_flag=True, -) -@click.option( - "--tr_ocr", - "-trocr/-notrocr", - is_flag=True, - help="if this parameter set to true, transformer ocr will be applied, otherwise cnn_rnn model.", -) -@click.option( - "--export_textline_images_and_text", - "-etit/-noetit", - is_flag=True, - help="if this parameter set to true, images and text in xml will be exported into output dir. This files can be used for training a OCR engine.", -) -@click.option( - "--do_not_mask_with_textline_contour", - "-nmtc/-mtc", - is_flag=True, - help="if this parameter set to true, cropped textline images will not be masked with textline contour.", -) -@click.option( - "--batch_size", - "-bs", - help="number of inference batch size. Default b_s for trocr and cnn_rnn models are 2 and 8 respectively", -) -@click.option( - "--dataset_abbrevation", - "-ds_pref", - help="in the case of extracting textline and text from a xml GT file user can add an abbrevation of dataset name to generated dataset", -) -@click.option( - "--min_conf_value_of_textline_text", - "-min_conf", - help="minimum OCR confidence value. 
Text lines with a confidence value lower than this threshold will not be included in the output XML file.", -) -@click.pass_context -def ocr( - ctx, - image, - dir_in, - dir_in_bin, - dir_xmls, - out, - dir_out_image_text, - overwrite, - tr_ocr, - export_textline_images_and_text, - do_not_mask_with_textline_contour, - batch_size, - dataset_abbrevation, - min_conf_value_of_textline_text, -): - """ - Recognize text with a CNN/RNN or transformer ML model. - """ - assert not export_textline_images_and_text or not tr_ocr, "Exporting textline and text -etit can not be set alongside transformer ocr -tr_ocr" - # FIXME: refactor: move export_textline_images_and_text out of eynollah.py - # assert not export_textline_images_and_text or not model, "Exporting textline and text -etit can not be set alongside model -m" - assert not export_textline_images_and_text or not batch_size, "Exporting textline and text -etit can not be set alongside batch size -bs" - assert not export_textline_images_and_text or not dir_in_bin, "Exporting textline and text -etit can not be set alongside directory of bin images -dib" - assert not export_textline_images_and_text or not dir_out_image_text, "Exporting textline and text -etit can not be set alongside directory of images with predicted text -doit" - assert bool(image) != bool(dir_in), "Either -i (single image) or -di (directory) must be provided, but not both." - from .eynollah_ocr import Eynollah_ocr - eynollah_ocr = Eynollah_ocr( - model_zoo=ctx.obj.model_zoo, - tr_ocr=tr_ocr, - export_textline_images_and_text=export_textline_images_and_text, - do_not_mask_with_textline_contour=do_not_mask_with_textline_contour, - batch_size=batch_size, - pref_of_dataset=dataset_abbrevation, - min_conf_value_of_textline_text=min_conf_value_of_textline_text) - eynollah_ocr.run(overwrite=overwrite, - dir_in=dir_in, - dir_in_bin=dir_in_bin, - image_filename=image, - dir_xmls=dir_xmls, - dir_out_image_text=dir_out_image_text, - dir_out=out, - ) - -if __name__ == "__main__": - main() diff --git a/src/eynollah/cli/__init__.py b/src/eynollah/cli/__init__.py new file mode 100644 index 0000000..c0d1921 --- /dev/null +++ b/src/eynollah/cli/__init__.py @@ -0,0 +1,21 @@ +# NOTE: For predictable order of imports of torch/shapely/tensorflow +# this must be the first import of the CLI! 
+from ..eynollah_imports import imported_libs +from .cli_models import models_cli +from .cli_binarize import binarize_cli + +from .cli import main +from .cli_binarize import binarize_cli +from .cli_enhance import enhance_cli +from .cli_layout import layout_cli +from .cli_ocr import ocr_cli +from .cli_readingorder import readingorder_cli + +main.add_command(binarize_cli, 'binarization') +main.add_command(enhance_cli, 'enhancement') +main.add_command(layout_cli, 'layout') +main.add_command(readingorder_cli, 'machine-based-reading-order') +main.add_command(models_cli, 'models') +main.add_command(ocr_cli, 'ocr') + + diff --git a/src/eynollah/cli/cli.py b/src/eynollah/cli/cli.py new file mode 100644 index 0000000..dca6fd4 --- /dev/null +++ b/src/eynollah/cli/cli.py @@ -0,0 +1,66 @@ +from dataclasses import dataclass +import logging +import sys +import os +from typing import Union + +import click + +from ..model_zoo import EynollahModelZoo +from .cli_models import models_cli + +@dataclass() +class EynollahCliCtx: + """ + Holds options relevant for all eynollah subcommands + """ + model_zoo: EynollahModelZoo + log_level : Union[str, None] = 'INFO' + + +@click.group() +@click.option( + "--model-basedir", + "-m", + help="directory of models", + # NOTE: not mandatory to exist so --help for subcommands works but will log a warning + # and raise exception when trying to load models in the CLI + # type=click.Path(exists=True), + default=f'{os.getcwd()}/models_eynollah', +) +@click.option( + "--model-overrides", + "-mv", + help="override default versions of model categories, syntax is 'CATEGORY VARIANT PATH', e.g 'region light /path/to/model'. See eynollah list-models for the full list", + type=(str, str, str), + multiple=True, +) +@click.option( + "--log_level", + "-l", + type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), + help="Override log level globally to this", +) +@click.pass_context +def main(ctx, model_basedir, model_overrides, log_level): + """ + eynollah - Document Layout Analysis, Image Enhancement, OCR + """ + # Initialize logging + console_handler = logging.StreamHandler(sys.stdout) + console_handler.setLevel(logging.NOTSET) + formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s %(name)s - %(message)s', datefmt='%H:%M:%S') + console_handler.setFormatter(formatter) + logging.getLogger('eynollah').addHandler(console_handler) + logging.getLogger('eynollah').setLevel(log_level or logging.INFO) + # Initialize model zoo + model_zoo = EynollahModelZoo(basedir=model_basedir, model_overrides=model_overrides) + # Initialize CLI context + ctx.obj = EynollahCliCtx( + model_zoo=model_zoo, + log_level=log_level, + ) + + +if __name__ == "__main__": + main() diff --git a/src/eynollah/cli/cli_binarize.py b/src/eynollah/cli/cli_binarize.py new file mode 100644 index 0000000..c783028 --- /dev/null +++ b/src/eynollah/cli/cli_binarize.py @@ -0,0 +1,44 @@ +import click + +@click.command() +@click.option('--patches/--no-patches', default=True, help='by enabling this parameter you let the model to see the image in patches.') +@click.option( + "--input-image", "--image", + "-i", + help="input image filename", + type=click.Path(exists=True, dir_okay=False) +) +@click.option( + "--dir_in", + "-di", + help="directory of input images (instead of --image)", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--output", + "-o", + help="output image (if using -i) or output image directory (if using -di)", + type=click.Path(file_okay=True, dir_okay=True), + required=True, +) 
+@click.pass_context +def binarize_cli( + ctx, + patches, + input_image, + dir_in, + output, +): + """ + Binarize images with a ML model + """ + from eynollah.sbb_binarize import SbbBinarizer + assert bool(input_image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." + binarizer = SbbBinarizer(model_zoo=ctx.obj.model_zoo) + binarizer.run( + image_path=input_image, + use_patches=patches, + output=output, + dir_in=dir_in + ) + diff --git a/src/eynollah/cli/cli_enhance.py b/src/eynollah/cli/cli_enhance.py new file mode 100644 index 0000000..df9137c --- /dev/null +++ b/src/eynollah/cli/cli_enhance.py @@ -0,0 +1,63 @@ +import click + +@click.command() +@click.option( + "--image", + "-i", + help="input image filename", + type=click.Path(exists=True, dir_okay=False), +) +@click.option( + "--out", + "-o", + help="directory for output PAGE-XML files", + type=click.Path(exists=True, file_okay=False), + required=True, +) +@click.option( + "--overwrite", + "-O", + help="overwrite (instead of skipping) if output xml exists", + is_flag=True, +) +@click.option( + "--dir_in", + "-di", + help="directory of input images (instead of --image)", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--num_col_upper", + "-ncu", + help="lower limit of columns in document image", +) +@click.option( + "--num_col_lower", + "-ncl", + help="upper limit of columns in document image", +) +@click.option( + "--save_org_scale/--no_save_org_scale", + "-sos/-nosos", + is_flag=True, + help="if this parameter set to true, this tool will save the enhanced image in org scale.", +) +@click.pass_context +def enhance_cli(ctx, image, out, overwrite, dir_in, num_col_upper, num_col_lower, save_org_scale): + """ + Enhance image + """ + assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." 
+ from .image_enhancer import Enhancer + enhancer = Enhancer( + model_zoo=ctx.obj.model_zoo, + num_col_upper=num_col_upper, + num_col_lower=num_col_lower, + save_org_scale=save_org_scale, + ) + enhancer.run(overwrite=overwrite, + dir_in=dir_in, + image_filename=image, + dir_out=out, + ) + diff --git a/src/eynollah/cli/cli_layout.py b/src/eynollah/cli/cli_layout.py new file mode 100644 index 0000000..9a43b56 --- /dev/null +++ b/src/eynollah/cli/cli_layout.py @@ -0,0 +1,257 @@ +import click + +@click.command() +@click.option( + "--image", + "-i", + help="input image filename", + type=click.Path(exists=True, dir_okay=False), +) + +@click.option( + "--out", + "-o", + help="directory for output PAGE-XML files", + type=click.Path(exists=True, file_okay=False), + required=True, +) +@click.option( + "--overwrite", + "-O", + help="overwrite (instead of skipping) if output xml exists", + is_flag=True, +) +@click.option( + "--dir_in", + "-di", + help="directory of input images (instead of --image)", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--save_images", + "-si", + help="if a directory is given, images in documents will be cropped and saved there", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--save_layout", + "-sl", + help="if a directory is given, plot of layout will be saved there", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--save_deskewed", + "-sd", + help="if a directory is given, deskewed image will be saved there", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--save_all", + "-sa", + help="if a directory is given, all plots needed for documentation will be saved there", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--save_page", + "-sp", + help="if a directory is given, page crop of image will be saved there", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--enable-plotting/--disable-plotting", + "-ep/-noep", + is_flag=True, + help="If set, will plot intermediary files and images", +) +@click.option( + "--extract_only_images/--disable-extracting_only_images", + "-eoi/-noeoi", + is_flag=True, + help="If a directory is given, only images in documents will be cropped and saved there and the other processing will not be done", +) +@click.option( + "--allow-enhancement/--no-allow-enhancement", + "-ae/-noae", + is_flag=True, + help="if this parameter set to true, this tool would check that input image need resizing and enhancement or not. If so output of resized and enhanced image and corresponding layout data will be written in out directory", +) +@click.option( + "--curved-line/--no-curvedline", + "-cl/-nocl", + is_flag=True, + help="if this parameter set to true, this tool will try to return contoure of textlines instead of rectangle bounding box of textline. 
This should be taken into account that with this option the tool need more time to do process.", +) +@click.option( + "--textline_light/--no-textline_light", + "-tll/-notll", + is_flag=True, + help="if this parameter set to true, this tool will try to return contoure of textlines instead of rectangle bounding box of textline with a faster method.", +) +@click.option( + "--full-layout/--no-full-layout", + "-fl/-nofl", + is_flag=True, + help="if this parameter set to true, this tool will try to return all elements of layout.", +) +@click.option( + "--tables/--no-tables", + "-tab/-notab", + is_flag=True, + help="if this parameter set to true, this tool will try to detect tables.", +) +@click.option( + "--right2left/--left2right", + "-r2l/-l2r", + is_flag=True, + help="if this parameter set to true, this tool will extract right-to-left reading order.", +) +@click.option( + "--input_binary/--input-RGB", + "-ib/-irgb", + is_flag=True, + help="In general, eynollah uses RGB as input but if the input document is very dark, very bright or for any other reason you can turn on input binarization. When this flag is set, eynollah will binarize the RGB input document, you should always provide RGB images to eynollah.", +) +@click.option( + "--allow_scaling/--no-allow-scaling", + "-as/-noas", + is_flag=True, + help="if this parameter set to true, this tool would check the scale and if needed it will scale it to perform better layout detection", +) +@click.option( + "--headers_off/--headers-on", + "-ho/-noho", + is_flag=True, + help="if this parameter set to true, this tool would ignore headers role in reading order", +) +@click.option( + "--light_version/--original", + "-light/-org", + is_flag=True, + help="if this parameter set to true, this tool would use lighter version", +) +@click.option( + "--ignore_page_extraction/--extract_page_included", + "-ipe/-epi", + is_flag=True, + help="if this parameter set to true, this tool would ignore page extraction", +) +@click.option( + "--reading_order_machine_based/--heuristic_reading_order", + "-romb/-hro", + is_flag=True, + help="if this parameter set to true, this tool would apply machine based reading order detection", +) +@click.option( + "--num_col_upper", + "-ncu", + help="lower limit of columns in document image", +) +@click.option( + "--num_col_lower", + "-ncl", + help="upper limit of columns in document image", +) +@click.option( + "--threshold_art_class_layout", + "-tharl", + help="threshold of artifical class in the case of layout detection. The default value is 0.1", +) +@click.option( + "--threshold_art_class_textline", + "-thart", + help="threshold of artifical class in the case of textline detection. The default value is 0.1", +) +@click.option( + "--skip_layout_and_reading_order", + "-slro/-noslro", + is_flag=True, + help="if this parameter set to true, this tool will ignore layout detection and reading order. 
It means that textline detection will be done within printspace and contours of textline will be written in xml output file.", +) +@click.pass_context +def layout_cli( + ctx, + image, + out, + overwrite, + dir_in, + save_images, + save_layout, + save_deskewed, + save_all, + extract_only_images, + save_page, + enable_plotting, + allow_enhancement, + curved_line, + textline_light, + full_layout, + tables, + right2left, + input_binary, + allow_scaling, + headers_off, + light_version, + reading_order_machine_based, + num_col_upper, + num_col_lower, + threshold_art_class_textline, + threshold_art_class_layout, + skip_layout_and_reading_order, + ignore_page_extraction, +): + """ + Detect Layout (with optional image enhancement and reading order detection) + """ + from ..eynollah import Eynollah + assert enable_plotting or not save_layout, "Plotting with -sl also requires -ep" + assert enable_plotting or not save_deskewed, "Plotting with -sd also requires -ep" + assert enable_plotting or not save_all, "Plotting with -sa also requires -ep" + assert enable_plotting or not save_page, "Plotting with -sp also requires -ep" + assert enable_plotting or not save_images, "Plotting with -si also requires -ep" + assert enable_plotting or not allow_enhancement, "Plotting with -ae also requires -ep" + assert not enable_plotting or save_layout or save_deskewed or save_all or save_page or save_images or allow_enhancement, \ + "Plotting with -ep also requires -sl, -sd, -sa, -sp, -si or -ae" + assert textline_light == light_version, "Both light textline detection -tll and light version -light must be set or unset equally" + assert not extract_only_images or not allow_enhancement, "Image extraction -eoi can not be set alongside allow_enhancement -ae" + assert not extract_only_images or not allow_scaling, "Image extraction -eoi can not be set alongside allow_scaling -as" + assert not extract_only_images or not light_version, "Image extraction -eoi can not be set alongside light_version -light" + assert not extract_only_images or not curved_line, "Image extraction -eoi can not be set alongside curved_line -cl" + assert not extract_only_images or not textline_light, "Image extraction -eoi can not be set alongside textline_light -tll" + assert not extract_only_images or not full_layout, "Image extraction -eoi can not be set alongside full_layout -fl" + assert not extract_only_images or not tables, "Image extraction -eoi can not be set alongside tables -tab" + assert not extract_only_images or not right2left, "Image extraction -eoi can not be set alongside right2left -r2l" + assert not extract_only_images or not headers_off, "Image extraction -eoi can not be set alongside headers_off -ho" + assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." 
+ eynollah = Eynollah( + model_zoo=ctx.obj.model_zoo, + extract_only_images=extract_only_images, + enable_plotting=enable_plotting, + allow_enhancement=allow_enhancement, + curved_line=curved_line, + textline_light=textline_light, + full_layout=full_layout, + tables=tables, + right2left=right2left, + input_binary=input_binary, + allow_scaling=allow_scaling, + headers_off=headers_off, + light_version=light_version, + ignore_page_extraction=ignore_page_extraction, + reading_order_machine_based=reading_order_machine_based, + num_col_upper=num_col_upper, + num_col_lower=num_col_lower, + skip_layout_and_reading_order=skip_layout_and_reading_order, + threshold_art_class_textline=threshold_art_class_textline, + threshold_art_class_layout=threshold_art_class_layout, + ) + eynollah.run(overwrite=overwrite, + image_filename=image, + dir_in=dir_in, + dir_out=out, + dir_of_cropped_images=save_images, + dir_of_layout=save_layout, + dir_of_deskewed=save_deskewed, + dir_of_all=save_all, + dir_save_page=save_page, + ) + diff --git a/src/eynollah/cli_models.py b/src/eynollah/cli/cli_models.py similarity index 100% rename from src/eynollah/cli_models.py rename to src/eynollah/cli/cli_models.py diff --git a/src/eynollah/cli/cli_ocr.py b/src/eynollah/cli/cli_ocr.py new file mode 100644 index 0000000..7b87256 --- /dev/null +++ b/src/eynollah/cli/cli_ocr.py @@ -0,0 +1,132 @@ +import click + +@click.command() +@click.option( + "--image", + "-i", + help="input image filename", + type=click.Path(exists=True, dir_okay=False), +) +@click.option( + "--dir_in", + "-di", + help="directory of input images (instead of --image)", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--dir_in_bin", + "-dib", + help=(""" + directory of binarized images (in addition to --dir_in for RGB + images; filename stems must match the RGB image files, with '.png' + \n + Perform prediction using both RGB and binary images. + (This does not necessarily improve results, however it may be beneficial + for certain document images. +"""), + + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--dir_xmls", + "-dx", + help="directory of input PAGE-XML files (in addition to --dir_in; filename stems must match the image files, with '.xml' suffix).", + type=click.Path(exists=True, file_okay=False), + required=True, +) +@click.option( + "--out", + "-o", + help="directory for output PAGE-XML files", + type=click.Path(exists=True, file_okay=False), + required=True, +) +@click.option( + "--dir_out_image_text", + "-doit", + help="directory for output images, newly rendered with predicted text", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--overwrite", + "-O", + help="overwrite (instead of skipping) if output xml exists", + is_flag=True, +) +@click.option( + "--tr_ocr", + "-trocr/-notrocr", + is_flag=True, + help="if this parameter set to true, transformer ocr will be applied, otherwise cnn_rnn model.", +) +@click.option( + "--export_textline_images_and_text", + "-etit/-noetit", + is_flag=True, + help="if this parameter set to true, images and text in xml will be exported into output dir. This files can be used for training a OCR engine.", +) +@click.option( + "--do_not_mask_with_textline_contour", + "-nmtc/-mtc", + is_flag=True, + help="if this parameter set to true, cropped textline images will not be masked with textline contour.", +) +@click.option( + "--batch_size", + "-bs", + help="number of inference batch size. 
+) +@click.option( + "--dataset_abbrevation", + "-ds_pref", + help="in the case of extracting textlines and text from an XML GT file, the user can add an abbreviation of the dataset name to the generated dataset", +) +@click.option( + "--min_conf_value_of_textline_text", + "-min_conf", + help="minimum OCR confidence value. Text lines with a confidence value lower than this threshold will not be included in the output XML file.", +) +@click.pass_context +def ocr_cli( + ctx, + image, + dir_in, + dir_in_bin, + dir_xmls, + out, + dir_out_image_text, + overwrite, + tr_ocr, + export_textline_images_and_text, + do_not_mask_with_textline_contour, + batch_size, + dataset_abbrevation, + min_conf_value_of_textline_text, +): + """ + Recognize text with a CNN/RNN or transformer ML model. + """ + assert not export_textline_images_and_text or not tr_ocr, "Exporting textline and text -etit can not be set alongside transformer ocr -tr_ocr" + # FIXME: refactor: move export_textline_images_and_text out of eynollah.py + # assert not export_textline_images_and_text or not model, "Exporting textline and text -etit can not be set alongside model -m" + assert not export_textline_images_and_text or not batch_size, "Exporting textline and text -etit can not be set alongside batch size -bs" + assert not export_textline_images_and_text or not dir_in_bin, "Exporting textline and text -etit can not be set alongside directory of bin images -dib" + assert not export_textline_images_and_text or not dir_out_image_text, "Exporting textline and text -etit can not be set alongside directory of images with predicted text -doit" + assert bool(image) != bool(dir_in), "Either -i (single image) or -di (directory) must be provided, but not both." + from .eynollah_ocr import Eynollah_ocr + eynollah_ocr = Eynollah_ocr( + model_zoo=ctx.obj.model_zoo, + tr_ocr=tr_ocr, + export_textline_images_and_text=export_textline_images_and_text, + do_not_mask_with_textline_contour=do_not_mask_with_textline_contour, + batch_size=batch_size, + pref_of_dataset=dataset_abbrevation, + min_conf_value_of_textline_text=min_conf_value_of_textline_text) + eynollah_ocr.run(overwrite=overwrite, + dir_in=dir_in, + dir_in_bin=dir_in_bin, + image_filename=image, + dir_xmls=dir_xmls, + dir_out_image_text=dir_out_image_text, + dir_out=out, + ) diff --git a/src/eynollah/cli/cli_readingorder.py b/src/eynollah/cli/cli_readingorder.py new file mode 100644 index 0000000..d301b29 --- /dev/null +++ b/src/eynollah/cli/cli_readingorder.py @@ -0,0 +1,35 @@ +import click + +@click.command() +@click.option( + "--input", + "-i", + help="PAGE-XML input filename", + type=click.Path(exists=True, dir_okay=False), +) +@click.option( + "--dir_in", + "-di", + help="directory of PAGE-XML input files (instead of --input)", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--out", + "-o", + help="directory for output images", + type=click.Path(exists=True, file_okay=False), + required=True, +) +@click.pass_context +def readingorder_cli(ctx, input, dir_in, out): + """ + Generate ReadingOrder with a ML model + """ + from eynollah.mb_ro_on_layout import machine_based_reading_order_on_layout + assert bool(input) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both."
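+ # Run machine-based reading order detection with the shared model zoo, on the single PAGE-XML file or on every file in the input directory.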
+ orderer = machine_based_reading_order_on_layout(model_zoo=ctx.obj.model_zoo) + orderer.run(xml_filename=input, + dir_in=dir_in, + dir_out=out, + ) + diff --git a/src/eynollah/ocrd_cli.py b/src/eynollah/ocrd_cli.py index 8929927..acd8d4e 100644 --- a/src/eynollah/ocrd_cli.py +++ b/src/eynollah/ocrd_cli.py @@ -1,3 +1,6 @@ +# NOTE: For predictable order of imports of torch/shapely/tensorflow +# this must be the first import of the CLI! +from .eynollah_imports import imported_libs from .processor import EynollahProcessor from click import command from ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor From e503c1a0b79cee2371e20dc30ed310cf38ff6e36 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 26 Nov 2025 18:19:03 +0100 Subject: [PATCH 03/15] drop obsolete multi-model binarization --- src/eynollah/model_zoo/default_specs.py | 36 -------------- src/eynollah/ocrd_cli_binarization.py | 4 +- src/eynollah/sbb_binarize.py | 63 +++++++++++-------------- 3 files changed, 29 insertions(+), 74 deletions(-) diff --git a/src/eynollah/model_zoo/default_specs.py b/src/eynollah/model_zoo/default_specs.py index 2bbbf15..a720fa0 100644 --- a/src/eynollah/model_zoo/default_specs.py +++ b/src/eynollah/model_zoo/default_specs.py @@ -45,42 +45,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ type='Keras', ), - EynollahModelSpec( - category="binarization_multi_1", - variant='', - filename="models_eynollah/eynollah-binarization-multi_2020_01_16/model_bin1", - dist_url=dist_url("binarization"), - dists=['binarization'], - type='Keras', - ), - - EynollahModelSpec( - category="binarization_multi_2", - variant='', - filename="models_eynollah/eynollah-binarization-multi_2020_01_16/model_bin2", - dist_url=dist_url("binarization"), - dists=['binarization'], - type='Keras', - ), - - EynollahModelSpec( - category="binarization_multi_3", - variant='', - filename="models_eynollah/eynollah-binarization-multi_2020_01_16/model_bin3", - dist_url=dist_url("binarization"), - dists=['binarization'], - type='Keras', - ), - - EynollahModelSpec( - category="binarization_multi_4", - variant='', - filename="models_eynollah/eynollah-binarization-multi_2020_01_16/model_bin4", - dist_url=dist_url("binarization"), - dists=['binarization'], - type='Keras', - ), - EynollahModelSpec( category="col_classifier", variant='', diff --git a/src/eynollah/ocrd_cli_binarization.py b/src/eynollah/ocrd_cli_binarization.py index a199e72..f234520 100644 --- a/src/eynollah/ocrd_cli_binarization.py +++ b/src/eynollah/ocrd_cli_binarization.py @@ -40,7 +40,7 @@ class SbbBinarizeProcessor(Processor): # resolve relative path via OCR-D ResourceManager assert isinstance(self.parameter, frozendict) model_zoo = EynollahModelZoo(basedir=self.parameter['model']) - self.binarizer = SbbBinarizer(model_zoo=model_zoo, mode='single', logger=self.logger) + self.binarizer = SbbBinarizer(model_zoo=model_zoo, logger=self.logger) def process_page_pcgts(self, *input_pcgts: Optional[OcrdPage], page_id: Optional[str] = None) -> OcrdPageResult: """ @@ -103,7 +103,7 @@ class SbbBinarizeProcessor(Processor): line_image_bin = cv2pil(self.binarizer.run(image=pil2cv(line_image), use_patches=True)) # update PAGE (reference the image file): line_image_ref = AlternativeImageType(comments=line_xywh['features'] + ',binarized') - line.add_AlternativeImage(region_image_ref) + line.add_AlternativeImage(line_image_ref) result.images.append(OcrdPageResultImage(line_image_bin, line.id + '.IMG-BIN', line_image_ref)) return result diff --git a/src/eynollah/sbb_binarize.py 
b/src/eynollah/sbb_binarize.py index 77741e9..28dc84a 100644 --- a/src/eynollah/sbb_binarize.py +++ b/src/eynollah/sbb_binarize.py @@ -33,12 +33,10 @@ class SbbBinarizer: self, *, model_zoo: EynollahModelZoo, - mode: str, logger: Optional[logging.Logger] = None, ): self.logger = logger if logger else logging.getLogger('eynollah.binarization') - self.model_zoo = model_zoo - self.models = self.setup_models(mode) + self.models = (model_zoo.model_path('binarization'), model_zoo.load_model('binarization')) self.session = self.start_new_session() def start_new_session(self): @@ -49,12 +47,6 @@ class SbbBinarizer: tensorflow_backend.set_session(session) return session - def setup_models(self, mode: str) -> Dict[Path, AnyModel]: - return { - self.model_zoo.model_path(v): self.model_zoo.load_model(v) - for v in (['binarization'] if mode == 'single' else [f'binarization_multi_{i}' for i in range(1, 5)]) - } - def end_session(self): tensorflow_backend.clear_session() self.session.close() @@ -330,21 +322,21 @@ class SbbBinarizer: if image_path is not None: image = cv2.imread(image_path) img_last = 0 - for n, (model_file, model) in enumerate(self.models.items()): - self.logger.info('Predicting %s with model %s [%s/%s]', image_path if image_path else '[image]', model_file, n + 1, len(self.models.keys())) - res = self.predict(model, image, use_patches) + model_file, model = self.models + self.logger.info('Predicting %s with model %s [%s/%s]', image_path if image_path else '[image]', model_file) + res = self.predict(model, image, use_patches) - img_fin = np.zeros((res.shape[0], res.shape[1], 3)) - res[:, :][res[:, :] == 0] = 2 - res = res - 1 - res = res * 255 - img_fin[:, :, 0] = res - img_fin[:, :, 1] = res - img_fin[:, :, 2] = res + img_fin = np.zeros((res.shape[0], res.shape[1], 3)) + res[:, :][res[:, :] == 0] = 2 + res = res - 1 + res = res * 255 + img_fin[:, :, 0] = res + img_fin[:, :, 1] = res + img_fin[:, :, 2] = res - img_fin = img_fin.astype(np.uint8) - img_fin = (res[:, :] == 0) * 255 - img_last = img_last + img_fin + img_fin = img_fin.astype(np.uint8) + img_fin = (res[:, :] == 0) * 255 + img_last = img_last + img_fin kernel = np.ones((5, 5), np.uint8) img_last[:, :][img_last[:, :] > 0] = 255 @@ -361,22 +353,21 @@ class SbbBinarizer: self.logger.info('Binarizing [%3d/%d] %s', i + 1, len(ls_imgs), image_name) image = cv2.imread(os.path.join(dir_in,image_name) ) img_last = 0 - for n, (model_file, model) in enumerate(self.models.items()): - self.logger.info('Predicting %s with model %s [%s/%s]', image_name, model_file, n + 1, len(self.models.keys())) + model_file, model = self.models + self.logger.info('Predicting %s with model %s [%s/%s]', image_path if image_path else '[image]', model_file) + res = self.predict(model, image, use_patches) - res = self.predict(model, image, use_patches) + img_fin = np.zeros((res.shape[0], res.shape[1], 3)) + res[:, :][res[:, :] == 0] = 2 + res = res - 1 + res = res * 255 + img_fin[:, :, 0] = res + img_fin[:, :, 1] = res + img_fin[:, :, 2] = res - img_fin = np.zeros((res.shape[0], res.shape[1], 3)) - res[:, :][res[:, :] == 0] = 2 - res = res - 1 - res = res * 255 - img_fin[:, :, 0] = res - img_fin[:, :, 1] = res - img_fin[:, :, 2] = res - - img_fin = img_fin.astype(np.uint8) - img_fin = (res[:, :] == 0) * 255 - img_last = img_last + img_fin + img_fin = img_fin.astype(np.uint8) + img_fin = (res[:, :] == 0) * 255 + img_last = img_last + img_fin kernel = np.ones((5, 5), np.uint8) img_last[:, :][img_last[:, :] > 0] = 255 From 000af16a475e8203e9ae0d8db76e209cd9a3d5e7 
Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 26 Nov 2025 19:23:49 +0100 Subject: [PATCH 04/15] :fire: remove torch pinning --- requirements-ocr.txt | 2 +- src/eynollah/sbb_binarize.py | 18 +++++++----------- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/requirements-ocr.txt b/requirements-ocr.txt index 9f31ebb..8f3b062 100644 --- a/requirements-ocr.txt +++ b/requirements-ocr.txt @@ -1,2 +1,2 @@ -torch <= 2.0.1 +torch transformers <= 4.30.2 diff --git a/src/eynollah/sbb_binarize.py b/src/eynollah/sbb_binarize.py index 28dc84a..851ac7d 100644 --- a/src/eynollah/sbb_binarize.py +++ b/src/eynollah/sbb_binarize.py @@ -9,15 +9,13 @@ Tool to load model and binarize a given image. import os import logging -from pathlib import Path -from typing import Dict, Optional +from typing import Optional import numpy as np import cv2 from ocrd_utils import tf_disable_interactive_logs from eynollah.model_zoo import EynollahModelZoo -from eynollah.model_zoo.types import AnyModel tf_disable_interactive_logs() import tensorflow as tf from tensorflow.python.keras import backend as tensorflow_backend @@ -323,7 +321,7 @@ class SbbBinarizer: image = cv2.imread(image_path) img_last = 0 model_file, model = self.models - self.logger.info('Predicting %s with model %s [%s/%s]', image_path if image_path else '[image]', model_file) + self.logger.info('Predicting %s with model %s', image_path if image_path else '[image]', model_file) res = self.predict(model, image, use_patches) img_fin = np.zeros((res.shape[0], res.shape[1], 3)) @@ -338,7 +336,6 @@ class SbbBinarizer: img_fin = (res[:, :] == 0) * 255 img_last = img_last + img_fin - kernel = np.ones((5, 5), np.uint8) img_last[:, :][img_last[:, :] > 0] = 255 img_last = (img_last[:, :] == 0) * 255 if output: @@ -348,13 +345,13 @@ class SbbBinarizer: else: ls_imgs = list(filter(is_image_filename, os.listdir(dir_in))) self.logger.info("Found %d image files to binarize in %s", len(ls_imgs), dir_in) - for i, image_name in enumerate(ls_imgs): - image_stem = image_name.split('.')[0] - self.logger.info('Binarizing [%3d/%d] %s', i + 1, len(ls_imgs), image_name) - image = cv2.imread(os.path.join(dir_in,image_name) ) + for i, image_path in enumerate(ls_imgs): + self.logger.info('Binarizing [%3d/%d] %s', i + 1, len(ls_imgs), image_path) + image_stem = image_path.split('.')[0] + image = cv2.imread(os.path.join(dir_in,image_path) ) img_last = 0 model_file, model = self.models - self.logger.info('Predicting %s with model %s [%s/%s]', image_path if image_path else '[image]', model_file) + self.logger.info('Predicting %s with model %s', image_path if image_path else '[image]', model_file) res = self.predict(model, image, use_patches) img_fin = np.zeros((res.shape[0], res.shape[1], 3)) @@ -369,7 +366,6 @@ class SbbBinarizer: img_fin = (res[:, :] == 0) * 255 img_last = img_last + img_fin - kernel = np.ones((5, 5), np.uint8) img_last[:, :][img_last[:, :] > 0] = 255 img_last = (img_last[:, :] == 0) * 255 From 095b36c3896429143d19458f0c8f682587c1306f Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 26 Nov 2025 19:45:58 +0100 Subject: [PATCH 05/15] models: split into layout, extra and ocr layout: Everything not OCR or extra ocr: trocr/cnnrnn models extra: obsolete or niche models --- src/eynollah/model_zoo/default_specs.py | 44 ++++++++++++------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/src/eynollah/model_zoo/default_specs.py b/src/eynollah/model_zoo/default_specs.py index a720fa0..8138ec5 100644 --- a/src/eynollah/model_zoo/default_specs.py +++ 
b/src/eynollah/model_zoo/default_specs.py @@ -4,7 +4,7 @@ from .specs import EynollahModelSpec, EynollahModelSpecSet ZENODO = "https://zenodo.org/records/17295988/files" MODELS_VERSION = "v0_7_0" -def dist_url(dist_name: str) -> str: +def dist_url(dist_name: str="layout") -> str: return f'{ZENODO}/models_{dist_name}_{MODELS_VERSION}.zip' DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ @@ -14,7 +14,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ variant='', filename="models_eynollah/eynollah-enhancement_20210425", dists=['enhancement', 'layout', 'ci'], - dist_url=dist_url("enhancement"), + dist_url=dist_url(), type='Keras', ), @@ -23,7 +23,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ variant='hybrid', filename="models_eynollah/eynollah-binarization-hybrid_20230504/model_bin_hybrid_trans_cnn_sbb_ens", dists=['layout', 'binarization', ], - dist_url=dist_url("binarization"), + dist_url=dist_url(), type='Keras', ), @@ -32,7 +32,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ variant='20210309', filename="models_eynollah/eynollah-binarization_20210309", dists=['binarization'], - dist_url=dist_url("binarization"), + dist_url=dist_url("extra"), type='Keras', ), @@ -41,7 +41,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ variant='', filename="models_eynollah/eynollah-binarization_20210425", dists=['binarization'], - dist_url=dist_url("binarization"), + dist_url=dist_url("extra"), type='Keras', ), @@ -49,7 +49,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ category="col_classifier", variant='', filename="models_eynollah/eynollah-column-classifier_20210425", - dist_url=dist_url("layout"), + dist_url=dist_url(), dists=['layout'], type='Keras', ), @@ -58,7 +58,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ category="page", variant='', filename="models_eynollah/model_eynollah_page_extraction_20250915", - dist_url=dist_url("layout"), + dist_url=dist_url(), dists=['layout'], type='Keras', ), @@ -67,7 +67,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ category="region", variant='', filename="models_eynollah/eynollah-main-regions-ensembled_20210425", - dist_url=dist_url("layout"), + dist_url=dist_url(), dists=['layout'], type='Keras', ), @@ -76,7 +76,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ category="region", variant='extract_only_images', filename="models_eynollah/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18", - dist_url=dist_url("layout"), + dist_url=dist_url(), dists=['layout'], type='Keras', ), @@ -85,7 +85,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ category="region", variant='light', filename="models_eynollah/eynollah-main-regions_20220314", - dist_url=dist_url("layout"), + dist_url=dist_url(), help="early layout", dists=['layout'], type='Keras', @@ -95,7 +95,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ category="region_p2", variant='', filename="models_eynollah/eynollah-main-regions-aug-rotation_20210425", - dist_url=dist_url("layout"), + dist_url=dist_url(), help="early layout, non-light, 2nd part", dists=['layout'], type='Keras', @@ -110,7 +110,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ #filename="models_eynollah/modelens_1_2_4_5_early_lay_1_2_spaltige", #filename="models_eynollah/model_3_eraly_layout_no_patches_1_2_spaltige", filename="models_eynollah/modelens_e_l_all_sp_0_1_2_3_4_171024", - dist_url=dist_url("layout"), + dist_url=dist_url("all"), dists=['layout'], help="early layout, light, 1-or-2-column", type='Keras', @@ -126,7 +126,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ 
#'filename="models_eynollah/modelens_full_lay_1_2_221024", #'filename="models_eynollah/eynollah-full-regions-1column_20210425", filename="models_eynollah/modelens_full_lay_1__4_3_091124", - dist_url=dist_url("layout"), + dist_url=dist_url(), help="full layout / no patches", dists=['layout'], type='Keras', @@ -146,7 +146,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ # filename="models_eynollah/modelens_full_layout_24_till_28", # filename="models_eynollah/model_2_full_layout_new_trans", filename="models_eynollah/modelens_full_lay_1__4_3_091124", - dist_url=dist_url("layout"), + dist_url=dist_url(), help="full layout / with patches", dists=['layout'], type='Keras', @@ -161,7 +161,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ #filename="models_eynollah/model_mb_ro_aug_ens_8", #filename="models_eynollah/model_ens_reading_order_machine_based", filename="models_eynollah/model_eynollah_reading_order_20250824", - dist_url=dist_url("reading_order"), + dist_url=dist_url(), dists=['layout', 'reading_order'], type='Keras', ), @@ -176,7 +176,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ #filename="models_eynollah/modelens_textline_9_12_13_14_15", #filename="models_eynollah/eynollah-textline_20210425", filename="models_eynollah/modelens_textline_0_1__2_4_16092024", - dist_url=dist_url("layout"), + dist_url=dist_url(), dists=['layout'], type='Keras', ), @@ -186,7 +186,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ variant='light', #filename="models_eynollah/eynollah-textline_light_20210425", filename="models_eynollah/modelens_textline_0_1__2_4_16092024", - dist_url=dist_url("layout"), + dist_url=dist_url(), dists=['layout'], type='Keras', ), @@ -195,7 +195,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ category="table", variant='', filename="models_eynollah/eynollah-tables_20210319", - dist_url=dist_url("layout"), + dist_url=dist_url(), dists=['layout'], type='Keras', ), @@ -204,7 +204,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ category="table", variant='light', filename="models_eynollah/modelens_table_0t4_201124", - dist_url=dist_url("layout"), + dist_url=dist_url(), dists=['layout'], type='Keras', ), @@ -250,7 +250,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ category="ocr", variant='tr', filename="models_eynollah/model_eynollah_ocr_trocr_20250919", - dist_url=dist_url("trocr"), + dist_url=dist_url("ocr"), help='much slower transformer-based', dists=['trocr'], type='Keras', @@ -260,7 +260,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ category="trocr_processor", variant='', filename="models_eynollah/model_eynollah_ocr_trocr_20250919", - dist_url=dist_url("trocr"), + dist_url=dist_url("ocr"), dists=['trocr'], type='TrOCRProcessor', ), @@ -269,7 +269,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ category="trocr_processor", variant='htr', filename="models_eynollah/microsoft/trocr-base-handwritten", - dist_url=dist_url("trocr"), + dist_url=dist_url("extra"), dists=['trocr'], type='TrOCRProcessor', ), From ca83cf934d01030745c72791d3ff390371385a4b Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 26 Nov 2025 20:48:14 +0100 Subject: [PATCH 06/15] fix imports from src/cli/cli_*/*_cli --- src/eynollah/cli/cli_binarize.py | 2 +- src/eynollah/cli/cli_enhance.py | 2 +- src/eynollah/cli/cli_ocr.py | 2 +- src/eynollah/cli/cli_readingorder.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/eynollah/cli/cli_binarize.py b/src/eynollah/cli/cli_binarize.py index c783028..d4a6e31 100644 --- a/src/eynollah/cli/cli_binarize.py +++ b/src/eynollah/cli/cli_binarize.py @@ 
-32,7 +32,7 @@ def binarize_cli( """ Binarize images with a ML model """ - from eynollah.sbb_binarize import SbbBinarizer + from ..sbb_binarize import SbbBinarizer assert bool(input_image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." binarizer = SbbBinarizer(model_zoo=ctx.obj.model_zoo) binarizer.run( diff --git a/src/eynollah/cli/cli_enhance.py b/src/eynollah/cli/cli_enhance.py index df9137c..fa4158b 100644 --- a/src/eynollah/cli/cli_enhance.py +++ b/src/eynollah/cli/cli_enhance.py @@ -48,7 +48,7 @@ def enhance_cli(ctx, image, out, overwrite, dir_in, num_col_upper, num_col_lower Enhance image """ assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." - from .image_enhancer import Enhancer + from ..image_enhancer import Enhancer enhancer = Enhancer( model_zoo=ctx.obj.model_zoo, num_col_upper=num_col_upper, diff --git a/src/eynollah/cli/cli_ocr.py b/src/eynollah/cli/cli_ocr.py index 7b87256..962ee9b 100644 --- a/src/eynollah/cli/cli_ocr.py +++ b/src/eynollah/cli/cli_ocr.py @@ -113,7 +113,7 @@ def ocr_cli( assert not export_textline_images_and_text or not dir_in_bin, "Exporting textline and text -etit can not be set alongside directory of bin images -dib" assert not export_textline_images_and_text or not dir_out_image_text, "Exporting textline and text -etit can not be set alongside directory of images with predicted text -doit" assert bool(image) != bool(dir_in), "Either -i (single image) or -di (directory) must be provided, but not both." - from .eynollah_ocr import Eynollah_ocr + from ..eynollah_ocr import Eynollah_ocr eynollah_ocr = Eynollah_ocr( model_zoo=ctx.obj.model_zoo, tr_ocr=tr_ocr, diff --git a/src/eynollah/cli/cli_readingorder.py b/src/eynollah/cli/cli_readingorder.py index d301b29..0f44b7f 100644 --- a/src/eynollah/cli/cli_readingorder.py +++ b/src/eynollah/cli/cli_readingorder.py @@ -25,7 +25,7 @@ def readingorder_cli(ctx, input, dir_in, out): """ Generate ReadingOrder with a ML model """ - from eynollah.mb_ro_on_layout import machine_based_reading_order_on_layout + from ..mb_ro_on_layout import machine_based_reading_order_on_layout assert bool(input) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." 
orderer = machine_based_reading_order_on_layout(model_zoo=ctx.obj.model_zoo) orderer.run(xml_filename=input, From 83e8b289da8d9f6fd2f13beeac5edef6fa0aae47 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 26 Nov 2025 20:29:29 +0100 Subject: [PATCH 07/15] :fire: drop light_version/textline_light (now default and implied) --- README.md | 2 - src/eynollah/cli/cli_layout.py | 19 - src/eynollah/eynollah.py | 641 +++++------------------- src/eynollah/image_enhancer.py | 26 +- src/eynollah/mb_ro_on_layout.py | 10 +- src/eynollah/model_zoo/default_specs.py | 55 +- src/eynollah/model_zoo/model_zoo.py | 3 +- src/eynollah/model_zoo/specs.py | 2 - src/eynollah/ocrd-tool.json | 10 - src/eynollah/processor.py | 6 - src/eynollah/utils/drop_capitals.py | 3 +- src/eynollah/utils/marginals.py | 107 +--- src/eynollah/utils/separate_lines.py | 18 +- src/eynollah/utils/utils_ocr.py | 3 +- src/eynollah/writer.py | 7 +- tests/cli_tests/test_layout.py | 3 - 16 files changed, 183 insertions(+), 732 deletions(-) diff --git a/README.md b/README.md index 59da45a..8640ac5 100644 --- a/README.md +++ b/README.md @@ -103,8 +103,6 @@ The following options can be used to further configure the processing: | option | description | |-------------------|:--------------------------------------------------------------------------------------------| | `-fl` | full layout analysis including all steps and segmentation classes (recommended) | -| `-light` | lighter and faster but simpler method for main region detection and deskewing (recommended) | -| `-tll` | this indicates the light textline and should be passed with light version (recommended) | | `-tab` | apply table detection | | `-ae` | apply enhancement (the resulting image is saved to the output directory) | | `-as` | apply scaling | diff --git a/src/eynollah/cli/cli_layout.py b/src/eynollah/cli/cli_layout.py index 9a43b56..7d6bbed 100644 --- a/src/eynollah/cli/cli_layout.py +++ b/src/eynollah/cli/cli_layout.py @@ -81,12 +81,6 @@ import click is_flag=True, help="if this parameter set to true, this tool will try to return contoure of textlines instead of rectangle bounding box of textline. 
This should be taken into account that with this option the tool need more time to do process.", ) -@click.option( - "--textline_light/--no-textline_light", - "-tll/-notll", - is_flag=True, - help="if this parameter set to true, this tool will try to return contoure of textlines instead of rectangle bounding box of textline with a faster method.", -) @click.option( "--full-layout/--no-full-layout", "-fl/-nofl", @@ -123,12 +117,6 @@ import click is_flag=True, help="if this parameter set to true, this tool would ignore headers role in reading order", ) -@click.option( - "--light_version/--original", - "-light/-org", - is_flag=True, - help="if this parameter set to true, this tool would use lighter version", -) @click.option( "--ignore_page_extraction/--extract_page_included", "-ipe/-epi", @@ -183,14 +171,12 @@ def layout_cli( enable_plotting, allow_enhancement, curved_line, - textline_light, full_layout, tables, right2left, input_binary, allow_scaling, headers_off, - light_version, reading_order_machine_based, num_col_upper, num_col_lower, @@ -211,12 +197,9 @@ def layout_cli( assert enable_plotting or not allow_enhancement, "Plotting with -ae also requires -ep" assert not enable_plotting or save_layout or save_deskewed or save_all or save_page or save_images or allow_enhancement, \ "Plotting with -ep also requires -sl, -sd, -sa, -sp, -si or -ae" - assert textline_light == light_version, "Both light textline detection -tll and light version -light must be set or unset equally" assert not extract_only_images or not allow_enhancement, "Image extraction -eoi can not be set alongside allow_enhancement -ae" assert not extract_only_images or not allow_scaling, "Image extraction -eoi can not be set alongside allow_scaling -as" - assert not extract_only_images or not light_version, "Image extraction -eoi can not be set alongside light_version -light" assert not extract_only_images or not curved_line, "Image extraction -eoi can not be set alongside curved_line -cl" - assert not extract_only_images or not textline_light, "Image extraction -eoi can not be set alongside textline_light -tll" assert not extract_only_images or not full_layout, "Image extraction -eoi can not be set alongside full_layout -fl" assert not extract_only_images or not tables, "Image extraction -eoi can not be set alongside tables -tab" assert not extract_only_images or not right2left, "Image extraction -eoi can not be set alongside right2left -r2l" @@ -228,14 +211,12 @@ def layout_cli( enable_plotting=enable_plotting, allow_enhancement=allow_enhancement, curved_line=curved_line, - textline_light=textline_light, full_layout=full_layout, tables=tables, right2left=right2left, input_binary=input_binary, allow_scaling=allow_scaling, headers_off=headers_off, - light_version=light_version, ignore_page_extraction=ignore_page_extraction, reading_order_machine_based=reading_order_machine_based, num_col_upper=num_col_upper, diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 45fabd1..c0e94e3 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -36,7 +36,6 @@ from functools import partial from pathlib import Path from multiprocessing import cpu_count import gc -import copy from concurrent.futures import ProcessPoolExecutor import cv2 @@ -51,13 +50,6 @@ import statistics tf_disable_interactive_logs() import tensorflow as tf -# warnings.filterwarnings("ignore") -from tensorflow.python.keras import backend as K -from tensorflow.keras.models import load_model -# use tf1 compatibility for keras backend -from 
tensorflow.compat.v1.keras.backend import set_session -from tensorflow.keras import layers -from tensorflow.keras.layers import StringLookup try: import torch except ImportError: @@ -71,16 +63,13 @@ from .model_zoo import EynollahModelZoo from .utils.contour import ( filter_contours_area_of_image, filter_contours_area_of_image_tables, - find_contours_mean_y_diff, find_center_of_contours, find_new_features_of_contours, find_features_of_contours, get_text_region_boxes_by_given_contours, - get_textregion_contours_in_org_image, get_textregion_contours_in_org_image_light, return_contours_of_image, return_contours_of_interested_region, - return_contours_of_interested_textline, return_parent_contours, dilate_textregion_contours, dilate_textline_contours, @@ -93,40 +82,30 @@ from .utils.rotate import ( rotate_image, rotation_not_90_func, rotation_not_90_func_full_layout, - rotation_image_new ) from .utils.separate_lines import ( - separate_lines_new2, return_deskew_slop, do_work_of_slopes_new, do_work_of_slopes_new_curved, do_work_of_slopes_new_light, ) -from .utils.drop_capitals import ( - adhere_drop_capital_region_into_corresponding_textline, - filter_small_drop_capitals_from_no_patch_layout -) from .utils.marginals import get_marginals from .utils.resize import resize_image from .utils.shm import share_ndarray from .utils import ( is_image_filename, - boosting_headers_by_longshot_region_segmentation, crop_image_inside_box, box2rect, - box2slice, find_num_col, otsu_copy_binary, - put_drop_out_from_only_drop_model, putt_bb_of_drop_capitals_of_model_in_patches_in_layout, - check_any_text_region_in_model_one_is_main_or_header, check_any_text_region_in_model_one_is_main_or_header_light, small_textlines_to_parent_adherence2, order_of_regions, find_number_of_columns_in_document, return_boxes_of_images_by_order_of_reading_new ) -from .utils.pil_cv2 import check_dpi, pil2cv +from .utils.pil_cv2 import pil2cv from .utils.xml import order_and_id_of_texts from .plot import EynollahPlotter from .writer import EynollahXmlWriter @@ -153,14 +132,12 @@ class Eynollah: enable_plotting : bool = False, allow_enhancement : bool = False, curved_line : bool = False, - textline_light : bool = False, full_layout : bool = False, tables : bool = False, right2left : bool = False, input_binary : bool = False, allow_scaling : bool = False, headers_off : bool = False, - light_version : bool = False, ignore_page_extraction : bool = False, reading_order_machine_based : bool = False, num_col_upper : Optional[int] = None, @@ -174,14 +151,10 @@ class Eynollah: self.model_zoo = model_zoo self.plotter = None - if skip_layout_and_reading_order: - textline_light = True - self.light_version = light_version self.reading_order_machine_based = reading_order_machine_based self.enable_plotting = enable_plotting self.allow_enhancement = allow_enhancement self.curved_line = curved_line - self.textline_light = textline_light self.full_layout = full_layout self.tables = tables self.right2left = right2left @@ -189,7 +162,6 @@ class Eynollah: self.input_binary = input_binary self.allow_scaling = allow_scaling self.headers_off = headers_off - self.light_version = light_version self.extract_only_images = extract_only_images self.ignore_page_extraction = ignore_page_extraction self.skip_layout_and_reading_order = skip_layout_and_reading_order @@ -244,23 +216,18 @@ class Eynollah: "col_classifier", "binarization", "page", - ("region", 'extract_only_images' if self.extract_only_images else 'light' if self.light_version else '') + ("region", 
'extract_only_images' if self.extract_only_images else '') ] if not self.extract_only_images: - loadable.append(("textline", 'light' if self.light_version else '')) - if self.light_version: - loadable.append("region_1_2") - else: - loadable.append("region_p2") - # if self.allow_enhancement:? - loadable.append("enhancement") + loadable.append(("textline")) + loadable.append("region_1_2") if self.full_layout: loadable.append("region_fl_np") #loadable.append("region_fl") if self.reading_order_machine_based: loadable.append("reading_order") if self.tables: - loadable.append(("table", 'light' if self.light_version else '')) + loadable.append(("table")) self.model_zoo.load_models(*loadable) @@ -286,16 +253,10 @@ class Eynollah: t_c0 = time.time() if image_filename: ret['img'] = cv2.imread(image_filename) - if self.light_version: - self.dpi = 100 - else: - self.dpi = check_dpi(image_filename) + self.dpi = 100 else: ret['img'] = pil2cv(image_pil) - if self.light_version: - self.dpi = 100 - else: - self.dpi = check_dpi(image_pil) + self.dpi = 100 ret['img_grayscale'] = cv2.cvtColor(ret['img'], cv2.COLOR_BGR2GRAY) for prefix in ('', '_grayscale'): ret[f'img{prefix}_uint8'] = ret[f'img{prefix}'].astype(np.uint8) @@ -309,8 +270,7 @@ class Eynollah: self.writer = EynollahXmlWriter( dir_out=dir_out, image_filename=image_filename, - curved_line=self.curved_line, - textline_light = self.textline_light) + curved_line=self.curved_line) def imread(self, grayscale=False, uint8=True): key = 'img' @@ -555,7 +515,7 @@ class Eynollah: return img, img_new, is_image_enhanced - def resize_and_enhance_image_with_column_classifier(self, light_version): + def resize_and_enhance_image_with_column_classifier(self): self.logger.debug("enter resize_and_enhance_image_with_column_classifier") dpi = self.dpi self.logger.info("Detected %s DPI", dpi) @@ -638,19 +598,16 @@ class Eynollah: self.logger.info("Found %d columns (%s)", num_col, np.around(label_p_pred, decimals=5)) if not self.extract_only_images: if dpi < DPI_THRESHOLD: - if light_version and num_col in (1,2): + if num_col in (1,2): img_new, num_column_is_classified = self.calculate_width_height_by_columns_1_2( img, num_col, width_early, label_p_pred) else: img_new, num_column_is_classified = self.calculate_width_height_by_columns( img, num_col, width_early, label_p_pred) - if light_version: - image_res = np.copy(img_new) - else: - image_res = self.predict_enhancement(img_new) + image_res = np.copy(img_new) is_image_enhanced = True else: - if light_version and num_col in (1,2): + if num_col in (1,2): img_new, num_column_is_classified = self.calculate_width_height_by_columns_1_2( img, num_col, width_early, label_p_pred) image_res = np.copy(img_new) @@ -1550,9 +1507,8 @@ class Eynollah: img_width_h = img.shape[1] model_region = self.model_zoo.get("region_fl") if patches else self.model_zoo.get("region_fl_np") - if self.light_version: - thresholding_for_fl_light_version = True - elif not patches: + thresholding_for_fl_light_version = True + if not patches: img = otsu_copy_binary(img).astype(np.uint8) prediction_regions = None thresholding_for_fl_light_version = False @@ -1747,7 +1703,6 @@ class Eynollah: results = self.executor.map(partial(do_work_of_slopes_new_light, textline_mask_tot_ea=textline_mask_tot_shared, slope_deskew=slope_deskew, - textline_light=self.textline_light, logger=self.logger,), boxes, contours, contours_par) results = list(results) # exhaust prior to release @@ -1810,78 +1765,17 @@ class Eynollah: prediction_textline = 
self.do_prediction(use_patches, img, self.model_zoo.get("textline"), marginal_of_patch_percent=0.15, n_batch_inference=3, - thresholding_for_artificial_class_in_light_version=self.textline_light, threshold_art_class_textline=self.threshold_art_class_textline) - #if not self.textline_light: - #if num_col_classifier==1: - #prediction_textline_nopatch = self.do_prediction(False, img, self.model_zoo.get_model("textline")) - #prediction_textline[:,:][prediction_textline_nopatch[:,:]==0] = 0 prediction_textline = resize_image(prediction_textline, img_h, img_w) textline_mask_tot_ea_art = (prediction_textline[:,:]==2)*1 old_art = np.copy(textline_mask_tot_ea_art) - if not self.textline_light: - textline_mask_tot_ea_art = textline_mask_tot_ea_art.astype('uint8') - #textline_mask_tot_ea_art = cv2.dilate(textline_mask_tot_ea_art, KERNEL, iterations=1) - prediction_textline[:,:][textline_mask_tot_ea_art[:,:]==1]=2 - """ - else: - textline_mask_tot_ea_art = textline_mask_tot_ea_art.astype('uint8') - hor_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 1)) - - kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)) - ##cv2.imwrite('textline_mask_tot_ea_art.png', textline_mask_tot_ea_art) - textline_mask_tot_ea_art = cv2.dilate(textline_mask_tot_ea_art, hor_kernel, iterations=1) - - ###cv2.imwrite('dil_textline_mask_tot_ea_art.png', dil_textline_mask_tot_ea_art) - - textline_mask_tot_ea_art = textline_mask_tot_ea_art.astype('uint8') - - #print(np.shape(dil_textline_mask_tot_ea_art), np.unique(dil_textline_mask_tot_ea_art), 'dil_textline_mask_tot_ea_art') - tsk = time.time() - skeleton_art_textline = skeletonize(textline_mask_tot_ea_art[:,:,0]) - - skeleton_art_textline = skeleton_art_textline*1 - - skeleton_art_textline = skeleton_art_textline.astype('uint8') - - skeleton_art_textline = cv2.dilate(skeleton_art_textline, kernel, iterations=1) - - #print(np.unique(skeleton_art_textline), np.shape(skeleton_art_textline)) - - #print(skeleton_art_textline, np.unique(skeleton_art_textline)) - - #cv2.imwrite('skeleton_art_textline.png', skeleton_art_textline) - - - prediction_textline[:,:,0][skeleton_art_textline[:,:]==1]=2 - - #cv2.imwrite('prediction_textline1.png', prediction_textline[:,:,0]) - - ##hor_kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 1)) - ##ver_kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 3)) - ##textline_mask_tot_ea_main = (prediction_textline[:,:]==1)*1 - ##textline_mask_tot_ea_main = textline_mask_tot_ea_main.astype('uint8') - - ##dil_textline_mask_tot_ea_main = cv2.erode(textline_mask_tot_ea_main, ver_kernel2, iterations=1) - - ##dil_textline_mask_tot_ea_main = cv2.dilate(textline_mask_tot_ea_main, hor_kernel2, iterations=1) - - ##dil_textline_mask_tot_ea_main = cv2.dilate(textline_mask_tot_ea_main, ver_kernel2, iterations=1) - - ##prediction_textline[:,:][dil_textline_mask_tot_ea_main[:,:]==1]=1 - - """ textline_mask_tot_ea_lines = (prediction_textline[:,:]==1)*1 textline_mask_tot_ea_lines = textline_mask_tot_ea_lines.astype('uint8') - if not self.textline_light: - textline_mask_tot_ea_lines = cv2.dilate(textline_mask_tot_ea_lines, KERNEL, iterations=1) prediction_textline[:,:][textline_mask_tot_ea_lines[:,:]==1]=1 - if not self.textline_light: - prediction_textline[:,:][old_art[:,:]==1]=2 #cv2.imwrite('prediction_textline2.png', prediction_textline[:,:,0]) @@ -2649,92 +2543,9 @@ class Eynollah: img_height_h = img_org.shape[0] img_width_h = img_org.shape[1] patches = False - if self.light_version: - prediction_table, _ = 
self.do_prediction_new_concept(patches, img, self.model_zoo.get("table")) - prediction_table = prediction_table.astype(np.int16) - return prediction_table[:,:,0] - else: - if num_col_classifier < 4 and num_col_classifier > 2: - prediction_table = self.do_prediction(patches, img, self.model_zoo.get("table")) - pre_updown = self.do_prediction(patches, cv2.flip(img[:,:,:], -1), self.model_zoo.get("table")) - pre_updown = cv2.flip(pre_updown, -1) - - prediction_table[:,:,0][pre_updown[:,:,0]==1]=1 - prediction_table = prediction_table.astype(np.int16) - - elif num_col_classifier ==2: - height_ext = 0 # img.shape[0] // 4 - h_start = height_ext // 2 - width_ext = img.shape[1] // 8 - w_start = width_ext // 2 - - img_new = np.zeros((img.shape[0] + height_ext, - img.shape[1] + width_ext, - img.shape[2])).astype(float) - ys = slice(h_start, h_start + img.shape[0]) - xs = slice(w_start, w_start + img.shape[1]) - img_new[ys, xs] = img - - prediction_ext = self.do_prediction(patches, img_new, self.model_zoo.get("table")) - pre_updown = self.do_prediction(patches, cv2.flip(img_new[:,:,:], -1), self.model_zoo.get("table")) - pre_updown = cv2.flip(pre_updown, -1) - - prediction_table = prediction_ext[ys, xs] - prediction_table_updown = pre_updown[ys, xs] - - prediction_table[:,:,0][prediction_table_updown[:,:,0]==1]=1 - prediction_table = prediction_table.astype(np.int16) - elif num_col_classifier ==1: - height_ext = 0 # img.shape[0] // 4 - h_start = height_ext // 2 - width_ext = img.shape[1] // 4 - w_start = width_ext // 2 - - img_new =np.zeros((img.shape[0] + height_ext, - img.shape[1] + width_ext, - img.shape[2])).astype(float) - ys = slice(h_start, h_start + img.shape[0]) - xs = slice(w_start, w_start + img.shape[1]) - img_new[ys, xs] = img - - prediction_ext = self.do_prediction(patches, img_new, self.model_zoo.get("table")) - pre_updown = self.do_prediction(patches, cv2.flip(img_new[:,:,:], -1), self.model_zoo.get("table")) - pre_updown = cv2.flip(pre_updown, -1) - - prediction_table = prediction_ext[ys, xs] - prediction_table_updown = pre_updown[ys, xs] - - prediction_table[:,:,0][prediction_table_updown[:,:,0]==1]=1 - prediction_table = prediction_table.astype(np.int16) - else: - prediction_table = np.zeros(img.shape) - img_w_half = img.shape[1] // 2 - - pre1 = self.do_prediction(patches, img[:,0:img_w_half,:], self.model_zoo.get("table")) - pre2 = self.do_prediction(patches, img[:,img_w_half:,:], self.model_zoo.get("table")) - pre_full = self.do_prediction(patches, img[:,:,:], self.model_zoo.get("table")) - pre_updown = self.do_prediction(patches, cv2.flip(img[:,:,:], -1), self.model_zoo.get("table")) - pre_updown = cv2.flip(pre_updown, -1) - - prediction_table_full_erode = cv2.erode(pre_full[:,:,0], KERNEL, iterations=4) - prediction_table_full_erode = cv2.dilate(prediction_table_full_erode, KERNEL, iterations=4) - - prediction_table_full_updown_erode = cv2.erode(pre_updown[:,:,0], KERNEL, iterations=4) - prediction_table_full_updown_erode = cv2.dilate(prediction_table_full_updown_erode, KERNEL, iterations=4) - - prediction_table[:,0:img_w_half,:] = pre1[:,:,:] - prediction_table[:,img_w_half:,:] = pre2[:,:,:] - - prediction_table[:,:,0][prediction_table_full_erode[:,:]==1]=1 - prediction_table[:,:,0][prediction_table_full_updown_erode[:,:]==1]=1 - prediction_table = prediction_table.astype(np.int16) - - #prediction_table_erode = cv2.erode(prediction_table[:,:,0], self.kernel, iterations=6) - #prediction_table_erode = cv2.dilate(prediction_table_erode, self.kernel, iterations=6) - - 
prediction_table_erode = cv2.erode(prediction_table[:,:,0], KERNEL, iterations=20) - prediction_table_erode = cv2.dilate(prediction_table_erode, KERNEL, iterations=20) - return prediction_table_erode.astype(np.int16) + prediction_table, _ = self.do_prediction_new_concept(patches, img, self.model_zoo.get("table")) + prediction_table = prediction_table.astype(np.int16) + return prediction_table[:,:,0] def run_graphics_and_columns_light( self, text_regions_p_1, textline_mask_tot_ea, @@ -2876,11 +2687,11 @@ class Eynollah: return (num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, text_regions_p_1, cont_page, table_prediction) - def run_enhancement(self, light_version): + def run_enhancement(self): t_in = time.time() self.logger.info("Resizing and enhancing image...") is_image_enhanced, img_org, img_res, num_col_classifier, num_column_is_classified, img_bin = \ - self.resize_and_enhance_image_with_column_classifier(light_version) + self.resize_and_enhance_image_with_column_classifier() self.logger.info("Image was %senhanced.", '' if is_image_enhanced else 'not ') scale = 1 if is_image_enhanced: @@ -2911,8 +2722,7 @@ class Eynollah: scaler_h_textline, scaler_w_textline, num_col_classifier) - if self.textline_light: - textline_mask_tot_ea = textline_mask_tot_ea.astype(np.int16) + textline_mask_tot_ea = textline_mask_tot_ea.astype(np.int16) if self.plotter: self.plotter.save_plot_of_textlines(textline_mask_tot_ea, image_page) @@ -2945,7 +2755,7 @@ class Eynollah: regions_without_separators = regions_without_separators.astype(np.uint8) text_regions_p = get_marginals( rotate_image(regions_without_separators, slope_deskew), text_regions_p, - num_col_classifier, slope_deskew, light_version=self.light_version, kernel=KERNEL) + num_col_classifier, slope_deskew, kernel=KERNEL) except Exception as e: self.logger.error("exception %s", e) @@ -3004,20 +2814,6 @@ class Eynollah: self.logger.debug("len(boxes): %s", len(boxes)) #print(time.time()-t_0_box,'time box in 3.1') - if self.tables: - if self.light_version: - pass - else: - text_regions_p_tables = np.copy(text_regions_p) - text_regions_p_tables[(table_prediction == 1)] = 10 - pixel_line = 3 - img_revised_tab2 = self.add_tables_heuristic_to_layout( - text_regions_p_tables, boxes, 0, splitter_y_new, peaks_neg_tot_tables, text_regions_p_tables, - num_col_classifier , 0.000005, pixel_line) - #print(time.time()-t_0_box,'time box in 3.2') - img_revised_tab2, contoures_tables = self.check_iou_of_bounding_box_and_contour_for_tables( - img_revised_tab2, table_prediction, 10, num_col_classifier) - #print(time.time()-t_0_box,'time box in 3.3') else: boxes_d, peaks_neg_tot_tables_d = return_boxes_of_images_by_order_of_reading_new( splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, @@ -3025,63 +2821,24 @@ class Eynollah: boxes = None self.logger.debug("len(boxes): %s", len(boxes_d)) - if self.tables: - if self.light_version: - pass - else: - text_regions_p_tables = np.copy(text_regions_p_1_n) - text_regions_p_tables = np.round(text_regions_p_tables) - text_regions_p_tables[(text_regions_p_tables != 3) & (table_prediction_n == 1)] = 10 - - pixel_line = 3 - img_revised_tab2 = self.add_tables_heuristic_to_layout( - text_regions_p_tables, boxes_d, 0, splitter_y_new_d, - peaks_neg_tot_tables_d, text_regions_p_tables, - num_col_classifier, 0.000005, pixel_line) - img_revised_tab2_d,_ = self.check_iou_of_bounding_box_and_contour_for_tables( - img_revised_tab2, table_prediction_n, 10, num_col_classifier) - - 
img_revised_tab2_d_rotated = rotate_image(img_revised_tab2_d, -slope_deskew) - img_revised_tab2_d_rotated = np.round(img_revised_tab2_d_rotated) - img_revised_tab2_d_rotated = img_revised_tab2_d_rotated.astype(np.int8) - img_revised_tab2_d_rotated = resize_image(img_revised_tab2_d_rotated, - text_regions_p.shape[0], text_regions_p.shape[1]) #print(time.time()-t_0_box,'time box in 4') self.logger.info("detecting boxes took %.1fs", time.time() - t1) if self.tables: - if self.light_version: - text_regions_p[table_prediction == 1] = 10 - img_revised_tab = text_regions_p[:,:] - else: - if np.abs(slope_deskew) < SLOPE_THRESHOLD: - img_revised_tab = np.copy(img_revised_tab2) - img_revised_tab[(text_regions_p == 1) & (img_revised_tab != 10)] = 1 - else: - img_revised_tab = np.copy(text_regions_p) - img_revised_tab[img_revised_tab == 10] = 0 - img_revised_tab[img_revised_tab2_d_rotated == 10] = 10 - - text_regions_p[text_regions_p == 10] = 0 - text_regions_p[img_revised_tab == 10] = 10 + text_regions_p[table_prediction == 1] = 10 + img_revised_tab = text_regions_p[:,:] else: img_revised_tab = text_regions_p[:,:] #img_revised_tab = text_regions_p[:, :] - if self.light_version: - polygons_of_images = return_contours_of_interested_region(text_regions_p, 2) - else: - polygons_of_images = return_contours_of_interested_region(img_revised_tab, 2) + polygons_of_images = return_contours_of_interested_region(text_regions_p, 2) pixel_img = 4 min_area_mar = 0.00001 - if self.light_version: - marginal_mask = (text_regions_p[:,:]==pixel_img)*1 - marginal_mask = marginal_mask.astype('uint8') - marginal_mask = cv2.dilate(marginal_mask, KERNEL, iterations=2) + marginal_mask = (text_regions_p[:,:]==pixel_img)*1 + marginal_mask = marginal_mask.astype('uint8') + marginal_mask = cv2.dilate(marginal_mask, KERNEL, iterations=2) - polygons_of_marginals = return_contours_of_interested_region(marginal_mask, 1, min_area_mar) - else: - polygons_of_marginals = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar) + polygons_of_marginals = return_contours_of_interested_region(marginal_mask, 1, min_area_mar) pixel_img = 10 contours_tables = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar) @@ -3099,144 +2856,43 @@ class Eynollah: self.logger.debug('enter run_boxes_full_layout') t_full0 = time.time() if self.tables: - if self.light_version: - text_regions_p[:,:][table_prediction[:,:]==1] = 10 - img_revised_tab = text_regions_p[:,:] - if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - _, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = \ - rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, - table_prediction, slope_deskew) + text_regions_p[:,:][table_prediction[:,:]==1] = 10 + img_revised_tab = text_regions_p[:,:] + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: + _, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = \ + rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, + table_prediction, slope_deskew) - text_regions_p_1_n = resize_image(text_regions_p_1_n, - text_regions_p.shape[0], - text_regions_p.shape[1]) - textline_mask_tot_d = resize_image(textline_mask_tot_d, - text_regions_p.shape[0], - text_regions_p.shape[1]) - table_prediction_n = resize_image(table_prediction_n, - text_regions_p.shape[0], - text_regions_p.shape[1]) - - regions_without_separators_d = (text_regions_p_1_n[:,:] == 1)*1 - regions_without_separators_d[table_prediction_n[:,:] == 1] = 1 - else: - text_regions_p_1_n = None - textline_mask_tot_d = None - 
regions_without_separators_d = None - # regions_without_separators = ( text_regions_p[:,:]==1 | text_regions_p[:,:]==2 )*1 - #self.return_regions_without_separators_new(text_regions_p[:,:,0],img_only_regions) - regions_without_separators = (text_regions_p[:,:] == 1)*1 - regions_without_separators[table_prediction == 1] = 1 + text_regions_p_1_n = resize_image(text_regions_p_1_n, + text_regions_p.shape[0], + text_regions_p.shape[1]) + textline_mask_tot_d = resize_image(textline_mask_tot_d, + text_regions_p.shape[0], + text_regions_p.shape[1]) + table_prediction_n = resize_image(table_prediction_n, + text_regions_p.shape[0], + text_regions_p.shape[1]) + regions_without_separators_d = (text_regions_p_1_n[:,:] == 1)*1 + regions_without_separators_d[table_prediction_n[:,:] == 1] = 1 else: - if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - _, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = \ - rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, - table_prediction, slope_deskew) + text_regions_p_1_n = None + textline_mask_tot_d = None + regions_without_separators_d = None + # regions_without_separators = ( text_regions_p[:,:]==1 | text_regions_p[:,:]==2 )*1 + #self.return_regions_without_separators_new(text_regions_p[:,:,0],img_only_regions) + regions_without_separators = (text_regions_p[:,:] == 1)*1 + regions_without_separators[table_prediction == 1] = 1 - text_regions_p_1_n = resize_image(text_regions_p_1_n, - text_regions_p.shape[0], - text_regions_p.shape[1]) - textline_mask_tot_d = resize_image(textline_mask_tot_d, - text_regions_p.shape[0], - text_regions_p.shape[1]) - table_prediction_n = resize_image(table_prediction_n, - text_regions_p.shape[0], - text_regions_p.shape[1]) - - regions_without_separators_d = (text_regions_p_1_n[:,:] == 1)*1 - regions_without_separators_d[table_prediction_n[:,:] == 1] = 1 - else: - text_regions_p_1_n = None - textline_mask_tot_d = None - regions_without_separators_d = None - - # regions_without_separators = ( text_regions_p[:,:]==1 | text_regions_p[:,:]==2 )*1 - #self.return_regions_without_separators_new(text_regions_p[:,:,0],img_only_regions) - regions_without_separators = (text_regions_p[:,:] == 1)*1 - regions_without_separators[table_prediction == 1] = 1 - - pixel_lines=3 - if np.abs(slope_deskew) < SLOPE_THRESHOLD: - num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document( - text_regions_p, num_col_classifier, self.tables, pixel_lines) - - if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - num_col_d, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document( - text_regions_p_1_n, num_col_classifier, self.tables, pixel_lines) - - if num_col_classifier>=3: - if np.abs(slope_deskew) < SLOPE_THRESHOLD: - regions_without_separators = regions_without_separators.astype(np.uint8) - regions_without_separators = cv2.erode(regions_without_separators[:,:], KERNEL, iterations=6) - - if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - regions_without_separators_d = regions_without_separators_d.astype(np.uint8) - regions_without_separators_d = cv2.erode(regions_without_separators_d[:,:], KERNEL, iterations=6) - else: - pass - - if np.abs(slope_deskew) < SLOPE_THRESHOLD: - boxes, peaks_neg_tot_tables = return_boxes_of_images_by_order_of_reading_new( - splitter_y_new, regions_without_separators, matrix_of_lines_ch, - num_col_classifier, erosion_hurts, self.tables, self.right2left) - text_regions_p_tables = np.copy(text_regions_p) - text_regions_p_tables[:,:][(table_prediction[:,:]==1)] = 10 - 
pixel_line = 3 - img_revised_tab2 = self.add_tables_heuristic_to_layout( - text_regions_p_tables, boxes, 0, splitter_y_new, peaks_neg_tot_tables, text_regions_p_tables, - num_col_classifier , 0.000005, pixel_line) - - img_revised_tab2,contoures_tables = self.check_iou_of_bounding_box_and_contour_for_tables( - img_revised_tab2, table_prediction, 10, num_col_classifier) - else: - boxes_d, peaks_neg_tot_tables_d = return_boxes_of_images_by_order_of_reading_new( - splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, - num_col_classifier, erosion_hurts, self.tables, self.right2left) - text_regions_p_tables = np.copy(text_regions_p_1_n) - text_regions_p_tables = np.round(text_regions_p_tables) - text_regions_p_tables[(text_regions_p_tables != 3) & (table_prediction_n == 1)] = 10 - - pixel_line = 3 - img_revised_tab2 = self.add_tables_heuristic_to_layout( - text_regions_p_tables, boxes_d, 0, splitter_y_new_d, - peaks_neg_tot_tables_d, text_regions_p_tables, - num_col_classifier, 0.000005, pixel_line) - - img_revised_tab2_d,_ = self.check_iou_of_bounding_box_and_contour_for_tables( - img_revised_tab2, table_prediction_n, 10, num_col_classifier) - img_revised_tab2_d_rotated = rotate_image(img_revised_tab2_d, -slope_deskew) - - img_revised_tab2_d_rotated = np.round(img_revised_tab2_d_rotated) - img_revised_tab2_d_rotated = img_revised_tab2_d_rotated.astype(np.int8) - img_revised_tab2_d_rotated = resize_image(img_revised_tab2_d_rotated, - text_regions_p.shape[0], - text_regions_p.shape[1]) - - if np.abs(slope_deskew) < 0.13: - img_revised_tab = np.copy(img_revised_tab2) - else: - img_revised_tab = np.copy(text_regions_p) - img_revised_tab[img_revised_tab == 10] = 0 - img_revised_tab[img_revised_tab2_d_rotated == 10] = 10 - - ##img_revised_tab = img_revised_tab2[:,:] - #img_revised_tab = text_regions_p[:,:] - text_regions_p[text_regions_p == 10] = 0 - text_regions_p[img_revised_tab == 10] = 10 - #img_revised_tab[img_revised_tab2 == 10] = 10 pixel_img = 4 min_area_mar = 0.00001 - if self.light_version: - marginal_mask = (text_regions_p[:,:]==pixel_img)*1 - marginal_mask = marginal_mask.astype('uint8') - marginal_mask = cv2.dilate(marginal_mask, KERNEL, iterations=2) + marginal_mask = (text_regions_p[:,:]==pixel_img)*1 + marginal_mask = marginal_mask.astype('uint8') + marginal_mask = cv2.dilate(marginal_mask, KERNEL, iterations=2) - polygons_of_marginals = return_contours_of_interested_region(marginal_mask, 1, min_area_mar) - else: - polygons_of_marginals = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar) + polygons_of_marginals = return_contours_of_interested_region(marginal_mask, 1, min_area_mar) pixel_img = 10 contours_tables = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar) @@ -3249,7 +2905,7 @@ class Eynollah: image_page = image_page.astype(np.uint8) #print("full inside 1", time.time()- t_full0) regions_fully, regions_fully_only_drop = self.extract_text_regions_new( - img_bin_light if self.light_version else image_page, + img_bin_light, False, cols=num_col_classifier) #print("full inside 2", time.time()- t_full0) # 6 is the separators lable in old full layout model @@ -3333,7 +2989,7 @@ class Eynollah: min_cont_size_to_be_dilated = 10 - if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: + if len(contours_only_text_parent)>min_cont_size_to_be_dilated: (cx_conts, cy_conts, x_min_conts, x_max_conts, y_min_conts, y_max_conts, @@ -3447,13 +3103,13 @@ class Eynollah: 
img_header_and_sep[int(y_max_main[j]):int(y_max_main[j])+12, int(x_min_main[j]):int(x_max_main[j])] = 1 co_text_all_org = contours_only_text_parent + contours_only_text_parent_h - if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: + if len(contours_only_text_parent)>min_cont_size_to_be_dilated: co_text_all = contours_only_dilated + contours_only_text_parent_h else: co_text_all = contours_only_text_parent + contours_only_text_parent_h else: co_text_all_org = contours_only_text_parent - if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: + if len(contours_only_text_parent)>min_cont_size_to_be_dilated: co_text_all = contours_only_dilated else: co_text_all = contours_only_text_parent @@ -3528,7 +3184,7 @@ class Eynollah: ordered = [i[0] for i in ordered] - if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: + if len(contours_only_text_parent)>min_cont_size_to_be_dilated: org_contours_indexes = [] for ind in range(len(ordered)): region_with_curr_order = ordered[ind] @@ -3788,10 +3444,6 @@ class Eynollah: # Log enabled features directly enabled_modes = [] - if self.light_version: - enabled_modes.append("Light version") - if self.textline_light: - enabled_modes.append("Light textline detection") if self.full_layout: enabled_modes.append("Full layout analysis") if self.tables: @@ -3851,7 +3503,7 @@ class Eynollah: self.logger.info("Step 1/5: Image Enhancement") img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = \ - self.run_enhancement(self.light_version) + self.run_enhancement() self.logger.info(f"Image: {self.image.shape[1]}x{self.image.shape[0]}, " f"{self.dpi} DPI, {num_col_classifier} columns") @@ -3928,49 +3580,34 @@ class Eynollah: t1 = time.time() self.logger.info("Step 2/5: Layout Analysis") - if self.light_version: - self.logger.info("Using light version processing") - text_regions_p_1 ,erosion_hurts, polygons_seplines, polygons_text_early, \ - textline_mask_tot_ea, img_bin_light, confidence_matrix = \ - self.get_regions_light_v(img_res, is_image_enhanced, num_col_classifier) - #print("text region early -2 in %.1fs", time.time() - t0) + self.logger.info("Using light version processing") + text_regions_p_1 ,erosion_hurts, polygons_seplines, polygons_text_early, \ + textline_mask_tot_ea, img_bin_light, confidence_matrix = \ + self.get_regions_light_v(img_res, is_image_enhanced, num_col_classifier) + #print("text region early -2 in %.1fs", time.time() - t0) - if num_col_classifier == 1 or num_col_classifier ==2: - if num_col_classifier == 1: - img_w_new = 1000 - else: - img_w_new = 1300 - img_h_new = img_w_new * textline_mask_tot_ea.shape[0] // textline_mask_tot_ea.shape[1] - - textline_mask_tot_ea_deskew = resize_image(textline_mask_tot_ea,img_h_new, img_w_new ) - slope_deskew = self.run_deskew(textline_mask_tot_ea_deskew) + if num_col_classifier == 1 or num_col_classifier ==2: + if num_col_classifier == 1: + img_w_new = 1000 else: - slope_deskew = self.run_deskew(textline_mask_tot_ea) - #print("text region early -2,5 in %.1fs", time.time() - t0) - #self.logger.info("Textregion detection took %.1fs ", time.time() - t1t) - num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, \ - text_regions_p_1, cont_page, table_prediction, textline_mask_tot_ea, img_bin_light = \ - self.run_graphics_and_columns_light(text_regions_p_1, textline_mask_tot_ea, - num_col_classifier, num_column_is_classified, - erosion_hurts, img_bin_light) - 
#self.logger.info("run graphics %.1fs ", time.time() - t1t) - #print("text region early -3 in %.1fs", time.time() - t0) - textline_mask_tot_ea_org = np.copy(textline_mask_tot_ea) + img_w_new = 1300 + img_h_new = img_w_new * textline_mask_tot_ea.shape[0] // textline_mask_tot_ea.shape[1] + textline_mask_tot_ea_deskew = resize_image(textline_mask_tot_ea,img_h_new, img_w_new ) + slope_deskew = self.run_deskew(textline_mask_tot_ea_deskew) else: - text_regions_p_1, erosion_hurts, polygons_seplines, polygons_text_early = \ - self.get_regions_from_xy_2models(img_res, is_image_enhanced, - num_col_classifier) - self.logger.info(f"Textregion detection took {time.time() - t1:.1f}s") - confidence_matrix = np.zeros((text_regions_p_1.shape[:2])) + slope_deskew = self.run_deskew(textline_mask_tot_ea) + #print("text region early -2,5 in %.1fs", time.time() - t0) + #self.logger.info("Textregion detection took %.1fs ", time.time() - t1t) + num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, \ + text_regions_p_1, cont_page, table_prediction, textline_mask_tot_ea, img_bin_light = \ + self.run_graphics_and_columns_light(text_regions_p_1, textline_mask_tot_ea, + num_col_classifier, num_column_is_classified, + erosion_hurts, img_bin_light) + #self.logger.info("run graphics %.1fs ", time.time() - t1t) + #print("text region early -3 in %.1fs", time.time() - t0) + textline_mask_tot_ea_org = np.copy(textline_mask_tot_ea) - t1 = time.time() - num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, \ - text_regions_p_1, cont_page, table_prediction = \ - self.run_graphics_and_columns(text_regions_p_1, num_col_classifier, num_column_is_classified, - erosion_hurts) - self.logger.info(f"Graphics detection took {time.time() - t1:.1f}s") - #self.logger.info('cont_page %s', cont_page) #plt.imshow(table_prediction) #plt.show() self.logger.info(f"Layout analysis complete ({time.time() - t1:.1f}s)") @@ -3985,13 +3622,7 @@ class Eynollah: #print("text region early in %.1fs", time.time() - t0) t1 = time.time() - if not self.light_version: - textline_mask_tot_ea = self.run_textline(image_page) - self.logger.info(f"Textline detection took {time.time() - t1:.1f}s") - t1 = time.time() - slope_deskew = self.run_deskew(textline_mask_tot_ea) - self.logger.info(f"Deskewing took {time.time() - t1:.1f}s") - elif num_col_classifier in (1,2): + if num_col_classifier in (1,2): org_h_l_m = textline_mask_tot_ea.shape[0] org_w_l_m = textline_mask_tot_ea.shape[1] if num_col_classifier == 1: @@ -4030,10 +3661,8 @@ class Eynollah: if self.curved_line: self.logger.info("Mode: Curved line detection") - elif self.textline_light: - self.logger.info("Mode: Light detection") - if self.light_version and num_col_classifier in (1,2): + if num_col_classifier in (1,2): image_page = resize_image(image_page,org_h_l_m, org_w_l_m ) textline_mask_tot_ea = resize_image(textline_mask_tot_ea,org_h_l_m, org_w_l_m ) text_regions_p = resize_image(text_regions_p,org_h_l_m, org_w_l_m ) @@ -4057,11 +3686,10 @@ class Eynollah: regions_fully, regions_without_separators, polygons_of_marginals, contours_tables = \ self.run_boxes_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, img_only_regions, table_prediction, erosion_hurts, - img_bin_light if self.light_version else None) + img_bin_light) ###polygons_of_marginals = dilate_textregion_contours(polygons_of_marginals) - if self.light_version: - drop_label_in_full_layout = 4 - 
textline_mask_tot_ea_org[img_revised_tab==drop_label_in_full_layout] = 0 + drop_label_in_full_layout = 4 + textline_mask_tot_ea_org[img_revised_tab==drop_label_in_full_layout] = 0 text_only = (img_revised_tab[:, :] == 1) * 1 @@ -4222,68 +3850,40 @@ class Eynollah: #print("text region early 3 in %.1fs", time.time() - t0) - if self.light_version: - contours_only_text_parent = dilate_textregion_contours(contours_only_text_parent) - contours_only_text_parent , contours_only_text_parent_d_ordered = self.filter_contours_inside_a_bigger_one( - contours_only_text_parent, contours_only_text_parent_d_ordered, text_only, - marginal_cnts=polygons_of_marginals) - #print("text region early 3.5 in %.1fs", time.time() - t0) - conf_contours_textregions = get_textregion_contours_in_org_image_light( - contours_only_text_parent, self.image, confidence_matrix) - #contours_only_text_parent = dilate_textregion_contours(contours_only_text_parent) - else: - conf_contours_textregions = get_textregion_contours_in_org_image_light( - contours_only_text_parent, self.image, confidence_matrix) + contours_only_text_parent = dilate_textregion_contours(contours_only_text_parent) + contours_only_text_parent , contours_only_text_parent_d_ordered = self.filter_contours_inside_a_bigger_one( + contours_only_text_parent, contours_only_text_parent_d_ordered, text_only, + marginal_cnts=polygons_of_marginals) + #print("text region early 3.5 in %.1fs", time.time() - t0) + conf_contours_textregions = get_textregion_contours_in_org_image_light( + contours_only_text_parent, self.image, confidence_matrix) + #contours_only_text_parent = dilate_textregion_contours(contours_only_text_parent) #print("text region early 4 in %.1fs", time.time() - t0) boxes_text = get_text_region_boxes_by_given_contours(contours_only_text_parent) boxes_marginals = get_text_region_boxes_by_given_contours(polygons_of_marginals) #print("text region early 5 in %.1fs", time.time() - t0) ## birdan sora chock chakir if not self.curved_line: - if self.light_version: - if self.textline_light: - all_found_textline_polygons, \ - all_box_coord, slopes = self.get_slopes_and_deskew_new_light2( - contours_only_text_parent, textline_mask_tot_ea_org, - boxes_text, slope_deskew) - all_found_textline_polygons_marginals, \ - all_box_coord_marginals, slopes_marginals = self.get_slopes_and_deskew_new_light2( - polygons_of_marginals, textline_mask_tot_ea_org, - boxes_marginals, slope_deskew) + all_found_textline_polygons, \ + all_box_coord, slopes = self.get_slopes_and_deskew_new_light2( + contours_only_text_parent, textline_mask_tot_ea_org, + boxes_text, slope_deskew) + all_found_textline_polygons_marginals, \ + all_box_coord_marginals, slopes_marginals = self.get_slopes_and_deskew_new_light2( + polygons_of_marginals, textline_mask_tot_ea_org, + boxes_marginals, slope_deskew) - all_found_textline_polygons = dilate_textline_contours( - all_found_textline_polygons) - all_found_textline_polygons = self.filter_contours_inside_a_bigger_one( - all_found_textline_polygons, None, textline_mask_tot_ea_org, type_contour="textline") - all_found_textline_polygons_marginals = dilate_textline_contours( - all_found_textline_polygons_marginals) - contours_only_text_parent, all_found_textline_polygons, \ - contours_only_text_parent_d_ordered, conf_contours_textregions = \ - self.filter_contours_without_textline_inside( - contours_only_text_parent, all_found_textline_polygons, - contours_only_text_parent_d_ordered, conf_contours_textregions) - else: - textline_mask_tot_ea = 
cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=1) - all_found_textline_polygons, \ - all_box_coord, slopes = self.get_slopes_and_deskew_new_light( - contours_only_text_parent, contours_only_text_parent, textline_mask_tot_ea, - boxes_text, slope_deskew) - all_found_textline_polygons_marginals, \ - all_box_coord_marginals, slopes_marginals = self.get_slopes_and_deskew_new_light( - polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea, - boxes_marginals, slope_deskew) - #all_found_textline_polygons = self.filter_contours_inside_a_bigger_one( - # all_found_textline_polygons, textline_mask_tot_ea_org, type_contour="textline") - else: - textline_mask_tot_ea = cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=1) - all_found_textline_polygons, \ - all_box_coord, slopes = self.get_slopes_and_deskew_new( - contours_only_text_parent, contours_only_text_parent, textline_mask_tot_ea, - boxes_text, slope_deskew) - all_found_textline_polygons_marginals, \ - all_box_coord_marginals, slopes_marginals = self.get_slopes_and_deskew_new( - polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea, - boxes_marginals, slope_deskew) + all_found_textline_polygons = dilate_textline_contours( + all_found_textline_polygons) + all_found_textline_polygons = self.filter_contours_inside_a_bigger_one( + all_found_textline_polygons, None, textline_mask_tot_ea_org, type_contour="textline") + all_found_textline_polygons_marginals = dilate_textline_contours( + all_found_textline_polygons_marginals) + contours_only_text_parent, all_found_textline_polygons, \ + contours_only_text_parent_d_ordered, conf_contours_textregions = \ + self.filter_contours_without_textline_inside( + contours_only_text_parent, all_found_textline_polygons, + contours_only_text_parent_d_ordered, conf_contours_textregions) else: scale_param = 1 textline_mask_tot_ea_erode = cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=2) @@ -4314,10 +3914,7 @@ class Eynollah: #print(len(polygons_of_marginals), len(ordered_left_marginals), len(ordered_right_marginals), 'marginals ordred') if self.full_layout: - if self.light_version: - fun = check_any_text_region_in_model_one_is_main_or_header_light - else: - fun = check_any_text_region_in_model_one_is_main_or_header + fun = check_any_text_region_in_model_one_is_main_or_header_light text_regions_p, contours_only_text_parent, contours_only_text_parent_h, all_box_coord, all_box_coord_h, \ all_found_textline_polygons, all_found_textline_polygons_h, slopes, slopes_h, \ contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered, \ @@ -4336,7 +3933,7 @@ class Eynollah: ##all_found_textline_polygons = adhere_drop_capital_region_into_corresponding_textline( ##text_regions_p, polygons_of_drop_capitals, contours_only_text_parent, contours_only_text_parent_h, ##all_box_coord, all_box_coord_h, all_found_textline_polygons, all_found_textline_polygons_h, - ##kernel=KERNEL, curved_line=self.curved_line, textline_light=self.textline_light) + ##kernel=KERNEL, curved_line=self.curved_line) if not self.reading_order_machine_based: label_seps = 6 diff --git a/src/eynollah/image_enhancer.py b/src/eynollah/image_enhancer.py index 575a583..a1b2786 100644 --- a/src/eynollah/image_enhancer.py +++ b/src/eynollah/image_enhancer.py @@ -43,7 +43,6 @@ class Enhancer: save_org_scale : bool = False, ): self.input_binary = False - self.light_version = False self.save_org_scale = save_org_scale if num_col_upper: self.num_col_upper = int(num_col_upper) @@ -69,16 +68,10 @@ class Enhancer: 
ret = {} if image_filename: ret['img'] = cv2.imread(image_filename) - if self.light_version: - self.dpi = 100 - else: - self.dpi = 0#check_dpi(image_filename) + self.dpi = 100 else: ret['img'] = pil2cv(image_pil) - if self.light_version: - self.dpi = 100 - else: - self.dpi = 0#check_dpi(image_pil) + self.dpi = 100 ret['img_grayscale'] = cv2.cvtColor(ret['img'], cv2.COLOR_BGR2GRAY) for prefix in ('', '_grayscale'): ret[f'img{prefix}_uint8'] = ret[f'img{prefix}'].astype(np.uint8) @@ -271,7 +264,7 @@ class Enhancer: return img_new, num_column_is_classified - def resize_and_enhance_image_with_column_classifier(self, light_version): + def resize_and_enhance_image_with_column_classifier(self): self.logger.debug("enter resize_and_enhance_image_with_column_classifier") dpi = 0#self.dpi self.logger.info("Detected %s DPI", dpi) @@ -354,16 +347,13 @@ class Enhancer: self.logger.info("Found %d columns (%s)", num_col, np.around(label_p_pred, decimals=5)) if dpi < DPI_THRESHOLD: - if light_version and num_col in (1,2): + if num_col in (1,2): img_new, num_column_is_classified = self.calculate_width_height_by_columns_1_2( img, num_col, width_early, label_p_pred) else: img_new, num_column_is_classified = self.calculate_width_height_by_columns( img, num_col, width_early, label_p_pred) - if light_version: - image_res = np.copy(img_new) - else: - image_res = self.predict_enhancement(img_new) + image_res = np.copy(img_new) is_image_enhanced = True else: @@ -657,11 +647,11 @@ class Enhancer: gc.collect() return prediction_true - def run_enhancement(self, light_version): + def run_enhancement(self): t_in = time.time() self.logger.info("Resizing and enhancing image...") is_image_enhanced, img_org, img_res, num_col_classifier, num_column_is_classified, img_bin = \ - self.resize_and_enhance_image_with_column_classifier(light_version) + self.resize_and_enhance_image_with_column_classifier() self.logger.info("Image was %senhanced.", '' if is_image_enhanced else 'not ') return img_res, is_image_enhanced, num_col_classifier, num_column_is_classified @@ -669,7 +659,7 @@ class Enhancer: def run_single(self): t0 = time.time() - img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement(light_version=False) + img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement() return img_res, is_image_enhanced diff --git a/src/eynollah/mb_ro_on_layout.py b/src/eynollah/mb_ro_on_layout.py index 7f065f1..eec544c 100644 --- a/src/eynollah/mb_ro_on_layout.py +++ b/src/eynollah/mb_ro_on_layout.py @@ -49,8 +49,6 @@ class machine_based_reading_order_on_layout: self.logger.warning("no GPU device available") self.model_zoo.load_model('reading_order') - # FIXME: light_version is always true, no need for checks in the code - self.light_version = True def read_xml(self, xml_file): tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) @@ -517,7 +515,7 @@ class machine_based_reading_order_on_layout: min_cont_size_to_be_dilated = 10 - if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: + if len(contours_only_text_parent)>min_cont_size_to_be_dilated: cx_conts, cy_conts, x_min_conts, x_max_conts, y_min_conts, y_max_conts, _ = find_new_features_of_contours(contours_only_text_parent) args_cont_located = np.array(range(len(contours_only_text_parent))) @@ -617,13 +615,13 @@ class machine_based_reading_order_on_layout: img_header_and_sep[int(y_max_main[j]):int(y_max_main[j])+12, int(x_min_main[j]):int(x_max_main[j])] = 1 
co_text_all_org = contours_only_text_parent + contours_only_text_parent_h - if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: + if len(contours_only_text_parent)>min_cont_size_to_be_dilated: co_text_all = contours_only_dilated + contours_only_text_parent_h else: co_text_all = contours_only_text_parent + contours_only_text_parent_h else: co_text_all_org = contours_only_text_parent - if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: + if len(contours_only_text_parent)>min_cont_size_to_be_dilated: co_text_all = contours_only_dilated else: co_text_all = contours_only_text_parent @@ -702,7 +700,7 @@ class machine_based_reading_order_on_layout: ##id_all_text = np.array(id_all_text)[index_sort] - if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: + if len(contours_only_text_parent)>min_cont_size_to_be_dilated: org_contours_indexes = [] for ind in range(len(ordered)): region_with_curr_order = ordered[ind] diff --git a/src/eynollah/model_zoo/default_specs.py b/src/eynollah/model_zoo/default_specs.py index 8138ec5..21968be 100644 --- a/src/eynollah/model_zoo/default_specs.py +++ b/src/eynollah/model_zoo/default_specs.py @@ -13,7 +13,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ category="enhancement", variant='', filename="models_eynollah/eynollah-enhancement_20210425", - dists=['enhancement', 'layout', 'ci'], dist_url=dist_url(), type='Keras', ), @@ -22,7 +21,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ category="binarization", variant='hybrid', filename="models_eynollah/eynollah-binarization-hybrid_20230504/model_bin_hybrid_trans_cnn_sbb_ens", - dists=['layout', 'binarization', ], dist_url=dist_url(), type='Keras', ), @@ -31,7 +29,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ category="binarization", variant='20210309', filename="models_eynollah/eynollah-binarization_20210309", - dists=['binarization'], dist_url=dist_url("extra"), type='Keras', ), @@ -40,7 +37,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ category="binarization", variant='', filename="models_eynollah/eynollah-binarization_20210425", - dists=['binarization'], dist_url=dist_url("extra"), type='Keras', ), @@ -50,7 +46,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ variant='', filename="models_eynollah/eynollah-column-classifier_20210425", dist_url=dist_url(), - dists=['layout'], type='Keras', ), @@ -59,7 +54,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ variant='', filename="models_eynollah/model_eynollah_page_extraction_20250915", dist_url=dist_url(), - dists=['layout'], type='Keras', ), @@ -68,7 +62,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ variant='', filename="models_eynollah/eynollah-main-regions-ensembled_20210425", dist_url=dist_url(), - dists=['layout'], type='Keras', ), @@ -77,27 +70,24 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ variant='extract_only_images', filename="models_eynollah/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18", dist_url=dist_url(), - dists=['layout'], type='Keras', ), EynollahModelSpec( category="region", - variant='light', + variant='', filename="models_eynollah/eynollah-main-regions_20220314", dist_url=dist_url(), help="early layout", - dists=['layout'], type='Keras', ), EynollahModelSpec( category="region_p2", - variant='', + variant='non-light', filename="models_eynollah/eynollah-main-regions-aug-rotation_20210425", - dist_url=dist_url(), + dist_url=dist_url('extra'), help="early layout, non-light, 2nd part", - dists=['layout'], type='Keras', ), 
@@ -110,8 +100,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ #filename="models_eynollah/modelens_1_2_4_5_early_lay_1_2_spaltige", #filename="models_eynollah/model_3_eraly_layout_no_patches_1_2_spaltige", filename="models_eynollah/modelens_e_l_all_sp_0_1_2_3_4_171024", - dist_url=dist_url("all"), - dists=['layout'], + dist_url=dist_url("layout"), help="early layout, light, 1-or-2-column", type='Keras', ), @@ -128,7 +117,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/modelens_full_lay_1__4_3_091124", dist_url=dist_url(), help="full layout / no patches", - dists=['layout'], type='Keras', ), @@ -148,7 +136,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/modelens_full_lay_1__4_3_091124", dist_url=dist_url(), help="full layout / with patches", - dists=['layout'], type='Keras', ), @@ -162,13 +149,12 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ #filename="models_eynollah/model_ens_reading_order_machine_based", filename="models_eynollah/model_eynollah_reading_order_20250824", dist_url=dist_url(), - dists=['layout', 'reading_order'], type='Keras', ), EynollahModelSpec( category="textline", - variant='', + variant='non-light', #filename="models_eynollah/modelens_textline_1_4_16092024", #filename="models_eynollah/model_textline_ens_3_4_5_6_artificial", #filename="models_eynollah/modelens_textline_1_3_4_20240915", @@ -176,36 +162,32 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ #filename="models_eynollah/modelens_textline_9_12_13_14_15", #filename="models_eynollah/eynollah-textline_20210425", filename="models_eynollah/modelens_textline_0_1__2_4_16092024", - dist_url=dist_url(), - dists=['layout'], + dist_url=dist_url('extra'), type='Keras', ), EynollahModelSpec( category="textline", - variant='light', + variant='', #filename="models_eynollah/eynollah-textline_light_20210425", filename="models_eynollah/modelens_textline_0_1__2_4_16092024", dist_url=dist_url(), - dists=['layout'], + type='Keras', + ), + + EynollahModelSpec( + category="table", + variant='non-light', + filename="models_eynollah/eynollah-tables_20210319", + dist_url=dist_url('extra'), type='Keras', ), EynollahModelSpec( category="table", variant='', - filename="models_eynollah/eynollah-tables_20210319", - dist_url=dist_url(), - dists=['layout'], - type='Keras', - ), - - EynollahModelSpec( - category="table", - variant='light', filename="models_eynollah/modelens_table_0t4_201124", dist_url=dist_url(), - dists=['layout'], type='Keras', ), @@ -214,7 +196,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ variant='', filename="models_eynollah/model_eynollah_ocr_cnnrnn_20250930", dist_url=dist_url("ocr"), - dists=['layout', 'ocr'], type='Keras', ), @@ -224,7 +205,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/model_eynollah_ocr_cnnrnn__degraded_20250805/", help="slightly better at degraded Fraktur", dist_url=dist_url("ocr"), - dists=['ocr'], type='Keras', ), @@ -233,7 +213,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ variant='', filename="characters_org.txt", dist_url=dist_url("ocr"), - dists=['ocr'], type='decoder', ), @@ -242,7 +221,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ variant='', filename="characters_org.txt", dist_url=dist_url("ocr"), - dists=['ocr'], type='List[str]', ), @@ -252,7 +230,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/model_eynollah_ocr_trocr_20250919", dist_url=dist_url("ocr"), help='much slower transformer-based', - dists=['trocr'], type='Keras', ), @@ -261,7 +238,6 @@ 
DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ variant='', filename="models_eynollah/model_eynollah_ocr_trocr_20250919", dist_url=dist_url("ocr"), - dists=['trocr'], type='TrOCRProcessor', ), @@ -270,7 +246,6 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ variant='htr', filename="models_eynollah/microsoft/trocr-base-handwritten", dist_url=dist_url("extra"), - dists=['trocr'], type='TrOCRProcessor', ), diff --git a/src/eynollah/model_zoo/model_zoo.py b/src/eynollah/model_zoo/model_zoo.py index 512bf1a..80d0aa7 100644 --- a/src/eynollah/model_zoo/model_zoo.py +++ b/src/eynollah/model_zoo/model_zoo.py @@ -176,13 +176,12 @@ class EynollahModelZoo: spec.category, spec.variant, spec.help, - ', '.join(spec.dists), f'Yes, at {self.model_path(spec.category, spec.variant)}' if self.model_path(spec.category, spec.variant).exists() else f'No, download {spec.dist_url}', # self.model_path(spec.category, spec.variant), ] - for spec in self.specs.specs + for spec in sorted(self.specs.specs, key=lambda x: x.dist_url) ], headers=[ 'Type', diff --git a/src/eynollah/model_zoo/specs.py b/src/eynollah/model_zoo/specs.py index 54a55f2..3c47b7b 100644 --- a/src/eynollah/model_zoo/specs.py +++ b/src/eynollah/model_zoo/specs.py @@ -10,8 +10,6 @@ class EynollahModelSpec(): category: str # Relative filename to the models_eynollah directory in the dists filename: str - # basename of the ZIP files that should contain this model - dists: List[str] # URL to the smallest model distribution containing this model (link to Zenodo) dist_url: str type: str diff --git a/src/eynollah/ocrd-tool.json b/src/eynollah/ocrd-tool.json index 4cd1642..c694a6f 100644 --- a/src/eynollah/ocrd-tool.json +++ b/src/eynollah/ocrd-tool.json @@ -29,16 +29,6 @@ "type": "boolean", "default": true, "description": "Try to detect all element subtypes, including drop-caps and headings" - }, - "light_version": { - "type": "boolean", - "default": true, - "description": "Try to detect all element subtypes in light version (faster+simpler method for main region detection and deskewing)" - }, - "textline_light": { - "type": "boolean", - "default": true, - "description": "Light version need textline light. If this parameter set to true, this tool will try to return contoure of textlines instead of rectangle bounding box of textline with a faster method." 
}, "tables": { "type": "boolean", diff --git a/src/eynollah/processor.py b/src/eynollah/processor.py index 6d80281..0addaff 100644 --- a/src/eynollah/processor.py +++ b/src/eynollah/processor.py @@ -18,9 +18,6 @@ class EynollahProcessor(Processor): def setup(self) -> None: assert self.parameter - if self.parameter['textline_light'] != self.parameter['light_version']: - raise ValueError("Error: You must set or unset both parameter 'textline_light' (to enable light textline detection), " - "and parameter 'light_version' (faster+simpler method for main region detection and deskewing)") model_zoo = EynollahModelZoo(basedir=self.parameter['models']) self.eynollah = Eynollah( model_zoo=model_zoo, @@ -29,8 +26,6 @@ class EynollahProcessor(Processor): right2left=self.parameter['right_to_left'], reading_order_machine_based=self.parameter['reading_order_machine_based'], ignore_page_extraction=self.parameter['ignore_page_extraction'], - light_version=self.parameter['light_version'], - textline_light=self.parameter['textline_light'], full_layout=self.parameter['full_layout'], allow_scaling=self.parameter['allow_scaling'], headers_off=self.parameter['headers_off'], @@ -93,7 +88,6 @@ class EynollahProcessor(Processor): dir_out=None, image_filename=image_filename, curved_line=self.eynollah.curved_line, - textline_light=self.eynollah.textline_light, pcgts=pcgts) self.eynollah.run_single() return result diff --git a/src/eynollah/utils/drop_capitals.py b/src/eynollah/utils/drop_capitals.py index 9f82fac..228a6d9 100644 --- a/src/eynollah/utils/drop_capitals.py +++ b/src/eynollah/utils/drop_capitals.py @@ -19,7 +19,6 @@ def adhere_drop_capital_region_into_corresponding_textline( all_found_textline_polygons_h, kernel=None, curved_line=False, - textline_light=False, ): # print(np.shape(all_found_textline_polygons),np.shape(all_found_textline_polygons[3]),'all_found_textline_polygonsshape') # print(all_found_textline_polygons[3]) @@ -79,7 +78,7 @@ def adhere_drop_capital_region_into_corresponding_textline( # region_with_intersected_drop=region_with_intersected_drop/3 region_with_intersected_drop = region_with_intersected_drop.astype(np.uint8) # print(np.unique(img_con_all_copy[:,:,0])) - if curved_line or textline_light: + if curved_line: if len(region_with_intersected_drop) > 1: sum_pixels_of_intersection = [] diff --git a/src/eynollah/utils/marginals.py b/src/eynollah/utils/marginals.py index eaf0048..9f76fb7 100644 --- a/src/eynollah/utils/marginals.py +++ b/src/eynollah/utils/marginals.py @@ -6,7 +6,7 @@ from .contour import find_new_features_of_contours, return_contours_of_intereste from .resize import resize_image from .rotate import rotate_image -def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_version=False, kernel=None): +def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, kernel=None): mask_marginals=np.zeros((text_with_lines.shape[0],text_with_lines.shape[1])) mask_marginals=mask_marginals.astype(np.uint8) @@ -27,9 +27,8 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_ve text_with_lines=resize_image(text_with_lines,text_with_lines_eroded.shape[0],text_with_lines_eroded.shape[1]) - if light_version: - kernel_hor = np.ones((1, 5), dtype=np.uint8) - text_with_lines = cv2.erode(text_with_lines,kernel_hor,iterations=6) + kernel_hor = np.ones((1, 5), dtype=np.uint8) + text_with_lines = cv2.erode(text_with_lines,kernel_hor,iterations=6) text_with_lines_y=text_with_lines.sum(axis=0) 
text_with_lines_y_eroded=text_with_lines_eroded.sum(axis=0) @@ -43,10 +42,7 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_ve elif thickness_along_y_percent>=30 and thickness_along_y_percent<50: min_textline_thickness=20 else: - if light_version: - min_textline_thickness=45 - else: - min_textline_thickness=40 + min_textline_thickness=45 if thickness_along_y_percent>=14: @@ -128,92 +124,39 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_ve if max_point_of_right_marginal>=text_regions.shape[1]: max_point_of_right_marginal=text_regions.shape[1]-1 - if light_version: - text_regions_org = np.copy(text_regions) - text_regions[text_regions[:,:]==1]=4 - - pixel_img=4 - min_area_text=0.00001 - - polygon_mask_marginals_rotated = return_contours_of_interested_region(mask_marginals,1,min_area_text) - - polygon_mask_marginals_rotated = polygon_mask_marginals_rotated[0] + text_regions_org = np.copy(text_regions) + text_regions[text_regions[:,:]==1]=4 + + pixel_img=4 + min_area_text=0.00001 + + polygon_mask_marginals_rotated = return_contours_of_interested_region(mask_marginals,1,min_area_text) + + polygon_mask_marginals_rotated = polygon_mask_marginals_rotated[0] - polygons_of_marginals=return_contours_of_interested_region(text_regions,pixel_img,min_area_text) + polygons_of_marginals=return_contours_of_interested_region(text_regions,pixel_img,min_area_text) - cx_text_only,cy_text_only ,x_min_text_only,x_max_text_only, y_min_text_only ,y_max_text_only,y_cor_x_min_main=find_new_features_of_contours(polygons_of_marginals) + cx_text_only,cy_text_only ,x_min_text_only,x_max_text_only, y_min_text_only ,y_max_text_only,y_cor_x_min_main=find_new_features_of_contours(polygons_of_marginals) - text_regions[(text_regions[:,:]==4)]=1 + text_regions[(text_regions[:,:]==4)]=1 - marginlas_should_be_main_text=[] + marginlas_should_be_main_text=[] - x_min_marginals_left=[] - x_min_marginals_right=[] + x_min_marginals_left=[] + x_min_marginals_right=[] - for i in range(len(cx_text_only)): - results = cv2.pointPolygonTest(polygon_mask_marginals_rotated, (cx_text_only[i], cy_text_only[i]), False) + for i in range(len(cx_text_only)): + results = cv2.pointPolygonTest(polygon_mask_marginals_rotated, (cx_text_only[i], cy_text_only[i]), False) - if results == -1: - marginlas_should_be_main_text.append(polygons_of_marginals[i]) + if results == -1: + marginlas_should_be_main_text.append(polygons_of_marginals[i]) - text_regions_org=cv2.fillPoly(text_regions_org, pts =marginlas_should_be_main_text, color=(4,4)) - text_regions = np.copy(text_regions_org) + text_regions_org=cv2.fillPoly(text_regions_org, pts =marginlas_should_be_main_text, color=(4,4)) + text_regions = np.copy(text_regions_org) - else: - - text_regions[(mask_marginals_rotated[:,:]!=1) & (text_regions[:,:]==1)]=4 - - pixel_img=4 - min_area_text=0.00001 - - polygons_of_marginals=return_contours_of_interested_region(text_regions,pixel_img,min_area_text) - - cx_text_only,cy_text_only ,x_min_text_only,x_max_text_only, y_min_text_only ,y_max_text_only,y_cor_x_min_main=find_new_features_of_contours(polygons_of_marginals) - - text_regions[(text_regions[:,:]==4)]=1 - - marginlas_should_be_main_text=[] - - x_min_marginals_left=[] - x_min_marginals_right=[] - - for i in range(len(cx_text_only)): - x_width_mar=abs(x_min_text_only[i]-x_max_text_only[i]) - y_height_mar=abs(y_min_text_only[i]-y_max_text_only[i]) - - if x_width_mar>16 and y_height_mar/x_width_mar<18: - 
marginlas_should_be_main_text.append(polygons_of_marginals[i]) - if x_min_text_only[i]<(mid_point-one_third_left): - x_min_marginals_left_new=x_min_text_only[i] - if len(x_min_marginals_left)==0: - x_min_marginals_left.append(x_min_marginals_left_new) - else: - x_min_marginals_left[0]=min(x_min_marginals_left[0],x_min_marginals_left_new) - else: - x_min_marginals_right_new=x_min_text_only[i] - if len(x_min_marginals_right)==0: - x_min_marginals_right.append(x_min_marginals_right_new) - else: - x_min_marginals_right[0]=min(x_min_marginals_right[0],x_min_marginals_right_new) - - if len(x_min_marginals_left)==0: - x_min_marginals_left=[0] - if len(x_min_marginals_right)==0: - x_min_marginals_right=[text_regions.shape[1]-1] - - - text_regions=cv2.fillPoly(text_regions, pts =marginlas_should_be_main_text, color=(4,4)) - - - #text_regions[:,:int(x_min_marginals_left[0])][text_regions[:,:int(x_min_marginals_left[0])]==1]=0 - #text_regions[:,int(x_min_marginals_right[0]):][text_regions[:,int(x_min_marginals_right[0]):]==1]=0 - - - text_regions[:,:int(min_point_of_left_marginal)][text_regions[:,:int(min_point_of_left_marginal)]==1]=0 - text_regions[:,int(max_point_of_right_marginal):][text_regions[:,int(max_point_of_right_marginal):]==1]=0 ###text_regions[:,0:point_left][text_regions[:,0:point_left]==1]=4 diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 22ef00d..4db4e19 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1748,7 +1748,7 @@ def do_work_of_slopes_new_curved( @wrap_ndarray_shared(kw='textline_mask_tot_ea') def do_work_of_slopes_new_light( box_text, contour, contour_par, - textline_mask_tot_ea=None, slope_deskew=0, textline_light=True, + textline_mask_tot_ea=None, slope_deskew=0, logger=None ): if logger is None: @@ -1765,16 +1765,10 @@ def do_work_of_slopes_new_light( mask_only_con_region = np.zeros(textline_mask_tot_ea.shape) mask_only_con_region = cv2.fillPoly(mask_only_con_region, pts=[contour_par], color=(1, 1, 1)) - if textline_light: - all_text_region_raw = np.copy(textline_mask_tot_ea) - all_text_region_raw[mask_only_con_region == 0] = 0 - cnt_clean_rot_raw, hir_on_cnt_clean_rot = return_contours_of_image(all_text_region_raw) - cnt_clean_rot = filter_contours_area_of_image(all_text_region_raw, cnt_clean_rot_raw, hir_on_cnt_clean_rot, - max_area=1, min_area=0.00001) - else: - all_text_region_raw = np.copy(textline_mask_tot_ea[y: y + h, x: x + w]) - mask_only_con_region = mask_only_con_region[y: y + h, x: x + w] - all_text_region_raw[mask_only_con_region == 0] = 0 - cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_deskew, contour_par, box_text) + all_text_region_raw = np.copy(textline_mask_tot_ea) + all_text_region_raw[mask_only_con_region == 0] = 0 + cnt_clean_rot_raw, hir_on_cnt_clean_rot = return_contours_of_image(all_text_region_raw) + cnt_clean_rot = filter_contours_area_of_image(all_text_region_raw, cnt_clean_rot_raw, hir_on_cnt_clean_rot, + max_area=1, min_area=0.00001) return cnt_clean_rot, crop_coor, slope_deskew diff --git a/src/eynollah/utils/utils_ocr.py b/src/eynollah/utils/utils_ocr.py index 6e71b0f..2ba328b 100644 --- a/src/eynollah/utils/utils_ocr.py +++ b/src/eynollah/utils/utils_ocr.py @@ -379,7 +379,6 @@ def return_rnn_cnn_ocr_of_given_textlines(image, all_box_coord, prediction_model, b_s_ocr, num_to_char, - textline_light=False, curved_line=False): max_len = 512 padding_token = 299 @@ -404,7 +403,7 @@ def 
return_rnn_cnn_ocr_of_given_textlines(image, else: for indexing2, ind_poly in enumerate(ind_poly_first): cropped_lines_region_indexer.append(indexer_text_region) - if not (textline_light or curved_line): + if not curved_line: ind_poly = copy.deepcopy(ind_poly) box_ind = all_box_coord[indexing] diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index 38b7b9e..7f75903 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -23,14 +23,13 @@ import numpy as np class EynollahXmlWriter: - def __init__(self, *, dir_out, image_filename, curved_line,textline_light, pcgts=None): + def __init__(self, *, dir_out, image_filename, curved_line, pcgts=None): self.logger = logging.getLogger('eynollah.writer') self.counter = EynollahIdCounter() self.dir_out = dir_out self.image_filename = image_filename self.output_filename = os.path.join(self.dir_out or "", self.image_filename_stem) + ".xml" self.curved_line = curved_line - self.textline_light = textline_light self.pcgts = pcgts self.scale_x: Optional[float] = None # XXX set outside __init__ self.scale_y: Optional[float] = None # XXX set outside __init__ @@ -73,8 +72,8 @@ class EynollahXmlWriter: point = point[0] point_x = point[0] + page_coord[2] point_y = point[1] + page_coord[0] - # FIXME: or actually... not self.textline_light and not self.curved_line or np.abs(slopes[region_idx]) > 45? - if not self.textline_light and not (self.curved_line and np.abs(slopes[region_idx]) <= 45): + # FIXME: or actually... not self.curved_line or np.abs(slopes[region_idx]) > 45? + if not (self.curved_line and np.abs(slopes[region_idx]) <= 45): point_x += region_bboxes[2] point_y += region_bboxes[0] point_x = max(0, int(point_x / self.scale_x)) diff --git a/tests/cli_tests/test_layout.py b/tests/cli_tests/test_layout.py index bc354c8..7cbe013 100644 --- a/tests/cli_tests/test_layout.py +++ b/tests/cli_tests/test_layout.py @@ -9,8 +9,6 @@ from ocrd_models.constants import NAMESPACES as NS #["--allow_scaling", "--curved-line"], ["--allow_scaling", "--curved-line", "--full-layout"], ["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based"], - ["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based", - "--textline_light", "--light_version"], # -ep ... # -eoi ... 
# --skip_layout_and_reading_order @@ -47,7 +45,6 @@ def test_run_eynollah_layout_filename( [ ["--tables"], ["--tables", "--full-layout"], - ["--tables", "--full-layout", "--textline_light", "--light_version"], ], ids=str) def test_run_eynollah_layout_filename2( tmp_path, From 177d555ded2f3e5a431703d5ec5c108175b221d7 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 26 Nov 2025 21:35:45 +0100 Subject: [PATCH 08/15] factor out extract_only_images as eynollah extract-images --- README.md | 1 - src/eynollah/cli/__init__.py | 5 +- src/eynollah/cli/cli_extract_images.py | 167 +++++++++++++++ src/eynollah/cli/cli_layout.py | 15 -- src/eynollah/extract_images.py | 272 ++++++++++++++++++++++++ src/eynollah/eynollah.py | 206 +++--------------- src/eynollah/model_zoo/default_specs.py | 4 +- 7 files changed, 471 insertions(+), 199 deletions(-) create mode 100644 src/eynollah/cli/cli_extract_images.py create mode 100644 src/eynollah/extract_images.py diff --git a/README.md b/README.md index 8640ac5..afd93c3 100644 --- a/README.md +++ b/README.md @@ -109,7 +109,6 @@ The following options can be used to further configure the processing: | `-cl` | apply contour detection for curved text lines instead of bounding boxes | | `-ib` | apply binarization (the resulting image is saved to the output directory) | | `-ep` | enable plotting (MUST always be used with `-sl`, `-sd`, `-sa`, `-si` or `-ae`) | -| `-eoi` | extract only images to output directory (other processing will not be done) | | `-ho` | ignore headers for reading order dectection | | `-si ` | save image regions detected to this directory | | `-sd ` | save deskewed image to this directory | diff --git a/src/eynollah/cli/__init__.py b/src/eynollah/cli/__init__.py index c0d1921..05dafa1 100644 --- a/src/eynollah/cli/__init__.py +++ b/src/eynollah/cli/__init__.py @@ -1,12 +1,14 @@ # NOTE: For predictable order of imports of torch/shapely/tensorflow # this must be the first import of the CLI! 
 from ..eynollah_imports import imported_libs
+
 from .cli_models import models_cli
 from .cli_binarize import binarize_cli
 from .cli import main
 from .cli_binarize import binarize_cli
 from .cli_enhance import enhance_cli
+from .cli_extract_images import extract_images_cli
 from .cli_layout import layout_cli
 from .cli_ocr import ocr_cli
 from .cli_readingorder import readingorder_cli
@@ -17,5 +19,4 @@ main.add_command(layout_cli, 'layout')
 main.add_command(readingorder_cli, 'machine-based-reading-order')
 main.add_command(models_cli, 'models')
 main.add_command(ocr_cli, 'ocr')
-
-
+main.add_command(extract_images_cli, 'extract-images')
diff --git a/src/eynollah/cli/cli_extract_images.py b/src/eynollah/cli/cli_extract_images.py
new file mode 100644
index 0000000..adb0f43
--- /dev/null
+++ b/src/eynollah/cli/cli_extract_images.py
@@ -0,0 +1,167 @@
+import click
+
+@click.command()
+@click.option(
+    "--image",
+    "-i",
+    help="input image filename",
+    type=click.Path(exists=True, dir_okay=False),
+)
+
+@click.option(
+    "--out",
+    "-o",
+    help="directory for output PAGE-XML files",
+    type=click.Path(exists=True, file_okay=False),
+    required=True,
+)
+@click.option(
+    "--overwrite",
+    "-O",
+    help="overwrite (instead of skipping) if output xml exists",
+    is_flag=True,
+)
+@click.option(
+    "--dir_in",
+    "-di",
+    help="directory of input images (instead of --image)",
+    type=click.Path(exists=True, file_okay=False),
+)
+@click.option(
+    "--save_images",
+    "-si",
+    help="if a directory is given, images in documents will be cropped and saved there",
+    type=click.Path(exists=True, file_okay=False),
+)
+@click.option(
+    "--save_layout",
+    "-sl",
+    help="if a directory is given, plot of layout will be saved there",
+    type=click.Path(exists=True, file_okay=False),
+)
+@click.option(
+    "--save_deskewed",
+    "-sd",
+    help="if a directory is given, deskewed image will be saved there",
+    type=click.Path(exists=True, file_okay=False),
+)
+@click.option(
+    "--save_all",
+    "-sa",
+    help="if a directory is given, all plots needed for documentation will be saved there",
+    type=click.Path(exists=True, file_okay=False),
+)
+@click.option(
+    "--save_page",
+    "-sp",
+    help="if a directory is given, page crop of image will be saved there",
+    type=click.Path(exists=True, file_okay=False),
+)
+@click.option(
+    "--enable-plotting/--disable-plotting",
+    "-ep/-noep",
+    is_flag=True,
+    help="If set, will plot intermediary files and images",
+)
+@click.option(
+    "--input_binary/--input-RGB",
+    "-ib/-irgb",
+    is_flag=True,
+    help="In general, eynollah uses RGB as input, but if the input document is very dark, very bright or otherwise hard to process, you can turn on input binarization. When this flag is set, eynollah will binarize the RGB input document itself; you should always provide RGB images to eynollah.",
+)
+@click.option(
+    "--ignore_page_extraction/--extract_page_included",
+    "-ipe/-epi",
+    is_flag=True,
+    help="if this parameter is set to true, this tool will skip page extraction",
+)
+@click.option(
+    "--reading_order_machine_based/--heuristic_reading_order",
+    "-romb/-hro",
+    is_flag=True,
+    help="if this parameter is set to true, this tool will apply machine-based reading order detection",
+)
+@click.option(
+    "--num_col_upper",
+    "-ncu",
+    help="upper limit of columns in document image",
+)
+@click.option(
+    "--num_col_lower",
+    "-ncl",
+    help="lower limit of columns in document image",
+)
+@click.option(
+    "--threshold_art_class_layout",
+    "-tharl",
+    help="threshold of artificial class in the case of layout detection. The default value is 0.1",
+)
+@click.option(
+    "--threshold_art_class_textline",
+    "-thart",
+    help="threshold of artificial class in the case of textline detection. The default value is 0.1",
+)
+@click.option(
+    "--skip_layout_and_reading_order",
+    "-slro/-noslro",
+    is_flag=True,
+    help="if this parameter is set to true, this tool will skip layout detection and reading order. In that case, textline detection is done within the printspace only and the textline contours are written to the XML output file.",
+)
+@click.pass_context
+def extract_images_cli(
+    ctx,
+    image,
+    out,
+    overwrite,
+    dir_in,
+    save_images,
+    save_layout,
+    save_deskewed,
+    save_all,
+    save_page,
+    enable_plotting,
+    input_binary,
+    reading_order_machine_based,
+    num_col_upper,
+    num_col_lower,
+    threshold_art_class_textline,
+    threshold_art_class_layout,
+    skip_layout_and_reading_order,
+    ignore_page_extraction,
+):
+    """
+    Extract images from document pages (other layout processing is not done)
+    """
+    assert enable_plotting or not save_layout, "Plotting with -sl also requires -ep"
+    assert enable_plotting or not save_deskewed, "Plotting with -sd also requires -ep"
+    assert enable_plotting or not save_all, "Plotting with -sa also requires -ep"
+    assert enable_plotting or not save_page, "Plotting with -sp also requires -ep"
+    assert enable_plotting or not save_images, "Plotting with -si also requires -ep"
+    assert not enable_plotting or save_layout or save_deskewed or save_all or save_page or save_images, \
+        "Plotting with -ep also requires -sl, -sd, -sa, -sp or -si"
+    assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both."
+
+    from ..extract_images import EynollahImageExtractor
+    extractor = EynollahImageExtractor(
+        model_zoo=ctx.obj.model_zoo,
+        enable_plotting=enable_plotting,
+        input_binary=input_binary,
+        ignore_page_extraction=ignore_page_extraction,
+        reading_order_machine_based=reading_order_machine_based,
+        num_col_upper=num_col_upper,
+        num_col_lower=num_col_lower,
+        skip_layout_and_reading_order=skip_layout_and_reading_order,
+        threshold_art_class_textline=threshold_art_class_textline,
+        threshold_art_class_layout=threshold_art_class_layout,
+    )
+    extractor.run(overwrite=overwrite,
+                  image_filename=image,
+                  dir_in=dir_in,
+                  dir_out=out,
+                  dir_of_cropped_images=save_images,
+                  dir_of_layout=save_layout,
+                  dir_of_deskewed=save_deskewed,
+                  dir_of_all=save_all,
+                  dir_save_page=save_page,
+                  )
+
diff --git a/src/eynollah/cli/cli_layout.py b/src/eynollah/cli/cli_layout.py
index 7d6bbed..df66993 100644
--- a/src/eynollah/cli/cli_layout.py
+++ b/src/eynollah/cli/cli_layout.py
@@ -63,12 +63,6 @@ import click
     is_flag=True,
     help="If set, will plot intermediary files and images",
 )
-@click.option(
-    "--extract_only_images/--disable-extracting_only_images",
-    "-eoi/-noeoi",
-    is_flag=True,
-    help="If a directory is given, only images in documents will be cropped and saved there and the other processing will not be done",
-)
 @click.option(
     "--allow-enhancement/--no-allow-enhancement",
     "-ae/-noae",
@@ -166,7 +160,6 @@ def layout_cli(
     save_layout,
     save_deskewed,
     save_all,
-    extract_only_images,
     save_page,
     enable_plotting,
     allow_enhancement,
@@ -197,17 +190,9 @@ def layout_cli(
     assert enable_plotting or not allow_enhancement, "Plotting with -ae also requires -ep"
     assert not enable_plotting or save_layout or save_deskewed or save_all or save_page or save_images or allow_enhancement, \
         "Plotting with -ep also requires -sl, -sd, -sa, -sp, -si or -ae"
-    assert not
extract_only_images or not allow_enhancement, "Image extraction -eoi can not be set alongside allow_enhancement -ae"
-    assert not extract_only_images or not allow_scaling, "Image extraction -eoi can not be set alongside allow_scaling -as"
-    assert not extract_only_images or not curved_line, "Image extraction -eoi can not be set alongside curved_line -cl"
-    assert not extract_only_images or not full_layout, "Image extraction -eoi can not be set alongside full_layout -fl"
-    assert not extract_only_images or not tables, "Image extraction -eoi can not be set alongside tables -tab"
-    assert not extract_only_images or not right2left, "Image extraction -eoi can not be set alongside right2left -r2l"
-    assert not extract_only_images or not headers_off, "Image extraction -eoi can not be set alongside headers_off -ho"
     assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both."
     eynollah = Eynollah(
         model_zoo=ctx.obj.model_zoo,
-        extract_only_images=extract_only_images,
         enable_plotting=enable_plotting,
         allow_enhancement=allow_enhancement,
         curved_line=curved_line,
diff --git a/src/eynollah/extract_images.py b/src/eynollah/extract_images.py
new file mode 100644
index 0000000..22d87f2
--- /dev/null
+++ b/src/eynollah/extract_images.py
@@ -0,0 +1,272 @@
+"""
+Standalone image extraction from document pages (the former --extract_only_images mode of eynollah layout).
+"""
+
+from concurrent.futures import ProcessPoolExecutor
+import logging
+from multiprocessing import cpu_count
+import os
+import time
+from typing import Optional
+from pathlib import Path
+import tensorflow as tf
+import numpy as np
+import cv2
+
+from eynollah.utils.contour import filter_contours_area_of_image, return_contours_of_image, return_contours_of_interested_region
+from eynollah.utils.resize import resize_image
+
+from .model_zoo.model_zoo import EynollahModelZoo
+from .eynollah import Eynollah
+from .utils import box2rect, is_image_filename
+from .plot import EynollahPlotter
+
+class EynollahImageExtractor(Eynollah):
+
+    def __init__(
+        self,
+        *,
+        model_zoo: EynollahModelZoo,
+        enable_plotting : bool = False,
+        input_binary : bool = False,
+        ignore_page_extraction : bool = False,
+        reading_order_machine_based : bool = False,
+        num_col_upper : Optional[int] = None,
+        num_col_lower : Optional[int] = None,
+        threshold_art_class_layout: Optional[float] = None,
+        threshold_art_class_textline: Optional[float] = None,
+        skip_layout_and_reading_order : bool = False,
+    ):
+        self.logger = logging.getLogger('eynollah.extract_images')
+        self.model_zoo = model_zoo
+        self.plotter = None
+
+        self.reading_order_machine_based = reading_order_machine_based
+        self.enable_plotting = enable_plotting
+        # --input-binary is sensible if the image is very dark or if layout detection does not work otherwise.
+ self.input_binary = input_binary + self.ignore_page_extraction = ignore_page_extraction + self.skip_layout_and_reading_order = skip_layout_and_reading_order + if num_col_upper: + self.num_col_upper = int(num_col_upper) + else: + self.num_col_upper = num_col_upper + if num_col_lower: + self.num_col_lower = int(num_col_lower) + else: + self.num_col_lower = num_col_lower + + # for parallelization of CPU-intensive tasks: + self.executor = ProcessPoolExecutor(max_workers=cpu_count()) + + if threshold_art_class_layout: + self.threshold_art_class_layout = float(threshold_art_class_layout) + else: + self.threshold_art_class_layout = 0.1 + + if threshold_art_class_textline: + self.threshold_art_class_textline = float(threshold_art_class_textline) + else: + self.threshold_art_class_textline = 0.1 + + t_start = time.time() + + try: + for device in tf.config.list_physical_devices('GPU'): + tf.config.experimental.set_memory_growth(device, True) + except: + self.logger.warning("no GPU device available") + + self.logger.info("Loading models...") + self.setup_models() + self.logger.info(f"Model initialization complete ({time.time() - t_start:.1f}s)") + + def setup_models(self): + + loadable = [ + "col_classifier", + "binarization", + "page", + "extract_images", + ] + self.model_zoo.load_models(*loadable) + + def get_regions_light_v_extract_only_images(self,img, num_col_classifier): + self.logger.debug("enter get_regions_extract_images_only") + erosion_hurts = False + img_org = np.copy(img) + img_height_h = img_org.shape[0] + img_width_h = img_org.shape[1] + + if num_col_classifier == 1: + img_w_new = 700 + elif num_col_classifier == 2: + img_w_new = 900 + elif num_col_classifier == 3: + img_w_new = 1500 + elif num_col_classifier == 4: + img_w_new = 1800 + elif num_col_classifier == 5: + img_w_new = 2200 + elif num_col_classifier == 6: + img_w_new = 2500 + else: + raise ValueError("num_col_classifier must be in range 1..6") + img_h_new = int(img.shape[0] / float(img.shape[1]) * img_w_new) + img_resized = resize_image(img,img_h_new, img_w_new ) + + prediction_regions_org, _ = self.do_prediction_new_concept(True, img_resized, self.model_zoo.get("region")) + + prediction_regions_org = resize_image(prediction_regions_org,img_height_h, img_width_h ) + image_page, page_coord, cont_page = self.extract_page() + + prediction_regions_org = prediction_regions_org[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + prediction_regions_org=prediction_regions_org[:,:,0] + + mask_lines_only = (prediction_regions_org[:,:] ==3)*1 + mask_texts_only = (prediction_regions_org[:,:] ==1)*1 + mask_images_only=(prediction_regions_org[:,:] ==2)*1 + + polygons_seplines, hir_seplines = return_contours_of_image(mask_lines_only) + polygons_seplines = filter_contours_area_of_image( + mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001, dilate=1) + + polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) + polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) + + text_regions_p_true = np.zeros(prediction_regions_org.shape) + text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_lines, color=(3,3,3)) + + text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 + text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts=polygons_of_only_texts, color=(1,1,1)) + + text_regions_p_true[text_regions_p_true.shape[0]-15:text_regions_p_true.shape[0], :] = 0 + text_regions_p_true[:, 
text_regions_p_true.shape[1]-15:text_regions_p_true.shape[1]] = 0 + + ##polygons_of_images = return_contours_of_interested_region(text_regions_p_true, 2, 0.0001) + polygons_of_images = return_contours_of_interested_region(text_regions_p_true, 2, 0.001) + + polygons_of_images_fin = [] + for ploy_img_ind in polygons_of_images: + box = _, _, w, h = cv2.boundingRect(ploy_img_ind) + if h < 150 or w < 150: + pass + else: + page_coord_img = box2rect(box) # type: ignore + polygons_of_images_fin.append(np.array([[page_coord_img[2], page_coord_img[0]], + [page_coord_img[3], page_coord_img[0]], + [page_coord_img[3], page_coord_img[1]], + [page_coord_img[2], page_coord_img[1]]])) + + self.logger.debug("exit get_regions_extract_images_only") + return (text_regions_p_true, + erosion_hurts, + polygons_seplines, + polygons_of_images_fin, + image_page, + page_coord, + cont_page) + + def run(self, + overwrite: bool = False, + image_filename: Optional[str] = None, + dir_in: Optional[str] = None, + dir_out: Optional[str] = None, + dir_of_cropped_images: Optional[str] = None, + dir_of_layout: Optional[str] = None, + dir_of_deskewed: Optional[str] = None, + dir_of_all: Optional[str] = None, + dir_save_page: Optional[str] = None, + ): + """ + Get image and scales, then extract the page of scanned image + """ + self.logger.debug("enter run") + t0_tot = time.time() + + # Log enabled features directly + enabled_modes = [] + if self.full_layout: + enabled_modes.append("Full layout analysis") + if self.tables: + enabled_modes.append("Table detection") + if enabled_modes: + self.logger.info("Enabled modes: " + ", ".join(enabled_modes)) + if self.enable_plotting: + self.logger.info("Saving debug plots") + if dir_of_cropped_images: + self.logger.info(f"Saving cropped images to: {dir_of_cropped_images}") + if dir_of_layout: + self.logger.info(f"Saving layout plots to: {dir_of_layout}") + if dir_of_deskewed: + self.logger.info(f"Saving deskewed images to: {dir_of_deskewed}") + + if dir_in: + ls_imgs = [os.path.join(dir_in, image_filename) + for image_filename in filter(is_image_filename, + os.listdir(dir_in))] + elif image_filename: + ls_imgs = [image_filename] + else: + raise ValueError("run requires either a single image filename or a directory") + + for img_filename in ls_imgs: + self.logger.info(img_filename) + t0 = time.time() + + self.reset_file_name_dir(img_filename, dir_out) + if self.enable_plotting: + self.plotter = EynollahPlotter(dir_out=dir_out, + dir_of_all=dir_of_all, + dir_save_page=dir_save_page, + dir_of_deskewed=dir_of_deskewed, + dir_of_cropped_images=dir_of_cropped_images, + dir_of_layout=dir_of_layout, + image_filename_stem=Path(img_filename).stem) + #print("text region early -11 in %.1fs", time.time() - t0) + if os.path.exists(self.writer.output_filename): + if overwrite: + self.logger.warning("will overwrite existing output file '%s'", self.writer.output_filename) + else: + self.logger.warning("will skip input for existing output file '%s'", self.writer.output_filename) + continue + + pcgts = self.run_single() + self.logger.info("Job done in %.1fs", time.time() - t0) + self.writer.write_pagexml(pcgts) + + if dir_in: + self.logger.info("All jobs done in %.1fs", time.time() - t0_tot) + + def run_single(self): + t0 = time.time() + + self.logger.info(f"Processing file: {self.writer.image_filename}") + self.logger.info("Step 1/5: Image Enhancement") + + img_res, is_image_enhanced, num_col_classifier, _ = \ + self.run_enhancement() + + self.logger.info(f"Image: 
{self.image.shape[1]}x{self.image.shape[0]}, " + f"{self.dpi} DPI, {num_col_classifier} columns") + if is_image_enhanced: + self.logger.info("Enhancement applied") + + self.logger.info(f"Enhancement complete ({time.time() - t0:.1f}s)") + + + # Image Extraction Mode + self.logger.info("Step 2/5: Image Extraction Mode") + + _, _, _, polygons_of_images, \ + image_page, page_coord, cont_page = \ + self.get_regions_light_v_extract_only_images(img_res, num_col_classifier) + pcgts = self.writer.build_pagexml_no_full_layout( + [], page_coord, [], [], [], [], + polygons_of_images, [], [], [], [], [], [], [], [], [], + cont_page, [], []) + if self.plotter: + self.plotter.write_images_into_directory(polygons_of_images, image_page) + + self.logger.info("Image extraction complete") + return pcgts diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index c0e94e3..b0dd78c 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -128,7 +128,6 @@ class Eynollah: self, *, model_zoo: EynollahModelZoo, - extract_only_images : bool =False, enable_plotting : bool = False, allow_enhancement : bool = False, curved_line : bool = False, @@ -162,7 +161,6 @@ class Eynollah: self.input_binary = input_binary self.allow_scaling = allow_scaling self.headers_off = headers_off - self.extract_only_images = extract_only_images self.ignore_page_extraction = ignore_page_extraction self.skip_layout_and_reading_order = skip_layout_and_reading_order if num_col_upper: @@ -216,18 +214,17 @@ class Eynollah: "col_classifier", "binarization", "page", - ("region", 'extract_only_images' if self.extract_only_images else '') + "region" ] - if not self.extract_only_images: - loadable.append(("textline")) - loadable.append("region_1_2") - if self.full_layout: - loadable.append("region_fl_np") - #loadable.append("region_fl") - if self.reading_order_machine_based: - loadable.append("reading_order") - if self.tables: - loadable.append(("table")) + loadable.append(("textline")) + loadable.append("region_1_2") + if self.full_layout: + loadable.append("region_fl_np") + #loadable.append("region_fl") + if self.reading_order_machine_based: + loadable.append("reading_order") + if self.tables: + loadable.append(("table")) self.model_zoo.load_models(*loadable) @@ -452,27 +449,6 @@ class Eynollah: return img_new, num_column_is_classified - def calculate_width_height_by_columns_extract_only_images(self, img, num_col, width_early, label_p_pred): - self.logger.debug("enter calculate_width_height_by_columns") - if num_col == 1: - img_w_new = 700 - elif num_col == 2: - img_w_new = 900 - elif num_col == 3: - img_w_new = 1500 - elif num_col == 4: - img_w_new = 1800 - elif num_col == 5: - img_w_new = 2200 - elif num_col == 6: - img_w_new = 2500 - img_h_new = img_w_new * img.shape[0] // img.shape[1] - - img_new = resize_image(img, img_h_new, img_w_new) - num_column_is_classified = True - - return img_new, num_column_is_classified - def resize_image_with_column_classifier(self, is_image_enhanced, img_bin): self.logger.debug("enter resize_image_with_column_classifier") if self.input_binary: @@ -596,30 +572,25 @@ class Eynollah: label_p_pred = [np.ones(6)] self.logger.info("Found %d columns (%s)", num_col, np.around(label_p_pred, decimals=5)) - if not self.extract_only_images: - if dpi < DPI_THRESHOLD: - if num_col in (1,2): - img_new, num_column_is_classified = self.calculate_width_height_by_columns_1_2( - img, num_col, width_early, label_p_pred) - else: - img_new, num_column_is_classified = self.calculate_width_height_by_columns( - 
img, num_col, width_early, label_p_pred) + if dpi < DPI_THRESHOLD: + if num_col in (1,2): + img_new, num_column_is_classified = self.calculate_width_height_by_columns_1_2( + img, num_col, width_early, label_p_pred) + else: + img_new, num_column_is_classified = self.calculate_width_height_by_columns( + img, num_col, width_early, label_p_pred) + image_res = np.copy(img_new) + is_image_enhanced = True + else: + if num_col in (1,2): + img_new, num_column_is_classified = self.calculate_width_height_by_columns_1_2( + img, num_col, width_early, label_p_pred) image_res = np.copy(img_new) is_image_enhanced = True else: - if num_col in (1,2): - img_new, num_column_is_classified = self.calculate_width_height_by_columns_1_2( - img, num_col, width_early, label_p_pred) - image_res = np.copy(img_new) - is_image_enhanced = True - else: - num_column_is_classified = True - image_res = np.copy(img) - is_image_enhanced = False - else: - num_column_is_classified = True - image_res = np.copy(img) - is_image_enhanced = False + num_column_is_classified = True + image_res = np.copy(img) + is_image_enhanced = False self.logger.debug("exit resize_and_enhance_image_with_column_classifier") return is_image_enhanced, img, image_res, num_col, num_column_is_classified, img_bin @@ -1790,113 +1761,7 @@ class Eynollah: (prediction_textline_longshot_true_size[:, :, 0]==1).astype(np.uint8)) - def get_regions_light_v_extract_only_images(self,img,is_image_enhanced, num_col_classifier): - self.logger.debug("enter get_regions_extract_images_only") - erosion_hurts = False - img_org = np.copy(img) - img_height_h = img_org.shape[0] - img_width_h = img_org.shape[1] - - if num_col_classifier == 1: - img_w_new = 700 - elif num_col_classifier == 2: - img_w_new = 900 - elif num_col_classifier == 3: - img_w_new = 1500 - elif num_col_classifier == 4: - img_w_new = 1800 - elif num_col_classifier == 5: - img_w_new = 2200 - elif num_col_classifier == 6: - img_w_new = 2500 - img_h_new = int(img.shape[0] / float(img.shape[1]) * img_w_new) - img_resized = resize_image(img,img_h_new, img_w_new ) - - prediction_regions_org, _ = self.do_prediction_new_concept(True, img_resized, self.model_zoo.get("region")) - - prediction_regions_org = resize_image(prediction_regions_org,img_height_h, img_width_h ) - image_page, page_coord, cont_page = self.extract_page() - - prediction_regions_org = prediction_regions_org[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - prediction_regions_org=prediction_regions_org[:,:,0] - - mask_lines_only = (prediction_regions_org[:,:] ==3)*1 - mask_texts_only = (prediction_regions_org[:,:] ==1)*1 - mask_images_only=(prediction_regions_org[:,:] ==2)*1 - - polygons_seplines, hir_seplines = return_contours_of_image(mask_lines_only) - polygons_seplines = filter_contours_area_of_image( - mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001, dilate=1) - - polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) - polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) - - text_regions_p_true = np.zeros(prediction_regions_org.shape) - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_lines, color=(3,3,3)) - - text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts=polygons_of_only_texts, color=(1,1,1)) - - text_regions_p_true[text_regions_p_true.shape[0]-15:text_regions_p_true.shape[0], :] = 0 - text_regions_p_true[:, 
text_regions_p_true.shape[1]-15:text_regions_p_true.shape[1]] = 0 - - ##polygons_of_images = return_contours_of_interested_region(text_regions_p_true, 2, 0.0001) - polygons_of_images = return_contours_of_interested_region(text_regions_p_true, 2, 0.001) - image_boundary_of_doc = np.zeros((text_regions_p_true.shape[0], text_regions_p_true.shape[1])) - - ###image_boundary_of_doc[:6, :] = 1 - ###image_boundary_of_doc[text_regions_p_true.shape[0]-6:text_regions_p_true.shape[0], :] = 1 - - ###image_boundary_of_doc[:, :6] = 1 - ###image_boundary_of_doc[:, text_regions_p_true.shape[1]-6:text_regions_p_true.shape[1]] = 1 - - polygons_of_images_fin = [] - for ploy_img_ind in polygons_of_images: - """ - test_poly_image = np.zeros((text_regions_p_true.shape[0], text_regions_p_true.shape[1])) - test_poly_image = cv2.fillPoly(test_poly_image, pts=[ploy_img_ind], color=(1,1,1)) - - test_poly_image = test_poly_image + image_boundary_of_doc - test_poly_image_intersected_area = ( test_poly_image[:,:]==2 )*1 - - test_poly_image_intersected_area = test_poly_image_intersected_area.sum() - - if test_poly_image_intersected_area==0: - ##polygons_of_images_fin.append(ploy_img_ind) - - box = cv2.boundingRect(ploy_img_ind) - page_coord_img = box2rect(box) - # cont_page.append(np.array([[page_coord[2], page_coord[0]], - # [page_coord[3], page_coord[0]], - # [page_coord[3], page_coord[1]], - # [page_coord[2], page_coord[1]]])) - polygons_of_images_fin.append(np.array([[page_coord_img[2], page_coord_img[0]], - [page_coord_img[3], page_coord_img[0]], - [page_coord_img[3], page_coord_img[1]], - [page_coord_img[2], page_coord_img[1]]]) ) - """ - box = x, y, w, h = cv2.boundingRect(ploy_img_ind) - if h < 150 or w < 150: - pass - else: - page_coord_img = box2rect(box) - # cont_page.append(np.array([[page_coord[2], page_coord[0]], - # [page_coord[3], page_coord[0]], - # [page_coord[3], page_coord[1]], - # [page_coord[2], page_coord[1]]])) - polygons_of_images_fin.append(np.array([[page_coord_img[2], page_coord_img[0]], - [page_coord_img[3], page_coord_img[0]], - [page_coord_img[3], page_coord_img[1]], - [page_coord_img[2], page_coord_img[1]]])) - - self.logger.debug("exit get_regions_extract_images_only") - return (text_regions_p_true, - erosion_hurts, - polygons_seplines, - polygons_of_images_fin, - image_page, - page_coord, - cont_page) + def get_regions_light_v(self,img,is_image_enhanced, num_col_classifier): self.logger.debug("enter get_regions_light_v") @@ -3513,23 +3378,6 @@ class Eynollah: self.logger.info(f"Enhancement complete ({time.time() - t0:.1f}s)") - # Image Extraction Mode - if self.extract_only_images: - self.logger.info("Step 2/5: Image Extraction Mode") - - text_regions_p_1, erosion_hurts, polygons_seplines, polygons_of_images, \ - image_page, page_coord, cont_page = \ - self.get_regions_light_v_extract_only_images(img_res, is_image_enhanced, num_col_classifier) - pcgts = self.writer.build_pagexml_no_full_layout( - [], page_coord, [], [], [], [], - polygons_of_images, [], [], [], [], [], [], [], [], [], - cont_page, [], []) - if self.plotter: - self.plotter.write_images_into_directory(polygons_of_images, image_page) - - self.logger.info("Image extraction complete") - return pcgts - # Basic Processing Mode if self.skip_layout_and_reading_order: self.logger.info("Step 2/5: Basic Processing Mode") diff --git a/src/eynollah/model_zoo/default_specs.py b/src/eynollah/model_zoo/default_specs.py index 21968be..b9a1a2c 100644 --- a/src/eynollah/model_zoo/default_specs.py +++ 
b/src/eynollah/model_zoo/default_specs.py @@ -66,8 +66,8 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ ), EynollahModelSpec( - category="region", - variant='extract_only_images', + category="extract_images", + variant='', filename="models_eynollah/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18", dist_url=dist_url(), type='Keras', From 4aa9543a7d7bd247a685dd77aea852e649f02a5d Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 27 Nov 2025 11:30:00 +0100 Subject: [PATCH 09/15] remove more branches after textline_light default true --- src/eynollah/eynollah.py | 44 +--------------------------------------- src/eynollah/writer.py | 6 +----- 2 files changed, 2 insertions(+), 48 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index b0dd78c..841b77a 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -1512,52 +1512,10 @@ class Eynollah: img_width_h = img.shape[1] model_region = self.model_zoo.get("region_fl") if patches else self.model_zoo.get("region_fl_np") - if not patches: - img = otsu_copy_binary(img) - img = img.astype(np.uint8) - prediction_regions2 = None - elif cols: - if cols == 1: - img_height_new = int(img_height_h * 0.7) - img_width_new = int(img_width_h * 0.7) - elif cols == 2: - img_height_new = int(img_height_h * 0.4) - img_width_new = int(img_width_h * 0.4) - else: - img_height_new = int(img_height_h * 0.3) - img_width_new = int(img_width_h * 0.3) - img2 = otsu_copy_binary(img) - img2 = img2.astype(np.uint8) - img2 = resize_image(img2, img_height_new, img_width_new) - prediction_regions2 = self.do_prediction(patches, img2, model_region, marginal_of_patch_percent=0.1) - prediction_regions2 = resize_image(prediction_regions2, img_height_h, img_width_h) - - img = otsu_copy_binary(img).astype(np.uint8) - if cols == 1: - img = resize_image(img, int(img_height_h * 0.5), int(img_width_h * 0.5)).astype(np.uint8) - elif cols == 2 and img_width_h >= 2000: - img = resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)).astype(np.uint8) - elif cols == 3 and ((self.scale_x == 1 and img_width_h > 3000) or - (self.scale_x != 1 and img_width_h > 2800)): - img = resize_image(img, 2800 * img_height_h // img_width_h, 2800).astype(np.uint8) - elif cols == 4 and ((self.scale_x == 1 and img_width_h > 4000) or - (self.scale_x != 1 and img_width_h > 3700)): - img = resize_image(img, 3700 * img_height_h // img_width_h, 3700).astype(np.uint8) - elif cols == 4: - img = resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)).astype(np.uint8) - elif cols == 5 and self.scale_x == 1 and img_width_h > 5000: - img = resize_image(img, int(img_height_h * 0.7), int(img_width_h * 0.7)).astype(np.uint8) - elif cols == 5: - img = resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)).astype(np.uint8) - elif img_width_h > 5600: - img = resize_image(img, 5600 * img_height_h // img_width_h, 5600).astype(np.uint8) - else: - img = resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9)).astype(np.uint8) - prediction_regions = self.do_prediction(patches, img, model_region, marginal_of_patch_percent=0.1) prediction_regions = resize_image(prediction_regions, img_height_h, img_width_h) self.logger.debug("exit extract_text_regions") - return prediction_regions, prediction_regions2 + return prediction_regions, None def get_textlines_of_a_textregion_sorted(self, textlines_textregion, cx_textline, cy_textline, w_h_textline): N = len(cy_textline) diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index 7f75903..4b444a6 
100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -72,13 +72,9 @@ class EynollahXmlWriter: point = point[0] point_x = point[0] + page_coord[2] point_y = point[1] + page_coord[0] - # FIXME: or actually... not self.curved_line or np.abs(slopes[region_idx]) > 45? - if not (self.curved_line and np.abs(slopes[region_idx]) <= 45): - point_x += region_bboxes[2] - point_y += region_bboxes[0] point_x = max(0, int(point_x / self.scale_x)) point_y = max(0, int(point_y / self.scale_y)) - points_co += str(point_x) + ',' + str(point_y) + ' ' + points_co += f'{point_x},{point_y} ' coords.set_points(points_co[:-1]) def write_pagexml(self, pcgts): From c24cf94bce16dfa7b2c2ab19c12393f93ca778df Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 27 Nov 2025 12:43:45 +0100 Subject: [PATCH 10/15] enforce kwargs for writer.build_... --- src/eynollah/eynollah.py | 178 ++++++++++++++++++++++++++++++--------- src/eynollah/writer.py | 121 +++++++++++++++++--------- 2 files changed, 218 insertions(+), 81 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 841b77a..e5b4984 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -8,7 +8,6 @@ document layout analysis (segmentation) with output in PAGE-XML # FIXME: fix all of those... # pyright: reportUnnecessaryTypeIgnoreComment=true # pyright: reportPossiblyUnboundVariable=false -# pyright: reportMissingImports=false # pyright: reportCallIssue=false # pyright: reportOperatorIssue=false # pyright: reportUnboundVariable=false @@ -49,9 +48,9 @@ import statistics tf_disable_interactive_logs() -import tensorflow as tf +import tensorflow as tf # type: ignore try: - import torch + import torch # type: ignore except ImportError: torch = None try: @@ -3372,13 +3371,28 @@ class Eynollah: conf_contours_textregions =[0] pcgts = self.writer.build_pagexml_no_full_layout( - cont_page, page_coord, order_text_new, id_of_texts_tot, - all_found_textline_polygons, page_coord, [], - [], [], [], [], [], [], - slopes, [], [], - cont_page, [], [], + found_polygons_text_region=cont_page, + page_coord=page_coord, + order_of_texts=order_text_new, + id_of_texts=id_of_texts_tot, + all_found_textline_polygons=all_found_textline_polygons, + all_box_coord=page_coord, + polygons_of_images=[], + polygons_of_marginals_left=[], + polygons_of_marginals_right=[], + all_found_textline_polygons_marginals_left=[], + all_found_textline_polygons_marginals_right=[], + all_box_coord_marginals_left=[], + all_box_coord_marginals_right=[], + slopes=slopes, + slopes_marginals_left=[], + slopes_marginals_right=[], + cont_page=cont_page, + polygons_seplines=[], + contours_tables=[], conf_contours_textregion=conf_contours_textregions, - skip_layout_reading_order=True) + skip_layout_reading_order=True + ) self.logger.info("Basic processing complete") return pcgts @@ -3422,8 +3436,26 @@ class Eynollah: self.logger.info("No columns detected - generating empty PAGE-XML") pcgts = self.writer.build_pagexml_no_full_layout( - [], page_coord, [], [], [], [], [], [], [], [], [], [], [], [], [], [], - cont_page, [], []) + found_polygons_text_region=[], + page_coord=page_coord, + order_of_texts=[], + id_of_texts=[], + all_found_textline_polygons=[], + all_box_coord=[], + polygons_of_images=[], + polygons_of_marginals_left=[], + polygons_of_marginals_right=[], + all_found_textline_polygons_marginals_left=[], + all_found_textline_polygons_marginals_right=[], + all_box_coord_marginals_left=[], + all_box_coord_marginals_right=[], + slopes=[], + slopes_marginals_left=[], + 
slopes_marginals_right=[], + cont_page=cont_page, + polygons_seplines=[], + contours_tables=[] + ) return pcgts #print("text region early in %.1fs", time.time() - t0) @@ -3636,22 +3668,53 @@ class Eynollah: empty_marginals = [[]] * len(polygons_of_marginals) if self.full_layout: pcgts = self.writer.build_pagexml_full_layout( - [], [], page_coord, [], [], [], [], [], [], - polygons_of_images, contours_tables, [], - polygons_of_marginals, polygons_of_marginals, - empty_marginals, empty_marginals, - empty_marginals, empty_marginals, - [], [], [], [], - cont_page, polygons_seplines) + contours_only_text_parent=[], + contours_only_text_parent_h=[], + page_coord=page_coord, + order_of_texts=[], + id_of_texts=[], + all_found_textline_polygons=[], + all_found_textline_polygons_h=[], + all_box_coord=[], + all_box_coord_h=[], + polygons_of_images=polygons_of_images, + contours_tables=contours_tables, + polygons_of_drop_capitals=[], + polygons_of_marginals_left=polygons_of_marginals, + polygons_of_marginals_right=polygons_of_marginals, + all_found_textline_polygons_marginals_left=empty_marginals, + all_found_textline_polygons_marginals_right=empty_marginals, + all_box_coord_marginals_left=empty_marginals, + all_box_coord_marginals_right=empty_marginals, + slopes=[], + slopes_h=[], + slopes_marginals_left=[], + slopes_marginals_right=[], + cont_page=cont_page, + polygons_seplines=polygons_seplines + ) else: pcgts = self.writer.build_pagexml_no_full_layout( - [], page_coord, [], [], [], [], - polygons_of_images, - polygons_of_marginals, polygons_of_marginals, - empty_marginals, empty_marginals, - empty_marginals, empty_marginals, - [], [], [], - cont_page, polygons_seplines, contours_tables) + found_polygons_text_region=[], + page_coord=page_coord, + order_of_texts=[], + id_of_texts=[], + all_found_textline_polygons=[], + all_box_coord=[], + polygons_of_images=polygons_of_images, + polygons_of_marginals_left=polygons_of_marginals, + polygons_of_marginals_right=polygons_of_marginals, + all_found_textline_polygons_marginals_left=empty_marginals, + all_found_textline_polygons_marginals_right=empty_marginals, + all_box_coord_marginals_left=empty_marginals, + all_box_coord_marginals_right=empty_marginals, + slopes=[], + slopes_marginals_left=[], + slopes_marginals_right=[], + cont_page=cont_page, + polygons_seplines=polygons_seplines, + contours_tables=contours_tables + ) return pcgts @@ -3810,24 +3873,55 @@ class Eynollah: if self.full_layout: pcgts = self.writer.build_pagexml_full_layout( - contours_only_text_parent, contours_only_text_parent_h, page_coord, order_text_new, id_of_texts_tot, - all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, - polygons_of_images, contours_tables, polygons_of_drop_capitals, - polygons_of_marginals_left, polygons_of_marginals_right, - all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, - all_box_coord_marginals_left, all_box_coord_marginals_right, - slopes, slopes_h, slopes_marginals_left, slopes_marginals_right, - cont_page, polygons_seplines, - conf_contours_textregions, conf_contours_textregions_h) + found_polygons_text_region=contours_only_text_parent, + found_polygons_text_region_h=contours_only_text_parent_h, + page_coord=page_coord, + order_of_texts=order_text_new, + id_of_texts=id_of_texts_tot, + all_found_textline_polygons=all_found_textline_polygons, + all_found_textline_polygons_h=all_found_textline_polygons_h, + all_box_coord=all_box_coord, + all_box_coord_h=all_box_coord_h, + 
polygons_of_images=polygons_of_images, + contours_tables=contours_tables, + polygons_of_drop_capitals=polygons_of_drop_capitals, + polygons_of_marginals_left=polygons_of_marginals_left, + polygons_of_marginals_right=polygons_of_marginals_right, + all_found_textline_polygons_marginals_left=all_found_textline_polygons_marginals_left, + all_found_textline_polygons_marginals_right=all_found_textline_polygons_marginals_right, + all_box_coord_marginals_left=all_box_coord_marginals_left, + all_box_coord_marginals_right=all_box_coord_marginals_right, + slopes=slopes, + slopes_h=slopes_h, + slopes_marginals_left=slopes_marginals_left, + slopes_marginals_right=slopes_marginals_right, + cont_page=cont_page, + polygons_seplines=polygons_seplines, + conf_contours_textregions=conf_contours_textregions, + conf_contours_textregions_h=conf_contours_textregions_h + ) else: pcgts = self.writer.build_pagexml_no_full_layout( - contours_only_text_parent, page_coord, order_text_new, id_of_texts_tot, - all_found_textline_polygons, all_box_coord, polygons_of_images, - polygons_of_marginals_left, polygons_of_marginals_right, - all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, - all_box_coord_marginals_left, all_box_coord_marginals_right, - slopes, slopes_marginals_left, slopes_marginals_right, - cont_page, polygons_seplines, contours_tables, - conf_contours_textregions=conf_contours_textregions) + found_polygons_text_region=contours_only_text_parent, + page_coord=page_coord, + order_of_texts=order_text_new, + id_of_texts=id_of_texts_tot, + all_found_textline_polygons=all_found_textline_polygons, + all_box_coord=all_box_coord, + polygons_of_images=polygons_of_images, + polygons_of_marginals_left=polygons_of_marginals_left, + polygons_of_marginals_right=polygons_of_marginals_right, + all_found_textline_polygons_marginals_left=all_found_textline_polygons_marginals_left, + all_found_textline_polygons_marginals_right=all_found_textline_polygons_marginals_right, + all_box_coord_marginals_left=all_box_coord_marginals_left, + all_box_coord_marginals_right=all_box_coord_marginals_right, + slopes=slopes, + slopes_marginals_left=slopes_marginals_left, + slopes_marginals_right=slopes_marginals_right, + cont_page=cont_page, + polygons_seplines=polygons_seplines, + contours_tables=contours_tables, + conf_contours_textregions=conf_contours_textregions + ) return pcgts diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index 4b444a6..a944c72 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -83,48 +83,91 @@ class EynollahXmlWriter: f.write(to_xml(pcgts)) def build_pagexml_no_full_layout( - self, found_polygons_text_region, - page_coord, order_of_texts, id_of_texts, - all_found_textline_polygons, - all_box_coord, - found_polygons_text_region_img, - found_polygons_marginals_left, found_polygons_marginals_right, - all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, - all_box_coord_marginals_left, all_box_coord_marginals_right, - slopes, slopes_marginals_left, slopes_marginals_right, - cont_page, polygons_seplines, - found_polygons_tables, - **kwargs): + self, + *, + found_polygons_text_region, + page_coord, + order_of_texts, + id_of_texts, + all_found_textline_polygons, + all_box_coord, + found_polygons_text_region_img, + found_polygons_marginals_left, + found_polygons_marginals_right, + all_found_textline_polygons_marginals_left, + all_found_textline_polygons_marginals_right, + all_box_coord_marginals_left, + 
all_box_coord_marginals_right, + slopes, + slopes_marginals_left, + slopes_marginals_right, + cont_page, + polygons_seplines, + found_polygons_tables, + ): return self.build_pagexml_full_layout( - found_polygons_text_region, [], - page_coord, order_of_texts, id_of_texts, - all_found_textline_polygons, [], - all_box_coord, [], - found_polygons_text_region_img, found_polygons_tables, [], - found_polygons_marginals_left, found_polygons_marginals_right, - all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, - all_box_coord_marginals_left, all_box_coord_marginals_right, - slopes, [], slopes_marginals_left, slopes_marginals_right, - cont_page, polygons_seplines, - **kwargs) + found_polygons_text_region=found_polygons_text_region, + found_polygons_text_region_h=[], + page_coord=page_coord, + order_of_texts=order_of_texts, + id_of_texts=id_of_texts, + all_found_textline_polygons=all_found_textline_polygons, + all_found_textline_polygons_h=[], + all_box_coord=all_box_coord, + all_box_coord_h=[], + found_polygons_text_region_img=found_polygons_text_region_img, + found_polygons_tables=found_polygons_tables, + found_polygons_drop_capitals=[], + found_polygons_marginals_left=found_polygons_marginals_left, + found_polygons_marginals_right=found_polygons_marginals_right, + all_found_textline_polygons_marginals_left=all_found_textline_polygons_marginals_left, + all_found_textline_polygons_marginals_right=all_found_textline_polygons_marginals_right, + all_box_coord_marginals_left=all_box_coord_marginals_left, + all_box_coord_marginals_right=all_box_coord_marginals_right, + slopes=slopes, + slopes_h=[], + slopes_marginals_left=slopes_marginals_left, + slopes_marginals_right=slopes_marginals_right, + cont_page=cont_page, + polygons_seplines=polygons_seplines, + ) def build_pagexml_full_layout( - self, - found_polygons_text_region, found_polygons_text_region_h, - page_coord, order_of_texts, id_of_texts, - all_found_textline_polygons, all_found_textline_polygons_h, - all_box_coord, all_box_coord_h, - found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, - found_polygons_marginals_left,found_polygons_marginals_right, - all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, - all_box_coord_marginals_left, all_box_coord_marginals_right, - slopes, slopes_h, slopes_marginals_left, slopes_marginals_right, - cont_page, polygons_seplines, - ocr_all_textlines=None, ocr_all_textlines_h=None, - ocr_all_textlines_marginals_left=None, ocr_all_textlines_marginals_right=None, - ocr_all_textlines_drop=None, - conf_contours_textregions=None, conf_contours_textregions_h=None, - skip_layout_reading_order=False): + self, + *, + found_polygons_text_region, + found_polygons_text_region_h, + page_coord, + order_of_texts, + id_of_texts, + all_found_textline_polygons, + all_found_textline_polygons_h, + all_box_coord, + all_box_coord_h, + found_polygons_text_region_img, + found_polygons_tables, + found_polygons_drop_capitals, + found_polygons_marginals_left, + found_polygons_marginals_right, + all_found_textline_polygons_marginals_left, + all_found_textline_polygons_marginals_right, + all_box_coord_marginals_left, + all_box_coord_marginals_right, + slopes, + slopes_h, + slopes_marginals_left, + slopes_marginals_right, + cont_page, + polygons_seplines, + ocr_all_textlines=None, + ocr_all_textlines_h=None, + ocr_all_textlines_marginals_left=None, + ocr_all_textlines_marginals_right=None, + ocr_all_textlines_drop=None, + 
conf_contours_textregions=None, + conf_contours_textregions_h=None, + skip_layout_reading_order=False, + ): self.logger.debug('enter build_pagexml') # create the file structure From 5171e09c2d264554d86d2829bb0ffee80dba8133 Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 28 Nov 2025 10:50:50 +0100 Subject: [PATCH 11/15] eynollah.py: fix kwargs to writer --- src/eynollah/eynollah.py | 77 ++++++++++++++++------------------------ src/eynollah/writer.py | 3 -- 2 files changed, 30 insertions(+), 50 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index e5b4984..5e67b5e 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -8,7 +8,6 @@ document layout analysis (segmentation) with output in PAGE-XML # FIXME: fix all of those... # pyright: reportUnnecessaryTypeIgnoreComment=true # pyright: reportPossiblyUnboundVariable=false -# pyright: reportCallIssue=false # pyright: reportOperatorIssue=false # pyright: reportUnboundVariable=false # pyright: reportArgumentType=false @@ -20,12 +19,6 @@ document layout analysis (segmentation) with output in PAGE-XML import logging import sys -# cannot use importlib.resources until we move to 3.9+ forimportlib.resources.files -if sys.version_info < (3, 10): - import importlib_resources -else: - import importlib.resources as importlib_resources - from difflib import SequenceMatcher as sq import math import os @@ -48,9 +41,9 @@ import statistics tf_disable_interactive_logs() -import tensorflow as tf # type: ignore +import tensorflow as tf try: - import torch # type: ignore + import torch except ImportError: torch = None try: @@ -3367,19 +3360,17 @@ class Eynollah: order_text_new = [0] slopes =[0] - id_of_texts_tot =['region_0001'] conf_contours_textregions =[0] pcgts = self.writer.build_pagexml_no_full_layout( found_polygons_text_region=cont_page, page_coord=page_coord, order_of_texts=order_text_new, - id_of_texts=id_of_texts_tot, all_found_textline_polygons=all_found_textline_polygons, all_box_coord=page_coord, - polygons_of_images=[], - polygons_of_marginals_left=[], - polygons_of_marginals_right=[], + found_polygons_text_region_img=[], + found_polygons_marginals_left=[], + found_polygons_marginals_right=[], all_found_textline_polygons_marginals_left=[], all_found_textline_polygons_marginals_right=[], all_box_coord_marginals_left=[], @@ -3389,9 +3380,7 @@ class Eynollah: slopes_marginals_right=[], cont_page=cont_page, polygons_seplines=[], - contours_tables=[], - conf_contours_textregion=conf_contours_textregions, - skip_layout_reading_order=True + found_polygons_tables=[], ) self.logger.info("Basic processing complete") return pcgts @@ -3439,12 +3428,11 @@ class Eynollah: found_polygons_text_region=[], page_coord=page_coord, order_of_texts=[], - id_of_texts=[], all_found_textline_polygons=[], all_box_coord=[], - polygons_of_images=[], - polygons_of_marginals_left=[], - polygons_of_marginals_right=[], + found_polygons_text_region_img=[], + found_polygons_marginals_left=[], + found_polygons_marginals_right=[], all_found_textline_polygons_marginals_left=[], all_found_textline_polygons_marginals_right=[], all_box_coord_marginals_left=[], @@ -3454,7 +3442,7 @@ class Eynollah: slopes_marginals_right=[], cont_page=cont_page, polygons_seplines=[], - contours_tables=[] + found_polygons_tables=[], ) return pcgts @@ -3668,20 +3656,19 @@ class Eynollah: empty_marginals = [[]] * len(polygons_of_marginals) if self.full_layout: pcgts = self.writer.build_pagexml_full_layout( - contours_only_text_parent=[], - 
contours_only_text_parent_h=[], + found_polygons_text_region=[], + found_polygons_text_region_h=[], page_coord=page_coord, order_of_texts=[], - id_of_texts=[], all_found_textline_polygons=[], all_found_textline_polygons_h=[], all_box_coord=[], all_box_coord_h=[], - polygons_of_images=polygons_of_images, - contours_tables=contours_tables, - polygons_of_drop_capitals=[], - polygons_of_marginals_left=polygons_of_marginals, - polygons_of_marginals_right=polygons_of_marginals, + found_polygons_text_region_img=polygons_of_images, + found_polygons_tables=contours_tables, + found_polygons_drop_capitals=[], + found_polygons_marginals_left=polygons_of_marginals, + found_polygons_marginals_right=polygons_of_marginals, all_found_textline_polygons_marginals_left=empty_marginals, all_found_textline_polygons_marginals_right=empty_marginals, all_box_coord_marginals_left=empty_marginals, @@ -3698,12 +3685,11 @@ class Eynollah: found_polygons_text_region=[], page_coord=page_coord, order_of_texts=[], - id_of_texts=[], all_found_textline_polygons=[], all_box_coord=[], - polygons_of_images=polygons_of_images, - polygons_of_marginals_left=polygons_of_marginals, - polygons_of_marginals_right=polygons_of_marginals, + found_polygons_text_region_img=polygons_of_images, + found_polygons_marginals_left=polygons_of_marginals, + found_polygons_marginals_right=polygons_of_marginals, all_found_textline_polygons_marginals_left=empty_marginals, all_found_textline_polygons_marginals_right=empty_marginals, all_box_coord_marginals_left=empty_marginals, @@ -3713,7 +3699,7 @@ class Eynollah: slopes_marginals_right=[], cont_page=cont_page, polygons_seplines=polygons_seplines, - contours_tables=contours_tables + found_polygons_tables=contours_tables ) return pcgts @@ -3877,16 +3863,15 @@ class Eynollah: found_polygons_text_region_h=contours_only_text_parent_h, page_coord=page_coord, order_of_texts=order_text_new, - id_of_texts=id_of_texts_tot, all_found_textline_polygons=all_found_textline_polygons, all_found_textline_polygons_h=all_found_textline_polygons_h, all_box_coord=all_box_coord, all_box_coord_h=all_box_coord_h, - polygons_of_images=polygons_of_images, - contours_tables=contours_tables, - polygons_of_drop_capitals=polygons_of_drop_capitals, - polygons_of_marginals_left=polygons_of_marginals_left, - polygons_of_marginals_right=polygons_of_marginals_right, + found_polygons_text_region_img=polygons_of_images, + found_polygons_tables=contours_tables, + found_polygons_drop_capitals=polygons_of_drop_capitals, + found_polygons_marginals_left=polygons_of_marginals_left, + found_polygons_marginals_right=polygons_of_marginals_right, all_found_textline_polygons_marginals_left=all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right=all_found_textline_polygons_marginals_right, all_box_coord_marginals_left=all_box_coord_marginals_left, @@ -3905,12 +3890,11 @@ class Eynollah: found_polygons_text_region=contours_only_text_parent, page_coord=page_coord, order_of_texts=order_text_new, - id_of_texts=id_of_texts_tot, all_found_textline_polygons=all_found_textline_polygons, all_box_coord=all_box_coord, - polygons_of_images=polygons_of_images, - polygons_of_marginals_left=polygons_of_marginals_left, - polygons_of_marginals_right=polygons_of_marginals_right, + found_polygons_text_region_img=polygons_of_images, + found_polygons_marginals_left=polygons_of_marginals_left, + found_polygons_marginals_right=polygons_of_marginals_right, 
all_found_textline_polygons_marginals_left=all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right=all_found_textline_polygons_marginals_right, all_box_coord_marginals_left=all_box_coord_marginals_left, @@ -3920,8 +3904,7 @@ class Eynollah: slopes_marginals_right=slopes_marginals_right, cont_page=cont_page, polygons_seplines=polygons_seplines, - contours_tables=contours_tables, - conf_contours_textregions=conf_contours_textregions + found_polygons_tables=contours_tables, ) return pcgts diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index a944c72..63e54b2 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -88,7 +88,6 @@ class EynollahXmlWriter: found_polygons_text_region, page_coord, order_of_texts, - id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, @@ -110,7 +109,6 @@ class EynollahXmlWriter: found_polygons_text_region_h=[], page_coord=page_coord, order_of_texts=order_of_texts, - id_of_texts=id_of_texts, all_found_textline_polygons=all_found_textline_polygons, all_found_textline_polygons_h=[], all_box_coord=all_box_coord, @@ -139,7 +137,6 @@ class EynollahXmlWriter: found_polygons_text_region_h, page_coord, order_of_texts, - id_of_texts, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, From 9bcfeab0572ab4fada84b2aa9936380ac2dc5912 Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 28 Nov 2025 10:46:47 +0100 Subject: [PATCH 12/15] :skull: remove dead code from eynollah.py --- src/eynollah/eynollah.py | 399 +-------------------------- src/eynollah/image_enhancer.py | 8 +- src/eynollah/utils/separate_lines.py | 63 ----- src/eynollah/writer.py | 2 - 4 files changed, 5 insertions(+), 467 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 5e67b5e..9383c5e 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -77,15 +77,14 @@ from .utils.rotate import ( ) from .utils.separate_lines import ( return_deskew_slop, - do_work_of_slopes_new, do_work_of_slopes_new_curved, - do_work_of_slopes_new_light, ) from .utils.marginals import get_marginals from .utils.resize import resize_image from .utils.shm import share_ndarray from .utils import ( is_image_filename, + isNaN, crop_image_inside_box, box2rect, find_num_col, @@ -269,9 +268,6 @@ class Eynollah: key += '_uint8' return self._imgs[key].copy() - def isNaN(self, num): - return num != num - def predict_enhancement(self, img): self.logger.debug("enter predict_enhancement") @@ -913,136 +909,6 @@ class Eynollah: gc.collect() return prediction_true - def do_padding_with_scale(self, img, scale): - h_n = int(img.shape[0]*scale) - w_n = int(img.shape[1]*scale) - - channel0_avg = int( np.mean(img[:,:,0]) ) - channel1_avg = int( np.mean(img[:,:,1]) ) - channel2_avg = int( np.mean(img[:,:,2]) ) - - h_diff = img.shape[0] - h_n - w_diff = img.shape[1] - w_n - - h_start = int(0.5 * h_diff) - w_start = int(0.5 * w_diff) - - img_res = resize_image(img, h_n, w_n) - #label_res = resize_image(label, h_n, w_n) - - img_scaled_padded = np.copy(img) - - #label_scaled_padded = np.zeros(label.shape) - - img_scaled_padded[:,:,0] = channel0_avg - img_scaled_padded[:,:,1] = channel1_avg - img_scaled_padded[:,:,2] = channel2_avg - - img_scaled_padded[h_start:h_start+h_n, w_start:w_start+w_n,:] = img_res[:,:,:] - #label_scaled_padded[h_start:h_start+h_n, w_start:w_start+w_n,:] = label_res[:,:,:] - - return img_scaled_padded#, label_scaled_padded - - def do_prediction_new_concept_scatter_nd( - self, patches, 
img, model, - n_batch_inference=1, marginal_of_patch_percent=0.1, - thresholding_for_some_classes_in_light_version=False, - thresholding_for_artificial_class_in_light_version=False): - - self.logger.debug("enter do_prediction_new_concept") - img_height_model = model.layers[-1].output_shape[1] - img_width_model = model.layers[-1].output_shape[2] - - if not patches: - img_h_page = img.shape[0] - img_w_page = img.shape[1] - img = img / 255.0 - img = resize_image(img, img_height_model, img_width_model) - - label_p_pred = model.predict(img[np.newaxis], verbose=0) - seg = np.argmax(label_p_pred, axis=3)[0] - - if thresholding_for_artificial_class_in_light_version: - #seg_text = label_p_pred[0,:,:,1] - #seg_text[seg_text<0.2] =0 - #seg_text[seg_text>0] =1 - #seg[seg_text==1]=1 - - seg_art = label_p_pred[0,:,:,4] - seg_art[seg_art<0.2] =0 - seg_art[seg_art>0] =1 - seg[seg_art==1]=4 - - seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2) - prediction_true = resize_image(seg_color, img_h_page, img_w_page).astype(np.uint8) - return prediction_true - - if img.shape[0] < img_height_model: - img = resize_image(img, img_height_model, img.shape[1]) - if img.shape[1] < img_width_model: - img = resize_image(img, img.shape[0], img_width_model) - - self.logger.debug("Patch size: %sx%s", img_height_model, img_width_model) - ##margin = int(marginal_of_patch_percent * img_height_model) - #width_mid = img_width_model - 2 * margin - #height_mid = img_height_model - 2 * margin - img = img / 255.0 - img = img.astype(np.float16) - img_h = img.shape[0] - img_w = img.shape[1] - - stride_x = img_width_model - 100 - stride_y = img_height_model - 100 - - one_tensor = tf.ones_like(img) - img_patches, one_patches = tf.image.extract_patches( - images=[img, one_tensor], - sizes=[1, img_height_model, img_width_model, 1], - strides=[1, stride_y, stride_x, 1], - rates=[1, 1, 1, 1], - padding='SAME') - img_patches = tf.squeeze(img_patches) - one_patches = tf.squeeze(one_patches) - img_patches_resh = tf.reshape(img_patches, shape=(img_patches.shape[0] * img_patches.shape[1], - img_height_model, img_width_model, 3)) - pred_patches = model.predict(img_patches_resh, batch_size=n_batch_inference) - one_patches = tf.reshape(one_patches, shape=(img_patches.shape[0] * img_patches.shape[1], - img_height_model, img_width_model, 3)) - x = tf.range(img.shape[1]) - y = tf.range(img.shape[0]) - x, y = tf.meshgrid(x, y) - indices = tf.stack([y, x], axis=-1) - - indices_patches = tf.image.extract_patches( - images=tf.expand_dims(indices, axis=0), - sizes=[1, img_height_model, img_width_model, 1], - strides=[1, stride_y, stride_x, 1], - rates=[1, 1, 1, 1], - padding='SAME') - indices_patches = tf.squeeze(indices_patches) - indices_patches = tf.reshape(indices_patches, shape=(img_patches.shape[0] * img_patches.shape[1], - img_height_model, img_width_model, 2)) - margin_y = int( 0.5 * (img_height_model - stride_y) ) - margin_x = int( 0.5 * (img_width_model - stride_x) ) - - mask_margin = np.zeros((img_height_model, img_width_model)) - mask_margin[margin_y:img_height_model - margin_y, - margin_x:img_width_model - margin_x] = 1 - - indices_patches_array = indices_patches.numpy() - for i in range(indices_patches_array.shape[0]): - indices_patches_array[i,:,:,0] = indices_patches_array[i,:,:,0]*mask_margin - indices_patches_array[i,:,:,1] = indices_patches_array[i,:,:,1]*mask_margin - - reconstructed = tf.scatter_nd( - indices=indices_patches_array, - updates=pred_patches, - shape=(img.shape[0], img.shape[1], pred_patches.shape[-1])).numpy() - - 
prediction_true = np.argmax(reconstructed, axis=2).astype(np.uint8) - gc.collect() - return np.repeat(prediction_true[:, :, np.newaxis], 3, axis=2) - def do_prediction_new_concept( self, patches, img, model, n_batch_inference=1, marginal_of_patch_percent=0.1, @@ -1615,41 +1481,6 @@ class Eynollah: all_box_coord, slopes) - def get_slopes_and_deskew_new_light(self, contours, contours_par, textline_mask_tot, boxes, slope_deskew): - if not len(contours): - return [], [], [] - self.logger.debug("enter get_slopes_and_deskew_new_light") - with share_ndarray(textline_mask_tot) as textline_mask_tot_shared: - assert self.executor - results = self.executor.map(partial(do_work_of_slopes_new_light, - textline_mask_tot_ea=textline_mask_tot_shared, - slope_deskew=slope_deskew, - logger=self.logger,), - boxes, contours, contours_par) - results = list(results) # exhaust prior to release - #textline_polygons, box_coord, slopes = zip(*results) - self.logger.debug("exit get_slopes_and_deskew_new_light") - return tuple(zip(*results)) - - def get_slopes_and_deskew_new(self, contours, contours_par, textline_mask_tot, boxes, slope_deskew): - if not len(contours): - return [], [], [] - self.logger.debug("enter get_slopes_and_deskew_new") - with share_ndarray(textline_mask_tot) as textline_mask_tot_shared: - assert self.executor - results = self.executor.map(partial(do_work_of_slopes_new, - textline_mask_tot_ea=textline_mask_tot_shared, - slope_deskew=slope_deskew, - MAX_SLOPE=MAX_SLOPE, - KERNEL=KERNEL, - logger=self.logger, - plotter=self.plotter,), - boxes, contours, contours_par) - results = list(results) # exhaust prior to release - #textline_polygons, box_coord, slopes = zip(*results) - self.logger.debug("exit get_slopes_and_deskew_new") - return tuple(zip(*results)) - def get_slopes_and_deskew_new_curved(self, contours_par, textline_mask_tot, boxes, mask_texts_only, num_col, scale_par, slope_deskew): if not len(contours_par): @@ -1875,145 +1706,6 @@ class Eynollah: img_bin, confidence_matrix) - def get_regions_from_xy_2models(self,img,is_image_enhanced, num_col_classifier): - self.logger.debug("enter get_regions_from_xy_2models") - erosion_hurts = False - img_org = np.copy(img) - img_height_h = img_org.shape[0] - img_width_h = img_org.shape[1] - - ratio_y=1.3 - ratio_x=1 - - img = resize_image(img_org, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) - prediction_regions_org_y = self.do_prediction(True, img, self.model_zoo.get("region")) - prediction_regions_org_y = resize_image(prediction_regions_org_y, img_height_h, img_width_h ) - - #plt.imshow(prediction_regions_org_y[:,:,0]) - #plt.show() - prediction_regions_org_y = prediction_regions_org_y[:,:,0] - mask_zeros_y = (prediction_regions_org_y[:,:]==0)*1 - - ##img_only_regions_with_sep = ( (prediction_regions_org_y[:,:] != 3) & (prediction_regions_org_y[:,:] != 0) )*1 - img_only_regions_with_sep = (prediction_regions_org_y == 1).astype(np.uint8) - try: - img_only_regions = cv2.erode(img_only_regions_with_sep[:,:], KERNEL, iterations=20) - _, _ = find_num_col(img_only_regions, num_col_classifier, self.tables, multiplier=6.0) - img = resize_image(img_org, int(img_org.shape[0]), int(img_org.shape[1]*(1.2 if is_image_enhanced else 1))) - - prediction_regions_org = self.do_prediction(True, img, self.model_zoo.get("region")) - prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) - - prediction_regions_org=prediction_regions_org[:,:,0] - prediction_regions_org[(prediction_regions_org[:,:]==1) & 
(mask_zeros_y[:,:]==1)]=0 - - img = resize_image(img_org, int(img_org.shape[0]), int(img_org.shape[1])) - - prediction_regions_org2 = self.do_prediction(True, img, self.model_zoo.get("region_p2"), marginal_of_patch_percent=0.2) - prediction_regions_org2=resize_image(prediction_regions_org2, img_height_h, img_width_h ) - - mask_zeros2 = (prediction_regions_org2[:,:,0] == 0) - mask_lines2 = (prediction_regions_org2[:,:,0] == 3) - text_sume_early = (prediction_regions_org[:,:] == 1).sum() - prediction_regions_org_copy = np.copy(prediction_regions_org) - prediction_regions_org_copy[(prediction_regions_org_copy[:,:]==1) & (mask_zeros2[:,:]==1)] = 0 - text_sume_second = ((prediction_regions_org_copy[:,:]==1)*1).sum() - rate_two_models = 100. * text_sume_second / text_sume_early - - self.logger.info("ratio_of_two_models: %s", rate_two_models) - if not(is_image_enhanced and rate_two_models < RATIO_OF_TWO_MODEL_THRESHOLD): - prediction_regions_org = np.copy(prediction_regions_org_copy) - - prediction_regions_org[(mask_lines2[:,:]==1) & (prediction_regions_org[:,:]==0)]=3 - mask_lines_only=(prediction_regions_org[:,:]==3)*1 - prediction_regions_org = cv2.erode(prediction_regions_org[:,:], KERNEL, iterations=2) - prediction_regions_org = cv2.dilate(prediction_regions_org[:,:], KERNEL, iterations=2) - - if rate_two_models<=40: - if self.input_binary: - prediction_bin = np.copy(img_org) - else: - prediction_bin = self.do_prediction(True, img_org, self.model_zoo.get("binarization"), n_batch_inference=5) - prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h ) - prediction_bin = 255 * (prediction_bin[:,:,0]==0) - prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) - - ratio_y=1 - ratio_x=1 - - img = resize_image(prediction_bin, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) - - prediction_regions_org = self.do_prediction(True, img, self.model_zoo.get("region")) - prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) - prediction_regions_org=prediction_regions_org[:,:,0] - - mask_lines_only=(prediction_regions_org[:,:]==3)*1 - - mask_texts_only=(prediction_regions_org[:,:]==1)*1 - mask_images_only=(prediction_regions_org[:,:]==2)*1 - - polygons_seplines, hir_seplines = return_contours_of_image(mask_lines_only) - polygons_seplines = filter_contours_area_of_image( - mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001, dilate=1) - - polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only, 1, 0.00001) - polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only, 1, 0.00001) - - text_regions_p_true = np.zeros(prediction_regions_org.shape) - text_regions_p_true = cv2.fillPoly(text_regions_p_true,pts = polygons_of_only_lines, color=(3, 3, 3)) - text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 - - text_regions_p_true=cv2.fillPoly(text_regions_p_true,pts=polygons_of_only_texts, color=(1,1,1)) - - self.logger.debug("exit get_regions_from_xy_2models") - return text_regions_p_true, erosion_hurts, polygons_seplines, polygons_of_only_texts - except: - if self.input_binary: - prediction_bin = np.copy(img_org) - prediction_bin = self.do_prediction(True, img_org, self.model_zoo.get("binarization"), n_batch_inference=5) - prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h ) - prediction_bin = 255 * (prediction_bin[:,:,0]==0) - prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) - else: - prediction_bin = 
np.copy(img_org) - ratio_y=1 - ratio_x=1 - - - img = resize_image(prediction_bin, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) - prediction_regions_org = self.do_prediction(True, img, self.model_zoo.get("region")) - prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) - prediction_regions_org=prediction_regions_org[:,:,0] - - #mask_lines_only=(prediction_regions_org[:,:]==3)*1 - #img = resize_image(img_org, int(img_org.shape[0]*1), int(img_org.shape[1]*1)) - - #prediction_regions_org = self.do_prediction(True, img, self.model_zoo.get_model("region")) - #prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) - #prediction_regions_org = prediction_regions_org[:,:,0] - #prediction_regions_org[(prediction_regions_org[:,:] == 1) & (mask_zeros_y[:,:] == 1)]=0 - - mask_lines_only = (prediction_regions_org == 3)*1 - mask_texts_only = (prediction_regions_org == 1)*1 - mask_images_only= (prediction_regions_org == 2)*1 - - polygons_seplines, hir_seplines = return_contours_of_image(mask_lines_only) - polygons_seplines = filter_contours_area_of_image( - mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001, dilate=1) - - polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) - polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) - - text_regions_p_true = np.zeros(prediction_regions_org.shape) - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_lines, color=(3,3,3)) - - text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_texts, color=(1,1,1)) - - erosion_hurts = True - self.logger.debug("exit get_regions_from_xy_2models") - return text_regions_p_true, erosion_hurts, polygons_seplines, polygons_of_only_texts - def do_order_of_regions( self, contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot): @@ -2259,7 +1951,7 @@ class Eynollah: img_comm = cv2.fillPoly(img_comm, pts=main_contours, color=indiv) - if not self.isNaN(slope_mean_hor): + if not isNaN(slope_mean_hor): image_revised_last = np.zeros(image_regions_eraly_p.shape[:2]) for i in range(len(boxes)): box_ys = slice(*boxes[i][2:4]) @@ -2455,52 +2147,6 @@ class Eynollah: return page_coord, image_page, textline_mask_tot_ea, img_bin_light, cont_page - def run_graphics_and_columns( - self, text_regions_p_1, - num_col_classifier, num_column_is_classified, erosion_hurts): - - t_in_gr = time.time() - img_g = self.imread(grayscale=True, uint8=True) - - img_g3 = np.zeros((img_g.shape[0], img_g.shape[1], 3)) - img_g3 = img_g3.astype(np.uint8) - img_g3[:, :, 0] = img_g[:, :] - img_g3[:, :, 1] = img_g[:, :] - img_g3[:, :, 2] = img_g[:, :] - - image_page, page_coord, cont_page = self.extract_page() - - if self.tables: - table_prediction = self.get_tables_from_model(image_page, num_col_classifier) - else: - table_prediction = np.zeros((image_page.shape[0], image_page.shape[1])).astype(np.int16) - - if self.plotter: - self.plotter.save_page_image(image_page) - - text_regions_p_1 = text_regions_p_1[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - mask_images = (text_regions_p_1[:, :] == 2) * 1 - mask_images = mask_images.astype(np.uint8) - mask_images = cv2.erode(mask_images[:, :], KERNEL, iterations=10) - mask_lines = (text_regions_p_1[:, :] == 3) * 1 - mask_lines = mask_lines.astype(np.uint8) - img_only_regions_with_sep = 
((text_regions_p_1[:, :] != 3) & (text_regions_p_1[:, :] != 0)) * 1 - img_only_regions_with_sep = img_only_regions_with_sep.astype(np.uint8) - - if erosion_hurts: - img_only_regions = np.copy(img_only_regions_with_sep[:,:]) - else: - img_only_regions = cv2.erode(img_only_regions_with_sep[:,:], KERNEL, iterations=6) - try: - num_col, _ = find_num_col(img_only_regions, num_col_classifier, self.tables, multiplier=6.0) - num_col = num_col + 1 - if not num_column_is_classified: - num_col_classifier = num_col + 1 - except Exception as why: - self.logger.error(why) - num_col = None - return (num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, - text_regions_p_1, cont_page, table_prediction) def run_enhancement(self): t_in = time.time() @@ -2518,10 +2164,7 @@ class Eynollah: else: self.get_image_and_scales_after_enhancing(img_org, img_res) else: - if self.allow_enhancement: - self.get_image_and_scales(img_org, img_res, scale) - else: - self.get_image_and_scales(img_org, img_res, scale) + self.get_image_and_scales(img_org, img_res, scale) if self.allow_scaling: img_org, img_res, is_image_enhanced = \ self.resize_image_with_column_classifier(is_image_enhanced, img_bin) @@ -3019,42 +2662,6 @@ class Eynollah: region_ids = ['region_%04d' % i for i in range(len(co_text_all_org))] return ordered, region_ids - - - - - - - - - def return_list_of_contours_with_desired_order(self, ls_cons, sorted_indexes): - return list(np.array(ls_cons)[np.array(sorted_indexes)]) - - def return_it_in_two_groups(self, x_differential): - split = [ind if x_differential[ind]!=x_differential[ind+1] else -1 - for ind in range(len(x_differential)-1)] - split_masked = list( np.array(split[:])[np.array(split[:])!=-1] ) - if 0 not in split_masked: - split_masked.insert(0, -1) - split_masked.append(len(x_differential)-1) - - split_masked = np.array(split_masked) +1 - - sums = [np.sum(x_differential[split_masked[ind]:split_masked[ind+1]]) - for ind in range(len(split_masked)-1)] - - indexes_to_bec_changed = [ind if (np.abs(sums[ind-1]) > np.abs(sums[ind]) and - np.abs(sums[ind+1]) > np.abs(sums[ind])) else -1 - for ind in range(1,len(sums)-1)] - indexes_to_bec_changed_filtered = np.array(indexes_to_bec_changed)[np.array(indexes_to_bec_changed)!=-1] - - x_differential_new = np.copy(x_differential) - for i in indexes_to_bec_changed_filtered: - i_slice = slice(split_masked[i], split_masked[i+1]) - x_differential_new[i_slice] = -1 * np.array(x_differential)[i_slice] - - return x_differential_new - def filter_contours_inside_a_bigger_one(self, contours, contours_d_ordered, image, marginal_cnts=None, type_contour="textregion"): if type_contour == "textregion": diff --git a/src/eynollah/image_enhancer.py b/src/eynollah/image_enhancer.py index a1b2786..babbd55 100644 --- a/src/eynollah/image_enhancer.py +++ b/src/eynollah/image_enhancer.py @@ -10,14 +10,14 @@ Image enhancer. 
The output can be written as same scale of input or in new predi import logging import os import time -from typing import Dict, Optional +from typing import Optional from pathlib import Path import gc import cv2 from keras.models import Model import numpy as np -import tensorflow as tf +import tensorflow as tf # type: ignore from skimage.morphology import skeletonize from .model_zoo import EynollahModelZoo @@ -27,7 +27,6 @@ from .utils import ( is_image_filename, crop_image_inside_box ) -from .patch_encoder import PatchEncoder, Patches DPI_THRESHOLD = 298 KERNEL = np.ones((5, 5), np.uint8) @@ -91,9 +90,6 @@ class Enhancer: key += '_uint8' return self._imgs[key].copy() - def isNaN(self, num): - return num != num - def predict_enhancement(self, img): self.logger.debug("enter predict_enhancement") diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 4db4e19..c220234 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -5,8 +5,6 @@ import numpy as np import cv2 from scipy.signal import find_peaks from scipy.ndimage import gaussian_filter1d -from multiprocessing import Process, Queue, cpu_count -from multiprocessing import Pool from .rotate import rotate_image from .resize import resize_image from .contour import ( @@ -20,9 +18,7 @@ from .contour import ( from .shm import share_ndarray, wrap_ndarray_shared from . import ( find_num_col_deskew, - crop_image_inside_box, box2rect, - box2slice, ) def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): @@ -1590,65 +1586,6 @@ def get_smallest_skew(img, sigma_des, angles, logger=None, plotter=None, map=map var = 0 return angle, var -@wrap_ndarray_shared(kw='textline_mask_tot_ea') -def do_work_of_slopes_new( - box_text, contour, contour_par, - textline_mask_tot_ea=None, slope_deskew=0.0, - logger=None, MAX_SLOPE=999, KERNEL=None, plotter=None -): - if KERNEL is None: - KERNEL = np.ones((5, 5), np.uint8) - if logger is None: - logger = getLogger(__package__) - logger.debug('enter do_work_of_slopes_new') - - x, y, w, h = box_text - crop_coor = box2rect(box_text) - mask_textline = np.zeros(textline_mask_tot_ea.shape) - mask_textline = cv2.fillPoly(mask_textline, pts=[contour], color=(1,1,1)) - all_text_region_raw = textline_mask_tot_ea * mask_textline - all_text_region_raw = all_text_region_raw[y: y + h, x: x + w].astype(np.uint8) - img_int_p = all_text_region_raw[:,:] - img_int_p = cv2.erode(img_int_p, KERNEL, iterations=2) - - if not np.prod(img_int_p.shape) or img_int_p.shape[0] /img_int_p.shape[1] < 0.1: - slope = 0 - slope_for_all = slope_deskew - all_text_region_raw = textline_mask_tot_ea[y: y + h, x: x + w] - cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contour_par, box_text, 0) - else: - try: - textline_con, hierarchy = return_contours_of_image(img_int_p) - textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con, - hierarchy, - max_area=1, min_area=0.00008) - y_diff_mean = find_contours_mean_y_diff(textline_con_fil) if len(textline_con_fil) > 1 else np.NaN - if np.isnan(y_diff_mean): - slope_for_all = MAX_SLOPE - else: - sigma_des = max(1, int(y_diff_mean * (4.0 / 40.0))) - img_int_p[img_int_p > 0] = 1 - slope_for_all = return_deskew_slop(img_int_p, sigma_des, logger=logger, plotter=plotter) - if abs(slope_for_all) <= 0.5: - slope_for_all = slope_deskew - except: - logger.exception("cannot determine angle of contours") - slope_for_all = MAX_SLOPE - - if slope_for_all == MAX_SLOPE: - 
slope_for_all = slope_deskew - slope = slope_for_all - mask_only_con_region = np.zeros(textline_mask_tot_ea.shape) - mask_only_con_region = cv2.fillPoly(mask_only_con_region, pts=[contour_par], color=(1, 1, 1)) - - all_text_region_raw = textline_mask_tot_ea[y: y + h, x: x + w].copy() - mask_only_con_region = mask_only_con_region[y: y + h, x: x + w] - - all_text_region_raw[mask_only_con_region == 0] = 0 - cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contour_par, box_text) - - return cnt_clean_rot, crop_coor, slope - @wrap_ndarray_shared(kw='textline_mask_tot_ea') @wrap_ndarray_shared(kw='mask_texts_only') def do_work_of_slopes_new_curved( diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index 63e54b2..1781230 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -4,7 +4,6 @@ from pathlib import Path import os.path from typing import Optional import logging -import xml.etree.ElementTree as ET from .utils.xml import create_page_xml, xml_reading_order from .utils.counter import EynollahIdCounter @@ -19,7 +18,6 @@ from ocrd_models.ocrd_page import ( SeparatorRegionType, to_xml ) -import numpy as np class EynollahXmlWriter: From 951bd2fce6d31bfd8dc592d5ea8f609d66fe539f Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 28 Nov 2025 15:03:06 +0100 Subject: [PATCH 13/15] CI: do not upgrade (now-unpineed) torch --- .github/workflows/test-eynollah.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/test-eynollah.yml b/.github/workflows/test-eynollah.yml index d6b92ba..82de94d 100644 --- a/.github/workflows/test-eynollah.yml +++ b/.github/workflows/test-eynollah.yml @@ -67,10 +67,6 @@ jobs: make install-dev EXTRAS=OCR,plotting make deps-test EXTRAS=OCR,plotting - - name: Hard-upgrade torch for debugging - run: | - python -m pip install --upgrade torch - - name: Test with pytest run: make coverage PYTEST_ARGS="-vv --junitxml=pytest.xml" From 30f9c695dcb7ff0550b7d9ebcff100dbd2cf4cb1 Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 28 Nov 2025 12:09:50 +0100 Subject: [PATCH 14/15] move line-gt extraction out of ocr to eynollah-training --- src/eynollah/cli/cli_ocr.py | 21 +- src/eynollah/eynollah_ocr.py | 770 +++++++++++------------ src/eynollah/training/cli.py | 2 + src/eynollah/training/extract_line_gt.py | 136 ++++ 4 files changed, 500 insertions(+), 429 deletions(-) create mode 100644 src/eynollah/training/extract_line_gt.py diff --git a/src/eynollah/cli/cli_ocr.py b/src/eynollah/cli/cli_ocr.py index 962ee9b..9bb8620 100644 --- a/src/eynollah/cli/cli_ocr.py +++ b/src/eynollah/cli/cli_ocr.py @@ -59,12 +59,6 @@ import click is_flag=True, help="if this parameter set to true, transformer ocr will be applied, otherwise cnn_rnn model.", ) -@click.option( - "--export_textline_images_and_text", - "-etit/-noetit", - is_flag=True, - help="if this parameter set to true, images and text in xml will be exported into output dir. This files can be used for training a OCR engine.", -) @click.option( "--do_not_mask_with_textline_contour", "-nmtc/-mtc", @@ -76,11 +70,6 @@ import click "-bs", help="number of inference batch size. 
Default b_s for trocr and cnn_rnn models are 2 and 8 respectively", ) -@click.option( - "--dataset_abbrevation", - "-ds_pref", - help="in the case of extracting textline and text from a xml GT file user can add an abbrevation of dataset name to generated dataset", -) @click.option( "--min_conf_value_of_textline_text", "-min_conf", @@ -97,7 +86,6 @@ def ocr_cli( dir_out_image_text, overwrite, tr_ocr, - export_textline_images_and_text, do_not_mask_with_textline_contour, batch_size, dataset_abbrevation, @@ -106,18 +94,11 @@ def ocr_cli( """ Recognize text with a CNN/RNN or transformer ML model. """ - assert not export_textline_images_and_text or not tr_ocr, "Exporting textline and text -etit can not be set alongside transformer ocr -tr_ocr" - # FIXME: refactor: move export_textline_images_and_text out of eynollah.py - # assert not export_textline_images_and_text or not model, "Exporting textline and text -etit can not be set alongside model -m" - assert not export_textline_images_and_text or not batch_size, "Exporting textline and text -etit can not be set alongside batch size -bs" - assert not export_textline_images_and_text or not dir_in_bin, "Exporting textline and text -etit can not be set alongside directory of bin images -dib" - assert not export_textline_images_and_text or not dir_out_image_text, "Exporting textline and text -etit can not be set alongside directory of images with predicted text -doit" - assert bool(image) != bool(dir_in), "Either -i (single image) or -di (directory) must be provided, but not both." + assert bool(image) ^ bool(dir_in), "Either -i (single image) or -di (directory) must be provided, but not both." from ..eynollah_ocr import Eynollah_ocr eynollah_ocr = Eynollah_ocr( model_zoo=ctx.obj.model_zoo, tr_ocr=tr_ocr, - export_textline_images_and_text=export_textline_images_and_text, do_not_mask_with_textline_contour=do_not_mask_with_textline_contour, batch_size=batch_size, pref_of_dataset=dataset_abbrevation, diff --git a/src/eynollah/eynollah_ocr.py b/src/eynollah/eynollah_ocr.py index 61de12c..52dcca9 100644 --- a/src/eynollah/eynollah_ocr.py +++ b/src/eynollah/eynollah_ocr.py @@ -9,17 +9,13 @@ from logging import Logger, getLogger from typing import Optional from pathlib import Path import os -import json import gc import sys import math import time -from keras.layers import StringLookup import cv2 import xml.etree.ElementTree as ET -import tensorflow as tf -from keras.models import load_model from PIL import Image, ImageDraw, ImageFont import numpy as np from eynollah.model_zoo import EynollahModelZoo @@ -48,11 +44,6 @@ if sys.version_info < (3, 10): else: import importlib.resources as importlib_resources -try: - from transformers import TrOCRProcessor, VisionEncoderDecoderModel -except ImportError: - TrOCRProcessor = VisionEncoderDecoderModel = None - class Eynollah_ocr: def __init__( self, @@ -60,27 +51,16 @@ class Eynollah_ocr: model_zoo: EynollahModelZoo, tr_ocr=False, batch_size: Optional[int]=None, - export_textline_images_and_text: bool=False, do_not_mask_with_textline_contour: bool=False, - pref_of_dataset=None, min_conf_value_of_textline_text : Optional[float]=None, logger: Optional[Logger]=None, ): self.tr_ocr = tr_ocr - # For generating textline-image pairs for traning, move to generate_gt_for_training - self.export_textline_images_and_text = export_textline_images_and_text # masking for OCR and GT generation, relevant for skewed lines and bounding boxes self.do_not_mask_with_textline_contour = do_not_mask_with_textline_contour - # prefix or dataset 
- self.pref_of_dataset = pref_of_dataset self.logger = logger if logger else getLogger('eynollah.ocr') self.model_zoo = model_zoo - # TODO: Properly document what 'export_textline_images_and_text' is about - if export_textline_images_and_text: - self.logger.info("export_textline_images_and_text was set, so no actual models are loaded") - return - self.min_conf_value_of_textline_text = min_conf_value_of_textline_text if min_conf_value_of_textline_text else 0.3 self.b_s = 2 if batch_size is None and tr_ocr else 8 if batch_size is None else batch_size @@ -539,40 +519,55 @@ class Eynollah_ocr: mask_poly = mask_poly[y:y+h, x:x+w, :] img_crop = img_poly_on_img[y:y+h, x:x+w, :] - if self.export_textline_images_and_text: + # print(file_name, angle_degrees, w*h, + # mask_poly[:,:,0].sum(), + # mask_poly[:,:,0].sum() /float(w*h) , + # 'didi') + + if angle_degrees > 3: + better_des_slope = get_orientation_moments(textline_coords) + + img_crop = rotate_image_with_padding(img_crop, better_des_slope) + if dir_in_bin is not None: + img_crop_bin = rotate_image_with_padding(img_crop_bin, better_des_slope) + + mask_poly = rotate_image_with_padding(mask_poly, better_des_slope) + mask_poly = mask_poly.astype('uint8') + + #new bounding box + x_n, y_n, w_n, h_n = get_contours_and_bounding_boxes(mask_poly[:,:,0]) + + mask_poly = mask_poly[y_n:y_n+h_n, x_n:x_n+w_n, :] + img_crop = img_crop[y_n:y_n+h_n, x_n:x_n+w_n, :] + if not self.do_not_mask_with_textline_contour: img_crop[mask_poly==0] = 255 - - else: - # print(file_name, angle_degrees, w*h, - # mask_poly[:,:,0].sum(), - # mask_poly[:,:,0].sum() /float(w*h) , - # 'didi') - - if angle_degrees > 3: - better_des_slope = get_orientation_moments(textline_coords) - - img_crop = rotate_image_with_padding(img_crop, better_des_slope) - if dir_in_bin is not None: - img_crop_bin = rotate_image_with_padding(img_crop_bin, better_des_slope) - - mask_poly = rotate_image_with_padding(mask_poly, better_des_slope) - mask_poly = mask_poly.astype('uint8') - - #new bounding box - x_n, y_n, w_n, h_n = get_contours_and_bounding_boxes(mask_poly[:,:,0]) - - mask_poly = mask_poly[y_n:y_n+h_n, x_n:x_n+w_n, :] - img_crop = img_crop[y_n:y_n+h_n, x_n:x_n+w_n, :] - + if dir_in_bin is not None: + img_crop_bin = img_crop_bin[y_n:y_n+h_n, x_n:x_n+w_n, :] if not self.do_not_mask_with_textline_contour: - img_crop[mask_poly==0] = 255 + img_crop_bin[mask_poly==0] = 255 + + if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 90: if dir_in_bin is not None: - img_crop_bin = img_crop_bin[y_n:y_n+h_n, x_n:x_n+w_n, :] - if not self.do_not_mask_with_textline_contour: - img_crop_bin[mask_poly==0] = 255 - - if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 90: + img_crop, img_crop_bin = \ + break_curved_line_into_small_pieces_and_then_merge( + img_crop, mask_poly, img_crop_bin) + else: + img_crop, _ = \ + break_curved_line_into_small_pieces_and_then_merge( + img_crop, mask_poly) + + else: + better_des_slope = 0 + if not self.do_not_mask_with_textline_contour: + img_crop[mask_poly==0] = 255 + if dir_in_bin is not None: + if not self.do_not_mask_with_textline_contour: + img_crop_bin[mask_poly==0] = 255 + if type_textregion=='drop-capital': + pass + else: + if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 90: if dir_in_bin is not None: img_crop, img_crop_bin = \ break_curved_line_into_small_pieces_and_then_merge( @@ -581,188 +576,178 @@ class Eynollah_ocr: img_crop, _ = \ break_curved_line_into_small_pieces_and_then_merge( img_crop, mask_poly) - - else: - better_des_slope 
= 0 - if not self.do_not_mask_with_textline_contour: - img_crop[mask_poly==0] = 255 - if dir_in_bin is not None: - if not self.do_not_mask_with_textline_contour: - img_crop_bin[mask_poly==0] = 255 - if type_textregion=='drop-capital': - pass - else: - if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 90: - if dir_in_bin is not None: - img_crop, img_crop_bin = \ - break_curved_line_into_small_pieces_and_then_merge( - img_crop, mask_poly, img_crop_bin) - else: - img_crop, _ = \ - break_curved_line_into_small_pieces_and_then_merge( - img_crop, mask_poly) - if not self.export_textline_images_and_text: - if w_scaled < 750:#1.5*image_width: + if w_scaled < 750:#1.5*image_width: + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + img_crop, image_height, image_width) + cropped_lines.append(img_fin) + if abs(better_des_slope) > 45: + cropped_lines_ver_index.append(1) + else: + cropped_lines_ver_index.append(0) + + cropped_lines_meging_indexing.append(0) + if dir_in_bin is not None: img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop, image_height, image_width) + img_crop_bin, image_height, image_width) + cropped_lines_bin.append(img_fin) + else: + splited_images, splited_images_bin = return_textlines_split_if_needed( + img_crop, img_crop_bin if dir_in_bin is not None else None) + if splited_images: + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + splited_images[0], image_height, image_width) cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(1) + if abs(better_des_slope) > 45: cropped_lines_ver_index.append(1) else: cropped_lines_ver_index.append(0) + + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + splited_images[1], image_height, image_width) + + cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(-1) + + if abs(better_des_slope) > 45: + cropped_lines_ver_index.append(1) + else: + cropped_lines_ver_index.append(0) + + if dir_in_bin is not None: + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + splited_images_bin[0], image_height, image_width) + cropped_lines_bin.append(img_fin) + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + splited_images_bin[1], image_height, image_width) + cropped_lines_bin.append(img_fin) + else: + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + img_crop, image_height, image_width) + cropped_lines.append(img_fin) cropped_lines_meging_indexing.append(0) + + if abs(better_des_slope) > 45: + cropped_lines_ver_index.append(1) + else: + cropped_lines_ver_index.append(0) + if dir_in_bin is not None: img_fin = preprocess_and_resize_image_for_ocrcnn_model( img_crop_bin, image_height, image_width) cropped_lines_bin.append(img_fin) - else: - splited_images, splited_images_bin = return_textlines_split_if_needed( - img_crop, img_crop_bin if dir_in_bin is not None else None) - if splited_images: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - splited_images[0], image_height, image_width) - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(1) - - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - splited_images[1], image_height, image_width) - - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(-1) - - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - if dir_in_bin is not None: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - 
splited_images_bin[0], image_height, image_width) - cropped_lines_bin.append(img_fin) - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - splited_images_bin[1], image_height, image_width) - cropped_lines_bin.append(img_fin) - - else: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop, image_height, image_width) - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(0) - - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - if dir_in_bin is not None: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop_bin, image_height, image_width) - cropped_lines_bin.append(img_fin) - - if self.export_textline_images_and_text: - if img_crop.shape[0]==0 or img_crop.shape[1]==0: - pass - else: - if child_textlines.tag.endswith("TextEquiv"): - for cheild_text in child_textlines: - if cheild_text.tag.endswith("Unicode"): - textline_text = cheild_text.text - if textline_text: - base_name = os.path.join( - dir_out, file_name + '_line_' + str(indexer_textlines)) - if self.pref_of_dataset: - base_name += '_' + self.pref_of_dataset - if not self.do_not_mask_with_textline_contour: - base_name += '_masked' - - with open(base_name + '.txt', 'w') as text_file: - text_file.write(textline_text) - cv2.imwrite(base_name + '.png', img_crop) - indexer_textlines+=1 - - if not self.export_textline_images_and_text: - indexer_text_region = indexer_text_region +1 - - if not self.export_textline_images_and_text: - extracted_texts = [] - extracted_conf_value = [] - - n_iterations = math.ceil(len(cropped_lines) / self.b_s) - - for i in range(n_iterations): - if i==(n_iterations-1): - n_start = i*self.b_s - imgs = cropped_lines[n_start:] - imgs = np.array(imgs) - imgs = imgs.reshape(imgs.shape[0], image_height, image_width, 3) - - ver_imgs = np.array( cropped_lines_ver_index[n_start:] ) - indices_ver = np.where(ver_imgs == 1)[0] - - #print(indices_ver, 'indices_ver') - if len(indices_ver)>0: - imgs_ver_flipped = imgs[indices_ver, : ,: ,:] - imgs_ver_flipped = imgs_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - - else: - imgs_ver_flipped = None - - if dir_in_bin is not None: - imgs_bin = cropped_lines_bin[n_start:] - imgs_bin = np.array(imgs_bin) - imgs_bin = imgs_bin.reshape(imgs_bin.shape[0], image_height, image_width, 3) - - if len(indices_ver)>0: - imgs_bin_ver_flipped = imgs_bin[indices_ver, : ,: ,:] - imgs_bin_ver_flipped = imgs_bin_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - else: - imgs_bin_ver_flipped = None - else: - n_start = i*self.b_s - n_end = (i+1)*self.b_s - imgs = cropped_lines[n_start:n_end] - imgs = np.array(imgs).reshape(self.b_s, image_height, image_width, 3) + + indexer_text_region = indexer_text_region +1 + + extracted_texts = [] + extracted_conf_value = [] + + n_iterations = math.ceil(len(cropped_lines) / self.b_s) + + for i in range(n_iterations): + if i==(n_iterations-1): + n_start = i*self.b_s + imgs = cropped_lines[n_start:] + imgs = np.array(imgs) + imgs = imgs.reshape(imgs.shape[0], image_height, image_width, 3) + + ver_imgs = np.array( cropped_lines_ver_index[n_start:] ) + indices_ver = np.where(ver_imgs == 1)[0] + + #print(indices_ver, 'indices_ver') + if len(indices_ver)>0: + imgs_ver_flipped = imgs[indices_ver, : ,: ,:] + imgs_ver_flipped = imgs_ver_flipped[:,::-1,::-1,:] + #print(imgs_ver_flipped, 'imgs_ver_flipped') - ver_imgs = np.array( cropped_lines_ver_index[n_start:n_end] ) - indices_ver = np.where(ver_imgs == 
1)[0] - #print(indices_ver, 'indices_ver') + else: + imgs_ver_flipped = None + + if dir_in_bin is not None: + imgs_bin = cropped_lines_bin[n_start:] + imgs_bin = np.array(imgs_bin) + imgs_bin = imgs_bin.reshape(imgs_bin.shape[0], image_height, image_width, 3) if len(indices_ver)>0: - imgs_ver_flipped = imgs[indices_ver, : ,: ,:] - imgs_ver_flipped = imgs_ver_flipped[:,::-1,::-1,:] + imgs_bin_ver_flipped = imgs_bin[indices_ver, : ,: ,:] + imgs_bin_ver_flipped = imgs_bin_ver_flipped[:,::-1,::-1,:] #print(imgs_ver_flipped, 'imgs_ver_flipped') + else: - imgs_ver_flipped = None - - - if dir_in_bin is not None: - imgs_bin = cropped_lines_bin[n_start:n_end] - imgs_bin = np.array(imgs_bin).reshape(self.b_s, image_height, image_width, 3) - - - if len(indices_ver)>0: - imgs_bin_ver_flipped = imgs_bin[indices_ver, : ,: ,:] - imgs_bin_ver_flipped = imgs_bin_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - else: - imgs_bin_ver_flipped = None - - - self.logger.debug("processing next %d lines", len(imgs)) - preds = self.model_zoo.get('ocr').predict(imgs, verbose=0) + imgs_bin_ver_flipped = None + else: + n_start = i*self.b_s + n_end = (i+1)*self.b_s + imgs = cropped_lines[n_start:n_end] + imgs = np.array(imgs).reshape(self.b_s, image_height, image_width, 3) + + ver_imgs = np.array( cropped_lines_ver_index[n_start:n_end] ) + indices_ver = np.where(ver_imgs == 1)[0] + #print(indices_ver, 'indices_ver') if len(indices_ver)>0: - preds_flipped = self.model_zoo.get('ocr').predict(imgs_ver_flipped, verbose=0) + imgs_ver_flipped = imgs[indices_ver, : ,: ,:] + imgs_ver_flipped = imgs_ver_flipped[:,::-1,::-1,:] + #print(imgs_ver_flipped, 'imgs_ver_flipped') + else: + imgs_ver_flipped = None + + + if dir_in_bin is not None: + imgs_bin = cropped_lines_bin[n_start:n_end] + imgs_bin = np.array(imgs_bin).reshape(self.b_s, image_height, image_width, 3) + + + if len(indices_ver)>0: + imgs_bin_ver_flipped = imgs_bin[indices_ver, : ,: ,:] + imgs_bin_ver_flipped = imgs_bin_ver_flipped[:,::-1,::-1,:] + #print(imgs_ver_flipped, 'imgs_ver_flipped') + else: + imgs_bin_ver_flipped = None + + + self.logger.debug("processing next %d lines", len(imgs)) + preds = self.model_zoo.get('ocr').predict(imgs, verbose=0) + + if len(indices_ver)>0: + preds_flipped = self.model_zoo.get('ocr').predict(imgs_ver_flipped, verbose=0) + preds_max_fliped = np.max(preds_flipped, axis=2 ) + preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) + pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character + masked_means_flipped = \ + np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / \ + np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) + masked_means_flipped[np.isnan(masked_means_flipped)] = 0 + + preds_max = np.max(preds, axis=2 ) + preds_max_args = np.argmax(preds, axis=2 ) + pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character + + masked_means = \ + np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ + np.sum(pred_max_not_unk_mask_bool, axis=1) + masked_means[np.isnan(masked_means)] = 0 + + masked_means_ver = masked_means[indices_ver] + #print(masked_means_ver, 'pred_max_not_unk') + + indices_where_flipped_conf_value_is_higher = \ + np.where(masked_means_flipped > masked_means_ver)[0] + + #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') + if len(indices_where_flipped_conf_value_is_higher)>0: + indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] + 
preds[indices_to_be_replaced,:,:] = \ + preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] + if dir_in_bin is not None: + preds_bin = self.model_zoo.get('ocr').predict(imgs_bin, verbose=0) + + if len(indices_ver)>0: + preds_flipped = self.model_zoo.get('ocr').predict(imgs_bin_ver_flipped, verbose=0) preds_max_fliped = np.max(preds_flipped, axis=2 ) preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character @@ -789,212 +774,179 @@ class Eynollah_ocr: #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') if len(indices_where_flipped_conf_value_is_higher)>0: indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] - preds[indices_to_be_replaced,:,:] = \ + preds_bin[indices_to_be_replaced,:,:] = \ preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] - if dir_in_bin is not None: - preds_bin = self.model_zoo.get('ocr').predict(imgs_bin, verbose=0) - - if len(indices_ver)>0: - preds_flipped = self.model_zoo.get('ocr').predict(imgs_bin_ver_flipped, verbose=0) - preds_max_fliped = np.max(preds_flipped, axis=2 ) - preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) - pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character - masked_means_flipped = \ - np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) - masked_means_flipped[np.isnan(masked_means_flipped)] = 0 - - preds_max = np.max(preds, axis=2 ) - preds_max_args = np.argmax(preds, axis=2 ) - pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character - - masked_means = \ - np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool, axis=1) - masked_means[np.isnan(masked_means)] = 0 - - masked_means_ver = masked_means[indices_ver] - #print(masked_means_ver, 'pred_max_not_unk') - - indices_where_flipped_conf_value_is_higher = \ - np.where(masked_means_flipped > masked_means_ver)[0] - - #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') - if len(indices_where_flipped_conf_value_is_higher)>0: - indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] - preds_bin[indices_to_be_replaced,:,:] = \ - preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] - - preds = (preds + preds_bin) / 2. - - pred_texts = decode_batch_predictions(preds, self.model_zoo.get('num_to_char')) - preds_max = np.max(preds, axis=2 ) - preds_max_args = np.argmax(preds, axis=2 ) - pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character - masked_means = \ - np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool, axis=1) + preds = (preds + preds_bin) / 2. 
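+                    # descriptive note (editorial comment, not part of the original patch):
+                    # the line above averages the prediction tensors obtained from the RGB
+                    # input and the binarized input, so both variants contribute equally to
+                    # the decoded text and its per-line confidence value.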
- for ib in range(imgs.shape[0]): - pred_texts_ib = pred_texts[ib].replace("[UNK]", "") - if masked_means[ib] >= self.min_conf_value_of_textline_text: - extracted_texts.append(pred_texts_ib) - extracted_conf_value.append(masked_means[ib]) - else: - extracted_texts.append("") - extracted_conf_value.append(0) - del cropped_lines - if dir_in_bin is not None: - del cropped_lines_bin - gc.collect() + pred_texts = decode_batch_predictions(preds, self.model_zoo.get('num_to_char')) - extracted_texts_merged = [extracted_texts[ind] - if cropped_lines_meging_indexing[ind]==0 - else extracted_texts[ind]+" "+extracted_texts[ind+1] - if cropped_lines_meging_indexing[ind]==1 - else None - for ind in range(len(cropped_lines_meging_indexing))] - - extracted_conf_value_merged = [extracted_conf_value[ind] - if cropped_lines_meging_indexing[ind]==0 - else (extracted_conf_value[ind]+extracted_conf_value[ind+1])/2. - if cropped_lines_meging_indexing[ind]==1 - else None - for ind in range(len(cropped_lines_meging_indexing))] + preds_max = np.max(preds, axis=2 ) + preds_max_args = np.argmax(preds, axis=2 ) + pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character + masked_means = \ + np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ + np.sum(pred_max_not_unk_mask_bool, axis=1) - extracted_conf_value_merged = [extracted_conf_value_merged[ind_cfm] - for ind_cfm in range(len(extracted_texts_merged)) - if extracted_texts_merged[ind_cfm] is not None] - extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] - unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) - - if dir_out_image_text: - #font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! - font = importlib_resources.files(__package__) / "Charis-Regular.ttf" - with importlib_resources.as_file(font) as font: - font = ImageFont.truetype(font=font, size=40) - - for indexer_text, bb_ind in enumerate(total_bb_coordinates): - x_bb = bb_ind[0] - y_bb = bb_ind[1] - w_bb = bb_ind[2] - h_bb = bb_ind[3] - - font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], - font.path, w_bb, int(h_bb*0.4) ) - - ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) - - text_bbox = draw.textbbox((0, 0), extracted_texts_merged[indexer_text], font=font) - text_width = text_bbox[2] - text_bbox[0] - text_height = text_bbox[3] - text_bbox[1] - - text_x = x_bb + (w_bb - text_width) // 2 # Center horizontally - text_y = y_bb + (h_bb - text_height) // 2 # Center vertically - - # Draw the text - draw.text((text_x, text_y), extracted_texts_merged[indexer_text], fill="black", font=font) - image_text.save(out_image_with_text) - - text_by_textregion = [] - for ind in unique_cropped_lines_region_indexer: - ind = np.array(cropped_lines_region_indexer)==ind - extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] - if len(extracted_texts_merged_un)>1: - text_by_textregion_ind = "" - next_glue = "" - for indt in range(len(extracted_texts_merged_un)): - if (extracted_texts_merged_un[indt].endswith('⸗') or - extracted_texts_merged_un[indt].endswith('-') or - extracted_texts_merged_un[indt].endswith('¬')): - text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt][:-1] - next_glue = "" - else: - text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt] - next_glue = " " - text_by_textregion.append(text_by_textregion_ind) + for ib in range(imgs.shape[0]): + pred_texts_ib = pred_texts[ib].replace("[UNK]", "") + if masked_means[ib] >= 
self.min_conf_value_of_textline_text: + extracted_texts.append(pred_texts_ib) + extracted_conf_value.append(masked_means[ib]) else: - text_by_textregion.append(" ".join(extracted_texts_merged_un)) - #print(text_by_textregion, 'text_by_textregiontext_by_textregiontext_by_textregiontext_by_textregiontext_by_textregion') + extracted_texts.append("") + extracted_conf_value.append(0) + del cropped_lines + if dir_in_bin is not None: + del cropped_lines_bin + gc.collect() + + extracted_texts_merged = [extracted_texts[ind] + if cropped_lines_meging_indexing[ind]==0 + else extracted_texts[ind]+" "+extracted_texts[ind+1] + if cropped_lines_meging_indexing[ind]==1 + else None + for ind in range(len(cropped_lines_meging_indexing))] + + extracted_conf_value_merged = [extracted_conf_value[ind] + if cropped_lines_meging_indexing[ind]==0 + else (extracted_conf_value[ind]+extracted_conf_value[ind+1])/2. + if cropped_lines_meging_indexing[ind]==1 + else None + for ind in range(len(cropped_lines_meging_indexing))] - ###index_tot_regions = [] - ###tot_region_ref = [] + extracted_conf_value_merged = [extracted_conf_value_merged[ind_cfm] + for ind_cfm in range(len(extracted_texts_merged)) + if extracted_texts_merged[ind_cfm] is not None] + extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] + unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) + + if dir_out_image_text: + #font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! + font = importlib_resources.files(__package__) / "Charis-Regular.ttf" + with importlib_resources.as_file(font) as font: + font = ImageFont.truetype(font=font, size=40) + + for indexer_text, bb_ind in enumerate(total_bb_coordinates): + x_bb = bb_ind[0] + y_bb = bb_ind[1] + w_bb = bb_ind[2] + h_bb = bb_ind[3] + + font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], + font.path, w_bb, int(h_bb*0.4) ) + + ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) + + text_bbox = draw.textbbox((0, 0), extracted_texts_merged[indexer_text], font=font) + text_width = text_bbox[2] - text_bbox[0] + text_height = text_bbox[3] - text_bbox[1] - ###for jj in root1.iter(link+'RegionRefIndexed'): - ###index_tot_regions.append(jj.attrib['index']) - ###tot_region_ref.append(jj.attrib['regionRef']) - - ###id_to_order = {tid: ro for tid, ro in zip(tot_region_ref, index_tot_regions)} - - #id_textregions = [] - #textregions_by_existing_ids = [] - indexer = 0 - indexer_textregion = 0 - for nn in root1.iter(region_tags): - #id_textregion = nn.attrib['id'] - #id_textregions.append(id_textregion) - #textregions_by_existing_ids.append(text_by_textregion[indexer_textregion]) - - is_textregion_text = False - for childtest in nn: - if childtest.tag.endswith("TextEquiv"): - is_textregion_text = True - - if not is_textregion_text: - text_subelement_textregion = ET.SubElement(nn, 'TextEquiv') - unicode_textregion = ET.SubElement(text_subelement_textregion, 'Unicode') + text_x = x_bb + (w_bb - text_width) // 2 # Center horizontally + text_y = y_bb + (h_bb - text_height) // 2 # Center vertically - - has_textline = False - for child_textregion in nn: - if child_textregion.tag.endswith("TextLine"): - - is_textline_text = False - for childtest2 in child_textregion: - if childtest2.tag.endswith("TextEquiv"): - is_textline_text = True - - - if not is_textline_text: - text_subelement = ET.SubElement(child_textregion, 'TextEquiv') - text_subelement.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") - 
unicode_textline = ET.SubElement(text_subelement, 'Unicode') - unicode_textline.text = extracted_texts_merged[indexer] - else: - for childtest3 in child_textregion: - if childtest3.tag.endswith("TextEquiv"): - for child_uc in childtest3: - if child_uc.tag.endswith("Unicode"): - childtest3.set('conf', - f"{extracted_conf_value_merged[indexer]:.2f}") - child_uc.text = extracted_texts_merged[indexer] - - indexer = indexer + 1 - has_textline = True - if has_textline: - if is_textregion_text: - for child4 in nn: - if child4.tag.endswith("TextEquiv"): - for childtr_uc in child4: - if childtr_uc.tag.endswith("Unicode"): - childtr_uc.text = text_by_textregion[indexer_textregion] + # Draw the text + draw.text((text_x, text_y), extracted_texts_merged[indexer_text], fill="black", font=font) + image_text.save(out_image_with_text) + + text_by_textregion = [] + for ind in unique_cropped_lines_region_indexer: + ind = np.array(cropped_lines_region_indexer)==ind + extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] + if len(extracted_texts_merged_un)>1: + text_by_textregion_ind = "" + next_glue = "" + for indt in range(len(extracted_texts_merged_un)): + if (extracted_texts_merged_un[indt].endswith('⸗') or + extracted_texts_merged_un[indt].endswith('-') or + extracted_texts_merged_un[indt].endswith('¬')): + text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt][:-1] + next_glue = "" else: - unicode_textregion.text = text_by_textregion[indexer_textregion] - indexer_textregion = indexer_textregion + 1 + text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt] + next_glue = " " + text_by_textregion.append(text_by_textregion_ind) + else: + text_by_textregion.append(" ".join(extracted_texts_merged_un)) + #print(text_by_textregion, 'text_by_textregiontext_by_textregiontext_by_textregiontext_by_textregiontext_by_textregion') + + ###index_tot_regions = [] + ###tot_region_ref = [] + + ###for jj in root1.iter(link+'RegionRefIndexed'): + ###index_tot_regions.append(jj.attrib['index']) + ###tot_region_ref.append(jj.attrib['regionRef']) + + ###id_to_order = {tid: ro for tid, ro in zip(tot_region_ref, index_tot_regions)} + + #id_textregions = [] + #textregions_by_existing_ids = [] + indexer = 0 + indexer_textregion = 0 + for nn in root1.iter(region_tags): + #id_textregion = nn.attrib['id'] + #id_textregions.append(id_textregion) + #textregions_by_existing_ids.append(text_by_textregion[indexer_textregion]) + + is_textregion_text = False + for childtest in nn: + if childtest.tag.endswith("TextEquiv"): + is_textregion_text = True + + if not is_textregion_text: + text_subelement_textregion = ET.SubElement(nn, 'TextEquiv') + unicode_textregion = ET.SubElement(text_subelement_textregion, 'Unicode') + + + has_textline = False + for child_textregion in nn: + if child_textregion.tag.endswith("TextLine"): - ###sample_order = [(id_to_order[tid], text) - ### for tid, text in zip(id_textregions, textregions_by_existing_ids) - ### if tid in id_to_order] - - ##ordered_texts_sample = [text for _, text in sorted(sample_order)] - ##tot_page_text = ' '.join(ordered_texts_sample) - - ##for page_element in root1.iter(link+'Page'): - ##text_page = ET.SubElement(page_element, 'TextEquiv') - ##unicode_textpage = ET.SubElement(text_page, 'Unicode') - ##unicode_textpage.text = tot_page_text - - ET.register_namespace("",name_space) - tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf-8",default_namespace=None) - #print("Job done in %.1fs", time.time() - t0) + is_textline_text = False 
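+                        # descriptive note (editorial comment, not part of the original patch):
+                        # the loop below checks whether this TextLine already carries a
+                        # TextEquiv child; only lines without one get a new TextEquiv/Unicode
+                        # element, existing ones are updated in place further down.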
+ for childtest2 in child_textregion: + if childtest2.tag.endswith("TextEquiv"): + is_textline_text = True + + + if not is_textline_text: + text_subelement = ET.SubElement(child_textregion, 'TextEquiv') + text_subelement.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") + unicode_textline = ET.SubElement(text_subelement, 'Unicode') + unicode_textline.text = extracted_texts_merged[indexer] + else: + for childtest3 in child_textregion: + if childtest3.tag.endswith("TextEquiv"): + for child_uc in childtest3: + if child_uc.tag.endswith("Unicode"): + childtest3.set('conf', + f"{extracted_conf_value_merged[indexer]:.2f}") + child_uc.text = extracted_texts_merged[indexer] + + indexer = indexer + 1 + has_textline = True + if has_textline: + if is_textregion_text: + for child4 in nn: + if child4.tag.endswith("TextEquiv"): + for childtr_uc in child4: + if childtr_uc.tag.endswith("Unicode"): + childtr_uc.text = text_by_textregion[indexer_textregion] + else: + unicode_textregion.text = text_by_textregion[indexer_textregion] + indexer_textregion = indexer_textregion + 1 + + ###sample_order = [(id_to_order[tid], text) + ### for tid, text in zip(id_textregions, textregions_by_existing_ids) + ### if tid in id_to_order] + + ##ordered_texts_sample = [text for _, text in sorted(sample_order)] + ##tot_page_text = ' '.join(ordered_texts_sample) + + ##for page_element in root1.iter(link+'Page'): + ##text_page = ET.SubElement(page_element, 'TextEquiv') + ##unicode_textpage = ET.SubElement(text_page, 'Unicode') + ##unicode_textpage.text = tot_page_text + + ET.register_namespace("",name_space) + tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf-8",default_namespace=None) + #print("Job done in %.1fs", time.time() - t0) diff --git a/src/eynollah/training/cli.py b/src/eynollah/training/cli.py index 8ab754d..65a7a8a 100644 --- a/src/eynollah/training/cli.py +++ b/src/eynollah/training/cli.py @@ -8,6 +8,7 @@ from .build_model_load_pretrained_weights_and_save import build_model_load_pretr from .generate_gt_for_training import main as generate_gt_cli from .inference import main as inference_cli from .train import ex +from .extract_line_gt import linegt_cli @click.command(context_settings=dict( ignore_unknown_options=True, @@ -24,3 +25,4 @@ main.add_command(build_model_load_pretrained_weights_and_save) main.add_command(generate_gt_cli, 'generate-gt') main.add_command(inference_cli, 'inference') main.add_command(train_cli, 'train') +main.add_command(linegt_cli, 'export_textline_images_and_text') diff --git a/src/eynollah/training/extract_line_gt.py b/src/eynollah/training/extract_line_gt.py new file mode 100644 index 0000000..bda21e7 --- /dev/null +++ b/src/eynollah/training/extract_line_gt.py @@ -0,0 +1,136 @@ +from logging import Logger, getLogger +from typing import Optional +from pathlib import Path +import os + +import click +import cv2 +import xml.etree.ElementTree as ET +import numpy as np + +from ..utils import is_image_filename + +@click.command() +@click.option( + "--image", + "-i", + help="input image filename", + type=click.Path(exists=True, dir_okay=False), +) +@click.option( + "--dir_in", + "-di", + 'image_filename', + help="directory of input images (instead of --image)", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--dir_xmls", + "-dx", + help="directory of input PAGE-XML files (in addition to --dir_in; filename stems must match the image files, with '.xml' suffix).", + type=click.Path(exists=True, file_okay=False), + required=True, +) 
+@click.option( + "--out", + "-o", + 'dir_out', + help="directory for output PAGE-XML files", + type=click.Path(exists=True, file_okay=False), + required=True, +) +@click.option( + "--dataset_abbrevation", + "-ds_pref", + 'pref_of_dataset', + help="in the case of extracting textline and text from a xml GT file user can add an abbrevation of dataset name to generated dataset", +) +@click.option( + "--do_not_mask_with_textline_contour", + "-nmtc/-mtc", + is_flag=True, + help="if this parameter set to true, cropped textline images will not be masked with textline contour.", +) +def linegt_cli( + image_filename, + dir_in, + dir_xmls, + dir_out, + pref_of_dataset, + do_not_mask_with_textline_contour, +): + assert bool(dir_in) ^ bool(image_filename), "Set --dir-in or --image-filename, not both" + if dir_in: + ls_imgs = [ + os.path.join(dir_in, image_filename) for image_filename in filter(is_image_filename, os.listdir(dir_in)) + ] + else: + assert image_filename + ls_imgs = [image_filename] + + for dir_img in ls_imgs: + file_name = Path(dir_img).stem + dir_xml = os.path.join(dir_xmls, file_name + '.xml') + + img = cv2.imread(dir_img) + + total_bb_coordinates = [] + + tree1 = ET.parse(dir_xml, parser=ET.XMLParser(encoding="utf-8")) + root1 = tree1.getroot() + alltags = [elem.tag for elem in root1.iter()] + + name_space = alltags[0].split('}')[0] + name_space = name_space.split('{')[1] + + region_tags = [x for x in alltags if x.endswith('TextRegion')][0] + + cropped_lines_region_indexer = [] + + indexer_text_region = 0 + indexer_textlines = 0 + # FIXME: non recursive, use OCR-D PAGE generateDS API. Or use an existing tool for this purpose altogether + for nn in root1.iter(region_tags): + for child_textregion in nn: + if child_textregion.tag.endswith("TextLine"): + for child_textlines in child_textregion: + if child_textlines.tag.endswith("Coords"): + cropped_lines_region_indexer.append(indexer_text_region) + p_h = child_textlines.attrib['points'].split(' ') + textline_coords = np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h]) + + x, y, w, h = cv2.boundingRect(textline_coords) + + total_bb_coordinates.append([x, y, w, h]) + + img_poly_on_img = np.copy(img) + + mask_poly = np.zeros(img.shape) + mask_poly = cv2.fillPoly(mask_poly, pts=[textline_coords], color=(1, 1, 1)) + + mask_poly = mask_poly[y : y + h, x : x + w, :] + img_crop = img_poly_on_img[y : y + h, x : x + w, :] + + if not do_not_mask_with_textline_contour: + img_crop[mask_poly == 0] = 255 + + if img_crop.shape[0] == 0 or img_crop.shape[1] == 0: + continue + + if child_textlines.tag.endswith("TextEquiv"): + for cheild_text in child_textlines: + if cheild_text.tag.endswith("Unicode"): + textline_text = cheild_text.text + if textline_text: + base_name = os.path.join( + dir_out, file_name + '_line_' + str(indexer_textlines) + ) + if pref_of_dataset: + base_name += '_' + pref_of_dataset + if not do_not_mask_with_textline_contour: + base_name += '_masked' + + with open(base_name + '.txt', 'w') as text_file: + text_file.write(textline_text) + cv2.imwrite(base_name + '.png', img_crop) + indexer_textlines += 1 From b161e338544daf8703fcc3c411e8891e9f86270b Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 28 Nov 2025 14:54:43 +0100 Subject: [PATCH 15/15] :fire: refactor eynollah ocr . 
--- src/eynollah/cli/cli_ocr.py | 2 - src/eynollah/eynollah_ocr.py | 1611 ++++++++++++++----------------- src/eynollah/utils/font.py | 16 + src/eynollah/utils/utils_ocr.py | 1 + src/eynollah/utils/xml.py | 4 + 5 files changed, 769 insertions(+), 865 deletions(-) create mode 100644 src/eynollah/utils/font.py diff --git a/src/eynollah/cli/cli_ocr.py b/src/eynollah/cli/cli_ocr.py index 9bb8620..eb94dcc 100644 --- a/src/eynollah/cli/cli_ocr.py +++ b/src/eynollah/cli/cli_ocr.py @@ -88,7 +88,6 @@ def ocr_cli( tr_ocr, do_not_mask_with_textline_contour, batch_size, - dataset_abbrevation, min_conf_value_of_textline_text, ): """ @@ -101,7 +100,6 @@ def ocr_cli( tr_ocr=tr_ocr, do_not_mask_with_textline_contour=do_not_mask_with_textline_contour, batch_size=batch_size, - pref_of_dataset=dataset_abbrevation, min_conf_value_of_textline_text=min_conf_value_of_textline_text) eynollah_ocr.run(overwrite=overwrite, dir_in=dir_in, diff --git a/src/eynollah/eynollah_ocr.py b/src/eynollah/eynollah_ocr.py index 52dcca9..3c918e5 100644 --- a/src/eynollah/eynollah_ocr.py +++ b/src/eynollah/eynollah_ocr.py @@ -1,24 +1,22 @@ # FIXME: fix all of those... -# pyright: reportPossiblyUnboundVariable=false -# pyright: reportOptionalMemberAccess=false -# pyright: reportArgumentType=false -# pyright: reportCallIssue=false # pyright: reportOptionalSubscript=false from logging import Logger, getLogger -from typing import Optional +from typing import List, Optional from pathlib import Path import os import gc -import sys import math -import time +from dataclasses import dataclass import cv2 -import xml.etree.ElementTree as ET -from PIL import Image, ImageDraw, ImageFont +from cv2.typing import MatLike +from xml.etree import ElementTree as ET +from PIL import Image, ImageDraw import numpy as np from eynollah.model_zoo import EynollahModelZoo +from eynollah.utils.font import get_font +from eynollah.utils.xml import etree_namespace_for_element_tag try: import torch except ImportError: @@ -38,11 +36,13 @@ from .utils.utils_ocr import ( rotate_image_with_padding, ) -# cannot use importlib.resources until we move to 3.9+ forimportlib.resources.files -if sys.version_info < (3, 10): - import importlib_resources -else: - import importlib.resources as importlib_resources +# TODO: refine typing +@dataclass +class EynollahOcrResult: + extracted_texts_merged: List + extracted_conf_value_merged: Optional[List] + cropped_lines_region_indexer: List + total_bb_coordinates:List class Eynollah_ocr: def __init__( @@ -76,6 +76,7 @@ class Eynollah_ocr: @property def device(self): + assert torch if torch.cuda.is_available(): self.logger.info("Using GPU acceleration") return torch.device("cuda:0") @@ -83,870 +84,754 @@ class Eynollah_ocr: self.logger.info("Using CPU processing") return torch.device("cpu") - def run(self, overwrite: bool = False, - dir_in: Optional[str] = None, - # Prediction with RGB and binarized images for selected pages, should not be the default - dir_in_bin: Optional[str] = None, - image_filename: Optional[str] = None, - dir_xmls: Optional[str] = None, - dir_out_image_text: Optional[str] = None, - dir_out: Optional[str] = None, + def run_trocr( + self, + *, + img: MatLike, + page_tree: ET.ElementTree, + page_ns, + tr_ocr_input_height_and_width, + ) -> EynollahOcrResult: + + total_bb_coordinates = [] + + + cropped_lines = [] + cropped_lines_region_indexer = [] + cropped_lines_meging_indexing = [] + + extracted_texts = [] + + indexer_text_region = 0 + indexer_b_s = 0 + + for nn in 
page_tree.getroot().iter(f'{{{page_ns}}}TextRegion'): + for child_textregion in nn: + if child_textregion.tag.endswith("TextLine"): + + for child_textlines in child_textregion: + if child_textlines.tag.endswith("Coords"): + cropped_lines_region_indexer.append(indexer_text_region) + p_h=child_textlines.attrib['points'].split(' ') + textline_coords = np.array( [ [int(x.split(',')[0]), + int(x.split(',')[1]) ] + for x in p_h] ) + x,y,w,h = cv2.boundingRect(textline_coords) + + total_bb_coordinates.append([x,y,w,h]) + + h2w_ratio = h/float(w) + + img_poly_on_img = np.copy(img) + mask_poly = np.zeros(img.shape) + mask_poly = cv2.fillPoly(mask_poly, pts=[textline_coords], color=(1, 1, 1)) + + mask_poly = mask_poly[y:y+h, x:x+w, :] + img_crop = img_poly_on_img[y:y+h, x:x+w, :] + img_crop[mask_poly==0] = 255 + + self.logger.debug("processing %d lines for '%s'", + len(cropped_lines), nn.attrib['id']) + if h2w_ratio > 0.1: + cropped_lines.append(resize_image(img_crop, + tr_ocr_input_height_and_width, + tr_ocr_input_height_and_width) ) + cropped_lines_meging_indexing.append(0) + indexer_b_s+=1 + if indexer_b_s==self.b_s: + imgs = cropped_lines[:] + cropped_lines = [] + indexer_b_s = 0 + + pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values + generated_ids_merged = self.model_zoo.get('ocr').generate( + pixel_values_merged.to(self.device)) + generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( + generated_ids_merged, skip_special_tokens=True) + + extracted_texts = extracted_texts + generated_text_merged + + else: + splited_images, _ = return_textlines_split_if_needed(img_crop, None) + #print(splited_images) + if splited_images: + cropped_lines.append(resize_image(splited_images[0], + tr_ocr_input_height_and_width, + tr_ocr_input_height_and_width)) + cropped_lines_meging_indexing.append(1) + indexer_b_s+=1 + + if indexer_b_s==self.b_s: + imgs = cropped_lines[:] + cropped_lines = [] + indexer_b_s = 0 + + pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values + generated_ids_merged = self.model_zoo.get('ocr').generate( + pixel_values_merged.to(self.device)) + generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( + generated_ids_merged, skip_special_tokens=True) + + extracted_texts = extracted_texts + generated_text_merged + + + cropped_lines.append(resize_image(splited_images[1], + tr_ocr_input_height_and_width, + tr_ocr_input_height_and_width)) + cropped_lines_meging_indexing.append(-1) + indexer_b_s+=1 + + if indexer_b_s==self.b_s: + imgs = cropped_lines[:] + cropped_lines = [] + indexer_b_s = 0 + + pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values + generated_ids_merged = self.model_zoo.get('ocr').generate( + pixel_values_merged.to(self.device)) + generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( + generated_ids_merged, skip_special_tokens=True) + + extracted_texts = extracted_texts + generated_text_merged + + else: + cropped_lines.append(img_crop) + cropped_lines_meging_indexing.append(0) + indexer_b_s+=1 + + if indexer_b_s==self.b_s: + imgs = cropped_lines[:] + cropped_lines = [] + indexer_b_s = 0 + + pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values + generated_ids_merged = self.model_zoo.get('ocr').generate( + pixel_values_merged.to(self.device)) + generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( + 
generated_ids_merged, skip_special_tokens=True) + + extracted_texts = extracted_texts + generated_text_merged + + + + indexer_text_region = indexer_text_region +1 + + if indexer_b_s!=0: + imgs = cropped_lines[:] + cropped_lines = [] + indexer_b_s = 0 + + pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values + generated_ids_merged = self.model_zoo.get('ocr').generate(pixel_values_merged.to(self.device)) + generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode(generated_ids_merged, skip_special_tokens=True) + + extracted_texts = extracted_texts + generated_text_merged + + ####extracted_texts = [] + ####n_iterations = math.ceil(len(cropped_lines) / self.b_s) + + ####for i in range(n_iterations): + ####if i==(n_iterations-1): + ####n_start = i*self.b_s + ####imgs = cropped_lines[n_start:] + ####else: + ####n_start = i*self.b_s + ####n_end = (i+1)*self.b_s + ####imgs = cropped_lines[n_start:n_end] + ####pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values + ####generated_ids_merged = self.model_ocr.generate( + #### pixel_values_merged.to(self.device)) + ####generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( + #### generated_ids_merged, skip_special_tokens=True) + + ####extracted_texts = extracted_texts + generated_text_merged + + del cropped_lines + gc.collect() + + extracted_texts_merged = [extracted_texts[ind] + if cropped_lines_meging_indexing[ind]==0 + else extracted_texts[ind]+" "+extracted_texts[ind+1] + if cropped_lines_meging_indexing[ind]==1 + else None + for ind in range(len(cropped_lines_meging_indexing))] + + extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] + #print(extracted_texts_merged, len(extracted_texts_merged)) + + return EynollahOcrResult( + extracted_texts_merged=extracted_texts_merged, + extracted_conf_value_merged=None, + cropped_lines_region_indexer=cropped_lines_region_indexer, + total_bb_coordinates=total_bb_coordinates, + ) + + def run_cnn( + self, + *, + img: MatLike, + img_bin: Optional[MatLike], + page_tree: ET.ElementTree, + page_ns, + image_width, + image_height, + ) -> EynollahOcrResult: + + total_bb_coordinates = [] + + cropped_lines = [] + img_crop_bin = None + imgs_bin = None + imgs_bin_ver_flipped = None + cropped_lines_bin = [] + cropped_lines_ver_index = [] + cropped_lines_region_indexer = [] + cropped_lines_meging_indexing = [] + + indexer_text_region = 0 + for nn in page_tree.getroot().iter(f'{{{page_ns}}}TextRegion'): + try: + type_textregion = nn.attrib['type'] + except: + type_textregion = 'paragraph' + for child_textregion in nn: + if child_textregion.tag.endswith("TextLine"): + for child_textlines in child_textregion: + if child_textlines.tag.endswith("Coords"): + cropped_lines_region_indexer.append(indexer_text_region) + p_h=child_textlines.attrib['points'].split(' ') + textline_coords = np.array( [ [int(x.split(',')[0]), + int(x.split(',')[1]) ] + for x in p_h] ) + + x,y,w,h = cv2.boundingRect(textline_coords) + + angle_radians = math.atan2(h, w) + # Convert to degrees + angle_degrees = math.degrees(angle_radians) + if type_textregion=='drop-capital': + angle_degrees = 0 + + total_bb_coordinates.append([x,y,w,h]) + + w_scaled = w * image_height/float(h) + + img_poly_on_img = np.copy(img) + if img_bin: + img_poly_on_img_bin = np.copy(img_bin) + img_crop_bin = img_poly_on_img_bin[y:y+h, x:x+w, :] + + mask_poly = np.zeros(img.shape) + mask_poly = cv2.fillPoly(mask_poly, 
pts=[textline_coords], color=(1, 1, 1)) + + + mask_poly = mask_poly[y:y+h, x:x+w, :] + img_crop = img_poly_on_img[y:y+h, x:x+w, :] + + # print(file_name, angle_degrees, w*h, + # mask_poly[:,:,0].sum(), + # mask_poly[:,:,0].sum() /float(w*h) , + # 'didi') + + if angle_degrees > 3: + better_des_slope = get_orientation_moments(textline_coords) + + img_crop = rotate_image_with_padding(img_crop, better_des_slope) + if img_bin: + img_crop_bin = rotate_image_with_padding(img_crop_bin, better_des_slope) + + mask_poly = rotate_image_with_padding(mask_poly, better_des_slope) + mask_poly = mask_poly.astype('uint8') + + #new bounding box + x_n, y_n, w_n, h_n = get_contours_and_bounding_boxes(mask_poly[:,:,0]) + + mask_poly = mask_poly[y_n:y_n+h_n, x_n:x_n+w_n, :] + img_crop = img_crop[y_n:y_n+h_n, x_n:x_n+w_n, :] + + if not self.do_not_mask_with_textline_contour: + img_crop[mask_poly==0] = 255 + if img_bin: + img_crop_bin = img_crop_bin[y_n:y_n+h_n, x_n:x_n+w_n, :] + if not self.do_not_mask_with_textline_contour: + img_crop_bin[mask_poly==0] = 255 + + if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 90: + if img_bin: + img_crop, img_crop_bin = \ + break_curved_line_into_small_pieces_and_then_merge( + img_crop, mask_poly, img_crop_bin) + else: + img_crop, _ = \ + break_curved_line_into_small_pieces_and_then_merge( + img_crop, mask_poly) + + else: + better_des_slope = 0 + if not self.do_not_mask_with_textline_contour: + img_crop[mask_poly==0] = 255 + if img_bin: + if not self.do_not_mask_with_textline_contour: + img_crop_bin[mask_poly==0] = 255 + if type_textregion=='drop-capital': + pass + else: + if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 90: + if img_bin: + img_crop, img_crop_bin = \ + break_curved_line_into_small_pieces_and_then_merge( + img_crop, mask_poly, img_crop_bin) + else: + img_crop, _ = \ + break_curved_line_into_small_pieces_and_then_merge( + img_crop, mask_poly) + + if w_scaled < 750:#1.5*image_width: + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + img_crop, image_height, image_width) + cropped_lines.append(img_fin) + if abs(better_des_slope) > 45: + cropped_lines_ver_index.append(1) + else: + cropped_lines_ver_index.append(0) + + cropped_lines_meging_indexing.append(0) + if img_bin: + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + img_crop_bin, image_height, image_width) + cropped_lines_bin.append(img_fin) + else: + splited_images, splited_images_bin = return_textlines_split_if_needed( + img_crop, img_crop_bin if img_bin else None) + if splited_images: + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + splited_images[0], image_height, image_width) + cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(1) + + if abs(better_des_slope) > 45: + cropped_lines_ver_index.append(1) + else: + cropped_lines_ver_index.append(0) + + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + splited_images[1], image_height, image_width) + + cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(-1) + + if abs(better_des_slope) > 45: + cropped_lines_ver_index.append(1) + else: + cropped_lines_ver_index.append(0) + + if img_bin: + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + splited_images_bin[0], image_height, image_width) + cropped_lines_bin.append(img_fin) + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + splited_images_bin[1], image_height, image_width) + cropped_lines_bin.append(img_fin) + + else: + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + img_crop, image_height, 
image_width) + cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(0) + + if abs(better_des_slope) > 45: + cropped_lines_ver_index.append(1) + else: + cropped_lines_ver_index.append(0) + + if img_bin: + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + img_crop_bin, image_height, image_width) + cropped_lines_bin.append(img_fin) + + + indexer_text_region = indexer_text_region +1 + + extracted_texts = [] + extracted_conf_value = [] + + n_iterations = math.ceil(len(cropped_lines) / self.b_s) + + # FIXME: copy pasta + for i in range(n_iterations): + if i==(n_iterations-1): + n_start = i*self.b_s + imgs = cropped_lines[n_start:] + imgs = np.array(imgs) + imgs = imgs.reshape(imgs.shape[0], image_height, image_width, 3) + + ver_imgs = np.array( cropped_lines_ver_index[n_start:] ) + indices_ver = np.where(ver_imgs == 1)[0] + + #print(indices_ver, 'indices_ver') + if len(indices_ver)>0: + imgs_ver_flipped = imgs[indices_ver, : ,: ,:] + imgs_ver_flipped = imgs_ver_flipped[:,::-1,::-1,:] + #print(imgs_ver_flipped, 'imgs_ver_flipped') + + else: + imgs_ver_flipped = None + + if img_bin: + imgs_bin = cropped_lines_bin[n_start:] + imgs_bin = np.array(imgs_bin) + imgs_bin = imgs_bin.reshape(imgs_bin.shape[0], image_height, image_width, 3) + + if len(indices_ver)>0: + imgs_bin_ver_flipped = imgs_bin[indices_ver, : ,: ,:] + imgs_bin_ver_flipped = imgs_bin_ver_flipped[:,::-1,::-1,:] + #print(imgs_ver_flipped, 'imgs_ver_flipped') + + else: + imgs_bin_ver_flipped = None + else: + n_start = i*self.b_s + n_end = (i+1)*self.b_s + imgs = cropped_lines[n_start:n_end] + imgs = np.array(imgs).reshape(self.b_s, image_height, image_width, 3) + + ver_imgs = np.array( cropped_lines_ver_index[n_start:n_end] ) + indices_ver = np.where(ver_imgs == 1)[0] + #print(indices_ver, 'indices_ver') + + if len(indices_ver)>0: + imgs_ver_flipped = imgs[indices_ver, : ,: ,:] + imgs_ver_flipped = imgs_ver_flipped[:,::-1,::-1,:] + #print(imgs_ver_flipped, 'imgs_ver_flipped') + else: + imgs_ver_flipped = None + + + if img_bin: + imgs_bin = cropped_lines_bin[n_start:n_end] + imgs_bin = np.array(imgs_bin).reshape(self.b_s, image_height, image_width, 3) + + + if len(indices_ver)>0: + imgs_bin_ver_flipped = imgs_bin[indices_ver, : ,: ,:] + imgs_bin_ver_flipped = imgs_bin_ver_flipped[:,::-1,::-1,:] + #print(imgs_ver_flipped, 'imgs_ver_flipped') + else: + imgs_bin_ver_flipped = None + + + self.logger.debug("processing next %d lines", len(imgs)) + preds = self.model_zoo.get('ocr').predict(imgs, verbose=0) + + if len(indices_ver)>0: + preds_flipped = self.model_zoo.get('ocr').predict(imgs_ver_flipped, verbose=0) + preds_max_fliped = np.max(preds_flipped, axis=2 ) + preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) + pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character + masked_means_flipped = \ + np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / \ + np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) + masked_means_flipped[np.isnan(masked_means_flipped)] = 0 + + preds_max = np.max(preds, axis=2 ) + preds_max_args = np.argmax(preds, axis=2 ) + pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character + + masked_means = \ + np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ + np.sum(pred_max_not_unk_mask_bool, axis=1) + masked_means[np.isnan(masked_means)] = 0 + + masked_means_ver = masked_means[indices_ver] + #print(masked_means_ver, 'pred_max_not_unk') + + indices_where_flipped_conf_value_is_higher = \ + np.where(masked_means_flipped > 
masked_means_ver)[0] + + #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') + if len(indices_where_flipped_conf_value_is_higher)>0: + indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] + preds[indices_to_be_replaced,:,:] = \ + preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] + + if img_bin: + preds_bin = self.model_zoo.get('ocr').predict(imgs_bin, verbose=0) + + if len(indices_ver)>0: + preds_flipped = self.model_zoo.get('ocr').predict(imgs_bin_ver_flipped, verbose=0) + preds_max_fliped = np.max(preds_flipped, axis=2 ) + preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) + pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character + masked_means_flipped = \ + np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / \ + np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) + masked_means_flipped[np.isnan(masked_means_flipped)] = 0 + + preds_max = np.max(preds, axis=2 ) + preds_max_args = np.argmax(preds, axis=2 ) + pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character + + masked_means = \ + np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ + np.sum(pred_max_not_unk_mask_bool, axis=1) + masked_means[np.isnan(masked_means)] = 0 + + masked_means_ver = masked_means[indices_ver] + #print(masked_means_ver, 'pred_max_not_unk') + + indices_where_flipped_conf_value_is_higher = \ + np.where(masked_means_flipped > masked_means_ver)[0] + + #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') + if len(indices_where_flipped_conf_value_is_higher)>0: + indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] + preds_bin[indices_to_be_replaced,:,:] = \ + preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] + + preds = (preds + preds_bin) / 2. + + pred_texts = decode_batch_predictions(preds, self.model_zoo.get('num_to_char')) + + preds_max = np.max(preds, axis=2 ) + preds_max_args = np.argmax(preds, axis=2 ) + pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character + masked_means = \ + np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ + np.sum(pred_max_not_unk_mask_bool, axis=1) + + for ib in range(imgs.shape[0]): + pred_texts_ib = pred_texts[ib].replace("[UNK]", "") + if masked_means[ib] >= self.min_conf_value_of_textline_text: + extracted_texts.append(pred_texts_ib) + extracted_conf_value.append(masked_means[ib]) + else: + extracted_texts.append("") + extracted_conf_value.append(0) + del cropped_lines + del cropped_lines_bin + gc.collect() + + extracted_texts_merged = [extracted_texts[ind] + if cropped_lines_meging_indexing[ind]==0 + else extracted_texts[ind]+" "+extracted_texts[ind+1] + if cropped_lines_meging_indexing[ind]==1 + else None + for ind in range(len(cropped_lines_meging_indexing))] + + extracted_conf_value_merged = [extracted_conf_value[ind] # type: ignore + if cropped_lines_meging_indexing[ind]==0 + else (extracted_conf_value[ind]+extracted_conf_value[ind+1])/2. 
+ if cropped_lines_meging_indexing[ind]==1 + else None + for ind in range(len(cropped_lines_meging_indexing))] + + extracted_conf_value_merged: List[float] = [extracted_conf_value_merged[ind_cfm] + for ind_cfm in range(len(extracted_texts_merged)) + if extracted_texts_merged[ind_cfm] is not None] + + extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] + + return EynollahOcrResult( + extracted_texts_merged=extracted_texts_merged, + extracted_conf_value_merged=extracted_conf_value_merged, + cropped_lines_region_indexer=cropped_lines_region_indexer, + total_bb_coordinates=total_bb_coordinates, + ) + + def write_ocr( + self, + *, + result: EynollahOcrResult, + page_tree: ET.ElementTree, + out_file_ocr, + page_ns, + img, + out_image_with_text, ): + cropped_lines_region_indexer = result.cropped_lines_region_indexer + total_bb_coordinates = result.total_bb_coordinates + extracted_texts_merged = result.extracted_texts_merged + extracted_conf_value_merged = result.extracted_conf_value_merged + + unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) + if out_image_with_text: + image_text = Image.new("RGB", (img.shape[1], img.shape[0]), "white") + draw = ImageDraw.Draw(image_text) + font = get_font() + + for indexer_text, bb_ind in enumerate(total_bb_coordinates): + x_bb = bb_ind[0] + y_bb = bb_ind[1] + w_bb = bb_ind[2] + h_bb = bb_ind[3] + + font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], + font.path, w_bb, int(h_bb*0.4) ) + + ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) + + text_bbox = draw.textbbox((0, 0), extracted_texts_merged[indexer_text], font=font) + text_width = text_bbox[2] - text_bbox[0] + text_height = text_bbox[3] - text_bbox[1] + + text_x = x_bb + (w_bb - text_width) // 2 # Center horizontally + text_y = y_bb + (h_bb - text_height) // 2 # Center vertically + + # Draw the text + draw.text((text_x, text_y), extracted_texts_merged[indexer_text], fill="black", font=font) + image_text.save(out_image_with_text) + + text_by_textregion = [] + for ind in unique_cropped_lines_region_indexer: + ind = np.array(cropped_lines_region_indexer)==ind + extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] + if len(extracted_texts_merged_un)>1: + text_by_textregion_ind = "" + next_glue = "" + for indt in range(len(extracted_texts_merged_un)): + if (extracted_texts_merged_un[indt].endswith('⸗') or + extracted_texts_merged_un[indt].endswith('-') or + extracted_texts_merged_un[indt].endswith('¬')): + text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt][:-1] + next_glue = "" + else: + text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt] + next_glue = " " + text_by_textregion.append(text_by_textregion_ind) + else: + text_by_textregion.append(" ".join(extracted_texts_merged_un)) + + indexer = 0 + indexer_textregion = 0 + for nn in page_tree.getroot().iter(f'{{{page_ns}}}TextRegion'): + + is_textregion_text = False + for childtest in nn: + if childtest.tag.endswith("TextEquiv"): + is_textregion_text = True + + if not is_textregion_text: + text_subelement_textregion = ET.SubElement(nn, 'TextEquiv') + unicode_textregion = ET.SubElement(text_subelement_textregion, 'Unicode') + + + has_textline = False + for child_textregion in nn: + if child_textregion.tag.endswith("TextLine"): + + is_textline_text = False + for childtest2 in child_textregion: + if childtest2.tag.endswith("TextEquiv"): + is_textline_text = True + + + if not is_textline_text: + 
text_subelement = ET.SubElement(child_textregion, 'TextEquiv') + if extracted_conf_value_merged: + text_subelement.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") + unicode_textline = ET.SubElement(text_subelement, 'Unicode') + unicode_textline.text = extracted_texts_merged[indexer] + else: + for childtest3 in child_textregion: + if childtest3.tag.endswith("TextEquiv"): + for child_uc in childtest3: + if child_uc.tag.endswith("Unicode"): + if extracted_conf_value_merged: + childtest3.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") + child_uc.text = extracted_texts_merged[indexer] + + indexer = indexer + 1 + has_textline = True + if has_textline: + if is_textregion_text: + for child4 in nn: + if child4.tag.endswith("TextEquiv"): + for childtr_uc in child4: + if childtr_uc.tag.endswith("Unicode"): + childtr_uc.text = text_by_textregion[indexer_textregion] + else: + unicode_textregion.text = text_by_textregion[indexer_textregion] + indexer_textregion = indexer_textregion + 1 + + ET.register_namespace("",page_ns) + page_tree.write(out_file_ocr, xml_declaration=True, method='xml', encoding="utf-8", default_namespace=None) + + def run( + self, + *, + overwrite: bool = False, + dir_in: Optional[str] = None, + dir_in_bin: Optional[str] = None, + image_filename: Optional[str] = None, + dir_xmls: str, + dir_out_image_text: Optional[str] = None, + dir_out: str, + ): + """ + Run OCR. + + Args: + dir_in_bin (str, optional): directory of binarized images matching the RGB inputs (same filename stems, '.png' suffix). When given, prediction uses both the RGB and the binarized image; this does not necessarily improve results and is not the default. + """ if dir_in: ls_imgs = [os.path.join(dir_in, image_filename) - for image_filename in filter(is_image_filename, + for image_filename in filter(is_image_filename, os.listdir(dir_in))] else: assert image_filename ls_imgs = [image_filename] - if self.tr_ocr: - tr_ocr_input_height_and_width = 384 - for dir_img in ls_imgs: - file_name = Path(dir_img).stem - assert dir_xmls # FIXME: check the logic - dir_xml = os.path.join(dir_xmls, file_name+'.xml') - assert dir_out # FIXME: check the logic - out_file_ocr = os.path.join(dir_out, file_name+'.xml') - - if os.path.exists(out_file_ocr): - if overwrite: - self.logger.warning("will overwrite existing output file '%s'", out_file_ocr) - else: - self.logger.warning("will skip input for existing output file '%s'", out_file_ocr) - continue - - img = cv2.imread(dir_img) - - if dir_out_image_text: - out_image_with_text = os.path.join(dir_out_image_text, file_name+'.png') - image_text = Image.new("RGB", (img.shape[1], img.shape[0]), "white") - draw = ImageDraw.Draw(image_text) - total_bb_coordinates = [] - - ##file_name = Path(dir_xmls).stem - tree1 = ET.parse(dir_xml, parser = ET.XMLParser(encoding="utf-8")) - root1=tree1.getroot() - alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' - - name_space = alltags[0].split('}')[0] - name_space = name_space.split('{')[1] - - region_tags=np.unique([x for x in alltags if x.endswith('TextRegion')]) - - - - cropped_lines = [] - cropped_lines_region_indexer = [] - cropped_lines_meging_indexing = [] - - extracted_texts = [] - - indexer_text_region = 0 - indexer_b_s = 0 - - for nn in root1.iter(region_tags): - for child_textregion in nn: - if child_textregion.tag.endswith("TextLine"): - - for child_textlines in child_textregion: - if child_textlines.tag.endswith("Coords"): - cropped_lines_region_indexer.append(indexer_text_region) - p_h=child_textlines.attrib['points'].split(' ') - textline_coords = np.array( [ [int(x.split(',')[0]), - int(x.split(',')[1]) ] - for x in p_h] ) -
x,y,w,h = cv2.boundingRect(textline_coords) - - if dir_out_image_text: - total_bb_coordinates.append([x,y,w,h]) - - h2w_ratio = h/float(w) - - img_poly_on_img = np.copy(img) - mask_poly = np.zeros(img.shape) - mask_poly = cv2.fillPoly(mask_poly, pts=[textline_coords], color=(1, 1, 1)) - - mask_poly = mask_poly[y:y+h, x:x+w, :] - img_crop = img_poly_on_img[y:y+h, x:x+w, :] - img_crop[mask_poly==0] = 255 - - self.logger.debug("processing %d lines for '%s'", - len(cropped_lines), nn.attrib['id']) - if h2w_ratio > 0.1: - cropped_lines.append(resize_image(img_crop, - tr_ocr_input_height_and_width, - tr_ocr_input_height_and_width) ) - cropped_lines_meging_indexing.append(0) - indexer_b_s+=1 - if indexer_b_s==self.b_s: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_zoo.get('ocr').generate( - pixel_values_merged.to(self.device)) - generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( - generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - else: - splited_images, _ = return_textlines_split_if_needed(img_crop, None) - #print(splited_images) - if splited_images: - cropped_lines.append(resize_image(splited_images[0], - tr_ocr_input_height_and_width, - tr_ocr_input_height_and_width)) - cropped_lines_meging_indexing.append(1) - indexer_b_s+=1 - - if indexer_b_s==self.b_s: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_zoo.get('ocr').generate( - pixel_values_merged.to(self.device)) - generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( - generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - - cropped_lines.append(resize_image(splited_images[1], - tr_ocr_input_height_and_width, - tr_ocr_input_height_and_width)) - cropped_lines_meging_indexing.append(-1) - indexer_b_s+=1 - - if indexer_b_s==self.b_s: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_zoo.get('ocr').generate( - pixel_values_merged.to(self.device)) - generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( - generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - else: - cropped_lines.append(img_crop) - cropped_lines_meging_indexing.append(0) - indexer_b_s+=1 - - if indexer_b_s==self.b_s: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_zoo.get('ocr').generate( - pixel_values_merged.to(self.device)) - generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( - generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - - - indexer_text_region = indexer_text_region +1 - - if indexer_b_s!=0: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values - generated_ids_merged = 
self.model_zoo.get('ocr').generate(pixel_values_merged.to(self.device)) - generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode(generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - ####extracted_texts = [] - ####n_iterations = math.ceil(len(cropped_lines) / self.b_s) - - ####for i in range(n_iterations): - ####if i==(n_iterations-1): - ####n_start = i*self.b_s - ####imgs = cropped_lines[n_start:] - ####else: - ####n_start = i*self.b_s - ####n_end = (i+1)*self.b_s - ####imgs = cropped_lines[n_start:n_end] - ####pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values - ####generated_ids_merged = self.model_ocr.generate( - #### pixel_values_merged.to(self.device)) - ####generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( - #### generated_ids_merged, skip_special_tokens=True) - - ####extracted_texts = extracted_texts + generated_text_merged - - del cropped_lines - gc.collect() - - extracted_texts_merged = [extracted_texts[ind] - if cropped_lines_meging_indexing[ind]==0 - else extracted_texts[ind]+" "+extracted_texts[ind+1] - if cropped_lines_meging_indexing[ind]==1 - else None - for ind in range(len(cropped_lines_meging_indexing))] - - extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] - #print(extracted_texts_merged, len(extracted_texts_merged)) - - unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) - - if dir_out_image_text: - - #font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! - font = importlib_resources.files(__package__) / "Charis-Regular.ttf" - with importlib_resources.as_file(font) as font: - font = ImageFont.truetype(font=font, size=40) - - for indexer_text, bb_ind in enumerate(total_bb_coordinates): - - - x_bb = bb_ind[0] - y_bb = bb_ind[1] - w_bb = bb_ind[2] - h_bb = bb_ind[3] - - font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], - font.path, w_bb, int(h_bb*0.4) ) - - ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) - - text_bbox = draw.textbbox((0, 0), extracted_texts_merged[indexer_text], font=font) - text_width = text_bbox[2] - text_bbox[0] - text_height = text_bbox[3] - text_bbox[1] - - text_x = x_bb + (w_bb - text_width) // 2 # Center horizontally - text_y = y_bb + (h_bb - text_height) // 2 # Center vertically - - # Draw the text - draw.text((text_x, text_y), extracted_texts_merged[indexer_text], fill="black", font=font) - image_text.save(out_image_with_text) - - #print(len(unique_cropped_lines_region_indexer), 'unique_cropped_lines_region_indexer') - #######text_by_textregion = [] - #######for ind in unique_cropped_lines_region_indexer: - #######ind = np.array(cropped_lines_region_indexer)==ind - #######extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] - #######text_by_textregion.append(" ".join(extracted_texts_merged_un)) - - text_by_textregion = [] - for ind in unique_cropped_lines_region_indexer: - ind = np.array(cropped_lines_region_indexer) == ind - extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] - if len(extracted_texts_merged_un)>1: - text_by_textregion_ind = "" - next_glue = "" - for indt in range(len(extracted_texts_merged_un)): - if (extracted_texts_merged_un[indt].endswith('⸗') or - extracted_texts_merged_un[indt].endswith('-') or - extracted_texts_merged_un[indt].endswith('¬')): - text_by_textregion_ind += next_glue + 
extracted_texts_merged_un[indt][:-1] - next_glue = "" - else: - text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt] - next_glue = " " - text_by_textregion.append(text_by_textregion_ind) - else: - text_by_textregion.append(" ".join(extracted_texts_merged_un)) - - - indexer = 0 - indexer_textregion = 0 - for nn in root1.iter(region_tags): - #id_textregion = nn.attrib['id'] - #id_textregions.append(id_textregion) - #textregions_by_existing_ids.append(text_by_textregion[indexer_textregion]) - - is_textregion_text = False - for childtest in nn: - if childtest.tag.endswith("TextEquiv"): - is_textregion_text = True - - if not is_textregion_text: - text_subelement_textregion = ET.SubElement(nn, 'TextEquiv') - unicode_textregion = ET.SubElement(text_subelement_textregion, 'Unicode') - - - has_textline = False - for child_textregion in nn: - if child_textregion.tag.endswith("TextLine"): - - is_textline_text = False - for childtest2 in child_textregion: - if childtest2.tag.endswith("TextEquiv"): - is_textline_text = True - - - if not is_textline_text: - text_subelement = ET.SubElement(child_textregion, 'TextEquiv') - ##text_subelement.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") - unicode_textline = ET.SubElement(text_subelement, 'Unicode') - unicode_textline.text = extracted_texts_merged[indexer] - else: - for childtest3 in child_textregion: - if childtest3.tag.endswith("TextEquiv"): - for child_uc in childtest3: - if child_uc.tag.endswith("Unicode"): - ##childtest3.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") - child_uc.text = extracted_texts_merged[indexer] - - indexer = indexer + 1 - has_textline = True - if has_textline: - if is_textregion_text: - for child4 in nn: - if child4.tag.endswith("TextEquiv"): - for childtr_uc in child4: - if childtr_uc.tag.endswith("Unicode"): - childtr_uc.text = text_by_textregion[indexer_textregion] - else: - unicode_textregion.text = text_by_textregion[indexer_textregion] - indexer_textregion = indexer_textregion + 1 - - ###sample_order = [(id_to_order[tid], text) - ### for tid, text in zip(id_textregions, textregions_by_existing_ids) - ### if tid in id_to_order] - - ##ordered_texts_sample = [text for _, text in sorted(sample_order)] - ##tot_page_text = ' '.join(ordered_texts_sample) - - ##for page_element in root1.iter(link+'Page'): - ##text_page = ET.SubElement(page_element, 'TextEquiv') - ##unicode_textpage = ET.SubElement(text_page, 'Unicode') - ##unicode_textpage.text = tot_page_text - - ET.register_namespace("",name_space) - tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf-8",default_namespace=None) - else: - ###max_len = 280#512#280#512 - ###padding_token = 1500#299#1500#299 - image_width = 512#max_len * 4 - image_height = 32 - - - img_size=(image_width, image_height) + for img_filename in ls_imgs: + file_stem = Path(img_filename).stem + page_file_in = os.path.join(dir_xmls, file_stem+'.xml') + out_file_ocr = os.path.join(dir_out, file_stem+'.xml') - for dir_img in ls_imgs: - file_name = Path(dir_img).stem - dir_xml = os.path.join(dir_xmls, file_name+'.xml') - out_file_ocr = os.path.join(dir_out, file_name+'.xml') + if os.path.exists(out_file_ocr): + if overwrite: + self.logger.warning("will overwrite existing output file '%s'", out_file_ocr) + else: + self.logger.warning("will skip input for existing output file '%s'", out_file_ocr) + continue - if os.path.exists(out_file_ocr): - if overwrite: - self.logger.warning("will overwrite existing output file '%s'", out_file_ocr) - else: -
self.logger.warning("will skip input for existing output file '%s'", out_file_ocr) - continue - - img = cv2.imread(dir_img) - if dir_in_bin is not None: - cropped_lines_bin = [] - img_bin = cv2.imread(os.path.join(dir_in_bin, file_name+'.png')) - - if dir_out_image_text: - out_image_with_text = os.path.join(dir_out_image_text, file_name+'.png') - image_text = Image.new("RGB", (img.shape[1], img.shape[0]), "white") - draw = ImageDraw.Draw(image_text) - total_bb_coordinates = [] + img = cv2.imread(img_filename) - tree1 = ET.parse(dir_xml, parser = ET.XMLParser(encoding="utf-8")) - root1=tree1.getroot() - alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' + page_tree = ET.parse(page_file_in, parser = ET.XMLParser(encoding="utf-8")) + page_ns = etree_namespace_for_element_tag(page_tree.getroot().tag) - name_space = alltags[0].split('}')[0] - name_space = name_space.split('{')[1] + out_image_with_text = None + if dir_out_image_text: + out_image_with_text = os.path.join(dir_out_image_text, file_stem + '.png') - region_tags=np.unique([x for x in alltags if x.endswith('TextRegion')]) - - cropped_lines = [] - cropped_lines_ver_index = [] - cropped_lines_region_indexer = [] - cropped_lines_meging_indexing = [] - - tinl = time.time() - indexer_text_region = 0 - indexer_textlines = 0 - for nn in root1.iter(region_tags): - try: - type_textregion = nn.attrib['type'] - except: - type_textregion = 'paragraph' - for child_textregion in nn: - if child_textregion.tag.endswith("TextLine"): - for child_textlines in child_textregion: - if child_textlines.tag.endswith("Coords"): - cropped_lines_region_indexer.append(indexer_text_region) - p_h=child_textlines.attrib['points'].split(' ') - textline_coords = np.array( [ [int(x.split(',')[0]), - int(x.split(',')[1]) ] - for x in p_h] ) - - x,y,w,h = cv2.boundingRect(textline_coords) - - angle_radians = math.atan2(h, w) - # Convert to degrees - angle_degrees = math.degrees(angle_radians) - if type_textregion=='drop-capital': - angle_degrees = 0 - - if dir_out_image_text: - total_bb_coordinates.append([x,y,w,h]) - - w_scaled = w * image_height/float(h) - - img_poly_on_img = np.copy(img) - if dir_in_bin is not None: - img_poly_on_img_bin = np.copy(img_bin) - img_crop_bin = img_poly_on_img_bin[y:y+h, x:x+w, :] - - mask_poly = np.zeros(img.shape) - mask_poly = cv2.fillPoly(mask_poly, pts=[textline_coords], color=(1, 1, 1)) - - - mask_poly = mask_poly[y:y+h, x:x+w, :] - img_crop = img_poly_on_img[y:y+h, x:x+w, :] - - # print(file_name, angle_degrees, w*h, - # mask_poly[:,:,0].sum(), - # mask_poly[:,:,0].sum() /float(w*h) , - # 'didi') - - if angle_degrees > 3: - better_des_slope = get_orientation_moments(textline_coords) - - img_crop = rotate_image_with_padding(img_crop, better_des_slope) - if dir_in_bin is not None: - img_crop_bin = rotate_image_with_padding(img_crop_bin, better_des_slope) - - mask_poly = rotate_image_with_padding(mask_poly, better_des_slope) - mask_poly = mask_poly.astype('uint8') - - #new bounding box - x_n, y_n, w_n, h_n = get_contours_and_bounding_boxes(mask_poly[:,:,0]) - - mask_poly = mask_poly[y_n:y_n+h_n, x_n:x_n+w_n, :] - img_crop = img_crop[y_n:y_n+h_n, x_n:x_n+w_n, :] - - if not self.do_not_mask_with_textline_contour: - img_crop[mask_poly==0] = 255 - if dir_in_bin is not None: - img_crop_bin = img_crop_bin[y_n:y_n+h_n, x_n:x_n+w_n, :] - if not self.do_not_mask_with_textline_contour: - img_crop_bin[mask_poly==0] = 255 - - if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 90: - if dir_in_bin is not 
None: - img_crop, img_crop_bin = \ - break_curved_line_into_small_pieces_and_then_merge( - img_crop, mask_poly, img_crop_bin) - else: - img_crop, _ = \ - break_curved_line_into_small_pieces_and_then_merge( - img_crop, mask_poly) - - else: - better_des_slope = 0 - if not self.do_not_mask_with_textline_contour: - img_crop[mask_poly==0] = 255 - if dir_in_bin is not None: - if not self.do_not_mask_with_textline_contour: - img_crop_bin[mask_poly==0] = 255 - if type_textregion=='drop-capital': - pass - else: - if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 90: - if dir_in_bin is not None: - img_crop, img_crop_bin = \ - break_curved_line_into_small_pieces_and_then_merge( - img_crop, mask_poly, img_crop_bin) - else: - img_crop, _ = \ - break_curved_line_into_small_pieces_and_then_merge( - img_crop, mask_poly) - - if w_scaled < 750:#1.5*image_width: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop, image_height, image_width) - cropped_lines.append(img_fin) - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - cropped_lines_meging_indexing.append(0) - if dir_in_bin is not None: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop_bin, image_height, image_width) - cropped_lines_bin.append(img_fin) - else: - splited_images, splited_images_bin = return_textlines_split_if_needed( - img_crop, img_crop_bin if dir_in_bin is not None else None) - if splited_images: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - splited_images[0], image_height, image_width) - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(1) - - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - splited_images[1], image_height, image_width) - - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(-1) - - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - if dir_in_bin is not None: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - splited_images_bin[0], image_height, image_width) - cropped_lines_bin.append(img_fin) - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - splited_images_bin[1], image_height, image_width) - cropped_lines_bin.append(img_fin) - - else: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop, image_height, image_width) - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(0) - - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - if dir_in_bin is not None: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop_bin, image_height, image_width) - cropped_lines_bin.append(img_fin) - + img_bin = None + if dir_in_bin: + img_bin = cv2.imread(os.path.join(dir_in_bin, file_stem+'.png')) - indexer_text_region = indexer_text_region +1 - - extracted_texts = [] - extracted_conf_value = [] - n_iterations = math.ceil(len(cropped_lines) / self.b_s) + if self.tr_ocr: + result = self.run_trocr( + img=img, + page_tree=page_tree, + page_ns=page_ns, - for i in range(n_iterations): - if i==(n_iterations-1): - n_start = i*self.b_s - imgs = cropped_lines[n_start:] - imgs = np.array(imgs) - imgs = imgs.reshape(imgs.shape[0], image_height, image_width, 3) - - ver_imgs = np.array( cropped_lines_ver_index[n_start:] ) - indices_ver = np.where(ver_imgs == 1)[0] - - 
#print(indices_ver, 'indices_ver') - if len(indices_ver)>0: - imgs_ver_flipped = imgs[indices_ver, : ,: ,:] - imgs_ver_flipped = imgs_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - - else: - imgs_ver_flipped = None - - if dir_in_bin is not None: - imgs_bin = cropped_lines_bin[n_start:] - imgs_bin = np.array(imgs_bin) - imgs_bin = imgs_bin.reshape(imgs_bin.shape[0], image_height, image_width, 3) - - if len(indices_ver)>0: - imgs_bin_ver_flipped = imgs_bin[indices_ver, : ,: ,:] - imgs_bin_ver_flipped = imgs_bin_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - - else: - imgs_bin_ver_flipped = None - else: - n_start = i*self.b_s - n_end = (i+1)*self.b_s - imgs = cropped_lines[n_start:n_end] - imgs = np.array(imgs).reshape(self.b_s, image_height, image_width, 3) - - ver_imgs = np.array( cropped_lines_ver_index[n_start:n_end] ) - indices_ver = np.where(ver_imgs == 1)[0] - #print(indices_ver, 'indices_ver') - - if len(indices_ver)>0: - imgs_ver_flipped = imgs[indices_ver, : ,: ,:] - imgs_ver_flipped = imgs_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - else: - imgs_ver_flipped = None + tr_ocr_input_height_and_width = 384 + ) + else: + result = self.run_cnn( + img=img, + page_tree=page_tree, + page_ns=page_ns, - - if dir_in_bin is not None: - imgs_bin = cropped_lines_bin[n_start:n_end] - imgs_bin = np.array(imgs_bin).reshape(self.b_s, image_height, image_width, 3) - - - if len(indices_ver)>0: - imgs_bin_ver_flipped = imgs_bin[indices_ver, : ,: ,:] - imgs_bin_ver_flipped = imgs_bin_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - else: - imgs_bin_ver_flipped = None - + img_bin=img_bin, + image_width=512, + image_height=32, + ) - self.logger.debug("processing next %d lines", len(imgs)) - preds = self.model_zoo.get('ocr').predict(imgs, verbose=0) - - if len(indices_ver)>0: - preds_flipped = self.model_zoo.get('ocr').predict(imgs_ver_flipped, verbose=0) - preds_max_fliped = np.max(preds_flipped, axis=2 ) - preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) - pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character - masked_means_flipped = \ - np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) - masked_means_flipped[np.isnan(masked_means_flipped)] = 0 - - preds_max = np.max(preds, axis=2 ) - preds_max_args = np.argmax(preds, axis=2 ) - pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character - - masked_means = \ - np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool, axis=1) - masked_means[np.isnan(masked_means)] = 0 - - masked_means_ver = masked_means[indices_ver] - #print(masked_means_ver, 'pred_max_not_unk') - - indices_where_flipped_conf_value_is_higher = \ - np.where(masked_means_flipped > masked_means_ver)[0] - - #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') - if len(indices_where_flipped_conf_value_is_higher)>0: - indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] - preds[indices_to_be_replaced,:,:] = \ - preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] - if dir_in_bin is not None: - preds_bin = self.model_zoo.get('ocr').predict(imgs_bin, verbose=0) - - if len(indices_ver)>0: - preds_flipped = self.model_zoo.get('ocr').predict(imgs_bin_ver_flipped, verbose=0) - preds_max_fliped = np.max(preds_flipped, axis=2 ) - 
preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) - pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character - masked_means_flipped = \ - np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) - masked_means_flipped[np.isnan(masked_means_flipped)] = 0 - - preds_max = np.max(preds, axis=2 ) - preds_max_args = np.argmax(preds, axis=2 ) - pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character - - masked_means = \ - np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool, axis=1) - masked_means[np.isnan(masked_means)] = 0 - - masked_means_ver = masked_means[indices_ver] - #print(masked_means_ver, 'pred_max_not_unk') - - indices_where_flipped_conf_value_is_higher = \ - np.where(masked_means_flipped > masked_means_ver)[0] - - #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') - if len(indices_where_flipped_conf_value_is_higher)>0: - indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] - preds_bin[indices_to_be_replaced,:,:] = \ - preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] - - preds = (preds + preds_bin) / 2. - - pred_texts = decode_batch_predictions(preds, self.model_zoo.get('num_to_char')) - - preds_max = np.max(preds, axis=2 ) - preds_max_args = np.argmax(preds, axis=2 ) - pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character - masked_means = \ - np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool, axis=1) - - for ib in range(imgs.shape[0]): - pred_texts_ib = pred_texts[ib].replace("[UNK]", "") - if masked_means[ib] >= self.min_conf_value_of_textline_text: - extracted_texts.append(pred_texts_ib) - extracted_conf_value.append(masked_means[ib]) - else: - extracted_texts.append("") - extracted_conf_value.append(0) - del cropped_lines - if dir_in_bin is not None: - del cropped_lines_bin - gc.collect() - - extracted_texts_merged = [extracted_texts[ind] - if cropped_lines_meging_indexing[ind]==0 - else extracted_texts[ind]+" "+extracted_texts[ind+1] - if cropped_lines_meging_indexing[ind]==1 - else None - for ind in range(len(cropped_lines_meging_indexing))] - - extracted_conf_value_merged = [extracted_conf_value[ind] - if cropped_lines_meging_indexing[ind]==0 - else (extracted_conf_value[ind]+extracted_conf_value[ind+1])/2. - if cropped_lines_meging_indexing[ind]==1 - else None - for ind in range(len(cropped_lines_meging_indexing))] - - extracted_conf_value_merged = [extracted_conf_value_merged[ind_cfm] - for ind_cfm in range(len(extracted_texts_merged)) - if extracted_texts_merged[ind_cfm] is not None] - extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] - unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) - - if dir_out_image_text: - #font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! 
- font = importlib_resources.files(__package__) / "Charis-Regular.ttf" - with importlib_resources.as_file(font) as font: - font = ImageFont.truetype(font=font, size=40) - - for indexer_text, bb_ind in enumerate(total_bb_coordinates): - x_bb = bb_ind[0] - y_bb = bb_ind[1] - w_bb = bb_ind[2] - h_bb = bb_ind[3] - - font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], - font.path, w_bb, int(h_bb*0.4) ) - - ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) - - text_bbox = draw.textbbox((0, 0), extracted_texts_merged[indexer_text], font=font) - text_width = text_bbox[2] - text_bbox[0] - text_height = text_bbox[3] - text_bbox[1] - - text_x = x_bb + (w_bb - text_width) // 2 # Center horizontally - text_y = y_bb + (h_bb - text_height) // 2 # Center vertically - - # Draw the text - draw.text((text_x, text_y), extracted_texts_merged[indexer_text], fill="black", font=font) - image_text.save(out_image_with_text) - - text_by_textregion = [] - for ind in unique_cropped_lines_region_indexer: - ind = np.array(cropped_lines_region_indexer)==ind - extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] - if len(extracted_texts_merged_un)>1: - text_by_textregion_ind = "" - next_glue = "" - for indt in range(len(extracted_texts_merged_un)): - if (extracted_texts_merged_un[indt].endswith('⸗') or - extracted_texts_merged_un[indt].endswith('-') or - extracted_texts_merged_un[indt].endswith('¬')): - text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt][:-1] - next_glue = "" - else: - text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt] - next_glue = " " - text_by_textregion.append(text_by_textregion_ind) - else: - text_by_textregion.append(" ".join(extracted_texts_merged_un)) - #print(text_by_textregion, 'text_by_textregiontext_by_textregiontext_by_textregiontext_by_textregiontext_by_textregion') - - ###index_tot_regions = [] - ###tot_region_ref = [] - - ###for jj in root1.iter(link+'RegionRefIndexed'): - ###index_tot_regions.append(jj.attrib['index']) - ###tot_region_ref.append(jj.attrib['regionRef']) - - ###id_to_order = {tid: ro for tid, ro in zip(tot_region_ref, index_tot_regions)} - - #id_textregions = [] - #textregions_by_existing_ids = [] - indexer = 0 - indexer_textregion = 0 - for nn in root1.iter(region_tags): - #id_textregion = nn.attrib['id'] - #id_textregions.append(id_textregion) - #textregions_by_existing_ids.append(text_by_textregion[indexer_textregion]) - - is_textregion_text = False - for childtest in nn: - if childtest.tag.endswith("TextEquiv"): - is_textregion_text = True - - if not is_textregion_text: - text_subelement_textregion = ET.SubElement(nn, 'TextEquiv') - unicode_textregion = ET.SubElement(text_subelement_textregion, 'Unicode') - - - has_textline = False - for child_textregion in nn: - if child_textregion.tag.endswith("TextLine"): - - is_textline_text = False - for childtest2 in child_textregion: - if childtest2.tag.endswith("TextEquiv"): - is_textline_text = True - - - if not is_textline_text: - text_subelement = ET.SubElement(child_textregion, 'TextEquiv') - text_subelement.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") - unicode_textline = ET.SubElement(text_subelement, 'Unicode') - unicode_textline.text = extracted_texts_merged[indexer] - else: - for childtest3 in child_textregion: - if childtest3.tag.endswith("TextEquiv"): - for child_uc in childtest3: - if child_uc.tag.endswith("Unicode"): - childtest3.set('conf', - f"{extracted_conf_value_merged[indexer]:.2f}") - 
child_uc.text = extracted_texts_merged[indexer] - - indexer = indexer + 1 - has_textline = True - if has_textline: - if is_textregion_text: - for child4 in nn: - if child4.tag.endswith("TextEquiv"): - for childtr_uc in child4: - if childtr_uc.tag.endswith("Unicode"): - childtr_uc.text = text_by_textregion[indexer_textregion] - else: - unicode_textregion.text = text_by_textregion[indexer_textregion] - indexer_textregion = indexer_textregion + 1 - - ###sample_order = [(id_to_order[tid], text) - ### for tid, text in zip(id_textregions, textregions_by_existing_ids) - ### if tid in id_to_order] - - ##ordered_texts_sample = [text for _, text in sorted(sample_order)] - ##tot_page_text = ' '.join(ordered_texts_sample) - - ##for page_element in root1.iter(link+'Page'): - ##text_page = ET.SubElement(page_element, 'TextEquiv') - ##unicode_textpage = ET.SubElement(text_page, 'Unicode') - ##unicode_textpage.text = tot_page_text - - ET.register_namespace("",name_space) - tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf-8",default_namespace=None) - #print("Job done in %.1fs", time.time() - t0) + self.write_ocr( + result=result, + img=img, + page_tree=page_tree, + page_ns=page_ns, + out_file_ocr=out_file_ocr, + out_image_with_text=out_image_with_text, + ) diff --git a/src/eynollah/utils/font.py b/src/eynollah/utils/font.py new file mode 100644 index 0000000..939933e --- /dev/null +++ b/src/eynollah/utils/font.py @@ -0,0 +1,16 @@ + +# cannot use importlib.resources until we move to 3.9+ forimportlib.resources.files +import sys +from PIL import ImageFont + +if sys.version_info < (3, 10): + import importlib_resources +else: + import importlib.resources as importlib_resources + + +def get_font(): + #font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! + font = importlib_resources.files(__package__) / "../Charis-Regular.ttf" + with importlib_resources.as_file(font) as font: + return ImageFont.truetype(font=font, size=40) diff --git a/src/eynollah/utils/utils_ocr.py b/src/eynollah/utils/utils_ocr.py index 2ba328b..b738e29 100644 --- a/src/eynollah/utils/utils_ocr.py +++ b/src/eynollah/utils/utils_ocr.py @@ -128,6 +128,7 @@ def return_textlines_split_if_needed(textline_image, textline_image_bin=None): return [image1, image2], None else: return None, None + def preprocess_and_resize_image_for_ocrcnn_model(img, image_height, image_width): if img.shape[0]==0 or img.shape[1]==0: img_fin = np.ones((image_height, image_width, 3)) diff --git a/src/eynollah/utils/xml.py b/src/eynollah/utils/xml.py index 88d1df8..ded098e 100644 --- a/src/eynollah/utils/xml.py +++ b/src/eynollah/utils/xml.py @@ -88,3 +88,7 @@ def order_and_id_of_texts(found_polygons_text_region, found_polygons_text_region order_of_texts.append(interest) return order_of_texts, id_of_texts + +def etree_namespace_for_element_tag(tag: str): + right = tag.find('}') + return tag[1:right]
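Note on the new xml.py helper: `etree_namespace_for_element_tag` simply slices the namespace URI out of an ElementTree tag in Clark notation; this is how the refactored `run`/`write_ocr` obtain the PAGE namespace before iterating regions and re-registering it for output. A minimal usage sketch under those assumptions (the file names below are placeholders, not part of the patch):

    import xml.etree.ElementTree as ET

    def etree_namespace_for_element_tag(tag: str):
        # '{http://example.org/PAGE}PcGts' -> 'http://example.org/PAGE'
        right = tag.find('}')
        return tag[1:right]

    # parse a PAGE-XML file and recover its default namespace from the root tag
    page_tree = ET.parse("page_in.xml")          # placeholder input file
    page_ns = etree_namespace_for_element_tag(page_tree.getroot().tag)

    # iterate text regions the same way write_ocr does
    for region in page_tree.getroot().iter(f"{{{page_ns}}}TextRegion"):
        pass  # TextEquiv/Unicode elements would be attached here

    # re-register the default namespace so the output keeps an unprefixed PAGE namespace
    ET.register_namespace("", page_ns)
    page_tree.write("page_out.xml", xml_declaration=True, encoding="utf-8")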