Mirror of https://github.com/qurator-spk/eynollah.git, synced 2025-06-08 11:49:55 +02:00

Merge remote-tracking branch 'vahidrezanezhad/main' into main

Commit 4223fed628

9 changed files with 653 additions and 111 deletions

README.md (12 lines changed)
@@ -80,10 +80,18 @@ eynollah \
-o <directory to write output xml or enhanced image> \
-m <directory of models> \
-fl <if true, the tool will perform full layout analysis> \
-ae <if true, the tool will resize and enhance the image and produce the resulting image as output> \
-ae <if true, the tool will resize and enhance the image and produce the resulting image as output. The rescaled and enhanced image will be saved in the output directory> \
-as <if true, the tool will check whether the document needs rescaling or not> \
-cl <if true, the tool will extract the contours of curved textlines instead of rectangle bounding boxes> \
-si <if a directory is given here, the tool will output image regions inside documents there>
-si <if a directory is given here, the tool will output image regions inside documents there> \
-sd <if a directory is given, the deskewed image will be saved there> \
-sa <if a directory is given, all plots needed for documentation will be saved there> \
-tab <if true, the tool will try to detect tables> \
-ib <in general, eynollah uses RGB as input, but if the input document is very dark, very bright, or hard to read for any other reason, you can turn binarized input on. This option does not mean that you have to provide a binary image; rather, the tool itself will binarize the RGB input document> \
-ho <if true, the tool will ignore the role of headers in reading order detection> \
-sl <if a directory is given, the plot of the layout will be saved there> \
-ep <if true, the tool will be enabled to save the desired plots. This should be true alongside the -sl, -sd, -sa, -si or -ae options>

```

The tool does accept binarized images but works better on original (RGB format) images.
@@ -73,6 +73,12 @@ from qurator.eynollah.eynollah import Eynollah
    is_flag=True,
    help="if this parameter is set to true, this tool will try to return all elements of layout.",
)
@click.option(
    "--tables/--no-tables",
    "-tab/-notab",
    is_flag=True,
    help="if this parameter is set to true, this tool will try to detect tables.",
)
@click.option(
    "--input_binary/--input-RGB",
    "-ib/-irgb",

@@ -109,6 +115,7 @@ def main(
    allow_enhancement,
    curved_line,
    full_layout,
    tables,
    input_binary,
    allow_scaling,
    headers_off,

@@ -117,11 +124,11 @@ def main(
    if log_level:
        setOverrideLogLevel(log_level)
    initLogging()
    if not enable_plotting and (save_layout or save_deskewed or save_all or save_images):
        print("Error: You used one of -sl, -sd, -sa or -si but did not enable plotting with -ep")
    if not enable_plotting and (save_layout or save_deskewed or save_all or save_images or allow_enhancement):
        print("Error: You used one of -sl, -sd, -sa, -si or -ae but did not enable plotting with -ep")
        sys.exit(1)
    elif enable_plotting and not (save_layout or save_deskewed or save_all or save_images):
        print("Error: You used -ep to enable plotting but set none of -sl, -sd, -sa or -si")
    elif enable_plotting and not (save_layout or save_deskewed or save_all or save_images or allow_enhancement):
        print("Error: You used -ep to enable plotting but set none of -sl, -sd, -sa, -si or -ae")
        sys.exit(1)
    eynollah = Eynollah(
        image_filename=image,

@@ -135,6 +142,7 @@ def main(
        allow_enhancement=allow_enhancement,
        curved_line=curved_line,
        full_layout=full_layout,
        tables=tables,
        input_binary=input_binary,
        allow_scaling=allow_scaling,
        headers_off=headers_off,
@@ -26,12 +26,16 @@ sys.stderr = stderr
import tensorflow as tf
tf.get_logger().setLevel("ERROR")
warnings.filterwarnings("ignore")

from scipy.signal import find_peaks
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter1d

from .utils.contour import (
    filter_contours_area_of_image,
    filter_contours_area_of_image_tables,
    find_contours_mean_y_diff,
    find_new_features_of_contours,
    find_features_of_contours,
    get_text_region_boxes_by_given_contours,
    get_textregion_contours_in_org_image,
    return_contours_of_image,

@@ -92,6 +96,7 @@ class Eynollah:
        allow_enhancement=False,
        curved_line=False,
        full_layout=False,
        tables=False,
        input_binary=False,
        allow_scaling=False,
        headers_off=False,
@@ -110,10 +115,12 @@ class Eynollah:
        self.allow_enhancement = allow_enhancement
        self.curved_line = curved_line
        self.full_layout = full_layout
        self.tables = tables
        self.input_binary = input_binary
        self.allow_scaling = allow_scaling
        self.headers_off = headers_off
        self.plotter = None if not enable_plotting else EynollahPlotter(
            dir_out=self.dir_out,
            dir_of_all=dir_of_all,
            dir_of_deskewed=dir_of_deskewed,
            dir_of_cropped_images=dir_of_cropped_images,

@@ -137,6 +144,7 @@ class Eynollah:
        self.model_page_dir = dir_models + "/model_page_mixed_best.h5"
        self.model_region_dir_p_ens = dir_models + "/model_ensemble_s.h5"
        self.model_textline_dir = dir_models + "/model_textline_newspapers.h5"
        self.model_tables = dir_models + "/model_tables_ens_mixed_new_2.h5"

    def _cache_images(self, image_filename=None, image_pil=None):
        ret = {}

@@ -1166,7 +1174,7 @@ class Eynollah:
        try:
            img_only_regions = cv2.erode(img_only_regions_with_sep[:,:], KERNEL, iterations=20)

            _, _ = find_num_col(img_only_regions, multiplier=6.0)
            _, _ = find_num_col(img_only_regions, num_col_classifier, self.tables, multiplier=6.0)

        img = resize_image(img_org, int(img_org.shape[0]), int(img_org.shape[1]*(1.2 if is_image_enhanced else 1)))
@@ -1612,12 +1620,326 @@ class Eynollah:
|
|||
order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0])
|
||||
|
||||
return order_text_new, id_of_texts_tot
|
||||
def check_iou_of_bounding_box_and_contour_for_tables(self, layout, table_prediction_early, pixel_tabel, num_col_classifier):
|
||||
layout_org = np.copy(layout)
|
||||
layout_org[:,:,0][layout_org[:,:,0]==pixel_tabel] = 0
|
||||
layout = (layout[:,:,0]==pixel_tabel)*1
|
||||
|
||||
layout =np.repeat(layout[:, :, np.newaxis], 3, axis=2)
|
||||
layout = layout.astype(np.uint8)
|
||||
imgray = cv2.cvtColor(layout, cv2.COLOR_BGR2GRAY )
|
||||
_, thresh = cv2.threshold(imgray, 0, 255, 0)
|
||||
|
||||
contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
|
||||
cnt_size = np.array([cv2.contourArea(contours[j]) for j in range(len(contours))])
|
||||
|
||||
contours_new = []
|
||||
for i in range(len(contours)):
|
||||
x, y, w, h = cv2.boundingRect(contours[i])
|
||||
iou = cnt_size[i] /float(w*h) *100
|
||||
|
||||
if iou<80:
|
||||
layout_contour = np.zeros((layout_org.shape[0], layout_org.shape[1]))
|
||||
layout_contour= cv2.fillPoly(layout_contour,pts=[contours[i]] ,color=(1,1,1))
|
||||
|
||||
|
||||
layout_contour_sum = layout_contour.sum(axis=0)
|
||||
layout_contour_sum_diff = np.diff(layout_contour_sum)
|
||||
layout_contour_sum_diff= np.abs(layout_contour_sum_diff)
|
||||
layout_contour_sum_diff_smoothed= gaussian_filter1d(layout_contour_sum_diff, 10)
|
||||
|
||||
peaks, _ = find_peaks(layout_contour_sum_diff_smoothed, height=0)
|
||||
peaks= peaks[layout_contour_sum_diff_smoothed[peaks]>4]
|
||||
|
||||
for j in range(len(peaks)):
|
||||
layout_contour[:,peaks[j]-3+1:peaks[j]+1+3] = 0
|
||||
|
||||
layout_contour=cv2.erode(layout_contour[:,:], KERNEL, iterations=5)
|
||||
layout_contour=cv2.dilate(layout_contour[:,:], KERNEL, iterations=5)
|
||||
|
||||
layout_contour =np.repeat(layout_contour[:, :, np.newaxis], 3, axis=2)
|
||||
layout_contour = layout_contour.astype(np.uint8)
|
||||
|
||||
imgray = cv2.cvtColor(layout_contour, cv2.COLOR_BGR2GRAY )
|
||||
_, thresh = cv2.threshold(imgray, 0, 255, 0)
|
||||
|
||||
contours_sep, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
|
||||
|
||||
for ji in range(len(contours_sep) ):
|
||||
contours_new.append(contours_sep[ji])
|
||||
if num_col_classifier>=2:
|
||||
only_recent_contour_image = np.zeros((layout.shape[0],layout.shape[1]))
|
||||
only_recent_contour_image= cv2.fillPoly(only_recent_contour_image,pts=[contours_sep[ji]] ,color=(1,1,1))
|
||||
table_pixels_masked_from_early_pre = only_recent_contour_image[:,:]*table_prediction_early[:,:]
|
||||
iou_in = table_pixels_masked_from_early_pre.sum() /float(only_recent_contour_image.sum()) *100
|
||||
#print(iou_in,'iou_in_in1')
|
||||
|
||||
if iou_in>30:
|
||||
layout_org= cv2.fillPoly(layout_org,pts=[contours_sep[ji]] ,color=(pixel_tabel,pixel_tabel,pixel_tabel))
|
||||
else:
|
||||
pass
|
||||
else:
|
||||
|
||||
layout_org= cv2.fillPoly(layout_org,pts=[contours_sep[ji]] ,color=(pixel_tabel,pixel_tabel,pixel_tabel))
|
||||
|
||||
else:
|
||||
contours_new.append(contours[i])
|
||||
if num_col_classifier>=2:
|
||||
only_recent_contour_image = np.zeros((layout.shape[0],layout.shape[1]))
|
||||
only_recent_contour_image= cv2.fillPoly(only_recent_contour_image,pts=[contours[i]] ,color=(1,1,1))
|
||||
|
||||
table_pixels_masked_from_early_pre = only_recent_contour_image[:,:]*table_prediction_early[:,:]
|
||||
iou_in = table_pixels_masked_from_early_pre.sum() /float(only_recent_contour_image.sum()) *100
|
||||
#print(iou_in,'iou_in')
|
||||
if iou_in>30:
|
||||
layout_org= cv2.fillPoly(layout_org,pts=[contours[i]] ,color=(pixel_tabel,pixel_tabel,pixel_tabel))
|
||||
else:
|
||||
pass
|
||||
else:
|
||||
layout_org= cv2.fillPoly(layout_org,pts=[contours[i]] ,color=(pixel_tabel,pixel_tabel,pixel_tabel))
|
||||
|
||||
return layout_org, contours_new
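The decisive test in this new method is the ratio of a candidate table contour's filled area to the area of its bounding box: below 80% the region is treated as a merged, irregular block and is split at column gaps before its pieces are re-added as table pixels. A minimal, self-contained sketch of that coverage test (the toy mask and the printed messages are illustrative, not part of the commit):

```python
import cv2
import numpy as np

# Synthetic L-shaped region: its contour covers well under 80% of its
# bounding box, so the method above would send it to the splitting branch.
mask = np.zeros((200, 200), dtype=np.uint8)
mask[20:180, 20:60] = 255     # vertical bar
mask[140:180, 20:180] = 255   # horizontal bar

contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    coverage = cv2.contourArea(cnt) / float(w * h) * 100
    print("coverage %.1f%% ->" % coverage,
          "split at column gaps" if coverage < 80 else "keep as one table")
```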
def delete_separator_around(self,spliter_y,peaks_neg,image_by_region, pixel_line, pixel_table):
|
||||
# format of subboxes: box=[x1, x2 , y1, y2]
|
||||
pix_del = 100
|
||||
if len(image_by_region.shape)==3:
|
||||
for i in range(len(spliter_y)-1):
|
||||
for j in range(1,len(peaks_neg[i])-1):
|
||||
image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,0][image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,0]==pixel_line ]=0
|
||||
image_by_region[spliter_y[i]:spliter_y[i+1],peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,0][image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,1]==pixel_line ]=0
|
||||
image_by_region[spliter_y[i]:spliter_y[i+1],peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,0][image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,2]==pixel_line ]=0
|
||||
|
||||
image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,0][image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,0]==pixel_table ]=0
|
||||
image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,0][image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,1]==pixel_table ]=0
|
||||
image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,0][image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del,2]==pixel_table ]=0
|
||||
else:
|
||||
for i in range(len(spliter_y)-1):
|
||||
for j in range(1,len(peaks_neg[i])-1):
|
||||
image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del][image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del]==pixel_line ]=0
|
||||
|
||||
image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del][image_by_region[int(spliter_y[i]):int(spliter_y[i+1]),peaks_neg[i][j]-pix_del:peaks_neg[i][j]+pix_del]==pixel_table ]=0
|
||||
return image_by_region
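delete_separator_around above is easier to read as: within each horizontal band between two split points, blank out any line or table label that falls inside a ±100 px window around every detected column gap. A simplified single-channel sketch of that idea (function and parameter names here are illustrative, not from the commit):

```python
import numpy as np

def clear_around_column_gaps(label_img, splitter_y, column_peaks, labels=(3, 10), pix_del=100):
    """Zero out `labels` in a +/- pix_del band around each column gap, band by band."""
    out = label_img.copy()
    for y0, y1 in zip(splitter_y[:-1], splitter_y[1:]):
        for x in column_peaks:
            band = out[int(y0):int(y1), max(0, x - pix_del):x + pix_del]
            band[np.isin(band, labels)] = 0   # drop separator-line (3) and table (10) labels
    return out
```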
def add_tables_heuristic_to_layout(self, image_regions_eraly_p,boxes, slope_mean_hor, spliter_y,peaks_neg_tot, image_revised, num_col_classifier, min_area, pixel_line):
|
||||
pixel_table =10
|
||||
image_revised_1 = self.delete_separator_around(spliter_y, peaks_neg_tot, image_revised, pixel_line, pixel_table)
|
||||
|
||||
try:
|
||||
image_revised_1[:,:30][image_revised_1[:,:30]==pixel_line] = 0
|
||||
image_revised_1[:,image_revised_1.shape[1]-30:][image_revised_1[:,image_revised_1.shape[1]-30:]==pixel_line] = 0
|
||||
except:
|
||||
pass
|
||||
|
||||
img_comm_e = np.zeros(image_revised_1.shape)
|
||||
img_comm = np.repeat(img_comm_e[:, :, np.newaxis], 3, axis=2)
|
||||
|
||||
for indiv in np.unique(image_revised_1):
|
||||
image_col=(image_revised_1==indiv)*255
|
||||
img_comm_in=np.repeat(image_col[:, :, np.newaxis], 3, axis=2)
|
||||
img_comm_in=img_comm_in.astype(np.uint8)
|
||||
|
||||
imgray = cv2.cvtColor(img_comm_in, cv2.COLOR_BGR2GRAY)
|
||||
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
|
||||
contours,hirarchy=cv2.findContours(thresh.copy(), cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
|
||||
|
||||
if indiv==pixel_table:
|
||||
main_contours = filter_contours_area_of_image_tables(thresh, contours, hirarchy, max_area = 1, min_area = 0.001)
|
||||
else:
|
||||
main_contours = filter_contours_area_of_image_tables(thresh, contours, hirarchy, max_area = 1, min_area = min_area)
|
||||
|
||||
img_comm = cv2.fillPoly(img_comm, pts = main_contours, color = (indiv, indiv, indiv))
|
||||
img_comm = img_comm.astype(np.uint8)
|
||||
|
||||
if not self.isNaN(slope_mean_hor):
|
||||
image_revised_last = np.zeros((image_regions_eraly_p.shape[0], image_regions_eraly_p.shape[1],3))
|
||||
for i in range(len(boxes)):
|
||||
image_box=img_comm[int(boxes[i][2]):int(boxes[i][3]),int(boxes[i][0]):int(boxes[i][1]),:]
|
||||
try:
|
||||
image_box_tabels_1=(image_box[:,:,0]==pixel_table)*1
|
||||
contours_tab,_=return_contours_of_image(image_box_tabels_1)
|
||||
contours_tab=filter_contours_area_of_image_tables(image_box_tabels_1,contours_tab,_,1,0.003)
|
||||
image_box_tabels_1=(image_box[:,:,0]==pixel_line)*1
|
||||
|
||||
image_box_tabels_and_m_text=( (image_box[:,:,0]==pixel_table) | (image_box[:,:,0]==1) )*1
|
||||
image_box_tabels_and_m_text=image_box_tabels_and_m_text.astype(np.uint8)
|
||||
|
||||
image_box_tabels_1=image_box_tabels_1.astype(np.uint8)
|
||||
image_box_tabels_1 = cv2.dilate(image_box_tabels_1,KERNEL,iterations = 5)
|
||||
|
||||
contours_table_m_text,_=return_contours_of_image(image_box_tabels_and_m_text)
|
||||
image_box_tabels=np.repeat(image_box_tabels_1[:, :, np.newaxis], 3, axis=2)
|
||||
|
||||
image_box_tabels=image_box_tabels.astype(np.uint8)
|
||||
imgray = cv2.cvtColor(image_box_tabels, cv2.COLOR_BGR2GRAY)
|
||||
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
|
||||
|
||||
contours_line,hierachy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
|
||||
|
||||
y_min_main_line ,y_max_main_line=find_features_of_contours(contours_line)
|
||||
y_min_main_tab ,y_max_main_tab=find_features_of_contours(contours_tab)
|
||||
|
||||
cx_tab_m_text,cy_tab_m_text ,x_min_tab_m_text , x_max_tab_m_text, y_min_tab_m_text ,y_max_tab_m_text, _= find_new_features_of_contours(contours_table_m_text)
|
||||
cx_tabl,cy_tabl ,x_min_tabl , x_max_tabl, y_min_tabl ,y_max_tabl,_= find_new_features_of_contours(contours_tab)
|
||||
|
||||
if len(y_min_main_tab )>0:
|
||||
y_down_tabs=[]
|
||||
y_up_tabs=[]
|
||||
|
||||
for i_t in range(len(y_min_main_tab )):
|
||||
y_down_tab=[]
|
||||
y_up_tab=[]
|
||||
for i_l in range(len(y_min_main_line)):
|
||||
if y_min_main_tab[i_t]>y_min_main_line[i_l] and y_max_main_tab[i_t]>y_min_main_line[i_l] and y_min_main_tab[i_t]>y_max_main_line[i_l] and y_max_main_tab[i_t]>y_min_main_line[i_l]:
|
||||
pass
|
||||
elif y_min_main_tab[i_t]<y_max_main_line[i_l] and y_max_main_tab[i_t]<y_max_main_line[i_l] and y_max_main_tab[i_t]<y_min_main_line[i_l] and y_min_main_tab[i_t]<y_min_main_line[i_l]:
|
||||
pass
|
||||
elif np.abs(y_max_main_line[i_l]-y_min_main_line[i_l])<100:
|
||||
pass
|
||||
else:
|
||||
y_up_tab.append(np.min([y_min_main_line[i_l], y_min_main_tab[i_t] ]) )
|
||||
y_down_tab.append( np.max([ y_max_main_line[i_l],y_max_main_tab[i_t] ]) )
|
||||
|
||||
if len(y_up_tab)==0:
|
||||
y_up_tabs.append(y_min_main_tab[i_t])
|
||||
y_down_tabs.append(y_max_main_tab[i_t])
|
||||
else:
|
||||
y_up_tabs.append(np.min(y_up_tab))
|
||||
y_down_tabs.append(np.max(y_down_tab))
|
||||
else:
|
||||
y_down_tabs=[]
|
||||
y_up_tabs=[]
|
||||
pass
|
||||
except:
|
||||
y_down_tabs=[]
|
||||
y_up_tabs=[]
|
||||
|
||||
for ii in range(len(y_up_tabs)):
|
||||
image_box[y_up_tabs[ii]:y_down_tabs[ii],:,0]=pixel_table
|
||||
|
||||
image_revised_last[int(boxes[i][2]):int(boxes[i][3]),int(boxes[i][0]):int(boxes[i][1]),:]=image_box[:,:,:]
|
||||
else:
|
||||
for i in range(len(boxes)):
|
||||
|
||||
image_box=img_comm[int(boxes[i][2]):int(boxes[i][3]),int(boxes[i][0]):int(boxes[i][1]),:]
|
||||
image_revised_last[int(boxes[i][2]):int(boxes[i][3]),int(boxes[i][0]):int(boxes[i][1]),:]=image_box[:,:,:]
|
||||
|
||||
if num_col_classifier==1:
|
||||
img_tables_col_1=( image_revised_last[:,:,0]==pixel_table )*1
|
||||
img_tables_col_1=img_tables_col_1.astype(np.uint8)
|
||||
contours_table_col1,_=return_contours_of_image(img_tables_col_1)
|
||||
|
||||
_,_ ,_ , _, y_min_tab_col1 ,y_max_tab_col1, _= find_new_features_of_contours(contours_table_col1)
|
||||
|
||||
if len(y_min_tab_col1)>0:
|
||||
for ijv in range(len(y_min_tab_col1)):
|
||||
image_revised_last[int(y_min_tab_col1[ijv]):int(y_max_tab_col1[ijv]),:,:]=pixel_table
|
||||
return image_revised_last
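The core of the heuristic above is the y-extent merge: a detected table is stretched vertically to absorb any sufficiently tall separator-line contour whose vertical range overlaps it, and in single-column pages the result is further flattened into full-width bands. A stripped-down sketch of just the merge step (the data and the helper name are illustrative, and the overlap test is a simplification of the chained comparisons above):

```python
def merge_table_span_with_lines(table_span, line_spans, min_line_extent=100):
    """table_span/line_spans are (y_min, y_max) pairs; returns the widened table span."""
    y_up, y_down = table_span
    for ly0, ly1 in line_spans:
        if abs(ly1 - ly0) < min_line_extent:
            continue                      # too short, ignored (mirrors the < 100 check above)
        if ly1 <= y_up or ly0 >= y_down:
            continue                      # no vertical overlap with the table
        y_up, y_down = min(y_up, ly0), max(y_down, ly1)
    return y_up, y_down

print(merge_table_span_with_lines((400, 900), [(350, 520), (60, 130), (870, 1010)]))  # -> (350, 1010)
```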
def do_order_of_regions(self, *args, **kwargs):
|
||||
if self.full_layout:
|
||||
return self.do_order_of_regions_full_layout(*args, **kwargs)
|
||||
return self.do_order_of_regions_no_full_layout(*args, **kwargs)
|
||||
|
||||
def get_tables_from_model(self, img, num_col_classifier):
|
||||
img_org = np.copy(img)
|
||||
|
||||
img_height_h = img_org.shape[0]
|
||||
img_width_h = img_org.shape[1]
|
||||
|
||||
model_region, session_region = self.start_new_session_and_model(self.model_tables)
|
||||
|
||||
patches = False
|
||||
|
||||
if num_col_classifier < 4 and num_col_classifier > 2:
|
||||
prediction_table = self.do_prediction(patches, img, model_region)
|
||||
pre_updown = self.do_prediction(patches, cv2.flip(img[:,:,:], -1), model_region)
|
||||
pre_updown = cv2.flip(pre_updown, -1)
|
||||
|
||||
prediction_table[:,:,0][pre_updown[:,:,0]==1]=1
|
||||
prediction_table = prediction_table.astype(np.int16)
|
||||
|
||||
elif num_col_classifier ==2:
|
||||
height_ext = 0#int( img.shape[0]/4. )
|
||||
h_start = int(height_ext/2.)
|
||||
width_ext = int( img.shape[1]/8. )
|
||||
w_start = int(width_ext/2.)
|
||||
|
||||
height_new = img.shape[0]+height_ext
|
||||
width_new = img.shape[1]+width_ext
|
||||
|
||||
img_new =np.ones((height_new,width_new,img.shape[2])).astype(float)*0
|
||||
img_new[h_start:h_start+img.shape[0] ,w_start: w_start+img.shape[1], : ] =img[:,:,:]
|
||||
|
||||
prediction_ext = self.do_prediction(patches, img_new, model_region)
|
||||
|
||||
pre_updown = self.do_prediction(patches, cv2.flip(img_new[:,:,:], -1), model_region)
|
||||
pre_updown = cv2.flip(pre_updown, -1)
|
||||
|
||||
prediction_table = prediction_ext[h_start:h_start+img.shape[0] ,w_start: w_start+img.shape[1], : ]
|
||||
prediction_table_updown = pre_updown[h_start:h_start+img.shape[0] ,w_start: w_start+img.shape[1], : ]
|
||||
|
||||
prediction_table[:,:,0][prediction_table_updown[:,:,0]==1]=1
|
||||
prediction_table = prediction_table.astype(np.int16)
|
||||
|
||||
elif num_col_classifier ==1:
|
||||
height_ext = 0# int( img.shape[0]/4. )
|
||||
h_start = int(height_ext/2.)
|
||||
width_ext = int( img.shape[1]/4. )
|
||||
w_start = int(width_ext/2.)
|
||||
|
||||
height_new = img.shape[0]+height_ext
|
||||
width_new = img.shape[1]+width_ext
|
||||
|
||||
img_new =np.ones((height_new,width_new,img.shape[2])).astype(float)*0
|
||||
img_new[h_start:h_start+img.shape[0] ,w_start: w_start+img.shape[1], : ] =img[:,:,:]
|
||||
|
||||
prediction_ext = self.do_prediction(patches, img_new, model_region)
|
||||
|
||||
pre_updown = self.do_prediction(patches, cv2.flip(img_new[:,:,:], -1), model_region)
|
||||
pre_updown = cv2.flip(pre_updown, -1)
|
||||
|
||||
prediction_table = prediction_ext[h_start:h_start+img.shape[0] ,w_start: w_start+img.shape[1], : ]
|
||||
prediction_table_updown = pre_updown[h_start:h_start+img.shape[0] ,w_start: w_start+img.shape[1], : ]
|
||||
|
||||
prediction_table[:,:,0][prediction_table_updown[:,:,0]==1]=1
|
||||
prediction_table = prediction_table.astype(np.int16)
|
||||
|
||||
else:
|
||||
prediction_table = np.zeros(img.shape)
|
||||
img_w_half = int(img.shape[1]/2.)
|
||||
|
||||
pre1 = self.do_prediction(patches, img[:,0:img_w_half,:], model_region)
|
||||
pre2 = self.do_prediction(patches, img[:,img_w_half:,:], model_region)
|
||||
|
||||
pre_full = self.do_prediction(patches, img[:,:,:], model_region)
|
||||
|
||||
pre_updown = self.do_prediction(patches, cv2.flip(img[:,:,:], -1), model_region)
|
||||
pre_updown = cv2.flip(pre_updown, -1)
|
||||
|
||||
prediction_table_full_erode = cv2.erode(pre_full[:,:,0], KERNEL, iterations=4)
|
||||
prediction_table_full_erode = cv2.dilate(prediction_table_full_erode, KERNEL, iterations=4)
|
||||
|
||||
prediction_table_full_updown_erode = cv2.erode(pre_updown[:,:,0], KERNEL, iterations=4)
|
||||
prediction_table_full_updown_erode = cv2.dilate(prediction_table_full_updown_erode, KERNEL, iterations=4)
|
||||
|
||||
prediction_table[:,0:img_w_half,:] = pre1[:,:,:]
|
||||
prediction_table[:,img_w_half:,:] = pre2[:,:,:]
|
||||
|
||||
prediction_table[:,:,0][prediction_table_full_erode[:,:]==1]=1
|
||||
prediction_table[:,:,0][prediction_table_full_updown_erode[:,:]==1]=1
|
||||
prediction_table = prediction_table.astype(np.int16)
|
||||
|
||||
#prediction_table_erode = cv2.erode(prediction_table[:,:,0], self.kernel, iterations=6)
|
||||
#prediction_table_erode = cv2.dilate(prediction_table_erode, self.kernel, iterations=6)
|
||||
|
||||
prediction_table_erode = cv2.erode(prediction_table[:,:,0], KERNEL, iterations=20)
|
||||
prediction_table_erode = cv2.dilate(prediction_table_erode, KERNEL, iterations=20)
|
||||
|
||||
del model_region
|
||||
del session_region
|
||||
gc.collect()
|
||||
|
||||
|
||||
return prediction_table_erode.astype(np.int16)
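Across all column counts, get_tables_from_model follows the same pattern: run the table model on the page (padded or split as needed), run it again on a copy flipped in both axes, flip that prediction back, OR the two masks, and clean the union with matched erode/dilate passes. A condensed sketch of that flip-and-merge scheme, with a hypothetical predict() callable standing in for self.do_prediction(patches, img, model_region):

```python
import cv2
import numpy as np

KERNEL = np.ones((5, 5), np.uint8)  # assumption: a small square kernel, as used elsewhere in eynollah

def table_mask_with_flip_merge(img, predict, iterations=20):
    pred = predict(img)[:, :, 0]                                     # 0/1 table mask
    pred_updown = cv2.flip(predict(cv2.flip(img, -1)), -1)[:, :, 0]  # predict on flipped copy, flip back
    merged = ((pred == 1) | (pred_updown == 1)).astype(np.uint8)
    merged = cv2.erode(merged, KERNEL, iterations=iterations)        # drop thin false positives
    merged = cv2.dilate(merged, KERNEL, iterations=iterations)       # restore the surviving table blobs
    return merged.astype(np.int16)
```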
def run_graphics_and_columns(self, text_regions_p_1, num_col_classifier, num_column_is_classified, erosion_hurts):
|
||||
img_g = self.imread(grayscale=True, uint8=True)
|
||||
|
||||
|
@@ -1628,6 +1950,12 @@ class Eynollah:
|
|||
img_g3[:, :, 2] = img_g[:, :]
|
||||
|
||||
image_page, page_coord, cont_page = self.extract_page()
|
||||
|
||||
if self.tables:
|
||||
table_prediction = self.get_tables_from_model(image_page, num_col_classifier)
|
||||
else:
|
||||
table_prediction = (np.zeros((image_page.shape[0], image_page.shape[1]))).astype(np.int16)
|
||||
|
||||
if self.plotter:
|
||||
self.plotter.save_page_image(image_page)
|
||||
|
||||
|
@@ -1648,14 +1976,14 @@ class Eynollah:
|
|||
|
||||
|
||||
try:
|
||||
num_col, _ = find_num_col(img_only_regions, multiplier=6.0)
|
||||
num_col, _ = find_num_col(img_only_regions, num_col_classifier, self.tables, multiplier=6.0)
|
||||
num_col = num_col + 1
|
||||
if not num_column_is_classified:
|
||||
num_col_classifier = num_col + 1
|
||||
except Exception as why:
|
||||
self.logger.error(why)
|
||||
num_col = None
|
||||
return num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, text_regions_p_1, cont_page
|
||||
return num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, text_regions_p_1, cont_page, table_prediction
|
||||
|
||||
def run_enhancement(self):
|
||||
self.logger.info("resize and enhance image")
|
||||
|
@@ -1667,6 +1995,8 @@ class Eynollah:
|
|||
if self.allow_enhancement:
|
||||
img_res = img_res.astype(np.uint8)
|
||||
self.get_image_and_scales(img_org, img_res, scale)
|
||||
if self.plotter:
|
||||
self.plotter.save_enhanced_image(img_res)
|
||||
else:
|
||||
self.get_image_and_scales_after_enhancing(img_org, img_res)
|
||||
else:
|
||||
|
@@ -1699,7 +2029,7 @@ class Eynollah:
|
|||
self.logger.info("slope_deskew: %s", slope_deskew)
|
||||
return slope_deskew, slope_first
|
||||
|
||||
def run_marginals(self, image_page, textline_mask_tot_ea, mask_images, mask_lines, num_col_classifier, slope_deskew, text_regions_p_1):
|
||||
def run_marginals(self, image_page, textline_mask_tot_ea, mask_images, mask_lines, num_col_classifier, slope_deskew, text_regions_p_1, table_prediction):
|
||||
image_page_rotated, textline_mask_tot = image_page[:, :], textline_mask_tot_ea[:, :]
|
||||
textline_mask_tot[mask_images[:, :] == 1] = 0
|
||||
|
||||
|
@@ -1710,6 +2040,8 @@ class Eynollah:
|
|||
if num_col_classifier in (1, 2):
|
||||
try:
|
||||
regions_without_separators = (text_regions_p[:, :] == 1) * 1
|
||||
if self.tables:
|
||||
regions_without_separators[table_prediction==1] = 1
|
||||
regions_without_separators = regions_without_separators.astype(np.uint8)
|
||||
text_regions_p = get_marginals(rotate_image(regions_without_separators, slope_deskew), text_regions_p, num_col_classifier, slope_deskew, kernel=KERNEL)
|
||||
except Exception as e:
|
||||
|
@@ -1720,24 +2052,29 @@ class Eynollah:
|
|||
self.plotter.save_plot_of_layout_main(text_regions_p, image_page)
|
||||
return textline_mask_tot, text_regions_p, image_page_rotated
|
||||
|
||||
def run_boxes_no_full_layout(self, image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, erosion_hurts):
|
||||
def run_boxes_no_full_layout(self, image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, table_prediction, erosion_hurts):
|
||||
self.logger.debug('enter run_boxes_no_full_layout')
|
||||
if np.abs(slope_deskew) >= SLOPE_THRESHOLD:
|
||||
_, textline_mask_tot_d, text_regions_p_1_n = rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, slope_deskew)
|
||||
_, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, table_prediction, slope_deskew)
|
||||
text_regions_p_1_n = resize_image(text_regions_p_1_n, text_regions_p.shape[0], text_regions_p.shape[1])
|
||||
textline_mask_tot_d = resize_image(textline_mask_tot_d, text_regions_p.shape[0], text_regions_p.shape[1])
|
||||
table_prediction_n = resize_image(table_prediction_n, text_regions_p.shape[0], text_regions_p.shape[1])
|
||||
regions_without_separators_d = (text_regions_p_1_n[:, :] == 1) * 1
|
||||
if self.tables:
|
||||
regions_without_separators_d[table_prediction_n[:,:] == 1] = 1
|
||||
regions_without_separators = (text_regions_p[:, :] == 1) * 1 # ( (text_regions_p[:,:]==1) | (text_regions_p[:,:]==2) )*1 #self.return_regions_without_separators_new(text_regions_p[:,:,0],img_only_regions)
|
||||
if self.tables:
|
||||
regions_without_separators[table_prediction ==1 ] = 1
|
||||
if np.abs(slope_deskew) < SLOPE_THRESHOLD:
|
||||
text_regions_p_1_n = None
|
||||
textline_mask_tot_d = None
|
||||
regions_without_separators_d = None
|
||||
pixel_lines = 3
|
||||
if np.abs(slope_deskew) < SLOPE_THRESHOLD:
|
||||
_, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, pixel_lines)
|
||||
_, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, pixel_lines)
|
||||
|
||||
if np.abs(slope_deskew) >= SLOPE_THRESHOLD:
|
||||
_, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, pixel_lines)
|
||||
_, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, pixel_lines)
|
||||
K.clear_session()
|
||||
|
||||
self.logger.info("num_col_classifier: %s", num_col_classifier)
|
||||
|
@@ -1751,26 +2088,153 @@ class Eynollah:
|
|||
regions_without_separators_d = cv2.erode(regions_without_separators_d[:, :], KERNEL, iterations=6)
|
||||
t1 = time.time()
|
||||
if np.abs(slope_deskew) < SLOPE_THRESHOLD:
|
||||
boxes = return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_without_separators, matrix_of_lines_ch, num_col_classifier, erosion_hurts)
|
||||
boxes, peaks_neg_tot_tables = return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_without_separators, matrix_of_lines_ch, num_col_classifier, erosion_hurts, self.tables)
|
||||
boxes_d = None
|
||||
self.logger.debug("len(boxes): %s", len(boxes))
|
||||
|
||||
text_regions_p_tables = np.copy(text_regions_p)
|
||||
text_regions_p_tables[:,:][(table_prediction[:,:] == 1)] = 10
|
||||
pixel_line = 3
|
||||
img_revised_tab2 = self.add_tables_heuristic_to_layout(text_regions_p_tables, boxes, 0, splitter_y_new, peaks_neg_tot_tables, text_regions_p_tables , num_col_classifier , 0.000005, pixel_line)
|
||||
img_revised_tab2, contoures_tables = self.check_iou_of_bounding_box_and_contour_for_tables(img_revised_tab2,table_prediction, 10, num_col_classifier)
|
||||
else:
|
||||
boxes_d = return_boxes_of_images_by_order_of_reading_new(splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, num_col_classifier, erosion_hurts)
|
||||
boxes_d, peaks_neg_tot_tables_d = return_boxes_of_images_by_order_of_reading_new(splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, num_col_classifier, erosion_hurts, self.tables)
|
||||
boxes = None
|
||||
self.logger.debug("len(boxes): %s", len(boxes_d))
|
||||
|
||||
text_regions_p_tables = np.copy(text_regions_p_1_n)
|
||||
text_regions_p_tables =np.round(text_regions_p_tables)
|
||||
text_regions_p_tables[:,:][(text_regions_p_tables[:,:] != 3) & (table_prediction_n[:,:] == 1)] = 10
|
||||
|
||||
pixel_line = 3
|
||||
img_revised_tab2 = self.add_tables_heuristic_to_layout(text_regions_p_tables,boxes_d,0,splitter_y_new_d,peaks_neg_tot_tables_d,text_regions_p_tables, num_col_classifier, 0.000005, pixel_line)
|
||||
img_revised_tab2_d,_ = self.check_iou_of_bounding_box_and_contour_for_tables(img_revised_tab2,table_prediction_n, 10, num_col_classifier)
|
||||
|
||||
img_revised_tab2_d_rotated = rotate_image(img_revised_tab2_d, -slope_deskew)
|
||||
img_revised_tab2_d_rotated = np.round(img_revised_tab2_d_rotated)
|
||||
img_revised_tab2_d_rotated = img_revised_tab2_d_rotated.astype(np.int8)
|
||||
img_revised_tab2_d_rotated = resize_image(img_revised_tab2_d_rotated, text_regions_p.shape[0], text_regions_p.shape[1])
|
||||
|
||||
self.logger.info("detecting boxes took %ss", str(time.time() - t1))
|
||||
img_revised_tab = text_regions_p[:, :]
|
||||
|
||||
if self.tables:
|
||||
if np.abs(slope_deskew) < SLOPE_THRESHOLD:
|
||||
img_revised_tab = np.copy(img_revised_tab2[:,:,0])
|
||||
img_revised_tab[:,:][(text_regions_p[:,:] == 1) & (img_revised_tab[:,:] != 10)] = 1
|
||||
else:
|
||||
img_revised_tab = np.copy(text_regions_p[:,:])
|
||||
img_revised_tab[:,:][img_revised_tab[:,:] == 10] = 0
|
||||
img_revised_tab[:,:][img_revised_tab2_d_rotated[:,:,0] == 10] = 10
|
||||
|
||||
text_regions_p[:,:][text_regions_p[:,:]==10] = 0
|
||||
text_regions_p[:,:][img_revised_tab[:,:]==10] = 10
|
||||
else:
|
||||
img_revised_tab=text_regions_p[:,:]
|
||||
#img_revised_tab = text_regions_p[:, :]
|
||||
polygons_of_images = return_contours_of_interested_region(img_revised_tab, 2)
|
||||
|
||||
# plt.imshow(img_revised_tab)
|
||||
# plt.show()
|
||||
pixel_img = 4
|
||||
min_area_mar = 0.00001
|
||||
polygons_of_marginals = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar)
|
||||
|
||||
pixel_img = 10
|
||||
contours_tables = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar)
|
||||
|
||||
|
||||
K.clear_session()
|
||||
self.logger.debug('exit run_boxes_no_full_layout')
|
||||
return polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, boxes, boxes_d
|
||||
return polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, boxes, boxes_d, polygons_of_marginals, contours_tables
|
||||
|
||||
def run_boxes_full_layout(self, image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, img_only_regions):
|
||||
def run_boxes_full_layout(self, image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, img_only_regions, table_prediction, erosion_hurts):
|
||||
self.logger.debug('enter run_boxes_full_layout')
|
||||
|
||||
if self.tables:
|
||||
if np.abs(slope_deskew) >= SLOPE_THRESHOLD:
|
||||
image_page_rotated_n,textline_mask_tot_d,text_regions_p_1_n , table_prediction_n = rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, table_prediction, slope_deskew)
|
||||
|
||||
text_regions_p_1_n = resize_image(text_regions_p_1_n,text_regions_p.shape[0],text_regions_p.shape[1])
|
||||
textline_mask_tot_d = resize_image(textline_mask_tot_d,text_regions_p.shape[0],text_regions_p.shape[1])
|
||||
table_prediction_n = resize_image(table_prediction_n,text_regions_p.shape[0],text_regions_p.shape[1])
|
||||
|
||||
regions_without_separators_d=(text_regions_p_1_n[:,:] == 1)*1
|
||||
regions_without_separators_d[table_prediction_n[:,:] == 1] = 1
|
||||
else:
|
||||
text_regions_p_1_n = None
|
||||
textline_mask_tot_d = None
|
||||
regions_without_separators_d = None
|
||||
|
||||
regions_without_separators = (text_regions_p[:,:] == 1)*1#( (text_regions_p[:,:]==1) | (text_regions_p[:,:]==2) )*1 #self.return_regions_without_seperators_new(text_regions_p[:,:,0],img_only_regions)
|
||||
regions_without_separators[table_prediction == 1] = 1
|
||||
|
||||
pixel_lines=3
|
||||
if np.abs(slope_deskew) < SLOPE_THRESHOLD:
|
||||
num_col, peaks_neg_fin, matrix_of_lines_ch, splitter_y_new, seperators_closeup_n = find_number_of_columns_in_document(np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, pixel_lines)
|
||||
|
||||
if np.abs(slope_deskew) >= SLOPE_THRESHOLD:
|
||||
num_col_d, peaks_neg_fin_d, matrix_of_lines_ch_d, splitter_y_new_d, seperators_closeup_n_d = find_number_of_columns_in_document(np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2),num_col_classifier, self.tables, pixel_lines)
|
||||
K.clear_session()
|
||||
gc.collect()
|
||||
|
||||
if num_col_classifier>=3:
|
||||
if np.abs(slope_deskew) < SLOPE_THRESHOLD:
|
||||
regions_without_separators = regions_without_separators.astype(np.uint8)
|
||||
regions_without_separators = cv2.erode(regions_without_separators[:,:], KERNEL, iterations=6)
|
||||
|
||||
if np.abs(slope_deskew) >= SLOPE_THRESHOLD:
|
||||
regions_without_separators_d = regions_without_separators_d.astype(np.uint8)
|
||||
regions_without_separators_d = cv2.erode(regions_without_separators_d[:,:], KERNEL, iterations=6)
|
||||
else:
|
||||
pass
|
||||
|
||||
if np.abs(slope_deskew) < SLOPE_THRESHOLD:
|
||||
boxes, peaks_neg_tot_tables = return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_without_separators, matrix_of_lines_ch, num_col_classifier, erosion_hurts, self.tables)
|
||||
text_regions_p_tables = np.copy(text_regions_p)
|
||||
text_regions_p_tables[:,:][(table_prediction[:,:]==1)] = 10
|
||||
pixel_line = 3
|
||||
img_revised_tab2 = self.add_tables_heuristic_to_layout(text_regions_p_tables, boxes, 0, splitter_y_new, peaks_neg_tot_tables, text_regions_p_tables , num_col_classifier , 0.000005, pixel_line)
|
||||
|
||||
img_revised_tab2,contoures_tables = self.check_iou_of_bounding_box_and_contour_for_tables(img_revised_tab2, table_prediction, 10, num_col_classifier)
|
||||
|
||||
else:
|
||||
boxes_d, peaks_neg_tot_tables_d = return_boxes_of_images_by_order_of_reading_new(splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, num_col_classifier, erosion_hurts, self.tables)
|
||||
text_regions_p_tables = np.copy(text_regions_p_1_n)
|
||||
text_regions_p_tables = np.round(text_regions_p_tables)
|
||||
text_regions_p_tables[:,:][(text_regions_p_tables[:,:]!=3) & (table_prediction_n[:,:]==1)] = 10
|
||||
|
||||
pixel_line = 3
|
||||
img_revised_tab2 = self.add_tables_heuristic_to_layout(text_regions_p_tables,boxes_d,0,splitter_y_new_d,peaks_neg_tot_tables_d,text_regions_p_tables, num_col_classifier, 0.000005, pixel_line)
|
||||
|
||||
img_revised_tab2_d,_ = self.check_iou_of_bounding_box_and_contour_for_tables(img_revised_tab2, table_prediction_n, 10, num_col_classifier)
|
||||
img_revised_tab2_d_rotated = rotate_image(img_revised_tab2_d, -slope_deskew)
|
||||
|
||||
|
||||
img_revised_tab2_d_rotated = np.round(img_revised_tab2_d_rotated)
|
||||
img_revised_tab2_d_rotated = img_revised_tab2_d_rotated.astype(np.int8)
|
||||
|
||||
img_revised_tab2_d_rotated = resize_image(img_revised_tab2_d_rotated, text_regions_p.shape[0], text_regions_p.shape[1])
|
||||
|
||||
|
||||
if np.abs(slope_deskew) < 0.13:
|
||||
img_revised_tab = np.copy(img_revised_tab2[:,:,0])
|
||||
else:
|
||||
img_revised_tab = np.copy(text_regions_p[:,:])
|
||||
img_revised_tab[:,:][img_revised_tab[:,:] == 10] = 0
|
||||
img_revised_tab[:,:][img_revised_tab2_d_rotated[:,:,0] == 10] = 10
|
||||
|
||||
|
||||
##img_revised_tab=img_revised_tab2[:,:,0]
|
||||
#img_revised_tab=text_regions_p[:,:]
|
||||
text_regions_p[:,:][text_regions_p[:,:]==10] = 0
|
||||
text_regions_p[:,:][img_revised_tab[:,:]==10] = 10
|
||||
#img_revised_tab[img_revised_tab2[:,:,0]==10] =10
|
||||
|
||||
pixel_img = 4
|
||||
min_area_mar = 0.00001
|
||||
polygons_of_marginals = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar)
|
||||
|
||||
pixel_img = 10
|
||||
contours_tables = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar)
|
||||
|
||||
# set first model with second model
|
||||
text_regions_p[:, :][text_regions_p[:, :] == 2] = 5
|
||||
text_regions_p[:, :][text_regions_p[:, :] == 3] = 6
|
||||
|
@@ -1811,26 +2275,27 @@ class Eynollah:
|
|||
text_regions_p[:, :][regions_fully_np[:, :, 0] == 4] = 4
|
||||
#plt.imshow(text_regions_p)
|
||||
#plt.show()
|
||||
|
||||
####if not self.tables:
|
||||
if np.abs(slope_deskew) >= SLOPE_THRESHOLD:
|
||||
_, textline_mask_tot_d, text_regions_p_1_n, regions_fully_n = rotation_not_90_func_full_layout(image_page, textline_mask_tot, text_regions_p, regions_fully, slope_deskew)
|
||||
|
||||
text_regions_p_1_n = resize_image(text_regions_p_1_n, text_regions_p.shape[0], text_regions_p.shape[1])
|
||||
textline_mask_tot_d = resize_image(textline_mask_tot_d, text_regions_p.shape[0], text_regions_p.shape[1])
|
||||
regions_fully_n = resize_image(regions_fully_n, text_regions_p.shape[0], text_regions_p.shape[1])
|
||||
if not self.tables:
|
||||
regions_without_separators_d = (text_regions_p_1_n[:, :] == 1) * 1
|
||||
else:
|
||||
text_regions_p_1_n = None
|
||||
textline_mask_tot_d = None
|
||||
regions_without_separators_d = None
|
||||
|
||||
regions_without_separators = (text_regions_p[:, :] == 1) * 1 # ( (text_regions_p[:,:]==1) | (text_regions_p[:,:]==2) )*1 #self.return_regions_without_separators_new(text_regions_p[:,:,0],img_only_regions)
|
||||
if not self.tables:
|
||||
regions_without_separators = (text_regions_p[:, :] == 1) * 1
|
||||
|
||||
K.clear_session()
|
||||
img_revised_tab = np.copy(text_regions_p[:, :])
|
||||
polygons_of_images = return_contours_of_interested_region(img_revised_tab, 5)
|
||||
self.logger.debug('exit run_boxes_full_layout')
|
||||
return polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, regions_fully, regions_without_separators
|
||||
return polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, regions_fully, regions_without_separators, polygons_of_marginals, contours_tables
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
|
@@ -1848,7 +2313,7 @@ class Eynollah:
|
|||
self.logger.info("Textregion detection took %ss ", str(time.time() - t1))
|
||||
|
||||
t1 = time.time()
|
||||
num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, text_regions_p_1, cont_page = \
|
||||
num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, text_regions_p_1, cont_page, table_prediction = \
|
||||
self.run_graphics_and_columns(text_regions_p_1, num_col_classifier, num_column_is_classified, erosion_hurts)
|
||||
self.logger.info("Graphics detection took %ss ", str(time.time() - t1))
|
||||
self.logger.info('cont_page %s', cont_page)
|
||||
|
@@ -1867,21 +2332,17 @@ class Eynollah:
|
|||
slope_deskew, slope_first = self.run_deskew(textline_mask_tot_ea)
|
||||
self.logger.info("deskewing took %ss", str(time.time() - t1))
|
||||
t1 = time.time()
|
||||
#plt.imshow(table_prediction)
|
||||
#plt.show()
|
||||
|
||||
textline_mask_tot, text_regions_p, image_page_rotated = self.run_marginals(image_page, textline_mask_tot_ea, mask_images, mask_lines, num_col_classifier, slope_deskew, text_regions_p_1)
|
||||
textline_mask_tot, text_regions_p, image_page_rotated = self.run_marginals(image_page, textline_mask_tot_ea, mask_images, mask_lines, num_col_classifier, slope_deskew, text_regions_p_1, table_prediction)
|
||||
self.logger.info("detection of marginals took %ss", str(time.time() - t1))
|
||||
t1 = time.time()
|
||||
|
||||
if not self.full_layout:
|
||||
polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, boxes, boxes_d = self.run_boxes_no_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, erosion_hurts)
|
||||
|
||||
pixel_img = 4
|
||||
min_area_mar = 0.00001
|
||||
polygons_of_marginals = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar)
|
||||
polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, boxes, boxes_d, polygons_of_marginals, contours_tables = self.run_boxes_no_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, table_prediction, erosion_hurts)
|
||||
|
||||
if self.full_layout:
|
||||
polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, regions_fully, regions_without_separators = self.run_boxes_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, img_only_regions)
|
||||
|
||||
polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, regions_fully, regions_without_separators, polygons_of_marginals, contours_tables = self.run_boxes_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, img_only_regions, table_prediction, erosion_hurts)
|
||||
text_only = ((img_revised_tab[:, :] == 1)) * 1
|
||||
if np.abs(slope_deskew) >= SLOPE_THRESHOLD:
|
||||
text_only_d = ((text_regions_p_1_n[:, :] == 1)) * 1
|
||||
|
@@ -2018,7 +2479,6 @@ class Eynollah:
|
|||
|
||||
K.clear_session()
|
||||
|
||||
polygons_of_tabels = []
|
||||
pixel_img = 4
|
||||
polygons_of_drop_capitals = return_contours_of_interested_region_by_min_size(text_regions_p, pixel_img)
|
||||
all_found_texline_polygons = adhere_drop_capital_region_into_corresponding_textline(text_regions_p, polygons_of_drop_capitals, contours_only_text_parent, contours_only_text_parent_h, all_box_coord, all_box_coord_h, all_found_texline_polygons, all_found_texline_polygons_h, kernel=KERNEL, curved_line=self.curved_line)
|
||||
|
@@ -2026,16 +2486,17 @@ class Eynollah:
|
|||
# print(len(contours_only_text_parent_h),len(contours_only_text_parent_h_d_ordered),'contours_only_text_parent_h')
|
||||
pixel_lines = 6
|
||||
|
||||
|
||||
if not self.headers_off:
|
||||
if np.abs(slope_deskew) < SLOPE_THRESHOLD:
|
||||
num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, pixel_lines, contours_only_text_parent_h)
|
||||
num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, pixel_lines, contours_only_text_parent_h)
|
||||
else:
|
||||
_, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, pixel_lines, contours_only_text_parent_h_d_ordered)
|
||||
_, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, pixel_lines, contours_only_text_parent_h_d_ordered)
|
||||
elif self.headers_off:
|
||||
if np.abs(slope_deskew) < SLOPE_THRESHOLD:
|
||||
num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, pixel_lines)
|
||||
num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, pixel_lines)
|
||||
else:
|
||||
_, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, pixel_lines)
|
||||
_, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, self.tables, pixel_lines)
|
||||
|
||||
# print(peaks_neg_fin,peaks_neg_fin_d,'num_col2')
|
||||
# print(splitter_y_new,splitter_y_new_d,'num_col_classifier')
|
||||
|
@@ -2045,22 +2506,42 @@ class Eynollah:
|
|||
if np.abs(slope_deskew) < SLOPE_THRESHOLD:
|
||||
regions_without_separators = regions_without_separators.astype(np.uint8)
|
||||
regions_without_separators = cv2.erode(regions_without_separators[:, :], KERNEL, iterations=6)
|
||||
random_pixels_for_image = np.random.randn(regions_without_separators.shape[0], regions_without_separators.shape[1])
|
||||
random_pixels_for_image[random_pixels_for_image < -0.5] = 0
|
||||
random_pixels_for_image[random_pixels_for_image != 0] = 1
|
||||
regions_without_separators[(random_pixels_for_image[:, :] == 1) & (text_regions_p[:, :] == 5)] = 1
|
||||
|
||||
#regions_without_separators_0 = regions_without_separators[:, :].sum(axis=0)
|
||||
#meda_n_updown = regions_without_separators_0[len(regions_without_separators_0) :: -1]
|
||||
#first_nonzero = next((i for i, x in enumerate(regions_without_separators_0) if x), 0)
|
||||
#last_nonzero = next((i for i, x in enumerate(meda_n_updown) if x), 0)
|
||||
#last_nonzero = len(regions_without_separators_0) - last_nonzero
|
||||
|
||||
#random_pixels_for_image = np.random.randn(regions_without_separators.shape[0], regions_without_separators.shape[1])
|
||||
#random_pixels_for_image[random_pixels_for_image < -0.5] = 0
|
||||
#random_pixels_for_image[random_pixels_for_image != 0] = 1
|
||||
#regions_without_separators[(random_pixels_for_image[:, :] == 1) & (text_regions_p[:, :] == 5)] = 1
|
||||
|
||||
#regions_without_separators[:, 0:first_nonzero] = 0
|
||||
#regions_without_separators[:, last_nonzero:] = 0
|
||||
else:
|
||||
regions_without_separators_d = regions_without_separators_d.astype(np.uint8)
|
||||
regions_without_separators_d = cv2.erode(regions_without_separators_d[:, :], KERNEL, iterations=6)
|
||||
random_pixels_for_image = np.random.randn(regions_without_separators_d.shape[0], regions_without_separators_d.shape[1])
|
||||
random_pixels_for_image[random_pixels_for_image < -0.5] = 0
|
||||
random_pixels_for_image[random_pixels_for_image != 0] = 1
|
||||
regions_without_separators_d[(random_pixels_for_image[:, :] == 1) & (text_regions_p_1_n[:, :] == 5)] = 1
|
||||
|
||||
#regions_without_separators_0 = regions_without_separators_d[:, :].sum(axis=0)
|
||||
#meda_n_updown = regions_without_separators_0[len(regions_without_separators_0) :: -1]
|
||||
#first_nonzero = next((i for i, x in enumerate(regions_without_separators_0) if x), 0)
|
||||
#last_nonzero = next((i for i, x in enumerate(meda_n_updown) if x), 0)
|
||||
#last_nonzero = len(regions_without_separators_0) - last_nonzero
|
||||
|
||||
#random_pixels_for_image = np.random.randn(regions_without_separators_d.shape[0], regions_without_separators_d.shape[1])
|
||||
#random_pixels_for_image[random_pixels_for_image < -0.5] = 0
|
||||
#random_pixels_for_image[random_pixels_for_image != 0] = 1
|
||||
##regions_without_separators_d[(random_pixels_for_image[:, :] == 1) & (text_regions_p_1_n[:, :] == 5)] = 1
|
||||
|
||||
#regions_without_separators_d[:, 0:first_nonzero] = 0
|
||||
#regions_without_separators_d[:, last_nonzero:] = 0
|
||||
|
||||
if np.abs(slope_deskew) < SLOPE_THRESHOLD:
|
||||
boxes = return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_without_separators, matrix_of_lines_ch, num_col_classifier, erosion_hurts)
|
||||
boxes, peaks_neg_tot_tables = return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_without_separators, matrix_of_lines_ch, num_col_classifier, erosion_hurts, self.tables)
|
||||
else:
|
||||
boxes_d = return_boxes_of_images_by_order_of_reading_new(splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, num_col_classifier, erosion_hurts)
|
||||
boxes_d, peaks_neg_tot_tables_d = return_boxes_of_images_by_order_of_reading_new(splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, num_col_classifier, erosion_hurts, self.tables)
|
||||
|
||||
if self.plotter:
|
||||
self.plotter.write_images_into_directory(polygons_of_images, image_page)
|
||||
|
@@ -2071,7 +2552,7 @@ class Eynollah:
|
|||
else:
|
||||
order_text_new, id_of_texts_tot = self.do_order_of_regions(contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered, boxes_d, textline_mask_tot_d)
|
||||
|
||||
pcgts = self.writer.build_pagexml_full_layout(contours_only_text_parent, contours_only_text_parent_h, page_coord, order_text_new, id_of_texts_tot, all_found_texline_polygons, all_found_texline_polygons_h, all_box_coord, all_box_coord_h, polygons_of_images, polygons_of_tabels, polygons_of_drop_capitals, polygons_of_marginals, all_found_texline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, cont_page, polygons_lines_xml)
|
||||
pcgts = self.writer.build_pagexml_full_layout(contours_only_text_parent, contours_only_text_parent_h, page_coord, order_text_new, id_of_texts_tot, all_found_texline_polygons, all_found_texline_polygons_h, all_box_coord, all_box_coord_h, polygons_of_images, contours_tables, polygons_of_drop_capitals, polygons_of_marginals, all_found_texline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, cont_page, polygons_lines_xml)
|
||||
self.logger.info("Job done in %ss", str(time.time() - t0))
|
||||
return pcgts
|
||||
else:
|
||||
|
@@ -2081,6 +2562,6 @@ class Eynollah:
|
|||
else:
|
||||
contours_only_text_parent_d_ordered = list(np.array(contours_only_text_parent_d_ordered)[index_by_text_par_con])
|
||||
order_text_new, id_of_texts_tot = self.do_order_of_regions(contours_only_text_parent_d_ordered, contours_only_text_parent_h, boxes_d, textline_mask_tot_d)
|
||||
pcgts = self.writer.build_pagexml_no_full_layout(txt_con_org, page_coord, order_text_new, id_of_texts_tot, all_found_texline_polygons, all_box_coord, polygons_of_images, polygons_of_marginals, all_found_texline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_xml)
|
||||
pcgts = self.writer.build_pagexml_no_full_layout(txt_con_org, page_coord, order_text_new, id_of_texts_tot, all_found_texline_polygons, all_box_coord, polygons_of_images, polygons_of_marginals, all_found_texline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_xml, contours_tables)
|
||||
self.logger.info("Job done in %ss", str(time.time() - t0))
|
||||
return pcgts
@@ -17,6 +17,7 @@ class EynollahPlotter():
    def __init__(
        self,
        *,
        dir_out,
        dir_of_all,
        dir_of_deskewed,
        dir_of_layout,

@@ -26,6 +27,7 @@ class EynollahPlotter():
        scale_x=1,
        scale_y=1,
    ):
        self.dir_out = dir_out
        self.dir_of_all = dir_of_all
        self.dir_of_layout = dir_of_layout
        self.dir_of_cropped_images = dir_of_cropped_images

@@ -125,6 +127,8 @@ class EynollahPlotter():
    def save_page_image(self, image_page):
        if self.dir_of_all is not None:
            cv2.imwrite(os.path.join(self.dir_of_all, self.image_filename_stem + "_page.png"), image_page)
    def save_enhanced_image(self, img_res):
        cv2.imwrite(os.path.join(self.dir_out, self.image_filename_stem + "_enhanced.png"), img_res)

    def save_plot_of_textline_density(self, img_patch_org):
        if self.dir_of_all is not None:
@@ -360,7 +360,7 @@ def find_num_col_deskew(regions_without_separators, sigma_, multiplier=3.8):
    return np.std(z)


def find_num_col(regions_without_separators, multiplier=3.8):
def find_num_col(regions_without_separators, num_col_classifier, tables, multiplier=3.8):
    regions_without_separators_0 = regions_without_separators[:, :].sum(axis=0)
    ##plt.plot(regions_without_separators_0)
    ##plt.show()

@@ -417,6 +417,19 @@ def find_num_col(regions_without_separators, multiplier=3.8):
    peaks_neg_fin = peaks_neg[(interest_neg < grenze)]
    # interest_neg_fin=interest_neg[(interest_neg<grenze)]

    if not tables:
        if ( num_col_classifier - ( (len(interest_neg_fin))+1 ) ) >= 3:
            index_sort_interest_neg_fin= np.argsort(interest_neg_fin)
            peaks_neg_sorted = np.array(peaks_neg)[index_sort_interest_neg_fin]
            interest_neg_fin_sorted = np.array(interest_neg_fin)[index_sort_interest_neg_fin]

            if len(index_sort_interest_neg_fin)>=num_col_classifier:
                peaks_neg_fin = list( peaks_neg_sorted[:num_col_classifier] )
                interest_neg_fin = list( interest_neg_fin_sorted[:num_col_classifier] )
            else:
                peaks_neg_fin = peaks_neg[:]
                interest_neg_fin = interest_neg[:]

    num_col = (len(interest_neg_fin)) + 1

    # print(peaks_neg_fin,'peaks_neg_fin')

@@ -489,9 +502,9 @@ def find_num_col(regions_without_separators, multiplier=3.8):
        num_col = 1
        peaks_neg_true = []

    diff_peaks_annormal = diff_peaks[diff_peaks < 360]
    diff_peaks_abnormal = diff_peaks[diff_peaks < 360]

    if len(diff_peaks_annormal) > 0:
    if len(diff_peaks_abnormal) > 0:
        arg_help = np.array(range(len(diff_peaks)))
        arg_help_ann = arg_help[diff_peaks < 360]
@@ -1248,7 +1261,7 @@ def return_points_with_boundies(peaks_neg_fin, first_point, last_point):
    peaks_neg_tot.append(last_point)
    return peaks_neg_tot

def find_number_of_columns_in_document(region_pre_p, num_col_classifier, pixel_lines, contours_h=None):
def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, pixel_lines, contours_h=None):

    separators_closeup=( (region_pre_p[:,:,:]==pixel_lines))*1
@@ -1561,7 +1574,7 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, pixel_l
        #regions_without_separators_tile=cv2.erode(regions_without_separators_tile,kernel,iterations = 3)
        #
        try:
            num_col, peaks_neg_fin = find_num_col(regions_without_separators_tile,multiplier=7.0)
            num_col, peaks_neg_fin = find_num_col(regions_without_separators_tile, num_col_classifier, tables, multiplier=7.0)
        except:
            num_col = 0
            peaks_neg_fin = []

@@ -1583,9 +1596,9 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, pixel_l
    return num_col_fin, peaks_neg_fin_fin,matrix_of_lines_ch,splitter_y_new,separators_closeup_n


def return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_without_separators, matrix_of_lines_ch, num_col_classifier, erosion_hurts):
def return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_without_separators, matrix_of_lines_ch, num_col_classifier, erosion_hurts, tables):
    boxes=[]

    peaks_neg_tot_tables = []

    for i in range(len(splitter_y_new)-1):
        #print(splitter_y_new[i],splitter_y_new[i+1])
@@ -1599,20 +1612,21 @@ def return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_witho

        try:
            if erosion_hurts:
-               num_col, peaks_neg_fin=find_num_col(regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:],multiplier=6.)
+               num_col, peaks_neg_fin=find_num_col(regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:], num_col_classifier, tables, multiplier=6.)
            else:
-               num_col, peaks_neg_fin=find_num_col(regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:],multiplier=7.)
+               num_col, peaks_neg_fin=find_num_col(regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:],num_col_classifier, tables, multiplier=7.)
        except:
            peaks_neg_fin=[]
            num_col = 0

        try:
            peaks_neg_fin_org=np.copy(peaks_neg_fin)
-           if (len(peaks_neg_fin)+1)<num_col_classifier:
+           if (len(peaks_neg_fin)+1)<num_col_classifier or num_col_classifier==6:
                #print('burda')

                if len(peaks_neg_fin)==0:
-                   num_col, peaks_neg_fin=find_num_col(regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:],multiplier=3.)
+                   num_col, peaks_neg_fin=find_num_col(regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:],num_col_classifier, tables, multiplier=3.)
                peaks_neg_fin_early=[]
                peaks_neg_fin_early.append(0)
                #print(peaks_neg_fin,'peaks_neg_fin')
@@ -1628,12 +1642,12 @@ def return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_witho
                    #plt.plot(regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),peaks_neg_fin_early[i_n]:peaks_neg_fin_early[i_n+1]].sum(axis=0) )
                    #plt.show()
                    try:
-                       num_col, peaks_neg_fin1=find_num_col(regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),peaks_neg_fin_early[i_n]:peaks_neg_fin_early[i_n+1]],multiplier=7.)
+                       num_col, peaks_neg_fin1=find_num_col(regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),peaks_neg_fin_early[i_n]:peaks_neg_fin_early[i_n+1]],num_col_classifier,tables, multiplier=7.)
                    except:
                        peaks_neg_fin1=[]

                    try:
-                       num_col, peaks_neg_fin2=find_num_col(regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),peaks_neg_fin_early[i_n]:peaks_neg_fin_early[i_n+1]],multiplier=5.)
+                       num_col, peaks_neg_fin2=find_num_col(regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),peaks_neg_fin_early[i_n]:peaks_neg_fin_early[i_n+1]],num_col_classifier,tables, multiplier=5.)
                    except:
                        peaks_neg_fin2=[]
@@ -1679,6 +1693,8 @@ def return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_witho

            peaks_neg_tot=return_points_with_boundies(peaks_neg_fin,0, regions_without_separators[:,:].shape[1])

+           peaks_neg_tot_tables.append(peaks_neg_tot)
+
            reading_order_type,x_starting,x_ending,y_type_2,y_diff_type_2,y_lines_without_mother,x_start_without_mother,x_end_without_mother,there_is_sep_with_child,y_lines_with_child_without_mother,x_start_with_child_without_mother,x_end_with_child_without_mother=return_x_start_end_mothers_childs_and_type_of_reading_order(x_min_hor_some,x_max_hor_some,cy_hor_some,peaks_neg_tot,cy_hor_diff)
@@ -2236,5 +2252,4 @@ def return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_witho

            #else:
                #boxes.append([ 0, regions_without_separators[:,:].shape[1] ,splitter_y_new[i],splitter_y_new[i+1]])

-   return boxes
+   return boxes, peaks_neg_tot_tables
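With these hunks, the `tables` flag and `num_col_classifier` are threaded through every `find_num_col` call site, and `return_boxes_of_images_by_order_of_reading_new` additionally returns `peaks_neg_tot_tables`: one list per horizontal band, holding that band's column-gutter x-positions padded with the band's outer borders (the `return_points_with_boundies` step shown above). A standalone sketch of that padding step (the helper name is illustrative):

```python
def with_boundaries(peaks_neg_fin, first_point, last_point):
    # pad the detected gutter positions with the left and right border of the band
    return [first_point] + [int(p) for p in peaks_neg_fin] + [last_point]

print(with_boundaries([310, 620], 0, 900))  # -> [0, 310, 620, 900]
```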
@@ -109,7 +109,21 @@ def find_new_features_of_contours(contours_main):
    # dis_x=np.abs(x_max_main-x_min_main)

    return cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin

+def find_features_of_contours(contours_main):
+
+   areas_main=np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))])
+   M_main=[cv2.moments(contours_main[j]) for j in range(len(contours_main))]
+   cx_main=[(M_main[j]['m10']/(M_main[j]['m00']+1e-32)) for j in range(len(M_main))]
+   cy_main=[(M_main[j]['m01']/(M_main[j]['m00']+1e-32)) for j in range(len(M_main))]
+   x_min_main=np.array([np.min(contours_main[j][:,0,0]) for j in range(len(contours_main))])
+   x_max_main=np.array([np.max(contours_main[j][:,0,0]) for j in range(len(contours_main))])
+
+   y_min_main=np.array([np.min(contours_main[j][:,0,1]) for j in range(len(contours_main))])
+   y_max_main=np.array([np.max(contours_main[j][:,0,1]) for j in range(len(contours_main))])
+
+   return y_min_main, y_max_main

def return_parent_contours(contours, hierarchy):
    contours_parent = [contours[i] for i in range(len(contours)) if hierarchy[0][i][3] == -1]
    return contours_parent
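The new `find_features_of_contours` helper computes several per-contour statistics but only returns the vertical extent (`y_min_main`, `y_max_main`) of each contour, which appears to be what the table-detection code needs. A minimal usage sketch; the import path is an assumption:

```python
import numpy as np
import cv2
from qurator.eynollah.utils.contour import find_features_of_contours  # assumed module path

# one filled rectangular region as a stand-in for a detected table candidate
mask = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(mask, (10, 20), (60, 70), 255, -1)
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4 signature

y_min, y_max = find_features_of_contours(contours)
print(y_min, y_max)  # roughly [20] and [70]: the vertical extent of the region
```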
@@ -142,7 +142,7 @@ def adhere_drop_capital_region_into_corresponding_textline(
            # areas_main=np.array([cv2.contourArea(all_found_texline_polygons[int(region_final)][0][j] ) for j in range(len(all_found_texline_polygons[int(region_final)]))])

            # cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_texline_polygons[int(region_final)])

            try:
                cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_texline_polygons[int(region_final)])
                # print(all_box_coord[j_cont])
                # print(cx_t)
@@ -187,8 +187,9 @@ def adhere_drop_capital_region_into_corresponding_textline(
                # print(np.shape(all_found_texline_polygons[int(region_final)][arg_min]))
                ##contours_biggest=contours_biggest.reshape(np.shape(contours_biggest)[0],np.shape(contours_biggest)[2])
                all_found_texline_polygons[int(region_final)][arg_min] = contours_biggest
            except:
                pass

            # print(cx_t,'print')
            try:
                # print(all_found_texline_polygons[j_cont][0])
                cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_texline_polygons[int(region_final)])
@@ -52,20 +52,21 @@ def rotate_image_different( img, slope):
    img_rotation = cv2.warpAffine(img, rotation_matrix, (num_cols, num_rows))
    return img_rotation

-def rotate_max_area(image, rotated, rotated_textline, rotated_layout, angle):
+def rotate_max_area(image, rotated, rotated_textline, rotated_layout, rotated_table_prediction, angle):
    wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], math.radians(angle))
    h, w, _ = rotated.shape
    y1 = h // 2 - int(hr / 2)
    y2 = y1 + int(hr)
    x1 = w // 2 - int(wr / 2)
    x2 = x1 + int(wr)
-   return rotated[y1:y2, x1:x2], rotated_textline[y1:y2, x1:x2], rotated_layout[y1:y2, x1:x2]
+   return rotated[y1:y2, x1:x2], rotated_textline[y1:y2, x1:x2], rotated_layout[y1:y2, x1:x2], rotated_table_prediction[y1:y2, x1:x2]

-def rotation_not_90_func(img, textline, text_regions_p_1, thetha):
+def rotation_not_90_func(img, textline, text_regions_p_1, table_prediction, thetha):
    rotated = imutils.rotate(img, thetha)
    rotated_textline = imutils.rotate(textline, thetha)
    rotated_layout = imutils.rotate(text_regions_p_1, thetha)
-   return rotate_max_area(img, rotated, rotated_textline, rotated_layout, thetha)
+   rotated_table_prediction = imutils.rotate(table_prediction, thetha)
+   return rotate_max_area(img, rotated, rotated_textline, rotated_layout, rotated_table_prediction, thetha)

def rotation_not_90_func_full_layout(img, textline, text_regions_p_1, text_regions_p_fully, thetha):
    rotated = imutils.rotate(img, thetha)
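The rotation helpers now carry the table prediction raster through the same rotate-and-crop path as the page image, the textline map and the layout map, so all four stay pixel-aligned after deskewing. A hedged usage sketch (the import path is assumed; the arrays are dummies):

```python
import numpy as np
from qurator.eynollah.utils.rotate import rotation_not_90_func  # assumed module path

img = np.zeros((200, 300, 3), dtype=np.uint8)            # page image (RGB)
textline = np.zeros((200, 300), dtype=np.uint8)          # textline prediction
layout = np.zeros((200, 300), dtype=np.uint8)            # region/layout prediction
table_prediction = np.zeros((200, 300), dtype=np.uint8)  # table prediction

img_d, textline_d, layout_d, table_d = rotation_not_90_func(
    img, textline, layout, table_prediction, 2.5)
print(table_d.shape[:2] == layout_d.shape[:2])  # True: identical crop for every raster
```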
@@ -141,7 +141,7 @@ class EynollahXmlWriter():
        with open(out_fname, 'w') as f:
            f.write(to_xml(pcgts))

-   def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_texline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals, all_found_texline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml):
+   def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_texline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals, all_found_texline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, found_polygons_tables):
        self.logger.debug('enter build_pagexml_no_full_layout')

        # create the file structure
@@ -189,6 +189,16 @@ class EynollahXmlWriter():
                points_co += str(int((polygons_lines_to_be_written_in_xml[mm][lmm,0,1] ) / self.scale_y))
                points_co += ' '
            sep_hor.get_Coords().set_points(points_co[:-1])
+       for mm in range(len(found_polygons_tables)):
+           tab_region = TableRegionType(id=counter.next_region_id, Coords=CoordsType())
+           page.add_TableRegion(tab_region)
+           points_co = ''
+           for lmm in range(len(found_polygons_tables[mm])):
+               points_co += str(int((found_polygons_tables[mm][lmm,0,0] + page_coord[2]) / self.scale_x))
+               points_co += ','
+               points_co += str(int((found_polygons_tables[mm][lmm,0,1] + page_coord[0]) / self.scale_y))
+               points_co += ' '
+           tab_region.get_Coords().set_points(points_co[:-1])

        return pcgts
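`build_pagexml_no_full_layout` now takes the detected table polygons and serializes each one as a PAGE-XML `TableRegion`: every contour point is offset by the page crop and divided by the scale factors, then joined into the `"x,y x,y ..."` string expected by `Coords`. A standalone sketch of just that coordinate serialization (the helper name and the toy values are illustrative):

```python
import numpy as np

def polygon_to_points(polygon, page_coord, scale_x, scale_y):
    # polygon: OpenCV-style contour of shape (N, 1, 2)
    parts = []
    for p in range(len(polygon)):
        x = int((polygon[p, 0, 0] + page_coord[2]) / scale_x)
        y = int((polygon[p, 0, 1] + page_coord[0]) / scale_y)
        parts.append(f"{x},{y}")
    return " ".join(parts)

poly = np.array([[[10, 20]], [[110, 20]], [[110, 220]], [[10, 220]]])
print(polygon_to_points(poly, page_coord=(50, 0, 30, 0), scale_x=2.0, scale_y=2.0))
# -> "20,35 70,35 70,135 20,135"
```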