@@ -1151,6 +1151,107 @@ class Eynollah:
                #label_scaled_padded[h_start:h_start+h_n, w_start:w_start+w_n,:] = label_res[:,:,:]

        return img_scaled_padded#, label_scaled_padded
    def do_prediction_new_concept_scatter_nd(self, patches, img, model, n_batch_inference=1, marginal_of_patch_percent=0.1, thresholding_for_some_classes_in_light_version=False, thresholding_for_artificial_class_in_light_version=False):
        self.logger.debug("enter do_prediction_new_concept_scatter_nd")
        img_height_model = model.layers[-1].output_shape[1]
        img_width_model = model.layers[-1].output_shape[2]
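        # The patch geometry comes from the model's output layer, so inputs
        # are either resized to exactly one patch (patches=False) or tiled
        # into overlapping patches of that size.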
        if not patches:
            img_h_page = img.shape[0]
            img_w_page = img.shape[1]
            img = img / 255.0
            img = resize_image(img, img_height_model, img_width_model)
            label_p_pred = model.predict(img[np.newaxis], verbose=0)
            seg = np.argmax(label_p_pred, axis=3)[0]
            if thresholding_for_artificial_class_in_light_version:
                #seg_text = label_p_pred[0,:,:,1]
                #seg_text[seg_text<0.2] = 0
                #seg_text[seg_text>0] = 1
                #seg[seg_text==1] = 1

                seg_art = label_p_pred[0,:,:,4]
                seg_art[seg_art<0.2] = 0
                seg_art[seg_art>0] = 1
                seg[seg_art==1] = 4
            seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2)
            prediction_true = resize_image(seg_color, img_h_page, img_w_page)
            prediction_true = prediction_true.astype(np.uint8)
            return prediction_true
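
        # Patch-wise path: make sure the image is at least one patch large in
        # each dimension before tiling it.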
        if img.shape[0] < img_height_model:
            img = resize_image(img, img_height_model, img.shape[1])
        if img.shape[1] < img_width_model:
            img = resize_image(img, img.shape[0], img_width_model)
self.logger.debug("Patch size: %sx%s", img_height_model, img_width_model)
|
|
|
|
|
|
|
|
##margin = int(marginal_of_patch_percent * img_height_model)
|
|
|
|
|
|
|
|
#width_mid = img_width_model - 2 * margin
|
|
|
|
|
|
|
|
#height_mid = img_height_model - 2 * margin
|
|
|
|
|
|
|
|
img = img / float(255.0)
        img = img.astype(np.float16)
        img_h = img.shape[0]
        img_w = img.shape[1]
        stride_x = img_width_model - 100
        stride_y = img_height_model - 100
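        # A stride 100 px smaller than the patch size gives neighbouring
        # patches a 50 px overlap on each side; these margins are masked out
        # further down so only patch interiors contribute.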
        one_tensor = tf.ones_like(img)
        img_patches = tf.image.extract_patches(images=[img, one_tensor],
                                               sizes=[1, img_height_model, img_width_model, 1],
                                               strides=[1, stride_y, stride_x, 1],
                                               rates=[1, 1, 1, 1],
                                               padding='SAME')
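        # extract_patches flattens each patch into the last axis, so both the
        # image patches and the companion all-ones tensor are reshaped back to
        # (n_patches, h, w, 3) below.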
        one_patches = img_patches[1]
        img_patches = img_patches[0]
        img_patches = tf.squeeze(img_patches)

        img_patches_resh = tf.reshape(img_patches, shape=(img_patches.shape[0] * img_patches.shape[1], img_height_model, img_width_model, 3))
        pred_patches = model.predict(img_patches_resh, batch_size=n_batch_inference)
        one_patches = tf.squeeze(one_patches)
        one_patches = tf.reshape(one_patches, [img_patches.shape[0] * img_patches.shape[1], img_height_model, img_width_model, 3])
        x = tf.range(img.shape[1])
        y = tf.range(img.shape[0])
        x, y = tf.meshgrid(x, y)
        indices = tf.stack([y, x], axis=-1)
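        # Cut the (y, x) coordinate grid into the same patch layout as the
        # image, so every predicted pixel keeps the full-image index it will
        # be scattered back to.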
        indices_patches = tf.image.extract_patches(images=tf.expand_dims(indices, axis=0),
                                                   sizes=[1, img_height_model, img_width_model, 1],
                                                   strides=[1, stride_y, stride_x, 1],
                                                   rates=[1, 1, 1, 1],
                                                   padding='SAME')
        indices_patches = tf.squeeze(indices_patches)
        indices_patches = tf.reshape(indices_patches, [img_patches.shape[0] * img_patches.shape[1], img_height_model, img_width_model, 2])
        margin_y = int((img_height_model - stride_y) / 2.)
        margin_x = int((img_width_model - stride_x) / 2.)
        mask_margin = np.zeros((img_height_model, img_width_model))
        mask_margin[margin_y:img_height_model - margin_y, margin_x:img_width_model - margin_x] = 1
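        # mask_margin keeps only each patch's interior: multiplying the index
        # patches by it collapses all margin coordinates onto (0, 0), so the
        # overlap bands cannot overwrite a neighbour's interior.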
        indices_patches_array = indices_patches.numpy()
        for i in range(indices_patches_array.shape[0]):
            indices_patches_array[i, :, :, 0] = indices_patches_array[i, :, :, 0] * mask_margin
            indices_patches_array[i, :, :, 1] = indices_patches_array[i, :, :, 1] * mask_margin
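        # scatter_nd writes each patch's class scores back at its source
        # pixels; duplicate indices are summed, which the margin mask confines
        # to pixel (0, 0).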
        reconstructed = tf.scatter_nd(indices=indices_patches_array, updates=pred_patches, shape=(img.shape[0], img.shape[1], pred_patches.shape[-1]))
        reconstructed_argmax = reconstructed.numpy()
        prediction_true = np.argmax(reconstructed_argmax, axis=2)
        prediction_true = prediction_true.astype(np.uint8)
        gc.collect()
        return np.repeat(prediction_true[:, :, np.newaxis], 3, axis=2)
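
    # Standalone sketch of the stitching idea used above (illustration only,
    # not part of the diff; toy sizes and non-overlapping patches assumed):
    # cut a (y, x) index grid into the same patches as the image, then let
    # tf.scatter_nd write the per-patch scores back at their source pixels.
    #
    #   import tensorflow as tf
    #
    #   H, W, C = 8, 8, 3                      # toy page size, 3 classes
    #   patch = stride = 4                     # four non-overlapping 4x4 tiles
    #   pred = tf.random.uniform((4, patch, patch, C))  # fake patch scores
    #
    #   y, x = tf.meshgrid(tf.range(H), tf.range(W), indexing='ij')
    #   idx = tf.stack([y, x], axis=-1)        # (H, W, 2) pixel coordinates
    #   idx_patches = tf.reshape(
    #       tf.image.extract_patches(images=tf.expand_dims(idx, 0),
    #                                sizes=[1, patch, patch, 1],
    #                                strides=[1, stride, stride, 1],
    #                                rates=[1, 1, 1, 1], padding='SAME'),
    #       (4, patch, patch, 2))
    #
    #   full = tf.scatter_nd(indices=idx_patches, updates=pred, shape=(H, W, C))
    #   assert full.shape == (H, W, C)         # stitched (H, W, C) score map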
    def do_prediction_new_concept(self, patches, img, model, n_batch_inference=1, marginal_of_patch_percent=0.1, thresholding_for_some_classes_in_light_version=False, thresholding_for_artificial_class_in_light_version=False):
        self.logger.debug("enter do_prediction_new_concept")
@@ -2089,12 +2190,16 @@ class Eynollah:
        if not self.dir_in:
            prediction_textline = self.do_prediction(patches, img, model_textline, marginal_of_patch_percent=0.15, n_batch_inference=3, thresholding_for_artificial_class_in_light_version=thresholding_for_artificial_class_in_light_version)
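            # The scatter_nd variant below is kept commented out as an
            # alternative stitching path to do_prediction: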
            ##prediction_textline = self.do_prediction_new_concept_scatter_nd(patches, img, model_textline, n_batch_inference=3)
            #if not thresholding_for_artificial_class_in_light_version:
            #if num_col_classifier==1:
            #prediction_textline_nopatch = self.do_prediction(False, img, model_textline)
            #prediction_textline[:,:][prediction_textline_nopatch[:,:]==0] = 0
        else:
            prediction_textline = self.do_prediction(patches, img, self.model_textline, marginal_of_patch_percent=0.15, n_batch_inference=3, thresholding_for_artificial_class_in_light_version=thresholding_for_artificial_class_in_light_version)
            ###prediction_textline = self.do_prediction_new_concept_scatter_nd(patches, img, self.model_textline, n_batch_inference=3)
            #if not thresholding_for_artificial_class_in_light_version:
            #if num_col_classifier==1:
            #prediction_textline_nopatch = self.do_prediction(False, img, model_textline)
@@ -2374,14 +2479,17 @@ class Eynollah:
            if num_col_classifier == 1 or num_col_classifier == 2:
                model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_1_2_sp_np)
                if self.image_org.shape[0] / self.image_org.shape[1] > 2.5:
                    ##prediction_regions_org = self.do_prediction_new_concept_scatter_nd(True, img_resized, model_region, n_batch_inference=1, thresholding_for_some_classes_in_light_version=True)
                    prediction_regions_org = self.do_prediction_new_concept(True, img_resized, model_region, n_batch_inference=1, thresholding_for_some_classes_in_light_version=True)
                else:
                    prediction_regions_org = np.zeros((self.image_org.shape[0], self.image_org.shape[1], 3))
                    ##prediction_regions_page = self.do_prediction_new_concept_scatter_nd(False, self.image_page_org_size, model_region, n_batch_inference=1, thresholding_for_artificial_class_in_light_version=True)
                    prediction_regions_page = self.do_prediction_new_concept(False, self.image_page_org_size, model_region, n_batch_inference=1, thresholding_for_artificial_class_in_light_version=True)
                    prediction_regions_org[self.page_coord[0]:self.page_coord[1], self.page_coord[2]:self.page_coord[3], :] = prediction_regions_page
            else:
                model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_1_2_sp_np)
                prediction_regions_org = self.do_prediction_new_concept(True, resize_image(img_bin, int((900 + (num_col_classifier - 3) * 100) * (img_bin.shape[0] / img_bin.shape[1])), 900 + (num_col_classifier - 3) * 100), model_region, n_batch_inference=2, thresholding_for_some_classes_in_light_version=True)
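                # The target width 900 + (num_col_classifier - 3) * 100 grows
                # with the detected column count; the height argument scales by
                # img_bin's aspect ratio so proportions are preserved.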
                ###prediction_regions_org = self.do_prediction_new_concept_scatter_nd(True, resize_image(img_bin, int((900 + (num_col_classifier - 3) * 100) * (img_bin.shape[0] / img_bin.shape[1])), 900 + (num_col_classifier - 3) * 100), model_region, n_batch_inference=2, thresholding_for_some_classes_in_light_version=True)
                ##model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_ens_light)
                ##prediction_regions_org = self.do_prediction(True, img_bin, model_region, n_batch_inference=3, thresholding_for_some_classes_in_light_version=True)
        else:
@@ -5501,3 +5609,4 @@ class Eynollah:
        if self.dir_in:
            self.logger.info("All jobs done in %.1fs", time.time() - t0_tot)
print("all Job done in %.1fs", time.time() - t0_tot)