Mirror of https://github.com/qurator-spk/sbb_pixelwise_segmentation.git, synced 2025-06-08 19:30:07 +02:00
Merge pull request #24 from johnlockejrr/unifying-training-models
Unifying training models
This commit is contained in: commit d6ccb83bf5
2 changed files with 9 additions and 9 deletions
@@ -125,7 +125,7 @@ def get_content_of_dir(dir_in):
     """
     gt_all=os.listdir(dir_in)
-    gt_list=[file for file in gt_all if file.split('.')[ len(file.split('.'))-1 ]=='xml' ]
+    gt_list = [file for file in gt_all if os.path.splitext(file)[1] == '.xml']
     return gt_list


 def return_parent_contours(contours, hierarchy):
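As an aside, a minimal standalone sketch of the two extension checks swapped in the hunk above (the file names are made up for illustration and are not part of the patch):

import os

names = ['page_0001.xml', 'scan.v2.xml', 'notes.txt']  # hypothetical inputs

# old check: compare the text after the last dot, without the dot
old = [f for f in names if f.split('.')[len(f.split('.')) - 1] == 'xml']
# new check: compare the extension returned by os.path.splitext, including the dot
new = [f for f in names if os.path.splitext(f)[1] == '.xml']

assert old == new == ['page_0001.xml', 'scan.v2.xml']

Both filters select the same files; the splitext form simply states the intent more directly.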
@@ -555,7 +555,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_

     if dir_images:
         ls_org_imgs = os.listdir(dir_images)
-        ls_org_imgs_stem = [item.split('.')[0] for item in ls_org_imgs]
+        ls_org_imgs_stem = [os.path.splitext(item)[0] for item in ls_org_imgs]
     for index in tqdm(range(len(gt_list))):
         #try:
         print(gt_list[index])
@@ -722,10 +722,10 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_
             img_poly = resize_image(img_poly, y_new, x_new)

             try:
-                xml_file_stem = gt_list[index].split('-')[1].split('.')[0]
+                xml_file_stem = os.path.splitext(gt_list[index])[0]
                 cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly)
             except:
-                xml_file_stem = gt_list[index].split('.')[0]
+                xml_file_stem = os.path.splitext(gt_list[index])[0]
                 cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly)

         if dir_images:
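A small standalone sketch of how the two stem extractions above differ (the file names are invented; the bare except in the patch is narrowed to IndexError here, since that is what a missing '-' actually raises):

import os

def old_stem(name):
    # old behaviour: text between the first '-' and the next '.',
    # falling back to everything before the first '.'
    try:
        return name.split('-')[1].split('.')[0]
    except IndexError:
        return name.split('.')[0]

def new_stem(name):
    # new behaviour: the full file name with only the final extension removed
    return os.path.splitext(name)[0]

for name in ['GT-page_0001.xml', 'page_0001.xml', 'page.0001.xml']:
    print(f'{name:18} old: {old_stem(name):12} new: {new_stem(name)}')
# GT-page_0001.xml   old: page_0001    new: GT-page_0001
# page_0001.xml      old: page_0001    new: page_0001
# page.0001.xml      old: page         new: page.0001

For hyphenated or dotted ground-truth names, the written PNG therefore keeps the full file stem under the new code.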
@@ -1185,10 +1185,10 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_
             img_poly = resize_image(img_poly, y_new, x_new)

             try:
-                xml_file_stem = gt_list[index].split('-')[1].split('.')[0]
+                xml_file_stem = os.path.splitext(gt_list[index])[0]
                 cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly)
             except:
-                xml_file_stem = gt_list[index].split('.')[0]
+                xml_file_stem = os.path.splitext(gt_list[index])[0]
                 cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly)


utils.py (6 changed lines)
@@ -434,7 +434,7 @@ def generate_arrays_from_folder_reading_order(classes_file_dir, modal_dir, batch
     batchcount = 0
     while True:
         for i in all_labels_files:
-            file_name = i.split('.')[0]
+            file_name = os.path.splitext(i)[0]
             img = cv2.imread(os.path.join(modal_dir,file_name+'.png'))

             label_class = int( np.load(os.path.join(classes_file_dir,i)) )
@@ -479,7 +479,7 @@ def data_gen(img_folder, mask_folder, batch_size, input_height, input_width, n_c

         for i in range(c, c + batch_size): # initially from 0 to 16, c = 0.
             try:
-                filename = n[i].split('.')[0]
+                filename = os.path.splitext(n[i])[0]

                 train_img = cv2.imread(img_folder + '/' + n[i]) / 255.
                 train_img = cv2.resize(train_img, (input_width, input_height),
@@ -745,7 +745,7 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow

     indexer = 0
     for im, seg_i in tqdm(zip(imgs_list_train, segs_list_train)):
-        img_name = im.split('.')[0]
+        img_name = os.path.splitext(im)[0]
         if task == "segmentation" or task == "binarization":
             dir_of_label_file = os.path.join(dir_seg, img_name + '.png')
         elif task=="enhancement":
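For context, the stem produced this way is what pairs an image with its label file; a short sketch with assumed names and paths:

import os

im = 'train_0001.jpg'       # assumed image file name
dir_seg = '/data/labels'    # assumed label directory

img_name = os.path.splitext(im)[0]                            # 'train_0001'
dir_of_label_file = os.path.join(dir_seg, img_name + '.png')  # '/data/labels/train_0001.png'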