Merge branch 'main' into remove-imutils-1
commit
eaff9e3537
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,47 @@
{
    "version": "0.1.0",
    "git_url": "https://github.com/qurator-spk/sbb_binarization",
    "tools": {
        "ocrd-sbb-binarize": {
            "executable": "ocrd-sbb-binarize",
            "description": "Pixelwise binarization with selectional auto-encoders in Keras",
            "categories": ["Image preprocessing"],
            "steps": ["preprocessing/optimization/binarization"],
            "input_file_grp": [],
            "output_file_grp": [],
            "parameters": {
                "operation_level": {
                    "type": "string",
                    "enum": ["page", "region"],
                    "default": "page",
                    "description": "PAGE XML hierarchy level to operate on"
                },
                "model": {
                    "description": "Directory containing HDF5 or SavedModel/ProtoBuf models. Can be an absolute path or a path relative to the OCR-D resource location, the current working directory or the $SBB_BINARIZE_DATA environment variable (if set)",
                    "type": "string",
                    "format": "uri",
                    "content-type": "text/directory",
                    "required": true
                }
            },
            "resources": [
                {
                    "url": "https://github.com/qurator-spk/sbb_binarization/releases/download/v0.0.11/saved_model_2020_01_16.zip",
                    "name": "default",
                    "type": "archive",
                    "path_in_archive": "saved_model_2020_01_16",
                    "size": 563147331,
                    "description": "default models provided by github.com/qurator-spk (SavedModel format)"
                },
                {
                    "url": "https://github.com/qurator-spk/sbb_binarization/releases/download/v0.0.11/saved_model_2021_03_09.zip",
                    "name": "default-2021-03-09",
                    "type": "archive",
                    "path_in_archive": ".",
                    "size": 133230419,
                    "description": "updated default models provided by github.com/qurator-spk (SavedModel format)"
                }
            ]
        }
    }
}
|
@ -0,0 +1,158 @@
|
|||||||
|
from os import environ
|
||||||
|
from os.path import join
|
||||||
|
from pathlib import Path
|
||||||
|
from pkg_resources import resource_string
|
||||||
|
from json import loads
|
||||||
|
|
||||||
|
from PIL import Image
|
||||||
|
import numpy as np
|
||||||
|
import cv2
|
||||||
|
from click import command
|
||||||
|
|
||||||
|
from ocrd_utils import (
|
||||||
|
getLogger,
|
||||||
|
assert_file_grp_cardinality,
|
||||||
|
make_file_id,
|
||||||
|
MIMETYPE_PAGE
|
||||||
|
)
|
||||||
|
from ocrd import Processor
|
||||||
|
from ocrd_modelfactory import page_from_file
|
||||||
|
from ocrd_models.ocrd_page import AlternativeImageType, to_xml
|
||||||
|
from ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor
|
||||||
|
|
||||||
|
from .sbb_binarize import SbbBinarizer
|
||||||
|
|
||||||
|
OCRD_TOOL = loads(resource_string(__name__, 'ocrd-tool-binarization.json').decode('utf8'))
|
||||||
|
TOOL = 'ocrd-sbb-binarize'
|
||||||
|
|
||||||
|
def cv2pil(img):
    """Convert an OpenCV/numpy image array to a PIL image (forcing 8-bit depth)."""
    as_uint8 = img.astype('uint8')
    return Image.fromarray(as_uint8)
|
def pil2cv(img):
    """Convert a PIL image to an OpenCV BGR array (adapted from ocrd/workspace.py)."""
    # bilevel/grayscale modes need a gray-to-BGR conversion; everything else is RGB
    if img.mode in ('1', 'L'):
        conversion = cv2.COLOR_GRAY2BGR
    else:
        conversion = cv2.COLOR_RGB2BGR
    # bilevel ('1') arrays come out as booleans and must be cast to uint8 first
    if img.mode == '1':
        arr = np.array(img).astype('uint8')
    else:
        arr = np.array(img)
    return cv2.cvtColor(arr, conversion)
|
class SbbBinarizeProcessor(Processor):
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
kwargs['ocrd_tool'] = OCRD_TOOL['tools'][TOOL]
|
||||||
|
kwargs['version'] = OCRD_TOOL['version']
|
||||||
|
super().__init__(*args, **kwargs)
|
||||||
|
if hasattr(self, 'output_file_grp'):
|
||||||
|
# processing context
|
||||||
|
self.setup()
|
||||||
|
|
||||||
|
def setup(self):
|
||||||
|
"""
|
||||||
|
Set up the model prior to processing.
|
||||||
|
"""
|
||||||
|
LOG = getLogger('processor.SbbBinarize.__init__')
|
||||||
|
if not 'model' in self.parameter:
|
||||||
|
raise ValueError("'model' parameter is required")
|
||||||
|
# resolve relative path via environment variable
|
||||||
|
model_path = Path(self.parameter['model'])
|
||||||
|
if not model_path.is_absolute():
|
||||||
|
if 'SBB_BINARIZE_DATA' in environ and environ['SBB_BINARIZE_DATA']:
|
||||||
|
LOG.info("Environment variable SBB_BINARIZE_DATA is set to '%s'" \
|
||||||
|
" - prepending to model value '%s'. If you don't want this mechanism," \
|
||||||
|
" unset the SBB_BINARIZE_DATA environment variable.",
|
||||||
|
environ['SBB_BINARIZE_DATA'], model_path)
|
||||||
|
model_path = Path(environ['SBB_BINARIZE_DATA']).joinpath(model_path)
|
||||||
|
model_path = model_path.resolve()
|
||||||
|
if not model_path.is_dir():
|
||||||
|
raise FileNotFoundError("Does not exist or is not a directory: %s" % model_path)
|
||||||
|
# resolve relative path via OCR-D ResourceManager
|
||||||
|
model_path = self.resolve_resource(str(model_path))
|
||||||
|
self.binarizer = SbbBinarizer(model_dir=model_path, logger=LOG)
|
||||||
|
|
||||||
|
def process(self):
|
||||||
|
"""
|
||||||
|
Binarize images with sbb_binarization (based on selectional auto-encoders).
|
||||||
|
|
||||||
|
For each page of the input file group, open and deserialize input PAGE-XML
|
||||||
|
and its respective images. Then iterate over the element hierarchy down to
|
||||||
|
the requested ``operation_level``.
|
||||||
|
|
||||||
|
For each segment element, retrieve a raw (non-binarized) segment image
|
||||||
|
according to the layout annotation (from an existing ``AlternativeImage``,
|
||||||
|
or by cropping into the higher-level images, and deskewing when applicable).
|
||||||
|
|
||||||
|
Pass the image to the binarizer (which runs in fixed-size windows/patches
|
||||||
|
across the image and stitches the results together).
|
||||||
|
|
||||||
|
Serialize the resulting bilevel image as PNG file and add it to the output
|
||||||
|
file group (with file ID suffix ``.IMG-BIN``) along with the output PAGE-XML
|
||||||
|
(referencing it as new ``AlternativeImage`` for the segment element).
|
||||||
|
|
||||||
|
Produce a new PAGE output file by serialising the resulting hierarchy.
|
||||||
|
"""
|
||||||
|
LOG = getLogger('processor.SbbBinarize')
|
||||||
|
assert_file_grp_cardinality(self.input_file_grp, 1)
|
||||||
|
assert_file_grp_cardinality(self.output_file_grp, 1)
|
||||||
|
|
||||||
|
oplevel = self.parameter['operation_level']
|
||||||
|
|
||||||
|
for n, input_file in enumerate(self.input_files):
|
||||||
|
file_id = make_file_id(input_file, self.output_file_grp)
|
||||||
|
page_id = input_file.pageId or input_file.ID
|
||||||
|
LOG.info("INPUT FILE %i / %s", n, page_id)
|
||||||
|
pcgts = page_from_file(self.workspace.download_file(input_file))
|
||||||
|
self.add_metadata(pcgts)
|
||||||
|
pcgts.set_pcGtsId(file_id)
|
||||||
|
page = pcgts.get_Page()
|
||||||
|
page_image, page_xywh, _ = self.workspace.image_from_page(page, page_id, feature_filter='binarized')
|
||||||
|
|
||||||
|
if oplevel == 'page':
|
||||||
|
LOG.info("Binarizing on 'page' level in page '%s'", page_id)
|
||||||
|
bin_image = cv2pil(self.binarizer.run(image=pil2cv(page_image), use_patches=True))
|
||||||
|
# update METS (add the image file):
|
||||||
|
bin_image_path = self.workspace.save_image_file(bin_image,
|
||||||
|
file_id + '.IMG-BIN',
|
||||||
|
page_id=input_file.pageId,
|
||||||
|
file_grp=self.output_file_grp)
|
||||||
|
page.add_AlternativeImage(AlternativeImageType(filename=bin_image_path, comments='%s,binarized' % page_xywh['features']))
|
||||||
|
|
||||||
|
elif oplevel == 'region':
|
||||||
|
regions = page.get_AllRegions(['Text', 'Table'], depth=1)
|
||||||
|
if not regions:
|
||||||
|
LOG.warning("Page '%s' contains no text/table regions", page_id)
|
||||||
|
for region in regions:
|
||||||
|
region_image, region_xywh = self.workspace.image_from_segment(region, page_image, page_xywh, feature_filter='binarized')
|
||||||
|
region_image_bin = cv2pil(binarizer.run(image=pil2cv(region_image), use_patches=True))
|
||||||
|
region_image_bin_path = self.workspace.save_image_file(
|
||||||
|
region_image_bin,
|
||||||
|
"%s_%s.IMG-BIN" % (file_id, region.id),
|
||||||
|
page_id=input_file.pageId,
|
||||||
|
file_grp=self.output_file_grp)
|
||||||
|
region.add_AlternativeImage(
|
||||||
|
AlternativeImageType(filename=region_image_bin_path, comments='%s,binarized' % region_xywh['features']))
|
||||||
|
|
||||||
|
elif oplevel == 'line':
|
||||||
|
region_line_tuples = [(r.id, r.get_TextLine()) for r in page.get_AllRegions(['Text'], depth=0)]
|
||||||
|
if not region_line_tuples:
|
||||||
|
LOG.warning("Page '%s' contains no text lines", page_id)
|
||||||
|
for region_id, line in region_line_tuples:
|
||||||
|
line_image, line_xywh = self.workspace.image_from_segment(line, page_image, page_xywh, feature_filter='binarized')
|
||||||
|
line_image_bin = cv2pil(binarizer.run(image=pil2cv(line_image), use_patches=True))
|
||||||
|
line_image_bin_path = self.workspace.save_image_file(
|
||||||
|
line_image_bin,
|
||||||
|
"%s_%s_%s.IMG-BIN" % (file_id, region_id, line.id),
|
||||||
|
page_id=input_file.pageId,
|
||||||
|
file_grp=self.output_file_grp)
|
||||||
|
line.add_AlternativeImage(
|
||||||
|
AlternativeImageType(filename=line_image_bin_path, comments='%s,binarized' % line_xywh['features']))
|
||||||
|
|
||||||
|
self.workspace.add_file(
|
||||||
|
ID=file_id,
|
||||||
|
file_grp=self.output_file_grp,
|
||||||
|
pageId=input_file.pageId,
|
||||||
|
mimetype=MIMETYPE_PAGE,
|
||||||
|
local_filename=join(self.output_file_grp, file_id + '.xml'),
|
||||||
|
content=to_xml(pcgts))
|
||||||
|
|
||||||
|
@command()
@ocrd_cli_options
def cli(*args, **kwargs):
    """Command-line entry point: run SbbBinarizeProcessor through the OCR-D CLI wrapper."""
    return ocrd_cli_wrap_processor(SbbBinarizeProcessor, *args, **kwargs)
|
@ -0,0 +1,383 @@
|
|||||||
|
"""
|
||||||
|
Tool to load model and binarize a given image.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
from glob import glob
|
||||||
|
from os import environ, devnull
|
||||||
|
from os.path import join
|
||||||
|
from warnings import catch_warnings, simplefilter
|
||||||
|
import os
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from PIL import Image
|
||||||
|
import cv2
|
||||||
|
environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
|
||||||
|
stderr = sys.stderr
|
||||||
|
sys.stderr = open(devnull, 'w')
|
||||||
|
import tensorflow as tf
|
||||||
|
from tensorflow.keras.models import load_model
|
||||||
|
from tensorflow.python.keras import backend as tensorflow_backend
|
||||||
|
sys.stderr = stderr
|
||||||
|
|
||||||
|
|
||||||
|
import logging
|
||||||
|
|
||||||
|
def resize_image(img_in, input_height, input_width):
    """Resize *img_in* to (input_height, input_width) with nearest-neighbour interpolation."""
    # cv2.resize takes the target size as (width, height)
    target_size = (input_width, input_height)
    return cv2.resize(img_in, target_size, interpolation=cv2.INTER_NEAREST)
|
class SbbBinarizer:
|
||||||
|
|
||||||
|
def __init__(self, model_dir, logger=None):
|
||||||
|
self.model_dir = model_dir
|
||||||
|
self.log = logger if logger else logging.getLogger('SbbBinarizer')
|
||||||
|
|
||||||
|
self.start_new_session()
|
||||||
|
|
||||||
|
self.model_files = glob(self.model_dir+"/*/", recursive = True)
|
||||||
|
|
||||||
|
self.models = []
|
||||||
|
for model_file in self.model_files:
|
||||||
|
self.models.append(self.load_model(model_file))
|
||||||
|
|
||||||
|
    def start_new_session(self):
        """Create a TF1-compat session and register it as the Keras backend session."""
        config = tf.compat.v1.ConfigProto()
        # allocate GPU memory on demand instead of reserving it all up front
        config.gpu_options.allow_growth = True

        self.session = tf.compat.v1.Session(config=config) # tf.InteractiveSession()
        tensorflow_backend.set_session(self.session)
||||||
|
    def end_session(self):
        """Tear down the TF session: clear Keras backend state first, then close and drop the session."""
        tensorflow_backend.clear_session()
        self.session.close()
        del self.session
|
||||||
|
def load_model(self, model_name):
|
||||||
|
model = load_model(join(self.model_dir, model_name), compile=False)
|
||||||
|
model_height = model.layers[len(model.layers)-1].output_shape[1]
|
||||||
|
model_width = model.layers[len(model.layers)-1].output_shape[2]
|
||||||
|
n_classes = model.layers[len(model.layers)-1].output_shape[3]
|
||||||
|
return model, model_height, model_width, n_classes
|
||||||
|
|
||||||
|
    def predict(self, model_in, img, use_patches, n_batch_inference=5):
        """
        Run one model over *img* and return a per-pixel class-index map.

        Args:
            model_in: tuple ``(model, model_height, model_width, n_classes)``
                as returned by :meth:`load_model`.
            img: image array of shape (height, width, channels) — presumably
                BGR uint8 from cv2.imread; TODO confirm against callers.
            use_patches: if True, slide overlapping model-sized windows over
                the image, predict in batches and stitch the results back
                together; if False, resize the whole image to the model input
                size for a single prediction.
            n_batch_inference: number of patches accumulated before each
                ``model.predict`` call.

        Returns:
            2-D uint8 array with the original image height/width holding
            argmax class indices.
        """
        tensorflow_backend.set_session(self.session)
        model, model_height, model_width, n_classes = model_in

        # remember original size so any padding can be cropped off at the end
        img_org_h = img.shape[0]
        img_org_w = img.shape[1]

        # pad images smaller than the model input (roughly centered) so at
        # least one full window fits in each dimension
        if img.shape[0] < model_height and img.shape[1] >= model_width:
            img_padded = np.zeros(( model_height, img.shape[1], img.shape[2] ))

            index_start_h = int( abs( img.shape[0] - model_height) /2.)
            index_start_w = 0

            img_padded [ index_start_h: index_start_h+img.shape[0], :, : ] = img[:,:,:]

        elif img.shape[0] >= model_height and img.shape[1] < model_width:
            img_padded = np.zeros(( img.shape[0], model_width, img.shape[2] ))

            index_start_h = 0
            index_start_w = int( abs( img.shape[1] - model_width) /2.)

            img_padded [ :, index_start_w: index_start_w+img.shape[1], : ] = img[:,:,:]

        elif img.shape[0] < model_height and img.shape[1] < model_width:
            img_padded = np.zeros(( model_height, model_width, img.shape[2] ))

            index_start_h = int( abs( img.shape[0] - model_height) /2.)
            index_start_w = int( abs( img.shape[1] - model_width) /2.)

            img_padded [ index_start_h: index_start_h+img.shape[0], index_start_w: index_start_w+img.shape[1], : ] = img[:,:,:]

        else:
            # image is at least model-sized in both dimensions: no padding
            index_start_h = 0
            index_start_w = 0
            img_padded = np.copy(img)

        img = np.copy(img_padded)

        if use_patches:

            # overlap margin between neighbouring windows (10% of model width)
            margin = int(0.1 * model_width)

            # effective stride of the sliding window in x and y
            width_mid = model_width - 2 * margin
            height_mid = model_height - 2 * margin

            img = img / float(255.0)

            img_h = img.shape[0]
            img_w = img.shape[1]

            prediction_true = np.zeros((img_h, img_w, 3))
            # NOTE(review): mask_true is assigned but never used afterwards
            mask_true = np.zeros((img_h, img_w))
            # number of windows needed in x and y (rounded up below)
            nxf = img_w / float(width_mid)
            nyf = img_h / float(height_mid)

            if nxf > int(nxf):
                nxf = int(nxf) + 1
            else:
                nxf = int(nxf)

            if nyf > int(nyf):
                nyf = int(nyf) + 1
            else:
                nyf = int(nyf)

            # per-batch bookkeeping: grid positions and pixel bounds of each
            # patch currently collected in img_patch
            list_i_s = []
            list_j_s = []
            list_x_u = []
            list_x_d = []
            list_y_u = []
            list_y_d = []

            batch_indexer = 0

            img_patch = np.zeros((n_batch_inference, model_height, model_width,3))

            for i in range(nxf):
                for j in range(nyf):

                    # window bounds in x (both branches compute the same thing)
                    if i == 0:
                        index_x_d = i * width_mid
                        index_x_u = index_x_d + model_width
                    elif i > 0:
                        index_x_d = i * width_mid
                        index_x_u = index_x_d + model_width

                    # window bounds in y (both branches compute the same thing)
                    if j == 0:
                        index_y_d = j * height_mid
                        index_y_u = index_y_d + model_height
                    elif j > 0:
                        index_y_d = j * height_mid
                        index_y_u = index_y_d + model_height

                    # clamp the last window to the image border (windows keep
                    # the full model size, so shift them inwards instead)
                    if index_x_u > img_w:
                        index_x_u = img_w
                        index_x_d = img_w - model_width
                    if index_y_u > img_h:
                        index_y_u = img_h
                        index_y_d = img_h - model_height

                    list_i_s.append(i)
                    list_j_s.append(j)
                    list_x_u.append(index_x_u)
                    list_x_d.append(index_x_d)
                    list_y_d.append(index_y_d)
                    list_y_u.append(index_y_u)

                    img_patch[batch_indexer,:,:,:] = img[index_y_d:index_y_u, index_x_d:index_x_u, :]

                    batch_indexer = batch_indexer + 1

                    # run inference once a full batch has been collected
                    if batch_indexer == n_batch_inference:

                        label_p_pred = model.predict(img_patch,verbose=0)

                        seg = np.argmax(label_p_pred, axis=3)

                        #print(seg.shape, len(seg), len(list_i_s))

                        # stitch each predicted patch back into prediction_true,
                        # trimming the overlap margin on every side that is not
                        # an image border
                        indexer_inside_batch = 0
                        for i_batch, j_batch in zip(list_i_s, list_j_s):
                            seg_in = seg[indexer_inside_batch,:,:]
                            seg_color = np.repeat(seg_in[:, :, np.newaxis], 3, axis=2)

                            index_y_u_in = list_y_u[indexer_inside_batch]
                            index_y_d_in = list_y_d[indexer_inside_batch]

                            index_x_u_in = list_x_u[indexer_inside_batch]
                            index_x_d_in = list_x_d[indexer_inside_batch]

                            if i_batch == 0 and j_batch == 0:
                                seg_color = seg_color[0 : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :]
                                prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
                            elif i_batch == nxf - 1 and j_batch == nyf - 1:
                                seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - 0, :]
                                prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
                            elif i_batch == 0 and j_batch == nyf - 1:
                                seg_color = seg_color[margin : seg_color.shape[0] - 0, 0 : seg_color.shape[1] - margin, :]
                                prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
                            elif i_batch == nxf - 1 and j_batch == 0:
                                seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :]
                                prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
                            elif i_batch == 0 and j_batch != 0 and j_batch != nyf - 1:
                                seg_color = seg_color[margin : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :]
                                prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
                            elif i_batch == nxf - 1 and j_batch != 0 and j_batch != nyf - 1:
                                seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :]
                                prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
                            elif i_batch != 0 and i_batch != nxf - 1 and j_batch == 0:
                                seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :]
                                prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color
                            elif i_batch != 0 and i_batch != nxf - 1 and j_batch == nyf - 1:
                                seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - margin, :]
                                prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color
                            else:
                                # interior patch: trim margin on all four sides
                                seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :]
                                prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color

                            indexer_inside_batch = indexer_inside_batch +1

                        # reset batch bookkeeping for the next batch
                        list_i_s = []
                        list_j_s = []
                        list_x_u = []
                        list_x_d = []
                        list_y_u = []
                        list_y_d = []

                        batch_indexer = 0

                        img_patch = np.zeros((n_batch_inference, model_height, model_width,3))

                    # last grid cell with a partially filled batch: flush it
                    elif i==(nxf-1) and j==(nyf-1):
                        label_p_pred = model.predict(img_patch,verbose=0)

                        seg = np.argmax(label_p_pred, axis=3)

                        #print(seg.shape, len(seg), len(list_i_s))

                        # same stitching as above for the remaining patches
                        indexer_inside_batch = 0
                        for i_batch, j_batch in zip(list_i_s, list_j_s):
                            seg_in = seg[indexer_inside_batch,:,:]
                            seg_color = np.repeat(seg_in[:, :, np.newaxis], 3, axis=2)

                            index_y_u_in = list_y_u[indexer_inside_batch]
                            index_y_d_in = list_y_d[indexer_inside_batch]

                            index_x_u_in = list_x_u[indexer_inside_batch]
                            index_x_d_in = list_x_d[indexer_inside_batch]

                            if i_batch == 0 and j_batch == 0:
                                seg_color = seg_color[0 : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :]
                                prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
                            elif i_batch == nxf - 1 and j_batch == nyf - 1:
                                seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - 0, :]
                                prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
                            elif i_batch == 0 and j_batch == nyf - 1:
                                seg_color = seg_color[margin : seg_color.shape[0] - 0, 0 : seg_color.shape[1] - margin, :]
                                prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
                            elif i_batch == nxf - 1 and j_batch == 0:
                                seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :]
                                prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
                            elif i_batch == 0 and j_batch != 0 and j_batch != nyf - 1:
                                seg_color = seg_color[margin : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :]
                                prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + 0 : index_x_u_in - margin, :] = seg_color
                            elif i_batch == nxf - 1 and j_batch != 0 and j_batch != nyf - 1:
                                seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :]
                                prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - 0, :] = seg_color
                            elif i_batch != 0 and i_batch != nxf - 1 and j_batch == 0:
                                seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :]
                                prediction_true[index_y_d_in + 0 : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color
                            elif i_batch != 0 and i_batch != nxf - 1 and j_batch == nyf - 1:
                                seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - margin, :]
                                prediction_true[index_y_d_in + margin : index_y_u_in - 0, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color
                            else:
                                seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :]
                                prediction_true[index_y_d_in + margin : index_y_u_in - margin, index_x_d_in + margin : index_x_u_in - margin, :] = seg_color

                            indexer_inside_batch = indexer_inside_batch +1

                        list_i_s = []
                        list_j_s = []
                        list_x_u = []
                        list_x_d = []
                        list_y_u = []
                        list_y_d = []

                        batch_indexer = 0

                        img_patch = np.zeros((n_batch_inference, model_height, model_width,3))

            # crop off the initial padding to restore the original size
            prediction_true = prediction_true[index_start_h: index_start_h+img_org_h, index_start_w: index_start_w+img_org_w,:]
            prediction_true = prediction_true.astype(np.uint8)

        else:
            # whole-image mode: resize down to model input, predict once,
            # resize the class map back up to the page size
            img_h_page = img.shape[0]
            img_w_page = img.shape[1]
            img = img / float(255.0)
            img = resize_image(img, model_height, model_width)

            label_p_pred = model.predict(img.reshape(1, img.shape[0], img.shape[1], img.shape[2]))

            seg = np.argmax(label_p_pred, axis=3)[0]
            seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2)
            prediction_true = resize_image(seg_color, img_h_page, img_w_page)
            prediction_true = prediction_true.astype(np.uint8)
        # all three channels are identical; return a single channel
        return prediction_true[:,:,0]
||||||
|
def run(self, image=None, image_path=None, save=None, use_patches=False, dir_in=None, dir_out=None):
|
||||||
|
print(dir_in,'dir_in')
|
||||||
|
if not dir_in:
|
||||||
|
if (image is not None and image_path is not None) or \
|
||||||
|
(image is None and image_path is None):
|
||||||
|
raise ValueError("Must pass either a opencv2 image or an image_path")
|
||||||
|
if image_path is not None:
|
||||||
|
image = cv2.imread(image_path)
|
||||||
|
img_last = 0
|
||||||
|
for n, (model, model_file) in enumerate(zip(self.models, self.model_files)):
|
||||||
|
self.log.info('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.model_files)))
|
||||||
|
|
||||||
|
res = self.predict(model, image, use_patches)
|
||||||
|
|
||||||
|
img_fin = np.zeros((res.shape[0], res.shape[1], 3))
|
||||||
|
res[:, :][res[:, :] == 0] = 2
|
||||||
|
res = res - 1
|
||||||
|
res = res * 255
|
||||||
|
img_fin[:, :, 0] = res
|
||||||
|
img_fin[:, :, 1] = res
|
||||||
|
img_fin[:, :, 2] = res
|
||||||
|
|
||||||
|
img_fin = img_fin.astype(np.uint8)
|
||||||
|
img_fin = (res[:, :] == 0) * 255
|
||||||
|
img_last = img_last + img_fin
|
||||||
|
|
||||||
|
kernel = np.ones((5, 5), np.uint8)
|
||||||
|
img_last[:, :][img_last[:, :] > 0] = 255
|
||||||
|
img_last = (img_last[:, :] == 0) * 255
|
||||||
|
if save:
|
||||||
|
cv2.imwrite(save, img_last)
|
||||||
|
return img_last
|
||||||
|
else:
|
||||||
|
ls_imgs = os.listdir(dir_in)
|
||||||
|
for image_name in ls_imgs:
|
||||||
|
image_stem = image_name.split('.')[0]
|
||||||
|
print(image_name,'image_name')
|
||||||
|
image = cv2.imread(os.path.join(dir_in,image_name) )
|
||||||
|
img_last = 0
|
||||||
|
for n, (model, model_file) in enumerate(zip(self.models, self.model_files)):
|
||||||
|
self.log.info('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.model_files)))
|
||||||
|
|
||||||
|
res = self.predict(model, image, use_patches)
|
||||||
|
|
||||||
|
img_fin = np.zeros((res.shape[0], res.shape[1], 3))
|
||||||
|
res[:, :][res[:, :] == 0] = 2
|
||||||
|
res = res - 1
|
||||||
|
res = res * 255
|
||||||
|
img_fin[:, :, 0] = res
|
||||||
|
img_fin[:, :, 1] = res
|
||||||
|
img_fin[:, :, 2] = res
|
||||||
|
|
||||||
|
img_fin = img_fin.astype(np.uint8)
|
||||||
|
img_fin = (res[:, :] == 0) * 255
|
||||||
|
img_last = img_last + img_fin
|
||||||
|
|
||||||
|
kernel = np.ones((5, 5), np.uint8)
|
||||||
|
img_last[:, :][img_last[:, :] > 0] = 255
|
||||||
|
img_last = (img_last[:, :] == 0) * 255
|
||||||
|
|
||||||
|
cv2.imwrite(os.path.join(dir_out,image_stem+'.png'), img_last)
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue