From 95635d5b9ce17b3c417e3869c9586181ede6f384 Mon Sep 17 00:00:00 2001 From: "Rezanezhad, Vahid" Date: Thu, 5 Dec 2019 12:01:54 +0100 Subject: [PATCH 001/492] code to produce models --- train/.gitkeep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 train/.gitkeep diff --git a/train/.gitkeep b/train/.gitkeep new file mode 100644 index 0000000..e69de29 From 4601237427f8b8cc2786a3bf845dbec7dfbd289d Mon Sep 17 00:00:00 2001 From: b-vr103 Date: Thu, 5 Dec 2019 12:10:55 +0100 Subject: [PATCH 002/492] add files needed for training --- train/__init__.py | 0 train/metrics.py | 338 ++++++++++++++++++++++++++++++++++++++++++++++ train/models.py | 317 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 655 insertions(+) create mode 100644 train/__init__.py create mode 100644 train/metrics.py create mode 100644 train/models.py diff --git a/train/__init__.py b/train/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/train/metrics.py b/train/metrics.py new file mode 100644 index 0000000..c63cc22 --- /dev/null +++ b/train/metrics.py @@ -0,0 +1,338 @@ +from keras import backend as K +import tensorflow as tf +import numpy as np + +def focal_loss(gamma=2., alpha=4.): + + gamma = float(gamma) + alpha = float(alpha) + + def focal_loss_fixed(y_true, y_pred): + """Focal loss for multi-classification + FL(p_t)=-alpha(1-p_t)^{gamma}ln(p_t) + Notice: y_pred is probability after softmax + gradient is d(Fl)/d(p_t) not d(Fl)/d(x) as described in paper + d(Fl)/d(p_t) * [p_t(1-p_t)] = d(Fl)/d(x) + Focal Loss for Dense Object Detection + https://arxiv.org/abs/1708.02002 + + Arguments: + y_true {tensor} -- ground truth labels, shape of [batch_size, num_cls] + y_pred {tensor} -- model's output, shape of [batch_size, num_cls] + + Keyword Arguments: + gamma {float} -- (default: {2.0}) + alpha {float} -- (default: {4.0}) + + Returns: + [tensor] -- loss. + """ + epsilon = 1.e-9 + y_true = tf.convert_to_tensor(y_true, tf.float32) + y_pred = tf.convert_to_tensor(y_pred, tf.float32) + + model_out = tf.add(y_pred, epsilon) + ce = tf.multiply(y_true, -tf.log(model_out)) + weight = tf.multiply(y_true, tf.pow(tf.subtract(1., model_out), gamma)) + fl = tf.multiply(alpha, tf.multiply(weight, ce)) + reduced_fl = tf.reduce_max(fl, axis=1) + return tf.reduce_mean(reduced_fl) + return focal_loss_fixed + +def weighted_categorical_crossentropy(weights=None): + """ weighted_categorical_crossentropy + + Args: + * weights: crossentropy weights + Returns: + * weighted categorical crossentropy function + """ + + def loss(y_true, y_pred): + labels_floats = tf.cast(y_true, tf.float32) + per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats,logits=y_pred) + + if weights is not None: + weight_mask = tf.maximum(tf.reduce_max(tf.constant( + np.array(weights, dtype=np.float32)[None, None, None]) + * labels_floats, axis=-1), 1.0) + per_pixel_loss = per_pixel_loss * weight_mask[:, :, :, None] + return tf.reduce_mean(per_pixel_loss) + return loss +def image_categorical_cross_entropy(y_true, y_pred, weights=None): + """ + :param y_true: tensor of shape (batch_size, height, width) representing the ground truth. + :param y_pred: tensor of shape (batch_size, height, width) representing the prediction. + :return: The mean cross-entropy on softmaxed tensors. + """ + + labels_floats = tf.cast(y_true, tf.float32) + per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats,logits=y_pred) + + if weights is not None: + weight_mask = tf.maximum( + tf.reduce_max(tf.constant( + np.array(weights, dtype=np.float32)[None, None, None]) + * labels_floats, axis=-1), 1.0) + per_pixel_loss = per_pixel_loss * weight_mask[:, :, :, None] + + return tf.reduce_mean(per_pixel_loss) +def class_tversky(y_true, y_pred): + smooth = 1.0#1.00 + + y_true = K.permute_dimensions(y_true, (3,1,2,0)) + y_pred = K.permute_dimensions(y_pred, (3,1,2,0)) + + y_true_pos = K.batch_flatten(y_true) + y_pred_pos = K.batch_flatten(y_pred) + true_pos = K.sum(y_true_pos * y_pred_pos, 1) + false_neg = K.sum(y_true_pos * (1-y_pred_pos), 1) + false_pos = K.sum((1-y_true_pos)*y_pred_pos, 1) + alpha = 0.2#0.5 + beta=0.8 + return (true_pos + smooth)/(true_pos + alpha*false_neg + (beta)*false_pos + smooth) + +def focal_tversky_loss(y_true,y_pred): + pt_1 = class_tversky(y_true, y_pred) + gamma =1.3#4./3.0#1.3#4.0/3.00# 0.75 + return K.sum(K.pow((1-pt_1), gamma)) + +def generalized_dice_coeff2(y_true, y_pred): + n_el = 1 + for dim in y_true.shape: + n_el *= int(dim) + n_cl = y_true.shape[-1] + w = K.zeros(shape=(n_cl,)) + w = (K.sum(y_true, axis=(0,1,2)))/(n_el) + w = 1/(w**2+0.000001) + numerator = y_true*y_pred + numerator = w*K.sum(numerator,(0,1,2)) + numerator = K.sum(numerator) + denominator = y_true+y_pred + denominator = w*K.sum(denominator,(0,1,2)) + denominator = K.sum(denominator) + return 2*numerator/denominator +def generalized_dice_coeff(y_true, y_pred): + axes = tuple(range(1, len(y_pred.shape)-1)) + Ncl = y_pred.shape[-1] + w = K.zeros(shape=(Ncl,)) + w = K.sum(y_true, axis=axes) + w = 1/(w**2+0.000001) + # Compute gen dice coef: + numerator = y_true*y_pred + numerator = w*K.sum(numerator,axes) + numerator = K.sum(numerator) + + denominator = y_true+y_pred + denominator = w*K.sum(denominator,axes) + denominator = K.sum(denominator) + + gen_dice_coef = 2*numerator/denominator + + return gen_dice_coef + +def generalized_dice_loss(y_true, y_pred): + return 1 - generalized_dice_coeff2(y_true, y_pred) +def soft_dice_loss(y_true, y_pred, epsilon=1e-6): + ''' + Soft dice loss calculation for arbitrary batch size, number of classes, and number of spatial dimensions. + Assumes the `channels_last` format. + + # Arguments + y_true: b x X x Y( x Z...) x c One hot encoding of ground truth + y_pred: b x X x Y( x Z...) x c Network output, must sum to 1 over c channel (such as after softmax) + epsilon: Used for numerical stability to avoid divide by zero errors + + # References + V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation + https://arxiv.org/abs/1606.04797 + More details on Dice loss formulation + https://mediatum.ub.tum.de/doc/1395260/1395260.pdf (page 72) + + Adapted from https://github.com/Lasagne/Recipes/issues/99#issuecomment-347775022 + ''' + + # skip the batch and class axis for calculating Dice score + axes = tuple(range(1, len(y_pred.shape)-1)) + + numerator = 2. * K.sum(y_pred * y_true, axes) + + denominator = K.sum(K.square(y_pred) + K.square(y_true), axes) + return 1.00 - K.mean(numerator / (denominator + epsilon)) # average over classes and batch + +def seg_metrics(y_true, y_pred, metric_name, metric_type='standard', drop_last = True, mean_per_class=False, verbose=False): + """ + Compute mean metrics of two segmentation masks, via Keras. + + IoU(A,B) = |A & B| / (| A U B|) + Dice(A,B) = 2*|A & B| / (|A| + |B|) + + Args: + y_true: true masks, one-hot encoded. + y_pred: predicted masks, either softmax outputs, or one-hot encoded. + metric_name: metric to be computed, either 'iou' or 'dice'. + metric_type: one of 'standard' (default), 'soft', 'naive'. + In the standard version, y_pred is one-hot encoded and the mean + is taken only over classes that are present (in y_true or y_pred). + The 'soft' version of the metrics are computed without one-hot + encoding y_pred. + The 'naive' version return mean metrics where absent classes contribute + to the class mean as 1.0 (instead of being dropped from the mean). + drop_last = True: boolean flag to drop last class (usually reserved + for background class in semantic segmentation) + mean_per_class = False: return mean along batch axis for each class. + verbose = False: print intermediate results such as intersection, union + (as number of pixels). + Returns: + IoU/Dice of y_true and y_pred, as a float, unless mean_per_class == True + in which case it returns the per-class metric, averaged over the batch. + + Inputs are B*W*H*N tensors, with + B = batch size, + W = width, + H = height, + N = number of classes + """ + + flag_soft = (metric_type == 'soft') + flag_naive_mean = (metric_type == 'naive') + + # always assume one or more classes + num_classes = K.shape(y_true)[-1] + + if not flag_soft: + # get one-hot encoded masks from y_pred (true masks should already be one-hot) + y_pred = K.one_hot(K.argmax(y_pred), num_classes) + y_true = K.one_hot(K.argmax(y_true), num_classes) + + # if already one-hot, could have skipped above command + # keras uses float32 instead of float64, would give error down (but numpy arrays or keras.to_categorical gives float64) + y_true = K.cast(y_true, 'float32') + y_pred = K.cast(y_pred, 'float32') + + # intersection and union shapes are batch_size * n_classes (values = area in pixels) + axes = (1,2) # W,H axes of each image + intersection = K.sum(K.abs(y_true * y_pred), axis=axes) + mask_sum = K.sum(K.abs(y_true), axis=axes) + K.sum(K.abs(y_pred), axis=axes) + union = mask_sum - intersection # or, np.logical_or(y_pred, y_true) for one-hot + + smooth = .001 + iou = (intersection + smooth) / (union + smooth) + dice = 2 * (intersection + smooth)/(mask_sum + smooth) + + metric = {'iou': iou, 'dice': dice}[metric_name] + + # define mask to be 0 when no pixels are present in either y_true or y_pred, 1 otherwise + mask = K.cast(K.not_equal(union, 0), 'float32') + + if drop_last: + metric = metric[:,:-1] + mask = mask[:,:-1] + + if verbose: + print('intersection, union') + print(K.eval(intersection), K.eval(union)) + print(K.eval(intersection/union)) + + # return mean metrics: remaining axes are (batch, classes) + if flag_naive_mean: + return K.mean(metric) + + # take mean only over non-absent classes + class_count = K.sum(mask, axis=0) + non_zero = tf.greater(class_count, 0) + non_zero_sum = tf.boolean_mask(K.sum(metric * mask, axis=0), non_zero) + non_zero_count = tf.boolean_mask(class_count, non_zero) + + if verbose: + print('Counts of inputs with class present, metrics for non-absent classes') + print(K.eval(class_count), K.eval(non_zero_sum / non_zero_count)) + + return K.mean(non_zero_sum / non_zero_count) + +def mean_iou(y_true, y_pred, **kwargs): + """ + Compute mean Intersection over Union of two segmentation masks, via Keras. + + Calls metrics_k(y_true, y_pred, metric_name='iou'), see there for allowed kwargs. + """ + return seg_metrics(y_true, y_pred, metric_name='iou', **kwargs) +def Mean_IOU(y_true, y_pred): + nb_classes = K.int_shape(y_pred)[-1] + iou = [] + true_pixels = K.argmax(y_true, axis=-1) + pred_pixels = K.argmax(y_pred, axis=-1) + void_labels = K.equal(K.sum(y_true, axis=-1), 0) + for i in range(0, nb_classes): # exclude first label (background) and last label (void) + true_labels = K.equal(true_pixels, i)# & ~void_labels + pred_labels = K.equal(pred_pixels, i)# & ~void_labels + inter = tf.to_int32(true_labels & pred_labels) + union = tf.to_int32(true_labels | pred_labels) + legal_batches = K.sum(tf.to_int32(true_labels), axis=1)>0 + ious = K.sum(inter, axis=1)/K.sum(union, axis=1) + iou.append(K.mean(tf.gather(ious, indices=tf.where(legal_batches)))) # returns average IoU of the same objects + iou = tf.stack(iou) + legal_labels = ~tf.debugging.is_nan(iou) + iou = tf.gather(iou, indices=tf.where(legal_labels)) + return K.mean(iou) + +def iou_vahid(y_true, y_pred): + nb_classes = tf.shape(y_true)[-1]+tf.to_int32(1) + true_pixels = K.argmax(y_true, axis=-1) + pred_pixels = K.argmax(y_pred, axis=-1) + iou = [] + + for i in tf.range(nb_classes): + tp=K.sum( tf.to_int32( K.equal(true_pixels, i) & K.equal(pred_pixels, i) ) ) + fp=K.sum( tf.to_int32( K.not_equal(true_pixels, i) & K.equal(pred_pixels, i) ) ) + fn=K.sum( tf.to_int32( K.equal(true_pixels, i) & K.not_equal(pred_pixels, i) ) ) + iouh=tp/(tp+fp+fn) + iou.append(iouh) + return K.mean(iou) + + +def IoU_metric(Yi,y_predi): + ## mean Intersection over Union + ## Mean IoU = TP/(FN + TP + FP) + y_predi = np.argmax(y_predi, axis=3) + y_testi = np.argmax(Yi, axis=3) + IoUs = [] + Nclass = int(np.max(Yi)) + 1 + for c in range(Nclass): + TP = np.sum( (Yi == c)&(y_predi==c) ) + FP = np.sum( (Yi != c)&(y_predi==c) ) + FN = np.sum( (Yi == c)&(y_predi != c)) + IoU = TP/float(TP + FP + FN) + IoUs.append(IoU) + return K.cast( np.mean(IoUs) ,dtype='float32' ) + + +def IoU_metric_keras(y_true, y_pred): + ## mean Intersection over Union + ## Mean IoU = TP/(FN + TP + FP) + init = tf.global_variables_initializer() + sess = tf.Session() + sess.run(init) + + return IoU_metric(y_true.eval(session=sess), y_pred.eval(session=sess)) + +def jaccard_distance_loss(y_true, y_pred, smooth=100): + """ + Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|) + = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|)) + + The jaccard distance loss is usefull for unbalanced datasets. This has been + shifted so it converges on 0 and is smoothed to avoid exploding or disapearing + gradient. + + Ref: https://en.wikipedia.org/wiki/Jaccard_index + + @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96 + @author: wassname + """ + intersection = K.sum(K.abs(y_true * y_pred), axis=-1) + sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1) + jac = (intersection + smooth) / (sum_ - intersection + smooth) + return (1 - jac) * smooth + + diff --git a/train/models.py b/train/models.py new file mode 100644 index 0000000..7c806b4 --- /dev/null +++ b/train/models.py @@ -0,0 +1,317 @@ +from keras.models import * +from keras.layers import * +from keras import layers +from keras.regularizers import l2 + +resnet50_Weights_path='./pretrained_model/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5' +IMAGE_ORDERING ='channels_last' +MERGE_AXIS=-1 + + +def one_side_pad( x ): + x = ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING)(x) + if IMAGE_ORDERING == 'channels_first': + x = Lambda(lambda x : x[: , : , :-1 , :-1 ] )(x) + elif IMAGE_ORDERING == 'channels_last': + x = Lambda(lambda x : x[: , :-1 , :-1 , : ] )(x) + return x + +def identity_block(input_tensor, kernel_size, filters, stage, block): + """The identity block is the block that has no conv layer at shortcut. + # Arguments + input_tensor: input tensor + kernel_size: defualt 3, the kernel size of middle conv layer at main path + filters: list of integers, the filterss of 3 conv layer at main path + stage: integer, current stage label, used for generating layer names + block: 'a','b'..., current block label, used for generating layer names + # Returns + Output tensor for the block. + """ + filters1, filters2, filters3 = filters + + if IMAGE_ORDERING == 'channels_last': + bn_axis = 3 + else: + bn_axis = 1 + + conv_name_base = 'res' + str(stage) + block + '_branch' + bn_name_base = 'bn' + str(stage) + block + '_branch' + + x = Conv2D(filters1, (1, 1) , data_format=IMAGE_ORDERING , name=conv_name_base + '2a')(input_tensor) + x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x) + x = Activation('relu')(x) + + x = Conv2D(filters2, kernel_size , data_format=IMAGE_ORDERING , + padding='same', name=conv_name_base + '2b')(x) + x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x) + x = Activation('relu')(x) + + x = Conv2D(filters3 , (1, 1), data_format=IMAGE_ORDERING , name=conv_name_base + '2c')(x) + x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x) + + x = layers.add([x, input_tensor]) + x = Activation('relu')(x) + return x + + +def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)): + """conv_block is the block that has a conv layer at shortcut + # Arguments + input_tensor: input tensor + kernel_size: defualt 3, the kernel size of middle conv layer at main path + filters: list of integers, the filterss of 3 conv layer at main path + stage: integer, current stage label, used for generating layer names + block: 'a','b'..., current block label, used for generating layer names + # Returns + Output tensor for the block. + Note that from stage 3, the first conv layer at main path is with strides=(2,2) + And the shortcut should have strides=(2,2) as well + """ + filters1, filters2, filters3 = filters + + if IMAGE_ORDERING == 'channels_last': + bn_axis = 3 + else: + bn_axis = 1 + + conv_name_base = 'res' + str(stage) + block + '_branch' + bn_name_base = 'bn' + str(stage) + block + '_branch' + + x = Conv2D(filters1, (1, 1) , data_format=IMAGE_ORDERING , strides=strides, + name=conv_name_base + '2a')(input_tensor) + x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x) + x = Activation('relu')(x) + + x = Conv2D(filters2, kernel_size , data_format=IMAGE_ORDERING , padding='same', + name=conv_name_base + '2b')(x) + x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x) + x = Activation('relu')(x) + + x = Conv2D(filters3, (1, 1) , data_format=IMAGE_ORDERING , name=conv_name_base + '2c')(x) + x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x) + + shortcut = Conv2D(filters3, (1, 1) , data_format=IMAGE_ORDERING , strides=strides, + name=conv_name_base + '1')(input_tensor) + shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut) + + x = layers.add([x, shortcut]) + x = Activation('relu')(x) + return x + + +def resnet50_unet_light(n_classes,input_height=224,input_width=224,weight_decay=1e-6,pretraining=False): + assert input_height%32 == 0 + assert input_width%32 == 0 + + + img_input = Input(shape=(input_height,input_width , 3 )) + + if IMAGE_ORDERING == 'channels_last': + bn_axis = 3 + else: + bn_axis = 1 + + x = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(img_input) + x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2),kernel_regularizer=l2(weight_decay), name='conv1')(x) + f1 = x + + x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x) + x = Activation('relu')(x) + x = MaxPooling2D((3, 3) , data_format=IMAGE_ORDERING , strides=(2, 2))(x) + + + x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) + x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') + x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') + f2 = one_side_pad(x ) + + + x = conv_block(x, 3, [128, 128, 512], stage=3, block='a') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') + f3 = x + + x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f') + f4 = x + + x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a') + x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b') + x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c') + f5 = x + + + if pretraining: + model=Model( img_input , x ).load_weights(resnet50_Weights_path) + + + v512_2048 = Conv2D( 512 , (1, 1) , padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) )( f5 ) + v512_2048 = ( BatchNormalization(axis=bn_axis))(v512_2048) + v512_2048 = Activation('relu')(v512_2048) + + + + v512_1024=Conv2D( 512 , (1, 1) , padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) )( f4 ) + v512_1024 = ( BatchNormalization(axis=bn_axis))(v512_1024) + v512_1024 = Activation('relu')(v512_1024) + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(v512_2048) + o = ( concatenate([ o ,v512_1024],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(o) + o = ( Conv2D(512, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) + o = ( concatenate([ o ,f3],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(o) + o = ( Conv2D( 256, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) + o = ( concatenate([o,f2],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D((1,1) , data_format=IMAGE_ORDERING))(o) + o = ( Conv2D( 128 , (3, 3), padding='valid' , data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay) ) )(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) + o = ( concatenate([o,f1],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D((1,1) , data_format=IMAGE_ORDERING ))(o) + o = ( Conv2D( 64 , (3, 3), padding='valid' , data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) ))(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) + o = ( concatenate([o,img_input],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D((1,1) , data_format=IMAGE_ORDERING ))(o) + o = ( Conv2D( 32 , (3, 3), padding='valid' , data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) ))(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + + o = Conv2D( n_classes , (1, 1) , padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) )( o ) + o = ( BatchNormalization(axis=bn_axis))(o) + o = (Activation('softmax'))(o) + + + model = Model( img_input , o ) + return model + +def resnet50_unet(n_classes,input_height=224,input_width=224,weight_decay=1e-6,pretraining=False): + assert input_height%32 == 0 + assert input_width%32 == 0 + + + img_input = Input(shape=(input_height,input_width , 3 )) + + if IMAGE_ORDERING == 'channels_last': + bn_axis = 3 + else: + bn_axis = 1 + + x = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(img_input) + x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2),kernel_regularizer=l2(weight_decay), name='conv1')(x) + f1 = x + + x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x) + x = Activation('relu')(x) + x = MaxPooling2D((3, 3) , data_format=IMAGE_ORDERING , strides=(2, 2))(x) + + + x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) + x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') + x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') + f2 = one_side_pad(x ) + + + x = conv_block(x, 3, [128, 128, 512], stage=3, block='a') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') + f3 = x + + x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f') + f4 = x + + x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a') + x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b') + x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c') + f5 = x + + if pretraining: + Model( img_input , x ).load_weights(resnet50_Weights_path) + + v1024_2048 = Conv2D( 1024 , (1, 1) , padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) )( f5 ) + v1024_2048 = ( BatchNormalization(axis=bn_axis))(v1024_2048) + v1024_2048 = Activation('relu')(v1024_2048) + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(v1024_2048) + o = ( concatenate([ o ,f4],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(o) + o = ( Conv2D(512, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) + o = ( concatenate([ o ,f3],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(o) + o = ( Conv2D( 256, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) + o = ( concatenate([o,f2],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D((1,1) , data_format=IMAGE_ORDERING))(o) + o = ( Conv2D( 128 , (3, 3), padding='valid' , data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay) ) )(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) + o = ( concatenate([o,f1],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D((1,1) , data_format=IMAGE_ORDERING ))(o) + o = ( Conv2D( 64 , (3, 3), padding='valid' , data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) ))(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) + o = ( concatenate([o,img_input],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D((1,1) , data_format=IMAGE_ORDERING ))(o) + o = ( Conv2D( 32 , (3, 3), padding='valid' , data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) ))(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + o = Conv2D( n_classes , (1, 1) , padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) )( o ) + o = ( BatchNormalization(axis=bn_axis))(o) + o = (Activation('softmax'))(o) + + model = Model( img_input , o ) + + + + + return model From 226330535d0d01c67e4c18c7957e3d69b8f5f672 Mon Sep 17 00:00:00 2001 From: b-vr103 Date: Thu, 5 Dec 2019 14:05:07 +0100 Subject: [PATCH 003/492] add files needed for training --- train/README | 23 +++ train/config_params.json | 24 +++ train/train.py | 192 ++++++++++++++++++++++ train/utils.py | 336 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 575 insertions(+) create mode 100644 train/README create mode 100644 train/config_params.json create mode 100644 train/train.py create mode 100644 train/utils.py diff --git a/train/README b/train/README new file mode 100644 index 0000000..7d8d790 --- /dev/null +++ b/train/README @@ -0,0 +1,23 @@ +how to train: + just run: python train.py with config_params.json + + +format of ground truth: + + Lables for each pixel is identified by a number . So if you have a binary case n_classes should be set to 2 and labels should be 0 and 1 for each class and pixel. + In the case of multiclass just set n_classes to the number of classes you have and the try to produce the labels by pixels from 0 , 1 ,2 .., n_classes-1. + The labels format should be png. + + If you have an image label for binary case it should look like this: + + Label: [ [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]], [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ,[[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ] this means that you have an image by 3*4*3 and pixel[0,0] belongs to class 1 and pixel[0,1] to class 0. + +traing , evaluation and output: + train and evaluation folder should have subfolder of images and labels. + And output folder should be free folder which the output model will be written there. + +patches: + + if you want to train your model with patches, the height and width of patches should be defined and also number of batchs (how many patches should be seen by model by each iteration). + In the case that model should see the image once, like page extraction, the patches should be set to false. + diff --git a/train/config_params.json b/train/config_params.json new file mode 100644 index 0000000..52db6db --- /dev/null +++ b/train/config_params.json @@ -0,0 +1,24 @@ +{ + "n_classes" : 2, + "n_epochs" : 2, + "input_height" : 448, + "input_width" : 896, + "weight_decay" : 1e-6, + "n_batch" : 1, + "learning_rate": 1e-4, + "patches" : true, + "pretraining" : true, + "augmentation" : false, + "flip_aug" : false, + "elastic_aug" : false, + "blur_aug" : false, + "scaling" : false, + "binarization" : false, + "scaling_bluring" : false, + "scaling_binarization" : false, + "rotation": false, + "weighted_loss": true, + "dir_train": "/home/vahid/textline_gt_images/train_light", + "dir_eval": "/home/vahid/textline_gt_images/eval", + "dir_output": "/home/vahid/textline_gt_images/output" +} diff --git a/train/train.py b/train/train.py new file mode 100644 index 0000000..07c7418 --- /dev/null +++ b/train/train.py @@ -0,0 +1,192 @@ +import os +import sys +import tensorflow as tf +from keras.backend.tensorflow_backend import set_session +import keras , warnings +from keras.optimizers import * +from sacred import Experiment +from models import * +from utils import * +from metrics import * + + +def configuration(): + keras.backend.clear_session() + tf.reset_default_graph() + warnings.filterwarnings('ignore') + + os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID' + config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True) + + + config.gpu_options.allow_growth = True + config.gpu_options.per_process_gpu_memory_fraction=0.95#0.95 + config.gpu_options.visible_device_list="0" + set_session(tf.Session(config=config)) + +def get_dirs_or_files(input_data): + if os.path.isdir(input_data): + image_input, labels_input = os.path.join(input_data, 'images/'), os.path.join(input_data, 'labels/') + # Check if training dir exists + assert os.path.isdir(image_input), "{} is not a directory".format(image_input) + assert os.path.isdir(labels_input), "{} is not a directory".format(labels_input) + return image_input, labels_input + +ex = Experiment() + +@ex.config +def config_params(): + n_classes=None # Number of classes. If your case study is binary case the set it to 2 and otherwise give your number of cases. + n_epochs=1 + input_height=224*1 + input_width=224*1 + weight_decay=1e-6 # Weight decay of l2 regularization of model layers. + n_batch=1 # Number of batches at each iteration. + learning_rate=1e-4 + patches=False # Make patches of image in order to use all information of image. In the case of page + # extraction this should be set to false since model should see all image. + augmentation=False + flip_aug=False # Flip image (augmentation). + elastic_aug=False # Elastic transformation (augmentation). + blur_aug=False # Blur patches of image (augmentation). + scaling=False # Scaling of patches (augmentation) will be imposed if this set to true. + binarization=False # Otsu thresholding. Used for augmentation in the case of binary case like textline prediction. For multicases should not be applied. + dir_train=None # Directory of training dataset (sub-folders should be named images and labels). + dir_eval=None # Directory of validation dataset (sub-folders should be named images and labels). + dir_output=None # Directory of output where the model should be saved. + pretraining=False # Set true to load pretrained weights of resnet50 encoder. + weighted_loss=False # Set True if classes are unbalanced and you want to use weighted loss function. + scaling_bluring=False + rotation: False + scaling_binarization=False + blur_k=['blur','guass','median'] # Used in order to blur image. Used for augmentation. + scales=[0.9 , 1.1 ] # Scale patches with these scales. Used for augmentation. + flip_index=[0,1] # Flip image. Used for augmentation. + + +@ex.automain +def run(n_classes,n_epochs,input_height, + input_width,weight_decay,weighted_loss, + n_batch,patches,augmentation,flip_aug,blur_aug,scaling, binarization, + blur_k,scales,dir_train, + scaling_bluring,scaling_binarization,rotation, + flip_index,dir_eval ,dir_output,pretraining,learning_rate): + + dir_img,dir_seg=get_dirs_or_files(dir_train) + dir_img_val,dir_seg_val=get_dirs_or_files(dir_eval) + + # make first a directory in output for both training and evaluations in order to flow data from these directories. + dir_train_flowing=os.path.join(dir_output,'train') + dir_eval_flowing=os.path.join(dir_output,'eval') + + dir_flow_train_imgs=os.path.join(dir_train_flowing,'images') + dir_flow_train_labels=os.path.join(dir_train_flowing,'labels') + + dir_flow_eval_imgs=os.path.join(dir_eval_flowing,'images') + dir_flow_eval_labels=os.path.join(dir_eval_flowing,'labels') + + if os.path.isdir(dir_train_flowing): + os.system('rm -rf '+dir_train_flowing) + os.makedirs(dir_train_flowing) + else: + os.makedirs(dir_train_flowing) + + if os.path.isdir(dir_eval_flowing): + os.system('rm -rf '+dir_eval_flowing) + os.makedirs(dir_eval_flowing) + else: + os.makedirs(dir_eval_flowing) + + + os.mkdir(dir_flow_train_imgs) + os.mkdir(dir_flow_train_labels) + + os.mkdir(dir_flow_eval_imgs) + os.mkdir(dir_flow_eval_labels) + + + + #set the gpu configuration + configuration() + + + #writing patches into a sub-folder in order to be flowed from directory. + provide_patches(dir_img,dir_seg,dir_flow_train_imgs, + dir_flow_train_labels, + input_height,input_width,blur_k,blur_aug, + flip_aug,binarization,scaling,scales,flip_index, + scaling_bluring,scaling_binarization,rotation, + augmentation=augmentation,patches=patches) + + provide_patches(dir_img_val,dir_seg_val,dir_flow_eval_imgs, + dir_flow_eval_labels, + input_height,input_width,blur_k,blur_aug, + flip_aug,binarization,scaling,scales,flip_index, + scaling_bluring,scaling_binarization,rotation, + augmentation=False,patches=patches) + + if weighted_loss: + weights=np.zeros(n_classes) + for obj in os.listdir(dir_seg): + label_obj=cv2.imread(dir_seg+'/'+obj) + label_obj_one_hot=get_one_hot( label_obj,label_obj.shape[0],label_obj.shape[1],n_classes) + weights+=(label_obj_one_hot.sum(axis=0)).sum(axis=0) + + + weights=1.00/weights + + weights=weights/float(np.sum(weights)) + weights=weights/float(np.min(weights)) + weights=weights/float(np.sum(weights)) + + + + + #get our model. + model = resnet50_unet(n_classes, input_height, input_width,weight_decay,pretraining) + + #if you want to see the model structure just uncomment model summary. + #model.summary() + + + if not weighted_loss: + model.compile(loss='categorical_crossentropy', + optimizer = Adam(lr=learning_rate),metrics=['accuracy']) + if weighted_loss: + model.compile(loss=weighted_categorical_crossentropy(weights), + optimizer = Adam(lr=learning_rate),metrics=['accuracy']) + + mc = keras.callbacks.ModelCheckpoint('weights{epoch:08d}.h5', + save_weights_only=True, period=1) + + + #generating train and evaluation data + train_gen = data_gen(dir_flow_train_imgs,dir_flow_train_labels, batch_size = n_batch, + input_height=input_height, input_width=input_width,n_classes=n_classes ) + val_gen = data_gen(dir_flow_eval_imgs,dir_flow_eval_labels, batch_size = n_batch, + input_height=input_height, input_width=input_width,n_classes=n_classes ) + + + model.fit_generator( + train_gen, + steps_per_epoch=int(len(os.listdir(dir_flow_train_imgs))/n_batch), + validation_data=val_gen, + validation_steps=1, + epochs=n_epochs) + + + + os.system('rm -rf '+dir_train_flowing) + os.system('rm -rf '+dir_eval_flowing) + + model.save(dir_output+'/'+'model'+'.h5') + + + + + + + + + + diff --git a/train/utils.py b/train/utils.py new file mode 100644 index 0000000..afdc9e5 --- /dev/null +++ b/train/utils.py @@ -0,0 +1,336 @@ +import os +import cv2 +import numpy as np +import seaborn as sns +from scipy.ndimage.interpolation import map_coordinates +from scipy.ndimage.filters import gaussian_filter +import random +from tqdm import tqdm + + + + +def bluring(img_in,kind): + if kind=='guass': + img_blur = cv2.GaussianBlur(img_in,(5,5),0) + elif kind=="median": + img_blur = cv2.medianBlur(img_in,5) + elif kind=='blur': + img_blur=cv2.blur(img_in,(5,5)) + return img_blur + +def color_images(seg, n_classes): + ann_u=range(n_classes) + if len(np.shape(seg))==3: + seg=seg[:,:,0] + + seg_img=np.zeros((np.shape(seg)[0],np.shape(seg)[1],3)).astype(float) + colors=sns.color_palette("hls", n_classes) + + for c in ann_u: + c=int(c) + segl=(seg==c) + seg_img[:,:,0]+=segl*(colors[c][0]) + seg_img[:,:,1]+=segl*(colors[c][1]) + seg_img[:,:,2]+=segl*(colors[c][2]) + return seg_img + + +def resize_image(seg_in,input_height,input_width): + return cv2.resize(seg_in,(input_width,input_height),interpolation=cv2.INTER_NEAREST) +def get_one_hot(seg,input_height,input_width,n_classes): + seg=seg[:,:,0] + seg_f=np.zeros((input_height, input_width,n_classes)) + for j in range(n_classes): + seg_f[:,:,j]=(seg==j).astype(int) + return seg_f + + +def IoU(Yi,y_predi): + ## mean Intersection over Union + ## Mean IoU = TP/(FN + TP + FP) + + IoUs = [] + classes_true=np.unique(Yi) + for c in classes_true: + TP = np.sum( (Yi == c)&(y_predi==c) ) + FP = np.sum( (Yi != c)&(y_predi==c) ) + FN = np.sum( (Yi == c)&(y_predi != c)) + IoU = TP/float(TP + FP + FN) + print("class {:02.0f}: #TP={:6.0f}, #FP={:6.0f}, #FN={:5.0f}, IoU={:4.3f}".format(c,TP,FP,FN,IoU)) + IoUs.append(IoU) + mIoU = np.mean(IoUs) + print("_________________") + print("Mean IoU: {:4.3f}".format(mIoU)) + return mIoU +def data_gen(img_folder, mask_folder, batch_size,input_height, input_width,n_classes): + c = 0 + n = os.listdir(img_folder) #List of training images + random.shuffle(n) + while True: + img = np.zeros((batch_size, input_height, input_width, 3)).astype('float') + mask = np.zeros((batch_size, input_height, input_width, n_classes)).astype('float') + + for i in range(c, c+batch_size): #initially from 0 to 16, c = 0. + #print(img_folder+'/'+n[i]) + filename=n[i].split('.')[0] + train_img = cv2.imread(img_folder+'/'+n[i])/255. + train_img = cv2.resize(train_img, (input_width, input_height),interpolation=cv2.INTER_NEAREST)# Read an image from folder and resize + + img[i-c] = train_img #add to array - img[0], img[1], and so on. + train_mask = cv2.imread(mask_folder+'/'+filename+'.png') + #print(mask_folder+'/'+filename+'.png') + #print(train_mask.shape) + train_mask = get_one_hot( resize_image(train_mask,input_height,input_width),input_height,input_width,n_classes) + #train_mask = train_mask.reshape(224, 224, 1) # Add extra dimension for parity with train_img size [512 * 512 * 3] + + mask[i-c] = train_mask + + c+=batch_size + if(c+batch_size>=len(os.listdir(img_folder))): + c=0 + random.shuffle(n) + yield img, mask + +def otsu_copy(img): + img_r=np.zeros(img.shape) + img1=img[:,:,0] + img2=img[:,:,1] + img3=img[:,:,2] + _, threshold1 = cv2.threshold(img1, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) + _, threshold2 = cv2.threshold(img2, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) + _, threshold3 = cv2.threshold(img3, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) + img_r[:,:,0]=threshold1 + img_r[:,:,1]=threshold1 + img_r[:,:,2]=threshold1 + return img_r + +def rotation_90(img): + img_rot=np.zeros((img.shape[1],img.shape[0],img.shape[2])) + img_rot[:,:,0]=img[:,:,0].T + img_rot[:,:,1]=img[:,:,1].T + img_rot[:,:,2]=img[:,:,2].T + return img_rot + +def get_patches(dir_img_f,dir_seg_f,img,label,height,width,indexer): + + + img_h=img.shape[0] + img_w=img.shape[1] + + nxf=img_w/float(width) + nyf=img_h/float(height) + + if nxf>int(nxf): + nxf=int(nxf)+1 + if nyf>int(nyf): + nyf=int(nyf)+1 + + nxf=int(nxf) + nyf=int(nyf) + + for i in range(nxf): + for j in range(nyf): + index_x_d=i*width + index_x_u=(i+1)*width + + index_y_d=j*height + index_y_u=(j+1)*height + + if index_x_u>img_w: + index_x_u=img_w + index_x_d=img_w-width + if index_y_u>img_h: + index_y_u=img_h + index_y_d=img_h-height + + + img_patch=img[index_y_d:index_y_u,index_x_d:index_x_u,:] + label_patch=label[index_y_d:index_y_u,index_x_d:index_x_u,:] + + cv2.imwrite(dir_img_f+'/img_'+str(indexer)+'.png', img_patch ) + cv2.imwrite(dir_seg_f+'/img_'+str(indexer)+'.png' , label_patch ) + indexer+=1 + return indexer + + + +def get_patches_num_scale(dir_img_f,dir_seg_f,img,label,height,width,indexer,scaler): + + + img_h=img.shape[0] + img_w=img.shape[1] + + height_scale=int(height*scaler) + width_scale=int(width*scaler) + + + nxf=img_w/float(width_scale) + nyf=img_h/float(height_scale) + + if nxf>int(nxf): + nxf=int(nxf)+1 + if nyf>int(nyf): + nyf=int(nyf)+1 + + nxf=int(nxf) + nyf=int(nyf) + + for i in range(nxf): + for j in range(nyf): + index_x_d=i*width_scale + index_x_u=(i+1)*width_scale + + index_y_d=j*height_scale + index_y_u=(j+1)*height_scale + + if index_x_u>img_w: + index_x_u=img_w + index_x_d=img_w-width_scale + if index_y_u>img_h: + index_y_u=img_h + index_y_d=img_h-height_scale + + + img_patch=img[index_y_d:index_y_u,index_x_d:index_x_u,:] + label_patch=label[index_y_d:index_y_u,index_x_d:index_x_u,:] + + img_patch=resize_image(img_patch,height,width) + label_patch=resize_image(label_patch,height,width) + + cv2.imwrite(dir_img_f+'/img_'+str(indexer)+'.png', img_patch ) + cv2.imwrite(dir_seg_f+'/img_'+str(indexer)+'.png' , label_patch ) + indexer+=1 + + return indexer + + + +def provide_patches(dir_img,dir_seg,dir_flow_train_imgs, + dir_flow_train_labels, + input_height,input_width,blur_k,blur_aug, + flip_aug,binarization,scaling,scales,flip_index, + scaling_bluring,scaling_binarization,rotation, + augmentation=False,patches=False): + + imgs_cv_train=np.array(os.listdir(dir_img)) + segs_cv_train=np.array(os.listdir(dir_seg)) + + indexer=0 + for im, seg_i in tqdm(zip(imgs_cv_train,segs_cv_train)): + img_name=im.split('.')[0] + + if not patches: + cv2.imwrite(dir_flow_train_imgs+'/img_'+str(indexer)+'.png', resize_image(cv2.imread(dir_img+'/'+im),input_height,input_width ) ) + cv2.imwrite(dir_flow_train_labels+'/img_'+str(indexer)+'.png' , resize_image(cv2.imread(dir_seg+'/'+img_name+'.png'),input_height,input_width ) ) + indexer+=1 + + if augmentation: + if rotation: + cv2.imwrite(dir_flow_train_imgs+'/img_'+str(indexer)+'.png', + rotation_90( resize_image(cv2.imread(dir_img+'/'+im), + input_height,input_width) ) ) + + + cv2.imwrite(dir_flow_train_labels+'/img_'+str(indexer)+'.png', + rotation_90 ( resize_image(cv2.imread(dir_seg+'/'+img_name+'.png'), + input_height,input_width) ) ) + indexer+=1 + + if flip_aug: + for f_i in flip_index: + cv2.imwrite(dir_flow_train_imgs+'/img_'+str(indexer)+'.png', + resize_image(cv2.flip(cv2.imread(dir_img+'/'+im),f_i),input_height,input_width) ) + + cv2.imwrite(dir_flow_train_labels+'/img_'+str(indexer)+'.png' , + resize_image(cv2.flip(cv2.imread(dir_seg+'/'+img_name+'.png'),f_i),input_height,input_width) ) + indexer+=1 + + if blur_aug: + for blur_i in blur_k: + cv2.imwrite(dir_flow_train_imgs+'/img_'+str(indexer)+'.png', + (resize_image(bluring(cv2.imread(dir_img+'/'+im),blur_i),input_height,input_width) ) ) + + cv2.imwrite(dir_flow_train_labels+'/img_'+str(indexer)+'.png' , + resize_image(cv2.imread(dir_seg+'/'+img_name+'.png'),input_height,input_width) ) + indexer+=1 + + + if binarization: + cv2.imwrite(dir_flow_train_imgs+'/img_'+str(indexer)+'.png', + resize_image(otsu_copy( cv2.imread(dir_img+'/'+im)),input_height,input_width )) + + cv2.imwrite(dir_flow_train_labels+'/img_'+str(indexer)+'.png', + resize_image( cv2.imread(dir_seg+'/'+img_name+'.png'),input_height,input_width )) + indexer+=1 + + + + + + + if patches: + + indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, + cv2.imread(dir_img+'/'+im),cv2.imread(dir_seg+'/'+img_name+'.png'), + input_height,input_width,indexer=indexer) + + if augmentation: + + if rotation: + + + indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, + rotation_90( cv2.imread(dir_img+'/'+im) ), + rotation_90( cv2.imread(dir_seg+'/'+img_name+'.png') ), + input_height,input_width,indexer=indexer) + if flip_aug: + for f_i in flip_index: + + indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, + cv2.flip( cv2.imread(dir_img+'/'+im) , f_i), + cv2.flip( cv2.imread(dir_seg+'/'+img_name+'.png') ,f_i), + input_height,input_width,indexer=indexer) + if blur_aug: + for blur_i in blur_k: + indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, + bluring( cv2.imread(dir_img+'/'+im) , blur_i), + cv2.imread(dir_seg+'/'+img_name+'.png'), + input_height,input_width,indexer=indexer) + + + if scaling: + for sc_ind in scales: + indexer=get_patches_num_scale(dir_flow_train_imgs,dir_flow_train_labels, + cv2.imread(dir_img+'/'+im) , + cv2.imread(dir_seg+'/'+img_name+'.png'), + input_height,input_width,indexer=indexer,scaler=sc_ind) + if binarization: + + indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, + otsu_copy( cv2.imread(dir_img+'/'+im)), + cv2.imread(dir_seg+'/'+img_name+'.png'), + input_height,input_width,indexer=indexer) + + + + if scaling_bluring: + for sc_ind in scales: + for blur_i in blur_k: + indexer=get_patches_num_scale(dir_flow_train_imgs,dir_flow_train_labels, + bluring( cv2.imread(dir_img+'/'+im) , blur_i) , + cv2.imread(dir_seg+'/'+img_name+'.png') , + input_height,input_width,indexer=indexer,scaler=sc_ind) + + if scaling_binarization: + for sc_ind in scales: + indexer=get_patches_num_scale(dir_flow_train_imgs,dir_flow_train_labels, + otsu_copy( cv2.imread(dir_img+'/'+im)) , + cv2.imread(dir_seg+'/'+img_name+'.png'), + input_height,input_width,indexer=indexer,scaler=sc_ind) + + + + + + From 1882dd8f53b665993c806ff5587562772f65c8a7 Mon Sep 17 00:00:00 2001 From: "Rezanezhad, Vahid" Date: Thu, 5 Dec 2019 14:05:55 +0100 Subject: [PATCH 004/492] Update config_params.json --- train/config_params.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/train/config_params.json b/train/config_params.json index 52db6db..5066444 100644 --- a/train/config_params.json +++ b/train/config_params.json @@ -18,7 +18,7 @@ "scaling_binarization" : false, "rotation": false, "weighted_loss": true, - "dir_train": "/home/vahid/textline_gt_images/train_light", - "dir_eval": "/home/vahid/textline_gt_images/eval", - "dir_output": "/home/vahid/textline_gt_images/output" + "dir_train": "../train", + "dir_eval": "../eval", + "dir_output": "../output" } From e8afb370bafa617250ef3f15fe35a721e0a1ccbd Mon Sep 17 00:00:00 2001 From: "Rezanezhad, Vahid" Date: Thu, 5 Dec 2019 14:08:08 +0100 Subject: [PATCH 005/492] Update README --- train/README | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/train/README b/train/README index 7d8d790..8d478bd 100644 --- a/train/README +++ b/train/README @@ -4,17 +4,20 @@ how to train: format of ground truth: - Lables for each pixel is identified by a number . So if you have a binary case n_classes should be set to 2 and labels should be 0 and 1 for each class and pixel. - In the case of multiclass just set n_classes to the number of classes you have and the try to produce the labels by pixels from 0 , 1 ,2 .., n_classes-1. + Lables for each pixel is identified by a number . So if you have a binary case n_classes should be set to 2 and + labels should be 0 and 1 for each class and pixel. + In the case of multiclass just set n_classes to the number of classes you have and the try to produce the labels + by pixels set from 0 , 1 ,2 .., n_classes-1. The labels format should be png. If you have an image label for binary case it should look like this: - Label: [ [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]], [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ,[[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ] this means that you have an image by 3*4*3 and pixel[0,0] belongs to class 1 and pixel[0,1] to class 0. + Label: [ [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]], [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ,[[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ] + this means that you have an image by 3*4*3 and pixel[0,0] belongs to class 1 and pixel[0,1] to class 0. -traing , evaluation and output: +training , evaluation and output: train and evaluation folder should have subfolder of images and labels. - And output folder should be free folder which the output model will be written there. + And output folder should be empty folder which the output model will be written there. patches: From 99a02a1bf55a8022110ca78d0363c2eae610cecf Mon Sep 17 00:00:00 2001 From: "Rezanezhad, Vahid" Date: Thu, 5 Dec 2019 14:11:37 +0100 Subject: [PATCH 006/492] Update README --- train/README | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/train/README b/train/README index 8d478bd..54ea408 100644 --- a/train/README +++ b/train/README @@ -21,6 +21,7 @@ training , evaluation and output: patches: - if you want to train your model with patches, the height and width of patches should be defined and also number of batchs (how many patches should be seen by model by each iteration). + if you want to train your model with patches, the height and width of patches should be defined and also number of + batchs (how many patches should be seen by model by each iteration). In the case that model should see the image once, like page extraction, the patches should be set to false. From 7eb3dd26addb0131cf39c6bdbf0dcd88ed61d8d5 Mon Sep 17 00:00:00 2001 From: "Rezanezhad, Vahid" Date: Thu, 5 Dec 2019 16:11:31 +0100 Subject: [PATCH 007/492] Update README --- train/README | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/train/README b/train/README index 54ea408..e103b0b 100644 --- a/train/README +++ b/train/README @@ -1,8 +1,8 @@ -how to train: +# Train just run: python train.py with config_params.json -format of ground truth: +# Ground truth format Lables for each pixel is identified by a number . So if you have a binary case n_classes should be set to 2 and labels should be 0 and 1 for each class and pixel. @@ -15,11 +15,11 @@ format of ground truth: Label: [ [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]], [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ,[[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ] this means that you have an image by 3*4*3 and pixel[0,0] belongs to class 1 and pixel[0,1] to class 0. -training , evaluation and output: +# Training , evaluation and output train and evaluation folder should have subfolder of images and labels. And output folder should be empty folder which the output model will be written there. -patches: +# Patches if you want to train your model with patches, the height and width of patches should be defined and also number of batchs (how many patches should be seen by model by each iteration). From cf18aa7fbb64900979b816b6b03ff20c5378b3a9 Mon Sep 17 00:00:00 2001 From: "Rezanezhad, Vahid" Date: Thu, 5 Dec 2019 16:13:37 +0100 Subject: [PATCH 008/492] Update README --- train/README | 27 +-------------------------- 1 file changed, 1 insertion(+), 26 deletions(-) diff --git a/train/README b/train/README index e103b0b..5237d53 100644 --- a/train/README +++ b/train/README @@ -1,27 +1,2 @@ -# Train - just run: python train.py with config_params.json - - -# Ground truth format - - Lables for each pixel is identified by a number . So if you have a binary case n_classes should be set to 2 and - labels should be 0 and 1 for each class and pixel. - In the case of multiclass just set n_classes to the number of classes you have and the try to produce the labels - by pixels set from 0 , 1 ,2 .., n_classes-1. - The labels format should be png. - - If you have an image label for binary case it should look like this: - - Label: [ [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]], [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ,[[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ] - this means that you have an image by 3*4*3 and pixel[0,0] belongs to class 1 and pixel[0,1] to class 0. - -# Training , evaluation and output - train and evaluation folder should have subfolder of images and labels. - And output folder should be empty folder which the output model will be written there. - -# Patches - - if you want to train your model with patches, the height and width of patches should be defined and also number of - batchs (how many patches should be seen by model by each iteration). - In the case that model should see the image once, like page extraction, the patches should be set to false. + From ac542665815bea97752440bcf874a21ec939c047 Mon Sep 17 00:00:00 2001 From: "Rezanezhad, Vahid" Date: Thu, 5 Dec 2019 16:13:40 +0100 Subject: [PATCH 009/492] Delete README --- train/README | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 train/README diff --git a/train/README b/train/README deleted file mode 100644 index 5237d53..0000000 --- a/train/README +++ /dev/null @@ -1,2 +0,0 @@ - - From 350378af168d68f4709c1b98bc8e867e9b46ccfd Mon Sep 17 00:00:00 2001 From: "Rezanezhad, Vahid" Date: Thu, 5 Dec 2019 16:14:00 +0100 Subject: [PATCH 010/492] Add new file --- train/README.md | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 train/README.md diff --git a/train/README.md b/train/README.md new file mode 100644 index 0000000..c4dc27e --- /dev/null +++ b/train/README.md @@ -0,0 +1,26 @@ +# Train + just run: python train.py with config_params.json + + +# Ground truth format + + Lables for each pixel is identified by a number . So if you have a binary case n_classes should be set to 2 and + labels should be 0 and 1 for each class and pixel. + In the case of multiclass just set n_classes to the number of classes you have and the try to produce the labels + by pixels set from 0 , 1 ,2 .., n_classes-1. + The labels format should be png. + + If you have an image label for binary case it should look like this: + + Label: [ [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]], [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ,[[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ] + this means that you have an image by 3*4*3 and pixel[0,0] belongs to class 1 and pixel[0,1] to class 0. + +# Training , evaluation and output + train and evaluation folder should have subfolder of images and labels. + And output folder should be empty folder which the output model will be written there. + +# Patches + + if you want to train your model with patches, the height and width of patches should be defined and also number of + batchs (how many patches should be seen by model by each iteration). + In the case that model should see the image once, like page extraction, the patches should be set to false. \ No newline at end of file From 979b824aa8fe84619e9863372b45647ed8306327 Mon Sep 17 00:00:00 2001 From: "Gerber, Mike" Date: Mon, 9 Dec 2019 15:33:53 +0100 Subject: [PATCH 011/492] =?UTF-8?q?=F0=9F=93=9D=20howto:=20Be=20more=20ver?= =?UTF-8?q?bose=20with=20the=20subtree=20pull?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- train/.gitkeep | 0 train/README.md | 26 +++ train/__init__.py | 0 train/config_params.json | 24 +++ train/metrics.py | 338 +++++++++++++++++++++++++++++++++++++++ train/models.py | 317 ++++++++++++++++++++++++++++++++++++ train/train.py | 192 ++++++++++++++++++++++ train/utils.py | 336 ++++++++++++++++++++++++++++++++++++++ 8 files changed, 1233 insertions(+) create mode 100644 train/.gitkeep create mode 100644 train/README.md create mode 100644 train/__init__.py create mode 100644 train/config_params.json create mode 100644 train/metrics.py create mode 100644 train/models.py create mode 100644 train/train.py create mode 100644 train/utils.py diff --git a/train/.gitkeep b/train/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/train/README.md b/train/README.md new file mode 100644 index 0000000..c4dc27e --- /dev/null +++ b/train/README.md @@ -0,0 +1,26 @@ +# Train + just run: python train.py with config_params.json + + +# Ground truth format + + Lables for each pixel is identified by a number . So if you have a binary case n_classes should be set to 2 and + labels should be 0 and 1 for each class and pixel. + In the case of multiclass just set n_classes to the number of classes you have and the try to produce the labels + by pixels set from 0 , 1 ,2 .., n_classes-1. + The labels format should be png. + + If you have an image label for binary case it should look like this: + + Label: [ [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]], [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ,[[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ] + this means that you have an image by 3*4*3 and pixel[0,0] belongs to class 1 and pixel[0,1] to class 0. + +# Training , evaluation and output + train and evaluation folder should have subfolder of images and labels. + And output folder should be empty folder which the output model will be written there. + +# Patches + + if you want to train your model with patches, the height and width of patches should be defined and also number of + batchs (how many patches should be seen by model by each iteration). + In the case that model should see the image once, like page extraction, the patches should be set to false. \ No newline at end of file diff --git a/train/__init__.py b/train/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/train/config_params.json b/train/config_params.json new file mode 100644 index 0000000..5066444 --- /dev/null +++ b/train/config_params.json @@ -0,0 +1,24 @@ +{ + "n_classes" : 2, + "n_epochs" : 2, + "input_height" : 448, + "input_width" : 896, + "weight_decay" : 1e-6, + "n_batch" : 1, + "learning_rate": 1e-4, + "patches" : true, + "pretraining" : true, + "augmentation" : false, + "flip_aug" : false, + "elastic_aug" : false, + "blur_aug" : false, + "scaling" : false, + "binarization" : false, + "scaling_bluring" : false, + "scaling_binarization" : false, + "rotation": false, + "weighted_loss": true, + "dir_train": "../train", + "dir_eval": "../eval", + "dir_output": "../output" +} diff --git a/train/metrics.py b/train/metrics.py new file mode 100644 index 0000000..c63cc22 --- /dev/null +++ b/train/metrics.py @@ -0,0 +1,338 @@ +from keras import backend as K +import tensorflow as tf +import numpy as np + +def focal_loss(gamma=2., alpha=4.): + + gamma = float(gamma) + alpha = float(alpha) + + def focal_loss_fixed(y_true, y_pred): + """Focal loss for multi-classification + FL(p_t)=-alpha(1-p_t)^{gamma}ln(p_t) + Notice: y_pred is probability after softmax + gradient is d(Fl)/d(p_t) not d(Fl)/d(x) as described in paper + d(Fl)/d(p_t) * [p_t(1-p_t)] = d(Fl)/d(x) + Focal Loss for Dense Object Detection + https://arxiv.org/abs/1708.02002 + + Arguments: + y_true {tensor} -- ground truth labels, shape of [batch_size, num_cls] + y_pred {tensor} -- model's output, shape of [batch_size, num_cls] + + Keyword Arguments: + gamma {float} -- (default: {2.0}) + alpha {float} -- (default: {4.0}) + + Returns: + [tensor] -- loss. + """ + epsilon = 1.e-9 + y_true = tf.convert_to_tensor(y_true, tf.float32) + y_pred = tf.convert_to_tensor(y_pred, tf.float32) + + model_out = tf.add(y_pred, epsilon) + ce = tf.multiply(y_true, -tf.log(model_out)) + weight = tf.multiply(y_true, tf.pow(tf.subtract(1., model_out), gamma)) + fl = tf.multiply(alpha, tf.multiply(weight, ce)) + reduced_fl = tf.reduce_max(fl, axis=1) + return tf.reduce_mean(reduced_fl) + return focal_loss_fixed + +def weighted_categorical_crossentropy(weights=None): + """ weighted_categorical_crossentropy + + Args: + * weights: crossentropy weights + Returns: + * weighted categorical crossentropy function + """ + + def loss(y_true, y_pred): + labels_floats = tf.cast(y_true, tf.float32) + per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats,logits=y_pred) + + if weights is not None: + weight_mask = tf.maximum(tf.reduce_max(tf.constant( + np.array(weights, dtype=np.float32)[None, None, None]) + * labels_floats, axis=-1), 1.0) + per_pixel_loss = per_pixel_loss * weight_mask[:, :, :, None] + return tf.reduce_mean(per_pixel_loss) + return loss +def image_categorical_cross_entropy(y_true, y_pred, weights=None): + """ + :param y_true: tensor of shape (batch_size, height, width) representing the ground truth. + :param y_pred: tensor of shape (batch_size, height, width) representing the prediction. + :return: The mean cross-entropy on softmaxed tensors. + """ + + labels_floats = tf.cast(y_true, tf.float32) + per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats,logits=y_pred) + + if weights is not None: + weight_mask = tf.maximum( + tf.reduce_max(tf.constant( + np.array(weights, dtype=np.float32)[None, None, None]) + * labels_floats, axis=-1), 1.0) + per_pixel_loss = per_pixel_loss * weight_mask[:, :, :, None] + + return tf.reduce_mean(per_pixel_loss) +def class_tversky(y_true, y_pred): + smooth = 1.0#1.00 + + y_true = K.permute_dimensions(y_true, (3,1,2,0)) + y_pred = K.permute_dimensions(y_pred, (3,1,2,0)) + + y_true_pos = K.batch_flatten(y_true) + y_pred_pos = K.batch_flatten(y_pred) + true_pos = K.sum(y_true_pos * y_pred_pos, 1) + false_neg = K.sum(y_true_pos * (1-y_pred_pos), 1) + false_pos = K.sum((1-y_true_pos)*y_pred_pos, 1) + alpha = 0.2#0.5 + beta=0.8 + return (true_pos + smooth)/(true_pos + alpha*false_neg + (beta)*false_pos + smooth) + +def focal_tversky_loss(y_true,y_pred): + pt_1 = class_tversky(y_true, y_pred) + gamma =1.3#4./3.0#1.3#4.0/3.00# 0.75 + return K.sum(K.pow((1-pt_1), gamma)) + +def generalized_dice_coeff2(y_true, y_pred): + n_el = 1 + for dim in y_true.shape: + n_el *= int(dim) + n_cl = y_true.shape[-1] + w = K.zeros(shape=(n_cl,)) + w = (K.sum(y_true, axis=(0,1,2)))/(n_el) + w = 1/(w**2+0.000001) + numerator = y_true*y_pred + numerator = w*K.sum(numerator,(0,1,2)) + numerator = K.sum(numerator) + denominator = y_true+y_pred + denominator = w*K.sum(denominator,(0,1,2)) + denominator = K.sum(denominator) + return 2*numerator/denominator +def generalized_dice_coeff(y_true, y_pred): + axes = tuple(range(1, len(y_pred.shape)-1)) + Ncl = y_pred.shape[-1] + w = K.zeros(shape=(Ncl,)) + w = K.sum(y_true, axis=axes) + w = 1/(w**2+0.000001) + # Compute gen dice coef: + numerator = y_true*y_pred + numerator = w*K.sum(numerator,axes) + numerator = K.sum(numerator) + + denominator = y_true+y_pred + denominator = w*K.sum(denominator,axes) + denominator = K.sum(denominator) + + gen_dice_coef = 2*numerator/denominator + + return gen_dice_coef + +def generalized_dice_loss(y_true, y_pred): + return 1 - generalized_dice_coeff2(y_true, y_pred) +def soft_dice_loss(y_true, y_pred, epsilon=1e-6): + ''' + Soft dice loss calculation for arbitrary batch size, number of classes, and number of spatial dimensions. + Assumes the `channels_last` format. + + # Arguments + y_true: b x X x Y( x Z...) x c One hot encoding of ground truth + y_pred: b x X x Y( x Z...) x c Network output, must sum to 1 over c channel (such as after softmax) + epsilon: Used for numerical stability to avoid divide by zero errors + + # References + V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation + https://arxiv.org/abs/1606.04797 + More details on Dice loss formulation + https://mediatum.ub.tum.de/doc/1395260/1395260.pdf (page 72) + + Adapted from https://github.com/Lasagne/Recipes/issues/99#issuecomment-347775022 + ''' + + # skip the batch and class axis for calculating Dice score + axes = tuple(range(1, len(y_pred.shape)-1)) + + numerator = 2. * K.sum(y_pred * y_true, axes) + + denominator = K.sum(K.square(y_pred) + K.square(y_true), axes) + return 1.00 - K.mean(numerator / (denominator + epsilon)) # average over classes and batch + +def seg_metrics(y_true, y_pred, metric_name, metric_type='standard', drop_last = True, mean_per_class=False, verbose=False): + """ + Compute mean metrics of two segmentation masks, via Keras. + + IoU(A,B) = |A & B| / (| A U B|) + Dice(A,B) = 2*|A & B| / (|A| + |B|) + + Args: + y_true: true masks, one-hot encoded. + y_pred: predicted masks, either softmax outputs, or one-hot encoded. + metric_name: metric to be computed, either 'iou' or 'dice'. + metric_type: one of 'standard' (default), 'soft', 'naive'. + In the standard version, y_pred is one-hot encoded and the mean + is taken only over classes that are present (in y_true or y_pred). + The 'soft' version of the metrics are computed without one-hot + encoding y_pred. + The 'naive' version return mean metrics where absent classes contribute + to the class mean as 1.0 (instead of being dropped from the mean). + drop_last = True: boolean flag to drop last class (usually reserved + for background class in semantic segmentation) + mean_per_class = False: return mean along batch axis for each class. + verbose = False: print intermediate results such as intersection, union + (as number of pixels). + Returns: + IoU/Dice of y_true and y_pred, as a float, unless mean_per_class == True + in which case it returns the per-class metric, averaged over the batch. + + Inputs are B*W*H*N tensors, with + B = batch size, + W = width, + H = height, + N = number of classes + """ + + flag_soft = (metric_type == 'soft') + flag_naive_mean = (metric_type == 'naive') + + # always assume one or more classes + num_classes = K.shape(y_true)[-1] + + if not flag_soft: + # get one-hot encoded masks from y_pred (true masks should already be one-hot) + y_pred = K.one_hot(K.argmax(y_pred), num_classes) + y_true = K.one_hot(K.argmax(y_true), num_classes) + + # if already one-hot, could have skipped above command + # keras uses float32 instead of float64, would give error down (but numpy arrays or keras.to_categorical gives float64) + y_true = K.cast(y_true, 'float32') + y_pred = K.cast(y_pred, 'float32') + + # intersection and union shapes are batch_size * n_classes (values = area in pixels) + axes = (1,2) # W,H axes of each image + intersection = K.sum(K.abs(y_true * y_pred), axis=axes) + mask_sum = K.sum(K.abs(y_true), axis=axes) + K.sum(K.abs(y_pred), axis=axes) + union = mask_sum - intersection # or, np.logical_or(y_pred, y_true) for one-hot + + smooth = .001 + iou = (intersection + smooth) / (union + smooth) + dice = 2 * (intersection + smooth)/(mask_sum + smooth) + + metric = {'iou': iou, 'dice': dice}[metric_name] + + # define mask to be 0 when no pixels are present in either y_true or y_pred, 1 otherwise + mask = K.cast(K.not_equal(union, 0), 'float32') + + if drop_last: + metric = metric[:,:-1] + mask = mask[:,:-1] + + if verbose: + print('intersection, union') + print(K.eval(intersection), K.eval(union)) + print(K.eval(intersection/union)) + + # return mean metrics: remaining axes are (batch, classes) + if flag_naive_mean: + return K.mean(metric) + + # take mean only over non-absent classes + class_count = K.sum(mask, axis=0) + non_zero = tf.greater(class_count, 0) + non_zero_sum = tf.boolean_mask(K.sum(metric * mask, axis=0), non_zero) + non_zero_count = tf.boolean_mask(class_count, non_zero) + + if verbose: + print('Counts of inputs with class present, metrics for non-absent classes') + print(K.eval(class_count), K.eval(non_zero_sum / non_zero_count)) + + return K.mean(non_zero_sum / non_zero_count) + +def mean_iou(y_true, y_pred, **kwargs): + """ + Compute mean Intersection over Union of two segmentation masks, via Keras. + + Calls metrics_k(y_true, y_pred, metric_name='iou'), see there for allowed kwargs. + """ + return seg_metrics(y_true, y_pred, metric_name='iou', **kwargs) +def Mean_IOU(y_true, y_pred): + nb_classes = K.int_shape(y_pred)[-1] + iou = [] + true_pixels = K.argmax(y_true, axis=-1) + pred_pixels = K.argmax(y_pred, axis=-1) + void_labels = K.equal(K.sum(y_true, axis=-1), 0) + for i in range(0, nb_classes): # exclude first label (background) and last label (void) + true_labels = K.equal(true_pixels, i)# & ~void_labels + pred_labels = K.equal(pred_pixels, i)# & ~void_labels + inter = tf.to_int32(true_labels & pred_labels) + union = tf.to_int32(true_labels | pred_labels) + legal_batches = K.sum(tf.to_int32(true_labels), axis=1)>0 + ious = K.sum(inter, axis=1)/K.sum(union, axis=1) + iou.append(K.mean(tf.gather(ious, indices=tf.where(legal_batches)))) # returns average IoU of the same objects + iou = tf.stack(iou) + legal_labels = ~tf.debugging.is_nan(iou) + iou = tf.gather(iou, indices=tf.where(legal_labels)) + return K.mean(iou) + +def iou_vahid(y_true, y_pred): + nb_classes = tf.shape(y_true)[-1]+tf.to_int32(1) + true_pixels = K.argmax(y_true, axis=-1) + pred_pixels = K.argmax(y_pred, axis=-1) + iou = [] + + for i in tf.range(nb_classes): + tp=K.sum( tf.to_int32( K.equal(true_pixels, i) & K.equal(pred_pixels, i) ) ) + fp=K.sum( tf.to_int32( K.not_equal(true_pixels, i) & K.equal(pred_pixels, i) ) ) + fn=K.sum( tf.to_int32( K.equal(true_pixels, i) & K.not_equal(pred_pixels, i) ) ) + iouh=tp/(tp+fp+fn) + iou.append(iouh) + return K.mean(iou) + + +def IoU_metric(Yi,y_predi): + ## mean Intersection over Union + ## Mean IoU = TP/(FN + TP + FP) + y_predi = np.argmax(y_predi, axis=3) + y_testi = np.argmax(Yi, axis=3) + IoUs = [] + Nclass = int(np.max(Yi)) + 1 + for c in range(Nclass): + TP = np.sum( (Yi == c)&(y_predi==c) ) + FP = np.sum( (Yi != c)&(y_predi==c) ) + FN = np.sum( (Yi == c)&(y_predi != c)) + IoU = TP/float(TP + FP + FN) + IoUs.append(IoU) + return K.cast( np.mean(IoUs) ,dtype='float32' ) + + +def IoU_metric_keras(y_true, y_pred): + ## mean Intersection over Union + ## Mean IoU = TP/(FN + TP + FP) + init = tf.global_variables_initializer() + sess = tf.Session() + sess.run(init) + + return IoU_metric(y_true.eval(session=sess), y_pred.eval(session=sess)) + +def jaccard_distance_loss(y_true, y_pred, smooth=100): + """ + Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|) + = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|)) + + The jaccard distance loss is usefull for unbalanced datasets. This has been + shifted so it converges on 0 and is smoothed to avoid exploding or disapearing + gradient. + + Ref: https://en.wikipedia.org/wiki/Jaccard_index + + @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96 + @author: wassname + """ + intersection = K.sum(K.abs(y_true * y_pred), axis=-1) + sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1) + jac = (intersection + smooth) / (sum_ - intersection + smooth) + return (1 - jac) * smooth + + diff --git a/train/models.py b/train/models.py new file mode 100644 index 0000000..7c806b4 --- /dev/null +++ b/train/models.py @@ -0,0 +1,317 @@ +from keras.models import * +from keras.layers import * +from keras import layers +from keras.regularizers import l2 + +resnet50_Weights_path='./pretrained_model/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5' +IMAGE_ORDERING ='channels_last' +MERGE_AXIS=-1 + + +def one_side_pad( x ): + x = ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING)(x) + if IMAGE_ORDERING == 'channels_first': + x = Lambda(lambda x : x[: , : , :-1 , :-1 ] )(x) + elif IMAGE_ORDERING == 'channels_last': + x = Lambda(lambda x : x[: , :-1 , :-1 , : ] )(x) + return x + +def identity_block(input_tensor, kernel_size, filters, stage, block): + """The identity block is the block that has no conv layer at shortcut. + # Arguments + input_tensor: input tensor + kernel_size: defualt 3, the kernel size of middle conv layer at main path + filters: list of integers, the filterss of 3 conv layer at main path + stage: integer, current stage label, used for generating layer names + block: 'a','b'..., current block label, used for generating layer names + # Returns + Output tensor for the block. + """ + filters1, filters2, filters3 = filters + + if IMAGE_ORDERING == 'channels_last': + bn_axis = 3 + else: + bn_axis = 1 + + conv_name_base = 'res' + str(stage) + block + '_branch' + bn_name_base = 'bn' + str(stage) + block + '_branch' + + x = Conv2D(filters1, (1, 1) , data_format=IMAGE_ORDERING , name=conv_name_base + '2a')(input_tensor) + x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x) + x = Activation('relu')(x) + + x = Conv2D(filters2, kernel_size , data_format=IMAGE_ORDERING , + padding='same', name=conv_name_base + '2b')(x) + x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x) + x = Activation('relu')(x) + + x = Conv2D(filters3 , (1, 1), data_format=IMAGE_ORDERING , name=conv_name_base + '2c')(x) + x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x) + + x = layers.add([x, input_tensor]) + x = Activation('relu')(x) + return x + + +def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)): + """conv_block is the block that has a conv layer at shortcut + # Arguments + input_tensor: input tensor + kernel_size: defualt 3, the kernel size of middle conv layer at main path + filters: list of integers, the filterss of 3 conv layer at main path + stage: integer, current stage label, used for generating layer names + block: 'a','b'..., current block label, used for generating layer names + # Returns + Output tensor for the block. + Note that from stage 3, the first conv layer at main path is with strides=(2,2) + And the shortcut should have strides=(2,2) as well + """ + filters1, filters2, filters3 = filters + + if IMAGE_ORDERING == 'channels_last': + bn_axis = 3 + else: + bn_axis = 1 + + conv_name_base = 'res' + str(stage) + block + '_branch' + bn_name_base = 'bn' + str(stage) + block + '_branch' + + x = Conv2D(filters1, (1, 1) , data_format=IMAGE_ORDERING , strides=strides, + name=conv_name_base + '2a')(input_tensor) + x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x) + x = Activation('relu')(x) + + x = Conv2D(filters2, kernel_size , data_format=IMAGE_ORDERING , padding='same', + name=conv_name_base + '2b')(x) + x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x) + x = Activation('relu')(x) + + x = Conv2D(filters3, (1, 1) , data_format=IMAGE_ORDERING , name=conv_name_base + '2c')(x) + x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x) + + shortcut = Conv2D(filters3, (1, 1) , data_format=IMAGE_ORDERING , strides=strides, + name=conv_name_base + '1')(input_tensor) + shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut) + + x = layers.add([x, shortcut]) + x = Activation('relu')(x) + return x + + +def resnet50_unet_light(n_classes,input_height=224,input_width=224,weight_decay=1e-6,pretraining=False): + assert input_height%32 == 0 + assert input_width%32 == 0 + + + img_input = Input(shape=(input_height,input_width , 3 )) + + if IMAGE_ORDERING == 'channels_last': + bn_axis = 3 + else: + bn_axis = 1 + + x = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(img_input) + x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2),kernel_regularizer=l2(weight_decay), name='conv1')(x) + f1 = x + + x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x) + x = Activation('relu')(x) + x = MaxPooling2D((3, 3) , data_format=IMAGE_ORDERING , strides=(2, 2))(x) + + + x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) + x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') + x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') + f2 = one_side_pad(x ) + + + x = conv_block(x, 3, [128, 128, 512], stage=3, block='a') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') + f3 = x + + x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f') + f4 = x + + x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a') + x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b') + x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c') + f5 = x + + + if pretraining: + model=Model( img_input , x ).load_weights(resnet50_Weights_path) + + + v512_2048 = Conv2D( 512 , (1, 1) , padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) )( f5 ) + v512_2048 = ( BatchNormalization(axis=bn_axis))(v512_2048) + v512_2048 = Activation('relu')(v512_2048) + + + + v512_1024=Conv2D( 512 , (1, 1) , padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) )( f4 ) + v512_1024 = ( BatchNormalization(axis=bn_axis))(v512_1024) + v512_1024 = Activation('relu')(v512_1024) + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(v512_2048) + o = ( concatenate([ o ,v512_1024],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(o) + o = ( Conv2D(512, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) + o = ( concatenate([ o ,f3],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(o) + o = ( Conv2D( 256, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) + o = ( concatenate([o,f2],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D((1,1) , data_format=IMAGE_ORDERING))(o) + o = ( Conv2D( 128 , (3, 3), padding='valid' , data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay) ) )(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) + o = ( concatenate([o,f1],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D((1,1) , data_format=IMAGE_ORDERING ))(o) + o = ( Conv2D( 64 , (3, 3), padding='valid' , data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) ))(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) + o = ( concatenate([o,img_input],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D((1,1) , data_format=IMAGE_ORDERING ))(o) + o = ( Conv2D( 32 , (3, 3), padding='valid' , data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) ))(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + + o = Conv2D( n_classes , (1, 1) , padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) )( o ) + o = ( BatchNormalization(axis=bn_axis))(o) + o = (Activation('softmax'))(o) + + + model = Model( img_input , o ) + return model + +def resnet50_unet(n_classes,input_height=224,input_width=224,weight_decay=1e-6,pretraining=False): + assert input_height%32 == 0 + assert input_width%32 == 0 + + + img_input = Input(shape=(input_height,input_width , 3 )) + + if IMAGE_ORDERING == 'channels_last': + bn_axis = 3 + else: + bn_axis = 1 + + x = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(img_input) + x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2),kernel_regularizer=l2(weight_decay), name='conv1')(x) + f1 = x + + x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x) + x = Activation('relu')(x) + x = MaxPooling2D((3, 3) , data_format=IMAGE_ORDERING , strides=(2, 2))(x) + + + x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) + x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') + x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') + f2 = one_side_pad(x ) + + + x = conv_block(x, 3, [128, 128, 512], stage=3, block='a') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') + f3 = x + + x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f') + f4 = x + + x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a') + x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b') + x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c') + f5 = x + + if pretraining: + Model( img_input , x ).load_weights(resnet50_Weights_path) + + v1024_2048 = Conv2D( 1024 , (1, 1) , padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) )( f5 ) + v1024_2048 = ( BatchNormalization(axis=bn_axis))(v1024_2048) + v1024_2048 = Activation('relu')(v1024_2048) + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(v1024_2048) + o = ( concatenate([ o ,f4],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(o) + o = ( Conv2D(512, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) + o = ( concatenate([ o ,f3],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(o) + o = ( Conv2D( 256, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) + o = ( concatenate([o,f2],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D((1,1) , data_format=IMAGE_ORDERING))(o) + o = ( Conv2D( 128 , (3, 3), padding='valid' , data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay) ) )(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) + o = ( concatenate([o,f1],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D((1,1) , data_format=IMAGE_ORDERING ))(o) + o = ( Conv2D( 64 , (3, 3), padding='valid' , data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) ))(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) + o = ( concatenate([o,img_input],axis=MERGE_AXIS ) ) + o = ( ZeroPadding2D((1,1) , data_format=IMAGE_ORDERING ))(o) + o = ( Conv2D( 32 , (3, 3), padding='valid' , data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) ))(o) + o = ( BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + + o = Conv2D( n_classes , (1, 1) , padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) )( o ) + o = ( BatchNormalization(axis=bn_axis))(o) + o = (Activation('softmax'))(o) + + model = Model( img_input , o ) + + + + + return model diff --git a/train/train.py b/train/train.py new file mode 100644 index 0000000..07c7418 --- /dev/null +++ b/train/train.py @@ -0,0 +1,192 @@ +import os +import sys +import tensorflow as tf +from keras.backend.tensorflow_backend import set_session +import keras , warnings +from keras.optimizers import * +from sacred import Experiment +from models import * +from utils import * +from metrics import * + + +def configuration(): + keras.backend.clear_session() + tf.reset_default_graph() + warnings.filterwarnings('ignore') + + os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID' + config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True) + + + config.gpu_options.allow_growth = True + config.gpu_options.per_process_gpu_memory_fraction=0.95#0.95 + config.gpu_options.visible_device_list="0" + set_session(tf.Session(config=config)) + +def get_dirs_or_files(input_data): + if os.path.isdir(input_data): + image_input, labels_input = os.path.join(input_data, 'images/'), os.path.join(input_data, 'labels/') + # Check if training dir exists + assert os.path.isdir(image_input), "{} is not a directory".format(image_input) + assert os.path.isdir(labels_input), "{} is not a directory".format(labels_input) + return image_input, labels_input + +ex = Experiment() + +@ex.config +def config_params(): + n_classes=None # Number of classes. If your case study is binary case the set it to 2 and otherwise give your number of cases. + n_epochs=1 + input_height=224*1 + input_width=224*1 + weight_decay=1e-6 # Weight decay of l2 regularization of model layers. + n_batch=1 # Number of batches at each iteration. + learning_rate=1e-4 + patches=False # Make patches of image in order to use all information of image. In the case of page + # extraction this should be set to false since model should see all image. + augmentation=False + flip_aug=False # Flip image (augmentation). + elastic_aug=False # Elastic transformation (augmentation). + blur_aug=False # Blur patches of image (augmentation). + scaling=False # Scaling of patches (augmentation) will be imposed if this set to true. + binarization=False # Otsu thresholding. Used for augmentation in the case of binary case like textline prediction. For multicases should not be applied. + dir_train=None # Directory of training dataset (sub-folders should be named images and labels). + dir_eval=None # Directory of validation dataset (sub-folders should be named images and labels). + dir_output=None # Directory of output where the model should be saved. + pretraining=False # Set true to load pretrained weights of resnet50 encoder. + weighted_loss=False # Set True if classes are unbalanced and you want to use weighted loss function. + scaling_bluring=False + rotation: False + scaling_binarization=False + blur_k=['blur','guass','median'] # Used in order to blur image. Used for augmentation. + scales=[0.9 , 1.1 ] # Scale patches with these scales. Used for augmentation. + flip_index=[0,1] # Flip image. Used for augmentation. + + +@ex.automain +def run(n_classes,n_epochs,input_height, + input_width,weight_decay,weighted_loss, + n_batch,patches,augmentation,flip_aug,blur_aug,scaling, binarization, + blur_k,scales,dir_train, + scaling_bluring,scaling_binarization,rotation, + flip_index,dir_eval ,dir_output,pretraining,learning_rate): + + dir_img,dir_seg=get_dirs_or_files(dir_train) + dir_img_val,dir_seg_val=get_dirs_or_files(dir_eval) + + # make first a directory in output for both training and evaluations in order to flow data from these directories. + dir_train_flowing=os.path.join(dir_output,'train') + dir_eval_flowing=os.path.join(dir_output,'eval') + + dir_flow_train_imgs=os.path.join(dir_train_flowing,'images') + dir_flow_train_labels=os.path.join(dir_train_flowing,'labels') + + dir_flow_eval_imgs=os.path.join(dir_eval_flowing,'images') + dir_flow_eval_labels=os.path.join(dir_eval_flowing,'labels') + + if os.path.isdir(dir_train_flowing): + os.system('rm -rf '+dir_train_flowing) + os.makedirs(dir_train_flowing) + else: + os.makedirs(dir_train_flowing) + + if os.path.isdir(dir_eval_flowing): + os.system('rm -rf '+dir_eval_flowing) + os.makedirs(dir_eval_flowing) + else: + os.makedirs(dir_eval_flowing) + + + os.mkdir(dir_flow_train_imgs) + os.mkdir(dir_flow_train_labels) + + os.mkdir(dir_flow_eval_imgs) + os.mkdir(dir_flow_eval_labels) + + + + #set the gpu configuration + configuration() + + + #writing patches into a sub-folder in order to be flowed from directory. + provide_patches(dir_img,dir_seg,dir_flow_train_imgs, + dir_flow_train_labels, + input_height,input_width,blur_k,blur_aug, + flip_aug,binarization,scaling,scales,flip_index, + scaling_bluring,scaling_binarization,rotation, + augmentation=augmentation,patches=patches) + + provide_patches(dir_img_val,dir_seg_val,dir_flow_eval_imgs, + dir_flow_eval_labels, + input_height,input_width,blur_k,blur_aug, + flip_aug,binarization,scaling,scales,flip_index, + scaling_bluring,scaling_binarization,rotation, + augmentation=False,patches=patches) + + if weighted_loss: + weights=np.zeros(n_classes) + for obj in os.listdir(dir_seg): + label_obj=cv2.imread(dir_seg+'/'+obj) + label_obj_one_hot=get_one_hot( label_obj,label_obj.shape[0],label_obj.shape[1],n_classes) + weights+=(label_obj_one_hot.sum(axis=0)).sum(axis=0) + + + weights=1.00/weights + + weights=weights/float(np.sum(weights)) + weights=weights/float(np.min(weights)) + weights=weights/float(np.sum(weights)) + + + + + #get our model. + model = resnet50_unet(n_classes, input_height, input_width,weight_decay,pretraining) + + #if you want to see the model structure just uncomment model summary. + #model.summary() + + + if not weighted_loss: + model.compile(loss='categorical_crossentropy', + optimizer = Adam(lr=learning_rate),metrics=['accuracy']) + if weighted_loss: + model.compile(loss=weighted_categorical_crossentropy(weights), + optimizer = Adam(lr=learning_rate),metrics=['accuracy']) + + mc = keras.callbacks.ModelCheckpoint('weights{epoch:08d}.h5', + save_weights_only=True, period=1) + + + #generating train and evaluation data + train_gen = data_gen(dir_flow_train_imgs,dir_flow_train_labels, batch_size = n_batch, + input_height=input_height, input_width=input_width,n_classes=n_classes ) + val_gen = data_gen(dir_flow_eval_imgs,dir_flow_eval_labels, batch_size = n_batch, + input_height=input_height, input_width=input_width,n_classes=n_classes ) + + + model.fit_generator( + train_gen, + steps_per_epoch=int(len(os.listdir(dir_flow_train_imgs))/n_batch), + validation_data=val_gen, + validation_steps=1, + epochs=n_epochs) + + + + os.system('rm -rf '+dir_train_flowing) + os.system('rm -rf '+dir_eval_flowing) + + model.save(dir_output+'/'+'model'+'.h5') + + + + + + + + + + diff --git a/train/utils.py b/train/utils.py new file mode 100644 index 0000000..afdc9e5 --- /dev/null +++ b/train/utils.py @@ -0,0 +1,336 @@ +import os +import cv2 +import numpy as np +import seaborn as sns +from scipy.ndimage.interpolation import map_coordinates +from scipy.ndimage.filters import gaussian_filter +import random +from tqdm import tqdm + + + + +def bluring(img_in,kind): + if kind=='guass': + img_blur = cv2.GaussianBlur(img_in,(5,5),0) + elif kind=="median": + img_blur = cv2.medianBlur(img_in,5) + elif kind=='blur': + img_blur=cv2.blur(img_in,(5,5)) + return img_blur + +def color_images(seg, n_classes): + ann_u=range(n_classes) + if len(np.shape(seg))==3: + seg=seg[:,:,0] + + seg_img=np.zeros((np.shape(seg)[0],np.shape(seg)[1],3)).astype(float) + colors=sns.color_palette("hls", n_classes) + + for c in ann_u: + c=int(c) + segl=(seg==c) + seg_img[:,:,0]+=segl*(colors[c][0]) + seg_img[:,:,1]+=segl*(colors[c][1]) + seg_img[:,:,2]+=segl*(colors[c][2]) + return seg_img + + +def resize_image(seg_in,input_height,input_width): + return cv2.resize(seg_in,(input_width,input_height),interpolation=cv2.INTER_NEAREST) +def get_one_hot(seg,input_height,input_width,n_classes): + seg=seg[:,:,0] + seg_f=np.zeros((input_height, input_width,n_classes)) + for j in range(n_classes): + seg_f[:,:,j]=(seg==j).astype(int) + return seg_f + + +def IoU(Yi,y_predi): + ## mean Intersection over Union + ## Mean IoU = TP/(FN + TP + FP) + + IoUs = [] + classes_true=np.unique(Yi) + for c in classes_true: + TP = np.sum( (Yi == c)&(y_predi==c) ) + FP = np.sum( (Yi != c)&(y_predi==c) ) + FN = np.sum( (Yi == c)&(y_predi != c)) + IoU = TP/float(TP + FP + FN) + print("class {:02.0f}: #TP={:6.0f}, #FP={:6.0f}, #FN={:5.0f}, IoU={:4.3f}".format(c,TP,FP,FN,IoU)) + IoUs.append(IoU) + mIoU = np.mean(IoUs) + print("_________________") + print("Mean IoU: {:4.3f}".format(mIoU)) + return mIoU +def data_gen(img_folder, mask_folder, batch_size,input_height, input_width,n_classes): + c = 0 + n = os.listdir(img_folder) #List of training images + random.shuffle(n) + while True: + img = np.zeros((batch_size, input_height, input_width, 3)).astype('float') + mask = np.zeros((batch_size, input_height, input_width, n_classes)).astype('float') + + for i in range(c, c+batch_size): #initially from 0 to 16, c = 0. + #print(img_folder+'/'+n[i]) + filename=n[i].split('.')[0] + train_img = cv2.imread(img_folder+'/'+n[i])/255. + train_img = cv2.resize(train_img, (input_width, input_height),interpolation=cv2.INTER_NEAREST)# Read an image from folder and resize + + img[i-c] = train_img #add to array - img[0], img[1], and so on. + train_mask = cv2.imread(mask_folder+'/'+filename+'.png') + #print(mask_folder+'/'+filename+'.png') + #print(train_mask.shape) + train_mask = get_one_hot( resize_image(train_mask,input_height,input_width),input_height,input_width,n_classes) + #train_mask = train_mask.reshape(224, 224, 1) # Add extra dimension for parity with train_img size [512 * 512 * 3] + + mask[i-c] = train_mask + + c+=batch_size + if(c+batch_size>=len(os.listdir(img_folder))): + c=0 + random.shuffle(n) + yield img, mask + +def otsu_copy(img): + img_r=np.zeros(img.shape) + img1=img[:,:,0] + img2=img[:,:,1] + img3=img[:,:,2] + _, threshold1 = cv2.threshold(img1, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) + _, threshold2 = cv2.threshold(img2, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) + _, threshold3 = cv2.threshold(img3, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) + img_r[:,:,0]=threshold1 + img_r[:,:,1]=threshold1 + img_r[:,:,2]=threshold1 + return img_r + +def rotation_90(img): + img_rot=np.zeros((img.shape[1],img.shape[0],img.shape[2])) + img_rot[:,:,0]=img[:,:,0].T + img_rot[:,:,1]=img[:,:,1].T + img_rot[:,:,2]=img[:,:,2].T + return img_rot + +def get_patches(dir_img_f,dir_seg_f,img,label,height,width,indexer): + + + img_h=img.shape[0] + img_w=img.shape[1] + + nxf=img_w/float(width) + nyf=img_h/float(height) + + if nxf>int(nxf): + nxf=int(nxf)+1 + if nyf>int(nyf): + nyf=int(nyf)+1 + + nxf=int(nxf) + nyf=int(nyf) + + for i in range(nxf): + for j in range(nyf): + index_x_d=i*width + index_x_u=(i+1)*width + + index_y_d=j*height + index_y_u=(j+1)*height + + if index_x_u>img_w: + index_x_u=img_w + index_x_d=img_w-width + if index_y_u>img_h: + index_y_u=img_h + index_y_d=img_h-height + + + img_patch=img[index_y_d:index_y_u,index_x_d:index_x_u,:] + label_patch=label[index_y_d:index_y_u,index_x_d:index_x_u,:] + + cv2.imwrite(dir_img_f+'/img_'+str(indexer)+'.png', img_patch ) + cv2.imwrite(dir_seg_f+'/img_'+str(indexer)+'.png' , label_patch ) + indexer+=1 + return indexer + + + +def get_patches_num_scale(dir_img_f,dir_seg_f,img,label,height,width,indexer,scaler): + + + img_h=img.shape[0] + img_w=img.shape[1] + + height_scale=int(height*scaler) + width_scale=int(width*scaler) + + + nxf=img_w/float(width_scale) + nyf=img_h/float(height_scale) + + if nxf>int(nxf): + nxf=int(nxf)+1 + if nyf>int(nyf): + nyf=int(nyf)+1 + + nxf=int(nxf) + nyf=int(nyf) + + for i in range(nxf): + for j in range(nyf): + index_x_d=i*width_scale + index_x_u=(i+1)*width_scale + + index_y_d=j*height_scale + index_y_u=(j+1)*height_scale + + if index_x_u>img_w: + index_x_u=img_w + index_x_d=img_w-width_scale + if index_y_u>img_h: + index_y_u=img_h + index_y_d=img_h-height_scale + + + img_patch=img[index_y_d:index_y_u,index_x_d:index_x_u,:] + label_patch=label[index_y_d:index_y_u,index_x_d:index_x_u,:] + + img_patch=resize_image(img_patch,height,width) + label_patch=resize_image(label_patch,height,width) + + cv2.imwrite(dir_img_f+'/img_'+str(indexer)+'.png', img_patch ) + cv2.imwrite(dir_seg_f+'/img_'+str(indexer)+'.png' , label_patch ) + indexer+=1 + + return indexer + + + +def provide_patches(dir_img,dir_seg,dir_flow_train_imgs, + dir_flow_train_labels, + input_height,input_width,blur_k,blur_aug, + flip_aug,binarization,scaling,scales,flip_index, + scaling_bluring,scaling_binarization,rotation, + augmentation=False,patches=False): + + imgs_cv_train=np.array(os.listdir(dir_img)) + segs_cv_train=np.array(os.listdir(dir_seg)) + + indexer=0 + for im, seg_i in tqdm(zip(imgs_cv_train,segs_cv_train)): + img_name=im.split('.')[0] + + if not patches: + cv2.imwrite(dir_flow_train_imgs+'/img_'+str(indexer)+'.png', resize_image(cv2.imread(dir_img+'/'+im),input_height,input_width ) ) + cv2.imwrite(dir_flow_train_labels+'/img_'+str(indexer)+'.png' , resize_image(cv2.imread(dir_seg+'/'+img_name+'.png'),input_height,input_width ) ) + indexer+=1 + + if augmentation: + if rotation: + cv2.imwrite(dir_flow_train_imgs+'/img_'+str(indexer)+'.png', + rotation_90( resize_image(cv2.imread(dir_img+'/'+im), + input_height,input_width) ) ) + + + cv2.imwrite(dir_flow_train_labels+'/img_'+str(indexer)+'.png', + rotation_90 ( resize_image(cv2.imread(dir_seg+'/'+img_name+'.png'), + input_height,input_width) ) ) + indexer+=1 + + if flip_aug: + for f_i in flip_index: + cv2.imwrite(dir_flow_train_imgs+'/img_'+str(indexer)+'.png', + resize_image(cv2.flip(cv2.imread(dir_img+'/'+im),f_i),input_height,input_width) ) + + cv2.imwrite(dir_flow_train_labels+'/img_'+str(indexer)+'.png' , + resize_image(cv2.flip(cv2.imread(dir_seg+'/'+img_name+'.png'),f_i),input_height,input_width) ) + indexer+=1 + + if blur_aug: + for blur_i in blur_k: + cv2.imwrite(dir_flow_train_imgs+'/img_'+str(indexer)+'.png', + (resize_image(bluring(cv2.imread(dir_img+'/'+im),blur_i),input_height,input_width) ) ) + + cv2.imwrite(dir_flow_train_labels+'/img_'+str(indexer)+'.png' , + resize_image(cv2.imread(dir_seg+'/'+img_name+'.png'),input_height,input_width) ) + indexer+=1 + + + if binarization: + cv2.imwrite(dir_flow_train_imgs+'/img_'+str(indexer)+'.png', + resize_image(otsu_copy( cv2.imread(dir_img+'/'+im)),input_height,input_width )) + + cv2.imwrite(dir_flow_train_labels+'/img_'+str(indexer)+'.png', + resize_image( cv2.imread(dir_seg+'/'+img_name+'.png'),input_height,input_width )) + indexer+=1 + + + + + + + if patches: + + indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, + cv2.imread(dir_img+'/'+im),cv2.imread(dir_seg+'/'+img_name+'.png'), + input_height,input_width,indexer=indexer) + + if augmentation: + + if rotation: + + + indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, + rotation_90( cv2.imread(dir_img+'/'+im) ), + rotation_90( cv2.imread(dir_seg+'/'+img_name+'.png') ), + input_height,input_width,indexer=indexer) + if flip_aug: + for f_i in flip_index: + + indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, + cv2.flip( cv2.imread(dir_img+'/'+im) , f_i), + cv2.flip( cv2.imread(dir_seg+'/'+img_name+'.png') ,f_i), + input_height,input_width,indexer=indexer) + if blur_aug: + for blur_i in blur_k: + indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, + bluring( cv2.imread(dir_img+'/'+im) , blur_i), + cv2.imread(dir_seg+'/'+img_name+'.png'), + input_height,input_width,indexer=indexer) + + + if scaling: + for sc_ind in scales: + indexer=get_patches_num_scale(dir_flow_train_imgs,dir_flow_train_labels, + cv2.imread(dir_img+'/'+im) , + cv2.imread(dir_seg+'/'+img_name+'.png'), + input_height,input_width,indexer=indexer,scaler=sc_ind) + if binarization: + + indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, + otsu_copy( cv2.imread(dir_img+'/'+im)), + cv2.imread(dir_seg+'/'+img_name+'.png'), + input_height,input_width,indexer=indexer) + + + + if scaling_bluring: + for sc_ind in scales: + for blur_i in blur_k: + indexer=get_patches_num_scale(dir_flow_train_imgs,dir_flow_train_labels, + bluring( cv2.imread(dir_img+'/'+im) , blur_i) , + cv2.imread(dir_seg+'/'+img_name+'.png') , + input_height,input_width,indexer=indexer,scaler=sc_ind) + + if scaling_binarization: + for sc_ind in scales: + indexer=get_patches_num_scale(dir_flow_train_imgs,dir_flow_train_labels, + otsu_copy( cv2.imread(dir_img+'/'+im)) , + cv2.imread(dir_seg+'/'+img_name+'.png'), + input_height,input_width,indexer=indexer,scaler=sc_ind) + + + + + + From 8084e136ba67513caa4e5309be70caff2b75fbea Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 10 Dec 2019 11:57:37 +0100 Subject: [PATCH 012/492] Update README --- train/README.md | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/train/README.md b/train/README.md index c4dc27e..16e5dce 100644 --- a/train/README.md +++ b/train/README.md @@ -4,16 +4,21 @@ # Ground truth format - Lables for each pixel is identified by a number . So if you have a binary case n_classes should be set to 2 and + Lables for each pixel is identified by a number . So if you have a + binary case n_classes should be set to 2 and labels should be 0 and 1 for each class and pixel. - In the case of multiclass just set n_classes to the number of classes you have and the try to produce the labels + In the case of multiclass just set n_classes to the number of classes + you have and the try to produce the labels by pixels set from 0 , 1 ,2 .., n_classes-1. The labels format should be png. If you have an image label for binary case it should look like this: - Label: [ [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]], [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ,[[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ] - this means that you have an image by 3*4*3 and pixel[0,0] belongs to class 1 and pixel[0,1] to class 0. + Label: [ [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]], + [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] , + [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ] + This means that you have an image by 3*4*3 and pixel[0,0] belongs + to class 1 and pixel[0,1] to class 0. # Training , evaluation and output train and evaluation folder should have subfolder of images and labels. @@ -21,6 +26,11 @@ # Patches - if you want to train your model with patches, the height and width of patches should be defined and also number of + if you want to train your model with patches, the height and width of + patches should be defined and also number of batchs (how many patches should be seen by model by each iteration). - In the case that model should see the image once, like page extraction, the patches should be set to false. \ No newline at end of file + In the case that model should see the image once, like page extraction, + the patches should be set to false. +# Pretrained encoder +Download weights from this limk and add it to pretrained_model folder. +https://file.spk-berlin.de:8443/pretrained_encoder/ From 4229ad92d7460ed9fdc63a2837527586fde18de3 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 10 Dec 2019 11:58:02 +0100 Subject: [PATCH 013/492] Update README.md --- train/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train/README.md b/train/README.md index 16e5dce..3ba90a1 100644 --- a/train/README.md +++ b/train/README.md @@ -32,5 +32,5 @@ In the case that model should see the image once, like page extraction, the patches should be set to false. # Pretrained encoder -Download weights from this limk and add it to pretrained_model folder. +Download weights from this link and add it to pretrained_model folder. https://file.spk-berlin.de:8443/pretrained_encoder/ From b5f9b9c54ad4ad746ab93bc7f81652f9158d75e5 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 10 Dec 2019 14:01:55 +0100 Subject: [PATCH 014/492] Update main.py --- train/train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train/train.py b/train/train.py index 07c7418..baeb847 100644 --- a/train/train.py +++ b/train/train.py @@ -169,7 +169,7 @@ def run(n_classes,n_epochs,input_height, model.fit_generator( train_gen, - steps_per_epoch=int(len(os.listdir(dir_flow_train_imgs))/n_batch), + steps_per_epoch=int(len(os.listdir(dir_flow_train_imgs))/n_batch)-1, validation_data=val_gen, validation_steps=1, epochs=n_epochs) From df536d62c04825e05ea5aceb6067616db3b357a8 Mon Sep 17 00:00:00 2001 From: Clemens Neudecker <952378+cneud@users.noreply.github.com> Date: Tue, 10 Dec 2019 16:39:41 +0100 Subject: [PATCH 015/492] Add LICENSE --- train/LICENSE | 201 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 201 insertions(+) create mode 100644 train/LICENSE diff --git a/train/LICENSE b/train/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/train/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. From ad1360b179e0f4c39882bdd119e1760c7747db4d Mon Sep 17 00:00:00 2001 From: Clemens Neudecker <952378+cneud@users.noreply.github.com> Date: Wed, 15 Jan 2020 19:37:27 +0100 Subject: [PATCH 016/492] Update README.md --- train/README.md | 65 +++++++++++++++++++++++++++++-------------------- 1 file changed, 38 insertions(+), 27 deletions(-) diff --git a/train/README.md b/train/README.md index 3ba90a1..4c49f39 100644 --- a/train/README.md +++ b/train/README.md @@ -1,36 +1,47 @@ -# Train - just run: python train.py with config_params.json +# Pixelwise Segmentation +> Pixelwise segmentation for document images + +## Introduction +This repository contains the source code for training an encoder model for document image segmentation. + +## Installation +Either clone the repository via `git clone https://github.com/qurator-spk/sbb_pixelwise_segmentation.git` or download and unpack the [ZIP](https://github.com/qurator-spk/sbb_pixelwise_segmentation/archive/master.zip). + +## Usage + +### Train +To train a model, run: ``python train.py with config_params.json`` + +### Ground truth format +Lables for each pixel are identified by a number. So if you have a +binary case, ``n_classes`` should be set to ``2`` and labels should +be ``0`` and ``1`` for each class and pixel. + +In the case of multiclass, just set ``n_classes`` to the number of classes +you have and the try to produce the labels by pixels set from ``0 , 1 ,2 .., n_classes-1``. +The labels format should be png. - -# Ground truth format - - Lables for each pixel is identified by a number . So if you have a - binary case n_classes should be set to 2 and - labels should be 0 and 1 for each class and pixel. - In the case of multiclass just set n_classes to the number of classes - you have and the try to produce the labels - by pixels set from 0 , 1 ,2 .., n_classes-1. - The labels format should be png. - - If you have an image label for binary case it should look like this: +If you have an image label for a binary case it should look like this: Label: [ [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]], [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] , [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ] - This means that you have an image by 3*4*3 and pixel[0,0] belongs - to class 1 and pixel[0,1] to class 0. -# Training , evaluation and output - train and evaluation folder should have subfolder of images and labels. - And output folder should be empty folder which the output model will be written there. + This means that you have an image by `3*4*3` and `pixel[0,0]` belongs + to class `1` and `pixel[0,1]` belongs to class `0`. + +### Training , evaluation and output +The train and evaluation folders should contain subfolders of images and labels. +The output folder should be an empty folder where the output model will be written to. # Patches +If you want to train your model with patches, the height and width of +the patches should be defined and also the number of batches (how many patches +should be seen by the model in each iteration). + +In the case that the model should see the image once, like page extraction, +patches should be set to ``false``. - if you want to train your model with patches, the height and width of - patches should be defined and also number of - batchs (how many patches should be seen by model by each iteration). - In the case that model should see the image once, like page extraction, - the patches should be set to false. -# Pretrained encoder -Download weights from this link and add it to pretrained_model folder. -https://file.spk-berlin.de:8443/pretrained_encoder/ +### Pretrained encoder +Download our pretrained weights and add them to a ``pretrained_model`` folder: +~~https://file.spk-berlin.de:8443/pretrained_encoder/~~ From 66d7138343edc9fe3d7d918198a1f20b4112e42b Mon Sep 17 00:00:00 2001 From: Clemens Neudecker <952378+cneud@users.noreply.github.com> Date: Wed, 15 Jan 2020 19:43:31 +0100 Subject: [PATCH 017/492] Update README.md --- train/README.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/train/README.md b/train/README.md index 4c49f39..18495a5 100644 --- a/train/README.md +++ b/train/README.md @@ -7,6 +7,9 @@ This repository contains the source code for training an encoder model for docum ## Installation Either clone the repository via `git clone https://github.com/qurator-spk/sbb_pixelwise_segmentation.git` or download and unpack the [ZIP](https://github.com/qurator-spk/sbb_pixelwise_segmentation/archive/master.zip). +### Pretrained encoder +Download our pretrained weights and add them to a ``pretrained_model`` folder: +~~https://file.spk-berlin.de:8443/pretrained_encoder/~~ ## Usage ### Train @@ -34,7 +37,7 @@ If you have an image label for a binary case it should look like this: The train and evaluation folders should contain subfolders of images and labels. The output folder should be an empty folder where the output model will be written to. -# Patches +### Patches If you want to train your model with patches, the height and width of the patches should be defined and also the number of batches (how many patches should be seen by the model in each iteration). @@ -42,6 +45,4 @@ should be seen by the model in each iteration). In the case that the model should see the image once, like page extraction, patches should be set to ``false``. -### Pretrained encoder -Download our pretrained weights and add them to a ``pretrained_model`` folder: -~~https://file.spk-berlin.de:8443/pretrained_encoder/~~ + From 4e216475dca544515488071f035cde639d053584 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 16 Jan 2020 15:53:39 +0100 Subject: [PATCH 018/492] Update README.md --- train/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train/README.md b/train/README.md index 18495a5..ede05dd 100644 --- a/train/README.md +++ b/train/README.md @@ -9,7 +9,7 @@ Either clone the repository via `git clone https://github.com/qurator-spk/sbb_pi ### Pretrained encoder Download our pretrained weights and add them to a ``pretrained_model`` folder: -~~https://file.spk-berlin.de:8443/pretrained_encoder/~~ +https://qurator-data.de/pretrained_encoder/ ## Usage ### Train From b54285b19684e6a6b86a52448dc9afd4a38e95ea Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 16 Jan 2020 16:05:06 +0100 Subject: [PATCH 019/492] Update README.md --- train/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train/README.md b/train/README.md index ede05dd..d0d26d6 100644 --- a/train/README.md +++ b/train/README.md @@ -9,7 +9,7 @@ Either clone the repository via `git clone https://github.com/qurator-spk/sbb_pi ### Pretrained encoder Download our pretrained weights and add them to a ``pretrained_model`` folder: -https://qurator-data.de/pretrained_encoder/ +https://qurator-data.de/sbb_pixelwise_segmentation/pretrained_encoder/ ## Usage ### Train From 070c2e046259441b712d11be21eb26c6db191b71 Mon Sep 17 00:00:00 2001 From: vahid Date: Tue, 22 Jun 2021 14:20:51 -0400 Subject: [PATCH 020/492] first updates, padding, rotations --- train/config_params.json | 22 ++-- train/train.py | 183 ++++++++++++++------------- train/utils.py | 265 +++++++++++++++++++++++++++++++-------- 3 files changed, 319 insertions(+), 151 deletions(-) diff --git a/train/config_params.json b/train/config_params.json index 5066444..d8f1ac5 100644 --- a/train/config_params.json +++ b/train/config_params.json @@ -1,24 +1,24 @@ { - "n_classes" : 2, - "n_epochs" : 2, + "n_classes" : 3, + "n_epochs" : 1, "input_height" : 448, - "input_width" : 896, + "input_width" : 672, "weight_decay" : 1e-6, - "n_batch" : 1, + "n_batch" : 2, "learning_rate": 1e-4, "patches" : true, "pretraining" : true, - "augmentation" : false, + "augmentation" : true, "flip_aug" : false, - "elastic_aug" : false, - "blur_aug" : false, + "blur_aug" : true, "scaling" : false, "binarization" : false, "scaling_bluring" : false, "scaling_binarization" : false, + "scaling_flip" : false, "rotation": false, - "weighted_loss": true, - "dir_train": "../train", - "dir_eval": "../eval", - "dir_output": "../output" + "rotation_not_90": false, + "dir_train": "/home/vahid/Documents/handwrittens_train/train", + "dir_eval": "/home/vahid/Documents/handwrittens_train/eval", + "dir_output": "/home/vahid/Documents/handwrittens_train/output" } diff --git a/train/train.py b/train/train.py index baeb847..c256d83 100644 --- a/train/train.py +++ b/train/train.py @@ -8,7 +8,7 @@ from sacred import Experiment from models import * from utils import * from metrics import * - +from keras.models import load_model def configuration(): keras.backend.clear_session() @@ -47,7 +47,6 @@ def config_params(): # extraction this should be set to false since model should see all image. augmentation=False flip_aug=False # Flip image (augmentation). - elastic_aug=False # Elastic transformation (augmentation). blur_aug=False # Blur patches of image (augmentation). scaling=False # Scaling of patches (augmentation) will be imposed if this set to true. binarization=False # Otsu thresholding. Used for augmentation in the case of binary case like textline prediction. For multicases should not be applied. @@ -55,110 +54,116 @@ def config_params(): dir_eval=None # Directory of validation dataset (sub-folders should be named images and labels). dir_output=None # Directory of output where the model should be saved. pretraining=False # Set true to load pretrained weights of resnet50 encoder. - weighted_loss=False # Set True if classes are unbalanced and you want to use weighted loss function. scaling_bluring=False - rotation: False scaling_binarization=False + scaling_flip=False + thetha=[10,-10] blur_k=['blur','guass','median'] # Used in order to blur image. Used for augmentation. - scales=[0.9 , 1.1 ] # Scale patches with these scales. Used for augmentation. - flip_index=[0,1] # Flip image. Used for augmentation. + scales= [ 0.5, 2 ] # Scale patches with these scales. Used for augmentation. + flip_index=[0,1,-1] # Flip image. Used for augmentation. @ex.automain def run(n_classes,n_epochs,input_height, - input_width,weight_decay,weighted_loss, - n_batch,patches,augmentation,flip_aug,blur_aug,scaling, binarization, + input_width,weight_decay, + n_batch,patches,augmentation,flip_aug + ,blur_aug,scaling, binarization, blur_k,scales,dir_train, scaling_bluring,scaling_binarization,rotation, + rotation_not_90,thetha,scaling_flip, flip_index,dir_eval ,dir_output,pretraining,learning_rate): - dir_img,dir_seg=get_dirs_or_files(dir_train) - dir_img_val,dir_seg_val=get_dirs_or_files(dir_eval) + data_is_provided = False - # make first a directory in output for both training and evaluations in order to flow data from these directories. - dir_train_flowing=os.path.join(dir_output,'train') - dir_eval_flowing=os.path.join(dir_output,'eval') - - dir_flow_train_imgs=os.path.join(dir_train_flowing,'images') - dir_flow_train_labels=os.path.join(dir_train_flowing,'labels') - - dir_flow_eval_imgs=os.path.join(dir_eval_flowing,'images') - dir_flow_eval_labels=os.path.join(dir_eval_flowing,'labels') - - if os.path.isdir(dir_train_flowing): - os.system('rm -rf '+dir_train_flowing) - os.makedirs(dir_train_flowing) + if data_is_provided: + dir_train_flowing=os.path.join(dir_output,'train') + dir_eval_flowing=os.path.join(dir_output,'eval') + + dir_flow_train_imgs=os.path.join(dir_train_flowing,'images') + dir_flow_train_labels=os.path.join(dir_train_flowing,'labels') + + dir_flow_eval_imgs=os.path.join(dir_eval_flowing,'images') + dir_flow_eval_labels=os.path.join(dir_eval_flowing,'labels') + + configuration() + else: - os.makedirs(dir_train_flowing) + dir_img,dir_seg=get_dirs_or_files(dir_train) + dir_img_val,dir_seg_val=get_dirs_or_files(dir_eval) - if os.path.isdir(dir_eval_flowing): - os.system('rm -rf '+dir_eval_flowing) - os.makedirs(dir_eval_flowing) - else: - os.makedirs(dir_eval_flowing) + # make first a directory in output for both training and evaluations in order to flow data from these directories. + dir_train_flowing=os.path.join(dir_output,'train') + dir_eval_flowing=os.path.join(dir_output,'eval') - - os.mkdir(dir_flow_train_imgs) - os.mkdir(dir_flow_train_labels) - - os.mkdir(dir_flow_eval_imgs) - os.mkdir(dir_flow_eval_labels) - - - - #set the gpu configuration - configuration() - - - #writing patches into a sub-folder in order to be flowed from directory. - provide_patches(dir_img,dir_seg,dir_flow_train_imgs, - dir_flow_train_labels, - input_height,input_width,blur_k,blur_aug, - flip_aug,binarization,scaling,scales,flip_index, - scaling_bluring,scaling_binarization,rotation, - augmentation=augmentation,patches=patches) - - provide_patches(dir_img_val,dir_seg_val,dir_flow_eval_imgs, - dir_flow_eval_labels, - input_height,input_width,blur_k,blur_aug, - flip_aug,binarization,scaling,scales,flip_index, - scaling_bluring,scaling_binarization,rotation, - augmentation=False,patches=patches) + dir_flow_train_imgs=os.path.join(dir_train_flowing,'images/') + dir_flow_train_labels=os.path.join(dir_train_flowing,'labels/') - if weighted_loss: - weights=np.zeros(n_classes) - for obj in os.listdir(dir_seg): - label_obj=cv2.imread(dir_seg+'/'+obj) - label_obj_one_hot=get_one_hot( label_obj,label_obj.shape[0],label_obj.shape[1],n_classes) - weights+=(label_obj_one_hot.sum(axis=0)).sum(axis=0) + dir_flow_eval_imgs=os.path.join(dir_eval_flowing,'images/') + dir_flow_eval_labels=os.path.join(dir_eval_flowing,'labels/') + + if os.path.isdir(dir_train_flowing): + os.system('rm -rf '+dir_train_flowing) + os.makedirs(dir_train_flowing) + else: + os.makedirs(dir_train_flowing) + + if os.path.isdir(dir_eval_flowing): + os.system('rm -rf '+dir_eval_flowing) + os.makedirs(dir_eval_flowing) + else: + os.makedirs(dir_eval_flowing) - weights=1.00/weights + os.mkdir(dir_flow_train_imgs) + os.mkdir(dir_flow_train_labels) - weights=weights/float(np.sum(weights)) - weights=weights/float(np.min(weights)) - weights=weights/float(np.sum(weights)) + os.mkdir(dir_flow_eval_imgs) + os.mkdir(dir_flow_eval_labels) + + + #set the gpu configuration + configuration() - - + + #writing patches into a sub-folder in order to be flowed from directory. + provide_patches(dir_img,dir_seg,dir_flow_train_imgs, + dir_flow_train_labels, + input_height,input_width,blur_k,blur_aug, + flip_aug,binarization,scaling,scales,flip_index, + scaling_bluring,scaling_binarization,rotation, + rotation_not_90,thetha,scaling_flip, + augmentation=augmentation,patches=patches) - #get our model. - model = resnet50_unet(n_classes, input_height, input_width,weight_decay,pretraining) + provide_patches(dir_img_val,dir_seg_val,dir_flow_eval_imgs, + dir_flow_eval_labels, + input_height,input_width,blur_k,blur_aug, + flip_aug,binarization,scaling,scales,flip_index, + scaling_bluring,scaling_binarization,rotation, + rotation_not_90,thetha,scaling_flip, + augmentation=False,patches=patches) + + + continue_train = False + + if continue_train: + model_dir_start = '/home/vahid/Documents/struktur_full_data/output_multi/model_0.h5' + model = load_model (model_dir_start, compile = True, custom_objects={'soft_dice_loss': soft_dice_loss}) + index_start = 1 + else: + #get our model. + index_start = 0 + model = resnet50_unet(n_classes, input_height, input_width,weight_decay,pretraining) #if you want to see the model structure just uncomment model summary. #model.summary() - if not weighted_loss: - model.compile(loss='categorical_crossentropy', - optimizer = Adam(lr=learning_rate),metrics=['accuracy']) - if weighted_loss: - model.compile(loss=weighted_categorical_crossentropy(weights), - optimizer = Adam(lr=learning_rate),metrics=['accuracy']) - - mc = keras.callbacks.ModelCheckpoint('weights{epoch:08d}.h5', - save_weights_only=True, period=1) - + + #model.compile(loss='categorical_crossentropy', + #optimizer = Adam(lr=learning_rate),metrics=['accuracy']) + + model.compile(loss=soft_dice_loss, + optimizer = Adam(lr=learning_rate),metrics=['accuracy']) #generating train and evaluation data train_gen = data_gen(dir_flow_train_imgs,dir_flow_train_labels, batch_size = n_batch, @@ -166,20 +171,20 @@ def run(n_classes,n_epochs,input_height, val_gen = data_gen(dir_flow_eval_imgs,dir_flow_eval_labels, batch_size = n_batch, input_height=input_height, input_width=input_width,n_classes=n_classes ) + for i in range(index_start, n_epochs+index_start): + model.fit_generator( + train_gen, + steps_per_epoch=int(len(os.listdir(dir_flow_train_imgs))/n_batch)-1, + validation_data=val_gen, + validation_steps=1, + epochs=1) + model.save(dir_output+'/'+'model_'+str(i)+'.h5') - model.fit_generator( - train_gen, - steps_per_epoch=int(len(os.listdir(dir_flow_train_imgs))/n_batch)-1, - validation_data=val_gen, - validation_steps=1, - epochs=n_epochs) - - os.system('rm -rf '+dir_train_flowing) os.system('rm -rf '+dir_eval_flowing) - model.save(dir_output+'/'+'model'+'.h5') + #model.save(dir_output+'/'+'model'+'.h5') diff --git a/train/utils.py b/train/utils.py index afdc9e5..a77444e 100644 --- a/train/utils.py +++ b/train/utils.py @@ -6,7 +6,8 @@ from scipy.ndimage.interpolation import map_coordinates from scipy.ndimage.filters import gaussian_filter import random from tqdm import tqdm - +import imutils +import math @@ -19,6 +20,79 @@ def bluring(img_in,kind): img_blur=cv2.blur(img_in,(5,5)) return img_blur +def elastic_transform(image, alpha, sigma,seedj, random_state=None): + + """Elastic deformation of images as described in [Simard2003]_. + .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for + Convolutional Neural Networks applied to Visual Document Analysis", in + Proc. of the International Conference on Document Analysis and + Recognition, 2003. + """ + if random_state is None: + random_state = np.random.RandomState(seedj) + + shape = image.shape + dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha + dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha + dz = np.zeros_like(dx) + + x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2])) + indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1)) + + distored_image = map_coordinates(image, indices, order=1, mode='reflect') + return distored_image.reshape(image.shape) + +def rotation_90(img): + img_rot=np.zeros((img.shape[1],img.shape[0],img.shape[2])) + img_rot[:,:,0]=img[:,:,0].T + img_rot[:,:,1]=img[:,:,1].T + img_rot[:,:,2]=img[:,:,2].T + return img_rot + +def rotatedRectWithMaxArea(w, h, angle): + """ + Given a rectangle of size wxh that has been rotated by 'angle' (in + radians), computes the width and height of the largest possible + axis-aligned rectangle (maximal area) within the rotated rectangle. + """ + if w <= 0 or h <= 0: + return 0,0 + + width_is_longer = w >= h + side_long, side_short = (w,h) if width_is_longer else (h,w) + + # since the solutions for angle, -angle and 180-angle are all the same, + # if suffices to look at the first quadrant and the absolute values of sin,cos: + sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle)) + if side_short <= 2.*sin_a*cos_a*side_long or abs(sin_a-cos_a) < 1e-10: + # half constrained case: two crop corners touch the longer side, + # the other two corners are on the mid-line parallel to the longer line + x = 0.5*side_short + wr,hr = (x/sin_a,x/cos_a) if width_is_longer else (x/cos_a,x/sin_a) + else: + # fully constrained case: crop touches all 4 sides + cos_2a = cos_a*cos_a - sin_a*sin_a + wr,hr = (w*cos_a - h*sin_a)/cos_2a, (h*cos_a - w*sin_a)/cos_2a + + return wr,hr + +def rotate_max_area(image,rotated, rotated_label,angle): + """ image: cv2 image matrix object + angle: in degree + """ + wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], + math.radians(angle)) + h, w, _ = rotated.shape + y1 = h//2 - int(hr/2) + y2 = y1 + int(hr) + x1 = w//2 - int(wr/2) + x2 = x1 + int(wr) + return rotated[y1:y2, x1:x2],rotated_label[y1:y2, x1:x2] +def rotation_not_90_func(img,label,thetha): + rotated=imutils.rotate(img,thetha) + rotated_label=imutils.rotate(label,thetha) + return rotate_max_area(img, rotated,rotated_label,thetha) + def color_images(seg, n_classes): ann_u=range(n_classes) if len(np.shape(seg))==3: @@ -65,7 +139,7 @@ def IoU(Yi,y_predi): return mIoU def data_gen(img_folder, mask_folder, batch_size,input_height, input_width,n_classes): c = 0 - n = os.listdir(img_folder) #List of training images + n = [f for f in os.listdir(img_folder) if not f.startswith('.')]# os.listdir(img_folder) #List of training images random.shuffle(n) while True: img = np.zeros((batch_size, input_height, input_width, 3)).astype('float') @@ -73,18 +147,26 @@ def data_gen(img_folder, mask_folder, batch_size,input_height, input_width,n_cla for i in range(c, c+batch_size): #initially from 0 to 16, c = 0. #print(img_folder+'/'+n[i]) - filename=n[i].split('.')[0] - train_img = cv2.imread(img_folder+'/'+n[i])/255. - train_img = cv2.resize(train_img, (input_width, input_height),interpolation=cv2.INTER_NEAREST)# Read an image from folder and resize - - img[i-c] = train_img #add to array - img[0], img[1], and so on. - train_mask = cv2.imread(mask_folder+'/'+filename+'.png') - #print(mask_folder+'/'+filename+'.png') - #print(train_mask.shape) - train_mask = get_one_hot( resize_image(train_mask,input_height,input_width),input_height,input_width,n_classes) - #train_mask = train_mask.reshape(224, 224, 1) # Add extra dimension for parity with train_img size [512 * 512 * 3] - - mask[i-c] = train_mask + + try: + filename=n[i].split('.')[0] + + train_img = cv2.imread(img_folder+'/'+n[i])/255. + train_img = cv2.resize(train_img, (input_width, input_height),interpolation=cv2.INTER_NEAREST)# Read an image from folder and resize + + img[i-c] = train_img #add to array - img[0], img[1], and so on. + train_mask = cv2.imread(mask_folder+'/'+filename+'.png') + #print(mask_folder+'/'+filename+'.png') + #print(train_mask.shape) + train_mask = get_one_hot( resize_image(train_mask,input_height,input_width),input_height,input_width,n_classes) + #train_mask = train_mask.reshape(224, 224, 1) # Add extra dimension for parity with train_img size [512 * 512 * 3] + + mask[i-c] = train_mask + except: + img[i-c] = np.ones((input_height, input_width, 3)).astype('float') + mask[i-c] = np.zeros((input_height, input_width, n_classes)).astype('float') + + c+=batch_size if(c+batch_size>=len(os.listdir(img_folder))): @@ -104,16 +186,10 @@ def otsu_copy(img): img_r[:,:,1]=threshold1 img_r[:,:,2]=threshold1 return img_r - -def rotation_90(img): - img_rot=np.zeros((img.shape[1],img.shape[0],img.shape[2])) - img_rot[:,:,0]=img[:,:,0].T - img_rot[:,:,1]=img[:,:,1].T - img_rot[:,:,2]=img[:,:,2].T - return img_rot - def get_patches(dir_img_f,dir_seg_f,img,label,height,width,indexer): + if img.shape[0]int(nxf): + nxf=int(nxf)+1 + if nyf>int(nyf): + nyf=int(nyf)+1 + + nxf=int(nxf) + nyf=int(nyf) + + for i in range(nxf): + for j in range(nyf): + index_x_d=i*width_scale + index_x_u=(i+1)*width_scale + + index_y_d=j*height_scale + index_y_u=(j+1)*height_scale + + if index_x_u>img_w: + index_x_u=img_w + index_x_d=img_w-width_scale + if index_y_u>img_h: + index_y_u=img_h + index_y_d=img_h-height_scale + + + img_patch=img[index_y_d:index_y_u,index_x_d:index_x_u,:] + label_patch=label[index_y_d:index_y_u,index_x_d:index_x_u,:] + + #img_patch=resize_image(img_patch,height,width) + #label_patch=resize_image(label_patch,height,width) + + cv2.imwrite(dir_img_f+'/img_'+str(indexer)+'.png', img_patch ) + cv2.imwrite(dir_seg_f+'/img_'+str(indexer)+'.png' , label_patch ) + indexer+=1 + + return indexer def provide_patches(dir_img,dir_seg,dir_flow_train_imgs, @@ -211,6 +366,7 @@ def provide_patches(dir_img,dir_seg,dir_flow_train_imgs, input_height,input_width,blur_k,blur_aug, flip_aug,binarization,scaling,scales,flip_index, scaling_bluring,scaling_binarization,rotation, + rotation_not_90,thetha,scaling_flip, augmentation=False,patches=False): imgs_cv_train=np.array(os.listdir(dir_img)) @@ -218,25 +374,15 @@ def provide_patches(dir_img,dir_seg,dir_flow_train_imgs, indexer=0 for im, seg_i in tqdm(zip(imgs_cv_train,segs_cv_train)): + #print(im, seg_i) img_name=im.split('.')[0] - + print(img_name,'img_name') if not patches: cv2.imwrite(dir_flow_train_imgs+'/img_'+str(indexer)+'.png', resize_image(cv2.imread(dir_img+'/'+im),input_height,input_width ) ) cv2.imwrite(dir_flow_train_labels+'/img_'+str(indexer)+'.png' , resize_image(cv2.imread(dir_seg+'/'+img_name+'.png'),input_height,input_width ) ) indexer+=1 if augmentation: - if rotation: - cv2.imwrite(dir_flow_train_imgs+'/img_'+str(indexer)+'.png', - rotation_90( resize_image(cv2.imread(dir_img+'/'+im), - input_height,input_width) ) ) - - - cv2.imwrite(dir_flow_train_labels+'/img_'+str(indexer)+'.png', - rotation_90 ( resize_image(cv2.imread(dir_seg+'/'+img_name+'.png'), - input_height,input_width) ) ) - indexer+=1 - if flip_aug: for f_i in flip_index: cv2.imwrite(dir_flow_train_imgs+'/img_'+str(indexer)+'.png', @@ -270,10 +416,10 @@ def provide_patches(dir_img,dir_seg,dir_flow_train_imgs, if patches: - + indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, - cv2.imread(dir_img+'/'+im),cv2.imread(dir_seg+'/'+img_name+'.png'), - input_height,input_width,indexer=indexer) + cv2.imread(dir_img+'/'+im),cv2.imread(dir_seg+'/'+img_name+'.png'), + input_height,input_width,indexer=indexer) if augmentation: @@ -284,29 +430,37 @@ def provide_patches(dir_img,dir_seg,dir_flow_train_imgs, rotation_90( cv2.imread(dir_img+'/'+im) ), rotation_90( cv2.imread(dir_seg+'/'+img_name+'.png') ), input_height,input_width,indexer=indexer) + + if rotation_not_90: + + for thetha_i in thetha: + img_max_rotated,label_max_rotated=rotation_not_90_func(cv2.imread(dir_img+'/'+im),cv2.imread(dir_seg+'/'+img_name+'.png'),thetha_i) + indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, + img_max_rotated, + label_max_rotated, + input_height,input_width,indexer=indexer) if flip_aug: for f_i in flip_index: - indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, cv2.flip( cv2.imread(dir_img+'/'+im) , f_i), cv2.flip( cv2.imread(dir_seg+'/'+img_name+'.png') ,f_i), input_height,input_width,indexer=indexer) if blur_aug: for blur_i in blur_k: + indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, bluring( cv2.imread(dir_img+'/'+im) , blur_i), cv2.imread(dir_seg+'/'+img_name+'.png'), - input_height,input_width,indexer=indexer) - + input_height,input_width,indexer=indexer) + if scaling: for sc_ind in scales: - indexer=get_patches_num_scale(dir_flow_train_imgs,dir_flow_train_labels, - cv2.imread(dir_img+'/'+im) , - cv2.imread(dir_seg+'/'+img_name+'.png'), + indexer=get_patches_num_scale_new(dir_flow_train_imgs,dir_flow_train_labels, + cv2.imread(dir_img+'/'+im) , + cv2.imread(dir_seg+'/'+img_name+'.png'), input_height,input_width,indexer=indexer,scaler=sc_ind) if binarization: - indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, otsu_copy( cv2.imread(dir_img+'/'+im)), cv2.imread(dir_seg+'/'+img_name+'.png'), @@ -317,17 +471,26 @@ def provide_patches(dir_img,dir_seg,dir_flow_train_imgs, if scaling_bluring: for sc_ind in scales: for blur_i in blur_k: - indexer=get_patches_num_scale(dir_flow_train_imgs,dir_flow_train_labels, + indexer=get_patches_num_scale_new(dir_flow_train_imgs,dir_flow_train_labels, bluring( cv2.imread(dir_img+'/'+im) , blur_i) , cv2.imread(dir_seg+'/'+img_name+'.png') , input_height,input_width,indexer=indexer,scaler=sc_ind) if scaling_binarization: for sc_ind in scales: - indexer=get_patches_num_scale(dir_flow_train_imgs,dir_flow_train_labels, - otsu_copy( cv2.imread(dir_img+'/'+im)) , - cv2.imread(dir_seg+'/'+img_name+'.png'), + indexer=get_patches_num_scale_new(dir_flow_train_imgs,dir_flow_train_labels, + otsu_copy( cv2.imread(dir_img+'/'+im)) , + cv2.imread(dir_seg+'/'+img_name+'.png'), input_height,input_width,indexer=indexer,scaler=sc_ind) + + if scaling_flip: + for sc_ind in scales: + for f_i in flip_index: + indexer=get_patches_num_scale_new(dir_flow_train_imgs,dir_flow_train_labels, + cv2.flip( cv2.imread(dir_img+'/'+im) , f_i) , + cv2.flip(cv2.imread(dir_seg+'/'+img_name+'.png') ,f_i) , + input_height,input_width,indexer=indexer,scaler=sc_ind) + From 8884b90f052c9d29d10dcce7f8636d41437181b8 Mon Sep 17 00:00:00 2001 From: vahid Date: Tue, 22 Jun 2021 18:47:59 -0400 Subject: [PATCH 021/492] continue training, losses and etc --- train/config_params.json | 14 +++++--- train/train.py | 77 ++++++++++++++++++++++++++++++---------- train/utils.py | 2 -- 3 files changed, 69 insertions(+), 24 deletions(-) diff --git a/train/config_params.json b/train/config_params.json index d8f1ac5..eaa50e1 100644 --- a/train/config_params.json +++ b/train/config_params.json @@ -1,6 +1,6 @@ { "n_classes" : 3, - "n_epochs" : 1, + "n_epochs" : 2, "input_height" : 448, "input_width" : 672, "weight_decay" : 1e-6, @@ -8,16 +8,22 @@ "learning_rate": 1e-4, "patches" : true, "pretraining" : true, - "augmentation" : true, + "augmentation" : false, "flip_aug" : false, - "blur_aug" : true, - "scaling" : false, + "blur_aug" : false, + "scaling" : true, "binarization" : false, "scaling_bluring" : false, "scaling_binarization" : false, "scaling_flip" : false, "rotation": false, "rotation_not_90": false, + "continue_training": false, + "index_start": 0, + "dir_of_start_model": " ", + "weighted_loss": false, + "is_loss_soft_dice": false, + "data_is_provided": false, "dir_train": "/home/vahid/Documents/handwrittens_train/train", "dir_eval": "/home/vahid/Documents/handwrittens_train/eval", "dir_output": "/home/vahid/Documents/handwrittens_train/output" diff --git a/train/train.py b/train/train.py index c256d83..0cc5ef3 100644 --- a/train/train.py +++ b/train/train.py @@ -9,6 +9,7 @@ from models import * from utils import * from metrics import * from keras.models import load_model +from tqdm import tqdm def configuration(): keras.backend.clear_session() @@ -61,19 +62,24 @@ def config_params(): blur_k=['blur','guass','median'] # Used in order to blur image. Used for augmentation. scales= [ 0.5, 2 ] # Scale patches with these scales. Used for augmentation. flip_index=[0,1,-1] # Flip image. Used for augmentation. - + continue_training = False # If + index_start = 0 + dir_of_start_model = '' + is_loss_soft_dice = False + weighted_loss = False + data_is_provided = False @ex.automain def run(n_classes,n_epochs,input_height, - input_width,weight_decay, + input_width,weight_decay,weighted_loss, + index_start,dir_of_start_model,is_loss_soft_dice, n_batch,patches,augmentation,flip_aug ,blur_aug,scaling, binarization, - blur_k,scales,dir_train, + blur_k,scales,dir_train,data_is_provided, scaling_bluring,scaling_binarization,rotation, - rotation_not_90,thetha,scaling_flip, + rotation_not_90,thetha,scaling_flip,continue_training, flip_index,dir_eval ,dir_output,pretraining,learning_rate): - data_is_provided = False if data_is_provided: dir_train_flowing=os.path.join(dir_output,'train') @@ -143,12 +149,43 @@ def run(n_classes,n_epochs,input_height, augmentation=False,patches=patches) - continue_train = False + + if weighted_loss: + weights=np.zeros(n_classes) + if data_is_provided: + for obj in os.listdir(dir_flow_train_labels): + try: + label_obj=cv2.imread(dir_flow_train_labels+'/'+obj) + label_obj_one_hot=get_one_hot( label_obj,label_obj.shape[0],label_obj.shape[1],n_classes) + weights+=(label_obj_one_hot.sum(axis=0)).sum(axis=0) + except: + pass + else: + + for obj in os.listdir(dir_seg): + try: + label_obj=cv2.imread(dir_seg+'/'+obj) + label_obj_one_hot=get_one_hot( label_obj,label_obj.shape[0],label_obj.shape[1],n_classes) + weights+=(label_obj_one_hot.sum(axis=0)).sum(axis=0) + except: + pass + - if continue_train: - model_dir_start = '/home/vahid/Documents/struktur_full_data/output_multi/model_0.h5' - model = load_model (model_dir_start, compile = True, custom_objects={'soft_dice_loss': soft_dice_loss}) - index_start = 1 + weights=1.00/weights + + weights=weights/float(np.sum(weights)) + weights=weights/float(np.min(weights)) + weights=weights/float(np.sum(weights)) + + + + if continue_training: + if is_loss_soft_dice: + model = load_model (dir_of_start_model, compile = True, custom_objects={'soft_dice_loss': soft_dice_loss}) + if weighted_loss: + model = load_model (dir_of_start_model, compile = True, custom_objects={'loss': weighted_categorical_crossentropy(weights)}) + if not is_loss_soft_dice and not weighted_loss: + model = load_model (dir_of_start_model, compile = True) else: #get our model. index_start = 0 @@ -158,12 +195,16 @@ def run(n_classes,n_epochs,input_height, #model.summary() + if not is_loss_soft_dice and not weighted_loss: + model.compile(loss='categorical_crossentropy', + optimizer = Adam(lr=learning_rate),metrics=['accuracy']) + if is_loss_soft_dice: + model.compile(loss=soft_dice_loss, + optimizer = Adam(lr=learning_rate),metrics=['accuracy']) - #model.compile(loss='categorical_crossentropy', - #optimizer = Adam(lr=learning_rate),metrics=['accuracy']) - - model.compile(loss=soft_dice_loss, - optimizer = Adam(lr=learning_rate),metrics=['accuracy']) + if weighted_loss: + model.compile(loss=weighted_categorical_crossentropy(weights), + optimizer = Adam(lr=learning_rate),metrics=['accuracy']) #generating train and evaluation data train_gen = data_gen(dir_flow_train_imgs,dir_flow_train_labels, batch_size = n_batch, @@ -171,7 +212,7 @@ def run(n_classes,n_epochs,input_height, val_gen = data_gen(dir_flow_eval_imgs,dir_flow_eval_labels, batch_size = n_batch, input_height=input_height, input_width=input_width,n_classes=n_classes ) - for i in range(index_start, n_epochs+index_start): + for i in tqdm(range(index_start, n_epochs+index_start)): model.fit_generator( train_gen, steps_per_epoch=int(len(os.listdir(dir_flow_train_imgs))/n_batch)-1, @@ -181,8 +222,8 @@ def run(n_classes,n_epochs,input_height, model.save(dir_output+'/'+'model_'+str(i)+'.h5') - os.system('rm -rf '+dir_train_flowing) - os.system('rm -rf '+dir_eval_flowing) + #os.system('rm -rf '+dir_train_flowing) + #os.system('rm -rf '+dir_eval_flowing) #model.save(dir_output+'/'+'model'+'.h5') diff --git a/train/utils.py b/train/utils.py index a77444e..19ab46e 100644 --- a/train/utils.py +++ b/train/utils.py @@ -374,9 +374,7 @@ def provide_patches(dir_img,dir_seg,dir_flow_train_imgs, indexer=0 for im, seg_i in tqdm(zip(imgs_cv_train,segs_cv_train)): - #print(im, seg_i) img_name=im.split('.')[0] - print(img_name,'img_name') if not patches: cv2.imwrite(dir_flow_train_imgs+'/img_'+str(indexer)+'.png', resize_image(cv2.imread(dir_img+'/'+im),input_height,input_width ) ) cv2.imwrite(dir_flow_train_labels+'/img_'+str(indexer)+'.png' , resize_image(cv2.imread(dir_seg+'/'+img_name+'.png'),input_height,input_width ) ) From 2d9ba854674db7169c3aceb4fca562b96bbed1f1 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 23 Jun 2021 07:25:49 -0400 Subject: [PATCH 022/492] Update README.md --- train/README.md | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/train/README.md b/train/README.md index d0d26d6..87a59ce 100644 --- a/train/README.md +++ b/train/README.md @@ -23,14 +23,16 @@ be ``0`` and ``1`` for each class and pixel. In the case of multiclass, just set ``n_classes`` to the number of classes you have and the try to produce the labels by pixels set from ``0 , 1 ,2 .., n_classes-1``. The labels format should be png. +Our lables are 3 channel png images but only information of first channel is used. +If you have an image label with height and width of 10, for a binary case the first channel should look like this: -If you have an image label for a binary case it should look like this: + Label: [ [1, 0, 0, 1, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + ..., + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ] - Label: [ [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]], - [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] , - [[1 0 0 1], [1 0 0 1] ,[1 0 0 1]] ] - - This means that you have an image by `3*4*3` and `pixel[0,0]` belongs + This means that you have an image by `10*10*3` and `pixel[0,0]` belongs to class `1` and `pixel[0,1]` belongs to class `0`. ### Training , evaluation and output From 15407393e20a5c66556a0ab8e364f2206156ad27 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 23 Jun 2021 07:55:36 -0400 Subject: [PATCH 023/492] Update README.md --- train/README.md | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/train/README.md b/train/README.md index 87a59ce..464a9a4 100644 --- a/train/README.md +++ b/train/README.md @@ -39,12 +39,15 @@ If you have an image label with height and width of 10, for a binary case the fi The train and evaluation folders should contain subfolders of images and labels. The output folder should be an empty folder where the output model will be written to. -### Patches -If you want to train your model with patches, the height and width of -the patches should be defined and also the number of batches (how many patches -should be seen by the model in each iteration). - -In the case that the model should see the image once, like page extraction, -patches should be set to ``false``. +### Parameter configuration +* patches: If you want to break input images into smaller patches (input size of the model) you need to set this parameter to ``true``. In the case that the model should see the image once, like page extraction, patches should be set to ``false``. +* n_batch: Number of batches at each iteration. +* n_classes: Number of classes. In the case of binary classification this should be 2. +* n_epochs: Number of epochs. +* input_height: This indicates the height of model's input. +* input_width: This indicates the width of model's input. +* weight_decay: Weight decay of l2 regularization of model layers. +* augmentation: If you want to apply any kind of augmentation this parameter should first set to ``true``. +* flip_aug: If ``true``, different types of filp will applied on image. Type of flips is given by "flip_index" in train.py file. From 491cdbf9342ffeebabe088b60371c2f18dd8cfaf Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 23 Jun 2021 08:21:12 -0400 Subject: [PATCH 024/492] Update README.md --- train/README.md | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/train/README.md b/train/README.md index 464a9a4..af8595f 100644 --- a/train/README.md +++ b/train/README.md @@ -48,6 +48,18 @@ The output folder should be an empty folder where the output model will be writt * input_width: This indicates the width of model's input. * weight_decay: Weight decay of l2 regularization of model layers. * augmentation: If you want to apply any kind of augmentation this parameter should first set to ``true``. -* flip_aug: If ``true``, different types of filp will applied on image. Type of flips is given by "flip_index" in train.py file. +* flip_aug: If ``true``, different types of filp will be applied on image. Type of flips is given with "flip_index" in train.py file. +* blur_aug: If ``true``, different types of blurring will be applied on image. Type of blurrings is given with "blur_k" in train.py file. +* scaling: If ``true``, scaling will be applied on image. Scale of scaling is given with "scales" in train.py file. +* rotation_not_90: If ``true``, rotation (not 90 degree) will be applied on image. Rothation angles are given with "thetha" in train.py file. +* rotation: If ``true``, 90 degree rotation will be applied on image. +* binarization: If ``true``,Otsu thresholding will be applied to augment the input data with binarized images. +* scaling_bluring: If ``true``, combination of scaling and blurring will be applied on image. +* scaling_binarization: If ``true``, combination of scaling and binarization will be applied on image. +* scaling_flip: If ``true``, combination of scaling and flip will be applied on image. +* continue_training: If ``true``, it means that you have already trained a model and you would like to continue the training. So it is needed to provide the dir of trained model with "dir_of_start_model" and index for naming the models. For example if you have already trained for 3 epochs then your last index is 2 and if you want to continue from model_1.h5, you can set "index_start" to 3 to start naming model with index 3. +* weighted_loss: If ``true``, this means that you want to apply weighted categorical_crossentropy as loss fucntion. Be carefull if you set to ``true``the parameter "is_loss_soft_dice" should be ``false`` +* data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train and eval data should be in "dir_output". Since when once we provide training data we resize and augment them and then we write them in sub-directories train and eval in "dir_output". +* dir_train: This is the directory of "images" and "labels" (dir_train should include two subdirectories with names of images and labels ) for raw images and labels. Namely they are not prepared (not resize and not augmented) yet for training the model. When we run this tool these raw data will be transformed to suitable size needed for the model and they will be written in "dir_output" in train and eval directories. Each of train and eval include "images" and "labels" sub-directories. From 76c75d1365ee31e5637c763c89e664e7bbc45b0d Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 23 Jun 2021 08:22:03 -0400 Subject: [PATCH 025/492] Update README.md --- train/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train/README.md b/train/README.md index af8595f..c38aea1 100644 --- a/train/README.md +++ b/train/README.md @@ -59,7 +59,7 @@ The output folder should be an empty folder where the output model will be writt * scaling_flip: If ``true``, combination of scaling and flip will be applied on image. * continue_training: If ``true``, it means that you have already trained a model and you would like to continue the training. So it is needed to provide the dir of trained model with "dir_of_start_model" and index for naming the models. For example if you have already trained for 3 epochs then your last index is 2 and if you want to continue from model_1.h5, you can set "index_start" to 3 to start naming model with index 3. * weighted_loss: If ``true``, this means that you want to apply weighted categorical_crossentropy as loss fucntion. Be carefull if you set to ``true``the parameter "is_loss_soft_dice" should be ``false`` -* data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train and eval data should be in "dir_output". Since when once we provide training data we resize and augment them and then we write them in sub-directories train and eval in "dir_output". +* data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train and eval data are in "dir_output". Since when once we provide training data we resize and augment them and then we write them in sub-directories train and eval in "dir_output". * dir_train: This is the directory of "images" and "labels" (dir_train should include two subdirectories with names of images and labels ) for raw images and labels. Namely they are not prepared (not resize and not augmented) yet for training the model. When we run this tool these raw data will be transformed to suitable size needed for the model and they will be written in "dir_output" in train and eval directories. Each of train and eval include "images" and "labels" sub-directories. From 310a709ac7d2b1632580b53d6b4b3c127230e808 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 23 Jun 2021 08:23:20 -0400 Subject: [PATCH 026/492] Update README.md --- train/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train/README.md b/train/README.md index c38aea1..5272def 100644 --- a/train/README.md +++ b/train/README.md @@ -60,6 +60,6 @@ The output folder should be an empty folder where the output model will be writt * continue_training: If ``true``, it means that you have already trained a model and you would like to continue the training. So it is needed to provide the dir of trained model with "dir_of_start_model" and index for naming the models. For example if you have already trained for 3 epochs then your last index is 2 and if you want to continue from model_1.h5, you can set "index_start" to 3 to start naming model with index 3. * weighted_loss: If ``true``, this means that you want to apply weighted categorical_crossentropy as loss fucntion. Be carefull if you set to ``true``the parameter "is_loss_soft_dice" should be ``false`` * data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train and eval data are in "dir_output". Since when once we provide training data we resize and augment them and then we write them in sub-directories train and eval in "dir_output". -* dir_train: This is the directory of "images" and "labels" (dir_train should include two subdirectories with names of images and labels ) for raw images and labels. Namely they are not prepared (not resize and not augmented) yet for training the model. When we run this tool these raw data will be transformed to suitable size needed for the model and they will be written in "dir_output" in train and eval directories. Each of train and eval include "images" and "labels" sub-directories. +* dir_train: This is the directory of "images" and "labels" (dir_train should include two subdirectories with names of images and labels ) for raw images and labels. Namely they are not prepared (not resized and not augmented) yet for training the model. When we run this tool these raw data will be transformed to suitable size needed for the model and they will be written in "dir_output" in train and eval directories. Each of train and eval include "images" and "labels" sub-directories. From b1c8bdf10624e3580c46105c2f323a0bc14b8178 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 29 Jun 2021 07:19:32 -0400 Subject: [PATCH 027/492] Update README.md --- train/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/train/README.md b/train/README.md index 5272def..363ba21 100644 --- a/train/README.md +++ b/train/README.md @@ -34,6 +34,8 @@ If you have an image label with height and width of 10, for a binary case the fi This means that you have an image by `10*10*3` and `pixel[0,0]` belongs to class `1` and `pixel[0,1]` belongs to class `0`. + + A small sample of training data for binarization experiment can be found here https://qurator-data.de/binarization_training_data_sample/ which contains images and lables folders. ### Training , evaluation and output The train and evaluation folders should contain subfolders of images and labels. From 49853bb291ff048874c8d0d8a4683968211b9ac8 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 29 Jun 2021 07:21:34 -0400 Subject: [PATCH 028/492] Update README.md --- train/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train/README.md b/train/README.md index 363ba21..529d7c7 100644 --- a/train/README.md +++ b/train/README.md @@ -35,7 +35,7 @@ If you have an image label with height and width of 10, for a binary case the fi This means that you have an image by `10*10*3` and `pixel[0,0]` belongs to class `1` and `pixel[0,1]` belongs to class `0`. - A small sample of training data for binarization experiment can be found here https://qurator-data.de/binarization_training_data_sample/ which contains images and lables folders. + A small sample of training data for binarization experiment can be found here [Training data sample](https://qurator-data.de/binarization_training_data_sample/) which contains images and lables folders. ### Training , evaluation and output The train and evaluation folders should contain subfolders of images and labels. From 09c0d5e318e1115b99dc3c9635179851370b54fe Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 29 Jun 2021 07:22:13 -0400 Subject: [PATCH 029/492] Update README.md --- train/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train/README.md b/train/README.md index 529d7c7..58f3eae 100644 --- a/train/README.md +++ b/train/README.md @@ -35,7 +35,7 @@ If you have an image label with height and width of 10, for a binary case the fi This means that you have an image by `10*10*3` and `pixel[0,0]` belongs to class `1` and `pixel[0,1]` belongs to class `0`. - A small sample of training data for binarization experiment can be found here [Training data sample](https://qurator-data.de/binarization_training_data_sample/) which contains images and lables folders. + A small sample of training data for binarization experiment can be found here, [Training data sample](https://qurator-data.de/binarization_training_data_sample/) , which contains images and lables folders. ### Training , evaluation and output The train and evaluation folders should contain subfolders of images and labels. From bcc900be1732ac5c9a94d2d99e37673c745d96af Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 29 Jun 2021 07:22:34 -0400 Subject: [PATCH 030/492] Update README.md --- train/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train/README.md b/train/README.md index 58f3eae..0f0eb55 100644 --- a/train/README.md +++ b/train/README.md @@ -35,7 +35,7 @@ If you have an image label with height and width of 10, for a binary case the fi This means that you have an image by `10*10*3` and `pixel[0,0]` belongs to class `1` and `pixel[0,1]` belongs to class `0`. - A small sample of training data for binarization experiment can be found here, [Training data sample](https://qurator-data.de/binarization_training_data_sample/) , which contains images and lables folders. + A small sample of training data for binarization experiment can be found here, [Training data sample](https://qurator-data.de/binarization_training_data_sample/), which contains images and lables folders. ### Training , evaluation and output The train and evaluation folders should contain subfolders of images and labels. From 083f5ae881436fad4e3f0e5b2caac068fa7bcf54 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 14 Jul 2021 06:01:33 -0400 Subject: [PATCH 031/492] Update README.md --- train/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train/README.md b/train/README.md index 0f0eb55..8acfa12 100644 --- a/train/README.md +++ b/train/README.md @@ -35,7 +35,7 @@ If you have an image label with height and width of 10, for a binary case the fi This means that you have an image by `10*10*3` and `pixel[0,0]` belongs to class `1` and `pixel[0,1]` belongs to class `0`. - A small sample of training data for binarization experiment can be found here, [Training data sample](https://qurator-data.de/binarization_training_data_sample/), which contains images and lables folders. + A small sample of training data for binarization experiment can be found here, [Training data sample](https://qurator-data.de/~vahid.rezanezhad/binarization_training_data_sample/), which contains images and lables folders. ### Training , evaluation and output The train and evaluation folders should contain subfolders of images and labels. From 5282caa3286f121f9195263d5419c3876c7d9b4f Mon Sep 17 00:00:00 2001 From: vahid Date: Mon, 22 Aug 2022 13:03:10 +0200 Subject: [PATCH 032/492] supposed to solve https://github.com/qurator-spk/sbb_binarization/issues/41 --- ..._model_load_pretrained_weights_and_save.py | 33 +++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 train/build_model_load_pretrained_weights_and_save.py diff --git a/train/build_model_load_pretrained_weights_and_save.py b/train/build_model_load_pretrained_weights_and_save.py new file mode 100644 index 0000000..251e698 --- /dev/null +++ b/train/build_model_load_pretrained_weights_and_save.py @@ -0,0 +1,33 @@ +import os +import sys +import tensorflow as tf +import keras , warnings +from keras.optimizers import * +from sacred import Experiment +from models import * +from utils import * +from metrics import * + + + + +def configuration(): + gpu_options = tf.compat.v1.GPUOptions(allow_growth=True) + session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options)) + + +if __name__=='__main__': + n_classes = 2 + input_height = 224 + input_width = 448 + weight_decay = 1e-6 + pretraining = False + dir_of_weights = 'model_bin_sbb_ens.h5' + + #configuration() + + model = resnet50_unet(n_classes, input_height, input_width,weight_decay,pretraining) + model.load_weights(dir_of_weights) + model.save('./name_in_another_python_version.h5') + + From 57dae564b359f905f636bb4579aff12d7e336d36 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 4 Apr 2024 11:26:28 +0200 Subject: [PATCH 033/492] adjusting to tf2 --- ..._model_load_pretrained_weights_and_save.py | 4 ++-- train/metrics.py | 2 +- train/models.py | 8 +++---- train/train.py | 24 +++++++------------ 4 files changed, 15 insertions(+), 23 deletions(-) diff --git a/train/build_model_load_pretrained_weights_and_save.py b/train/build_model_load_pretrained_weights_and_save.py index 251e698..3b1a577 100644 --- a/train/build_model_load_pretrained_weights_and_save.py +++ b/train/build_model_load_pretrained_weights_and_save.py @@ -1,8 +1,8 @@ import os import sys import tensorflow as tf -import keras , warnings -from keras.optimizers import * +import warnings +from tensorflow.keras.optimizers import * from sacred import Experiment from models import * from utils import * diff --git a/train/metrics.py b/train/metrics.py index c63cc22..1768960 100644 --- a/train/metrics.py +++ b/train/metrics.py @@ -1,4 +1,4 @@ -from keras import backend as K +from tensorflow.keras import backend as K import tensorflow as tf import numpy as np diff --git a/train/models.py b/train/models.py index 7c806b4..40a21a1 100644 --- a/train/models.py +++ b/train/models.py @@ -1,7 +1,7 @@ -from keras.models import * -from keras.layers import * -from keras import layers -from keras.regularizers import l2 +from tensorflow.keras.models import * +from tensorflow.keras.layers import * +from tensorflow.keras import layers +from tensorflow.keras.regularizers import l2 resnet50_Weights_path='./pretrained_model/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5' IMAGE_ORDERING ='channels_last' diff --git a/train/train.py b/train/train.py index 0cc5ef3..142b79b 100644 --- a/train/train.py +++ b/train/train.py @@ -1,29 +1,21 @@ import os import sys import tensorflow as tf -from keras.backend.tensorflow_backend import set_session -import keras , warnings -from keras.optimizers import * +from tensorflow.compat.v1.keras.backend import set_session +import warnings +from tensorflow.keras.optimizers import * from sacred import Experiment from models import * from utils import * from metrics import * -from keras.models import load_model +from tensorflow.keras.models import load_model from tqdm import tqdm def configuration(): - keras.backend.clear_session() - tf.reset_default_graph() - warnings.filterwarnings('ignore') - - os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID' - config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True) - - + config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True - config.gpu_options.per_process_gpu_memory_fraction=0.95#0.95 - config.gpu_options.visible_device_list="0" - set_session(tf.Session(config=config)) + session = tf.compat.v1.Session(config=config) + set_session(session) def get_dirs_or_files(input_data): if os.path.isdir(input_data): @@ -219,7 +211,7 @@ def run(n_classes,n_epochs,input_height, validation_data=val_gen, validation_steps=1, epochs=1) - model.save(dir_output+'/'+'model_'+str(i)+'.h5') + model.save(dir_output+'/'+'model_'+str(i)) #os.system('rm -rf '+dir_train_flowing) From ced1f851e267cf986d0e1dbf1bb63e15db31c823 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 4 Apr 2024 11:30:12 +0200 Subject: [PATCH 034/492] adding requirements --- train/requirements.txt | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 train/requirements.txt diff --git a/train/requirements.txt b/train/requirements.txt new file mode 100644 index 0000000..f804172 --- /dev/null +++ b/train/requirements.txt @@ -0,0 +1,7 @@ +tensorflow == 2.12.1 +sacred +opencv-python +seaborn +tqdm +imutils + From 45652294972f2ce7c8d1f473621901f322b9c4b6 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 10 Apr 2024 20:03:02 +0200 Subject: [PATCH 035/492] use headless cv2 --- train/requirements.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/train/requirements.txt b/train/requirements.txt index f804172..cbe2d88 100644 --- a/train/requirements.txt +++ b/train/requirements.txt @@ -1,7 +1,6 @@ tensorflow == 2.12.1 sacred -opencv-python +opencv-python-headless seaborn tqdm imutils - From d0b039505956af90594d14a6535add8deeaa8583 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 10 Apr 2024 20:26:26 +0200 Subject: [PATCH 036/492] add info on helpful tools (fix #14) --- train/README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/train/README.md b/train/README.md index 8acfa12..89fa227 100644 --- a/train/README.md +++ b/train/README.md @@ -10,6 +10,16 @@ Either clone the repository via `git clone https://github.com/qurator-spk/sbb_pi ### Pretrained encoder Download our pretrained weights and add them to a ``pretrained_model`` folder: https://qurator-data.de/sbb_pixelwise_segmentation/pretrained_encoder/ + +### Helpful tools +* [`pagexml2img`](https://github.com/qurator-spk/page2img) +> Tool to extract 2-D or 3-D RGB images from PAGE-XML data. In the former case, the output will be 1 2-D image array which each class has filled with a pixel value. In the case of a 3-D RGB image, +each class will be defined with a RGB value and beside images, a text file of classes will also be produced. +* [`cocoSegmentationToPng`](https://github.com/nightrome/cocostuffapi/blob/17acf33aef3c6cc2d6aca46dcf084266c2778cf0/PythonAPI/pycocotools/cocostuffhelper.py#L130) +> Convert COCO GT or results for a single image to a segmentation map and write it to disk. +* [`ocrd-segment-extract-pages`](https://github.com/OCR-D/ocrd_segment/blob/master/ocrd_segment/extract_pages.py) +> Extract region classes and their colours in mask (pseg) images. Allows the color map as free dict parameter, and comes with a default that mimics PageViewer's coloring for quick debugging; it also warns when regions do overlap. + ## Usage ### Train From 39aa88669b98f364b33520ce45ff42b126be686c Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 10 Apr 2024 21:40:23 +0200 Subject: [PATCH 037/492] update parameter config docs (fix #11) --- train/train.py | 57 +++++++++++++++++++++++++------------------------- 1 file changed, 28 insertions(+), 29 deletions(-) diff --git a/train/train.py b/train/train.py index 142b79b..9f833e0 100644 --- a/train/train.py +++ b/train/train.py @@ -29,37 +29,36 @@ ex = Experiment() @ex.config def config_params(): - n_classes=None # Number of classes. If your case study is binary case the set it to 2 and otherwise give your number of cases. - n_epochs=1 - input_height=224*1 - input_width=224*1 + n_classes=None # Number of classes. In the case of binary classification this should be 2. + n_epochs=1 # Number of epochs. + input_height=224*1 # Height of model's input in pixels. + input_width=224*1 # Width of model's input in pixels. weight_decay=1e-6 # Weight decay of l2 regularization of model layers. n_batch=1 # Number of batches at each iteration. - learning_rate=1e-4 - patches=False # Make patches of image in order to use all information of image. In the case of page - # extraction this should be set to false since model should see all image. - augmentation=False - flip_aug=False # Flip image (augmentation). - blur_aug=False # Blur patches of image (augmentation). - scaling=False # Scaling of patches (augmentation) will be imposed if this set to true. - binarization=False # Otsu thresholding. Used for augmentation in the case of binary case like textline prediction. For multicases should not be applied. - dir_train=None # Directory of training dataset (sub-folders should be named images and labels). - dir_eval=None # Directory of validation dataset (sub-folders should be named images and labels). - dir_output=None # Directory of output where the model should be saved. - pretraining=False # Set true to load pretrained weights of resnet50 encoder. - scaling_bluring=False - scaling_binarization=False - scaling_flip=False - thetha=[10,-10] - blur_k=['blur','guass','median'] # Used in order to blur image. Used for augmentation. - scales= [ 0.5, 2 ] # Scale patches with these scales. Used for augmentation. - flip_index=[0,1,-1] # Flip image. Used for augmentation. - continue_training = False # If - index_start = 0 - dir_of_start_model = '' - is_loss_soft_dice = False - weighted_loss = False - data_is_provided = False + learning_rate=1e-4 # Set the learning rate. + patches=False # Divides input image into smaller patches (input size of the model) when set to true. For the model to see the full image, like page extraction, set this to false. + augmentation=False # To apply any kind of augmentation, this parameter must be set to true. + flip_aug=False # If true, different types of flipping will be applied to the image. Types of flips are defined with "flip_index" in train.py. + blur_aug=False # If true, different types of blurring will be applied to the image. Types of blur are defined with "blur_k" in train.py. + scaling=False # If true, scaling will be applied to the image. The amount of scaling is defined with "scales" in train.py. + binarization=False # If true, Otsu thresholding will be applied to augment the input with binarized images. + dir_train=None # Directory of training dataset with subdirectories having the names "images" and "labels". + dir_eval=None # Directory of validation dataset with subdirectories having the names "images" and "labels". + dir_output=None # Directory where the output model will be saved. + pretraining=False # Set to true to load pretrained weights of ResNet50 encoder. + scaling_bluring=False # If true, a combination of scaling and blurring will be applied to the image. + scaling_binarization=False # If true, a combination of scaling and binarization will be applied to the image. + scaling_flip=False # If true, a combination of scaling and flipping will be applied to the image. + thetha=[10,-10] # Rotate image by these angles for augmentation. + blur_k=['blur','gauss','median'] # Blur image for augmentation. + scales=[0.5,2] # Scale patches for augmentation. + flip_index=[0,1,-1] # Flip image for augmentation. + continue_training = False # Set to true if you would like to continue training an already trained a model. + index_start = 0 # Index of model to continue training from. E.g. if you trained for 3 epochs and last index is 2, to continue from model_1.h5, set "index_start" to 3 to start naming model with index 3. + dir_of_start_model = '' # Directory containing pretrained encoder to continue training the model. + is_loss_soft_dice = False # Use soft dice as loss function. When set to true, "weighted_loss" must be false. + weighted_loss = False # Use weighted categorical cross entropy as loss fucntion. When set to true, "is_loss_soft_dice" must be false. + data_is_provided = False # Only set this to true when you have already provided the input data and the train and eval data are in "dir_output". @ex.automain def run(n_classes,n_epochs,input_height, From 666a62622ee95f2c155eb6db6dfa58bd31f15971 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 10 Apr 2024 22:20:23 +0200 Subject: [PATCH 038/492] code formatting with black; typos --- train/README.md | 6 +- ..._model_load_pretrained_weights_and_save.py | 14 +- train/config_params.json | 6 +- train/metrics.py | 209 ++--- train/models.py | 237 +++--- train/requirements.txt | 2 + train/train.py | 272 +++---- train/utils.py | 763 +++++++++--------- 8 files changed, 741 insertions(+), 768 deletions(-) diff --git a/train/README.md b/train/README.md index 89fa227..899c9a3 100644 --- a/train/README.md +++ b/train/README.md @@ -48,7 +48,7 @@ If you have an image label with height and width of 10, for a binary case the fi A small sample of training data for binarization experiment can be found here, [Training data sample](https://qurator-data.de/~vahid.rezanezhad/binarization_training_data_sample/), which contains images and lables folders. ### Training , evaluation and output -The train and evaluation folders should contain subfolders of images and labels. +The train and evaluation folders should contain subfolders of images and labels. The output folder should be an empty folder where the output model will be written to. ### Parameter configuration @@ -63,7 +63,7 @@ The output folder should be an empty folder where the output model will be writt * flip_aug: If ``true``, different types of filp will be applied on image. Type of flips is given with "flip_index" in train.py file. * blur_aug: If ``true``, different types of blurring will be applied on image. Type of blurrings is given with "blur_k" in train.py file. * scaling: If ``true``, scaling will be applied on image. Scale of scaling is given with "scales" in train.py file. -* rotation_not_90: If ``true``, rotation (not 90 degree) will be applied on image. Rothation angles are given with "thetha" in train.py file. +* rotation_not_90: If ``true``, rotation (not 90 degree) will be applied on image. Rotation angles are given with "thetha" in train.py file. * rotation: If ``true``, 90 degree rotation will be applied on image. * binarization: If ``true``,Otsu thresholding will be applied to augment the input data with binarized images. * scaling_bluring: If ``true``, combination of scaling and blurring will be applied on image. @@ -73,5 +73,3 @@ The output folder should be an empty folder where the output model will be writt * weighted_loss: If ``true``, this means that you want to apply weighted categorical_crossentropy as loss fucntion. Be carefull if you set to ``true``the parameter "is_loss_soft_dice" should be ``false`` * data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train and eval data are in "dir_output". Since when once we provide training data we resize and augment them and then we write them in sub-directories train and eval in "dir_output". * dir_train: This is the directory of "images" and "labels" (dir_train should include two subdirectories with names of images and labels ) for raw images and labels. Namely they are not prepared (not resized and not augmented) yet for training the model. When we run this tool these raw data will be transformed to suitable size needed for the model and they will be written in "dir_output" in train and eval directories. Each of train and eval include "images" and "labels" sub-directories. - - diff --git a/train/build_model_load_pretrained_weights_and_save.py b/train/build_model_load_pretrained_weights_and_save.py index 3b1a577..125611e 100644 --- a/train/build_model_load_pretrained_weights_and_save.py +++ b/train/build_model_load_pretrained_weights_and_save.py @@ -9,25 +9,21 @@ from utils import * from metrics import * - - def configuration(): gpu_options = tf.compat.v1.GPUOptions(allow_growth=True) session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options)) -if __name__=='__main__': +if __name__ == '__main__': n_classes = 2 input_height = 224 input_width = 448 weight_decay = 1e-6 pretraining = False dir_of_weights = 'model_bin_sbb_ens.h5' - - #configuration() - - model = resnet50_unet(n_classes, input_height, input_width,weight_decay,pretraining) + + # configuration() + + model = resnet50_unet(n_classes, input_height, input_width, weight_decay, pretraining) model.load_weights(dir_of_weights) model.save('./name_in_another_python_version.h5') - - diff --git a/train/config_params.json b/train/config_params.json index eaa50e1..7505a81 100644 --- a/train/config_params.json +++ b/train/config_params.json @@ -24,7 +24,7 @@ "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, - "dir_train": "/home/vahid/Documents/handwrittens_train/train", - "dir_eval": "/home/vahid/Documents/handwrittens_train/eval", - "dir_output": "/home/vahid/Documents/handwrittens_train/output" + "dir_train": "/train", + "dir_eval": "/eval", + "dir_output": "/output" } diff --git a/train/metrics.py b/train/metrics.py index 1768960..cd30b02 100644 --- a/train/metrics.py +++ b/train/metrics.py @@ -2,8 +2,8 @@ from tensorflow.keras import backend as K import tensorflow as tf import numpy as np -def focal_loss(gamma=2., alpha=4.): +def focal_loss(gamma=2., alpha=4.): gamma = float(gamma) alpha = float(alpha) @@ -37,8 +37,10 @@ def focal_loss(gamma=2., alpha=4.): fl = tf.multiply(alpha, tf.multiply(weight, ce)) reduced_fl = tf.reduce_max(fl, axis=1) return tf.reduce_mean(reduced_fl) + return focal_loss_fixed + def weighted_categorical_crossentropy(weights=None): """ weighted_categorical_crossentropy @@ -50,117 +52,131 @@ def weighted_categorical_crossentropy(weights=None): def loss(y_true, y_pred): labels_floats = tf.cast(y_true, tf.float32) - per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats,logits=y_pred) - + per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats, logits=y_pred) + if weights is not None: weight_mask = tf.maximum(tf.reduce_max(tf.constant( np.array(weights, dtype=np.float32)[None, None, None]) - * labels_floats, axis=-1), 1.0) + * labels_floats, axis=-1), 1.0) per_pixel_loss = per_pixel_loss * weight_mask[:, :, :, None] return tf.reduce_mean(per_pixel_loss) + return loss + + def image_categorical_cross_entropy(y_true, y_pred, weights=None): """ :param y_true: tensor of shape (batch_size, height, width) representing the ground truth. :param y_pred: tensor of shape (batch_size, height, width) representing the prediction. :return: The mean cross-entropy on softmaxed tensors. """ - + labels_floats = tf.cast(y_true, tf.float32) - per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats,logits=y_pred) - + per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats, logits=y_pred) + if weights is not None: weight_mask = tf.maximum( - tf.reduce_max(tf.constant( - np.array(weights, dtype=np.float32)[None, None, None]) - * labels_floats, axis=-1), 1.0) + tf.reduce_max(tf.constant( + np.array(weights, dtype=np.float32)[None, None, None]) + * labels_floats, axis=-1), 1.0) per_pixel_loss = per_pixel_loss * weight_mask[:, :, :, None] - - return tf.reduce_mean(per_pixel_loss) -def class_tversky(y_true, y_pred): - smooth = 1.0#1.00 - y_true = K.permute_dimensions(y_true, (3,1,2,0)) - y_pred = K.permute_dimensions(y_pred, (3,1,2,0)) + return tf.reduce_mean(per_pixel_loss) + + +def class_tversky(y_true, y_pred): + smooth = 1.0 # 1.00 + + y_true = K.permute_dimensions(y_true, (3, 1, 2, 0)) + y_pred = K.permute_dimensions(y_pred, (3, 1, 2, 0)) y_true_pos = K.batch_flatten(y_true) y_pred_pos = K.batch_flatten(y_pred) true_pos = K.sum(y_true_pos * y_pred_pos, 1) - false_neg = K.sum(y_true_pos * (1-y_pred_pos), 1) - false_pos = K.sum((1-y_true_pos)*y_pred_pos, 1) - alpha = 0.2#0.5 - beta=0.8 - return (true_pos + smooth)/(true_pos + alpha*false_neg + (beta)*false_pos + smooth) + false_neg = K.sum(y_true_pos * (1 - y_pred_pos), 1) + false_pos = K.sum((1 - y_true_pos) * y_pred_pos, 1) + alpha = 0.2 # 0.5 + beta = 0.8 + return (true_pos + smooth) / (true_pos + alpha * false_neg + beta * false_pos + smooth) -def focal_tversky_loss(y_true,y_pred): + +def focal_tversky_loss(y_true, y_pred): pt_1 = class_tversky(y_true, y_pred) - gamma =1.3#4./3.0#1.3#4.0/3.00# 0.75 - return K.sum(K.pow((1-pt_1), gamma)) + gamma = 1.3 # 4./3.0#1.3#4.0/3.00# 0.75 + return K.sum(K.pow((1 - pt_1), gamma)) + def generalized_dice_coeff2(y_true, y_pred): n_el = 1 - for dim in y_true.shape: + for dim in y_true.shape: n_el *= int(dim) n_cl = y_true.shape[-1] w = K.zeros(shape=(n_cl,)) - w = (K.sum(y_true, axis=(0,1,2)))/(n_el) - w = 1/(w**2+0.000001) - numerator = y_true*y_pred - numerator = w*K.sum(numerator,(0,1,2)) + w = (K.sum(y_true, axis=(0, 1, 2))) / n_el + w = 1 / (w ** 2 + 0.000001) + numerator = y_true * y_pred + numerator = w * K.sum(numerator, (0, 1, 2)) numerator = K.sum(numerator) - denominator = y_true+y_pred - denominator = w*K.sum(denominator,(0,1,2)) + denominator = y_true + y_pred + denominator = w * K.sum(denominator, (0, 1, 2)) denominator = K.sum(denominator) - return 2*numerator/denominator + return 2 * numerator / denominator + + def generalized_dice_coeff(y_true, y_pred): - axes = tuple(range(1, len(y_pred.shape)-1)) + axes = tuple(range(1, len(y_pred.shape) - 1)) Ncl = y_pred.shape[-1] w = K.zeros(shape=(Ncl,)) w = K.sum(y_true, axis=axes) - w = 1/(w**2+0.000001) + w = 1 / (w ** 2 + 0.000001) # Compute gen dice coef: - numerator = y_true*y_pred - numerator = w*K.sum(numerator,axes) + numerator = y_true * y_pred + numerator = w * K.sum(numerator, axes) numerator = K.sum(numerator) - denominator = y_true+y_pred - denominator = w*K.sum(denominator,axes) + denominator = y_true + y_pred + denominator = w * K.sum(denominator, axes) denominator = K.sum(denominator) - gen_dice_coef = 2*numerator/denominator + gen_dice_coef = 2 * numerator / denominator return gen_dice_coef + def generalized_dice_loss(y_true, y_pred): return 1 - generalized_dice_coeff2(y_true, y_pred) -def soft_dice_loss(y_true, y_pred, epsilon=1e-6): - ''' + + +def soft_dice_loss(y_true, y_pred, epsilon=1e-6): + """ Soft dice loss calculation for arbitrary batch size, number of classes, and number of spatial dimensions. Assumes the `channels_last` format. - + # Arguments y_true: b x X x Y( x Z...) x c One hot encoding of ground truth - y_pred: b x X x Y( x Z...) x c Network output, must sum to 1 over c channel (such as after softmax) + y_pred: b x X x Y( x Z...) x c Network output, must sum to 1 over c channel (such as after softmax) epsilon: Used for numerical stability to avoid divide by zero errors - + # References - V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation + V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation https://arxiv.org/abs/1606.04797 - More details on Dice loss formulation + More details on Dice loss formulation https://mediatum.ub.tum.de/doc/1395260/1395260.pdf (page 72) - + Adapted from https://github.com/Lasagne/Recipes/issues/99#issuecomment-347775022 - ''' - + """ + # skip the batch and class axis for calculating Dice score - axes = tuple(range(1, len(y_pred.shape)-1)) - + axes = tuple(range(1, len(y_pred.shape) - 1)) + numerator = 2. * K.sum(y_pred * y_true, axes) denominator = K.sum(K.square(y_pred) + K.square(y_true), axes) - return 1.00 - K.mean(numerator / (denominator + epsilon)) # average over classes and batch + return 1.00 - K.mean(numerator / (denominator + epsilon)) # average over classes and batch -def seg_metrics(y_true, y_pred, metric_name, metric_type='standard', drop_last = True, mean_per_class=False, verbose=False): + +def seg_metrics(y_true, y_pred, metric_name, metric_type='standard', drop_last=True, mean_per_class=False, + verbose=False): """ Compute mean metrics of two segmentation masks, via Keras. @@ -193,13 +209,13 @@ def seg_metrics(y_true, y_pred, metric_name, metric_type='standard', drop_last = H = height, N = number of classes """ - + flag_soft = (metric_type == 'soft') flag_naive_mean = (metric_type == 'naive') - + # always assume one or more classes num_classes = K.shape(y_true)[-1] - + if not flag_soft: # get one-hot encoded masks from y_pred (true masks should already be one-hot) y_pred = K.one_hot(K.argmax(y_pred), num_classes) @@ -211,29 +227,29 @@ def seg_metrics(y_true, y_pred, metric_name, metric_type='standard', drop_last = y_pred = K.cast(y_pred, 'float32') # intersection and union shapes are batch_size * n_classes (values = area in pixels) - axes = (1,2) # W,H axes of each image + axes = (1, 2) # W,H axes of each image intersection = K.sum(K.abs(y_true * y_pred), axis=axes) mask_sum = K.sum(K.abs(y_true), axis=axes) + K.sum(K.abs(y_pred), axis=axes) - union = mask_sum - intersection # or, np.logical_or(y_pred, y_true) for one-hot + union = mask_sum - intersection # or, np.logical_or(y_pred, y_true) for one-hot smooth = .001 iou = (intersection + smooth) / (union + smooth) - dice = 2 * (intersection + smooth)/(mask_sum + smooth) + dice = 2 * (intersection + smooth) / (mask_sum + smooth) metric = {'iou': iou, 'dice': dice}[metric_name] # define mask to be 0 when no pixels are present in either y_true or y_pred, 1 otherwise - mask = K.cast(K.not_equal(union, 0), 'float32') - + mask = K.cast(K.not_equal(union, 0), 'float32') + if drop_last: - metric = metric[:,:-1] - mask = mask[:,:-1] - + metric = metric[:, :-1] + mask = mask[:, :-1] + if verbose: print('intersection, union') print(K.eval(intersection), K.eval(union)) - print(K.eval(intersection/union)) - + print(K.eval(intersection / union)) + # return mean metrics: remaining axes are (batch, classes) if flag_naive_mean: return K.mean(metric) @@ -243,13 +259,14 @@ def seg_metrics(y_true, y_pred, metric_name, metric_type='standard', drop_last = non_zero = tf.greater(class_count, 0) non_zero_sum = tf.boolean_mask(K.sum(metric * mask, axis=0), non_zero) non_zero_count = tf.boolean_mask(class_count, non_zero) - + if verbose: print('Counts of inputs with class present, metrics for non-absent classes') print(K.eval(class_count), K.eval(non_zero_sum / non_zero_count)) - + return K.mean(non_zero_sum / non_zero_count) + def mean_iou(y_true, y_pred, **kwargs): """ Compute mean Intersection over Union of two segmentation masks, via Keras. @@ -257,65 +274,69 @@ def mean_iou(y_true, y_pred, **kwargs): Calls metrics_k(y_true, y_pred, metric_name='iou'), see there for allowed kwargs. """ return seg_metrics(y_true, y_pred, metric_name='iou', **kwargs) + + def Mean_IOU(y_true, y_pred): nb_classes = K.int_shape(y_pred)[-1] iou = [] true_pixels = K.argmax(y_true, axis=-1) pred_pixels = K.argmax(y_pred, axis=-1) void_labels = K.equal(K.sum(y_true, axis=-1), 0) - for i in range(0, nb_classes): # exclude first label (background) and last label (void) - true_labels = K.equal(true_pixels, i)# & ~void_labels - pred_labels = K.equal(pred_pixels, i)# & ~void_labels + for i in range(0, nb_classes): # exclude first label (background) and last label (void) + true_labels = K.equal(true_pixels, i) # & ~void_labels + pred_labels = K.equal(pred_pixels, i) # & ~void_labels inter = tf.to_int32(true_labels & pred_labels) union = tf.to_int32(true_labels | pred_labels) - legal_batches = K.sum(tf.to_int32(true_labels), axis=1)>0 - ious = K.sum(inter, axis=1)/K.sum(union, axis=1) - iou.append(K.mean(tf.gather(ious, indices=tf.where(legal_batches)))) # returns average IoU of the same objects + legal_batches = K.sum(tf.to_int32(true_labels), axis=1) > 0 + ious = K.sum(inter, axis=1) / K.sum(union, axis=1) + iou.append(K.mean(tf.gather(ious, indices=tf.where(legal_batches)))) # returns average IoU of the same objects iou = tf.stack(iou) legal_labels = ~tf.debugging.is_nan(iou) iou = tf.gather(iou, indices=tf.where(legal_labels)) return K.mean(iou) + def iou_vahid(y_true, y_pred): - nb_classes = tf.shape(y_true)[-1]+tf.to_int32(1) + nb_classes = tf.shape(y_true)[-1] + tf.to_int32(1) true_pixels = K.argmax(y_true, axis=-1) pred_pixels = K.argmax(y_pred, axis=-1) iou = [] - + for i in tf.range(nb_classes): - tp=K.sum( tf.to_int32( K.equal(true_pixels, i) & K.equal(pred_pixels, i) ) ) - fp=K.sum( tf.to_int32( K.not_equal(true_pixels, i) & K.equal(pred_pixels, i) ) ) - fn=K.sum( tf.to_int32( K.equal(true_pixels, i) & K.not_equal(pred_pixels, i) ) ) - iouh=tp/(tp+fp+fn) + tp = K.sum(tf.to_int32(K.equal(true_pixels, i) & K.equal(pred_pixels, i))) + fp = K.sum(tf.to_int32(K.not_equal(true_pixels, i) & K.equal(pred_pixels, i))) + fn = K.sum(tf.to_int32(K.equal(true_pixels, i) & K.not_equal(pred_pixels, i))) + iouh = tp / (tp + fp + fn) iou.append(iouh) return K.mean(iou) - - -def IoU_metric(Yi,y_predi): - ## mean Intersection over Union - ## Mean IoU = TP/(FN + TP + FP) + + +def IoU_metric(Yi, y_predi): + # mean Intersection over Union + # Mean IoU = TP/(FN + TP + FP) y_predi = np.argmax(y_predi, axis=3) y_testi = np.argmax(Yi, axis=3) IoUs = [] Nclass = int(np.max(Yi)) + 1 for c in range(Nclass): - TP = np.sum( (Yi == c)&(y_predi==c) ) - FP = np.sum( (Yi != c)&(y_predi==c) ) - FN = np.sum( (Yi == c)&(y_predi != c)) - IoU = TP/float(TP + FP + FN) + TP = np.sum((Yi == c) & (y_predi == c)) + FP = np.sum((Yi != c) & (y_predi == c)) + FN = np.sum((Yi == c) & (y_predi != c)) + IoU = TP / float(TP + FP + FN) IoUs.append(IoU) - return K.cast( np.mean(IoUs) ,dtype='float32' ) + return K.cast(np.mean(IoUs), dtype='float32') def IoU_metric_keras(y_true, y_pred): - ## mean Intersection over Union - ## Mean IoU = TP/(FN + TP + FP) + # mean Intersection over Union + # Mean IoU = TP/(FN + TP + FP) init = tf.global_variables_initializer() sess = tf.Session() sess.run(init) - + return IoU_metric(y_true.eval(session=sess), y_pred.eval(session=sess)) + def jaccard_distance_loss(y_true, y_pred, smooth=100): """ Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|) @@ -334,5 +355,3 @@ def jaccard_distance_loss(y_true, y_pred, smooth=100): sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1) jac = (intersection + smooth) / (sum_ - intersection + smooth) return (1 - jac) * smooth - - diff --git a/train/models.py b/train/models.py index 40a21a1..f06823e 100644 --- a/train/models.py +++ b/train/models.py @@ -3,19 +3,20 @@ from tensorflow.keras.layers import * from tensorflow.keras import layers from tensorflow.keras.regularizers import l2 -resnet50_Weights_path='./pretrained_model/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5' -IMAGE_ORDERING ='channels_last' -MERGE_AXIS=-1 +resnet50_Weights_path = './pretrained_model/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5' +IMAGE_ORDERING = 'channels_last' +MERGE_AXIS = -1 -def one_side_pad( x ): +def one_side_pad(x): x = ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING)(x) if IMAGE_ORDERING == 'channels_first': - x = Lambda(lambda x : x[: , : , :-1 , :-1 ] )(x) + x = Lambda(lambda x: x[:, :, :-1, :-1])(x) elif IMAGE_ORDERING == 'channels_last': - x = Lambda(lambda x : x[: , :-1 , :-1 , : ] )(x) + x = Lambda(lambda x: x[:, :-1, :-1, :])(x) return x + def identity_block(input_tensor, kernel_size, filters, stage, block): """The identity block is the block that has no conv layer at shortcut. # Arguments @@ -28,7 +29,7 @@ def identity_block(input_tensor, kernel_size, filters, stage, block): Output tensor for the block. """ filters1, filters2, filters3 = filters - + if IMAGE_ORDERING == 'channels_last': bn_axis = 3 else: @@ -37,16 +38,16 @@ def identity_block(input_tensor, kernel_size, filters, stage, block): conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' - x = Conv2D(filters1, (1, 1) , data_format=IMAGE_ORDERING , name=conv_name_base + '2a')(input_tensor) + x = Conv2D(filters1, (1, 1), data_format=IMAGE_ORDERING, name=conv_name_base + '2a')(input_tensor) x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x) x = Activation('relu')(x) - x = Conv2D(filters2, kernel_size , data_format=IMAGE_ORDERING , + x = Conv2D(filters2, kernel_size, data_format=IMAGE_ORDERING, padding='same', name=conv_name_base + '2b')(x) x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x) x = Activation('relu')(x) - x = Conv2D(filters3 , (1, 1), data_format=IMAGE_ORDERING , name=conv_name_base + '2c')(x) + x = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING, name=conv_name_base + '2c')(x) x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x) x = layers.add([x, input_tensor]) @@ -68,7 +69,7 @@ def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)) And the shortcut should have strides=(2,2) as well """ filters1, filters2, filters3 = filters - + if IMAGE_ORDERING == 'channels_last': bn_axis = 3 else: @@ -77,20 +78,20 @@ def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)) conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' - x = Conv2D(filters1, (1, 1) , data_format=IMAGE_ORDERING , strides=strides, + x = Conv2D(filters1, (1, 1), data_format=IMAGE_ORDERING, strides=strides, name=conv_name_base + '2a')(input_tensor) x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x) x = Activation('relu')(x) - x = Conv2D(filters2, kernel_size , data_format=IMAGE_ORDERING , padding='same', + x = Conv2D(filters2, kernel_size, data_format=IMAGE_ORDERING, padding='same', name=conv_name_base + '2b')(x) x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x) x = Activation('relu')(x) - x = Conv2D(filters3, (1, 1) , data_format=IMAGE_ORDERING , name=conv_name_base + '2c')(x) + x = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING, name=conv_name_base + '2c')(x) x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x) - shortcut = Conv2D(filters3, (1, 1) , data_format=IMAGE_ORDERING , strides=strides, + shortcut = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING, strides=strides, name=conv_name_base + '1')(input_tensor) shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut) @@ -99,12 +100,11 @@ def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)) return x -def resnet50_unet_light(n_classes,input_height=224,input_width=224,weight_decay=1e-6,pretraining=False): - assert input_height%32 == 0 - assert input_width%32 == 0 +def resnet50_unet_light(n_classes, input_height=224, input_width=224, weight_decay=1e-6, pretraining=False): + assert input_height % 32 == 0 + assert input_width % 32 == 0 - - img_input = Input(shape=(input_height,input_width , 3 )) + img_input = Input(shape=(input_height, input_width, 3)) if IMAGE_ORDERING == 'channels_last': bn_axis = 3 @@ -112,25 +112,24 @@ def resnet50_unet_light(n_classes,input_height=224,input_width=224,weight_decay= bn_axis = 1 x = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(img_input) - x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2),kernel_regularizer=l2(weight_decay), name='conv1')(x) + x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2), kernel_regularizer=l2(weight_decay), + name='conv1')(x) f1 = x x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x) x = Activation('relu')(x) - x = MaxPooling2D((3, 3) , data_format=IMAGE_ORDERING , strides=(2, 2))(x) - + x = MaxPooling2D((3, 3), data_format=IMAGE_ORDERING, strides=(2, 2))(x) x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') - f2 = one_side_pad(x ) - + f2 = one_side_pad(x) x = conv_block(x, 3, [128, 128, 512], stage=3, block='a') x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') - f3 = x + f3 = x x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') @@ -138,85 +137,72 @@ def resnet50_unet_light(n_classes,input_height=224,input_width=224,weight_decay= x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f') - f4 = x + f4 = x x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a') x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b') x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c') - f5 = x - + f5 = x if pretraining: - model=Model( img_input , x ).load_weights(resnet50_Weights_path) + model = Model(img_input, x).load_weights(resnet50_Weights_path) - - v512_2048 = Conv2D( 512 , (1, 1) , padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) )( f5 ) - v512_2048 = ( BatchNormalization(axis=bn_axis))(v512_2048) + v512_2048 = Conv2D(512, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(f5) + v512_2048 = (BatchNormalization(axis=bn_axis))(v512_2048) v512_2048 = Activation('relu')(v512_2048) - - - v512_1024=Conv2D( 512 , (1, 1) , padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) )( f4 ) - v512_1024 = ( BatchNormalization(axis=bn_axis))(v512_1024) + v512_1024 = Conv2D(512, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(f4) + v512_1024 = (BatchNormalization(axis=bn_axis))(v512_1024) v512_1024 = Activation('relu')(v512_1024) - - - o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(v512_2048) - o = ( concatenate([ o ,v512_1024],axis=MERGE_AXIS ) ) - o = ( ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(o) - o = ( Conv2D(512, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) - o = ( BatchNormalization(axis=bn_axis))(o) + + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(v512_2048) + o = (concatenate([o, v512_1024], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(512, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) o = Activation('relu')(o) - - o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) - o = ( concatenate([ o ,f3],axis=MERGE_AXIS ) ) - o = ( ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(o) - o = ( Conv2D( 256, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) - o = ( BatchNormalization(axis=bn_axis))(o) + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o, f3], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(256, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) o = Activation('relu')(o) - - o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) - o = ( concatenate([o,f2],axis=MERGE_AXIS ) ) - o = ( ZeroPadding2D((1,1) , data_format=IMAGE_ORDERING))(o) - o = ( Conv2D( 128 , (3, 3), padding='valid' , data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay) ) )(o) - o = ( BatchNormalization(axis=bn_axis))(o) + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o, f2], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(128, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) o = Activation('relu')(o) - - - o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) - o = ( concatenate([o,f1],axis=MERGE_AXIS ) ) - o = ( ZeroPadding2D((1,1) , data_format=IMAGE_ORDERING ))(o) - o = ( Conv2D( 64 , (3, 3), padding='valid' , data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) ))(o) - o = ( BatchNormalization(axis=bn_axis))(o) + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o, f1], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(64, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) o = Activation('relu')(o) - - o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) - o = ( concatenate([o,img_input],axis=MERGE_AXIS ) ) - o = ( ZeroPadding2D((1,1) , data_format=IMAGE_ORDERING ))(o) - o = ( Conv2D( 32 , (3, 3), padding='valid' , data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) ))(o) - o = ( BatchNormalization(axis=bn_axis))(o) + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o, img_input], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(32, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) o = Activation('relu')(o) - - - o = Conv2D( n_classes , (1, 1) , padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) )( o ) - o = ( BatchNormalization(axis=bn_axis))(o) + o = Conv2D(n_classes, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(o) + o = (BatchNormalization(axis=bn_axis))(o) o = (Activation('softmax'))(o) - - model = Model( img_input , o ) + model = Model(img_input, o) return model -def resnet50_unet(n_classes,input_height=224,input_width=224,weight_decay=1e-6,pretraining=False): - assert input_height%32 == 0 - assert input_width%32 == 0 - - img_input = Input(shape=(input_height,input_width , 3 )) +def resnet50_unet(n_classes, input_height=224, input_width=224, weight_decay=1e-6, pretraining=False): + assert input_height % 32 == 0 + assert input_width % 32 == 0 + + img_input = Input(shape=(input_height, input_width, 3)) if IMAGE_ORDERING == 'channels_last': bn_axis = 3 @@ -224,25 +210,24 @@ def resnet50_unet(n_classes,input_height=224,input_width=224,weight_decay=1e-6,p bn_axis = 1 x = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(img_input) - x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2),kernel_regularizer=l2(weight_decay), name='conv1')(x) + x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2), kernel_regularizer=l2(weight_decay), + name='conv1')(x) f1 = x x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x) x = Activation('relu')(x) - x = MaxPooling2D((3, 3) , data_format=IMAGE_ORDERING , strides=(2, 2))(x) - + x = MaxPooling2D((3, 3), data_format=IMAGE_ORDERING, strides=(2, 2))(x) x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') - f2 = one_side_pad(x ) - + f2 = one_side_pad(x) x = conv_block(x, 3, [128, 128, 512], stage=3, block='a') x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') - f3 = x + f3 = x x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') @@ -250,68 +235,60 @@ def resnet50_unet(n_classes,input_height=224,input_width=224,weight_decay=1e-6,p x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f') - f4 = x + f4 = x x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a') x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b') x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c') - f5 = x + f5 = x if pretraining: - Model( img_input , x ).load_weights(resnet50_Weights_path) + Model(img_input, x).load_weights(resnet50_Weights_path) - v1024_2048 = Conv2D( 1024 , (1, 1) , padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) )( f5 ) - v1024_2048 = ( BatchNormalization(axis=bn_axis))(v1024_2048) + v1024_2048 = Conv2D(1024, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))( + f5) + v1024_2048 = (BatchNormalization(axis=bn_axis))(v1024_2048) v1024_2048 = Activation('relu')(v1024_2048) - - - o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(v1024_2048) - o = ( concatenate([ o ,f4],axis=MERGE_AXIS ) ) - o = ( ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(o) - o = ( Conv2D(512, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) - o = ( BatchNormalization(axis=bn_axis))(o) + + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(v1024_2048) + o = (concatenate([o, f4], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(512, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) o = Activation('relu')(o) - - o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) - o = ( concatenate([ o ,f3],axis=MERGE_AXIS ) ) - o = ( ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(o) - o = ( Conv2D( 256, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) - o = ( BatchNormalization(axis=bn_axis))(o) + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o, f3], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(256, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) o = Activation('relu')(o) - - o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) - o = ( concatenate([o,f2],axis=MERGE_AXIS ) ) - o = ( ZeroPadding2D((1,1) , data_format=IMAGE_ORDERING))(o) - o = ( Conv2D( 128 , (3, 3), padding='valid' , data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay) ) )(o) - o = ( BatchNormalization(axis=bn_axis))(o) + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o, f2], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(128, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) o = Activation('relu')(o) - - o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) - o = ( concatenate([o,f1],axis=MERGE_AXIS ) ) - o = ( ZeroPadding2D((1,1) , data_format=IMAGE_ORDERING ))(o) - o = ( Conv2D( 64 , (3, 3), padding='valid' , data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) ))(o) - o = ( BatchNormalization(axis=bn_axis))(o) + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o, f1], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(64, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) o = Activation('relu')(o) - - o = ( UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(o) - o = ( concatenate([o,img_input],axis=MERGE_AXIS ) ) - o = ( ZeroPadding2D((1,1) , data_format=IMAGE_ORDERING ))(o) - o = ( Conv2D( 32 , (3, 3), padding='valid' , data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) ))(o) - o = ( BatchNormalization(axis=bn_axis))(o) + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o, img_input], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(32, (3, 3), padding='valid', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) o = Activation('relu')(o) - - - o = Conv2D( n_classes , (1, 1) , padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay) )( o ) - o = ( BatchNormalization(axis=bn_axis))(o) + + o = Conv2D(n_classes, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(o) + o = (BatchNormalization(axis=bn_axis))(o) o = (Activation('softmax'))(o) - - model = Model( img_input , o ) - - + model = Model(img_input, o) return model diff --git a/train/requirements.txt b/train/requirements.txt index cbe2d88..20b6a32 100644 --- a/train/requirements.txt +++ b/train/requirements.txt @@ -4,3 +4,5 @@ opencv-python-headless seaborn tqdm imutils +numpy +scipy diff --git a/train/train.py b/train/train.py index 9f833e0..03faf46 100644 --- a/train/train.py +++ b/train/train.py @@ -11,12 +11,14 @@ from metrics import * from tensorflow.keras.models import load_model from tqdm import tqdm + def configuration(): config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True session = tf.compat.v1.Session(config=config) set_session(session) + def get_dirs_or_files(input_data): if os.path.isdir(input_data): image_input, labels_input = os.path.join(input_data, 'images/'), os.path.join(input_data, 'labels/') @@ -25,205 +27,187 @@ def get_dirs_or_files(input_data): assert os.path.isdir(labels_input), "{} is not a directory".format(labels_input) return image_input, labels_input + ex = Experiment() + @ex.config def config_params(): - n_classes=None # Number of classes. In the case of binary classification this should be 2. - n_epochs=1 # Number of epochs. - input_height=224*1 # Height of model's input in pixels. - input_width=224*1 # Width of model's input in pixels. - weight_decay=1e-6 # Weight decay of l2 regularization of model layers. - n_batch=1 # Number of batches at each iteration. - learning_rate=1e-4 # Set the learning rate. - patches=False # Divides input image into smaller patches (input size of the model) when set to true. For the model to see the full image, like page extraction, set this to false. - augmentation=False # To apply any kind of augmentation, this parameter must be set to true. - flip_aug=False # If true, different types of flipping will be applied to the image. Types of flips are defined with "flip_index" in train.py. - blur_aug=False # If true, different types of blurring will be applied to the image. Types of blur are defined with "blur_k" in train.py. - scaling=False # If true, scaling will be applied to the image. The amount of scaling is defined with "scales" in train.py. - binarization=False # If true, Otsu thresholding will be applied to augment the input with binarized images. - dir_train=None # Directory of training dataset with subdirectories having the names "images" and "labels". - dir_eval=None # Directory of validation dataset with subdirectories having the names "images" and "labels". - dir_output=None # Directory where the output model will be saved. - pretraining=False # Set to true to load pretrained weights of ResNet50 encoder. - scaling_bluring=False # If true, a combination of scaling and blurring will be applied to the image. - scaling_binarization=False # If true, a combination of scaling and binarization will be applied to the image. - scaling_flip=False # If true, a combination of scaling and flipping will be applied to the image. - thetha=[10,-10] # Rotate image by these angles for augmentation. - blur_k=['blur','gauss','median'] # Blur image for augmentation. - scales=[0.5,2] # Scale patches for augmentation. - flip_index=[0,1,-1] # Flip image for augmentation. - continue_training = False # Set to true if you would like to continue training an already trained a model. - index_start = 0 # Index of model to continue training from. E.g. if you trained for 3 epochs and last index is 2, to continue from model_1.h5, set "index_start" to 3 to start naming model with index 3. - dir_of_start_model = '' # Directory containing pretrained encoder to continue training the model. - is_loss_soft_dice = False # Use soft dice as loss function. When set to true, "weighted_loss" must be false. - weighted_loss = False # Use weighted categorical cross entropy as loss fucntion. When set to true, "is_loss_soft_dice" must be false. - data_is_provided = False # Only set this to true when you have already provided the input data and the train and eval data are in "dir_output". + n_classes = None # Number of classes. In the case of binary classification this should be 2. + n_epochs = 1 # Number of epochs. + input_height = 224 * 1 # Height of model's input in pixels. + input_width = 224 * 1 # Width of model's input in pixels. + weight_decay = 1e-6 # Weight decay of l2 regularization of model layers. + n_batch = 1 # Number of batches at each iteration. + learning_rate = 1e-4 # Set the learning rate. + patches = False # Divides input image into smaller patches (input size of the model) when set to true. For the model to see the full image, like page extraction, set this to false. + augmentation = False # To apply any kind of augmentation, this parameter must be set to true. + flip_aug = False # If true, different types of flipping will be applied to the image. Types of flips are defined with "flip_index" in train.py. + blur_aug = False # If true, different types of blurring will be applied to the image. Types of blur are defined with "blur_k" in train.py. + scaling = False # If true, scaling will be applied to the image. The amount of scaling is defined with "scales" in train.py. + binarization = False # If true, Otsu thresholding will be applied to augment the input with binarized images. + dir_train = None # Directory of training dataset with subdirectories having the names "images" and "labels". + dir_eval = None # Directory of validation dataset with subdirectories having the names "images" and "labels". + dir_output = None # Directory where the output model will be saved. + pretraining = False # Set to true to load pretrained weights of ResNet50 encoder. + scaling_bluring = False # If true, a combination of scaling and blurring will be applied to the image. + scaling_binarization = False # If true, a combination of scaling and binarization will be applied to the image. + scaling_flip = False # If true, a combination of scaling and flipping will be applied to the image. + thetha = [10, -10] # Rotate image by these angles for augmentation. + blur_k = ['blur', 'gauss', 'median'] # Blur image for augmentation. + scales = [0.5, 2] # Scale patches for augmentation. + flip_index = [0, 1, -1] # Flip image for augmentation. + continue_training = False # Set to true if you would like to continue training an already trained a model. + index_start = 0 # Index of model to continue training from. E.g. if you trained for 3 epochs and last index is 2, to continue from model_1.h5, set "index_start" to 3 to start naming model with index 3. + dir_of_start_model = '' # Directory containing pretrained encoder to continue training the model. + is_loss_soft_dice = False # Use soft dice as loss function. When set to true, "weighted_loss" must be false. + weighted_loss = False # Use weighted categorical cross entropy as loss fucntion. When set to true, "is_loss_soft_dice" must be false. + data_is_provided = False # Only set this to true when you have already provided the input data and the train and eval data are in "dir_output". + @ex.automain -def run(n_classes,n_epochs,input_height, - input_width,weight_decay,weighted_loss, - index_start,dir_of_start_model,is_loss_soft_dice, - n_batch,patches,augmentation,flip_aug - ,blur_aug,scaling, binarization, - blur_k,scales,dir_train,data_is_provided, - scaling_bluring,scaling_binarization,rotation, - rotation_not_90,thetha,scaling_flip,continue_training, - flip_index,dir_eval ,dir_output,pretraining,learning_rate): - - +def run(n_classes, n_epochs, input_height, + input_width, weight_decay, weighted_loss, + index_start, dir_of_start_model, is_loss_soft_dice, + n_batch, patches, augmentation, flip_aug, + blur_aug, scaling, binarization, + blur_k, scales, dir_train, data_is_provided, + scaling_bluring, scaling_binarization, rotation, + rotation_not_90, thetha, scaling_flip, continue_training, + flip_index, dir_eval, dir_output, pretraining, learning_rate): if data_is_provided: - dir_train_flowing=os.path.join(dir_output,'train') - dir_eval_flowing=os.path.join(dir_output,'eval') - - dir_flow_train_imgs=os.path.join(dir_train_flowing,'images') - dir_flow_train_labels=os.path.join(dir_train_flowing,'labels') - - dir_flow_eval_imgs=os.path.join(dir_eval_flowing,'images') - dir_flow_eval_labels=os.path.join(dir_eval_flowing,'labels') - + dir_train_flowing = os.path.join(dir_output, 'train') + dir_eval_flowing = os.path.join(dir_output, 'eval') + + dir_flow_train_imgs = os.path.join(dir_train_flowing, 'images') + dir_flow_train_labels = os.path.join(dir_train_flowing, 'labels') + + dir_flow_eval_imgs = os.path.join(dir_eval_flowing, 'images') + dir_flow_eval_labels = os.path.join(dir_eval_flowing, 'labels') + configuration() - + else: - dir_img,dir_seg=get_dirs_or_files(dir_train) - dir_img_val,dir_seg_val=get_dirs_or_files(dir_eval) - + dir_img, dir_seg = get_dirs_or_files(dir_train) + dir_img_val, dir_seg_val = get_dirs_or_files(dir_eval) + # make first a directory in output for both training and evaluations in order to flow data from these directories. - dir_train_flowing=os.path.join(dir_output,'train') - dir_eval_flowing=os.path.join(dir_output,'eval') - - dir_flow_train_imgs=os.path.join(dir_train_flowing,'images/') - dir_flow_train_labels=os.path.join(dir_train_flowing,'labels/') - - dir_flow_eval_imgs=os.path.join(dir_eval_flowing,'images/') - dir_flow_eval_labels=os.path.join(dir_eval_flowing,'labels/') - + dir_train_flowing = os.path.join(dir_output, 'train') + dir_eval_flowing = os.path.join(dir_output, 'eval') + + dir_flow_train_imgs = os.path.join(dir_train_flowing, 'images/') + dir_flow_train_labels = os.path.join(dir_train_flowing, 'labels/') + + dir_flow_eval_imgs = os.path.join(dir_eval_flowing, 'images/') + dir_flow_eval_labels = os.path.join(dir_eval_flowing, 'labels/') + if os.path.isdir(dir_train_flowing): - os.system('rm -rf '+dir_train_flowing) + os.system('rm -rf ' + dir_train_flowing) os.makedirs(dir_train_flowing) else: os.makedirs(dir_train_flowing) - + if os.path.isdir(dir_eval_flowing): - os.system('rm -rf '+dir_eval_flowing) + os.system('rm -rf ' + dir_eval_flowing) os.makedirs(dir_eval_flowing) else: os.makedirs(dir_eval_flowing) - os.mkdir(dir_flow_train_imgs) os.mkdir(dir_flow_train_labels) - + os.mkdir(dir_flow_eval_imgs) os.mkdir(dir_flow_eval_labels) - - - #set the gpu configuration + + # set the gpu configuration configuration() - - #writing patches into a sub-folder in order to be flowed from directory. - provide_patches(dir_img,dir_seg,dir_flow_train_imgs, + # writing patches into a sub-folder in order to be flowed from directory. + provide_patches(dir_img, dir_seg, dir_flow_train_imgs, dir_flow_train_labels, - input_height,input_width,blur_k,blur_aug, - flip_aug,binarization,scaling,scales,flip_index, - scaling_bluring,scaling_binarization,rotation, - rotation_not_90,thetha,scaling_flip, - augmentation=augmentation,patches=patches) - - provide_patches(dir_img_val,dir_seg_val,dir_flow_eval_imgs, - dir_flow_eval_labels, - input_height,input_width,blur_k,blur_aug, - flip_aug,binarization,scaling,scales,flip_index, - scaling_bluring,scaling_binarization,rotation, - rotation_not_90,thetha,scaling_flip, - augmentation=False,patches=patches) - + input_height, input_width, blur_k, blur_aug, + flip_aug, binarization, scaling, scales, flip_index, + scaling_bluring, scaling_binarization, rotation, + rotation_not_90, thetha, scaling_flip, + augmentation=augmentation, patches=patches) + + provide_patches(dir_img_val, dir_seg_val, dir_flow_eval_imgs, + dir_flow_eval_labels, + input_height, input_width, blur_k, blur_aug, + flip_aug, binarization, scaling, scales, flip_index, + scaling_bluring, scaling_binarization, rotation, + rotation_not_90, thetha, scaling_flip, + augmentation=False, patches=patches) - if weighted_loss: - weights=np.zeros(n_classes) + weights = np.zeros(n_classes) if data_is_provided: for obj in os.listdir(dir_flow_train_labels): try: - label_obj=cv2.imread(dir_flow_train_labels+'/'+obj) - label_obj_one_hot=get_one_hot( label_obj,label_obj.shape[0],label_obj.shape[1],n_classes) - weights+=(label_obj_one_hot.sum(axis=0)).sum(axis=0) + label_obj = cv2.imread(dir_flow_train_labels + '/' + obj) + label_obj_one_hot = get_one_hot(label_obj, label_obj.shape[0], label_obj.shape[1], n_classes) + weights += (label_obj_one_hot.sum(axis=0)).sum(axis=0) except: pass else: - + for obj in os.listdir(dir_seg): try: - label_obj=cv2.imread(dir_seg+'/'+obj) - label_obj_one_hot=get_one_hot( label_obj,label_obj.shape[0],label_obj.shape[1],n_classes) - weights+=(label_obj_one_hot.sum(axis=0)).sum(axis=0) + label_obj = cv2.imread(dir_seg + '/' + obj) + label_obj_one_hot = get_one_hot(label_obj, label_obj.shape[0], label_obj.shape[1], n_classes) + weights += (label_obj_one_hot.sum(axis=0)).sum(axis=0) except: pass - - weights=1.00/weights - - weights=weights/float(np.sum(weights)) - weights=weights/float(np.min(weights)) - weights=weights/float(np.sum(weights)) - - - + weights = 1.00 / weights + + weights = weights / float(np.sum(weights)) + weights = weights / float(np.min(weights)) + weights = weights / float(np.sum(weights)) + if continue_training: if is_loss_soft_dice: - model = load_model (dir_of_start_model, compile = True, custom_objects={'soft_dice_loss': soft_dice_loss}) + model = load_model(dir_of_start_model, compile=True, custom_objects={'soft_dice_loss': soft_dice_loss}) if weighted_loss: - model = load_model (dir_of_start_model, compile = True, custom_objects={'loss': weighted_categorical_crossentropy(weights)}) + model = load_model(dir_of_start_model, compile=True, + custom_objects={'loss': weighted_categorical_crossentropy(weights)}) if not is_loss_soft_dice and not weighted_loss: - model = load_model (dir_of_start_model, compile = True) + model = load_model(dir_of_start_model, compile=True) else: - #get our model. + # get our model. index_start = 0 - model = resnet50_unet(n_classes, input_height, input_width,weight_decay,pretraining) - - #if you want to see the model structure just uncomment model summary. - #model.summary() - + model = resnet50_unet(n_classes, input_height, input_width, weight_decay, pretraining) + + # if you want to see the model structure just uncomment model summary. + # model.summary() if not is_loss_soft_dice and not weighted_loss: model.compile(loss='categorical_crossentropy', - optimizer = Adam(lr=learning_rate),metrics=['accuracy']) - if is_loss_soft_dice: + optimizer=Adam(lr=learning_rate), metrics=['accuracy']) + if is_loss_soft_dice: model.compile(loss=soft_dice_loss, - optimizer = Adam(lr=learning_rate),metrics=['accuracy']) - + optimizer=Adam(lr=learning_rate), metrics=['accuracy']) + if weighted_loss: model.compile(loss=weighted_categorical_crossentropy(weights), - optimizer = Adam(lr=learning_rate),metrics=['accuracy']) - - #generating train and evaluation data - train_gen = data_gen(dir_flow_train_imgs,dir_flow_train_labels, batch_size = n_batch, - input_height=input_height, input_width=input_width,n_classes=n_classes ) - val_gen = data_gen(dir_flow_eval_imgs,dir_flow_eval_labels, batch_size = n_batch, - input_height=input_height, input_width=input_width,n_classes=n_classes ) - - for i in tqdm(range(index_start, n_epochs+index_start)): + optimizer=Adam(lr=learning_rate), metrics=['accuracy']) + + # generating train and evaluation data + train_gen = data_gen(dir_flow_train_imgs, dir_flow_train_labels, batch_size=n_batch, + input_height=input_height, input_width=input_width, n_classes=n_classes) + val_gen = data_gen(dir_flow_eval_imgs, dir_flow_eval_labels, batch_size=n_batch, + input_height=input_height, input_width=input_width, n_classes=n_classes) + + for i in tqdm(range(index_start, n_epochs + index_start)): model.fit_generator( train_gen, - steps_per_epoch=int(len(os.listdir(dir_flow_train_imgs))/n_batch)-1, + steps_per_epoch=int(len(os.listdir(dir_flow_train_imgs)) / n_batch) - 1, validation_data=val_gen, validation_steps=1, epochs=1) - model.save(dir_output+'/'+'model_'+str(i)) - - - #os.system('rm -rf '+dir_train_flowing) - #os.system('rm -rf '+dir_eval_flowing) - - #model.save(dir_output+'/'+'model'+'.h5') - - - - - - - - + model.save(dir_output + '/' + 'model_' + str(i)) + # os.system('rm -rf '+dir_train_flowing) + # os.system('rm -rf '+dir_eval_flowing) + # model.save(dir_output+'/'+'model'+'.h5') diff --git a/train/utils.py b/train/utils.py index 19ab46e..7c65f18 100644 --- a/train/utils.py +++ b/train/utils.py @@ -10,18 +10,17 @@ import imutils import math - -def bluring(img_in,kind): - if kind=='guass': - img_blur = cv2.GaussianBlur(img_in,(5,5),0) - elif kind=="median": - img_blur = cv2.medianBlur(img_in,5) - elif kind=='blur': - img_blur=cv2.blur(img_in,(5,5)) +def bluring(img_in, kind): + if kind == 'gauss': + img_blur = cv2.GaussianBlur(img_in, (5, 5), 0) + elif kind == "median": + img_blur = cv2.medianBlur(img_in, 5) + elif kind == 'blur': + img_blur = cv2.blur(img_in, (5, 5)) return img_blur -def elastic_transform(image, alpha, sigma,seedj, random_state=None): - + +def elastic_transform(image, alpha, sigma, seedj, random_state=None): """Elastic deformation of images as described in [Simard2003]_. .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for Convolutional Neural Networks applied to Visual Document Analysis", in @@ -37,461 +36,459 @@ def elastic_transform(image, alpha, sigma,seedj, random_state=None): dz = np.zeros_like(dx) x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2])) - indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1)) + indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1)) distored_image = map_coordinates(image, indices, order=1, mode='reflect') return distored_image.reshape(image.shape) + def rotation_90(img): - img_rot=np.zeros((img.shape[1],img.shape[0],img.shape[2])) - img_rot[:,:,0]=img[:,:,0].T - img_rot[:,:,1]=img[:,:,1].T - img_rot[:,:,2]=img[:,:,2].T + img_rot = np.zeros((img.shape[1], img.shape[0], img.shape[2])) + img_rot[:, :, 0] = img[:, :, 0].T + img_rot[:, :, 1] = img[:, :, 1].T + img_rot[:, :, 2] = img[:, :, 2].T return img_rot + def rotatedRectWithMaxArea(w, h, angle): - """ + """ Given a rectangle of size wxh that has been rotated by 'angle' (in radians), computes the width and height of the largest possible axis-aligned rectangle (maximal area) within the rotated rectangle. """ - if w <= 0 or h <= 0: - return 0,0 + if w <= 0 or h <= 0: + return 0, 0 - width_is_longer = w >= h - side_long, side_short = (w,h) if width_is_longer else (h,w) + width_is_longer = w >= h + side_long, side_short = (w, h) if width_is_longer else (h, w) - # since the solutions for angle, -angle and 180-angle are all the same, - # if suffices to look at the first quadrant and the absolute values of sin,cos: - sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle)) - if side_short <= 2.*sin_a*cos_a*side_long or abs(sin_a-cos_a) < 1e-10: - # half constrained case: two crop corners touch the longer side, - # the other two corners are on the mid-line parallel to the longer line - x = 0.5*side_short - wr,hr = (x/sin_a,x/cos_a) if width_is_longer else (x/cos_a,x/sin_a) - else: - # fully constrained case: crop touches all 4 sides - cos_2a = cos_a*cos_a - sin_a*sin_a - wr,hr = (w*cos_a - h*sin_a)/cos_2a, (h*cos_a - w*sin_a)/cos_2a + # since the solutions for angle, -angle and 180-angle are all the same, + # if suffices to look at the first quadrant and the absolute values of sin,cos: + sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle)) + if side_short <= 2. * sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10: + # half constrained case: two crop corners touch the longer side, + # the other two corners are on the mid-line parallel to the longer line + x = 0.5 * side_short + wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a) + else: + # fully constrained case: crop touches all 4 sides + cos_2a = cos_a * cos_a - sin_a * sin_a + wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a - return wr,hr + return wr, hr -def rotate_max_area(image,rotated, rotated_label,angle): + +def rotate_max_area(image, rotated, rotated_label, angle): """ image: cv2 image matrix object angle: in degree """ wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], math.radians(angle)) h, w, _ = rotated.shape - y1 = h//2 - int(hr/2) + y1 = h // 2 - int(hr / 2) y2 = y1 + int(hr) - x1 = w//2 - int(wr/2) + x1 = w // 2 - int(wr / 2) x2 = x1 + int(wr) - return rotated[y1:y2, x1:x2],rotated_label[y1:y2, x1:x2] -def rotation_not_90_func(img,label,thetha): - rotated=imutils.rotate(img,thetha) - rotated_label=imutils.rotate(label,thetha) - return rotate_max_area(img, rotated,rotated_label,thetha) + return rotated[y1:y2, x1:x2], rotated_label[y1:y2, x1:x2] + + +def rotation_not_90_func(img, label, thetha): + rotated = imutils.rotate(img, thetha) + rotated_label = imutils.rotate(label, thetha) + return rotate_max_area(img, rotated, rotated_label, thetha) + def color_images(seg, n_classes): - ann_u=range(n_classes) - if len(np.shape(seg))==3: - seg=seg[:,:,0] - - seg_img=np.zeros((np.shape(seg)[0],np.shape(seg)[1],3)).astype(float) - colors=sns.color_palette("hls", n_classes) - + ann_u = range(n_classes) + if len(np.shape(seg)) == 3: + seg = seg[:, :, 0] + + seg_img = np.zeros((np.shape(seg)[0], np.shape(seg)[1], 3)).astype(float) + colors = sns.color_palette("hls", n_classes) + for c in ann_u: - c=int(c) - segl=(seg==c) - seg_img[:,:,0]+=segl*(colors[c][0]) - seg_img[:,:,1]+=segl*(colors[c][1]) - seg_img[:,:,2]+=segl*(colors[c][2]) + c = int(c) + segl = (seg == c) + seg_img[:, :, 0] += segl * (colors[c][0]) + seg_img[:, :, 1] += segl * (colors[c][1]) + seg_img[:, :, 2] += segl * (colors[c][2]) return seg_img - -def resize_image(seg_in,input_height,input_width): - return cv2.resize(seg_in,(input_width,input_height),interpolation=cv2.INTER_NEAREST) -def get_one_hot(seg,input_height,input_width,n_classes): - seg=seg[:,:,0] - seg_f=np.zeros((input_height, input_width,n_classes)) + +def resize_image(seg_in, input_height, input_width): + return cv2.resize(seg_in, (input_width, input_height), interpolation=cv2.INTER_NEAREST) + + +def get_one_hot(seg, input_height, input_width, n_classes): + seg = seg[:, :, 0] + seg_f = np.zeros((input_height, input_width, n_classes)) for j in range(n_classes): - seg_f[:,:,j]=(seg==j).astype(int) + seg_f[:, :, j] = (seg == j).astype(int) return seg_f - -def IoU(Yi,y_predi): + +def IoU(Yi, y_predi): ## mean Intersection over Union ## Mean IoU = TP/(FN + TP + FP) IoUs = [] - classes_true=np.unique(Yi) + classes_true = np.unique(Yi) for c in classes_true: - TP = np.sum( (Yi == c)&(y_predi==c) ) - FP = np.sum( (Yi != c)&(y_predi==c) ) - FN = np.sum( (Yi == c)&(y_predi != c)) - IoU = TP/float(TP + FP + FN) - print("class {:02.0f}: #TP={:6.0f}, #FP={:6.0f}, #FN={:5.0f}, IoU={:4.3f}".format(c,TP,FP,FN,IoU)) + TP = np.sum((Yi == c) & (y_predi == c)) + FP = np.sum((Yi != c) & (y_predi == c)) + FN = np.sum((Yi == c) & (y_predi != c)) + IoU = TP / float(TP + FP + FN) + print("class {:02.0f}: #TP={:6.0f}, #FP={:6.0f}, #FN={:5.0f}, IoU={:4.3f}".format(c, TP, FP, FN, IoU)) IoUs.append(IoU) mIoU = np.mean(IoUs) print("_________________") print("Mean IoU: {:4.3f}".format(mIoU)) return mIoU -def data_gen(img_folder, mask_folder, batch_size,input_height, input_width,n_classes): + + +def data_gen(img_folder, mask_folder, batch_size, input_height, input_width, n_classes): c = 0 - n = [f for f in os.listdir(img_folder) if not f.startswith('.')]# os.listdir(img_folder) #List of training images + n = [f for f in os.listdir(img_folder) if not f.startswith('.')] # os.listdir(img_folder) #List of training images random.shuffle(n) while True: img = np.zeros((batch_size, input_height, input_width, 3)).astype('float') mask = np.zeros((batch_size, input_height, input_width, n_classes)).astype('float') - - for i in range(c, c+batch_size): #initially from 0 to 16, c = 0. - #print(img_folder+'/'+n[i]) - - try: - filename=n[i].split('.')[0] - - train_img = cv2.imread(img_folder+'/'+n[i])/255. - train_img = cv2.resize(train_img, (input_width, input_height),interpolation=cv2.INTER_NEAREST)# Read an image from folder and resize - - img[i-c] = train_img #add to array - img[0], img[1], and so on. - train_mask = cv2.imread(mask_folder+'/'+filename+'.png') - #print(mask_folder+'/'+filename+'.png') - #print(train_mask.shape) - train_mask = get_one_hot( resize_image(train_mask,input_height,input_width),input_height,input_width,n_classes) - #train_mask = train_mask.reshape(224, 224, 1) # Add extra dimension for parity with train_img size [512 * 512 * 3] - - mask[i-c] = train_mask - except: - img[i-c] = np.ones((input_height, input_width, 3)).astype('float') - mask[i-c] = np.zeros((input_height, input_width, n_classes)).astype('float') - - - c+=batch_size - if(c+batch_size>=len(os.listdir(img_folder))): - c=0 + for i in range(c, c + batch_size): # initially from 0 to 16, c = 0. + # print(img_folder+'/'+n[i]) + + try: + filename = n[i].split('.')[0] + + train_img = cv2.imread(img_folder + '/' + n[i]) / 255. + train_img = cv2.resize(train_img, (input_width, input_height), + interpolation=cv2.INTER_NEAREST) # Read an image from folder and resize + + img[i - c] = train_img # add to array - img[0], img[1], and so on. + train_mask = cv2.imread(mask_folder + '/' + filename + '.png') + # print(mask_folder+'/'+filename+'.png') + # print(train_mask.shape) + train_mask = get_one_hot(resize_image(train_mask, input_height, input_width), input_height, input_width, + n_classes) + # train_mask = train_mask.reshape(224, 224, 1) # Add extra dimension for parity with train_img size [512 * 512 * 3] + + mask[i - c] = train_mask + except: + img[i - c] = np.ones((input_height, input_width, 3)).astype('float') + mask[i - c] = np.zeros((input_height, input_width, n_classes)).astype('float') + + c += batch_size + if c + batch_size >= len(os.listdir(img_folder)): + c = 0 random.shuffle(n) yield img, mask - + + def otsu_copy(img): - img_r=np.zeros(img.shape) - img1=img[:,:,0] - img2=img[:,:,1] - img3=img[:,:,2] - _, threshold1 = cv2.threshold(img1, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) - _, threshold2 = cv2.threshold(img2, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) - _, threshold3 = cv2.threshold(img3, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) - img_r[:,:,0]=threshold1 - img_r[:,:,1]=threshold1 - img_r[:,:,2]=threshold1 + img_r = np.zeros(img.shape) + img1 = img[:, :, 0] + img2 = img[:, :, 1] + img3 = img[:, :, 2] + _, threshold1 = cv2.threshold(img1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + _, threshold2 = cv2.threshold(img2, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + _, threshold3 = cv2.threshold(img3, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + img_r[:, :, 0] = threshold1 + img_r[:, :, 1] = threshold1 + img_r[:, :, 2] = threshold1 return img_r -def get_patches(dir_img_f,dir_seg_f,img,label,height,width,indexer): - if img.shape[0]int(nxf): - nxf=int(nxf)+1 - if nyf>int(nyf): - nyf=int(nyf)+1 - - nxf=int(nxf) - nyf=int(nyf) - + +def get_patches(dir_img_f, dir_seg_f, img, label, height, width, indexer): + if img.shape[0] < height or img.shape[1] < width: + img, label = do_padding(img, label, height, width) + + img_h = img.shape[0] + img_w = img.shape[1] + + nxf = img_w / float(width) + nyf = img_h / float(height) + + if nxf > int(nxf): + nxf = int(nxf) + 1 + if nyf > int(nyf): + nyf = int(nyf) + 1 + + nxf = int(nxf) + nyf = int(nyf) + for i in range(nxf): for j in range(nyf): - index_x_d=i*width - index_x_u=(i+1)*width - - index_y_d=j*height - index_y_u=(j+1)*height - - if index_x_u>img_w: - index_x_u=img_w - index_x_d=img_w-width - if index_y_u>img_h: - index_y_u=img_h - index_y_d=img_h-height - - - img_patch=img[index_y_d:index_y_u,index_x_d:index_x_u,:] - label_patch=label[index_y_d:index_y_u,index_x_d:index_x_u,:] - - cv2.imwrite(dir_img_f+'/img_'+str(indexer)+'.png', img_patch ) - cv2.imwrite(dir_seg_f+'/img_'+str(indexer)+'.png' , label_patch ) - indexer+=1 - - return indexer + index_x_d = i * width + index_x_u = (i + 1) * width -def do_padding(img,label,height,width): - - height_new=img.shape[0] - width_new=img.shape[1] - - h_start=0 - w_start=0 - - if img.shape[0]int(nxf): - nxf=int(nxf)+1 - if nyf>int(nyf): - nyf=int(nyf)+1 - - nxf=int(nxf) - nyf=int(nyf) - - for i in range(nxf): - for j in range(nyf): - index_x_d=i*width_scale - index_x_u=(i+1)*width_scale - - index_y_d=j*height_scale - index_y_u=(j+1)*height_scale - - if index_x_u>img_w: - index_x_u=img_w - index_x_d=img_w-width_scale - if index_y_u>img_h: - index_y_u=img_h - index_y_d=img_h-height_scale - - - img_patch=img[index_y_d:index_y_u,index_x_d:index_x_u,:] - label_patch=label[index_y_d:index_y_u,index_x_d:index_x_u,:] - - img_patch=resize_image(img_patch,height,width) - label_patch=resize_image(label_patch,height,width) - - cv2.imwrite(dir_img_f+'/img_'+str(indexer)+'.png', img_patch ) - cv2.imwrite(dir_seg_f+'/img_'+str(indexer)+'.png' , label_patch ) - indexer+=1 + index_y_d = j * height + index_y_u = (j + 1) * height - return indexer + if index_x_u > img_w: + index_x_u = img_w + index_x_d = img_w - width + if index_y_u > img_h: + index_y_u = img_h + index_y_d = img_h - height -def get_patches_num_scale_new(dir_img_f,dir_seg_f,img,label,height,width,indexer,scaler): - img=resize_image(img,int(img.shape[0]*scaler),int(img.shape[1]*scaler)) - label=resize_image(label,int(label.shape[0]*scaler),int(label.shape[1]*scaler)) - - if img.shape[0]int(nxf): - nxf=int(nxf)+1 - if nyf>int(nyf): - nyf=int(nyf)+1 - - nxf=int(nxf) - nyf=int(nyf) - - for i in range(nxf): - for j in range(nyf): - index_x_d=i*width_scale - index_x_u=(i+1)*width_scale - - index_y_d=j*height_scale - index_y_u=(j+1)*height_scale - - if index_x_u>img_w: - index_x_u=img_w - index_x_d=img_w-width_scale - if index_y_u>img_h: - index_y_u=img_h - index_y_d=img_h-height_scale - - - img_patch=img[index_y_d:index_y_u,index_x_d:index_x_u,:] - label_patch=label[index_y_d:index_y_u,index_x_d:index_x_u,:] - - #img_patch=resize_image(img_patch,height,width) - #label_patch=resize_image(label_patch,height,width) - - cv2.imwrite(dir_img_f+'/img_'+str(indexer)+'.png', img_patch ) - cv2.imwrite(dir_seg_f+'/img_'+str(indexer)+'.png' , label_patch ) - indexer+=1 + img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] + label_patch = label[index_y_d:index_y_u, index_x_d:index_x_u, :] + + cv2.imwrite(dir_img_f + '/img_' + str(indexer) + '.png', img_patch) + cv2.imwrite(dir_seg_f + '/img_' + str(indexer) + '.png', label_patch) + indexer += 1 return indexer -def provide_patches(dir_img,dir_seg,dir_flow_train_imgs, +def do_padding(img, label, height, width): + height_new = img.shape[0] + width_new = img.shape[1] + + h_start = 0 + w_start = 0 + + if img.shape[0] < height: + h_start = int(abs(height - img.shape[0]) / 2.) + height_new = height + + if img.shape[1] < width: + w_start = int(abs(width - img.shape[1]) / 2.) + width_new = width + + img_new = np.ones((height_new, width_new, img.shape[2])).astype(float) * 255 + label_new = np.zeros((height_new, width_new, label.shape[2])).astype(float) + + img_new[h_start:h_start + img.shape[0], w_start:w_start + img.shape[1], :] = np.copy(img[:, :, :]) + label_new[h_start:h_start + label.shape[0], w_start:w_start + label.shape[1], :] = np.copy(label[:, :, :]) + + return img_new, label_new + + +def get_patches_num_scale(dir_img_f, dir_seg_f, img, label, height, width, indexer, n_patches, scaler): + if img.shape[0] < height or img.shape[1] < width: + img, label = do_padding(img, label, height, width) + + img_h = img.shape[0] + img_w = img.shape[1] + + height_scale = int(height * scaler) + width_scale = int(width * scaler) + + nxf = img_w / float(width_scale) + nyf = img_h / float(height_scale) + + if nxf > int(nxf): + nxf = int(nxf) + 1 + if nyf > int(nyf): + nyf = int(nyf) + 1 + + nxf = int(nxf) + nyf = int(nyf) + + for i in range(nxf): + for j in range(nyf): + index_x_d = i * width_scale + index_x_u = (i + 1) * width_scale + + index_y_d = j * height_scale + index_y_u = (j + 1) * height_scale + + if index_x_u > img_w: + index_x_u = img_w + index_x_d = img_w - width_scale + if index_y_u > img_h: + index_y_u = img_h + index_y_d = img_h - height_scale + + img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] + label_patch = label[index_y_d:index_y_u, index_x_d:index_x_u, :] + + img_patch = resize_image(img_patch, height, width) + label_patch = resize_image(label_patch, height, width) + + cv2.imwrite(dir_img_f + '/img_' + str(indexer) + '.png', img_patch) + cv2.imwrite(dir_seg_f + '/img_' + str(indexer) + '.png', label_patch) + indexer += 1 + + return indexer + + +def get_patches_num_scale_new(dir_img_f, dir_seg_f, img, label, height, width, indexer, scaler): + img = resize_image(img, int(img.shape[0] * scaler), int(img.shape[1] * scaler)) + label = resize_image(label, int(label.shape[0] * scaler), int(label.shape[1] * scaler)) + + if img.shape[0] < height or img.shape[1] < width: + img, label = do_padding(img, label, height, width) + + img_h = img.shape[0] + img_w = img.shape[1] + + height_scale = int(height * 1) + width_scale = int(width * 1) + + nxf = img_w / float(width_scale) + nyf = img_h / float(height_scale) + + if nxf > int(nxf): + nxf = int(nxf) + 1 + if nyf > int(nyf): + nyf = int(nyf) + 1 + + nxf = int(nxf) + nyf = int(nyf) + + for i in range(nxf): + for j in range(nyf): + index_x_d = i * width_scale + index_x_u = (i + 1) * width_scale + + index_y_d = j * height_scale + index_y_u = (j + 1) * height_scale + + if index_x_u > img_w: + index_x_u = img_w + index_x_d = img_w - width_scale + if index_y_u > img_h: + index_y_u = img_h + index_y_d = img_h - height_scale + + img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] + label_patch = label[index_y_d:index_y_u, index_x_d:index_x_u, :] + + # img_patch=resize_image(img_patch,height,width) + # label_patch=resize_image(label_patch,height,width) + + cv2.imwrite(dir_img_f + '/img_' + str(indexer) + '.png', img_patch) + cv2.imwrite(dir_seg_f + '/img_' + str(indexer) + '.png', label_patch) + indexer += 1 + + return indexer + + +def provide_patches(dir_img, dir_seg, dir_flow_train_imgs, dir_flow_train_labels, - input_height,input_width,blur_k,blur_aug, - flip_aug,binarization,scaling,scales,flip_index, - scaling_bluring,scaling_binarization,rotation, - rotation_not_90,thetha,scaling_flip, - augmentation=False,patches=False): - - imgs_cv_train=np.array(os.listdir(dir_img)) - segs_cv_train=np.array(os.listdir(dir_seg)) - - indexer=0 - for im, seg_i in tqdm(zip(imgs_cv_train,segs_cv_train)): - img_name=im.split('.')[0] + input_height, input_width, blur_k, blur_aug, + flip_aug, binarization, scaling, scales, flip_index, + scaling_bluring, scaling_binarization, rotation, + rotation_not_90, thetha, scaling_flip, + augmentation=False, patches=False): + imgs_cv_train = np.array(os.listdir(dir_img)) + segs_cv_train = np.array(os.listdir(dir_seg)) + + indexer = 0 + for im, seg_i in tqdm(zip(imgs_cv_train, segs_cv_train)): + img_name = im.split('.')[0] if not patches: - cv2.imwrite(dir_flow_train_imgs+'/img_'+str(indexer)+'.png', resize_image(cv2.imread(dir_img+'/'+im),input_height,input_width ) ) - cv2.imwrite(dir_flow_train_labels+'/img_'+str(indexer)+'.png' , resize_image(cv2.imread(dir_seg+'/'+img_name+'.png'),input_height,input_width ) ) - indexer+=1 - + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', + resize_image(cv2.imread(dir_img + '/' + im), input_height, input_width)) + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', + resize_image(cv2.imread(dir_seg + '/' + img_name + '.png'), input_height, input_width)) + indexer += 1 + if augmentation: if flip_aug: for f_i in flip_index: - cv2.imwrite(dir_flow_train_imgs+'/img_'+str(indexer)+'.png', - resize_image(cv2.flip(cv2.imread(dir_img+'/'+im),f_i),input_height,input_width) ) - - cv2.imwrite(dir_flow_train_labels+'/img_'+str(indexer)+'.png' , - resize_image(cv2.flip(cv2.imread(dir_seg+'/'+img_name+'.png'),f_i),input_height,input_width) ) - indexer+=1 - - if blur_aug: - for blur_i in blur_k: - cv2.imwrite(dir_flow_train_imgs+'/img_'+str(indexer)+'.png', - (resize_image(bluring(cv2.imread(dir_img+'/'+im),blur_i),input_height,input_width) ) ) - - cv2.imwrite(dir_flow_train_labels+'/img_'+str(indexer)+'.png' , - resize_image(cv2.imread(dir_seg+'/'+img_name+'.png'),input_height,input_width) ) - indexer+=1 - - - if binarization: - cv2.imwrite(dir_flow_train_imgs+'/img_'+str(indexer)+'.png', - resize_image(otsu_copy( cv2.imread(dir_img+'/'+im)),input_height,input_width )) - - cv2.imwrite(dir_flow_train_labels+'/img_'+str(indexer)+'.png', - resize_image( cv2.imread(dir_seg+'/'+img_name+'.png'),input_height,input_width )) - indexer+=1 - - - + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', + resize_image(cv2.flip(cv2.imread(dir_img + '/' + im), f_i), input_height, + input_width)) + + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', + resize_image(cv2.flip(cv2.imread(dir_seg + '/' + img_name + '.png'), f_i), + input_height, input_width)) + indexer += 1 + + if blur_aug: + for blur_i in blur_k: + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', + (resize_image(bluring(cv2.imread(dir_img + '/' + im), blur_i), input_height, + input_width))) + + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', + resize_image(cv2.imread(dir_seg + '/' + img_name + '.png'), input_height, + input_width)) + indexer += 1 + + if binarization: + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', + resize_image(otsu_copy(cv2.imread(dir_img + '/' + im)), input_height, input_width)) + + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', + resize_image(cv2.imread(dir_seg + '/' + img_name + '.png'), input_height, input_width)) + indexer += 1 - - if patches: - - indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, - cv2.imread(dir_img+'/'+im),cv2.imread(dir_seg+'/'+img_name+'.png'), - input_height,input_width,indexer=indexer) - + + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + cv2.imread(dir_img + '/' + im), cv2.imread(dir_seg + '/' + img_name + '.png'), + input_height, input_width, indexer=indexer) + if augmentation: - + if rotation: - - - indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, - rotation_90( cv2.imread(dir_img+'/'+im) ), - rotation_90( cv2.imread(dir_seg+'/'+img_name+'.png') ), - input_height,input_width,indexer=indexer) - + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + rotation_90(cv2.imread(dir_img + '/' + im)), + rotation_90(cv2.imread(dir_seg + '/' + img_name + '.png')), + input_height, input_width, indexer=indexer) + if rotation_not_90: - + for thetha_i in thetha: - img_max_rotated,label_max_rotated=rotation_not_90_func(cv2.imread(dir_img+'/'+im),cv2.imread(dir_seg+'/'+img_name+'.png'),thetha_i) - indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, - img_max_rotated, - label_max_rotated, - input_height,input_width,indexer=indexer) + img_max_rotated, label_max_rotated = rotation_not_90_func(cv2.imread(dir_img + '/' + im), + cv2.imread( + dir_seg + '/' + img_name + '.png'), + thetha_i) + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + img_max_rotated, + label_max_rotated, + input_height, input_width, indexer=indexer) if flip_aug: for f_i in flip_index: - indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, - cv2.flip( cv2.imread(dir_img+'/'+im) , f_i), - cv2.flip( cv2.imread(dir_seg+'/'+img_name+'.png') ,f_i), - input_height,input_width,indexer=indexer) - if blur_aug: + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + cv2.flip(cv2.imread(dir_img + '/' + im), f_i), + cv2.flip(cv2.imread(dir_seg + '/' + img_name + '.png'), f_i), + input_height, input_width, indexer=indexer) + if blur_aug: for blur_i in blur_k: + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + bluring(cv2.imread(dir_img + '/' + im), blur_i), + cv2.imread(dir_seg + '/' + img_name + '.png'), + input_height, input_width, indexer=indexer) - indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, - bluring( cv2.imread(dir_img+'/'+im) , blur_i), - cv2.imread(dir_seg+'/'+img_name+'.png'), - input_height,input_width,indexer=indexer) - - - if scaling: + if scaling: for sc_ind in scales: - indexer=get_patches_num_scale_new(dir_flow_train_imgs,dir_flow_train_labels, - cv2.imread(dir_img+'/'+im) , - cv2.imread(dir_seg+'/'+img_name+'.png'), - input_height,input_width,indexer=indexer,scaler=sc_ind) + indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, + cv2.imread(dir_img + '/' + im), + cv2.imread(dir_seg + '/' + img_name + '.png'), + input_height, input_width, indexer=indexer, scaler=sc_ind) if binarization: - indexer=get_patches(dir_flow_train_imgs,dir_flow_train_labels, - otsu_copy( cv2.imread(dir_img+'/'+im)), - cv2.imread(dir_seg+'/'+img_name+'.png'), - input_height,input_width,indexer=indexer) - - - - if scaling_bluring: + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + otsu_copy(cv2.imread(dir_img + '/' + im)), + cv2.imread(dir_seg + '/' + img_name + '.png'), + input_height, input_width, indexer=indexer) + + if scaling_bluring: for sc_ind in scales: for blur_i in blur_k: - indexer=get_patches_num_scale_new(dir_flow_train_imgs,dir_flow_train_labels, - bluring( cv2.imread(dir_img+'/'+im) , blur_i) , - cv2.imread(dir_seg+'/'+img_name+'.png') , - input_height,input_width,indexer=indexer,scaler=sc_ind) + indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, + bluring(cv2.imread(dir_img + '/' + im), blur_i), + cv2.imread(dir_seg + '/' + img_name + '.png'), + input_height, input_width, indexer=indexer, + scaler=sc_ind) - if scaling_binarization: + if scaling_binarization: for sc_ind in scales: - indexer=get_patches_num_scale_new(dir_flow_train_imgs,dir_flow_train_labels, - otsu_copy( cv2.imread(dir_img+'/'+im)) , - cv2.imread(dir_seg+'/'+img_name+'.png'), - input_height,input_width,indexer=indexer,scaler=sc_ind) - - if scaling_flip: + indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, + otsu_copy(cv2.imread(dir_img + '/' + im)), + cv2.imread(dir_seg + '/' + img_name + '.png'), + input_height, input_width, indexer=indexer, scaler=sc_ind) + + if scaling_flip: for sc_ind in scales: for f_i in flip_index: - indexer=get_patches_num_scale_new(dir_flow_train_imgs,dir_flow_train_labels, - cv2.flip( cv2.imread(dir_img+'/'+im) , f_i) , - cv2.flip(cv2.imread(dir_seg+'/'+img_name+'.png') ,f_i) , - input_height,input_width,indexer=indexer,scaler=sc_ind) - - - - - - - + indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, + cv2.flip(cv2.imread(dir_img + '/' + im), f_i), + cv2.flip(cv2.imread(dir_seg + '/' + img_name + '.png'), + f_i), + input_height, input_width, indexer=indexer, + scaler=sc_ind) From 6e06742e66be00aba83919a3d49774ed1f54c790 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 16 Apr 2024 01:00:48 +0200 Subject: [PATCH 039/492] first working update of branch --- train/config_params.json | 19 ++- train/models.py | 179 +++++++++++++++++++++++++ train/train.py | 132 ++++++++++++------- train/utils.py | 273 +++++++++++++++++++++++++-------------- 4 files changed, 452 insertions(+), 151 deletions(-) diff --git a/train/config_params.json b/train/config_params.json index 7505a81..bd47a52 100644 --- a/train/config_params.json +++ b/train/config_params.json @@ -1,8 +1,9 @@ { - "n_classes" : 3, + "model_name" : "hybrid_transformer_cnn", + "n_classes" : 2, "n_epochs" : 2, "input_height" : 448, - "input_width" : 672, + "input_width" : 448, "weight_decay" : 1e-6, "n_batch" : 2, "learning_rate": 1e-4, @@ -18,13 +19,21 @@ "scaling_flip" : false, "rotation": false, "rotation_not_90": false, + "num_patches_xy": [28, 28], + "transformer_patchsize": 1, + "blur_k" : ["blur","guass","median"], + "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], + "brightness" : [1.3, 1.5, 1.7, 2], + "degrade_scales" : [0.2, 0.4], + "flip_index" : [0, 1, -1], + "thetha" : [10, -10], "continue_training": false, - "index_start": 0, - "dir_of_start_model": " ", + "index_start" : 0, + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, "dir_train": "/train", "dir_eval": "/eval", - "dir_output": "/output" + "dir_output": "/out" } diff --git a/train/models.py b/train/models.py index f06823e..f7a7ad8 100644 --- a/train/models.py +++ b/train/models.py @@ -1,13 +1,81 @@ +import tensorflow as tf +from tensorflow import keras from tensorflow.keras.models import * from tensorflow.keras.layers import * from tensorflow.keras import layers from tensorflow.keras.regularizers import l2 +mlp_head_units = [2048, 1024] +projection_dim = 64 +transformer_layers = 8 +num_heads = 4 resnet50_Weights_path = './pretrained_model/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5' IMAGE_ORDERING = 'channels_last' MERGE_AXIS = -1 +transformer_units = [ + projection_dim * 2, + projection_dim, +] # Size of the transformer layers +def mlp(x, hidden_units, dropout_rate): + for units in hidden_units: + x = layers.Dense(units, activation=tf.nn.gelu)(x) + x = layers.Dropout(dropout_rate)(x) + return x + +class Patches(layers.Layer): + def __init__(self, patch_size):#__init__(self, **kwargs):#:__init__(self, patch_size):#__init__(self, **kwargs): + super(Patches, self).__init__() + self.patch_size = patch_size + + def call(self, images): + print(tf.shape(images)[1],'images') + print(self.patch_size,'self.patch_size') + batch_size = tf.shape(images)[0] + patches = tf.image.extract_patches( + images=images, + sizes=[1, self.patch_size, self.patch_size, 1], + strides=[1, self.patch_size, self.patch_size, 1], + rates=[1, 1, 1, 1], + padding="VALID", + ) + patch_dims = patches.shape[-1] + print(patches.shape,patch_dims,'patch_dims') + patches = tf.reshape(patches, [batch_size, -1, patch_dims]) + return patches + def get_config(self): + + config = super().get_config().copy() + config.update({ + 'patch_size': self.patch_size, + }) + return config + +class PatchEncoder(layers.Layer): + def __init__(self, num_patches, projection_dim): + super(PatchEncoder, self).__init__() + self.num_patches = num_patches + self.projection = layers.Dense(units=projection_dim) + self.position_embedding = layers.Embedding( + input_dim=num_patches, output_dim=projection_dim + ) + + def call(self, patch): + positions = tf.range(start=0, limit=self.num_patches, delta=1) + encoded = self.projection(patch) + self.position_embedding(positions) + return encoded + def get_config(self): + + config = super().get_config().copy() + config.update({ + 'num_patches': self.num_patches, + 'projection': self.projection, + 'position_embedding': self.position_embedding, + }) + return config + + def one_side_pad(x): x = ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING)(x) if IMAGE_ORDERING == 'channels_first': @@ -292,3 +360,114 @@ def resnet50_unet(n_classes, input_height=224, input_width=224, weight_decay=1e- model = Model(img_input, o) return model + + +def vit_resnet50_unet(n_classes,patch_size, num_patches, input_height=224,input_width=224,weight_decay=1e-6,pretraining=False): + inputs = layers.Input(shape=(input_height, input_width, 3)) + IMAGE_ORDERING = 'channels_last' + bn_axis=3 + + x = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(inputs) + x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2),kernel_regularizer=l2(weight_decay), name='conv1')(x) + f1 = x + + x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x) + x = Activation('relu')(x) + x = MaxPooling2D((3, 3), data_format=IMAGE_ORDERING, strides=(2, 2))(x) + + x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) + x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') + x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') + f2 = one_side_pad(x) + + x = conv_block(x, 3, [128, 128, 512], stage=3, block='a') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') + f3 = x + + x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f') + f4 = x + + x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a') + x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b') + x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c') + f5 = x + + if pretraining: + model = keras.Model(inputs, x).load_weights(resnet50_Weights_path) + + num_patches = x.shape[1]*x.shape[2] + patches = Patches(patch_size)(x) + # Encode patches. + encoded_patches = PatchEncoder(num_patches, projection_dim)(patches) + + for _ in range(transformer_layers): + # Layer normalization 1. + x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches) + # Create a multi-head attention layer. + attention_output = layers.MultiHeadAttention( + num_heads=num_heads, key_dim=projection_dim, dropout=0.1 + )(x1, x1) + # Skip connection 1. + x2 = layers.Add()([attention_output, encoded_patches]) + # Layer normalization 2. + x3 = layers.LayerNormalization(epsilon=1e-6)(x2) + # MLP. + x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1) + # Skip connection 2. + encoded_patches = layers.Add()([x3, x2]) + + encoded_patches = tf.reshape(encoded_patches, [-1, x.shape[1], x.shape[2], 64]) + + v1024_2048 = Conv2D( 1024 , (1, 1), padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay))(encoded_patches) + v1024_2048 = (BatchNormalization(axis=bn_axis))(v1024_2048) + v1024_2048 = Activation('relu')(v1024_2048) + + o = (UpSampling2D( (2, 2), data_format=IMAGE_ORDERING))(v1024_2048) + o = (concatenate([o, f4],axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(512, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o ,f3], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(256, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o, f2], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(128, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o, f1], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(64, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o, inputs],axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(32, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + o = Conv2D(n_classes, (1, 1), padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay))(o) + o = (BatchNormalization(axis=bn_axis))(o) + o = (Activation('softmax'))(o) + + model = keras.Model(inputs=inputs, outputs=o) + + return model diff --git a/train/train.py b/train/train.py index 03faf46..6e6a172 100644 --- a/train/train.py +++ b/train/train.py @@ -10,6 +10,7 @@ from utils import * from metrics import * from tensorflow.keras.models import load_model from tqdm import tqdm +import json def configuration(): @@ -42,9 +43,13 @@ def config_params(): learning_rate = 1e-4 # Set the learning rate. patches = False # Divides input image into smaller patches (input size of the model) when set to true. For the model to see the full image, like page extraction, set this to false. augmentation = False # To apply any kind of augmentation, this parameter must be set to true. - flip_aug = False # If true, different types of flipping will be applied to the image. Types of flips are defined with "flip_index" in train.py. - blur_aug = False # If true, different types of blurring will be applied to the image. Types of blur are defined with "blur_k" in train.py. - scaling = False # If true, scaling will be applied to the image. The amount of scaling is defined with "scales" in train.py. + flip_aug = False # If true, different types of flipping will be applied to the image. Types of flips are defined with "flip_index" in config_params.json. + blur_aug = False # If true, different types of blurring will be applied to the image. Types of blur are defined with "blur_k" in config_params.json. + padding_white = False # If true, white padding will be applied to the image. + padding_black = False # If true, black padding will be applied to the image. + scaling = False # If true, scaling will be applied to the image. The amount of scaling is defined with "scales" in config_params.json. + degrading = False # If true, degrading will be applied to the image. The amount of degrading is defined with "degrade_scales" in config_params.json. + brightening = False # If true, brightening will be applied to the image. The amount of brightening is defined with "brightness" in config_params.json. binarization = False # If true, Otsu thresholding will be applied to augment the input with binarized images. dir_train = None # Directory of training dataset with subdirectories having the names "images" and "labels". dir_eval = None # Directory of validation dataset with subdirectories having the names "images" and "labels". @@ -52,13 +57,18 @@ def config_params(): pretraining = False # Set to true to load pretrained weights of ResNet50 encoder. scaling_bluring = False # If true, a combination of scaling and blurring will be applied to the image. scaling_binarization = False # If true, a combination of scaling and binarization will be applied to the image. + scaling_brightness = False # If true, a combination of scaling and brightening will be applied to the image. scaling_flip = False # If true, a combination of scaling and flipping will be applied to the image. - thetha = [10, -10] # Rotate image by these angles for augmentation. - blur_k = ['blur', 'gauss', 'median'] # Blur image for augmentation. - scales = [0.5, 2] # Scale patches for augmentation. - flip_index = [0, 1, -1] # Flip image for augmentation. + thetha = None # Rotate image by these angles for augmentation. + blur_k = None # Blur image for augmentation. + scales = None # Scale patches for augmentation. + degrade_scales = None # Degrade image for augmentation. + brightness = None # Brighten image for augmentation. + flip_index = None # Flip image for augmentation. continue_training = False # Set to true if you would like to continue training an already trained a model. - index_start = 0 # Index of model to continue training from. E.g. if you trained for 3 epochs and last index is 2, to continue from model_1.h5, set "index_start" to 3 to start naming model with index 3. + transformer_patchsize = None # Patch size of vision transformer patches. + num_patches_xy = None # Number of patches for vision transformer. + index_start = 0 # Index of model to continue training from. E.g. if you trained for 3 epochs and last index is 2, to continue from model_1.h5, set "index_start" to 3 to start naming model with index 3. dir_of_start_model = '' # Directory containing pretrained encoder to continue training the model. is_loss_soft_dice = False # Use soft dice as loss function. When set to true, "weighted_loss" must be false. weighted_loss = False # Use weighted categorical cross entropy as loss fucntion. When set to true, "is_loss_soft_dice" must be false. @@ -66,15 +76,19 @@ def config_params(): @ex.automain -def run(n_classes, n_epochs, input_height, +def run(_config, n_classes, n_epochs, input_height, input_width, weight_decay, weighted_loss, index_start, dir_of_start_model, is_loss_soft_dice, n_batch, patches, augmentation, flip_aug, - blur_aug, scaling, binarization, - blur_k, scales, dir_train, data_is_provided, - scaling_bluring, scaling_binarization, rotation, - rotation_not_90, thetha, scaling_flip, continue_training, - flip_index, dir_eval, dir_output, pretraining, learning_rate): + blur_aug, padding_white, padding_black, scaling, degrading, + brightening, binarization, blur_k, scales, degrade_scales, + brightness, dir_train, data_is_provided, scaling_bluring, + scaling_brightness, scaling_binarization, rotation, rotation_not_90, + thetha, scaling_flip, continue_training, transformer_patchsize, + num_patches_xy, model_name, flip_index, dir_eval, dir_output, + pretraining, learning_rate): + + num_patches = num_patches_xy[0]*num_patches_xy[1] if data_is_provided: dir_train_flowing = os.path.join(dir_output, 'train') dir_eval_flowing = os.path.join(dir_output, 'eval') @@ -121,23 +135,28 @@ def run(n_classes, n_epochs, input_height, # set the gpu configuration configuration() + + imgs_list=np.array(os.listdir(dir_img)) + segs_list=np.array(os.listdir(dir_seg)) + + imgs_list_test=np.array(os.listdir(dir_img_val)) + segs_list_test=np.array(os.listdir(dir_seg_val)) # writing patches into a sub-folder in order to be flowed from directory. - provide_patches(dir_img, dir_seg, dir_flow_train_imgs, - dir_flow_train_labels, - input_height, input_width, blur_k, blur_aug, - flip_aug, binarization, scaling, scales, flip_index, - scaling_bluring, scaling_binarization, rotation, - rotation_not_90, thetha, scaling_flip, - augmentation=augmentation, patches=patches) - - provide_patches(dir_img_val, dir_seg_val, dir_flow_eval_imgs, - dir_flow_eval_labels, - input_height, input_width, blur_k, blur_aug, - flip_aug, binarization, scaling, scales, flip_index, - scaling_bluring, scaling_binarization, rotation, - rotation_not_90, thetha, scaling_flip, - augmentation=False, patches=patches) + provide_patches(imgs_list, segs_list, dir_img, dir_seg, dir_flow_train_imgs, + dir_flow_train_labels, input_height, input_width, blur_k, + blur_aug, padding_white, padding_black, flip_aug, binarization, + scaling, degrading, brightening, scales, degrade_scales, brightness, + flip_index, scaling_bluring, scaling_brightness, scaling_binarization, + rotation, rotation_not_90, thetha, scaling_flip, augmentation=augmentation, + patches=patches) + + provide_patches(imgs_list_test, segs_list_test, dir_img_val, dir_seg_val, + dir_flow_eval_imgs, dir_flow_eval_labels, input_height, input_width, + blur_k, blur_aug, padding_white, padding_black, flip_aug, binarization, + scaling, degrading, brightening, scales, degrade_scales, brightness, + flip_index, scaling_bluring, scaling_brightness, scaling_binarization, + rotation, rotation_not_90, thetha, scaling_flip, augmentation=False, patches=patches) if weighted_loss: weights = np.zeros(n_classes) @@ -166,38 +185,50 @@ def run(n_classes, n_epochs, input_height, weights = weights / float(np.sum(weights)) if continue_training: - if is_loss_soft_dice: - model = load_model(dir_of_start_model, compile=True, custom_objects={'soft_dice_loss': soft_dice_loss}) - if weighted_loss: - model = load_model(dir_of_start_model, compile=True, - custom_objects={'loss': weighted_categorical_crossentropy(weights)}) - if not is_loss_soft_dice and not weighted_loss: - model = load_model(dir_of_start_model, compile=True) + if model_name=='resnet50_unet': + if is_loss_soft_dice: + model = load_model(dir_of_start_model, compile=True, custom_objects={'soft_dice_loss': soft_dice_loss}) + if weighted_loss: + model = load_model(dir_of_start_model, compile=True, custom_objects={'loss': weighted_categorical_crossentropy(weights)}) + if not is_loss_soft_dice and not weighted_loss: + model = load_model(dir_of_start_model , compile=True) + elif model_name=='hybrid_transformer_cnn': + if is_loss_soft_dice: + model = load_model(dir_of_start_model, compile=True, custom_objects={"PatchEncoder": PatchEncoder, "Patches": Patches,'soft_dice_loss': soft_dice_loss}) + if weighted_loss: + model = load_model(dir_of_start_model, compile=True, custom_objects={'loss': weighted_categorical_crossentropy(weights)}) + if not is_loss_soft_dice and not weighted_loss: + model = load_model(dir_of_start_model , compile=True,custom_objects = {"PatchEncoder": PatchEncoder, "Patches": Patches}) else: - # get our model. index_start = 0 - model = resnet50_unet(n_classes, input_height, input_width, weight_decay, pretraining) - - # if you want to see the model structure just uncomment model summary. - # model.summary() + if model_name=='resnet50_unet': + model = resnet50_unet(n_classes, input_height, input_width,weight_decay,pretraining) + elif model_name=='hybrid_transformer_cnn': + model = vit_resnet50_unet(n_classes, transformer_patchsize, num_patches, input_height, input_width,weight_decay,pretraining) + + #if you want to see the model structure just uncomment model summary. + #model.summary() + if not is_loss_soft_dice and not weighted_loss: model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=learning_rate), metrics=['accuracy']) - if is_loss_soft_dice: + if is_loss_soft_dice: model.compile(loss=soft_dice_loss, optimizer=Adam(lr=learning_rate), metrics=['accuracy']) - if weighted_loss: model.compile(loss=weighted_categorical_crossentropy(weights), optimizer=Adam(lr=learning_rate), metrics=['accuracy']) - + # generating train and evaluation data train_gen = data_gen(dir_flow_train_imgs, dir_flow_train_labels, batch_size=n_batch, input_height=input_height, input_width=input_width, n_classes=n_classes) val_gen = data_gen(dir_flow_eval_imgs, dir_flow_eval_labels, batch_size=n_batch, input_height=input_height, input_width=input_width, n_classes=n_classes) - + + ##img_validation_patches = os.listdir(dir_flow_eval_imgs) + ##score_best=[] + ##score_best.append(0) for i in tqdm(range(index_start, n_epochs + index_start)): model.fit_generator( train_gen, @@ -205,9 +236,12 @@ def run(n_classes, n_epochs, input_height, validation_data=val_gen, validation_steps=1, epochs=1) - model.save(dir_output + '/' + 'model_' + str(i)) + model.save(dir_output+'/'+'model_'+str(i)) + + with open(dir_output+'/'+'model_'+str(i)+'/'+"config.json", "w") as fp: + json.dump(_config, fp) # encode dict into JSON - # os.system('rm -rf '+dir_train_flowing) - # os.system('rm -rf '+dir_eval_flowing) + #os.system('rm -rf '+dir_train_flowing) + #os.system('rm -rf '+dir_eval_flowing) - # model.save(dir_output+'/'+'model'+'.h5') + #model.save(dir_output+'/'+'model'+'.h5') diff --git a/train/utils.py b/train/utils.py index 7c65f18..c2786ec 100644 --- a/train/utils.py +++ b/train/utils.py @@ -9,6 +9,15 @@ from tqdm import tqdm import imutils import math +def do_brightening(img_in_dir, factor): + im = Image.open(img_in_dir) + enhancer = ImageEnhance.Brightness(im) + out_img = enhancer.enhance(factor) + out_img = out_img.convert('RGB') + opencv_img = np.array(out_img) + opencv_img = opencv_img[:,:,::-1].copy() + return opencv_img + def bluring(img_in, kind): if kind == 'gauss': @@ -138,11 +147,11 @@ def IoU(Yi, y_predi): FP = np.sum((Yi != c) & (y_predi == c)) FN = np.sum((Yi == c) & (y_predi != c)) IoU = TP / float(TP + FP + FN) - print("class {:02.0f}: #TP={:6.0f}, #FP={:6.0f}, #FN={:5.0f}, IoU={:4.3f}".format(c, TP, FP, FN, IoU)) + #print("class {:02.0f}: #TP={:6.0f}, #FP={:6.0f}, #FN={:5.0f}, IoU={:4.3f}".format(c, TP, FP, FN, IoU)) IoUs.append(IoU) mIoU = np.mean(IoUs) - print("_________________") - print("Mean IoU: {:4.3f}".format(mIoU)) + #print("_________________") + #print("Mean IoU: {:4.3f}".format(mIoU)) return mIoU @@ -241,124 +250,170 @@ def get_patches(dir_img_f, dir_seg_f, img, label, height, width, indexer): return indexer -def do_padding(img, label, height, width): - height_new = img.shape[0] - width_new = img.shape[1] +def do_padding_white(img): + img_org_h = img.shape[0] + img_org_w = img.shape[1] + + index_start_h = 4 + index_start_w = 4 + + img_padded = np.zeros((img.shape[0] + 2*index_start_h, img.shape[1]+ 2*index_start_w, img.shape[2])) + 255 + img_padded[index_start_h: index_start_h + img.shape[0], index_start_w: index_start_w + img.shape[1], :] = img[:, :, :] + + return img_padded.astype(float) + +def do_degrading(img, scale): + img_org_h = img.shape[0] + img_org_w = img.shape[1] + + img_res = resize_image(img, int(img_org_h * scale), int(img_org_w * scale)) + + return resize_image(img_res, img_org_h, img_org_w) + + +def do_padding_black(img): + img_org_h = img.shape[0] + img_org_w = img.shape[1] + + index_start_h = 4 + index_start_w = 4 + + img_padded = np.zeros((img.shape[0] + 2*index_start_h, img.shape[1] + 2*index_start_w, img.shape[2])) + img_padded[index_start_h: index_start_h + img.shape[0], index_start_w: index_start_w + img.shape[1], :] = img[:, :, :] + + return img_padded.astype(float) + + +def do_padding_label(img): + img_org_h = img.shape[0] + img_org_w = img.shape[1] + + index_start_h = 4 + index_start_w = 4 + + img_padded = np.zeros((img.shape[0] + 2*index_start_h, img.shape[1] + 2*index_start_w, img.shape[2])) + img_padded[index_start_h: index_start_h + img.shape[0], index_start_w: index_start_w + img.shape[1], :] = img[:, :, :] + + return img_padded.astype(np.int16) + +def do_padding(img, label, height, width): + height_new=img.shape[0] + width_new=img.shape[1] + h_start = 0 w_start = 0 - + if img.shape[0] < height: h_start = int(abs(height - img.shape[0]) / 2.) height_new = height - + if img.shape[1] < width: w_start = int(abs(width - img.shape[1]) / 2.) width_new = width - + img_new = np.ones((height_new, width_new, img.shape[2])).astype(float) * 255 label_new = np.zeros((height_new, width_new, label.shape[2])).astype(float) - + img_new[h_start:h_start + img.shape[0], w_start:w_start + img.shape[1], :] = np.copy(img[:, :, :]) label_new[h_start:h_start + label.shape[0], w_start:w_start + label.shape[1], :] = np.copy(label[:, :, :]) - - return img_new, label_new + + return img_new,label_new def get_patches_num_scale(dir_img_f, dir_seg_f, img, label, height, width, indexer, n_patches, scaler): if img.shape[0] < height or img.shape[1] < width: img, label = do_padding(img, label, height, width) - + img_h = img.shape[0] img_w = img.shape[1] - + height_scale = int(height * scaler) width_scale = int(width * scaler) - + + nxf = img_w / float(width_scale) nyf = img_h / float(height_scale) - + if nxf > int(nxf): nxf = int(nxf) + 1 if nyf > int(nyf): nyf = int(nyf) + 1 - + nxf = int(nxf) nyf = int(nyf) - + for i in range(nxf): for j in range(nyf): index_x_d = i * width_scale index_x_u = (i + 1) * width_scale - + index_y_d = j * height_scale index_y_u = (j + 1) * height_scale - + if index_x_u > img_w: index_x_u = img_w index_x_d = img_w - width_scale if index_y_u > img_h: index_y_u = img_h index_y_d = img_h - height_scale - + + img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] label_patch = label[index_y_d:index_y_u, index_x_d:index_x_u, :] - + img_patch = resize_image(img_patch, height, width) label_patch = resize_image(label_patch, height, width) - + cv2.imwrite(dir_img_f + '/img_' + str(indexer) + '.png', img_patch) cv2.imwrite(dir_seg_f + '/img_' + str(indexer) + '.png', label_patch) indexer += 1 - + return indexer def get_patches_num_scale_new(dir_img_f, dir_seg_f, img, label, height, width, indexer, scaler): img = resize_image(img, int(img.shape[0] * scaler), int(img.shape[1] * scaler)) label = resize_image(label, int(label.shape[0] * scaler), int(label.shape[1] * scaler)) - + if img.shape[0] < height or img.shape[1] < width: img, label = do_padding(img, label, height, width) - + img_h = img.shape[0] img_w = img.shape[1] - + height_scale = int(height * 1) width_scale = int(width * 1) - + nxf = img_w / float(width_scale) nyf = img_h / float(height_scale) - + if nxf > int(nxf): nxf = int(nxf) + 1 if nyf > int(nyf): nyf = int(nyf) + 1 - + nxf = int(nxf) nyf = int(nyf) - + for i in range(nxf): for j in range(nyf): index_x_d = i * width_scale index_x_u = (i + 1) * width_scale - + index_y_d = j * height_scale index_y_u = (j + 1) * height_scale - + if index_x_u > img_w: index_x_u = img_w index_x_d = img_w - width_scale if index_y_u > img_h: index_y_u = img_h index_y_d = img_h - height_scale - + img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] label_patch = label[index_y_d:index_y_u, index_x_d:index_x_u, :] - - # img_patch=resize_image(img_patch,height,width) - # label_patch=resize_image(label_patch,height,width) - + cv2.imwrite(dir_img_f + '/img_' + str(indexer) + '.png', img_patch) cv2.imwrite(dir_seg_f + '/img_' + str(indexer) + '.png', label_patch) indexer += 1 @@ -366,78 +421,65 @@ def get_patches_num_scale_new(dir_img_f, dir_seg_f, img, label, height, width, i return indexer -def provide_patches(dir_img, dir_seg, dir_flow_train_imgs, - dir_flow_train_labels, - input_height, input_width, blur_k, blur_aug, - flip_aug, binarization, scaling, scales, flip_index, - scaling_bluring, scaling_binarization, rotation, - rotation_not_90, thetha, scaling_flip, - augmentation=False, patches=False): - imgs_cv_train = np.array(os.listdir(dir_img)) - segs_cv_train = np.array(os.listdir(dir_seg)) - +def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow_train_imgs, + dir_flow_train_labels, input_height, input_width, blur_k, blur_aug, + padding_white, padding_black, flip_aug, binarization, scaling, degrading, + brightening, scales, degrade_scales, brightness, flip_index, + scaling_bluring, scaling_brightness, scaling_binarization, rotation, + rotation_not_90, thetha, scaling_flip, augmentation=False, patches=False): + indexer = 0 - for im, seg_i in tqdm(zip(imgs_cv_train, segs_cv_train)): + for im, seg_i in tqdm(zip(imgs_list_train, segs_list_train)): img_name = im.split('.')[0] if not patches: - cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', - resize_image(cv2.imread(dir_img + '/' + im), input_height, input_width)) - cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', - resize_image(cv2.imread(dir_seg + '/' + img_name + '.png'), input_height, input_width)) + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', resize_image(cv2.imread(dir_img + '/' + im), input_height, input_width)) + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', resize_image(cv2.imread(dir_seg + '/' + img_name + '.png'), input_height, input_width)) indexer += 1 - + if augmentation: if flip_aug: for f_i in flip_index: cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', - resize_image(cv2.flip(cv2.imread(dir_img + '/' + im), f_i), input_height, - input_width)) - + resize_image(cv2.flip(cv2.imread(dir_img+'/'+im),f_i),input_height,input_width) ) + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', - resize_image(cv2.flip(cv2.imread(dir_seg + '/' + img_name + '.png'), f_i), - input_height, input_width)) + resize_image(cv2.flip(cv2.imread(dir_seg + '/' + img_name + '.png'), f_i), input_height, input_width)) indexer += 1 - - if blur_aug: + + if blur_aug: for blur_i in blur_k: cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', - (resize_image(bluring(cv2.imread(dir_img + '/' + im), blur_i), input_height, - input_width))) - + (resize_image(bluring(cv2.imread(dir_img + '/' + im), blur_i), input_height, input_width))) + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', - resize_image(cv2.imread(dir_seg + '/' + img_name + '.png'), input_height, - input_width)) + resize_image(cv2.imread(dir_seg + '/' + img_name + '.png'), input_height, input_width)) indexer += 1 - + if binarization: cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', resize_image(otsu_copy(cv2.imread(dir_img + '/' + im)), input_height, input_width)) - + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', resize_image(cv2.imread(dir_seg + '/' + img_name + '.png'), input_height, input_width)) indexer += 1 - + + if patches: - indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, cv2.imread(dir_img + '/' + im), cv2.imread(dir_seg + '/' + img_name + '.png'), input_height, input_width, indexer=indexer) - + if augmentation: - if rotation: indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, - rotation_90(cv2.imread(dir_img + '/' + im)), - rotation_90(cv2.imread(dir_seg + '/' + img_name + '.png')), - input_height, input_width, indexer=indexer) - + rotation_90(cv2.imread(dir_img + '/' + im)), + rotation_90(cv2.imread(dir_seg + '/' + img_name + '.png')), + input_height, input_width, indexer=indexer) + if rotation_not_90: - for thetha_i in thetha: - img_max_rotated, label_max_rotated = rotation_not_90_func(cv2.imread(dir_img + '/' + im), - cv2.imread( - dir_seg + '/' + img_name + '.png'), - thetha_i) + img_max_rotated, label_max_rotated = rotation_not_90_func(cv2.imread(dir_img + '/'+im), + cv2.imread(dir_seg + '/'+img_name + '.png'), thetha_i) indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, img_max_rotated, label_max_rotated, @@ -448,47 +490,84 @@ def provide_patches(dir_img, dir_seg, dir_flow_train_imgs, cv2.flip(cv2.imread(dir_img + '/' + im), f_i), cv2.flip(cv2.imread(dir_seg + '/' + img_name + '.png'), f_i), input_height, input_width, indexer=indexer) - if blur_aug: + if blur_aug: for blur_i in blur_k: indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, bluring(cv2.imread(dir_img + '/' + im), blur_i), cv2.imread(dir_seg + '/' + img_name + '.png'), - input_height, input_width, indexer=indexer) - - if scaling: + input_height, input_width, indexer=indexer) + if padding_black: + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + do_padding_black(cv2.imread(dir_img + '/' + im)), + do_padding_label(cv2.imread(dir_seg + '/' + img_name + '.png')), + input_height, input_width, indexer=indexer) + + if padding_white: + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + do_padding_white(cv2.imread(dir_img + '/'+im)), + do_padding_label(cv2.imread(dir_seg + '/' + img_name + '.png')), + input_height, input_width, indexer=indexer) + + if brightening: + for factor in brightness: + try: + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + do_brightening(dir_img + '/' +im, factor), + cv2.imread(dir_seg + '/' + img_name + '.png'), + input_height, input_width, indexer=indexer) + except: + pass + if scaling: for sc_ind in scales: indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, - cv2.imread(dir_img + '/' + im), + cv2.imread(dir_img + '/' + im) , cv2.imread(dir_seg + '/' + img_name + '.png'), input_height, input_width, indexer=indexer, scaler=sc_ind) + + if degrading: + for degrade_scale_ind in degrade_scales: + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + do_degrading(cv2.imread(dir_img + '/' + im), degrade_scale_ind), + cv2.imread(dir_seg + '/' + img_name + '.png'), + input_height, input_width, indexer=indexer) + if binarization: indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, otsu_copy(cv2.imread(dir_img + '/' + im)), cv2.imread(dir_seg + '/' + img_name + '.png'), input_height, input_width, indexer=indexer) - if scaling_bluring: + if scaling_brightness: + for sc_ind in scales: + for factor in brightness: + try: + indexer = get_patches_num_scale_new(dir_flow_train_imgs, + dir_flow_train_labels, + do_brightening(dir_img + '/' + im, factor) + ,cv2.imread(dir_seg + '/' + img_name + '.png') + ,input_height, input_width, indexer=indexer, scaler=sc_ind) + except: + pass + + if scaling_bluring: for sc_ind in scales: for blur_i in blur_k: indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, bluring(cv2.imread(dir_img + '/' + im), blur_i), cv2.imread(dir_seg + '/' + img_name + '.png'), - input_height, input_width, indexer=indexer, - scaler=sc_ind) + input_height, input_width, indexer=indexer, scaler=sc_ind) - if scaling_binarization: + if scaling_binarization: for sc_ind in scales: indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, otsu_copy(cv2.imread(dir_img + '/' + im)), cv2.imread(dir_seg + '/' + img_name + '.png'), input_height, input_width, indexer=indexer, scaler=sc_ind) - - if scaling_flip: + + if scaling_flip: for sc_ind in scales: for f_i in flip_index: indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, - cv2.flip(cv2.imread(dir_img + '/' + im), f_i), - cv2.flip(cv2.imread(dir_seg + '/' + img_name + '.png'), - f_i), - input_height, input_width, indexer=indexer, - scaler=sc_ind) + cv2.flip( cv2.imread(dir_img + '/' + im), f_i), + cv2.flip(cv2.imread(dir_seg + '/' + img_name + '.png'), f_i), + input_height, input_width, indexer=indexer, scaler=sc_ind) From ca63c097c3c30b58513d708f476139c590ac2d94 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 29 Apr 2024 20:59:36 +0200 Subject: [PATCH 040/492] integrating first working classification training model --- train/config_params.json | 20 ++- train/models.py | 69 +++++++- train/requirements.txt | 1 + train/train.py | 374 ++++++++++++++++++++++++--------------- train/utils.py | 113 ++++++++++++ 5 files changed, 419 insertions(+), 158 deletions(-) diff --git a/train/config_params.json b/train/config_params.json index bd47a52..43ad1bc 100644 --- a/train/config_params.json +++ b/train/config_params.json @@ -1,13 +1,15 @@ { - "model_name" : "hybrid_transformer_cnn", + "model_name" : "resnet50_unet", + "task": "classification", "n_classes" : 2, - "n_epochs" : 2, - "input_height" : 448, - "input_width" : 448, + "n_epochs" : 7, + "input_height" : 224, + "input_width" : 224, "weight_decay" : 1e-6, - "n_batch" : 2, + "n_batch" : 6, "learning_rate": 1e-4, - "patches" : true, + "f1_threshold_classification": 0.8, + "patches" : false, "pretraining" : true, "augmentation" : false, "flip_aug" : false, @@ -33,7 +35,7 @@ "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, - "dir_train": "/train", - "dir_eval": "/eval", - "dir_output": "/out" + "dir_train": "/home/vahid/Downloads/image_classification_data/train", + "dir_eval": "/home/vahid/Downloads/image_classification_data/eval", + "dir_output": "/home/vahid/Downloads/image_classification_data/output" } diff --git a/train/models.py b/train/models.py index f7a7ad8..a6de1ef 100644 --- a/train/models.py +++ b/train/models.py @@ -400,7 +400,7 @@ def vit_resnet50_unet(n_classes,patch_size, num_patches, input_height=224,input_ f5 = x if pretraining: - model = keras.Model(inputs, x).load_weights(resnet50_Weights_path) + model = Model(inputs, x).load_weights(resnet50_Weights_path) num_patches = x.shape[1]*x.shape[2] patches = Patches(patch_size)(x) @@ -468,6 +468,71 @@ def vit_resnet50_unet(n_classes,patch_size, num_patches, input_height=224,input_ o = (BatchNormalization(axis=bn_axis))(o) o = (Activation('softmax'))(o) - model = keras.Model(inputs=inputs, outputs=o) + model = Model(inputs=inputs, outputs=o) + return model + +def resnet50_classifier(n_classes,input_height=224,input_width=224,weight_decay=1e-6,pretraining=False): + include_top=True + assert input_height%32 == 0 + assert input_width%32 == 0 + + + img_input = Input(shape=(input_height,input_width , 3 )) + + if IMAGE_ORDERING == 'channels_last': + bn_axis = 3 + else: + bn_axis = 1 + + x = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(img_input) + x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2),kernel_regularizer=l2(weight_decay), name='conv1')(x) + f1 = x + + x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x) + x = Activation('relu')(x) + x = MaxPooling2D((3, 3) , data_format=IMAGE_ORDERING , strides=(2, 2))(x) + + + x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) + x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') + x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') + f2 = one_side_pad(x ) + + + x = conv_block(x, 3, [128, 128, 512], stage=3, block='a') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') + f3 = x + + x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f') + f4 = x + + x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a') + x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b') + x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c') + f5 = x + + if pretraining: + Model(img_input, x).load_weights(resnet50_Weights_path) + + x = AveragePooling2D((7, 7), name='avg_pool')(x) + x = Flatten()(x) + + ## + x = Dense(256, activation='relu', name='fc512')(x) + x=Dropout(0.2)(x) + ## + x = Dense(n_classes, activation='softmax', name='fc1000')(x) + model = Model(img_input, x) + + + + return model diff --git a/train/requirements.txt b/train/requirements.txt index 20b6a32..3e56438 100644 --- a/train/requirements.txt +++ b/train/requirements.txt @@ -6,3 +6,4 @@ tqdm imutils numpy scipy +scikit-learn diff --git a/train/train.py b/train/train.py index 6e6a172..efcd3ac 100644 --- a/train/train.py +++ b/train/train.py @@ -11,6 +11,7 @@ from metrics import * from tensorflow.keras.models import load_model from tqdm import tqdm import json +from sklearn.metrics import f1_score def configuration(): @@ -73,6 +74,8 @@ def config_params(): is_loss_soft_dice = False # Use soft dice as loss function. When set to true, "weighted_loss" must be false. weighted_loss = False # Use weighted categorical cross entropy as loss fucntion. When set to true, "is_loss_soft_dice" must be false. data_is_provided = False # Only set this to true when you have already provided the input data and the train and eval data are in "dir_output". + task = "segmentation" # This parameter defines task of model which can be segmentation, enhancement or classification. + f1_threshold_classification = None # This threshold is used to consider models with an evaluation f1 scores bigger than it. The selected model weights undergo a weights ensembling. And avreage ensembled model will be written to output. @ex.automain @@ -86,162 +89,239 @@ def run(_config, n_classes, n_epochs, input_height, scaling_brightness, scaling_binarization, rotation, rotation_not_90, thetha, scaling_flip, continue_training, transformer_patchsize, num_patches_xy, model_name, flip_index, dir_eval, dir_output, - pretraining, learning_rate): + pretraining, learning_rate, task, f1_threshold_classification): - num_patches = num_patches_xy[0]*num_patches_xy[1] - if data_is_provided: - dir_train_flowing = os.path.join(dir_output, 'train') - dir_eval_flowing = os.path.join(dir_output, 'eval') - - dir_flow_train_imgs = os.path.join(dir_train_flowing, 'images') - dir_flow_train_labels = os.path.join(dir_train_flowing, 'labels') - - dir_flow_eval_imgs = os.path.join(dir_eval_flowing, 'images') - dir_flow_eval_labels = os.path.join(dir_eval_flowing, 'labels') - - configuration() - - else: - dir_img, dir_seg = get_dirs_or_files(dir_train) - dir_img_val, dir_seg_val = get_dirs_or_files(dir_eval) - - # make first a directory in output for both training and evaluations in order to flow data from these directories. - dir_train_flowing = os.path.join(dir_output, 'train') - dir_eval_flowing = os.path.join(dir_output, 'eval') - - dir_flow_train_imgs = os.path.join(dir_train_flowing, 'images/') - dir_flow_train_labels = os.path.join(dir_train_flowing, 'labels/') - - dir_flow_eval_imgs = os.path.join(dir_eval_flowing, 'images/') - dir_flow_eval_labels = os.path.join(dir_eval_flowing, 'labels/') - - if os.path.isdir(dir_train_flowing): - os.system('rm -rf ' + dir_train_flowing) - os.makedirs(dir_train_flowing) - else: - os.makedirs(dir_train_flowing) - - if os.path.isdir(dir_eval_flowing): - os.system('rm -rf ' + dir_eval_flowing) - os.makedirs(dir_eval_flowing) - else: - os.makedirs(dir_eval_flowing) - - os.mkdir(dir_flow_train_imgs) - os.mkdir(dir_flow_train_labels) - - os.mkdir(dir_flow_eval_imgs) - os.mkdir(dir_flow_eval_labels) - - # set the gpu configuration - configuration() + if task == "segmentation": - imgs_list=np.array(os.listdir(dir_img)) - segs_list=np.array(os.listdir(dir_seg)) - - imgs_list_test=np.array(os.listdir(dir_img_val)) - segs_list_test=np.array(os.listdir(dir_seg_val)) - - # writing patches into a sub-folder in order to be flowed from directory. - provide_patches(imgs_list, segs_list, dir_img, dir_seg, dir_flow_train_imgs, - dir_flow_train_labels, input_height, input_width, blur_k, - blur_aug, padding_white, padding_black, flip_aug, binarization, - scaling, degrading, brightening, scales, degrade_scales, brightness, - flip_index, scaling_bluring, scaling_brightness, scaling_binarization, - rotation, rotation_not_90, thetha, scaling_flip, augmentation=augmentation, - patches=patches) - - provide_patches(imgs_list_test, segs_list_test, dir_img_val, dir_seg_val, - dir_flow_eval_imgs, dir_flow_eval_labels, input_height, input_width, - blur_k, blur_aug, padding_white, padding_black, flip_aug, binarization, - scaling, degrading, brightening, scales, degrade_scales, brightness, - flip_index, scaling_bluring, scaling_brightness, scaling_binarization, - rotation, rotation_not_90, thetha, scaling_flip, augmentation=False, patches=patches) - - if weighted_loss: - weights = np.zeros(n_classes) + num_patches = num_patches_xy[0]*num_patches_xy[1] if data_is_provided: - for obj in os.listdir(dir_flow_train_labels): - try: - label_obj = cv2.imread(dir_flow_train_labels + '/' + obj) - label_obj_one_hot = get_one_hot(label_obj, label_obj.shape[0], label_obj.shape[1], n_classes) - weights += (label_obj_one_hot.sum(axis=0)).sum(axis=0) - except: - pass + dir_train_flowing = os.path.join(dir_output, 'train') + dir_eval_flowing = os.path.join(dir_output, 'eval') + + dir_flow_train_imgs = os.path.join(dir_train_flowing, 'images') + dir_flow_train_labels = os.path.join(dir_train_flowing, 'labels') + + dir_flow_eval_imgs = os.path.join(dir_eval_flowing, 'images') + dir_flow_eval_labels = os.path.join(dir_eval_flowing, 'labels') + + configuration() + else: + dir_img, dir_seg = get_dirs_or_files(dir_train) + dir_img_val, dir_seg_val = get_dirs_or_files(dir_eval) - for obj in os.listdir(dir_seg): - try: - label_obj = cv2.imread(dir_seg + '/' + obj) - label_obj_one_hot = get_one_hot(label_obj, label_obj.shape[0], label_obj.shape[1], n_classes) - weights += (label_obj_one_hot.sum(axis=0)).sum(axis=0) - except: - pass + # make first a directory in output for both training and evaluations in order to flow data from these directories. + dir_train_flowing = os.path.join(dir_output, 'train') + dir_eval_flowing = os.path.join(dir_output, 'eval') - weights = 1.00 / weights + dir_flow_train_imgs = os.path.join(dir_train_flowing, 'images/') + dir_flow_train_labels = os.path.join(dir_train_flowing, 'labels/') - weights = weights / float(np.sum(weights)) - weights = weights / float(np.min(weights)) - weights = weights / float(np.sum(weights)) + dir_flow_eval_imgs = os.path.join(dir_eval_flowing, 'images/') + dir_flow_eval_labels = os.path.join(dir_eval_flowing, 'labels/') - if continue_training: - if model_name=='resnet50_unet': - if is_loss_soft_dice: - model = load_model(dir_of_start_model, compile=True, custom_objects={'soft_dice_loss': soft_dice_loss}) - if weighted_loss: - model = load_model(dir_of_start_model, compile=True, custom_objects={'loss': weighted_categorical_crossentropy(weights)}) - if not is_loss_soft_dice and not weighted_loss: - model = load_model(dir_of_start_model , compile=True) - elif model_name=='hybrid_transformer_cnn': - if is_loss_soft_dice: - model = load_model(dir_of_start_model, compile=True, custom_objects={"PatchEncoder": PatchEncoder, "Patches": Patches,'soft_dice_loss': soft_dice_loss}) - if weighted_loss: - model = load_model(dir_of_start_model, compile=True, custom_objects={'loss': weighted_categorical_crossentropy(weights)}) - if not is_loss_soft_dice and not weighted_loss: - model = load_model(dir_of_start_model , compile=True,custom_objects = {"PatchEncoder": PatchEncoder, "Patches": Patches}) - else: - index_start = 0 - if model_name=='resnet50_unet': - model = resnet50_unet(n_classes, input_height, input_width,weight_decay,pretraining) - elif model_name=='hybrid_transformer_cnn': - model = vit_resnet50_unet(n_classes, transformer_patchsize, num_patches, input_height, input_width,weight_decay,pretraining) - - #if you want to see the model structure just uncomment model summary. - #model.summary() - + if os.path.isdir(dir_train_flowing): + os.system('rm -rf ' + dir_train_flowing) + os.makedirs(dir_train_flowing) + else: + os.makedirs(dir_train_flowing) - if not is_loss_soft_dice and not weighted_loss: + if os.path.isdir(dir_eval_flowing): + os.system('rm -rf ' + dir_eval_flowing) + os.makedirs(dir_eval_flowing) + else: + os.makedirs(dir_eval_flowing) + + os.mkdir(dir_flow_train_imgs) + os.mkdir(dir_flow_train_labels) + + os.mkdir(dir_flow_eval_imgs) + os.mkdir(dir_flow_eval_labels) + + # set the gpu configuration + configuration() + + imgs_list=np.array(os.listdir(dir_img)) + segs_list=np.array(os.listdir(dir_seg)) + + imgs_list_test=np.array(os.listdir(dir_img_val)) + segs_list_test=np.array(os.listdir(dir_seg_val)) + + # writing patches into a sub-folder in order to be flowed from directory. + provide_patches(imgs_list, segs_list, dir_img, dir_seg, dir_flow_train_imgs, + dir_flow_train_labels, input_height, input_width, blur_k, + blur_aug, padding_white, padding_black, flip_aug, binarization, + scaling, degrading, brightening, scales, degrade_scales, brightness, + flip_index, scaling_bluring, scaling_brightness, scaling_binarization, + rotation, rotation_not_90, thetha, scaling_flip, augmentation=augmentation, + patches=patches) + + provide_patches(imgs_list_test, segs_list_test, dir_img_val, dir_seg_val, + dir_flow_eval_imgs, dir_flow_eval_labels, input_height, input_width, + blur_k, blur_aug, padding_white, padding_black, flip_aug, binarization, + scaling, degrading, brightening, scales, degrade_scales, brightness, + flip_index, scaling_bluring, scaling_brightness, scaling_binarization, + rotation, rotation_not_90, thetha, scaling_flip, augmentation=False, patches=patches) + + if weighted_loss: + weights = np.zeros(n_classes) + if data_is_provided: + for obj in os.listdir(dir_flow_train_labels): + try: + label_obj = cv2.imread(dir_flow_train_labels + '/' + obj) + label_obj_one_hot = get_one_hot(label_obj, label_obj.shape[0], label_obj.shape[1], n_classes) + weights += (label_obj_one_hot.sum(axis=0)).sum(axis=0) + except: + pass + else: + + for obj in os.listdir(dir_seg): + try: + label_obj = cv2.imread(dir_seg + '/' + obj) + label_obj_one_hot = get_one_hot(label_obj, label_obj.shape[0], label_obj.shape[1], n_classes) + weights += (label_obj_one_hot.sum(axis=0)).sum(axis=0) + except: + pass + + weights = 1.00 / weights + + weights = weights / float(np.sum(weights)) + weights = weights / float(np.min(weights)) + weights = weights / float(np.sum(weights)) + + if continue_training: + if model_name=='resnet50_unet': + if is_loss_soft_dice: + model = load_model(dir_of_start_model, compile=True, custom_objects={'soft_dice_loss': soft_dice_loss}) + if weighted_loss: + model = load_model(dir_of_start_model, compile=True, custom_objects={'loss': weighted_categorical_crossentropy(weights)}) + if not is_loss_soft_dice and not weighted_loss: + model = load_model(dir_of_start_model , compile=True) + elif model_name=='hybrid_transformer_cnn': + if is_loss_soft_dice: + model = load_model(dir_of_start_model, compile=True, custom_objects={"PatchEncoder": PatchEncoder, "Patches": Patches,'soft_dice_loss': soft_dice_loss}) + if weighted_loss: + model = load_model(dir_of_start_model, compile=True, custom_objects={'loss': weighted_categorical_crossentropy(weights)}) + if not is_loss_soft_dice and not weighted_loss: + model = load_model(dir_of_start_model , compile=True,custom_objects = {"PatchEncoder": PatchEncoder, "Patches": Patches}) + else: + index_start = 0 + if model_name=='resnet50_unet': + model = resnet50_unet(n_classes, input_height, input_width,weight_decay,pretraining) + elif model_name=='hybrid_transformer_cnn': + model = vit_resnet50_unet(n_classes, transformer_patchsize, num_patches, input_height, input_width,weight_decay,pretraining) + + #if you want to see the model structure just uncomment model summary. + #model.summary() + + + if not is_loss_soft_dice and not weighted_loss: + model.compile(loss='categorical_crossentropy', + optimizer=Adam(lr=learning_rate), metrics=['accuracy']) + if is_loss_soft_dice: + model.compile(loss=soft_dice_loss, + optimizer=Adam(lr=learning_rate), metrics=['accuracy']) + if weighted_loss: + model.compile(loss=weighted_categorical_crossentropy(weights), + optimizer=Adam(lr=learning_rate), metrics=['accuracy']) + + # generating train and evaluation data + train_gen = data_gen(dir_flow_train_imgs, dir_flow_train_labels, batch_size=n_batch, + input_height=input_height, input_width=input_width, n_classes=n_classes) + val_gen = data_gen(dir_flow_eval_imgs, dir_flow_eval_labels, batch_size=n_batch, + input_height=input_height, input_width=input_width, n_classes=n_classes) + + ##img_validation_patches = os.listdir(dir_flow_eval_imgs) + ##score_best=[] + ##score_best.append(0) + for i in tqdm(range(index_start, n_epochs + index_start)): + model.fit_generator( + train_gen, + steps_per_epoch=int(len(os.listdir(dir_flow_train_imgs)) / n_batch) - 1, + validation_data=val_gen, + validation_steps=1, + epochs=1) + model.save(dir_output+'/'+'model_'+str(i)) + + with open(dir_output+'/'+'model_'+str(i)+'/'+"config.json", "w") as fp: + json.dump(_config, fp) # encode dict into JSON + + #os.system('rm -rf '+dir_train_flowing) + #os.system('rm -rf '+dir_eval_flowing) + + #model.save(dir_output+'/'+'model'+'.h5') + elif task=='classification': + configuration() + model = resnet50_classifier(n_classes, input_height, input_width,weight_decay,pretraining) + + opt_adam = Adam(learning_rate=0.001) model.compile(loss='categorical_crossentropy', - optimizer=Adam(lr=learning_rate), metrics=['accuracy']) - if is_loss_soft_dice: - model.compile(loss=soft_dice_loss, - optimizer=Adam(lr=learning_rate), metrics=['accuracy']) - if weighted_loss: - model.compile(loss=weighted_categorical_crossentropy(weights), - optimizer=Adam(lr=learning_rate), metrics=['accuracy']) - - # generating train and evaluation data - train_gen = data_gen(dir_flow_train_imgs, dir_flow_train_labels, batch_size=n_batch, - input_height=input_height, input_width=input_width, n_classes=n_classes) - val_gen = data_gen(dir_flow_eval_imgs, dir_flow_eval_labels, batch_size=n_batch, - input_height=input_height, input_width=input_width, n_classes=n_classes) - - ##img_validation_patches = os.listdir(dir_flow_eval_imgs) - ##score_best=[] - ##score_best.append(0) - for i in tqdm(range(index_start, n_epochs + index_start)): - model.fit_generator( - train_gen, - steps_per_epoch=int(len(os.listdir(dir_flow_train_imgs)) / n_batch) - 1, - validation_data=val_gen, - validation_steps=1, - epochs=1) - model.save(dir_output+'/'+'model_'+str(i)) - - with open(dir_output+'/'+'model_'+str(i)+'/'+"config.json", "w") as fp: - json.dump(_config, fp) # encode dict into JSON + optimizer = opt_adam,metrics=['accuracy']) - #os.system('rm -rf '+dir_train_flowing) - #os.system('rm -rf '+dir_eval_flowing) - #model.save(dir_output+'/'+'model'+'.h5') + testX, testY = generate_data_from_folder_evaluation(dir_eval, input_height, input_width, n_classes) + + #print(testY.shape, testY) + + y_tot=np.zeros((testX.shape[0],n_classes)) + indexer=0 + + score_best=[] + score_best.append(0) + + num_rows = return_number_of_total_training_data(dir_train) + + weights=[] + + for i in range(n_epochs): + #history = model.fit(trainX, trainY, epochs=1, batch_size=n_batch, validation_data=(testX, testY), verbose=2)#,class_weight=weights) + history = model.fit( generate_data_from_folder_training(dir_train, n_batch , input_height, input_width, n_classes), steps_per_epoch=num_rows / n_batch, verbose=0)#,class_weight=weights) + + y_pr_class = [] + for jj in range(testY.shape[0]): + y_pr=model.predict(testX[jj,:,:,:].reshape(1,input_height,input_width,3), verbose=0) + y_pr_ind= np.argmax(y_pr,axis=1) + #print(y_pr_ind, 'y_pr_ind') + y_pr_class.append(y_pr_ind) + + + y_pr_class = np.array(y_pr_class) + #model.save('./models_save/model_'+str(i)+'.h5') + #y_pr_class=np.argmax(y_pr,axis=1) + f1score=f1_score(np.argmax(testY,axis=1), y_pr_class, average='macro') + + print(i,f1score) + + if f1score>score_best[0]: + score_best[0]=f1score + model.save(os.path.join(dir_output,'model_best')) + + + ##best_model=keras.models.clone_model(model) + ##best_model.build() + ##best_model.set_weights(model.get_weights()) + if f1score > f1_threshold_classification: + weights.append(model.get_weights() ) + y_tot=y_tot+y_pr + + indexer+=1 + y_tot=y_tot/float(indexer) + + + new_weights=list() + + for weights_list_tuple in zip(*weights): + new_weights.append( [np.array(weights_).mean(axis=0) for weights_ in zip(*weights_list_tuple)] ) + + new_weights = [np.array(x) for x in new_weights] + + model_weight_averaged=tf.keras.models.clone_model(model) + + model_weight_averaged.set_weights(new_weights) + + #y_tot_end=np.argmax(y_tot,axis=1) + #print(f1_score(np.argmax(testY,axis=1), y_tot_end, average='macro')) + + ##best_model.save('model_taza.h5') + model_weight_averaged.save(os.path.join(dir_output,'model_ens_avg')) + diff --git a/train/utils.py b/train/utils.py index c2786ec..af3c5f8 100644 --- a/train/utils.py +++ b/train/utils.py @@ -8,6 +8,119 @@ import random from tqdm import tqdm import imutils import math +from tensorflow.keras.utils import to_categorical + + +def return_number_of_total_training_data(path_classes): + sub_classes = os.listdir(path_classes) + n_tot = 0 + for sub_c in sub_classes: + sub_files = os.listdir(os.path.join(path_classes,sub_c)) + n_tot = n_tot + len(sub_files) + return n_tot + + + +def generate_data_from_folder_evaluation(path_classes, height, width, n_classes): + sub_classes = os.listdir(path_classes) + #n_classes = len(sub_classes) + all_imgs = [] + labels = [] + dicts =dict() + indexer= 0 + for sub_c in sub_classes: + sub_files = os.listdir(os.path.join(path_classes,sub_c )) + sub_files = [os.path.join(path_classes,sub_c )+'/' + x for x in sub_files] + #print( os.listdir(os.path.join(path_classes,sub_c )) ) + all_imgs = all_imgs + sub_files + sub_labels = list( np.zeros( len(sub_files) ) +indexer ) + + #print( len(sub_labels) ) + labels = labels + sub_labels + dicts[sub_c] = indexer + indexer +=1 + + + categories = to_categorical(range(n_classes)).astype(np.int16)#[ [1 , 0, 0 , 0 , 0 , 0] , [0 , 1, 0 , 0 , 0 , 0] , [0 , 0, 1 , 0 , 0 , 0] , [0 , 0, 0 , 1 , 0 , 0] , [0 , 0, 0 , 0 , 1 , 0] , [0 , 0, 0 , 0 , 0 , 1] ] + ret_x= np.zeros((len(labels), height,width, 3)).astype(np.int16) + ret_y= np.zeros((len(labels), n_classes)).astype(np.int16) + + #print(all_imgs) + for i in range(len(all_imgs)): + row = all_imgs[i] + #####img = cv2.imread(row, 0) + #####img= resize_image (img, height, width) + #####img = img.astype(np.uint16) + #####ret_x[i, :,:,0] = img[:,:] + #####ret_x[i, :,:,1] = img[:,:] + #####ret_x[i, :,:,2] = img[:,:] + + img = cv2.imread(row) + img= resize_image (img, height, width) + img = img.astype(np.uint16) + ret_x[i, :,:] = img[:,:,:] + + ret_y[i, :] = categories[ int( labels[i] ) ][:] + + return ret_x/255., ret_y + +def generate_data_from_folder_training(path_classes, batchsize, height, width, n_classes): + sub_classes = os.listdir(path_classes) + n_classes = len(sub_classes) + + all_imgs = [] + labels = [] + dicts =dict() + indexer= 0 + for sub_c in sub_classes: + sub_files = os.listdir(os.path.join(path_classes,sub_c )) + sub_files = [os.path.join(path_classes,sub_c )+'/' + x for x in sub_files] + #print( os.listdir(os.path.join(path_classes,sub_c )) ) + all_imgs = all_imgs + sub_files + sub_labels = list( np.zeros( len(sub_files) ) +indexer ) + + #print( len(sub_labels) ) + labels = labels + sub_labels + dicts[sub_c] = indexer + indexer +=1 + + ids = np.array(range(len(labels))) + random.shuffle(ids) + + shuffled_labels = np.array(labels)[ids] + shuffled_files = np.array(all_imgs)[ids] + categories = to_categorical(range(n_classes)).astype(np.int16)#[ [1 , 0, 0 , 0 , 0 , 0] , [0 , 1, 0 , 0 , 0 , 0] , [0 , 0, 1 , 0 , 0 , 0] , [0 , 0, 0 , 1 , 0 , 0] , [0 , 0, 0 , 0 , 1 , 0] , [0 , 0, 0 , 0 , 0 , 1] ] + ret_x= np.zeros((batchsize, height,width, 3)).astype(np.int16) + ret_y= np.zeros((batchsize, n_classes)).astype(np.int16) + batchcount = 0 + while True: + for i in range(len(shuffled_files)): + row = shuffled_files[i] + #print(row) + ###img = cv2.imread(row, 0) + ###img= resize_image (img, height, width) + ###img = img.astype(np.uint16) + ###ret_x[batchcount, :,:,0] = img[:,:] + ###ret_x[batchcount, :,:,1] = img[:,:] + ###ret_x[batchcount, :,:,2] = img[:,:] + + img = cv2.imread(row) + img= resize_image (img, height, width) + img = img.astype(np.uint16) + ret_x[batchcount, :,:,:] = img[:,:,:] + + #print(int(shuffled_labels[i]) ) + #print( categories[int(shuffled_labels[i])] ) + ret_y[batchcount, :] = categories[ int( shuffled_labels[i] ) ][:] + + batchcount+=1 + + if batchcount>=batchsize: + ret_x = ret_x/255. + yield (ret_x, ret_y) + ret_x= np.zeros((batchsize, height,width, 3)).astype(np.int16) + ret_y= np.zeros((batchsize, n_classes)).astype(np.int16) + batchcount = 0 def do_brightening(img_in_dir, factor): im = Image.open(img_in_dir) From c989f7ac6111314a394700e833abe351f5daae43 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 6 May 2024 18:31:48 +0200 Subject: [PATCH 041/492] adding enhancement training --- train/config_params.json | 20 +++++----- train/gt_for_enhancement_creator.py | 31 +++++++++++++++ train/models.py | 27 ++++++++----- train/train.py | 47 ++++++++++++---------- train/utils.py | 62 ++++++++++++++++------------- 5 files changed, 119 insertions(+), 68 deletions(-) create mode 100644 train/gt_for_enhancement_creator.py diff --git a/train/config_params.json b/train/config_params.json index 43ad1bc..1c7a940 100644 --- a/train/config_params.json +++ b/train/config_params.json @@ -1,15 +1,15 @@ { "model_name" : "resnet50_unet", - "task": "classification", - "n_classes" : 2, - "n_epochs" : 7, - "input_height" : 224, - "input_width" : 224, + "task": "enhancement", + "n_classes" : 3, + "n_epochs" : 3, + "input_height" : 448, + "input_width" : 448, "weight_decay" : 1e-6, - "n_batch" : 6, + "n_batch" : 3, "learning_rate": 1e-4, "f1_threshold_classification": 0.8, - "patches" : false, + "patches" : true, "pretraining" : true, "augmentation" : false, "flip_aug" : false, @@ -35,7 +35,7 @@ "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, - "dir_train": "/home/vahid/Downloads/image_classification_data/train", - "dir_eval": "/home/vahid/Downloads/image_classification_data/eval", - "dir_output": "/home/vahid/Downloads/image_classification_data/output" + "dir_train": "./training_data_sample_enhancement", + "dir_eval": "./eval", + "dir_output": "./out" } diff --git a/train/gt_for_enhancement_creator.py b/train/gt_for_enhancement_creator.py new file mode 100644 index 0000000..9a4274f --- /dev/null +++ b/train/gt_for_enhancement_creator.py @@ -0,0 +1,31 @@ +import cv2 +import os + +def resize_image(seg_in, input_height, input_width): + return cv2.resize(seg_in, (input_width, input_height), interpolation=cv2.INTER_NEAREST) + + +dir_imgs = './training_data_sample_enhancement/images' +dir_out_imgs = './training_data_sample_enhancement/images_gt' +dir_out_labs = './training_data_sample_enhancement/labels_gt' + +ls_imgs = os.listdir(dir_imgs) + + +ls_scales = [ 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9] + + +for img in ls_imgs: + img_name = img.split('.')[0] + img_type = img.split('.')[1] + image = cv2.imread(os.path.join(dir_imgs, img)) + for i, scale in enumerate(ls_scales): + height_sc = int(image.shape[0]*scale) + width_sc = int(image.shape[1]*scale) + + image_down_scaled = resize_image(image, height_sc, width_sc) + image_back_to_org_scale = resize_image(image_down_scaled, image.shape[0], image.shape[1]) + + cv2.imwrite(os.path.join(dir_out_imgs, img_name+'_'+str(i)+'.'+img_type), image_back_to_org_scale) + cv2.imwrite(os.path.join(dir_out_labs, img_name+'_'+str(i)+'.'+img_type), image) + diff --git a/train/models.py b/train/models.py index a6de1ef..4cceacd 100644 --- a/train/models.py +++ b/train/models.py @@ -168,7 +168,7 @@ def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)) return x -def resnet50_unet_light(n_classes, input_height=224, input_width=224, weight_decay=1e-6, pretraining=False): +def resnet50_unet_light(n_classes, input_height=224, input_width=224, taks="segmentation", weight_decay=1e-6, pretraining=False): assert input_height % 32 == 0 assert input_width % 32 == 0 @@ -259,14 +259,17 @@ def resnet50_unet_light(n_classes, input_height=224, input_width=224, weight_dec o = Activation('relu')(o) o = Conv2D(n_classes, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = (Activation('softmax'))(o) + if task == "segmentation": + o = (BatchNormalization(axis=bn_axis))(o) + o = (Activation('softmax'))(o) + else: + o = (Activation('sigmoid'))(o) model = Model(img_input, o) return model -def resnet50_unet(n_classes, input_height=224, input_width=224, weight_decay=1e-6, pretraining=False): +def resnet50_unet(n_classes, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): assert input_height % 32 == 0 assert input_width % 32 == 0 @@ -354,15 +357,18 @@ def resnet50_unet(n_classes, input_height=224, input_width=224, weight_decay=1e- o = Activation('relu')(o) o = Conv2D(n_classes, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = (Activation('softmax'))(o) + if task == "segmentation": + o = (BatchNormalization(axis=bn_axis))(o) + o = (Activation('softmax'))(o) + else: + o = (Activation('sigmoid'))(o) model = Model(img_input, o) return model -def vit_resnet50_unet(n_classes,patch_size, num_patches, input_height=224,input_width=224,weight_decay=1e-6,pretraining=False): +def vit_resnet50_unet(n_classes, patch_size, num_patches, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): inputs = layers.Input(shape=(input_height, input_width, 3)) IMAGE_ORDERING = 'channels_last' bn_axis=3 @@ -465,8 +471,11 @@ def vit_resnet50_unet(n_classes,patch_size, num_patches, input_height=224,input_ o = Activation('relu')(o) o = Conv2D(n_classes, (1, 1), padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay))(o) - o = (BatchNormalization(axis=bn_axis))(o) - o = (Activation('softmax'))(o) + if task == "segmentation": + o = (BatchNormalization(axis=bn_axis))(o) + o = (Activation('softmax'))(o) + else: + o = (Activation('sigmoid'))(o) model = Model(inputs=inputs, outputs=o) diff --git a/train/train.py b/train/train.py index efcd3ac..595debe 100644 --- a/train/train.py +++ b/train/train.py @@ -1,5 +1,6 @@ import os import sys +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import tensorflow as tf from tensorflow.compat.v1.keras.backend import set_session import warnings @@ -91,7 +92,7 @@ def run(_config, n_classes, n_epochs, input_height, num_patches_xy, model_name, flip_index, dir_eval, dir_output, pretraining, learning_rate, task, f1_threshold_classification): - if task == "segmentation": + if task == "segmentation" or "enhancement": num_patches = num_patches_xy[0]*num_patches_xy[1] if data_is_provided: @@ -153,7 +154,7 @@ def run(_config, n_classes, n_epochs, input_height, blur_aug, padding_white, padding_black, flip_aug, binarization, scaling, degrading, brightening, scales, degrade_scales, brightness, flip_index, scaling_bluring, scaling_brightness, scaling_binarization, - rotation, rotation_not_90, thetha, scaling_flip, augmentation=augmentation, + rotation, rotation_not_90, thetha, scaling_flip, task, augmentation=augmentation, patches=patches) provide_patches(imgs_list_test, segs_list_test, dir_img_val, dir_seg_val, @@ -161,7 +162,7 @@ def run(_config, n_classes, n_epochs, input_height, blur_k, blur_aug, padding_white, padding_black, flip_aug, binarization, scaling, degrading, brightening, scales, degrade_scales, brightness, flip_index, scaling_bluring, scaling_brightness, scaling_binarization, - rotation, rotation_not_90, thetha, scaling_flip, augmentation=False, patches=patches) + rotation, rotation_not_90, thetha, scaling_flip, task, augmentation=False, patches=patches) if weighted_loss: weights = np.zeros(n_classes) @@ -191,45 +192,49 @@ def run(_config, n_classes, n_epochs, input_height, if continue_training: if model_name=='resnet50_unet': - if is_loss_soft_dice: + if is_loss_soft_dice and task == "segmentation": model = load_model(dir_of_start_model, compile=True, custom_objects={'soft_dice_loss': soft_dice_loss}) - if weighted_loss: + if weighted_loss and task == "segmentation": model = load_model(dir_of_start_model, compile=True, custom_objects={'loss': weighted_categorical_crossentropy(weights)}) if not is_loss_soft_dice and not weighted_loss: model = load_model(dir_of_start_model , compile=True) elif model_name=='hybrid_transformer_cnn': - if is_loss_soft_dice: + if is_loss_soft_dice and task == "segmentation": model = load_model(dir_of_start_model, compile=True, custom_objects={"PatchEncoder": PatchEncoder, "Patches": Patches,'soft_dice_loss': soft_dice_loss}) - if weighted_loss: + if weighted_loss and task == "segmentation": model = load_model(dir_of_start_model, compile=True, custom_objects={'loss': weighted_categorical_crossentropy(weights)}) if not is_loss_soft_dice and not weighted_loss: model = load_model(dir_of_start_model , compile=True,custom_objects = {"PatchEncoder": PatchEncoder, "Patches": Patches}) else: index_start = 0 if model_name=='resnet50_unet': - model = resnet50_unet(n_classes, input_height, input_width,weight_decay,pretraining) + model = resnet50_unet(n_classes, input_height, input_width, task, weight_decay, pretraining) elif model_name=='hybrid_transformer_cnn': - model = vit_resnet50_unet(n_classes, transformer_patchsize, num_patches, input_height, input_width,weight_decay,pretraining) + model = vit_resnet50_unet(n_classes, transformer_patchsize, num_patches, input_height, input_width, task, weight_decay, pretraining) #if you want to see the model structure just uncomment model summary. #model.summary() - - if not is_loss_soft_dice and not weighted_loss: - model.compile(loss='categorical_crossentropy', - optimizer=Adam(lr=learning_rate), metrics=['accuracy']) - if is_loss_soft_dice: - model.compile(loss=soft_dice_loss, - optimizer=Adam(lr=learning_rate), metrics=['accuracy']) - if weighted_loss: - model.compile(loss=weighted_categorical_crossentropy(weights), - optimizer=Adam(lr=learning_rate), metrics=['accuracy']) + if task == "segmentation": + if not is_loss_soft_dice and not weighted_loss: + model.compile(loss='categorical_crossentropy', + optimizer=Adam(lr=learning_rate), metrics=['accuracy']) + if is_loss_soft_dice: + model.compile(loss=soft_dice_loss, + optimizer=Adam(lr=learning_rate), metrics=['accuracy']) + if weighted_loss: + model.compile(loss=weighted_categorical_crossentropy(weights), + optimizer=Adam(lr=learning_rate), metrics=['accuracy']) + elif task == "enhancement": + model.compile(loss='mean_squared_error', + optimizer=Adam(lr=learning_rate), metrics=['accuracy']) + # generating train and evaluation data train_gen = data_gen(dir_flow_train_imgs, dir_flow_train_labels, batch_size=n_batch, - input_height=input_height, input_width=input_width, n_classes=n_classes) + input_height=input_height, input_width=input_width, n_classes=n_classes, task=task) val_gen = data_gen(dir_flow_eval_imgs, dir_flow_eval_labels, batch_size=n_batch, - input_height=input_height, input_width=input_width, n_classes=n_classes) + input_height=input_height, input_width=input_width, n_classes=n_classes, task=task) ##img_validation_patches = os.listdir(dir_flow_eval_imgs) ##score_best=[] diff --git a/train/utils.py b/train/utils.py index af3c5f8..0c5a458 100644 --- a/train/utils.py +++ b/train/utils.py @@ -268,7 +268,7 @@ def IoU(Yi, y_predi): return mIoU -def data_gen(img_folder, mask_folder, batch_size, input_height, input_width, n_classes): +def data_gen(img_folder, mask_folder, batch_size, input_height, input_width, n_classes, task='segmentation'): c = 0 n = [f for f in os.listdir(img_folder) if not f.startswith('.')] # os.listdir(img_folder) #List of training images random.shuffle(n) @@ -277,8 +277,6 @@ def data_gen(img_folder, mask_folder, batch_size, input_height, input_width, n_c mask = np.zeros((batch_size, input_height, input_width, n_classes)).astype('float') for i in range(c, c + batch_size): # initially from 0 to 16, c = 0. - # print(img_folder+'/'+n[i]) - try: filename = n[i].split('.')[0] @@ -287,11 +285,14 @@ def data_gen(img_folder, mask_folder, batch_size, input_height, input_width, n_c interpolation=cv2.INTER_NEAREST) # Read an image from folder and resize img[i - c] = train_img # add to array - img[0], img[1], and so on. - train_mask = cv2.imread(mask_folder + '/' + filename + '.png') - # print(mask_folder+'/'+filename+'.png') - # print(train_mask.shape) - train_mask = get_one_hot(resize_image(train_mask, input_height, input_width), input_height, input_width, - n_classes) + if task == "segmentation": + train_mask = cv2.imread(mask_folder + '/' + filename + '.png') + train_mask = get_one_hot(resize_image(train_mask, input_height, input_width), input_height, input_width, + n_classes) + elif task == "enhancement": + train_mask = cv2.imread(mask_folder + '/' + filename + '.png')/255. + train_mask = resize_image(train_mask, input_height, input_width) + # train_mask = train_mask.reshape(224, 224, 1) # Add extra dimension for parity with train_img size [512 * 512 * 3] mask[i - c] = train_mask @@ -539,14 +540,19 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow padding_white, padding_black, flip_aug, binarization, scaling, degrading, brightening, scales, degrade_scales, brightness, flip_index, scaling_bluring, scaling_brightness, scaling_binarization, rotation, - rotation_not_90, thetha, scaling_flip, augmentation=False, patches=False): + rotation_not_90, thetha, scaling_flip, task, augmentation=False, patches=False): indexer = 0 for im, seg_i in tqdm(zip(imgs_list_train, segs_list_train)): img_name = im.split('.')[0] + if task == "segmentation": + dir_of_label_file = os.path.join(dir_seg, img_name + '.png') + elif task=="enhancement": + dir_of_label_file = os.path.join(dir_seg, im) + if not patches: cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', resize_image(cv2.imread(dir_img + '/' + im), input_height, input_width)) - cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', resize_image(cv2.imread(dir_seg + '/' + img_name + '.png'), input_height, input_width)) + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) indexer += 1 if augmentation: @@ -556,7 +562,7 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow resize_image(cv2.flip(cv2.imread(dir_img+'/'+im),f_i),input_height,input_width) ) cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', - resize_image(cv2.flip(cv2.imread(dir_seg + '/' + img_name + '.png'), f_i), input_height, input_width)) + resize_image(cv2.flip(cv2.imread(dir_of_label_file), f_i), input_height, input_width)) indexer += 1 if blur_aug: @@ -565,7 +571,7 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow (resize_image(bluring(cv2.imread(dir_img + '/' + im), blur_i), input_height, input_width))) cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', - resize_image(cv2.imread(dir_seg + '/' + img_name + '.png'), input_height, input_width)) + resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) indexer += 1 if binarization: @@ -573,26 +579,26 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow resize_image(otsu_copy(cv2.imread(dir_img + '/' + im)), input_height, input_width)) cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', - resize_image(cv2.imread(dir_seg + '/' + img_name + '.png'), input_height, input_width)) + resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) indexer += 1 if patches: indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, - cv2.imread(dir_img + '/' + im), cv2.imread(dir_seg + '/' + img_name + '.png'), + cv2.imread(dir_img + '/' + im), cv2.imread(dir_of_label_file), input_height, input_width, indexer=indexer) if augmentation: if rotation: indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, rotation_90(cv2.imread(dir_img + '/' + im)), - rotation_90(cv2.imread(dir_seg + '/' + img_name + '.png')), + rotation_90(cv2.imread(dir_of_label_file)), input_height, input_width, indexer=indexer) if rotation_not_90: for thetha_i in thetha: img_max_rotated, label_max_rotated = rotation_not_90_func(cv2.imread(dir_img + '/'+im), - cv2.imread(dir_seg + '/'+img_name + '.png'), thetha_i) + cv2.imread(dir_of_label_file), thetha_i) indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, img_max_rotated, label_max_rotated, @@ -601,24 +607,24 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow for f_i in flip_index: indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, cv2.flip(cv2.imread(dir_img + '/' + im), f_i), - cv2.flip(cv2.imread(dir_seg + '/' + img_name + '.png'), f_i), + cv2.flip(cv2.imread(dir_of_label_file), f_i), input_height, input_width, indexer=indexer) if blur_aug: for blur_i in blur_k: indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, bluring(cv2.imread(dir_img + '/' + im), blur_i), - cv2.imread(dir_seg + '/' + img_name + '.png'), + cv2.imread(dir_of_label_file), input_height, input_width, indexer=indexer) if padding_black: indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, do_padding_black(cv2.imread(dir_img + '/' + im)), - do_padding_label(cv2.imread(dir_seg + '/' + img_name + '.png')), + do_padding_label(cv2.imread(dir_of_label_file)), input_height, input_width, indexer=indexer) if padding_white: indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, do_padding_white(cv2.imread(dir_img + '/'+im)), - do_padding_label(cv2.imread(dir_seg + '/' + img_name + '.png')), + do_padding_label(cv2.imread(dir_of_label_file)), input_height, input_width, indexer=indexer) if brightening: @@ -626,7 +632,7 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow try: indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, do_brightening(dir_img + '/' +im, factor), - cv2.imread(dir_seg + '/' + img_name + '.png'), + cv2.imread(dir_of_label_file), input_height, input_width, indexer=indexer) except: pass @@ -634,20 +640,20 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow for sc_ind in scales: indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, cv2.imread(dir_img + '/' + im) , - cv2.imread(dir_seg + '/' + img_name + '.png'), + cv2.imread(dir_of_label_file), input_height, input_width, indexer=indexer, scaler=sc_ind) if degrading: for degrade_scale_ind in degrade_scales: indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, do_degrading(cv2.imread(dir_img + '/' + im), degrade_scale_ind), - cv2.imread(dir_seg + '/' + img_name + '.png'), + cv2.imread(dir_of_label_file), input_height, input_width, indexer=indexer) if binarization: indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, otsu_copy(cv2.imread(dir_img + '/' + im)), - cv2.imread(dir_seg + '/' + img_name + '.png'), + cv2.imread(dir_of_label_file), input_height, input_width, indexer=indexer) if scaling_brightness: @@ -657,7 +663,7 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, do_brightening(dir_img + '/' + im, factor) - ,cv2.imread(dir_seg + '/' + img_name + '.png') + ,cv2.imread(dir_of_label_file) ,input_height, input_width, indexer=indexer, scaler=sc_ind) except: pass @@ -667,14 +673,14 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow for blur_i in blur_k: indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, bluring(cv2.imread(dir_img + '/' + im), blur_i), - cv2.imread(dir_seg + '/' + img_name + '.png'), + cv2.imread(dir_of_label_file), input_height, input_width, indexer=indexer, scaler=sc_ind) if scaling_binarization: for sc_ind in scales: indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, otsu_copy(cv2.imread(dir_img + '/' + im)), - cv2.imread(dir_seg + '/' + img_name + '.png'), + cv2.imread(dir_of_label_file), input_height, input_width, indexer=indexer, scaler=sc_ind) if scaling_flip: @@ -682,5 +688,5 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow for f_i in flip_index: indexer = get_patches_num_scale_new(dir_flow_train_imgs, dir_flow_train_labels, cv2.flip( cv2.imread(dir_img + '/' + im), f_i), - cv2.flip(cv2.imread(dir_seg + '/' + img_name + '.png'), f_i), + cv2.flip(cv2.imread(dir_of_label_file), f_i), input_height, input_width, indexer=indexer, scaler=sc_ind) From e1f62c2e9827030e3386ff678a131481d70e8e14 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 7 May 2024 13:34:03 +0200 Subject: [PATCH 042/492] inference script is added --- train/config_params.json | 17 +- train/inference.py | 490 +++++++++++++++++++++++++++++++++++++++ train/train.py | 42 ++-- train/utils.py | 30 +-- 4 files changed, 537 insertions(+), 42 deletions(-) create mode 100644 train/inference.py diff --git a/train/config_params.json b/train/config_params.json index 1c7a940..8a56de5 100644 --- a/train/config_params.json +++ b/train/config_params.json @@ -1,12 +1,12 @@ { - "model_name" : "resnet50_unet", - "task": "enhancement", - "n_classes" : 3, - "n_epochs" : 3, + "backbone_type" : "nontransformer", + "task": "classification", + "n_classes" : 2, + "n_epochs" : 20, "input_height" : 448, "input_width" : 448, "weight_decay" : 1e-6, - "n_batch" : 3, + "n_batch" : 6, "learning_rate": 1e-4, "f1_threshold_classification": 0.8, "patches" : true, @@ -21,7 +21,7 @@ "scaling_flip" : false, "rotation": false, "rotation_not_90": false, - "num_patches_xy": [28, 28], + "transformer_num_patches_xy": [28, 28], "transformer_patchsize": 1, "blur_k" : ["blur","guass","median"], "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], @@ -29,13 +29,14 @@ "degrade_scales" : [0.2, 0.4], "flip_index" : [0, 1, -1], "thetha" : [10, -10], + "classification_classes_name" : {"0":"apple", "1":"orange"}, "continue_training": false, "index_start" : 0, "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, - "dir_train": "./training_data_sample_enhancement", + "dir_train": "./train", "dir_eval": "./eval", - "dir_output": "./out" + "dir_output": "./output" } diff --git a/train/inference.py b/train/inference.py new file mode 100644 index 0000000..6911bea --- /dev/null +++ b/train/inference.py @@ -0,0 +1,490 @@ +#! /usr/bin/env python3 + +__version__= '1.0' + +import argparse +import sys +import os +import numpy as np +import warnings +import xml.etree.ElementTree as et +import pandas as pd +from tqdm import tqdm +import csv +import cv2 +import seaborn as sns +import matplotlib.pyplot as plt +from tensorflow.keras.models import load_model +import tensorflow as tf +from tensorflow.keras import backend as K +from tensorflow.keras import layers +import tensorflow.keras.losses +from tensorflow.keras.layers import * +import click +import json +from tensorflow.python.keras import backend as tensorflow_backend + + + + + + +with warnings.catch_warnings(): + warnings.simplefilter("ignore") + +__doc__=\ +""" +Tool to load model and predict for given image. +""" + +projection_dim = 64 +patch_size = 1 +num_patches =28*28 +class Patches(layers.Layer): + def __init__(self, **kwargs): + super(Patches, self).__init__() + self.patch_size = patch_size + + def call(self, images): + print(tf.shape(images)[1],'images') + print(self.patch_size,'self.patch_size') + batch_size = tf.shape(images)[0] + patches = tf.image.extract_patches( + images=images, + sizes=[1, self.patch_size, self.patch_size, 1], + strides=[1, self.patch_size, self.patch_size, 1], + rates=[1, 1, 1, 1], + padding="VALID", + ) + patch_dims = patches.shape[-1] + print(patches.shape,patch_dims,'patch_dims') + patches = tf.reshape(patches, [batch_size, -1, patch_dims]) + return patches + def get_config(self): + + config = super().get_config().copy() + config.update({ + 'patch_size': self.patch_size, + }) + return config + + +class PatchEncoder(layers.Layer): + def __init__(self, **kwargs): + super(PatchEncoder, self).__init__() + self.num_patches = num_patches + self.projection = layers.Dense(units=projection_dim) + self.position_embedding = layers.Embedding( + input_dim=num_patches, output_dim=projection_dim + ) + + def call(self, patch): + positions = tf.range(start=0, limit=self.num_patches, delta=1) + encoded = self.projection(patch) + self.position_embedding(positions) + return encoded + def get_config(self): + + config = super().get_config().copy() + config.update({ + 'num_patches': self.num_patches, + 'projection': self.projection, + 'position_embedding': self.position_embedding, + }) + return config + + +class sbb_predict: + def __init__(self,image, model, task, config_params_model, patches='false',save='false', ground_truth=None,weights_dir=None ): + self.image=image + self.patches=patches + self.save=save + self.model_dir=model + self.ground_truth=ground_truth + self.weights_dir=weights_dir + self.task=task + self.config_params_model=config_params_model + + def resize_image(self,img_in,input_height,input_width): + return cv2.resize( img_in, ( input_width,input_height) ,interpolation=cv2.INTER_NEAREST) + + + def color_images(self,seg): + ann_u=range(self.n_classes) + if len(np.shape(seg))==3: + seg=seg[:,:,0] + + seg_img=np.zeros((np.shape(seg)[0],np.shape(seg)[1],3)).astype(np.uint8) + colors=sns.color_palette("hls", self.n_classes) + + for c in ann_u: + c=int(c) + segl=(seg==c) + seg_img[:,:,0][seg==c]=c + seg_img[:,:,1][seg==c]=c + seg_img[:,:,2][seg==c]=c + return seg_img + + def otsu_copy_binary(self,img): + img_r=np.zeros((img.shape[0],img.shape[1],3)) + img1=img[:,:,0] + + #print(img.min()) + #print(img[:,:,0].min()) + #blur = cv2.GaussianBlur(img,(5,5)) + #ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) + retval1, threshold1 = cv2.threshold(img1, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) + + + + img_r[:,:,0]=threshold1 + img_r[:,:,1]=threshold1 + img_r[:,:,2]=threshold1 + #img_r=img_r/float(np.max(img_r))*255 + return img_r + + def otsu_copy(self,img): + img_r=np.zeros((img.shape[0],img.shape[1],3)) + #img1=img[:,:,0] + + #print(img.min()) + #print(img[:,:,0].min()) + #blur = cv2.GaussianBlur(img,(5,5)) + #ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) + _, threshold1 = cv2.threshold(img[:,:,0], 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) + _, threshold2 = cv2.threshold(img[:,:,1], 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) + _, threshold3 = cv2.threshold(img[:,:,2], 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) + + + + img_r[:,:,0]=threshold1 + img_r[:,:,1]=threshold2 + img_r[:,:,2]=threshold3 + ###img_r=img_r/float(np.max(img_r))*255 + return img_r + + def soft_dice_loss(self,y_true, y_pred, epsilon=1e-6): + + axes = tuple(range(1, len(y_pred.shape)-1)) + + numerator = 2. * K.sum(y_pred * y_true, axes) + + denominator = K.sum(K.square(y_pred) + K.square(y_true), axes) + return 1.00 - K.mean(numerator / (denominator + epsilon)) # average over classes and batch + + def weighted_categorical_crossentropy(self,weights=None): + + def loss(y_true, y_pred): + labels_floats = tf.cast(y_true, tf.float32) + per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats,logits=y_pred) + + if weights is not None: + weight_mask = tf.maximum(tf.reduce_max(tf.constant( + np.array(weights, dtype=np.float32)[None, None, None]) + * labels_floats, axis=-1), 1.0) + per_pixel_loss = per_pixel_loss * weight_mask[:, :, :, None] + return tf.reduce_mean(per_pixel_loss) + return self.loss + + + def IoU(self,Yi,y_predi): + ## mean Intersection over Union + ## Mean IoU = TP/(FN + TP + FP) + + IoUs = [] + Nclass = np.unique(Yi) + for c in Nclass: + TP = np.sum( (Yi == c)&(y_predi==c) ) + FP = np.sum( (Yi != c)&(y_predi==c) ) + FN = np.sum( (Yi == c)&(y_predi != c)) + IoU = TP/float(TP + FP + FN) + if self.n_classes>2: + print("class {:02.0f}: #TP={:6.0f}, #FP={:6.0f}, #FN={:5.0f}, IoU={:4.3f}".format(c,TP,FP,FN,IoU)) + IoUs.append(IoU) + if self.n_classes>2: + mIoU = np.mean(IoUs) + print("_________________") + print("Mean IoU: {:4.3f}".format(mIoU)) + return mIoU + elif self.n_classes==2: + mIoU = IoUs[1] + print("_________________") + print("IoU: {:4.3f}".format(mIoU)) + return mIoU + + def start_new_session_and_model(self): + + config = tf.compat.v1.ConfigProto() + config.gpu_options.allow_growth = True + + session = tf.compat.v1.Session(config=config) # tf.InteractiveSession() + tensorflow_backend.set_session(session) + #tensorflow.keras.layers.custom_layer = PatchEncoder + #tensorflow.keras.layers.custom_layer = Patches + self.model = load_model(self.model_dir , compile=False,custom_objects = {"PatchEncoder": PatchEncoder, "Patches": Patches}) + #config = tf.ConfigProto() + #config.gpu_options.allow_growth=True + + #self.session = tf.InteractiveSession() + #keras.losses.custom_loss = self.weighted_categorical_crossentropy + #self.model = load_model(self.model_dir , compile=False) + + + ##if self.weights_dir!=None: + ##self.model.load_weights(self.weights_dir) + + if self.task != 'classification': + self.img_height=self.model.layers[len(self.model.layers)-1].output_shape[1] + self.img_width=self.model.layers[len(self.model.layers)-1].output_shape[2] + self.n_classes=self.model.layers[len(self.model.layers)-1].output_shape[3] + + def visualize_model_output(self, prediction, img, task): + if task == "binarization": + prediction = prediction * -1 + prediction = prediction + 1 + added_image = prediction * 255 + else: + unique_classes = np.unique(prediction[:,:,0]) + rgb_colors = {'0' : [255, 255, 255], + '1' : [255, 0, 0], + '2' : [255, 125, 0], + '3' : [255, 0, 125], + '4' : [125, 125, 125], + '5' : [125, 125, 0], + '6' : [0, 125, 255], + '7' : [0, 125, 0], + '8' : [125, 125, 125], + '9' : [0, 125, 255], + '10' : [125, 0, 125], + '11' : [0, 255, 0], + '12' : [0, 0, 255], + '13' : [0, 255, 255], + '14' : [255, 125, 125], + '15' : [255, 0, 255]} + + output = np.zeros(prediction.shape) + + for unq_class in unique_classes: + rgb_class_unique = rgb_colors[str(int(unq_class))] + output[:,:,0][prediction[:,:,0]==unq_class] = rgb_class_unique[0] + output[:,:,1][prediction[:,:,0]==unq_class] = rgb_class_unique[1] + output[:,:,2][prediction[:,:,0]==unq_class] = rgb_class_unique[2] + + + + img = self.resize_image(img, output.shape[0], output.shape[1]) + + output = output.astype(np.int32) + img = img.astype(np.int32) + + + + added_image = cv2.addWeighted(img,0.5,output,0.1,0) + + return added_image + + def predict(self): + self.start_new_session_and_model() + if self.task == 'classification': + classes_names = self.config_params_model['classification_classes_name'] + img_1ch = img=cv2.imread(self.image, 0) + + img_1ch = img_1ch / 255.0 + img_1ch = cv2.resize(img_1ch, (self.config_params_model['input_height'], self.config_params_model['input_width']), interpolation=cv2.INTER_NEAREST) + img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3)) + img_in[0, :, :, 0] = img_1ch[:, :] + img_in[0, :, :, 1] = img_1ch[:, :] + img_in[0, :, :, 2] = img_1ch[:, :] + + label_p_pred = self.model.predict(img_in, verbose=0) + index_class = np.argmax(label_p_pred[0]) + + print("Predicted Class: {}".format(classes_names[str(int(index_class))])) + else: + if self.patches: + #def textline_contours(img,input_width,input_height,n_classes,model): + + img=cv2.imread(self.image) + self.img_org = np.copy(img) + + if img.shape[0] < self.img_height: + img = cv2.resize(img, (img.shape[1], self.img_width), interpolation=cv2.INTER_NEAREST) + + if img.shape[1] < self.img_width: + img = cv2.resize(img, (self.img_height, img.shape[0]), interpolation=cv2.INTER_NEAREST) + margin = int(0 * self.img_width) + width_mid = self.img_width - 2 * margin + height_mid = self.img_height - 2 * margin + img = img / float(255.0) + + img_h = img.shape[0] + img_w = img.shape[1] + + prediction_true = np.zeros((img_h, img_w, 3)) + nxf = img_w / float(width_mid) + nyf = img_h / float(height_mid) + + nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf) + nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf) + + for i in range(nxf): + for j in range(nyf): + if i == 0: + index_x_d = i * width_mid + index_x_u = index_x_d + self.img_width + else: + index_x_d = i * width_mid + index_x_u = index_x_d + self.img_width + if j == 0: + index_y_d = j * height_mid + index_y_u = index_y_d + self.img_height + else: + index_y_d = j * height_mid + index_y_u = index_y_d + self.img_height + + if index_x_u > img_w: + index_x_u = img_w + index_x_d = img_w - self.img_width + if index_y_u > img_h: + index_y_u = img_h + index_y_d = img_h - self.img_height + + img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] + label_p_pred = self.model.predict(img_patch.reshape(1, img_patch.shape[0], img_patch.shape[1], img_patch.shape[2]), + verbose=0) + + if self.task == 'enhancement': + seg = label_p_pred[0, :, :, :] + seg = seg * 255 + elif self.task == 'segmentation' or self.task == 'binarization': + seg = np.argmax(label_p_pred, axis=3)[0] + seg = np.repeat(seg[:, :, np.newaxis], 3, axis=2) + + + if i == 0 and j == 0: + seg = seg[0 : seg.shape[0] - margin, 0 : seg.shape[1] - margin] + prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg + elif i == nxf - 1 and j == nyf - 1: + seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - 0] + prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - 0, :] = seg + elif i == 0 and j == nyf - 1: + seg = seg[margin : seg.shape[0] - 0, 0 : seg.shape[1] - margin] + prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + 0 : index_x_u - margin, :] = seg + elif i == nxf - 1 and j == 0: + seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - 0] + prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg + elif i == 0 and j != 0 and j != nyf - 1: + seg = seg[margin : seg.shape[0] - margin, 0 : seg.shape[1] - margin] + prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg + elif i == nxf - 1 and j != 0 and j != nyf - 1: + seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - 0] + prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg + elif i != 0 and i != nxf - 1 and j == 0: + seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - margin] + prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg + elif i != 0 and i != nxf - 1 and j == nyf - 1: + seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - margin] + prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - margin, :] = seg + else: + seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - margin] + prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg + prediction_true = prediction_true.astype(int) + prediction_true = cv2.resize(prediction_true, (self.img_org.shape[1], self.img_org.shape[0]), interpolation=cv2.INTER_NEAREST) + return prediction_true + + else: + + img=cv2.imread(self.image) + self.img_org = np.copy(img) + + width=self.img_width + height=self.img_height + + img=img/255.0 + img=self.resize_image(img,self.img_height,self.img_width) + + + label_p_pred=self.model.predict( + img.reshape(1,img.shape[0],img.shape[1],img.shape[2])) + + if self.task == 'enhancement': + seg = label_p_pred[0, :, :, :] + seg = seg * 255 + elif self.task == 'segmentation' or self.task == 'binarization': + seg = np.argmax(label_p_pred, axis=3)[0] + seg = np.repeat(seg[:, :, np.newaxis], 3, axis=2) + + prediction_true = seg.astype(int) + + prediction_true = cv2.resize(prediction_true, (self.img_org.shape[1], self.img_org.shape[0]), interpolation=cv2.INTER_NEAREST) + return prediction_true + + + + def run(self): + res=self.predict() + if self.task == 'classification': + pass + else: + img_seg_overlayed = self.visualize_model_output(res, self.img_org, self.task) + cv2.imwrite('./test.png',img_seg_overlayed) + ##if self.save!=None: + ##img=np.repeat(res[:, :, np.newaxis]*255, 3, axis=2) + ##cv2.imwrite(self.save,img) + + ###if self.ground_truth!=None: + ###gt_img=cv2.imread(self.ground_truth) + ###self.IoU(gt_img[:,:,0],res) + ##plt.imshow(res) + ##plt.show() + +@click.command() +@click.option( + "--image", + "-i", + help="image filename", + type=click.Path(exists=True, dir_okay=False), +) +@click.option( + "--patches/--no-patches", + "-p/-nop", + is_flag=True, + help="if this parameter set to true, this tool will try to do inference in patches.", +) +@click.option( + "--save", + "-s", + help="save prediction as a png file in current folder.", +) +@click.option( + "--model", + "-m", + help="directory of models", + type=click.Path(exists=True, file_okay=False), + required=True, +) +@click.option( + "--ground_truth/--no-ground_truth", + "-gt/-nogt", + is_flag=True, + help="ground truth directory if you want to see the iou of prediction.", +) +@click.option( + "--model_weights/--no-model_weights", + "-mw/-nomw", + is_flag=True, + help="previous model weights which are saved.", +) +def main(image, model, patches, save, ground_truth, model_weights): + + with open(os.path.join(model,'config.json')) as f: + config_params_model = json.load(f) + task = 'classification' + x=sbb_predict(image, model, task, config_params_model, patches, save, ground_truth, model_weights) + x.run() + +if __name__=="__main__": + main() + + + + diff --git a/train/train.py b/train/train.py index 595debe..28363d2 100644 --- a/train/train.py +++ b/train/train.py @@ -69,7 +69,7 @@ def config_params(): flip_index = None # Flip image for augmentation. continue_training = False # Set to true if you would like to continue training an already trained a model. transformer_patchsize = None # Patch size of vision transformer patches. - num_patches_xy = None # Number of patches for vision transformer. + transformer_num_patches_xy = None # Number of patches for vision transformer. index_start = 0 # Index of model to continue training from. E.g. if you trained for 3 epochs and last index is 2, to continue from model_1.h5, set "index_start" to 3 to start naming model with index 3. dir_of_start_model = '' # Directory containing pretrained encoder to continue training the model. is_loss_soft_dice = False # Use soft dice as loss function. When set to true, "weighted_loss" must be false. @@ -77,6 +77,8 @@ def config_params(): data_is_provided = False # Only set this to true when you have already provided the input data and the train and eval data are in "dir_output". task = "segmentation" # This parameter defines task of model which can be segmentation, enhancement or classification. f1_threshold_classification = None # This threshold is used to consider models with an evaluation f1 scores bigger than it. The selected model weights undergo a weights ensembling. And avreage ensembled model will be written to output. + classification_classes_name = None # Dictionary of classification classes names. + backbone_type = None # As backbone we have 2 types of backbones. A vision transformer alongside a CNN and we call it "transformer" and only CNN called "nontransformer" @ex.automain @@ -89,12 +91,12 @@ def run(_config, n_classes, n_epochs, input_height, brightness, dir_train, data_is_provided, scaling_bluring, scaling_brightness, scaling_binarization, rotation, rotation_not_90, thetha, scaling_flip, continue_training, transformer_patchsize, - num_patches_xy, model_name, flip_index, dir_eval, dir_output, - pretraining, learning_rate, task, f1_threshold_classification): + transformer_num_patches_xy, backbone_type, flip_index, dir_eval, dir_output, + pretraining, learning_rate, task, f1_threshold_classification, classification_classes_name): - if task == "segmentation" or "enhancement": + if task == "segmentation" or task == "enhancement": - num_patches = num_patches_xy[0]*num_patches_xy[1] + num_patches = transformer_num_patches_xy[0]*transformer_num_patches_xy[1] if data_is_provided: dir_train_flowing = os.path.join(dir_output, 'train') dir_eval_flowing = os.path.join(dir_output, 'eval') @@ -191,14 +193,14 @@ def run(_config, n_classes, n_epochs, input_height, weights = weights / float(np.sum(weights)) if continue_training: - if model_name=='resnet50_unet': + if backbone_type=='nontransformer': if is_loss_soft_dice and task == "segmentation": model = load_model(dir_of_start_model, compile=True, custom_objects={'soft_dice_loss': soft_dice_loss}) if weighted_loss and task == "segmentation": model = load_model(dir_of_start_model, compile=True, custom_objects={'loss': weighted_categorical_crossentropy(weights)}) if not is_loss_soft_dice and not weighted_loss: model = load_model(dir_of_start_model , compile=True) - elif model_name=='hybrid_transformer_cnn': + elif backbone_type=='transformer': if is_loss_soft_dice and task == "segmentation": model = load_model(dir_of_start_model, compile=True, custom_objects={"PatchEncoder": PatchEncoder, "Patches": Patches,'soft_dice_loss': soft_dice_loss}) if weighted_loss and task == "segmentation": @@ -207,9 +209,9 @@ def run(_config, n_classes, n_epochs, input_height, model = load_model(dir_of_start_model , compile=True,custom_objects = {"PatchEncoder": PatchEncoder, "Patches": Patches}) else: index_start = 0 - if model_name=='resnet50_unet': + if backbone_type=='nontransformer': model = resnet50_unet(n_classes, input_height, input_width, task, weight_decay, pretraining) - elif model_name=='hybrid_transformer_cnn': + elif backbone_type=='nontransformer': model = vit_resnet50_unet(n_classes, transformer_patchsize, num_patches, input_height, input_width, task, weight_decay, pretraining) #if you want to see the model structure just uncomment model summary. @@ -246,9 +248,9 @@ def run(_config, n_classes, n_epochs, input_height, validation_data=val_gen, validation_steps=1, epochs=1) - model.save(dir_output+'/'+'model_'+str(i)) + model.save(os.path.join(dir_output,'model_'+str(i))) - with open(dir_output+'/'+'model_'+str(i)+'/'+"config.json", "w") as fp: + with open(os.path.join(os.path.join(dir_output,'model_'+str(i)),"config.json"), "w") as fp: json.dump(_config, fp) # encode dict into JSON #os.system('rm -rf '+dir_train_flowing) @@ -257,14 +259,15 @@ def run(_config, n_classes, n_epochs, input_height, #model.save(dir_output+'/'+'model'+'.h5') elif task=='classification': configuration() - model = resnet50_classifier(n_classes, input_height, input_width,weight_decay,pretraining) + model = resnet50_classifier(n_classes, input_height, input_width, weight_decay, pretraining) opt_adam = Adam(learning_rate=0.001) model.compile(loss='categorical_crossentropy', optimizer = opt_adam,metrics=['accuracy']) - - testX, testY = generate_data_from_folder_evaluation(dir_eval, input_height, input_width, n_classes) + + list_classes = list(classification_classes_name.values()) + testX, testY = generate_data_from_folder_evaluation(dir_eval, input_height, input_width, n_classes, list_classes) #print(testY.shape, testY) @@ -280,7 +283,7 @@ def run(_config, n_classes, n_epochs, input_height, for i in range(n_epochs): #history = model.fit(trainX, trainY, epochs=1, batch_size=n_batch, validation_data=(testX, testY), verbose=2)#,class_weight=weights) - history = model.fit( generate_data_from_folder_training(dir_train, n_batch , input_height, input_width, n_classes), steps_per_epoch=num_rows / n_batch, verbose=0)#,class_weight=weights) + history = model.fit( generate_data_from_folder_training(dir_train, n_batch , input_height, input_width, n_classes, list_classes), steps_per_epoch=num_rows / n_batch, verbose=0)#,class_weight=weights) y_pr_class = [] for jj in range(testY.shape[0]): @@ -301,10 +304,6 @@ def run(_config, n_classes, n_epochs, input_height, score_best[0]=f1score model.save(os.path.join(dir_output,'model_best')) - - ##best_model=keras.models.clone_model(model) - ##best_model.build() - ##best_model.set_weights(model.get_weights()) if f1score > f1_threshold_classification: weights.append(model.get_weights() ) y_tot=y_tot+y_pr @@ -329,4 +328,9 @@ def run(_config, n_classes, n_epochs, input_height, ##best_model.save('model_taza.h5') model_weight_averaged.save(os.path.join(dir_output,'model_ens_avg')) + with open(os.path.join( os.path.join(dir_output,'model_ens_avg'), "config.json"), "w") as fp: + json.dump(_config, fp) # encode dict into JSON + + with open(os.path.join( os.path.join(dir_output,'model_best'), "config.json"), "w") as fp: + json.dump(_config, fp) # encode dict into JSON diff --git a/train/utils.py b/train/utils.py index 0c5a458..3a0375a 100644 --- a/train/utils.py +++ b/train/utils.py @@ -21,14 +21,14 @@ def return_number_of_total_training_data(path_classes): -def generate_data_from_folder_evaluation(path_classes, height, width, n_classes): - sub_classes = os.listdir(path_classes) +def generate_data_from_folder_evaluation(path_classes, height, width, n_classes, list_classes): + #sub_classes = os.listdir(path_classes) #n_classes = len(sub_classes) all_imgs = [] labels = [] - dicts =dict() - indexer= 0 - for sub_c in sub_classes: + #dicts =dict() + #indexer= 0 + for indexer, sub_c in enumerate(list_classes): sub_files = os.listdir(os.path.join(path_classes,sub_c )) sub_files = [os.path.join(path_classes,sub_c )+'/' + x for x in sub_files] #print( os.listdir(os.path.join(path_classes,sub_c )) ) @@ -37,8 +37,8 @@ def generate_data_from_folder_evaluation(path_classes, height, width, n_classes) #print( len(sub_labels) ) labels = labels + sub_labels - dicts[sub_c] = indexer - indexer +=1 + #dicts[sub_c] = indexer + #indexer +=1 categories = to_categorical(range(n_classes)).astype(np.int16)#[ [1 , 0, 0 , 0 , 0 , 0] , [0 , 1, 0 , 0 , 0 , 0] , [0 , 0, 1 , 0 , 0 , 0] , [0 , 0, 0 , 1 , 0 , 0] , [0 , 0, 0 , 0 , 1 , 0] , [0 , 0, 0 , 0 , 0 , 1] ] @@ -64,15 +64,15 @@ def generate_data_from_folder_evaluation(path_classes, height, width, n_classes) return ret_x/255., ret_y -def generate_data_from_folder_training(path_classes, batchsize, height, width, n_classes): - sub_classes = os.listdir(path_classes) - n_classes = len(sub_classes) +def generate_data_from_folder_training(path_classes, batchsize, height, width, n_classes, list_classes): + #sub_classes = os.listdir(path_classes) + #n_classes = len(sub_classes) all_imgs = [] labels = [] - dicts =dict() - indexer= 0 - for sub_c in sub_classes: + #dicts =dict() + #indexer= 0 + for indexer, sub_c in enumerate(list_classes): sub_files = os.listdir(os.path.join(path_classes,sub_c )) sub_files = [os.path.join(path_classes,sub_c )+'/' + x for x in sub_files] #print( os.listdir(os.path.join(path_classes,sub_c )) ) @@ -81,8 +81,8 @@ def generate_data_from_folder_training(path_classes, batchsize, height, width, n #print( len(sub_labels) ) labels = labels + sub_labels - dicts[sub_c] = indexer - indexer +=1 + #dicts[sub_c] = indexer + #indexer +=1 ids = np.array(range(len(labels))) random.shuffle(ids) From bc2ca7180208a780d2d34710b66bac379a096385 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 7 May 2024 16:24:12 +0200 Subject: [PATCH 043/492] modifications --- train/inference.py | 108 +++++++-------------------------------------- 1 file changed, 17 insertions(+), 91 deletions(-) diff --git a/train/inference.py b/train/inference.py index 6911bea..94e318d 100644 --- a/train/inference.py +++ b/train/inference.py @@ -1,25 +1,16 @@ -#! /usr/bin/env python3 - -__version__= '1.0' - -import argparse import sys import os import numpy as np import warnings -import xml.etree.ElementTree as et -import pandas as pd -from tqdm import tqdm -import csv import cv2 import seaborn as sns -import matplotlib.pyplot as plt from tensorflow.keras.models import load_model import tensorflow as tf from tensorflow.keras import backend as K from tensorflow.keras import layers import tensorflow.keras.losses from tensorflow.keras.layers import * +from models import * import click import json from tensorflow.python.keras import backend as tensorflow_backend @@ -37,70 +28,13 @@ __doc__=\ Tool to load model and predict for given image. """ -projection_dim = 64 -patch_size = 1 -num_patches =28*28 -class Patches(layers.Layer): - def __init__(self, **kwargs): - super(Patches, self).__init__() - self.patch_size = patch_size - - def call(self, images): - print(tf.shape(images)[1],'images') - print(self.patch_size,'self.patch_size') - batch_size = tf.shape(images)[0] - patches = tf.image.extract_patches( - images=images, - sizes=[1, self.patch_size, self.patch_size, 1], - strides=[1, self.patch_size, self.patch_size, 1], - rates=[1, 1, 1, 1], - padding="VALID", - ) - patch_dims = patches.shape[-1] - print(patches.shape,patch_dims,'patch_dims') - patches = tf.reshape(patches, [batch_size, -1, patch_dims]) - return patches - def get_config(self): - - config = super().get_config().copy() - config.update({ - 'patch_size': self.patch_size, - }) - return config - - -class PatchEncoder(layers.Layer): - def __init__(self, **kwargs): - super(PatchEncoder, self).__init__() - self.num_patches = num_patches - self.projection = layers.Dense(units=projection_dim) - self.position_embedding = layers.Embedding( - input_dim=num_patches, output_dim=projection_dim - ) - - def call(self, patch): - positions = tf.range(start=0, limit=self.num_patches, delta=1) - encoded = self.projection(patch) + self.position_embedding(positions) - return encoded - def get_config(self): - - config = super().get_config().copy() - config.update({ - 'num_patches': self.num_patches, - 'projection': self.projection, - 'position_embedding': self.position_embedding, - }) - return config - - class sbb_predict: - def __init__(self,image, model, task, config_params_model, patches='false',save='false', ground_truth=None,weights_dir=None ): + def __init__(self,image, model, task, config_params_model, patches, save, ground_truth): self.image=image self.patches=patches self.save=save self.model_dir=model self.ground_truth=ground_truth - self.weights_dir=weights_dir self.task=task self.config_params_model=config_params_model @@ -426,16 +360,12 @@ class sbb_predict: pass else: img_seg_overlayed = self.visualize_model_output(res, self.img_org, self.task) - cv2.imwrite('./test.png',img_seg_overlayed) - ##if self.save!=None: - ##img=np.repeat(res[:, :, np.newaxis]*255, 3, axis=2) - ##cv2.imwrite(self.save,img) - - ###if self.ground_truth!=None: - ###gt_img=cv2.imread(self.ground_truth) - ###self.IoU(gt_img[:,:,0],res) - ##plt.imshow(res) - ##plt.show() + if self.save: + cv2.imwrite(self.save,img_seg_overlayed) + + if self.ground_truth: + gt_img=cv2.imread(self.ground_truth) + self.IoU(gt_img[:,:,0],res[:,:,0]) @click.command() @click.option( @@ -463,23 +393,19 @@ class sbb_predict: required=True, ) @click.option( - "--ground_truth/--no-ground_truth", - "-gt/-nogt", - is_flag=True, + "--ground_truth", + "-gt", help="ground truth directory if you want to see the iou of prediction.", ) -@click.option( - "--model_weights/--no-model_weights", - "-mw/-nomw", - is_flag=True, - help="previous model weights which are saved.", -) -def main(image, model, patches, save, ground_truth, model_weights): - +def main(image, model, patches, save, ground_truth): with open(os.path.join(model,'config.json')) as f: config_params_model = json.load(f) - task = 'classification' - x=sbb_predict(image, model, task, config_params_model, patches, save, ground_truth, model_weights) + task = config_params_model['task'] + if task != 'classification': + if not save: + print("Error: You used one of segmentation or binarization task but not set -s, you need a filename to save visualized output with -s") + sys.exit(1) + x=sbb_predict(image, model, task, config_params_model, patches, save, ground_truth) x.run() if __name__=="__main__": From 241cb907cbb691988866011fdad5af12eb4986ae Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 8 May 2024 14:47:16 +0200 Subject: [PATCH 044/492] Update train.py avoid ensembling if no model weights met the threshold f1 score in the case of classification --- train/train.py | 46 +++++++++++++--------------------------------- 1 file changed, 13 insertions(+), 33 deletions(-) diff --git a/train/train.py b/train/train.py index 28363d2..78974d3 100644 --- a/train/train.py +++ b/train/train.py @@ -268,36 +268,26 @@ def run(_config, n_classes, n_epochs, input_height, list_classes = list(classification_classes_name.values()) testX, testY = generate_data_from_folder_evaluation(dir_eval, input_height, input_width, n_classes, list_classes) - - #print(testY.shape, testY) y_tot=np.zeros((testX.shape[0],n_classes)) - indexer=0 score_best=[] score_best.append(0) num_rows = return_number_of_total_training_data(dir_train) - weights=[] for i in range(n_epochs): - #history = model.fit(trainX, trainY, epochs=1, batch_size=n_batch, validation_data=(testX, testY), verbose=2)#,class_weight=weights) - history = model.fit( generate_data_from_folder_training(dir_train, n_batch , input_height, input_width, n_classes, list_classes), steps_per_epoch=num_rows / n_batch, verbose=0)#,class_weight=weights) + history = model.fit( generate_data_from_folder_training(dir_train, n_batch , input_height, input_width, n_classes, list_classes), steps_per_epoch=num_rows / n_batch, verbose=1)#,class_weight=weights) y_pr_class = [] for jj in range(testY.shape[0]): y_pr=model.predict(testX[jj,:,:,:].reshape(1,input_height,input_width,3), verbose=0) y_pr_ind= np.argmax(y_pr,axis=1) - #print(y_pr_ind, 'y_pr_ind') y_pr_class.append(y_pr_ind) - y_pr_class = np.array(y_pr_class) - #model.save('./models_save/model_'+str(i)+'.h5') - #y_pr_class=np.argmax(y_pr,axis=1) f1score=f1_score(np.argmax(testY,axis=1), y_pr_class, average='macro') - print(i,f1score) if f1score>score_best[0]: @@ -306,30 +296,20 @@ def run(_config, n_classes, n_epochs, input_height, if f1score > f1_threshold_classification: weights.append(model.get_weights() ) - y_tot=y_tot+y_pr - indexer+=1 - y_tot=y_tot/float(indexer) - - new_weights=list() - - for weights_list_tuple in zip(*weights): - new_weights.append( [np.array(weights_).mean(axis=0) for weights_ in zip(*weights_list_tuple)] ) - - new_weights = [np.array(x) for x in new_weights] - - model_weight_averaged=tf.keras.models.clone_model(model) - - model_weight_averaged.set_weights(new_weights) - - #y_tot_end=np.argmax(y_tot,axis=1) - #print(f1_score(np.argmax(testY,axis=1), y_tot_end, average='macro')) - - ##best_model.save('model_taza.h5') - model_weight_averaged.save(os.path.join(dir_output,'model_ens_avg')) - with open(os.path.join( os.path.join(dir_output,'model_ens_avg'), "config.json"), "w") as fp: - json.dump(_config, fp) # encode dict into JSON + if len(weights) >= 1: + new_weights=list() + for weights_list_tuple in zip(*weights): + new_weights.append( [np.array(weights_).mean(axis=0) for weights_ in zip(*weights_list_tuple)] ) + + new_weights = [np.array(x) for x in new_weights] + model_weight_averaged=tf.keras.models.clone_model(model) + model_weight_averaged.set_weights(new_weights) + + model_weight_averaged.save(os.path.join(dir_output,'model_ens_avg')) + with open(os.path.join( os.path.join(dir_output,'model_ens_avg'), "config.json"), "w") as fp: + json.dump(_config, fp) # encode dict into JSON with open(os.path.join( os.path.join(dir_output,'model_best'), "config.json"), "w") as fp: json.dump(_config, fp) # encode dict into JSON From d277ec4b31dd28a3da3d38e9f9fd37b5c3e17fb2 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Sun, 12 May 2024 08:32:28 +0200 Subject: [PATCH 045/492] Update utils.py --- train/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/train/utils.py b/train/utils.py index 3a0375a..271d977 100644 --- a/train/utils.py +++ b/train/utils.py @@ -9,6 +9,7 @@ from tqdm import tqdm import imutils import math from tensorflow.keras.utils import to_categorical +from PIL import Image, ImageEnhance def return_number_of_total_training_data(path_classes): From d6a057ba702f31c03db0401ab97fcd1a444b89a0 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 16 May 2024 15:03:23 +0200 Subject: [PATCH 046/492] adding page xml to label generator --- train/pagexml2label.py | 1009 ++++++++++++++++++++++++++++++++++++++++ train/requirements.txt | 1 + 2 files changed, 1010 insertions(+) create mode 100644 train/pagexml2label.py diff --git a/train/pagexml2label.py b/train/pagexml2label.py new file mode 100644 index 0000000..715f99f --- /dev/null +++ b/train/pagexml2label.py @@ -0,0 +1,1009 @@ +import click +import sys +import os +import numpy as np +import warnings +import xml.etree.ElementTree as ET +from tqdm import tqdm +import cv2 +from shapely import geometry + +with warnings.catch_warnings(): + warnings.simplefilter("ignore") + +__doc__=\ +""" +tool to extract 2d or 3d RGB images from page xml data. In former case output will be 1 +2D image array which each class has filled with a pixel value. In the case of 3D RGB image +each class will be defined with a RGB value and beside images a text file of classes also will be produced. +This classes.txt file is required for dhsegment tool. +""" +KERNEL = np.ones((5, 5), np.uint8) + +class pagexml2word: + def __init__(self,dir_in, out_dir,output_type,experiment): + self.dir=dir_in + self.output_dir=out_dir + self.output_type=output_type + self.experiment=experiment + + def get_content_of_dir(self): + """ + Listing all ground truth page xml files. All files are needed to have xml format. + """ + + gt_all=os.listdir(self.dir) + self.gt_list=[file for file in gt_all if file.split('.')[ len(file.split('.'))-1 ]=='xml' ] + + def return_parent_contours(self,contours, hierarchy): + contours_parent = [contours[i] for i in range(len(contours)) if hierarchy[0][i][3] == -1] + return contours_parent + def filter_contours_area_of_image_tables(self,image, contours, hierarchy, max_area, min_area): + found_polygons_early = list() + + jv = 0 + for c in contours: + if len(c) < 3: # A polygon cannot have less than 3 points + continue + + polygon = geometry.Polygon([point[0] for point in c]) + # area = cv2.contourArea(c) + area = polygon.area + ##print(np.prod(thresh.shape[:2])) + # Check that polygon has area greater than minimal area + # print(hierarchy[0][jv][3],hierarchy ) + if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 : + # print(c[0][0][1]) + found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.int32)) + jv += 1 + return found_polygons_early + + def return_contours_of_interested_region(self,region_pre_p, pixel, min_area=0.0002): + + # pixels of images are identified by 5 + if len(region_pre_p.shape) == 3: + cnts_images = (region_pre_p[:, :, 0] == pixel) * 1 + else: + cnts_images = (region_pre_p[:, :] == pixel) * 1 + cnts_images = cnts_images.astype(np.uint8) + cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) + imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + + contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + contours_imgs = self.return_parent_contours(contours_imgs, hierarchy) + contours_imgs = self.filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=min_area) + + return contours_imgs + + def get_images_of_ground_truth(self): + """ + Reading the page xml files and write the ground truth images into given output directory. + """ + for index in tqdm(range(len(self.gt_list))): + #try: + tree1 = ET.parse(self.dir+'/'+self.gt_list[index]) + root1=tree1.getroot() + alltags=[elem.tag for elem in root1.iter()] + link=alltags[0].split('}')[0]+'}' + + + + for jj in root1.iter(link+'Page'): + y_len=int(jj.attrib['imageHeight']) + x_len=int(jj.attrib['imageWidth']) + if self.experiment=='word': + region_tags=np.unique([x for x in alltags if x.endswith('Word')]) + co_word=[] + + for tag in region_tags: + if tag.endswith('}Word') or tag.endswith('}word'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_word.append(np.array(c_t_in)) + + img = np.zeros( (y_len,x_len, 3) ) + if self.output_type == '2d': + img_poly=cv2.fillPoly(img, pts =co_word, color=(1,1,1)) + elif self.output_type == '3d': + img_poly=cv2.fillPoly(img, pts =co_word, color=(255,0,0)) + + try: + cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) + except: + cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png',img_poly ) + + + elif self.experiment=='glyph': + region_tags=np.unique([x for x in alltags if x.endswith('Glyph')]) + co_glyph=[] + + for tag in region_tags: + if tag.endswith('}Glyph') or tag.endswith('}glyph'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_glyph.append(np.array(c_t_in)) + + img = np.zeros( (y_len,x_len, 3) ) + if self.output_type == '2d': + img_poly=cv2.fillPoly(img, pts =co_glyph, color=(1,1,1)) + elif self.output_type == '3d': + img_poly=cv2.fillPoly(img, pts =co_glyph, color=(255,0,0)) + + try: + cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) + except: + cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png',img_poly ) + + elif self.experiment=='textline': + region_tags=np.unique([x for x in alltags if x.endswith('TextLine')]) + co_line=[] + + for tag in region_tags: + if tag.endswith('}TextLine') or tag.endswith('}textline'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_line.append(np.array(c_t_in)) + + img = np.zeros( (y_len,x_len, 3) ) + if self.output_type == '2d': + img_poly=cv2.fillPoly(img, pts =co_line, color=(1,1,1)) + elif self.output_type == '3d': + img_poly=cv2.fillPoly(img, pts =co_line, color=(255,0,0)) + + try: + cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) + except: + cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png',img_poly ) + + elif self.experiment=='layout_for_main_regions': + region_tags=np.unique([x for x in alltags if x.endswith('Region')]) + #print(region_tags) + co_text=[] + co_sep=[] + co_img=[] + #co_graphic=[] + + for tag in region_tags: + if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_text.append(np.array(c_t_in)) + + elif tag.endswith('}ImageRegion') or tag.endswith('}GraphicRegion') or tag.endswith('}imageregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_img.append(np.array(c_t_in)) + + elif tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_sep.append(np.array(c_t_in)) + + + + img = np.zeros( (y_len,x_len,3) ) + + if self.output_type == '3d': + img_poly=cv2.fillPoly(img, pts =co_text, color=(255,0,0)) + img_poly=cv2.fillPoly(img, pts =co_img, color=(0,255,0)) + img_poly=cv2.fillPoly(img, pts =co_sep, color=(0,0,255)) + ##img_poly=cv2.fillPoly(img, pts =co_graphic, color=(255,125,125)) + elif self.output_type == '2d': + img_poly=cv2.fillPoly(img, pts =co_text, color=(1,1,1)) + img_poly=cv2.fillPoly(img, pts =co_img, color=(2,2,2)) + img_poly=cv2.fillPoly(img, pts =co_sep, color=(3,3,3)) + + try: + cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) + except: + cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png',img_poly ) + + elif self.experiment=='textregion': + region_tags=np.unique([x for x in alltags if x.endswith('TextRegion')]) + co_textregion=[] + + for tag in region_tags: + if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_textregion.append(np.array(c_t_in)) + + img = np.zeros( (y_len,x_len,3) ) + if self.output_type == '3d': + img_poly=cv2.fillPoly(img, pts =co_textregion, color=(255,0,0)) + elif self.output_type == '2d': + img_poly=cv2.fillPoly(img, pts =co_textregion, color=(1,1,1)) + + + try: + cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) + except: + cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png',img_poly ) + + elif self.experiment=='layout': + region_tags=np.unique([x for x in alltags if x.endswith('Region')]) + + co_text_paragraph=[] + co_text_drop=[] + co_text_heading=[] + co_text_header=[] + co_text_marginalia=[] + co_text_catch=[] + co_text_page_number=[] + co_text_signature_mark=[] + co_sep=[] + co_img=[] + co_table=[] + co_graphic=[] + co_graphic_text_annotation=[] + co_graphic_decoration=[] + co_noise=[] + + for tag in region_tags: + if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): + for nn in root1.iter(tag): + c_t_in_drop=[] + c_t_in_paragraph=[] + c_t_in_heading=[] + c_t_in_header=[] + c_t_in_page_number=[] + c_t_in_signature_mark=[] + c_t_in_catch=[] + c_t_in_marginalia=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + + coords=bool(vv.attrib) + if coords: + #print('birda1') + p_h=vv.attrib['points'].split(' ') + + + + if "type" in nn.attrib and nn.attrib['type']=='drop-capital': + #if nn.attrib['type']=='paragraph': + + c_t_in_drop.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + elif "type" in nn.attrib and nn.attrib['type']=='heading': + c_t_in_heading.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + + elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': + + c_t_in_signature_mark.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + #print(c_t_in_paragraph) + elif "type" in nn.attrib and nn.attrib['type']=='header': + c_t_in_header.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + + elif "type" in nn.attrib and nn.attrib['type']=='catch-word': + c_t_in_catch.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + + elif "type" in nn.attrib and nn.attrib['type']=='page-number': + + c_t_in_page_number.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + #print(c_t_in_paragraph) + + elif "type" in nn.attrib and nn.attrib['type']=='marginalia': + + c_t_in_marginalia.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + #print(c_t_in_paragraph) + else: + + c_t_in_paragraph.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + #print(c_t_in_paragraph) + + break + else: + pass + + + if vv.tag==link+'Point': + if "type" in nn.attrib and nn.attrib['type']=='drop-capital': + #if nn.attrib['type']=='paragraph': + + c_t_in_drop.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + elif "type" in nn.attrib and nn.attrib['type']=='heading': + c_t_in_heading.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + + elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': + + c_t_in_signature_mark.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + #print(c_t_in_paragraph) + sumi+=1 + elif "type" in nn.attrib and nn.attrib['type']=='header': + c_t_in_header.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + + elif "type" in nn.attrib and nn.attrib['type']=='catch-word': + c_t_in_catch.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + + elif "type" in nn.attrib and nn.attrib['type']=='page-number': + + c_t_in_page_number.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + #print(c_t_in_paragraph) + sumi+=1 + + elif "type" in nn.attrib and nn.attrib['type']=='marginalia': + + c_t_in_marginalia.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + #print(c_t_in_paragraph) + sumi+=1 + + else: + c_t_in_paragraph.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + #print(c_t_in_paragraph) + sumi+=1 + + #c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + + if len(c_t_in_drop)>0: + co_text_drop.append(np.array(c_t_in_drop)) + if len(c_t_in_paragraph)>0: + co_text_paragraph.append(np.array(c_t_in_paragraph)) + if len(c_t_in_heading)>0: + co_text_heading.append(np.array(c_t_in_heading)) + + if len(c_t_in_header)>0: + co_text_header.append(np.array(c_t_in_header)) + if len(c_t_in_page_number)>0: + co_text_page_number.append(np.array(c_t_in_page_number)) + if len(c_t_in_catch)>0: + co_text_catch.append(np.array(c_t_in_catch)) + + if len(c_t_in_signature_mark)>0: + co_text_signature_mark.append(np.array(c_t_in_signature_mark)) + + if len(c_t_in_marginalia)>0: + co_text_marginalia.append(np.array(c_t_in_marginalia)) + + + elif tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + c_t_in_text_annotation=[] + c_t_in_decoration=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + #c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': + #if nn.attrib['type']=='paragraph': + + c_t_in_text_annotation.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + elif "type" in nn.attrib and nn.attrib['type']=='decoration': + + c_t_in_decoration.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + #print(c_t_in_paragraph) + else: + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + + + break + else: + pass + + + if vv.tag==link+'Point': + + if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': + #if nn.attrib['type']=='paragraph': + + c_t_in_text_annotation.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + elif "type" in nn.attrib and nn.attrib['type']=='decoration': + + c_t_in_decoration.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + #print(c_t_in_paragraph) + sumi+=1 + else: + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if len(c_t_in_text_annotation)>0: + co_graphic_text_annotation.append(np.array(c_t_in_text_annotation)) + if len(c_t_in_decoration)>0: + co_graphic_decoration.append(np.array(c_t_in_decoration)) + if len(c_t_in)>0: + co_graphic.append(np.array(c_t_in)) + + + + elif tag.endswith('}ImageRegion') or tag.endswith('}imageregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_img.append(np.array(c_t_in)) + + elif tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_sep.append(np.array(c_t_in)) + + + + elif tag.endswith('}TableRegion') or tag.endswith('}tableregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_table.append(np.array(c_t_in)) + + elif tag.endswith('}NoiseRegion') or tag.endswith('}noiseregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_noise.append(np.array(c_t_in)) + + + img = np.zeros( (y_len,x_len,3) ) + + if self.output_type == '3d': + img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=(255,0,0)) + + img_poly=cv2.fillPoly(img, pts =co_text_heading, color=(255,125,0)) + img_poly=cv2.fillPoly(img, pts =co_text_header, color=(255,0,125)) + img_poly=cv2.fillPoly(img, pts =co_text_catch, color=(125,255,125)) + img_poly=cv2.fillPoly(img, pts =co_text_signature_mark, color=(125,125,0)) + img_poly=cv2.fillPoly(img, pts =co_graphic_decoration, color=(0,125,255)) + img_poly=cv2.fillPoly(img, pts =co_text_page_number, color=(0,125,0)) + img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=(125,125,125)) + img_poly=cv2.fillPoly(img, pts =co_text_drop, color=(0,125,255)) + + img_poly=cv2.fillPoly(img, pts =co_graphic_text_annotation, color=(125,0,125)) + img_poly=cv2.fillPoly(img, pts =co_img, color=(0,255,0)) + img_poly=cv2.fillPoly(img, pts =co_sep, color=(0,0,255)) + img_poly=cv2.fillPoly(img, pts =co_table, color=(0,255,255)) + img_poly=cv2.fillPoly(img, pts =co_graphic, color=(255,125,125)) + img_poly=cv2.fillPoly(img, pts =co_noise, color=(255,0,255)) + elif self.output_type == '2d': + img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=(1,1,1)) + + img_poly=cv2.fillPoly(img, pts =co_text_heading, color=(2,2,2)) + img_poly=cv2.fillPoly(img, pts =co_text_header, color=(2,2,2)) + img_poly=cv2.fillPoly(img, pts =co_text_catch, color=(3,3,3)) + img_poly=cv2.fillPoly(img, pts =co_text_signature_mark, color=(4,4,4)) + img_poly=cv2.fillPoly(img, pts =co_graphic_decoration, color=(5,5,5)) + img_poly=cv2.fillPoly(img, pts =co_text_page_number, color=(6,6,6)) + img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=(7,7,7)) + img_poly=cv2.fillPoly(img, pts =co_text_drop, color=(8,8,8)) + + img_poly=cv2.fillPoly(img, pts =co_graphic_text_annotation, color=(9,9,9)) + img_poly=cv2.fillPoly(img, pts =co_img, color=(10,10,10)) + img_poly=cv2.fillPoly(img, pts =co_sep, color=(11,11,11)) + img_poly=cv2.fillPoly(img, pts =co_table, color=(12,12,12)) + img_poly=cv2.fillPoly(img, pts =co_graphic, color=(13,13,14)) + img_poly=cv2.fillPoly(img, pts =co_noise, color=(15,15,15)) + + try: + cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) + except: + cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png',img_poly ) + + + elif self.experiment=='layout_for_main_regions_new_concept': + region_tags=np.unique([x for x in alltags if x.endswith('Region')]) + #print(region_tags) + co_text=[] + co_sep=[] + co_img=[] + co_drop = [] + co_graphic=[] + co_table = [] + + for tag in region_tags: + if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + c_t_in_drop = [] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + if "type" in nn.attrib and nn.attrib['type']=='drop-capital': + c_t_in_drop.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + else: + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + if "type" in nn.attrib and nn.attrib['type']=='drop-capital': + c_t_in_drop.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + else: + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + if len(c_t_in)>0: + co_text.append(np.array(c_t_in)) + if len(c_t_in_drop)>0: + co_drop.append(np.array(c_t_in_drop)) + + elif tag.endswith('}ImageRegion') or tag.endswith('}GraphicRegion') or tag.endswith('}imageregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_img.append(np.array(c_t_in)) + + elif tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_sep.append(np.array(c_t_in)) + + elif tag.endswith('}TableRegion') or tag.endswith('}tableregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_table.append(np.array(c_t_in)) + + img_boundary = np.zeros( (y_len,x_len) ) + + + co_text_eroded = [] + for con in co_text: + #try: + img_boundary_in = np.zeros( (y_len,x_len) ) + img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) + #print('bidiahhhhaaa') + + + + #img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=7)#asiatica + img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=2) + + pixel = 1 + min_size = 0 + con_eroded = self.return_contours_of_interested_region(img_boundary_in,pixel, min_size ) + + try: + co_text_eroded.append(con_eroded[0]) + except: + co_text_eroded.append(con) + + img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=4) + #img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=5) + + boundary = img_boundary_in_dilated[:,:] - img_boundary_in[:,:] + + img_boundary[:,:][boundary[:,:]==1] =1 + + + ###co_table_eroded = [] + ###for con in co_table: + ####try: + ###img_boundary_in = np.zeros( (y_len,x_len) ) + ###img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) + ####print('bidiahhhhaaa') + + + + #####img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=7)#asiatica + ###img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=2) + + ###pixel = 1 + ###min_size = 0 + ###con_eroded = self.return_contours_of_interested_region(img_boundary_in,pixel, min_size ) + + ###try: + ###co_table_eroded.append(con_eroded[0]) + ###except: + ###co_table_eroded.append(con) + + ###img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=4) + + ###boundary = img_boundary_in_dilated[:,:] - img_boundary_in[:,:] + + ###img_boundary[:,:][boundary[:,:]==1] =1 + #except: + #pass + + #for con in co_img: + #img_boundary_in = np.zeros( (y_len,x_len) ) + #img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) + #img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=3) + + #boundary = img_boundary_in_dilated[:,:] - img_boundary_in[:,:] + + #img_boundary[:,:][boundary[:,:]==1] =1 + + + #for con in co_sep: + + #img_boundary_in = np.zeros( (y_len,x_len) ) + #img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) + #img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=3) + + #boundary = img_boundary_in_dilated[:,:] - img_boundary_in[:,:] + + img_boundary[:,:][boundary[:,:]==1] =1 + for con in co_drop: + img_boundary_in = np.zeros( (y_len,x_len) ) + img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) + img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=3) + + boundary = img_boundary_in_dilated[:,:] - img_boundary_in[:,:] + + img_boundary[:,:][boundary[:,:]==1] =1 + + + img = np.zeros( (y_len,x_len,3) ) + + if self.output_type == '2d': + img_poly=cv2.fillPoly(img, pts =co_img, color=(2,2,2)) + + img_poly=cv2.fillPoly(img, pts =co_text_eroded, color=(1,1,1)) + ##img_poly=cv2.fillPoly(img, pts =co_graphic, color=(4,4,4)) + ###img_poly=cv2.fillPoly(img, pts =co_table, color=(1,1,1)) + + img_poly=cv2.fillPoly(img, pts =co_drop, color=(1,1,1)) + img_poly[:,:][img_boundary[:,:]==1] = 4 + img_poly=cv2.fillPoly(img, pts =co_sep, color=(3,3,3)) + elif self.output_type == '3d': + img_poly=cv2.fillPoly(img, pts =co_img, color=(0,255,0)) + img_poly=cv2.fillPoly(img, pts =co_text_eroded, color=(255,0,0)) + img_poly=cv2.fillPoly(img, pts =co_drop, color=(0,125,255)) + + img_poly[:,:,0][img_boundary[:,:]==1]=255 + img_poly[:,:,1][img_boundary[:,:]==1]=125 + img_poly[:,:,2][img_boundary[:,:]==1]=125 + + img_poly=cv2.fillPoly(img, pts =co_sep, color=(0,0,255)) + ##img_poly=cv2.fillPoly(img, pts =co_graphic, color=(255,125,125)) + + #print('yazdimmm',self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png') + try: + #print('yazdimmm',self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png') + cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) + except: + cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png',img_poly ) + + + + #except: + #pass + def run(self): + self.get_content_of_dir() + self.get_images_of_ground_truth() + + +@click.command() +@click.option( + "--dir_xml", + "-dx", + help="directory of GT page-xml files", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--dir_out", + "-do", + help="directory where ground truth images would be written", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--type_output", + "-to", + help="this defines how output should be. A 2d image array or a 3d image array encoded with RGB color. Just pass 2d or 3d. The file will be saved one directory up. 2D image array is 3d but only information of one channel would be enough since all channels have the same values.", +) +@click.option( + "--experiment", + "-exp", + help="experiment of ineterst. Word , textline , glyph and textregion are desired options.", +) + +def main(dir_xml,dir_out,type_output,experiment): + x=pagexml2word(dir_xml,dir_out,type_output,experiment) + x.run() +if __name__=="__main__": + main() + + + diff --git a/train/requirements.txt b/train/requirements.txt index 3e56438..efee9df 100644 --- a/train/requirements.txt +++ b/train/requirements.txt @@ -7,3 +7,4 @@ imutils numpy scipy scikit-learn +shapely From faeac997e15c3dd824a029e8e798fc3e7a262a8c Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 17 May 2024 09:10:13 +0200 Subject: [PATCH 047/492] page to label enable textline new concept --- train/pagexml2label.py | 73 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/train/pagexml2label.py b/train/pagexml2label.py index 715f99f..b094e9b 100644 --- a/train/pagexml2label.py +++ b/train/pagexml2label.py @@ -217,6 +217,79 @@ class pagexml2word: except: cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png',img_poly ) + elif self.experiment == 'textline_new_concept': + region_tags = np.unique([x for x in alltags if x.endswith('TextLine')]) + co_line = [] + + for tag in region_tags: + if tag.endswith('}TextLine') or tag.endswith('}textline'): + # print('sth') + for nn in root1.iter(tag): + c_t_in = [] + sumi = 0 + for vv in nn.iter(): + # check the format of coords + if vv.tag == link + 'Coords': + coords = bool(vv.attrib) + if coords: + p_h = vv.attrib['points'].split(' ') + c_t_in.append( + np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) + break + else: + pass + + if vv.tag == link + 'Point': + c_t_in.append([int(np.float(vv.attrib['x'])), int(np.float(vv.attrib['y']))]) + sumi += 1 + # print(vv.tag,'in') + elif vv.tag != link + 'Point' and sumi >= 1: + break + co_line.append(np.array(c_t_in)) + + img_boundary = np.zeros((y_len, x_len)) + co_textline_eroded = [] + for con in co_line: + # try: + img_boundary_in = np.zeros((y_len, x_len)) + img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) + # print('bidiahhhhaaa') + + # img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=7)#asiatica + img_boundary_in = cv2.erode(img_boundary_in[:, :], KERNEL, iterations=1) + + pixel = 1 + min_size = 0 + con_eroded = self.return_contours_of_interested_region(img_boundary_in, pixel, min_size) + + try: + co_textline_eroded.append(con_eroded[0]) + except: + co_textline_eroded.append(con) + + img_boundary_in_dilated = cv2.dilate(img_boundary_in[:, :], KERNEL, iterations=3) + # img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=5) + + boundary = img_boundary_in_dilated[:, :] - img_boundary_in[:, :] + + img_boundary[:, :][boundary[:, :] == 1] = 1 + + img = np.zeros((y_len, x_len, 3)) + if self.output_type == '2d': + img_poly = cv2.fillPoly(img, pts=co_textline_eroded, color=(1, 1, 1)) + img_poly[:, :][img_boundary[:, :] == 1] = 2 + elif self.output_type == '3d': + img_poly = cv2.fillPoly(img, pts=co_textline_eroded, color=(255, 0, 0)) + img_poly[:, :, 0][img_boundary[:, :] == 1] = 255 + img_poly[:, :, 1][img_boundary[:, :] == 1] = 125 + img_poly[:, :, 2][img_boundary[:, :] == 1] = 125 + + try: + cv2.imwrite(self.output_dir + '/' + self.gt_list[index].split('-')[1].split('.')[0] + '.png', + img_poly) + except: + cv2.imwrite(self.output_dir + '/' + self.gt_list[index].split('.')[0] + '.png', img_poly) + elif self.experiment=='layout_for_main_regions': region_tags=np.unique([x for x in alltags if x.endswith('Region')]) #print(region_tags) From b2085a1d01ec6a501a6f0752f492ab71f3015723 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 17 May 2024 09:08:25 +0200 Subject: [PATCH 048/492] update requirements --- train/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/train/requirements.txt b/train/requirements.txt index efee9df..d8f9003 100644 --- a/train/requirements.txt +++ b/train/requirements.txt @@ -8,3 +8,4 @@ numpy scipy scikit-learn shapely +click From f1c2913c0394dbb64a5464afc183d3600a222f6b Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 22 May 2024 12:38:24 +0200 Subject: [PATCH 049/492] page2label with a dynamic layout --- train/custom_config_page2label.json | 6 + train/pagexml2label.py | 490 +++++++++++++++++++++++++++- 2 files changed, 479 insertions(+), 17 deletions(-) create mode 100644 train/custom_config_page2label.json diff --git a/train/custom_config_page2label.json b/train/custom_config_page2label.json new file mode 100644 index 0000000..75c4b96 --- /dev/null +++ b/train/custom_config_page2label.json @@ -0,0 +1,6 @@ +{ +"textregions":{"paragraph":1, "heading": 2, "header":2,"drop-capital": 3, "marginal":4 }, +"imageregion":5, +"separatorregion":6, +"graphicregions" :{"handwritten-annotation":7, "decoration": 8, "signature": 9, "stamp": 10} +} diff --git a/train/pagexml2label.py b/train/pagexml2label.py index b094e9b..6907e84 100644 --- a/train/pagexml2label.py +++ b/train/pagexml2label.py @@ -7,6 +7,7 @@ import xml.etree.ElementTree as ET from tqdm import tqdm import cv2 from shapely import geometry +import json with warnings.catch_warnings(): warnings.simplefilter("ignore") @@ -21,11 +22,12 @@ This classes.txt file is required for dhsegment tool. KERNEL = np.ones((5, 5), np.uint8) class pagexml2word: - def __init__(self,dir_in, out_dir,output_type,experiment): + def __init__(self,dir_in, out_dir,output_type,experiment,layout_config): self.dir=dir_in self.output_dir=out_dir self.output_type=output_type self.experiment=experiment + self.layout_config=layout_config def get_content_of_dir(self): """ @@ -77,7 +79,7 @@ class pagexml2word: return contours_imgs - def get_images_of_ground_truth(self): + def get_images_of_ground_truth(self, config_params): """ Reading the page xml files and write the ground truth images into given output directory. """ @@ -93,6 +95,445 @@ class pagexml2word: for jj in root1.iter(link+'Page'): y_len=int(jj.attrib['imageHeight']) x_len=int(jj.attrib['imageWidth']) + + if self.layout_config: + keys = list(config_params.keys()) + #values = config_params.values() + + if 'textregions' in keys: + types_text_dict = config_params['textregions'] + types_text = list(types_text_dict.keys()) + types_text_label = list(types_text_dict.values()) + if 'graphicregions' in keys: + types_graphic_dict = config_params['graphicregions'] + types_graphic = list(types_graphic_dict.keys()) + types_graphic_label = list(types_graphic_dict.values()) + + + types_text_label_rgb = [ (0,0,0), (255,0,0), (255,125,0), (255,0,125), (125,255,125), (125,125,0), (0,125,255), (0,125,0), (125,125,125), (0,125,255), (125,0,125), (0,255,0),(0,0,255), (0,255,255), (255,125,125), (0,125,255), (0,255,125)] + + region_tags=np.unique([x for x in alltags if x.endswith('Region')]) + + co_text_paragraph=[] + co_text_drop=[] + co_text_heading=[] + co_text_header=[] + co_text_marginalia=[] + co_text_catch=[] + co_text_page_number=[] + co_text_signature_mark=[] + co_sep=[] + co_img=[] + co_table=[] + co_graphic_signature=[] + co_graphic_text_annotation=[] + co_graphic_decoration=[] + co_graphic_stamp=[] + co_noise=[] + + for tag in region_tags: + if 'textregions' in keys: + if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): + for nn in root1.iter(tag): + c_t_in_drop=[] + c_t_in_paragraph=[] + c_t_in_heading=[] + c_t_in_header=[] + c_t_in_page_number=[] + c_t_in_signature_mark=[] + c_t_in_catch=[] + c_t_in_marginalia=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + + coords=bool(vv.attrib) + if coords: + #print('birda1') + p_h=vv.attrib['points'].split(' ') + + if "drop-capital" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='drop-capital': + c_t_in_drop.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "heading" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='heading': + c_t_in_heading.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "signature-mark" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='signature-mark': + c_t_in_signature_mark.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "header" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='header': + c_t_in_header.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "catch-word" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='catch-word': + c_t_in_catch.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "page-number" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='page-number': + c_t_in_page_number.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "marginalia" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='marginalia': + c_t_in_marginalia.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "paragraph" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='paragraph': + c_t_in_paragraph.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + + break + else: + pass + + + if vv.tag==link+'Point': + if "drop-capital" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='drop-capital': + c_t_in_drop.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "heading" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='heading': + c_t_in_heading.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "signature-mark" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='signature-mark': + c_t_in_signature_mark.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "header" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='header': + c_t_in_header.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "catch-word" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='catch-word': + c_t_in_catch.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "page-number" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='page-number': + c_t_in_page_number.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "marginalia" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='marginalia': + c_t_in_marginalia.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "paragraph" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='paragraph': + c_t_in_paragraph.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + + elif vv.tag!=link+'Point' and sumi>=1: + break + + if len(c_t_in_drop)>0: + co_text_drop.append(np.array(c_t_in_drop)) + if len(c_t_in_paragraph)>0: + co_text_paragraph.append(np.array(c_t_in_paragraph)) + if len(c_t_in_heading)>0: + co_text_heading.append(np.array(c_t_in_heading)) + + if len(c_t_in_header)>0: + co_text_header.append(np.array(c_t_in_header)) + if len(c_t_in_page_number)>0: + co_text_page_number.append(np.array(c_t_in_page_number)) + if len(c_t_in_catch)>0: + co_text_catch.append(np.array(c_t_in_catch)) + + if len(c_t_in_signature_mark)>0: + co_text_signature_mark.append(np.array(c_t_in_signature_mark)) + + if len(c_t_in_marginalia)>0: + co_text_marginalia.append(np.array(c_t_in_marginalia)) + + + if 'graphicregions' in keys: + if tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in_stamp=[] + c_t_in_text_annotation=[] + c_t_in_decoration=[] + c_t_in_signature=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + if "handwritten-annotation" in types_graphic: + if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': + c_t_in_text_annotation.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "decoration" in types_graphic: + if "type" in nn.attrib and nn.attrib['type']=='decoration': + c_t_in_decoration.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "stamp" in types_graphic: + if "type" in nn.attrib and nn.attrib['type']=='stamp': + c_t_in_stamp.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "signature" in types_graphic: + if "type" in nn.attrib and nn.attrib['type']=='signature': + c_t_in_signature.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + + + break + else: + pass + + + if vv.tag==link+'Point': + if "handwritten-annotation" in types_graphic: + if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': + c_t_in_text_annotation.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "decoration" in types_graphic: + if "type" in nn.attrib and nn.attrib['type']=='decoration': + c_t_in_decoration.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "stamp" in types_graphic: + if "type" in nn.attrib and nn.attrib['type']=='stamp': + c_t_in_stamp.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "signature" in types_graphic: + if "type" in nn.attrib and nn.attrib['type']=='signature': + c_t_in_signature.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if len(c_t_in_text_annotation)>0: + co_graphic_text_annotation.append(np.array(c_t_in_text_annotation)) + if len(c_t_in_decoration)>0: + co_graphic_decoration.append(np.array(c_t_in_decoration)) + if len(c_t_in_stamp)>0: + co_graphic_stamp.append(np.array(c_t_in_stamp)) + if len(c_t_in_signature)>0: + co_graphic_signature.append(np.array(c_t_in_signature)) + + if 'imageregion' in keys: + if tag.endswith('}ImageRegion') or tag.endswith('}imageregion'): + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + elif vv.tag!=link+'Point' and sumi>=1: + break + co_img.append(np.array(c_t_in)) + + + if 'separatorregion' in keys: + if tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + elif vv.tag!=link+'Point' and sumi>=1: + break + co_sep.append(np.array(c_t_in)) + + + + if 'tableregion' in keys: + if tag.endswith('}TableRegion') or tag.endswith('}tableregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_table.append(np.array(c_t_in)) + + if 'noiseregion' in keys: + if tag.endswith('}NoiseRegion') or tag.endswith('}noiseregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_noise.append(np.array(c_t_in)) + + img = np.zeros( (y_len,x_len,3) ) + + if self.output_type == '3d': + + if 'graphicregions' in keys: + if "handwritten-annotation" in types_graphic: + img_poly=cv2.fillPoly(img, pts =co_graphic_text_annotation, color=types_text_label_rgb[ config_params['graphicregions']['handwritten-annotation']]) + if "signature" in types_graphic: + img_poly=cv2.fillPoly(img, pts =co_graphic_signature, color=types_text_label_rgb[ config_params['graphicregions']['signature']]) + if "decoration" in types_graphic: + img_poly=cv2.fillPoly(img, pts =co_graphic_decoration, color=types_text_label_rgb[ config_params['graphicregions']['decoration']]) + if "stamp" in types_graphic: + img_poly=cv2.fillPoly(img, pts =co_graphic_stamp, color=types_text_label_rgb[ config_params['graphicregions']['stamp']]) + + if 'imageregion' in keys: + img_poly=cv2.fillPoly(img, pts =co_img, color=types_text_label_rgb[ config_params['imageregion']]) + if 'separatorregion' in keys: + img_poly=cv2.fillPoly(img, pts =co_sep, color=types_text_label_rgb[ config_params['separatorregion']]) + if 'tableregion' in keys: + img_poly=cv2.fillPoly(img, pts =co_table, color=types_text_label_rgb[ config_params['tableregion']]) + if 'noiseregion' in keys: + img_poly=cv2.fillPoly(img, pts =co_noise, color=types_text_label_rgb[ config_params['noiseregion']]) + + if 'textregions' in keys: + if "paragraph" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=types_text_label_rgb[ config_params['textregions']['paragraph']]) + if "heading" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_heading, color=types_text_label_rgb[ config_params['textregions']['heading']]) + if "header" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_header, color=types_text_label_rgb[ config_params['textregions']['header']]) + if "catch-word" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_catch, color=types_text_label_rgb[ config_params['textregions']['catch-word']]) + if "signature-mark" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_signature_mark, color=types_text_label_rgb[ config_params['textregions']['signature-mark']]) + if "page-number" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_page_number, color=types_text_label_rgb[ config_params['textregions']['page-number']]) + if "marginalia" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=types_text_label_rgb[ config_params['textregions']['marginalia']]) + if "drop-capital" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_drop, color=types_text_label_rgb[ config_params['textregions']['drop-capital']]) + + elif self.output_type == '2d': + if 'graphicregions' in keys: + if "handwritten-annotation" in types_graphic: + color_label = config_params['graphicregions']['handwritten-annotation'] + img_poly=cv2.fillPoly(img, pts =co_graphic_text_annotation, color=(color_label,color_label,color_label)) + if "signature" in types_graphic: + color_label = config_params['graphicregions']['signature'] + img_poly=cv2.fillPoly(img, pts =co_graphic_signature, color=(color_label,color_label,color_label)) + if "decoration" in types_graphic: + color_label = config_params['graphicregions']['decoration'] + img_poly=cv2.fillPoly(img, pts =co_graphic_decoration, color=(color_label,color_label,color_label)) + if "stamp" in types_graphic: + color_label = config_params['graphicregions']['stamp'] + img_poly=cv2.fillPoly(img, pts =co_graphic_stamp, color=(color_label,color_label,color_label)) + + if 'imageregion' in keys: + color_label = config_params['imageregion'] + img_poly=cv2.fillPoly(img, pts =co_img, color=(color_label,color_label,color_label)) + if 'separatorregion' in keys: + color_label = config_params['separatorregion'] + img_poly=cv2.fillPoly(img, pts =co_sep, color=(color_label,color_label,color_label)) + if 'tableregion' in keys: + color_label = config_params['tableregion'] + img_poly=cv2.fillPoly(img, pts =co_table, color=(color_label,color_label,color_label)) + if 'noiseregion' in keys: + color_label = config_params['noiseregion'] + img_poly=cv2.fillPoly(img, pts =co_noise, color=(color_label,color_label,color_label)) + + if 'textregions' in keys: + if "paragraph" in types_text: + color_label = config_params['textregions']['paragraph'] + img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=(color_label,color_label,color_label)) + if "heading" in types_text: + color_label = config_params['textregions']['heading'] + img_poly=cv2.fillPoly(img, pts =co_text_heading, color=(color_label,color_label,color_label)) + if "header" in types_text: + color_label = config_params['textregions']['header'] + img_poly=cv2.fillPoly(img, pts =co_text_header, color=(color_label,color_label,color_label)) + if "catch-word" in types_text: + color_label = config_params['textregions']['catch-word'] + img_poly=cv2.fillPoly(img, pts =co_text_catch, color=(color_label,color_label,color_label)) + if "signature-mark" in types_text: + color_label = config_params['textregions']['signature-mark'] + img_poly=cv2.fillPoly(img, pts =co_text_signature_mark, color=(color_label,color_label,color_label)) + if "page-number" in types_text: + color_label = config_params['textregions']['page-number'] + img_poly=cv2.fillPoly(img, pts =co_text_page_number, color=(color_label,color_label,color_label)) + if "marginalia" in types_text: + color_label = config_params['textregions']['marginalia'] + img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=(color_label,color_label,color_label)) + if "drop-capital" in types_text: + color_label = config_params['textregions']['drop-capital'] + img_poly=cv2.fillPoly(img, pts =co_text_drop, color=(color_label,color_label,color_label)) + + + + + try: + cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) + except: + cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png',img_poly ) + + + #print(values[0]) if self.experiment=='word': region_tags=np.unique([x for x in alltags if x.endswith('Word')]) co_word=[] @@ -302,6 +743,7 @@ class pagexml2word: if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): #print('sth') for nn in root1.iter(tag): + print(nn.attrib['type']) c_t_in=[] sumi=0 for vv in nn.iter(): @@ -373,20 +815,19 @@ class pagexml2word: elif vv.tag!=link+'Point' and sumi>=1: break co_sep.append(np.array(c_t_in)) - - - img = np.zeros( (y_len,x_len,3) ) + img_poly = np.zeros( (y_len,x_len,3) ) + if self.output_type == '3d': - img_poly=cv2.fillPoly(img, pts =co_text, color=(255,0,0)) - img_poly=cv2.fillPoly(img, pts =co_img, color=(0,255,0)) - img_poly=cv2.fillPoly(img, pts =co_sep, color=(0,0,255)) + img_poly=cv2.fillPoly(img_poly, pts =co_text, color=(255,0,0)) + img_poly=cv2.fillPoly(img_poly, pts =co_img, color=(0,255,0)) + img_poly=cv2.fillPoly(img_poly, pts =co_sep, color=(0,0,255)) ##img_poly=cv2.fillPoly(img, pts =co_graphic, color=(255,125,125)) elif self.output_type == '2d': - img_poly=cv2.fillPoly(img, pts =co_text, color=(1,1,1)) - img_poly=cv2.fillPoly(img, pts =co_img, color=(2,2,2)) - img_poly=cv2.fillPoly(img, pts =co_sep, color=(3,3,3)) + img_poly=cv2.fillPoly(img_poly, pts =co_text, color=(1,1,1)) + img_poly=cv2.fillPoly(img_poly, pts =co_img, color=(2,2,2)) + img_poly=cv2.fillPoly(img_poly, pts =co_sep, color=(3,3,3)) try: cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) @@ -752,7 +1193,7 @@ class pagexml2word: img = np.zeros( (y_len,x_len,3) ) - + if self.output_type == '3d': img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=(255,0,0)) @@ -1043,9 +1484,9 @@ class pagexml2word: #except: #pass - def run(self): + def run(self,config_params): self.get_content_of_dir() - self.get_images_of_ground_truth() + self.get_images_of_ground_truth(config_params) @click.command() @@ -1061,6 +1502,14 @@ class pagexml2word: help="directory where ground truth images would be written", type=click.Path(exists=True, file_okay=False), ) + +@click.option( + "--layout_config", + "-lc", + help="experiment of ineterst. Word , textline , glyph and textregion are desired options.", + type=click.Path(exists=True, dir_okay=False), +) + @click.option( "--type_output", "-to", @@ -1072,9 +1521,16 @@ class pagexml2word: help="experiment of ineterst. Word , textline , glyph and textregion are desired options.", ) -def main(dir_xml,dir_out,type_output,experiment): - x=pagexml2word(dir_xml,dir_out,type_output,experiment) - x.run() + +def main(dir_xml,dir_out,type_output,experiment,layout_config): + if layout_config: + with open(layout_config) as f: + config_params = json.load(f) + else: + print("passed") + config_params = None + x=pagexml2word(dir_xml,dir_out,type_output,experiment, layout_config) + x.run(config_params) if __name__=="__main__": main() From 47c6bf6b97db0e8ea9eb3e796cf9261ddaa2e4db Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 23 May 2024 11:14:14 +0200 Subject: [PATCH 050/492] dynamic layout decorated with artificial class on text elements boundry --- train/custom_config_page2label.json | 6 +- train/pagexml2label.py | 117 +++++++++++++++++++++++----- 2 files changed, 103 insertions(+), 20 deletions(-) diff --git a/train/custom_config_page2label.json b/train/custom_config_page2label.json index 75c4b96..85b5d7e 100644 --- a/train/custom_config_page2label.json +++ b/train/custom_config_page2label.json @@ -1,6 +1,8 @@ { -"textregions":{"paragraph":1, "heading": 2, "header":2,"drop-capital": 3, "marginal":4 }, +"textregions":{"paragraph":1, "heading": 2, "header":2,"drop-capital": 3, "marginalia":4 ,"page-number":1 , "catch-word":1 }, "imageregion":5, "separatorregion":6, -"graphicregions" :{"handwritten-annotation":7, "decoration": 8, "signature": 9, "stamp": 10} +"graphicregions" :{"handwritten-annotation":7, "decoration": 8, "signature": 9, "stamp": 10}, +"artificial_class_on_boundry": ["paragraph","header", "heading", "marginalia", "page-number", "catch-word", "drop-capital"], +"artificial_class_label":11 } diff --git a/train/pagexml2label.py b/train/pagexml2label.py index 6907e84..5311c24 100644 --- a/train/pagexml2label.py +++ b/train/pagexml2label.py @@ -78,7 +78,37 @@ class pagexml2word: contours_imgs = self.filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=min_area) return contours_imgs + def update_region_contours(self, co_text, img_boundary, erosion_rate, dilation_rate, y_len, x_len): + co_text_eroded = [] + for con in co_text: + #try: + img_boundary_in = np.zeros( (y_len,x_len) ) + img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) + #print('bidiahhhhaaa') + + + + #img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=7)#asiatica + if erosion_rate > 0: + img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=erosion_rate) + + pixel = 1 + min_size = 0 + con_eroded = self.return_contours_of_interested_region(img_boundary_in,pixel, min_size ) + + try: + co_text_eroded.append(con_eroded[0]) + except: + co_text_eroded.append(con) + + img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=dilation_rate) + #img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=5) + + boundary = img_boundary_in_dilated[:,:] - img_boundary_in[:,:] + + img_boundary[:,:][boundary[:,:]==1] =1 + return co_text_eroded, img_boundary def get_images_of_ground_truth(self, config_params): """ Reading the page xml files and write the ground truth images into given output directory. @@ -98,6 +128,10 @@ class pagexml2word: if self.layout_config: keys = list(config_params.keys()) + if "artificial_class_on_boundry" in keys: + elements_with_artificial_class = list(config_params['artificial_class_on_boundry']) + artificial_class_rgb_color = (255,255,0) + artificial_class_label = config_params['artificial_class_label'] #values = config_params.values() if 'textregions' in keys: @@ -110,7 +144,7 @@ class pagexml2word: types_graphic_label = list(types_graphic_dict.values()) - types_text_label_rgb = [ (0,0,0), (255,0,0), (255,125,0), (255,0,125), (125,255,125), (125,125,0), (0,125,255), (0,125,0), (125,125,125), (0,125,255), (125,0,125), (0,255,0),(0,0,255), (0,255,255), (255,125,125), (0,125,255), (0,255,125)] + labels_rgb_color = [ (0,0,0), (255,0,0), (255,125,0), (255,0,125), (125,255,125), (125,125,0), (0,125,255), (0,125,0), (125,125,125), (255,0,255), (125,0,125), (0,255,0),(0,0,255), (0,255,255), (255,125,125), (0,125,125), (0,255,125)] region_tags=np.unique([x for x in alltags if x.endswith('Region')]) @@ -429,46 +463,90 @@ class pagexml2word: break co_noise.append(np.array(c_t_in)) + if "artificial_class_on_boundry" in keys: + img_boundary = np.zeros( (y_len,x_len) ) + if "paragraph" in elements_with_artificial_class: + erosion_rate = 2 + dilation_rate = 4 + co_text_paragraph, img_boundary = self.update_region_contours(co_text_paragraph, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "drop-capital" in elements_with_artificial_class: + erosion_rate = 0 + dilation_rate = 4 + co_text_drop, img_boundary = self.update_region_contours(co_text_drop, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "catch-word" in elements_with_artificial_class: + erosion_rate = 0 + dilation_rate = 4 + co_text_catch, img_boundary = self.update_region_contours(co_text_catch, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "page-number" in elements_with_artificial_class: + erosion_rate = 0 + dilation_rate = 4 + co_text_page_number, img_boundary = self.update_region_contours(co_text_page_number, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "header" in elements_with_artificial_class: + erosion_rate = 1 + dilation_rate = 4 + co_text_header, img_boundary = self.update_region_contours(co_text_header, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "heading" in elements_with_artificial_class: + erosion_rate = 1 + dilation_rate = 4 + co_text_heading, img_boundary = self.update_region_contours(co_text_heading, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "signature-mark" in elements_with_artificial_class: + erosion_rate = 1 + dilation_rate = 4 + co_text_signature_mark, img_boundary = self.update_region_contours(co_text_signature_mark, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "marginalia" in elements_with_artificial_class: + erosion_rate = 2 + dilation_rate = 4 + co_text_marginalia, img_boundary = self.update_region_contours(co_text_marginalia, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + + img = np.zeros( (y_len,x_len,3) ) if self.output_type == '3d': if 'graphicregions' in keys: if "handwritten-annotation" in types_graphic: - img_poly=cv2.fillPoly(img, pts =co_graphic_text_annotation, color=types_text_label_rgb[ config_params['graphicregions']['handwritten-annotation']]) + img_poly=cv2.fillPoly(img, pts =co_graphic_text_annotation, color=labels_rgb_color[ config_params['graphicregions']['handwritten-annotation']]) if "signature" in types_graphic: - img_poly=cv2.fillPoly(img, pts =co_graphic_signature, color=types_text_label_rgb[ config_params['graphicregions']['signature']]) + img_poly=cv2.fillPoly(img, pts =co_graphic_signature, color=labels_rgb_color[ config_params['graphicregions']['signature']]) if "decoration" in types_graphic: - img_poly=cv2.fillPoly(img, pts =co_graphic_decoration, color=types_text_label_rgb[ config_params['graphicregions']['decoration']]) + img_poly=cv2.fillPoly(img, pts =co_graphic_decoration, color=labels_rgb_color[ config_params['graphicregions']['decoration']]) if "stamp" in types_graphic: - img_poly=cv2.fillPoly(img, pts =co_graphic_stamp, color=types_text_label_rgb[ config_params['graphicregions']['stamp']]) + img_poly=cv2.fillPoly(img, pts =co_graphic_stamp, color=labels_rgb_color[ config_params['graphicregions']['stamp']]) if 'imageregion' in keys: - img_poly=cv2.fillPoly(img, pts =co_img, color=types_text_label_rgb[ config_params['imageregion']]) + img_poly=cv2.fillPoly(img, pts =co_img, color=labels_rgb_color[ config_params['imageregion']]) if 'separatorregion' in keys: - img_poly=cv2.fillPoly(img, pts =co_sep, color=types_text_label_rgb[ config_params['separatorregion']]) + img_poly=cv2.fillPoly(img, pts =co_sep, color=labels_rgb_color[ config_params['separatorregion']]) if 'tableregion' in keys: - img_poly=cv2.fillPoly(img, pts =co_table, color=types_text_label_rgb[ config_params['tableregion']]) + img_poly=cv2.fillPoly(img, pts =co_table, color=labels_rgb_color[ config_params['tableregion']]) if 'noiseregion' in keys: - img_poly=cv2.fillPoly(img, pts =co_noise, color=types_text_label_rgb[ config_params['noiseregion']]) + img_poly=cv2.fillPoly(img, pts =co_noise, color=labels_rgb_color[ config_params['noiseregion']]) if 'textregions' in keys: if "paragraph" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=types_text_label_rgb[ config_params['textregions']['paragraph']]) + img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=labels_rgb_color[ config_params['textregions']['paragraph']]) if "heading" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_heading, color=types_text_label_rgb[ config_params['textregions']['heading']]) + img_poly=cv2.fillPoly(img, pts =co_text_heading, color=labels_rgb_color[ config_params['textregions']['heading']]) if "header" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_header, color=types_text_label_rgb[ config_params['textregions']['header']]) + img_poly=cv2.fillPoly(img, pts =co_text_header, color=labels_rgb_color[ config_params['textregions']['header']]) if "catch-word" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_catch, color=types_text_label_rgb[ config_params['textregions']['catch-word']]) + img_poly=cv2.fillPoly(img, pts =co_text_catch, color=labels_rgb_color[ config_params['textregions']['catch-word']]) if "signature-mark" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_signature_mark, color=types_text_label_rgb[ config_params['textregions']['signature-mark']]) + img_poly=cv2.fillPoly(img, pts =co_text_signature_mark, color=labels_rgb_color[ config_params['textregions']['signature-mark']]) if "page-number" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_page_number, color=types_text_label_rgb[ config_params['textregions']['page-number']]) + img_poly=cv2.fillPoly(img, pts =co_text_page_number, color=labels_rgb_color[ config_params['textregions']['page-number']]) if "marginalia" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=types_text_label_rgb[ config_params['textregions']['marginalia']]) + img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=labels_rgb_color[ config_params['textregions']['marginalia']]) if "drop-capital" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_drop, color=types_text_label_rgb[ config_params['textregions']['drop-capital']]) + img_poly=cv2.fillPoly(img, pts =co_text_drop, color=labels_rgb_color[ config_params['textregions']['drop-capital']]) + + if "artificial_class_on_boundry" in keys: + img_poly[:,:,0][img_boundary[:,:]==1] = artificial_class_rgb_color[0] + img_poly[:,:,1][img_boundary[:,:]==1] = artificial_class_rgb_color[1] + img_poly[:,:,2][img_boundary[:,:]==1] = artificial_class_rgb_color[2] + + + elif self.output_type == '2d': if 'graphicregions' in keys: @@ -523,6 +601,9 @@ class pagexml2word: if "drop-capital" in types_text: color_label = config_params['textregions']['drop-capital'] img_poly=cv2.fillPoly(img, pts =co_text_drop, color=(color_label,color_label,color_label)) + + if "artificial_class_on_boundry" in keys: + img_poly[:,:][img_boundary[:,:]==1] = artificial_class_label @@ -1506,7 +1587,7 @@ class pagexml2word: @click.option( "--layout_config", "-lc", - help="experiment of ineterst. Word , textline , glyph and textregion are desired options.", + help="config file of prefered layout.", type=click.Path(exists=True, dir_okay=False), ) From 348d323c7cd98c53bfdbde37c517c5217db14f11 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 23 May 2024 15:43:31 +0200 Subject: [PATCH 051/492] missing text types are added --- train/custom_config_page2label.json | 12 ++++---- train/pagexml2label.py | 48 ++++++++++++++++++++++++++++- 2 files changed, 53 insertions(+), 7 deletions(-) diff --git a/train/custom_config_page2label.json b/train/custom_config_page2label.json index 85b5d7e..254f4df 100644 --- a/train/custom_config_page2label.json +++ b/train/custom_config_page2label.json @@ -1,8 +1,8 @@ { -"textregions":{"paragraph":1, "heading": 2, "header":2,"drop-capital": 3, "marginalia":4 ,"page-number":1 , "catch-word":1 }, -"imageregion":5, -"separatorregion":6, -"graphicregions" :{"handwritten-annotation":7, "decoration": 8, "signature": 9, "stamp": 10}, -"artificial_class_on_boundry": ["paragraph","header", "heading", "marginalia", "page-number", "catch-word", "drop-capital"], -"artificial_class_label":11 +"textregions":{"paragraph":1, "heading": 1, "header":1,"drop-capital": 1, "marginalia":1 ,"page-number":1 , "catch-word":1 ,"footnote": 1, "footnote-continued": 1}, +"imageregion":2, +"separatorregion":3, +"graphicregions" :{"handwritten-annotation":2, "decoration": 2, "signature": 2, "stamp": 2}, +"artificial_class_on_boundry": ["paragraph","header", "heading", "marginalia", "page-number", "catch-word", "drop-capital","footnote", "footnote-continued"], +"artificial_class_label":4 } diff --git a/train/pagexml2label.py b/train/pagexml2label.py index 5311c24..63b7acf 100644 --- a/train/pagexml2label.py +++ b/train/pagexml2label.py @@ -113,6 +113,7 @@ class pagexml2word: """ Reading the page xml files and write the ground truth images into given output directory. """ + ## to do: add footnote to text regions for index in tqdm(range(len(self.gt_list))): #try: tree1 = ET.parse(self.dir+'/'+self.gt_list[index]) @@ -144,11 +145,13 @@ class pagexml2word: types_graphic_label = list(types_graphic_dict.values()) - labels_rgb_color = [ (0,0,0), (255,0,0), (255,125,0), (255,0,125), (125,255,125), (125,125,0), (0,125,255), (0,125,0), (125,125,125), (255,0,255), (125,0,125), (0,255,0),(0,0,255), (0,255,255), (255,125,125), (0,125,125), (0,255,125)] + labels_rgb_color = [ (0,0,0), (255,0,0), (255,125,0), (255,0,125), (125,255,125), (125,125,0), (0,125,255), (0,125,0), (125,125,125), (255,0,255), (125,0,125), (0,255,0),(0,0,255), (0,255,255), (255,125,125), (0,125,125), (0,255,125), (255,125,255), (125,255,0)] region_tags=np.unique([x for x in alltags if x.endswith('Region')]) co_text_paragraph=[] + co_text_footnote=[] + co_text_footnote_con=[] co_text_drop=[] co_text_heading=[] co_text_header=[] @@ -177,6 +180,8 @@ class pagexml2word: c_t_in_signature_mark=[] c_t_in_catch=[] c_t_in_marginalia=[] + c_t_in_footnote=[] + c_t_in_footnote_con=[] sumi=0 for vv in nn.iter(): # check the format of coords @@ -190,6 +195,14 @@ class pagexml2word: if "drop-capital" in types_text: if "type" in nn.attrib and nn.attrib['type']=='drop-capital': c_t_in_drop.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "footnote" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='footnote': + c_t_in_footnote.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "footnote-continued" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='footnote-continued': + c_t_in_footnote_con.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) if "heading" in types_text: if "type" in nn.attrib and nn.attrib['type']=='heading': @@ -231,6 +244,16 @@ class pagexml2word: c_t_in_drop.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) sumi+=1 + if "footnote" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='footnote': + c_t_in_footnote.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "footnote-continued" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='footnote-continued': + c_t_in_footnote_con.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + if "heading" in types_text: if "type" in nn.attrib and nn.attrib['type']=='heading': c_t_in_heading.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) @@ -272,6 +295,10 @@ class pagexml2word: if len(c_t_in_drop)>0: co_text_drop.append(np.array(c_t_in_drop)) + if len(c_t_in_footnote_con)>0: + co_text_footnote_con.append(np.array(c_t_in_footnote_con)) + if len(c_t_in_footnote)>0: + co_text_footnote.append(np.array(c_t_in_footnote)) if len(c_t_in_paragraph)>0: co_text_paragraph.append(np.array(c_t_in_paragraph)) if len(c_t_in_heading)>0: @@ -497,6 +524,15 @@ class pagexml2word: erosion_rate = 2 dilation_rate = 4 co_text_marginalia, img_boundary = self.update_region_contours(co_text_marginalia, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "footnote" in elements_with_artificial_class: + erosion_rate = 2 + dilation_rate = 4 + co_text_footnote, img_boundary = self.update_region_contours(co_text_footnote, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "footnote-continued" in elements_with_artificial_class: + erosion_rate = 2 + dilation_rate = 4 + co_text_footnote_con, img_boundary = self.update_region_contours(co_text_footnote_con, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + img = np.zeros( (y_len,x_len,3) ) @@ -525,6 +561,10 @@ class pagexml2word: if 'textregions' in keys: if "paragraph" in types_text: img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=labels_rgb_color[ config_params['textregions']['paragraph']]) + if "footnote" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_footnote, color=labels_rgb_color[ config_params['textregions']['footnote']]) + if "footnote-continued" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_footnote_con, color=labels_rgb_color[ config_params['textregions']['footnote-continued']]) if "heading" in types_text: img_poly=cv2.fillPoly(img, pts =co_text_heading, color=labels_rgb_color[ config_params['textregions']['heading']]) if "header" in types_text: @@ -580,6 +620,12 @@ class pagexml2word: if "paragraph" in types_text: color_label = config_params['textregions']['paragraph'] img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=(color_label,color_label,color_label)) + if "footnote" in types_text: + color_label = config_params['textregions']['footnote'] + img_poly=cv2.fillPoly(img, pts =co_text_footnote, color=(color_label,color_label,color_label)) + if "footnote-continued" in types_text: + color_label = config_params['textregions']['footnote-continued'] + img_poly=cv2.fillPoly(img, pts =co_text_footnote_con, color=(color_label,color_label,color_label)) if "heading" in types_text: color_label = config_params['textregions']['heading'] img_poly=cv2.fillPoly(img, pts =co_text_heading, color=(color_label,color_label,color_label)) From a83d53c27d09c962c54f441e225c70fbd820900b Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 23 May 2024 17:14:31 +0200 Subject: [PATCH 052/492] use cases like textline, word and glyph are added --- train/custom_config_page2label.json | 11 +- train/pagexml2label.py | 1055 +++------------------------ 2 files changed, 93 insertions(+), 973 deletions(-) diff --git a/train/custom_config_page2label.json b/train/custom_config_page2label.json index 254f4df..d6320fa 100644 --- a/train/custom_config_page2label.json +++ b/train/custom_config_page2label.json @@ -1,8 +1,9 @@ { -"textregions":{"paragraph":1, "heading": 1, "header":1,"drop-capital": 1, "marginalia":1 ,"page-number":1 , "catch-word":1 ,"footnote": 1, "footnote-continued": 1}, -"imageregion":2, -"separatorregion":3, -"graphicregions" :{"handwritten-annotation":2, "decoration": 2, "signature": 2, "stamp": 2}, +"use_case": "layout", +"textregions":{"paragraph":1, "heading": 2, "header":2,"drop-capital": 3, "marginalia":4 ,"page-number":1 , "catch-word":1 ,"footnote": 1, "footnote-continued": 1}, +"imageregion":5, +"separatorregion":6, +"graphicregions" :{"handwritten-annotation":5, "decoration": 5, "signature": 5, "stamp": 5}, "artificial_class_on_boundry": ["paragraph","header", "heading", "marginalia", "page-number", "catch-word", "drop-capital","footnote", "footnote-continued"], -"artificial_class_label":4 +"artificial_class_label":7 } diff --git a/train/pagexml2label.py b/train/pagexml2label.py index 63b7acf..16cda8b 100644 --- a/train/pagexml2label.py +++ b/train/pagexml2label.py @@ -21,13 +21,12 @@ This classes.txt file is required for dhsegment tool. """ KERNEL = np.ones((5, 5), np.uint8) -class pagexml2word: - def __init__(self,dir_in, out_dir,output_type,experiment,layout_config): +class pagexml2label: + def __init__(self,dir_in, out_dir,output_type,config): self.dir=dir_in self.output_dir=out_dir self.output_type=output_type - self.experiment=experiment - self.layout_config=layout_config + self.config=config def get_content_of_dir(self): """ @@ -127,7 +126,82 @@ class pagexml2word: y_len=int(jj.attrib['imageHeight']) x_len=int(jj.attrib['imageWidth']) - if self.layout_config: + if self.config and (config_params['use_case']=='textline' or config_params['use_case']=='word' or config_params['use_case']=='glyph'): + keys = list(config_params.keys()) + if "artificial_class_label" in keys: + artificial_class_rgb_color = (255,255,0) + artificial_class_label = config_params['artificial_class_label'] + + textline_rgb_color = (255, 0, 0) + + if config_params['use_case']=='textline': + region_tags = np.unique([x for x in alltags if x.endswith('TextLine')]) + elif config_params['use_case']=='word': + region_tags = np.unique([x for x in alltags if x.endswith('Word')]) + elif config_params['use_case']=='glyph': + region_tags = np.unique([x for x in alltags if x.endswith('Glyph')]) + co_use_case = [] + + for tag in region_tags: + if config_params['use_case']=='textline': + tag_endings = ['}TextLine','}textline'] + elif config_params['use_case']=='word': + tag_endings = ['}Word','}word'] + elif config_params['use_case']=='glyph': + tag_endings = ['}Glyph','}glyph'] + + if tag.endswith(tag_endings[0]) or tag.endswith(tag_endings[1]): + for nn in root1.iter(tag): + c_t_in = [] + sumi = 0 + for vv in nn.iter(): + # check the format of coords + if vv.tag == link + 'Coords': + coords = bool(vv.attrib) + if coords: + p_h = vv.attrib['points'].split(' ') + c_t_in.append( + np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) + break + else: + pass + + if vv.tag == link + 'Point': + c_t_in.append([int(np.float(vv.attrib['x'])), int(np.float(vv.attrib['y']))]) + sumi += 1 + elif vv.tag != link + 'Point' and sumi >= 1: + break + co_use_case.append(np.array(c_t_in)) + + + + if "artificial_class_label" in keys: + img_boundary = np.zeros((y_len, x_len)) + erosion_rate = 1 + dilation_rate = 3 + co_use_case, img_boundary = self.update_region_contours(co_use_case, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + + + img = np.zeros((y_len, x_len, 3)) + if self.output_type == '2d': + img_poly = cv2.fillPoly(img, pts=co_use_case, color=(1, 1, 1)) + if "artificial_class_label" in keys: + img_poly[:,:][img_boundary[:,:]==1] = artificial_class_label + elif self.output_type == '3d': + img_poly = cv2.fillPoly(img, pts=co_use_case, color=textline_rgb_color) + if "artificial_class_label" in keys: + img_poly[:,:,0][img_boundary[:,:]==1] = artificial_class_rgb_color[0] + img_poly[:,:,1][img_boundary[:,:]==1] = artificial_class_rgb_color[1] + img_poly[:,:,2][img_boundary[:,:]==1] = artificial_class_rgb_color[2] + + try: + cv2.imwrite(self.output_dir + '/' + self.gt_list[index].split('-')[1].split('.')[0] + '.png', + img_poly) + except: + cv2.imwrite(self.output_dir + '/' + self.gt_list[index].split('.')[0] + '.png', img_poly) + + + if self.config and config_params['use_case']=='layout': keys = list(config_params.keys()) if "artificial_class_on_boundry" in keys: elements_with_artificial_class = list(config_params['artificial_class_on_boundry']) @@ -139,6 +213,7 @@ class pagexml2word: types_text_dict = config_params['textregions'] types_text = list(types_text_dict.keys()) types_text_label = list(types_text_dict.values()) + print(types_text) if 'graphicregions' in keys: types_graphic_dict = config_params['graphicregions'] types_graphic = list(types_graphic_dict.keys()) @@ -660,957 +735,6 @@ class pagexml2word: cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png',img_poly ) - #print(values[0]) - if self.experiment=='word': - region_tags=np.unique([x for x in alltags if x.endswith('Word')]) - co_word=[] - - for tag in region_tags: - if tag.endswith('}Word') or tag.endswith('}word'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_word.append(np.array(c_t_in)) - - img = np.zeros( (y_len,x_len, 3) ) - if self.output_type == '2d': - img_poly=cv2.fillPoly(img, pts =co_word, color=(1,1,1)) - elif self.output_type == '3d': - img_poly=cv2.fillPoly(img, pts =co_word, color=(255,0,0)) - - try: - cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) - except: - cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png',img_poly ) - - - elif self.experiment=='glyph': - region_tags=np.unique([x for x in alltags if x.endswith('Glyph')]) - co_glyph=[] - - for tag in region_tags: - if tag.endswith('}Glyph') or tag.endswith('}glyph'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_glyph.append(np.array(c_t_in)) - - img = np.zeros( (y_len,x_len, 3) ) - if self.output_type == '2d': - img_poly=cv2.fillPoly(img, pts =co_glyph, color=(1,1,1)) - elif self.output_type == '3d': - img_poly=cv2.fillPoly(img, pts =co_glyph, color=(255,0,0)) - - try: - cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) - except: - cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png',img_poly ) - - elif self.experiment=='textline': - region_tags=np.unique([x for x in alltags if x.endswith('TextLine')]) - co_line=[] - - for tag in region_tags: - if tag.endswith('}TextLine') or tag.endswith('}textline'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_line.append(np.array(c_t_in)) - - img = np.zeros( (y_len,x_len, 3) ) - if self.output_type == '2d': - img_poly=cv2.fillPoly(img, pts =co_line, color=(1,1,1)) - elif self.output_type == '3d': - img_poly=cv2.fillPoly(img, pts =co_line, color=(255,0,0)) - - try: - cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) - except: - cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png',img_poly ) - - elif self.experiment == 'textline_new_concept': - region_tags = np.unique([x for x in alltags if x.endswith('TextLine')]) - co_line = [] - - for tag in region_tags: - if tag.endswith('}TextLine') or tag.endswith('}textline'): - # print('sth') - for nn in root1.iter(tag): - c_t_in = [] - sumi = 0 - for vv in nn.iter(): - # check the format of coords - if vv.tag == link + 'Coords': - coords = bool(vv.attrib) - if coords: - p_h = vv.attrib['points'].split(' ') - c_t_in.append( - np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) - break - else: - pass - - if vv.tag == link + 'Point': - c_t_in.append([int(np.float(vv.attrib['x'])), int(np.float(vv.attrib['y']))]) - sumi += 1 - # print(vv.tag,'in') - elif vv.tag != link + 'Point' and sumi >= 1: - break - co_line.append(np.array(c_t_in)) - - img_boundary = np.zeros((y_len, x_len)) - co_textline_eroded = [] - for con in co_line: - # try: - img_boundary_in = np.zeros((y_len, x_len)) - img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) - # print('bidiahhhhaaa') - - # img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=7)#asiatica - img_boundary_in = cv2.erode(img_boundary_in[:, :], KERNEL, iterations=1) - - pixel = 1 - min_size = 0 - con_eroded = self.return_contours_of_interested_region(img_boundary_in, pixel, min_size) - - try: - co_textline_eroded.append(con_eroded[0]) - except: - co_textline_eroded.append(con) - - img_boundary_in_dilated = cv2.dilate(img_boundary_in[:, :], KERNEL, iterations=3) - # img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=5) - - boundary = img_boundary_in_dilated[:, :] - img_boundary_in[:, :] - - img_boundary[:, :][boundary[:, :] == 1] = 1 - - img = np.zeros((y_len, x_len, 3)) - if self.output_type == '2d': - img_poly = cv2.fillPoly(img, pts=co_textline_eroded, color=(1, 1, 1)) - img_poly[:, :][img_boundary[:, :] == 1] = 2 - elif self.output_type == '3d': - img_poly = cv2.fillPoly(img, pts=co_textline_eroded, color=(255, 0, 0)) - img_poly[:, :, 0][img_boundary[:, :] == 1] = 255 - img_poly[:, :, 1][img_boundary[:, :] == 1] = 125 - img_poly[:, :, 2][img_boundary[:, :] == 1] = 125 - - try: - cv2.imwrite(self.output_dir + '/' + self.gt_list[index].split('-')[1].split('.')[0] + '.png', - img_poly) - except: - cv2.imwrite(self.output_dir + '/' + self.gt_list[index].split('.')[0] + '.png', img_poly) - - elif self.experiment=='layout_for_main_regions': - region_tags=np.unique([x for x in alltags if x.endswith('Region')]) - #print(region_tags) - co_text=[] - co_sep=[] - co_img=[] - #co_graphic=[] - - for tag in region_tags: - if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): - #print('sth') - for nn in root1.iter(tag): - print(nn.attrib['type']) - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_text.append(np.array(c_t_in)) - - elif tag.endswith('}ImageRegion') or tag.endswith('}GraphicRegion') or tag.endswith('}imageregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_img.append(np.array(c_t_in)) - - elif tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_sep.append(np.array(c_t_in)) - - img_poly = np.zeros( (y_len,x_len,3) ) - - - if self.output_type == '3d': - img_poly=cv2.fillPoly(img_poly, pts =co_text, color=(255,0,0)) - img_poly=cv2.fillPoly(img_poly, pts =co_img, color=(0,255,0)) - img_poly=cv2.fillPoly(img_poly, pts =co_sep, color=(0,0,255)) - ##img_poly=cv2.fillPoly(img, pts =co_graphic, color=(255,125,125)) - elif self.output_type == '2d': - img_poly=cv2.fillPoly(img_poly, pts =co_text, color=(1,1,1)) - img_poly=cv2.fillPoly(img_poly, pts =co_img, color=(2,2,2)) - img_poly=cv2.fillPoly(img_poly, pts =co_sep, color=(3,3,3)) - - try: - cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) - except: - cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png',img_poly ) - - elif self.experiment=='textregion': - region_tags=np.unique([x for x in alltags if x.endswith('TextRegion')]) - co_textregion=[] - - for tag in region_tags: - if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_textregion.append(np.array(c_t_in)) - - img = np.zeros( (y_len,x_len,3) ) - if self.output_type == '3d': - img_poly=cv2.fillPoly(img, pts =co_textregion, color=(255,0,0)) - elif self.output_type == '2d': - img_poly=cv2.fillPoly(img, pts =co_textregion, color=(1,1,1)) - - - try: - cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) - except: - cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png',img_poly ) - - elif self.experiment=='layout': - region_tags=np.unique([x for x in alltags if x.endswith('Region')]) - - co_text_paragraph=[] - co_text_drop=[] - co_text_heading=[] - co_text_header=[] - co_text_marginalia=[] - co_text_catch=[] - co_text_page_number=[] - co_text_signature_mark=[] - co_sep=[] - co_img=[] - co_table=[] - co_graphic=[] - co_graphic_text_annotation=[] - co_graphic_decoration=[] - co_noise=[] - - for tag in region_tags: - if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): - for nn in root1.iter(tag): - c_t_in_drop=[] - c_t_in_paragraph=[] - c_t_in_heading=[] - c_t_in_header=[] - c_t_in_page_number=[] - c_t_in_signature_mark=[] - c_t_in_catch=[] - c_t_in_marginalia=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - - coords=bool(vv.attrib) - if coords: - #print('birda1') - p_h=vv.attrib['points'].split(' ') - - - - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - #if nn.attrib['type']=='paragraph': - - c_t_in_drop.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - elif "type" in nn.attrib and nn.attrib['type']=='heading': - c_t_in_heading.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': - - c_t_in_signature_mark.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - #print(c_t_in_paragraph) - elif "type" in nn.attrib and nn.attrib['type']=='header': - c_t_in_header.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - elif "type" in nn.attrib and nn.attrib['type']=='catch-word': - c_t_in_catch.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - elif "type" in nn.attrib and nn.attrib['type']=='page-number': - - c_t_in_page_number.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - #print(c_t_in_paragraph) - - elif "type" in nn.attrib and nn.attrib['type']=='marginalia': - - c_t_in_marginalia.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - #print(c_t_in_paragraph) - else: - - c_t_in_paragraph.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - #print(c_t_in_paragraph) - - break - else: - pass - - - if vv.tag==link+'Point': - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - #if nn.attrib['type']=='paragraph': - - c_t_in_drop.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - elif "type" in nn.attrib and nn.attrib['type']=='heading': - c_t_in_heading.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - - elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': - - c_t_in_signature_mark.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - #print(c_t_in_paragraph) - sumi+=1 - elif "type" in nn.attrib and nn.attrib['type']=='header': - c_t_in_header.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - - elif "type" in nn.attrib and nn.attrib['type']=='catch-word': - c_t_in_catch.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - - elif "type" in nn.attrib and nn.attrib['type']=='page-number': - - c_t_in_page_number.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - #print(c_t_in_paragraph) - sumi+=1 - - elif "type" in nn.attrib and nn.attrib['type']=='marginalia': - - c_t_in_marginalia.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - #print(c_t_in_paragraph) - sumi+=1 - - else: - c_t_in_paragraph.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - #print(c_t_in_paragraph) - sumi+=1 - - #c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - - if len(c_t_in_drop)>0: - co_text_drop.append(np.array(c_t_in_drop)) - if len(c_t_in_paragraph)>0: - co_text_paragraph.append(np.array(c_t_in_paragraph)) - if len(c_t_in_heading)>0: - co_text_heading.append(np.array(c_t_in_heading)) - - if len(c_t_in_header)>0: - co_text_header.append(np.array(c_t_in_header)) - if len(c_t_in_page_number)>0: - co_text_page_number.append(np.array(c_t_in_page_number)) - if len(c_t_in_catch)>0: - co_text_catch.append(np.array(c_t_in_catch)) - - if len(c_t_in_signature_mark)>0: - co_text_signature_mark.append(np.array(c_t_in_signature_mark)) - - if len(c_t_in_marginalia)>0: - co_text_marginalia.append(np.array(c_t_in_marginalia)) - - - elif tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - c_t_in_text_annotation=[] - c_t_in_decoration=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - #c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': - #if nn.attrib['type']=='paragraph': - - c_t_in_text_annotation.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - elif "type" in nn.attrib and nn.attrib['type']=='decoration': - - c_t_in_decoration.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - #print(c_t_in_paragraph) - else: - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - - break - else: - pass - - - if vv.tag==link+'Point': - - if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': - #if nn.attrib['type']=='paragraph': - - c_t_in_text_annotation.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - elif "type" in nn.attrib and nn.attrib['type']=='decoration': - - c_t_in_decoration.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - #print(c_t_in_paragraph) - sumi+=1 - else: - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if len(c_t_in_text_annotation)>0: - co_graphic_text_annotation.append(np.array(c_t_in_text_annotation)) - if len(c_t_in_decoration)>0: - co_graphic_decoration.append(np.array(c_t_in_decoration)) - if len(c_t_in)>0: - co_graphic.append(np.array(c_t_in)) - - - - elif tag.endswith('}ImageRegion') or tag.endswith('}imageregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_img.append(np.array(c_t_in)) - - elif tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_sep.append(np.array(c_t_in)) - - - - elif tag.endswith('}TableRegion') or tag.endswith('}tableregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_table.append(np.array(c_t_in)) - - elif tag.endswith('}NoiseRegion') or tag.endswith('}noiseregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_noise.append(np.array(c_t_in)) - - - img = np.zeros( (y_len,x_len,3) ) - - if self.output_type == '3d': - img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=(255,0,0)) - - img_poly=cv2.fillPoly(img, pts =co_text_heading, color=(255,125,0)) - img_poly=cv2.fillPoly(img, pts =co_text_header, color=(255,0,125)) - img_poly=cv2.fillPoly(img, pts =co_text_catch, color=(125,255,125)) - img_poly=cv2.fillPoly(img, pts =co_text_signature_mark, color=(125,125,0)) - img_poly=cv2.fillPoly(img, pts =co_graphic_decoration, color=(0,125,255)) - img_poly=cv2.fillPoly(img, pts =co_text_page_number, color=(0,125,0)) - img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=(125,125,125)) - img_poly=cv2.fillPoly(img, pts =co_text_drop, color=(0,125,255)) - - img_poly=cv2.fillPoly(img, pts =co_graphic_text_annotation, color=(125,0,125)) - img_poly=cv2.fillPoly(img, pts =co_img, color=(0,255,0)) - img_poly=cv2.fillPoly(img, pts =co_sep, color=(0,0,255)) - img_poly=cv2.fillPoly(img, pts =co_table, color=(0,255,255)) - img_poly=cv2.fillPoly(img, pts =co_graphic, color=(255,125,125)) - img_poly=cv2.fillPoly(img, pts =co_noise, color=(255,0,255)) - elif self.output_type == '2d': - img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=(1,1,1)) - - img_poly=cv2.fillPoly(img, pts =co_text_heading, color=(2,2,2)) - img_poly=cv2.fillPoly(img, pts =co_text_header, color=(2,2,2)) - img_poly=cv2.fillPoly(img, pts =co_text_catch, color=(3,3,3)) - img_poly=cv2.fillPoly(img, pts =co_text_signature_mark, color=(4,4,4)) - img_poly=cv2.fillPoly(img, pts =co_graphic_decoration, color=(5,5,5)) - img_poly=cv2.fillPoly(img, pts =co_text_page_number, color=(6,6,6)) - img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=(7,7,7)) - img_poly=cv2.fillPoly(img, pts =co_text_drop, color=(8,8,8)) - - img_poly=cv2.fillPoly(img, pts =co_graphic_text_annotation, color=(9,9,9)) - img_poly=cv2.fillPoly(img, pts =co_img, color=(10,10,10)) - img_poly=cv2.fillPoly(img, pts =co_sep, color=(11,11,11)) - img_poly=cv2.fillPoly(img, pts =co_table, color=(12,12,12)) - img_poly=cv2.fillPoly(img, pts =co_graphic, color=(13,13,14)) - img_poly=cv2.fillPoly(img, pts =co_noise, color=(15,15,15)) - - try: - cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) - except: - cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png',img_poly ) - - - elif self.experiment=='layout_for_main_regions_new_concept': - region_tags=np.unique([x for x in alltags if x.endswith('Region')]) - #print(region_tags) - co_text=[] - co_sep=[] - co_img=[] - co_drop = [] - co_graphic=[] - co_table = [] - - for tag in region_tags: - if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - c_t_in_drop = [] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - c_t_in_drop.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - else: - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - c_t_in_drop.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - else: - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - if len(c_t_in)>0: - co_text.append(np.array(c_t_in)) - if len(c_t_in_drop)>0: - co_drop.append(np.array(c_t_in_drop)) - - elif tag.endswith('}ImageRegion') or tag.endswith('}GraphicRegion') or tag.endswith('}imageregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_img.append(np.array(c_t_in)) - - elif tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_sep.append(np.array(c_t_in)) - - elif tag.endswith('}TableRegion') or tag.endswith('}tableregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_table.append(np.array(c_t_in)) - - img_boundary = np.zeros( (y_len,x_len) ) - - - co_text_eroded = [] - for con in co_text: - #try: - img_boundary_in = np.zeros( (y_len,x_len) ) - img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) - #print('bidiahhhhaaa') - - - - #img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=7)#asiatica - img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=2) - - pixel = 1 - min_size = 0 - con_eroded = self.return_contours_of_interested_region(img_boundary_in,pixel, min_size ) - - try: - co_text_eroded.append(con_eroded[0]) - except: - co_text_eroded.append(con) - - img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=4) - #img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=5) - - boundary = img_boundary_in_dilated[:,:] - img_boundary_in[:,:] - - img_boundary[:,:][boundary[:,:]==1] =1 - - - ###co_table_eroded = [] - ###for con in co_table: - ####try: - ###img_boundary_in = np.zeros( (y_len,x_len) ) - ###img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) - ####print('bidiahhhhaaa') - - - - #####img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=7)#asiatica - ###img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=2) - - ###pixel = 1 - ###min_size = 0 - ###con_eroded = self.return_contours_of_interested_region(img_boundary_in,pixel, min_size ) - - ###try: - ###co_table_eroded.append(con_eroded[0]) - ###except: - ###co_table_eroded.append(con) - - ###img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=4) - - ###boundary = img_boundary_in_dilated[:,:] - img_boundary_in[:,:] - - ###img_boundary[:,:][boundary[:,:]==1] =1 - #except: - #pass - - #for con in co_img: - #img_boundary_in = np.zeros( (y_len,x_len) ) - #img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) - #img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=3) - - #boundary = img_boundary_in_dilated[:,:] - img_boundary_in[:,:] - - #img_boundary[:,:][boundary[:,:]==1] =1 - - - #for con in co_sep: - - #img_boundary_in = np.zeros( (y_len,x_len) ) - #img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) - #img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=3) - - #boundary = img_boundary_in_dilated[:,:] - img_boundary_in[:,:] - - img_boundary[:,:][boundary[:,:]==1] =1 - for con in co_drop: - img_boundary_in = np.zeros( (y_len,x_len) ) - img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) - img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=3) - - boundary = img_boundary_in_dilated[:,:] - img_boundary_in[:,:] - - img_boundary[:,:][boundary[:,:]==1] =1 - - - img = np.zeros( (y_len,x_len,3) ) - - if self.output_type == '2d': - img_poly=cv2.fillPoly(img, pts =co_img, color=(2,2,2)) - - img_poly=cv2.fillPoly(img, pts =co_text_eroded, color=(1,1,1)) - ##img_poly=cv2.fillPoly(img, pts =co_graphic, color=(4,4,4)) - ###img_poly=cv2.fillPoly(img, pts =co_table, color=(1,1,1)) - - img_poly=cv2.fillPoly(img, pts =co_drop, color=(1,1,1)) - img_poly[:,:][img_boundary[:,:]==1] = 4 - img_poly=cv2.fillPoly(img, pts =co_sep, color=(3,3,3)) - elif self.output_type == '3d': - img_poly=cv2.fillPoly(img, pts =co_img, color=(0,255,0)) - img_poly=cv2.fillPoly(img, pts =co_text_eroded, color=(255,0,0)) - img_poly=cv2.fillPoly(img, pts =co_drop, color=(0,125,255)) - - img_poly[:,:,0][img_boundary[:,:]==1]=255 - img_poly[:,:,1][img_boundary[:,:]==1]=125 - img_poly[:,:,2][img_boundary[:,:]==1]=125 - - img_poly=cv2.fillPoly(img, pts =co_sep, color=(0,0,255)) - ##img_poly=cv2.fillPoly(img, pts =co_graphic, color=(255,125,125)) - - #print('yazdimmm',self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png') - try: - #print('yazdimmm',self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png') - cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) - except: - cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png',img_poly ) - - - - #except: - #pass def run(self,config_params): self.get_content_of_dir() self.get_images_of_ground_truth(config_params) @@ -1631,9 +755,9 @@ class pagexml2word: ) @click.option( - "--layout_config", - "-lc", - help="config file of prefered layout.", + "--config", + "-cfg", + help="config file of prefered layout or use case.", type=click.Path(exists=True, dir_okay=False), ) @@ -1642,21 +766,16 @@ class pagexml2word: "-to", help="this defines how output should be. A 2d image array or a 3d image array encoded with RGB color. Just pass 2d or 3d. The file will be saved one directory up. 2D image array is 3d but only information of one channel would be enough since all channels have the same values.", ) -@click.option( - "--experiment", - "-exp", - help="experiment of ineterst. Word , textline , glyph and textregion are desired options.", -) -def main(dir_xml,dir_out,type_output,experiment,layout_config): - if layout_config: - with open(layout_config) as f: +def main(dir_xml,dir_out,type_output,config): + if config: + with open(config) as f: config_params = json.load(f) else: print("passed") config_params = None - x=pagexml2word(dir_xml,dir_out,type_output,experiment, layout_config) + x=pagexml2label(dir_xml,dir_out,type_output, config) x.run(config_params) if __name__=="__main__": main() From 61487bf782238ff7af96927f2c0c9108191f9ad0 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 23 May 2024 17:36:23 +0200 Subject: [PATCH 053/492] use case printspace is added --- train/pagexml2label.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/train/pagexml2label.py b/train/pagexml2label.py index 16cda8b..94596db 100644 --- a/train/pagexml2label.py +++ b/train/pagexml2label.py @@ -126,7 +126,7 @@ class pagexml2label: y_len=int(jj.attrib['imageHeight']) x_len=int(jj.attrib['imageWidth']) - if self.config and (config_params['use_case']=='textline' or config_params['use_case']=='word' or config_params['use_case']=='glyph'): + if self.config and (config_params['use_case']=='textline' or config_params['use_case']=='word' or config_params['use_case']=='glyph' or config_params['use_case']=='printspace'): keys = list(config_params.keys()) if "artificial_class_label" in keys: artificial_class_rgb_color = (255,255,0) @@ -140,6 +140,9 @@ class pagexml2label: region_tags = np.unique([x for x in alltags if x.endswith('Word')]) elif config_params['use_case']=='glyph': region_tags = np.unique([x for x in alltags if x.endswith('Glyph')]) + elif config_params['use_case']=='printspace': + region_tags = np.unique([x for x in alltags if x.endswith('PrintSpace')]) + co_use_case = [] for tag in region_tags: @@ -149,6 +152,8 @@ class pagexml2label: tag_endings = ['}Word','}word'] elif config_params['use_case']=='glyph': tag_endings = ['}Glyph','}glyph'] + elif config_params['use_case']=='printspace': + tag_endings = ['}PrintSpace','}printspace'] if tag.endswith(tag_endings[0]) or tag.endswith(tag_endings[1]): for nn in root1.iter(tag): From d346b317fb5dea9afefa4fd95587f0c8201cd5d7 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 24 May 2024 14:42:58 +0200 Subject: [PATCH 054/492] machine based reading order training dataset generator is added --- train/generate_gt_for_training.py | 194 +++++ train/gt_for_enhancement_creator.py | 31 - train/gt_gen_utils.py | 1239 +++++++++++++++++++++++++++ train/pagexml2label.py | 789 ----------------- 4 files changed, 1433 insertions(+), 820 deletions(-) create mode 100644 train/generate_gt_for_training.py delete mode 100644 train/gt_for_enhancement_creator.py create mode 100644 train/gt_gen_utils.py delete mode 100644 train/pagexml2label.py diff --git a/train/generate_gt_for_training.py b/train/generate_gt_for_training.py new file mode 100644 index 0000000..e296029 --- /dev/null +++ b/train/generate_gt_for_training.py @@ -0,0 +1,194 @@ +import click +import json +from gt_gen_utils import * +from tqdm import tqdm + +@click.group() +def main(): + pass + +@main.command() +@click.option( + "--dir_xml", + "-dx", + help="directory of GT page-xml files", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--dir_out", + "-do", + help="directory where ground truth images would be written", + type=click.Path(exists=True, file_okay=False), +) + +@click.option( + "--config", + "-cfg", + help="config file of prefered layout or use case.", + type=click.Path(exists=True, dir_okay=False), +) + +@click.option( + "--type_output", + "-to", + help="this defines how output should be. A 2d image array or a 3d image array encoded with RGB color. Just pass 2d or 3d. The file will be saved one directory up. 2D image array is 3d but only information of one channel would be enough since all channels have the same values.", +) + +def pagexml2label(dir_xml,dir_out,type_output,config): + if config: + with open(config) as f: + config_params = json.load(f) + else: + print("passed") + config_params = None + gt_list = get_content_of_dir(dir_xml) + get_images_of_ground_truth(gt_list,dir_xml,dir_out,type_output, config, config_params) + +@main.command() +@click.option( + "--dir_imgs", + "-dis", + help="directory of images with high resolution.", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--dir_out_images", + "-dois", + help="directory where degraded images will be written.", + type=click.Path(exists=True, file_okay=False), +) + +@click.option( + "--dir_out_labels", + "-dols", + help="directory where original images will be written as labels.", + type=click.Path(exists=True, file_okay=False), +) +def image_enhancement(dir_imgs, dir_out_images, dir_out_labels): + #dir_imgs = './training_data_sample_enhancement/images' + #dir_out_images = './training_data_sample_enhancement/images_gt' + #dir_out_labels = './training_data_sample_enhancement/labels_gt' + + ls_imgs = os.listdir(dir_imgs) + ls_scales = [ 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9] + + for img in tqdm(ls_imgs): + img_name = img.split('.')[0] + img_type = img.split('.')[1] + image = cv2.imread(os.path.join(dir_imgs, img)) + for i, scale in enumerate(ls_scales): + height_sc = int(image.shape[0]*scale) + width_sc = int(image.shape[1]*scale) + + image_down_scaled = resize_image(image, height_sc, width_sc) + image_back_to_org_scale = resize_image(image_down_scaled, image.shape[0], image.shape[1]) + + cv2.imwrite(os.path.join(dir_out_images, img_name+'_'+str(i)+'.'+img_type), image_back_to_org_scale) + cv2.imwrite(os.path.join(dir_out_labels, img_name+'_'+str(i)+'.'+img_type), image) + + +@main.command() +@click.option( + "--dir_xml", + "-dx", + help="directory of GT page-xml files", + type=click.Path(exists=True, file_okay=False), +) + +@click.option( + "--dir_out_modal_image", + "-domi", + help="directory where ground truth images would be written", + type=click.Path(exists=True, file_okay=False), +) + +@click.option( + "--dir_out_classes", + "-docl", + help="directory where ground truth classes would be written", + type=click.Path(exists=True, file_okay=False), +) + +@click.option( + "--input_height", + "-ih", + help="input_height", +) +@click.option( + "--input_width", + "-iw", + help="input_width", +) + +def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, input_height, input_width): + xml_files_ind = os.listdir(dir_xml) + input_height = int(input_height) + input_width = int(input_width) + + indexer_start= 0#55166 + max_area = 1 + min_area = 0.0001 + + for ind_xml in tqdm(xml_files_ind): + indexer = 0 + #print(ind_xml) + #print('########################') + xml_file = os.path.join(dir_xml,ind_xml ) + f_name = ind_xml.split('.')[0] + file_name, id_paragraph, id_header,co_text_paragraph,\ + co_text_header,tot_region_ref,x_len, y_len,index_tot_regions,img_poly = read_xml(xml_file) + + id_all_text = id_paragraph + id_header + co_text_all = co_text_paragraph + co_text_header + + + _, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, _ = find_new_features_of_contours(co_text_header) + + img_header_and_sep = np.zeros((y_len,x_len), dtype='uint8') + + for j in range(len(cy_main)): + img_header_and_sep[int(y_max_main[j]):int(y_max_main[j])+12,int(x_min_main[j]):int(x_max_main[j]) ] = 1 + + + texts_corr_order_index = [index_tot_regions[tot_region_ref.index(i)] for i in id_all_text ] + texts_corr_order_index_int = [int(x) for x in texts_corr_order_index] + + + co_text_all, texts_corr_order_index_int = filter_contours_area_of_image(img_poly, co_text_all, texts_corr_order_index_int, max_area, min_area) + + arg_array = np.array(range(len(texts_corr_order_index_int))) + + labels_con = np.zeros((y_len,x_len,len(arg_array)),dtype='uint8') + for i in range(len(co_text_all)): + img_label = np.zeros((y_len,x_len,3),dtype='uint8') + img_label=cv2.fillPoly(img_label, pts =[co_text_all[i]], color=(1,1,1)) + + img_label[:,:,0][img_poly[:,:,0]==5] = 2 + img_label[:,:,0][img_header_and_sep[:,:]==1] = 3 + + labels_con[:,:,i] = img_label[:,:,0] + + for i in range(len(texts_corr_order_index_int)): + for j in range(len(texts_corr_order_index_int)): + if i!=j: + input_matrix = np.zeros((input_height,input_width,3)).astype(np.int8) + final_f_name = f_name+'_'+str(indexer+indexer_start) + order_class_condition = texts_corr_order_index_int[i]-texts_corr_order_index_int[j] + if order_class_condition<0: + class_type = 1 + else: + class_type = 0 + + input_matrix[:,:,0] = resize_image(labels_con[:,:,i], input_height, input_width) + input_matrix[:,:,1] = resize_image(img_poly[:,:,0], input_height, input_width) + input_matrix[:,:,2] = resize_image(labels_con[:,:,j], input_height, input_width) + + np.save(os.path.join(dir_out_classes,final_f_name+'.npy' ), class_type) + + cv2.imwrite(os.path.join(dir_out_modal_image,final_f_name+'.png' ), input_matrix) + indexer = indexer+1 + + + +if __name__ == "__main__": + main() diff --git a/train/gt_for_enhancement_creator.py b/train/gt_for_enhancement_creator.py deleted file mode 100644 index 9a4274f..0000000 --- a/train/gt_for_enhancement_creator.py +++ /dev/null @@ -1,31 +0,0 @@ -import cv2 -import os - -def resize_image(seg_in, input_height, input_width): - return cv2.resize(seg_in, (input_width, input_height), interpolation=cv2.INTER_NEAREST) - - -dir_imgs = './training_data_sample_enhancement/images' -dir_out_imgs = './training_data_sample_enhancement/images_gt' -dir_out_labs = './training_data_sample_enhancement/labels_gt' - -ls_imgs = os.listdir(dir_imgs) - - -ls_scales = [ 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9] - - -for img in ls_imgs: - img_name = img.split('.')[0] - img_type = img.split('.')[1] - image = cv2.imread(os.path.join(dir_imgs, img)) - for i, scale in enumerate(ls_scales): - height_sc = int(image.shape[0]*scale) - width_sc = int(image.shape[1]*scale) - - image_down_scaled = resize_image(image, height_sc, width_sc) - image_back_to_org_scale = resize_image(image_down_scaled, image.shape[0], image.shape[1]) - - cv2.imwrite(os.path.join(dir_out_imgs, img_name+'_'+str(i)+'.'+img_type), image_back_to_org_scale) - cv2.imwrite(os.path.join(dir_out_labs, img_name+'_'+str(i)+'.'+img_type), image) - diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py new file mode 100644 index 0000000..9862e29 --- /dev/null +++ b/train/gt_gen_utils.py @@ -0,0 +1,1239 @@ +import click +import sys +import os +import numpy as np +import warnings +import xml.etree.ElementTree as ET +from tqdm import tqdm +import cv2 +from shapely import geometry +from pathlib import Path + + +KERNEL = np.ones((5, 5), np.uint8) + +with warnings.catch_warnings(): + warnings.simplefilter("ignore") + +def get_content_of_dir(dir_in): + """ + Listing all ground truth page xml files. All files are needed to have xml format. + """ + + gt_all=os.listdir(dir_in) + gt_list=[file for file in gt_all if file.split('.')[ len(file.split('.'))-1 ]=='xml' ] + return gt_list + +def return_parent_contours(contours, hierarchy): + contours_parent = [contours[i] for i in range(len(contours)) if hierarchy[0][i][3] == -1] + return contours_parent +def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, min_area): + found_polygons_early = list() + + jv = 0 + for c in contours: + if len(c) < 3: # A polygon cannot have less than 3 points + continue + + polygon = geometry.Polygon([point[0] for point in c]) + # area = cv2.contourArea(c) + area = polygon.area + ##print(np.prod(thresh.shape[:2])) + # Check that polygon has area greater than minimal area + # print(hierarchy[0][jv][3],hierarchy ) + if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 : + # print(c[0][0][1]) + found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.int32)) + jv += 1 + return found_polygons_early + +def filter_contours_area_of_image(image, contours, order_index, max_area, min_area): + found_polygons_early = list() + order_index_filtered = list() + #jv = 0 + for jv, c in enumerate(contours): + #print(len(c[0])) + c = c[0] + if len(c) < 3: # A polygon cannot have less than 3 points + continue + c_e = [point for point in c] + #print(c_e) + polygon = geometry.Polygon(c_e) + area = polygon.area + #print(area,'area') + if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 : + found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.uint)) + order_index_filtered.append(order_index[jv]) + #jv += 1 + return found_polygons_early, order_index_filtered + +def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002): + + # pixels of images are identified by 5 + if len(region_pre_p.shape) == 3: + cnts_images = (region_pre_p[:, :, 0] == pixel) * 1 + else: + cnts_images = (region_pre_p[:, :] == pixel) * 1 + cnts_images = cnts_images.astype(np.uint8) + cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) + imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) + ret, thresh = cv2.threshold(imgray, 0, 255, 0) + + contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + contours_imgs = return_parent_contours(contours_imgs, hierarchy) + contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=min_area) + + return contours_imgs +def update_region_contours(co_text, img_boundary, erosion_rate, dilation_rate, y_len, x_len): + co_text_eroded = [] + for con in co_text: + #try: + img_boundary_in = np.zeros( (y_len,x_len) ) + img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) + #print('bidiahhhhaaa') + + + + #img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=7)#asiatica + if erosion_rate > 0: + img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=erosion_rate) + + pixel = 1 + min_size = 0 + con_eroded = return_contours_of_interested_region(img_boundary_in,pixel, min_size ) + + try: + co_text_eroded.append(con_eroded[0]) + except: + co_text_eroded.append(con) + + + img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=dilation_rate) + #img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=5) + + boundary = img_boundary_in_dilated[:,:] - img_boundary_in[:,:] + + img_boundary[:,:][boundary[:,:]==1] =1 + return co_text_eroded, img_boundary +def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_file, config_params): + """ + Reading the page xml files and write the ground truth images into given output directory. + """ + ## to do: add footnote to text regions + for index in tqdm(range(len(gt_list))): + #try: + tree1 = ET.parse(dir_in+'/'+gt_list[index]) + root1=tree1.getroot() + alltags=[elem.tag for elem in root1.iter()] + link=alltags[0].split('}')[0]+'}' + + + + for jj in root1.iter(link+'Page'): + y_len=int(jj.attrib['imageHeight']) + x_len=int(jj.attrib['imageWidth']) + + if config_file and (config_params['use_case']=='textline' or config_params['use_case']=='word' or config_params['use_case']=='glyph' or config_params['use_case']=='printspace'): + keys = list(config_params.keys()) + if "artificial_class_label" in keys: + artificial_class_rgb_color = (255,255,0) + artificial_class_label = config_params['artificial_class_label'] + + textline_rgb_color = (255, 0, 0) + + if config_params['use_case']=='textline': + region_tags = np.unique([x for x in alltags if x.endswith('TextLine')]) + elif config_params['use_case']=='word': + region_tags = np.unique([x for x in alltags if x.endswith('Word')]) + elif config_params['use_case']=='glyph': + region_tags = np.unique([x for x in alltags if x.endswith('Glyph')]) + elif config_params['use_case']=='printspace': + region_tags = np.unique([x for x in alltags if x.endswith('PrintSpace')]) + + co_use_case = [] + + for tag in region_tags: + if config_params['use_case']=='textline': + tag_endings = ['}TextLine','}textline'] + elif config_params['use_case']=='word': + tag_endings = ['}Word','}word'] + elif config_params['use_case']=='glyph': + tag_endings = ['}Glyph','}glyph'] + elif config_params['use_case']=='printspace': + tag_endings = ['}PrintSpace','}printspace'] + + if tag.endswith(tag_endings[0]) or tag.endswith(tag_endings[1]): + for nn in root1.iter(tag): + c_t_in = [] + sumi = 0 + for vv in nn.iter(): + # check the format of coords + if vv.tag == link + 'Coords': + coords = bool(vv.attrib) + if coords: + p_h = vv.attrib['points'].split(' ') + c_t_in.append( + np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) + break + else: + pass + + if vv.tag == link + 'Point': + c_t_in.append([int(np.float(vv.attrib['x'])), int(np.float(vv.attrib['y']))]) + sumi += 1 + elif vv.tag != link + 'Point' and sumi >= 1: + break + co_use_case.append(np.array(c_t_in)) + + + + if "artificial_class_label" in keys: + img_boundary = np.zeros((y_len, x_len)) + erosion_rate = 1 + dilation_rate = 3 + co_use_case, img_boundary = update_region_contours(co_use_case, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + + + img = np.zeros((y_len, x_len, 3)) + if output_type == '2d': + img_poly = cv2.fillPoly(img, pts=co_use_case, color=(1, 1, 1)) + if "artificial_class_label" in keys: + img_poly[:,:][img_boundary[:,:]==1] = artificial_class_label + elif output_type == '3d': + img_poly = cv2.fillPoly(img, pts=co_use_case, color=textline_rgb_color) + if "artificial_class_label" in keys: + img_poly[:,:,0][img_boundary[:,:]==1] = artificial_class_rgb_color[0] + img_poly[:,:,1][img_boundary[:,:]==1] = artificial_class_rgb_color[1] + img_poly[:,:,2][img_boundary[:,:]==1] = artificial_class_rgb_color[2] + + try: + cv2.imwrite(output_dir + '/' + gt_list[index].split('-')[1].split('.')[0] + '.png', + img_poly) + except: + cv2.imwrite(output_dir + '/' + gt_list[index].split('.')[0] + '.png', img_poly) + + + if config_file and config_params['use_case']=='layout': + keys = list(config_params.keys()) + if "artificial_class_on_boundry" in keys: + elements_with_artificial_class = list(config_params['artificial_class_on_boundry']) + artificial_class_rgb_color = (255,255,0) + artificial_class_label = config_params['artificial_class_label'] + #values = config_params.values() + + if 'textregions' in keys: + types_text_dict = config_params['textregions'] + types_text = list(types_text_dict.keys()) + types_text_label = list(types_text_dict.values()) + print(types_text) + if 'graphicregions' in keys: + types_graphic_dict = config_params['graphicregions'] + types_graphic = list(types_graphic_dict.keys()) + types_graphic_label = list(types_graphic_dict.values()) + + + labels_rgb_color = [ (0,0,0), (255,0,0), (255,125,0), (255,0,125), (125,255,125), (125,125,0), (0,125,255), (0,125,0), (125,125,125), (255,0,255), (125,0,125), (0,255,0),(0,0,255), (0,255,255), (255,125,125), (0,125,125), (0,255,125), (255,125,255), (125,255,0)] + + region_tags=np.unique([x for x in alltags if x.endswith('Region')]) + + co_text_paragraph=[] + co_text_footnote=[] + co_text_footnote_con=[] + co_text_drop=[] + co_text_heading=[] + co_text_header=[] + co_text_marginalia=[] + co_text_catch=[] + co_text_page_number=[] + co_text_signature_mark=[] + co_sep=[] + co_img=[] + co_table=[] + co_graphic_signature=[] + co_graphic_text_annotation=[] + co_graphic_decoration=[] + co_graphic_stamp=[] + co_noise=[] + + for tag in region_tags: + if 'textregions' in keys: + if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): + for nn in root1.iter(tag): + c_t_in_drop=[] + c_t_in_paragraph=[] + c_t_in_heading=[] + c_t_in_header=[] + c_t_in_page_number=[] + c_t_in_signature_mark=[] + c_t_in_catch=[] + c_t_in_marginalia=[] + c_t_in_footnote=[] + c_t_in_footnote_con=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + + coords=bool(vv.attrib) + if coords: + #print('birda1') + p_h=vv.attrib['points'].split(' ') + + if "drop-capital" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='drop-capital': + c_t_in_drop.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "footnote" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='footnote': + c_t_in_footnote.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "footnote-continued" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='footnote-continued': + c_t_in_footnote_con.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "heading" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='heading': + c_t_in_heading.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "signature-mark" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='signature-mark': + c_t_in_signature_mark.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "header" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='header': + c_t_in_header.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "catch-word" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='catch-word': + c_t_in_catch.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "page-number" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='page-number': + c_t_in_page_number.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "marginalia" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='marginalia': + c_t_in_marginalia.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "paragraph" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='paragraph': + c_t_in_paragraph.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + + break + else: + pass + + + if vv.tag==link+'Point': + if "drop-capital" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='drop-capital': + c_t_in_drop.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "footnote" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='footnote': + c_t_in_footnote.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "footnote-continued" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='footnote-continued': + c_t_in_footnote_con.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "heading" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='heading': + c_t_in_heading.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "signature-mark" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='signature-mark': + c_t_in_signature_mark.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "header" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='header': + c_t_in_header.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "catch-word" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='catch-word': + c_t_in_catch.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "page-number" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='page-number': + c_t_in_page_number.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "marginalia" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='marginalia': + c_t_in_marginalia.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "paragraph" in types_text: + if "type" in nn.attrib and nn.attrib['type']=='paragraph': + c_t_in_paragraph.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + + elif vv.tag!=link+'Point' and sumi>=1: + break + + if len(c_t_in_drop)>0: + co_text_drop.append(np.array(c_t_in_drop)) + if len(c_t_in_footnote_con)>0: + co_text_footnote_con.append(np.array(c_t_in_footnote_con)) + if len(c_t_in_footnote)>0: + co_text_footnote.append(np.array(c_t_in_footnote)) + if len(c_t_in_paragraph)>0: + co_text_paragraph.append(np.array(c_t_in_paragraph)) + if len(c_t_in_heading)>0: + co_text_heading.append(np.array(c_t_in_heading)) + + if len(c_t_in_header)>0: + co_text_header.append(np.array(c_t_in_header)) + if len(c_t_in_page_number)>0: + co_text_page_number.append(np.array(c_t_in_page_number)) + if len(c_t_in_catch)>0: + co_text_catch.append(np.array(c_t_in_catch)) + + if len(c_t_in_signature_mark)>0: + co_text_signature_mark.append(np.array(c_t_in_signature_mark)) + + if len(c_t_in_marginalia)>0: + co_text_marginalia.append(np.array(c_t_in_marginalia)) + + + if 'graphicregions' in keys: + if tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in_stamp=[] + c_t_in_text_annotation=[] + c_t_in_decoration=[] + c_t_in_signature=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + if "handwritten-annotation" in types_graphic: + if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': + c_t_in_text_annotation.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "decoration" in types_graphic: + if "type" in nn.attrib and nn.attrib['type']=='decoration': + c_t_in_decoration.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "stamp" in types_graphic: + if "type" in nn.attrib and nn.attrib['type']=='stamp': + c_t_in_stamp.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "signature" in types_graphic: + if "type" in nn.attrib and nn.attrib['type']=='signature': + c_t_in_signature.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + + + break + else: + pass + + + if vv.tag==link+'Point': + if "handwritten-annotation" in types_graphic: + if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': + c_t_in_text_annotation.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "decoration" in types_graphic: + if "type" in nn.attrib and nn.attrib['type']=='decoration': + c_t_in_decoration.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "stamp" in types_graphic: + if "type" in nn.attrib and nn.attrib['type']=='stamp': + c_t_in_stamp.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if "signature" in types_graphic: + if "type" in nn.attrib and nn.attrib['type']=='signature': + c_t_in_signature.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if len(c_t_in_text_annotation)>0: + co_graphic_text_annotation.append(np.array(c_t_in_text_annotation)) + if len(c_t_in_decoration)>0: + co_graphic_decoration.append(np.array(c_t_in_decoration)) + if len(c_t_in_stamp)>0: + co_graphic_stamp.append(np.array(c_t_in_stamp)) + if len(c_t_in_signature)>0: + co_graphic_signature.append(np.array(c_t_in_signature)) + + if 'imageregion' in keys: + if tag.endswith('}ImageRegion') or tag.endswith('}imageregion'): + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + elif vv.tag!=link+'Point' and sumi>=1: + break + co_img.append(np.array(c_t_in)) + + + if 'separatorregion' in keys: + if tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + elif vv.tag!=link+'Point' and sumi>=1: + break + co_sep.append(np.array(c_t_in)) + + + + if 'tableregion' in keys: + if tag.endswith('}TableRegion') or tag.endswith('}tableregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_table.append(np.array(c_t_in)) + + if 'noiseregion' in keys: + if tag.endswith('}NoiseRegion') or tag.endswith('}noiseregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_noise.append(np.array(c_t_in)) + + if "artificial_class_on_boundry" in keys: + img_boundary = np.zeros( (y_len,x_len) ) + if "paragraph" in elements_with_artificial_class: + erosion_rate = 2 + dilation_rate = 4 + co_text_paragraph, img_boundary = update_region_contours(co_text_paragraph, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "drop-capital" in elements_with_artificial_class: + erosion_rate = 0 + dilation_rate = 4 + co_text_drop, img_boundary = update_region_contours(co_text_drop, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "catch-word" in elements_with_artificial_class: + erosion_rate = 0 + dilation_rate = 4 + co_text_catch, img_boundary = update_region_contours(co_text_catch, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "page-number" in elements_with_artificial_class: + erosion_rate = 0 + dilation_rate = 4 + co_text_page_number, img_boundary = update_region_contours(co_text_page_number, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "header" in elements_with_artificial_class: + erosion_rate = 1 + dilation_rate = 4 + co_text_header, img_boundary = update_region_contours(co_text_header, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "heading" in elements_with_artificial_class: + erosion_rate = 1 + dilation_rate = 4 + co_text_heading, img_boundary = update_region_contours(co_text_heading, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "signature-mark" in elements_with_artificial_class: + erosion_rate = 1 + dilation_rate = 4 + co_text_signature_mark, img_boundary = update_region_contours(co_text_signature_mark, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "marginalia" in elements_with_artificial_class: + erosion_rate = 2 + dilation_rate = 4 + co_text_marginalia, img_boundary = update_region_contours(co_text_marginalia, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "footnote" in elements_with_artificial_class: + erosion_rate = 2 + dilation_rate = 4 + co_text_footnote, img_boundary = update_region_contours(co_text_footnote, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "footnote-continued" in elements_with_artificial_class: + erosion_rate = 2 + dilation_rate = 4 + co_text_footnote_con, img_boundary = update_region_contours(co_text_footnote_con, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + + + + img = np.zeros( (y_len,x_len,3) ) + + if output_type == '3d': + + if 'graphicregions' in keys: + if "handwritten-annotation" in types_graphic: + img_poly=cv2.fillPoly(img, pts =co_graphic_text_annotation, color=labels_rgb_color[ config_params['graphicregions']['handwritten-annotation']]) + if "signature" in types_graphic: + img_poly=cv2.fillPoly(img, pts =co_graphic_signature, color=labels_rgb_color[ config_params['graphicregions']['signature']]) + if "decoration" in types_graphic: + img_poly=cv2.fillPoly(img, pts =co_graphic_decoration, color=labels_rgb_color[ config_params['graphicregions']['decoration']]) + if "stamp" in types_graphic: + img_poly=cv2.fillPoly(img, pts =co_graphic_stamp, color=labels_rgb_color[ config_params['graphicregions']['stamp']]) + + if 'imageregion' in keys: + img_poly=cv2.fillPoly(img, pts =co_img, color=labels_rgb_color[ config_params['imageregion']]) + if 'separatorregion' in keys: + img_poly=cv2.fillPoly(img, pts =co_sep, color=labels_rgb_color[ config_params['separatorregion']]) + if 'tableregion' in keys: + img_poly=cv2.fillPoly(img, pts =co_table, color=labels_rgb_color[ config_params['tableregion']]) + if 'noiseregion' in keys: + img_poly=cv2.fillPoly(img, pts =co_noise, color=labels_rgb_color[ config_params['noiseregion']]) + + if 'textregions' in keys: + if "paragraph" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=labels_rgb_color[ config_params['textregions']['paragraph']]) + if "footnote" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_footnote, color=labels_rgb_color[ config_params['textregions']['footnote']]) + if "footnote-continued" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_footnote_con, color=labels_rgb_color[ config_params['textregions']['footnote-continued']]) + if "heading" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_heading, color=labels_rgb_color[ config_params['textregions']['heading']]) + if "header" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_header, color=labels_rgb_color[ config_params['textregions']['header']]) + if "catch-word" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_catch, color=labels_rgb_color[ config_params['textregions']['catch-word']]) + if "signature-mark" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_signature_mark, color=labels_rgb_color[ config_params['textregions']['signature-mark']]) + if "page-number" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_page_number, color=labels_rgb_color[ config_params['textregions']['page-number']]) + if "marginalia" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=labels_rgb_color[ config_params['textregions']['marginalia']]) + if "drop-capital" in types_text: + img_poly=cv2.fillPoly(img, pts =co_text_drop, color=labels_rgb_color[ config_params['textregions']['drop-capital']]) + + if "artificial_class_on_boundry" in keys: + img_poly[:,:,0][img_boundary[:,:]==1] = artificial_class_rgb_color[0] + img_poly[:,:,1][img_boundary[:,:]==1] = artificial_class_rgb_color[1] + img_poly[:,:,2][img_boundary[:,:]==1] = artificial_class_rgb_color[2] + + + + + elif output_type == '2d': + if 'graphicregions' in keys: + if "handwritten-annotation" in types_graphic: + color_label = config_params['graphicregions']['handwritten-annotation'] + img_poly=cv2.fillPoly(img, pts =co_graphic_text_annotation, color=(color_label,color_label,color_label)) + if "signature" in types_graphic: + color_label = config_params['graphicregions']['signature'] + img_poly=cv2.fillPoly(img, pts =co_graphic_signature, color=(color_label,color_label,color_label)) + if "decoration" in types_graphic: + color_label = config_params['graphicregions']['decoration'] + img_poly=cv2.fillPoly(img, pts =co_graphic_decoration, color=(color_label,color_label,color_label)) + if "stamp" in types_graphic: + color_label = config_params['graphicregions']['stamp'] + img_poly=cv2.fillPoly(img, pts =co_graphic_stamp, color=(color_label,color_label,color_label)) + + if 'imageregion' in keys: + color_label = config_params['imageregion'] + img_poly=cv2.fillPoly(img, pts =co_img, color=(color_label,color_label,color_label)) + if 'separatorregion' in keys: + color_label = config_params['separatorregion'] + img_poly=cv2.fillPoly(img, pts =co_sep, color=(color_label,color_label,color_label)) + if 'tableregion' in keys: + color_label = config_params['tableregion'] + img_poly=cv2.fillPoly(img, pts =co_table, color=(color_label,color_label,color_label)) + if 'noiseregion' in keys: + color_label = config_params['noiseregion'] + img_poly=cv2.fillPoly(img, pts =co_noise, color=(color_label,color_label,color_label)) + + if 'textregions' in keys: + if "paragraph" in types_text: + color_label = config_params['textregions']['paragraph'] + img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=(color_label,color_label,color_label)) + if "footnote" in types_text: + color_label = config_params['textregions']['footnote'] + img_poly=cv2.fillPoly(img, pts =co_text_footnote, color=(color_label,color_label,color_label)) + if "footnote-continued" in types_text: + color_label = config_params['textregions']['footnote-continued'] + img_poly=cv2.fillPoly(img, pts =co_text_footnote_con, color=(color_label,color_label,color_label)) + if "heading" in types_text: + color_label = config_params['textregions']['heading'] + img_poly=cv2.fillPoly(img, pts =co_text_heading, color=(color_label,color_label,color_label)) + if "header" in types_text: + color_label = config_params['textregions']['header'] + img_poly=cv2.fillPoly(img, pts =co_text_header, color=(color_label,color_label,color_label)) + if "catch-word" in types_text: + color_label = config_params['textregions']['catch-word'] + img_poly=cv2.fillPoly(img, pts =co_text_catch, color=(color_label,color_label,color_label)) + if "signature-mark" in types_text: + color_label = config_params['textregions']['signature-mark'] + img_poly=cv2.fillPoly(img, pts =co_text_signature_mark, color=(color_label,color_label,color_label)) + if "page-number" in types_text: + color_label = config_params['textregions']['page-number'] + img_poly=cv2.fillPoly(img, pts =co_text_page_number, color=(color_label,color_label,color_label)) + if "marginalia" in types_text: + color_label = config_params['textregions']['marginalia'] + img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=(color_label,color_label,color_label)) + if "drop-capital" in types_text: + color_label = config_params['textregions']['drop-capital'] + img_poly=cv2.fillPoly(img, pts =co_text_drop, color=(color_label,color_label,color_label)) + + if "artificial_class_on_boundry" in keys: + img_poly[:,:][img_boundary[:,:]==1] = artificial_class_label + + + + + try: + cv2.imwrite(output_dir+'/'+gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) + except: + cv2.imwrite(output_dir+'/'+gt_list[index].split('.')[0]+'.png',img_poly ) + + + +def find_new_features_of_contours(contours_main): + + #print(contours_main[0][0][:, 0]) + + areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))]) + M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] + cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] + cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] + try: + x_min_main = np.array([np.min(contours_main[j][0][:, 0]) for j in range(len(contours_main))]) + + argmin_x_main = np.array([np.argmin(contours_main[j][0][:, 0]) for j in range(len(contours_main))]) + + x_min_from_argmin = np.array([contours_main[j][0][argmin_x_main[j], 0] for j in range(len(contours_main))]) + y_corr_x_min_from_argmin = np.array([contours_main[j][0][argmin_x_main[j], 1] for j in range(len(contours_main))]) + + x_max_main = np.array([np.max(contours_main[j][0][:, 0]) for j in range(len(contours_main))]) + + y_min_main = np.array([np.min(contours_main[j][0][:, 1]) for j in range(len(contours_main))]) + y_max_main = np.array([np.max(contours_main[j][0][:, 1]) for j in range(len(contours_main))]) + except: + x_min_main = np.array([np.min(contours_main[j][:, 0]) for j in range(len(contours_main))]) + + argmin_x_main = np.array([np.argmin(contours_main[j][:, 0]) for j in range(len(contours_main))]) + + x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0] for j in range(len(contours_main))]) + y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 1] for j in range(len(contours_main))]) + + x_max_main = np.array([np.max(contours_main[j][:, 0]) for j in range(len(contours_main))]) + + y_min_main = np.array([np.min(contours_main[j][:, 1]) for j in range(len(contours_main))]) + y_max_main = np.array([np.max(contours_main[j][:, 1]) for j in range(len(contours_main))]) + + # dis_x=np.abs(x_max_main-x_min_main) + + return cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin +def read_xml(xml_file): + file_name = Path(xml_file).stem + tree1 = ET.parse(xml_file) + root1=tree1.getroot() + alltags=[elem.tag for elem in root1.iter()] + link=alltags[0].split('}')[0]+'}' + + index_tot_regions = [] + tot_region_ref = [] + + for jj in root1.iter(link+'Page'): + y_len=int(jj.attrib['imageHeight']) + x_len=int(jj.attrib['imageWidth']) + + + for jj in root1.iter(link+'RegionRefIndexed'): + index_tot_regions.append(jj.attrib['index']) + tot_region_ref.append(jj.attrib['regionRef']) + + region_tags=np.unique([x for x in alltags if x.endswith('Region')]) + #print(region_tags) + co_text_paragraph=[] + co_text_drop=[] + co_text_heading=[] + co_text_header=[] + co_text_marginalia=[] + co_text_catch=[] + co_text_page_number=[] + co_text_signature_mark=[] + co_sep=[] + co_img=[] + co_table=[] + co_graphic=[] + co_graphic_text_annotation=[] + co_graphic_decoration=[] + co_noise=[] + + + co_text_paragraph_text=[] + co_text_drop_text=[] + co_text_heading_text=[] + co_text_header_text=[] + co_text_marginalia_text=[] + co_text_catch_text=[] + co_text_page_number_text=[] + co_text_signature_mark_text=[] + co_sep_text=[] + co_img_text=[] + co_table_text=[] + co_graphic_text=[] + co_graphic_text_annotation_text=[] + co_graphic_decoration_text=[] + co_noise_text=[] + + + id_paragraph = [] + id_header = [] + id_heading = [] + id_marginalia = [] + + for tag in region_tags: + if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): + for nn in root1.iter(tag): + for child2 in nn: + tag2 = child2.tag + #print(child2.tag) + if tag2.endswith('}TextEquiv') or tag2.endswith('}TextEquiv'): + #children2 = childtext.getchildren() + #rank = child2.find('Unicode').text + for childtext2 in child2: + #rank = childtext2.find('Unicode').text + #if childtext2.tag.endswith('}PlainText') or childtext2.tag.endswith('}PlainText'): + #print(childtext2.text) + if childtext2.tag.endswith('}Unicode') or childtext2.tag.endswith('}Unicode'): + if "type" in nn.attrib and nn.attrib['type']=='drop-capital': + co_text_drop_text.append(childtext2.text) + elif "type" in nn.attrib and nn.attrib['type']=='heading': + co_text_heading_text.append(childtext2.text) + elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': + co_text_signature_mark_text.append(childtext2.text) + elif "type" in nn.attrib and nn.attrib['type']=='header': + co_text_header_text.append(childtext2.text) + elif "type" in nn.attrib and nn.attrib['type']=='catch-word': + co_text_catch_text.append(childtext2.text) + elif "type" in nn.attrib and nn.attrib['type']=='page-number': + co_text_page_number_text.append(childtext2.text) + elif "type" in nn.attrib and nn.attrib['type']=='marginalia': + co_text_marginalia_text.append(childtext2.text) + else: + co_text_paragraph_text.append(childtext2.text) + c_t_in_drop=[] + c_t_in_paragraph=[] + c_t_in_heading=[] + c_t_in_header=[] + c_t_in_page_number=[] + c_t_in_signature_mark=[] + c_t_in_catch=[] + c_t_in_marginalia=[] + + + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + + coords=bool(vv.attrib) + if coords: + #print('birda1') + p_h=vv.attrib['points'].split(' ') + + + + if "type" in nn.attrib and nn.attrib['type']=='drop-capital': + #if nn.attrib['type']=='paragraph': + + c_t_in_drop.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + elif "type" in nn.attrib and nn.attrib['type']=='heading': + id_heading.append(nn.attrib['id']) + c_t_in_heading.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + + elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': + + c_t_in_signature_mark.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + #print(c_t_in_paragraph) + elif "type" in nn.attrib and nn.attrib['type']=='header': + id_header.append(nn.attrib['id']) + c_t_in_header.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + + elif "type" in nn.attrib and nn.attrib['type']=='catch-word': + c_t_in_catch.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + + elif "type" in nn.attrib and nn.attrib['type']=='page-number': + + c_t_in_page_number.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + #print(c_t_in_paragraph) + + elif "type" in nn.attrib and nn.attrib['type']=='marginalia': + id_marginalia.append(nn.attrib['id']) + + c_t_in_marginalia.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + #print(c_t_in_paragraph) + else: + #print(nn.attrib['id']) + + id_paragraph.append(nn.attrib['id']) + + c_t_in_paragraph.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + #print(c_t_in_paragraph) + + break + else: + pass + + + if vv.tag==link+'Point': + if "type" in nn.attrib and nn.attrib['type']=='drop-capital': + #if nn.attrib['type']=='paragraph': + + c_t_in_drop.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + elif "type" in nn.attrib and nn.attrib['type']=='heading': + id_heading.append(nn.attrib['id']) + c_t_in_heading.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + + elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': + + c_t_in_signature_mark.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + #print(c_t_in_paragraph) + sumi+=1 + elif "type" in nn.attrib and nn.attrib['type']=='header': + id_header.append(nn.attrib['id']) + c_t_in_header.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + + elif "type" in nn.attrib and nn.attrib['type']=='catch-word': + c_t_in_catch.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + + elif "type" in nn.attrib and nn.attrib['type']=='page-number': + + c_t_in_page_number.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + #print(c_t_in_paragraph) + sumi+=1 + + elif "type" in nn.attrib and nn.attrib['type']=='marginalia': + id_marginalia.append(nn.attrib['id']) + + c_t_in_marginalia.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + #print(c_t_in_paragraph) + sumi+=1 + + else: + id_paragraph.append(nn.attrib['id']) + c_t_in_paragraph.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + #print(c_t_in_paragraph) + sumi+=1 + + #c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + + if len(c_t_in_drop)>0: + co_text_drop.append(np.array(c_t_in_drop)) + if len(c_t_in_paragraph)>0: + co_text_paragraph.append(np.array(c_t_in_paragraph)) + if len(c_t_in_heading)>0: + co_text_heading.append(np.array(c_t_in_heading)) + + if len(c_t_in_header)>0: + co_text_header.append(np.array(c_t_in_header)) + if len(c_t_in_page_number)>0: + co_text_page_number.append(np.array(c_t_in_page_number)) + if len(c_t_in_catch)>0: + co_text_catch.append(np.array(c_t_in_catch)) + + if len(c_t_in_signature_mark)>0: + co_text_signature_mark.append(np.array(c_t_in_signature_mark)) + + if len(c_t_in_marginalia)>0: + co_text_marginalia.append(np.array(c_t_in_marginalia)) + + + elif tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + c_t_in_text_annotation=[] + c_t_in_decoration=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + #c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': + #if nn.attrib['type']=='paragraph': + + c_t_in_text_annotation.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + elif "type" in nn.attrib and nn.attrib['type']=='decoration': + + c_t_in_decoration.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + #print(c_t_in_paragraph) + else: + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + + + break + else: + pass + + + if vv.tag==link+'Point': + + if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': + #if nn.attrib['type']=='paragraph': + + c_t_in_text_annotation.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + elif "type" in nn.attrib and nn.attrib['type']=='decoration': + + c_t_in_decoration.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + #print(c_t_in_paragraph) + sumi+=1 + else: + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + + if len(c_t_in_text_annotation)>0: + co_graphic_text_annotation.append(np.array(c_t_in_text_annotation)) + if len(c_t_in_decoration)>0: + co_graphic_decoration.append(np.array(c_t_in_decoration)) + if len(c_t_in)>0: + co_graphic.append(np.array(c_t_in)) + + + + elif tag.endswith('}ImageRegion') or tag.endswith('}imageregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_img.append(np.array(c_t_in)) + co_img_text.append(' ') + + + elif tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_sep.append(np.array(c_t_in)) + + + + elif tag.endswith('}TableRegion') or tag.endswith('}tableregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_table.append(np.array(c_t_in)) + co_table_text.append(' ') + + elif tag.endswith('}NoiseRegion') or tag.endswith('}noiseregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_noise.append(np.array(c_t_in)) + co_noise_text.append(' ') + + + img = np.zeros( (y_len,x_len,3) ) + + img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=(1,1,1)) + + img_poly=cv2.fillPoly(img, pts =co_text_heading, color=(2,2,2)) + img_poly=cv2.fillPoly(img, pts =co_text_header, color=(2,2,2)) + #img_poly=cv2.fillPoly(img, pts =co_text_catch, color=(125,255,125)) + #img_poly=cv2.fillPoly(img, pts =co_text_signature_mark, color=(125,125,0)) + #img_poly=cv2.fillPoly(img, pts =co_graphic_decoration, color=(1,125,255)) + #img_poly=cv2.fillPoly(img, pts =co_text_page_number, color=(1,125,0)) + img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=(3,3,3)) + #img_poly=cv2.fillPoly(img, pts =co_text_drop, color=(1,125,255)) + + #img_poly=cv2.fillPoly(img, pts =co_graphic_text_annotation, color=(125,0,125)) + img_poly=cv2.fillPoly(img, pts =co_img, color=(4,4,4)) + img_poly=cv2.fillPoly(img, pts =co_sep, color=(5,5,5)) + #img_poly=cv2.fillPoly(img, pts =co_table, color=(1,255,255)) + #img_poly=cv2.fillPoly(img, pts =co_graphic, color=(255,125,125)) + #img_poly=cv2.fillPoly(img, pts =co_noise, color=(255,0,255)) + + #print('yazdimmm',self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.jpg') + ###try: + ####print('yazdimmm',self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.jpg') + ###cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.jpg',img_poly ) + ###except: + ###cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.jpg',img_poly ) + return file_name, id_paragraph, id_header,co_text_paragraph, co_text_header,\ +tot_region_ref,x_len, y_len,index_tot_regions, img_poly + + + + +def bounding_box(cnt,color, corr_order_index ): + x, y, w, h = cv2.boundingRect(cnt) + x = int(x*scale_w) + y = int(y*scale_h) + + w = int(w*scale_w) + h = int(h*scale_h) + + return [x,y,w,h,int(color), int(corr_order_index)+1] + +def resize_image(seg_in,input_height,input_width): + return cv2.resize(seg_in,(input_width,input_height),interpolation=cv2.INTER_NEAREST) + +def make_image_from_bb(width_l, height_l, bb_all): + bb_all =np.array(bb_all) + img_remade = np.zeros((height_l,width_l )) + + for i in range(bb_all.shape[0]): + img_remade[bb_all[i,1]:bb_all[i,1]+bb_all[i,3],bb_all[i,0]:bb_all[i,0]+bb_all[i,2] ] = 1 + return img_remade diff --git a/train/pagexml2label.py b/train/pagexml2label.py deleted file mode 100644 index 94596db..0000000 --- a/train/pagexml2label.py +++ /dev/null @@ -1,789 +0,0 @@ -import click -import sys -import os -import numpy as np -import warnings -import xml.etree.ElementTree as ET -from tqdm import tqdm -import cv2 -from shapely import geometry -import json - -with warnings.catch_warnings(): - warnings.simplefilter("ignore") - -__doc__=\ -""" -tool to extract 2d or 3d RGB images from page xml data. In former case output will be 1 -2D image array which each class has filled with a pixel value. In the case of 3D RGB image -each class will be defined with a RGB value and beside images a text file of classes also will be produced. -This classes.txt file is required for dhsegment tool. -""" -KERNEL = np.ones((5, 5), np.uint8) - -class pagexml2label: - def __init__(self,dir_in, out_dir,output_type,config): - self.dir=dir_in - self.output_dir=out_dir - self.output_type=output_type - self.config=config - - def get_content_of_dir(self): - """ - Listing all ground truth page xml files. All files are needed to have xml format. - """ - - gt_all=os.listdir(self.dir) - self.gt_list=[file for file in gt_all if file.split('.')[ len(file.split('.'))-1 ]=='xml' ] - - def return_parent_contours(self,contours, hierarchy): - contours_parent = [contours[i] for i in range(len(contours)) if hierarchy[0][i][3] == -1] - return contours_parent - def filter_contours_area_of_image_tables(self,image, contours, hierarchy, max_area, min_area): - found_polygons_early = list() - - jv = 0 - for c in contours: - if len(c) < 3: # A polygon cannot have less than 3 points - continue - - polygon = geometry.Polygon([point[0] for point in c]) - # area = cv2.contourArea(c) - area = polygon.area - ##print(np.prod(thresh.shape[:2])) - # Check that polygon has area greater than minimal area - # print(hierarchy[0][jv][3],hierarchy ) - if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 : - # print(c[0][0][1]) - found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.int32)) - jv += 1 - return found_polygons_early - - def return_contours_of_interested_region(self,region_pre_p, pixel, min_area=0.0002): - - # pixels of images are identified by 5 - if len(region_pre_p.shape) == 3: - cnts_images = (region_pre_p[:, :, 0] == pixel) * 1 - else: - cnts_images = (region_pre_p[:, :] == pixel) * 1 - cnts_images = cnts_images.astype(np.uint8) - cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) - imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) - - contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - - contours_imgs = self.return_parent_contours(contours_imgs, hierarchy) - contours_imgs = self.filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=min_area) - - return contours_imgs - def update_region_contours(self, co_text, img_boundary, erosion_rate, dilation_rate, y_len, x_len): - co_text_eroded = [] - for con in co_text: - #try: - img_boundary_in = np.zeros( (y_len,x_len) ) - img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) - #print('bidiahhhhaaa') - - - - #img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=7)#asiatica - if erosion_rate > 0: - img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=erosion_rate) - - pixel = 1 - min_size = 0 - con_eroded = self.return_contours_of_interested_region(img_boundary_in,pixel, min_size ) - - try: - co_text_eroded.append(con_eroded[0]) - except: - co_text_eroded.append(con) - - - img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=dilation_rate) - #img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=5) - - boundary = img_boundary_in_dilated[:,:] - img_boundary_in[:,:] - - img_boundary[:,:][boundary[:,:]==1] =1 - return co_text_eroded, img_boundary - def get_images_of_ground_truth(self, config_params): - """ - Reading the page xml files and write the ground truth images into given output directory. - """ - ## to do: add footnote to text regions - for index in tqdm(range(len(self.gt_list))): - #try: - tree1 = ET.parse(self.dir+'/'+self.gt_list[index]) - root1=tree1.getroot() - alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' - - - - for jj in root1.iter(link+'Page'): - y_len=int(jj.attrib['imageHeight']) - x_len=int(jj.attrib['imageWidth']) - - if self.config and (config_params['use_case']=='textline' or config_params['use_case']=='word' or config_params['use_case']=='glyph' or config_params['use_case']=='printspace'): - keys = list(config_params.keys()) - if "artificial_class_label" in keys: - artificial_class_rgb_color = (255,255,0) - artificial_class_label = config_params['artificial_class_label'] - - textline_rgb_color = (255, 0, 0) - - if config_params['use_case']=='textline': - region_tags = np.unique([x for x in alltags if x.endswith('TextLine')]) - elif config_params['use_case']=='word': - region_tags = np.unique([x for x in alltags if x.endswith('Word')]) - elif config_params['use_case']=='glyph': - region_tags = np.unique([x for x in alltags if x.endswith('Glyph')]) - elif config_params['use_case']=='printspace': - region_tags = np.unique([x for x in alltags if x.endswith('PrintSpace')]) - - co_use_case = [] - - for tag in region_tags: - if config_params['use_case']=='textline': - tag_endings = ['}TextLine','}textline'] - elif config_params['use_case']=='word': - tag_endings = ['}Word','}word'] - elif config_params['use_case']=='glyph': - tag_endings = ['}Glyph','}glyph'] - elif config_params['use_case']=='printspace': - tag_endings = ['}PrintSpace','}printspace'] - - if tag.endswith(tag_endings[0]) or tag.endswith(tag_endings[1]): - for nn in root1.iter(tag): - c_t_in = [] - sumi = 0 - for vv in nn.iter(): - # check the format of coords - if vv.tag == link + 'Coords': - coords = bool(vv.attrib) - if coords: - p_h = vv.attrib['points'].split(' ') - c_t_in.append( - np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) - break - else: - pass - - if vv.tag == link + 'Point': - c_t_in.append([int(np.float(vv.attrib['x'])), int(np.float(vv.attrib['y']))]) - sumi += 1 - elif vv.tag != link + 'Point' and sumi >= 1: - break - co_use_case.append(np.array(c_t_in)) - - - - if "artificial_class_label" in keys: - img_boundary = np.zeros((y_len, x_len)) - erosion_rate = 1 - dilation_rate = 3 - co_use_case, img_boundary = self.update_region_contours(co_use_case, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - - - img = np.zeros((y_len, x_len, 3)) - if self.output_type == '2d': - img_poly = cv2.fillPoly(img, pts=co_use_case, color=(1, 1, 1)) - if "artificial_class_label" in keys: - img_poly[:,:][img_boundary[:,:]==1] = artificial_class_label - elif self.output_type == '3d': - img_poly = cv2.fillPoly(img, pts=co_use_case, color=textline_rgb_color) - if "artificial_class_label" in keys: - img_poly[:,:,0][img_boundary[:,:]==1] = artificial_class_rgb_color[0] - img_poly[:,:,1][img_boundary[:,:]==1] = artificial_class_rgb_color[1] - img_poly[:,:,2][img_boundary[:,:]==1] = artificial_class_rgb_color[2] - - try: - cv2.imwrite(self.output_dir + '/' + self.gt_list[index].split('-')[1].split('.')[0] + '.png', - img_poly) - except: - cv2.imwrite(self.output_dir + '/' + self.gt_list[index].split('.')[0] + '.png', img_poly) - - - if self.config and config_params['use_case']=='layout': - keys = list(config_params.keys()) - if "artificial_class_on_boundry" in keys: - elements_with_artificial_class = list(config_params['artificial_class_on_boundry']) - artificial_class_rgb_color = (255,255,0) - artificial_class_label = config_params['artificial_class_label'] - #values = config_params.values() - - if 'textregions' in keys: - types_text_dict = config_params['textregions'] - types_text = list(types_text_dict.keys()) - types_text_label = list(types_text_dict.values()) - print(types_text) - if 'graphicregions' in keys: - types_graphic_dict = config_params['graphicregions'] - types_graphic = list(types_graphic_dict.keys()) - types_graphic_label = list(types_graphic_dict.values()) - - - labels_rgb_color = [ (0,0,0), (255,0,0), (255,125,0), (255,0,125), (125,255,125), (125,125,0), (0,125,255), (0,125,0), (125,125,125), (255,0,255), (125,0,125), (0,255,0),(0,0,255), (0,255,255), (255,125,125), (0,125,125), (0,255,125), (255,125,255), (125,255,0)] - - region_tags=np.unique([x for x in alltags if x.endswith('Region')]) - - co_text_paragraph=[] - co_text_footnote=[] - co_text_footnote_con=[] - co_text_drop=[] - co_text_heading=[] - co_text_header=[] - co_text_marginalia=[] - co_text_catch=[] - co_text_page_number=[] - co_text_signature_mark=[] - co_sep=[] - co_img=[] - co_table=[] - co_graphic_signature=[] - co_graphic_text_annotation=[] - co_graphic_decoration=[] - co_graphic_stamp=[] - co_noise=[] - - for tag in region_tags: - if 'textregions' in keys: - if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): - for nn in root1.iter(tag): - c_t_in_drop=[] - c_t_in_paragraph=[] - c_t_in_heading=[] - c_t_in_header=[] - c_t_in_page_number=[] - c_t_in_signature_mark=[] - c_t_in_catch=[] - c_t_in_marginalia=[] - c_t_in_footnote=[] - c_t_in_footnote_con=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - - coords=bool(vv.attrib) - if coords: - #print('birda1') - p_h=vv.attrib['points'].split(' ') - - if "drop-capital" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - c_t_in_drop.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "footnote" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='footnote': - c_t_in_footnote.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "footnote-continued" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='footnote-continued': - c_t_in_footnote_con.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "heading" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='heading': - c_t_in_heading.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "signature-mark" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='signature-mark': - c_t_in_signature_mark.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "header" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='header': - c_t_in_header.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "catch-word" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='catch-word': - c_t_in_catch.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "page-number" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='page-number': - c_t_in_page_number.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "marginalia" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='marginalia': - c_t_in_marginalia.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "paragraph" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='paragraph': - c_t_in_paragraph.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - break - else: - pass - - - if vv.tag==link+'Point': - if "drop-capital" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - c_t_in_drop.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "footnote" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='footnote': - c_t_in_footnote.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "footnote-continued" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='footnote-continued': - c_t_in_footnote_con.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "heading" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='heading': - c_t_in_heading.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "signature-mark" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='signature-mark': - c_t_in_signature_mark.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "header" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='header': - c_t_in_header.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "catch-word" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='catch-word': - c_t_in_catch.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "page-number" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='page-number': - c_t_in_page_number.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "marginalia" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='marginalia': - c_t_in_marginalia.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "paragraph" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='paragraph': - c_t_in_paragraph.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - - elif vv.tag!=link+'Point' and sumi>=1: - break - - if len(c_t_in_drop)>0: - co_text_drop.append(np.array(c_t_in_drop)) - if len(c_t_in_footnote_con)>0: - co_text_footnote_con.append(np.array(c_t_in_footnote_con)) - if len(c_t_in_footnote)>0: - co_text_footnote.append(np.array(c_t_in_footnote)) - if len(c_t_in_paragraph)>0: - co_text_paragraph.append(np.array(c_t_in_paragraph)) - if len(c_t_in_heading)>0: - co_text_heading.append(np.array(c_t_in_heading)) - - if len(c_t_in_header)>0: - co_text_header.append(np.array(c_t_in_header)) - if len(c_t_in_page_number)>0: - co_text_page_number.append(np.array(c_t_in_page_number)) - if len(c_t_in_catch)>0: - co_text_catch.append(np.array(c_t_in_catch)) - - if len(c_t_in_signature_mark)>0: - co_text_signature_mark.append(np.array(c_t_in_signature_mark)) - - if len(c_t_in_marginalia)>0: - co_text_marginalia.append(np.array(c_t_in_marginalia)) - - - if 'graphicregions' in keys: - if tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in_stamp=[] - c_t_in_text_annotation=[] - c_t_in_decoration=[] - c_t_in_signature=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - if "handwritten-annotation" in types_graphic: - if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': - c_t_in_text_annotation.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "decoration" in types_graphic: - if "type" in nn.attrib and nn.attrib['type']=='decoration': - c_t_in_decoration.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "stamp" in types_graphic: - if "type" in nn.attrib and nn.attrib['type']=='stamp': - c_t_in_stamp.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "signature" in types_graphic: - if "type" in nn.attrib and nn.attrib['type']=='signature': - c_t_in_signature.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - - - break - else: - pass - - - if vv.tag==link+'Point': - if "handwritten-annotation" in types_graphic: - if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': - c_t_in_text_annotation.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "decoration" in types_graphic: - if "type" in nn.attrib and nn.attrib['type']=='decoration': - c_t_in_decoration.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "stamp" in types_graphic: - if "type" in nn.attrib and nn.attrib['type']=='stamp': - c_t_in_stamp.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "signature" in types_graphic: - if "type" in nn.attrib and nn.attrib['type']=='signature': - c_t_in_signature.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if len(c_t_in_text_annotation)>0: - co_graphic_text_annotation.append(np.array(c_t_in_text_annotation)) - if len(c_t_in_decoration)>0: - co_graphic_decoration.append(np.array(c_t_in_decoration)) - if len(c_t_in_stamp)>0: - co_graphic_stamp.append(np.array(c_t_in_stamp)) - if len(c_t_in_signature)>0: - co_graphic_signature.append(np.array(c_t_in_signature)) - - if 'imageregion' in keys: - if tag.endswith('}ImageRegion') or tag.endswith('}imageregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - co_img.append(np.array(c_t_in)) - - - if 'separatorregion' in keys: - if tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - elif vv.tag!=link+'Point' and sumi>=1: - break - co_sep.append(np.array(c_t_in)) - - - - if 'tableregion' in keys: - if tag.endswith('}TableRegion') or tag.endswith('}tableregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_table.append(np.array(c_t_in)) - - if 'noiseregion' in keys: - if tag.endswith('}NoiseRegion') or tag.endswith('}noiseregion'): - #print('sth') - for nn in root1.iter(tag): - c_t_in=[] - sumi=0 - for vv in nn.iter(): - # check the format of coords - if vv.tag==link+'Coords': - coords=bool(vv.attrib) - if coords: - p_h=vv.attrib['points'].split(' ') - c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break - else: - pass - - - if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - #print(vv.tag,'in') - elif vv.tag!=link+'Point' and sumi>=1: - break - co_noise.append(np.array(c_t_in)) - - if "artificial_class_on_boundry" in keys: - img_boundary = np.zeros( (y_len,x_len) ) - if "paragraph" in elements_with_artificial_class: - erosion_rate = 2 - dilation_rate = 4 - co_text_paragraph, img_boundary = self.update_region_contours(co_text_paragraph, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "drop-capital" in elements_with_artificial_class: - erosion_rate = 0 - dilation_rate = 4 - co_text_drop, img_boundary = self.update_region_contours(co_text_drop, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "catch-word" in elements_with_artificial_class: - erosion_rate = 0 - dilation_rate = 4 - co_text_catch, img_boundary = self.update_region_contours(co_text_catch, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "page-number" in elements_with_artificial_class: - erosion_rate = 0 - dilation_rate = 4 - co_text_page_number, img_boundary = self.update_region_contours(co_text_page_number, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "header" in elements_with_artificial_class: - erosion_rate = 1 - dilation_rate = 4 - co_text_header, img_boundary = self.update_region_contours(co_text_header, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "heading" in elements_with_artificial_class: - erosion_rate = 1 - dilation_rate = 4 - co_text_heading, img_boundary = self.update_region_contours(co_text_heading, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "signature-mark" in elements_with_artificial_class: - erosion_rate = 1 - dilation_rate = 4 - co_text_signature_mark, img_boundary = self.update_region_contours(co_text_signature_mark, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "marginalia" in elements_with_artificial_class: - erosion_rate = 2 - dilation_rate = 4 - co_text_marginalia, img_boundary = self.update_region_contours(co_text_marginalia, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "footnote" in elements_with_artificial_class: - erosion_rate = 2 - dilation_rate = 4 - co_text_footnote, img_boundary = self.update_region_contours(co_text_footnote, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - if "footnote-continued" in elements_with_artificial_class: - erosion_rate = 2 - dilation_rate = 4 - co_text_footnote_con, img_boundary = self.update_region_contours(co_text_footnote_con, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) - - - - img = np.zeros( (y_len,x_len,3) ) - - if self.output_type == '3d': - - if 'graphicregions' in keys: - if "handwritten-annotation" in types_graphic: - img_poly=cv2.fillPoly(img, pts =co_graphic_text_annotation, color=labels_rgb_color[ config_params['graphicregions']['handwritten-annotation']]) - if "signature" in types_graphic: - img_poly=cv2.fillPoly(img, pts =co_graphic_signature, color=labels_rgb_color[ config_params['graphicregions']['signature']]) - if "decoration" in types_graphic: - img_poly=cv2.fillPoly(img, pts =co_graphic_decoration, color=labels_rgb_color[ config_params['graphicregions']['decoration']]) - if "stamp" in types_graphic: - img_poly=cv2.fillPoly(img, pts =co_graphic_stamp, color=labels_rgb_color[ config_params['graphicregions']['stamp']]) - - if 'imageregion' in keys: - img_poly=cv2.fillPoly(img, pts =co_img, color=labels_rgb_color[ config_params['imageregion']]) - if 'separatorregion' in keys: - img_poly=cv2.fillPoly(img, pts =co_sep, color=labels_rgb_color[ config_params['separatorregion']]) - if 'tableregion' in keys: - img_poly=cv2.fillPoly(img, pts =co_table, color=labels_rgb_color[ config_params['tableregion']]) - if 'noiseregion' in keys: - img_poly=cv2.fillPoly(img, pts =co_noise, color=labels_rgb_color[ config_params['noiseregion']]) - - if 'textregions' in keys: - if "paragraph" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=labels_rgb_color[ config_params['textregions']['paragraph']]) - if "footnote" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_footnote, color=labels_rgb_color[ config_params['textregions']['footnote']]) - if "footnote-continued" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_footnote_con, color=labels_rgb_color[ config_params['textregions']['footnote-continued']]) - if "heading" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_heading, color=labels_rgb_color[ config_params['textregions']['heading']]) - if "header" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_header, color=labels_rgb_color[ config_params['textregions']['header']]) - if "catch-word" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_catch, color=labels_rgb_color[ config_params['textregions']['catch-word']]) - if "signature-mark" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_signature_mark, color=labels_rgb_color[ config_params['textregions']['signature-mark']]) - if "page-number" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_page_number, color=labels_rgb_color[ config_params['textregions']['page-number']]) - if "marginalia" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=labels_rgb_color[ config_params['textregions']['marginalia']]) - if "drop-capital" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_drop, color=labels_rgb_color[ config_params['textregions']['drop-capital']]) - - if "artificial_class_on_boundry" in keys: - img_poly[:,:,0][img_boundary[:,:]==1] = artificial_class_rgb_color[0] - img_poly[:,:,1][img_boundary[:,:]==1] = artificial_class_rgb_color[1] - img_poly[:,:,2][img_boundary[:,:]==1] = artificial_class_rgb_color[2] - - - - - elif self.output_type == '2d': - if 'graphicregions' in keys: - if "handwritten-annotation" in types_graphic: - color_label = config_params['graphicregions']['handwritten-annotation'] - img_poly=cv2.fillPoly(img, pts =co_graphic_text_annotation, color=(color_label,color_label,color_label)) - if "signature" in types_graphic: - color_label = config_params['graphicregions']['signature'] - img_poly=cv2.fillPoly(img, pts =co_graphic_signature, color=(color_label,color_label,color_label)) - if "decoration" in types_graphic: - color_label = config_params['graphicregions']['decoration'] - img_poly=cv2.fillPoly(img, pts =co_graphic_decoration, color=(color_label,color_label,color_label)) - if "stamp" in types_graphic: - color_label = config_params['graphicregions']['stamp'] - img_poly=cv2.fillPoly(img, pts =co_graphic_stamp, color=(color_label,color_label,color_label)) - - if 'imageregion' in keys: - color_label = config_params['imageregion'] - img_poly=cv2.fillPoly(img, pts =co_img, color=(color_label,color_label,color_label)) - if 'separatorregion' in keys: - color_label = config_params['separatorregion'] - img_poly=cv2.fillPoly(img, pts =co_sep, color=(color_label,color_label,color_label)) - if 'tableregion' in keys: - color_label = config_params['tableregion'] - img_poly=cv2.fillPoly(img, pts =co_table, color=(color_label,color_label,color_label)) - if 'noiseregion' in keys: - color_label = config_params['noiseregion'] - img_poly=cv2.fillPoly(img, pts =co_noise, color=(color_label,color_label,color_label)) - - if 'textregions' in keys: - if "paragraph" in types_text: - color_label = config_params['textregions']['paragraph'] - img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=(color_label,color_label,color_label)) - if "footnote" in types_text: - color_label = config_params['textregions']['footnote'] - img_poly=cv2.fillPoly(img, pts =co_text_footnote, color=(color_label,color_label,color_label)) - if "footnote-continued" in types_text: - color_label = config_params['textregions']['footnote-continued'] - img_poly=cv2.fillPoly(img, pts =co_text_footnote_con, color=(color_label,color_label,color_label)) - if "heading" in types_text: - color_label = config_params['textregions']['heading'] - img_poly=cv2.fillPoly(img, pts =co_text_heading, color=(color_label,color_label,color_label)) - if "header" in types_text: - color_label = config_params['textregions']['header'] - img_poly=cv2.fillPoly(img, pts =co_text_header, color=(color_label,color_label,color_label)) - if "catch-word" in types_text: - color_label = config_params['textregions']['catch-word'] - img_poly=cv2.fillPoly(img, pts =co_text_catch, color=(color_label,color_label,color_label)) - if "signature-mark" in types_text: - color_label = config_params['textregions']['signature-mark'] - img_poly=cv2.fillPoly(img, pts =co_text_signature_mark, color=(color_label,color_label,color_label)) - if "page-number" in types_text: - color_label = config_params['textregions']['page-number'] - img_poly=cv2.fillPoly(img, pts =co_text_page_number, color=(color_label,color_label,color_label)) - if "marginalia" in types_text: - color_label = config_params['textregions']['marginalia'] - img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=(color_label,color_label,color_label)) - if "drop-capital" in types_text: - color_label = config_params['textregions']['drop-capital'] - img_poly=cv2.fillPoly(img, pts =co_text_drop, color=(color_label,color_label,color_label)) - - if "artificial_class_on_boundry" in keys: - img_poly[:,:][img_boundary[:,:]==1] = artificial_class_label - - - - - try: - cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) - except: - cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.png',img_poly ) - - - def run(self,config_params): - self.get_content_of_dir() - self.get_images_of_ground_truth(config_params) - - -@click.command() -@click.option( - "--dir_xml", - "-dx", - help="directory of GT page-xml files", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--dir_out", - "-do", - help="directory where ground truth images would be written", - type=click.Path(exists=True, file_okay=False), -) - -@click.option( - "--config", - "-cfg", - help="config file of prefered layout or use case.", - type=click.Path(exists=True, dir_okay=False), -) - -@click.option( - "--type_output", - "-to", - help="this defines how output should be. A 2d image array or a 3d image array encoded with RGB color. Just pass 2d or 3d. The file will be saved one directory up. 2D image array is 3d but only information of one channel would be enough since all channels have the same values.", -) - - -def main(dir_xml,dir_out,type_output,config): - if config: - with open(config) as f: - config_params = json.load(f) - else: - print("passed") - config_params = None - x=pagexml2label(dir_xml,dir_out,type_output, config) - x.run(config_params) -if __name__=="__main__": - main() - - - From 9638098ae7e5269a597a98937f3c239270575525 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 24 May 2024 16:39:48 +0200 Subject: [PATCH 055/492] machine based reading order training is integrated --- train/models.py | 55 +++++++++++++++++++++++++++++++++++++++++++++++++ train/train.py | 31 ++++++++++++++++++++++++++++ train/utils.py | 23 +++++++++++++++++++++ 3 files changed, 109 insertions(+) diff --git a/train/models.py b/train/models.py index 4cceacd..d852ac3 100644 --- a/train/models.py +++ b/train/models.py @@ -544,4 +544,59 @@ def resnet50_classifier(n_classes,input_height=224,input_width=224,weight_decay= + return model + +def machine_based_reading_order_model(n_classes,input_height=224,input_width=224,weight_decay=1e-6,pretraining=False): + assert input_height%32 == 0 + assert input_width%32 == 0 + + img_input = Input(shape=(input_height,input_width , 3 )) + + if IMAGE_ORDERING == 'channels_last': + bn_axis = 3 + else: + bn_axis = 1 + + x1 = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(img_input) + x1 = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2),kernel_regularizer=l2(weight_decay), name='conv1')(x1) + + x1 = BatchNormalization(axis=bn_axis, name='bn_conv1')(x1) + x1 = Activation('relu')(x1) + x1 = MaxPooling2D((3, 3) , data_format=IMAGE_ORDERING , strides=(2, 2))(x1) + + x1 = conv_block(x1, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) + x1 = identity_block(x1, 3, [64, 64, 256], stage=2, block='b') + x1 = identity_block(x1, 3, [64, 64, 256], stage=2, block='c') + + x1 = conv_block(x1, 3, [128, 128, 512], stage=3, block='a') + x1 = identity_block(x1, 3, [128, 128, 512], stage=3, block='b') + x1 = identity_block(x1, 3, [128, 128, 512], stage=3, block='c') + x1 = identity_block(x1, 3, [128, 128, 512], stage=3, block='d') + + x1 = conv_block(x1, 3, [256, 256, 1024], stage=4, block='a') + x1 = identity_block(x1, 3, [256, 256, 1024], stage=4, block='b') + x1 = identity_block(x1, 3, [256, 256, 1024], stage=4, block='c') + x1 = identity_block(x1, 3, [256, 256, 1024], stage=4, block='d') + x1 = identity_block(x1, 3, [256, 256, 1024], stage=4, block='e') + x1 = identity_block(x1, 3, [256, 256, 1024], stage=4, block='f') + + x1 = conv_block(x1, 3, [512, 512, 2048], stage=5, block='a') + x1 = identity_block(x1, 3, [512, 512, 2048], stage=5, block='b') + x1 = identity_block(x1, 3, [512, 512, 2048], stage=5, block='c') + + if pretraining: + Model(img_input , x1).load_weights(resnet50_Weights_path) + + x1 = AveragePooling2D((7, 7), name='avg_pool1')(x1) + flattened = Flatten()(x1) + + o = Dense(256, activation='relu', name='fc512')(flattened) + o=Dropout(0.2)(o) + + o = Dense(256, activation='relu', name='fc512a')(o) + o=Dropout(0.2)(o) + + o = Dense(n_classes, activation='sigmoid', name='fc1000')(o) + model = Model(img_input , o) + return model diff --git a/train/train.py b/train/train.py index 78974d3..f338c78 100644 --- a/train/train.py +++ b/train/train.py @@ -313,4 +313,35 @@ def run(_config, n_classes, n_epochs, input_height, with open(os.path.join( os.path.join(dir_output,'model_best'), "config.json"), "w") as fp: json.dump(_config, fp) # encode dict into JSON + + elif task=='reading_order': + configuration() + model = machine_based_reading_order_model(n_classes,input_height,input_width,weight_decay,pretraining) + + dir_flow_train_imgs = os.path.join(dir_train, 'images') + dir_flow_train_labels = os.path.join(dir_train, 'labels') + + classes = os.listdir(dir_flow_train_labels) + num_rows =len(classes) + #ls_test = os.listdir(dir_flow_train_labels) + + #f1score_tot = [0] + indexer_start = 0 + opt = SGD(lr=0.01, momentum=0.9) + opt_adam = tf.keras.optimizers.Adam(learning_rate=0.0001) + model.compile(loss="binary_crossentropy", + optimizer = opt_adam,metrics=['accuracy']) + for i in range(n_epochs): + history = model.fit(generate_arrays_from_folder_reading_order(dir_flow_train_labels, dir_flow_train_imgs, n_batch, input_height, input_width, n_classes), steps_per_epoch=num_rows / n_batch, verbose=1) + model.save( os.path.join(dir_output,'model_'+str(i+indexer_start) )) + + with open(os.path.join(os.path.join(dir_output,'model_'+str(i)),"config.json"), "w") as fp: + json.dump(_config, fp) # encode dict into JSON + ''' + if f1score>f1score_tot[0]: + f1score_tot[0] = f1score + model_dir = os.path.join(dir_out,'model_best') + model.save(model_dir) + ''' + diff --git a/train/utils.py b/train/utils.py index 271d977..a2e8a9c 100644 --- a/train/utils.py +++ b/train/utils.py @@ -268,6 +268,29 @@ def IoU(Yi, y_predi): #print("Mean IoU: {:4.3f}".format(mIoU)) return mIoU +def generate_arrays_from_folder_reading_order(classes_file_dir, modal_dir, batchsize, height, width, n_classes): + all_labels_files = os.listdir(classes_file_dir) + ret_x= np.zeros((batchsize, height, width, 3))#.astype(np.int16) + ret_y= np.zeros((batchsize, n_classes)).astype(np.int16) + batchcount = 0 + while True: + for i in all_labels_files: + file_name = i.split('.')[0] + img = cv2.imread(os.path.join(modal_dir,file_name+'.png')) + + label_class = int( np.load(os.path.join(classes_file_dir,i)) ) + + ret_x[batchcount, :,:,0] = img[:,:,0]/3.0 + ret_x[batchcount, :,:,2] = img[:,:,2]/3.0 + ret_x[batchcount, :,:,1] = img[:,:,1]/5.0 + + ret_y[batchcount, :] = label_class + batchcount+=1 + if batchcount>=batchsize: + yield (ret_x, ret_y) + ret_x= np.zeros((batchsize, height, width, 3))#.astype(np.int16) + ret_y= np.zeros((batchsize, n_classes)).astype(np.int16) + batchcount = 0 def data_gen(img_folder, mask_folder, batch_size, input_height, input_width, n_classes, task='segmentation'): c = 0 From ccf520d3c73d7c1132509434a206ddb2d504b5c2 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 27 May 2024 17:23:49 +0200 Subject: [PATCH 056/492] adding rest_as_paragraph and rest_as_graphic to elements --- train/custom_config_page2label.json | 10 +- train/gt_gen_utils.py | 454 ++++++++++------------------ 2 files changed, 170 insertions(+), 294 deletions(-) diff --git a/train/custom_config_page2label.json b/train/custom_config_page2label.json index d6320fa..e4c02cb 100644 --- a/train/custom_config_page2label.json +++ b/train/custom_config_page2label.json @@ -1,9 +1,9 @@ { "use_case": "layout", -"textregions":{"paragraph":1, "heading": 2, "header":2,"drop-capital": 3, "marginalia":4 ,"page-number":1 , "catch-word":1 ,"footnote": 1, "footnote-continued": 1}, -"imageregion":5, -"separatorregion":6, -"graphicregions" :{"handwritten-annotation":5, "decoration": 5, "signature": 5, "stamp": 5}, -"artificial_class_on_boundry": ["paragraph","header", "heading", "marginalia", "page-number", "catch-word", "drop-capital","footnote", "footnote-continued"], +"textregions":{ "rest_as_paragraph": 1, "header":2 , "heading":2 , "marginalia":3 }, +"imageregion":4, +"separatorregion":5, +"graphicregions" :{"rest_as_decoration":6}, +"artificial_class_on_boundry": ["paragraph"], "artificial_class_label":7 } diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 9862e29..9dc8377 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -180,7 +180,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ pass if vv.tag == link + 'Point': - c_t_in.append([int(np.float(vv.attrib['x'])), int(np.float(vv.attrib['y']))]) + c_t_in.append([int(float(vv.attrib['x'])), int(float(vv.attrib['y']))]) sumi += 1 elif vv.tag != link + 'Point' and sumi >= 1: break @@ -226,7 +226,6 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ types_text_dict = config_params['textregions'] types_text = list(types_text_dict.keys()) types_text_label = list(types_text_dict.values()) - print(types_text) if 'graphicregions' in keys: types_graphic_dict = config_params['graphicregions'] types_graphic = list(types_graphic_dict.keys()) @@ -235,41 +234,20 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ labels_rgb_color = [ (0,0,0), (255,0,0), (255,125,0), (255,0,125), (125,255,125), (125,125,0), (0,125,255), (0,125,0), (125,125,125), (255,0,255), (125,0,125), (0,255,0),(0,0,255), (0,255,255), (255,125,125), (0,125,125), (0,255,125), (255,125,255), (125,255,0)] + region_tags=np.unique([x for x in alltags if x.endswith('Region')]) - - co_text_paragraph=[] - co_text_footnote=[] - co_text_footnote_con=[] - co_text_drop=[] - co_text_heading=[] - co_text_header=[] - co_text_marginalia=[] - co_text_catch=[] - co_text_page_number=[] - co_text_signature_mark=[] + co_text = {'drop-capital':[], "footnote":[], "footnote-continued":[], "heading":[], "signature-mark":[], "header":[], "catch-word":[], "page-number":[], "marginalia":[], "paragraph":[]} + co_graphic = {"handwritten-annotation":[], "decoration":[], "stamp":[], "signature":[]} co_sep=[] co_img=[] co_table=[] - co_graphic_signature=[] - co_graphic_text_annotation=[] - co_graphic_decoration=[] - co_graphic_stamp=[] co_noise=[] for tag in region_tags: if 'textregions' in keys: if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): for nn in root1.iter(tag): - c_t_in_drop=[] - c_t_in_paragraph=[] - c_t_in_heading=[] - c_t_in_header=[] - c_t_in_page_number=[] - c_t_in_signature_mark=[] - c_t_in_catch=[] - c_t_in_marginalia=[] - c_t_in_footnote=[] - c_t_in_footnote_con=[] + c_t_in = {'drop-capital':[], "footnote":[], "footnote-continued":[], "heading":[], "signature-mark":[], "header":[], "catch-word":[], "page-number":[], "marginalia":[], "paragraph":[]} sumi=0 for vv in nn.iter(): # check the format of coords @@ -277,143 +255,63 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ coords=bool(vv.attrib) if coords: - #print('birda1') p_h=vv.attrib['points'].split(' ') - if "drop-capital" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - c_t_in_drop.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "footnote" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='footnote': - c_t_in_footnote.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "footnote-continued" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='footnote-continued': - c_t_in_footnote_con.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "heading" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='heading': - c_t_in_heading.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "signature-mark" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='signature-mark': - c_t_in_signature_mark.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "header" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='header': - c_t_in_header.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "catch-word" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='catch-word': - c_t_in_catch.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "page-number" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='page-number': - c_t_in_page_number.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "marginalia" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='marginalia': - c_t_in_marginalia.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "paragraph" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='paragraph': - c_t_in_paragraph.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - + if "rest_as_paragraph" in types_text: + types_text_without_paragraph = [element for element in types_text if element!='rest_as_paragraph' and element!='paragraph'] + if len(types_text_without_paragraph) == 0: + if "type" in nn.attrib: + c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + elif len(types_text_without_paragraph) >= 1: + if "type" in nn.attrib: + if nn.attrib['type'] in types_text_without_paragraph: + c_t_in[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + else: + c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + else: + if "type" in nn.attrib: + c_t_in[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) break else: pass - + if vv.tag==link+'Point': - if "drop-capital" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - c_t_in_drop.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + if "rest_as_paragraph" in types_text: + types_text_without_paragraph = [element for element in types_text if element!='rest_as_paragraph' and element!='paragraph'] + if len(types_text_without_paragraph) == 0: + if "type" in nn.attrib: + c_t_in['paragraph'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) + sumi+=1 + elif len(types_text_without_paragraph) >= 1: + if "type" in nn.attrib: + if nn.attrib['type'] in types_text_without_paragraph: + c_t_in[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) + sumi+=1 + else: + c_t_in['paragraph'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) + sumi+=1 + + else: + if "type" in nn.attrib: + c_t_in[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) sumi+=1 - - if "footnote" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='footnote': - c_t_in_footnote.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "footnote-continued" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='footnote-continued': - c_t_in_footnote_con.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "heading" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='heading': - c_t_in_heading.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "signature-mark" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='signature-mark': - c_t_in_signature_mark.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "header" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='header': - c_t_in_header.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "catch-word" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='catch-word': - c_t_in_catch.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "page-number" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='page-number': - c_t_in_page_number.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "marginalia" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='marginalia': - c_t_in_marginalia.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "paragraph" in types_text: - if "type" in nn.attrib and nn.attrib['type']=='paragraph': - c_t_in_paragraph.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - + elif vv.tag!=link+'Point' and sumi>=1: break - if len(c_t_in_drop)>0: - co_text_drop.append(np.array(c_t_in_drop)) - if len(c_t_in_footnote_con)>0: - co_text_footnote_con.append(np.array(c_t_in_footnote_con)) - if len(c_t_in_footnote)>0: - co_text_footnote.append(np.array(c_t_in_footnote)) - if len(c_t_in_paragraph)>0: - co_text_paragraph.append(np.array(c_t_in_paragraph)) - if len(c_t_in_heading)>0: - co_text_heading.append(np.array(c_t_in_heading)) - - if len(c_t_in_header)>0: - co_text_header.append(np.array(c_t_in_header)) - if len(c_t_in_page_number)>0: - co_text_page_number.append(np.array(c_t_in_page_number)) - if len(c_t_in_catch)>0: - co_text_catch.append(np.array(c_t_in_catch)) - - if len(c_t_in_signature_mark)>0: - co_text_signature_mark.append(np.array(c_t_in_signature_mark)) - - if len(c_t_in_marginalia)>0: - co_text_marginalia.append(np.array(c_t_in_marginalia)) - - + for element_text in list(c_t_in.keys()): + if len(c_t_in[element_text])>0: + co_text[element_text].append(np.array(c_t_in[element_text])) + if 'graphicregions' in keys: if tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): #print('sth') for nn in root1.iter(tag): - c_t_in_stamp=[] - c_t_in_text_annotation=[] - c_t_in_decoration=[] - c_t_in_signature=[] + c_t_in_graphic = {"handwritten-annotation":[], "decoration":[], "stamp":[], "signature":[]} sumi=0 for vv in nn.iter(): # check the format of coords @@ -421,23 +319,22 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ coords=bool(vv.attrib) if coords: p_h=vv.attrib['points'].split(' ') - if "handwritten-annotation" in types_graphic: - if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': - c_t_in_text_annotation.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "decoration" in types_graphic: - if "type" in nn.attrib and nn.attrib['type']=='decoration': - c_t_in_decoration.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "stamp" in types_graphic: - if "type" in nn.attrib and nn.attrib['type']=='stamp': - c_t_in_stamp.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - - if "signature" in types_graphic: - if "type" in nn.attrib and nn.attrib['type']=='signature': - c_t_in_signature.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - + if "rest_as_decoration" in types_graphic: + types_graphic_without_decoration = [element for element in types_graphic if element!='rest_as_decoration' and element!='decoration'] + if len(types_graphic_without_decoration) == 0: + if "type" in nn.attrib: + c_t_in_graphic['decoration'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + elif len(types_graphic_without_decoration) >= 1: + if "type" in nn.attrib: + if nn.attrib['type'] in types_graphic_without_decoration: + c_t_in_graphic[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + else: + c_t_in_graphic['decoration'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + else: + if "type" in nn.attrib: + c_t_in_graphic[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) break else: @@ -445,34 +342,33 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if vv.tag==link+'Point': - if "handwritten-annotation" in types_graphic: - if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': - c_t_in_text_annotation.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + if "rest_as_decoration" in types_graphic: + types_graphic_without_decoration = [element for element in types_graphic if element!='rest_as_decoration' and element!='decoration'] + if len(types_graphic_without_decoration) == 0: + if "type" in nn.attrib: + c_t_in_graphic['decoration'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) + sumi+=1 + elif len(types_graphic_without_decoration) >= 1: + if "type" in nn.attrib: + if nn.attrib['type'] in types_graphic_without_decoration: + c_t_in_graphic[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) + sumi+=1 + else: + c_t_in_graphic['decoration'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) + sumi+=1 + + else: + if "type" in nn.attrib: + c_t_in_graphic[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) sumi+=1 - if "decoration" in types_graphic: - if "type" in nn.attrib and nn.attrib['type']=='decoration': - c_t_in_decoration.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "stamp" in types_graphic: - if "type" in nn.attrib and nn.attrib['type']=='stamp': - c_t_in_stamp.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 - - if "signature" in types_graphic: - if "type" in nn.attrib and nn.attrib['type']=='signature': - c_t_in_signature.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) - sumi+=1 + elif vv.tag!=link+'Point' and sumi>=1: + break + + for element_graphic in list(c_t_in_graphic.keys()): + if len(c_t_in_graphic[element_graphic])>0: + co_graphic[element_graphic].append(np.array(c_t_in_graphic[element_graphic])) - if len(c_t_in_text_annotation)>0: - co_graphic_text_annotation.append(np.array(c_t_in_text_annotation)) - if len(c_t_in_decoration)>0: - co_graphic_decoration.append(np.array(c_t_in_decoration)) - if len(c_t_in_stamp)>0: - co_graphic_stamp.append(np.array(c_t_in_stamp)) - if len(c_t_in_signature)>0: - co_graphic_signature.append(np.array(c_t_in_signature)) if 'imageregion' in keys: if tag.endswith('}ImageRegion') or tag.endswith('}imageregion'): @@ -491,7 +387,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 elif vv.tag!=link+'Point' and sumi>=1: @@ -517,7 +413,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 elif vv.tag!=link+'Point' and sumi>=1: @@ -545,7 +441,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 #print(vv.tag,'in') elif vv.tag!=link+'Point' and sumi>=1: @@ -571,7 +467,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 #print(vv.tag,'in') elif vv.tag!=link+'Point' and sumi>=1: @@ -583,59 +479,63 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if "paragraph" in elements_with_artificial_class: erosion_rate = 2 dilation_rate = 4 - co_text_paragraph, img_boundary = update_region_contours(co_text_paragraph, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + co_text['paragraph'], img_boundary = update_region_contours(co_text['paragraph'], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "drop-capital" in elements_with_artificial_class: erosion_rate = 0 dilation_rate = 4 - co_text_drop, img_boundary = update_region_contours(co_text_drop, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + co_text["drop-capital"], img_boundary = update_region_contours(co_text["drop-capital"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "catch-word" in elements_with_artificial_class: erosion_rate = 0 dilation_rate = 4 - co_text_catch, img_boundary = update_region_contours(co_text_catch, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + co_text["catch-word"], img_boundary = update_region_contours(co_text["catch-word"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "page-number" in elements_with_artificial_class: erosion_rate = 0 dilation_rate = 4 - co_text_page_number, img_boundary = update_region_contours(co_text_page_number, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + co_text["page-number"], img_boundary = update_region_contours(co_text["page-number"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "header" in elements_with_artificial_class: erosion_rate = 1 dilation_rate = 4 - co_text_header, img_boundary = update_region_contours(co_text_header, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + co_text["header"], img_boundary = update_region_contours(co_text["header"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "heading" in elements_with_artificial_class: erosion_rate = 1 dilation_rate = 4 - co_text_heading, img_boundary = update_region_contours(co_text_heading, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + co_text["heading"], img_boundary = update_region_contours(co_text["heading"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "signature-mark" in elements_with_artificial_class: erosion_rate = 1 dilation_rate = 4 - co_text_signature_mark, img_boundary = update_region_contours(co_text_signature_mark, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + co_text["signature-mark"], img_boundary = update_region_contours(co_text["signature-mark"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "marginalia" in elements_with_artificial_class: erosion_rate = 2 dilation_rate = 4 - co_text_marginalia, img_boundary = update_region_contours(co_text_marginalia, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + co_text["marginalia"], img_boundary = update_region_contours(co_text["marginalia"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "footnote" in elements_with_artificial_class: erosion_rate = 2 dilation_rate = 4 - co_text_footnote, img_boundary = update_region_contours(co_text_footnote, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + co_text["footnote"], img_boundary = update_region_contours(co_text["footnote"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "footnote-continued" in elements_with_artificial_class: erosion_rate = 2 dilation_rate = 4 - co_text_footnote_con, img_boundary = update_region_contours(co_text_footnote_con, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + co_text["footnote-continued"], img_boundary = update_region_contours(co_text["footnote-continued"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) img = np.zeros( (y_len,x_len,3) ) if output_type == '3d': - if 'graphicregions' in keys: - if "handwritten-annotation" in types_graphic: - img_poly=cv2.fillPoly(img, pts =co_graphic_text_annotation, color=labels_rgb_color[ config_params['graphicregions']['handwritten-annotation']]) - if "signature" in types_graphic: - img_poly=cv2.fillPoly(img, pts =co_graphic_signature, color=labels_rgb_color[ config_params['graphicregions']['signature']]) - if "decoration" in types_graphic: - img_poly=cv2.fillPoly(img, pts =co_graphic_decoration, color=labels_rgb_color[ config_params['graphicregions']['decoration']]) - if "stamp" in types_graphic: - img_poly=cv2.fillPoly(img, pts =co_graphic_stamp, color=labels_rgb_color[ config_params['graphicregions']['stamp']]) + if 'rest_as_decoration' in types_graphic: + types_graphic[types_graphic=='rest_as_decoration'] = 'decoration' + for element_graphic in types_graphic: + if element_graphic == 'decoration': + color_label = labels_rgb_color[ config_params['graphicregions']['rest_as_decoration']] + else: + color_label = labels_rgb_color[ config_params['graphicregions'][element_graphic]] + img_poly=cv2.fillPoly(img, pts =co_graphic[element_graphic], color=color_label) + else: + for element_graphic in types_graphic: + color_label = labels_rgb_color[ config_params['graphicregions'][element_graphic]] + img_poly=cv2.fillPoly(img, pts =co_graphic[element_graphic], color=color_label) + if 'imageregion' in keys: img_poly=cv2.fillPoly(img, pts =co_img, color=labels_rgb_color[ config_params['imageregion']]) @@ -647,26 +547,19 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ img_poly=cv2.fillPoly(img, pts =co_noise, color=labels_rgb_color[ config_params['noiseregion']]) if 'textregions' in keys: - if "paragraph" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=labels_rgb_color[ config_params['textregions']['paragraph']]) - if "footnote" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_footnote, color=labels_rgb_color[ config_params['textregions']['footnote']]) - if "footnote-continued" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_footnote_con, color=labels_rgb_color[ config_params['textregions']['footnote-continued']]) - if "heading" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_heading, color=labels_rgb_color[ config_params['textregions']['heading']]) - if "header" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_header, color=labels_rgb_color[ config_params['textregions']['header']]) - if "catch-word" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_catch, color=labels_rgb_color[ config_params['textregions']['catch-word']]) - if "signature-mark" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_signature_mark, color=labels_rgb_color[ config_params['textregions']['signature-mark']]) - if "page-number" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_page_number, color=labels_rgb_color[ config_params['textregions']['page-number']]) - if "marginalia" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=labels_rgb_color[ config_params['textregions']['marginalia']]) - if "drop-capital" in types_text: - img_poly=cv2.fillPoly(img, pts =co_text_drop, color=labels_rgb_color[ config_params['textregions']['drop-capital']]) + if 'rest_as_paragraph' in types_text: + types_text[types_text=='rest_as_paragraph'] = 'paragraph' + for element_text in types_text: + if element_text == 'paragraph': + color_label = labels_rgb_color[ config_params['textregions']['rest_as_paragraph']] + else: + color_label = labels_rgb_color[ config_params['textregions'][element_text]] + img_poly=cv2.fillPoly(img, pts =co_text[element_text], color=color_label) + else: + for element_text in types_text: + color_label = labels_rgb_color[ config_params['textregions'][element_text]] + img_poly=cv2.fillPoly(img, pts =co_text[element_text], color=color_label) + if "artificial_class_on_boundry" in keys: img_poly[:,:,0][img_boundary[:,:]==1] = artificial_class_rgb_color[0] @@ -678,18 +571,19 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ elif output_type == '2d': if 'graphicregions' in keys: - if "handwritten-annotation" in types_graphic: - color_label = config_params['graphicregions']['handwritten-annotation'] - img_poly=cv2.fillPoly(img, pts =co_graphic_text_annotation, color=(color_label,color_label,color_label)) - if "signature" in types_graphic: - color_label = config_params['graphicregions']['signature'] - img_poly=cv2.fillPoly(img, pts =co_graphic_signature, color=(color_label,color_label,color_label)) - if "decoration" in types_graphic: - color_label = config_params['graphicregions']['decoration'] - img_poly=cv2.fillPoly(img, pts =co_graphic_decoration, color=(color_label,color_label,color_label)) - if "stamp" in types_graphic: - color_label = config_params['graphicregions']['stamp'] - img_poly=cv2.fillPoly(img, pts =co_graphic_stamp, color=(color_label,color_label,color_label)) + if 'rest_as_decoration' in types_graphic: + types_graphic[types_graphic=='rest_as_decoration'] = 'decoration' + for element_graphic in types_graphic: + if element_graphic == 'decoration': + color_label = config_params['graphicregions']['rest_as_decoration'] + else: + color_label = config_params['graphicregions'][element_graphic] + img_poly=cv2.fillPoly(img, pts =co_graphic[element_graphic], color=color_label) + else: + for element_graphic in types_graphic: + color_label = config_params['graphicregions'][element_graphic] + img_poly=cv2.fillPoly(img, pts =co_graphic[element_graphic], color=color_label) + if 'imageregion' in keys: color_label = config_params['imageregion'] @@ -705,36 +599,18 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ img_poly=cv2.fillPoly(img, pts =co_noise, color=(color_label,color_label,color_label)) if 'textregions' in keys: - if "paragraph" in types_text: - color_label = config_params['textregions']['paragraph'] - img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=(color_label,color_label,color_label)) - if "footnote" in types_text: - color_label = config_params['textregions']['footnote'] - img_poly=cv2.fillPoly(img, pts =co_text_footnote, color=(color_label,color_label,color_label)) - if "footnote-continued" in types_text: - color_label = config_params['textregions']['footnote-continued'] - img_poly=cv2.fillPoly(img, pts =co_text_footnote_con, color=(color_label,color_label,color_label)) - if "heading" in types_text: - color_label = config_params['textregions']['heading'] - img_poly=cv2.fillPoly(img, pts =co_text_heading, color=(color_label,color_label,color_label)) - if "header" in types_text: - color_label = config_params['textregions']['header'] - img_poly=cv2.fillPoly(img, pts =co_text_header, color=(color_label,color_label,color_label)) - if "catch-word" in types_text: - color_label = config_params['textregions']['catch-word'] - img_poly=cv2.fillPoly(img, pts =co_text_catch, color=(color_label,color_label,color_label)) - if "signature-mark" in types_text: - color_label = config_params['textregions']['signature-mark'] - img_poly=cv2.fillPoly(img, pts =co_text_signature_mark, color=(color_label,color_label,color_label)) - if "page-number" in types_text: - color_label = config_params['textregions']['page-number'] - img_poly=cv2.fillPoly(img, pts =co_text_page_number, color=(color_label,color_label,color_label)) - if "marginalia" in types_text: - color_label = config_params['textregions']['marginalia'] - img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=(color_label,color_label,color_label)) - if "drop-capital" in types_text: - color_label = config_params['textregions']['drop-capital'] - img_poly=cv2.fillPoly(img, pts =co_text_drop, color=(color_label,color_label,color_label)) + if 'rest_as_paragraph' in types_text: + types_text[types_text=='rest_as_paragraph'] = 'paragraph' + for element_text in types_text: + if element_text == 'paragraph': + color_label = config_params['textregions']['rest_as_paragraph'] + else: + color_label = config_params['textregions'][element_text] + img_poly=cv2.fillPoly(img, pts =co_text[element_text], color=color_label) + else: + for element_text in types_text: + color_label = config_params['textregions'][element_text] + img_poly=cv2.fillPoly(img, pts =co_text[element_text], color=color_label) if "artificial_class_on_boundry" in keys: img_poly[:,:][img_boundary[:,:]==1] = artificial_class_label @@ -947,51 +823,51 @@ def read_xml(xml_file): if "type" in nn.attrib and nn.attrib['type']=='drop-capital': #if nn.attrib['type']=='paragraph': - c_t_in_drop.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in_drop.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 elif "type" in nn.attrib and nn.attrib['type']=='heading': id_heading.append(nn.attrib['id']) - c_t_in_heading.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in_heading.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': - c_t_in_signature_mark.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in_signature_mark.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) #print(c_t_in_paragraph) sumi+=1 elif "type" in nn.attrib and nn.attrib['type']=='header': id_header.append(nn.attrib['id']) - c_t_in_header.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in_header.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 elif "type" in nn.attrib and nn.attrib['type']=='catch-word': - c_t_in_catch.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in_catch.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 elif "type" in nn.attrib and nn.attrib['type']=='page-number': - c_t_in_page_number.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in_page_number.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) #print(c_t_in_paragraph) sumi+=1 elif "type" in nn.attrib and nn.attrib['type']=='marginalia': id_marginalia.append(nn.attrib['id']) - c_t_in_marginalia.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in_marginalia.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) #print(c_t_in_paragraph) sumi+=1 else: id_paragraph.append(nn.attrib['id']) - c_t_in_paragraph.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in_paragraph.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) #print(c_t_in_paragraph) sumi+=1 - #c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + #c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) #print(vv.tag,'in') elif vv.tag!=link+'Point' and sumi>=1: @@ -1057,16 +933,16 @@ def read_xml(xml_file): if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': #if nn.attrib['type']=='paragraph': - c_t_in_text_annotation.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in_text_annotation.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 elif "type" in nn.attrib and nn.attrib['type']=='decoration': - c_t_in_decoration.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in_decoration.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) #print(c_t_in_paragraph) sumi+=1 else: - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 if len(c_t_in_text_annotation)>0: @@ -1096,7 +972,7 @@ def read_xml(xml_file): if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 #print(vv.tag,'in') elif vv.tag!=link+'Point' and sumi>=1: @@ -1123,7 +999,7 @@ def read_xml(xml_file): if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 #print(vv.tag,'in') elif vv.tag!=link+'Point' and sumi>=1: @@ -1150,7 +1026,7 @@ def read_xml(xml_file): if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 #print(vv.tag,'in') elif vv.tag!=link+'Point' and sumi>=1: @@ -1176,7 +1052,7 @@ def read_xml(xml_file): if vv.tag==link+'Point': - c_t_in.append([ int(np.float(vv.attrib['x'])) , int(np.float(vv.attrib['y'])) ]) + c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 #print(vv.tag,'in') elif vv.tag!=link+'Point' and sumi>=1: From 467bbb2884e1b900e819370b1e88853c24d60e90 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 28 May 2024 10:01:17 +0200 Subject: [PATCH 057/492] pass degrading scales for image enhancement as a json file --- train/generate_gt_for_training.py | 16 ++++++++++------ train/scales_enhancement.json | 3 +++ 2 files changed, 13 insertions(+), 6 deletions(-) create mode 100644 train/scales_enhancement.json diff --git a/train/generate_gt_for_training.py b/train/generate_gt_for_training.py index e296029..2a2a776 100644 --- a/train/generate_gt_for_training.py +++ b/train/generate_gt_for_training.py @@ -64,13 +64,17 @@ def pagexml2label(dir_xml,dir_out,type_output,config): help="directory where original images will be written as labels.", type=click.Path(exists=True, file_okay=False), ) -def image_enhancement(dir_imgs, dir_out_images, dir_out_labels): - #dir_imgs = './training_data_sample_enhancement/images' - #dir_out_images = './training_data_sample_enhancement/images_gt' - #dir_out_labels = './training_data_sample_enhancement/labels_gt' - +@click.option( + "--scales", + "-scs", + help="json dictionary where the scales are written.", + type=click.Path(exists=True, dir_okay=False), +) +def image_enhancement(dir_imgs, dir_out_images, dir_out_labels, scales): ls_imgs = os.listdir(dir_imgs) - ls_scales = [ 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9] + with open(scales) as f: + scale_dict = json.load(f) + ls_scales = scale_dict['scales'] for img in tqdm(ls_imgs): img_name = img.split('.')[0] diff --git a/train/scales_enhancement.json b/train/scales_enhancement.json new file mode 100644 index 0000000..58034f0 --- /dev/null +++ b/train/scales_enhancement.json @@ -0,0 +1,3 @@ +{ + "scales" : [ 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9] +} From cc7577d2c121ca14180bbc732355e35d7be80af8 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 28 May 2024 10:14:16 +0200 Subject: [PATCH 058/492] min area size of text region passes as an argument for machine based reading order --- train/generate_gt_for_training.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/train/generate_gt_for_training.py b/train/generate_gt_for_training.py index 2a2a776..cf2b2a6 100644 --- a/train/generate_gt_for_training.py +++ b/train/generate_gt_for_training.py @@ -116,22 +116,28 @@ def image_enhancement(dir_imgs, dir_out_images, dir_out_labels, scales): @click.option( "--input_height", "-ih", - help="input_height", + help="input height", ) @click.option( "--input_width", "-iw", - help="input_width", + help="input width", +) +@click.option( + "--min_area_size", + "-min", + help="min area size of regions considered for reading order training.", ) -def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, input_height, input_width): +def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, input_height, input_width, min_area_size): xml_files_ind = os.listdir(dir_xml) input_height = int(input_height) input_width = int(input_width) + min_area = float(min_area_size) indexer_start= 0#55166 max_area = 1 - min_area = 0.0001 + #min_area = 0.0001 for ind_xml in tqdm(xml_files_ind): indexer = 0 From 4fb45a671114c8d44b100dd799e097a3b669c27a Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 28 May 2024 16:48:51 +0200 Subject: [PATCH 059/492] inference for reading order --- train/gt_gen_utils.py | 134 +++++++++-------------------- train/inference.py | 196 +++++++++++++++++++++++++++++++++++++++--- 2 files changed, 227 insertions(+), 103 deletions(-) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 9dc8377..0286ac7 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -38,11 +38,8 @@ def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, m polygon = geometry.Polygon([point[0] for point in c]) # area = cv2.contourArea(c) area = polygon.area - ##print(np.prod(thresh.shape[:2])) # Check that polygon has area greater than minimal area - # print(hierarchy[0][jv][3],hierarchy ) if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 : - # print(c[0][0][1]) found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.int32)) jv += 1 return found_polygons_early @@ -52,15 +49,12 @@ def filter_contours_area_of_image(image, contours, order_index, max_area, min_ar order_index_filtered = list() #jv = 0 for jv, c in enumerate(contours): - #print(len(c[0])) c = c[0] if len(c) < 3: # A polygon cannot have less than 3 points continue c_e = [point for point in c] - #print(c_e) polygon = geometry.Polygon(c_e) area = polygon.area - #print(area,'area') if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 : found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.uint)) order_index_filtered.append(order_index[jv]) @@ -88,12 +82,8 @@ def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002): def update_region_contours(co_text, img_boundary, erosion_rate, dilation_rate, y_len, x_len): co_text_eroded = [] for con in co_text: - #try: img_boundary_in = np.zeros( (y_len,x_len) ) img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) - #print('bidiahhhhaaa') - - #img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=7)#asiatica if erosion_rate > 0: @@ -626,8 +616,6 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ def find_new_features_of_contours(contours_main): - - #print(contours_main[0][0][:, 0]) areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))]) M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] @@ -658,8 +646,6 @@ def find_new_features_of_contours(contours_main): y_min_main = np.array([np.min(contours_main[j][:, 1]) for j in range(len(contours_main))]) y_max_main = np.array([np.max(contours_main[j][:, 1]) for j in range(len(contours_main))]) - # dis_x=np.abs(x_max_main-x_min_main) - return cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin def read_xml(xml_file): file_name = Path(xml_file).stem @@ -675,13 +661,11 @@ def read_xml(xml_file): y_len=int(jj.attrib['imageHeight']) x_len=int(jj.attrib['imageWidth']) - for jj in root1.iter(link+'RegionRefIndexed'): index_tot_regions.append(jj.attrib['index']) tot_region_ref.append(jj.attrib['regionRef']) region_tags=np.unique([x for x in alltags if x.endswith('Region')]) - #print(region_tags) co_text_paragraph=[] co_text_drop=[] co_text_heading=[] @@ -698,7 +682,6 @@ def read_xml(xml_file): co_graphic_decoration=[] co_noise=[] - co_text_paragraph_text=[] co_text_drop_text=[] co_text_heading_text=[] @@ -715,7 +698,6 @@ def read_xml(xml_file): co_graphic_decoration_text=[] co_noise_text=[] - id_paragraph = [] id_header = [] id_heading = [] @@ -726,14 +708,8 @@ def read_xml(xml_file): for nn in root1.iter(tag): for child2 in nn: tag2 = child2.tag - #print(child2.tag) if tag2.endswith('}TextEquiv') or tag2.endswith('}TextEquiv'): - #children2 = childtext.getchildren() - #rank = child2.find('Unicode').text for childtext2 in child2: - #rank = childtext2.find('Unicode').text - #if childtext2.tag.endswith('}PlainText') or childtext2.tag.endswith('}PlainText'): - #print(childtext2.text) if childtext2.tag.endswith('}Unicode') or childtext2.tag.endswith('}Unicode'): if "type" in nn.attrib and nn.attrib['type']=='drop-capital': co_text_drop_text.append(childtext2.text) @@ -743,10 +719,10 @@ def read_xml(xml_file): co_text_signature_mark_text.append(childtext2.text) elif "type" in nn.attrib and nn.attrib['type']=='header': co_text_header_text.append(childtext2.text) - elif "type" in nn.attrib and nn.attrib['type']=='catch-word': - co_text_catch_text.append(childtext2.text) - elif "type" in nn.attrib and nn.attrib['type']=='page-number': - co_text_page_number_text.append(childtext2.text) + ###elif "type" in nn.attrib and nn.attrib['type']=='catch-word': + ###co_text_catch_text.append(childtext2.text) + ###elif "type" in nn.attrib and nn.attrib['type']=='page-number': + ###co_text_page_number_text.append(childtext2.text) elif "type" in nn.attrib and nn.attrib['type']=='marginalia': co_text_marginalia_text.append(childtext2.text) else: @@ -774,7 +750,6 @@ def read_xml(xml_file): if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - #if nn.attrib['type']=='paragraph': c_t_in_drop.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) @@ -792,27 +767,22 @@ def read_xml(xml_file): c_t_in_header.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - elif "type" in nn.attrib and nn.attrib['type']=='catch-word': - c_t_in_catch.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + ###elif "type" in nn.attrib and nn.attrib['type']=='catch-word': + ###c_t_in_catch.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - elif "type" in nn.attrib and nn.attrib['type']=='page-number': + ###elif "type" in nn.attrib and nn.attrib['type']=='page-number': - c_t_in_page_number.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - #print(c_t_in_paragraph) + ###c_t_in_page_number.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) elif "type" in nn.attrib and nn.attrib['type']=='marginalia': id_marginalia.append(nn.attrib['id']) c_t_in_marginalia.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - #print(c_t_in_paragraph) else: - #print(nn.attrib['id']) - id_paragraph.append(nn.attrib['id']) c_t_in_paragraph.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - #print(c_t_in_paragraph) break else: @@ -821,7 +791,6 @@ def read_xml(xml_file): if vv.tag==link+'Point': if "type" in nn.attrib and nn.attrib['type']=='drop-capital': - #if nn.attrib['type']=='paragraph': c_t_in_drop.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 @@ -835,7 +804,6 @@ def read_xml(xml_file): elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': c_t_in_signature_mark.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - #print(c_t_in_paragraph) sumi+=1 elif "type" in nn.attrib and nn.attrib['type']=='header': id_header.append(nn.attrib['id']) @@ -843,33 +811,26 @@ def read_xml(xml_file): sumi+=1 - elif "type" in nn.attrib and nn.attrib['type']=='catch-word': - c_t_in_catch.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - sumi+=1 + ###elif "type" in nn.attrib and nn.attrib['type']=='catch-word': + ###c_t_in_catch.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + ###sumi+=1 + ###elif "type" in nn.attrib and nn.attrib['type']=='page-number': - elif "type" in nn.attrib and nn.attrib['type']=='page-number': - - c_t_in_page_number.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - #print(c_t_in_paragraph) - sumi+=1 + ###c_t_in_page_number.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + ###sumi+=1 elif "type" in nn.attrib and nn.attrib['type']=='marginalia': id_marginalia.append(nn.attrib['id']) c_t_in_marginalia.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - #print(c_t_in_paragraph) sumi+=1 else: id_paragraph.append(nn.attrib['id']) c_t_in_paragraph.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - #print(c_t_in_paragraph) sumi+=1 - #c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - - #print(vv.tag,'in') elif vv.tag!=link+'Point' and sumi>=1: break @@ -895,7 +856,6 @@ def read_xml(xml_file): elif tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): - #print('sth') for nn in root1.iter(tag): c_t_in=[] c_t_in_text_annotation=[] @@ -907,40 +867,31 @@ def read_xml(xml_file): coords=bool(vv.attrib) if coords: p_h=vv.attrib['points'].split(' ') - #c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': - #if nn.attrib['type']=='paragraph': - c_t_in_text_annotation.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - + elif "type" in nn.attrib and nn.attrib['type']=='decoration': - c_t_in_decoration.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - #print(c_t_in_paragraph) + else: c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) - break else: pass if vv.tag==link+'Point': - if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': - #if nn.attrib['type']=='paragraph': - c_t_in_text_annotation.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 elif "type" in nn.attrib and nn.attrib['type']=='decoration': - c_t_in_decoration.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) - #print(c_t_in_paragraph) sumi+=1 + else: c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 @@ -955,7 +906,6 @@ def read_xml(xml_file): elif tag.endswith('}ImageRegion') or tag.endswith('}imageregion'): - #print('sth') for nn in root1.iter(tag): c_t_in=[] sumi=0 @@ -974,7 +924,6 @@ def read_xml(xml_file): if vv.tag==link+'Point': c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 - #print(vv.tag,'in') elif vv.tag!=link+'Point' and sumi>=1: break co_img.append(np.array(c_t_in)) @@ -982,7 +931,6 @@ def read_xml(xml_file): elif tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): - #print('sth') for nn in root1.iter(tag): c_t_in=[] sumi=0 @@ -1001,7 +949,6 @@ def read_xml(xml_file): if vv.tag==link+'Point': c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 - #print(vv.tag,'in') elif vv.tag!=link+'Point' and sumi>=1: break co_sep.append(np.array(c_t_in)) @@ -1009,7 +956,6 @@ def read_xml(xml_file): elif tag.endswith('}TableRegion') or tag.endswith('}tableregion'): - #print('sth') for nn in root1.iter(tag): c_t_in=[] sumi=0 @@ -1028,14 +974,13 @@ def read_xml(xml_file): if vv.tag==link+'Point': c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 - #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: break co_table.append(np.array(c_t_in)) co_table_text.append(' ') elif tag.endswith('}NoiseRegion') or tag.endswith('}noiseregion'): - #print('sth') for nn in root1.iter(tag): c_t_in=[] sumi=0 @@ -1054,40 +999,22 @@ def read_xml(xml_file): if vv.tag==link+'Point': c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 - #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: break co_noise.append(np.array(c_t_in)) co_noise_text.append(' ') - img = np.zeros( (y_len,x_len,3) ) - img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=(1,1,1)) img_poly=cv2.fillPoly(img, pts =co_text_heading, color=(2,2,2)) img_poly=cv2.fillPoly(img, pts =co_text_header, color=(2,2,2)) - #img_poly=cv2.fillPoly(img, pts =co_text_catch, color=(125,255,125)) - #img_poly=cv2.fillPoly(img, pts =co_text_signature_mark, color=(125,125,0)) - #img_poly=cv2.fillPoly(img, pts =co_graphic_decoration, color=(1,125,255)) - #img_poly=cv2.fillPoly(img, pts =co_text_page_number, color=(1,125,0)) img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=(3,3,3)) - #img_poly=cv2.fillPoly(img, pts =co_text_drop, color=(1,125,255)) - - #img_poly=cv2.fillPoly(img, pts =co_graphic_text_annotation, color=(125,0,125)) img_poly=cv2.fillPoly(img, pts =co_img, color=(4,4,4)) img_poly=cv2.fillPoly(img, pts =co_sep, color=(5,5,5)) - #img_poly=cv2.fillPoly(img, pts =co_table, color=(1,255,255)) - #img_poly=cv2.fillPoly(img, pts =co_graphic, color=(255,125,125)) - #img_poly=cv2.fillPoly(img, pts =co_noise, color=(255,0,255)) - #print('yazdimmm',self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.jpg') - ###try: - ####print('yazdimmm',self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.jpg') - ###cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('-')[1].split('.')[0]+'.jpg',img_poly ) - ###except: - ###cv2.imwrite(self.output_dir+'/'+self.gt_list[index].split('.')[0]+'.jpg',img_poly ) - return file_name, id_paragraph, id_header,co_text_paragraph, co_text_header,\ + return tree1, root1, file_name, id_paragraph, id_header,co_text_paragraph, co_text_header,\ tot_region_ref,x_len, y_len,index_tot_regions, img_poly @@ -1113,3 +1040,24 @@ def make_image_from_bb(width_l, height_l, bb_all): for i in range(bb_all.shape[0]): img_remade[bb_all[i,1]:bb_all[i,1]+bb_all[i,3],bb_all[i,0]:bb_all[i,0]+bb_all[i,2] ] = 1 return img_remade + +def update_list_and_return_first_with_length_bigger_than_one(index_element_to_be_updated, innner_index_pr_pos, pr_list, pos_list,list_inp): + list_inp.pop(index_element_to_be_updated) + if len(pr_list)>0: + list_inp.insert(index_element_to_be_updated, pr_list) + else: + index_element_to_be_updated = index_element_to_be_updated -1 + + list_inp.insert(index_element_to_be_updated+1, [innner_index_pr_pos]) + if len(pos_list)>0: + list_inp.insert(index_element_to_be_updated+2, pos_list) + + len_all_elements = [len(i) for i in list_inp] + list_len_bigger_1 = np.where(np.array(len_all_elements)>1) + list_len_bigger_1 = list_len_bigger_1[0] + + if len(list_len_bigger_1)>0: + early_list_bigger_than_one = list_len_bigger_1[0] + else: + early_list_bigger_than_one = -20 + return list_inp, early_list_bigger_than_one diff --git a/train/inference.py b/train/inference.py index 94e318d..73b4ed8 100644 --- a/train/inference.py +++ b/train/inference.py @@ -11,13 +11,11 @@ from tensorflow.keras import layers import tensorflow.keras.losses from tensorflow.keras.layers import * from models import * +from gt_gen_utils import * import click import json from tensorflow.python.keras import backend as tensorflow_backend - - - - +import xml.etree.ElementTree as ET with warnings.catch_warnings(): @@ -29,7 +27,7 @@ Tool to load model and predict for given image. """ class sbb_predict: - def __init__(self,image, model, task, config_params_model, patches, save, ground_truth): + def __init__(self,image, model, task, config_params_model, patches, save, ground_truth, xml_file): self.image=image self.patches=patches self.save=save @@ -37,6 +35,7 @@ class sbb_predict: self.ground_truth=ground_truth self.task=task self.config_params_model=config_params_model + self.xml_file = xml_file def resize_image(self,img_in,input_height,input_width): return cv2.resize( img_in, ( input_width,input_height) ,interpolation=cv2.INTER_NEAREST) @@ -166,7 +165,7 @@ class sbb_predict: ##if self.weights_dir!=None: ##self.model.load_weights(self.weights_dir) - if self.task != 'classification': + if (self.task != 'classification' and self.task != 'reading_order'): self.img_height=self.model.layers[len(self.model.layers)-1].output_shape[1] self.img_width=self.model.layers[len(self.model.layers)-1].output_shape[2] self.n_classes=self.model.layers[len(self.model.layers)-1].output_shape[3] @@ -233,6 +232,178 @@ class sbb_predict: index_class = np.argmax(label_p_pred[0]) print("Predicted Class: {}".format(classes_names[str(int(index_class))])) + elif self.task == 'reading_order': + img_height = self.config_params_model['input_height'] + img_width = self.config_params_model['input_width'] + + tree_xml, root_xml, file_name, id_paragraph, id_header, co_text_paragraph, co_text_header, tot_region_ref, x_len, y_len, index_tot_regions, img_poly = read_xml(self.xml_file) + _, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, _ = find_new_features_of_contours(co_text_header) + + img_header_and_sep = np.zeros((y_len,x_len), dtype='uint8') + + for j in range(len(cy_main)): + img_header_and_sep[int(y_max_main[j]):int(y_max_main[j])+12,int(x_min_main[j]):int(x_max_main[j]) ] = 1 + + co_text_all = co_text_paragraph + co_text_header + id_all_text = id_paragraph + id_header + + ##texts_corr_order_index = [index_tot_regions[tot_region_ref.index(i)] for i in id_all_text ] + ##texts_corr_order_index_int = [int(x) for x in texts_corr_order_index] + texts_corr_order_index_int = list(np.array(range(len(co_text_all)))) + + min_area = 0 + max_area = 1 + + co_text_all, texts_corr_order_index_int = filter_contours_area_of_image(img_poly, co_text_all, texts_corr_order_index_int, max_area, min_area) + + labels_con = np.zeros((y_len,x_len,len(co_text_all)),dtype='uint8') + for i in range(len(co_text_all)): + img_label = np.zeros((y_len,x_len,3),dtype='uint8') + img_label=cv2.fillPoly(img_label, pts =[co_text_all[i]], color=(1,1,1)) + labels_con[:,:,i] = img_label[:,:,0] + + img3= np.copy(img_poly) + labels_con = resize_image(labels_con, img_height, img_width) + + img_header_and_sep = resize_image(img_header_and_sep, img_height, img_width) + + img3= resize_image (img3, img_height, img_width) + img3 = img3.astype(np.uint16) + + inference_bs = 1#4 + + input_1= np.zeros( (inference_bs, img_height, img_width,3)) + + + starting_list_of_regions = [] + starting_list_of_regions.append( list(range(labels_con.shape[2])) ) + + index_update = 0 + index_selected = starting_list_of_regions[0] + + scalibility_num = 0 + while index_update>=0: + ij_list = starting_list_of_regions[index_update] + i = ij_list[0] + ij_list.pop(0) + + + pr_list = [] + post_list = [] + + batch_counter = 0 + tot_counter = 1 + + tot_iteration = len(ij_list) + full_bs_ite= tot_iteration//inference_bs + last_bs = tot_iteration % inference_bs + + jbatch_indexer =[] + for j in ij_list: + img1= np.repeat(labels_con[:,:,i][:, :, np.newaxis], 3, axis=2) + img2 = np.repeat(labels_con[:,:,j][:, :, np.newaxis], 3, axis=2) + + + img2[:,:,0][img3[:,:,0]==5] = 2 + img2[:,:,0][img_header_and_sep[:,:]==1] = 3 + + + + img1[:,:,0][img3[:,:,0]==5] = 2 + img1[:,:,0][img_header_and_sep[:,:]==1] = 3 + + #input_1= np.zeros( (height1, width1,3)) + + + jbatch_indexer.append(j) + + input_1[batch_counter,:,:,0] = img1[:,:,0]/3. + input_1[batch_counter,:,:,2] = img2[:,:,0]/3. + input_1[batch_counter,:,:,1] = img3[:,:,0]/5. + #input_1[batch_counter,:,:,:]= np.zeros( (batch_counter, height1, width1,3)) + batch_counter = batch_counter+1 + + #input_1[:,:,0] = img1[:,:,0]/3. + #input_1[:,:,2] = img2[:,:,0]/3. + #input_1[:,:,1] = img3[:,:,0]/5. + + if batch_counter==inference_bs or ( (tot_counter//inference_bs)==full_bs_ite and tot_counter%inference_bs==last_bs): + y_pr = self.model.predict(input_1 , verbose=0) + scalibility_num = scalibility_num+1 + + if batch_counter==inference_bs: + iteration_batches = inference_bs + else: + iteration_batches = last_bs + for jb in range(iteration_batches): + if y_pr[jb][0]>=0.5: + post_list.append(jbatch_indexer[jb]) + else: + pr_list.append(jbatch_indexer[jb]) + + batch_counter = 0 + jbatch_indexer = [] + + tot_counter = tot_counter+1 + + starting_list_of_regions, index_update = update_list_and_return_first_with_length_bigger_than_one(index_update, i, pr_list, post_list,starting_list_of_regions) + + index_sort = [i[0] for i in starting_list_of_regions ] + + + alltags=[elem.tag for elem in root_xml.iter()] + + + + link=alltags[0].split('}')[0]+'}' + name_space = alltags[0].split('}')[0] + name_space = name_space.split('{')[1] + + page_element = root_xml.find(link+'Page') + + """ + ro_subelement = ET.SubElement(page_element, 'ReadingOrder') + #print(page_element, 'page_element') + + #new_element = ET.Element('ReadingOrder') + + new_element_element = ET.Element('OrderedGroup') + new_element_element.set('id', "ro357564684568544579089") + + for index, id_text in enumerate(id_all_text): + new_element_2 = ET.Element('RegionRefIndexed') + new_element_2.set('regionRef', id_all_text[index]) + new_element_2.set('index', str(index_sort[index])) + + new_element_element.append(new_element_2) + + ro_subelement.append(new_element_element) + """ + ##ro_subelement = ET.SubElement(page_element, 'ReadingOrder') + + ro_subelement = ET.Element('ReadingOrder') + + ro_subelement2 = ET.SubElement(ro_subelement, 'OrderedGroup') + ro_subelement2.set('id', "ro357564684568544579089") + + for index, id_text in enumerate(id_all_text): + new_element_2 = ET.SubElement(ro_subelement2, 'RegionRefIndexed') + new_element_2.set('regionRef', id_all_text[index]) + new_element_2.set('index', str(index_sort[index])) + + if link+'PrintSpace' in alltags: + page_element.insert(1, ro_subelement) + else: + page_element.insert(0, ro_subelement) + + #page_element[0].append(new_element) + #root_xml.append(new_element) + alltags=[elem.tag for elem in root_xml.iter()] + + ET.register_namespace("",name_space) + tree_xml.write('library2.xml',xml_declaration=True,method='xml',encoding="utf8",default_namespace=None) + #tree_xml.write('library2.xml') + else: if self.patches: #def textline_contours(img,input_width,input_height,n_classes,model): @@ -356,7 +527,7 @@ class sbb_predict: def run(self): res=self.predict() - if self.task == 'classification': + if (self.task == 'classification' or self.task == 'reading_order'): pass else: img_seg_overlayed = self.visualize_model_output(res, self.img_org, self.task) @@ -397,15 +568,20 @@ class sbb_predict: "-gt", help="ground truth directory if you want to see the iou of prediction.", ) -def main(image, model, patches, save, ground_truth): +@click.option( + "--xml_file", + "-xml", + help="xml file with layout coordinates that reading order detection will be implemented on. The result will be written in the same xml file.", +) +def main(image, model, patches, save, ground_truth, xml_file): with open(os.path.join(model,'config.json')) as f: config_params_model = json.load(f) task = config_params_model['task'] - if task != 'classification': + if (task != 'classification' and task != 'reading_order'): if not save: print("Error: You used one of segmentation or binarization task but not set -s, you need a filename to save visualized output with -s") sys.exit(1) - x=sbb_predict(image, model, task, config_params_model, patches, save, ground_truth) + x=sbb_predict(image, model, task, config_params_model, patches, save, ground_truth, xml_file) x.run() if __name__=="__main__": From 06ed00619399fb93d48bd803f4bd66ba942d4d84 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 29 May 2024 11:18:35 +0200 Subject: [PATCH 060/492] reading order detection on xml with layout + result will be written in an output directory with the same file name --- train/gt_gen_utils.py | 74 +++++++++++++++++++++++++++++++++++++------ train/inference.py | 45 +++++++++++++++++++------- 2 files changed, 99 insertions(+), 20 deletions(-) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 0286ac7..8f72fb8 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -664,6 +664,58 @@ def read_xml(xml_file): for jj in root1.iter(link+'RegionRefIndexed'): index_tot_regions.append(jj.attrib['index']) tot_region_ref.append(jj.attrib['regionRef']) + + if (link+'PrintSpace' in alltags) or (link+'Border' in alltags): + co_printspace = [] + if link+'PrintSpace' in alltags: + region_tags_printspace = np.unique([x for x in alltags if x.endswith('PrintSpace')]) + elif link+'Border' in alltags: + region_tags_printspace = np.unique([x for x in alltags if x.endswith('Border')]) + + for tag in region_tags_printspace: + if link+'PrintSpace' in alltags: + tag_endings_printspace = ['}PrintSpace','}printspace'] + elif link+'Border' in alltags: + tag_endings_printspace = ['}Border','}border'] + + if tag.endswith(tag_endings_printspace[0]) or tag.endswith(tag_endings_printspace[1]): + for nn in root1.iter(tag): + c_t_in = [] + sumi = 0 + for vv in nn.iter(): + # check the format of coords + if vv.tag == link + 'Coords': + coords = bool(vv.attrib) + if coords: + p_h = vv.attrib['points'].split(' ') + c_t_in.append( + np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) + break + else: + pass + + if vv.tag == link + 'Point': + c_t_in.append([int(float(vv.attrib['x'])), int(float(vv.attrib['y']))]) + sumi += 1 + elif vv.tag != link + 'Point' and sumi >= 1: + break + co_printspace.append(np.array(c_t_in)) + img_printspace = np.zeros( (y_len,x_len,3) ) + img_printspace=cv2.fillPoly(img_printspace, pts =co_printspace, color=(1,1,1)) + img_printspace = img_printspace.astype(np.uint8) + + imgray = cv2.cvtColor(img_printspace, cv2.COLOR_BGR2GRAY) + _, thresh = cv2.threshold(imgray, 0, 255, 0) + contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + cnt_size = np.array([cv2.contourArea(contours[j]) for j in range(len(contours))]) + cnt = contours[np.argmax(cnt_size)] + x, y, w, h = cv2.boundingRect(cnt) + + bb_coord_printspace = [x, y, w, h] + + else: + bb_coord_printspace = None + region_tags=np.unique([x for x in alltags if x.endswith('Region')]) co_text_paragraph=[] @@ -754,7 +806,7 @@ def read_xml(xml_file): c_t_in_drop.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) elif "type" in nn.attrib and nn.attrib['type']=='heading': - id_heading.append(nn.attrib['id']) + ##id_heading.append(nn.attrib['id']) c_t_in_heading.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) @@ -763,7 +815,7 @@ def read_xml(xml_file): c_t_in_signature_mark.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) #print(c_t_in_paragraph) elif "type" in nn.attrib and nn.attrib['type']=='header': - id_header.append(nn.attrib['id']) + #id_header.append(nn.attrib['id']) c_t_in_header.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) @@ -776,11 +828,11 @@ def read_xml(xml_file): ###c_t_in_page_number.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) elif "type" in nn.attrib and nn.attrib['type']=='marginalia': - id_marginalia.append(nn.attrib['id']) + #id_marginalia.append(nn.attrib['id']) c_t_in_marginalia.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) else: - id_paragraph.append(nn.attrib['id']) + #id_paragraph.append(nn.attrib['id']) c_t_in_paragraph.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) @@ -796,7 +848,7 @@ def read_xml(xml_file): sumi+=1 elif "type" in nn.attrib and nn.attrib['type']=='heading': - id_heading.append(nn.attrib['id']) + #id_heading.append(nn.attrib['id']) c_t_in_heading.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 @@ -806,7 +858,7 @@ def read_xml(xml_file): c_t_in_signature_mark.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 elif "type" in nn.attrib and nn.attrib['type']=='header': - id_header.append(nn.attrib['id']) + #id_header.append(nn.attrib['id']) c_t_in_header.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 @@ -821,13 +873,13 @@ def read_xml(xml_file): ###sumi+=1 elif "type" in nn.attrib and nn.attrib['type']=='marginalia': - id_marginalia.append(nn.attrib['id']) + #id_marginalia.append(nn.attrib['id']) c_t_in_marginalia.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 else: - id_paragraph.append(nn.attrib['id']) + #id_paragraph.append(nn.attrib['id']) c_t_in_paragraph.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) sumi+=1 @@ -838,11 +890,14 @@ def read_xml(xml_file): co_text_drop.append(np.array(c_t_in_drop)) if len(c_t_in_paragraph)>0: co_text_paragraph.append(np.array(c_t_in_paragraph)) + id_paragraph.append(nn.attrib['id']) if len(c_t_in_heading)>0: co_text_heading.append(np.array(c_t_in_heading)) + id_heading.append(nn.attrib['id']) if len(c_t_in_header)>0: co_text_header.append(np.array(c_t_in_header)) + id_header.append(nn.attrib['id']) if len(c_t_in_page_number)>0: co_text_page_number.append(np.array(c_t_in_page_number)) if len(c_t_in_catch)>0: @@ -853,6 +908,7 @@ def read_xml(xml_file): if len(c_t_in_marginalia)>0: co_text_marginalia.append(np.array(c_t_in_marginalia)) + id_marginalia.append(nn.attrib['id']) elif tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): @@ -1014,7 +1070,7 @@ def read_xml(xml_file): img_poly=cv2.fillPoly(img, pts =co_img, color=(4,4,4)) img_poly=cv2.fillPoly(img, pts =co_sep, color=(5,5,5)) - return tree1, root1, file_name, id_paragraph, id_header,co_text_paragraph, co_text_header,\ + return tree1, root1, bb_coord_printspace, file_name, id_paragraph, id_header+id_heading, co_text_paragraph, co_text_header+co_text_heading,\ tot_region_ref,x_len, y_len,index_tot_regions, img_poly diff --git a/train/inference.py b/train/inference.py index 73b4ed8..28445e8 100644 --- a/train/inference.py +++ b/train/inference.py @@ -16,6 +16,7 @@ import click import json from tensorflow.python.keras import backend as tensorflow_backend import xml.etree.ElementTree as ET +import matplotlib.pyplot as plt with warnings.catch_warnings(): @@ -27,7 +28,7 @@ Tool to load model and predict for given image. """ class sbb_predict: - def __init__(self,image, model, task, config_params_model, patches, save, ground_truth, xml_file): + def __init__(self,image, model, task, config_params_model, patches, save, ground_truth, xml_file, out): self.image=image self.patches=patches self.save=save @@ -36,6 +37,7 @@ class sbb_predict: self.task=task self.config_params_model=config_params_model self.xml_file = xml_file + self.out = out def resize_image(self,img_in,input_height,input_width): return cv2.resize( img_in, ( input_width,input_height) ,interpolation=cv2.INTER_NEAREST) @@ -236,16 +238,18 @@ class sbb_predict: img_height = self.config_params_model['input_height'] img_width = self.config_params_model['input_width'] - tree_xml, root_xml, file_name, id_paragraph, id_header, co_text_paragraph, co_text_header, tot_region_ref, x_len, y_len, index_tot_regions, img_poly = read_xml(self.xml_file) + tree_xml, root_xml, bb_coord_printspace, file_name, id_paragraph, id_header, co_text_paragraph, co_text_header, tot_region_ref, x_len, y_len, index_tot_regions, img_poly = read_xml(self.xml_file) _, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, _ = find_new_features_of_contours(co_text_header) img_header_and_sep = np.zeros((y_len,x_len), dtype='uint8') + for j in range(len(cy_main)): img_header_and_sep[int(y_max_main[j]):int(y_max_main[j])+12,int(x_min_main[j]):int(x_max_main[j]) ] = 1 co_text_all = co_text_paragraph + co_text_header id_all_text = id_paragraph + id_header + ##texts_corr_order_index = [index_tot_regions[tot_region_ref.index(i)] for i in id_all_text ] ##texts_corr_order_index_int = [int(x) for x in texts_corr_order_index] @@ -253,8 +257,9 @@ class sbb_predict: min_area = 0 max_area = 1 + - co_text_all, texts_corr_order_index_int = filter_contours_area_of_image(img_poly, co_text_all, texts_corr_order_index_int, max_area, min_area) + ##co_text_all, texts_corr_order_index_int = filter_contours_area_of_image(img_poly, co_text_all, texts_corr_order_index_int, max_area, min_area) labels_con = np.zeros((y_len,x_len,len(co_text_all)),dtype='uint8') for i in range(len(co_text_all)): @@ -262,6 +267,18 @@ class sbb_predict: img_label=cv2.fillPoly(img_label, pts =[co_text_all[i]], color=(1,1,1)) labels_con[:,:,i] = img_label[:,:,0] + if bb_coord_printspace: + #bb_coord_printspace[x,y,w,h,_,_] + x = bb_coord_printspace[0] + y = bb_coord_printspace[1] + w = bb_coord_printspace[2] + h = bb_coord_printspace[3] + labels_con = labels_con[y:y+h, x:x+w, :] + img_poly = img_poly[y:y+h, x:x+w, :] + img_header_and_sep = img_header_and_sep[y:y+h, x:x+w] + + + img3= np.copy(img_poly) labels_con = resize_image(labels_con, img_height, img_width) @@ -347,9 +364,11 @@ class sbb_predict: tot_counter = tot_counter+1 starting_list_of_regions, index_update = update_list_and_return_first_with_length_bigger_than_one(index_update, i, pr_list, post_list,starting_list_of_regions) - + + index_sort = [i[0] for i in starting_list_of_regions ] + id_all_text = np.array(id_all_text)[index_sort] alltags=[elem.tag for elem in root_xml.iter()] @@ -389,19 +408,17 @@ class sbb_predict: for index, id_text in enumerate(id_all_text): new_element_2 = ET.SubElement(ro_subelement2, 'RegionRefIndexed') new_element_2.set('regionRef', id_all_text[index]) - new_element_2.set('index', str(index_sort[index])) + new_element_2.set('index', str(index)) - if link+'PrintSpace' in alltags: + if (link+'PrintSpace' in alltags) or (link+'Border' in alltags): page_element.insert(1, ro_subelement) else: page_element.insert(0, ro_subelement) - #page_element[0].append(new_element) - #root_xml.append(new_element) alltags=[elem.tag for elem in root_xml.iter()] ET.register_namespace("",name_space) - tree_xml.write('library2.xml',xml_declaration=True,method='xml',encoding="utf8",default_namespace=None) + tree_xml.write(os.path.join(self.out, file_name+'.xml'),xml_declaration=True,method='xml',encoding="utf8",default_namespace=None) #tree_xml.write('library2.xml') else: @@ -545,6 +562,12 @@ class sbb_predict: help="image filename", type=click.Path(exists=True, dir_okay=False), ) +@click.option( + "--out", + "-o", + help="output directory where xml with detected reading order will be written.", + type=click.Path(exists=True, file_okay=False), +) @click.option( "--patches/--no-patches", "-p/-nop", @@ -573,7 +596,7 @@ class sbb_predict: "-xml", help="xml file with layout coordinates that reading order detection will be implemented on. The result will be written in the same xml file.", ) -def main(image, model, patches, save, ground_truth, xml_file): +def main(image, model, patches, save, ground_truth, xml_file, out): with open(os.path.join(model,'config.json')) as f: config_params_model = json.load(f) task = config_params_model['task'] @@ -581,7 +604,7 @@ def main(image, model, patches, save, ground_truth, xml_file): if not save: print("Error: You used one of segmentation or binarization task but not set -s, you need a filename to save visualized output with -s") sys.exit(1) - x=sbb_predict(image, model, task, config_params_model, patches, save, ground_truth, xml_file) + x=sbb_predict(image, model, task, config_params_model, patches, save, ground_truth, xml_file, out) x.run() if __name__=="__main__": From 09789619a8fe9589352f7bde6c0e7cb41a9ea087 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 29 May 2024 13:07:06 +0200 Subject: [PATCH 061/492] min_area size of regions considered for reading order detection passed as an argument for inference --- train/gt_gen_utils.py | 13 +++++++++++-- train/inference.py | 31 ++++++++++++++++++++++++------- 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 8f72fb8..d3dd7df 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -32,10 +32,16 @@ def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, m jv = 0 for c in contours: + if len(np.shape(c)) == 3: + c = c[0] + elif len(np.shape(c)) == 2: + pass + #c = c[0] if len(c) < 3: # A polygon cannot have less than 3 points continue - polygon = geometry.Polygon([point[0] for point in c]) + c_e = [point for point in c] + polygon = geometry.Polygon(c_e) # area = cv2.contourArea(c) area = polygon.area # Check that polygon has area greater than minimal area @@ -49,7 +55,10 @@ def filter_contours_area_of_image(image, contours, order_index, max_area, min_ar order_index_filtered = list() #jv = 0 for jv, c in enumerate(contours): - c = c[0] + if len(np.shape(c)) == 3: + c = c[0] + elif len(np.shape(c)) == 2: + pass if len(c) < 3: # A polygon cannot have less than 3 points continue c_e = [point for point in c] diff --git a/train/inference.py b/train/inference.py index 28445e8..c7a8b02 100644 --- a/train/inference.py +++ b/train/inference.py @@ -28,7 +28,7 @@ Tool to load model and predict for given image. """ class sbb_predict: - def __init__(self,image, model, task, config_params_model, patches, save, ground_truth, xml_file, out): + def __init__(self,image, model, task, config_params_model, patches, save, ground_truth, xml_file, out, min_area): self.image=image self.patches=patches self.save=save @@ -38,6 +38,10 @@ class sbb_predict: self.config_params_model=config_params_model self.xml_file = xml_file self.out = out + if min_area: + self.min_area = float(min_area) + else: + self.min_area = 0 def resize_image(self,img_in,input_height,input_width): return cv2.resize( img_in, ( input_width,input_height) ,interpolation=cv2.INTER_NEAREST) @@ -255,11 +259,18 @@ class sbb_predict: ##texts_corr_order_index_int = [int(x) for x in texts_corr_order_index] texts_corr_order_index_int = list(np.array(range(len(co_text_all)))) - min_area = 0 - max_area = 1 + #print(texts_corr_order_index_int) - - ##co_text_all, texts_corr_order_index_int = filter_contours_area_of_image(img_poly, co_text_all, texts_corr_order_index_int, max_area, min_area) + max_area = 1 + #print(np.shape(co_text_all[0]), len( np.shape(co_text_all[0]) ),'co_text_all') + #co_text_all = filter_contours_area_of_image_tables(img_poly, co_text_all, _, max_area, min_area) + #print(co_text_all,'co_text_all') + co_text_all, texts_corr_order_index_int = filter_contours_area_of_image(img_poly, co_text_all, texts_corr_order_index_int, max_area, self.min_area) + + #print(texts_corr_order_index_int) + + #co_text_all = [co_text_all[index] for index in texts_corr_order_index_int] + id_all_text = [id_all_text[index] for index in texts_corr_order_index_int] labels_con = np.zeros((y_len,x_len,len(co_text_all)),dtype='uint8') for i in range(len(co_text_all)): @@ -596,7 +607,13 @@ class sbb_predict: "-xml", help="xml file with layout coordinates that reading order detection will be implemented on. The result will be written in the same xml file.", ) -def main(image, model, patches, save, ground_truth, xml_file, out): + +@click.option( + "--min_area", + "-min", + help="min area size of regions considered for reading order detection. The default value is zero and means that all text regions are considered for reading order.", +) +def main(image, model, patches, save, ground_truth, xml_file, out, min_area): with open(os.path.join(model,'config.json')) as f: config_params_model = json.load(f) task = config_params_model['task'] @@ -604,7 +621,7 @@ def main(image, model, patches, save, ground_truth, xml_file, out): if not save: print("Error: You used one of segmentation or binarization task but not set -s, you need a filename to save visualized output with -s") sys.exit(1) - x=sbb_predict(image, model, task, config_params_model, patches, save, ground_truth, xml_file, out) + x=sbb_predict(image, model, task, config_params_model, patches, save, ground_truth, xml_file, out, min_area) x.run() if __name__=="__main__": From 47a16464518f32427d7ff609bbc572303c2ed148 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 30 May 2024 12:56:56 +0200 Subject: [PATCH 062/492] modifying xml parsing --- train/gt_gen_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index d3dd7df..debaf15 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -122,7 +122,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ ## to do: add footnote to text regions for index in tqdm(range(len(gt_list))): #try: - tree1 = ET.parse(dir_in+'/'+gt_list[index]) + tree1 = ET.parse(dir_in+'/'+gt_list[index], parser = ET.XMLParser(encoding = 'iso-8859-5')) root1=tree1.getroot() alltags=[elem.tag for elem in root1.iter()] link=alltags[0].split('}')[0]+'}' @@ -658,7 +658,7 @@ def find_new_features_of_contours(contours_main): return cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin def read_xml(xml_file): file_name = Path(xml_file).stem - tree1 = ET.parse(xml_file) + tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding = 'iso-8859-5')) root1=tree1.getroot() alltags=[elem.tag for elem in root1.iter()] link=alltags[0].split('}')[0]+'}' From 3ef0dbdd4281bfe4cabd13765fc9723ea1e506c2 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 30 May 2024 16:59:50 +0200 Subject: [PATCH 063/492] scaling and cropping of labels and org images --- train/custom_config_page2label.json | 5 +- train/generate_gt_for_training.py | 34 ++++++-- train/gt_gen_utils.py | 125 ++++++++++++++++++++++++++-- 3 files changed, 145 insertions(+), 19 deletions(-) diff --git a/train/custom_config_page2label.json b/train/custom_config_page2label.json index e4c02cb..9116ce3 100644 --- a/train/custom_config_page2label.json +++ b/train/custom_config_page2label.json @@ -1,9 +1,8 @@ { -"use_case": "layout", +"use_case": "textline", "textregions":{ "rest_as_paragraph": 1, "header":2 , "heading":2 , "marginalia":3 }, "imageregion":4, "separatorregion":5, "graphicregions" :{"rest_as_decoration":6}, -"artificial_class_on_boundry": ["paragraph"], -"artificial_class_label":7 +"columns_width":{"1":1000, "2":1300, "3":1600, "4":2000, "5":2300, "6":2500} } diff --git a/train/generate_gt_for_training.py b/train/generate_gt_for_training.py index cf2b2a6..752090c 100644 --- a/train/generate_gt_for_training.py +++ b/train/generate_gt_for_training.py @@ -14,10 +14,22 @@ def main(): help="directory of GT page-xml files", type=click.Path(exists=True, file_okay=False), ) +@click.option( + "--dir_images", + "-di", + help="directory of org images. If print space cropping or scaling is needed for labels it would be great to provide the original images to apply the same function on them. So if -ps is not set true or in config files no columns_width key is given this argumnet can be ignored. File stems in this directory should be the same as those in dir_xml.", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--dir_out_images", + "-doi", + help="directory where the output org images after undergoing a process (like print space cropping or scaling) will be written.", + type=click.Path(exists=True, file_okay=False), +) @click.option( "--dir_out", "-do", - help="directory where ground truth images would be written", + help="directory where ground truth label images would be written", type=click.Path(exists=True, file_okay=False), ) @@ -33,8 +45,14 @@ def main(): "-to", help="this defines how output should be. A 2d image array or a 3d image array encoded with RGB color. Just pass 2d or 3d. The file will be saved one directory up. 2D image array is 3d but only information of one channel would be enough since all channels have the same values.", ) +@click.option( + "--printspace", + "-ps", + is_flag=True, + help="if this parameter set to true, generated labels and in the case of provided org images cropping will be imposed and cropped labels and images will be written in output directories.", +) -def pagexml2label(dir_xml,dir_out,type_output,config): +def pagexml2label(dir_xml,dir_out,type_output,config, printspace, dir_images, dir_out_images): if config: with open(config) as f: config_params = json.load(f) @@ -42,7 +60,7 @@ def pagexml2label(dir_xml,dir_out,type_output,config): print("passed") config_params = None gt_list = get_content_of_dir(dir_xml) - get_images_of_ground_truth(gt_list,dir_xml,dir_out,type_output, config, config_params) + get_images_of_ground_truth(gt_list,dir_xml,dir_out,type_output, config, config_params, printspace, dir_images, dir_out_images) @main.command() @click.option( @@ -181,7 +199,7 @@ def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, i for i in range(len(texts_corr_order_index_int)): for j in range(len(texts_corr_order_index_int)): if i!=j: - input_matrix = np.zeros((input_height,input_width,3)).astype(np.int8) + input_multi_visual_modal = np.zeros((input_height,input_width,3)).astype(np.int8) final_f_name = f_name+'_'+str(indexer+indexer_start) order_class_condition = texts_corr_order_index_int[i]-texts_corr_order_index_int[j] if order_class_condition<0: @@ -189,13 +207,13 @@ def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, i else: class_type = 0 - input_matrix[:,:,0] = resize_image(labels_con[:,:,i], input_height, input_width) - input_matrix[:,:,1] = resize_image(img_poly[:,:,0], input_height, input_width) - input_matrix[:,:,2] = resize_image(labels_con[:,:,j], input_height, input_width) + input_multi_visual_modal[:,:,0] = resize_image(labels_con[:,:,i], input_height, input_width) + input_multi_visual_modal[:,:,1] = resize_image(img_poly[:,:,0], input_height, input_width) + input_multi_visual_modal[:,:,2] = resize_image(labels_con[:,:,j], input_height, input_width) np.save(os.path.join(dir_out_classes,final_f_name+'.npy' ), class_type) - cv2.imwrite(os.path.join(dir_out_modal_image,final_f_name+'.png' ), input_matrix) + cv2.imwrite(os.path.join(dir_out_modal_image,final_f_name+'.png' ), input_multi_visual_modal) indexer = indexer+1 diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index debaf15..d3e95e8 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -115,11 +115,15 @@ def update_region_contours(co_text, img_boundary, erosion_rate, dilation_rate, y img_boundary[:,:][boundary[:,:]==1] =1 return co_text_eroded, img_boundary -def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_file, config_params): +def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_file, config_params, printspace, dir_images, dir_out_images): """ Reading the page xml files and write the ground truth images into given output directory. """ ## to do: add footnote to text regions + + if dir_images: + ls_org_imgs = os.listdir(dir_images) + ls_org_imgs_stem = [item.split('.')[0] for item in ls_org_imgs] for index in tqdm(range(len(gt_list))): #try: tree1 = ET.parse(dir_in+'/'+gt_list[index], parser = ET.XMLParser(encoding = 'iso-8859-5')) @@ -133,6 +137,72 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ y_len=int(jj.attrib['imageHeight']) x_len=int(jj.attrib['imageWidth']) + if 'columns_width' in list(config_params.keys()): + columns_width_dict = config_params['columns_width'] + metadata_element = root1.find(link+'Metadata') + comment_is_sub_element = False + for child in metadata_element: + tag2 = child.tag + if tag2.endswith('}Comments') or tag2.endswith('}comments'): + text_comments = child.text + num_col = int(text_comments.split('num_col')[1]) + comment_is_sub_element = True + if not comment_is_sub_element: + num_col = None + + if num_col: + x_new = columns_width_dict[str(num_col)] + y_new = int ( x_new * (y_len / float(x_len)) ) + + if printspace: + region_tags = np.unique([x for x in alltags if x.endswith('PrintSpace') or x.endswith('Border')]) + co_use_case = [] + + for tag in region_tags: + tag_endings = ['}PrintSpace','}Border'] + + if tag.endswith(tag_endings[0]) or tag.endswith(tag_endings[1]): + for nn in root1.iter(tag): + c_t_in = [] + sumi = 0 + for vv in nn.iter(): + # check the format of coords + if vv.tag == link + 'Coords': + coords = bool(vv.attrib) + if coords: + p_h = vv.attrib['points'].split(' ') + c_t_in.append( + np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) + break + else: + pass + + if vv.tag == link + 'Point': + c_t_in.append([int(float(vv.attrib['x'])), int(float(vv.attrib['y']))]) + sumi += 1 + elif vv.tag != link + 'Point' and sumi >= 1: + break + co_use_case.append(np.array(c_t_in)) + + img = np.zeros((y_len, x_len, 3)) + + img_poly = cv2.fillPoly(img, pts=co_use_case, color=(1, 1, 1)) + + img_poly = img_poly.astype(np.uint8) + + imgray = cv2.cvtColor(img_poly, cv2.COLOR_BGR2GRAY) + _, thresh = cv2.threshold(imgray, 0, 255, 0) + + contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + cnt_size = np.array([cv2.contourArea(contours[j]) for j in range(len(contours))]) + + cnt = contours[np.argmax(cnt_size)] + + x, y, w, h = cv2.boundingRect(cnt) + bb_xywh = [x, y, w, h] + + if config_file and (config_params['use_case']=='textline' or config_params['use_case']=='word' or config_params['use_case']=='glyph' or config_params['use_case']=='printspace'): keys = list(config_params.keys()) if "artificial_class_label" in keys: @@ -186,7 +256,6 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ co_use_case.append(np.array(c_t_in)) - if "artificial_class_label" in keys: img_boundary = np.zeros((y_len, x_len)) erosion_rate = 1 @@ -205,12 +274,32 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ img_poly[:,:,0][img_boundary[:,:]==1] = artificial_class_rgb_color[0] img_poly[:,:,1][img_boundary[:,:]==1] = artificial_class_rgb_color[1] img_poly[:,:,2][img_boundary[:,:]==1] = artificial_class_rgb_color[2] + + + if printspace and config_params['use_case']!='printspace': + img_poly = img_poly[bb_xywh[1]:bb_xywh[1]+bb_xywh[3], bb_xywh[0]:bb_xywh[0]+bb_xywh[2], :] + + if 'columns_width' in list(config_params.keys()) and num_col and config_params['use_case']!='printspace': + img_poly = resize_image(img_poly, y_new, x_new) try: - cv2.imwrite(output_dir + '/' + gt_list[index].split('-')[1].split('.')[0] + '.png', - img_poly) + xml_file_stem = gt_list[index].split('-')[1].split('.')[0] + cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly) except: - cv2.imwrite(output_dir + '/' + gt_list[index].split('.')[0] + '.png', img_poly) + xml_file_stem = gt_list[index].split('.')[0] + cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly) + + if dir_images: + org_image_name = ls_org_imgs[ls_org_imgs_stem.index(xml_file_stem)] + img_org = cv2.imread(os.path.join(dir_images, org_image_name)) + + if printspace and config_params['use_case']!='printspace': + img_org = img_org[bb_xywh[1]:bb_xywh[1]+bb_xywh[3], bb_xywh[0]:bb_xywh[0]+bb_xywh[2], :] + + if 'columns_width' in list(config_params.keys()) and num_col and config_params['use_case']!='printspace': + img_org = resize_image(img_org, y_new, x_new) + + cv2.imwrite(os.path.join(dir_out_images, org_image_name), img_org) if config_file and config_params['use_case']=='layout': @@ -616,11 +705,31 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ + if printspace: + img_poly = img_poly[bb_xywh[1]:bb_xywh[1]+bb_xywh[3], bb_xywh[0]:bb_xywh[0]+bb_xywh[2], :] - try: - cv2.imwrite(output_dir+'/'+gt_list[index].split('-')[1].split('.')[0]+'.png',img_poly ) + if 'columns_width' in list(config_params.keys()) and num_col: + img_poly = resize_image(img_poly, y_new, x_new) + + try: + xml_file_stem = gt_list[index].split('-')[1].split('.')[0] + cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly) except: - cv2.imwrite(output_dir+'/'+gt_list[index].split('.')[0]+'.png',img_poly ) + xml_file_stem = gt_list[index].split('.')[0] + cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly) + + + if dir_images: + org_image_name = ls_org_imgs[ls_org_imgs_stem.index(xml_file_stem)] + img_org = cv2.imread(os.path.join(dir_images, org_image_name)) + + if printspace: + img_org = img_org[bb_xywh[1]:bb_xywh[1]+bb_xywh[3], bb_xywh[0]:bb_xywh[0]+bb_xywh[2], :] + + if 'columns_width' in list(config_params.keys()) and num_col: + img_org = resize_image(img_org, y_new, x_new) + + cv2.imwrite(os.path.join(dir_out_images, org_image_name), img_org) From 13ebe71d1349d5802d9ff5aa1e79e95141185371 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 6 Jun 2024 14:38:29 +0200 Subject: [PATCH 064/492] replacement in a list done correctly --- train/gt_gen_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index d3e95e8..38e77e8 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -636,7 +636,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if 'textregions' in keys: if 'rest_as_paragraph' in types_text: - types_text[types_text=='rest_as_paragraph'] = 'paragraph' + types_text = ['paragraph'if ttind=='rest_as_paragraph' else ttind for ttind in types_text] for element_text in types_text: if element_text == 'paragraph': color_label = labels_rgb_color[ config_params['textregions']['rest_as_paragraph']] @@ -688,7 +688,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if 'textregions' in keys: if 'rest_as_paragraph' in types_text: - types_text[types_text=='rest_as_paragraph'] = 'paragraph' + types_text = ['paragraph'if ttind=='rest_as_paragraph' else ttind for ttind in types_text] for element_text in types_text: if element_text == 'paragraph': color_label = config_params['textregions']['rest_as_paragraph'] From 742e3c2aa28171cbeff8517cf49ab779d196ee23 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 6 Jun 2024 14:46:06 +0200 Subject: [PATCH 065/492] Update README.md --- train/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/train/README.md b/train/README.md index 899c9a3..b9e70a8 100644 --- a/train/README.md +++ b/train/README.md @@ -73,3 +73,6 @@ The output folder should be an empty folder where the output model will be writt * weighted_loss: If ``true``, this means that you want to apply weighted categorical_crossentropy as loss fucntion. Be carefull if you set to ``true``the parameter "is_loss_soft_dice" should be ``false`` * data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train and eval data are in "dir_output". Since when once we provide training data we resize and augment them and then we write them in sub-directories train and eval in "dir_output". * dir_train: This is the directory of "images" and "labels" (dir_train should include two subdirectories with names of images and labels ) for raw images and labels. Namely they are not prepared (not resized and not augmented) yet for training the model. When we run this tool these raw data will be transformed to suitable size needed for the model and they will be written in "dir_output" in train and eval directories. Each of train and eval include "images" and "labels" sub-directories. + +#### Additional documentation +Please check the [wiki](https://github.com/qurator-spk/sbb_pixelwise_segmentation/wiki). From 5a5914e06c1185f24de378dc752892e699c0446b Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 6 Jun 2024 18:45:47 +0200 Subject: [PATCH 066/492] just defined textregion types can be extracted as label --- train/gt_gen_utils.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 38e77e8..86eb0a1 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -325,6 +325,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ region_tags=np.unique([x for x in alltags if x.endswith('Region')]) co_text = {'drop-capital':[], "footnote":[], "footnote-continued":[], "heading":[], "signature-mark":[], "header":[], "catch-word":[], "page-number":[], "marginalia":[], "paragraph":[]} + all_defined_textregion_types = list(co_text.keys()) co_graphic = {"handwritten-annotation":[], "decoration":[], "stamp":[], "signature":[]} co_sep=[] co_img=[] @@ -359,7 +360,8 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ else: if "type" in nn.attrib: - c_t_in[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + if nn.attrib['type'] in all_defined_textregion_types: + c_t_in[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) break else: @@ -384,8 +386,9 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ else: if "type" in nn.attrib: - c_t_in[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 + if nn.attrib['type'] in all_defined_textregion_types: + c_t_in[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) + sumi+=1 elif vv.tag!=link+'Point' and sumi>=1: From 4c376289e97890a55755e72198d20fde37dd1146 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 6 Jun 2024 18:55:22 +0200 Subject: [PATCH 067/492] just defined graphic region types can be extracted as label --- train/gt_gen_utils.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 86eb0a1..c2360fc 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -327,6 +327,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ co_text = {'drop-capital':[], "footnote":[], "footnote-continued":[], "heading":[], "signature-mark":[], "header":[], "catch-word":[], "page-number":[], "marginalia":[], "paragraph":[]} all_defined_textregion_types = list(co_text.keys()) co_graphic = {"handwritten-annotation":[], "decoration":[], "stamp":[], "signature":[]} + all_defined_graphic_types = list(co_graphic.keys()) co_sep=[] co_img=[] co_table=[] @@ -425,7 +426,8 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ else: if "type" in nn.attrib: - c_t_in_graphic[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + if nn.attrib['type'] in all_defined_graphic_types: + c_t_in_graphic[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) break else: @@ -450,8 +452,9 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ else: if "type" in nn.attrib: - c_t_in_graphic[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) - sumi+=1 + if nn.attrib['type'] in all_defined_graphic_types: + c_t_in_graphic[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) + sumi+=1 elif vv.tag!=link+'Point' and sumi>=1: break From cc91e4b12c42076f76bf3e8409c050ad80e9cf78 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 7 Jun 2024 16:24:31 +0200 Subject: [PATCH 068/492] updating train.py --- train/train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/train/train.py b/train/train.py index f338c78..e16745f 100644 --- a/train/train.py +++ b/train/train.py @@ -59,6 +59,8 @@ def config_params(): pretraining = False # Set to true to load pretrained weights of ResNet50 encoder. scaling_bluring = False # If true, a combination of scaling and blurring will be applied to the image. scaling_binarization = False # If true, a combination of scaling and binarization will be applied to the image. + rotation = False # If true, a 90 degree rotation will be implemeneted. + rotation_not_90 = False # If true rotation based on provided angles with thetha will be implemeneted. scaling_brightness = False # If true, a combination of scaling and brightening will be applied to the image. scaling_flip = False # If true, a combination of scaling and flipping will be applied to the image. thetha = None # Rotate image by these angles for augmentation. From 1921e6754f7abbafb5f7f2731f2d29588bf4eac6 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 10 Jun 2024 22:15:30 +0200 Subject: [PATCH 069/492] updating train.py nontransformer backend --- train/models.py | 13 +++++++++---- train/train.py | 12 +++++++++--- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/train/models.py b/train/models.py index d852ac3..b8b0d27 100644 --- a/train/models.py +++ b/train/models.py @@ -30,8 +30,8 @@ class Patches(layers.Layer): self.patch_size = patch_size def call(self, images): - print(tf.shape(images)[1],'images') - print(self.patch_size,'self.patch_size') + #print(tf.shape(images)[1],'images') + #print(self.patch_size,'self.patch_size') batch_size = tf.shape(images)[0] patches = tf.image.extract_patches( images=images, @@ -41,7 +41,7 @@ class Patches(layers.Layer): padding="VALID", ) patch_dims = patches.shape[-1] - print(patches.shape,patch_dims,'patch_dims') + #print(patches.shape,patch_dims,'patch_dims') patches = tf.reshape(patches, [batch_size, -1, patch_dims]) return patches def get_config(self): @@ -51,6 +51,7 @@ class Patches(layers.Layer): 'patch_size': self.patch_size, }) return config + class PatchEncoder(layers.Layer): def __init__(self, num_patches, projection_dim): @@ -408,7 +409,11 @@ def vit_resnet50_unet(n_classes, patch_size, num_patches, input_height=224, inpu if pretraining: model = Model(inputs, x).load_weights(resnet50_Weights_path) - num_patches = x.shape[1]*x.shape[2] + #num_patches = x.shape[1]*x.shape[2] + + #patch_size_y = input_height / x.shape[1] + #patch_size_x = input_width / x.shape[2] + #patch_size = patch_size_x * patch_size_y patches = Patches(patch_size)(x) # Encode patches. encoded_patches = PatchEncoder(num_patches, projection_dim)(patches) diff --git a/train/train.py b/train/train.py index e16745f..84c9d3b 100644 --- a/train/train.py +++ b/train/train.py @@ -97,8 +97,6 @@ def run(_config, n_classes, n_epochs, input_height, pretraining, learning_rate, task, f1_threshold_classification, classification_classes_name): if task == "segmentation" or task == "enhancement": - - num_patches = transformer_num_patches_xy[0]*transformer_num_patches_xy[1] if data_is_provided: dir_train_flowing = os.path.join(dir_output, 'train') dir_eval_flowing = os.path.join(dir_output, 'eval') @@ -213,7 +211,15 @@ def run(_config, n_classes, n_epochs, input_height, index_start = 0 if backbone_type=='nontransformer': model = resnet50_unet(n_classes, input_height, input_width, task, weight_decay, pretraining) - elif backbone_type=='nontransformer': + elif backbone_type=='transformer': + num_patches = transformer_num_patches_xy[0]*transformer_num_patches_xy[1] + + if not (num_patches == (input_width / 32) * (input_height / 32)): + print("Error: transformer num patches error. Parameter transformer_num_patches_xy should be set to (input_width/32) = {} and (input_height/32) = {}".format(int(input_width / 32), int(input_height / 32)) ) + sys.exit(1) + if not (transformer_patchsize == 1): + print("Error: transformer patchsize error. Parameter transformer_patchsizeshould set to 1" ) + sys.exit(1) model = vit_resnet50_unet(n_classes, transformer_patchsize, num_patches, input_height, input_width, task, weight_decay, pretraining) #if you want to see the model structure just uncomment model summary. From 29da23da7663ade94f9dc158ba9cd04a39a6f114 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 11 Jun 2024 17:48:30 +0200 Subject: [PATCH 070/492] binarization as a separate task of segmentation --- train/train.py | 13 +++++++------ train/utils.py | 4 ++-- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/train/train.py b/train/train.py index 84c9d3b..9e06a66 100644 --- a/train/train.py +++ b/train/train.py @@ -96,7 +96,7 @@ def run(_config, n_classes, n_epochs, input_height, transformer_num_patches_xy, backbone_type, flip_index, dir_eval, dir_output, pretraining, learning_rate, task, f1_threshold_classification, classification_classes_name): - if task == "segmentation" or task == "enhancement": + if task == "segmentation" or task == "enhancement" or task == "binarization": if data_is_provided: dir_train_flowing = os.path.join(dir_output, 'train') dir_eval_flowing = os.path.join(dir_output, 'eval') @@ -194,16 +194,16 @@ def run(_config, n_classes, n_epochs, input_height, if continue_training: if backbone_type=='nontransformer': - if is_loss_soft_dice and task == "segmentation": + if is_loss_soft_dice and (task == "segmentation" or task == "binarization"): model = load_model(dir_of_start_model, compile=True, custom_objects={'soft_dice_loss': soft_dice_loss}) - if weighted_loss and task == "segmentation": + if weighted_loss and (task == "segmentation" or task == "binarization"): model = load_model(dir_of_start_model, compile=True, custom_objects={'loss': weighted_categorical_crossentropy(weights)}) if not is_loss_soft_dice and not weighted_loss: model = load_model(dir_of_start_model , compile=True) elif backbone_type=='transformer': - if is_loss_soft_dice and task == "segmentation": + if is_loss_soft_dice and (task == "segmentation" or task == "binarization"): model = load_model(dir_of_start_model, compile=True, custom_objects={"PatchEncoder": PatchEncoder, "Patches": Patches,'soft_dice_loss': soft_dice_loss}) - if weighted_loss and task == "segmentation": + if weighted_loss and (task == "segmentation" or task == "binarization"): model = load_model(dir_of_start_model, compile=True, custom_objects={'loss': weighted_categorical_crossentropy(weights)}) if not is_loss_soft_dice and not weighted_loss: model = load_model(dir_of_start_model , compile=True,custom_objects = {"PatchEncoder": PatchEncoder, "Patches": Patches}) @@ -224,8 +224,9 @@ def run(_config, n_classes, n_epochs, input_height, #if you want to see the model structure just uncomment model summary. #model.summary() + - if task == "segmentation": + if (task == "segmentation" or task == "binarization"): if not is_loss_soft_dice and not weighted_loss: model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=learning_rate), metrics=['accuracy']) diff --git a/train/utils.py b/train/utils.py index a2e8a9c..605d8d1 100644 --- a/train/utils.py +++ b/train/utils.py @@ -309,7 +309,7 @@ def data_gen(img_folder, mask_folder, batch_size, input_height, input_width, n_c interpolation=cv2.INTER_NEAREST) # Read an image from folder and resize img[i - c] = train_img # add to array - img[0], img[1], and so on. - if task == "segmentation": + if task == "segmentation" or task=="binarization": train_mask = cv2.imread(mask_folder + '/' + filename + '.png') train_mask = get_one_hot(resize_image(train_mask, input_height, input_width), input_height, input_width, n_classes) @@ -569,7 +569,7 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow indexer = 0 for im, seg_i in tqdm(zip(imgs_list_train, segs_list_train)): img_name = im.split('.')[0] - if task == "segmentation": + if task == "segmentation" or task == "binarization": dir_of_label_file = os.path.join(dir_seg, img_name + '.png') elif task=="enhancement": dir_of_label_file = os.path.join(dir_seg, im) From 95faf1a4c8bc25ffe6d89fa2d296fccf95479e18 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 12 Jun 2024 13:26:27 +0200 Subject: [PATCH 071/492] transformer patch size is dynamic now. --- train/config_params.json | 28 +++++++++++++----------- train/models.py | 47 ++++++++++++++++++++++++++++++++-------- train/train.py | 30 ++++++++++++++++++------- 3 files changed, 75 insertions(+), 30 deletions(-) diff --git a/train/config_params.json b/train/config_params.json index 8a56de5..6b8b6ed 100644 --- a/train/config_params.json +++ b/train/config_params.json @@ -1,42 +1,44 @@ { - "backbone_type" : "nontransformer", - "task": "classification", + "backbone_type" : "transformer", + "task": "binarization", "n_classes" : 2, - "n_epochs" : 20, - "input_height" : 448, - "input_width" : 448, + "n_epochs" : 1, + "input_height" : 224, + "input_width" : 672, "weight_decay" : 1e-6, - "n_batch" : 6, + "n_batch" : 1, "learning_rate": 1e-4, - "f1_threshold_classification": 0.8, "patches" : true, "pretraining" : true, "augmentation" : false, "flip_aug" : false, "blur_aug" : false, "scaling" : true, + "degrading": false, + "brightening": false, "binarization" : false, "scaling_bluring" : false, "scaling_binarization" : false, "scaling_flip" : false, "rotation": false, "rotation_not_90": false, - "transformer_num_patches_xy": [28, 28], - "transformer_patchsize": 1, + "transformer_num_patches_xy": [7, 7], + "transformer_patchsize_x": 3, + "transformer_patchsize_y": 1, + "transformer_projection_dim": 192, "blur_k" : ["blur","guass","median"], "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], "brightness" : [1.3, 1.5, 1.7, 2], "degrade_scales" : [0.2, 0.4], "flip_index" : [0, 1, -1], "thetha" : [10, -10], - "classification_classes_name" : {"0":"apple", "1":"orange"}, "continue_training": false, "index_start" : 0, "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" + "dir_train": "/home/vahid/Documents/test/training_data_sample_binarization", + "dir_eval": "/home/vahid/Documents/test/eval", + "dir_output": "/home/vahid/Documents/test/out" } diff --git a/train/models.py b/train/models.py index b8b0d27..1abf304 100644 --- a/train/models.py +++ b/train/models.py @@ -6,25 +6,49 @@ from tensorflow.keras import layers from tensorflow.keras.regularizers import l2 mlp_head_units = [2048, 1024] -projection_dim = 64 +#projection_dim = 64 transformer_layers = 8 num_heads = 4 resnet50_Weights_path = './pretrained_model/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5' IMAGE_ORDERING = 'channels_last' MERGE_AXIS = -1 -transformer_units = [ - projection_dim * 2, - projection_dim, -] # Size of the transformer layers def mlp(x, hidden_units, dropout_rate): for units in hidden_units: x = layers.Dense(units, activation=tf.nn.gelu)(x) x = layers.Dropout(dropout_rate)(x) return x - class Patches(layers.Layer): + def __init__(self, patch_size_x, patch_size_y):#__init__(self, **kwargs):#:__init__(self, patch_size):#__init__(self, **kwargs): + super(Patches, self).__init__() + self.patch_size_x = patch_size_x + self.patch_size_y = patch_size_y + + def call(self, images): + #print(tf.shape(images)[1],'images') + #print(self.patch_size,'self.patch_size') + batch_size = tf.shape(images)[0] + patches = tf.image.extract_patches( + images=images, + sizes=[1, self.patch_size_y, self.patch_size_x, 1], + strides=[1, self.patch_size_y, self.patch_size_x, 1], + rates=[1, 1, 1, 1], + padding="VALID", + ) + patch_dims = patches.shape[-1] + patches = tf.reshape(patches, [batch_size, -1, patch_dims]) + return patches + def get_config(self): + + config = super().get_config().copy() + config.update({ + 'patch_size_x': self.patch_size_x, + 'patch_size_y': self.patch_size_y, + }) + return config + +class Patches_old(layers.Layer): def __init__(self, patch_size):#__init__(self, **kwargs):#:__init__(self, patch_size):#__init__(self, **kwargs): super(Patches, self).__init__() self.patch_size = patch_size @@ -369,8 +393,13 @@ def resnet50_unet(n_classes, input_height=224, input_width=224, task="segmentati return model -def vit_resnet50_unet(n_classes, patch_size, num_patches, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): +def vit_resnet50_unet(n_classes, patch_size_x, patch_size_y, num_patches, projection_dim = 64, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): inputs = layers.Input(shape=(input_height, input_width, 3)) + + transformer_units = [ + projection_dim * 2, + projection_dim, + ] # Size of the transformer layers IMAGE_ORDERING = 'channels_last' bn_axis=3 @@ -414,7 +443,7 @@ def vit_resnet50_unet(n_classes, patch_size, num_patches, input_height=224, inpu #patch_size_y = input_height / x.shape[1] #patch_size_x = input_width / x.shape[2] #patch_size = patch_size_x * patch_size_y - patches = Patches(patch_size)(x) + patches = Patches(patch_size_x, patch_size_y)(x) # Encode patches. encoded_patches = PatchEncoder(num_patches, projection_dim)(patches) @@ -434,7 +463,7 @@ def vit_resnet50_unet(n_classes, patch_size, num_patches, input_height=224, inpu # Skip connection 2. encoded_patches = layers.Add()([x3, x2]) - encoded_patches = tf.reshape(encoded_patches, [-1, x.shape[1], x.shape[2], 64]) + encoded_patches = tf.reshape(encoded_patches, [-1, x.shape[1], x.shape[2] , int( projection_dim / (patch_size_x * patch_size_y) )]) v1024_2048 = Conv2D( 1024 , (1, 1), padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay))(encoded_patches) v1024_2048 = (BatchNormalization(axis=bn_axis))(v1024_2048) diff --git a/train/train.py b/train/train.py index 9e06a66..bafcc9e 100644 --- a/train/train.py +++ b/train/train.py @@ -70,8 +70,10 @@ def config_params(): brightness = None # Brighten image for augmentation. flip_index = None # Flip image for augmentation. continue_training = False # Set to true if you would like to continue training an already trained a model. - transformer_patchsize = None # Patch size of vision transformer patches. + transformer_patchsize_x = None # Patch size of vision transformer patches. + transformer_patchsize_y = None transformer_num_patches_xy = None # Number of patches for vision transformer. + transformer_projection_dim = 64 # Transformer projection dimension index_start = 0 # Index of model to continue training from. E.g. if you trained for 3 epochs and last index is 2, to continue from model_1.h5, set "index_start" to 3 to start naming model with index 3. dir_of_start_model = '' # Directory containing pretrained encoder to continue training the model. is_loss_soft_dice = False # Use soft dice as loss function. When set to true, "weighted_loss" must be false. @@ -92,7 +94,7 @@ def run(_config, n_classes, n_epochs, input_height, brightening, binarization, blur_k, scales, degrade_scales, brightness, dir_train, data_is_provided, scaling_bluring, scaling_brightness, scaling_binarization, rotation, rotation_not_90, - thetha, scaling_flip, continue_training, transformer_patchsize, + thetha, scaling_flip, continue_training, transformer_projection_dim, transformer_patchsize_x, transformer_patchsize_y, transformer_num_patches_xy, backbone_type, flip_index, dir_eval, dir_output, pretraining, learning_rate, task, f1_threshold_classification, classification_classes_name): @@ -212,15 +214,27 @@ def run(_config, n_classes, n_epochs, input_height, if backbone_type=='nontransformer': model = resnet50_unet(n_classes, input_height, input_width, task, weight_decay, pretraining) elif backbone_type=='transformer': - num_patches = transformer_num_patches_xy[0]*transformer_num_patches_xy[1] + num_patches_x = transformer_num_patches_xy[0] + num_patches_y = transformer_num_patches_xy[1] + num_patches = num_patches_x * num_patches_y - if not (num_patches == (input_width / 32) * (input_height / 32)): - print("Error: transformer num patches error. Parameter transformer_num_patches_xy should be set to (input_width/32) = {} and (input_height/32) = {}".format(int(input_width / 32), int(input_height / 32)) ) + ##if not (num_patches == (input_width / 32) * (input_height / 32)): + ##print("Error: transformer num patches error. Parameter transformer_num_patches_xy should be set to (input_width/32) = {} and (input_height/32) = {}".format(int(input_width / 32), int(input_height / 32)) ) + ##sys.exit(1) + #if not (transformer_patchsize == 1): + #print("Error: transformer patchsize error. Parameter transformer_patchsizeshould set to 1" ) + #sys.exit(1) + if (input_height != (num_patches_y * transformer_patchsize_y * 32) ): + print("Error: transformer_patchsize_y or transformer_num_patches_xy height value error . input_height should be equal to ( transformer_num_patches_xy height value * transformer_patchsize_y * 32)") sys.exit(1) - if not (transformer_patchsize == 1): - print("Error: transformer patchsize error. Parameter transformer_patchsizeshould set to 1" ) + if (input_width != (num_patches_x * transformer_patchsize_x * 32) ): + print("Error: transformer_patchsize_x or transformer_num_patches_xy width value error . input_width should be equal to ( transformer_num_patches_xy width value * transformer_patchsize_x * 32)") sys.exit(1) - model = vit_resnet50_unet(n_classes, transformer_patchsize, num_patches, input_height, input_width, task, weight_decay, pretraining) + if (transformer_projection_dim % (transformer_patchsize_y * transformer_patchsize_x)) != 0: + print("Error: transformer_projection_dim error. The remainder when parameter transformer_projection_dim is divided by (transformer_patchsize_y*transformer_patchsize_x) should be zero") + sys.exit(1) + + model = vit_resnet50_unet(n_classes, transformer_patchsize_x, transformer_patchsize_y, num_patches, transformer_projection_dim, input_height, input_width, task, weight_decay, pretraining) #if you want to see the model structure just uncomment model summary. #model.summary() From 22d7359db2b1660272a32dd2e43f69f67373883f Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 12 Jun 2024 17:39:57 +0200 Subject: [PATCH 072/492] Transformer+CNN structure is added to vision transformer type --- train/config_params.json | 16 +++-- train/models.py | 142 ++++++++++++++++++++++++++++++++++++--- train/train.py | 57 ++++++++++------ 3 files changed, 176 insertions(+), 39 deletions(-) diff --git a/train/config_params.json b/train/config_params.json index 6b8b6ed..d72530e 100644 --- a/train/config_params.json +++ b/train/config_params.json @@ -2,9 +2,9 @@ "backbone_type" : "transformer", "task": "binarization", "n_classes" : 2, - "n_epochs" : 1, + "n_epochs" : 2, "input_height" : 224, - "input_width" : 672, + "input_width" : 224, "weight_decay" : 1e-6, "n_batch" : 1, "learning_rate": 1e-4, @@ -22,10 +22,14 @@ "scaling_flip" : false, "rotation": false, "rotation_not_90": false, - "transformer_num_patches_xy": [7, 7], - "transformer_patchsize_x": 3, - "transformer_patchsize_y": 1, - "transformer_projection_dim": 192, + "transformer_num_patches_xy": [56, 56], + "transformer_patchsize_x": 4, + "transformer_patchsize_y": 4, + "transformer_projection_dim": 64, + "transformer_mlp_head_units": [128, 64], + "transformer_layers": 1, + "transformer_num_heads": 1, + "transformer_cnn_first": false, "blur_k" : ["blur","guass","median"], "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], "brightness" : [1.3, 1.5, 1.7, 2], diff --git a/train/models.py b/train/models.py index 1abf304..8841bd3 100644 --- a/train/models.py +++ b/train/models.py @@ -5,10 +5,10 @@ from tensorflow.keras.layers import * from tensorflow.keras import layers from tensorflow.keras.regularizers import l2 -mlp_head_units = [2048, 1024] -#projection_dim = 64 -transformer_layers = 8 -num_heads = 4 +##mlp_head_units = [512, 256]#[2048, 1024] +###projection_dim = 64 +##transformer_layers = 2#8 +##num_heads = 1#4 resnet50_Weights_path = './pretrained_model/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5' IMAGE_ORDERING = 'channels_last' MERGE_AXIS = -1 @@ -36,7 +36,8 @@ class Patches(layers.Layer): rates=[1, 1, 1, 1], padding="VALID", ) - patch_dims = patches.shape[-1] + #patch_dims = patches.shape[-1] + patch_dims = tf.shape(patches)[-1] patches = tf.reshape(patches, [batch_size, -1, patch_dims]) return patches def get_config(self): @@ -393,13 +394,13 @@ def resnet50_unet(n_classes, input_height=224, input_width=224, task="segmentati return model -def vit_resnet50_unet(n_classes, patch_size_x, patch_size_y, num_patches, projection_dim = 64, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): +def vit_resnet50_unet(n_classes, patch_size_x, patch_size_y, num_patches, mlp_head_units=[128, 64], transformer_layers=8, num_heads =4, projection_dim = 64, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): inputs = layers.Input(shape=(input_height, input_width, 3)) - transformer_units = [ - projection_dim * 2, - projection_dim, - ] # Size of the transformer layers + #transformer_units = [ + #projection_dim * 2, + #projection_dim, + #] # Size of the transformer layers IMAGE_ORDERING = 'channels_last' bn_axis=3 @@ -459,7 +460,7 @@ def vit_resnet50_unet(n_classes, patch_size_x, patch_size_y, num_patches, projec # Layer normalization 2. x3 = layers.LayerNormalization(epsilon=1e-6)(x2) # MLP. - x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1) + x3 = mlp(x3, hidden_units=mlp_head_units, dropout_rate=0.1) # Skip connection 2. encoded_patches = layers.Add()([x3, x2]) @@ -515,6 +516,125 @@ def vit_resnet50_unet(n_classes, patch_size_x, patch_size_y, num_patches, projec return model +def vit_resnet50_unet_transformer_before_cnn(n_classes, patch_size_x, patch_size_y, num_patches, mlp_head_units=[128, 64], transformer_layers=8, num_heads =4, projection_dim = 64, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): + inputs = layers.Input(shape=(input_height, input_width, 3)) + + ##transformer_units = [ + ##projection_dim * 2, + ##projection_dim, + ##] # Size of the transformer layers + IMAGE_ORDERING = 'channels_last' + bn_axis=3 + + patches = Patches(patch_size_x, patch_size_y)(inputs) + # Encode patches. + encoded_patches = PatchEncoder(num_patches, projection_dim)(patches) + + for _ in range(transformer_layers): + # Layer normalization 1. + x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches) + # Create a multi-head attention layer. + attention_output = layers.MultiHeadAttention( + num_heads=num_heads, key_dim=projection_dim, dropout=0.1 + )(x1, x1) + # Skip connection 1. + x2 = layers.Add()([attention_output, encoded_patches]) + # Layer normalization 2. + x3 = layers.LayerNormalization(epsilon=1e-6)(x2) + # MLP. + x3 = mlp(x3, hidden_units=mlp_head_units, dropout_rate=0.1) + # Skip connection 2. + encoded_patches = layers.Add()([x3, x2]) + + encoded_patches = tf.reshape(encoded_patches, [-1, input_height, input_width , int( projection_dim / (patch_size_x * patch_size_y) )]) + + encoded_patches = Conv2D(3, (1, 1), padding='same', data_format=IMAGE_ORDERING, kernel_regularizer=l2(weight_decay), name='convinput')(encoded_patches) + + x = ZeroPadding2D((3, 3), data_format=IMAGE_ORDERING)(encoded_patches) + x = Conv2D(64, (7, 7), data_format=IMAGE_ORDERING, strides=(2, 2),kernel_regularizer=l2(weight_decay), name='conv1')(x) + f1 = x + + x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x) + x = Activation('relu')(x) + x = MaxPooling2D((3, 3), data_format=IMAGE_ORDERING, strides=(2, 2))(x) + + x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) + x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') + x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') + f2 = one_side_pad(x) + + x = conv_block(x, 3, [128, 128, 512], stage=3, block='a') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') + x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') + f3 = x + + x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') + x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f') + f4 = x + + x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a') + x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b') + x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c') + f5 = x + + if pretraining: + model = Model(encoded_patches, x).load_weights(resnet50_Weights_path) + + v1024_2048 = Conv2D( 1024 , (1, 1), padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay))(x) + v1024_2048 = (BatchNormalization(axis=bn_axis))(v1024_2048) + v1024_2048 = Activation('relu')(v1024_2048) + + o = (UpSampling2D( (2, 2), data_format=IMAGE_ORDERING))(v1024_2048) + o = (concatenate([o, f4],axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(512, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o ,f3], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(256, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o, f2], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(128, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o, f1], axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(64, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + o = (UpSampling2D((2, 2), data_format=IMAGE_ORDERING))(o) + o = (concatenate([o, inputs],axis=MERGE_AXIS)) + o = (ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING))(o) + o = (Conv2D(32, (3, 3), padding='valid', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay)))(o) + o = (BatchNormalization(axis=bn_axis))(o) + o = Activation('relu')(o) + + o = Conv2D(n_classes, (1, 1), padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay))(o) + if task == "segmentation": + o = (BatchNormalization(axis=bn_axis))(o) + o = (Activation('softmax'))(o) + else: + o = (Activation('sigmoid'))(o) + + model = Model(inputs=inputs, outputs=o) + + return model + def resnet50_classifier(n_classes,input_height=224,input_width=224,weight_decay=1e-6,pretraining=False): include_top=True assert input_height%32 == 0 diff --git a/train/train.py b/train/train.py index bafcc9e..71f31f3 100644 --- a/train/train.py +++ b/train/train.py @@ -70,10 +70,14 @@ def config_params(): brightness = None # Brighten image for augmentation. flip_index = None # Flip image for augmentation. continue_training = False # Set to true if you would like to continue training an already trained a model. - transformer_patchsize_x = None # Patch size of vision transformer patches. - transformer_patchsize_y = None - transformer_num_patches_xy = None # Number of patches for vision transformer. - transformer_projection_dim = 64 # Transformer projection dimension + transformer_patchsize_x = None # Patch size of vision transformer patches in x direction. + transformer_patchsize_y = None # Patch size of vision transformer patches in y direction. + transformer_num_patches_xy = None # Number of patches for vision transformer in x and y direction respectively. + transformer_projection_dim = 64 # Transformer projection dimension. Default value is 64. + transformer_mlp_head_units = [128, 64] # Transformer Multilayer Perceptron (MLP) head units. Default value is [128, 64] + transformer_layers = 8 # transformer layers. Default value is 8. + transformer_num_heads = 4 # Transformer number of heads. Default value is 4. + transformer_cnn_first = True # We have two types of vision transformers. In one type, a CNN is applied first, followed by a transformer. In the other type, this order is reversed. If transformer_cnn_first is true, it means the CNN will be applied before the transformer. Default value is true. index_start = 0 # Index of model to continue training from. E.g. if you trained for 3 epochs and last index is 2, to continue from model_1.h5, set "index_start" to 3 to start naming model with index 3. dir_of_start_model = '' # Directory containing pretrained encoder to continue training the model. is_loss_soft_dice = False # Use soft dice as loss function. When set to true, "weighted_loss" must be false. @@ -94,7 +98,9 @@ def run(_config, n_classes, n_epochs, input_height, brightening, binarization, blur_k, scales, degrade_scales, brightness, dir_train, data_is_provided, scaling_bluring, scaling_brightness, scaling_binarization, rotation, rotation_not_90, - thetha, scaling_flip, continue_training, transformer_projection_dim, transformer_patchsize_x, transformer_patchsize_y, + thetha, scaling_flip, continue_training, transformer_projection_dim, + transformer_mlp_head_units, transformer_layers, transformer_num_heads, transformer_cnn_first, + transformer_patchsize_x, transformer_patchsize_y, transformer_num_patches_xy, backbone_type, flip_index, dir_eval, dir_output, pretraining, learning_rate, task, f1_threshold_classification, classification_classes_name): @@ -218,26 +224,33 @@ def run(_config, n_classes, n_epochs, input_height, num_patches_y = transformer_num_patches_xy[1] num_patches = num_patches_x * num_patches_y - ##if not (num_patches == (input_width / 32) * (input_height / 32)): - ##print("Error: transformer num patches error. Parameter transformer_num_patches_xy should be set to (input_width/32) = {} and (input_height/32) = {}".format(int(input_width / 32), int(input_height / 32)) ) - ##sys.exit(1) - #if not (transformer_patchsize == 1): - #print("Error: transformer patchsize error. Parameter transformer_patchsizeshould set to 1" ) - #sys.exit(1) - if (input_height != (num_patches_y * transformer_patchsize_y * 32) ): - print("Error: transformer_patchsize_y or transformer_num_patches_xy height value error . input_height should be equal to ( transformer_num_patches_xy height value * transformer_patchsize_y * 32)") - sys.exit(1) - if (input_width != (num_patches_x * transformer_patchsize_x * 32) ): - print("Error: transformer_patchsize_x or transformer_num_patches_xy width value error . input_width should be equal to ( transformer_num_patches_xy width value * transformer_patchsize_x * 32)") - sys.exit(1) - if (transformer_projection_dim % (transformer_patchsize_y * transformer_patchsize_x)) != 0: - print("Error: transformer_projection_dim error. The remainder when parameter transformer_projection_dim is divided by (transformer_patchsize_y*transformer_patchsize_x) should be zero") - sys.exit(1) + if transformer_cnn_first: + if (input_height != (num_patches_y * transformer_patchsize_y * 32) ): + print("Error: transformer_patchsize_y or transformer_num_patches_xy height value error . input_height should be equal to ( transformer_num_patches_xy height value * transformer_patchsize_y * 32)") + sys.exit(1) + if (input_width != (num_patches_x * transformer_patchsize_x * 32) ): + print("Error: transformer_patchsize_x or transformer_num_patches_xy width value error . input_width should be equal to ( transformer_num_patches_xy width value * transformer_patchsize_x * 32)") + sys.exit(1) + if (transformer_projection_dim % (transformer_patchsize_y * transformer_patchsize_x)) != 0: + print("Error: transformer_projection_dim error. The remainder when parameter transformer_projection_dim is divided by (transformer_patchsize_y*transformer_patchsize_x) should be zero") + sys.exit(1) + - model = vit_resnet50_unet(n_classes, transformer_patchsize_x, transformer_patchsize_y, num_patches, transformer_projection_dim, input_height, input_width, task, weight_decay, pretraining) + model = vit_resnet50_unet(n_classes, transformer_patchsize_x, transformer_patchsize_y, num_patches, transformer_mlp_head_units, transformer_layers, transformer_num_heads, transformer_projection_dim, input_height, input_width, task, weight_decay, pretraining) + else: + if (input_height != (num_patches_y * transformer_patchsize_y) ): + print("Error: transformer_patchsize_y or transformer_num_patches_xy height value error . input_height should be equal to ( transformer_num_patches_xy height value * transformer_patchsize_y)") + sys.exit(1) + if (input_width != (num_patches_x * transformer_patchsize_x) ): + print("Error: transformer_patchsize_x or transformer_num_patches_xy width value error . input_width should be equal to ( transformer_num_patches_xy width value * transformer_patchsize_x)") + sys.exit(1) + if (transformer_projection_dim % (transformer_patchsize_y * transformer_patchsize_x)) != 0: + print("Error: transformer_projection_dim error. The remainder when parameter transformer_projection_dim is divided by (transformer_patchsize_y*transformer_patchsize_x) should be zero") + sys.exit(1) + model = vit_resnet50_unet_transformer_before_cnn(n_classes, transformer_patchsize_x, transformer_patchsize_y, num_patches, transformer_mlp_head_units, transformer_layers, transformer_num_heads, transformer_projection_dim, input_height, input_width, task, weight_decay, pretraining) #if you want to see the model structure just uncomment model summary. - #model.summary() + model.summary() if (task == "segmentation" or task == "binarization"): From 66022cf771dafd0cafa0734b545e60fc44fa07af Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 12 Jun 2024 17:40:40 +0200 Subject: [PATCH 073/492] update config --- train/config_params.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/train/config_params.json b/train/config_params.json index d72530e..a89cbb5 100644 --- a/train/config_params.json +++ b/train/config_params.json @@ -42,7 +42,7 @@ "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, - "dir_train": "/home/vahid/Documents/test/training_data_sample_binarization", - "dir_eval": "/home/vahid/Documents/test/eval", - "dir_output": "/home/vahid/Documents/test/out" + "dir_train": "./train", + "dir_eval": "./eval", + "dir_output": "./output" } From b3cd01de3761ce251b9171aa8f48318d926594f5 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 21 Jun 2024 13:06:26 +0200 Subject: [PATCH 074/492] update reading order machine based --- train/generate_gt_for_training.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/train/generate_gt_for_training.py b/train/generate_gt_for_training.py index 752090c..cfcc151 100644 --- a/train/generate_gt_for_training.py +++ b/train/generate_gt_for_training.py @@ -163,8 +163,7 @@ def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, i #print('########################') xml_file = os.path.join(dir_xml,ind_xml ) f_name = ind_xml.split('.')[0] - file_name, id_paragraph, id_header,co_text_paragraph,\ - co_text_header,tot_region_ref,x_len, y_len,index_tot_regions,img_poly = read_xml(xml_file) + _, _, _, file_name, id_paragraph, id_header,co_text_paragraph,co_text_header,tot_region_ref,x_len, y_len,index_tot_regions,img_poly = read_xml(xml_file) id_all_text = id_paragraph + id_header co_text_all = co_text_paragraph + co_text_header From fe69b9c4a8428cc6a957f2b40c5aa559dd25416b Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 21 Jun 2024 23:42:25 +0200 Subject: [PATCH 075/492] update inference --- train/inference.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/train/inference.py b/train/inference.py index c7a8b02..3fec9c2 100644 --- a/train/inference.py +++ b/train/inference.py @@ -557,6 +557,10 @@ class sbb_predict: res=self.predict() if (self.task == 'classification' or self.task == 'reading_order'): pass + elif self.task == 'enhancement': + if self.save: + print(self.save) + cv2.imwrite(self.save,res) else: img_seg_overlayed = self.visualize_model_output(res, self.img_org, self.task) if self.save: From 9260d2962a0fbdcc30ae836d5e21af2122764aa7 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 9 Jul 2024 03:04:29 +0200 Subject: [PATCH 076/492] resolving typo --- train/gt_gen_utils.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index c2360fc..c264f4c 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -304,8 +304,9 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if config_file and config_params['use_case']=='layout': keys = list(config_params.keys()) - if "artificial_class_on_boundry" in keys: - elements_with_artificial_class = list(config_params['artificial_class_on_boundry']) + + if "artificial_class_on_boundary" in keys: + elements_with_artificial_class = list(config_params['artificial_class_on_boundary']) artificial_class_rgb_color = (255,255,0) artificial_class_label = config_params['artificial_class_label'] #values = config_params.values() @@ -567,8 +568,8 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ elif vv.tag!=link+'Point' and sumi>=1: break co_noise.append(np.array(c_t_in)) - - if "artificial_class_on_boundry" in keys: + + if "artificial_class_on_boundary" in keys: img_boundary = np.zeros( (y_len,x_len) ) if "paragraph" in elements_with_artificial_class: erosion_rate = 2 @@ -655,7 +656,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ img_poly=cv2.fillPoly(img, pts =co_text[element_text], color=color_label) - if "artificial_class_on_boundry" in keys: + if "artificial_class_on_boundary" in keys: img_poly[:,:,0][img_boundary[:,:]==1] = artificial_class_rgb_color[0] img_poly[:,:,1][img_boundary[:,:]==1] = artificial_class_rgb_color[1] img_poly[:,:,2][img_boundary[:,:]==1] = artificial_class_rgb_color[2] @@ -706,7 +707,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ color_label = config_params['textregions'][element_text] img_poly=cv2.fillPoly(img, pts =co_text[element_text], color=color_label) - if "artificial_class_on_boundry" in keys: + if "artificial_class_on_boundary" in keys: img_poly[:,:][img_boundary[:,:]==1] = artificial_class_label From 3bceec9c19158030acdb59f8f84c2d0d66382414 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 16 Jul 2024 18:29:27 +0200 Subject: [PATCH 077/492] printspace_as_class_in_layout is integrated. Printspace can be defined as a class for layout segmentation --- train/gt_gen_utils.py | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index c264f4c..1df7b2a 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -154,7 +154,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ x_new = columns_width_dict[str(num_col)] y_new = int ( x_new * (y_len / float(x_len)) ) - if printspace: + if printspace or "printspace_as_class_in_layout" in list(config_params.keys()): region_tags = np.unique([x for x in alltags if x.endswith('PrintSpace') or x.endswith('Border')]) co_use_case = [] @@ -279,6 +279,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if printspace and config_params['use_case']!='printspace': img_poly = img_poly[bb_xywh[1]:bb_xywh[1]+bb_xywh[3], bb_xywh[0]:bb_xywh[0]+bb_xywh[2], :] + if 'columns_width' in list(config_params.keys()) and num_col and config_params['use_case']!='printspace': img_poly = resize_image(img_poly, y_new, x_new) @@ -310,6 +311,10 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ artificial_class_rgb_color = (255,255,0) artificial_class_label = config_params['artificial_class_label'] #values = config_params.values() + + if "printspace_as_class_in_layout" in list(config_params.keys()): + printspace_class_rgb_color = (125,125,255) + printspace_class_label = config_params['printspace_as_class_in_layout'] if 'textregions' in keys: types_text_dict = config_params['textregions'] @@ -614,7 +619,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ - img = np.zeros( (y_len,x_len,3) ) + img = np.zeros( (y_len,x_len,3) ) if output_type == '3d': if 'graphicregions' in keys: @@ -661,6 +666,15 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ img_poly[:,:,1][img_boundary[:,:]==1] = artificial_class_rgb_color[1] img_poly[:,:,2][img_boundary[:,:]==1] = artificial_class_rgb_color[2] + + if "printspace_as_class_in_layout" in list(config_params.keys()): + printspace_mask = np.zeros((img_poly.shape[0], img_poly.shape[1])) + printspace_mask[bb_xywh[1]:bb_xywh[1]+bb_xywh[3], bb_xywh[0]:bb_xywh[0]+bb_xywh[2]] = 1 + + img_poly[:,:,0][printspace_mask[:,:] == 0] = printspace_class_rgb_color[0] + img_poly[:,:,1][printspace_mask[:,:] == 0] = printspace_class_rgb_color[1] + img_poly[:,:,2][printspace_mask[:,:] == 0] = printspace_class_rgb_color[2] + @@ -709,6 +723,14 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if "artificial_class_on_boundary" in keys: img_poly[:,:][img_boundary[:,:]==1] = artificial_class_label + + if "printspace_as_class_in_layout" in list(config_params.keys()): + printspace_mask = np.zeros((img_poly.shape[0], img_poly.shape[1])) + printspace_mask[bb_xywh[1]:bb_xywh[1]+bb_xywh[3], bb_xywh[0]:bb_xywh[0]+bb_xywh[2]] = 1 + + img_poly[:,:,0][printspace_mask[:,:] == 0] = printspace_class_label + img_poly[:,:,1][printspace_mask[:,:] == 0] = printspace_class_label + img_poly[:,:,2][printspace_mask[:,:] == 0] = printspace_class_label From 453d0fbf9220122096fd4578695783faa35823b7 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 17 Jul 2024 17:14:20 +0200 Subject: [PATCH 078/492] adding degrading and brightness augmentation to no patches case training --- train/utils.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/train/utils.py b/train/utils.py index 605d8d1..7a2274c 100644 --- a/train/utils.py +++ b/train/utils.py @@ -597,6 +597,14 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) indexer += 1 + if brightening: + for factor in brightness: + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', + (resize_image(do_brightening(dir_img + '/' +im, factor), input_height, input_width))) + + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', + resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) + indexer += 1 if binarization: cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', @@ -606,6 +614,15 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) indexer += 1 + if degrading: + for degrade_scale_ind in degrade_scales: + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', + (resize_image(do_degrading(cv2.imread(dir_img + '/' + im), degrade_scale_ind), input_height, input_width))) + + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', + resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) + indexer += 1 + if patches: indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, From 861f0b1ebd39d8d2c7d127a0d335f8a3ef17c6e2 Mon Sep 17 00:00:00 2001 From: b-vr103 Date: Wed, 17 Jul 2024 18:20:24 +0200 Subject: [PATCH 079/492] brightness augmentation modified --- train/utils.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/train/utils.py b/train/utils.py index 7a2274c..891ee15 100644 --- a/train/utils.py +++ b/train/utils.py @@ -599,12 +599,15 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow indexer += 1 if brightening: for factor in brightness: - cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', - (resize_image(do_brightening(dir_img + '/' +im, factor), input_height, input_width))) + try: + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', + (resize_image(do_brightening(dir_img + '/' +im, factor), input_height, input_width))) - cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', - resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) - indexer += 1 + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', + resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) + indexer += 1 + except: + pass if binarization: cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', From 840d7c2283d6b71e083c6f10bf3b2e4b8f2e9102 Mon Sep 17 00:00:00 2001 From: b-vr103 Date: Tue, 23 Jul 2024 11:29:05 +0200 Subject: [PATCH 080/492] increasing margin in the case of pixelwise inference --- train/inference.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/train/inference.py b/train/inference.py index 3fec9c2..49bebf8 100644 --- a/train/inference.py +++ b/train/inference.py @@ -219,7 +219,7 @@ class sbb_predict: added_image = cv2.addWeighted(img,0.5,output,0.1,0) - return added_image + return added_image, output def predict(self): self.start_new_session_and_model() @@ -444,7 +444,7 @@ class sbb_predict: if img.shape[1] < self.img_width: img = cv2.resize(img, (self.img_height, img.shape[0]), interpolation=cv2.INTER_NEAREST) - margin = int(0 * self.img_width) + margin = int(0.1 * self.img_width) width_mid = self.img_width - 2 * margin height_mid = self.img_height - 2 * margin img = img / float(255.0) @@ -562,9 +562,10 @@ class sbb_predict: print(self.save) cv2.imwrite(self.save,res) else: - img_seg_overlayed = self.visualize_model_output(res, self.img_org, self.task) + img_seg_overlayed, only_prediction = self.visualize_model_output(res, self.img_org, self.task) if self.save: cv2.imwrite(self.save,img_seg_overlayed) + cv2.imwrite('./layout.png', only_prediction) if self.ground_truth: gt_img=cv2.imread(self.ground_truth) From 2c822dae4e49d970d26a7776e20f55f34144d79e Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 24 Jul 2024 16:52:05 +0200 Subject: [PATCH 081/492] erosion and dilation parameters are changed & separators are written in label images after artificial label --- train/gt_gen_utils.py | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 1df7b2a..253c44a 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -577,8 +577,8 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if "artificial_class_on_boundary" in keys: img_boundary = np.zeros( (y_len,x_len) ) if "paragraph" in elements_with_artificial_class: - erosion_rate = 2 - dilation_rate = 4 + erosion_rate = 0#2 + dilation_rate = 3#4 co_text['paragraph'], img_boundary = update_region_contours(co_text['paragraph'], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "drop-capital" in elements_with_artificial_class: erosion_rate = 0 @@ -586,35 +586,35 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ co_text["drop-capital"], img_boundary = update_region_contours(co_text["drop-capital"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "catch-word" in elements_with_artificial_class: erosion_rate = 0 - dilation_rate = 4 + dilation_rate = 2#4 co_text["catch-word"], img_boundary = update_region_contours(co_text["catch-word"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "page-number" in elements_with_artificial_class: erosion_rate = 0 - dilation_rate = 4 + dilation_rate = 2#4 co_text["page-number"], img_boundary = update_region_contours(co_text["page-number"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "header" in elements_with_artificial_class: - erosion_rate = 1 - dilation_rate = 4 + erosion_rate = 0#1 + dilation_rate = 3#4 co_text["header"], img_boundary = update_region_contours(co_text["header"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "heading" in elements_with_artificial_class: - erosion_rate = 1 - dilation_rate = 4 + erosion_rate = 0#1 + dilation_rate = 3#4 co_text["heading"], img_boundary = update_region_contours(co_text["heading"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "signature-mark" in elements_with_artificial_class: erosion_rate = 1 dilation_rate = 4 co_text["signature-mark"], img_boundary = update_region_contours(co_text["signature-mark"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "marginalia" in elements_with_artificial_class: - erosion_rate = 2 - dilation_rate = 4 + erosion_rate = 0#2 + dilation_rate = 3#4 co_text["marginalia"], img_boundary = update_region_contours(co_text["marginalia"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "footnote" in elements_with_artificial_class: - erosion_rate = 2 - dilation_rate = 4 + erosion_rate = 0#2 + dilation_rate = 2#4 co_text["footnote"], img_boundary = update_region_contours(co_text["footnote"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "footnote-continued" in elements_with_artificial_class: - erosion_rate = 2 - dilation_rate = 4 + erosion_rate = 0#2 + dilation_rate = 2#4 co_text["footnote-continued"], img_boundary = update_region_contours(co_text["footnote-continued"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) @@ -639,8 +639,6 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if 'imageregion' in keys: img_poly=cv2.fillPoly(img, pts =co_img, color=labels_rgb_color[ config_params['imageregion']]) - if 'separatorregion' in keys: - img_poly=cv2.fillPoly(img, pts =co_sep, color=labels_rgb_color[ config_params['separatorregion']]) if 'tableregion' in keys: img_poly=cv2.fillPoly(img, pts =co_table, color=labels_rgb_color[ config_params['tableregion']]) if 'noiseregion' in keys: @@ -666,6 +664,9 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ img_poly[:,:,1][img_boundary[:,:]==1] = artificial_class_rgb_color[1] img_poly[:,:,2][img_boundary[:,:]==1] = artificial_class_rgb_color[2] + if 'separatorregion' in keys: + img_poly=cv2.fillPoly(img, pts =co_sep, color=labels_rgb_color[ config_params['separatorregion']]) + if "printspace_as_class_in_layout" in list(config_params.keys()): printspace_mask = np.zeros((img_poly.shape[0], img_poly.shape[1])) @@ -697,9 +698,6 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if 'imageregion' in keys: color_label = config_params['imageregion'] img_poly=cv2.fillPoly(img, pts =co_img, color=(color_label,color_label,color_label)) - if 'separatorregion' in keys: - color_label = config_params['separatorregion'] - img_poly=cv2.fillPoly(img, pts =co_sep, color=(color_label,color_label,color_label)) if 'tableregion' in keys: color_label = config_params['tableregion'] img_poly=cv2.fillPoly(img, pts =co_table, color=(color_label,color_label,color_label)) @@ -724,6 +722,10 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if "artificial_class_on_boundary" in keys: img_poly[:,:][img_boundary[:,:]==1] = artificial_class_label + if 'separatorregion' in keys: + color_label = config_params['separatorregion'] + img_poly=cv2.fillPoly(img, pts =co_sep, color=(color_label,color_label,color_label)) + if "printspace_as_class_in_layout" in list(config_params.keys()): printspace_mask = np.zeros((img_poly.shape[0], img_poly.shape[1])) printspace_mask[bb_xywh[1]:bb_xywh[1]+bb_xywh[3], bb_xywh[0]:bb_xywh[0]+bb_xywh[2]] = 1 From 6fb28d6ce8cab024595a8a787d92129fbbeaf3c3 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 1 Aug 2024 14:30:51 +0200 Subject: [PATCH 082/492] erosion rate changed --- train/gt_gen_utils.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 253c44a..13010bf 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -577,36 +577,36 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if "artificial_class_on_boundary" in keys: img_boundary = np.zeros( (y_len,x_len) ) if "paragraph" in elements_with_artificial_class: - erosion_rate = 0#2 - dilation_rate = 3#4 + erosion_rate = 2 + dilation_rate = 4 co_text['paragraph'], img_boundary = update_region_contours(co_text['paragraph'], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "drop-capital" in elements_with_artificial_class: - erosion_rate = 0 - dilation_rate = 4 + erosion_rate = 1 + dilation_rate = 3 co_text["drop-capital"], img_boundary = update_region_contours(co_text["drop-capital"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "catch-word" in elements_with_artificial_class: erosion_rate = 0 - dilation_rate = 2#4 + dilation_rate = 3#4 co_text["catch-word"], img_boundary = update_region_contours(co_text["catch-word"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "page-number" in elements_with_artificial_class: erosion_rate = 0 - dilation_rate = 2#4 + dilation_rate = 3#4 co_text["page-number"], img_boundary = update_region_contours(co_text["page-number"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "header" in elements_with_artificial_class: - erosion_rate = 0#1 - dilation_rate = 3#4 + erosion_rate = 1 + dilation_rate = 4 co_text["header"], img_boundary = update_region_contours(co_text["header"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "heading" in elements_with_artificial_class: - erosion_rate = 0#1 - dilation_rate = 3#4 + erosion_rate = 1 + dilation_rate = 4 co_text["heading"], img_boundary = update_region_contours(co_text["heading"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "signature-mark" in elements_with_artificial_class: erosion_rate = 1 dilation_rate = 4 co_text["signature-mark"], img_boundary = update_region_contours(co_text["signature-mark"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "marginalia" in elements_with_artificial_class: - erosion_rate = 0#2 - dilation_rate = 3#4 + erosion_rate = 2 + dilation_rate = 4 co_text["marginalia"], img_boundary = update_region_contours(co_text["marginalia"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) if "footnote" in elements_with_artificial_class: erosion_rate = 0#2 From 2d83b8faad8e6e0983529cda221eb17ebb0048f4 Mon Sep 17 00:00:00 2001 From: Clemens Neudecker <952378+cneud@users.noreply.github.com> Date: Thu, 8 Aug 2024 16:35:06 +0200 Subject: [PATCH 083/492] add documentation from wiki as markdown file to the codebase --- train/train.md | 576 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 576 insertions(+) create mode 100644 train/train.md diff --git a/train/train.md b/train/train.md new file mode 100644 index 0000000..553522b --- /dev/null +++ b/train/train.md @@ -0,0 +1,576 @@ +# Documentation for Training Models + +This repository assists users in preparing training datasets, training models, and performing inference with trained models. We cover various use cases including pixel-wise segmentation, image classification, image enhancement, and machine-based reading order. For each use case, we provide guidance on how to generate the corresponding training dataset. +All these use cases are now utilized in the Eynollah workflow. +As mentioned, the following three tasks can be accomplished using this repository: + +* Generate training dataset +* Train a model +* Inference with the trained model + +## Generate training dataset +The script generate_gt_for_training.py is used for generating training datasets. As the results of the following command demonstrate, the dataset generator provides three different commands: + +`python generate_gt_for_training.py --help` + + +These three commands are: + +* image-enhancement +* machine-based-reading-order +* pagexml2label + + +### image-enhancement + +Generating a training dataset for image enhancement is quite straightforward. All that is needed is a set of high-resolution images. The training dataset can then be generated using the following command: + +`python generate_gt_for_training.py image-enhancement -dis "dir of high resolution images" -dois "dir where degraded images will be written" -dols "dir where the corresponding high resolution image will be written as label" -scs "degrading scales json file"` + +The scales JSON file is a dictionary with a key named 'scales' and values representing scales smaller than 1. Images are downscaled based on these scales and then upscaled again to their original size. This process causes the images to lose resolution at different scales. The degraded images are used as input images, and the original high-resolution images serve as labels. The enhancement model can be trained with this generated dataset. The scales JSON file looks like this: + +```yaml +{ + "scales": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9] +} +``` + +### machine-based-reading-order + +For machine-based reading order, we aim to determine the reading priority between two sets of text regions. The model's input is a three-channel image: the first and last channels contain information about each of the two text regions, while the middle channel encodes prominent layout elements necessary for reading order, such as separators and headers. To generate the training dataset, our script requires a page XML file that specifies the image layout with the correct reading order. + +For output images, it is necessary to specify the width and height. Additionally, a minimum text region size can be set to filter out regions smaller than this minimum size. This minimum size is defined as the ratio of the text region area to the image area, with a default value of zero. To run the dataset generator, use the following command: + + +`python generate_gt_for_training.py machine-based-reading-order -dx "dir of GT xml files" -domi "dir where output images will be written" -docl "dir where the labels will be written" -ih "height" -iw "width" -min "min area ratio"` + +### pagexml2label + +pagexml2label is designed to generate labels from GT page XML files for various pixel-wise segmentation use cases, including 'layout,' 'textline,' 'printspace,' 'glyph,' and 'word' segmentation. +To train a pixel-wise segmentation model, we require images along with their corresponding labels. Our training script expects a PNG image where each pixel corresponds to a label, represented by an integer. The background is always labeled as zero, while other elements are assigned different integers. For instance, if we have ground truth data with four elements including the background, the classes would be labeled as 0, 1, 2, and 3 respectively. + +In binary segmentation scenarios such as textline or page extraction, the background is encoded as 0, and the desired element is automatically encoded as 1 in the PNG label. + +To specify the desired use case and the elements to be extracted in the PNG labels, a custom JSON file can be passed. For example, in the case of 'textline' detection, the JSON file would resemble this: + +```yaml +{ +"use_case": "textline" +} +``` + +In the case of layout segmentation a possible custom config json file can be like this: + +```yaml +{ +"use_case": "layout", +"textregions":{"rest_as_paragraph":1 , "drop-capital": 1, "header":2, "heading":2, "marginalia":3}, +"imageregion":4, +"separatorregion":5, +"graphicregions" :{"rest_as_decoration":6 ,"stamp":7} +} +``` + +A possible custom config json file for layout segmentation where the "printspace" is wished to be a class: + +```yaml +{ +"use_case": "layout", +"textregions":{"rest_as_paragraph":1 , "drop-capital": 1, "header":2, "heading":2, "marginalia":3}, +"imageregion":4, +"separatorregion":5, +"graphicregions" :{"rest_as_decoration":6 ,"stamp":7} +"printspace_as_class_in_layout" : 8 +} +``` +For the layout use case, it is beneficial to first understand the structure of the page XML file and its elements. In a given image, the annotations of elements are recorded in a page XML file, including their contours and classes. For an image document, the known regions are 'textregion', 'separatorregion', 'imageregion', 'graphicregion', 'noiseregion', and 'tableregion'. + +Text regions and graphic regions also have their own specific types. The known types for us for text regions are 'paragraph', 'header', 'heading', 'marginalia', 'drop-capital', 'footnote', 'footnote-continued', 'signature-mark', 'page-number', and 'catch-word'. The known types for graphic regions are 'handwritten-annotation', 'decoration', 'stamp', and 'signature'. +Since we don't know all types of text and graphic regions, unknown cases can arise. To handle these, we have defined two additional types: "rest_as_paragraph" and "rest_as_decoration" to ensure that no unknown types are missed. This way, users can extract all known types from the labels and be confident that no unknown types are overlooked. + +In the custom JSON file shown above, "header" and "heading" are extracted as the same class, while "marginalia" is shown as a different class. All other text region types, including "drop-capital," are grouped into the same class. For the graphic region, "stamp" has its own class, while all other types are classified together. "Image region" and "separator region" are also present in the label. However, other regions like "noise region" and "table region" will not be included in the label PNG file, even if they have information in the page XML files, as we chose not to include them. + +`python generate_gt_for_training.py pagexml2label -dx "dir of GT xml files" -do "dir where output label png files will be written" -cfg "custom config json file" -to "output type which has 2d and 3d. 2d is used for training and 3d is just to visualise the labels" "` + +We have also defined an artificial class that can be added to the boundary of text region types or text lines. This key is called "artificial_class_on_boundary." If users want to apply this to certain text regions in the layout use case, the example JSON config file should look like this: + +```yaml +{ + "use_case": "layout", + "textregions": { + "paragraph": 1, + "drop-capital": 1, + "header": 2, + "heading": 2, + "marginalia": 3 + }, + "imageregion": 4, + "separatorregion": 5, + "graphicregions": { + "rest_as_decoration": 6 + }, + "artificial_class_on_boundary": ["paragraph", "header", "heading", "marginalia"], + "artificial_class_label": 7 +} +``` + +This implies that the artificial class label, denoted by 7, will be present on PNG files and will only be added to the elements labeled as "paragraph," "header," "heading," and "marginalia." + +For "textline," "word," and "glyph," the artificial class on the boundaries will be activated only if the "artificial_class_label" key is specified in the config file. Its value should be set as 2 since these elements represent binary cases. For example, if the background and textline are denoted as 0 and 1 respectively, then the artificial class should be assigned the value 2. The example JSON config file should look like this for "textline" use case: + +```yaml +{ + "use_case": "textline", + "artificial_class_label": 2 +} +``` + +If the coordinates of "PrintSpace" or "Border" are present in the page XML ground truth files, and the user wishes to crop only the print space area, this can be achieved by activating the "-ps" argument. However, it should be noted that in this scenario, since cropping will be applied to the label files, the directory of the original images must be provided to ensure that they are cropped in sync with the labels. This ensures that the correct images and labels required for training are obtained. The command should resemble the following: + +`python generate_gt_for_training.py pagexml2label -dx "dir of GT xml files" -do "dir where output label png files will be written" -cfg "custom config json file" -to "output type which has 2d and 3d. 2d is used for training and 3d is just to visualise the labels" -ps -di "dir where the org images are located" -doi "dir where the cropped output images will be written" ` + +## Train a model +### classification + +For the classification use case, we haven't provided a ground truth generator, as it's unnecessary. For classification, all we require is a training directory with subdirectories, each containing images of its respective classes. We need separate directories for training and evaluation, and the class names (subdirectories) must be consistent across both directories. Additionally, the class names should be specified in the config JSON file, as shown in the following example. If, for instance, we aim to classify "apple" and "orange," with a total of 2 classes, the "classification_classes_name" key in the config file should appear as follows: + +```yaml +{ + "backbone_type" : "nontransformer", + "task": "classification", + "n_classes" : 2, + "n_epochs" : 10, + "input_height" : 448, + "input_width" : 448, + "weight_decay" : 1e-6, + "n_batch" : 4, + "learning_rate": 1e-4, + "f1_threshold_classification": 0.8, + "pretraining" : true, + "classification_classes_name" : {"0":"apple", "1":"orange"}, + "dir_train": "./train", + "dir_eval": "./eval", + "dir_output": "./output" +} +``` + +The "dir_train" should be like this: + +``` +. +└── train # train directory + ├── apple # directory of images for apple class + └── orange # directory of images for orange class +``` + +And the "dir_eval" the same structure as train directory: + +``` +. +└── eval # evaluation directory + ├── apple # directory of images for apple class + └── orange # directory of images for orange class + +``` + +The classification model can be trained using the following command line: + +`python train.py with config_classification.json` + + +As evident in the example JSON file above, for classification, we utilize a "f1_threshold_classification" parameter. This parameter is employed to gather all models with an evaluation f1 score surpassing this threshold. Subsequently, an ensemble of these model weights is executed, and a model is saved in the output directory as "model_ens_avg". Additionally, the weight of the best model based on the evaluation f1 score is saved as "model_best". + +### reading order +An example config json file for machine based reading order should be like this: + +```yaml +{ + "backbone_type" : "nontransformer", + "task": "reading_order", + "n_classes" : 1, + "n_epochs" : 5, + "input_height" : 672, + "input_width" : 448, + "weight_decay" : 1e-6, + "n_batch" : 4, + "learning_rate": 1e-4, + "pretraining" : true, + "dir_train": "./train", + "dir_eval": "./eval", + "dir_output": "./output" +} +``` + +The "dir_train" should be like this: + +``` +. +└── train # train directory + ├── images # directory of images + └── labels # directory of labels +``` + +And the "dir_eval" the same structure as train directory: + +``` +. +└── eval # evaluation directory + ├── images # directory of images + └── labels # directory of labels +``` + +The classification model can be trained like the classification case command line. + +### Segmentation (Textline, Binarization, Page extraction and layout) and enhancement + +#### Parameter configuration for segmentation or enhancement usecases + +The following parameter configuration can be applied to all segmentation use cases and enhancements. The augmentation, its sub-parameters, and continued training are defined only for segmentation use cases and enhancements, not for classification and machine-based reading order, as you can see in their example config files. + +* backbone_type: For segmentation tasks (such as text line, binarization, and layout detection) and enhancement, we offer two backbone options: a "nontransformer" and a "transformer" backbone. For the "transformer" backbone, we first apply a CNN followed by a transformer. In contrast, the "nontransformer" backbone utilizes only a CNN ResNet-50. +* task : The task parameter can have values such as "segmentation", "enhancement", "classification", and "reading_order". +* patches: If you want to break input images into smaller patches (input size of the model) you need to set this parameter to ``true``. In the case that the model should see the image once, like page extraction, patches should be set to ``false``. +* n_batch: Number of batches at each iteration. +* n_classes: Number of classes. In the case of binary classification this should be 2. In the case of reading_order it should set to 1. And for the case of layout detection just the unique number of classes should be given. +* n_epochs: Number of epochs. +* input_height: This indicates the height of model's input. +* input_width: This indicates the width of model's input. +* weight_decay: Weight decay of l2 regularization of model layers. +* pretraining: Set to ``true`` to load pretrained weights of ResNet50 encoder. The downloaded weights should be saved in a folder named "pretrained_model" in the same directory of "train.py" script. +* augmentation: If you want to apply any kind of augmentation this parameter should first set to ``true``. +* flip_aug: If ``true``, different types of filp will be applied on image. Type of flips is given with "flip_index" parameter. +* blur_aug: If ``true``, different types of blurring will be applied on image. Type of blurrings is given with "blur_k" parameter. +* scaling: If ``true``, scaling will be applied on image. Scale of scaling is given with "scales" parameter. +* degrading: If ``true``, degrading will be applied to the image. The amount of degrading is defined with "degrade_scales" parameter. +* brightening: If ``true``, brightening will be applied to the image. The amount of brightening is defined with "brightness" parameter. +* rotation_not_90: If ``true``, rotation (not 90 degree) will be applied on image. Rotation angles are given with "thetha" parameter. +* rotation: If ``true``, 90 degree rotation will be applied on image. +* binarization: If ``true``,Otsu thresholding will be applied to augment the input data with binarized images. +* scaling_bluring: If ``true``, combination of scaling and blurring will be applied on image. +* scaling_binarization: If ``true``, combination of scaling and binarization will be applied on image. +* scaling_flip: If ``true``, combination of scaling and flip will be applied on image. +* flip_index: Type of flips. +* blur_k: Type of blurrings. +* scales: Scales of scaling. +* brightness: The amount of brightenings. +* thetha: Rotation angles. +* degrade_scales: The amount of degradings. +* continue_training: If ``true``, it means that you have already trained a model and you would like to continue the training. So it is needed to provide the dir of trained model with "dir_of_start_model" and index for naming the models. For example if you have already trained for 3 epochs then your last index is 2 and if you want to continue from model_1.h5, you can set ``index_start`` to 3 to start naming model with index 3. +* weighted_loss: If ``true``, this means that you want to apply weighted categorical_crossentropy as loss fucntion. Be carefull if you set to ``true``the parameter "is_loss_soft_dice" should be ``false`` +* data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train and eval data are in "dir_output". Since when once we provide training data we resize and augment them and then we write them in sub-directories train and eval in "dir_output". +* dir_train: This is the directory of "images" and "labels" (dir_train should include two subdirectories with names of images and labels ) for raw images and labels. Namely they are not prepared (not resized and not augmented) yet for training the model. When we run this tool these raw data will be transformed to suitable size needed for the model and they will be written in "dir_output" in train and eval directories. Each of train and eval include "images" and "labels" sub-directories. +* index_start: Starting index for saved models in the case that "continue_training" is ``true``. +* dir_of_start_model: Directory containing pretrained model to continue training the model in the case that "continue_training" is ``true``. +* transformer_num_patches_xy: Number of patches for vision transformer in x and y direction respectively. +* transformer_patchsize_x: Patch size of vision transformer patches in x direction. +* transformer_patchsize_y: Patch size of vision transformer patches in y direction. +* transformer_projection_dim: Transformer projection dimension. Default value is 64. +* transformer_mlp_head_units: Transformer Multilayer Perceptron (MLP) head units. Default value is [128, 64]. +* transformer_layers: transformer layers. Default value is 8. +* transformer_num_heads: Transformer number of heads. Default value is 4. +* transformer_cnn_first: We have two types of vision transformers. In one type, a CNN is applied first, followed by a transformer. In the other type, this order is reversed. If transformer_cnn_first is true, it means the CNN will be applied before the transformer. Default value is true. + +In the case of segmentation and enhancement the train and evaluation directory should be as following. + +The "dir_train" should be like this: + +``` +. +└── train # train directory + ├── images # directory of images + └── labels # directory of labels +``` + +And the "dir_eval" the same structure as train directory: + +``` +. +└── eval # evaluation directory + ├── images # directory of images + └── labels # directory of labels +``` + +After configuring the JSON file for segmentation or enhancement, training can be initiated by running the following command, similar to the process for classification and reading order: + +`python train.py with config_classification.json` + +#### Binarization + +An example config json file for binarization can be like this: + +```yaml +{ + "backbone_type" : "transformer", + "task": "binarization", + "n_classes" : 2, + "n_epochs" : 4, + "input_height" : 224, + "input_width" : 672, + "weight_decay" : 1e-6, + "n_batch" : 1, + "learning_rate": 1e-4, + "patches" : true, + "pretraining" : true, + "augmentation" : true, + "flip_aug" : false, + "blur_aug" : false, + "scaling" : true, + "degrading": false, + "brightening": false, + "binarization" : false, + "scaling_bluring" : false, + "scaling_binarization" : false, + "scaling_flip" : false, + "rotation": false, + "rotation_not_90": false, + "transformer_num_patches_xy": [7, 7], + "transformer_patchsize_x": 3, + "transformer_patchsize_y": 1, + "transformer_projection_dim": 192, + "transformer_mlp_head_units": [128, 64], + "transformer_layers": 8, + "transformer_num_heads": 4, + "transformer_cnn_first": true, + "blur_k" : ["blur","guass","median"], + "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], + "brightness" : [1.3, 1.5, 1.7, 2], + "degrade_scales" : [0.2, 0.4], + "flip_index" : [0, 1, -1], + "thetha" : [10, -10], + "continue_training": false, + "index_start" : 0, + "dir_of_start_model" : " ", + "weighted_loss": false, + "is_loss_soft_dice": false, + "data_is_provided": false, + "dir_train": "./train", + "dir_eval": "./eval", + "dir_output": "./output" +} +``` + +#### Textline + +```yaml +{ + "backbone_type" : "nontransformer", + "task": "segmentation", + "n_classes" : 2, + "n_epochs" : 4, + "input_height" : 448, + "input_width" : 224, + "weight_decay" : 1e-6, + "n_batch" : 1, + "learning_rate": 1e-4, + "patches" : true, + "pretraining" : true, + "augmentation" : true, + "flip_aug" : false, + "blur_aug" : false, + "scaling" : true, + "degrading": false, + "brightening": false, + "binarization" : false, + "scaling_bluring" : false, + "scaling_binarization" : false, + "scaling_flip" : false, + "rotation": false, + "rotation_not_90": false, + "blur_k" : ["blur","guass","median"], + "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], + "brightness" : [1.3, 1.5, 1.7, 2], + "degrade_scales" : [0.2, 0.4], + "flip_index" : [0, 1, -1], + "thetha" : [10, -10], + "continue_training": false, + "index_start" : 0, + "dir_of_start_model" : " ", + "weighted_loss": false, + "is_loss_soft_dice": false, + "data_is_provided": false, + "dir_train": "./train", + "dir_eval": "./eval", + "dir_output": "./output" +} +``` + +#### Enhancement + +```yaml +{ + "backbone_type" : "nontransformer", + "task": "enhancement", + "n_classes" : 3, + "n_epochs" : 4, + "input_height" : 448, + "input_width" : 224, + "weight_decay" : 1e-6, + "n_batch" : 4, + "learning_rate": 1e-4, + "patches" : true, + "pretraining" : true, + "augmentation" : true, + "flip_aug" : false, + "blur_aug" : false, + "scaling" : true, + "degrading": false, + "brightening": false, + "binarization" : false, + "scaling_bluring" : false, + "scaling_binarization" : false, + "scaling_flip" : false, + "rotation": false, + "rotation_not_90": false, + "blur_k" : ["blur","guass","median"], + "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], + "brightness" : [1.3, 1.5, 1.7, 2], + "degrade_scales" : [0.2, 0.4], + "flip_index" : [0, 1, -1], + "thetha" : [10, -10], + "continue_training": false, + "index_start" : 0, + "dir_of_start_model" : " ", + "weighted_loss": false, + "is_loss_soft_dice": false, + "data_is_provided": false, + "dir_train": "./train", + "dir_eval": "./eval", + "dir_output": "./output" +} +``` + +It's important to mention that the value of n_classes for enhancement should be 3, as the model's output is a 3-channel image. + +#### Page extraction + +```yaml +{ + "backbone_type" : "nontransformer", + "task": "segmentation", + "n_classes" : 2, + "n_epochs" : 4, + "input_height" : 448, + "input_width" : 224, + "weight_decay" : 1e-6, + "n_batch" : 1, + "learning_rate": 1e-4, + "patches" : false, + "pretraining" : true, + "augmentation" : false, + "flip_aug" : false, + "blur_aug" : false, + "scaling" : true, + "degrading": false, + "brightening": false, + "binarization" : false, + "scaling_bluring" : false, + "scaling_binarization" : false, + "scaling_flip" : false, + "rotation": false, + "rotation_not_90": false, + "blur_k" : ["blur","guass","median"], + "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], + "brightness" : [1.3, 1.5, 1.7, 2], + "degrade_scales" : [0.2, 0.4], + "flip_index" : [0, 1, -1], + "thetha" : [10, -10], + "continue_training": false, + "index_start" : 0, + "dir_of_start_model" : " ", + "weighted_loss": false, + "is_loss_soft_dice": false, + "data_is_provided": false, + "dir_train": "./train", + "dir_eval": "./eval", + "dir_output": "./output" +} +``` + +For page segmentation (or print space or border segmentation), the model needs to view the input image in its entirety, hence the patches parameter should be set to false. + +#### layout segmentation + +An example config json file for layout segmentation with 5 classes (including background) can be like this: + +```yaml +{ + "backbone_type" : "transformer", + "task": "segmentation", + "n_classes" : 5, + "n_epochs" : 4, + "input_height" : 448, + "input_width" : 224, + "weight_decay" : 1e-6, + "n_batch" : 1, + "learning_rate": 1e-4, + "patches" : true, + "pretraining" : true, + "augmentation" : true, + "flip_aug" : false, + "blur_aug" : false, + "scaling" : true, + "degrading": false, + "brightening": false, + "binarization" : false, + "scaling_bluring" : false, + "scaling_binarization" : false, + "scaling_flip" : false, + "rotation": false, + "rotation_not_90": false, + "transformer_num_patches_xy": [7, 14], + "transformer_patchsize_x": 1, + "transformer_patchsize_y": 1, + "transformer_projection_dim": 64, + "transformer_mlp_head_units": [128, 64], + "transformer_layers": 8, + "transformer_num_heads": 4, + "transformer_cnn_first": true, + "blur_k" : ["blur","guass","median"], + "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], + "brightness" : [1.3, 1.5, 1.7, 2], + "degrade_scales" : [0.2, 0.4], + "flip_index" : [0, 1, -1], + "thetha" : [10, -10], + "continue_training": false, + "index_start" : 0, + "dir_of_start_model" : " ", + "weighted_loss": false, + "is_loss_soft_dice": false, + "data_is_provided": false, + "dir_train": "./train", + "dir_eval": "./eval", + "dir_output": "./output" +} +``` +## Inference with the trained model +### classification + +For conducting inference with a trained model, you simply need to execute the following command line, specifying the directory of the model and the image on which to perform inference: + + +`python inference.py -m "model dir" -i "image" ` + +This will straightforwardly return the class of the image. + +### machine based reading order + + +To infer the reading order using an reading order model, we need a page XML file containing layout information but without the reading order. We simply need to provide the model directory, the XML file, and the output directory. The new XML file with the added reading order will be written to the output directory with the same name. We need to run: + +`python inference.py -m "model dir" -xml "page xml file" -o "output dir to write new xml with reading order" ` + + +### Segmentation (Textline, Binarization, Page extraction and layout) and enhancement + +For conducting inference with a trained model for segmentation and enhancement you need to run the following command line: + + +`python inference.py -m "model dir" -i "image" -p -s "output image" ` + + +Note that in the case of page extraction the -p flag is not needed. + +For segmentation or binarization tasks, if a ground truth (GT) label is available, the IOU evaluation metric can be calculated for the output. To do this, you need to provide the GT label using the argument -gt. + + + From 3b90347a94521f6ed935ab1a94b39fe9504442ce Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 9 Aug 2024 12:46:18 +0200 Subject: [PATCH 084/492] save only layout output. different from overlayed layout on image --- train/inference.py | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/train/inference.py b/train/inference.py index 49bebf8..6054b01 100644 --- a/train/inference.py +++ b/train/inference.py @@ -32,6 +32,7 @@ class sbb_predict: self.image=image self.patches=patches self.save=save + self.save_layout=save_layout self.model_dir=model self.ground_truth=ground_truth self.task=task @@ -181,6 +182,7 @@ class sbb_predict: prediction = prediction * -1 prediction = prediction + 1 added_image = prediction * 255 + layout_only = None else: unique_classes = np.unique(prediction[:,:,0]) rgb_colors = {'0' : [255, 255, 255], @@ -200,26 +202,26 @@ class sbb_predict: '14' : [255, 125, 125], '15' : [255, 0, 255]} - output = np.zeros(prediction.shape) + layout_only = np.zeros(prediction.shape) for unq_class in unique_classes: rgb_class_unique = rgb_colors[str(int(unq_class))] - output[:,:,0][prediction[:,:,0]==unq_class] = rgb_class_unique[0] - output[:,:,1][prediction[:,:,0]==unq_class] = rgb_class_unique[1] - output[:,:,2][prediction[:,:,0]==unq_class] = rgb_class_unique[2] + layout_only[:,:,0][prediction[:,:,0]==unq_class] = rgb_class_unique[0] + layout_only[:,:,1][prediction[:,:,0]==unq_class] = rgb_class_unique[1] + layout_only[:,:,2][prediction[:,:,0]==unq_class] = rgb_class_unique[2] - img = self.resize_image(img, output.shape[0], output.shape[1]) + img = self.resize_image(img, layout_only.shape[0], layout_only.shape[1]) - output = output.astype(np.int32) + layout_only = layout_only.astype(np.int32) img = img.astype(np.int32) - added_image = cv2.addWeighted(img,0.5,output,0.1,0) + added_image = cv2.addWeighted(img,0.5,layout_only,0.1,0) - return added_image, output + return added_image, layout_only def predict(self): self.start_new_session_and_model() @@ -559,13 +561,12 @@ class sbb_predict: pass elif self.task == 'enhancement': if self.save: - print(self.save) cv2.imwrite(self.save,res) else: - img_seg_overlayed, only_prediction = self.visualize_model_output(res, self.img_org, self.task) + img_seg_overlayed, only_layout = self.visualize_model_output(res, self.img_org, self.task) if self.save: cv2.imwrite(self.save,img_seg_overlayed) - cv2.imwrite('./layout.png', only_prediction) + cv2.imwrite(self.save_layout, only_layout) if self.ground_truth: gt_img=cv2.imread(self.ground_truth) @@ -595,6 +596,11 @@ class sbb_predict: "-s", help="save prediction as a png file in current folder.", ) +@click.option( + "--save_layout", + "-sl", + help="save layout prediction only as a png file in current folder.", +) @click.option( "--model", "-m", @@ -618,7 +624,7 @@ class sbb_predict: "-min", help="min area size of regions considered for reading order detection. The default value is zero and means that all text regions are considered for reading order.", ) -def main(image, model, patches, save, ground_truth, xml_file, out, min_area): +def main(image, model, patches, save, save_layout, ground_truth, xml_file, out, min_area): with open(os.path.join(model,'config.json')) as f: config_params_model = json.load(f) task = config_params_model['task'] @@ -626,7 +632,7 @@ def main(image, model, patches, save, ground_truth, xml_file, out, min_area): if not save: print("Error: You used one of segmentation or binarization task but not set -s, you need a filename to save visualized output with -s") sys.exit(1) - x=sbb_predict(image, model, task, config_params_model, patches, save, ground_truth, xml_file, out, min_area) + x=sbb_predict(image, model, task, config_params_model, patches, save, save_layout, ground_truth, xml_file, out, min_area) x.run() if __name__=="__main__": From bf5837bf6e4c44add1d401a9912fd1bd599df780 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 9 Aug 2024 13:20:09 +0200 Subject: [PATCH 085/492] update --- train/inference.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train/inference.py b/train/inference.py index 6054b01..8d0a572 100644 --- a/train/inference.py +++ b/train/inference.py @@ -28,7 +28,7 @@ Tool to load model and predict for given image. """ class sbb_predict: - def __init__(self,image, model, task, config_params_model, patches, save, ground_truth, xml_file, out, min_area): + def __init__(self,image, model, task, config_params_model, patches, save, save_layout, ground_truth, xml_file, out, min_area): self.image=image self.patches=patches self.save=save From 5e1821a7419bc20ff760eafccfb940b0c4938eb5 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 21 Aug 2024 00:48:30 +0200 Subject: [PATCH 086/492] augmentation function for red textlines, rgb background and scaling for no patch case --- train/utils.py | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/train/utils.py b/train/utils.py index 891ee15..2278849 100644 --- a/train/utils.py +++ b/train/utils.py @@ -12,6 +12,76 @@ from tensorflow.keras.utils import to_categorical from PIL import Image, ImageEnhance +def return_shuffled_channels(img, channels_order): + """ + channels order in ordinary case is like this [0, 1, 2]. In the case of shuffling the order should be provided. + """ + img_sh = np.copy(img) + + img_sh[:,:,0]= img[:,:,channels_order[0]] + img_sh[:,:,1]= img[:,:,channels_order[1]] + img_sh[:,:,2]= img[:,:,channels_order[2]] + return img_sh + +def return_binary_image_with_red_textlines(img_bin): + img_red = np.copy(img_bin) + + img_red[:,:,0][img_bin[:,:,0] == 0] = 255 + return img_red + +def return_binary_image_with_given_rgb_background(img_bin, img_rgb_background): + img_rgb_background = resize_image(img_rgb_background ,img_bin.shape[0], img_bin.shape[1]) + + img_final = np.copy(img_bin) + + img_final[:,:,0][img_bin[:,:,0] != 0] = img_rgb_background[:,:,0][img_bin[:,:,0] != 0] + img_final[:,:,1][img_bin[:,:,1] != 0] = img_rgb_background[:,:,1][img_bin[:,:,1] != 0] + img_final[:,:,2][img_bin[:,:,2] != 0] = img_rgb_background[:,:,2][img_bin[:,:,2] != 0] + + return img_final + +def return_binary_image_with_given_rgb_background_red_textlines(img_bin, img_rgb_background, img_color): + img_rgb_background = resize_image(img_rgb_background ,img_bin.shape[0], img_bin.shape[1]) + + img_final = np.copy(img_color) + + img_final[:,:,0][img_bin[:,:,0] != 0] = img_rgb_background[:,:,0][img_bin[:,:,0] != 0] + img_final[:,:,1][img_bin[:,:,1] != 0] = img_rgb_background[:,:,1][img_bin[:,:,1] != 0] + img_final[:,:,2][img_bin[:,:,2] != 0] = img_rgb_background[:,:,2][img_bin[:,:,2] != 0] + + return img_final + +def scale_image_for_no_patch(img, label, scale): + h_n = int(img.shape[0]*scale) + w_n = int(img.shape[1]*scale) + + channel0_avg = int( np.mean(img[:,:,0]) ) + channel1_avg = int( np.mean(img[:,:,1]) ) + channel2_avg = int( np.mean(img[:,:,2]) ) + + h_diff = img.shape[0] - h_n + w_diff = img.shape[1] - w_n + + h_start = int(h_diff / 2.) + w_start = int(w_diff / 2.) + + img_res = resize_image(img, h_n, w_n) + label_res = resize_image(label, h_n, w_n) + + img_scaled_padded = np.copy(img) + + label_scaled_padded = np.zeros(label.shape) + + img_scaled_padded[:,:,0] = channel0_avg + img_scaled_padded[:,:,1] = channel1_avg + img_scaled_padded[:,:,2] = channel2_avg + + img_scaled_padded[h_start:h_start+h_n, w_start:w_start+w_n,:] = img_res[:,:,:] + label_scaled_padded[h_start:h_start+h_n, w_start:w_start+w_n,:] = label_res[:,:,:] + + return img_scaled_padded, label_scaled_padded + + def return_number_of_total_training_data(path_classes): sub_classes = os.listdir(path_classes) n_tot = 0 From 445c45cb87935b73099d1753957c4c6c6eac32f2 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 21 Aug 2024 16:17:59 +0200 Subject: [PATCH 087/492] updating augmentations --- train/train.py | 8 +++++--- train/utils.py | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 3 deletions(-) diff --git a/train/train.py b/train/train.py index 71f31f3..fa08a98 100644 --- a/train/train.py +++ b/train/train.py @@ -53,6 +53,7 @@ def config_params(): degrading = False # If true, degrading will be applied to the image. The amount of degrading is defined with "degrade_scales" in config_params.json. brightening = False # If true, brightening will be applied to the image. The amount of brightening is defined with "brightness" in config_params.json. binarization = False # If true, Otsu thresholding will be applied to augment the input with binarized images. + rgb_background = False dir_train = None # Directory of training dataset with subdirectories having the names "images" and "labels". dir_eval = None # Directory of validation dataset with subdirectories having the names "images" and "labels". dir_output = None # Directory where the output model will be saved. @@ -95,7 +96,7 @@ def run(_config, n_classes, n_epochs, input_height, index_start, dir_of_start_model, is_loss_soft_dice, n_batch, patches, augmentation, flip_aug, blur_aug, padding_white, padding_black, scaling, degrading, - brightening, binarization, blur_k, scales, degrade_scales, + brightening, binarization, rgb_background, blur_k, scales, degrade_scales, brightness, dir_train, data_is_provided, scaling_bluring, scaling_brightness, scaling_binarization, rotation, rotation_not_90, thetha, scaling_flip, continue_training, transformer_projection_dim, @@ -108,6 +109,7 @@ def run(_config, n_classes, n_epochs, input_height, if data_is_provided: dir_train_flowing = os.path.join(dir_output, 'train') dir_eval_flowing = os.path.join(dir_output, 'eval') + dir_flow_train_imgs = os.path.join(dir_train_flowing, 'images') dir_flow_train_labels = os.path.join(dir_train_flowing, 'labels') @@ -161,7 +163,7 @@ def run(_config, n_classes, n_epochs, input_height, # writing patches into a sub-folder in order to be flowed from directory. provide_patches(imgs_list, segs_list, dir_img, dir_seg, dir_flow_train_imgs, dir_flow_train_labels, input_height, input_width, blur_k, - blur_aug, padding_white, padding_black, flip_aug, binarization, + blur_aug, padding_white, padding_black, flip_aug, binarization, rgb_background, scaling, degrading, brightening, scales, degrade_scales, brightness, flip_index, scaling_bluring, scaling_brightness, scaling_binarization, rotation, rotation_not_90, thetha, scaling_flip, task, augmentation=augmentation, @@ -169,7 +171,7 @@ def run(_config, n_classes, n_epochs, input_height, provide_patches(imgs_list_test, segs_list_test, dir_img_val, dir_seg_val, dir_flow_eval_imgs, dir_flow_eval_labels, input_height, input_width, - blur_k, blur_aug, padding_white, padding_black, flip_aug, binarization, + blur_k, blur_aug, padding_white, padding_black, flip_aug, binarization, rgb_background, scaling, degrading, brightening, scales, degrade_scales, brightness, flip_index, scaling_bluring, scaling_brightness, scaling_binarization, rotation, rotation_not_90, thetha, scaling_flip, task, augmentation=False, patches=patches) diff --git a/train/utils.py b/train/utils.py index 2278849..cf7a65c 100644 --- a/train/utils.py +++ b/train/utils.py @@ -695,6 +695,47 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) indexer += 1 + + if rotation_not_90: + for thetha_i in thetha: + img_max_rotated, label_max_rotated = rotation_not_90_func(cv2.imread(dir_img + '/'+im), + cv2.imread(dir_of_label_file), thetha_i) + + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', resize_image(img_max_rotated, input_height, input_width)) + + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', resize_image(label_max_rotated, input_height, input_width)) + indexer += 1 + + if channels_shuffling: + for shuffle_index in shuffle_indexes: + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', + (resize_image(return_shuffled_channels(cv2.imread(dir_img + '/' + im), shuffle_index), input_height, input_width))) + + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', + resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) + indexer += 1 + + if scaling: + for sc_ind in scales: + img_scaled, label_scaled = scale_image_for_no_patch(cv2.imread(dir_img + '/'+im), + cv2.imread(dir_of_label_file), sc_ind) + + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', resize_image(img_scaled, input_height, input_width)) + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', resize_image(label_scaled, input_height, input_width)) + indexer += 1 + + if rgb_color_background: + img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') + for i_n in range(number_of_backgrounds_per_image): + background_image_chosen_name = random.choice(list_all_possible_background_images) + img_rgb_background_chosen = cv2.imread(dir_rgb_backgrounds + '/' + background_image_chosen_name) + img_with_overlayed_background = return_binary_image_with_given_rgb_background(img_bin_corr, img_rgb_background) + + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', resize_image(img_with_overlayed_background, input_height, input_width)) + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', + resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) + + if patches: From aeb2ee4e3ef404b0fef2414462b9e51e9036bc18 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 21 Aug 2024 19:33:23 +0200 Subject: [PATCH 088/492] scaling, channels shuffling, rgb background and red content added to no patch augmentation --- train/config_params.json | 30 +++++++++++++++++++----------- train/train.py | 32 ++++++++++++++++++++++---------- train/utils.py | 32 +++++++++++++++++++++++++++----- 3 files changed, 68 insertions(+), 26 deletions(-) diff --git a/train/config_params.json b/train/config_params.json index a89cbb5..e5f652d 100644 --- a/train/config_params.json +++ b/train/config_params.json @@ -1,19 +1,22 @@ { "backbone_type" : "transformer", - "task": "binarization", + "task": "segmentation", "n_classes" : 2, - "n_epochs" : 2, - "input_height" : 224, - "input_width" : 224, + "n_epochs" : 0, + "input_height" : 448, + "input_width" : 448, "weight_decay" : 1e-6, "n_batch" : 1, "learning_rate": 1e-4, - "patches" : true, + "patches" : false, "pretraining" : true, - "augmentation" : false, + "augmentation" : true, "flip_aug" : false, "blur_aug" : false, "scaling" : true, + "adding_rgb_background": true, + "add_red_textlines": true, + "channels_shuffling": true, "degrading": false, "brightening": false, "binarization" : false, @@ -31,18 +34,23 @@ "transformer_num_heads": 1, "transformer_cnn_first": false, "blur_k" : ["blur","guass","median"], - "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], + "scales" : [0.6, 0.7, 0.8, 0.9], "brightness" : [1.3, 1.5, 1.7, 2], "degrade_scales" : [0.2, 0.4], "flip_index" : [0, 1, -1], - "thetha" : [10, -10], + "shuffle_indexes" : [ [0,2,1], [1,2,0], [1,0,2] , [2,1,0]], + "thetha" : [5, -5], + "number_of_backgrounds_per_image": 2, "continue_training": false, "index_start" : 0, "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" + "dir_train": "/home/vahid/Documents/test/sbb_pixelwise_segmentation/test_label/pageextractor_test/train_new", + "dir_eval": "/home/vahid/Documents/test/sbb_pixelwise_segmentation/test_label/pageextractor_test/eval_new", + "dir_output": "/home/vahid/Documents/test/sbb_pixelwise_segmentation/test_label/pageextractor_test/output_new", + "dir_rgb_backgrounds": "/home/vahid/Documents/1_2_test_eynollah/set_rgb_background", + "dir_img_bin": "/home/vahid/Documents/test/sbb_pixelwise_segmentation/test_label/pageextractor_test/train_new/images_bin" + } diff --git a/train/train.py b/train/train.py index fa08a98..5dfad07 100644 --- a/train/train.py +++ b/train/train.py @@ -53,7 +53,9 @@ def config_params(): degrading = False # If true, degrading will be applied to the image. The amount of degrading is defined with "degrade_scales" in config_params.json. brightening = False # If true, brightening will be applied to the image. The amount of brightening is defined with "brightness" in config_params.json. binarization = False # If true, Otsu thresholding will be applied to augment the input with binarized images. - rgb_background = False + adding_rgb_background = False + add_red_textlines = False + channels_shuffling = False dir_train = None # Directory of training dataset with subdirectories having the names "images" and "labels". dir_eval = None # Directory of validation dataset with subdirectories having the names "images" and "labels". dir_output = None # Directory where the output model will be saved. @@ -65,6 +67,7 @@ def config_params(): scaling_brightness = False # If true, a combination of scaling and brightening will be applied to the image. scaling_flip = False # If true, a combination of scaling and flipping will be applied to the image. thetha = None # Rotate image by these angles for augmentation. + shuffle_indexes = None blur_k = None # Blur image for augmentation. scales = None # Scale patches for augmentation. degrade_scales = None # Degrade image for augmentation. @@ -88,6 +91,10 @@ def config_params(): f1_threshold_classification = None # This threshold is used to consider models with an evaluation f1 scores bigger than it. The selected model weights undergo a weights ensembling. And avreage ensembled model will be written to output. classification_classes_name = None # Dictionary of classification classes names. backbone_type = None # As backbone we have 2 types of backbones. A vision transformer alongside a CNN and we call it "transformer" and only CNN called "nontransformer" + + dir_img_bin = None + number_of_backgrounds_per_image = 1 + dir_rgb_backgrounds = None @ex.automain @@ -95,15 +102,20 @@ def run(_config, n_classes, n_epochs, input_height, input_width, weight_decay, weighted_loss, index_start, dir_of_start_model, is_loss_soft_dice, n_batch, patches, augmentation, flip_aug, - blur_aug, padding_white, padding_black, scaling, degrading, - brightening, binarization, rgb_background, blur_k, scales, degrade_scales, + blur_aug, padding_white, padding_black, scaling, degrading,channels_shuffling, + brightening, binarization, adding_rgb_background, add_red_textlines, blur_k, scales, degrade_scales,shuffle_indexes, brightness, dir_train, data_is_provided, scaling_bluring, scaling_brightness, scaling_binarization, rotation, rotation_not_90, thetha, scaling_flip, continue_training, transformer_projection_dim, transformer_mlp_head_units, transformer_layers, transformer_num_heads, transformer_cnn_first, transformer_patchsize_x, transformer_patchsize_y, transformer_num_patches_xy, backbone_type, flip_index, dir_eval, dir_output, - pretraining, learning_rate, task, f1_threshold_classification, classification_classes_name): + pretraining, learning_rate, task, f1_threshold_classification, classification_classes_name, dir_img_bin, number_of_backgrounds_per_image,dir_rgb_backgrounds): + + if dir_rgb_backgrounds: + list_all_possible_background_images = os.listdir(dir_rgb_backgrounds) + else: + list_all_possible_background_images = None if task == "segmentation" or task == "enhancement" or task == "binarization": if data_is_provided: @@ -163,18 +175,18 @@ def run(_config, n_classes, n_epochs, input_height, # writing patches into a sub-folder in order to be flowed from directory. provide_patches(imgs_list, segs_list, dir_img, dir_seg, dir_flow_train_imgs, dir_flow_train_labels, input_height, input_width, blur_k, - blur_aug, padding_white, padding_black, flip_aug, binarization, rgb_background, + blur_aug, padding_white, padding_black, flip_aug, binarization, adding_rgb_background,add_red_textlines, channels_shuffling, scaling, degrading, brightening, scales, degrade_scales, brightness, - flip_index, scaling_bluring, scaling_brightness, scaling_binarization, + flip_index,shuffle_indexes, scaling_bluring, scaling_brightness, scaling_binarization, rotation, rotation_not_90, thetha, scaling_flip, task, augmentation=augmentation, - patches=patches) + patches=patches, dir_img_bin=dir_img_bin,number_of_backgrounds_per_image=number_of_backgrounds_per_image,list_all_possible_background_images=list_all_possible_background_images, dir_rgb_backgrounds=dir_rgb_backgrounds) provide_patches(imgs_list_test, segs_list_test, dir_img_val, dir_seg_val, dir_flow_eval_imgs, dir_flow_eval_labels, input_height, input_width, - blur_k, blur_aug, padding_white, padding_black, flip_aug, binarization, rgb_background, + blur_k, blur_aug, padding_white, padding_black, flip_aug, binarization, adding_rgb_background, add_red_textlines, channels_shuffling, scaling, degrading, brightening, scales, degrade_scales, brightness, - flip_index, scaling_bluring, scaling_brightness, scaling_binarization, - rotation, rotation_not_90, thetha, scaling_flip, task, augmentation=False, patches=patches) + flip_index, shuffle_indexes, scaling_bluring, scaling_brightness, scaling_binarization, + rotation, rotation_not_90, thetha, scaling_flip, task, augmentation=False, patches=patches,dir_img_bin=dir_img_bin,number_of_backgrounds_per_image=number_of_backgrounds_per_image,list_all_possible_background_images=list_all_possible_background_images, dir_rgb_backgrounds=dir_rgb_backgrounds) if weighted_loss: weights = np.zeros(n_classes) diff --git a/train/utils.py b/train/utils.py index cf7a65c..20fda29 100644 --- a/train/utils.py +++ b/train/utils.py @@ -51,6 +51,16 @@ def return_binary_image_with_given_rgb_background_red_textlines(img_bin, img_rgb return img_final +def return_image_with_red_elements(img, img_bin): + img_final = np.copy(img) + + img_final[:,:,0][img_bin[:,:,0]==0] = 0 + img_final[:,:,1][img_bin[:,:,0]==0] = 0 + img_final[:,:,2][img_bin[:,:,0]==0] = 255 + return img_final + + + def scale_image_for_no_patch(img, label, scale): h_n = int(img.shape[0]*scale) w_n = int(img.shape[1]*scale) @@ -631,10 +641,10 @@ def get_patches_num_scale_new(dir_img_f, dir_seg_f, img, label, height, width, i def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow_train_imgs, dir_flow_train_labels, input_height, input_width, blur_k, blur_aug, - padding_white, padding_black, flip_aug, binarization, scaling, degrading, - brightening, scales, degrade_scales, brightness, flip_index, + padding_white, padding_black, flip_aug, binarization, adding_rgb_background, add_red_textlines, channels_shuffling, scaling, degrading, + brightening, scales, degrade_scales, brightness, flip_index, shuffle_indexes, scaling_bluring, scaling_brightness, scaling_binarization, rotation, - rotation_not_90, thetha, scaling_flip, task, augmentation=False, patches=False): + rotation_not_90, thetha, scaling_flip, task, augmentation=False, patches=False, dir_img_bin=None,number_of_backgrounds_per_image=None,list_all_possible_background_images=None, dir_rgb_backgrounds=None): indexer = 0 for im, seg_i in tqdm(zip(imgs_list_train, segs_list_train)): @@ -724,17 +734,29 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', resize_image(label_scaled, input_height, input_width)) indexer += 1 - if rgb_color_background: + if adding_rgb_background: img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') for i_n in range(number_of_backgrounds_per_image): background_image_chosen_name = random.choice(list_all_possible_background_images) img_rgb_background_chosen = cv2.imread(dir_rgb_backgrounds + '/' + background_image_chosen_name) - img_with_overlayed_background = return_binary_image_with_given_rgb_background(img_bin_corr, img_rgb_background) + img_with_overlayed_background = return_binary_image_with_given_rgb_background(img_bin_corr, img_rgb_background_chosen) cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', resize_image(img_with_overlayed_background, input_height, input_width)) cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) + indexer += 1 + + if add_red_textlines: + img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') + img_red_context = return_image_with_red_elements(cv2.imread(dir_img + '/'+im), img_bin_corr) + + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', resize_image(img_red_context, input_height, input_width)) + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', + resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) + + indexer += 1 + From 61cdd2acb85e65ee023807ad885f1724e476596d Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 22 Aug 2024 21:58:09 +0200 Subject: [PATCH 089/492] using prepared binarized images in the case of augmentation --- train/utils.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/train/utils.py b/train/utils.py index 20fda29..84af85e 100644 --- a/train/utils.py +++ b/train/utils.py @@ -690,8 +690,15 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow pass if binarization: - cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', - resize_image(otsu_copy(cv2.imread(dir_img + '/' + im)), input_height, input_width)) + + if dir_img_bin: + img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') + + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', + resize_image(img_bin_corr, input_height, input_width)) + else: + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', + resize_image(otsu_copy(cv2.imread(dir_img + '/' + im)), input_height, input_width)) cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) From 5bbd0980b2a1ff3b5aa536353c21241539f6cf7b Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 28 Aug 2024 00:04:19 +0200 Subject: [PATCH 090/492] early dilation for textline artificial class --- train/gt_gen_utils.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 13010bf..dd4091f 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -88,12 +88,15 @@ def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002): contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=min_area) return contours_imgs -def update_region_contours(co_text, img_boundary, erosion_rate, dilation_rate, y_len, x_len): +def update_region_contours(co_text, img_boundary, erosion_rate, dilation_rate, y_len, x_len, dilation_early=None): co_text_eroded = [] for con in co_text: img_boundary_in = np.zeros( (y_len,x_len) ) img_boundary_in = cv2.fillPoly(img_boundary_in, pts=[con], color=(1, 1, 1)) + if dilation_early: + img_boundary_in = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=dilation_early) + #img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=7)#asiatica if erosion_rate > 0: img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=erosion_rate) @@ -258,22 +261,25 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if "artificial_class_label" in keys: img_boundary = np.zeros((y_len, x_len)) - erosion_rate = 1 + erosion_rate = 0#1 dilation_rate = 3 - co_use_case, img_boundary = update_region_contours(co_use_case, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + dilation_early = 2 + co_use_case, img_boundary = update_region_contours(co_use_case, img_boundary, erosion_rate, dilation_rate, y_len, x_len, dilation_early=dilation_early ) img = np.zeros((y_len, x_len, 3)) if output_type == '2d': img_poly = cv2.fillPoly(img, pts=co_use_case, color=(1, 1, 1)) if "artificial_class_label" in keys: - img_poly[:,:][img_boundary[:,:]==1] = artificial_class_label + img_mask = np.copy(img_poly) + img_poly[:,:][(img_boundary[:,:]==1) & (img_mask[:,:,0]!=1)] = artificial_class_label elif output_type == '3d': img_poly = cv2.fillPoly(img, pts=co_use_case, color=textline_rgb_color) if "artificial_class_label" in keys: - img_poly[:,:,0][img_boundary[:,:]==1] = artificial_class_rgb_color[0] - img_poly[:,:,1][img_boundary[:,:]==1] = artificial_class_rgb_color[1] - img_poly[:,:,2][img_boundary[:,:]==1] = artificial_class_rgb_color[2] + img_mask = np.copy(img_poly) + img_poly[:,:,0][(img_boundary[:,:]==1) & (img_mask[:,:,0]!=255)] = artificial_class_rgb_color[0] + img_poly[:,:,1][(img_boundary[:,:]==1) & (img_mask[:,:,0]!=255)] = artificial_class_rgb_color[1] + img_poly[:,:,2][(img_boundary[:,:]==1) & (img_mask[:,:,0]!=255)] = artificial_class_rgb_color[2] if printspace and config_params['use_case']!='printspace': From a57a31673d78741c5679aac66e06991e46fcec73 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 28 Aug 2024 02:09:27 +0200 Subject: [PATCH 091/492] adding foreground rgb to augmentation --- train/config_params.json | 10 ++++++---- train/train.py | 19 +++++++++++++------ train/utils.py | 40 ++++++++++++++++++++++++++++++++++++++-- 3 files changed, 57 insertions(+), 12 deletions(-) diff --git a/train/config_params.json b/train/config_params.json index e5f652d..1db8026 100644 --- a/train/config_params.json +++ b/train/config_params.json @@ -13,13 +13,14 @@ "augmentation" : true, "flip_aug" : false, "blur_aug" : false, - "scaling" : true, + "scaling" : false, "adding_rgb_background": true, - "add_red_textlines": true, - "channels_shuffling": true, + "adding_rgb_foreground": true, + "add_red_textlines": false, + "channels_shuffling": false, "degrading": false, "brightening": false, - "binarization" : false, + "binarization" : true, "scaling_bluring" : false, "scaling_binarization" : false, "scaling_flip" : false, @@ -51,6 +52,7 @@ "dir_eval": "/home/vahid/Documents/test/sbb_pixelwise_segmentation/test_label/pageextractor_test/eval_new", "dir_output": "/home/vahid/Documents/test/sbb_pixelwise_segmentation/test_label/pageextractor_test/output_new", "dir_rgb_backgrounds": "/home/vahid/Documents/1_2_test_eynollah/set_rgb_background", + "dir_rgb_foregrounds": "/home/vahid/Documents/1_2_test_eynollah/out_set_rgb_foreground", "dir_img_bin": "/home/vahid/Documents/test/sbb_pixelwise_segmentation/test_label/pageextractor_test/train_new/images_bin" } diff --git a/train/train.py b/train/train.py index 5dfad07..848ff6a 100644 --- a/train/train.py +++ b/train/train.py @@ -54,6 +54,7 @@ def config_params(): brightening = False # If true, brightening will be applied to the image. The amount of brightening is defined with "brightness" in config_params.json. binarization = False # If true, Otsu thresholding will be applied to augment the input with binarized images. adding_rgb_background = False + adding_rgb_foreground = False add_red_textlines = False channels_shuffling = False dir_train = None # Directory of training dataset with subdirectories having the names "images" and "labels". @@ -95,6 +96,7 @@ def config_params(): dir_img_bin = None number_of_backgrounds_per_image = 1 dir_rgb_backgrounds = None + dir_rgb_foregrounds = None @ex.automain @@ -103,20 +105,25 @@ def run(_config, n_classes, n_epochs, input_height, index_start, dir_of_start_model, is_loss_soft_dice, n_batch, patches, augmentation, flip_aug, blur_aug, padding_white, padding_black, scaling, degrading,channels_shuffling, - brightening, binarization, adding_rgb_background, add_red_textlines, blur_k, scales, degrade_scales,shuffle_indexes, + brightening, binarization, adding_rgb_background, adding_rgb_foreground, add_red_textlines, blur_k, scales, degrade_scales,shuffle_indexes, brightness, dir_train, data_is_provided, scaling_bluring, scaling_brightness, scaling_binarization, rotation, rotation_not_90, thetha, scaling_flip, continue_training, transformer_projection_dim, transformer_mlp_head_units, transformer_layers, transformer_num_heads, transformer_cnn_first, transformer_patchsize_x, transformer_patchsize_y, transformer_num_patches_xy, backbone_type, flip_index, dir_eval, dir_output, - pretraining, learning_rate, task, f1_threshold_classification, classification_classes_name, dir_img_bin, number_of_backgrounds_per_image,dir_rgb_backgrounds): + pretraining, learning_rate, task, f1_threshold_classification, classification_classes_name, dir_img_bin, number_of_backgrounds_per_image,dir_rgb_backgrounds, dir_rgb_foregrounds): if dir_rgb_backgrounds: list_all_possible_background_images = os.listdir(dir_rgb_backgrounds) else: list_all_possible_background_images = None + if dir_rgb_foregrounds: + list_all_possible_foreground_rgbs = os.listdir(dir_rgb_foregrounds) + else: + list_all_possible_foreground_rgbs = None + if task == "segmentation" or task == "enhancement" or task == "binarization": if data_is_provided: dir_train_flowing = os.path.join(dir_output, 'train') @@ -175,18 +182,18 @@ def run(_config, n_classes, n_epochs, input_height, # writing patches into a sub-folder in order to be flowed from directory. provide_patches(imgs_list, segs_list, dir_img, dir_seg, dir_flow_train_imgs, dir_flow_train_labels, input_height, input_width, blur_k, - blur_aug, padding_white, padding_black, flip_aug, binarization, adding_rgb_background,add_red_textlines, channels_shuffling, + blur_aug, padding_white, padding_black, flip_aug, binarization, adding_rgb_background,adding_rgb_foreground, add_red_textlines, channels_shuffling, scaling, degrading, brightening, scales, degrade_scales, brightness, flip_index,shuffle_indexes, scaling_bluring, scaling_brightness, scaling_binarization, rotation, rotation_not_90, thetha, scaling_flip, task, augmentation=augmentation, - patches=patches, dir_img_bin=dir_img_bin,number_of_backgrounds_per_image=number_of_backgrounds_per_image,list_all_possible_background_images=list_all_possible_background_images, dir_rgb_backgrounds=dir_rgb_backgrounds) + patches=patches, dir_img_bin=dir_img_bin,number_of_backgrounds_per_image=number_of_backgrounds_per_image,list_all_possible_background_images=list_all_possible_background_images, dir_rgb_backgrounds=dir_rgb_backgrounds, dir_rgb_foregrounds=dir_rgb_foregrounds,list_all_possible_foreground_rgbs=list_all_possible_foreground_rgbs) provide_patches(imgs_list_test, segs_list_test, dir_img_val, dir_seg_val, dir_flow_eval_imgs, dir_flow_eval_labels, input_height, input_width, - blur_k, blur_aug, padding_white, padding_black, flip_aug, binarization, adding_rgb_background, add_red_textlines, channels_shuffling, + blur_k, blur_aug, padding_white, padding_black, flip_aug, binarization, adding_rgb_background, adding_rgb_foreground, add_red_textlines, channels_shuffling, scaling, degrading, brightening, scales, degrade_scales, brightness, flip_index, shuffle_indexes, scaling_bluring, scaling_brightness, scaling_binarization, - rotation, rotation_not_90, thetha, scaling_flip, task, augmentation=False, patches=patches,dir_img_bin=dir_img_bin,number_of_backgrounds_per_image=number_of_backgrounds_per_image,list_all_possible_background_images=list_all_possible_background_images, dir_rgb_backgrounds=dir_rgb_backgrounds) + rotation, rotation_not_90, thetha, scaling_flip, task, augmentation=False, patches=patches,dir_img_bin=dir_img_bin,number_of_backgrounds_per_image=number_of_backgrounds_per_image,list_all_possible_background_images=list_all_possible_background_images, dir_rgb_backgrounds=dir_rgb_backgrounds,dir_rgb_foregrounds=dir_rgb_foregrounds,list_all_possible_foreground_rgbs=list_all_possible_foreground_rgbs ) if weighted_loss: weights = np.zeros(n_classes) diff --git a/train/utils.py b/train/utils.py index 84af85e..d38e798 100644 --- a/train/utils.py +++ b/train/utils.py @@ -40,6 +40,25 @@ def return_binary_image_with_given_rgb_background(img_bin, img_rgb_background): return img_final +def return_binary_image_with_given_rgb_background_and_given_foreground_rgb(img_bin, img_rgb_background, rgb_foreground): + img_rgb_background = resize_image(img_rgb_background ,img_bin.shape[0], img_bin.shape[1]) + + img_final = np.copy(img_bin) + img_foreground = np.zeros(img_bin.shape) + + + img_foreground[:,:,0][img_bin[:,:,0] == 0] = rgb_foreground[0] + img_foreground[:,:,1][img_bin[:,:,0] == 0] = rgb_foreground[1] + img_foreground[:,:,2][img_bin[:,:,0] == 0] = rgb_foreground[2] + + + img_final[:,:,0][img_bin[:,:,0] != 0] = img_rgb_background[:,:,0][img_bin[:,:,0] != 0] + img_final[:,:,1][img_bin[:,:,1] != 0] = img_rgb_background[:,:,1][img_bin[:,:,1] != 0] + img_final[:,:,2][img_bin[:,:,2] != 0] = img_rgb_background[:,:,2][img_bin[:,:,2] != 0] + + img_final = img_final + img_foreground + return img_final + def return_binary_image_with_given_rgb_background_red_textlines(img_bin, img_rgb_background, img_color): img_rgb_background = resize_image(img_rgb_background ,img_bin.shape[0], img_bin.shape[1]) @@ -641,10 +660,10 @@ def get_patches_num_scale_new(dir_img_f, dir_seg_f, img, label, height, width, i def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow_train_imgs, dir_flow_train_labels, input_height, input_width, blur_k, blur_aug, - padding_white, padding_black, flip_aug, binarization, adding_rgb_background, add_red_textlines, channels_shuffling, scaling, degrading, + padding_white, padding_black, flip_aug, binarization, adding_rgb_background, adding_rgb_foreground, add_red_textlines, channels_shuffling, scaling, degrading, brightening, scales, degrade_scales, brightness, flip_index, shuffle_indexes, scaling_bluring, scaling_brightness, scaling_binarization, rotation, - rotation_not_90, thetha, scaling_flip, task, augmentation=False, patches=False, dir_img_bin=None,number_of_backgrounds_per_image=None,list_all_possible_background_images=None, dir_rgb_backgrounds=None): + rotation_not_90, thetha, scaling_flip, task, augmentation=False, patches=False, dir_img_bin=None,number_of_backgrounds_per_image=None,list_all_possible_background_images=None, dir_rgb_backgrounds=None, dir_rgb_foregrounds=None, list_all_possible_foreground_rgbs=None): indexer = 0 for im, seg_i in tqdm(zip(imgs_list_train, segs_list_train)): @@ -754,6 +773,23 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow indexer += 1 + if adding_rgb_foreground: + img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') + for i_n in range(number_of_backgrounds_per_image): + background_image_chosen_name = random.choice(list_all_possible_background_images) + foreground_rgb_chosen_name = random.choice(list_all_possible_foreground_rgbs) + + img_rgb_background_chosen = cv2.imread(dir_rgb_backgrounds + '/' + background_image_chosen_name) + foreground_rgb_chosen = np.load(dir_rgb_foregrounds + '/' + foreground_rgb_chosen_name) + + img_with_overlayed_background = return_binary_image_with_given_rgb_background_and_given_foreground_rgb(img_bin_corr, img_rgb_background_chosen, foreground_rgb_chosen) + + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', resize_image(img_with_overlayed_background, input_height, input_width)) + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', + resize_image(cv2.imread(dir_of_label_file), input_height, input_width)) + + indexer += 1 + if add_red_textlines: img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') img_red_context = return_image_with_red_elements(cv2.imread(dir_img + '/'+im), img_bin_corr) From e3da4944704d9d4af22a008addc1df8183a6ef44 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 28 Aug 2024 17:34:06 +0200 Subject: [PATCH 092/492] fixing artificial class bug --- train/gt_gen_utils.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index dd4091f..5784e14 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -8,6 +8,7 @@ from tqdm import tqdm import cv2 from shapely import geometry from pathlib import Path +import matplotlib.pyplot as plt KERNEL = np.ones((5, 5), np.uint8) @@ -83,9 +84,13 @@ def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002): ret, thresh = cv2.threshold(imgray, 0, 255, 0) contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + #print(len(contours_imgs), hierarchy) contours_imgs = return_parent_contours(contours_imgs, hierarchy) - contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=min_area) + + #print(len(contours_imgs), "iki") + #contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=min_area) return contours_imgs def update_region_contours(co_text, img_boundary, erosion_rate, dilation_rate, y_len, x_len, dilation_early=None): @@ -103,12 +108,15 @@ def update_region_contours(co_text, img_boundary, erosion_rate, dilation_rate, y pixel = 1 min_size = 0 + + img_boundary_in = img_boundary_in.astype("uint8") + con_eroded = return_contours_of_interested_region(img_boundary_in,pixel, min_size ) - try: - co_text_eroded.append(con_eroded[0]) - except: - co_text_eroded.append(con) + #try: + co_text_eroded.append(con_eroded[0]) + #except: + #co_text_eroded.append(con) img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=dilation_rate) @@ -262,8 +270,8 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if "artificial_class_label" in keys: img_boundary = np.zeros((y_len, x_len)) erosion_rate = 0#1 - dilation_rate = 3 - dilation_early = 2 + dilation_rate = 2 + dilation_early = 1 co_use_case, img_boundary = update_region_contours(co_use_case, img_boundary, erosion_rate, dilation_rate, y_len, x_len, dilation_early=dilation_early ) From 3f354e1c342a36d52883c61bacebcddf43a31c54 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 30 Aug 2024 15:30:18 +0200 Subject: [PATCH 093/492] new augmentations for patchwise training --- train/utils.py | 64 ++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 60 insertions(+), 4 deletions(-) diff --git a/train/utils.py b/train/utils.py index d38e798..3d42b64 100644 --- a/train/utils.py +++ b/train/utils.py @@ -823,6 +823,53 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow img_max_rotated, label_max_rotated, input_height, input_width, indexer=indexer) + + if channels_shuffling: + for shuffle_index in shuffle_indexes: + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + return_shuffled_channels(cv2.imread(dir_img + '/' + im), shuffle_index), + cv2.imread(dir_of_label_file), + input_height, input_width, indexer=indexer) + + if adding_rgb_background: + img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') + for i_n in range(number_of_backgrounds_per_image): + background_image_chosen_name = random.choice(list_all_possible_background_images) + img_rgb_background_chosen = cv2.imread(dir_rgb_backgrounds + '/' + background_image_chosen_name) + img_with_overlayed_background = return_binary_image_with_given_rgb_background(img_bin_corr, img_rgb_background_chosen) + + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + img_with_overlayed_background, + cv2.imread(dir_of_label_file), + input_height, input_width, indexer=indexer) + + + if adding_rgb_foreground: + img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') + for i_n in range(number_of_backgrounds_per_image): + background_image_chosen_name = random.choice(list_all_possible_background_images) + foreground_rgb_chosen_name = random.choice(list_all_possible_foreground_rgbs) + + img_rgb_background_chosen = cv2.imread(dir_rgb_backgrounds + '/' + background_image_chosen_name) + foreground_rgb_chosen = np.load(dir_rgb_foregrounds + '/' + foreground_rgb_chosen_name) + + img_with_overlayed_background = return_binary_image_with_given_rgb_background_and_given_foreground_rgb(img_bin_corr, img_rgb_background_chosen, foreground_rgb_chosen) + + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + img_with_overlayed_background, + cv2.imread(dir_of_label_file), + input_height, input_width, indexer=indexer) + + + if add_red_textlines: + img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') + img_red_context = return_image_with_red_elements(cv2.imread(dir_img + '/'+im), img_bin_corr) + + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + img_red_context, + cv2.imread(dir_of_label_file), + input_height, input_width, indexer=indexer) + if flip_aug: for f_i in flip_index: indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, @@ -871,10 +918,19 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow input_height, input_width, indexer=indexer) if binarization: - indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, - otsu_copy(cv2.imread(dir_img + '/' + im)), - cv2.imread(dir_of_label_file), - input_height, input_width, indexer=indexer) + if dir_img_bin: + img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') + + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + img_bin_corr, + cv2.imread(dir_of_label_file), + input_height, input_width, indexer=indexer) + + else: + indexer = get_patches(dir_flow_train_imgs, dir_flow_train_labels, + otsu_copy(cv2.imread(dir_img + '/' + im)), + cv2.imread(dir_of_label_file), + input_height, input_width, indexer=indexer) if scaling_brightness: for sc_ind in scales: From a524f8b1a7e5e68219cdcb12e239bc6ae8a1391c Mon Sep 17 00:00:00 2001 From: johnlockejrr Date: Sat, 19 Oct 2024 13:21:29 -0700 Subject: [PATCH 094/492] Update inference.py to check if save_layout was passed as argument otherwise can give an cv2 error --- train/inference.py | 1 + 1 file changed, 1 insertion(+) diff --git a/train/inference.py b/train/inference.py index 8d0a572..89d32de 100644 --- a/train/inference.py +++ b/train/inference.py @@ -566,6 +566,7 @@ class sbb_predict: img_seg_overlayed, only_layout = self.visualize_model_output(res, self.img_org, self.task) if self.save: cv2.imwrite(self.save,img_seg_overlayed) + if self.save_layout: cv2.imwrite(self.save_layout, only_layout) if self.ground_truth: From f09eed1197d3f4d6cb4672fec48f73f50a1eee6b Mon Sep 17 00:00:00 2001 From: johnlockejrr Date: Sat, 19 Oct 2024 13:25:50 -0700 Subject: [PATCH 095/492] Changed deprecated `lr` to `learning_rate` and `model.fit_generator` to `model.fit` --- train/train.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/train/train.py b/train/train.py index 848ff6a..4cc3cbb 100644 --- a/train/train.py +++ b/train/train.py @@ -277,16 +277,16 @@ def run(_config, n_classes, n_epochs, input_height, if (task == "segmentation" or task == "binarization"): if not is_loss_soft_dice and not weighted_loss: model.compile(loss='categorical_crossentropy', - optimizer=Adam(lr=learning_rate), metrics=['accuracy']) + optimizer=Adam(learning_rate=learning_rate), metrics=['accuracy']) if is_loss_soft_dice: model.compile(loss=soft_dice_loss, - optimizer=Adam(lr=learning_rate), metrics=['accuracy']) + optimizer=Adam(learning_rate=learning_rate), metrics=['accuracy']) if weighted_loss: model.compile(loss=weighted_categorical_crossentropy(weights), - optimizer=Adam(lr=learning_rate), metrics=['accuracy']) + optimizer=Adam(learning_rate=learning_rate), metrics=['accuracy']) elif task == "enhancement": model.compile(loss='mean_squared_error', - optimizer=Adam(lr=learning_rate), metrics=['accuracy']) + optimizer=Adam(learning_rate=learning_rate), metrics=['accuracy']) # generating train and evaluation data @@ -299,7 +299,7 @@ def run(_config, n_classes, n_epochs, input_height, ##score_best=[] ##score_best.append(0) for i in tqdm(range(index_start, n_epochs + index_start)): - model.fit_generator( + model.fit( train_gen, steps_per_epoch=int(len(os.listdir(dir_flow_train_imgs)) / n_batch) - 1, validation_data=val_gen, @@ -384,7 +384,7 @@ def run(_config, n_classes, n_epochs, input_height, #f1score_tot = [0] indexer_start = 0 - opt = SGD(lr=0.01, momentum=0.9) + opt = SGD(learning_rate=0.01, momentum=0.9) opt_adam = tf.keras.optimizers.Adam(learning_rate=0.0001) model.compile(loss="binary_crossentropy", optimizer = opt_adam,metrics=['accuracy']) From fd14e656aa38b17ca25224268d2e66634506b107 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 25 Oct 2024 14:01:39 +0200 Subject: [PATCH 096/492] early_erosion is added --- train/gt_gen_utils.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 5784e14..cabc7df 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -93,7 +93,7 @@ def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002): #contours_imgs = filter_contours_area_of_image_tables(thresh, contours_imgs, hierarchy, max_area=1, min_area=min_area) return contours_imgs -def update_region_contours(co_text, img_boundary, erosion_rate, dilation_rate, y_len, x_len, dilation_early=None): +def update_region_contours(co_text, img_boundary, erosion_rate, dilation_rate, y_len, x_len, dilation_early=None, erosion_early=None): co_text_eroded = [] for con in co_text: img_boundary_in = np.zeros( (y_len,x_len) ) @@ -101,6 +101,9 @@ def update_region_contours(co_text, img_boundary, erosion_rate, dilation_rate, y if dilation_early: img_boundary_in = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=dilation_early) + + if erosion_early: + img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=erosion_early) #img_boundary_in = cv2.erode(img_boundary_in[:,:], KERNEL, iterations=7)#asiatica if erosion_rate > 0: @@ -137,6 +140,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ ls_org_imgs_stem = [item.split('.')[0] for item in ls_org_imgs] for index in tqdm(range(len(gt_list))): #try: + print(gt_list[index]) tree1 = ET.parse(dir_in+'/'+gt_list[index], parser = ET.XMLParser(encoding = 'iso-8859-5')) root1=tree1.getroot() alltags=[elem.tag for elem in root1.iter()] @@ -271,8 +275,9 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ img_boundary = np.zeros((y_len, x_len)) erosion_rate = 0#1 dilation_rate = 2 - dilation_early = 1 - co_use_case, img_boundary = update_region_contours(co_use_case, img_boundary, erosion_rate, dilation_rate, y_len, x_len, dilation_early=dilation_early ) + dilation_early = 0 + erosion_early = 2 + co_use_case, img_boundary = update_region_contours(co_use_case, img_boundary, erosion_rate, dilation_rate, y_len, x_len, dilation_early=dilation_early, erosion_early=erosion_early) img = np.zeros((y_len, x_len, 3)) @@ -280,7 +285,8 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ img_poly = cv2.fillPoly(img, pts=co_use_case, color=(1, 1, 1)) if "artificial_class_label" in keys: img_mask = np.copy(img_poly) - img_poly[:,:][(img_boundary[:,:]==1) & (img_mask[:,:,0]!=1)] = artificial_class_label + ##img_poly[:,:][(img_boundary[:,:]==1) & (img_mask[:,:,0]!=1)] = artificial_class_label + img_poly[:,:][img_boundary[:,:]==1] = artificial_class_label elif output_type == '3d': img_poly = cv2.fillPoly(img, pts=co_use_case, color=textline_rgb_color) if "artificial_class_label" in keys: From 7b4d14b19f536614545b209bf3834b6b84a67d1d Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 29 Oct 2024 17:06:22 +0100 Subject: [PATCH 097/492] addinh shifting augmentation --- train/train.py | 7 ++++--- train/utils.py | 55 +++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 58 insertions(+), 4 deletions(-) diff --git a/train/train.py b/train/train.py index 848ff6a..7e3e390 100644 --- a/train/train.py +++ b/train/train.py @@ -50,6 +50,7 @@ def config_params(): padding_white = False # If true, white padding will be applied to the image. padding_black = False # If true, black padding will be applied to the image. scaling = False # If true, scaling will be applied to the image. The amount of scaling is defined with "scales" in config_params.json. + shifting = False degrading = False # If true, degrading will be applied to the image. The amount of degrading is defined with "degrade_scales" in config_params.json. brightening = False # If true, brightening will be applied to the image. The amount of brightening is defined with "brightness" in config_params.json. binarization = False # If true, Otsu thresholding will be applied to augment the input with binarized images. @@ -104,7 +105,7 @@ def run(_config, n_classes, n_epochs, input_height, input_width, weight_decay, weighted_loss, index_start, dir_of_start_model, is_loss_soft_dice, n_batch, patches, augmentation, flip_aug, - blur_aug, padding_white, padding_black, scaling, degrading,channels_shuffling, + blur_aug, padding_white, padding_black, scaling, shifting, degrading,channels_shuffling, brightening, binarization, adding_rgb_background, adding_rgb_foreground, add_red_textlines, blur_k, scales, degrade_scales,shuffle_indexes, brightness, dir_train, data_is_provided, scaling_bluring, scaling_brightness, scaling_binarization, rotation, rotation_not_90, @@ -183,7 +184,7 @@ def run(_config, n_classes, n_epochs, input_height, provide_patches(imgs_list, segs_list, dir_img, dir_seg, dir_flow_train_imgs, dir_flow_train_labels, input_height, input_width, blur_k, blur_aug, padding_white, padding_black, flip_aug, binarization, adding_rgb_background,adding_rgb_foreground, add_red_textlines, channels_shuffling, - scaling, degrading, brightening, scales, degrade_scales, brightness, + scaling, shifting, degrading, brightening, scales, degrade_scales, brightness, flip_index,shuffle_indexes, scaling_bluring, scaling_brightness, scaling_binarization, rotation, rotation_not_90, thetha, scaling_flip, task, augmentation=augmentation, patches=patches, dir_img_bin=dir_img_bin,number_of_backgrounds_per_image=number_of_backgrounds_per_image,list_all_possible_background_images=list_all_possible_background_images, dir_rgb_backgrounds=dir_rgb_backgrounds, dir_rgb_foregrounds=dir_rgb_foregrounds,list_all_possible_foreground_rgbs=list_all_possible_foreground_rgbs) @@ -191,7 +192,7 @@ def run(_config, n_classes, n_epochs, input_height, provide_patches(imgs_list_test, segs_list_test, dir_img_val, dir_seg_val, dir_flow_eval_imgs, dir_flow_eval_labels, input_height, input_width, blur_k, blur_aug, padding_white, padding_black, flip_aug, binarization, adding_rgb_background, adding_rgb_foreground, add_red_textlines, channels_shuffling, - scaling, degrading, brightening, scales, degrade_scales, brightness, + scaling, shifting, degrading, brightening, scales, degrade_scales, brightness, flip_index, shuffle_indexes, scaling_bluring, scaling_brightness, scaling_binarization, rotation, rotation_not_90, thetha, scaling_flip, task, augmentation=False, patches=patches,dir_img_bin=dir_img_bin,number_of_backgrounds_per_image=number_of_backgrounds_per_image,list_all_possible_background_images=list_all_possible_background_images, dir_rgb_backgrounds=dir_rgb_backgrounds,dir_rgb_foregrounds=dir_rgb_foregrounds,list_all_possible_foreground_rgbs=list_all_possible_foreground_rgbs ) diff --git a/train/utils.py b/train/utils.py index 3d42b64..d7ddb99 100644 --- a/train/utils.py +++ b/train/utils.py @@ -78,7 +78,50 @@ def return_image_with_red_elements(img, img_bin): img_final[:,:,2][img_bin[:,:,0]==0] = 255 return img_final +def shift_image_and_label(img, label, type_shift): + h_n = int(img.shape[0]*1.06) + w_n = int(img.shape[1]*1.06) + + channel0_avg = int( np.mean(img[:,:,0]) ) + channel1_avg = int( np.mean(img[:,:,1]) ) + channel2_avg = int( np.mean(img[:,:,2]) ) + h_diff = abs( img.shape[0] - h_n ) + w_diff = abs( img.shape[1] - w_n ) + + h_start = int(h_diff / 2.) + w_start = int(w_diff / 2.) + + img_scaled_padded = np.zeros((h_n, w_n, 3)) + label_scaled_padded = np.zeros((h_n, w_n, 3)) + + img_scaled_padded[:,:,0] = channel0_avg + img_scaled_padded[:,:,1] = channel1_avg + img_scaled_padded[:,:,2] = channel2_avg + + img_scaled_padded[h_start:h_start+img.shape[0], w_start:w_start+img.shape[1],:] = img[:,:,:] + label_scaled_padded[h_start:h_start+img.shape[0], w_start:w_start+img.shape[1],:] = label[:,:,:] + + + if type_shift=="xpos": + img_dis = img_scaled_padded[h_start:h_start+img.shape[0],2*w_start:2*w_start+img.shape[1],:] + label_dis = label_scaled_padded[h_start:h_start+img.shape[0],2*w_start:2*w_start+img.shape[1],:] + elif type_shift=="xmin": + img_dis = img_scaled_padded[h_start:h_start+img.shape[0],:img.shape[1],:] + label_dis = label_scaled_padded[h_start:h_start+img.shape[0],:img.shape[1],:] + elif type_shift=="ypos": + img_dis = img_scaled_padded[2*h_start:2*h_start+img.shape[0],w_start:w_start+img.shape[1],:] + label_dis = label_scaled_padded[2*h_start:2*h_start+img.shape[0],w_start:w_start+img.shape[1],:] + elif type_shift=="ymin": + img_dis = img_scaled_padded[:img.shape[0],w_start:w_start+img.shape[1],:] + label_dis = label_scaled_padded[:img.shape[0],w_start:w_start+img.shape[1],:] + elif type_shift=="xypos": + img_dis = img_scaled_padded[2*h_start:2*h_start+img.shape[0],2*w_start:2*w_start+img.shape[1],:] + label_dis = label_scaled_padded[2*h_start:2*h_start+img.shape[0],2*w_start:2*w_start+img.shape[1],:] + elif type_shift=="xymin": + img_dis = img_scaled_padded[:img.shape[0],:img.shape[1],:] + label_dis = label_scaled_padded[:img.shape[0],:img.shape[1],:] + return img_dis, label_dis def scale_image_for_no_patch(img, label, scale): h_n = int(img.shape[0]*scale) @@ -660,7 +703,7 @@ def get_patches_num_scale_new(dir_img_f, dir_seg_f, img, label, height, width, i def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow_train_imgs, dir_flow_train_labels, input_height, input_width, blur_k, blur_aug, - padding_white, padding_black, flip_aug, binarization, adding_rgb_background, adding_rgb_foreground, add_red_textlines, channels_shuffling, scaling, degrading, + padding_white, padding_black, flip_aug, binarization, adding_rgb_background, adding_rgb_foreground, add_red_textlines, channels_shuffling, scaling, shifting, degrading, brightening, scales, degrade_scales, brightness, flip_index, shuffle_indexes, scaling_bluring, scaling_brightness, scaling_binarization, rotation, rotation_not_90, thetha, scaling_flip, task, augmentation=False, patches=False, dir_img_bin=None,number_of_backgrounds_per_image=None,list_all_possible_background_images=None, dir_rgb_backgrounds=None, dir_rgb_foregrounds=None, list_all_possible_foreground_rgbs=None): @@ -759,6 +802,16 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', resize_image(img_scaled, input_height, input_width)) cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', resize_image(label_scaled, input_height, input_width)) indexer += 1 + if shifting: + shift_types = ['xpos', 'xmin', 'ypos', 'ymin', 'xypos', 'xymin'] + for st_ind in shift_types: + img_shifted, label_shifted = shift_image_and_label(cv2.imread(dir_img + '/'+im), + cv2.imread(dir_of_label_file), st_ind) + + cv2.imwrite(dir_flow_train_imgs + '/img_' + str(indexer) + '.png', resize_image(img_shifted, input_height, input_width)) + cv2.imwrite(dir_flow_train_labels + '/img_' + str(indexer) + '.png', resize_image(label_shifted, input_height, input_width)) + indexer += 1 + if adding_rgb_background: img_bin_corr = cv2.imread(dir_img_bin + '/' + img_name+'.png') From 238ea3bd8ef59da890646c9b1581145b8d937d85 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 14 Nov 2024 16:26:19 +0100 Subject: [PATCH 098/492] update resizing in inference --- train/inference.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/train/inference.py b/train/inference.py index 8d0a572..2b12ff7 100644 --- a/train/inference.py +++ b/train/inference.py @@ -442,10 +442,11 @@ class sbb_predict: self.img_org = np.copy(img) if img.shape[0] < self.img_height: - img = cv2.resize(img, (img.shape[1], self.img_width), interpolation=cv2.INTER_NEAREST) + img = self.resize_image(img, self.img_height, img.shape[1]) if img.shape[1] < self.img_width: - img = cv2.resize(img, (self.img_height, img.shape[0]), interpolation=cv2.INTER_NEAREST) + img = self.resize_image(img, img.shape[0], self.img_width) + margin = int(0.1 * self.img_width) width_mid = self.img_width - 2 * margin height_mid = self.img_height - 2 * margin From e9b860b27513a255ec94892aec8b6a61e23d0b87 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 18 Nov 2024 16:34:53 +0100 Subject: [PATCH 099/492] artificial_class_label for table region --- train/gt_gen_utils.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index cabc7df..95b8414 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -116,10 +116,10 @@ def update_region_contours(co_text, img_boundary, erosion_rate, dilation_rate, y con_eroded = return_contours_of_interested_region(img_boundary_in,pixel, min_size ) - #try: - co_text_eroded.append(con_eroded[0]) - #except: - #co_text_eroded.append(con) + try: + co_text_eroded.append(con_eroded[0]) + except: + co_text_eroded.append(con) img_boundary_in_dilated = cv2.dilate(img_boundary_in[:,:], KERNEL, iterations=dilation_rate) @@ -636,6 +636,10 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ erosion_rate = 0#2 dilation_rate = 2#4 co_text["footnote-continued"], img_boundary = update_region_contours(co_text["footnote-continued"], img_boundary, erosion_rate, dilation_rate, y_len, x_len ) + if "tableregion" in elements_with_artificial_class: + erosion_rate = 0#2 + dilation_rate = 3#4 + co_table, img_boundary = update_region_contours(co_table, img_boundary, erosion_rate, dilation_rate, y_len, x_len ) From 90a1b186f78a9ad5934c4d46d93e1c2bf20d6789 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 14 Mar 2025 17:20:33 +0100 Subject: [PATCH 100/492] this enables to visualize reading order of textregions provided in page-xml files --- train/generate_gt_for_training.py | 67 +++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/train/generate_gt_for_training.py b/train/generate_gt_for_training.py index cfcc151..9e0f45e 100644 --- a/train/generate_gt_for_training.py +++ b/train/generate_gt_for_training.py @@ -214,6 +214,73 @@ def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, i cv2.imwrite(os.path.join(dir_out_modal_image,final_f_name+'.png' ), input_multi_visual_modal) indexer = indexer+1 + + +@main.command() +@click.option( + "--dir_xml", + "-dx", + help="directory of GT page-xml files", + type=click.Path(exists=True, file_okay=False), +) + +@click.option( + "--dir_out", + "-do", + help="directory where plots will be written", + type=click.Path(exists=True, file_okay=False), +) + + +def visualize_reading_order(dir_xml, dir_out): + xml_files_ind = os.listdir(dir_xml) + + + indexer_start= 0#55166 + #min_area = 0.0001 + + for ind_xml in tqdm(xml_files_ind): + indexer = 0 + #print(ind_xml) + #print('########################') + xml_file = os.path.join(dir_xml,ind_xml ) + f_name = ind_xml.split('.')[0] + _, _, _, file_name, id_paragraph, id_header,co_text_paragraph,co_text_header,tot_region_ref,x_len, y_len,index_tot_regions,img_poly = read_xml(xml_file) + + id_all_text = id_paragraph + id_header + co_text_all = co_text_paragraph + co_text_header + + + cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, _ = find_new_features_of_contours(co_text_all) + + texts_corr_order_index = [int(index_tot_regions[tot_region_ref.index(i)]) for i in id_all_text ] + #texts_corr_order_index_int = [int(x) for x in texts_corr_order_index] + + + #cx_ordered = np.array(cx_main)[np.array(texts_corr_order_index)] + #cx_ordered = cx_ordered.astype(np.int32) + + cx_ordered = [int(val) for (_, val) in sorted(zip(texts_corr_order_index, cx_main), key=lambda x: \ + x[0], reverse=False)] + #cx_ordered = cx_ordered.astype(np.int32) + + cy_ordered = [int(val) for (_, val) in sorted(zip(texts_corr_order_index, cy_main), key=lambda x: \ + x[0], reverse=False)] + #cy_ordered = cy_ordered.astype(np.int32) + + + color = (0, 0, 255) + thickness = 20 + + img = np.zeros( (y_len,x_len,3) ) + img = cv2.fillPoly(img, pts =co_text_all, color=(255,0,0)) + for i in range(len(cx_ordered)-1): + start_point = (int(cx_ordered[i]), int(cy_ordered[i])) + end_point = (int(cx_ordered[i+1]), int(cy_ordered[i+1])) + img = cv2.arrowedLine(img, start_point, end_point, + color, thickness, tipLength = 0.03) + + cv2.imwrite(os.path.join(dir_out, f_name+'.png'), img) From 363c343b373d99170d795ff20520ba9e586b4ab1 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 17 Mar 2025 20:09:48 +0100 Subject: [PATCH 101/492] visualising reaidng order- Overlaying on image is provided --- train/generate_gt_for_training.py | 36 ++++++++++++++------- train/gt_gen_utils.py | 53 +++++++++++++++++++++++++++++++ 2 files changed, 78 insertions(+), 11 deletions(-) diff --git a/train/generate_gt_for_training.py b/train/generate_gt_for_training.py index 9e0f45e..9869bfa 100644 --- a/train/generate_gt_for_training.py +++ b/train/generate_gt_for_training.py @@ -231,8 +231,12 @@ def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, i type=click.Path(exists=True, file_okay=False), ) +@click.option( + "--dir_imgs", + "-dimg", + help="directory where the overlayed plots will be written", ) -def visualize_reading_order(dir_xml, dir_out): +def visualize_reading_order(dir_xml, dir_out, dir_imgs): xml_files_ind = os.listdir(dir_xml) @@ -271,16 +275,26 @@ def visualize_reading_order(dir_xml, dir_out): color = (0, 0, 255) thickness = 20 - - img = np.zeros( (y_len,x_len,3) ) - img = cv2.fillPoly(img, pts =co_text_all, color=(255,0,0)) - for i in range(len(cx_ordered)-1): - start_point = (int(cx_ordered[i]), int(cy_ordered[i])) - end_point = (int(cx_ordered[i+1]), int(cy_ordered[i+1])) - img = cv2.arrowedLine(img, start_point, end_point, - color, thickness, tipLength = 0.03) - - cv2.imwrite(os.path.join(dir_out, f_name+'.png'), img) + if dir_imgs: + layout = np.zeros( (y_len,x_len,3) ) + layout = cv2.fillPoly(layout, pts =co_text_all, color=(1,1,1)) + + img_file_name_with_format = find_format_of_given_filename_in_dir(dir_imgs, f_name) + img = cv2.imread(os.path.join(dir_imgs, img_file_name_with_format)) + + overlayed = overlay_layout_on_image(layout, img, cx_ordered, cy_ordered, color, thickness) + cv2.imwrite(os.path.join(dir_out, f_name+'.png'), overlayed) + + else: + img = np.zeros( (y_len,x_len,3) ) + img = cv2.fillPoly(img, pts =co_text_all, color=(255,0,0)) + for i in range(len(cx_ordered)-1): + start_point = (int(cx_ordered[i]), int(cy_ordered[i])) + end_point = (int(cx_ordered[i+1]), int(cy_ordered[i+1])) + img = cv2.arrowedLine(img, start_point, end_point, + color, thickness, tipLength = 0.03) + + cv2.imwrite(os.path.join(dir_out, f_name+'.png'), img) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 95b8414..753abf2 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -1290,3 +1290,56 @@ def update_list_and_return_first_with_length_bigger_than_one(index_element_to_be else: early_list_bigger_than_one = -20 return list_inp, early_list_bigger_than_one + +def overlay_layout_on_image(prediction, img, cx_ordered, cy_ordered, color, thickness): + + unique_classes = np.unique(prediction[:,:,0]) + rgb_colors = {'0' : [255, 255, 255], + '1' : [255, 0, 0], + '2' : [0, 0, 255], + '3' : [255, 0, 125], + '4' : [125, 125, 125], + '5' : [125, 125, 0], + '6' : [0, 125, 255], + '7' : [0, 125, 0], + '8' : [125, 125, 125], + '9' : [0, 125, 255], + '10' : [125, 0, 125], + '11' : [0, 255, 0], + '12' : [255, 125, 0], + '13' : [0, 255, 255], + '14' : [255, 125, 125], + '15' : [255, 0, 255]} + + layout_only = np.zeros(prediction.shape) + + for unq_class in unique_classes: + rgb_class_unique = rgb_colors[str(int(unq_class))] + layout_only[:,:,0][prediction[:,:,0]==unq_class] = rgb_class_unique[0] + layout_only[:,:,1][prediction[:,:,0]==unq_class] = rgb_class_unique[1] + layout_only[:,:,2][prediction[:,:,0]==unq_class] = rgb_class_unique[2] + + + + #img = self.resize_image(img, layout_only.shape[0], layout_only.shape[1]) + + layout_only = layout_only.astype(np.int32) + + for i in range(len(cx_ordered)-1): + start_point = (int(cx_ordered[i]), int(cy_ordered[i])) + end_point = (int(cx_ordered[i+1]), int(cy_ordered[i+1])) + layout_only = cv2.arrowedLine(layout_only, start_point, end_point, + color, thickness, tipLength = 0.03) + + img = img.astype(np.int32) + + + + added_image = cv2.addWeighted(img,0.5,layout_only,0.1,0) + + return added_image + +def find_format_of_given_filename_in_dir(dir_imgs, f_name): + ls_imgs = os.listdir(dir_imgs) + file_interested = [ind for ind in ls_imgs if ind.startswith(f_name+'.')] + return file_interested[0] From a22df11ebb564631611f4609048b31e67eb0541f Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 14 Apr 2025 00:42:08 +0200 Subject: [PATCH 102/492] Restoring the contour in the original image caused an error due to an empty tuple. This issue has been resolved, and as expected, the confidence score for this contour is set to zero --- src/eynollah/utils/contour.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index a81ccb4..0e84153 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -230,7 +230,6 @@ def get_textregion_contours_in_org_image_light_old(cnts, img, slope_first): def do_back_rotation_and_get_cnt_back(contour_par, index_r_con, img, slope_first, confidence_matrix): img_copy = np.zeros(img.shape) img_copy = cv2.fillPoly(img_copy, pts=[contour_par], color=(1, 1, 1)) - confidence_matrix_mapped_with_contour = confidence_matrix * img_copy[:,:,0] confidence_contour = np.sum(confidence_matrix_mapped_with_contour) / float(np.sum(img_copy[:,:,0])) @@ -239,9 +238,13 @@ def do_back_rotation_and_get_cnt_back(contour_par, index_r_con, img, slope_first ret, thresh = cv2.threshold(imgray, 0, 255, 0) cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1]) - cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0]) - # print(np.shape(cont_int[0])) + if len(cont_int)==0: + cont_int = [] + cont_int.append(contour_par) + confidence_contour = 0 + else: + cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1]) + cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0]) return cont_int[0], index_r_con, confidence_contour def get_textregion_contours_in_org_image_light(cnts, img, slope_first, confidence_matrix, map=map): From 41318f0404722c3980db6f9174871c2e222258d7 Mon Sep 17 00:00:00 2001 From: kba Date: Tue, 15 Apr 2025 11:14:26 +0200 Subject: [PATCH 103/492] :memo: changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f7ce6bb..ad86fe5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,10 @@ Versioned according to [Semantic Versioning](http://semver.org/). ## Unreleased +Fixed: + + * restoring the contour in the original image caused an error due to an empty tuple + ## [0.4.0] - 2025-04-07 Fixed: From 30ba23464193b61541e4ba7784974b4d5c4ec33d Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 16 Apr 2025 19:27:17 +0200 Subject: [PATCH 104/492] CI: pypi --- .github/workflows/pypi.yml | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 .github/workflows/pypi.yml diff --git a/.github/workflows/pypi.yml b/.github/workflows/pypi.yml new file mode 100644 index 0000000..bb2344e --- /dev/null +++ b/.github/workflows/pypi.yml @@ -0,0 +1,24 @@ +name: PyPI CD + +on: + release: + types: [published] + workflow_dispatch: + +jobs: + pypi-publish: + name: upload release to PyPI + runs-on: ubuntu-latest + permissions: + # IMPORTANT: this permission is mandatory for Trusted Publishing + id-token: write + steps: + - uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v4 + - name: Build package + run: make build + - name: Publish package distributions to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + with: + verbose: true From 825b2634f96788cc3351f089d24b8a1c2e202194 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 16 Apr 2025 23:36:41 +0200 Subject: [PATCH 105/492] rotation augmentation is provided for machine based reading order --- train/train.py | 7 +++++-- train/utils.py | 23 ++++++++++++++++++++++- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/train/train.py b/train/train.py index 7e3e390..130c7f4 100644 --- a/train/train.py +++ b/train/train.py @@ -380,7 +380,10 @@ def run(_config, n_classes, n_epochs, input_height, dir_flow_train_labels = os.path.join(dir_train, 'labels') classes = os.listdir(dir_flow_train_labels) - num_rows =len(classes) + if augmentation: + num_rows = len(classes)*(len(thetha) + 1) + else: + num_rows = len(classes) #ls_test = os.listdir(dir_flow_train_labels) #f1score_tot = [0] @@ -390,7 +393,7 @@ def run(_config, n_classes, n_epochs, input_height, model.compile(loss="binary_crossentropy", optimizer = opt_adam,metrics=['accuracy']) for i in range(n_epochs): - history = model.fit(generate_arrays_from_folder_reading_order(dir_flow_train_labels, dir_flow_train_imgs, n_batch, input_height, input_width, n_classes), steps_per_epoch=num_rows / n_batch, verbose=1) + history = model.fit(generate_arrays_from_folder_reading_order(dir_flow_train_labels, dir_flow_train_imgs, n_batch, input_height, input_width, n_classes, thetha, augmentation), steps_per_epoch=num_rows / n_batch, verbose=1) model.save( os.path.join(dir_output,'model_'+str(i+indexer_start) )) with open(os.path.join(os.path.join(dir_output,'model_'+str(i)),"config.json"), "w") as fp: diff --git a/train/utils.py b/train/utils.py index d7ddb99..50c21af 100644 --- a/train/utils.py +++ b/train/utils.py @@ -363,6 +363,11 @@ def rotation_not_90_func(img, label, thetha): return rotate_max_area(img, rotated, rotated_label, thetha) +def rotation_not_90_func_single_image(img, thetha): + rotated = imutils.rotate(img, thetha) + return rotate_max_area(img, rotated, thetha) + + def color_images(seg, n_classes): ann_u = range(n_classes) if len(np.shape(seg)) == 3: @@ -410,7 +415,7 @@ def IoU(Yi, y_predi): #print("Mean IoU: {:4.3f}".format(mIoU)) return mIoU -def generate_arrays_from_folder_reading_order(classes_file_dir, modal_dir, batchsize, height, width, n_classes): +def generate_arrays_from_folder_reading_order(classes_file_dir, modal_dir, batchsize, height, width, n_classes, thetha, augmentation=False): all_labels_files = os.listdir(classes_file_dir) ret_x= np.zeros((batchsize, height, width, 3))#.astype(np.int16) ret_y= np.zeros((batchsize, n_classes)).astype(np.int16) @@ -433,6 +438,22 @@ def generate_arrays_from_folder_reading_order(classes_file_dir, modal_dir, batch ret_x= np.zeros((batchsize, height, width, 3))#.astype(np.int16) ret_y= np.zeros((batchsize, n_classes)).astype(np.int16) batchcount = 0 + + if augmentation: + for thetha_i in thetha: + img_rot = rotation_not_90_func_single_image(img, thetha_i) + + ret_x[batchcount, :,:,0] = img_rot[:,:,0]/3.0 + ret_x[batchcount, :,:,2] = img_rot[:,:,2]/3.0 + ret_x[batchcount, :,:,1] = img_rot[:,:,1]/5.0 + + ret_y[batchcount, :] = label_class + batchcount+=1 + if batchcount>=batchsize: + yield (ret_x, ret_y) + ret_x= np.zeros((batchsize, height, width, 3))#.astype(np.int16) + ret_y= np.zeros((batchsize, n_classes)).astype(np.int16) + batchcount = 0 def data_gen(img_folder, mask_folder, batch_size, input_height, input_width, n_classes, task='segmentation'): c = 0 From dd21a3b33a3adb1a8ba2c34e2144e01b2b094366 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 17 Apr 2025 00:05:59 +0200 Subject: [PATCH 106/492] updating:rotation augmentation is provided for machine based reading order --- train/utils.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/train/utils.py b/train/utils.py index 50c21af..485056b 100644 --- a/train/utils.py +++ b/train/utils.py @@ -356,6 +356,18 @@ def rotate_max_area(image, rotated, rotated_label, angle): x2 = x1 + int(wr) return rotated[y1:y2, x1:x2], rotated_label[y1:y2, x1:x2] +def rotate_max_area_single_image(image, rotated, angle): + """ image: cv2 image matrix object + angle: in degree + """ + wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], + math.radians(angle)) + h, w, _ = rotated.shape + y1 = h // 2 - int(hr / 2) + y2 = y1 + int(hr) + x1 = w // 2 - int(wr / 2) + x2 = x1 + int(wr) + return rotated[y1:y2, x1:x2] def rotation_not_90_func(img, label, thetha): rotated = imutils.rotate(img, thetha) @@ -365,7 +377,7 @@ def rotation_not_90_func(img, label, thetha): def rotation_not_90_func_single_image(img, thetha): rotated = imutils.rotate(img, thetha) - return rotate_max_area(img, rotated, thetha) + return rotate_max_area_single_image(img, rotated, thetha) def color_images(seg, n_classes): From 4635dd219d5cfade1c038a371dceb78452a7fbf9 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 17 Apr 2025 00:12:30 +0200 Subject: [PATCH 107/492] updating:rotation augmentation is provided for machine based reading order --- train/utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/train/utils.py b/train/utils.py index 485056b..8be6963 100644 --- a/train/utils.py +++ b/train/utils.py @@ -455,6 +455,8 @@ def generate_arrays_from_folder_reading_order(classes_file_dir, modal_dir, batch for thetha_i in thetha: img_rot = rotation_not_90_func_single_image(img, thetha_i) + img_rot = resize_image(img_rot, height, width) + ret_x[batchcount, :,:,0] = img_rot[:,:,0]/3.0 ret_x[batchcount, :,:,2] = img_rot[:,:,2]/3.0 ret_x[batchcount, :,:,1] = img_rot[:,:,1]/5.0 From 192b9111e31eee4758364b1fe9f63f80aa533ec2 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 22 Apr 2025 00:23:01 +0200 Subject: [PATCH 108/492] updating eynollah README, how to use it for use cases --- README.md | 39 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 16ac661..3cfb587 100644 --- a/README.md +++ b/README.md @@ -50,10 +50,16 @@ For documentation on methods and models, have a look at [`models.md`](https://gi In case you want to train your own model with Eynollah, have a look at [`train.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/train.md). ## Usage -The command-line interface can be called like this: + +Eynollah has four key use cases: layout analysis, binarization, OCR, and machine-based reading order. + +### Layout +The layout module is responsible for detecting layouts, identifying text lines, and determining reading order using both heuristic methods or a machine-based reading order detection model. It's important to note that this functionality should not be confused with the machine-based-reading-order use case. The latter, still under development, focuses specifically on determining the reading order for a given layout in an XML file. In contrast, layout detection takes an image as input, and after detecting the layout, it can also determine the reading order using a machine-based model. + +The command-line interface for layout can be called like this: ```sh -eynollah \ +eynollah layout \ -i | -di \ -o \ -m \ @@ -66,6 +72,7 @@ The following options can be used to further configure the processing: |-------------------|:-------------------------------------------------------------------------------| | `-fl` | full layout analysis including all steps and segmentation classes | | `-light` | lighter and faster but simpler method for main region detection and deskewing | +| `-tll` | this indicates the light textline and should be passed with light version | | `-tab` | apply table detection | | `-ae` | apply enhancement (the resulting image is saved to the output directory) | | `-as` | apply scaling | @@ -83,6 +90,34 @@ The following options can be used to further configure the processing: If no option is set, the tool performs layout detection of main regions (background, text, images, separators and marginals). The best output quality is produced when RGB images are used as input rather than greyscale or binarized images. +### Binarization +Document Image Binarization + +The command-line interface for binarization of single image can be called like this: + +```sh +eynollah binarization \ + -m \ + \ + +``` + +and for flowing from a directory like this: + +```sh +eynollah binarization \ + -m \ + -di \ + -do +``` + +### OCR +Under development + +### Machine-based-reading-order +Under development + + #### Use as OCR-D processor Eynollah ships with a CLI interface to be used as [OCR-D](https://ocr-d.de) [processor](https://ocr-d.de/en/spec/cli), From 77dae129d50783b225eb3f72e32d38adaa8e0610 Mon Sep 17 00:00:00 2001 From: Konstantin Baierer Date: Tue, 22 Apr 2025 13:22:28 +0200 Subject: [PATCH 109/492] CI: Use most recent actions/setup-python@v5 --- .github/workflows/pypi.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pypi.yml b/.github/workflows/pypi.yml index bb2344e..248f4ef 100644 --- a/.github/workflows/pypi.yml +++ b/.github/workflows/pypi.yml @@ -15,7 +15,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Build package run: make build - name: Publish package distributions to PyPI From 208bde706f6a998af7811372ca80be82d3af95cb Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 30 Apr 2025 13:55:09 +0200 Subject: [PATCH 110/492] resolving issue #158 --- src/eynollah/utils/separate_lines.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 3499c29..6602574 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -214,9 +214,13 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): textline_con_fil=filter_contours_area_of_image(img_patch, textline_con, hierarchy, max_area=1, min_area=0.0008) - y_diff_mean=np.mean(np.diff(peaks_new_tot))#self.find_contours_mean_y_diff(textline_con_fil) - sigma_gaus=int( y_diff_mean * (7./40.0) ) - #print(sigma_gaus,'sigma_gaus') + + if len(np.diff(peaks_new_tot))>0: + y_diff_mean=np.mean(np.diff(peaks_new_tot))#self.find_contours_mean_y_diff(textline_con_fil) + sigma_gaus=int( y_diff_mean * (7./40.0) ) + else: + sigma_gaus=12 + except: sigma_gaus=12 if sigma_gaus<3: @@ -1616,6 +1620,7 @@ def do_work_of_slopes_new( textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con, hierarchy, max_area=1, min_area=0.00008) + y_diff_mean = find_contours_mean_y_diff(textline_con_fil) if len(textline_con_fil) > 1 else np.NaN if np.isnan(y_diff_mean): slope_for_all = MAX_SLOPE @@ -1641,13 +1646,6 @@ def do_work_of_slopes_new( all_text_region_raw = textline_mask_tot_ea[y: y + h, x: x + w].copy() mask_only_con_region = mask_only_con_region[y: y + h, x: x + w] - ##plt.imshow(textline_mask_tot_ea) - ##plt.show() - ##plt.imshow(all_text_region_raw) - ##plt.show() - ##plt.imshow(mask_only_con_region) - ##plt.show() - all_text_region_raw[mask_only_con_region == 0] = 0 cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contour_par, box_text) From 4cb4414740a89741c6ff25a33932ffc16ce201f8 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 30 Apr 2025 16:01:52 +0200 Subject: [PATCH 111/492] Resolve remaining issue with #158 and resolving #124 --- src/eynollah/utils/separate_lines.py | 263 ++++++++++----------------- 1 file changed, 95 insertions(+), 168 deletions(-) diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 6602574..0322579 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -102,14 +102,15 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): textline_con_fil = filter_contours_area_of_image(img_patch, textline_con, hierarchy, max_area=1, min_area=0.0008) - y_diff_mean = np.mean(np.diff(peaks_new_tot)) # self.find_contours_mean_y_diff(textline_con_fil) - sigma_gaus = int(y_diff_mean * (7.0 / 40.0)) - # print(sigma_gaus,'sigma_gaus') + if len(np.diff(peaks_new_tot))>1: + y_diff_mean = np.mean(np.diff(peaks_new_tot)) # self.find_contours_mean_y_diff(textline_con_fil) + sigma_gaus = int(y_diff_mean * (7.0 / 40.0)) + else: + sigma_gaus = 12 except: sigma_gaus = 12 if sigma_gaus < 3: sigma_gaus = 3 - # print(sigma_gaus,'sigma') y_padded_smoothed = gaussian_filter1d(y_padded, sigma_gaus) y_padded_up_to_down = -y_padded + np.max(y_padded) @@ -137,7 +138,6 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): M = cv2.getRotationMatrix2D(center, -thetha, 1.0) x_d = M[0, 2] y_d = M[1, 2] - thetha = thetha / 180. * np.pi rotation_matrix = np.array([[np.cos(thetha), -np.sin(thetha)], [np.sin(thetha), np.cos(thetha)]]) contour_text_interest_copy = contour_text_interest.copy() @@ -162,77 +162,73 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): x = np.array(range(len(y))) peaks_real, _ = find_peaks(gaussian_filter1d(y, 3), height=0) - if 1>0: - try: - y_padded_smoothed_e= gaussian_filter1d(y_padded, 2) - y_padded_up_to_down_e=-y_padded+np.max(y_padded) - y_padded_up_to_down_padded_e=np.zeros(len(y_padded_up_to_down_e)+40) - y_padded_up_to_down_padded_e[20:len(y_padded_up_to_down_e)+20]=y_padded_up_to_down_e - y_padded_up_to_down_padded_e= gaussian_filter1d(y_padded_up_to_down_padded_e, 2) - + + try: + y_padded_smoothed_e= gaussian_filter1d(y_padded, 2) + y_padded_up_to_down_e=-y_padded+np.max(y_padded) + y_padded_up_to_down_padded_e=np.zeros(len(y_padded_up_to_down_e)+40) + y_padded_up_to_down_padded_e[20:len(y_padded_up_to_down_e)+20]=y_padded_up_to_down_e + y_padded_up_to_down_padded_e= gaussian_filter1d(y_padded_up_to_down_padded_e, 2) + + peaks_e, _ = find_peaks(y_padded_smoothed_e, height=0) + peaks_neg_e, _ = find_peaks(y_padded_up_to_down_padded_e, height=0) + neg_peaks_max=np.max(y_padded_up_to_down_padded_e[peaks_neg_e]) - peaks_e, _ = find_peaks(y_padded_smoothed_e, height=0) - peaks_neg_e, _ = find_peaks(y_padded_up_to_down_padded_e, height=0) - neg_peaks_max=np.max(y_padded_up_to_down_padded_e[peaks_neg_e]) + arg_neg_must_be_deleted= np.arange(len(peaks_neg_e))[y_padded_up_to_down_padded_e[peaks_neg_e]/float(neg_peaks_max)<0.3] + diff_arg_neg_must_be_deleted=np.diff(arg_neg_must_be_deleted) + + arg_diff=np.array(range(len(diff_arg_neg_must_be_deleted))) + arg_diff_cluster=arg_diff[diff_arg_neg_must_be_deleted>1] + peaks_new=peaks_e[:] + peaks_neg_new=peaks_neg_e[:] - arg_neg_must_be_deleted= np.arange(len(peaks_neg_e))[y_padded_up_to_down_padded_e[peaks_neg_e]/float(neg_peaks_max)<0.3] - diff_arg_neg_must_be_deleted=np.diff(arg_neg_must_be_deleted) - - arg_diff=np.array(range(len(diff_arg_neg_must_be_deleted))) - arg_diff_cluster=arg_diff[diff_arg_neg_must_be_deleted>1] + clusters_to_be_deleted=[] + if len(arg_diff_cluster)>0: + clusters_to_be_deleted.append(arg_neg_must_be_deleted[0:arg_diff_cluster[0]+1]) + for i in range(len(arg_diff_cluster)-1): + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i]+1: + arg_diff_cluster[i+1]+1]) + clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster)-1]+1:]) + if len(clusters_to_be_deleted)>0: + peaks_new_extra=[] + for m in range(len(clusters_to_be_deleted)): + min_cluster=np.min(peaks_e[clusters_to_be_deleted[m]]) + max_cluster=np.max(peaks_e[clusters_to_be_deleted[m]]) + peaks_new_extra.append( int( (min_cluster+max_cluster)/2.0) ) + for m1 in range(len(clusters_to_be_deleted[m])): + peaks_new=peaks_new[peaks_new!=peaks_e[clusters_to_be_deleted[m][m1]-1]] + peaks_new=peaks_new[peaks_new!=peaks_e[clusters_to_be_deleted[m][m1]]] + peaks_neg_new=peaks_neg_new[peaks_neg_new!=peaks_neg_e[clusters_to_be_deleted[m][m1]]] + peaks_new_tot=[] + for i1 in peaks_new: + peaks_new_tot.append(i1) + for i1 in peaks_new_extra: + peaks_new_tot.append(i1) + peaks_new_tot=np.sort(peaks_new_tot) + else: + peaks_new_tot=peaks_e[:] - peaks_new=peaks_e[:] - peaks_neg_new=peaks_neg_e[:] + textline_con,hierarchy=return_contours_of_image(img_patch) + textline_con_fil=filter_contours_area_of_image(img_patch, + textline_con, hierarchy, + max_area=1, min_area=0.0008) - clusters_to_be_deleted=[] - if len(arg_diff_cluster)>0: - clusters_to_be_deleted.append(arg_neg_must_be_deleted[0:arg_diff_cluster[0]+1]) - for i in range(len(arg_diff_cluster)-1): - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i]+1: - arg_diff_cluster[i+1]+1]) - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster)-1]+1:]) - if len(clusters_to_be_deleted)>0: - peaks_new_extra=[] - for m in range(len(clusters_to_be_deleted)): - min_cluster=np.min(peaks_e[clusters_to_be_deleted[m]]) - max_cluster=np.max(peaks_e[clusters_to_be_deleted[m]]) - peaks_new_extra.append( int( (min_cluster+max_cluster)/2.0) ) - for m1 in range(len(clusters_to_be_deleted[m])): - peaks_new=peaks_new[peaks_new!=peaks_e[clusters_to_be_deleted[m][m1]-1]] - peaks_new=peaks_new[peaks_new!=peaks_e[clusters_to_be_deleted[m][m1]]] - peaks_neg_new=peaks_neg_new[peaks_neg_new!=peaks_neg_e[clusters_to_be_deleted[m][m1]]] - peaks_new_tot=[] - for i1 in peaks_new: - peaks_new_tot.append(i1) - for i1 in peaks_new_extra: - peaks_new_tot.append(i1) - peaks_new_tot=np.sort(peaks_new_tot) - else: - peaks_new_tot=peaks_e[:] - - textline_con,hierarchy=return_contours_of_image(img_patch) - textline_con_fil=filter_contours_area_of_image(img_patch, - textline_con, hierarchy, - max_area=1, min_area=0.0008) - - if len(np.diff(peaks_new_tot))>0: - y_diff_mean=np.mean(np.diff(peaks_new_tot))#self.find_contours_mean_y_diff(textline_con_fil) - sigma_gaus=int( y_diff_mean * (7./40.0) ) - else: - sigma_gaus=12 - - except: + if len(np.diff(peaks_new_tot))>0: + y_diff_mean=np.mean(np.diff(peaks_new_tot))#self.find_contours_mean_y_diff(textline_con_fil) + sigma_gaus=int( y_diff_mean * (7./40.0) ) + else: sigma_gaus=12 - if sigma_gaus<3: - sigma_gaus=3 - #print(sigma_gaus,'sigma') + + except: + sigma_gaus=12 + if sigma_gaus<3: + sigma_gaus=3 y_padded_smoothed= gaussian_filter1d(y_padded, sigma_gaus) y_padded_up_to_down=-y_padded+np.max(y_padded) y_padded_up_to_down_padded=np.zeros(len(y_padded_up_to_down)+40) y_padded_up_to_down_padded[20:len(y_padded_up_to_down)+20]=y_padded_up_to_down y_padded_up_to_down_padded= gaussian_filter1d(y_padded_up_to_down_padded, sigma_gaus) - peaks, _ = find_peaks(y_padded_smoothed, height=0) peaks_neg, _ = find_peaks(y_padded_up_to_down_padded, height=0) @@ -243,6 +239,7 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): arg_diff=np.array(range(len(diff_arg_neg_must_be_deleted))) arg_diff_cluster=arg_diff[diff_arg_neg_must_be_deleted>1] + except: arg_neg_must_be_deleted=[] arg_diff_cluster=[] @@ -250,7 +247,6 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): peaks_new=peaks[:] peaks_neg_new=peaks_neg[:] clusters_to_be_deleted=[] - if len(arg_diff_cluster)>=2 and len(arg_diff_cluster)>0: clusters_to_be_deleted.append(arg_neg_must_be_deleted[0:arg_diff_cluster[0]+1]) for i in range(len(arg_diff_cluster)-1): @@ -279,21 +275,6 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): peaks_new_tot.append(i1) peaks_new_tot=np.sort(peaks_new_tot) - ##plt.plot(y_padded_up_to_down_padded) - ##plt.plot(peaks_neg,y_padded_up_to_down_padded[peaks_neg],'*') - ##plt.show() - - ##plt.plot(y_padded_up_to_down_padded) - ##plt.plot(peaks_neg_new,y_padded_up_to_down_padded[peaks_neg_new],'*') - ##plt.show() - - ##plt.plot(y_padded_smoothed) - ##plt.plot(peaks,y_padded_smoothed[peaks],'*') - ##plt.show() - - ##plt.plot(y_padded_smoothed) - ##plt.plot(peaks_new_tot,y_padded_smoothed[peaks_new_tot],'*') - ##plt.show() peaks=peaks_new_tot[:] peaks_neg=peaks_neg_new[:] else: @@ -302,11 +283,13 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): peaks_neg=peaks_neg_new[:] except: pass - - mean_value_of_peaks=np.mean(y_padded_smoothed[peaks]) - std_value_of_peaks=np.std(y_padded_smoothed[peaks]) + if len(y_padded_smoothed[peaks]) > 1: + mean_value_of_peaks=np.mean(y_padded_smoothed[peaks]) + std_value_of_peaks=np.std(y_padded_smoothed[peaks]) + else: + mean_value_of_peaks = np.nan + std_value_of_peaks = np.nan peaks_values=y_padded_smoothed[peaks] - peaks_neg = peaks_neg - 20 - 20 peaks = peaks - 20 for jj in range(len(peaks_neg)): @@ -349,7 +332,6 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): point_down_narrow = peaks[jj] + first_nonzero + int( 1.1 * dis_to_next_down) ###-int(dis_to_next_down*1./2) - if point_down_narrow >= img_patch.shape[0]: point_down_narrow = img_patch.shape[0] - 2 @@ -605,7 +587,6 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): [int(x_max), int(point_up)], [int(x_max), int(point_down)], [int(x_min), int(point_down)]])) - return peaks, textline_boxes_rot def separate_lines_vertical(img_patch, contour_text_interest, thetha): @@ -637,7 +618,7 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): peaks_neg_new = peaks_neg[:] clusters_to_be_deleted = [] - if len(arg_diff_cluster) >= 2 and len(arg_diff_cluster) > 0: + if len(arg_neg_must_be_deleted) >= 2 and len(arg_diff_cluster) >= 2: clusters_to_be_deleted.append(arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) for i in range(len(arg_diff_cluster) - 1): clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : @@ -645,7 +626,7 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) elif len(arg_neg_must_be_deleted) >= 2 and len(arg_diff_cluster) == 0: clusters_to_be_deleted.append(arg_neg_must_be_deleted[:]) - if len(arg_neg_must_be_deleted) == 1: + else: clusters_to_be_deleted.append(arg_neg_must_be_deleted) if len(clusters_to_be_deleted) > 0: peaks_new_extra = [] @@ -671,9 +652,14 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): peaks_new_tot = peaks[:] peaks = peaks_new_tot[:] peaks_neg = peaks_neg_new[:] - - mean_value_of_peaks = np.mean(y_padded_smoothed[peaks]) - std_value_of_peaks = np.std(y_padded_smoothed[peaks]) + + if len(y_padded_smoothed[peaks])>1: + mean_value_of_peaks = np.mean(y_padded_smoothed[peaks]) + std_value_of_peaks = np.std(y_padded_smoothed[peaks]) + else: + mean_value_of_peaks = np.nan + std_value_of_peaks = np.nan + peaks_values = y_padded_smoothed[peaks] peaks_neg = peaks_neg - 20 - 20 @@ -691,7 +677,6 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): textline_boxes_rot = [] if len(peaks_neg) == len(peaks) + 1 and len(peaks) >= 3: - # print('11') for jj in range(len(peaks)): if jj == (len(peaks) - 1): @@ -998,15 +983,16 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): textline_con_fil = filter_contours_area_of_image(img_patch, textline_con, hierarchy, max_area=1, min_area=0.0008) - y_diff_mean = np.mean(np.diff(peaks_new_tot)) # self.find_contours_mean_y_diff(textline_con_fil) + if len(np.diff(peaks_new_tot)): + y_diff_mean = np.mean(np.diff(peaks_new_tot)) # self.find_contours_mean_y_diff(textline_con_fil) + sigma_gaus = int(y_diff_mean * (7.0 / 40.0)) + else: + sigma_gaus = 12 - sigma_gaus = int(y_diff_mean * (7.0 / 40.0)) - # print(sigma_gaus,'sigma_gaus') except: sigma_gaus = 12 if sigma_gaus < 3: sigma_gaus = 3 - # print(sigma_gaus,'sigma') y_padded_smoothed = gaussian_filter1d(y_padded, sigma_gaus) y_padded_up_to_down = -y_padded + np.max(y_padded) @@ -1030,7 +1016,7 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): arg_diff_cluster = arg_diff[diff_arg_neg_must_be_deleted > 1] clusters_to_be_deleted = [] - if len(arg_diff_cluster) >= 2 and len(arg_diff_cluster) > 0: + if len(arg_neg_must_be_deleted) >= 2 and len(arg_diff_cluster) >= 2: clusters_to_be_deleted.append(arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) for i in range(len(arg_diff_cluster) - 1): clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : @@ -1038,7 +1024,7 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) elif len(arg_neg_must_be_deleted) >= 2 and len(arg_diff_cluster) == 0: clusters_to_be_deleted.append(arg_neg_must_be_deleted[:]) - if len(arg_neg_must_be_deleted) == 1: + else: clusters_to_be_deleted.append(arg_neg_must_be_deleted) if len(clusters_to_be_deleted) > 0: peaks_new_extra = [] @@ -1081,9 +1067,14 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): peaks_new_tot = peaks[:] peaks = peaks_new_tot[:] peaks_neg = peaks_neg_new[:] - - mean_value_of_peaks = np.mean(y_padded_smoothed[peaks]) - std_value_of_peaks = np.std(y_padded_smoothed[peaks]) + + if len(y_padded_smoothed[peaks]) > 1: + mean_value_of_peaks = np.mean(y_padded_smoothed[peaks]) + std_value_of_peaks = np.std(y_padded_smoothed[peaks]) + else: + mean_value_of_peaks = np.nan + std_value_of_peaks = np.nan + peaks_values = y_padded_smoothed[peaks] ###peaks_neg = peaks_neg - 20 - 20 @@ -1093,10 +1084,8 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): if len(peaks_neg_true) > 0: peaks_neg_true = np.array(peaks_neg_true) - peaks_neg_true = peaks_neg_true - 20 - 20 - # print(peaks_neg_true) for i in range(len(peaks_neg_true)): img_patch[peaks_neg_true[i] - 6 : peaks_neg_true[i] + 6, :] = 0 else: @@ -1181,13 +1170,11 @@ def separate_lines_new_inside_tiles(img_path, thetha): if diff_peaks[i] <= cut_off: forest.append(peaks_neg[i + 1]) if diff_peaks[i] > cut_off: - # print(forest[np.argmin(z[forest]) ] ) if not np.isnan(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) forest = [] forest.append(peaks_neg[i + 1]) if i == (len(peaks_neg) - 1): - # print(print(forest[np.argmin(z[forest]) ] )) if not np.isnan(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) @@ -1204,17 +1191,14 @@ def separate_lines_new_inside_tiles(img_path, thetha): if diff_peaks_pos[i] <= cut_off: forest.append(peaks[i + 1]) if diff_peaks_pos[i] > cut_off: - # print(forest[np.argmin(z[forest]) ] ) if not np.isnan(forest[np.argmax(z[forest])]): peaks_pos_true.append(forest[np.argmax(z[forest])]) forest = [] forest.append(peaks[i + 1]) if i == (len(peaks) - 1): - # print(print(forest[np.argmin(z[forest]) ] )) if not np.isnan(forest[np.argmax(z[forest])]): peaks_pos_true.append(forest[np.argmax(z[forest])]) - # print(len(peaks_neg_true) ,len(peaks_pos_true) ,'lensss') if len(peaks_neg_true) > 0: peaks_neg_true = np.array(peaks_neg_true) @@ -1240,7 +1224,6 @@ def separate_lines_new_inside_tiles(img_path, thetha): """ peaks_neg_true = peaks_neg_true - 20 - 20 - # print(peaks_neg_true) for i in range(len(peaks_neg_true)): img_path[peaks_neg_true[i] - 6 : peaks_neg_true[i] + 6, :] = 0 @@ -1282,7 +1265,6 @@ def separate_lines_vertical_cont(img_patch, contour_text_interest, thetha, box_i contours_imgs, hierarchy, max_area=max_area, min_area=min_area) cont_final = [] - ###print(add_boxes_coor_into_textlines,'ikki') for i in range(len(contours_imgs)): img_contour = np.zeros((cnts_images.shape[0], cnts_images.shape[1], 3)) img_contour = cv2.fillPoly(img_contour, pts=[contours_imgs[i]], color=(255, 255, 255)) @@ -1297,12 +1279,10 @@ def separate_lines_vertical_cont(img_patch, contour_text_interest, thetha, box_i ##0] ##contour_text_copy[:, 0, 1] = contour_text_copy[:, 0, 1] - box_ind[1] ##if add_boxes_coor_into_textlines: - ##print(np.shape(contours_text_rot[0]),'sjppo') ##contours_text_rot[0][:, 0, 0]=contours_text_rot[0][:, 0, 0] + box_ind[0] ##contours_text_rot[0][:, 0, 1]=contours_text_rot[0][:, 0, 1] + box_ind[1] cont_final.append(contours_text_rot[0]) - ##print(cont_final,'nadizzzz') return None, cont_final def textline_contours_postprocessing(textline_mask, slope, contour_text_interest, box_ind, add_boxes_coor_into_textlines=False): @@ -1313,20 +1293,7 @@ def textline_contours_postprocessing(textline_mask, slope, contour_text_interest textline_mask = cv2.morphologyEx(textline_mask, cv2.MORPH_CLOSE, kernel) textline_mask = cv2.erode(textline_mask, kernel, iterations=2) # textline_mask = cv2.erode(textline_mask, kernel, iterations=1) - - # print(textline_mask.shape[0]/float(textline_mask.shape[1]),'miz') try: - # if np.abs(slope)>.5 and textline_mask.shape[0]/float(textline_mask.shape[1])>3: - # plt.imshow(textline_mask) - # plt.show() - - # if abs(slope)>1: - # x_help=30 - # y_help=2 - # else: - # x_help=2 - # y_help=2 - x_help = 30 y_help = 2 @@ -1350,28 +1317,12 @@ def textline_contours_postprocessing(textline_mask, slope, contour_text_interest img_contour = np.zeros((box_ind[3], box_ind[2], 3)) img_contour = cv2.fillPoly(img_contour, pts=[contour_text_copy], color=(255, 255, 255)) - # if np.abs(slope)>.5 and textline_mask.shape[0]/float(textline_mask.shape[1])>3: - # plt.imshow(img_contour) - # plt.show() - img_contour_help = np.zeros((img_contour.shape[0] + int(2 * y_help), img_contour.shape[1] + int(2 * x_help), 3)) img_contour_help[y_help : y_help + img_contour.shape[0], x_help : x_help + img_contour.shape[1], :] = np.copy(img_contour[:, :, :]) img_contour_rot = rotate_image(img_contour_help, slope) - # plt.imshow(img_contour_rot_help) - # plt.show() - - # plt.imshow(dst_help) - # plt.show() - - # if np.abs(slope)>.5 and textline_mask.shape[0]/float(textline_mask.shape[1])>3: - # plt.imshow(img_contour_rot_help) - # plt.show() - - # plt.imshow(dst_help) - # plt.show() img_contour_rot = img_contour_rot.astype(np.uint8) # dst_help = dst_help.astype(np.uint8) @@ -1382,9 +1333,7 @@ def textline_contours_postprocessing(textline_mask, slope, contour_text_interest len_con_text_rot = [len(contours_text_rot[ib]) for ib in range(len(contours_text_rot))] ind_big_con = np.argmax(len_con_text_rot) - # print('juzaa') if abs(slope) > 45: - # print(add_boxes_coor_into_textlines,'avval') _, contours_rotated_clean = separate_lines_vertical_cont( textline_mask, contours_text_rot[ind_big_con], box_ind, slope, add_boxes_coor_into_textlines=add_boxes_coor_into_textlines) @@ -1416,7 +1365,6 @@ def separate_lines_new2(img_path, thetha, num_col, slope_region, logger=None, pl length_x = int(img_path.shape[1] / float(num_patches)) # margin = int(0.04 * length_x) just recently this was changed because it break lines into 2 margin = int(0.04 * length_x) - # print(margin,'margin') # if margin<=4: # margin = int(0.08 * length_x) # margin=0 @@ -1456,11 +1404,9 @@ def separate_lines_new2(img_path, thetha, num_col, slope_region, logger=None, pl # if abs(slope_region)>70 and abs(slope_xline)<25: # slope_xline=[slope_region][0] slopes_tile_wise.append(slope_xline) - # print(slope_xline,'xlineeee') img_line_rotated = rotate_image(img_xline, slope_xline) img_line_rotated[:, :][img_line_rotated[:, :] != 0] = 1 - - # print(slopes_tile_wise,'slopes_tile_wise') + img_patch_ineterst = img_path[:, :] # [peaks_neg_true[14]-dis_up:peaks_neg_true[14]+dis_down ,:] img_patch_ineterst_revised = np.zeros(img_patch_ineterst.shape) @@ -1502,8 +1448,6 @@ def separate_lines_new2(img_path, thetha, num_col, slope_region, logger=None, pl img_patch_separated_returned_true_size = img_patch_separated_returned_true_size[:, margin : length_x - margin] img_patch_ineterst_revised[:, index_x_d + margin : index_x_u - margin] = img_patch_separated_returned_true_size - # plt.imshow(img_patch_ineterst_revised) - # plt.show() return img_patch_ineterst_revised def do_image_rotation(angle, img, sigma_des, logger=None): @@ -1536,20 +1480,13 @@ def return_deskew_slop(img_patch_org, sigma_des,n_tot_angles=100, #img_resized[ int( img_int.shape[0]*(.4)):int( img_int.shape[0]*(.4))+img_int.shape[0] , int( img_int.shape[1]*(.8)):int( img_int.shape[1]*(.8))+img_int.shape[1] ]=img_int[:,:] img_resized[ onset_y:onset_y+img_int.shape[0] , onset_x:onset_x+img_int.shape[1] ]=img_int[:,:] - #print(img_resized.shape,'img_resizedshape') - #plt.imshow(img_resized) - #plt.show() if main_page and img_patch_org.shape[1] > img_patch_org.shape[0]: - #plt.imshow(img_resized) - #plt.show() angles = np.array([-45, 0, 45, 90,]) angle = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) angles = np.linspace(angle - 22.5, angle + 22.5, n_tot_angles) angle = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) elif main_page: - #plt.imshow(img_resized) - #plt.show() angles = np.linspace(-12, 12, n_tot_angles)#np.array([0 , 45 , 90 , -45]) angle = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) @@ -1620,7 +1557,6 @@ def do_work_of_slopes_new( textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con, hierarchy, max_area=1, min_area=0.00008) - y_diff_mean = find_contours_mean_y_diff(textline_con_fil) if len(textline_con_fil) > 1 else np.NaN if np.isnan(y_diff_mean): slope_for_all = MAX_SLOPE @@ -1637,12 +1573,9 @@ def do_work_of_slopes_new( if slope_for_all == MAX_SLOPE: slope_for_all = slope_deskew slope = slope_for_all - mask_only_con_region = np.zeros(textline_mask_tot_ea.shape) mask_only_con_region = cv2.fillPoly(mask_only_con_region, pts=[contour_par], color=(1, 1, 1)) - # plt.imshow(mask_only_con_region) - # plt.show() all_text_region_raw = textline_mask_tot_ea[y: y + h, x: x + w].copy() mask_only_con_region = mask_only_con_region[y: y + h, x: x + w] @@ -1706,20 +1639,15 @@ def do_work_of_slopes_new_curved( mask_region_in_patch_region = mask_biggest[y : y + h, x : x + w] textline_biggest_region = mask_biggest * textline_mask_tot_ea - # print(slope_for_all,'slope_for_all') textline_rotated_separated = separate_lines_new2(textline_biggest_region[y: y+h, x: x+w], 0, num_col, slope_for_all, logger=logger, plotter=plotter) - # new line added - ##print(np.shape(textline_rotated_separated),np.shape(mask_biggest)) + textline_rotated_separated[mask_region_in_patch_region[:, :] != 1] = 0 - # till here textline_region_in_image[y : y + h, x : x + w] = textline_rotated_separated - # plt.imshow(textline_region_in_image) - # plt.show() pixel_img = 1 cnt_textlines_in_image = return_contours_of_interested_textline(textline_region_in_image, pixel_img) @@ -1742,7 +1670,6 @@ def do_work_of_slopes_new_curved( logger.error(why) else: textlines_cnt_per_region = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contour_par, box_text, True) - # print(np.shape(textlines_cnt_per_region),'textlines_cnt_per_region') return textlines_cnt_per_region[::-1], box_text, contour, contour_par, crop_coor, index_r_con, slope From b227736094e33e2ba8cd6446eb9c0b46c006c10f Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 30 Apr 2025 16:04:34 +0200 Subject: [PATCH 112/492] Fix OCR text cleaning to correctly handle 'U', 'K', and 'N' starting sentence; update text line splitting size --- src/eynollah/eynollah.py | 62 ++++++++++++++++++++++++++++------------ 1 file changed, 43 insertions(+), 19 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 022cf0a..a94e890 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -259,7 +259,7 @@ class Eynollah: self.model_region_dir_p_ens = dir_models + "/eynollah-main-regions-ensembled_20210425" self.model_region_dir_p_ens_light = dir_models + "/eynollah-main-regions_20220314" self.model_region_dir_p_ens_light_only_images_extraction = dir_models + "/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18" - self.model_reading_order_dir = dir_models + "/model_ens_reading_order_machine_based" + self.model_reading_order_dir = dir_models + "/model_mb_ro_aug_3"#"/model_ens_reading_order_machine_based" #"/modelens_12sp_elay_0_3_4__3_6_n" #"/modelens_earlylayout_12spaltige_2_3_5_6_7_8" #"/modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18" @@ -3320,12 +3320,22 @@ class Eynollah: def do_order_of_regions_with_model(self, contours_only_text_parent, contours_only_text_parent_h, text_regions_p): y_len = text_regions_p.shape[0] x_len = text_regions_p.shape[1] + img_poly = np.zeros((y_len,x_len), dtype='uint8') img_poly[text_regions_p[:,:]==1] = 1 img_poly[text_regions_p[:,:]==2] = 2 img_poly[text_regions_p[:,:]==3] = 4 img_poly[text_regions_p[:,:]==6] = 5 + + + #temp + sep_mask = (img_poly==5)*1 + sep_mask = sep_mask.astype('uint8') + sep_mask = cv2.erode(sep_mask, kernel=KERNEL, iterations=2) + img_poly[img_poly==5] = 0 + img_poly[sep_mask==1] = 5 + # img_header_and_sep = np.zeros((y_len,x_len), dtype='uint8') if contours_only_text_parent_h: @@ -3341,9 +3351,13 @@ class Eynollah: if not len(co_text_all): return [], [] - labels_con = np.zeros((y_len, x_len, len(co_text_all)), dtype=bool) + labels_con = np.zeros((int(y_len /6.), int(x_len/6.), len(co_text_all)), dtype=bool) + co_text_all = [(i/6).astype(int) for i in co_text_all] for i in range(len(co_text_all)): img = labels_con[:,:,i].astype(np.uint8) + + #img = cv2.resize(img, (int(img.shape[1]/6), int(img.shape[0]/6)), interpolation=cv2.INTER_NEAREST) + cv2.fillPoly(img, pts=[co_text_all[i]], color=(1,)) labels_con[:,:,i] = img @@ -3359,6 +3373,7 @@ class Eynollah: labels_con = resize_image(labels_con.astype(np.uint8), height1, width1).astype(bool) img_header_and_sep = resize_image(img_header_and_sep, height1, width1) img_poly = resize_image(img_poly, height3, width3) + inference_bs = 3 input_1 = np.zeros((inference_bs, height1, width1, 3)) @@ -4575,10 +4590,6 @@ class Eynollah: return pcgts - ## check the ro order - - - #print("text region early 3 in %.1fs", time.time() - t0) if self.light_version: @@ -4886,7 +4897,7 @@ class Eynollah_ocr: self.model_ocr.to(self.device) else: - self.model_ocr_dir = dir_models + "/model_step_75000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" + self.model_ocr_dir = dir_models + "/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" model_ocr = load_model(self.model_ocr_dir , compile=False) self.prediction_model = tf.keras.models.Model( @@ -4974,7 +4985,7 @@ class Eynollah_ocr: def return_start_and_end_of_common_text_of_textline_ocr_without_common_section(self, textline_image): width = np.shape(textline_image)[1] height = np.shape(textline_image)[0] - common_window = int(0.06*width) + common_window = int(0.22*width) width1 = int ( width/2. - common_window ) width2 = int ( width/2. + common_window ) @@ -4984,13 +4995,17 @@ class Eynollah_ocr: peaks_real, _ = find_peaks(sum_smoothed, height=0) - if len(peaks_real)>70: + if len(peaks_real)>35: - peaks_real = peaks_real[(peaks_realwidth1)] + #peaks_real = peaks_real[(peaks_realwidth1)] + argsort = np.argsort(sum_smoothed[peaks_real])[::-1] + peaks_real_top_six = peaks_real[argsort[:6]] + midpoint = textline_image.shape[1] / 2. + arg_closest = np.argmin(np.abs(peaks_real_top_six - midpoint)) - arg_max = np.argmax(sum_smoothed[peaks_real]) + #arg_max = np.argmax(sum_smoothed[peaks_real]) - peaks_final = peaks_real[arg_max] + peaks_final = peaks_real_top_six[arg_closest]#peaks_real[arg_max] return peaks_final else: @@ -5038,10 +5053,19 @@ class Eynollah_ocr: if width_new == 0: width_new = img.shape[1] + + ##if width_new+32 >= image_width: + ##width_new = width_new - 32 + + ###patch_zero = np.zeros((32, 32, 3))#+255 + ###patch_zero[9:19,8:18,:] = 0 + img = resize_image(img, image_height, width_new) img_fin = np.ones((image_height, image_width, 3))*255 - img_fin[:,:+width_new,:] = img[:,:,:] + ###img_fin[:,:32,:] = patch_zero[:,:,:] + ###img_fin[:,32:32+width_new,:] = img[:,:,:] + img_fin[:,:width_new,:] = img[:,:,:] img_fin = img_fin / 255. return img_fin @@ -5097,7 +5121,7 @@ class Eynollah_ocr: img_crop = img_poly_on_img[y:y+h, x:x+w, :] img_crop[mask_poly==0] = 255 - if h2w_ratio > 0.05: + if h2w_ratio > 0.1: cropped_lines.append(img_crop) cropped_lines_meging_indexing.append(0) else: @@ -5234,7 +5258,7 @@ class Eynollah_ocr: if self.draw_texts_on_image: total_bb_coordinates.append([x,y,w,h]) - h2w_ratio = h/float(w) + w_scaled = w * image_height/float(h) img_poly_on_img = np.copy(img) if self.prediction_with_both_of_rgb_and_bin: @@ -5252,7 +5276,7 @@ class Eynollah_ocr: img_crop_bin[mask_poly==0] = 255 if not self.export_textline_images_and_text: - if h2w_ratio > 0.1: + if w_scaled < 1.5*image_width: img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) cropped_lines.append(img_fin) cropped_lines_meging_indexing.append(0) @@ -5334,11 +5358,11 @@ class Eynollah_ocr: if self.prediction_with_both_of_rgb_and_bin: preds_bin = self.prediction_model.predict(imgs_bin, verbose=0) preds = (preds + preds_bin) / 2. - + pred_texts = self.decode_batch_predictions(preds) for ib in range(imgs.shape[0]): - pred_texts_ib = pred_texts[ib].strip("[UNK]") + pred_texts_ib = pred_texts[ib].replace("[UNK]", "") extracted_texts.append(pred_texts_ib) extracted_texts_merged = [extracted_texts[ind] if cropped_lines_meging_indexing[ind]==0 else extracted_texts[ind]+" "+extracted_texts[ind+1] if cropped_lines_meging_indexing[ind]==1 else None for ind in range(len(cropped_lines_meging_indexing))] @@ -5378,7 +5402,7 @@ class Eynollah_ocr: text_by_textregion = [] for ind in unique_cropped_lines_region_indexer: extracted_texts_merged_un = np.array(extracted_texts_merged)[np.array(cropped_lines_region_indexer)==ind] - text_by_textregion.append(" ".join(extracted_texts_merged_un)) + text_by_textregion.append("".join(extracted_texts_merged_un)) indexer = 0 indexer_textregion = 0 From e2da7a623987f9c693957512f7902947699389ff Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 30 Apr 2025 16:06:29 +0200 Subject: [PATCH 113/492] Fix model name to return the correct machine-based model name --- src/eynollah/eynollah.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index a94e890..d47016b 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -259,7 +259,7 @@ class Eynollah: self.model_region_dir_p_ens = dir_models + "/eynollah-main-regions-ensembled_20210425" self.model_region_dir_p_ens_light = dir_models + "/eynollah-main-regions_20220314" self.model_region_dir_p_ens_light_only_images_extraction = dir_models + "/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18" - self.model_reading_order_dir = dir_models + "/model_mb_ro_aug_3"#"/model_ens_reading_order_machine_based" + self.model_reading_order_dir = dir_models + "/model_ens_reading_order_machine_based" #"/modelens_12sp_elay_0_3_4__3_6_n" #"/modelens_earlylayout_12spaltige_2_3_5_6_7_8" #"/modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18" From f8b4d29a59098f8a82e90b2790015568841bc53f Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Fri, 2 May 2025 00:13:11 +0200 Subject: [PATCH 114/492] docker: prepackage ocrd-all-module-dir.json --- Dockerfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Dockerfile b/Dockerfile index 4785fc1..4ba498b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -36,6 +36,8 @@ COPY . . COPY ocrd-tool.json . # prepackage ocrd-tool.json as ocrd-all-tool.json RUN ocrd ocrd-tool ocrd-tool.json dump-tools > $(dirname $(ocrd bashlib filename))/ocrd-all-tool.json +# prepackage ocrd-all-module-dir.json +RUN ocrd ocrd-tool ocrd-tool.json dump-module-dirs > $(dirname $(ocrd bashlib filename))/ocrd-all-module-dir.json # install everything and reduce image size RUN make install EXTRAS=OCR && rm -rf /build/eynollah # smoke test From e9179e1d3458c9261989cf996863881f03a24ebd Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Fri, 2 May 2025 00:13:06 +0200 Subject: [PATCH 115/492] docker: use latest core base stage --- Makefile | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 5f2bf34..73d4d34 100644 --- a/Makefile +++ b/Makefile @@ -3,8 +3,9 @@ PIP ?= pip3 EXTRAS ?= # DOCKER_BASE_IMAGE = artefakt.dev.sbb.berlin:5000/sbb/ocrd_core:v2.68.0 -DOCKER_BASE_IMAGE = docker.io/ocrd/core-cuda-tf2:v3.3.0 -DOCKER_TAG = ocrd/eynollah +DOCKER_BASE_IMAGE ?= docker.io/ocrd/core-cuda-tf2:latest +DOCKER_TAG ?= ocrd/eynollah +DOCKER ?= docker #SEG_MODEL := https://qurator-data.de/eynollah/2021-04-25/models_eynollah.tar.gz #SEG_MODEL := https://qurator-data.de/eynollah/2022-04-05/models_eynollah_renamed.tar.gz @@ -117,7 +118,7 @@ coverage: # Build docker image docker: - docker build \ + $(DOCKER) build \ --build-arg DOCKER_BASE_IMAGE=$(DOCKER_BASE_IMAGE) \ --build-arg VCS_REF=$$(git rev-parse --short HEAD) \ --build-arg BUILD_DATE=$$(date -u +"%Y-%m-%dT%H:%M:%SZ") \ From 184af46664ac05a780f2cad07cc950bb594d9352 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 2 May 2025 00:30:36 +0200 Subject: [PATCH 116/492] displaying detexted text on an image is provided for trocr case --- src/eynollah/eynollah.py | 55 +++++++++++++++++++++++++++++++++------- 1 file changed, 46 insertions(+), 9 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index d47016b..5793d37 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -259,7 +259,7 @@ class Eynollah: self.model_region_dir_p_ens = dir_models + "/eynollah-main-regions-ensembled_20210425" self.model_region_dir_p_ens_light = dir_models + "/eynollah-main-regions_20220314" self.model_region_dir_p_ens_light_only_images_extraction = dir_models + "/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18" - self.model_reading_order_dir = dir_models + "/model_ens_reading_order_machine_based" + self.model_reading_order_dir = dir_models + "/model_mb_ro_aug_2"#"/model_ens_reading_order_machine_based" #"/modelens_12sp_elay_0_3_4__3_6_n" #"/modelens_earlylayout_12spaltige_2_3_5_6_7_8" #"/modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18" @@ -1221,7 +1221,7 @@ class Eynollah: seg_art[seg_art>0] =1 seg_line = label_p_pred[:,:,:,3] - seg_line[seg_line>0.1] =1 + seg_line[seg_line>0.5] =1#seg_line[seg_line>0.1] =1 seg_line[seg_line<1] =0 seg[seg_art==1]=4 @@ -3329,13 +3329,13 @@ class Eynollah: img_poly[text_regions_p[:,:]==6] = 5 - #temp - sep_mask = (img_poly==5)*1 - sep_mask = sep_mask.astype('uint8') - sep_mask = cv2.erode(sep_mask, kernel=KERNEL, iterations=2) - img_poly[img_poly==5] = 0 - img_poly[sep_mask==1] = 5 - # + ###temp + ##sep_mask = (img_poly==5)*1 + ##sep_mask = sep_mask.astype('uint8') + ##sep_mask = cv2.erode(sep_mask, kernel=KERNEL, iterations=2) + ##img_poly[img_poly==5] = 0 + ##img_poly[sep_mask==1] = 5 + ### img_header_and_sep = np.zeros((y_len,x_len), dtype='uint8') if contours_only_text_parent_h: @@ -5081,6 +5081,12 @@ class Eynollah_ocr: dir_xml = os.path.join(self.dir_xmls, file_name+'.xml') out_file_ocr = os.path.join(self.dir_out, file_name+'.xml') img = cv2.imread(dir_img) + + if self.draw_texts_on_image: + out_image_with_text = os.path.join(self.dir_out_image_text, file_name+'.png') + image_text = Image.new("RGB", (img.shape[1], img.shape[0]), "white") + draw = ImageDraw.Draw(image_text) + total_bb_coordinates = [] ##file_name = Path(dir_xmls).stem tree1 = ET.parse(dir_xml, parser = ET.XMLParser(encoding="utf-8")) @@ -5111,6 +5117,9 @@ class Eynollah_ocr: textline_coords = np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) x,y,w,h = cv2.boundingRect(textline_coords) + if self.draw_texts_on_image: + total_bb_coordinates.append([x,y,w,h]) + h2w_ratio = h/float(w) img_poly_on_img = np.copy(img) @@ -5161,6 +5170,34 @@ class Eynollah_ocr: #print(extracted_texts_merged, len(extracted_texts_merged)) unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) + + if self.draw_texts_on_image: + + font_path = "NotoSans-Regular.ttf" # Make sure this file exists! + font = ImageFont.truetype(font_path, 40) + + for indexer_text, bb_ind in enumerate(total_bb_coordinates): + + + x_bb = bb_ind[0] + y_bb = bb_ind[1] + w_bb = bb_ind[2] + h_bb = bb_ind[3] + + font = self.fit_text_single_line(draw, extracted_texts_merged[indexer_text], font_path, w_bb, int(h_bb*0.4) ) + + ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) + + text_bbox = draw.textbbox((0, 0), extracted_texts_merged[indexer_text], font=font) + text_width = text_bbox[2] - text_bbox[0] + text_height = text_bbox[3] - text_bbox[1] + + text_x = x_bb + (w_bb - text_width) // 2 # Center horizontally + text_y = y_bb + (h_bb - text_height) // 2 # Center vertically + + # Draw the text + draw.text((text_x, text_y), extracted_texts_merged[indexer_text], fill="black", font=font) + image_text.save(out_image_with_text) #print(len(unique_cropped_lines_region_indexer), 'unique_cropped_lines_region_indexer') text_by_textregion = [] From 5d8c864c0881256d16f8484d01f8e1f34fdad254 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 2 May 2025 01:02:32 +0200 Subject: [PATCH 117/492] adding space between splitted textline predicted text in the case of trocr --- src/eynollah/eynollah.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 5793d37..d148c67 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5164,7 +5164,7 @@ class Eynollah_ocr: extracted_texts = extracted_texts + generated_text_merged - extracted_texts_merged = [extracted_texts[ind] if cropped_lines_meging_indexing[ind]==0 else extracted_texts[ind]+extracted_texts[ind+1] if cropped_lines_meging_indexing[ind]==1 else None for ind in range(len(cropped_lines_meging_indexing))] + extracted_texts_merged = [extracted_texts[ind] if cropped_lines_meging_indexing[ind]==0 else extracted_texts[ind]+" "+extracted_texts[ind+1] if cropped_lines_meging_indexing[ind]==1 else None for ind in range(len(cropped_lines_meging_indexing))] extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] #print(extracted_texts_merged, len(extracted_texts_merged)) From a1a004b19da5dfc828fa077f25b10c72722f71c8 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 2 May 2025 12:53:33 +0200 Subject: [PATCH 118/492] inference batch size for ocr is passed as an argument --- src/eynollah/cli.py | 8 +++++- src/eynollah/eynollah.py | 53 ++++++++++++++++++++++++++-------------- 2 files changed, 41 insertions(+), 20 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index c189aca..56d5d7e 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -374,6 +374,11 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ is_flag=True, help="If this parameter is set to True, the prediction will be performed using both RGB and binary images. However, this does not necessarily improve results; it may be beneficial for certain document images.", ) +@click.option( + "--batch_size", + "-bs", + help="number of inference batch size. Default b_s for trocr and cnn_rnn models are 2 and 8 respectively", +) @click.option( "--log_level", "-l", @@ -381,7 +386,7 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ help="Override log level globally to this", ) -def ocr(dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, draw_texts_on_image, prediction_with_both_of_rgb_and_bin, log_level): +def ocr(dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, draw_texts_on_image, prediction_with_both_of_rgb_and_bin, batch_size, log_level): initLogging() if log_level: getLogger('eynollah').setLevel(getLevelName(log_level)) @@ -397,6 +402,7 @@ def ocr(dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, tr_ocr, ex do_not_mask_with_textline_contour=do_not_mask_with_textline_contour, draw_texts_on_image=draw_texts_on_image, prediction_with_both_of_rgb_and_bin=prediction_with_both_of_rgb_and_bin, + batch_size=batch_size, ) eynollah_ocr.run() diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index d148c67..62026bf 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4872,6 +4872,7 @@ class Eynollah_ocr: dir_out=None, dir_out_image_text=None, tr_ocr=False, + batch_size=None, export_textline_images_and_text=False, do_not_mask_with_textline_contour=False, draw_texts_on_image=False, @@ -4895,6 +4896,10 @@ class Eynollah_ocr: self.model_ocr_dir = dir_models + "/trocr_model_ens_of_3_checkpoints_201124" self.model_ocr = VisionEncoderDecoderModel.from_pretrained(self.model_ocr_dir) self.model_ocr.to(self.device) + if not batch_size: + self.b_s = 2 + else: + self.b_s = int(batch_size) else: self.model_ocr_dir = dir_models + "/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" @@ -4903,6 +4908,10 @@ class Eynollah_ocr: self.prediction_model = tf.keras.models.Model( model_ocr.get_layer(name = "image").input, model_ocr.get_layer(name = "dense2").output) + if not batch_size: + self.b_s = 8 + else: + self.b_s = int(batch_size) with open(os.path.join(self.model_ocr_dir, "characters_org.txt"),"r") as config_file: @@ -4918,6 +4927,7 @@ class Eynollah_ocr: self.num_to_char = StringLookup( vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True ) + def decode_batch_predictions(self, pred, max_len = 128): # input_len is the product of the batch size and the @@ -5073,10 +5083,9 @@ class Eynollah_ocr: ls_imgs = os.listdir(self.dir_in) if self.tr_ocr: - b_s = 2 + tr_ocr_input_height_and_width = 384 for ind_img in ls_imgs: - t0 = time.time() - file_name = ind_img.split('.')[0] + file_name = Path(ind_img).stem dir_img = os.path.join(self.dir_in, ind_img) dir_xml = os.path.join(self.dir_xmls, file_name+'.xml') out_file_ocr = os.path.join(self.dir_out, file_name+'.xml') @@ -5131,15 +5140,15 @@ class Eynollah_ocr: img_crop[mask_poly==0] = 255 if h2w_ratio > 0.1: - cropped_lines.append(img_crop) + cropped_lines.append(resize_image(img_crop, tr_ocr_input_height_and_width, tr_ocr_input_height_and_width) ) cropped_lines_meging_indexing.append(0) else: splited_images, _ = self.return_textlines_split_if_needed(img_crop, None) #print(splited_images) if splited_images: - cropped_lines.append(splited_images[0]) + cropped_lines.append(resize_image(splited_images[0], tr_ocr_input_height_and_width, tr_ocr_input_height_and_width)) cropped_lines_meging_indexing.append(1) - cropped_lines.append(splited_images[1]) + cropped_lines.append(resize_image(splited_images[1], tr_ocr_input_height_and_width, tr_ocr_input_height_and_width)) cropped_lines_meging_indexing.append(-1) else: cropped_lines.append(img_crop) @@ -5148,21 +5157,24 @@ class Eynollah_ocr: extracted_texts = [] - n_iterations = math.ceil(len(cropped_lines) / b_s) + n_iterations = math.ceil(len(cropped_lines) / self.b_s) for i in range(n_iterations): if i==(n_iterations-1): - n_start = i*b_s + n_start = i*self.b_s imgs = cropped_lines[n_start:] else: - n_start = i*b_s - n_end = (i+1)*b_s + n_start = i*self.b_s + n_end = (i+1)*self.b_s imgs = cropped_lines[n_start:n_end] pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values generated_ids_merged = self.model_ocr.generate(pixel_values_merged.to(self.device)) generated_text_merged = self.processor.batch_decode(generated_ids_merged, skip_special_tokens=True) extracted_texts = extracted_texts + generated_text_merged + + del cropped_lines + gc.collect() extracted_texts_merged = [extracted_texts[ind] if cropped_lines_meging_indexing[ind]==0 else extracted_texts[ind]+" "+extracted_texts[ind+1] if cropped_lines_meging_indexing[ind]==1 else None for ind in range(len(cropped_lines_meging_indexing))] @@ -5241,14 +5253,12 @@ class Eynollah_ocr: padding_token = 299 image_width = 512#max_len * 4 image_height = 32 - b_s = 8 img_size=(image_width, image_height) for ind_img in ls_imgs: - t0 = time.time() - file_name = ind_img.split('.')[0] + file_name = Path(ind_img).stem dir_img = os.path.join(self.dir_in, ind_img) dir_xml = os.path.join(self.dir_xmls, file_name+'.xml') out_file_ocr = os.path.join(self.dir_out, file_name+'.xml') @@ -5368,11 +5378,11 @@ class Eynollah_ocr: if not self.export_textline_images_and_text: extracted_texts = [] - n_iterations = math.ceil(len(cropped_lines) / b_s) + n_iterations = math.ceil(len(cropped_lines) / self.b_s) for i in range(n_iterations): if i==(n_iterations-1): - n_start = i*b_s + n_start = i*self.b_s imgs = cropped_lines[n_start:] imgs = np.array(imgs) imgs = imgs.reshape(imgs.shape[0], image_height, image_width, 3) @@ -5381,14 +5391,14 @@ class Eynollah_ocr: imgs_bin = np.array(imgs_bin) imgs_bin = imgs_bin.reshape(imgs_bin.shape[0], image_height, image_width, 3) else: - n_start = i*b_s - n_end = (i+1)*b_s + n_start = i*self.b_s + n_end = (i+1)*self.b_s imgs = cropped_lines[n_start:n_end] - imgs = np.array(imgs).reshape(b_s, image_height, image_width, 3) + imgs = np.array(imgs).reshape(self.b_s, image_height, image_width, 3) if self.prediction_with_both_of_rgb_and_bin: imgs_bin = cropped_lines_bin[n_start:n_end] - imgs_bin = np.array(imgs_bin).reshape(b_s, image_height, image_width, 3) + imgs_bin = np.array(imgs_bin).reshape(self.b_s, image_height, image_width, 3) preds = self.prediction_model.predict(imgs, verbose=0) @@ -5402,6 +5412,11 @@ class Eynollah_ocr: pred_texts_ib = pred_texts[ib].replace("[UNK]", "") extracted_texts.append(pred_texts_ib) + del cropped_lines + if self.prediction_with_both_of_rgb_and_bin: + del cropped_lines_bin + gc.collect() + extracted_texts_merged = [extracted_texts[ind] if cropped_lines_meging_indexing[ind]==0 else extracted_texts[ind]+" "+extracted_texts[ind+1] if cropped_lines_meging_indexing[ind]==1 else None for ind in range(len(cropped_lines_meging_indexing))] extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] From 48e8dd4ab3ac9238fff0f1a7147ecfff9dab23e9 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 2 May 2025 12:57:26 +0200 Subject: [PATCH 119/492] machine based model name changed to public one --- src/eynollah/eynollah.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 62026bf..cc1f766 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -259,7 +259,7 @@ class Eynollah: self.model_region_dir_p_ens = dir_models + "/eynollah-main-regions-ensembled_20210425" self.model_region_dir_p_ens_light = dir_models + "/eynollah-main-regions_20220314" self.model_region_dir_p_ens_light_only_images_extraction = dir_models + "/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18" - self.model_reading_order_dir = dir_models + "/model_mb_ro_aug_2"#"/model_ens_reading_order_machine_based" + self.model_reading_order_dir = dir_models + "/model_ens_reading_order_machine_based" #"/modelens_12sp_elay_0_3_4__3_6_n" #"/modelens_earlylayout_12spaltige_2_3_5_6_7_8" #"/modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18" From 89aa5450491d84d816e68de5603018c0b820eedb Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Sat, 3 May 2025 02:59:16 +0200 Subject: [PATCH 120/492] let to add dataset abbrevation to extracted textline images and text --- src/eynollah/cli.py | 17 +++++++- src/eynollah/eynollah.py | 91 ++++++++++++++++++++++++---------------- 2 files changed, 71 insertions(+), 37 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 56d5d7e..7d08ac8 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -342,7 +342,6 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ "-m", help="directory of models", type=click.Path(exists=True, file_okay=False), - required=True, ) @click.option( "--tr_ocr", @@ -379,6 +378,11 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ "-bs", help="number of inference batch size. Default b_s for trocr and cnn_rnn models are 2 and 8 respectively", ) +@click.option( + "--dataset_abbrevation", + "-ds_pref", + help="in the case of extracting textline and text from a xml GT file user can add an abbrevation of dataset name to generated dataset", +) @click.option( "--log_level", "-l", @@ -386,10 +390,18 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ help="Override log level globally to this", ) -def ocr(dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, draw_texts_on_image, prediction_with_both_of_rgb_and_bin, batch_size, log_level): +def ocr(dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, draw_texts_on_image, prediction_with_both_of_rgb_and_bin, batch_size, dataset_abbrevation, log_level): initLogging() if log_level: getLogger('eynollah').setLevel(getLevelName(log_level)) + assert not export_textline_images_and_text or not tr_ocr, "Exporting textline and text -etit can not be set alongside transformer ocr -tr_ocr" + assert not export_textline_images_and_text or not model, "Exporting textline and text -etit can not be set alongside model -m" + assert not export_textline_images_and_text or not batch_size, "Exporting textline and text -etit can not be set alongside batch size -bs" + assert not export_textline_images_and_text or not dir_in_bin, "Exporting textline and text -etit can not be set alongside directory of bin images -dib" + assert not export_textline_images_and_text or not dir_out_image_text, "Exporting textline and text -etit can not be set alongside directory of images with predicted text -doit" + assert not export_textline_images_and_text or not draw_texts_on_image, "Exporting textline and text -etit can not be set alongside draw text on image -dtoi" + assert not export_textline_images_and_text or not prediction_with_both_of_rgb_and_bin, "Exporting textline and text -etit can not be set alongside prediction with both rgb and bin -brb" + eynollah_ocr = Eynollah_ocr( dir_xmls=dir_xmls, dir_out_image_text=dir_out_image_text, @@ -403,6 +415,7 @@ def ocr(dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, tr_ocr, ex draw_texts_on_image=draw_texts_on_image, prediction_with_both_of_rgb_and_bin=prediction_with_both_of_rgb_and_bin, batch_size=batch_size, + pref_of_dataset=dataset_abbrevation, ) eynollah_ocr.run() diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index cc1f766..0b15573 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4877,6 +4877,7 @@ class Eynollah_ocr: do_not_mask_with_textline_contour=False, draw_texts_on_image=False, prediction_with_both_of_rgb_and_bin=False, + pref_of_dataset = None, logger=None, ): self.dir_in = dir_in @@ -4890,43 +4891,45 @@ class Eynollah_ocr: self.draw_texts_on_image = draw_texts_on_image self.dir_out_image_text = dir_out_image_text self.prediction_with_both_of_rgb_and_bin = prediction_with_both_of_rgb_and_bin - if tr_ocr: - self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed") - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - self.model_ocr_dir = dir_models + "/trocr_model_ens_of_3_checkpoints_201124" - self.model_ocr = VisionEncoderDecoderModel.from_pretrained(self.model_ocr_dir) - self.model_ocr.to(self.device) - if not batch_size: - self.b_s = 2 + self.pref_of_dataset = pref_of_dataset + if not export_textline_images_and_text: + if tr_ocr: + self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed") + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.model_ocr_dir = dir_models + "/trocr_model_ens_of_3_checkpoints_201124" + self.model_ocr = VisionEncoderDecoderModel.from_pretrained(self.model_ocr_dir) + self.model_ocr.to(self.device) + if not batch_size: + self.b_s = 2 + else: + self.b_s = int(batch_size) + else: - self.b_s = int(batch_size) - - else: - self.model_ocr_dir = dir_models + "/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" - model_ocr = load_model(self.model_ocr_dir , compile=False) - - self.prediction_model = tf.keras.models.Model( - model_ocr.get_layer(name = "image").input, - model_ocr.get_layer(name = "dense2").output) - if not batch_size: - self.b_s = 8 - else: - self.b_s = int(batch_size) - + self.model_ocr_dir = dir_models + "/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" + model_ocr = load_model(self.model_ocr_dir , compile=False) - with open(os.path.join(self.model_ocr_dir, "characters_org.txt"),"r") as config_file: - characters = json.load(config_file) + self.prediction_model = tf.keras.models.Model( + model_ocr.get_layer(name = "image").input, + model_ocr.get_layer(name = "dense2").output) + if not batch_size: + self.b_s = 8 + else: + self.b_s = int(batch_size) - - AUTOTUNE = tf.data.AUTOTUNE + + with open(os.path.join(self.model_ocr_dir, "characters_org.txt"),"r") as config_file: + characters = json.load(config_file) - # Mapping characters to integers. - char_to_num = StringLookup(vocabulary=list(characters), mask_token=None) + + AUTOTUNE = tf.data.AUTOTUNE - # Mapping integers back to original characters. - self.num_to_char = StringLookup( - vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True - ) + # Mapping characters to integers. + char_to_num = StringLookup(vocabulary=list(characters), mask_token=None) + + # Mapping integers back to original characters. + self.num_to_char = StringLookup( + vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True + ) def decode_batch_predictions(self, pred, max_len = 128): @@ -5365,10 +5368,28 @@ class Eynollah_ocr: if cheild_text.tag.endswith("Unicode"): textline_text = cheild_text.text if textline_text: - with open(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'.txt'), 'w') as text_file: - text_file.write(textline_text) + if self.do_not_mask_with_textline_contour: + if self.pref_of_dataset: + with open(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_'+self.pref_of_dataset+'.txt'), 'w') as text_file: + text_file.write(textline_text) - cv2.imwrite(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'.png'), img_crop ) + cv2.imwrite(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_'+self.pref_of_dataset+'.png'), img_crop ) + else: + with open(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'.txt'), 'w') as text_file: + text_file.write(textline_text) + + cv2.imwrite(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'.png'), img_crop ) + else: + if self.pref_of_dataset: + with open(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_'+self.pref_of_dataset+'_masked.txt'), 'w') as text_file: + text_file.write(textline_text) + + cv2.imwrite(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_'+self.pref_of_dataset+'_masked.png'), img_crop ) + else: + with open(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_masked.txt'), 'w') as text_file: + text_file.write(textline_text) + + cv2.imwrite(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_masked.png'), img_crop ) indexer_textlines+=1 From 3b123b039c432145359f7b6a3b0d45c8669df791 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Sat, 3 May 2025 19:25:32 +0200 Subject: [PATCH 121/492] adding min_early parameter for generating training dataset for machine based reading order model --- train/generate_gt_for_training.py | 64 +++++++++++++++++++++++-------- train/gt_gen_utils.py | 13 ++++++- 2 files changed, 60 insertions(+), 17 deletions(-) diff --git a/train/generate_gt_for_training.py b/train/generate_gt_for_training.py index 9869bfa..77e9238 100644 --- a/train/generate_gt_for_training.py +++ b/train/generate_gt_for_training.py @@ -147,11 +147,20 @@ def image_enhancement(dir_imgs, dir_out_images, dir_out_labels, scales): help="min area size of regions considered for reading order training.", ) -def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, input_height, input_width, min_area_size): +@click.option( + "--min_area_early", + "-min_early", + help="If you have already generated a training dataset using a specific minimum area value and now wish to create a dataset with a smaller minimum area value, you can avoid regenerating the previous dataset by providing the earlier minimum area value. This will ensure that only the missing data is generated.", +) + +def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, input_height, input_width, min_area_size, min_area_early): xml_files_ind = os.listdir(dir_xml) input_height = int(input_height) input_width = int(input_width) min_area = float(min_area_size) + if min_area_early: + min_area_early = float(min_area_early) + indexer_start= 0#55166 max_area = 1 @@ -181,7 +190,8 @@ def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, i texts_corr_order_index_int = [int(x) for x in texts_corr_order_index] - co_text_all, texts_corr_order_index_int = filter_contours_area_of_image(img_poly, co_text_all, texts_corr_order_index_int, max_area, min_area) + co_text_all, texts_corr_order_index_int, regions_ar_less_than_early_min = filter_contours_area_of_image(img_poly, co_text_all, texts_corr_order_index_int, max_area, min_area, min_area_early) + arg_array = np.array(range(len(texts_corr_order_index_int))) @@ -195,25 +205,49 @@ def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, i labels_con[:,:,i] = img_label[:,:,0] + labels_con = resize_image(labels_con, input_height, input_width) + img_poly = resize_image(img_poly, input_height, input_width) + + for i in range(len(texts_corr_order_index_int)): for j in range(len(texts_corr_order_index_int)): if i!=j: - input_multi_visual_modal = np.zeros((input_height,input_width,3)).astype(np.int8) - final_f_name = f_name+'_'+str(indexer+indexer_start) - order_class_condition = texts_corr_order_index_int[i]-texts_corr_order_index_int[j] - if order_class_condition<0: - class_type = 1 + if regions_ar_less_than_early_min: + if regions_ar_less_than_early_min[i]==1: + input_multi_visual_modal = np.zeros((input_height,input_width,3)).astype(np.int8) + final_f_name = f_name+'_'+str(indexer+indexer_start) + order_class_condition = texts_corr_order_index_int[i]-texts_corr_order_index_int[j] + if order_class_condition<0: + class_type = 1 + else: + class_type = 0 + + input_multi_visual_modal[:,:,0] = labels_con[:,:,i] + input_multi_visual_modal[:,:,1] = img_poly[:,:,0] + input_multi_visual_modal[:,:,2] = labels_con[:,:,j] + + np.save(os.path.join(dir_out_classes,final_f_name+'_missed.npy' ), class_type) + + cv2.imwrite(os.path.join(dir_out_modal_image,final_f_name+'_missed.png' ), input_multi_visual_modal) + indexer = indexer+1 + else: - class_type = 0 + input_multi_visual_modal = np.zeros((input_height,input_width,3)).astype(np.int8) + final_f_name = f_name+'_'+str(indexer+indexer_start) + order_class_condition = texts_corr_order_index_int[i]-texts_corr_order_index_int[j] + if order_class_condition<0: + class_type = 1 + else: + class_type = 0 - input_multi_visual_modal[:,:,0] = resize_image(labels_con[:,:,i], input_height, input_width) - input_multi_visual_modal[:,:,1] = resize_image(img_poly[:,:,0], input_height, input_width) - input_multi_visual_modal[:,:,2] = resize_image(labels_con[:,:,j], input_height, input_width) + input_multi_visual_modal[:,:,0] = labels_con[:,:,i] + input_multi_visual_modal[:,:,1] = img_poly[:,:,0] + input_multi_visual_modal[:,:,2] = labels_con[:,:,j] - np.save(os.path.join(dir_out_classes,final_f_name+'.npy' ), class_type) - - cv2.imwrite(os.path.join(dir_out_modal_image,final_f_name+'.png' ), input_multi_visual_modal) - indexer = indexer+1 + np.save(os.path.join(dir_out_classes,final_f_name+'.npy' ), class_type) + + cv2.imwrite(os.path.join(dir_out_modal_image,final_f_name+'.png' ), input_multi_visual_modal) + indexer = indexer+1 @main.command() diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 753abf2..10183d6 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -51,9 +51,10 @@ def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, m jv += 1 return found_polygons_early -def filter_contours_area_of_image(image, contours, order_index, max_area, min_area): +def filter_contours_area_of_image(image, contours, order_index, max_area, min_area, min_early): found_polygons_early = list() order_index_filtered = list() + regions_ar_less_than_early_min = list() #jv = 0 for jv, c in enumerate(contours): if len(np.shape(c)) == 3: @@ -68,8 +69,16 @@ def filter_contours_area_of_image(image, contours, order_index, max_area, min_ar if area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 : found_polygons_early.append(np.array([[point] for point in polygon.exterior.coords], dtype=np.uint)) order_index_filtered.append(order_index[jv]) + if min_early: + if area < min_early * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]): # and hierarchy[0][jv][3]==-1 : + regions_ar_less_than_early_min.append(1) + else: + regions_ar_less_than_early_min.append(0) + else: + regions_ar_less_than_early_min = None + #jv += 1 - return found_polygons_early, order_index_filtered + return found_polygons_early, order_index_filtered, regions_ar_less_than_early_min def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002): From 5694d971c5c068413b0a35db1aceabd50963107d Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 5 May 2025 15:39:05 +0200 Subject: [PATCH 122/492] saving model by steps is added to reading order and pixel wise segmentation use cases training --- train/train.py | 60 ++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 51 insertions(+), 9 deletions(-) diff --git a/train/train.py b/train/train.py index 5dee567..df600a8 100644 --- a/train/train.py +++ b/train/train.py @@ -13,8 +13,29 @@ from tensorflow.keras.models import load_model from tqdm import tqdm import json from sklearn.metrics import f1_score +from tensorflow.keras.callbacks import Callback +class SaveWeightsAfterSteps(Callback): + def __init__(self, save_interval, save_path, _config): + super(SaveWeightsAfterSteps, self).__init__() + self.save_interval = save_interval + self.save_path = save_path + self.step_count = 0 + def on_train_batch_end(self, batch, logs=None): + self.step_count += 1 + + if self.step_count % self.save_interval ==0: + save_file = f"{self.save_path}/model_step_{self.step_count}" + #os.system('mkdir '+save_file) + + self.model.save(save_file) + + with open(os.path.join(os.path.join(save_path, "model_step_{self.step_count}"),"config.json"), "w") as fp: + json.dump(_config, fp) # encode dict into JSON + print(f"saved model as steps {self.step_count} to {save_file}") + + def configuration(): config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True @@ -93,7 +114,7 @@ def config_params(): f1_threshold_classification = None # This threshold is used to consider models with an evaluation f1 scores bigger than it. The selected model weights undergo a weights ensembling. And avreage ensembled model will be written to output. classification_classes_name = None # Dictionary of classification classes names. backbone_type = None # As backbone we have 2 types of backbones. A vision transformer alongside a CNN and we call it "transformer" and only CNN called "nontransformer" - + save_interval = None dir_img_bin = None number_of_backgrounds_per_image = 1 dir_rgb_backgrounds = None @@ -112,7 +133,7 @@ def run(_config, n_classes, n_epochs, input_height, thetha, scaling_flip, continue_training, transformer_projection_dim, transformer_mlp_head_units, transformer_layers, transformer_num_heads, transformer_cnn_first, transformer_patchsize_x, transformer_patchsize_y, - transformer_num_patches_xy, backbone_type, flip_index, dir_eval, dir_output, + transformer_num_patches_xy, backbone_type, save_interval, flip_index, dir_eval, dir_output, pretraining, learning_rate, task, f1_threshold_classification, classification_classes_name, dir_img_bin, number_of_backgrounds_per_image,dir_rgb_backgrounds, dir_rgb_foregrounds): if dir_rgb_backgrounds: @@ -299,13 +320,27 @@ def run(_config, n_classes, n_epochs, input_height, ##img_validation_patches = os.listdir(dir_flow_eval_imgs) ##score_best=[] ##score_best.append(0) + + if save_interval: + save_weights_callback = SaveWeightsAfterSteps(save_interval, dir_output, _config) + + for i in tqdm(range(index_start, n_epochs + index_start)): - model.fit( - train_gen, - steps_per_epoch=int(len(os.listdir(dir_flow_train_imgs)) / n_batch) - 1, - validation_data=val_gen, - validation_steps=1, - epochs=1) + if save_interval: + model.fit( + train_gen, + steps_per_epoch=int(len(os.listdir(dir_flow_train_imgs)) / n_batch) - 1, + validation_data=val_gen, + validation_steps=1, + epochs=1, callbacks=[save_weights_callback]) + else: + model.fit( + train_gen, + steps_per_epoch=int(len(os.listdir(dir_flow_train_imgs)) / n_batch) - 1, + validation_data=val_gen, + validation_steps=1, + epochs=1) + model.save(os.path.join(dir_output,'model_'+str(i))) with open(os.path.join(os.path.join(dir_output,'model_'+str(i)),"config.json"), "w") as fp: @@ -392,8 +427,15 @@ def run(_config, n_classes, n_epochs, input_height, opt_adam = tf.keras.optimizers.Adam(learning_rate=0.0001) model.compile(loss="binary_crossentropy", optimizer = opt_adam,metrics=['accuracy']) + + if save_interval: + save_weights_callback = SaveWeightsAfterSteps(save_interval, dir_output, _config) + for i in range(n_epochs): - history = model.fit(generate_arrays_from_folder_reading_order(dir_flow_train_labels, dir_flow_train_imgs, n_batch, input_height, input_width, n_classes, thetha, augmentation), steps_per_epoch=num_rows / n_batch, verbose=1) + if save_interval: + history = model.fit(generate_arrays_from_folder_reading_order(dir_flow_train_labels, dir_flow_train_imgs, n_batch, input_height, input_width, n_classes, thetha, augmentation), steps_per_epoch=num_rows / n_batch, verbose=1, callbacks=[save_weights_callback]) + else: + history = model.fit(generate_arrays_from_folder_reading_order(dir_flow_train_labels, dir_flow_train_imgs, n_batch, input_height, input_width, n_classes, thetha, augmentation), steps_per_epoch=num_rows / n_batch, verbose=1) model.save( os.path.join(dir_output,'model_'+str(i+indexer_start) )) with open(os.path.join(os.path.join(dir_output,'model_'+str(i)),"config.json"), "w") as fp: From 92954b1b7b7363f8cdae91500cf0e729c2eebc62 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 5 May 2025 16:13:38 +0200 Subject: [PATCH 123/492] resolving issued with saving model by steps --- train/train.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/train/train.py b/train/train.py index df600a8..f6a4f47 100644 --- a/train/train.py +++ b/train/train.py @@ -21,6 +21,7 @@ class SaveWeightsAfterSteps(Callback): self.save_interval = save_interval self.save_path = save_path self.step_count = 0 + self._config = _config def on_train_batch_end(self, batch, logs=None): self.step_count += 1 @@ -31,8 +32,8 @@ class SaveWeightsAfterSteps(Callback): self.model.save(save_file) - with open(os.path.join(os.path.join(save_path, "model_step_{self.step_count}"),"config.json"), "w") as fp: - json.dump(_config, fp) # encode dict into JSON + with open(os.path.join(os.path.join(self.save_path, f"model_step_{self.step_count}"),"config.json"), "w") as fp: + json.dump(self._config, fp) # encode dict into JSON print(f"saved model as steps {self.step_count} to {save_file}") From 83211ae684513ef7f50ee88e0f641702441cde1f Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 7 May 2025 12:33:03 +0200 Subject: [PATCH 124/492] In the case of skip_layout_and_reading_order, the confidence value was not set correctly, leading to an error while writing to the XML file. --- src/eynollah/eynollah.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 022cf0a..ec8d887 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4328,7 +4328,7 @@ class Eynollah: polygons_lines_xml = [] contours_tables = [] ocr_all_textlines = None - conf_contours_textregions =None + conf_contours_textregions = [0] pcgts = self.writer.build_pagexml_no_full_layout( cont_page, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, page_coord, polygons_of_images, polygons_of_marginals, From 21ec4fbfb538b40f0d06f55bf8c92f4ca2ebf10c Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 7 May 2025 14:04:01 +0200 Subject: [PATCH 125/492] The text region coordinates are now correctly written into the XML output when using the skip layout and reading order option --- src/eynollah/eynollah.py | 2 +- src/eynollah/writer.py | 30 ++++++++++++++++++++---------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index ec8d887..6da003b 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4333,7 +4333,7 @@ class Eynollah: cont_page, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, page_coord, polygons_of_images, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, - cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines, conf_contours_textregions) + cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines, conf_contours_textregions, self.skip_layout_and_reading_order) return pcgts #print("text region early -1 in %.1fs", time.time() - t0) diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index 92e353f..e589fd4 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -168,7 +168,7 @@ class EynollahXmlWriter(): with open(self.output_filename, 'w') as f: f.write(to_xml(pcgts)) - def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, found_polygons_tables, ocr_all_textlines, conf_contours_textregion): + def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, found_polygons_tables, ocr_all_textlines, conf_contours_textregion, skip_layout_reading_order=False): self.logger.debug('enter build_pagexml_no_full_layout') # create the file structure @@ -184,7 +184,7 @@ class EynollahXmlWriter(): for mm in range(len(found_polygons_text_region)): textregion = TextRegionType(id=counter.next_region_id, type_='paragraph', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord), conf=conf_contours_textregion[mm]), + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord, skip_layout_reading_order), conf=conf_contours_textregion[mm]), ) #textregion.set_conf(conf_contours_textregion[mm]) page.add_TextRegion(textregion) @@ -303,18 +303,28 @@ class EynollahXmlWriter(): return pcgts - def calculate_polygon_coords(self, contour, page_coord): + def calculate_polygon_coords(self, contour, page_coord, skip_layout_reading_order=False): self.logger.debug('enter calculate_polygon_coords') coords = '' for value_bbox in contour: - if len(value_bbox) == 2: - coords += str(int((value_bbox[0] + page_coord[2]) / self.scale_x)) - coords += ',' - coords += str(int((value_bbox[1] + page_coord[0]) / self.scale_y)) + if skip_layout_reading_order: + if len(value_bbox) == 2: + coords += str(int((value_bbox[0]) / self.scale_x)) + coords += ',' + coords += str(int((value_bbox[1]) / self.scale_y)) + else: + coords += str(int((value_bbox[0][0]) / self.scale_x)) + coords += ',' + coords += str(int((value_bbox[0][1]) / self.scale_y)) else: - coords += str(int((value_bbox[0][0] + page_coord[2]) / self.scale_x)) - coords += ',' - coords += str(int((value_bbox[0][1] + page_coord[0]) / self.scale_y)) + if len(value_bbox) == 2: + coords += str(int((value_bbox[0] + page_coord[2]) / self.scale_x)) + coords += ',' + coords += str(int((value_bbox[1] + page_coord[0]) / self.scale_y)) + else: + coords += str(int((value_bbox[0][0] + page_coord[2]) / self.scale_x)) + coords += ',' + coords += str(int((value_bbox[0][1] + page_coord[0]) / self.scale_y)) coords=coords + ' ' return coords[:-1] From 6fa766d6a566fa4660c0c7424ddebb85f1a0d0c7 Mon Sep 17 00:00:00 2001 From: johnlockejrr <16368414+johnlockejrr@users.noreply.github.com> Date: Sun, 11 May 2025 05:31:34 -0700 Subject: [PATCH 126/492] Update utils.py --- train/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train/utils.py b/train/utils.py index 3d42b64..cba20c2 100644 --- a/train/utils.py +++ b/train/utils.py @@ -667,7 +667,7 @@ def provide_patches(imgs_list_train, segs_list_train, dir_img, dir_seg, dir_flow indexer = 0 for im, seg_i in tqdm(zip(imgs_list_train, segs_list_train)): - img_name = im.split('.')[0] + img_name = os.path.splitext(im)[0] if task == "segmentation" or task == "binarization": dir_of_label_file = os.path.join(dir_seg, img_name + '.png') elif task=="enhancement": From 3a9fc0efde07a4890995adbfefc8d135e9278747 Mon Sep 17 00:00:00 2001 From: johnlockejrr <16368414+johnlockejrr@users.noreply.github.com> Date: Sun, 11 May 2025 06:09:17 -0700 Subject: [PATCH 127/492] Update utils.py Changed unsafe basename extraction: `file_name = i.split('.')[0]` to `file_name = os.path.splitext(i)[0]` and `filename = n[i].split('.')[0]` to `filename = os.path.splitext(n[i])[0]` because `"Vat.sam.2_206.jpg` -> `Vat` instead of `"Vat.sam.2_206` --- train/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/train/utils.py b/train/utils.py index cba20c2..bbe21d1 100644 --- a/train/utils.py +++ b/train/utils.py @@ -374,7 +374,7 @@ def generate_arrays_from_folder_reading_order(classes_file_dir, modal_dir, batch batchcount = 0 while True: for i in all_labels_files: - file_name = i.split('.')[0] + file_name = os.path.splitext(i)[0] img = cv2.imread(os.path.join(modal_dir,file_name+'.png')) label_class = int( np.load(os.path.join(classes_file_dir,i)) ) @@ -401,7 +401,7 @@ def data_gen(img_folder, mask_folder, batch_size, input_height, input_width, n_c for i in range(c, c + batch_size): # initially from 0 to 16, c = 0. try: - filename = n[i].split('.')[0] + filename = os.path.splitext(n[i])[0] train_img = cv2.imread(img_folder + '/' + n[i]) / 255. train_img = cv2.resize(train_img, (input_width, input_height), From c12b09a8686476291aa58231445cc535bb13b888 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 12 May 2025 00:10:18 +0200 Subject: [PATCH 128/492] I have tried to address the issues #163 and #161 . The changes have also improved marginal detection and enhanced the isolation of headers. --- requirements.txt | 1 + src/eynollah/cli.py | 14 +- src/eynollah/eynollah.py | 294 ++++++++++++++++++++++++++++++++++----- 3 files changed, 275 insertions(+), 34 deletions(-) diff --git a/requirements.txt b/requirements.txt index 9ed0584..aeffd47 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,4 +4,5 @@ numpy <1.24.0 scikit-learn >= 0.23.2 tensorflow < 2.13 numba <= 0.58.1 +scikit-image loky diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 7d08ac8..99961c9 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -235,6 +235,16 @@ def binarization(patches, model_dir, input_image, output_image, dir_in, dir_out) "-ncl", help="upper limit of columns in document image", ) +@click.option( + "--threshold_art_class_layout", + "-tharl", + help="threshold of artifical class in the case of layout detection", +) +@click.option( + "--threshold_art_class_textline", + "-thart", + help="threshold of artifical class in the case of textline detection", +) @click.option( "--skip_layout_and_reading_order", "-slro/-noslro", @@ -248,7 +258,7 @@ def binarization(patches, model_dir, input_image, output_image, dir_in, dir_out) help="Override log level globally to this", ) -def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_deskewed, save_all, extract_only_images, save_page, enable_plotting, allow_enhancement, curved_line, textline_light, full_layout, tables, right2left, input_binary, allow_scaling, headers_off, light_version, reading_order_machine_based, do_ocr, num_col_upper, num_col_lower, skip_layout_and_reading_order, ignore_page_extraction, log_level): +def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_deskewed, save_all, extract_only_images, save_page, enable_plotting, allow_enhancement, curved_line, textline_light, full_layout, tables, right2left, input_binary, allow_scaling, headers_off, light_version, reading_order_machine_based, do_ocr, num_col_upper, num_col_lower, threshold_art_class_textline, threshold_art_class_layout, skip_layout_and_reading_order, ignore_page_extraction, log_level): initLogging() if log_level: getLogger('eynollah').setLevel(getLevelName(log_level)) @@ -298,6 +308,8 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ num_col_upper=num_col_upper, num_col_lower=num_col_lower, skip_layout_and_reading_order=skip_layout_and_reading_order, + threshold_art_class_textline=threshold_art_class_textline, + threshold_art_class_layout=threshold_art_class_layout, ) if dir_in: eynollah.run(dir_in=dir_in, overwrite=overwrite) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 0b15573..0c7c5d2 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -30,7 +30,7 @@ import numpy as np from scipy.signal import find_peaks from scipy.ndimage import gaussian_filter1d from numba import cuda - +from skimage.morphology import skeletonize from ocrd import OcrdPage from ocrd_utils import getLogger, tf_disable_interactive_logs @@ -200,6 +200,8 @@ class Eynollah: do_ocr : bool = False, num_col_upper : Optional[int] = None, num_col_lower : Optional[int] = None, + threshold_art_class_layout: Optional[float] = None, + threshold_art_class_textline: Optional[float] = None, skip_layout_and_reading_order : bool = False, logger : Optional[Logger] = None, ): @@ -237,6 +239,17 @@ class Eynollah: self.num_col_lower = int(num_col_lower) else: self.num_col_lower = num_col_lower + + if threshold_art_class_layout: + self.threshold_art_class_layout = float(threshold_art_class_layout) + else: + self.threshold_art_class_layout = 0.1 + + if threshold_art_class_textline: + self.threshold_art_class_textline = float(threshold_art_class_textline) + else: + self.threshold_art_class_textline = 0.1 + self.logger = logger if logger else getLogger('eynollah') # for parallelization of CPU-intensive tasks: self.executor = ProcessPoolExecutor(max_workers=cpu_count(), timeout=1200) @@ -784,7 +797,7 @@ class Eynollah: self, patches, img, model, n_batch_inference=1, marginal_of_patch_percent=0.1, thresholding_for_some_classes_in_light_version=False, - thresholding_for_artificial_class_in_light_version=False): + thresholding_for_artificial_class_in_light_version=False, threshold_art_class_textline=0.1): self.logger.debug("enter do_prediction") img_height_model = model.layers[-1].output_shape[1] @@ -802,10 +815,13 @@ class Eynollah: if thresholding_for_artificial_class_in_light_version: seg_art = label_p_pred[0,:,:,2] - seg_art[seg_art<0.2] = 0 + seg_art[seg_art0] =1 + + skeleton_art = skeletonize(seg_art) + skeleton_art = skeleton_art*1 - seg[seg_art==1]=2 + seg[skeleton_art==1]=2 seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2) prediction_true = resize_image(seg_color, img_h_page, img_w_page).astype(np.uint8) return prediction_true @@ -896,14 +912,17 @@ class Eynollah: if thresholding_for_artificial_class_in_light_version: seg_art = label_p_pred[:,:,:,2] - seg_art[seg_art<0.2] = 0 + seg_art[seg_art0] =1 - seg[seg_art==1]=2 + ##seg[seg_art==1]=2 indexer_inside_batch = 0 for i_batch, j_batch in zip(list_i_s, list_j_s): seg_in = seg[indexer_inside_batch] + + if thresholding_for_artificial_class_in_light_version: + seg_in_art = seg_art[indexer_inside_batch] index_y_u_in = list_y_u[indexer_inside_batch] index_y_d_in = list_y_d[indexer_inside_batch] @@ -917,54 +936,107 @@ class Eynollah: seg_in[0:-margin or None, 0:-margin or None, np.newaxis] + if thresholding_for_artificial_class_in_light_version: + prediction_true[index_y_d_in + 0:index_y_u_in - margin, + index_x_d_in + 0:index_x_u_in - margin, 1] = \ + seg_in_art[0:-margin or None, + 0:-margin or None] + elif i_batch == nxf - 1 and j_batch == nyf - 1: prediction_true[index_y_d_in + margin:index_y_u_in - 0, index_x_d_in + margin:index_x_u_in - 0] = \ seg_in[margin:, margin:, np.newaxis] + if thresholding_for_artificial_class_in_light_version: + prediction_true[index_y_d_in + margin:index_y_u_in - 0, + index_x_d_in + margin:index_x_u_in - 0, 1] = \ + seg_in_art[margin:, + margin:] + elif i_batch == 0 and j_batch == nyf - 1: prediction_true[index_y_d_in + margin:index_y_u_in - 0, index_x_d_in + 0:index_x_u_in - margin] = \ seg_in[margin:, 0:-margin or None, np.newaxis] + if thresholding_for_artificial_class_in_light_version: + prediction_true[index_y_d_in + margin:index_y_u_in - 0, + index_x_d_in + 0:index_x_u_in - margin, 1] = \ + seg_in_art[margin:, + 0:-margin or None] + elif i_batch == nxf - 1 and j_batch == 0: prediction_true[index_y_d_in + 0:index_y_u_in - margin, index_x_d_in + margin:index_x_u_in - 0] = \ seg_in[0:-margin or None, margin:, np.newaxis] + if thresholding_for_artificial_class_in_light_version: + prediction_true[index_y_d_in + 0:index_y_u_in - margin, + index_x_d_in + margin:index_x_u_in - 0, 1] = \ + seg_in_art[0:-margin or None, + margin:] + elif i_batch == 0 and j_batch != 0 and j_batch != nyf - 1: prediction_true[index_y_d_in + margin:index_y_u_in - margin, index_x_d_in + 0:index_x_u_in - margin] = \ seg_in[margin:-margin or None, 0:-margin or None, np.newaxis] + if thresholding_for_artificial_class_in_light_version: + prediction_true[index_y_d_in + margin:index_y_u_in - margin, + index_x_d_in + 0:index_x_u_in - margin, 1] = \ + seg_in_art[margin:-margin or None, + 0:-margin or None] + elif i_batch == nxf - 1 and j_batch != 0 and j_batch != nyf - 1: prediction_true[index_y_d_in + margin:index_y_u_in - margin, index_x_d_in + margin:index_x_u_in - 0] = \ seg_in[margin:-margin or None, margin:, np.newaxis] + if thresholding_for_artificial_class_in_light_version: + prediction_true[index_y_d_in + margin:index_y_u_in - margin, + index_x_d_in + margin:index_x_u_in - 0, 1] = \ + seg_in_art[margin:-margin or None, + margin:] + elif i_batch != 0 and i_batch != nxf - 1 and j_batch == 0: prediction_true[index_y_d_in + 0:index_y_u_in - margin, index_x_d_in + margin:index_x_u_in - margin] = \ seg_in[0:-margin or None, margin:-margin or None, np.newaxis] + if thresholding_for_artificial_class_in_light_version: + prediction_true[index_y_d_in + 0:index_y_u_in - margin, + index_x_d_in + margin:index_x_u_in - margin, 1] = \ + seg_in_art[0:-margin or None, + margin:-margin or None] + elif i_batch != 0 and i_batch != nxf - 1 and j_batch == nyf - 1: prediction_true[index_y_d_in + margin:index_y_u_in - 0, index_x_d_in + margin:index_x_u_in - margin] = \ seg_in[margin:, margin:-margin or None, np.newaxis] + if thresholding_for_artificial_class_in_light_version: + prediction_true[index_y_d_in + margin:index_y_u_in - 0, + index_x_d_in + margin:index_x_u_in - margin, 1] = \ + seg_in_art[margin:, + margin:-margin or None] + else: prediction_true[index_y_d_in + margin:index_y_u_in - margin, index_x_d_in + margin:index_x_u_in - margin] = \ seg_in[margin:-margin or None, margin:-margin or None, np.newaxis] + if thresholding_for_artificial_class_in_light_version: + prediction_true[index_y_d_in + margin:index_y_u_in - margin, + index_x_d_in + margin:index_x_u_in - margin, 1] = \ + seg_in_art[margin:-margin or None, + margin:-margin or None] indexer_inside_batch += 1 @@ -979,6 +1051,19 @@ class Eynollah: img_patch[:] = 0 prediction_true = prediction_true.astype(np.uint8) + + if thresholding_for_artificial_class_in_light_version: + kernel_min = np.ones((3, 3), np.uint8) + prediction_true[:,:,0][prediction_true[:,:,0]==2] = 0 + + skeleton_art = skeletonize(prediction_true[:,:,1]) + skeleton_art = skeleton_art*1 + + skeleton_art = skeleton_art.astype('uint8') + + skeleton_art = cv2.dilate(skeleton_art, kernel_min, iterations=1) + + prediction_true[:,:,0][skeleton_art==1]=2 #del model gc.collect() return prediction_true @@ -1117,7 +1202,7 @@ class Eynollah: self, patches, img, model, n_batch_inference=1, marginal_of_patch_percent=0.1, thresholding_for_some_classes_in_light_version=False, - thresholding_for_artificial_class_in_light_version=False): + thresholding_for_artificial_class_in_light_version=False, threshold_art_class_textline=0.1, threshold_art_class_layout=0.1): self.logger.debug("enter do_prediction_new_concept") img_height_model = model.layers[-1].output_shape[1] @@ -1132,19 +1217,28 @@ class Eynollah: label_p_pred = model.predict(img[np.newaxis], verbose=0) seg = np.argmax(label_p_pred, axis=3)[0] - if thresholding_for_artificial_class_in_light_version: - #seg_text = label_p_pred[0,:,:,1] - #seg_text[seg_text<0.2] =0 - #seg_text[seg_text>0] =1 - #seg[seg_text==1]=1 - - seg_art = label_p_pred[0,:,:,4] - seg_art[seg_art<0.2] =0 - seg_art[seg_art>0] =1 - seg[seg_art==1]=4 - seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2) prediction_true = resize_image(seg_color, img_h_page, img_w_page).astype(np.uint8) + + if thresholding_for_artificial_class_in_light_version: + kernel_min = np.ones((3, 3), np.uint8) + seg_art = label_p_pred[0,:,:,4] + seg_art[seg_art0] =1 + #seg[seg_art==1]=4 + seg_art = resize_image(seg_art, img_h_page, img_w_page).astype(np.uint8) + + prediction_true[:,:,0][prediction_true[:,:,0]==4] = 0 + + skeleton_art = skeletonize(seg_art) + skeleton_art = skeleton_art*1 + + skeleton_art = skeleton_art.astype('uint8') + + skeleton_art = cv2.dilate(skeleton_art, kernel_min, iterations=1) + + prediction_true[:,:,0][skeleton_art==1] = 4 + return prediction_true , resize_image(label_p_pred[0, :, :, 1] , img_h_page, img_w_page) if img.shape[0] < img_height_model: @@ -1217,26 +1311,29 @@ class Eynollah: if thresholding_for_some_classes_in_light_version: seg_art = label_p_pred[:,:,:,4] - seg_art[seg_art<0.2] =0 + seg_art[seg_art0] =1 seg_line = label_p_pred[:,:,:,3] seg_line[seg_line>0.5] =1#seg_line[seg_line>0.1] =1 seg_line[seg_line<1] =0 - seg[seg_art==1]=4 + ##seg[seg_art==1]=4 seg[(seg_line==1) & (seg==0)]=3 if thresholding_for_artificial_class_in_light_version: seg_art = label_p_pred[:,:,:,2] - seg_art[seg_art<0.2] = 0 + seg_art[seg_art0] =1 - seg[seg_art==1]=2 + ##seg[seg_art==1]=2 indexer_inside_batch = 0 for i_batch, j_batch in zip(list_i_s, list_j_s): seg_in = seg[indexer_inside_batch] + + if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + seg_in_art = seg_art[indexer_inside_batch] index_y_u_in = list_y_u[indexer_inside_batch] index_y_d_in = list_y_d[indexer_inside_batch] @@ -1255,6 +1352,12 @@ class Eynollah: label_p_pred[0, 0:-margin or None, 0:-margin or None, 1] + if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + prediction_true[index_y_d_in + 0:index_y_u_in - margin, + index_x_d_in + 0:index_x_u_in - margin, 1] = \ + seg_in_art[0:-margin or None, + 0:-margin or None] + elif i_batch == nxf - 1 and j_batch == nyf - 1: prediction_true[index_y_d_in + margin:index_y_u_in - 0, index_x_d_in + margin:index_x_u_in - 0] = \ @@ -1266,6 +1369,12 @@ class Eynollah: label_p_pred[0, margin:, margin:, 1] + if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + prediction_true[index_y_d_in + margin:index_y_u_in - 0, + index_x_d_in + margin:index_x_u_in - 0, 1] = \ + seg_in_art[margin:, + margin:] + elif i_batch == 0 and j_batch == nyf - 1: prediction_true[index_y_d_in + margin:index_y_u_in - 0, index_x_d_in + 0:index_x_u_in - margin] = \ @@ -1277,6 +1386,13 @@ class Eynollah: label_p_pred[0, margin:, 0:-margin or None, 1] + + if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + prediction_true[index_y_d_in + margin:index_y_u_in - 0, + index_x_d_in + 0:index_x_u_in - margin, 1] = \ + seg_in_art[margin:, + 0:-margin or None] + elif i_batch == nxf - 1 and j_batch == 0: prediction_true[index_y_d_in + 0:index_y_u_in - margin, index_x_d_in + margin:index_x_u_in - 0] = \ @@ -1288,6 +1404,12 @@ class Eynollah: label_p_pred[0, 0:-margin or None, margin:, 1] + if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + prediction_true[index_y_d_in + 0:index_y_u_in - margin, + index_x_d_in + margin:index_x_u_in - 0, 1] = \ + seg_in_art[0:-margin or None, + margin:] + elif i_batch == 0 and j_batch != 0 and j_batch != nyf - 1: prediction_true[index_y_d_in + margin:index_y_u_in - margin, index_x_d_in + 0:index_x_u_in - margin] = \ @@ -1299,6 +1421,11 @@ class Eynollah: label_p_pred[0, margin:-margin or None, 0:-margin or None, 1] + if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + prediction_true[index_y_d_in + margin:index_y_u_in - margin, + index_x_d_in + 0:index_x_u_in - margin, 1] = \ + seg_in_art[margin:-margin or None, + 0:-margin or None] elif i_batch == nxf - 1 and j_batch != 0 and j_batch != nyf - 1: prediction_true[index_y_d_in + margin:index_y_u_in - margin, index_x_d_in + margin:index_x_u_in - 0] = \ @@ -1310,6 +1437,11 @@ class Eynollah: label_p_pred[0, margin:-margin or None, margin:, 1] + if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + prediction_true[index_y_d_in + margin:index_y_u_in - margin, + index_x_d_in + margin:index_x_u_in - 0, 1] = \ + seg_in_art[margin:-margin or None, + margin:] elif i_batch != 0 and i_batch != nxf - 1 and j_batch == 0: prediction_true[index_y_d_in + 0:index_y_u_in - margin, index_x_d_in + margin:index_x_u_in - margin] = \ @@ -1321,6 +1453,11 @@ class Eynollah: label_p_pred[0, 0:-margin or None, margin:-margin or None, 1] + if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + prediction_true[index_y_d_in + 0:index_y_u_in - margin, + index_x_d_in + margin:index_x_u_in - margin, 1] = \ + seg_in_art[0:-margin or None, + margin:-margin or None] elif i_batch != 0 and i_batch != nxf - 1 and j_batch == nyf - 1: prediction_true[index_y_d_in + margin:index_y_u_in - 0, index_x_d_in + margin:index_x_u_in - margin] = \ @@ -1332,6 +1469,11 @@ class Eynollah: label_p_pred[0, margin:, margin:-margin or None, 1] + if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + prediction_true[index_y_d_in + margin:index_y_u_in - 0, + index_x_d_in + margin:index_x_u_in - margin, 1] = \ + seg_in_art[margin:, + margin:-margin or None] else: prediction_true[index_y_d_in + margin:index_y_u_in - margin, index_x_d_in + margin:index_x_u_in - margin] = \ @@ -1343,6 +1485,11 @@ class Eynollah: label_p_pred[0, margin:-margin or None, margin:-margin or None, 1] + if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + prediction_true[index_y_d_in + margin:index_y_u_in - margin, + index_x_d_in + margin:index_x_u_in - margin, 1] = \ + seg_in_art[margin:-margin or None, + margin:-margin or None] indexer_inside_batch += 1 list_i_s = [] @@ -1356,6 +1503,32 @@ class Eynollah: img_patch[:] = 0 prediction_true = prediction_true.astype(np.uint8) + + if thresholding_for_artificial_class_in_light_version: + kernel_min = np.ones((3, 3), np.uint8) + prediction_true[:,:,0][prediction_true[:,:,0]==2] = 0 + + skeleton_art = skeletonize(prediction_true[:,:,1]) + skeleton_art = skeleton_art*1 + + skeleton_art = skeleton_art.astype('uint8') + + skeleton_art = cv2.dilate(skeleton_art, kernel_min, iterations=1) + + prediction_true[:,:,0][skeleton_art==1]=2 + + if thresholding_for_some_classes_in_light_version: + kernel_min = np.ones((3, 3), np.uint8) + prediction_true[:,:,0][prediction_true[:,:,0]==4] = 0 + + skeleton_art = skeletonize(prediction_true[:,:,1]) + skeleton_art = skeleton_art*1 + + skeleton_art = skeleton_art.astype('uint8') + + skeleton_art = cv2.dilate(skeleton_art, kernel_min, iterations=1) + + prediction_true[:,:,0][skeleton_art==1]=4 gc.collect() return prediction_true, confidence_matrix @@ -1608,7 +1781,7 @@ class Eynollah: prediction_textline = self.do_prediction( use_patches, img, self.model_textline, marginal_of_patch_percent=0.15, n_batch_inference=3, - thresholding_for_artificial_class_in_light_version=self.textline_light) + thresholding_for_artificial_class_in_light_version=self.textline_light, threshold_art_class_textline=self.threshold_art_class_textline) #if not self.textline_light: #if num_col_classifier==1: #prediction_textline_nopatch = self.do_prediction(False, img, self.model_textline) @@ -1622,7 +1795,55 @@ class Eynollah: textline_mask_tot_ea_art = textline_mask_tot_ea_art.astype('uint8') #textline_mask_tot_ea_art = cv2.dilate(textline_mask_tot_ea_art, KERNEL, iterations=1) prediction_textline[:,:][textline_mask_tot_ea_art[:,:]==1]=2 + """ + else: + textline_mask_tot_ea_art = textline_mask_tot_ea_art.astype('uint8') + hor_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 1)) + + kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)) + ##cv2.imwrite('textline_mask_tot_ea_art.png', textline_mask_tot_ea_art) + textline_mask_tot_ea_art = cv2.dilate(textline_mask_tot_ea_art, hor_kernel, iterations=1) + + ###cv2.imwrite('dil_textline_mask_tot_ea_art.png', dil_textline_mask_tot_ea_art) + + textline_mask_tot_ea_art = textline_mask_tot_ea_art.astype('uint8') + + #print(np.shape(dil_textline_mask_tot_ea_art), np.unique(dil_textline_mask_tot_ea_art), 'dil_textline_mask_tot_ea_art') + tsk = time.time() + skeleton_art_textline = skeletonize(textline_mask_tot_ea_art[:,:,0]) + + skeleton_art_textline = skeleton_art_textline*1 + + skeleton_art_textline = skeleton_art_textline.astype('uint8') + + skeleton_art_textline = cv2.dilate(skeleton_art_textline, kernel, iterations=1) + + #print(np.unique(skeleton_art_textline), np.shape(skeleton_art_textline)) + + #print(skeleton_art_textline, np.unique(skeleton_art_textline)) + + #cv2.imwrite('skeleton_art_textline.png', skeleton_art_textline) + + prediction_textline[:,:,0][skeleton_art_textline[:,:]==1]=2 + + #cv2.imwrite('prediction_textline1.png', prediction_textline[:,:,0]) + + ##hor_kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 1)) + ##ver_kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 3)) + ##textline_mask_tot_ea_main = (prediction_textline[:,:]==1)*1 + ##textline_mask_tot_ea_main = textline_mask_tot_ea_main.astype('uint8') + + ##dil_textline_mask_tot_ea_main = cv2.erode(textline_mask_tot_ea_main, ver_kernel2, iterations=1) + + ##dil_textline_mask_tot_ea_main = cv2.dilate(textline_mask_tot_ea_main, hor_kernel2, iterations=1) + + ##dil_textline_mask_tot_ea_main = cv2.dilate(textline_mask_tot_ea_main, ver_kernel2, iterations=1) + + ##prediction_textline[:,:][dil_textline_mask_tot_ea_main[:,:]==1]=1 + + """ + textline_mask_tot_ea_lines = (prediction_textline[:,:]==1)*1 textline_mask_tot_ea_lines = textline_mask_tot_ea_lines.astype('uint8') if not self.textline_light: @@ -1631,10 +1852,15 @@ class Eynollah: prediction_textline[:,:][textline_mask_tot_ea_lines[:,:]==1]=1 if not self.textline_light: prediction_textline[:,:][old_art[:,:]==1]=2 + + #cv2.imwrite('prediction_textline2.png', prediction_textline[:,:,0]) prediction_textline_longshot = self.do_prediction(False, img, self.model_textline) prediction_textline_longshot_true_size = resize_image(prediction_textline_longshot, img_h, img_w) - + + + #cv2.imwrite('prediction_textline.png', prediction_textline[:,:,0]) + #sys.exit() self.logger.debug('exit textline_contours') return ((prediction_textline[:, :, 0]==1).astype(np.uint8), (prediction_textline_longshot_true_size[:, :, 0]==1).astype(np.uint8)) @@ -1840,7 +2066,7 @@ class Eynollah: textline_mask_tot_ea = resize_image(textline_mask_tot_ea,img_height_h, img_width_h ) #print(self.image_org.shape) - #cv2.imwrite('out_13.png', self.image_page_org_size) + #cv2.imwrite('textline.png', textline_mask_tot_ea) #plt.imshwo(self.image_page_org_size) #plt.show() @@ -1852,13 +2078,13 @@ class Eynollah: img_resized.shape[1], img_resized.shape[0], num_col_classifier) prediction_regions_org, confidence_matrix = self.do_prediction_new_concept( True, img_resized, self.model_region_1_2, n_batch_inference=1, - thresholding_for_some_classes_in_light_version=True) + thresholding_for_some_classes_in_light_version=True, threshold_art_class_layout=self.threshold_art_class_layout) else: prediction_regions_org = np.zeros((self.image_org.shape[0], self.image_org.shape[1], 3)) confidence_matrix = np.zeros((self.image_org.shape[0], self.image_org.shape[1])) prediction_regions_page, confidence_matrix_page = self.do_prediction_new_concept( False, self.image_page_org_size, self.model_region_1_2, n_batch_inference=1, - thresholding_for_artificial_class_in_light_version=True) + thresholding_for_artificial_class_in_light_version=True, threshold_art_class_layout=self.threshold_art_class_layout) ys = slice(*self.page_coord[0:2]) xs = slice(*self.page_coord[2:4]) prediction_regions_org[ys, xs] = prediction_regions_page @@ -1871,7 +2097,7 @@ class Eynollah: img_resized.shape[1], img_resized.shape[0], new_h, num_col_classifier) prediction_regions_org, confidence_matrix = self.do_prediction_new_concept( True, img_resized, self.model_region_1_2, n_batch_inference=2, - thresholding_for_some_classes_in_light_version=True) + thresholding_for_some_classes_in_light_version=True, threshold_art_class_layout=self.threshold_art_class_layout) ###prediction_regions_org = self.do_prediction(True, img_bin, self.model_region, n_batch_inference=3, thresholding_for_some_classes_in_light_version=True) #print("inside 3 ", time.time()-t_in) #plt.imshow(prediction_regions_org[:,:,0]) @@ -3811,7 +4037,7 @@ class Eynollah: if dilation_m1<6: dilation_m1 = 6 #print(dilation_m1, 'dilation_m1') - dilation_m1 = 6 + dilation_m1 = 4#6 dilation_m2 = int(dilation_m1/2.) +1 for i in range(len(x_differential)): @@ -4322,6 +4548,8 @@ class Eynollah: cnt_clean_rot_raw, hir_on_cnt_clean_rot = return_contours_of_image(textline_mask_tot_ea) all_found_textline_polygons = filter_contours_area_of_image( textline_mask_tot_ea, cnt_clean_rot_raw, hir_on_cnt_clean_rot, max_area=1, min_area=0.00001) + + all_found_textline_polygons = all_found_textline_polygons[::-1] all_found_textline_polygons=[ all_found_textline_polygons ] @@ -4329,8 +4557,8 @@ class Eynollah: all_found_textline_polygons) all_found_textline_polygons = self.filter_contours_inside_a_bigger_one( all_found_textline_polygons, None, textline_mask_tot_ea, type_contour="textline") - - + + order_text_new = [0] slopes =[0] id_of_texts_tot =['region_0001'] @@ -4343,7 +4571,7 @@ class Eynollah: polygons_lines_xml = [] contours_tables = [] ocr_all_textlines = None - conf_contours_textregions =None + conf_contours_textregions =[0] pcgts = self.writer.build_pagexml_no_full_layout( cont_page, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, page_coord, polygons_of_images, polygons_of_marginals, @@ -4905,7 +5133,7 @@ class Eynollah_ocr: self.b_s = int(batch_size) else: - self.model_ocr_dir = dir_models + "/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" + self.model_ocr_dir = dir_models + "/model_ens_ocrcnn_125_225"#"/model_step_125000_ocr"#"/model_step_25000_ocr"#"/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" model_ocr = load_model(self.model_ocr_dir , compile=False) self.prediction_model = tf.keras.models.Model( From 4ddc84dee87ed7e1b600592ba8e96cad93e653e3 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 12 May 2025 18:31:40 +0200 Subject: [PATCH 129/492] visulizing textline detection from eynollah page-xml output --- train/generate_gt_for_training.py | 48 +++++++++++++++++ train/gt_gen_utils.py | 88 +++++++++++++++++++++++++++++++ 2 files changed, 136 insertions(+) diff --git a/train/generate_gt_for_training.py b/train/generate_gt_for_training.py index 77e9238..9ce743a 100644 --- a/train/generate_gt_for_training.py +++ b/train/generate_gt_for_training.py @@ -2,6 +2,7 @@ import click import json from gt_gen_utils import * from tqdm import tqdm +from pathlib import Path @click.group() def main(): @@ -331,6 +332,53 @@ def visualize_reading_order(dir_xml, dir_out, dir_imgs): cv2.imwrite(os.path.join(dir_out, f_name+'.png'), img) +@main.command() +@click.option( + "--dir_xml", + "-dx", + help="directory of GT page-xml files", + type=click.Path(exists=True, file_okay=False), +) + +@click.option( + "--dir_out", + "-do", + help="directory where plots will be written", + type=click.Path(exists=True, file_okay=False), +) + +@click.option( + "--dir_imgs", + "-dimg", + help="directory of images where textline segmentation will be overlayed", ) + +def visualize_textline_segmentation(dir_xml, dir_out, dir_imgs): + xml_files_ind = os.listdir(dir_xml) + for ind_xml in tqdm(xml_files_ind): + indexer = 0 + #print(ind_xml) + #print('########################') + xml_file = os.path.join(dir_xml,ind_xml ) + f_name = Path(ind_xml).stem + + img_file_name_with_format = find_format_of_given_filename_in_dir(dir_imgs, f_name) + img = cv2.imread(os.path.join(dir_imgs, img_file_name_with_format)) + + co_tetxlines, y_len, x_len = get_textline_contours_for_visualization(xml_file) + + img_total = np.zeros((y_len, x_len, 3)) + for cont in co_tetxlines: + img_in = np.zeros((y_len, x_len, 3)) + img_in = cv2.fillPoly(img_in, pts =[cont], color=(1,1,1)) + + img_total = img_total + img_in + + img_total[:,:, 0][img_total[:,:, 0]>2] = 2 + + img_out, _ = visualize_model_output(img_total, img, task="textline") + + cv2.imwrite(os.path.join(dir_out, f_name+'.png'), img_out) + if __name__ == "__main__": main() diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 10183d6..0a65f05 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -16,6 +16,52 @@ KERNEL = np.ones((5, 5), np.uint8) with warnings.catch_warnings(): warnings.simplefilter("ignore") +def visualize_model_output(prediction, img, task): + if task == "binarization": + prediction = prediction * -1 + prediction = prediction + 1 + added_image = prediction * 255 + layout_only = None + else: + unique_classes = np.unique(prediction[:,:,0]) + rgb_colors = {'0' : [255, 255, 255], + '1' : [255, 0, 0], + '2' : [255, 125, 0], + '3' : [255, 0, 125], + '4' : [125, 125, 125], + '5' : [125, 125, 0], + '6' : [0, 125, 255], + '7' : [0, 125, 0], + '8' : [125, 125, 125], + '9' : [0, 125, 255], + '10' : [125, 0, 125], + '11' : [0, 255, 0], + '12' : [0, 0, 255], + '13' : [0, 255, 255], + '14' : [255, 125, 125], + '15' : [255, 0, 255]} + + layout_only = np.zeros(prediction.shape) + + for unq_class in unique_classes: + rgb_class_unique = rgb_colors[str(int(unq_class))] + layout_only[:,:,0][prediction[:,:,0]==unq_class] = rgb_class_unique[0] + layout_only[:,:,1][prediction[:,:,0]==unq_class] = rgb_class_unique[1] + layout_only[:,:,2][prediction[:,:,0]==unq_class] = rgb_class_unique[2] + + + + img = resize_image(img, layout_only.shape[0], layout_only.shape[1]) + + layout_only = layout_only.astype(np.int32) + img = img.astype(np.int32) + + + + added_image = cv2.addWeighted(img,0.5,layout_only,0.1,0) + + return added_image, layout_only + def get_content_of_dir(dir_in): """ Listing all ground truth page xml files. All files are needed to have xml format. @@ -138,6 +184,48 @@ def update_region_contours(co_text, img_boundary, erosion_rate, dilation_rate, y img_boundary[:,:][boundary[:,:]==1] =1 return co_text_eroded, img_boundary + +def get_textline_contours_for_visualization(xml_file): + tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding = 'iso-8859-5')) + root1=tree1.getroot() + alltags=[elem.tag for elem in root1.iter()] + link=alltags[0].split('}')[0]+'}' + + + + for jj in root1.iter(link+'Page'): + y_len=int(jj.attrib['imageHeight']) + x_len=int(jj.attrib['imageWidth']) + + region_tags = np.unique([x for x in alltags if x.endswith('TextLine')]) + tag_endings = ['}TextLine','}textline'] + co_use_case = [] + + for tag in region_tags: + if tag.endswith(tag_endings[0]) or tag.endswith(tag_endings[1]): + for nn in root1.iter(tag): + c_t_in = [] + sumi = 0 + for vv in nn.iter(): + if vv.tag == link + 'Coords': + coords = bool(vv.attrib) + if coords: + p_h = vv.attrib['points'].split(' ') + c_t_in.append( + np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) + break + else: + pass + + if vv.tag == link + 'Point': + c_t_in.append([int(float(vv.attrib['x'])), int(float(vv.attrib['y']))]) + sumi += 1 + elif vv.tag != link + 'Point' and sumi >= 1: + break + co_use_case.append(np.array(c_t_in)) + return co_use_case, y_len, x_len + + def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_file, config_params, printspace, dir_images, dir_out_images): """ Reading the page xml files and write the ground truth images into given output directory. From 4a7728bb346aeccf76a34a6e0ec900e4df40a765 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 12 May 2025 22:39:47 +0200 Subject: [PATCH 130/492] visuliazation layout from eynollah page-xml output --- train/generate_gt_for_training.py | 53 ++++- train/gt_gen_utils.py | 312 ++++++++++++++++++++++++++++++ 2 files changed, 355 insertions(+), 10 deletions(-) diff --git a/train/generate_gt_for_training.py b/train/generate_gt_for_training.py index 9ce743a..7e7c6a0 100644 --- a/train/generate_gt_for_training.py +++ b/train/generate_gt_for_training.py @@ -366,18 +366,51 @@ def visualize_textline_segmentation(dir_xml, dir_out, dir_imgs): co_tetxlines, y_len, x_len = get_textline_contours_for_visualization(xml_file) - img_total = np.zeros((y_len, x_len, 3)) - for cont in co_tetxlines: - img_in = np.zeros((y_len, x_len, 3)) - img_in = cv2.fillPoly(img_in, pts =[cont], color=(1,1,1)) - - img_total = img_total + img_in - - img_total[:,:, 0][img_total[:,:, 0]>2] = 2 + added_image = visualize_image_from_contours(co_tetxlines, img) - img_out, _ = visualize_model_output(img_total, img, task="textline") + cv2.imwrite(os.path.join(dir_out, f_name+'.png'), added_image) + - cv2.imwrite(os.path.join(dir_out, f_name+'.png'), img_out) + +@main.command() +@click.option( + "--dir_xml", + "-dx", + help="directory of GT page-xml files", + type=click.Path(exists=True, file_okay=False), +) + +@click.option( + "--dir_out", + "-do", + help="directory where plots will be written", + type=click.Path(exists=True, file_okay=False), +) + +@click.option( + "--dir_imgs", + "-dimg", + help="directory of images where textline segmentation will be overlayed", ) + +def visualize_layout_segmentation(dir_xml, dir_out, dir_imgs): + xml_files_ind = os.listdir(dir_xml) + for ind_xml in tqdm(xml_files_ind): + indexer = 0 + #print(ind_xml) + #print('########################') + xml_file = os.path.join(dir_xml,ind_xml ) + f_name = Path(ind_xml).stem + + img_file_name_with_format = find_format_of_given_filename_in_dir(dir_imgs, f_name) + img = cv2.imread(os.path.join(dir_imgs, img_file_name_with_format)) + + co_text, co_graphic, co_sep, co_img, co_table, co_noise, y_len, x_len = get_layout_contours_for_visualization(xml_file) + + + added_image = visualize_image_from_contours_layout(co_text['paragraph'], co_text['header'], co_text['drop-capital'], co_sep, co_img, co_text['marginalia'], img) + + cv2.imwrite(os.path.join(dir_out, f_name+'.png'), added_image) + if __name__ == "__main__": diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 0a65f05..9b67563 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -15,6 +15,63 @@ KERNEL = np.ones((5, 5), np.uint8) with warnings.catch_warnings(): warnings.simplefilter("ignore") + + +def visualize_image_from_contours_layout(co_par, co_header, co_drop, co_sep, co_image, co_marginal, img): + alpha = 0.5 + + blank_image = np.ones( (img.shape[:]), dtype=np.uint8) * 255 + + col_header = (173, 216, 230) + col_drop = (0, 191, 255) + boundary_color = (143, 216, 200)#(0, 0, 255) # Dark gray for the boundary + col_par = (0, 0, 139) # Lighter gray for the filled area + col_image = (0, 100, 0) + col_sep = (255, 0, 0) + col_marginal = (106, 90, 205) + + if len(co_image)>0: + cv2.drawContours(blank_image, co_image, -1, col_image, thickness=cv2.FILLED) # Fill the contour + + if len(co_sep)>0: + cv2.drawContours(blank_image, co_sep, -1, col_sep, thickness=cv2.FILLED) # Fill the contour + + + if len(co_header)>0: + cv2.drawContours(blank_image, co_header, -1, col_header, thickness=cv2.FILLED) # Fill the contour + + if len(co_par)>0: + cv2.drawContours(blank_image, co_par, -1, col_par, thickness=cv2.FILLED) # Fill the contour + + cv2.drawContours(blank_image, co_par, -1, boundary_color, thickness=1) # Draw the boundary + + if len(co_drop)>0: + cv2.drawContours(blank_image, co_drop, -1, col_drop, thickness=cv2.FILLED) # Fill the contour + + if len(co_marginal)>0: + cv2.drawContours(blank_image, co_marginal, -1, col_marginal, thickness=cv2.FILLED) # Fill the contour + + img_final =cv2.cvtColor(blank_image, cv2.COLOR_BGR2RGB) + + added_image = cv2.addWeighted(img,alpha,img_final,1- alpha,0) + return added_image + + +def visualize_image_from_contours(contours, img): + alpha = 0.5 + + blank_image = np.ones( (img.shape[:]), dtype=np.uint8) * 255 + + boundary_color = (0, 0, 255) # Dark gray for the boundary + fill_color = (173, 216, 230) # Lighter gray for the filled area + + cv2.drawContours(blank_image, contours, -1, fill_color, thickness=cv2.FILLED) # Fill the contour + cv2.drawContours(blank_image, contours, -1, boundary_color, thickness=1) # Draw the boundary + + img_final =cv2.cvtColor(blank_image, cv2.COLOR_BGR2RGB) + + added_image = cv2.addWeighted(img,alpha,img_final,1- alpha,0) + return added_image def visualize_model_output(prediction, img, task): if task == "binarization": @@ -224,7 +281,262 @@ def get_textline_contours_for_visualization(xml_file): break co_use_case.append(np.array(c_t_in)) return co_use_case, y_len, x_len + + +def get_layout_contours_for_visualization(xml_file): + tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding = 'iso-8859-5')) + root1=tree1.getroot() + alltags=[elem.tag for elem in root1.iter()] + link=alltags[0].split('}')[0]+'}' + + + + for jj in root1.iter(link+'Page'): + y_len=int(jj.attrib['imageHeight']) + x_len=int(jj.attrib['imageWidth']) + + region_tags=np.unique([x for x in alltags if x.endswith('Region')]) + co_text = {'drop-capital':[], "footnote":[], "footnote-continued":[], "heading":[], "signature-mark":[], "header":[], "catch-word":[], "page-number":[], "marginalia":[], "paragraph":[]} + all_defined_textregion_types = list(co_text.keys()) + co_graphic = {"handwritten-annotation":[], "decoration":[], "stamp":[], "signature":[]} + all_defined_graphic_types = list(co_graphic.keys()) + co_sep=[] + co_img=[] + co_table=[] + co_noise=[] + types_text = [] + + for tag in region_tags: + if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): + for nn in root1.iter(tag): + c_t_in = {'drop-capital':[], "footnote":[], "footnote-continued":[], "heading":[], "signature-mark":[], "header":[], "catch-word":[], "page-number":[], "marginalia":[], "paragraph":[]} + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + + if "rest_as_paragraph" in types_text: + types_text_without_paragraph = [element for element in types_text if element!='rest_as_paragraph' and element!='paragraph'] + if len(types_text_without_paragraph) == 0: + if "type" in nn.attrib: + c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + elif len(types_text_without_paragraph) >= 1: + if "type" in nn.attrib: + if nn.attrib['type'] in types_text_without_paragraph: + c_t_in[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + else: + c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + else: + if "type" in nn.attrib: + if nn.attrib['type'] in all_defined_textregion_types: + c_t_in[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + break + else: + pass + + + if vv.tag==link+'Point': + if "rest_as_paragraph" in types_text: + types_text_without_paragraph = [element for element in types_text if element!='rest_as_paragraph' and element!='paragraph'] + if len(types_text_without_paragraph) == 0: + if "type" in nn.attrib: + c_t_in['paragraph'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) + sumi+=1 + elif len(types_text_without_paragraph) >= 1: + if "type" in nn.attrib: + if nn.attrib['type'] in types_text_without_paragraph: + c_t_in[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) + sumi+=1 + else: + c_t_in['paragraph'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) + sumi+=1 + + else: + if "type" in nn.attrib: + if nn.attrib['type'] in all_defined_textregion_types: + c_t_in[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) + sumi+=1 + + + elif vv.tag!=link+'Point' and sumi>=1: + break + + for element_text in list(c_t_in.keys()): + if len(c_t_in[element_text])>0: + co_text[element_text].append(np.array(c_t_in[element_text])) + + + if tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in_graphic = {"handwritten-annotation":[], "decoration":[], "stamp":[], "signature":[]} + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + + if "rest_as_decoration" in types_graphic: + types_graphic_without_decoration = [element for element in types_graphic if element!='rest_as_decoration' and element!='decoration'] + if len(types_graphic_without_decoration) == 0: + if "type" in nn.attrib: + c_t_in_graphic['decoration'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + elif len(types_graphic_without_decoration) >= 1: + if "type" in nn.attrib: + if nn.attrib['type'] in types_graphic_without_decoration: + c_t_in_graphic[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + else: + c_t_in_graphic['decoration'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + else: + if "type" in nn.attrib: + if nn.attrib['type'] in all_defined_graphic_types: + c_t_in_graphic[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + break + else: + pass + + + if vv.tag==link+'Point': + if "rest_as_decoration" in types_graphic: + types_graphic_without_decoration = [element for element in types_graphic if element!='rest_as_decoration' and element!='decoration'] + if len(types_graphic_without_decoration) == 0: + if "type" in nn.attrib: + c_t_in_graphic['decoration'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) + sumi+=1 + elif len(types_graphic_without_decoration) >= 1: + if "type" in nn.attrib: + if nn.attrib['type'] in types_graphic_without_decoration: + c_t_in_graphic[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) + sumi+=1 + else: + c_t_in_graphic['decoration'].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) + sumi+=1 + + else: + if "type" in nn.attrib: + if nn.attrib['type'] in all_defined_graphic_types: + c_t_in_graphic[nn.attrib['type']].append( [ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ] ) + sumi+=1 + + elif vv.tag!=link+'Point' and sumi>=1: + break + + for element_graphic in list(c_t_in_graphic.keys()): + if len(c_t_in_graphic[element_graphic])>0: + co_graphic[element_graphic].append(np.array(c_t_in_graphic[element_graphic])) + + + if tag.endswith('}ImageRegion') or tag.endswith('}imageregion'): + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + sumi+=1 + + elif vv.tag!=link+'Point' and sumi>=1: + break + co_img.append(np.array(c_t_in)) + + + if tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + sumi+=1 + + elif vv.tag!=link+'Point' and sumi>=1: + break + co_sep.append(np.array(c_t_in)) + + + if tag.endswith('}TableRegion') or tag.endswith('}tableregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_table.append(np.array(c_t_in)) + + + if tag.endswith('}NoiseRegion') or tag.endswith('}noiseregion'): + #print('sth') + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + sumi+=1 + #print(vv.tag,'in') + elif vv.tag!=link+'Point' and sumi>=1: + break + co_noise.append(np.array(c_t_in)) + return co_text, co_graphic, co_sep, co_img, co_table, co_noise, y_len, x_len def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_file, config_params, printspace, dir_images, dir_out_images): """ From 54088c6b04bb64976e9873195965ade8803b7d67 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 13 May 2025 14:40:57 +0200 Subject: [PATCH 131/492] The initial attempt at reading heavily deskewed or vertically aligned lines. --- src/eynollah/eynollah.py | 91 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 89 insertions(+), 2 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 0c7c5d2..9f2ca50 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -22,7 +22,6 @@ from multiprocessing import cpu_count import gc import copy import json - from loky import ProcessPoolExecutor import xml.etree.ElementTree as ET import cv2 @@ -77,7 +76,8 @@ from .utils.contour import ( from .utils.rotate import ( rotate_image, rotation_not_90_func, - rotation_not_90_func_full_layout + rotation_not_90_func_full_layout, + rotation_image_new ) from .utils.separate_lines import ( textline_contours_postprocessing, @@ -5310,6 +5310,75 @@ class Eynollah_ocr: img_fin = img_fin / 255. return img_fin + def get_deskewed_contour_and_bb_and_image(self, contour, image, deskew_angle): + (h_in, w_in) = image.shape[:2] + center = (w_in // 2, h_in // 2) + + rotation_matrix = cv2.getRotationMatrix2D(center, deskew_angle, 1.0) + + cos_angle = abs(rotation_matrix[0, 0]) + sin_angle = abs(rotation_matrix[0, 1]) + new_w = int((h_in * sin_angle) + (w_in * cos_angle)) + new_h = int((h_in * cos_angle) + (w_in * sin_angle)) + + rotation_matrix[0, 2] += (new_w / 2) - center[0] + rotation_matrix[1, 2] += (new_h / 2) - center[1] + + deskewed_image = cv2.warpAffine(image, rotation_matrix, (new_w, new_h)) + + contour_points = np.array(contour, dtype=np.float32) + transformed_points = cv2.transform(np.array([contour_points]), rotation_matrix)[0] + + x, y, w, h = cv2.boundingRect(np.array(transformed_points, dtype=np.int32)) + cropped_textline = deskewed_image[y:y+h, x:x+w] + + return cropped_textline + + def rotate_image_with_padding(self, image, angle): + # Get image dimensions + (h, w) = image.shape[:2] + + # Calculate the center of the image + center = (w // 2, h // 2) + + # Get the rotation matrix + rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1.0) + + # Compute the new bounding dimensions + cos = abs(rotation_matrix[0, 0]) + sin = abs(rotation_matrix[0, 1]) + new_w = int((h * sin) + (w * cos)) + new_h = int((h * cos) + (w * sin)) + + # Adjust the rotation matrix to account for translation + rotation_matrix[0, 2] += (new_w / 2) - center[0] + rotation_matrix[1, 2] += (new_h / 2) - center[1] + + # Perform the rotation + rotated_image = cv2.warpAffine(image, rotation_matrix, (new_w, new_h), borderValue=(0, 0, 0)) + + return rotated_image + + def get_orientation_moments(self, contour): + moments = cv2.moments(contour) + if moments["mu20"] - moments["mu02"] == 0: # Avoid division by zero + return 90 if moments["mu11"] > 0 else -90 + else: + angle = 0.5 * np.arctan2(2 * moments["mu11"], moments["mu20"] - moments["mu02"]) + return np.degrees(angle) # Convert radians to degrees + + def get_contours_and_bounding_boxes(self, mask): + # Find contours in the binary mask + contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + + largest_contour = max(contours, key=cv2.contourArea) if contours else None + + # Get the bounding rectangle for the contour + x, y, w, h = cv2.boundingRect(largest_contour) + #bounding_boxes.append((x, y, w, h)) + + return x, y, w, h + def run(self): ls_imgs = os.listdir(self.dir_in) @@ -5533,6 +5602,10 @@ class Eynollah_ocr: x,y,w,h = cv2.boundingRect(textline_coords) + angle_radians = math.atan2(h, w) + # Convert to degrees + angle_degrees = math.degrees(angle_radians) + if self.draw_texts_on_image: total_bb_coordinates.append([x,y,w,h]) @@ -5549,7 +5622,21 @@ class Eynollah_ocr: mask_poly = mask_poly[y:y+h, x:x+w, :] img_crop = img_poly_on_img[y:y+h, x:x+w, :] if not self.do_not_mask_with_textline_contour: + if angle_degrees > 15: + better_des_slope = self.get_orientation_moments(textline_coords) + + img_crop = self.rotate_image_with_padding(img_crop, -abs(better_des_slope) ) + mask_poly = self.rotate_image_with_padding(mask_poly, -abs(better_des_slope) ) + mask_poly = mask_poly.astype('uint8') + + #new bounding box + x_n, y_n, w_n, h_n = self.get_contours_and_bounding_boxes(mask_poly[:,:,0]) + + mask_poly = mask_poly[y_n:y_n+h_n, x_n:x_n+w_n, :] + img_crop = img_crop[y_n:y_n+h_n, x_n:x_n+w_n, :] + img_crop[mask_poly==0] = 255 + if self.prediction_with_both_of_rgb_and_bin: img_crop_bin[mask_poly==0] = 255 From 88e03153217eba0809c53d617387d3cf3403a7c2 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 13 May 2025 15:53:05 +0200 Subject: [PATCH 132/492] Accurately writing text line contours into xml file when the deskewing exceeds 45 degrees and the text line is in light mode --- src/eynollah/writer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index 92e353f..8cd1c8e 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -119,7 +119,7 @@ class EynollahXmlWriter(): points_co += ',' points_co += str(textline_y_coord) - if (self.curved_line or self.textline_light) and np.abs(slopes[region_idx]) <= 45: + if self.textline_light or (self.curved_line and np.abs(slopes[region_idx]) <= 45): if len(contour_textline) == 2: points_co += str(int((contour_textline[0] + page_coord[2]) / self.scale_x)) points_co += ',' @@ -128,7 +128,7 @@ class EynollahXmlWriter(): points_co += str(int((contour_textline[0][0] + page_coord[2]) / self.scale_x)) points_co += ',' points_co += str(int((contour_textline[0][1] + page_coord[0])/self.scale_y)) - elif (self.curved_line or self.textline_light) and np.abs(slopes[region_idx]) > 45: + elif self.curved_line and np.abs(slopes[region_idx]) > 45: if len(contour_textline)==2: points_co += str(int((contour_textline[0] + region_bboxes[2] + page_coord[2])/self.scale_x)) points_co += ',' From 25abc0fabc8a70b6a9c21c35006c08fec577d792 Mon Sep 17 00:00:00 2001 From: johnlockejrr <16368414+johnlockejrr@users.noreply.github.com> Date: Wed, 14 May 2025 03:34:51 -0700 Subject: [PATCH 133/492] Update gt_gen_utils.py Keep safely the full basename without extension --- train/gt_gen_utils.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 5784e14..8837462 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -22,7 +22,7 @@ def get_content_of_dir(dir_in): """ gt_all=os.listdir(dir_in) - gt_list=[file for file in gt_all if file.split('.')[ len(file.split('.'))-1 ]=='xml' ] + gt_list = [file for file in gt_all if os.path.splitext(file)[1] == '.xml'] return gt_list def return_parent_contours(contours, hierarchy): @@ -134,7 +134,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if dir_images: ls_org_imgs = os.listdir(dir_images) - ls_org_imgs_stem = [item.split('.')[0] for item in ls_org_imgs] + ls_org_imgs_stem = [os.path.splitext(item)[0] for item in ls_org_imgs] for index in tqdm(range(len(gt_list))): #try: tree1 = ET.parse(dir_in+'/'+gt_list[index], parser = ET.XMLParser(encoding = 'iso-8859-5')) @@ -298,10 +298,10 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ img_poly = resize_image(img_poly, y_new, x_new) try: - xml_file_stem = gt_list[index].split('-')[1].split('.')[0] + xml_file_stem = os.path.splitext(gt_list[index])[0] cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly) except: - xml_file_stem = gt_list[index].split('.')[0] + xml_file_stem = os.path.splitext(gt_list[index])[0] cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly) if dir_images: @@ -757,10 +757,10 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ img_poly = resize_image(img_poly, y_new, x_new) try: - xml_file_stem = gt_list[index].split('-')[1].split('.')[0] + xml_file_stem = os.path.splitext(gt_list[index])[0] cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly) except: - xml_file_stem = gt_list[index].split('.')[0] + xml_file_stem = os.path.splitext(gt_list[index])[0] cv2.imwrite(os.path.join(output_dir, xml_file_stem + '.png'), img_poly) From ed46615f004a96191208d2f5481229003336644f Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 14 May 2025 18:34:58 +0200 Subject: [PATCH 134/492] enhance ocr for vertical textlines --- src/eynollah/eynollah.py | 79 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 78 insertions(+), 1 deletion(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 9f2ca50..5a73ef3 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5133,7 +5133,7 @@ class Eynollah_ocr: self.b_s = int(batch_size) else: - self.model_ocr_dir = dir_models + "/model_ens_ocrcnn_125_225"#"/model_step_125000_ocr"#"/model_step_25000_ocr"#"/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" + self.model_ocr_dir = dir_models + "/model_step_425000_ocr"#"/model_step_125000_ocr"#"/model_step_25000_ocr"#"/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" model_ocr = load_model(self.model_ocr_dir , compile=False) self.prediction_model = tf.keras.models.Model( @@ -5585,6 +5585,7 @@ class Eynollah_ocr: region_tags=np.unique([x for x in alltags if x.endswith('TextRegion')]) cropped_lines = [] + cropped_lines_ver_index = [] cropped_lines_region_indexer = [] cropped_lines_meging_indexing = [] @@ -5644,6 +5645,11 @@ class Eynollah_ocr: if w_scaled < 1.5*image_width: img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) cropped_lines.append(img_fin) + if angle_degrees > 15: + cropped_lines_ver_index.append(1) + else: + cropped_lines_ver_index.append(0) + cropped_lines_meging_indexing.append(0) if self.prediction_with_both_of_rgb_and_bin: img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(img_crop_bin, image_height, image_width) @@ -5657,11 +5663,22 @@ class Eynollah_ocr: img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(splited_images[0], image_height, image_width) cropped_lines.append(img_fin) cropped_lines_meging_indexing.append(1) + + if angle_degrees > 15: + cropped_lines_ver_index.append(1) + else: + cropped_lines_ver_index.append(0) + img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(splited_images[1], image_height, image_width) cropped_lines.append(img_fin) cropped_lines_meging_indexing.append(-1) + if angle_degrees > 15: + cropped_lines_ver_index.append(1) + else: + cropped_lines_ver_index.append(0) + if self.prediction_with_both_of_rgb_and_bin: img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(splited_images_bin[0], image_height, image_width) cropped_lines_bin.append(img_fin) @@ -5673,6 +5690,11 @@ class Eynollah_ocr: cropped_lines.append(img_fin) cropped_lines_meging_indexing.append(0) + if angle_degrees > 15: + cropped_lines_ver_index.append(1) + else: + cropped_lines_ver_index.append(0) + if self.prediction_with_both_of_rgb_and_bin: img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(img_crop_bin, image_height, image_width) cropped_lines_bin.append(img_fin) @@ -5722,6 +5744,19 @@ class Eynollah_ocr: imgs = cropped_lines[n_start:] imgs = np.array(imgs) imgs = imgs.reshape(imgs.shape[0], image_height, image_width, 3) + + ver_imgs = np.array( cropped_lines_ver_index[n_start:] ) + indices_ver = np.where(ver_imgs == 1)[0] + + #print(indices_ver, 'indices_ver') + if len(indices_ver)>0: + imgs_ver_flipped = imgs[indices_ver, : ,: ,:] + imgs_ver_flipped = imgs_ver_flipped[:,::-1,::-1,:] + #print(imgs_ver_flipped, 'imgs_ver_flipped') + + else: + imgs_ver_flipped = None + if self.prediction_with_both_of_rgb_and_bin: imgs_bin = cropped_lines_bin[n_start:] imgs_bin = np.array(imgs_bin) @@ -5732,12 +5767,54 @@ class Eynollah_ocr: imgs = cropped_lines[n_start:n_end] imgs = np.array(imgs).reshape(self.b_s, image_height, image_width, 3) + ver_imgs = np.array( cropped_lines_ver_index[n_start:n_end] ) + indices_ver = np.where(ver_imgs == 1)[0] + #print(indices_ver, 'indices_ver') + + if len(indices_ver)>0: + imgs_ver_flipped = imgs[indices_ver, : ,: ,:] + imgs_ver_flipped = imgs_ver_flipped[:,::-1,::-1,:] + #print(imgs_ver_flipped, 'imgs_ver_flipped') + else: + imgs_ver_flipped = None + + if self.prediction_with_both_of_rgb_and_bin: imgs_bin = cropped_lines_bin[n_start:n_end] imgs_bin = np.array(imgs_bin).reshape(self.b_s, image_height, image_width, 3) preds = self.prediction_model.predict(imgs, verbose=0) + + if len(indices_ver)>0: + #cv2.imwrite('flipped.png', (imgs_ver_flipped[0, :,:,:]*255).astype('uint8')) + #cv2.imwrite('original.png', (imgs[0, :,:,:]*255).astype('uint8')) + #sys.exit() + #print(imgs_ver_flipped.shape, 'imgs_ver_flipped.shape') + preds_flipped = self.prediction_model.predict(imgs_ver_flipped, verbose=0) + preds_max_fliped = np.max(preds_flipped, axis=2 ) + preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) + pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=256 + masked_means_flipped = np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) + masked_means_flipped[np.isnan(masked_means_flipped)] = 0 + #print(masked_means_flipped, 'masked_means_flipped') + + preds_max = np.max(preds, axis=2 ) + preds_max_args = np.argmax(preds, axis=2 ) + pred_max_not_unk_mask_bool = preds_max_args[:,:]!=256 + + masked_means = np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / np.sum(pred_max_not_unk_mask_bool, axis=1) + masked_means[np.isnan(masked_means)] = 0 + + masked_means_ver = masked_means[indices_ver] + #print(masked_means_ver, 'pred_max_not_unk') + + indices_where_flipped_conf_value_is_higher = np.where(masked_means_flipped > masked_means_ver)[0] + + #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') + if len(indices_where_flipped_conf_value_is_higher)>0: + indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] + preds[indices_to_be_replaced,:,:] = preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] if self.prediction_with_both_of_rgb_and_bin: preds_bin = self.prediction_model.predict(imgs_bin, verbose=0) preds = (preds + preds_bin) / 2. From 7a22e51f5d2ebff1bd0239c913eb1ed13d97fe77 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 14 May 2025 21:56:03 +0200 Subject: [PATCH 135/492] resolve some comments from review --- README.md | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 3cfb587..8a2c4a4 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,8 @@ * Output in [PAGE-XML](https://github.com/PRImA-Research-Lab/PAGE-XML) * [OCR-D](https://github.com/qurator-spk/eynollah#use-as-ocr-d-processor) interface -:warning: Development is currently focused on achieving the best possible quality of results for a wide variety of historical documents and therefore processing can be very slow. We aim to improve this, but contributions are welcome. +:warning: Development is currently focused on achieving the best possible quality of results for a wide variety of +historical documents and therefore processing can be very slow. We aim to improve this, but contributions are welcome. ## Installation Python `3.8-3.11` with Tensorflow `<2.13` on Linux are currently supported. @@ -42,7 +43,7 @@ cd eynollah; pip install -e . Alternatively, you can run `make install` or `make install-dev` for editable installation. ## Models -Pre-trained models can be downloaded from [qurator-data.de](https://qurator-data.de/eynollah/) or [huggingface](https://huggingface.co/SBB?search_models=eynollah). +Pretrained models can be downloaded from [qurator-data.de](https://qurator-data.de/eynollah/) or [huggingface](https://huggingface.co/SBB?search_models=eynollah). For documentation on methods and models, have a look at [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). @@ -50,13 +51,17 @@ For documentation on methods and models, have a look at [`models.md`](https://gi In case you want to train your own model with Eynollah, have a look at [`train.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/train.md). ## Usage +Eynollah supports four use cases: layout analysis (segmentation), binarization, text recognition (OCR), +and (trainable) reading order detection. -Eynollah has four key use cases: layout analysis, binarization, OCR, and machine-based reading order. +### Layout Analysis +The layout analysis module is responsible for detecting layouts, identifying text lines, and determining reading order +using both heuristic methods or a machine-based reading order detection model. -### Layout -The layout module is responsible for detecting layouts, identifying text lines, and determining reading order using both heuristic methods or a machine-based reading order detection model. It's important to note that this functionality should not be confused with the machine-based-reading-order use case. The latter, still under development, focuses specifically on determining the reading order for a given layout in an XML file. In contrast, layout detection takes an image as input, and after detecting the layout, it can also determine the reading order using a machine-based model. +Note that there are currently two supported ways for reading order detection: either as part of layout analysis based +on image input, or, currently under development, for given layout analysis results based on PAGE-XML data as input. -The command-line interface for layout can be called like this: +The command-line interface for layout analysis can be called like this: ```sh eynollah layout \ @@ -87,18 +92,19 @@ The following options can be used to further configure the processing: | `-sp ` | save cropped page image to this directory | | `-sa ` | save all (plot, enhanced/binary image, layout) to this directory | -If no option is set, the tool performs layout detection of main regions (background, text, images, separators and marginals). +If no option is set, the tool performs layout detection of main regions (background, text, images, separators +and marginals). The best output quality is produced when RGB images are used as input rather than greyscale or binarized images. ### Binarization -Document Image Binarization +The binarization module performs document image binarization using pretrained pixelwise segmentation models. The command-line interface for binarization of single image can be called like this: ```sh eynollah binarization \ - -m \ - \ + -m \ + \ ``` @@ -117,9 +123,7 @@ Under development ### Machine-based-reading-order Under development - #### Use as OCR-D processor - Eynollah ships with a CLI interface to be used as [OCR-D](https://ocr-d.de) [processor](https://ocr-d.de/en/spec/cli), formally described in [`ocrd-tool.json`](https://github.com/qurator-spk/eynollah/tree/main/src/eynollah/ocrd-tool.json). @@ -127,7 +131,6 @@ In this case, the source image file group with (preferably) RGB images should be ocrd-eynollah-segment -I OCR-D-IMG -O OCR-D-SEG -P models 2022-04-05 - If the input file group is PAGE-XML (from a previous OCR-D workflow step), Eynollah behaves as follows: - existing regions are kept and ignored (i.e. in effect they might overlap segments from Eynollah results) - existing annotation (and respective `AlternativeImage`s) are partially _ignored_: @@ -138,7 +141,6 @@ If the input file group is PAGE-XML (from a previous OCR-D workflow step), Eynol (because some other preprocessing step was in effect like `denoised`), then the output PAGE-XML will be based on that as new top-level (`@imageFilename`) - ocrd-eynollah-segment -I OCR-D-XYZ -O OCR-D-SEG -P models 2022-04-05 Still, in general, it makes more sense to add other workflow steps **after** Eynollah. From 1b229ba7aeab5b5811ab3a3f0bf85ac3164ba0ec Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 15 May 2025 00:45:22 +0200 Subject: [PATCH 136/492] enhancement for vertical textlines --- src/eynollah/eynollah.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 5a73ef3..2e54687 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5626,8 +5626,8 @@ class Eynollah_ocr: if angle_degrees > 15: better_des_slope = self.get_orientation_moments(textline_coords) - img_crop = self.rotate_image_with_padding(img_crop, -abs(better_des_slope) ) - mask_poly = self.rotate_image_with_padding(mask_poly, -abs(better_des_slope) ) + img_crop = self.rotate_image_with_padding(img_crop, better_des_slope ) + mask_poly = self.rotate_image_with_padding(mask_poly, better_des_slope ) mask_poly = mask_poly.astype('uint8') #new bounding box From 1cbc669d36334d421dd2af9801e17456a35b0f01 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 15 May 2025 15:33:50 +0200 Subject: [PATCH 137/492] marginals detection enhanced for light version --- src/eynollah/utils/marginals.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/eynollah/utils/marginals.py b/src/eynollah/utils/marginals.py index a29e50d..c0c4892 100644 --- a/src/eynollah/utils/marginals.py +++ b/src/eynollah/utils/marginals.py @@ -26,8 +26,10 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_ve text_with_lines=resize_image(text_with_lines,int(text_with_lines.shape[0]*1.8),text_with_lines.shape[1]) text_with_lines=cv2.erode(text_with_lines,kernel,iterations=7) text_with_lines=resize_image(text_with_lines,text_with_lines_eroded.shape[0],text_with_lines_eroded.shape[1]) - - + + if light_version: + text_with_lines=rotate_image(text_with_lines,-slope_deskew) + text_with_lines_y=text_with_lines.sum(axis=0) text_with_lines_y_eroded=text_with_lines_eroded.sum(axis=0) From f9390c71e7ec3c577e80ad4a8894417481407f02 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Sat, 17 May 2025 02:18:27 +0200 Subject: [PATCH 138/492] updating inference for mb reading order --- train/gt_gen_utils.py | 2 +- train/inference.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 9b67563..a734020 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -154,7 +154,7 @@ def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, m jv += 1 return found_polygons_early -def filter_contours_area_of_image(image, contours, order_index, max_area, min_area, min_early): +def filter_contours_area_of_image(image, contours, order_index, max_area, min_area, min_early=None): found_polygons_early = list() order_index_filtered = list() regions_ar_less_than_early_min = list() diff --git a/train/inference.py b/train/inference.py index db3b31f..aecd0e6 100644 --- a/train/inference.py +++ b/train/inference.py @@ -267,7 +267,7 @@ class sbb_predict: #print(np.shape(co_text_all[0]), len( np.shape(co_text_all[0]) ),'co_text_all') #co_text_all = filter_contours_area_of_image_tables(img_poly, co_text_all, _, max_area, min_area) #print(co_text_all,'co_text_all') - co_text_all, texts_corr_order_index_int = filter_contours_area_of_image(img_poly, co_text_all, texts_corr_order_index_int, max_area, self.min_area) + co_text_all, texts_corr_order_index_int, _ = filter_contours_area_of_image(img_poly, co_text_all, texts_corr_order_index_int, max_area, self.min_area) #print(texts_corr_order_index_int) From 5016039cd74098b30ccc78b9f0d0bdf0bb91f351 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Sun, 18 May 2025 02:48:05 +0200 Subject: [PATCH 139/492] enhancing marginal detection for light version --- src/eynollah/eynollah.py | 7 +++---- src/eynollah/utils/marginals.py | 13 ++++++++----- src/eynollah/utils/separate_lines.py | 4 ++-- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 2e54687..08a781c 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -272,7 +272,7 @@ class Eynollah: self.model_region_dir_p_ens = dir_models + "/eynollah-main-regions-ensembled_20210425" self.model_region_dir_p_ens_light = dir_models + "/eynollah-main-regions_20220314" self.model_region_dir_p_ens_light_only_images_extraction = dir_models + "/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18" - self.model_reading_order_dir = dir_models + "/model_ens_reading_order_machine_based" + self.model_reading_order_dir = dir_models + "/model_step_2500000_mb_ro"#"/model_ens_reading_order_machine_based" #"/modelens_12sp_elay_0_3_4__3_6_n" #"/modelens_earlylayout_12spaltige_2_3_5_6_7_8" #"/modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18" @@ -1315,7 +1315,7 @@ class Eynollah: seg_art[seg_art>0] =1 seg_line = label_p_pred[:,:,:,3] - seg_line[seg_line>0.5] =1#seg_line[seg_line>0.1] =1 + seg_line[seg_line>0.3] =1#seg_line[seg_line>0.5] =1#seg_line[seg_line>0.1] =1 seg_line[seg_line<1] =0 ##seg[seg_art==1]=4 @@ -3667,7 +3667,6 @@ class Eynollah: peaks_real, _ = find_peaks(sum_smoothed, height=0) if len(peaks_real)>70: - print(len(peaks_real), 'len(peaks_real)') peaks_real = peaks_real[(peaks_realwidth1)] @@ -5133,7 +5132,7 @@ class Eynollah_ocr: self.b_s = int(batch_size) else: - self.model_ocr_dir = dir_models + "/model_step_425000_ocr"#"/model_step_125000_ocr"#"/model_step_25000_ocr"#"/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" + self.model_ocr_dir = dir_models + "/model_step_600000_ocr"#"/model_step_125000_ocr"#"/model_step_25000_ocr"#"/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" model_ocr = load_model(self.model_ocr_dir , compile=False) self.prediction_model = tf.keras.models.Model( diff --git a/src/eynollah/utils/marginals.py b/src/eynollah/utils/marginals.py index c0c4892..ac8dc1d 100644 --- a/src/eynollah/utils/marginals.py +++ b/src/eynollah/utils/marginals.py @@ -10,7 +10,6 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_ve mask_marginals=np.zeros((text_with_lines.shape[0],text_with_lines.shape[1])) mask_marginals=mask_marginals.astype(np.uint8) - text_with_lines=text_with_lines.astype(np.uint8) ##text_with_lines=cv2.erode(text_with_lines,self.kernel,iterations=3) @@ -26,9 +25,11 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_ve text_with_lines=resize_image(text_with_lines,int(text_with_lines.shape[0]*1.8),text_with_lines.shape[1]) text_with_lines=cv2.erode(text_with_lines,kernel,iterations=7) text_with_lines=resize_image(text_with_lines,text_with_lines_eroded.shape[0],text_with_lines_eroded.shape[1]) - + + if light_version: - text_with_lines=rotate_image(text_with_lines,-slope_deskew) + kernel_hor = np.ones((1, 5), dtype=np.uint8) + text_with_lines = cv2.erode(text_with_lines,kernel_hor,iterations=6) text_with_lines_y=text_with_lines.sum(axis=0) text_with_lines_y_eroded=text_with_lines_eroded.sum(axis=0) @@ -42,8 +43,10 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_ve elif thickness_along_y_percent>=30 and thickness_along_y_percent<50: min_textline_thickness=20 else: - min_textline_thickness=40 - + if light_version: + min_textline_thickness=45 + else: + min_textline_thickness=40 if thickness_along_y_percent>=14: diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 0322579..6289d4d 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1466,7 +1466,7 @@ def return_deskew_slop(img_patch_org, sigma_des,n_tot_angles=100, main_page=False, logger=None, plotter=None, map=map): if main_page and plotter: plotter.save_plot_of_textline_density(img_patch_org) - + img_int=np.zeros((img_patch_org.shape[0],img_patch_org.shape[1])) img_int[:,:]=img_patch_org[:,:]#img_patch_org[:,:,0] @@ -1487,7 +1487,7 @@ def return_deskew_slop(img_patch_org, sigma_des,n_tot_angles=100, angles = np.linspace(angle - 22.5, angle + 22.5, n_tot_angles) angle = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) elif main_page: - angles = np.linspace(-12, 12, n_tot_angles)#np.array([0 , 45 , 90 , -45]) + angles = np.array (list(np.linspace(-12, -7, int(n_tot_angles/4))) + list(np.linspace(-6, 6, n_tot_angles- 2* int(n_tot_angles/4))) + list(np.linspace(7, 12, int(n_tot_angles/4))))#np.linspace(-12, 12, n_tot_angles)#np.array([0 , 45 , 90 , -45]) angle = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) early_slope_edge=11 From 44ff51f5c17fb1836f76b3ea953e7470521d3300 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 20 May 2025 16:51:08 +0200 Subject: [PATCH 140/492] mb reading order now can be done faster. Text regions are clustered using dilation, and mb reading order needs to be implemented for fewer regions --- src/eynollah/eynollah.py | 181 +++++++++++++++++++++++++++++---- src/eynollah/utils/__init__.py | 2 +- 2 files changed, 163 insertions(+), 20 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 08a781c..eb5c860 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -32,6 +32,7 @@ from numba import cuda from skimage.morphology import skeletonize from ocrd import OcrdPage from ocrd_utils import getLogger, tf_disable_interactive_logs +import statistics try: import torch @@ -797,7 +798,7 @@ class Eynollah: self, patches, img, model, n_batch_inference=1, marginal_of_patch_percent=0.1, thresholding_for_some_classes_in_light_version=False, - thresholding_for_artificial_class_in_light_version=False, threshold_art_class_textline=0.1): + thresholding_for_artificial_class_in_light_version=False, thresholding_for_fl_light_version=False, threshold_art_class_textline=0.1): self.logger.debug("enter do_prediction") img_height_model = model.layers[-1].output_shape[1] @@ -822,6 +823,15 @@ class Eynollah: skeleton_art = skeleton_art*1 seg[skeleton_art==1]=2 + + if thresholding_for_fl_light_version: + seg_header = label_p_pred[0,:,:,2] + + seg_header[seg_header<0.2] = 0 + seg_header[seg_header>0] =1 + + seg[seg_header==1]=2 + seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2) prediction_true = resize_image(seg_color, img_h_page, img_w_page).astype(np.uint8) return prediction_true @@ -1613,10 +1623,11 @@ class Eynollah: model_region = self.model_region_fl if patches else self.model_region_fl_np if self.light_version: - pass + thresholding_for_fl_light_version = True elif not patches: img = otsu_copy_binary(img).astype(np.uint8) prediction_regions = None + thresholding_for_fl_light_version = False elif cols: img = otsu_copy_binary(img).astype(np.uint8) if cols == 1: @@ -1632,7 +1643,7 @@ class Eynollah: else: img = resize_image(img, int(img_height_h * 2500 / float(img_width_h)), 2500).astype(np.uint8) - prediction_regions = self.do_prediction(patches, img, model_region, marginal_of_patch_percent=0.1, n_batch_inference=3) + prediction_regions = self.do_prediction(patches, img, model_region, marginal_of_patch_percent=0.1, n_batch_inference=3, thresholding_for_fl_light_version=thresholding_for_fl_light_version) prediction_regions = resize_image(prediction_regions, img_height_h, img_width_h) self.logger.debug("exit extract_text_regions") return prediction_regions, prediction_regions @@ -3544,9 +3555,87 @@ class Eynollah: return model def do_order_of_regions_with_model(self, contours_only_text_parent, contours_only_text_parent_h, text_regions_p): + #cv2.imwrite('textregions.png', text_regions_p*50) + min_cont_size_to_be_dilated = 10 + if len(contours_only_text_parent)>min_cont_size_to_be_dilated: + ver_kernel = np.ones((5, 1), dtype=np.uint8) + + cx_conts, cy_conts, x_min_conts, x_max_conts, y_min_conts, y_max_conts, _ = find_new_features_of_contours(contours_only_text_parent) + args_cont_located = np.array(range(len(contours_only_text_parent))) + + diff_y_conts = np.abs(y_max_conts[:]-y_min_conts) + diff_x_conts = np.abs(x_max_conts[:]-x_min_conts) + + mean_x = statistics.mean(diff_x_conts) + median_x = statistics.median(diff_x_conts) + + + diff_x_ratio= diff_x_conts/mean_x + + args_cont_located_excluded = args_cont_located[diff_x_ratio>=1.3] + args_cont_located_included = args_cont_located[diff_x_ratio<1.3] + + contours_only_text_parent_excluded = [contours_only_text_parent[ind] for ind in range(len(contours_only_text_parent)) if diff_x_ratio[ind]>=1.3]#contours_only_text_parent[diff_x_ratio>=1.3] + contours_only_text_parent_included = [contours_only_text_parent[ind] for ind in range(len(contours_only_text_parent)) if diff_x_ratio[ind]<1.3]#contours_only_text_parent[diff_x_ratio<1.3] + + + cx_conts_excluded = [cx_conts[ind] for ind in range(len(cx_conts)) if diff_x_ratio[ind]>=1.3]#cx_conts[diff_x_ratio>=1.3] + cx_conts_included = [cx_conts[ind] for ind in range(len(cx_conts)) if diff_x_ratio[ind]<1.3]#cx_conts[diff_x_ratio<1.3] + + cy_conts_excluded = [cy_conts[ind] for ind in range(len(cy_conts)) if diff_x_ratio[ind]>=1.3]#cy_conts[diff_x_ratio>=1.3] + cy_conts_included = [cy_conts[ind] for ind in range(len(cy_conts)) if diff_x_ratio[ind]<1.3]#cy_conts[diff_x_ratio<1.3] + + #print(diff_x_ratio, 'ratio') + text_regions_p = text_regions_p.astype('uint8') + + if len(contours_only_text_parent_excluded)>0: + textregion_par = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1])).astype('uint8') + textregion_par = cv2.fillPoly(textregion_par, pts=contours_only_text_parent_included, color=(1,1)) + else: + textregion_par = (text_regions_p[:,:]==1)*1 + textregion_par = textregion_par.astype('uint8') + + + text_regions_p_textregions_dilated = cv2.dilate(textregion_par , ver_kernel, iterations=8) + text_regions_p_textregions_dilated[text_regions_p[:,:]>1] = 0 + + #cv2.imwrite('textregions_dilated.png', text_regions_p_textregions_dilated*255) + + + contours_only_dilated, hir_on_text_dilated = return_contours_of_image(text_regions_p_textregions_dilated) + contours_only_dilated = return_parent_contours(contours_only_dilated, hir_on_text_dilated) + + indexes_of_located_cont, center_x_coordinates_of_located, center_y_coordinates_of_located = self.return_indexes_of_contours_loctaed_inside_another_list_of_contours(contours_only_dilated, contours_only_text_parent_included, cx_conts_included, cy_conts_included, args_cont_located_included) + + + if len(args_cont_located_excluded)>0: + for ind in args_cont_located_excluded: + indexes_of_located_cont.append(np.array([ind])) + contours_only_dilated.append(contours_only_text_parent[ind]) + center_y_coordinates_of_located.append(0) + + array_list = [np.array([elem]) if isinstance(elem, int) else elem for elem in indexes_of_located_cont] + flattened_array = np.concatenate([arr.ravel() for arr in array_list]) + #print(len( np.unique(flattened_array)), 'indexes_of_located_cont uniques') + + missing_textregions = list( set(np.array(range(len(contours_only_text_parent))) ) - set(np.unique(flattened_array)) ) + #print(missing_textregions, 'missing_textregions') + + for ind in missing_textregions: + indexes_of_located_cont.append(np.array([ind])) + contours_only_dilated.append(contours_only_text_parent[ind]) + center_y_coordinates_of_located.append(0) + + + if contours_only_text_parent_h: + for vi in range(len(contours_only_text_parent_h)): + indexes_of_located_cont.append(int(vi+len(contours_only_text_parent))) + + array_list = [np.array([elem]) if isinstance(elem, int) else elem for elem in indexes_of_located_cont] + flattened_array = np.concatenate([arr.ravel() for arr in array_list]) + y_len = text_regions_p.shape[0] x_len = text_regions_p.shape[1] - img_poly = np.zeros((y_len,x_len), dtype='uint8') img_poly[text_regions_p[:,:]==1] = 1 @@ -3554,25 +3643,24 @@ class Eynollah: img_poly[text_regions_p[:,:]==3] = 4 img_poly[text_regions_p[:,:]==6] = 5 - - ###temp - ##sep_mask = (img_poly==5)*1 - ##sep_mask = sep_mask.astype('uint8') - ##sep_mask = cv2.erode(sep_mask, kernel=KERNEL, iterations=2) - ##img_poly[img_poly==5] = 0 - ##img_poly[sep_mask==1] = 5 - ### - img_header_and_sep = np.zeros((y_len,x_len), dtype='uint8') if contours_only_text_parent_h: _, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, _ = find_new_features_of_contours( contours_only_text_parent_h) for j in range(len(cy_main)): img_header_and_sep[int(y_max_main[j]):int(y_max_main[j])+12, - int(x_min_main[j]):int(x_max_main[j])] = 1 - co_text_all = contours_only_text_parent + contours_only_text_parent_h + int(x_min_main[j]):int(x_max_main[j])] = 1 + co_text_all_org = contours_only_text_parent + contours_only_text_parent_h + if len(contours_only_text_parent)>min_cont_size_to_be_dilated: + co_text_all = contours_only_dilated + contours_only_text_parent_h + else: + co_text_all = contours_only_text_parent + contours_only_text_parent_h else: - co_text_all = contours_only_text_parent + co_text_all_org = contours_only_text_parent + if len(contours_only_text_parent)>min_cont_size_to_be_dilated: + co_text_all = contours_only_dilated + else: + co_text_all = contours_only_text_parent if not len(co_text_all): return [], [] @@ -3651,8 +3739,26 @@ class Eynollah: break ordered = [i[0] for i in ordered] - region_ids = ['region_%04d' % i for i in range(len(co_text_all))] - return ordered, region_ids + + if len(contours_only_text_parent)>min_cont_size_to_be_dilated: + org_contours_indexes = [] + for ind in range(len(ordered)): + region_with_curr_order = ordered[ind] + if region_with_curr_order < len(contours_only_dilated): + if np.isscalar(indexes_of_located_cont[region_with_curr_order]): + org_contours_indexes = org_contours_indexes + [indexes_of_located_cont[region_with_curr_order]] + else: + arg_sort_located_cont = np.argsort(center_y_coordinates_of_located[region_with_curr_order]) + org_contours_indexes = org_contours_indexes + list(np.array(indexes_of_located_cont[region_with_curr_order])[arg_sort_located_cont]) ##org_contours_indexes + list ( + else: + org_contours_indexes = org_contours_indexes + [indexes_of_located_cont[region_with_curr_order]] + + region_ids = ['region_%04d' % i for i in range(len(co_text_all_org))] + return org_contours_indexes, region_ids + else: + region_ids = ['region_%04d' % i for i in range(len(co_text_all_org))] + return ordered, region_ids + def return_start_and_end_of_common_text_of_textline_ocr(self, textline_image, ind_tot): width = np.shape(textline_image)[1] @@ -4293,6 +4399,29 @@ class Eynollah: contours[ind_u_a_trs].pop(ittrd) return contours + + def return_indexes_of_contours_loctaed_inside_another_list_of_contours(self, contours, contours_loc, cx_main_loc, cy_main_loc, indexes_loc): + indexes_of_located_cont = [] + center_x_coordinates_of_located = [] + center_y_coordinates_of_located = [] + #M_main_tot = [cv2.moments(contours_loc[j]) + #for j in range(len(contours_loc))] + #cx_main_loc = [(M_main_tot[j]["m10"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] + #cy_main_loc = [(M_main_tot[j]["m01"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] + + for ij in range(len(contours)): + results = [cv2.pointPolygonTest(contours[ij], (cx_main_loc[ind], cy_main_loc[ind]), False) + for ind in range(len(cy_main_loc)) ] + results = np.array(results) + indexes_in = np.where((results == 0) | (results == 1)) + indexes = indexes_loc[indexes_in]# [(results == 0) | (results == 1)]#np.where((results == 0) | (results == 1)) + + indexes_of_located_cont.append(indexes) + center_x_coordinates_of_located.append(np.array(cx_main_loc)[indexes_in] ) + center_y_coordinates_of_located.append(np.array(cy_main_loc)[indexes_in] ) + + return indexes_of_located_cont, center_x_coordinates_of_located, center_y_coordinates_of_located + def filter_contours_without_textline_inside( self, contours,text_con_org, contours_textline, contours_only_text_parent_d_ordered, conf_contours_textregions): @@ -4986,8 +5115,10 @@ class Eynollah: if self.full_layout: if self.reading_order_machine_based: + tror = time.time() order_text_new, id_of_texts_tot = self.do_order_of_regions_with_model( contours_only_text_parent, contours_only_text_parent_h, text_regions_p) + print('time spend for mb ro', time.time()-tror) else: if np.abs(slope_deskew) < SLOPE_THRESHOLD: order_text_new, id_of_texts_tot = self.do_order_of_regions( @@ -5619,8 +5750,15 @@ class Eynollah_ocr: mask_poly = np.zeros(img.shape) mask_poly = cv2.fillPoly(mask_poly, pts=[textline_coords], color=(1, 1, 1)) + mask_poly = mask_poly[y:y+h, x:x+w, :] img_crop = img_poly_on_img[y:y+h, x:x+w, :] + + if angle_degrees<=15: + if mask_poly[:,:,0].sum() /float(w*h) < 0.6 and w_scaled > 520: + cv2.imwrite(file_name+'_desk.png', img_crop) + + print(file_name, angle_degrees,w*h , mask_poly[:,:,0].sum(), mask_poly[:,:,0].sum() /float(w*h) , 'didi') if not self.do_not_mask_with_textline_contour: if angle_degrees > 15: better_des_slope = self.get_orientation_moments(textline_coords) @@ -5634,6 +5772,11 @@ class Eynollah_ocr: mask_poly = mask_poly[y_n:y_n+h_n, x_n:x_n+w_n, :] img_crop = img_crop[y_n:y_n+h_n, x_n:x_n+w_n, :] + + if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.6 and w_scaled > 520: + cv2.imwrite(file_name+'_desk.png', img_crop) + + print(file_name,w_n*h_n , mask_poly[:,:,0].sum(), mask_poly[:,:,0].sum() /float(w_n*h_n) , 'ikiiiiii') img_crop[mask_poly==0] = 255 @@ -5641,7 +5784,7 @@ class Eynollah_ocr: img_crop_bin[mask_poly==0] = 255 if not self.export_textline_images_and_text: - if w_scaled < 1.5*image_width: + if w_scaled < 640:#1.5*image_width: img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) cropped_lines.append(img_fin) if angle_degrees > 15: diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index c5962f8..7fa4a7b 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -992,7 +992,7 @@ def check_any_text_region_in_model_one_is_main_or_header_light( (regions_model_full[:,:,0]==2)).sum() pixels_main = all_pixels - pixels_header - if (pixels_header/float(pixels_main)>=0.3) and ( (length_con[ii]/float(height_con[ii]) )>=1.3 ): + if ( (pixels_header/float(pixels_main)>=0.6) and ( (length_con[ii]/float(height_con[ii]) )>=1.3 ) and ( (length_con[ii]/float(height_con[ii]) )<=3 )) or ( (pixels_header/float(pixels_main)>=0.3) and ( (length_con[ii]/float(height_con[ii]) )>=3 ) ): regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=2 contours_only_text_parent_head.append(con) if contours_only_text_parent_d_ordered is not None: From 3ad621e956dd7cdb8e7f2d00edcfa4db7008d7d9 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 20 May 2025 19:01:52 +0200 Subject: [PATCH 141/492] ocr for curved lines --- src/eynollah/eynollah.py | 157 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 146 insertions(+), 11 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index eb5c860..912bc31 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5263,7 +5263,7 @@ class Eynollah_ocr: self.b_s = int(batch_size) else: - self.model_ocr_dir = dir_models + "/model_step_600000_ocr"#"/model_step_125000_ocr"#"/model_step_25000_ocr"#"/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" + self.model_ocr_dir = dir_models + "/model_step_750000_ocr"#"/model_step_125000_ocr"#"/model_step_25000_ocr"#"/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" model_ocr = load_model(self.model_ocr_dir , compile=False) self.prediction_model = tf.keras.models.Model( @@ -5464,7 +5464,7 @@ class Eynollah_ocr: return cropped_textline - def rotate_image_with_padding(self, image, angle): + def rotate_image_with_padding(self, image, angle, border_value=(0,0,0)): # Get image dimensions (h, w) = image.shape[:2] @@ -5485,7 +5485,7 @@ class Eynollah_ocr: rotation_matrix[1, 2] += (new_h / 2) - center[1] # Perform the rotation - rotated_image = cv2.warpAffine(image, rotation_matrix, (new_w, new_h), borderValue=(0, 0, 0)) + rotated_image = cv2.warpAffine(image, rotation_matrix, (new_w, new_h), borderValue=border_value) return rotated_image @@ -5496,6 +5496,21 @@ class Eynollah_ocr: else: angle = 0.5 * np.arctan2(2 * moments["mu11"], moments["mu20"] - moments["mu02"]) return np.degrees(angle) # Convert radians to degrees + + + def get_orientation_moments_of_mask(self, mask): + mask=mask.astype('uint8') + print(mask.shape) + contours, _ = cv2.findContours(mask[:,:,0], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + + largest_contour = max(contours, key=cv2.contourArea) if contours else None + + moments = cv2.moments(largest_contour) + if moments["mu20"] - moments["mu02"] == 0: # Avoid division by zero + return 90 if moments["mu11"] > 0 else -90 + else: + angle = 0.5 * np.arctan2(2 * moments["mu11"], moments["mu20"] - moments["mu02"]) + return np.degrees(angle) # Convert radians to degrees def get_contours_and_bounding_boxes(self, mask): # Find contours in the binary mask @@ -5508,6 +5523,121 @@ class Eynollah_ocr: #bounding_boxes.append((x, y, w, h)) return x, y, w, h + + def return_splitting_point_of_image(self, image_to_spliited): + width = np.shape(image_to_spliited)[1] + height = np.shape(image_to_spliited)[0] + common_window = int(0.03*width) + + width1 = int ( common_window) + width2 = int ( width - common_window ) + + img_sum = np.sum(image_to_spliited[:,:,0], axis=0) + sum_smoothed = gaussian_filter1d(img_sum, 3) + + peaks_real, _ = find_peaks(sum_smoothed, height=0) + + peaks_real = peaks_real[(peaks_realwidth1)] + + arg_sort = np.argsort(sum_smoothed[peaks_real]) + arg_sort4 =arg_sort[::-1][:4] + peaks_sort_4 = peaks_real[arg_sort][::-1][:4] + + return np.sort(peaks_sort_4) + + def break_curved_line_into_small_pieces_and_then_merge(self, img_curved, mask_curved): + peaks_4 = self.return_splitting_point_of_image(img_curved) + + + + img_0 = img_curved[:, :peaks_4[0], :] + img_1 = img_curved[:, peaks_4[0]:peaks_4[1], :] + img_2 = img_curved[:, peaks_4[1]:peaks_4[2], :] + img_3 = img_curved[:, peaks_4[2]:peaks_4[3], :] + img_4 = img_curved[:, peaks_4[3]:, :] + + + mask_0 = mask_curved[:, :peaks_4[0], :] + mask_1 = mask_curved[:, peaks_4[0]:peaks_4[1], :] + mask_2 = mask_curved[:, peaks_4[1]:peaks_4[2], :] + mask_3 = mask_curved[:, peaks_4[2]:peaks_4[3], :] + mask_4 = mask_curved[:, peaks_4[3]:, :] + + cv2.imwrite("split0.png", img_0) + cv2.imwrite("split1.png", img_1) + cv2.imwrite("split2.png", img_2) + cv2.imwrite("split3.png", img_3) + + or_ma_0 = self.get_orientation_moments_of_mask(mask_0) + or_ma_1 = self.get_orientation_moments_of_mask(mask_1) + or_ma_2 = self.get_orientation_moments_of_mask(mask_2) + or_ma_3 = self.get_orientation_moments_of_mask(mask_3) + or_ma_4 = self.get_orientation_moments_of_mask(mask_4) + + imgs_tot = [] + imgs_tot.append([img_0, mask_0, or_ma_0] ) + imgs_tot.append([img_1, mask_1, or_ma_1]) + imgs_tot.append([img_2, mask_2, or_ma_2]) + imgs_tot.append([img_3, mask_3, or_ma_3]) + imgs_tot.append([img_4, mask_4, or_ma_4]) + + w_tot_des_list = [] + w_tot_des = 0 + imgs_deskewed_list = [] + for ind in range(len(imgs_tot)): + img_in = imgs_tot[ind][0] + mask_in = imgs_tot[ind][1] + ori_in = imgs_tot[ind][2] + + if abs(ori_in)<45: + img_in_des = self.rotate_image_with_padding(img_in, ori_in, border_value=(255,255,255) ) + mask_in_des = self.rotate_image_with_padding(mask_in, ori_in) + mask_in_des = mask_in_des.astype('uint8') + + #new bounding box + x_n, y_n, w_n, h_n = self.get_contours_and_bounding_boxes(mask_in_des[:,:,0]) + + mask_in_des = mask_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] + img_in_des = img_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] + + w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) + img_in_des = resize_image(img_in_des, 32, w_relative) + + + else: + img_in_des = np.copy(img_in) + w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) + img_in_des = resize_image(img_in_des, 32, w_relative) + + w_tot_des+=img_in_des.shape[1] + w_tot_des_list.append(img_in_des.shape[1]) + imgs_deskewed_list.append(img_in_des) + + + + + img_final_deskewed = np.zeros((32, w_tot_des, 3))+255 + + w_indexer = 0 + for ind in range(len(w_tot_des_list)): + img_final_deskewed[:,w_indexer:w_indexer+w_tot_des_list[ind],:] = imgs_deskewed_list[ind][:,:,:] + w_indexer = w_indexer+w_tot_des_list[ind] + + #cv2.imwrite('final.png', img_final_deskewed) + #print(or_ma_0, or_ma_1, or_ma_2, or_ma_3, or_ma_4, 'orients') + + ##cv2.imwrite("split4.png", img_curved[:, peaks_4[3]:peaks_4[4], :]) + ##cv2.imwrite("split5.png", img_curved[:, peaks_4[4]:peaks_4[5], :]) + ##cv2.imwrite("split6.png", img_curved[:, peaks_4[5]:peaks_4[6], :]) + + ##cv2.imwrite("split7.png", img_curved[:, peaks_4[6]:peaks_4[7], :]) + ##cv2.imwrite("split8.png", img_curved[:, peaks_4[7]:peaks_4[8], :]) + ##cv2.imwrite("split9.png", img_curved[:, peaks_4[8]:peaks_4[9], :]) + + + #cv2.imwrite("split4.png", img_4) + #sys.exit() + return img_final_deskewed def run(self): ls_imgs = os.listdir(self.dir_in) @@ -5754,11 +5884,9 @@ class Eynollah_ocr: mask_poly = mask_poly[y:y+h, x:x+w, :] img_crop = img_poly_on_img[y:y+h, x:x+w, :] - if angle_degrees<=15: - if mask_poly[:,:,0].sum() /float(w*h) < 0.6 and w_scaled > 520: - cv2.imwrite(file_name+'_desk.png', img_crop) + - print(file_name, angle_degrees,w*h , mask_poly[:,:,0].sum(), mask_poly[:,:,0].sum() /float(w*h) , 'didi') + #print(file_name, angle_degrees,w*h , mask_poly[:,:,0].sum(), mask_poly[:,:,0].sum() /float(w*h) , 'didi') if not self.do_not_mask_with_textline_contour: if angle_degrees > 15: better_des_slope = self.get_orientation_moments(textline_coords) @@ -5773,12 +5901,19 @@ class Eynollah_ocr: mask_poly = mask_poly[y_n:y_n+h_n, x_n:x_n+w_n, :] img_crop = img_crop[y_n:y_n+h_n, x_n:x_n+w_n, :] - if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.6 and w_scaled > 520: - cv2.imwrite(file_name+'_desk.png', img_crop) + img_crop[mask_poly==0] = 255 - print(file_name,w_n*h_n , mask_poly[:,:,0].sum(), mask_poly[:,:,0].sum() /float(w_n*h_n) , 'ikiiiiii') + if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 100: + img_crop = self.break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) - img_crop[mask_poly==0] = 255 + #print(file_name,w_n*h_n , mask_poly[:,:,0].sum(), mask_poly[:,:,0].sum() /float(w_n*h_n) , 'ikiiiiii') + else: + img_crop[mask_poly==0] = 255 + if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 100: + img_crop = self.break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) + + + if self.prediction_with_both_of_rgb_and_bin: img_crop_bin[mask_poly==0] = 255 From 14b70c25565d595ad31f6b1ce2e77df491f78679 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 21 May 2025 14:39:31 +0200 Subject: [PATCH 142/492] Implement hyphenated textline merging in OCR engine and a bug fixed for curved textline OCR --- src/eynollah/eynollah.py | 157 ++++++++++++++++++--------------------- 1 file changed, 71 insertions(+), 86 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 912bc31..6771db0 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5500,7 +5500,6 @@ class Eynollah_ocr: def get_orientation_moments_of_mask(self, mask): mask=mask.astype('uint8') - print(mask.shape) contours, _ = cv2.findContours(mask[:,:,0], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) largest_contour = max(contours, key=cv2.contourArea) if contours else None @@ -5547,97 +5546,69 @@ class Eynollah_ocr: def break_curved_line_into_small_pieces_and_then_merge(self, img_curved, mask_curved): peaks_4 = self.return_splitting_point_of_image(img_curved) - - - - img_0 = img_curved[:, :peaks_4[0], :] - img_1 = img_curved[:, peaks_4[0]:peaks_4[1], :] - img_2 = img_curved[:, peaks_4[1]:peaks_4[2], :] - img_3 = img_curved[:, peaks_4[2]:peaks_4[3], :] - img_4 = img_curved[:, peaks_4[3]:, :] - - - mask_0 = mask_curved[:, :peaks_4[0], :] - mask_1 = mask_curved[:, peaks_4[0]:peaks_4[1], :] - mask_2 = mask_curved[:, peaks_4[1]:peaks_4[2], :] - mask_3 = mask_curved[:, peaks_4[2]:peaks_4[3], :] - mask_4 = mask_curved[:, peaks_4[3]:, :] - - cv2.imwrite("split0.png", img_0) - cv2.imwrite("split1.png", img_1) - cv2.imwrite("split2.png", img_2) - cv2.imwrite("split3.png", img_3) - - or_ma_0 = self.get_orientation_moments_of_mask(mask_0) - or_ma_1 = self.get_orientation_moments_of_mask(mask_1) - or_ma_2 = self.get_orientation_moments_of_mask(mask_2) - or_ma_3 = self.get_orientation_moments_of_mask(mask_3) - or_ma_4 = self.get_orientation_moments_of_mask(mask_4) - - imgs_tot = [] - imgs_tot.append([img_0, mask_0, or_ma_0] ) - imgs_tot.append([img_1, mask_1, or_ma_1]) - imgs_tot.append([img_2, mask_2, or_ma_2]) - imgs_tot.append([img_3, mask_3, or_ma_3]) - imgs_tot.append([img_4, mask_4, or_ma_4]) - - w_tot_des_list = [] - w_tot_des = 0 - imgs_deskewed_list = [] - for ind in range(len(imgs_tot)): - img_in = imgs_tot[ind][0] - mask_in = imgs_tot[ind][1] - ori_in = imgs_tot[ind][2] + if len(peaks_4)>0: + imgs_tot = [] - if abs(ori_in)<45: - img_in_des = self.rotate_image_with_padding(img_in, ori_in, border_value=(255,255,255) ) - mask_in_des = self.rotate_image_with_padding(mask_in, ori_in) - mask_in_des = mask_in_des.astype('uint8') + for ind in range(len(peaks_4)+1): + if ind==0: + img = img_curved[:, :peaks_4[ind], :] + mask = mask_curved[:, :peaks_4[ind], :] + elif ind==len(peaks_4): + img = img_curved[:, peaks_4[ind-1]:, :] + mask = mask_curved[:, peaks_4[ind-1]:, :] + else: + img = img_curved[:, peaks_4[ind-1]:peaks_4[ind], :] + mask = mask_curved[:, peaks_4[ind-1]:peaks_4[ind], :] + + or_ma = self.get_orientation_moments_of_mask(mask) + + imgs_tot.append([img, mask, or_ma] ) + + + w_tot_des_list = [] + w_tot_des = 0 + imgs_deskewed_list = [] + for ind in range(len(imgs_tot)): + img_in = imgs_tot[ind][0] + mask_in = imgs_tot[ind][1] + ori_in = imgs_tot[ind][2] - #new bounding box - x_n, y_n, w_n, h_n = self.get_contours_and_bounding_boxes(mask_in_des[:,:,0]) + if abs(ori_in)<45: + img_in_des = self.rotate_image_with_padding(img_in, ori_in, border_value=(255,255,255) ) + mask_in_des = self.rotate_image_with_padding(mask_in, ori_in) + mask_in_des = mask_in_des.astype('uint8') + + #new bounding box + x_n, y_n, w_n, h_n = self.get_contours_and_bounding_boxes(mask_in_des[:,:,0]) + + mask_in_des = mask_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] + img_in_des = img_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] + + w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) + img_in_des = resize_image(img_in_des, 32, w_relative) + + + else: + img_in_des = np.copy(img_in) + w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) + img_in_des = resize_image(img_in_des, 32, w_relative) + + w_tot_des+=img_in_des.shape[1] + w_tot_des_list.append(img_in_des.shape[1]) + imgs_deskewed_list.append(img_in_des) - mask_in_des = mask_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] - img_in_des = img_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] - w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) - img_in_des = resize_image(img_in_des, 32, w_relative) - else: - img_in_des = np.copy(img_in) - w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) - img_in_des = resize_image(img_in_des, 32, w_relative) - - w_tot_des+=img_in_des.shape[1] - w_tot_des_list.append(img_in_des.shape[1]) - imgs_deskewed_list.append(img_in_des) + img_final_deskewed = np.zeros((32, w_tot_des, 3))+255 - - - - img_final_deskewed = np.zeros((32, w_tot_des, 3))+255 - - w_indexer = 0 - for ind in range(len(w_tot_des_list)): - img_final_deskewed[:,w_indexer:w_indexer+w_tot_des_list[ind],:] = imgs_deskewed_list[ind][:,:,:] - w_indexer = w_indexer+w_tot_des_list[ind] - - #cv2.imwrite('final.png', img_final_deskewed) - #print(or_ma_0, or_ma_1, or_ma_2, or_ma_3, or_ma_4, 'orients') - - ##cv2.imwrite("split4.png", img_curved[:, peaks_4[3]:peaks_4[4], :]) - ##cv2.imwrite("split5.png", img_curved[:, peaks_4[4]:peaks_4[5], :]) - ##cv2.imwrite("split6.png", img_curved[:, peaks_4[5]:peaks_4[6], :]) - - ##cv2.imwrite("split7.png", img_curved[:, peaks_4[6]:peaks_4[7], :]) - ##cv2.imwrite("split8.png", img_curved[:, peaks_4[7]:peaks_4[8], :]) - ##cv2.imwrite("split9.png", img_curved[:, peaks_4[8]:peaks_4[9], :]) - - - #cv2.imwrite("split4.png", img_4) - #sys.exit() - return img_final_deskewed + w_indexer = 0 + for ind in range(len(w_tot_des_list)): + img_final_deskewed[:,w_indexer:w_indexer+w_tot_des_list[ind],:] = imgs_deskewed_list[ind][:,:,:] + w_indexer = w_indexer+w_tot_des_list[ind] + return img_final_deskewed + else: + return img_curved def run(self): ls_imgs = os.listdir(self.dir_in) @@ -6144,7 +6115,21 @@ class Eynollah_ocr: text_by_textregion = [] for ind in unique_cropped_lines_region_indexer: extracted_texts_merged_un = np.array(extracted_texts_merged)[np.array(cropped_lines_region_indexer)==ind] - text_by_textregion.append("".join(extracted_texts_merged_un)) + if len(extracted_texts_merged_un)>1: + text_by_textregion_ind = "" + next_glue = "" + for indt in range(len(extracted_texts_merged_un)): + if extracted_texts_merged_un[indt].endswith('⸗') or extracted_texts_merged_un[indt].endswith('-'): + text_by_textregion_ind = text_by_textregion_ind + next_glue + extracted_texts_merged_un[indt][:-1] + next_glue = "" + else: + text_by_textregion_ind = text_by_textregion_ind + next_glue + extracted_texts_merged_un[indt] + next_glue = " " + text_by_textregion.append(text_by_textregion_ind) + + else: + text_by_textregion.append(" ".join(extracted_texts_merged_un)) + #print(text_by_textregion, 'text_by_textregiontext_by_textregiontext_by_textregiontext_by_textregiontext_by_textregion') indexer = 0 indexer_textregion = 0 From ee2c7e90137988e83fe3f2204c8a46849cce0f19 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 21 May 2025 17:42:44 +0200 Subject: [PATCH 143/492] enhancing curved lines OCR --- src/eynollah/eynollah.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 6771db0..b510218 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5532,14 +5532,12 @@ class Eynollah_ocr: width2 = int ( width - common_window ) img_sum = np.sum(image_to_spliited[:,:,0], axis=0) - sum_smoothed = gaussian_filter1d(img_sum, 3) + sum_smoothed = gaussian_filter1d(img_sum, 1) peaks_real, _ = find_peaks(sum_smoothed, height=0) - peaks_real = peaks_real[(peaks_realwidth1)] - + arg_sort = np.argsort(sum_smoothed[peaks_real]) - arg_sort4 =arg_sort[::-1][:4] peaks_sort_4 = peaks_real[arg_sort][::-1][:4] return np.sort(peaks_sort_4) @@ -5585,12 +5583,16 @@ class Eynollah_ocr: img_in_des = img_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) + if w_relative==0: + w_relative = img_in_des.shape[1] img_in_des = resize_image(img_in_des, 32, w_relative) else: img_in_des = np.copy(img_in) w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) + if w_relative==0: + w_relative = img_in_des.shape[1] img_in_des = resize_image(img_in_des, 32, w_relative) w_tot_des+=img_in_des.shape[1] From 089029cec734b254ba9737b0d213f49d1d16beef Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 23 May 2025 15:55:03 +0200 Subject: [PATCH 144/492] commit 21ec4fb is picked + rnn ocr at the same time with segmentation + enhancement of mb reading order --- src/eynollah/cli.py | 15 +- src/eynollah/eynollah.py | 775 +++++++++++--------------------- src/eynollah/utils/utils_ocr.py | 435 ++++++++++++++++++ src/eynollah/writer.py | 30 +- 4 files changed, 729 insertions(+), 526 deletions(-) create mode 100644 src/eynollah/utils/utils_ocr.py diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 99961c9..cd56833 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -225,6 +225,17 @@ def binarization(patches, model_dir, input_image, output_image, dir_in, dir_out) is_flag=True, help="if this parameter set to true, this tool will try to do ocr", ) +@click.option( + "--transformer_ocr", + "-tr/-notr", + is_flag=True, + help="if this parameter set to true, this tool will apply transformer ocr", +) +@click.option( + "--batch_size_ocr", + "-bs_ocr", + help="number of inference batch size of ocr model. Default b_s for trocr and cnn_rnn models are 2 and 8 respectively", +) @click.option( "--num_col_upper", "-ncu", @@ -258,7 +269,7 @@ def binarization(patches, model_dir, input_image, output_image, dir_in, dir_out) help="Override log level globally to this", ) -def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_deskewed, save_all, extract_only_images, save_page, enable_plotting, allow_enhancement, curved_line, textline_light, full_layout, tables, right2left, input_binary, allow_scaling, headers_off, light_version, reading_order_machine_based, do_ocr, num_col_upper, num_col_lower, threshold_art_class_textline, threshold_art_class_layout, skip_layout_and_reading_order, ignore_page_extraction, log_level): +def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_deskewed, save_all, extract_only_images, save_page, enable_plotting, allow_enhancement, curved_line, textline_light, full_layout, tables, right2left, input_binary, allow_scaling, headers_off, light_version, reading_order_machine_based, do_ocr, transformer_ocr, batch_size_ocr, num_col_upper, num_col_lower, threshold_art_class_textline, threshold_art_class_layout, skip_layout_and_reading_order, ignore_page_extraction, log_level): initLogging() if log_level: getLogger('eynollah').setLevel(getLevelName(log_level)) @@ -305,6 +316,8 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ ignore_page_extraction=ignore_page_extraction, reading_order_machine_based=reading_order_machine_based, do_ocr=do_ocr, + transformer_ocr=transformer_ocr, + batch_size_ocr=batch_size_ocr, num_col_upper=num_col_upper, num_col_lower=num_col_lower, skip_layout_and_reading_order=skip_layout_and_reading_order, diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index b510218..2564150 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -80,6 +80,13 @@ from .utils.rotate import ( rotation_not_90_func_full_layout, rotation_image_new ) +from .utils.utils_ocr import ( + return_textline_contour_with_added_box_coordinate, + preprocess_and_resize_image_for_ocrcnn_model, + return_textlines_split_if_needed, + decode_batch_predictions, + return_rnn_cnn_ocr_of_given_textlines +) from .utils.separate_lines import ( textline_contours_postprocessing, separate_lines_new2, @@ -199,6 +206,8 @@ class Eynollah: ignore_page_extraction : bool = False, reading_order_machine_based : bool = False, do_ocr : bool = False, + transformer_ocr: bool = False, + batch_size_ocr: Optional[int] = None, num_col_upper : Optional[int] = None, num_col_lower : Optional[int] = None, threshold_art_class_layout: Optional[float] = None, @@ -232,6 +241,7 @@ class Eynollah: self.ignore_page_extraction = ignore_page_extraction self.skip_layout_and_reading_order = skip_layout_and_reading_order self.ocr = do_ocr + self.tr = transformer_ocr if num_col_upper: self.num_col_upper = int(num_col_upper) else: @@ -273,7 +283,7 @@ class Eynollah: self.model_region_dir_p_ens = dir_models + "/eynollah-main-regions-ensembled_20210425" self.model_region_dir_p_ens_light = dir_models + "/eynollah-main-regions_20220314" self.model_region_dir_p_ens_light_only_images_extraction = dir_models + "/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18" - self.model_reading_order_dir = dir_models + "/model_step_2500000_mb_ro"#"/model_ens_reading_order_machine_based" + self.model_reading_order_dir = dir_models + "/model_step_4800000_mb_ro"#"/model_ens_reading_order_machine_based" #"/modelens_12sp_elay_0_3_4__3_6_n" #"/modelens_earlylayout_12spaltige_2_3_5_6_7_8" #"/modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18" @@ -300,8 +310,10 @@ class Eynollah: else: #"/eynollah-textline_20210425" self.model_textline_dir = dir_models + "/modelens_textline_0_1__2_4_16092024" - if self.ocr: + if self.ocr and self.tr: self.model_ocr_dir = dir_models + "/trocr_model_ens_of_3_checkpoints_201124" + elif self.ocr and not self.tr: + self.model_ocr_dir = dir_models + "/model_step_750000_ocr"#"/model_step_125000_ocr"#"/model_step_25000_ocr"#"/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" if self.tables: if self.light_version: self.model_table_dir = dir_models + "/modelens_table_0t4_201124" @@ -341,11 +353,37 @@ class Eynollah: self.model_region_fl = self.our_load_model(self.model_region_dir_fully) if self.reading_order_machine_based: self.model_reading_order = self.our_load_model(self.model_reading_order_dir) - if self.ocr: + if self.ocr and self.tr: self.model_ocr = VisionEncoderDecoderModel.from_pretrained(self.model_ocr_dir) self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") #("microsoft/trocr-base-printed")#("microsoft/trocr-base-handwritten") self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten") + elif self.ocr and not self.tr: + model_ocr = load_model(self.model_ocr_dir , compile=False) + + self.prediction_model = tf.keras.models.Model( + model_ocr.get_layer(name = "image").input, + model_ocr.get_layer(name = "dense2").output) + if not batch_size_ocr: + self.b_s_ocr = 8 + else: + self.b_s_ocr = int(batch_size_ocr) + + + with open(os.path.join(self.model_ocr_dir, "characters_org.txt"),"r") as config_file: + characters = json.load(config_file) + + + AUTOTUNE = tf.data.AUTOTUNE + + # Mapping characters to integers. + char_to_num = StringLookup(vocabulary=list(characters), mask_token=None) + + # Mapping integers back to original characters. + self.num_to_char = StringLookup( + vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True + ) + if self.tables: self.model_table = self.our_load_model(self.model_table_dir) @@ -1325,11 +1363,11 @@ class Eynollah: seg_art[seg_art>0] =1 seg_line = label_p_pred[:,:,:,3] - seg_line[seg_line>0.3] =1#seg_line[seg_line>0.5] =1#seg_line[seg_line>0.1] =1 + seg_line[seg_line>0.4] =1#seg_line[seg_line>0.5] =1#seg_line[seg_line>0.1] =1 seg_line[seg_line<1] =0 ##seg[seg_art==1]=4 - seg[(seg_line==1) & (seg==0)]=3 + #seg[(seg_line==1) & (seg==0)]=3 if thresholding_for_artificial_class_in_light_version: seg_art = label_p_pred[:,:,:,2] @@ -2060,7 +2098,7 @@ class Eynollah: ###img_bin = np.copy(prediction_bin) ###else: ###img_bin = np.copy(img_resized) - if self.ocr and not self.input_binary: + if (self.ocr and self.tr) and not self.input_binary: prediction_bin = self.do_prediction(True, img_resized, self.model_bin, n_batch_inference=5) prediction_bin = 255 * (prediction_bin[:,:,0] == 0) prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) @@ -3485,8 +3523,10 @@ class Eynollah: # 6 is the separators lable in old full layout model # 4 is the drop capital class in old full layout model # in the new full layout drop capital is 3 and separators are 5 - - text_regions_p[:,:][regions_fully[:,:,0]==5]=6 + + # the separators in full layout will not be written on layout + if not self.reading_order_machine_based: + text_regions_p[:,:][regions_fully[:,:,0]==5]=6 ###regions_fully[:, :, 0][regions_fully_only_drop[:, :, 0] == 3] = 4 #text_regions_p[:,:][regions_fully[:,:,0]==6]=6 @@ -3555,11 +3595,37 @@ class Eynollah: return model def do_order_of_regions_with_model(self, contours_only_text_parent, contours_only_text_parent_h, text_regions_p): - #cv2.imwrite('textregions.png', text_regions_p*50) + + height1 =672#448 + width1 = 448#224 + + height2 =672#448 + width2= 448#224 + + height3 =672#448 + width3 = 448#224 + + inference_bs = 3 + + cv2.imwrite('textregions.png', text_regions_p*50) + cv2.imwrite('sep.png', (text_regions_p[:,:]==6)*255) + + ver_kernel = np.ones((5, 1), dtype=np.uint8) + hor_kernel = np.ones((1, 5), dtype=np.uint8) + + + + #separators = (text_regions_p[:,:]==6)*1 + #text_regions_p[text_regions_p[:,:]==6] = 0 + #separators = separators.astype('uint8') + + #separators = cv2.erode(separators , hor_kernel, iterations=1) + #text_regions_p[separators[:,:]==1] = 6 + + #cv2.imwrite('sep_new.png', (text_regions_p[:,:]==6)*255) + min_cont_size_to_be_dilated = 10 if len(contours_only_text_parent)>min_cont_size_to_be_dilated: - ver_kernel = np.ones((5, 1), dtype=np.uint8) - cx_conts, cy_conts, x_min_conts, x_max_conts, y_min_conts, y_max_conts, _ = find_new_features_of_contours(contours_only_text_parent) args_cont_located = np.array(range(len(contours_only_text_parent))) @@ -3595,12 +3661,13 @@ class Eynollah: textregion_par = (text_regions_p[:,:]==1)*1 textregion_par = textregion_par.astype('uint8') - - text_regions_p_textregions_dilated = cv2.dilate(textregion_par , ver_kernel, iterations=8) + text_regions_p_textregions_dilated = cv2.erode(textregion_par , hor_kernel, iterations=2) + text_regions_p_textregions_dilated = cv2.dilate(text_regions_p_textregions_dilated , ver_kernel, iterations=4) + text_regions_p_textregions_dilated = cv2.erode(text_regions_p_textregions_dilated , hor_kernel, iterations=1) + text_regions_p_textregions_dilated = cv2.dilate(text_regions_p_textregions_dilated , ver_kernel, iterations=5) text_regions_p_textregions_dilated[text_regions_p[:,:]>1] = 0 - #cv2.imwrite('textregions_dilated.png', text_regions_p_textregions_dilated*255) - + cv2.imwrite('text_regions_p_textregions_dilated.png', text_regions_p_textregions_dilated*255) contours_only_dilated, hir_on_text_dilated = return_contours_of_image(text_regions_p_textregions_dilated) contours_only_dilated = return_parent_contours(contours_only_dilated, hir_on_text_dilated) @@ -3664,7 +3731,8 @@ class Eynollah: if not len(co_text_all): return [], [] - + print(len(co_text_all), "co_text_all") + print(len(co_text_all_org), "co_text_all_org") labels_con = np.zeros((int(y_len /6.), int(x_len/6.), len(co_text_all)), dtype=bool) co_text_all = [(i/6).astype(int) for i in co_text_all] for i in range(len(co_text_all)): @@ -3675,21 +3743,13 @@ class Eynollah: cv2.fillPoly(img, pts=[co_text_all[i]], color=(1,)) labels_con[:,:,i] = img - height1 =672#448 - width1 = 448#224 - - height2 =672#448 - width2= 448#224 - - height3 =672#448 - width3 = 448#224 labels_con = resize_image(labels_con.astype(np.uint8), height1, width1).astype(bool) img_header_and_sep = resize_image(img_header_and_sep, height1, width1) img_poly = resize_image(img_poly, height3, width3) - inference_bs = 3 + input_1 = np.zeros((inference_bs, height1, width1, 3)) ordered = [list(range(len(co_text_all)))] index_update = 0 @@ -3760,217 +3820,213 @@ class Eynollah: return ordered, region_ids - def return_start_and_end_of_common_text_of_textline_ocr(self, textline_image, ind_tot): - width = np.shape(textline_image)[1] - height = np.shape(textline_image)[0] - common_window = int(0.2*width) + ####def return_start_and_end_of_common_text_of_textline_ocr(self, textline_image, ind_tot): + ####width = np.shape(textline_image)[1] + ####height = np.shape(textline_image)[0] + ####common_window = int(0.2*width) - width1 = int ( width/2. - common_window ) - width2 = int ( width/2. + common_window ) + ####width1 = int ( width/2. - common_window ) + ####width2 = int ( width/2. + common_window ) - img_sum = np.sum(textline_image[:,:,0], axis=0) - sum_smoothed = gaussian_filter1d(img_sum, 3) + ####img_sum = np.sum(textline_image[:,:,0], axis=0) + ####sum_smoothed = gaussian_filter1d(img_sum, 3) - peaks_real, _ = find_peaks(sum_smoothed, height=0) - if len(peaks_real)>70: + ####peaks_real, _ = find_peaks(sum_smoothed, height=0) + ####if len(peaks_real)>70: - peaks_real = peaks_real[(peaks_realwidth1)] + ####peaks_real = peaks_real[(peaks_realwidth1)] - arg_sort = np.argsort(sum_smoothed[peaks_real]) - arg_sort4 =arg_sort[::-1][:4] - peaks_sort_4 = peaks_real[arg_sort][::-1][:4] - argsort_sorted = np.argsort(peaks_sort_4) + ####arg_sort = np.argsort(sum_smoothed[peaks_real]) + ####arg_sort4 =arg_sort[::-1][:4] + ####peaks_sort_4 = peaks_real[arg_sort][::-1][:4] + ####argsort_sorted = np.argsort(peaks_sort_4) - first_4_sorted = peaks_sort_4[argsort_sorted] - y_4_sorted = sum_smoothed[peaks_real][arg_sort4[argsort_sorted]] - #print(first_4_sorted,'first_4_sorted') + ####first_4_sorted = peaks_sort_4[argsort_sorted] + ####y_4_sorted = sum_smoothed[peaks_real][arg_sort4[argsort_sorted]] + #####print(first_4_sorted,'first_4_sorted') - arg_sortnew = np.argsort(y_4_sorted) - peaks_final =np.sort( first_4_sorted[arg_sortnew][2:] ) + ####arg_sortnew = np.argsort(y_4_sorted) + ####peaks_final =np.sort( first_4_sorted[arg_sortnew][2:] ) - #plt.figure(ind_tot) - #plt.imshow(textline_image) - #plt.plot([peaks_final[0], peaks_final[0]], [0, height-1]) - #plt.plot([peaks_final[1], peaks_final[1]], [0, height-1]) - #plt.savefig('./'+str(ind_tot)+'.png') + #####plt.figure(ind_tot) + #####plt.imshow(textline_image) + #####plt.plot([peaks_final[0], peaks_final[0]], [0, height-1]) + #####plt.plot([peaks_final[1], peaks_final[1]], [0, height-1]) + #####plt.savefig('./'+str(ind_tot)+'.png') - return peaks_final[0], peaks_final[1] - else: - pass + ####return peaks_final[0], peaks_final[1] + ####else: + ####pass - def return_start_and_end_of_common_text_of_textline_ocr_without_common_section(self, textline_image, ind_tot): - width = np.shape(textline_image)[1] - height = np.shape(textline_image)[0] - common_window = int(0.06*width) + ##def return_start_and_end_of_common_text_of_textline_ocr_without_common_section(self, textline_image, ind_tot): + ##width = np.shape(textline_image)[1] + ##height = np.shape(textline_image)[0] + ##common_window = int(0.06*width) - width1 = int ( width/2. - common_window ) - width2 = int ( width/2. + common_window ) + ##width1 = int ( width/2. - common_window ) + ##width2 = int ( width/2. + common_window ) - img_sum = np.sum(textline_image[:,:,0], axis=0) - sum_smoothed = gaussian_filter1d(img_sum, 3) + ##img_sum = np.sum(textline_image[:,:,0], axis=0) + ##sum_smoothed = gaussian_filter1d(img_sum, 3) - peaks_real, _ = find_peaks(sum_smoothed, height=0) - if len(peaks_real)>70: - #print(len(peaks_real), 'len(peaks_real)') + ##peaks_real, _ = find_peaks(sum_smoothed, height=0) + ##if len(peaks_real)>70: + ###print(len(peaks_real), 'len(peaks_real)') - peaks_real = peaks_real[(peaks_realwidth1)] + ##peaks_real = peaks_real[(peaks_realwidth1)] - arg_max = np.argmax(sum_smoothed[peaks_real]) - peaks_final = peaks_real[arg_max] + ##arg_max = np.argmax(sum_smoothed[peaks_real]) + ##peaks_final = peaks_real[arg_max] - #plt.figure(ind_tot) - #plt.imshow(textline_image) - #plt.plot([peaks_final, peaks_final], [0, height-1]) - ##plt.plot([peaks_final[1], peaks_final[1]], [0, height-1]) - #plt.savefig('./'+str(ind_tot)+'.png') + ###plt.figure(ind_tot) + ###plt.imshow(textline_image) + ###plt.plot([peaks_final, peaks_final], [0, height-1]) + ####plt.plot([peaks_final[1], peaks_final[1]], [0, height-1]) + ###plt.savefig('./'+str(ind_tot)+'.png') - return peaks_final - else: - return None + ##return peaks_final + ##else: + ##return None - def return_start_and_end_of_common_text_of_textline_ocr_new_splitted( - self, peaks_real, sum_smoothed, start_split, end_split): + ###def return_start_and_end_of_common_text_of_textline_ocr_new_splitted( + ###self, peaks_real, sum_smoothed, start_split, end_split): - peaks_real = peaks_real[(peaks_realstart_split)] + ###peaks_real = peaks_real[(peaks_realstart_split)] - arg_sort = np.argsort(sum_smoothed[peaks_real]) - arg_sort4 =arg_sort[::-1][:4] - peaks_sort_4 = peaks_real[arg_sort][::-1][:4] - argsort_sorted = np.argsort(peaks_sort_4) + ###arg_sort = np.argsort(sum_smoothed[peaks_real]) + ###arg_sort4 =arg_sort[::-1][:4] + ###peaks_sort_4 = peaks_real[arg_sort][::-1][:4] + ###argsort_sorted = np.argsort(peaks_sort_4) - first_4_sorted = peaks_sort_4[argsort_sorted] - y_4_sorted = sum_smoothed[peaks_real][arg_sort4[argsort_sorted]] - #print(first_4_sorted,'first_4_sorted') + ###first_4_sorted = peaks_sort_4[argsort_sorted] + ###y_4_sorted = sum_smoothed[peaks_real][arg_sort4[argsort_sorted]] + ####print(first_4_sorted,'first_4_sorted') - arg_sortnew = np.argsort(y_4_sorted) - peaks_final =np.sort( first_4_sorted[arg_sortnew][3:] ) - return peaks_final[0] + ###arg_sortnew = np.argsort(y_4_sorted) + ###peaks_final =np.sort( first_4_sorted[arg_sortnew][3:] ) + ###return peaks_final[0] - def return_start_and_end_of_common_text_of_textline_ocr_new(self, textline_image, ind_tot): - width = np.shape(textline_image)[1] - height = np.shape(textline_image)[0] - common_window = int(0.15*width) + ###def return_start_and_end_of_common_text_of_textline_ocr_new(self, textline_image, ind_tot): + ###width = np.shape(textline_image)[1] + ###height = np.shape(textline_image)[0] + ###common_window = int(0.15*width) - width1 = int ( width/2. - common_window ) - width2 = int ( width/2. + common_window ) - mid = int(width/2.) + ###width1 = int ( width/2. - common_window ) + ###width2 = int ( width/2. + common_window ) + ###mid = int(width/2.) - img_sum = np.sum(textline_image[:,:,0], axis=0) - sum_smoothed = gaussian_filter1d(img_sum, 3) + ###img_sum = np.sum(textline_image[:,:,0], axis=0) + ###sum_smoothed = gaussian_filter1d(img_sum, 3) - peaks_real, _ = find_peaks(sum_smoothed, height=0) - if len(peaks_real)>70: - peak_start = self.return_start_and_end_of_common_text_of_textline_ocr_new_splitted( - peaks_real, sum_smoothed, width1, mid+2) - peak_end = self.return_start_and_end_of_common_text_of_textline_ocr_new_splitted( - peaks_real, sum_smoothed, mid-2, width2) + ###peaks_real, _ = find_peaks(sum_smoothed, height=0) + ###if len(peaks_real)>70: + ###peak_start = self.return_start_and_end_of_common_text_of_textline_ocr_new_splitted( + ###peaks_real, sum_smoothed, width1, mid+2) + ###peak_end = self.return_start_and_end_of_common_text_of_textline_ocr_new_splitted( + ###peaks_real, sum_smoothed, mid-2, width2) - #plt.figure(ind_tot) - #plt.imshow(textline_image) - #plt.plot([peak_start, peak_start], [0, height-1]) - #plt.plot([peak_end, peak_end], [0, height-1]) - #plt.savefig('./'+str(ind_tot)+'.png') + ####plt.figure(ind_tot) + ####plt.imshow(textline_image) + ####plt.plot([peak_start, peak_start], [0, height-1]) + ####plt.plot([peak_end, peak_end], [0, height-1]) + ####plt.savefig('./'+str(ind_tot)+'.png') - return peak_start, peak_end - else: - pass + ###return peak_start, peak_end + ###else: + ###pass - def return_ocr_of_textline_without_common_section( - self, textline_image, model_ocr, processor, device, width_textline, h2w_ratio,ind_tot): + ##def return_ocr_of_textline_without_common_section( + ##self, textline_image, model_ocr, processor, device, width_textline, h2w_ratio,ind_tot): - if h2w_ratio > 0.05: - pixel_values = processor(textline_image, return_tensors="pt").pixel_values - generated_ids = model_ocr.generate(pixel_values.to(device)) - generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - else: - #width = np.shape(textline_image)[1] - #height = np.shape(textline_image)[0] - #common_window = int(0.3*width) - #width1 = int ( width/2. - common_window ) - #width2 = int ( width/2. + common_window ) + ##if h2w_ratio > 0.05: + ##pixel_values = processor(textline_image, return_tensors="pt").pixel_values + ##generated_ids = model_ocr.generate(pixel_values.to(device)) + ##generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + ##else: + ###width = np.shape(textline_image)[1] + ###height = np.shape(textline_image)[0] + ###common_window = int(0.3*width) + ###width1 = int ( width/2. - common_window ) + ###width2 = int ( width/2. + common_window ) - split_point = self.return_start_and_end_of_common_text_of_textline_ocr_without_common_section( - textline_image, ind_tot) - if split_point: - image1 = textline_image[:, :split_point,:]# image.crop((0, 0, width2, height)) - image2 = textline_image[:, split_point:,:]#image.crop((width1, 0, width, height)) + ##split_point = self.return_start_and_end_of_common_text_of_textline_ocr_without_common_section( + ##textline_image, ind_tot) + ##if split_point: + ##image1 = textline_image[:, :split_point,:]# image.crop((0, 0, width2, height)) + ##image2 = textline_image[:, split_point:,:]#image.crop((width1, 0, width, height)) - #pixel_values1 = processor(image1, return_tensors="pt").pixel_values - #pixel_values2 = processor(image2, return_tensors="pt").pixel_values + ###pixel_values1 = processor(image1, return_tensors="pt").pixel_values + ###pixel_values2 = processor(image2, return_tensors="pt").pixel_values - pixel_values_merged = processor([image1,image2], return_tensors="pt").pixel_values - generated_ids_merged = model_ocr.generate(pixel_values_merged.to(device)) - generated_text_merged = processor.batch_decode(generated_ids_merged, skip_special_tokens=True) + ##pixel_values_merged = processor([image1,image2], return_tensors="pt").pixel_values + ##generated_ids_merged = model_ocr.generate(pixel_values_merged.to(device)) + ##generated_text_merged = processor.batch_decode(generated_ids_merged, skip_special_tokens=True) - #print(generated_text_merged,'generated_text_merged') + ###print(generated_text_merged,'generated_text_merged') - #generated_ids1 = model_ocr.generate(pixel_values1.to(device)) - #generated_ids2 = model_ocr.generate(pixel_values2.to(device)) + ###generated_ids1 = model_ocr.generate(pixel_values1.to(device)) + ###generated_ids2 = model_ocr.generate(pixel_values2.to(device)) - #generated_text1 = processor.batch_decode(generated_ids1, skip_special_tokens=True)[0] - #generated_text2 = processor.batch_decode(generated_ids2, skip_special_tokens=True)[0] + ###generated_text1 = processor.batch_decode(generated_ids1, skip_special_tokens=True)[0] + ###generated_text2 = processor.batch_decode(generated_ids2, skip_special_tokens=True)[0] - #generated_text = generated_text1 + ' ' + generated_text2 - generated_text = generated_text_merged[0] + ' ' + generated_text_merged[1] + ###generated_text = generated_text1 + ' ' + generated_text2 + ##generated_text = generated_text_merged[0] + ' ' + generated_text_merged[1] - #print(generated_text1,'generated_text1') - #print(generated_text2, 'generated_text2') - #print('########################################') - else: - pixel_values = processor(textline_image, return_tensors="pt").pixel_values - generated_ids = model_ocr.generate(pixel_values.to(device)) - generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + ###print(generated_text1,'generated_text1') + ###print(generated_text2, 'generated_text2') + ###print('########################################') + ##else: + ##pixel_values = processor(textline_image, return_tensors="pt").pixel_values + ##generated_ids = model_ocr.generate(pixel_values.to(device)) + ##generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - #print(generated_text,'generated_text') - #print('########################################') - return generated_text + ###print(generated_text,'generated_text') + ###print('########################################') + ##return generated_text - def return_ocr_of_textline( - self, textline_image, model_ocr, processor, device, width_textline, h2w_ratio,ind_tot): + ###def return_ocr_of_textline( + ###self, textline_image, model_ocr, processor, device, width_textline, h2w_ratio,ind_tot): - if h2w_ratio > 0.05: - pixel_values = processor(textline_image, return_tensors="pt").pixel_values - generated_ids = model_ocr.generate(pixel_values.to(device)) - generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - else: - #width = np.shape(textline_image)[1] - #height = np.shape(textline_image)[0] - #common_window = int(0.3*width) - #width1 = int ( width/2. - common_window ) - #width2 = int ( width/2. + common_window ) + ###if h2w_ratio > 0.05: + ###pixel_values = processor(textline_image, return_tensors="pt").pixel_values + ###generated_ids = model_ocr.generate(pixel_values.to(device)) + ###generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + ###else: + ####width = np.shape(textline_image)[1] + ####height = np.shape(textline_image)[0] + ####common_window = int(0.3*width) + ####width1 = int ( width/2. - common_window ) + ####width2 = int ( width/2. + common_window ) - try: - width1, width2 = self.return_start_and_end_of_common_text_of_textline_ocr_new(textline_image, ind_tot) + ###try: + ###width1, width2 = self.return_start_and_end_of_common_text_of_textline_ocr_new(textline_image, ind_tot) - image1 = textline_image[:, :width2,:]# image.crop((0, 0, width2, height)) - image2 = textline_image[:, width1:,:]#image.crop((width1, 0, width, height)) + ###image1 = textline_image[:, :width2,:]# image.crop((0, 0, width2, height)) + ###image2 = textline_image[:, width1:,:]#image.crop((width1, 0, width, height)) - pixel_values1 = processor(image1, return_tensors="pt").pixel_values - pixel_values2 = processor(image2, return_tensors="pt").pixel_values + ###pixel_values1 = processor(image1, return_tensors="pt").pixel_values + ###pixel_values2 = processor(image2, return_tensors="pt").pixel_values - generated_ids1 = model_ocr.generate(pixel_values1.to(device)) - generated_ids2 = model_ocr.generate(pixel_values2.to(device)) + ###generated_ids1 = model_ocr.generate(pixel_values1.to(device)) + ###generated_ids2 = model_ocr.generate(pixel_values2.to(device)) - generated_text1 = processor.batch_decode(generated_ids1, skip_special_tokens=True)[0] - generated_text2 = processor.batch_decode(generated_ids2, skip_special_tokens=True)[0] - #print(generated_text1,'generated_text1') - #print(generated_text2, 'generated_text2') - #print('########################################') + ###generated_text1 = processor.batch_decode(generated_ids1, skip_special_tokens=True)[0] + ###generated_text2 = processor.batch_decode(generated_ids2, skip_special_tokens=True)[0] + ####print(generated_text1,'generated_text1') + ####print(generated_text2, 'generated_text2') + ####print('########################################') - match = sq(None, generated_text1, generated_text2).find_longest_match( - 0, len(generated_text1), 0, len(generated_text2)) - generated_text = generated_text1 + generated_text2[match.b+match.size:] - except: - pixel_values = processor(textline_image, return_tensors="pt").pixel_values - generated_ids = model_ocr.generate(pixel_values.to(device)) - generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + ###match = sq(None, generated_text1, generated_text2).find_longest_match( + ###0, len(generated_text1), 0, len(generated_text2)) + ###generated_text = generated_text1 + generated_text2[match.b+match.size:] + ###except: + ###pixel_values = processor(textline_image, return_tensors="pt").pixel_values + ###generated_ids = model_ocr.generate(pixel_values.to(device)) + ###generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - return generated_text + ###return generated_text - def return_textline_contour_with_added_box_coordinate(self, textline_contour, box_ind): - textline_contour[:,0] = textline_contour[:,0] + box_ind[2] - textline_contour[:,1] = textline_contour[:,1] + box_ind[0] - return textline_contour def return_list_of_contours_with_desired_order(self, ls_cons, sorted_indexes): return [ls_cons[sorted_indexes[index]] for index in range(len(sorted_indexes))] @@ -4625,6 +4681,7 @@ class Eynollah: raise ValueError("run requires either a single image filename or a directory") for img_filename in self.ls_imgs: + print(img_filename, 'img_filename') self.logger.info(img_filename) t0 = time.time() @@ -4698,13 +4755,19 @@ class Eynollah: all_box_coord_marginals = [] polygons_lines_xml = [] contours_tables = [] - ocr_all_textlines = None conf_contours_textregions =[0] + + if self.ocr and not self.tr: + gc.collect() + ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons, self.prediction_model, self.b_s_ocr, self.num_to_char, textline_light=True) + else: + ocr_all_textlines = None + pcgts = self.writer.build_pagexml_no_full_layout( cont_page, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, page_coord, polygons_of_images, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, - cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines, conf_contours_textregions) + cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines, conf_contours_textregions, self.skip_layout_and_reading_order) return pcgts #print("text region early -1 in %.1fs", time.time() - t0) @@ -5118,7 +5181,7 @@ class Eynollah: tror = time.time() order_text_new, id_of_texts_tot = self.do_order_of_regions_with_model( contours_only_text_parent, contours_only_text_parent_h, text_regions_p) - print('time spend for mb ro', time.time()-tror) + print('time spend for mb ro', time.time()-tror) else: if np.abs(slope_deskew) < SLOPE_THRESHOLD: order_text_new, id_of_texts_tot = self.do_order_of_regions( @@ -5160,7 +5223,7 @@ class Eynollah: order_text_new, id_of_texts_tot = self.do_order_of_regions( contours_only_text_parent_d_ordered, contours_only_text_parent_h, boxes_d, textline_mask_tot_d) - if self.ocr: + if self.ocr and self.tr: device = cuda.get_current_device() device.reset() gc.collect() @@ -5207,6 +5270,11 @@ class Eynollah: ocr_textline_in_textregion.append(text_ocr) ind_tot = ind_tot +1 ocr_all_textlines.append(ocr_textline_in_textregion) + + elif self.ocr and not self.tr: + gc.collect() + ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + else: ocr_all_textlines = None @@ -5289,329 +5357,6 @@ class Eynollah_ocr: vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True ) - - def decode_batch_predictions(self, pred, max_len = 128): - # input_len is the product of the batch size and the - # number of time steps. - input_len = np.ones(pred.shape[0]) * pred.shape[1] - - # Decode CTC predictions using greedy search. - # decoded is a tuple with 2 elements. - decoded = tf.keras.backend.ctc_decode(pred, - input_length = input_len, - beam_width = 100) - # The outputs are in the first element of the tuple. - # Additionally, the first element is actually a list, - # therefore we take the first element of that list as well. - #print(decoded,'decoded') - decoded = decoded[0][0][:, :max_len] - - #print(decoded, decoded.shape,'decoded') - - output = [] - for d in decoded: - # Convert the predicted indices to the corresponding chars. - d = tf.strings.reduce_join(self.num_to_char(d)) - d = d.numpy().decode("utf-8") - output.append(d) - return output - - - def distortion_free_resize(self, image, img_size): - w, h = img_size - image = tf.image.resize(image, size=(h, w), preserve_aspect_ratio=True) - - # Check tha amount of padding needed to be done. - pad_height = h - tf.shape(image)[0] - pad_width = w - tf.shape(image)[1] - - # Only necessary if you want to do same amount of padding on both sides. - if pad_height % 2 != 0: - height = pad_height // 2 - pad_height_top = height + 1 - pad_height_bottom = height - else: - pad_height_top = pad_height_bottom = pad_height // 2 - - if pad_width % 2 != 0: - width = pad_width // 2 - pad_width_left = width + 1 - pad_width_right = width - else: - pad_width_left = pad_width_right = pad_width // 2 - - image = tf.pad( - image, - paddings=[ - [pad_height_top, pad_height_bottom], - [pad_width_left, pad_width_right], - [0, 0], - ], - ) - - image = tf.transpose(image, (1, 0, 2)) - image = tf.image.flip_left_right(image) - return image - - def return_start_and_end_of_common_text_of_textline_ocr_without_common_section(self, textline_image): - width = np.shape(textline_image)[1] - height = np.shape(textline_image)[0] - common_window = int(0.22*width) - - width1 = int ( width/2. - common_window ) - width2 = int ( width/2. + common_window ) - - img_sum = np.sum(textline_image[:,:,0], axis=0) - sum_smoothed = gaussian_filter1d(img_sum, 3) - - peaks_real, _ = find_peaks(sum_smoothed, height=0) - - if len(peaks_real)>35: - - #peaks_real = peaks_real[(peaks_realwidth1)] - argsort = np.argsort(sum_smoothed[peaks_real])[::-1] - peaks_real_top_six = peaks_real[argsort[:6]] - midpoint = textline_image.shape[1] / 2. - arg_closest = np.argmin(np.abs(peaks_real_top_six - midpoint)) - - #arg_max = np.argmax(sum_smoothed[peaks_real]) - - peaks_final = peaks_real_top_six[arg_closest]#peaks_real[arg_max] - - return peaks_final - else: - return None - - # Function to fit text inside the given area - def fit_text_single_line(self, draw, text, font_path, max_width, max_height): - initial_font_size = 50 - font_size = initial_font_size - while font_size > 10: # Minimum font size - font = ImageFont.truetype(font_path, font_size) - text_bbox = draw.textbbox((0, 0), text, font=font) # Get text bounding box - text_width = text_bbox[2] - text_bbox[0] - text_height = text_bbox[3] - text_bbox[1] - - if text_width <= max_width and text_height <= max_height: - return font # Return the best-fitting font - - font_size -= 2 # Reduce font size and retry - - return ImageFont.truetype(font_path, 10) # Smallest font fallback - - def return_textlines_split_if_needed(self, textline_image, textline_image_bin): - - split_point = self.return_start_and_end_of_common_text_of_textline_ocr_without_common_section(textline_image) - if split_point: - image1 = textline_image[:, :split_point,:]# image.crop((0, 0, width2, height)) - image2 = textline_image[:, split_point:,:]#image.crop((width1, 0, width, height)) - if self.prediction_with_both_of_rgb_and_bin: - image1_bin = textline_image_bin[:, :split_point,:]# image.crop((0, 0, width2, height)) - image2_bin = textline_image_bin[:, split_point:,:]#image.crop((width1, 0, width, height)) - return [image1, image2], [image1_bin, image2_bin] - else: - return [image1, image2], None - else: - return None, None - def preprocess_and_resize_image_for_ocrcnn_model(self, img, image_height, image_width): - ratio = image_height /float(img.shape[0]) - w_ratio = int(ratio * img.shape[1]) - - if w_ratio <= image_width: - width_new = w_ratio - else: - width_new = image_width - - if width_new == 0: - width_new = img.shape[1] - - ##if width_new+32 >= image_width: - ##width_new = width_new - 32 - - ###patch_zero = np.zeros((32, 32, 3))#+255 - ###patch_zero[9:19,8:18,:] = 0 - - - img = resize_image(img, image_height, width_new) - img_fin = np.ones((image_height, image_width, 3))*255 - ###img_fin[:,:32,:] = patch_zero[:,:,:] - ###img_fin[:,32:32+width_new,:] = img[:,:,:] - img_fin[:,:width_new,:] = img[:,:,:] - img_fin = img_fin / 255. - return img_fin - - def get_deskewed_contour_and_bb_and_image(self, contour, image, deskew_angle): - (h_in, w_in) = image.shape[:2] - center = (w_in // 2, h_in // 2) - - rotation_matrix = cv2.getRotationMatrix2D(center, deskew_angle, 1.0) - - cos_angle = abs(rotation_matrix[0, 0]) - sin_angle = abs(rotation_matrix[0, 1]) - new_w = int((h_in * sin_angle) + (w_in * cos_angle)) - new_h = int((h_in * cos_angle) + (w_in * sin_angle)) - - rotation_matrix[0, 2] += (new_w / 2) - center[0] - rotation_matrix[1, 2] += (new_h / 2) - center[1] - - deskewed_image = cv2.warpAffine(image, rotation_matrix, (new_w, new_h)) - - contour_points = np.array(contour, dtype=np.float32) - transformed_points = cv2.transform(np.array([contour_points]), rotation_matrix)[0] - - x, y, w, h = cv2.boundingRect(np.array(transformed_points, dtype=np.int32)) - cropped_textline = deskewed_image[y:y+h, x:x+w] - - return cropped_textline - - def rotate_image_with_padding(self, image, angle, border_value=(0,0,0)): - # Get image dimensions - (h, w) = image.shape[:2] - - # Calculate the center of the image - center = (w // 2, h // 2) - - # Get the rotation matrix - rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1.0) - - # Compute the new bounding dimensions - cos = abs(rotation_matrix[0, 0]) - sin = abs(rotation_matrix[0, 1]) - new_w = int((h * sin) + (w * cos)) - new_h = int((h * cos) + (w * sin)) - - # Adjust the rotation matrix to account for translation - rotation_matrix[0, 2] += (new_w / 2) - center[0] - rotation_matrix[1, 2] += (new_h / 2) - center[1] - - # Perform the rotation - rotated_image = cv2.warpAffine(image, rotation_matrix, (new_w, new_h), borderValue=border_value) - - return rotated_image - - def get_orientation_moments(self, contour): - moments = cv2.moments(contour) - if moments["mu20"] - moments["mu02"] == 0: # Avoid division by zero - return 90 if moments["mu11"] > 0 else -90 - else: - angle = 0.5 * np.arctan2(2 * moments["mu11"], moments["mu20"] - moments["mu02"]) - return np.degrees(angle) # Convert radians to degrees - - - def get_orientation_moments_of_mask(self, mask): - mask=mask.astype('uint8') - contours, _ = cv2.findContours(mask[:,:,0], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) - - largest_contour = max(contours, key=cv2.contourArea) if contours else None - - moments = cv2.moments(largest_contour) - if moments["mu20"] - moments["mu02"] == 0: # Avoid division by zero - return 90 if moments["mu11"] > 0 else -90 - else: - angle = 0.5 * np.arctan2(2 * moments["mu11"], moments["mu20"] - moments["mu02"]) - return np.degrees(angle) # Convert radians to degrees - - def get_contours_and_bounding_boxes(self, mask): - # Find contours in the binary mask - contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) - - largest_contour = max(contours, key=cv2.contourArea) if contours else None - - # Get the bounding rectangle for the contour - x, y, w, h = cv2.boundingRect(largest_contour) - #bounding_boxes.append((x, y, w, h)) - - return x, y, w, h - - def return_splitting_point_of_image(self, image_to_spliited): - width = np.shape(image_to_spliited)[1] - height = np.shape(image_to_spliited)[0] - common_window = int(0.03*width) - - width1 = int ( common_window) - width2 = int ( width - common_window ) - - img_sum = np.sum(image_to_spliited[:,:,0], axis=0) - sum_smoothed = gaussian_filter1d(img_sum, 1) - - peaks_real, _ = find_peaks(sum_smoothed, height=0) - peaks_real = peaks_real[(peaks_realwidth1)] - - arg_sort = np.argsort(sum_smoothed[peaks_real]) - peaks_sort_4 = peaks_real[arg_sort][::-1][:4] - - return np.sort(peaks_sort_4) - - def break_curved_line_into_small_pieces_and_then_merge(self, img_curved, mask_curved): - peaks_4 = self.return_splitting_point_of_image(img_curved) - if len(peaks_4)>0: - imgs_tot = [] - - for ind in range(len(peaks_4)+1): - if ind==0: - img = img_curved[:, :peaks_4[ind], :] - mask = mask_curved[:, :peaks_4[ind], :] - elif ind==len(peaks_4): - img = img_curved[:, peaks_4[ind-1]:, :] - mask = mask_curved[:, peaks_4[ind-1]:, :] - else: - img = img_curved[:, peaks_4[ind-1]:peaks_4[ind], :] - mask = mask_curved[:, peaks_4[ind-1]:peaks_4[ind], :] - - or_ma = self.get_orientation_moments_of_mask(mask) - - imgs_tot.append([img, mask, or_ma] ) - - - w_tot_des_list = [] - w_tot_des = 0 - imgs_deskewed_list = [] - for ind in range(len(imgs_tot)): - img_in = imgs_tot[ind][0] - mask_in = imgs_tot[ind][1] - ori_in = imgs_tot[ind][2] - - if abs(ori_in)<45: - img_in_des = self.rotate_image_with_padding(img_in, ori_in, border_value=(255,255,255) ) - mask_in_des = self.rotate_image_with_padding(mask_in, ori_in) - mask_in_des = mask_in_des.astype('uint8') - - #new bounding box - x_n, y_n, w_n, h_n = self.get_contours_and_bounding_boxes(mask_in_des[:,:,0]) - - mask_in_des = mask_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] - img_in_des = img_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] - - w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) - if w_relative==0: - w_relative = img_in_des.shape[1] - img_in_des = resize_image(img_in_des, 32, w_relative) - - - else: - img_in_des = np.copy(img_in) - w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) - if w_relative==0: - w_relative = img_in_des.shape[1] - img_in_des = resize_image(img_in_des, 32, w_relative) - - w_tot_des+=img_in_des.shape[1] - w_tot_des_list.append(img_in_des.shape[1]) - imgs_deskewed_list.append(img_in_des) - - - - - img_final_deskewed = np.zeros((32, w_tot_des, 3))+255 - - w_indexer = 0 - for ind in range(len(w_tot_des_list)): - img_final_deskewed[:,w_indexer:w_indexer+w_tot_des_list[ind],:] = imgs_deskewed_list[ind][:,:,:] - w_indexer = w_indexer+w_tot_des_list[ind] - return img_final_deskewed - else: - return img_curved - def run(self): ls_imgs = os.listdir(self.dir_in) @@ -6069,7 +5814,7 @@ class Eynollah_ocr: preds_bin = self.prediction_model.predict(imgs_bin, verbose=0) preds = (preds + preds_bin) / 2. - pred_texts = self.decode_batch_predictions(preds) + pred_texts = self.decode_batch_predictions(preds, self.num_to_char) for ib in range(imgs.shape[0]): pred_texts_ib = pred_texts[ib].replace("[UNK]", "") diff --git a/src/eynollah/utils/utils_ocr.py b/src/eynollah/utils/utils_ocr.py new file mode 100644 index 0000000..44367b6 --- /dev/null +++ b/src/eynollah/utils/utils_ocr.py @@ -0,0 +1,435 @@ +import numpy as np +import cv2 +import tensorflow as tf +from scipy.signal import find_peaks +from scipy.ndimage import gaussian_filter1d +import math +from .resize import resize_image + +def decode_batch_predictions(pred, num_to_char, max_len = 128): + # input_len is the product of the batch size and the + # number of time steps. + input_len = np.ones(pred.shape[0]) * pred.shape[1] + + # Decode CTC predictions using greedy search. + # decoded is a tuple with 2 elements. + decoded = tf.keras.backend.ctc_decode(pred, + input_length = input_len, + beam_width = 100) + # The outputs are in the first element of the tuple. + # Additionally, the first element is actually a list, + # therefore we take the first element of that list as well. + #print(decoded,'decoded') + decoded = decoded[0][0][:, :max_len] + + #print(decoded, decoded.shape,'decoded') + + output = [] + for d in decoded: + # Convert the predicted indices to the corresponding chars. + d = tf.strings.reduce_join(num_to_char(d)) + d = d.numpy().decode("utf-8") + output.append(d) + return output + + +def distortion_free_resize(image, img_size): + w, h = img_size + image = tf.image.resize(image, size=(h, w), preserve_aspect_ratio=True) + + # Check tha amount of padding needed to be done. + pad_height = h - tf.shape(image)[0] + pad_width = w - tf.shape(image)[1] + + # Only necessary if you want to do same amount of padding on both sides. + if pad_height % 2 != 0: + height = pad_height // 2 + pad_height_top = height + 1 + pad_height_bottom = height + else: + pad_height_top = pad_height_bottom = pad_height // 2 + + if pad_width % 2 != 0: + width = pad_width // 2 + pad_width_left = width + 1 + pad_width_right = width + else: + pad_width_left = pad_width_right = pad_width // 2 + + image = tf.pad( + image, + paddings=[ + [pad_height_top, pad_height_bottom], + [pad_width_left, pad_width_right], + [0, 0], + ], + ) + + image = tf.transpose(image, (1, 0, 2)) + image = tf.image.flip_left_right(image) + return image + +def return_start_and_end_of_common_text_of_textline_ocr_without_common_section(textline_image): + width = np.shape(textline_image)[1] + height = np.shape(textline_image)[0] + common_window = int(0.22*width) + + width1 = int ( width/2. - common_window ) + width2 = int ( width/2. + common_window ) + + img_sum = np.sum(textline_image[:,:,0], axis=0) + sum_smoothed = gaussian_filter1d(img_sum, 3) + + peaks_real, _ = find_peaks(sum_smoothed, height=0) + + if len(peaks_real)>35: + + #peaks_real = peaks_real[(peaks_realwidth1)] + argsort = np.argsort(sum_smoothed[peaks_real])[::-1] + peaks_real_top_six = peaks_real[argsort[:6]] + midpoint = textline_image.shape[1] / 2. + arg_closest = np.argmin(np.abs(peaks_real_top_six - midpoint)) + + #arg_max = np.argmax(sum_smoothed[peaks_real]) + + peaks_final = peaks_real_top_six[arg_closest]#peaks_real[arg_max] + + return peaks_final + else: + return None + +# Function to fit text inside the given area +def fit_text_single_line(draw, text, font_path, max_width, max_height): + initial_font_size = 50 + font_size = initial_font_size + while font_size > 10: # Minimum font size + font = ImageFont.truetype(font_path, font_size) + text_bbox = draw.textbbox((0, 0), text, font=font) # Get text bounding box + text_width = text_bbox[2] - text_bbox[0] + text_height = text_bbox[3] - text_bbox[1] + + if text_width <= max_width and text_height <= max_height: + return font # Return the best-fitting font + + font_size -= 2 # Reduce font size and retry + + return ImageFont.truetype(font_path, 10) # Smallest font fallback + +def return_textlines_split_if_needed(textline_image, textline_image_bin, prediction_with_both_of_rgb_and_bin=False): + + split_point = return_start_and_end_of_common_text_of_textline_ocr_without_common_section(textline_image) + if split_point: + image1 = textline_image[:, :split_point,:]# image.crop((0, 0, width2, height)) + image2 = textline_image[:, split_point:,:]#image.crop((width1, 0, width, height)) + if prediction_with_both_of_rgb_and_bin: + image1_bin = textline_image_bin[:, :split_point,:]# image.crop((0, 0, width2, height)) + image2_bin = textline_image_bin[:, split_point:,:]#image.crop((width1, 0, width, height)) + return [image1, image2], [image1_bin, image2_bin] + else: + return [image1, image2], None + else: + return None, None +def preprocess_and_resize_image_for_ocrcnn_model(img, image_height, image_width): + ratio = image_height /float(img.shape[0]) + w_ratio = int(ratio * img.shape[1]) + + if w_ratio <= image_width: + width_new = w_ratio + else: + width_new = image_width + + if width_new == 0: + width_new = img.shape[1] + + + img = resize_image(img, image_height, width_new) + img_fin = np.ones((image_height, image_width, 3))*255 + + img_fin[:,:width_new,:] = img[:,:,:] + img_fin = img_fin / 255. + return img_fin + +def get_deskewed_contour_and_bb_and_image(contour, image, deskew_angle): + (h_in, w_in) = image.shape[:2] + center = (w_in // 2, h_in // 2) + + rotation_matrix = cv2.getRotationMatrix2D(center, deskew_angle, 1.0) + + cos_angle = abs(rotation_matrix[0, 0]) + sin_angle = abs(rotation_matrix[0, 1]) + new_w = int((h_in * sin_angle) + (w_in * cos_angle)) + new_h = int((h_in * cos_angle) + (w_in * sin_angle)) + + rotation_matrix[0, 2] += (new_w / 2) - center[0] + rotation_matrix[1, 2] += (new_h / 2) - center[1] + + deskewed_image = cv2.warpAffine(image, rotation_matrix, (new_w, new_h)) + + contour_points = np.array(contour, dtype=np.float32) + transformed_points = cv2.transform(np.array([contour_points]), rotation_matrix)[0] + + x, y, w, h = cv2.boundingRect(np.array(transformed_points, dtype=np.int32)) + cropped_textline = deskewed_image[y:y+h, x:x+w] + + return cropped_textline + +def rotate_image_with_padding(image, angle, border_value=(0,0,0)): + # Get image dimensions + (h, w) = image.shape[:2] + + # Calculate the center of the image + center = (w // 2, h // 2) + + # Get the rotation matrix + rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1.0) + + # Compute the new bounding dimensions + cos = abs(rotation_matrix[0, 0]) + sin = abs(rotation_matrix[0, 1]) + new_w = int((h * sin) + (w * cos)) + new_h = int((h * cos) + (w * sin)) + + # Adjust the rotation matrix to account for translation + rotation_matrix[0, 2] += (new_w / 2) - center[0] + rotation_matrix[1, 2] += (new_h / 2) - center[1] + + # Perform the rotation + rotated_image = cv2.warpAffine(image, rotation_matrix, (new_w, new_h), borderValue=border_value) + + return rotated_image + +def get_orientation_moments(contour): + moments = cv2.moments(contour) + if moments["mu20"] - moments["mu02"] == 0: # Avoid division by zero + return 90 if moments["mu11"] > 0 else -90 + else: + angle = 0.5 * np.arctan2(2 * moments["mu11"], moments["mu20"] - moments["mu02"]) + return np.degrees(angle) # Convert radians to degrees + + +def get_orientation_moments_of_mask(mask): + mask=mask.astype('uint8') + contours, _ = cv2.findContours(mask[:,:,0], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + + largest_contour = max(contours, key=cv2.contourArea) if contours else None + + moments = cv2.moments(largest_contour) + if moments["mu20"] - moments["mu02"] == 0: # Avoid division by zero + return 90 if moments["mu11"] > 0 else -90 + else: + angle = 0.5 * np.arctan2(2 * moments["mu11"], moments["mu20"] - moments["mu02"]) + return np.degrees(angle) # Convert radians to degrees + +def get_contours_and_bounding_boxes(mask): + # Find contours in the binary mask + contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + + largest_contour = max(contours, key=cv2.contourArea) if contours else None + + # Get the bounding rectangle for the contour + x, y, w, h = cv2.boundingRect(largest_contour) + #bounding_boxes.append((x, y, w, h)) + + return x, y, w, h + +def return_splitting_point_of_image(image_to_spliited): + width = np.shape(image_to_spliited)[1] + height = np.shape(image_to_spliited)[0] + common_window = int(0.03*width) + + width1 = int ( common_window) + width2 = int ( width - common_window ) + + img_sum = np.sum(image_to_spliited[:,:,0], axis=0) + sum_smoothed = gaussian_filter1d(img_sum, 1) + + peaks_real, _ = find_peaks(sum_smoothed, height=0) + peaks_real = peaks_real[(peaks_realwidth1)] + + arg_sort = np.argsort(sum_smoothed[peaks_real]) + peaks_sort_4 = peaks_real[arg_sort][::-1][:4] + + return np.sort(peaks_sort_4) + +def break_curved_line_into_small_pieces_and_then_merge(img_curved, mask_curved): + peaks_4 = return_splitting_point_of_image(img_curved) + if len(peaks_4)>0: + imgs_tot = [] + + for ind in range(len(peaks_4)+1): + if ind==0: + img = img_curved[:, :peaks_4[ind], :] + mask = mask_curved[:, :peaks_4[ind], :] + elif ind==len(peaks_4): + img = img_curved[:, peaks_4[ind-1]:, :] + mask = mask_curved[:, peaks_4[ind-1]:, :] + else: + img = img_curved[:, peaks_4[ind-1]:peaks_4[ind], :] + mask = mask_curved[:, peaks_4[ind-1]:peaks_4[ind], :] + + or_ma = get_orientation_moments_of_mask(mask) + + imgs_tot.append([img, mask, or_ma] ) + + + w_tot_des_list = [] + w_tot_des = 0 + imgs_deskewed_list = [] + for ind in range(len(imgs_tot)): + img_in = imgs_tot[ind][0] + mask_in = imgs_tot[ind][1] + ori_in = imgs_tot[ind][2] + + if abs(ori_in)<45: + img_in_des = rotate_image_with_padding(img_in, ori_in, border_value=(255,255,255) ) + mask_in_des = rotate_image_with_padding(mask_in, ori_in) + mask_in_des = mask_in_des.astype('uint8') + + #new bounding box + x_n, y_n, w_n, h_n = get_contours_and_bounding_boxes(mask_in_des[:,:,0]) + + mask_in_des = mask_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] + img_in_des = img_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] + + w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) + if w_relative==0: + w_relative = img_in_des.shape[1] + img_in_des = resize_image(img_in_des, 32, w_relative) + + + else: + img_in_des = np.copy(img_in) + w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) + if w_relative==0: + w_relative = img_in_des.shape[1] + img_in_des = resize_image(img_in_des, 32, w_relative) + + w_tot_des+=img_in_des.shape[1] + w_tot_des_list.append(img_in_des.shape[1]) + imgs_deskewed_list.append(img_in_des) + + + + + img_final_deskewed = np.zeros((32, w_tot_des, 3))+255 + + w_indexer = 0 + for ind in range(len(w_tot_des_list)): + img_final_deskewed[:,w_indexer:w_indexer+w_tot_des_list[ind],:] = imgs_deskewed_list[ind][:,:,:] + w_indexer = w_indexer+w_tot_des_list[ind] + return img_final_deskewed + else: + return img_curved + +def return_textline_contour_with_added_box_coordinate(textline_contour, box_ind): + textline_contour[:,0] = textline_contour[:,0] + box_ind[2] + textline_contour[:,1] = textline_contour[:,1] + box_ind[0] + return textline_contour + + +def return_rnn_cnn_ocr_of_given_textlines(image, all_found_textline_polygons, prediction_model, b_s_ocr, num_to_char, textline_light=False, curved_line=False): + max_len = 512 + padding_token = 299 + image_width = 512#max_len * 4 + image_height = 32 + ind_tot = 0 + #cv2.imwrite('./img_out.png', image_page) + ocr_all_textlines = [] + cropped_lines_region_indexer = [] + cropped_lines_meging_indexing = [] + cropped_lines = [] + indexer_text_region = 0 + + for indexing, ind_poly_first in enumerate(all_found_textline_polygons): + #ocr_textline_in_textregion = [] + for indexing2, ind_poly in enumerate(ind_poly_first): + cropped_lines_region_indexer.append(indexer_text_region) + if not (textline_light or curved_line): + ind_poly = copy.deepcopy(ind_poly) + box_ind = all_box_coord[indexing] + + ind_poly = return_textline_contour_with_added_box_coordinate(ind_poly, box_ind) + #print(ind_poly_copy) + ind_poly[ind_poly<0] = 0 + x, y, w, h = cv2.boundingRect(ind_poly) + + w_scaled = w * image_height/float(h) + + mask_poly = np.zeros(image.shape) + + img_poly_on_img = np.copy(image) + + mask_poly = cv2.fillPoly(mask_poly, pts=[ind_poly], color=(1, 1, 1)) + + + + mask_poly = mask_poly[y:y+h, x:x+w, :] + img_crop = img_poly_on_img[y:y+h, x:x+w, :] + + img_crop[mask_poly==0] = 255 + + if w_scaled < 640:#1.5*image_width: + img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) + cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(0) + else: + splited_images, splited_images_bin = return_textlines_split_if_needed(img_crop, None) + + if splited_images: + img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[0], image_height, image_width) + cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(1) + + img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[1], image_height, image_width) + + cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(-1) + + else: + img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) + cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(0) + + indexer_text_region+=1 + + + extracted_texts = [] + + n_iterations = math.ceil(len(cropped_lines) / b_s_ocr) + + for i in range(n_iterations): + if i==(n_iterations-1): + n_start = i*b_s_ocr + imgs = cropped_lines[n_start:] + imgs = np.array(imgs) + imgs = imgs.reshape(imgs.shape[0], image_height, image_width, 3) + + + else: + n_start = i*b_s_ocr + n_end = (i+1)*b_s_ocr + imgs = cropped_lines[n_start:n_end] + imgs = np.array(imgs).reshape(b_s_ocr, image_height, image_width, 3) + + + preds = prediction_model.predict(imgs, verbose=0) + + pred_texts = decode_batch_predictions(preds, num_to_char) + + for ib in range(imgs.shape[0]): + pred_texts_ib = pred_texts[ib].replace("[UNK]", "") + extracted_texts.append(pred_texts_ib) + + extracted_texts_merged = [extracted_texts[ind] if cropped_lines_meging_indexing[ind]==0 else extracted_texts[ind]+" "+extracted_texts[ind+1] if cropped_lines_meging_indexing[ind]==1 else None for ind in range(len(cropped_lines_meging_indexing))] + + extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] + unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) + + ocr_all_textlines = [] + for ind in unique_cropped_lines_region_indexer: + ocr_textline_in_textregion = [] + extracted_texts_merged_un = np.array(extracted_texts_merged)[np.array(cropped_lines_region_indexer)==ind] + for it_ind, text_textline in enumerate(extracted_texts_merged_un): + ocr_textline_in_textregion.append(text_textline) + ocr_all_textlines.append(ocr_textline_in_textregion) + return ocr_all_textlines diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index 8cd1c8e..cf0551b 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -168,7 +168,7 @@ class EynollahXmlWriter(): with open(self.output_filename, 'w') as f: f.write(to_xml(pcgts)) - def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, found_polygons_tables, ocr_all_textlines, conf_contours_textregion): + def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, found_polygons_tables, ocr_all_textlines, conf_contours_textregion, skip_layout_reading_order=False): self.logger.debug('enter build_pagexml_no_full_layout') # create the file structure @@ -184,7 +184,7 @@ class EynollahXmlWriter(): for mm in range(len(found_polygons_text_region)): textregion = TextRegionType(id=counter.next_region_id, type_='paragraph', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord), conf=conf_contours_textregion[mm]), + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord, skip_layout_reading_order), conf=conf_contours_textregion[mm]), ) #textregion.set_conf(conf_contours_textregion[mm]) page.add_TextRegion(textregion) @@ -303,18 +303,28 @@ class EynollahXmlWriter(): return pcgts - def calculate_polygon_coords(self, contour, page_coord): + def calculate_polygon_coords(self, contour, page_coord, skip_layout_reading_order=False): self.logger.debug('enter calculate_polygon_coords') coords = '' for value_bbox in contour: - if len(value_bbox) == 2: - coords += str(int((value_bbox[0] + page_coord[2]) / self.scale_x)) - coords += ',' - coords += str(int((value_bbox[1] + page_coord[0]) / self.scale_y)) + if skip_layout_reading_order: + if len(value_bbox) == 2: + coords += str(int((value_bbox[0]) / self.scale_x)) + coords += ',' + coords += str(int((value_bbox[1]) / self.scale_y)) + else: + coords += str(int((value_bbox[0][0]) / self.scale_x)) + coords += ',' + coords += str(int((value_bbox[0][1]) / self.scale_y)) else: - coords += str(int((value_bbox[0][0] + page_coord[2]) / self.scale_x)) - coords += ',' - coords += str(int((value_bbox[0][1] + page_coord[0]) / self.scale_y)) + if len(value_bbox) == 2: + coords += str(int((value_bbox[0] + page_coord[2]) / self.scale_x)) + coords += ',' + coords += str(int((value_bbox[1] + page_coord[0]) / self.scale_y)) + else: + coords += str(int((value_bbox[0][0] + page_coord[2]) / self.scale_x)) + coords += ',' + coords += str(int((value_bbox[0][1] + page_coord[0]) / self.scale_y)) coords=coords + ' ' return coords[:-1] From 0250a6d3d05904ed53cafde596f364500cad8f08 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 23 May 2025 18:06:53 +0200 Subject: [PATCH 145/492] enhancing ocr --- src/eynollah/eynollah.py | 47 ++++++++++++++++++--------------- src/eynollah/utils/utils_ocr.py | 1 + 2 files changed, 27 insertions(+), 21 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 2564150..1b50713 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -85,7 +85,12 @@ from .utils.utils_ocr import ( preprocess_and_resize_image_for_ocrcnn_model, return_textlines_split_if_needed, decode_batch_predictions, - return_rnn_cnn_ocr_of_given_textlines + return_rnn_cnn_ocr_of_given_textlines, + fit_text_single_line, + break_curved_line_into_small_pieces_and_then_merge, + get_orientation_moments, + rotate_image_with_padding, + get_contours_and_bounding_boxes ) from .utils.separate_lines import ( textline_contours_postprocessing, @@ -5421,7 +5426,7 @@ class Eynollah_ocr: cropped_lines.append(resize_image(img_crop, tr_ocr_input_height_and_width, tr_ocr_input_height_and_width) ) cropped_lines_meging_indexing.append(0) else: - splited_images, _ = self.return_textlines_split_if_needed(img_crop, None) + splited_images, _ = return_textlines_split_if_needed(img_crop, None) #print(splited_images) if splited_images: cropped_lines.append(resize_image(splited_images[0], tr_ocr_input_height_and_width, tr_ocr_input_height_and_width)) @@ -5474,7 +5479,7 @@ class Eynollah_ocr: w_bb = bb_ind[2] h_bb = bb_ind[3] - font = self.fit_text_single_line(draw, extracted_texts_merged[indexer_text], font_path, w_bb, int(h_bb*0.4) ) + font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], font_path, w_bb, int(h_bb*0.4) ) ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) @@ -5607,14 +5612,14 @@ class Eynollah_ocr: #print(file_name, angle_degrees,w*h , mask_poly[:,:,0].sum(), mask_poly[:,:,0].sum() /float(w*h) , 'didi') if not self.do_not_mask_with_textline_contour: if angle_degrees > 15: - better_des_slope = self.get_orientation_moments(textline_coords) + better_des_slope = get_orientation_moments(textline_coords) - img_crop = self.rotate_image_with_padding(img_crop, better_des_slope ) - mask_poly = self.rotate_image_with_padding(mask_poly, better_des_slope ) + img_crop = rotate_image_with_padding(img_crop, better_des_slope ) + mask_poly = rotate_image_with_padding(mask_poly, better_des_slope ) mask_poly = mask_poly.astype('uint8') #new bounding box - x_n, y_n, w_n, h_n = self.get_contours_and_bounding_boxes(mask_poly[:,:,0]) + x_n, y_n, w_n, h_n = get_contours_and_bounding_boxes(mask_poly[:,:,0]) mask_poly = mask_poly[y_n:y_n+h_n, x_n:x_n+w_n, :] img_crop = img_crop[y_n:y_n+h_n, x_n:x_n+w_n, :] @@ -5622,13 +5627,13 @@ class Eynollah_ocr: img_crop[mask_poly==0] = 255 if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 100: - img_crop = self.break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) + img_crop = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) #print(file_name,w_n*h_n , mask_poly[:,:,0].sum(), mask_poly[:,:,0].sum() /float(w_n*h_n) , 'ikiiiiii') else: img_crop[mask_poly==0] = 255 if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 100: - img_crop = self.break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) + img_crop = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) @@ -5638,7 +5643,7 @@ class Eynollah_ocr: if not self.export_textline_images_and_text: if w_scaled < 640:#1.5*image_width: - img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) cropped_lines.append(img_fin) if angle_degrees > 15: cropped_lines_ver_index.append(1) @@ -5647,15 +5652,15 @@ class Eynollah_ocr: cropped_lines_meging_indexing.append(0) if self.prediction_with_both_of_rgb_and_bin: - img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(img_crop_bin, image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop_bin, image_height, image_width) cropped_lines_bin.append(img_fin) else: if self.prediction_with_both_of_rgb_and_bin: - splited_images, splited_images_bin = self.return_textlines_split_if_needed(img_crop, img_crop_bin) + splited_images, splited_images_bin = return_textlines_split_if_needed(img_crop, img_crop_bin) else: - splited_images, splited_images_bin = self.return_textlines_split_if_needed(img_crop, None) + splited_images, splited_images_bin = return_textlines_split_if_needed(img_crop, None) if splited_images: - img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(splited_images[0], image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[0], image_height, image_width) cropped_lines.append(img_fin) cropped_lines_meging_indexing.append(1) @@ -5664,7 +5669,7 @@ class Eynollah_ocr: else: cropped_lines_ver_index.append(0) - img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(splited_images[1], image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[1], image_height, image_width) cropped_lines.append(img_fin) cropped_lines_meging_indexing.append(-1) @@ -5675,13 +5680,13 @@ class Eynollah_ocr: cropped_lines_ver_index.append(0) if self.prediction_with_both_of_rgb_and_bin: - img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(splited_images_bin[0], image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images_bin[0], image_height, image_width) cropped_lines_bin.append(img_fin) - img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(splited_images_bin[1], image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images_bin[1], image_height, image_width) cropped_lines_bin.append(img_fin) else: - img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) cropped_lines.append(img_fin) cropped_lines_meging_indexing.append(0) @@ -5691,7 +5696,7 @@ class Eynollah_ocr: cropped_lines_ver_index.append(0) if self.prediction_with_both_of_rgb_and_bin: - img_fin = self.preprocess_and_resize_image_for_ocrcnn_model(img_crop_bin, image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop_bin, image_height, image_width) cropped_lines_bin.append(img_fin) if self.export_textline_images_and_text: @@ -5814,7 +5819,7 @@ class Eynollah_ocr: preds_bin = self.prediction_model.predict(imgs_bin, verbose=0) preds = (preds + preds_bin) / 2. - pred_texts = self.decode_batch_predictions(preds, self.num_to_char) + pred_texts = decode_batch_predictions(preds, self.num_to_char) for ib in range(imgs.shape[0]): pred_texts_ib = pred_texts[ib].replace("[UNK]", "") @@ -5844,7 +5849,7 @@ class Eynollah_ocr: w_bb = bb_ind[2] h_bb = bb_ind[3] - font = self.fit_text_single_line(draw, extracted_texts_merged[indexer_text], font_path, w_bb, int(h_bb*0.4) ) + font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], font_path, w_bb, int(h_bb*0.4) ) ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) diff --git a/src/eynollah/utils/utils_ocr.py b/src/eynollah/utils/utils_ocr.py index 44367b6..339b38a 100644 --- a/src/eynollah/utils/utils_ocr.py +++ b/src/eynollah/utils/utils_ocr.py @@ -4,6 +4,7 @@ import tensorflow as tf from scipy.signal import find_peaks from scipy.ndimage import gaussian_filter1d import math +from PIL import Image, ImageDraw, ImageFont from .resize import resize_image def decode_batch_predictions(pred, num_to_char, max_len = 128): From 25e3a2a99f4e585ee73d39e981897062ccd13a1e Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 23 May 2025 18:30:51 +0200 Subject: [PATCH 146/492] visualizing ro for single xml file --- train/generate_gt_for_training.py | 53 +++++++++++++++++++++++++------ 1 file changed, 44 insertions(+), 9 deletions(-) diff --git a/train/generate_gt_for_training.py b/train/generate_gt_for_training.py index 7e7c6a0..9b7f02b 100644 --- a/train/generate_gt_for_training.py +++ b/train/generate_gt_for_training.py @@ -252,6 +252,12 @@ def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, i @main.command() +@click.option( + "--xml_file", + "-xml", + help="xml filename", + type=click.Path(exists=True, dir_okay=False), +) @click.option( "--dir_xml", "-dx", @@ -271,10 +277,14 @@ def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, i "-dimg", help="directory where the overlayed plots will be written", ) -def visualize_reading_order(dir_xml, dir_out, dir_imgs): - xml_files_ind = os.listdir(dir_xml) - +def visualize_reading_order(xml_file, dir_xml, dir_out, dir_imgs): + assert xml_file or dir_xml, "A single xml file -xml or a dir of xml files -dx is required not both of them" + if dir_xml: + xml_files_ind = os.listdir(dir_xml) + else: + xml_files_ind = [xml_file] + indexer_start= 0#55166 #min_area = 0.0001 @@ -282,8 +292,17 @@ def visualize_reading_order(dir_xml, dir_out, dir_imgs): indexer = 0 #print(ind_xml) #print('########################') - xml_file = os.path.join(dir_xml,ind_xml ) - f_name = ind_xml.split('.')[0] + #xml_file = os.path.join(dir_xml,ind_xml ) + + if dir_xml: + xml_file = os.path.join(dir_xml,ind_xml ) + f_name = Path(ind_xml).stem + else: + xml_file = os.path.join(ind_xml ) + f_name = Path(ind_xml).stem + print(f_name, 'f_name') + + #f_name = ind_xml.split('.')[0] _, _, _, file_name, id_paragraph, id_header,co_text_paragraph,co_text_header,tot_region_ref,x_len, y_len,index_tot_regions,img_poly = read_xml(xml_file) id_all_text = id_paragraph + id_header @@ -373,6 +392,12 @@ def visualize_textline_segmentation(dir_xml, dir_out, dir_imgs): @main.command() +@click.option( + "--xml_file", + "-xml", + help="xml filename", + type=click.Path(exists=True, dir_okay=False), +) @click.option( "--dir_xml", "-dx", @@ -392,14 +417,24 @@ def visualize_textline_segmentation(dir_xml, dir_out, dir_imgs): "-dimg", help="directory of images where textline segmentation will be overlayed", ) -def visualize_layout_segmentation(dir_xml, dir_out, dir_imgs): - xml_files_ind = os.listdir(dir_xml) +def visualize_layout_segmentation(xml_file, dir_xml, dir_out, dir_imgs): + assert xml_file and dir_xml, "A single xml file -xml or a dir of xml files -dx is required not both of them" + if dir_xml: + xml_files_ind = os.listdir(dir_xml) + else: + xml_files_ind = [xml_file] + for ind_xml in tqdm(xml_files_ind): indexer = 0 #print(ind_xml) #print('########################') - xml_file = os.path.join(dir_xml,ind_xml ) - f_name = Path(ind_xml).stem + if dir_xml: + xml_file = os.path.join(dir_xml,ind_xml ) + f_name = Path(ind_xml).stem + else: + xml_file = os.path.join(ind_xml ) + f_name = Path(ind_xml).stem + print(f_name, 'f_name') img_file_name_with_format = find_format_of_given_filename_in_dir(dir_imgs, f_name) img = cv2.imread(os.path.join(dir_imgs, img_file_name_with_format)) From ba3420b2d8ea1cbca26aac2cc904dd499b893984 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Sun, 25 May 2025 01:12:58 +0200 Subject: [PATCH 147/492] Drop capitals are written separately and are not attached to their corresponding text line. The OCR use case also supports single-image input. --- src/eynollah/cli.py | 11 ++++++++-- src/eynollah/eynollah.py | 46 +++++++++++++++++++++++++++++----------- src/eynollah/writer.py | 8 +++---- 3 files changed, 47 insertions(+), 18 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index cd56833..0c18b2c 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -331,6 +331,12 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ @main.command() +@click.option( + "--image", + "-i", + help="image filename", + type=click.Path(exists=True, dir_okay=False), +) @click.option( "--dir_in", "-di", @@ -415,7 +421,7 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ help="Override log level globally to this", ) -def ocr(dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, draw_texts_on_image, prediction_with_both_of_rgb_and_bin, batch_size, dataset_abbrevation, log_level): +def ocr(image, dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, draw_texts_on_image, prediction_with_both_of_rgb_and_bin, batch_size, dataset_abbrevation, log_level): initLogging() if log_level: getLogger('eynollah').setLevel(getLevelName(log_level)) @@ -426,8 +432,9 @@ def ocr(dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, tr_ocr, ex assert not export_textline_images_and_text or not dir_out_image_text, "Exporting textline and text -etit can not be set alongside directory of images with predicted text -doit" assert not export_textline_images_and_text or not draw_texts_on_image, "Exporting textline and text -etit can not be set alongside draw text on image -dtoi" assert not export_textline_images_and_text or not prediction_with_both_of_rgb_and_bin, "Exporting textline and text -etit can not be set alongside prediction with both rgb and bin -brb" - + assert (bool(image) ^ bool(dir_in)), "Either -i (single image) or -di (directory) must be provided, but not both." eynollah_ocr = Eynollah_ocr( + image_filename=image, dir_xmls=dir_xmls, dir_out_image_text=dir_out_image_text, dir_in=dir_in, diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 1b50713..aa38274 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5134,10 +5134,10 @@ class Eynollah: pixel_img = 4 polygons_of_drop_capitals = return_contours_of_interested_region_by_min_size(text_regions_p, pixel_img) - all_found_textline_polygons = adhere_drop_capital_region_into_corresponding_textline( - text_regions_p, polygons_of_drop_capitals, contours_only_text_parent, contours_only_text_parent_h, - all_box_coord, all_box_coord_h, all_found_textline_polygons, all_found_textline_polygons_h, - kernel=KERNEL, curved_line=self.curved_line, textline_light=self.textline_light) + ##all_found_textline_polygons = adhere_drop_capital_region_into_corresponding_textline( + ##text_regions_p, polygons_of_drop_capitals, contours_only_text_parent, contours_only_text_parent_h, + ##all_box_coord, all_box_coord_h, all_found_textline_polygons, all_found_textline_polygons_h, + ##kernel=KERNEL, curved_line=self.curved_line, textline_light=self.textline_light) if not self.reading_order_machine_based: pixel_seps = 6 @@ -5299,6 +5299,7 @@ class Eynollah_ocr: dir_models, dir_xmls=None, dir_in=None, + image_filename=None, dir_in_bin=None, dir_out=None, dir_out_image_text=None, @@ -5312,6 +5313,7 @@ class Eynollah_ocr: logger=None, ): self.dir_in = dir_in + self.image_filename = image_filename self.dir_in_bin = dir_in_bin self.dir_out = dir_out self.dir_xmls = dir_xmls @@ -5363,13 +5365,20 @@ class Eynollah_ocr: ) def run(self): - ls_imgs = os.listdir(self.dir_in) + if self.dir_in: + ls_imgs = os.listdir(self.dir_in) + else: + ls_imgs = [self.image_filename] if self.tr_ocr: tr_ocr_input_height_and_width = 384 for ind_img in ls_imgs: - file_name = Path(ind_img).stem - dir_img = os.path.join(self.dir_in, ind_img) + if self.dir_in: + file_name = Path(ind_img).stem + dir_img = os.path.join(self.dir_in, ind_img) + else: + file_name = Path(self.image_filename).stem + dir_img = self.image_filename dir_xml = os.path.join(self.dir_xmls, file_name+'.xml') out_file_ocr = os.path.join(self.dir_out, file_name+'.xml') img = cv2.imread(dir_img) @@ -5541,8 +5550,15 @@ class Eynollah_ocr: img_size=(image_width, image_height) for ind_img in ls_imgs: - file_name = Path(ind_img).stem - dir_img = os.path.join(self.dir_in, ind_img) + if self.dir_in: + file_name = Path(ind_img).stem + dir_img = os.path.join(self.dir_in, ind_img) + else: + file_name = Path(self.image_filename).stem + dir_img = self.image_filename + + #file_name = Path(ind_img).stem + #dir_img = os.path.join(self.dir_in, ind_img) dir_xml = os.path.join(self.dir_xmls, file_name+'.xml') out_file_ocr = os.path.join(self.dir_out, file_name+'.xml') img = cv2.imread(dir_img) @@ -5576,6 +5592,7 @@ class Eynollah_ocr: indexer_text_region = 0 indexer_textlines = 0 for nn in root1.iter(region_tags): + type_textregion = nn.attrib['type'] for child_textregion in nn: if child_textregion.tag.endswith("TextLine"): for child_textlines in child_textregion: @@ -5589,7 +5606,9 @@ class Eynollah_ocr: angle_radians = math.atan2(h, w) # Convert to degrees angle_degrees = math.degrees(angle_radians) - + if type_textregion=='drop-capital': + angle_degrees = 0 + if self.draw_texts_on_image: total_bb_coordinates.append([x,y,w,h]) @@ -5632,8 +5651,11 @@ class Eynollah_ocr: #print(file_name,w_n*h_n , mask_poly[:,:,0].sum(), mask_poly[:,:,0].sum() /float(w_n*h_n) , 'ikiiiiii') else: img_crop[mask_poly==0] = 255 - if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 100: - img_crop = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) + if type_textregion=='drop-capital': + pass + else: + if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 100: + img_crop = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index cf0551b..f07abf6 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -283,14 +283,14 @@ class EynollahXmlWriter(): Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals[mm], page_coord))) page.add_TextRegion(marginal) self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals, mm, page_coord, all_box_coord_marginals, slopes_marginals, counter) - + for mm in range(len(found_polygons_drop_capitals)): dropcapital = TextRegionType(id=counter.next_region_id, type_='drop-capital', Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_drop_capitals[mm], page_coord))) page.add_TextRegion(dropcapital) - ###all_box_coord_drop = None - ###slopes_drop = None - ###self.serialize_lines_in_dropcapital(dropcapital, [found_polygons_drop_capitals[mm]], mm, page_coord, all_box_coord_drop, slopes_drop, counter, ocr_all_textlines_textregion=None) + all_box_coord_drop = None + slopes_drop = None + self.serialize_lines_in_dropcapital(dropcapital, [found_polygons_drop_capitals[mm]], mm, page_coord, all_box_coord_drop, slopes_drop, counter, ocr_all_textlines_textregion=None) for mm in range(len(found_polygons_text_region_img)): page.add_ImageRegion(ImageRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_img[mm], page_coord)))) From b18691f96a5f67e5fda1e6b46d1a399bf20fe858 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Sun, 25 May 2025 03:33:54 +0200 Subject: [PATCH 148/492] rnn ocr for all layout textregion types --- src/eynollah/eynollah.py | 41 ++++++++++++++++++++++++++-------------- src/eynollah/writer.py | 31 ++++++++++++++++++++++-------- 2 files changed, 50 insertions(+), 22 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index aa38274..0ee3d14 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4715,11 +4715,10 @@ class Eynollah: if self.extract_only_images: text_regions_p_1, erosion_hurts, polygons_lines_xml, polygons_of_images, image_page, page_coord, cont_page = \ self.get_regions_light_v_extract_only_images(img_res, is_image_enhanced, num_col_classifier) - ocr_all_textlines = None pcgts = self.writer.build_pagexml_no_full_layout( [], page_coord, [], [], [], [], polygons_of_images, [], [], [], [], [], - cont_page, [], [], ocr_all_textlines, []) + cont_page, [], []) if self.plotter: self.plotter.write_images_into_directory(polygons_of_images, image_page) return pcgts @@ -4772,7 +4771,7 @@ class Eynollah: cont_page, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, page_coord, polygons_of_images, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, - cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines, conf_contours_textregions, self.skip_layout_and_reading_order) + cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines=ocr_all_textlines, conf_contours_textregion=conf_contours_textregions, skip_layout_reading_order=self.skip_layout_and_reading_order) return pcgts #print("text region early -1 in %.1fs", time.time() - t0) @@ -4822,10 +4821,9 @@ class Eynollah: if not num_col: self.logger.info("No columns detected, outputting an empty PAGE-XML") - ocr_all_textlines = None pcgts = self.writer.build_pagexml_no_full_layout( [], page_coord, [], [], [], [], [], [], [], [], [], [], - cont_page, [], [], ocr_all_textlines, []) + cont_page, [], []) return pcgts #print("text region early in %.1fs", time.time() - t0) @@ -5004,13 +5002,13 @@ class Eynollah: [], [], page_coord, [], [], [], [], [], [], polygons_of_images, contours_tables, [], polygons_of_marginals, empty_marginals, empty_marginals, [], [], [], - cont_page, polygons_lines_xml, [], [], []) + cont_page, polygons_lines_xml) else: pcgts = self.writer.build_pagexml_no_full_layout( [], page_coord, [], [], [], [], polygons_of_images, polygons_of_marginals, empty_marginals, empty_marginals, [], [], - cont_page, polygons_lines_xml, contours_tables, [], []) + cont_page, polygons_lines_xml, contours_tables) return pcgts @@ -5196,16 +5194,28 @@ class Eynollah: contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered, boxes_d, textline_mask_tot_d) self.logger.info("detection of reading order took %.1fs", time.time() - t_order) - if self.ocr: - ocr_all_textlines = [] + if self.ocr and not self.tr: + gc.collect() + if len(all_found_textline_polygons)>0: + ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + if all_found_textline_polygons_marginals and len(all_found_textline_polygons_marginals)>0: + ocr_all_textlines_marginals = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons_marginals, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + + if all_found_textline_polygons_h and len(all_found_textline_polygons)>0: + ocr_all_textlines_h = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons_h, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + if polygons_of_drop_capitals and len(polygons_of_drop_capitals)>0: + ocr_all_textlines_drop = return_rnn_cnn_ocr_of_given_textlines(image_page, polygons_of_drop_capitals, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) else: ocr_all_textlines = None + ocr_all_textlines_marginals = None + ocr_all_textlines_h = None + ocr_all_textlines_drop = None pcgts = self.writer.build_pagexml_full_layout( contours_only_text_parent, contours_only_text_parent_h, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, polygons_of_images, contours_tables, polygons_of_drop_capitals, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, - cont_page, polygons_lines_xml, ocr_all_textlines, conf_contours_textregions, conf_contours_textregions_h) + cont_page, polygons_lines_xml, ocr_all_textlines, ocr_all_textlines_h, ocr_all_textlines_marginals, ocr_all_textlines_drop, conf_contours_textregions, conf_contours_textregions_h) return pcgts contours_only_text_parent_h = None @@ -5278,18 +5288,21 @@ class Eynollah: elif self.ocr and not self.tr: gc.collect() - ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) - + if len(all_found_textline_polygons)>0: + ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + if all_found_textline_polygons_marginals and len(all_found_textline_polygons_marginals)>0: + ocr_all_textlines_marginals = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons_marginals, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) else: ocr_all_textlines = None - #print(ocr_all_textlines) + ocr_all_textlines_marginals = None self.logger.info("detection of reading order took %.1fs", time.time() - t_order) + pcgts = self.writer.build_pagexml_no_full_layout( txt_con_org, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, all_box_coord, polygons_of_images, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, - cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines, conf_contours_textregions) + cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines, ocr_all_textlines_marginals, conf_contours_textregions) return pcgts diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index f07abf6..085ee6f 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -56,10 +56,12 @@ class EynollahXmlWriter(): points_page_print = points_page_print + ' ' return points_page_print[:-1] - def serialize_lines_in_marginal(self, marginal_region, all_found_textline_polygons_marginals, marginal_idx, page_coord, all_box_coord_marginals, slopes_marginals, counter): + def serialize_lines_in_marginal(self, marginal_region, all_found_textline_polygons_marginals, marginal_idx, page_coord, all_box_coord_marginals, slopes_marginals, counter, ocr_all_textlines_textregion): for j in range(len(all_found_textline_polygons_marginals[marginal_idx])): coords = CoordsType() textline = TextLineType(id=counter.next_line_id, Coords=coords) + if ocr_all_textlines_textregion: + textline.set_TextEquiv( [ TextEquivType(Unicode=ocr_all_textlines_textregion[j]) ] ) marginal_region.add_TextLine(textline) marginal_region.set_orientation(-slopes_marginals[marginal_idx]) points_co = '' @@ -168,7 +170,7 @@ class EynollahXmlWriter(): with open(self.output_filename, 'w') as f: f.write(to_xml(pcgts)) - def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, found_polygons_tables, ocr_all_textlines, conf_contours_textregion, skip_layout_reading_order=False): + def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, found_polygons_tables, ocr_all_textlines=None, ocr_all_textlines_marginals=None, conf_contours_textregion=None, skip_layout_reading_order=False): self.logger.debug('enter build_pagexml_no_full_layout') # create the file structure @@ -198,7 +200,12 @@ class EynollahXmlWriter(): marginal = TextRegionType(id=counter.next_region_id, type_='marginalia', Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals[mm], page_coord))) page.add_TextRegion(marginal) - self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals, mm, page_coord, all_box_coord_marginals, slopes_marginals, counter) + if ocr_all_textlines_marginals: + ocr_textlines = ocr_all_textlines_marginals[mm] + else: + ocr_textlines = None + + self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals, mm, page_coord, all_box_coord_marginals, slopes_marginals, counter, ocr_textlines) for mm in range(len(found_polygons_text_region_img)): img_region = ImageRegionType(id=counter.next_region_id, Coords=CoordsType()) @@ -242,7 +249,7 @@ class EynollahXmlWriter(): return pcgts - def build_pagexml_full_layout(self, found_polygons_text_region, found_polygons_text_region_h, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, ocr_all_textlines, conf_contours_textregion, conf_contours_textregion_h): + def build_pagexml_full_layout(self, found_polygons_text_region, found_polygons_text_region_h, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, ocr_all_textlines=None, ocr_all_textlines_h=None, ocr_all_textlines_marginals=None, ocr_all_textlines_drop=None, conf_contours_textregion=None, conf_contours_textregion_h=None): self.logger.debug('enter build_pagexml_full_layout') # create the file structure @@ -272,8 +279,8 @@ class EynollahXmlWriter(): Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_h[mm], page_coord))) page.add_TextRegion(textregion) - if ocr_all_textlines: - ocr_textlines = ocr_all_textlines[mm] + if ocr_all_textlines_h: + ocr_textlines = ocr_all_textlines_h[mm] else: ocr_textlines = None self.serialize_lines_in_region(textregion, all_found_textline_polygons_h, mm, page_coord, all_box_coord_h, slopes_h, counter, ocr_textlines) @@ -282,7 +289,11 @@ class EynollahXmlWriter(): marginal = TextRegionType(id=counter.next_region_id, type_='marginalia', Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals[mm], page_coord))) page.add_TextRegion(marginal) - self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals, mm, page_coord, all_box_coord_marginals, slopes_marginals, counter) + if ocr_all_textlines_marginals: + ocr_textlines = ocr_all_textlines_marginals[mm] + else: + ocr_textlines = None + self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals, mm, page_coord, all_box_coord_marginals, slopes_marginals, counter, ocr_textlines) for mm in range(len(found_polygons_drop_capitals)): dropcapital = TextRegionType(id=counter.next_region_id, type_='drop-capital', @@ -290,7 +301,11 @@ class EynollahXmlWriter(): page.add_TextRegion(dropcapital) all_box_coord_drop = None slopes_drop = None - self.serialize_lines_in_dropcapital(dropcapital, [found_polygons_drop_capitals[mm]], mm, page_coord, all_box_coord_drop, slopes_drop, counter, ocr_all_textlines_textregion=None) + if ocr_all_textlines_drop: + ocr_textlines = ocr_all_textlines_drop[mm] + else: + ocr_textlines = None + self.serialize_lines_in_dropcapital(dropcapital, [found_polygons_drop_capitals[mm]], mm, page_coord, all_box_coord_drop, slopes_drop, counter, ocr_all_textlines_textregion=ocr_textlines) for mm in range(len(found_polygons_text_region_img)): page.add_ImageRegion(ImageRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_img[mm], page_coord)))) From 31d9fa0c80191786de97cca0cd7be3d0f7248140 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Sun, 25 May 2025 21:44:36 +0200 Subject: [PATCH 149/492] strings alignment function is added + new changes needed for prediction with both bin and rgb inputs is implemented --- requirements.txt | 1 + src/eynollah/eynollah.py | 78 +++++++++++++++++++++++++++------ src/eynollah/utils/utils_ocr.py | 47 +++++++++++++++++--- 3 files changed, 107 insertions(+), 19 deletions(-) diff --git a/requirements.txt b/requirements.txt index aeffd47..4bc0c6a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,3 +6,4 @@ tensorflow < 2.13 numba <= 0.58.1 scikit-image loky +biopython diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 0ee3d14..1f79995 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5647,6 +5647,10 @@ class Eynollah_ocr: better_des_slope = get_orientation_moments(textline_coords) img_crop = rotate_image_with_padding(img_crop, better_des_slope ) + + if self.prediction_with_both_of_rgb_and_bin: + img_crop_bin = rotate_image_with_padding(img_crop_bin, better_des_slope ) + mask_poly = rotate_image_with_padding(mask_poly, better_des_slope ) mask_poly = mask_poly.astype('uint8') @@ -5655,26 +5659,35 @@ class Eynollah_ocr: mask_poly = mask_poly[y_n:y_n+h_n, x_n:x_n+w_n, :] img_crop = img_crop[y_n:y_n+h_n, x_n:x_n+w_n, :] - + img_crop[mask_poly==0] = 255 + if self.prediction_with_both_of_rgb_and_bin: + img_crop_bin = img_crop_bin[y_n:y_n+h_n, x_n:x_n+w_n, :] + img_crop_bin[mask_poly==0] = 255 + if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 100: - img_crop = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) - - #print(file_name,w_n*h_n , mask_poly[:,:,0].sum(), mask_poly[:,:,0].sum() /float(w_n*h_n) , 'ikiiiiii') + if self.prediction_with_both_of_rgb_and_bin: + img_crop, img_crop_bin = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly, img_crop_bin) + else: + img_crop, _ = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) + + else: img_crop[mask_poly==0] = 255 + if self.prediction_with_both_of_rgb_and_bin: + img_crop_bin[mask_poly==0] = 255 if type_textregion=='drop-capital': pass else: if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 100: - img_crop = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) + if self.prediction_with_both_of_rgb_and_bin: + img_crop, img_crop_bin = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly, img_crop_bin) + else: + img_crop, _ = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) - - - if self.prediction_with_both_of_rgb_and_bin: - img_crop_bin[mask_poly==0] = 255 + if not self.export_textline_images_and_text: if w_scaled < 640:#1.5*image_width: @@ -5796,6 +5809,14 @@ class Eynollah_ocr: imgs_bin = cropped_lines_bin[n_start:] imgs_bin = np.array(imgs_bin) imgs_bin = imgs_bin.reshape(imgs_bin.shape[0], image_height, image_width, 3) + + if len(indices_ver)>0: + imgs_bin_ver_flipped = imgs_bin[indices_ver, : ,: ,:] + imgs_bin_ver_flipped = imgs_bin_ver_flipped[:,::-1,::-1,:] + #print(imgs_ver_flipped, 'imgs_ver_flipped') + + else: + imgs_bin_ver_flipped = None else: n_start = i*self.b_s n_end = (i+1)*self.b_s @@ -5817,22 +5838,25 @@ class Eynollah_ocr: if self.prediction_with_both_of_rgb_and_bin: imgs_bin = cropped_lines_bin[n_start:n_end] imgs_bin = np.array(imgs_bin).reshape(self.b_s, image_height, image_width, 3) + + + if len(indices_ver)>0: + imgs_bin_ver_flipped = imgs_bin[indices_ver, : ,: ,:] + imgs_bin_ver_flipped = imgs_bin_ver_flipped[:,::-1,::-1,:] + #print(imgs_ver_flipped, 'imgs_ver_flipped') + else: + imgs_bin_ver_flipped = None preds = self.prediction_model.predict(imgs, verbose=0) if len(indices_ver)>0: - #cv2.imwrite('flipped.png', (imgs_ver_flipped[0, :,:,:]*255).astype('uint8')) - #cv2.imwrite('original.png', (imgs[0, :,:,:]*255).astype('uint8')) - #sys.exit() - #print(imgs_ver_flipped.shape, 'imgs_ver_flipped.shape') preds_flipped = self.prediction_model.predict(imgs_ver_flipped, verbose=0) preds_max_fliped = np.max(preds_flipped, axis=2 ) preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=256 masked_means_flipped = np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) masked_means_flipped[np.isnan(masked_means_flipped)] = 0 - #print(masked_means_flipped, 'masked_means_flipped') preds_max = np.max(preds, axis=2 ) preds_max_args = np.argmax(preds, axis=2 ) @@ -5852,6 +5876,32 @@ class Eynollah_ocr: preds[indices_to_be_replaced,:,:] = preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] if self.prediction_with_both_of_rgb_and_bin: preds_bin = self.prediction_model.predict(imgs_bin, verbose=0) + + if len(indices_ver)>0: + preds_flipped = self.prediction_model.predict(imgs_bin_ver_flipped, verbose=0) + preds_max_fliped = np.max(preds_flipped, axis=2 ) + preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) + pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=256 + masked_means_flipped = np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) + masked_means_flipped[np.isnan(masked_means_flipped)] = 0 + + preds_max = np.max(preds, axis=2 ) + preds_max_args = np.argmax(preds, axis=2 ) + pred_max_not_unk_mask_bool = preds_max_args[:,:]!=256 + + masked_means = np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / np.sum(pred_max_not_unk_mask_bool, axis=1) + masked_means[np.isnan(masked_means)] = 0 + + masked_means_ver = masked_means[indices_ver] + #print(masked_means_ver, 'pred_max_not_unk') + + indices_where_flipped_conf_value_is_higher = np.where(masked_means_flipped > masked_means_ver)[0] + + #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') + if len(indices_where_flipped_conf_value_is_higher)>0: + indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] + preds_bin[indices_to_be_replaced,:,:] = preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] + preds = (preds + preds_bin) / 2. pred_texts = decode_batch_predictions(preds, self.num_to_char) diff --git a/src/eynollah/utils/utils_ocr.py b/src/eynollah/utils/utils_ocr.py index 339b38a..524e7ce 100644 --- a/src/eynollah/utils/utils_ocr.py +++ b/src/eynollah/utils/utils_ocr.py @@ -5,6 +5,7 @@ from scipy.signal import find_peaks from scipy.ndimage import gaussian_filter1d import math from PIL import Image, ImageDraw, ImageFont +from Bio import pairwise2 from .resize import resize_image def decode_batch_predictions(pred, num_to_char, max_len = 128): @@ -252,7 +253,7 @@ def return_splitting_point_of_image(image_to_spliited): return np.sort(peaks_sort_4) -def break_curved_line_into_small_pieces_and_then_merge(img_curved, mask_curved): +def break_curved_line_into_small_pieces_and_then_merge(img_curved, mask_curved, img_bin_curved=None): peaks_4 = return_splitting_point_of_image(img_curved) if len(peaks_4)>0: imgs_tot = [] @@ -260,29 +261,44 @@ def break_curved_line_into_small_pieces_and_then_merge(img_curved, mask_curved): for ind in range(len(peaks_4)+1): if ind==0: img = img_curved[:, :peaks_4[ind], :] + if img_bin_curved: + img_bin = img_curved_bin[:, :peaks_4[ind], :] mask = mask_curved[:, :peaks_4[ind], :] elif ind==len(peaks_4): img = img_curved[:, peaks_4[ind-1]:, :] + if img_bin_curved: + img_bin = img_curved_bin[:, peaks_4[ind-1]:, :] mask = mask_curved[:, peaks_4[ind-1]:, :] else: img = img_curved[:, peaks_4[ind-1]:peaks_4[ind], :] + if img_bin_curved: + img_bin = img_curved_bin[:, peaks_4[ind-1]:peaks_4[ind], :] mask = mask_curved[:, peaks_4[ind-1]:peaks_4[ind], :] or_ma = get_orientation_moments_of_mask(mask) - - imgs_tot.append([img, mask, or_ma] ) + + if img_bin_curved: + imgs_tot.append([img, mask, or_ma, img_bin] ) + else: + imgs_tot.append([img, mask, or_ma] ) w_tot_des_list = [] w_tot_des = 0 imgs_deskewed_list = [] + imgs_bin_deskewed_list = [] + for ind in range(len(imgs_tot)): img_in = imgs_tot[ind][0] mask_in = imgs_tot[ind][1] ori_in = imgs_tot[ind][2] + if img_bin_curved: + img_bin_in = imgs_tot[ind][3] if abs(ori_in)<45: img_in_des = rotate_image_with_padding(img_in, ori_in, border_value=(255,255,255) ) + if img_bin_curved: + img_bin_in_des = rotate_image_with_padding(img_bin_in, ori_in, border_value=(255,255,255) ) mask_in_des = rotate_image_with_padding(mask_in, ori_in) mask_in_des = mask_in_des.astype('uint8') @@ -291,36 +307,52 @@ def break_curved_line_into_small_pieces_and_then_merge(img_curved, mask_curved): mask_in_des = mask_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] img_in_des = img_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] + if img_bin_curved: + img_bin_in_des = img_bin_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) if w_relative==0: w_relative = img_in_des.shape[1] img_in_des = resize_image(img_in_des, 32, w_relative) + if img_bin_curved: + img_bin_in_des = resize_image(img_bin_in_des, 32, w_relative) else: img_in_des = np.copy(img_in) + if img_bin_curved: + img_bin_in_des = np.copy(img_bin_in) w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) if w_relative==0: w_relative = img_in_des.shape[1] img_in_des = resize_image(img_in_des, 32, w_relative) + if img_bin_curved: + img_bin_in_des = resize_image(img_bin_in_des, 32, w_relative) w_tot_des+=img_in_des.shape[1] w_tot_des_list.append(img_in_des.shape[1]) imgs_deskewed_list.append(img_in_des) + if img_bin_curved: + imgs_bin_deskewed_list.append(img_bin_in_des) img_final_deskewed = np.zeros((32, w_tot_des, 3))+255 + if img_bin_curved: + img_bin_final_deskewed = np.zeros((32, w_tot_des, 3))+255 + else: + img_bin_final_deskewed = None w_indexer = 0 for ind in range(len(w_tot_des_list)): img_final_deskewed[:,w_indexer:w_indexer+w_tot_des_list[ind],:] = imgs_deskewed_list[ind][:,:,:] + if img_bin_curved: + img_bin_final_deskewed[:,w_indexer:w_indexer+w_tot_des_list[ind],:] = imgs_bin_deskewed_list[ind][:,:,:] w_indexer = w_indexer+w_tot_des_list[ind] - return img_final_deskewed + return img_final_deskewed, img_bin_final_deskewed else: - return img_curved + return img_curved, img_bin_curved def return_textline_contour_with_added_box_coordinate(textline_contour, box_ind): textline_contour[:,0] = textline_contour[:,0] + box_ind[2] @@ -434,3 +466,8 @@ def return_rnn_cnn_ocr_of_given_textlines(image, all_found_textline_polygons, pr ocr_textline_in_textregion.append(text_textline) ocr_all_textlines.append(ocr_textline_in_textregion) return ocr_all_textlines + +def biopython_align(str1, str2): + alignments = pairwise2.align.globalms(str1, str2, 2, -1, -2, -2) + best_alignment = alignments[0] # Get the best alignment + return best_alignment.seqA, best_alignment.seqB From 03f52e7a467869d6476de6632411e4e93320bf14 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 27 May 2025 23:45:22 +0200 Subject: [PATCH 150/492] updating ocr --- src/eynollah/cli.py | 10 ++++-- src/eynollah/eynollah.py | 24 ++++++++++++-- src/eynollah/utils/utils_ocr.py | 55 +++++++++++++++++---------------- 3 files changed, 58 insertions(+), 31 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 0c18b2c..2d0d6f9 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -337,6 +337,12 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ help="image filename", type=click.Path(exists=True, dir_okay=False), ) +@click.option( + "--overwrite", + "-O", + help="overwrite (instead of skipping) if output xml exists", + is_flag=True, +) @click.option( "--dir_in", "-di", @@ -421,7 +427,7 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ help="Override log level globally to this", ) -def ocr(image, dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, draw_texts_on_image, prediction_with_both_of_rgb_and_bin, batch_size, dataset_abbrevation, log_level): +def ocr(image, overwrite, dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, draw_texts_on_image, prediction_with_both_of_rgb_and_bin, batch_size, dataset_abbrevation, log_level): initLogging() if log_level: getLogger('eynollah').setLevel(getLevelName(log_level)) @@ -449,7 +455,7 @@ def ocr(image, dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, tr_ batch_size=batch_size, pref_of_dataset=dataset_abbrevation, ) - eynollah_ocr.run() + eynollah_ocr.run(overwrite=overwrite) if __name__ == "__main__": main() diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 1f79995..efa1dde 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5338,6 +5338,8 @@ class Eynollah_ocr: self.dir_out_image_text = dir_out_image_text self.prediction_with_both_of_rgb_and_bin = prediction_with_both_of_rgb_and_bin self.pref_of_dataset = pref_of_dataset + self.logger = logger if logger else getLogger('eynollah') + if not export_textline_images_and_text: if tr_ocr: self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed") @@ -5351,7 +5353,7 @@ class Eynollah_ocr: self.b_s = int(batch_size) else: - self.model_ocr_dir = dir_models + "/model_step_750000_ocr"#"/model_step_125000_ocr"#"/model_step_25000_ocr"#"/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" + self.model_ocr_dir = dir_models + "/model_step_1075000_ocr"#"/model_step_125000_ocr"#"/model_step_25000_ocr"#"/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" model_ocr = load_model(self.model_ocr_dir , compile=False) self.prediction_model = tf.keras.models.Model( @@ -5377,7 +5379,7 @@ class Eynollah_ocr: vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True ) - def run(self): + def run(self, overwrite : bool = False): if self.dir_in: ls_imgs = os.listdir(self.dir_in) else: @@ -5394,6 +5396,14 @@ class Eynollah_ocr: dir_img = self.image_filename dir_xml = os.path.join(self.dir_xmls, file_name+'.xml') out_file_ocr = os.path.join(self.dir_out, file_name+'.xml') + + if os.path.exists(out_file_ocr): + if overwrite: + self.logger.warning("will overwrite existing output file '%s'", out_file_ocr) + else: + self.logger.warning("will skip input for existing output file '%s'", out_file_ocr) + continue + img = cv2.imread(dir_img) if self.draw_texts_on_image: @@ -5574,6 +5584,14 @@ class Eynollah_ocr: #dir_img = os.path.join(self.dir_in, ind_img) dir_xml = os.path.join(self.dir_xmls, file_name+'.xml') out_file_ocr = os.path.join(self.dir_out, file_name+'.xml') + + if os.path.exists(out_file_ocr): + if overwrite: + self.logger.warning("will overwrite existing output file '%s'", out_file_ocr) + else: + self.logger.warning("will skip input for existing output file '%s'", out_file_ocr) + continue + img = cv2.imread(dir_img) if self.prediction_with_both_of_rgb_and_bin: cropped_lines_bin = [] @@ -5704,7 +5722,7 @@ class Eynollah_ocr: cropped_lines_bin.append(img_fin) else: if self.prediction_with_both_of_rgb_and_bin: - splited_images, splited_images_bin = return_textlines_split_if_needed(img_crop, img_crop_bin) + splited_images, splited_images_bin = return_textlines_split_if_needed(img_crop, img_crop_bin, prediction_with_both_of_rgb_and_bin=self.prediction_with_both_of_rgb_and_bin) else: splited_images, splited_images_bin = return_textlines_split_if_needed(img_crop, None) if splited_images: diff --git a/src/eynollah/utils/utils_ocr.py b/src/eynollah/utils/utils_ocr.py index 524e7ce..9ef344a 100644 --- a/src/eynollah/utils/utils_ocr.py +++ b/src/eynollah/utils/utils_ocr.py @@ -74,32 +74,24 @@ def distortion_free_resize(image, img_size): def return_start_and_end_of_common_text_of_textline_ocr_without_common_section(textline_image): width = np.shape(textline_image)[1] height = np.shape(textline_image)[0] - common_window = int(0.22*width) + common_window = int(0.06*width) width1 = int ( width/2. - common_window ) width2 = int ( width/2. + common_window ) - + img_sum = np.sum(textline_image[:,:,0], axis=0) sum_smoothed = gaussian_filter1d(img_sum, 3) - + peaks_real, _ = find_peaks(sum_smoothed, height=0) - - if len(peaks_real)>35: + if len(peaks_real)>70: - #peaks_real = peaks_real[(peaks_realwidth1)] - argsort = np.argsort(sum_smoothed[peaks_real])[::-1] - peaks_real_top_six = peaks_real[argsort[:6]] - midpoint = textline_image.shape[1] / 2. - arg_closest = np.argmin(np.abs(peaks_real_top_six - midpoint)) + peaks_real = peaks_real[(peaks_realwidth1)] - #arg_max = np.argmax(sum_smoothed[peaks_real]) - - peaks_final = peaks_real_top_six[arg_closest]#peaks_real[arg_max] - + arg_max = np.argmax(sum_smoothed[peaks_real]) + peaks_final = peaks_real[arg_max] return peaks_final else: return None - # Function to fit text inside the given area def fit_text_single_line(draw, text, font_path, max_width, max_height): initial_font_size = 50 @@ -305,17 +297,28 @@ def break_curved_line_into_small_pieces_and_then_merge(img_curved, mask_curved, #new bounding box x_n, y_n, w_n, h_n = get_contours_and_bounding_boxes(mask_in_des[:,:,0]) - mask_in_des = mask_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] - img_in_des = img_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] - if img_bin_curved: - img_bin_in_des = img_bin_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] - - w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) - if w_relative==0: - w_relative = img_in_des.shape[1] - img_in_des = resize_image(img_in_des, 32, w_relative) - if img_bin_curved: - img_bin_in_des = resize_image(img_bin_in_des, 32, w_relative) + if w_n==0 or h_n==0: + img_in_des = np.copy(img_in) + if img_bin_curved: + img_bin_in_des = np.copy(img_bin_in) + w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) + if w_relative==0: + w_relative = img_in_des.shape[1] + img_in_des = resize_image(img_in_des, 32, w_relative) + if img_bin_curved: + img_bin_in_des = resize_image(img_bin_in_des, 32, w_relative) + else: + mask_in_des = mask_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] + img_in_des = img_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] + if img_bin_curved: + img_bin_in_des = img_bin_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] + + w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) + if w_relative==0: + w_relative = img_in_des.shape[1] + img_in_des = resize_image(img_in_des, 32, w_relative) + if img_bin_curved: + img_bin_in_des = resize_image(img_bin_in_des, 32, w_relative) else: From 1e7cecfcf9534c93b24c11fc7b988a0bd5230a4f Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 28 May 2025 01:17:21 +0200 Subject: [PATCH 151/492] updating ocr --- src/eynollah/eynollah.py | 2 +- src/eynollah/utils/utils_ocr.py | 36 ++++++++++++++++----------------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index efa1dde..0a9248e 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5353,7 +5353,7 @@ class Eynollah_ocr: self.b_s = int(batch_size) else: - self.model_ocr_dir = dir_models + "/model_step_1075000_ocr"#"/model_step_125000_ocr"#"/model_step_25000_ocr"#"/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" + self.model_ocr_dir = dir_models + "/model_step_1150000_ocr"#"/model_step_125000_ocr"#"/model_step_25000_ocr"#"/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" model_ocr = load_model(self.model_ocr_dir , compile=False) self.prediction_model = tf.keras.models.Model( diff --git a/src/eynollah/utils/utils_ocr.py b/src/eynollah/utils/utils_ocr.py index 9ef344a..aa1efa6 100644 --- a/src/eynollah/utils/utils_ocr.py +++ b/src/eynollah/utils/utils_ocr.py @@ -253,23 +253,23 @@ def break_curved_line_into_small_pieces_and_then_merge(img_curved, mask_curved, for ind in range(len(peaks_4)+1): if ind==0: img = img_curved[:, :peaks_4[ind], :] - if img_bin_curved: - img_bin = img_curved_bin[:, :peaks_4[ind], :] + if img_bin_curved is not None: + img_bin = img_bin_curved[:, :peaks_4[ind], :] mask = mask_curved[:, :peaks_4[ind], :] elif ind==len(peaks_4): img = img_curved[:, peaks_4[ind-1]:, :] - if img_bin_curved: - img_bin = img_curved_bin[:, peaks_4[ind-1]:, :] + if img_bin_curved is not None: + img_bin = img_bin_curved[:, peaks_4[ind-1]:, :] mask = mask_curved[:, peaks_4[ind-1]:, :] else: img = img_curved[:, peaks_4[ind-1]:peaks_4[ind], :] - if img_bin_curved: - img_bin = img_curved_bin[:, peaks_4[ind-1]:peaks_4[ind], :] + if img_bin_curved is not None: + img_bin = img_bin_curved[:, peaks_4[ind-1]:peaks_4[ind], :] mask = mask_curved[:, peaks_4[ind-1]:peaks_4[ind], :] or_ma = get_orientation_moments_of_mask(mask) - if img_bin_curved: + if img_bin_curved is not None: imgs_tot.append([img, mask, or_ma, img_bin] ) else: imgs_tot.append([img, mask, or_ma] ) @@ -284,12 +284,12 @@ def break_curved_line_into_small_pieces_and_then_merge(img_curved, mask_curved, img_in = imgs_tot[ind][0] mask_in = imgs_tot[ind][1] ori_in = imgs_tot[ind][2] - if img_bin_curved: + if img_bin_curved is not None: img_bin_in = imgs_tot[ind][3] if abs(ori_in)<45: img_in_des = rotate_image_with_padding(img_in, ori_in, border_value=(255,255,255) ) - if img_bin_curved: + if img_bin_curved is not None: img_bin_in_des = rotate_image_with_padding(img_bin_in, ori_in, border_value=(255,255,255) ) mask_in_des = rotate_image_with_padding(mask_in, ori_in) mask_in_des = mask_in_des.astype('uint8') @@ -299,50 +299,50 @@ def break_curved_line_into_small_pieces_and_then_merge(img_curved, mask_curved, if w_n==0 or h_n==0: img_in_des = np.copy(img_in) - if img_bin_curved: + if img_bin_curved is not None: img_bin_in_des = np.copy(img_bin_in) w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) if w_relative==0: w_relative = img_in_des.shape[1] img_in_des = resize_image(img_in_des, 32, w_relative) - if img_bin_curved: + if img_bin_curved is not None: img_bin_in_des = resize_image(img_bin_in_des, 32, w_relative) else: mask_in_des = mask_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] img_in_des = img_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] - if img_bin_curved: + if img_bin_curved is not None: img_bin_in_des = img_bin_in_des[y_n:y_n+h_n, x_n:x_n+w_n, :] w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) if w_relative==0: w_relative = img_in_des.shape[1] img_in_des = resize_image(img_in_des, 32, w_relative) - if img_bin_curved: + if img_bin_curved is not None: img_bin_in_des = resize_image(img_bin_in_des, 32, w_relative) else: img_in_des = np.copy(img_in) - if img_bin_curved: + if img_bin_curved is not None: img_bin_in_des = np.copy(img_bin_in) w_relative = int(32 * img_in_des.shape[1]/float(img_in_des.shape[0]) ) if w_relative==0: w_relative = img_in_des.shape[1] img_in_des = resize_image(img_in_des, 32, w_relative) - if img_bin_curved: + if img_bin_curved is not None: img_bin_in_des = resize_image(img_bin_in_des, 32, w_relative) w_tot_des+=img_in_des.shape[1] w_tot_des_list.append(img_in_des.shape[1]) imgs_deskewed_list.append(img_in_des) - if img_bin_curved: + if img_bin_curved is not None: imgs_bin_deskewed_list.append(img_bin_in_des) img_final_deskewed = np.zeros((32, w_tot_des, 3))+255 - if img_bin_curved: + if img_bin_curved is not None: img_bin_final_deskewed = np.zeros((32, w_tot_des, 3))+255 else: img_bin_final_deskewed = None @@ -350,7 +350,7 @@ def break_curved_line_into_small_pieces_and_then_merge(img_curved, mask_curved, w_indexer = 0 for ind in range(len(w_tot_des_list)): img_final_deskewed[:,w_indexer:w_indexer+w_tot_des_list[ind],:] = imgs_deskewed_list[ind][:,:,:] - if img_bin_curved: + if img_bin_curved is not None: img_bin_final_deskewed[:,w_indexer:w_indexer+w_tot_des_list[ind],:] = imgs_bin_deskewed_list[ind][:,:,:] w_indexer = w_indexer+w_tot_des_list[ind] return img_final_deskewed, img_bin_final_deskewed From df903aa1b45f43a44eb324e71b5b911763a4d47c Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Sat, 31 May 2025 01:09:14 +0200 Subject: [PATCH 152/492] Parametrize OCR for handling curved lines --- src/eynollah/eynollah.py | 10 +++++----- src/eynollah/utils/utils_ocr.py | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 0a9248e..6c00329 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5353,7 +5353,7 @@ class Eynollah_ocr: self.b_s = int(batch_size) else: - self.model_ocr_dir = dir_models + "/model_step_1150000_ocr"#"/model_step_125000_ocr"#"/model_step_25000_ocr"#"/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" + self.model_ocr_dir = dir_models + "/model_step_1225000_ocr"#"/model_step_125000_ocr"#"/model_step_25000_ocr"#"/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" model_ocr = load_model(self.model_ocr_dir , compile=False) self.prediction_model = tf.keras.models.Model( @@ -5642,7 +5642,7 @@ class Eynollah_ocr: if self.draw_texts_on_image: total_bb_coordinates.append([x,y,w,h]) - + w_scaled = w * image_height/float(h) img_poly_on_img = np.copy(img) @@ -5684,7 +5684,7 @@ class Eynollah_ocr: img_crop_bin = img_crop_bin[y_n:y_n+h_n, x_n:x_n+w_n, :] img_crop_bin[mask_poly==0] = 255 - if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 100: + if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 90: if self.prediction_with_both_of_rgb_and_bin: img_crop, img_crop_bin = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly, img_crop_bin) else: @@ -5698,7 +5698,7 @@ class Eynollah_ocr: if type_textregion=='drop-capital': pass else: - if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 100: + if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 90: if self.prediction_with_both_of_rgb_and_bin: img_crop, img_crop_bin = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly, img_crop_bin) else: @@ -5708,7 +5708,7 @@ class Eynollah_ocr: if not self.export_textline_images_and_text: - if w_scaled < 640:#1.5*image_width: + if w_scaled < 530:#640:#1.5*image_width: img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) cropped_lines.append(img_fin) if angle_degrees > 15: diff --git a/src/eynollah/utils/utils_ocr.py b/src/eynollah/utils/utils_ocr.py index aa1efa6..81a8ae1 100644 --- a/src/eynollah/utils/utils_ocr.py +++ b/src/eynollah/utils/utils_ocr.py @@ -241,7 +241,7 @@ def return_splitting_point_of_image(image_to_spliited): peaks_real = peaks_real[(peaks_realwidth1)] arg_sort = np.argsort(sum_smoothed[peaks_real]) - peaks_sort_4 = peaks_real[arg_sort][::-1][:4] + peaks_sort_4 = peaks_real[arg_sort][::-1][:3] return np.sort(peaks_sort_4) From 3b475915c79ee8c1690349f2d08625ab479eb930 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Sun, 1 Jun 2025 15:53:04 +0200 Subject: [PATCH 153/492] image enhancer is integrated --- src/eynollah/cli.py | 69 +++ src/eynollah/eynollah.py | 234 +--------- src/eynollah/image_enhancer.py | 756 +++++++++++++++++++++++++++++++++ 3 files changed, 830 insertions(+), 229 deletions(-) create mode 100644 src/eynollah/image_enhancer.py diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 2d0d6f9..840bc4b 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -3,6 +3,7 @@ import click from ocrd_utils import initLogging, getLevelName, getLogger from eynollah.eynollah import Eynollah, Eynollah_ocr from eynollah.sbb_binarize import SbbBinarizer +from eynollah.image_enhancer import Enhancer @click.group() def main(): @@ -70,6 +71,74 @@ def binarization(patches, model_dir, input_image, output_image, dir_in, dir_out) +@main.command() +@click.option( + "--image", + "-i", + help="image filename", + type=click.Path(exists=True, dir_okay=False), +) + +@click.option( + "--out", + "-o", + help="directory to write output xml data", + type=click.Path(exists=True, file_okay=False), + required=True, +) +@click.option( + "--overwrite", + "-O", + help="overwrite (instead of skipping) if output xml exists", + is_flag=True, +) +@click.option( + "--dir_in", + "-di", + help="directory of images", + type=click.Path(exists=True, file_okay=False), +) +@click.option( + "--model", + "-m", + help="directory of models", + type=click.Path(exists=True, file_okay=False), + required=True, +) + +@click.option( + "--num_col_upper", + "-ncu", + help="lower limit of columns in document image", +) +@click.option( + "--num_col_lower", + "-ncl", + help="upper limit of columns in document image", +) +@click.option( + "--log_level", + "-l", + type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), + help="Override log level globally to this", +) + +def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_lower, log_level): + initLogging() + if log_level: + getLogger('enhancement').setLevel(getLevelName(log_level)) + assert image or dir_in, "Either a single image -i or a dir_in -di is required" + enhancer_object = Enhancer( + model, + logger=getLogger('enhancement'), + dir_out=out, + num_col_upper=num_col_upper, + num_col_lower=num_col_lower, + ) + if dir_in: + enhancer_object.run(dir_in=dir_in, overwrite=overwrite) + else: + enhancer_object.run(image_filename=image, overwrite=overwrite) @main.command() @click.option( diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 6c00329..cf540d3 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -3612,25 +3612,12 @@ class Eynollah: inference_bs = 3 - cv2.imwrite('textregions.png', text_regions_p*50) - cv2.imwrite('sep.png', (text_regions_p[:,:]==6)*255) - ver_kernel = np.ones((5, 1), dtype=np.uint8) hor_kernel = np.ones((1, 5), dtype=np.uint8) - - #separators = (text_regions_p[:,:]==6)*1 - #text_regions_p[text_regions_p[:,:]==6] = 0 - #separators = separators.astype('uint8') - - #separators = cv2.erode(separators , hor_kernel, iterations=1) - #text_regions_p[separators[:,:]==1] = 6 - - #cv2.imwrite('sep_new.png', (text_regions_p[:,:]==6)*255) - min_cont_size_to_be_dilated = 10 - if len(contours_only_text_parent)>min_cont_size_to_be_dilated: + if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: cx_conts, cy_conts, x_min_conts, x_max_conts, y_min_conts, y_max_conts, _ = find_new_features_of_contours(contours_only_text_parent) args_cont_located = np.array(range(len(contours_only_text_parent))) @@ -3672,7 +3659,6 @@ class Eynollah: text_regions_p_textregions_dilated = cv2.dilate(text_regions_p_textregions_dilated , ver_kernel, iterations=5) text_regions_p_textregions_dilated[text_regions_p[:,:]>1] = 0 - cv2.imwrite('text_regions_p_textregions_dilated.png', text_regions_p_textregions_dilated*255) contours_only_dilated, hir_on_text_dilated = return_contours_of_image(text_regions_p_textregions_dilated) contours_only_dilated = return_parent_contours(contours_only_dilated, hir_on_text_dilated) @@ -3723,21 +3709,20 @@ class Eynollah: img_header_and_sep[int(y_max_main[j]):int(y_max_main[j])+12, int(x_min_main[j]):int(x_max_main[j])] = 1 co_text_all_org = contours_only_text_parent + contours_only_text_parent_h - if len(contours_only_text_parent)>min_cont_size_to_be_dilated: + if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: co_text_all = contours_only_dilated + contours_only_text_parent_h else: co_text_all = contours_only_text_parent + contours_only_text_parent_h else: co_text_all_org = contours_only_text_parent - if len(contours_only_text_parent)>min_cont_size_to_be_dilated: + if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: co_text_all = contours_only_dilated else: co_text_all = contours_only_text_parent if not len(co_text_all): return [], [] - print(len(co_text_all), "co_text_all") - print(len(co_text_all_org), "co_text_all_org") + labels_con = np.zeros((int(y_len /6.), int(x_len/6.), len(co_text_all)), dtype=bool) co_text_all = [(i/6).astype(int) for i in co_text_all] for i in range(len(co_text_all)): @@ -3805,7 +3790,7 @@ class Eynollah: ordered = [i[0] for i in ordered] - if len(contours_only_text_parent)>min_cont_size_to_be_dilated: + if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: org_contours_indexes = [] for ind in range(len(ordered)): region_with_curr_order = ordered[ind] @@ -3823,215 +3808,6 @@ class Eynollah: else: region_ids = ['region_%04d' % i for i in range(len(co_text_all_org))] return ordered, region_ids - - - ####def return_start_and_end_of_common_text_of_textline_ocr(self, textline_image, ind_tot): - ####width = np.shape(textline_image)[1] - ####height = np.shape(textline_image)[0] - ####common_window = int(0.2*width) - - ####width1 = int ( width/2. - common_window ) - ####width2 = int ( width/2. + common_window ) - - ####img_sum = np.sum(textline_image[:,:,0], axis=0) - ####sum_smoothed = gaussian_filter1d(img_sum, 3) - - ####peaks_real, _ = find_peaks(sum_smoothed, height=0) - ####if len(peaks_real)>70: - - ####peaks_real = peaks_real[(peaks_realwidth1)] - - ####arg_sort = np.argsort(sum_smoothed[peaks_real]) - ####arg_sort4 =arg_sort[::-1][:4] - ####peaks_sort_4 = peaks_real[arg_sort][::-1][:4] - ####argsort_sorted = np.argsort(peaks_sort_4) - - ####first_4_sorted = peaks_sort_4[argsort_sorted] - ####y_4_sorted = sum_smoothed[peaks_real][arg_sort4[argsort_sorted]] - #####print(first_4_sorted,'first_4_sorted') - - ####arg_sortnew = np.argsort(y_4_sorted) - ####peaks_final =np.sort( first_4_sorted[arg_sortnew][2:] ) - - #####plt.figure(ind_tot) - #####plt.imshow(textline_image) - #####plt.plot([peaks_final[0], peaks_final[0]], [0, height-1]) - #####plt.plot([peaks_final[1], peaks_final[1]], [0, height-1]) - #####plt.savefig('./'+str(ind_tot)+'.png') - - ####return peaks_final[0], peaks_final[1] - ####else: - ####pass - - ##def return_start_and_end_of_common_text_of_textline_ocr_without_common_section(self, textline_image, ind_tot): - ##width = np.shape(textline_image)[1] - ##height = np.shape(textline_image)[0] - ##common_window = int(0.06*width) - - ##width1 = int ( width/2. - common_window ) - ##width2 = int ( width/2. + common_window ) - - ##img_sum = np.sum(textline_image[:,:,0], axis=0) - ##sum_smoothed = gaussian_filter1d(img_sum, 3) - - ##peaks_real, _ = find_peaks(sum_smoothed, height=0) - ##if len(peaks_real)>70: - ###print(len(peaks_real), 'len(peaks_real)') - - ##peaks_real = peaks_real[(peaks_realwidth1)] - - ##arg_max = np.argmax(sum_smoothed[peaks_real]) - ##peaks_final = peaks_real[arg_max] - - ###plt.figure(ind_tot) - ###plt.imshow(textline_image) - ###plt.plot([peaks_final, peaks_final], [0, height-1]) - ####plt.plot([peaks_final[1], peaks_final[1]], [0, height-1]) - ###plt.savefig('./'+str(ind_tot)+'.png') - - ##return peaks_final - ##else: - ##return None - - ###def return_start_and_end_of_common_text_of_textline_ocr_new_splitted( - ###self, peaks_real, sum_smoothed, start_split, end_split): - - ###peaks_real = peaks_real[(peaks_realstart_split)] - - ###arg_sort = np.argsort(sum_smoothed[peaks_real]) - ###arg_sort4 =arg_sort[::-1][:4] - ###peaks_sort_4 = peaks_real[arg_sort][::-1][:4] - ###argsort_sorted = np.argsort(peaks_sort_4) - - ###first_4_sorted = peaks_sort_4[argsort_sorted] - ###y_4_sorted = sum_smoothed[peaks_real][arg_sort4[argsort_sorted]] - ####print(first_4_sorted,'first_4_sorted') - - ###arg_sortnew = np.argsort(y_4_sorted) - ###peaks_final =np.sort( first_4_sorted[arg_sortnew][3:] ) - ###return peaks_final[0] - - ###def return_start_and_end_of_common_text_of_textline_ocr_new(self, textline_image, ind_tot): - ###width = np.shape(textline_image)[1] - ###height = np.shape(textline_image)[0] - ###common_window = int(0.15*width) - - ###width1 = int ( width/2. - common_window ) - ###width2 = int ( width/2. + common_window ) - ###mid = int(width/2.) - - ###img_sum = np.sum(textline_image[:,:,0], axis=0) - ###sum_smoothed = gaussian_filter1d(img_sum, 3) - - ###peaks_real, _ = find_peaks(sum_smoothed, height=0) - ###if len(peaks_real)>70: - ###peak_start = self.return_start_and_end_of_common_text_of_textline_ocr_new_splitted( - ###peaks_real, sum_smoothed, width1, mid+2) - ###peak_end = self.return_start_and_end_of_common_text_of_textline_ocr_new_splitted( - ###peaks_real, sum_smoothed, mid-2, width2) - - ####plt.figure(ind_tot) - ####plt.imshow(textline_image) - ####plt.plot([peak_start, peak_start], [0, height-1]) - ####plt.plot([peak_end, peak_end], [0, height-1]) - ####plt.savefig('./'+str(ind_tot)+'.png') - - ###return peak_start, peak_end - ###else: - ###pass - - ##def return_ocr_of_textline_without_common_section( - ##self, textline_image, model_ocr, processor, device, width_textline, h2w_ratio,ind_tot): - - ##if h2w_ratio > 0.05: - ##pixel_values = processor(textline_image, return_tensors="pt").pixel_values - ##generated_ids = model_ocr.generate(pixel_values.to(device)) - ##generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - ##else: - ###width = np.shape(textline_image)[1] - ###height = np.shape(textline_image)[0] - ###common_window = int(0.3*width) - ###width1 = int ( width/2. - common_window ) - ###width2 = int ( width/2. + common_window ) - - ##split_point = self.return_start_and_end_of_common_text_of_textline_ocr_without_common_section( - ##textline_image, ind_tot) - ##if split_point: - ##image1 = textline_image[:, :split_point,:]# image.crop((0, 0, width2, height)) - ##image2 = textline_image[:, split_point:,:]#image.crop((width1, 0, width, height)) - - ###pixel_values1 = processor(image1, return_tensors="pt").pixel_values - ###pixel_values2 = processor(image2, return_tensors="pt").pixel_values - - ##pixel_values_merged = processor([image1,image2], return_tensors="pt").pixel_values - ##generated_ids_merged = model_ocr.generate(pixel_values_merged.to(device)) - ##generated_text_merged = processor.batch_decode(generated_ids_merged, skip_special_tokens=True) - - ###print(generated_text_merged,'generated_text_merged') - - ###generated_ids1 = model_ocr.generate(pixel_values1.to(device)) - ###generated_ids2 = model_ocr.generate(pixel_values2.to(device)) - - ###generated_text1 = processor.batch_decode(generated_ids1, skip_special_tokens=True)[0] - ###generated_text2 = processor.batch_decode(generated_ids2, skip_special_tokens=True)[0] - - ###generated_text = generated_text1 + ' ' + generated_text2 - ##generated_text = generated_text_merged[0] + ' ' + generated_text_merged[1] - - ###print(generated_text1,'generated_text1') - ###print(generated_text2, 'generated_text2') - ###print('########################################') - ##else: - ##pixel_values = processor(textline_image, return_tensors="pt").pixel_values - ##generated_ids = model_ocr.generate(pixel_values.to(device)) - ##generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - - ###print(generated_text,'generated_text') - ###print('########################################') - ##return generated_text - - ###def return_ocr_of_textline( - ###self, textline_image, model_ocr, processor, device, width_textline, h2w_ratio,ind_tot): - - ###if h2w_ratio > 0.05: - ###pixel_values = processor(textline_image, return_tensors="pt").pixel_values - ###generated_ids = model_ocr.generate(pixel_values.to(device)) - ###generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - ###else: - ####width = np.shape(textline_image)[1] - ####height = np.shape(textline_image)[0] - ####common_window = int(0.3*width) - ####width1 = int ( width/2. - common_window ) - ####width2 = int ( width/2. + common_window ) - - ###try: - ###width1, width2 = self.return_start_and_end_of_common_text_of_textline_ocr_new(textline_image, ind_tot) - - ###image1 = textline_image[:, :width2,:]# image.crop((0, 0, width2, height)) - ###image2 = textline_image[:, width1:,:]#image.crop((width1, 0, width, height)) - - ###pixel_values1 = processor(image1, return_tensors="pt").pixel_values - ###pixel_values2 = processor(image2, return_tensors="pt").pixel_values - - ###generated_ids1 = model_ocr.generate(pixel_values1.to(device)) - ###generated_ids2 = model_ocr.generate(pixel_values2.to(device)) - - ###generated_text1 = processor.batch_decode(generated_ids1, skip_special_tokens=True)[0] - ###generated_text2 = processor.batch_decode(generated_ids2, skip_special_tokens=True)[0] - ####print(generated_text1,'generated_text1') - ####print(generated_text2, 'generated_text2') - ####print('########################################') - - ###match = sq(None, generated_text1, generated_text2).find_longest_match( - ###0, len(generated_text1), 0, len(generated_text2)) - ###generated_text = generated_text1 + generated_text2[match.b+match.size:] - ###except: - ###pixel_values = processor(textline_image, return_tensors="pt").pixel_values - ###generated_ids = model_ocr.generate(pixel_values.to(device)) - ###generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - - ###return generated_text - def return_list_of_contours_with_desired_order(self, ls_cons, sorted_indexes): return [ls_cons[sorted_indexes[index]] for index in range(len(sorted_indexes))] diff --git a/src/eynollah/image_enhancer.py b/src/eynollah/image_enhancer.py new file mode 100644 index 0000000..71445f7 --- /dev/null +++ b/src/eynollah/image_enhancer.py @@ -0,0 +1,756 @@ +""" +Image enhancer. The output can be written as same scale of input or in new predicted scale. +""" + +from logging import Logger +from difflib import SequenceMatcher as sq +from PIL import Image, ImageDraw, ImageFont +import math +import os +import sys +import time +from typing import Optional +import atexit +import warnings +from functools import partial +from pathlib import Path +from multiprocessing import cpu_count +import gc +import copy +from loky import ProcessPoolExecutor +import xml.etree.ElementTree as ET +import cv2 +import numpy as np +from ocrd import OcrdPage +from ocrd_utils import getLogger, tf_disable_interactive_logs +import statistics +from tensorflow.keras.models import load_model +from .utils.resize import resize_image +from .utils import ( + crop_image_inside_box +) + +DPI_THRESHOLD = 298 +KERNEL = np.ones((5, 5), np.uint8) + + +class Enhancer: + def __init__( + self, + dir_models : str, + dir_out : Optional[str] = None, + num_col_upper : Optional[int] = None, + num_col_lower : Optional[int] = None, + logger : Optional[Logger] = None, + ): + self.dir_out = dir_out + self.input_binary = False + self.light_version = False + if num_col_upper: + self.num_col_upper = int(num_col_upper) + else: + self.num_col_upper = num_col_upper + if num_col_lower: + self.num_col_lower = int(num_col_lower) + else: + self.num_col_lower = num_col_lower + + self.logger = logger if logger else getLogger('enhancement') + # for parallelization of CPU-intensive tasks: + self.executor = ProcessPoolExecutor(max_workers=cpu_count(), timeout=1200) + atexit.register(self.executor.shutdown) + self.dir_models = dir_models + self.model_dir_of_enhancement = dir_models + "/eynollah-enhancement_20210425" + self.model_dir_of_col_classifier = dir_models + "/eynollah-column-classifier_20210425" + self.model_page_dir = dir_models + "/eynollah-page-extraction_20210425" + + try: + for device in tf.config.list_physical_devices('GPU'): + tf.config.experimental.set_memory_growth(device, True) + except: + self.logger.warning("no GPU device available") + + self.model_page = self.our_load_model(self.model_page_dir) + self.model_classifier = self.our_load_model(self.model_dir_of_col_classifier) + self.model_enhancement = self.our_load_model(self.model_dir_of_enhancement) + + def cache_images(self, image_filename=None, image_pil=None, dpi=None): + ret = {} + t_c0 = time.time() + if image_filename: + ret['img'] = cv2.imread(image_filename) + if self.light_version: + self.dpi = 100 + else: + self.dpi = 0#check_dpi(image_filename) + else: + ret['img'] = pil2cv(image_pil) + if self.light_version: + self.dpi = 100 + else: + self.dpi = 0#check_dpi(image_pil) + ret['img_grayscale'] = cv2.cvtColor(ret['img'], cv2.COLOR_BGR2GRAY) + for prefix in ('', '_grayscale'): + ret[f'img{prefix}_uint8'] = ret[f'img{prefix}'].astype(np.uint8) + self._imgs = ret + if dpi is not None: + self.dpi = dpi + + def reset_file_name_dir(self, image_filename): + t_c = time.time() + self.cache_images(image_filename=image_filename) + self.output_filename = os.path.join(self.dir_out, Path(image_filename).stem +'.png') + + def imread(self, grayscale=False, uint8=True): + key = 'img' + if grayscale: + key += '_grayscale' + if uint8: + key += '_uint8' + return self._imgs[key].copy() + + def isNaN(self, num): + return num != num + + @staticmethod + def our_load_model(model_file): + if model_file.endswith('.h5') and Path(model_file[:-3]).exists(): + # prefer SavedModel over HDF5 format if it exists + model_file = model_file[:-3] + try: + model = load_model(model_file, compile=False) + except: + model = load_model(model_file, compile=False, custom_objects={ + "PatchEncoder": PatchEncoder, "Patches": Patches}) + return model + + def predict_enhancement(self, img): + self.logger.debug("enter predict_enhancement") + + img_height_model = self.model_enhancement.layers[-1].output_shape[1] + img_width_model = self.model_enhancement.layers[-1].output_shape[2] + if img.shape[0] < img_height_model: + img = cv2.resize(img, (img.shape[1], img_width_model), interpolation=cv2.INTER_NEAREST) + if img.shape[1] < img_width_model: + img = cv2.resize(img, (img_height_model, img.shape[0]), interpolation=cv2.INTER_NEAREST) + margin = int(0.1 * img_width_model) + width_mid = img_width_model - 2 * margin + height_mid = img_height_model - 2 * margin + img = img / 255. + img_h = img.shape[0] + img_w = img.shape[1] + + prediction_true = np.zeros((img_h, img_w, 3)) + nxf = img_w / float(width_mid) + nyf = img_h / float(height_mid) + nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf) + nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf) + + for i in range(nxf): + for j in range(nyf): + if i == 0: + index_x_d = i * width_mid + index_x_u = index_x_d + img_width_model + else: + index_x_d = i * width_mid + index_x_u = index_x_d + img_width_model + if j == 0: + index_y_d = j * height_mid + index_y_u = index_y_d + img_height_model + else: + index_y_d = j * height_mid + index_y_u = index_y_d + img_height_model + + if index_x_u > img_w: + index_x_u = img_w + index_x_d = img_w - img_width_model + if index_y_u > img_h: + index_y_u = img_h + index_y_d = img_h - img_height_model + + img_patch = img[np.newaxis, index_y_d:index_y_u, index_x_d:index_x_u, :] + label_p_pred = self.model_enhancement.predict(img_patch, verbose=0) + seg = label_p_pred[0, :, :, :] * 255 + + if i == 0 and j == 0: + prediction_true[index_y_d + 0:index_y_u - margin, + index_x_d + 0:index_x_u - margin] = \ + seg[0:-margin or None, + 0:-margin or None] + elif i == nxf - 1 and j == nyf - 1: + prediction_true[index_y_d + margin:index_y_u - 0, + index_x_d + margin:index_x_u - 0] = \ + seg[margin:, + margin:] + elif i == 0 and j == nyf - 1: + prediction_true[index_y_d + margin:index_y_u - 0, + index_x_d + 0:index_x_u - margin] = \ + seg[margin:, + 0:-margin or None] + elif i == nxf - 1 and j == 0: + prediction_true[index_y_d + 0:index_y_u - margin, + index_x_d + margin:index_x_u - 0] = \ + seg[0:-margin or None, + margin:] + elif i == 0 and j != 0 and j != nyf - 1: + prediction_true[index_y_d + margin:index_y_u - margin, + index_x_d + 0:index_x_u - margin] = \ + seg[margin:-margin or None, + 0:-margin or None] + elif i == nxf - 1 and j != 0 and j != nyf - 1: + prediction_true[index_y_d + margin:index_y_u - margin, + index_x_d + margin:index_x_u - 0] = \ + seg[margin:-margin or None, + margin:] + elif i != 0 and i != nxf - 1 and j == 0: + prediction_true[index_y_d + 0:index_y_u - margin, + index_x_d + margin:index_x_u - margin] = \ + seg[0:-margin or None, + margin:-margin or None] + elif i != 0 and i != nxf - 1 and j == nyf - 1: + prediction_true[index_y_d + margin:index_y_u - 0, + index_x_d + margin:index_x_u - margin] = \ + seg[margin:, + margin:-margin or None] + else: + prediction_true[index_y_d + margin:index_y_u - margin, + index_x_d + margin:index_x_u - margin] = \ + seg[margin:-margin or None, + margin:-margin or None] + + prediction_true = prediction_true.astype(int) + return prediction_true + + def calculate_width_height_by_columns(self, img, num_col, width_early, label_p_pred): + self.logger.debug("enter calculate_width_height_by_columns") + if num_col == 1 and width_early < 1100: + img_w_new = 2000 + elif num_col == 1 and width_early >= 2500: + img_w_new = 2000 + elif num_col == 1 and width_early >= 1100 and width_early < 2500: + img_w_new = width_early + elif num_col == 2 and width_early < 2000: + img_w_new = 2400 + elif num_col == 2 and width_early >= 3500: + img_w_new = 2400 + elif num_col == 2 and width_early >= 2000 and width_early < 3500: + img_w_new = width_early + elif num_col == 3 and width_early < 2000: + img_w_new = 3000 + elif num_col == 3 and width_early >= 4000: + img_w_new = 3000 + elif num_col == 3 and width_early >= 2000 and width_early < 4000: + img_w_new = width_early + elif num_col == 4 and width_early < 2500: + img_w_new = 4000 + elif num_col == 4 and width_early >= 5000: + img_w_new = 4000 + elif num_col == 4 and width_early >= 2500 and width_early < 5000: + img_w_new = width_early + elif num_col == 5 and width_early < 3700: + img_w_new = 5000 + elif num_col == 5 and width_early >= 7000: + img_w_new = 5000 + elif num_col == 5 and width_early >= 3700 and width_early < 7000: + img_w_new = width_early + elif num_col == 6 and width_early < 4500: + img_w_new = 6500 # 5400 + else: + img_w_new = width_early + img_h_new = img_w_new * img.shape[0] // img.shape[1] + + if label_p_pred[0][int(num_col - 1)] < 0.9 and img_w_new < width_early: + img_new = np.copy(img) + num_column_is_classified = False + #elif label_p_pred[0][int(num_col - 1)] < 0.8 and img_h_new >= 8000: + elif img_h_new >= 8000: + img_new = np.copy(img) + num_column_is_classified = False + else: + img_new = resize_image(img, img_h_new, img_w_new) + num_column_is_classified = True + + return img_new, num_column_is_classified + + def early_page_for_num_of_column_classification(self,img_bin): + self.logger.debug("enter early_page_for_num_of_column_classification") + if self.input_binary: + img = np.copy(img_bin).astype(np.uint8) + else: + img = self.imread() + img = cv2.GaussianBlur(img, (5, 5), 0) + img_page_prediction = self.do_prediction(False, img, self.model_page) + + imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY) + _, thresh = cv2.threshold(imgray, 0, 255, 0) + thresh = cv2.dilate(thresh, KERNEL, iterations=3) + contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + if len(contours)>0: + cnt_size = np.array([cv2.contourArea(contours[j]) + for j in range(len(contours))]) + cnt = contours[np.argmax(cnt_size)] + box = cv2.boundingRect(cnt) + else: + box = [0, 0, img.shape[1], img.shape[0]] + cropped_page, page_coord = crop_image_inside_box(box, img) + + self.logger.debug("exit early_page_for_num_of_column_classification") + return cropped_page, page_coord + + def calculate_width_height_by_columns_1_2(self, img, num_col, width_early, label_p_pred): + self.logger.debug("enter calculate_width_height_by_columns") + if num_col == 1: + img_w_new = 1000 + else: + img_w_new = 1300 + img_h_new = img_w_new * img.shape[0] // img.shape[1] + + if label_p_pred[0][int(num_col - 1)] < 0.9 and img_w_new < width_early: + img_new = np.copy(img) + num_column_is_classified = False + #elif label_p_pred[0][int(num_col - 1)] < 0.8 and img_h_new >= 8000: + elif img_h_new >= 8000: + img_new = np.copy(img) + num_column_is_classified = False + else: + img_new = resize_image(img, img_h_new, img_w_new) + num_column_is_classified = True + + return img_new, num_column_is_classified + + def resize_and_enhance_image_with_column_classifier(self, light_version): + self.logger.debug("enter resize_and_enhance_image_with_column_classifier") + dpi = 0#self.dpi + self.logger.info("Detected %s DPI", dpi) + if self.input_binary: + img = self.imread() + prediction_bin = self.do_prediction(True, img, self.model_bin, n_batch_inference=5) + prediction_bin = 255 * (prediction_bin[:,:,0]==0) + prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2).astype(np.uint8) + img= np.copy(prediction_bin) + img_bin = prediction_bin + else: + img = self.imread() + self.h_org, self.w_org = img.shape[:2] + img_bin = None + + width_early = img.shape[1] + t1 = time.time() + _, page_coord = self.early_page_for_num_of_column_classification(img_bin) + + self.image_page_org_size = img[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3], :] + self.page_coord = page_coord + + if self.num_col_upper and not self.num_col_lower: + num_col = self.num_col_upper + label_p_pred = [np.ones(6)] + elif self.num_col_lower and not self.num_col_upper: + num_col = self.num_col_lower + label_p_pred = [np.ones(6)] + elif not self.num_col_upper and not self.num_col_lower: + if self.input_binary: + img_in = np.copy(img) + img_in = img_in / 255.0 + img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST) + img_in = img_in.reshape(1, 448, 448, 3) + else: + img_1ch = self.imread(grayscale=True) + width_early = img_1ch.shape[1] + img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + + img_1ch = img_1ch / 255.0 + img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST) + img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3)) + img_in[0, :, :, 0] = img_1ch[:, :] + img_in[0, :, :, 1] = img_1ch[:, :] + img_in[0, :, :, 2] = img_1ch[:, :] + + label_p_pred = self.model_classifier.predict(img_in, verbose=0) + num_col = np.argmax(label_p_pred[0]) + 1 + elif (self.num_col_upper and self.num_col_lower) and (self.num_col_upper!=self.num_col_lower): + if self.input_binary: + img_in = np.copy(img) + img_in = img_in / 255.0 + img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST) + img_in = img_in.reshape(1, 448, 448, 3) + else: + img_1ch = self.imread(grayscale=True) + width_early = img_1ch.shape[1] + img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + + img_1ch = img_1ch / 255.0 + img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST) + img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3)) + img_in[0, :, :, 0] = img_1ch[:, :] + img_in[0, :, :, 1] = img_1ch[:, :] + img_in[0, :, :, 2] = img_1ch[:, :] + + label_p_pred = self.model_classifier.predict(img_in, verbose=0) + num_col = np.argmax(label_p_pred[0]) + 1 + + if num_col > self.num_col_upper: + num_col = self.num_col_upper + label_p_pred = [np.ones(6)] + if num_col < self.num_col_lower: + num_col = self.num_col_lower + label_p_pred = [np.ones(6)] + else: + num_col = self.num_col_upper + label_p_pred = [np.ones(6)] + + self.logger.info("Found %d columns (%s)", num_col, np.around(label_p_pred, decimals=5)) + + if dpi < DPI_THRESHOLD: + if light_version and num_col in (1,2): + img_new, num_column_is_classified = self.calculate_width_height_by_columns_1_2( + img, num_col, width_early, label_p_pred) + else: + img_new, num_column_is_classified = self.calculate_width_height_by_columns( + img, num_col, width_early, label_p_pred) + if light_version: + image_res = np.copy(img_new) + else: + image_res = self.predict_enhancement(img_new) + is_image_enhanced = True + + else: + num_column_is_classified = True + image_res = np.copy(img) + is_image_enhanced = False + + self.logger.debug("exit resize_and_enhance_image_with_column_classifier") + return is_image_enhanced, img, image_res, num_col, num_column_is_classified, img_bin + def do_prediction( + self, patches, img, model, + n_batch_inference=1, marginal_of_patch_percent=0.1, + thresholding_for_some_classes_in_light_version=False, + thresholding_for_artificial_class_in_light_version=False, thresholding_for_fl_light_version=False, threshold_art_class_textline=0.1): + + self.logger.debug("enter do_prediction") + img_height_model = model.layers[-1].output_shape[1] + img_width_model = model.layers[-1].output_shape[2] + + if not patches: + img_h_page = img.shape[0] + img_w_page = img.shape[1] + img = img / float(255.0) + img = resize_image(img, img_height_model, img_width_model) + + label_p_pred = model.predict(img[np.newaxis], verbose=0) + seg = np.argmax(label_p_pred, axis=3)[0] + + if thresholding_for_artificial_class_in_light_version: + seg_art = label_p_pred[0,:,:,2] + + seg_art[seg_art0] =1 + + skeleton_art = skeletonize(seg_art) + skeleton_art = skeleton_art*1 + + seg[skeleton_art==1]=2 + + if thresholding_for_fl_light_version: + seg_header = label_p_pred[0,:,:,2] + + seg_header[seg_header<0.2] = 0 + seg_header[seg_header>0] =1 + + seg[seg_header==1]=2 + + seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2) + prediction_true = resize_image(seg_color, img_h_page, img_w_page).astype(np.uint8) + return prediction_true + + if img.shape[0] < img_height_model: + img = resize_image(img, img_height_model, img.shape[1]) + if img.shape[1] < img_width_model: + img = resize_image(img, img.shape[0], img_width_model) + + self.logger.debug("Patch size: %sx%s", img_height_model, img_width_model) + margin = int(marginal_of_patch_percent * img_height_model) + width_mid = img_width_model - 2 * margin + height_mid = img_height_model - 2 * margin + img = img / 255. + #img = img.astype(np.float16) + img_h = img.shape[0] + img_w = img.shape[1] + prediction_true = np.zeros((img_h, img_w, 3)) + mask_true = np.zeros((img_h, img_w)) + nxf = img_w / float(width_mid) + nyf = img_h / float(height_mid) + nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf) + nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf) + + list_i_s = [] + list_j_s = [] + list_x_u = [] + list_x_d = [] + list_y_u = [] + list_y_d = [] + + batch_indexer = 0 + img_patch = np.zeros((n_batch_inference, img_height_model, img_width_model, 3)) + for i in range(nxf): + for j in range(nyf): + if i == 0: + index_x_d = i * width_mid + index_x_u = index_x_d + img_width_model + else: + index_x_d = i * width_mid + index_x_u = index_x_d + img_width_model + if j == 0: + index_y_d = j * height_mid + index_y_u = index_y_d + img_height_model + else: + index_y_d = j * height_mid + index_y_u = index_y_d + img_height_model + if index_x_u > img_w: + index_x_u = img_w + index_x_d = img_w - img_width_model + if index_y_u > img_h: + index_y_u = img_h + index_y_d = img_h - img_height_model + + list_i_s.append(i) + list_j_s.append(j) + list_x_u.append(index_x_u) + list_x_d.append(index_x_d) + list_y_d.append(index_y_d) + list_y_u.append(index_y_u) + + img_patch[batch_indexer,:,:,:] = img[index_y_d:index_y_u, index_x_d:index_x_u, :] + batch_indexer += 1 + + if (batch_indexer == n_batch_inference or + # last batch + i == nxf - 1 and j == nyf - 1): + self.logger.debug("predicting patches on %s", str(img_patch.shape)) + label_p_pred = model.predict(img_patch, verbose=0) + seg = np.argmax(label_p_pred, axis=3) + + if thresholding_for_some_classes_in_light_version: + seg_not_base = label_p_pred[:,:,:,4] + seg_not_base[seg_not_base>0.03] =1 + seg_not_base[seg_not_base<1] =0 + + seg_line = label_p_pred[:,:,:,3] + seg_line[seg_line>0.1] =1 + seg_line[seg_line<1] =0 + + seg_background = label_p_pred[:,:,:,0] + seg_background[seg_background>0.25] =1 + seg_background[seg_background<1] =0 + + seg[seg_not_base==1]=4 + seg[seg_background==1]=0 + seg[(seg_line==1) & (seg==0)]=3 + if thresholding_for_artificial_class_in_light_version: + seg_art = label_p_pred[:,:,:,2] + + seg_art[seg_art0] =1 + + ##seg[seg_art==1]=2 + + indexer_inside_batch = 0 + for i_batch, j_batch in zip(list_i_s, list_j_s): + seg_in = seg[indexer_inside_batch] + + if thresholding_for_artificial_class_in_light_version: + seg_in_art = seg_art[indexer_inside_batch] + + index_y_u_in = list_y_u[indexer_inside_batch] + index_y_d_in = list_y_d[indexer_inside_batch] + + index_x_u_in = list_x_u[indexer_inside_batch] + index_x_d_in = list_x_d[indexer_inside_batch] + + if i_batch == 0 and j_batch == 0: + prediction_true[index_y_d_in + 0:index_y_u_in - margin, + index_x_d_in + 0:index_x_u_in - margin] = \ + seg_in[0:-margin or None, + 0:-margin or None, + np.newaxis] + if thresholding_for_artificial_class_in_light_version: + prediction_true[index_y_d_in + 0:index_y_u_in - margin, + index_x_d_in + 0:index_x_u_in - margin, 1] = \ + seg_in_art[0:-margin or None, + 0:-margin or None] + + elif i_batch == nxf - 1 and j_batch == nyf - 1: + prediction_true[index_y_d_in + margin:index_y_u_in - 0, + index_x_d_in + margin:index_x_u_in - 0] = \ + seg_in[margin:, + margin:, + np.newaxis] + if thresholding_for_artificial_class_in_light_version: + prediction_true[index_y_d_in + margin:index_y_u_in - 0, + index_x_d_in + margin:index_x_u_in - 0, 1] = \ + seg_in_art[margin:, + margin:] + + elif i_batch == 0 and j_batch == nyf - 1: + prediction_true[index_y_d_in + margin:index_y_u_in - 0, + index_x_d_in + 0:index_x_u_in - margin] = \ + seg_in[margin:, + 0:-margin or None, + np.newaxis] + if thresholding_for_artificial_class_in_light_version: + prediction_true[index_y_d_in + margin:index_y_u_in - 0, + index_x_d_in + 0:index_x_u_in - margin, 1] = \ + seg_in_art[margin:, + 0:-margin or None] + + elif i_batch == nxf - 1 and j_batch == 0: + prediction_true[index_y_d_in + 0:index_y_u_in - margin, + index_x_d_in + margin:index_x_u_in - 0] = \ + seg_in[0:-margin or None, + margin:, + np.newaxis] + if thresholding_for_artificial_class_in_light_version: + prediction_true[index_y_d_in + 0:index_y_u_in - margin, + index_x_d_in + margin:index_x_u_in - 0, 1] = \ + seg_in_art[0:-margin or None, + margin:] + + elif i_batch == 0 and j_batch != 0 and j_batch != nyf - 1: + prediction_true[index_y_d_in + margin:index_y_u_in - margin, + index_x_d_in + 0:index_x_u_in - margin] = \ + seg_in[margin:-margin or None, + 0:-margin or None, + np.newaxis] + if thresholding_for_artificial_class_in_light_version: + prediction_true[index_y_d_in + margin:index_y_u_in - margin, + index_x_d_in + 0:index_x_u_in - margin, 1] = \ + seg_in_art[margin:-margin or None, + 0:-margin or None] + + elif i_batch == nxf - 1 and j_batch != 0 and j_batch != nyf - 1: + prediction_true[index_y_d_in + margin:index_y_u_in - margin, + index_x_d_in + margin:index_x_u_in - 0] = \ + seg_in[margin:-margin or None, + margin:, + np.newaxis] + if thresholding_for_artificial_class_in_light_version: + prediction_true[index_y_d_in + margin:index_y_u_in - margin, + index_x_d_in + margin:index_x_u_in - 0, 1] = \ + seg_in_art[margin:-margin or None, + margin:] + + elif i_batch != 0 and i_batch != nxf - 1 and j_batch == 0: + prediction_true[index_y_d_in + 0:index_y_u_in - margin, + index_x_d_in + margin:index_x_u_in - margin] = \ + seg_in[0:-margin or None, + margin:-margin or None, + np.newaxis] + if thresholding_for_artificial_class_in_light_version: + prediction_true[index_y_d_in + 0:index_y_u_in - margin, + index_x_d_in + margin:index_x_u_in - margin, 1] = \ + seg_in_art[0:-margin or None, + margin:-margin or None] + + elif i_batch != 0 and i_batch != nxf - 1 and j_batch == nyf - 1: + prediction_true[index_y_d_in + margin:index_y_u_in - 0, + index_x_d_in + margin:index_x_u_in - margin] = \ + seg_in[margin:, + margin:-margin or None, + np.newaxis] + if thresholding_for_artificial_class_in_light_version: + prediction_true[index_y_d_in + margin:index_y_u_in - 0, + index_x_d_in + margin:index_x_u_in - margin, 1] = \ + seg_in_art[margin:, + margin:-margin or None] + + else: + prediction_true[index_y_d_in + margin:index_y_u_in - margin, + index_x_d_in + margin:index_x_u_in - margin] = \ + seg_in[margin:-margin or None, + margin:-margin or None, + np.newaxis] + if thresholding_for_artificial_class_in_light_version: + prediction_true[index_y_d_in + margin:index_y_u_in - margin, + index_x_d_in + margin:index_x_u_in - margin, 1] = \ + seg_in_art[margin:-margin or None, + margin:-margin or None] + indexer_inside_batch += 1 + + + list_i_s = [] + list_j_s = [] + list_x_u = [] + list_x_d = [] + list_y_u = [] + list_y_d = [] + + batch_indexer = 0 + img_patch[:] = 0 + + prediction_true = prediction_true.astype(np.uint8) + + if thresholding_for_artificial_class_in_light_version: + kernel_min = np.ones((3, 3), np.uint8) + prediction_true[:,:,0][prediction_true[:,:,0]==2] = 0 + + skeleton_art = skeletonize(prediction_true[:,:,1]) + skeleton_art = skeleton_art*1 + + skeleton_art = skeleton_art.astype('uint8') + + skeleton_art = cv2.dilate(skeleton_art, kernel_min, iterations=1) + + prediction_true[:,:,0][skeleton_art==1]=2 + #del model + gc.collect() + return prediction_true + + def run_enhancement(self, light_version): + t_in = time.time() + self.logger.info("Resizing and enhancing image...") + is_image_enhanced, img_org, img_res, num_col_classifier, num_column_is_classified, img_bin = \ + self.resize_and_enhance_image_with_column_classifier(light_version) + + self.logger.info("Image was %senhanced.", '' if is_image_enhanced else 'not ') + return img_res, is_image_enhanced, num_col_classifier, num_column_is_classified + + + def run_single(self): + t0 = time.time() + img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement(light_version=False) + + return img_res + + + def run(self, image_filename : Optional[str] = None, dir_in : Optional[str] = None, overwrite : bool = False): + """ + Get image and scales, then extract the page of scanned image + """ + self.logger.debug("enter run") + t0_tot = time.time() + + if dir_in: + self.ls_imgs = os.listdir(dir_in) + elif image_filename: + self.ls_imgs = [image_filename] + else: + raise ValueError("run requires either a single image filename or a directory") + + for img_filename in self.ls_imgs: + self.logger.info(img_filename) + t0 = time.time() + + self.reset_file_name_dir(os.path.join(dir_in or "", img_filename)) + #print("text region early -11 in %.1fs", time.time() - t0) + + if os.path.exists(self.output_filename): + if overwrite: + self.logger.warning("will overwrite existing output file '%s'", self.output_filename) + else: + self.logger.warning("will skip input for existing output file '%s'", self.output_filename) + continue + + image_enhanced = self.run_single() + img_enhanced_org_scale = resize_image(image_enhanced, self.h_org, self.w_org) + + cv2.imwrite(self.output_filename, img_enhanced_org_scale) + From 9342b76038fb274e1f4f8a7e2d31cb1ee3e1e296 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Sun, 1 Jun 2025 22:10:13 +0200 Subject: [PATCH 154/492] saving enhanced image in org or scaled resolution --- src/eynollah/cli.py | 9 ++++++++- src/eynollah/eynollah.py | 5 ++--- src/eynollah/image_enhancer.py | 7 +++++-- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 840bc4b..9398c47 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -116,6 +116,12 @@ def binarization(patches, model_dir, input_image, output_image, dir_in, dir_out) "-ncl", help="upper limit of columns in document image", ) +@click.option( + "--save_org_scale/--no_save_org_scale", + "-sos/-nosos", + is_flag=True, + help="if this parameter set to true, this tool will save the enhanced image in org scale.", +) @click.option( "--log_level", "-l", @@ -123,7 +129,7 @@ def binarization(patches, model_dir, input_image, output_image, dir_in, dir_out) help="Override log level globally to this", ) -def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_lower, log_level): +def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_lower, save_org_scale, log_level): initLogging() if log_level: getLogger('enhancement').setLevel(getLevelName(log_level)) @@ -134,6 +140,7 @@ def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_low dir_out=out, num_col_upper=num_col_upper, num_col_lower=num_col_lower, + save_org_scale=save_org_scale, ) if dir_in: enhancer_object.run(dir_in=dir_in, overwrite=overwrite) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index cf540d3..9c834e2 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5434,10 +5434,9 @@ class Eynollah_ocr: img_crop = img_poly_on_img[y:y+h, x:x+w, :] - #print(file_name, angle_degrees,w*h , mask_poly[:,:,0].sum(), mask_poly[:,:,0].sum() /float(w*h) , 'didi') if not self.do_not_mask_with_textline_contour: - if angle_degrees > 15: + if angle_degrees > 3: better_des_slope = get_orientation_moments(textline_coords) img_crop = rotate_image_with_padding(img_crop, better_des_slope ) @@ -5484,7 +5483,7 @@ class Eynollah_ocr: if not self.export_textline_images_and_text: - if w_scaled < 530:#640:#1.5*image_width: + if w_scaled < 640:#1.5*image_width: img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) cropped_lines.append(img_fin) if angle_degrees > 15: diff --git a/src/eynollah/image_enhancer.py b/src/eynollah/image_enhancer.py index 71445f7..c89f532 100644 --- a/src/eynollah/image_enhancer.py +++ b/src/eynollah/image_enhancer.py @@ -41,11 +41,13 @@ class Enhancer: dir_out : Optional[str] = None, num_col_upper : Optional[int] = None, num_col_lower : Optional[int] = None, + save_org_scale : bool = False, logger : Optional[Logger] = None, ): self.dir_out = dir_out self.input_binary = False self.light_version = False + self.save_org_scale = save_org_scale if num_col_upper: self.num_col_upper = int(num_col_upper) else: @@ -750,7 +752,8 @@ class Enhancer: continue image_enhanced = self.run_single() - img_enhanced_org_scale = resize_image(image_enhanced, self.h_org, self.w_org) + if self.save_org_scale: + image_enhanced = resize_image(image_enhanced, self.h_org, self.w_org) - cv2.imwrite(self.output_filename, img_enhanced_org_scale) + cv2.imwrite(self.output_filename, image_enhanced) From e26c4ab9b4071df22445fc6b45d91db826ce7917 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Sun, 1 Jun 2025 22:44:50 +0200 Subject: [PATCH 155/492] image enhancer updated --- src/eynollah/image_enhancer.py | 40 +++++++--------------------------- 1 file changed, 8 insertions(+), 32 deletions(-) diff --git a/src/eynollah/image_enhancer.py b/src/eynollah/image_enhancer.py index c89f532..983712d 100644 --- a/src/eynollah/image_enhancer.py +++ b/src/eynollah/image_enhancer.py @@ -225,47 +225,23 @@ class Enhancer: def calculate_width_height_by_columns(self, img, num_col, width_early, label_p_pred): self.logger.debug("enter calculate_width_height_by_columns") - if num_col == 1 and width_early < 1100: + if num_col == 1: img_w_new = 2000 - elif num_col == 1 and width_early >= 2500: - img_w_new = 2000 - elif num_col == 1 and width_early >= 1100 and width_early < 2500: - img_w_new = width_early - elif num_col == 2 and width_early < 2000: + elif num_col == 2: img_w_new = 2400 - elif num_col == 2 and width_early >= 3500: - img_w_new = 2400 - elif num_col == 2 and width_early >= 2000 and width_early < 3500: - img_w_new = width_early - elif num_col == 3 and width_early < 2000: + elif num_col == 3: img_w_new = 3000 - elif num_col == 3 and width_early >= 4000: - img_w_new = 3000 - elif num_col == 3 and width_early >= 2000 and width_early < 4000: - img_w_new = width_early - elif num_col == 4 and width_early < 2500: + elif num_col == 4: img_w_new = 4000 - elif num_col == 4 and width_early >= 5000: - img_w_new = 4000 - elif num_col == 4 and width_early >= 2500 and width_early < 5000: - img_w_new = width_early - elif num_col == 5 and width_early < 3700: + elif num_col == 5: img_w_new = 5000 - elif num_col == 5 and width_early >= 7000: - img_w_new = 5000 - elif num_col == 5 and width_early >= 3700 and width_early < 7000: - img_w_new = width_early - elif num_col == 6 and width_early < 4500: - img_w_new = 6500 # 5400 + elif num_col == 6: + img_w_new = 6500 else: img_w_new = width_early img_h_new = img_w_new * img.shape[0] // img.shape[1] - if label_p_pred[0][int(num_col - 1)] < 0.9 and img_w_new < width_early: - img_new = np.copy(img) - num_column_is_classified = False - #elif label_p_pred[0][int(num_col - 1)] < 0.8 and img_h_new >= 8000: - elif img_h_new >= 8000: + if img_h_new >= 8000: img_new = np.copy(img) num_column_is_classified = False else: From f79af201abf14b2fe6ec51b066daf7aac7a929ff Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 2 Jun 2025 18:21:33 +0200 Subject: [PATCH 156/492] Fix: Resolved OCR bug when text region type is undefined --- src/eynollah/eynollah.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 9c834e2..fc60f2e 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5399,7 +5399,10 @@ class Eynollah_ocr: indexer_text_region = 0 indexer_textlines = 0 for nn in root1.iter(region_tags): - type_textregion = nn.attrib['type'] + try: + type_textregion = nn.attrib['type'] + except: + type_textregion = 'paragraph' for child_textregion in nn: if child_textregion.tag.endswith("TextLine"): for child_textlines in child_textregion: @@ -5467,6 +5470,7 @@ class Eynollah_ocr: else: + better_des_slope = 0 img_crop[mask_poly==0] = 255 if self.prediction_with_both_of_rgb_and_bin: img_crop_bin[mask_poly==0] = 255 @@ -5486,7 +5490,7 @@ class Eynollah_ocr: if w_scaled < 640:#1.5*image_width: img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) cropped_lines.append(img_fin) - if angle_degrees > 15: + if abs(better_des_slope) > 45: cropped_lines_ver_index.append(1) else: cropped_lines_ver_index.append(0) @@ -5505,7 +5509,7 @@ class Eynollah_ocr: cropped_lines.append(img_fin) cropped_lines_meging_indexing.append(1) - if angle_degrees > 15: + if abs(better_des_slope) > 45: cropped_lines_ver_index.append(1) else: cropped_lines_ver_index.append(0) @@ -5515,7 +5519,7 @@ class Eynollah_ocr: cropped_lines.append(img_fin) cropped_lines_meging_indexing.append(-1) - if angle_degrees > 15: + if abs(better_des_slope) > 45: cropped_lines_ver_index.append(1) else: cropped_lines_ver_index.append(0) @@ -5531,7 +5535,7 @@ class Eynollah_ocr: cropped_lines.append(img_fin) cropped_lines_meging_indexing.append(0) - if angle_degrees > 15: + if abs(better_des_slope) > 45: cropped_lines_ver_index.append(1) else: cropped_lines_ver_index.append(0) From eb91000490282e2ea0d6058032f69f29da7783b6 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 2 Jun 2025 18:23:34 +0200 Subject: [PATCH 157/492] layout visualization updated --- train/generate_gt_for_training.py | 4 ++-- train/gt_gen_utils.py | 9 +++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/train/generate_gt_for_training.py b/train/generate_gt_for_training.py index 9b7f02b..8ca5cd3 100644 --- a/train/generate_gt_for_training.py +++ b/train/generate_gt_for_training.py @@ -418,7 +418,7 @@ def visualize_textline_segmentation(dir_xml, dir_out, dir_imgs): help="directory of images where textline segmentation will be overlayed", ) def visualize_layout_segmentation(xml_file, dir_xml, dir_out, dir_imgs): - assert xml_file and dir_xml, "A single xml file -xml or a dir of xml files -dx is required not both of them" + assert xml_file or dir_xml, "A single xml file -xml or a dir of xml files -dx is required not both of them" if dir_xml: xml_files_ind = os.listdir(dir_xml) else: @@ -442,7 +442,7 @@ def visualize_layout_segmentation(xml_file, dir_xml, dir_out, dir_imgs): co_text, co_graphic, co_sep, co_img, co_table, co_noise, y_len, x_len = get_layout_contours_for_visualization(xml_file) - added_image = visualize_image_from_contours_layout(co_text['paragraph'], co_text['header'], co_text['drop-capital'], co_sep, co_img, co_text['marginalia'], img) + added_image = visualize_image_from_contours_layout(co_text['paragraph'], co_text['header']+co_text['heading'], co_text['drop-capital'], co_sep, co_img, co_text['marginalia'], img) cv2.imwrite(os.path.join(dir_out, f_name+'.png'), added_image) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index a734020..0ac15a2 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -306,6 +306,7 @@ def get_layout_contours_for_visualization(xml_file): co_noise=[] types_text = [] + types_graphic = [] for tag in region_tags: if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): @@ -325,6 +326,9 @@ def get_layout_contours_for_visualization(xml_file): if len(types_text_without_paragraph) == 0: if "type" in nn.attrib: c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + else: + c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + elif len(types_text_without_paragraph) >= 1: if "type" in nn.attrib: if nn.attrib['type'] in types_text_without_paragraph: @@ -332,10 +336,15 @@ def get_layout_contours_for_visualization(xml_file): else: c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + else: + c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + else: if "type" in nn.attrib: if nn.attrib['type'] in all_defined_textregion_types: c_t_in[nn.attrib['type']].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + else: + c_t_in['paragraph'].append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) break else: From 9b4e78c55ce4fc4c121a9e6afae4ebcf79f42435 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 11 Jun 2025 18:57:08 +0200 Subject: [PATCH 158/492] Fixed duplicate textline_light assignments (true and false) in the OCR-D framework for the Eynollah light version, which caused rectangles to be used instead of contours for textlines --- src/eynollah/ocrd-tool.json | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/eynollah/ocrd-tool.json b/src/eynollah/ocrd-tool.json index e972ec8..ce15206 100644 --- a/src/eynollah/ocrd-tool.json +++ b/src/eynollah/ocrd-tool.json @@ -38,7 +38,7 @@ "textline_light": { "type": "boolean", "default": true, - "description": "Light version need textline light" + "description": "Light version need textline light. If this parameter set to true, this tool will try to return contoure of textlines instead of rectangle bounding box of textline with a faster method." }, "tables": { "type": "boolean", @@ -65,11 +65,6 @@ "default": false, "description": "if this parameter set to true, this tool would check that input image need resizing and enhancement or not." }, - "textline_light": { - "type": "boolean", - "default": false, - "description": "if this parameter set to true, this tool will try to return contoure of textlines instead of rectangle bounding box of textline with a faster method." - }, "right_to_left": { "type": "boolean", "default": false, From 32889ef1e01dd24f0b7d5dfe0ad2a6e12a910aeb Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 12 Jun 2025 13:57:41 +0200 Subject: [PATCH 159/492] adapt binarization CLI according to #156 --- src/eynollah/cli.py | 19 ++++++++----------- src/eynollah/sbb_binarize.py | 10 +++++----- 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index c189aca..42f9bca 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -48,8 +48,7 @@ def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, i @main.command() @click.option('--patches/--no-patches', default=True, help='by enabling this parameter you let the model to see the image in patches.') @click.option('--model_dir', '-m', type=click.Path(exists=True, file_okay=False), required=True, help='directory containing models for prediction') -@click.argument('input_image', required=False) -@click.argument('output_image', required=False) +@click.option("--input-image", "-i", help="input image", type=click.Path(exists=True, dir_okay=False)) @click.option( "--dir_in", "-di", @@ -57,16 +56,14 @@ def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, i type=click.Path(exists=True, file_okay=False), ) @click.option( - "--dir_out", - "-do", - help="directory for output images", - type=click.Path(exists=True, file_okay=False), + "--output", + "-o", + help="output image (if using -i) or output image directory (if using -di)", + type=click.Path(file_okay=True, dir_okay=True), ) -def binarization(patches, model_dir, input_image, output_image, dir_in, dir_out): - assert (dir_out is None) == (dir_in is None), "Options -di and -do are mutually dependent" - assert (input_image is None) == (output_image is None), "INPUT_IMAGE and OUTPUT_IMAGE are mutually dependent" - assert (dir_in is None) != (input_image is None), "Specify either -di and -do options, or INPUT_IMAGE and OUTPUT_IMAGE" - SbbBinarizer(model_dir).run(image_path=input_image, use_patches=patches, save=output_image, dir_in=dir_in, dir_out=dir_out) +def binarization(patches, model_dir, input_image, dir_in, output): + assert (dir_in is None) != (input_image is None), "Specify either -di and or -i not both" + SbbBinarizer(model_dir).run(image_path=input_image, use_patches=patches, output=output, dir_in=dir_in) diff --git a/src/eynollah/sbb_binarize.py b/src/eynollah/sbb_binarize.py index f43b6ba..2d5035f 100644 --- a/src/eynollah/sbb_binarize.py +++ b/src/eynollah/sbb_binarize.py @@ -314,8 +314,8 @@ class SbbBinarizer: prediction_true = prediction_true.astype(np.uint8) return prediction_true[:,:,0] - def run(self, image=None, image_path=None, save=None, use_patches=False, dir_in=None, dir_out=None): - print(dir_in,'dir_in') + def run(self, image=None, image_path=None, output=None, use_patches=False, dir_in=None): + # print(dir_in,'dir_in') if not dir_in: if (image is not None and image_path is not None) or \ (image is None and image_path is None): @@ -343,8 +343,8 @@ class SbbBinarizer: kernel = np.ones((5, 5), np.uint8) img_last[:, :][img_last[:, :] > 0] = 255 img_last = (img_last[:, :] == 0) * 255 - if save: - cv2.imwrite(save, img_last) + if output: + cv2.imwrite(output, img_last) return img_last else: ls_imgs = os.listdir(dir_in) @@ -374,4 +374,4 @@ class SbbBinarizer: img_last[:, :][img_last[:, :] > 0] = 255 img_last = (img_last[:, :] == 0) * 255 - cv2.imwrite(os.path.join(dir_out,image_stem+'.png'), img_last) + cv2.imwrite(os.path.join(output, image_stem + '.png'), img_last) From c194a20c9c55bedb16ed859343f48a6b3645eadc Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 11 Jun 2025 18:57:08 +0200 Subject: [PATCH 160/492] Fixed duplicate textline_light assignments (true and false) in the OCR-D framework for the Eynollah light version, which caused rectangles to be used instead of contours for textlines --- src/eynollah/ocrd-tool.json | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/eynollah/ocrd-tool.json b/src/eynollah/ocrd-tool.json index e972ec8..ce15206 100644 --- a/src/eynollah/ocrd-tool.json +++ b/src/eynollah/ocrd-tool.json @@ -38,7 +38,7 @@ "textline_light": { "type": "boolean", "default": true, - "description": "Light version need textline light" + "description": "Light version need textline light. If this parameter set to true, this tool will try to return contoure of textlines instead of rectangle bounding box of textline with a faster method." }, "tables": { "type": "boolean", @@ -65,11 +65,6 @@ "default": false, "description": "if this parameter set to true, this tool would check that input image need resizing and enhancement or not." }, - "textline_light": { - "type": "boolean", - "default": false, - "description": "if this parameter set to true, this tool will try to return contoure of textlines instead of rectangle bounding box of textline with a faster method." - }, "right_to_left": { "type": "boolean", "default": false, From b7b218ff11660061fb0f606b871ebe3c9f831184 Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 12 Jun 2025 15:30:17 +0200 Subject: [PATCH 161/492] OCR-D processor: same behavior as standalone wrt light_version/textline_light --- src/eynollah/processor.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/eynollah/processor.py b/src/eynollah/processor.py index 8f99489..a53fede 100644 --- a/src/eynollah/processor.py +++ b/src/eynollah/processor.py @@ -14,9 +14,10 @@ class EynollahProcessor(Processor): return 'ocrd-eynollah-segment' def setup(self) -> None: - if self.parameter['textline_light'] and not self.parameter['light_version']: - raise ValueError("Error: You set parameter 'textline_light' to enable light textline detection, " - "but parameter 'light_version' is not enabled") + assert self.parameter + if self.parameter['textline_light'] != self.parameter['light_version']: + raise ValueError("Error: You must set or unset both parameter 'textline_light' (to enable light textline detection), " + "and parameter 'light_version' (faster+simpler method for main region detection and deskewing)") self.eynollah = Eynollah( self.resolve_resource(self.parameter['models']), logger=self.logger, From f5a1d1a255a080469ba4624d7912b6e5e4cc7647 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 25 Jun 2025 18:24:16 +0200 Subject: [PATCH 162/492] docker file to train model with desired cuda and cudnn --- train/Dockerfile | 29 ++++++++++++++++++ train/config_params_docker.json | 54 +++++++++++++++++++++++++++++++++ train/train.py | 2 +- 3 files changed, 84 insertions(+), 1 deletion(-) create mode 100644 train/Dockerfile create mode 100644 train/config_params_docker.json diff --git a/train/Dockerfile b/train/Dockerfile new file mode 100644 index 0000000..2456ea4 --- /dev/null +++ b/train/Dockerfile @@ -0,0 +1,29 @@ +# Use NVIDIA base image +FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04 + +# Set the working directory +WORKDIR /app + + +# Set environment variable for GitPython +ENV GIT_PYTHON_REFRESH=quiet + +# Install Python and pip +RUN apt-get update && apt-get install -y --fix-broken && \ + apt-get install -y \ + python3 \ + python3-pip \ + python3-distutils \ + python3-setuptools \ + python3-wheel && \ + rm -rf /var/lib/apt/lists/* + +# Copy and install Python dependencies +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +# Copy the rest of the application +COPY . . + +# Specify the entry point +CMD ["python3", "train.py", "with", "config_params_docker.json"] diff --git a/train/config_params_docker.json b/train/config_params_docker.json new file mode 100644 index 0000000..45f87d3 --- /dev/null +++ b/train/config_params_docker.json @@ -0,0 +1,54 @@ +{ + "backbone_type" : "nontransformer", + "task": "segmentation", + "n_classes" : 3, + "n_epochs" : 1, + "input_height" : 672, + "input_width" : 448, + "weight_decay" : 1e-6, + "n_batch" : 4, + "learning_rate": 1e-4, + "patches" : false, + "pretraining" : true, + "augmentation" : false, + "flip_aug" : false, + "blur_aug" : true, + "scaling" : true, + "adding_rgb_background": false, + "adding_rgb_foreground": false, + "add_red_textlines": false, + "channels_shuffling": true, + "degrading": true, + "brightening": true, + "binarization" : false, + "scaling_bluring" : false, + "scaling_binarization" : false, + "scaling_flip" : false, + "rotation": false, + "rotation_not_90": true, + "transformer_num_patches_xy": [14, 21], + "transformer_patchsize_x": 1, + "transformer_patchsize_y": 1, + "transformer_projection_dim": 64, + "transformer_mlp_head_units": [128, 64], + "transformer_layers": 1, + "transformer_num_heads": 1, + "transformer_cnn_first": true, + "blur_k" : ["blur","gauss","median"], + "scales" : [0.6, 0.7, 0.8, 0.9], + "brightness" : [1.3, 1.5, 1.7, 2], + "degrade_scales" : [0.2, 0.4], + "flip_index" : [0, 1, -1], + "shuffle_indexes" : [ [0,2,1], [1,2,0], [1,0,2] , [2,1,0]], + "thetha" : [5, -5], + "number_of_backgrounds_per_image": 2, + "continue_training": false, + "index_start" : 0, + "dir_of_start_model" : " ", + "weighted_loss": false, + "is_loss_soft_dice": true, + "data_is_provided": false, + "dir_train": "/entry_point_dir/train", + "dir_eval": "/entry_point_dir/eval", + "dir_output": "/entry_point_dir/output" +} diff --git a/train/train.py b/train/train.py index f6a4f47..e8e92af 100644 --- a/train/train.py +++ b/train/train.py @@ -53,7 +53,7 @@ def get_dirs_or_files(input_data): return image_input, labels_input -ex = Experiment() +ex = Experiment(save_git_info=False) @ex.config From 1b222594d694884108428d47a74aa67111d40218 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 25 Jun 2025 18:33:55 +0200 Subject: [PATCH 163/492] Update README.md: how to train model using docker image --- train/README.md | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/train/README.md b/train/README.md index b9e70a8..7c69a10 100644 --- a/train/README.md +++ b/train/README.md @@ -24,7 +24,19 @@ each class will be defined with a RGB value and beside images, a text file of cl ### Train To train a model, run: ``python train.py with config_params.json`` - + +### Train using Docker + +#### Build the Docker image + + ```bash + docker build -t model-training . + ``` +#### Run Docker image + ```bash + docker run --gpus all -v /host/path/to/entry_point_dir:/entry_point_dir model-training + ``` + ### Ground truth format Lables for each pixel are identified by a number. So if you have a binary case, ``n_classes`` should be set to ``2`` and labels should From 53dd4b26a95172f9aa33ff9806c637c18cad5ab4 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 3 Jul 2025 11:50:47 +0200 Subject: [PATCH 164/492] decorated with confidence value for cnnrnn ocr model --- src/eynollah/eynollah.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index fc60f2e..3b9d898 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5129,7 +5129,7 @@ class Eynollah_ocr: self.b_s = int(batch_size) else: - self.model_ocr_dir = dir_models + "/model_step_1225000_ocr"#"/model_step_125000_ocr"#"/model_step_25000_ocr"#"/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" + self.model_ocr_dir = dir_models + "/model_step_900000_ocr"#"/model_step_25000_ocr"#"/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" model_ocr = load_model(self.model_ocr_dir , compile=False) self.prediction_model = tf.keras.models.Model( @@ -5487,7 +5487,7 @@ class Eynollah_ocr: if not self.export_textline_images_and_text: - if w_scaled < 640:#1.5*image_width: + if w_scaled < 750:#1.5*image_width: img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) cropped_lines.append(img_fin) if abs(better_des_slope) > 45: @@ -5580,6 +5580,7 @@ class Eynollah_ocr: if not self.export_textline_images_and_text: extracted_texts = [] + extracted_conf_value = [] n_iterations = math.ceil(len(cropped_lines) / self.b_s) @@ -5700,12 +5701,19 @@ class Eynollah_ocr: preds_bin[indices_to_be_replaced,:,:] = preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] preds = (preds + preds_bin) / 2. + pred_texts = decode_batch_predictions(preds, self.num_to_char) + + preds_max = np.max(preds, axis=2 ) + preds_max_args = np.argmax(preds, axis=2 ) + pred_max_not_unk_mask_bool = preds_max_args[:,:]!=256 + masked_means = np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / np.sum(pred_max_not_unk_mask_bool, axis=1) for ib in range(imgs.shape[0]): pred_texts_ib = pred_texts[ib].replace("[UNK]", "") extracted_texts.append(pred_texts_ib) + extracted_conf_value.append(masked_means[ib]) del cropped_lines if self.prediction_with_both_of_rgb_and_bin: @@ -5713,7 +5721,10 @@ class Eynollah_ocr: gc.collect() extracted_texts_merged = [extracted_texts[ind] if cropped_lines_meging_indexing[ind]==0 else extracted_texts[ind]+" "+extracted_texts[ind+1] if cropped_lines_meging_indexing[ind]==1 else None for ind in range(len(cropped_lines_meging_indexing))] + + extracted_conf_value_merged = [extracted_conf_value[ind] if cropped_lines_meging_indexing[ind]==0 else (extracted_conf_value[ind]+extracted_conf_value[ind+1])/2. if cropped_lines_meging_indexing[ind]==1 else None for ind in range(len(cropped_lines_meging_indexing))] + extracted_conf_value_merged = [extracted_conf_value_merged[ind_cfm] for ind_cfm in range(len(extracted_texts_merged)) if extracted_texts_merged[ind_cfm] is not None] extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) @@ -5791,6 +5802,7 @@ class Eynollah_ocr: if not is_textline_text: text_subelement = ET.SubElement(child_textregion, 'TextEquiv') + text_subelement.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") unicode_textline = ET.SubElement(text_subelement, 'Unicode') unicode_textline.text = extracted_texts_merged[indexer] else: @@ -5798,6 +5810,7 @@ class Eynollah_ocr: if childtest3.tag.endswith("TextEquiv"): for child_uc in childtest3: if child_uc.tag.endswith("Unicode"): + childtest3.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") child_uc.text = extracted_texts_merged[indexer] indexer = indexer + 1 From 04fead348fa612c36e428465e0df092dd701484c Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 3 Jul 2025 15:24:52 +0200 Subject: [PATCH 165/492] ocr: make sure that image height or width is not zero --- src/eynollah/eynollah.py | 4 ---- src/eynollah/utils/utils_ocr.py | 34 +++++++++++++++++++-------------- 2 files changed, 20 insertions(+), 18 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 3b9d898..1260a96 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5435,7 +5435,6 @@ class Eynollah_ocr: mask_poly = mask_poly[y:y+h, x:x+w, :] img_crop = img_poly_on_img[y:y+h, x:x+w, :] - #print(file_name, angle_degrees,w*h , mask_poly[:,:,0].sum(), mask_poly[:,:,0].sum() /float(w*h) , 'didi') if not self.do_not_mask_with_textline_contour: @@ -5482,9 +5481,6 @@ class Eynollah_ocr: img_crop, img_crop_bin = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly, img_crop_bin) else: img_crop, _ = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) - - - if not self.export_textline_images_and_text: if w_scaled < 750:#1.5*image_width: diff --git a/src/eynollah/utils/utils_ocr.py b/src/eynollah/utils/utils_ocr.py index 81a8ae1..1e9162a 100644 --- a/src/eynollah/utils/utils_ocr.py +++ b/src/eynollah/utils/utils_ocr.py @@ -124,23 +124,26 @@ def return_textlines_split_if_needed(textline_image, textline_image_bin, predict else: return None, None def preprocess_and_resize_image_for_ocrcnn_model(img, image_height, image_width): - ratio = image_height /float(img.shape[0]) - w_ratio = int(ratio * img.shape[1]) - - if w_ratio <= image_width: - width_new = w_ratio + if img.shape[0]==0 or img.shape[1]==0: + img_fin = np.ones((image_height, image_width, 3)) else: - width_new = image_width + ratio = image_height /float(img.shape[0]) + w_ratio = int(ratio * img.shape[1]) - if width_new == 0: - width_new = img.shape[1] + if w_ratio <= image_width: + width_new = w_ratio + else: + width_new = image_width + + if width_new == 0: + width_new = img.shape[1] + - - img = resize_image(img, image_height, width_new) - img_fin = np.ones((image_height, image_width, 3))*255 + img = resize_image(img, image_height, width_new) + img_fin = np.ones((image_height, image_width, 3))*255 - img_fin[:,:width_new,:] = img[:,:,:] - img_fin = img_fin / 255. + img_fin[:,:width_new,:] = img[:,:,:] + img_fin = img_fin / 255. return img_fin def get_deskewed_contour_and_bb_and_image(contour, image, deskew_angle): @@ -188,7 +191,10 @@ def rotate_image_with_padding(image, angle, border_value=(0,0,0)): rotation_matrix[1, 2] += (new_h / 2) - center[1] # Perform the rotation - rotated_image = cv2.warpAffine(image, rotation_matrix, (new_w, new_h), borderValue=border_value) + try: + rotated_image = cv2.warpAffine(image, rotation_matrix, (new_w, new_h), borderValue=border_value) + except: + rotated_image = np.copy(image) return rotated_image From fee40049cdfe1325d65f717b66fe3ccc11d4c9d4 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 16 Jul 2025 14:00:12 +0200 Subject: [PATCH 166/492] ocr model renamed - image text font for ocr result is now using Charis-7.000 font (downloaded from here https://software.sil.org/charis/download/) --- src/eynollah/eynollah.py | 148 +++++++++++++++++++++------------------ 1 file changed, 78 insertions(+), 70 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 1260a96..bf11dec 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -318,7 +318,7 @@ class Eynollah: if self.ocr and self.tr: self.model_ocr_dir = dir_models + "/trocr_model_ens_of_3_checkpoints_201124" elif self.ocr and not self.tr: - self.model_ocr_dir = dir_models + "/model_step_750000_ocr"#"/model_step_125000_ocr"#"/model_step_25000_ocr"#"/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" + self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250716" if self.tables: if self.light_version: self.model_table_dir = dir_models + "/modelens_table_0t4_201124" @@ -5129,7 +5129,7 @@ class Eynollah_ocr: self.b_s = int(batch_size) else: - self.model_ocr_dir = dir_models + "/model_step_900000_ocr"#"/model_step_25000_ocr"#"/model_step_1050000_ocr"#"/model_0_ocr_cnnrnn"#"/model_23_ocr_cnnrnn" + self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250716" model_ocr = load_model(self.model_ocr_dir , compile=False) self.prediction_model = tf.keras.models.Model( @@ -5276,7 +5276,7 @@ class Eynollah_ocr: if self.draw_texts_on_image: - font_path = "NotoSans-Regular.ttf" # Make sure this file exists! + font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! font = ImageFont.truetype(font_path, 40) for indexer_text, bb_ind in enumerate(total_bb_coordinates): @@ -5340,8 +5340,8 @@ class Eynollah_ocr: tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf8",default_namespace=None) #print("Job done in %.1fs", time.time() - t0) else: - max_len = 512 - padding_token = 299 + max_len = 512#280#512 + padding_token = 299#1500#299 image_width = 512#max_len * 4 image_height = 32 @@ -5435,52 +5435,57 @@ class Eynollah_ocr: mask_poly = mask_poly[y:y+h, x:x+w, :] img_crop = img_poly_on_img[y:y+h, x:x+w, :] - - #print(file_name, angle_degrees,w*h , mask_poly[:,:,0].sum(), mask_poly[:,:,0].sum() /float(w*h) , 'didi') - if not self.do_not_mask_with_textline_contour: - if angle_degrees > 3: - better_des_slope = get_orientation_moments(textline_coords) - - img_crop = rotate_image_with_padding(img_crop, better_des_slope ) - - if self.prediction_with_both_of_rgb_and_bin: - img_crop_bin = rotate_image_with_padding(img_crop_bin, better_des_slope ) - - mask_poly = rotate_image_with_padding(mask_poly, better_des_slope ) - mask_poly = mask_poly.astype('uint8') - - #new bounding box - x_n, y_n, w_n, h_n = get_contours_and_bounding_boxes(mask_poly[:,:,0]) - - mask_poly = mask_poly[y_n:y_n+h_n, x_n:x_n+w_n, :] - img_crop = img_crop[y_n:y_n+h_n, x_n:x_n+w_n, :] - + + if self.export_textline_images_and_text: + if not self.do_not_mask_with_textline_contour: img_crop[mask_poly==0] = 255 - - if self.prediction_with_both_of_rgb_and_bin: - img_crop_bin = img_crop_bin[y_n:y_n+h_n, x_n:x_n+w_n, :] - img_crop_bin[mask_poly==0] = 255 - - if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 90: + + else: + #print(file_name, angle_degrees,w*h , mask_poly[:,:,0].sum(), mask_poly[:,:,0].sum() /float(w*h) , 'didi') + if not self.do_not_mask_with_textline_contour: + if angle_degrees > 3: + better_des_slope = get_orientation_moments(textline_coords) + + img_crop = rotate_image_with_padding(img_crop, better_des_slope ) + if self.prediction_with_both_of_rgb_and_bin: - img_crop, img_crop_bin = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly, img_crop_bin) - else: - img_crop, _ = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) - + img_crop_bin = rotate_image_with_padding(img_crop_bin, better_des_slope ) + + mask_poly = rotate_image_with_padding(mask_poly, better_des_slope ) + mask_poly = mask_poly.astype('uint8') - else: - better_des_slope = 0 - img_crop[mask_poly==0] = 255 - if self.prediction_with_both_of_rgb_and_bin: - img_crop_bin[mask_poly==0] = 255 - if type_textregion=='drop-capital': - pass - else: - if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 90: + #new bounding box + x_n, y_n, w_n, h_n = get_contours_and_bounding_boxes(mask_poly[:,:,0]) + + mask_poly = mask_poly[y_n:y_n+h_n, x_n:x_n+w_n, :] + img_crop = img_crop[y_n:y_n+h_n, x_n:x_n+w_n, :] + + img_crop[mask_poly==0] = 255 + + if self.prediction_with_both_of_rgb_and_bin: + img_crop_bin = img_crop_bin[y_n:y_n+h_n, x_n:x_n+w_n, :] + img_crop_bin[mask_poly==0] = 255 + + if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 90: if self.prediction_with_both_of_rgb_and_bin: img_crop, img_crop_bin = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly, img_crop_bin) else: img_crop, _ = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) + + + else: + better_des_slope = 0 + img_crop[mask_poly==0] = 255 + if self.prediction_with_both_of_rgb_and_bin: + img_crop_bin[mask_poly==0] = 255 + if type_textregion=='drop-capital': + pass + else: + if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 90: + if self.prediction_with_both_of_rgb_and_bin: + img_crop, img_crop_bin = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly, img_crop_bin) + else: + img_crop, _ = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) if not self.export_textline_images_and_text: if w_scaled < 750:#1.5*image_width: @@ -5541,35 +5546,38 @@ class Eynollah_ocr: cropped_lines_bin.append(img_fin) if self.export_textline_images_and_text: - if child_textlines.tag.endswith("TextEquiv"): - for cheild_text in child_textlines: - if cheild_text.tag.endswith("Unicode"): - textline_text = cheild_text.text - if textline_text: - if self.do_not_mask_with_textline_contour: - if self.pref_of_dataset: - with open(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_'+self.pref_of_dataset+'.txt'), 'w') as text_file: - text_file.write(textline_text) + if img_crop.shape[0]==0 or img_crop.shape[1]==0: + pass + else: + if child_textlines.tag.endswith("TextEquiv"): + for cheild_text in child_textlines: + if cheild_text.tag.endswith("Unicode"): + textline_text = cheild_text.text + if textline_text: + if self.do_not_mask_with_textline_contour: + if self.pref_of_dataset: + with open(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_'+self.pref_of_dataset+'.txt'), 'w') as text_file: + text_file.write(textline_text) - cv2.imwrite(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_'+self.pref_of_dataset+'.png'), img_crop ) + cv2.imwrite(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_'+self.pref_of_dataset+'.png'), img_crop ) + else: + with open(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'.txt'), 'w') as text_file: + text_file.write(textline_text) + + cv2.imwrite(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'.png'), img_crop ) else: - with open(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'.txt'), 'w') as text_file: - text_file.write(textline_text) + if self.pref_of_dataset: + with open(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_'+self.pref_of_dataset+'_masked.txt'), 'w') as text_file: + text_file.write(textline_text) - cv2.imwrite(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'.png'), img_crop ) - else: - if self.pref_of_dataset: - with open(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_'+self.pref_of_dataset+'_masked.txt'), 'w') as text_file: - text_file.write(textline_text) + cv2.imwrite(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_'+self.pref_of_dataset+'_masked.png'), img_crop ) + else: + with open(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_masked.txt'), 'w') as text_file: + text_file.write(textline_text) - cv2.imwrite(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_'+self.pref_of_dataset+'_masked.png'), img_crop ) - else: - with open(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_masked.txt'), 'w') as text_file: - text_file.write(textline_text) - - cv2.imwrite(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_masked.png'), img_crop ) - - indexer_textlines+=1 + cv2.imwrite(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_masked.png'), img_crop ) + + indexer_textlines+=1 if not self.export_textline_images_and_text: indexer_text_region = indexer_text_region +1 @@ -5727,7 +5735,7 @@ class Eynollah_ocr: if self.draw_texts_on_image: - font_path = "NotoSans-Regular.ttf" # Make sure this file exists! + font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! font = ImageFont.truetype(font_path, 40) for indexer_text, bb_ind in enumerate(total_bb_coordinates): From 673e67a847935c3ff3dd15cf2c67095aae36ecb8 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 21 Jul 2025 10:54:20 +0200 Subject: [PATCH 167/492] update model names --- src/eynollah/eynollah.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index bf11dec..12acff7 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5129,7 +5129,7 @@ class Eynollah_ocr: self.b_s = int(batch_size) else: - self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250716" + self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250716"#"/model_ens_ocrcnn_new6"#"/model_ens_ocrcnn_new2"# model_ocr = load_model(self.model_ocr_dir , compile=False) self.prediction_model = tf.keras.models.Model( @@ -5143,7 +5143,6 @@ class Eynollah_ocr: with open(os.path.join(self.model_ocr_dir, "characters_org.txt"),"r") as config_file: characters = json.load(config_file) - AUTOTUNE = tf.data.AUTOTUNE @@ -5154,6 +5153,7 @@ class Eynollah_ocr: self.num_to_char = StringLookup( vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True ) + self.end_character = len(characters) + 2 def run(self, overwrite : bool = False): if self.dir_in: @@ -5340,8 +5340,8 @@ class Eynollah_ocr: tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf8",default_namespace=None) #print("Job done in %.1fs", time.time() - t0) else: - max_len = 512#280#512 - padding_token = 299#1500#299 + ###max_len = 280#512#280#512 + ###padding_token = 1500#299#1500#299 image_width = 512#max_len * 4 image_height = 32 @@ -5656,13 +5656,13 @@ class Eynollah_ocr: preds_flipped = self.prediction_model.predict(imgs_ver_flipped, verbose=0) preds_max_fliped = np.max(preds_flipped, axis=2 ) preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) - pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=256 + pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character masked_means_flipped = np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) masked_means_flipped[np.isnan(masked_means_flipped)] = 0 preds_max = np.max(preds, axis=2 ) preds_max_args = np.argmax(preds, axis=2 ) - pred_max_not_unk_mask_bool = preds_max_args[:,:]!=256 + pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character masked_means = np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / np.sum(pred_max_not_unk_mask_bool, axis=1) masked_means[np.isnan(masked_means)] = 0 @@ -5683,13 +5683,13 @@ class Eynollah_ocr: preds_flipped = self.prediction_model.predict(imgs_bin_ver_flipped, verbose=0) preds_max_fliped = np.max(preds_flipped, axis=2 ) preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) - pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=256 + pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character masked_means_flipped = np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) masked_means_flipped[np.isnan(masked_means_flipped)] = 0 preds_max = np.max(preds, axis=2 ) preds_max_args = np.argmax(preds, axis=2 ) - pred_max_not_unk_mask_bool = preds_max_args[:,:]!=256 + pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character masked_means = np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / np.sum(pred_max_not_unk_mask_bool, axis=1) masked_means[np.isnan(masked_means)] = 0 @@ -5711,7 +5711,7 @@ class Eynollah_ocr: preds_max = np.max(preds, axis=2 ) preds_max_args = np.argmax(preds, axis=2 ) - pred_max_not_unk_mask_bool = preds_max_args[:,:]!=256 + pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character masked_means = np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / np.sum(pred_max_not_unk_mask_bool, axis=1) for ib in range(imgs.shape[0]): From daa597dbaaa12be3d2435960fb272852fc89c09a Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 21 Jul 2025 14:50:05 +0200 Subject: [PATCH 168/492] should merged text for the whole page be written in xml? --- src/eynollah/eynollah.py | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 12acff7..bdb8f1a 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5129,7 +5129,7 @@ class Eynollah_ocr: self.b_s = int(batch_size) else: - self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250716"#"/model_ens_ocrcnn_new6"#"/model_ens_ocrcnn_new2"# + self.model_ocr_dir = dir_models + "/model_ens_ocrcnn_new6"#"/model_eynollah_ocr_cnnrnn_20250716"#"/model_ens_ocrcnn_new6"#"/model_ens_ocrcnn_new2"# model_ocr = load_model(self.model_ocr_dir , compile=False) self.prediction_model = tf.keras.models.Model( @@ -5141,7 +5141,7 @@ class Eynollah_ocr: self.b_s = int(batch_size) - with open(os.path.join(self.model_ocr_dir, "characters_org.txt"),"r") as config_file: + with open(os.path.join(self.model_ocr_dir, "characters_20250707_all_lang.txt"),"r") as config_file: characters = json.load(config_file) AUTOTUNE = tf.data.AUTOTUNE @@ -5780,9 +5780,24 @@ class Eynollah_ocr: text_by_textregion.append(" ".join(extracted_texts_merged_un)) #print(text_by_textregion, 'text_by_textregiontext_by_textregiontext_by_textregiontext_by_textregiontext_by_textregion') + + ###index_tot_regions = [] + ###tot_region_ref = [] + + ###for jj in root1.iter(link+'RegionRefIndexed'): + ###index_tot_regions.append(jj.attrib['index']) + ###tot_region_ref.append(jj.attrib['regionRef']) + + ###id_to_order = {tid: ro for tid, ro in zip(tot_region_ref, index_tot_regions)} + + id_textregions = [] + textregions_by_existing_ids = [] indexer = 0 indexer_textregion = 0 for nn in root1.iter(region_tags): + id_textregion = nn.attrib['id'] + id_textregions.append(id_textregion) + textregions_by_existing_ids.append(text_by_textregion[indexer_textregion]) is_textregion_text = False for childtest in nn: @@ -5829,7 +5844,17 @@ class Eynollah_ocr: else: unicode_textregion.text = text_by_textregion[indexer_textregion] indexer_textregion = indexer_textregion + 1 - + + ###sample_order = [(id_to_order[tid], text) for tid, text in zip(id_textregions, textregions_by_existing_ids) if tid in id_to_order] + + ##ordered_texts_sample = [text for _, text in sorted(sample_order)] + ##tot_page_text = ' '.join(ordered_texts_sample) + + ##for page_element in root1.iter(link+'Page'): + ##text_page = ET.SubElement(page_element, 'TextEquiv') + ##unicode_textpage = ET.SubElement(text_page, 'Unicode') + ##unicode_textpage.text = tot_page_text + ET.register_namespace("",name_space) tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf8",default_namespace=None) #print("Job done in %.1fs", time.time() - t0) From da141bb42e6f7af4a069a77942e0695c68a56592 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 23 Jul 2025 16:44:17 +0200 Subject: [PATCH 169/492] resolving tests error --- tests/test_run.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_run.py b/tests/test_run.py index 607140e..b4e2dbd 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -85,8 +85,8 @@ def test_run_eynollah_binarization_filename(tmp_path, subtests, pytestconfig, ca outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') args = [ '-m', SBBBIN_MODELS, - str(infile), - str(outfile), + '-i', str(infile), + '-o', str(outfile), ] caplog.set_level(logging.INFO) def only_eynollah(logrec): @@ -117,7 +117,7 @@ def test_run_eynollah_binarization_directory(tmp_path, subtests, pytestconfig, c args = [ '-m', SBBBIN_MODELS, '-di', str(indir), - '-do', str(outdir), + '-o', str(outdir), ] caplog.set_level(logging.INFO) def only_eynollah(logrec): From fd0595f9207fb2f608eb1ae3c40dc6826a409d38 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 24 Jul 2025 13:52:38 +0200 Subject: [PATCH 170/492] Update Makefile --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 5f2bf34..1458427 100644 --- a/Makefile +++ b/Makefile @@ -85,7 +85,7 @@ smoke-test: tests/resources/kant_aufklaerung_1784_0020.tif eynollah layout -di $( Date: Fri, 25 Jul 2025 13:18:38 +0200 Subject: [PATCH 171/492] threshold for textline ocr + new ocr model --- src/eynollah/cli.py | 8 ++- src/eynollah/eynollah.py | 117 +++++++++++++++++++++++---------------- 2 files changed, 76 insertions(+), 49 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 9398c47..a313860 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -496,6 +496,11 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ "-ds_pref", help="in the case of extracting textline and text from a xml GT file user can add an abbrevation of dataset name to generated dataset", ) +@click.option( + "--min_conf_value_of_textline_text", + "-min_conf", + help="minimum OCR confidence value. Text lines with a confidence value lower than this threshold will not be included in the output XML file.", +) @click.option( "--log_level", "-l", @@ -503,7 +508,7 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ help="Override log level globally to this", ) -def ocr(image, overwrite, dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, draw_texts_on_image, prediction_with_both_of_rgb_and_bin, batch_size, dataset_abbrevation, log_level): +def ocr(image, overwrite, dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, draw_texts_on_image, prediction_with_both_of_rgb_and_bin, batch_size, dataset_abbrevation, min_conf_value_of_textline_text, log_level): initLogging() if log_level: getLogger('eynollah').setLevel(getLevelName(log_level)) @@ -530,6 +535,7 @@ def ocr(image, overwrite, dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, prediction_with_both_of_rgb_and_bin=prediction_with_both_of_rgb_and_bin, batch_size=batch_size, pref_of_dataset=dataset_abbrevation, + min_conf_value_of_textline_text=min_conf_value_of_textline_text, ) eynollah_ocr.run(overwrite=overwrite) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index bdb8f1a..aa1b2e1 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -318,7 +318,7 @@ class Eynollah: if self.ocr and self.tr: self.model_ocr_dir = dir_models + "/trocr_model_ens_of_3_checkpoints_201124" elif self.ocr and not self.tr: - self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250716" + self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250725" if self.tables: if self.light_version: self.model_table_dir = dir_models + "/modelens_table_0t4_201124" @@ -4974,13 +4974,23 @@ class Eynollah: gc.collect() if len(all_found_textline_polygons)>0: ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + else: + ocr_all_textlines = None + if all_found_textline_polygons_marginals and len(all_found_textline_polygons_marginals)>0: ocr_all_textlines_marginals = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons_marginals, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + else: + ocr_all_textlines_marginals = None if all_found_textline_polygons_h and len(all_found_textline_polygons)>0: ocr_all_textlines_h = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons_h, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + else: + ocr_all_textlines_h = None + if polygons_of_drop_capitals and len(polygons_of_drop_capitals)>0: ocr_all_textlines_drop = return_rnn_cnn_ocr_of_given_textlines(image_page, polygons_of_drop_capitals, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + else: + ocr_all_textlines_drop = None else: ocr_all_textlines = None ocr_all_textlines_marginals = None @@ -5098,7 +5108,8 @@ class Eynollah_ocr: do_not_mask_with_textline_contour=False, draw_texts_on_image=False, prediction_with_both_of_rgb_and_bin=False, - pref_of_dataset = None, + pref_of_dataset=None, + min_conf_value_of_textline_text : Optional[float]=None, logger=None, ): self.dir_in = dir_in @@ -5117,6 +5128,10 @@ class Eynollah_ocr: self.logger = logger if logger else getLogger('eynollah') if not export_textline_images_and_text: + if min_conf_value_of_textline_text: + self.min_conf_value_of_textline_text = float(min_conf_value_of_textline_text) + else: + self.min_conf_value_of_textline_text = 0.3 if tr_ocr: self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed") self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") @@ -5129,7 +5144,7 @@ class Eynollah_ocr: self.b_s = int(batch_size) else: - self.model_ocr_dir = dir_models + "/model_ens_ocrcnn_new6"#"/model_eynollah_ocr_cnnrnn_20250716"#"/model_ens_ocrcnn_new6"#"/model_ens_ocrcnn_new2"# + self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250725"#"/model_step_1020000_ocr"#"/model_ens_ocrcnn_new10"#"/model_step_255000_ocr"#"/model_ens_ocrcnn_new9"#"/model_step_900000_ocr"#"/model_eynollah_ocr_cnnrnn_20250716"#"/model_ens_ocrcnn_new6"#"/model_ens_ocrcnn_new2"# model_ocr = load_model(self.model_ocr_dir , compile=False) self.prediction_model = tf.keras.models.Model( @@ -5139,9 +5154,8 @@ class Eynollah_ocr: self.b_s = 8 else: self.b_s = int(batch_size) - - with open(os.path.join(self.model_ocr_dir, "characters_20250707_all_lang.txt"),"r") as config_file: + with open(os.path.join(self.model_ocr_dir, "characters_org.txt"),"r") as config_file: characters = json.load(config_file) AUTOTUNE = tf.data.AUTOTUNE @@ -5442,50 +5456,54 @@ class Eynollah_ocr: else: #print(file_name, angle_degrees,w*h , mask_poly[:,:,0].sum(), mask_poly[:,:,0].sum() /float(w*h) , 'didi') - if not self.do_not_mask_with_textline_contour: - if angle_degrees > 3: - better_des_slope = get_orientation_moments(textline_coords) + + if angle_degrees > 3: + better_des_slope = get_orientation_moments(textline_coords) + + img_crop = rotate_image_with_padding(img_crop, better_des_slope ) + + if self.prediction_with_both_of_rgb_and_bin: + img_crop_bin = rotate_image_with_padding(img_crop_bin, better_des_slope ) - img_crop = rotate_image_with_padding(img_crop, better_des_slope ) + mask_poly = rotate_image_with_padding(mask_poly, better_des_slope ) + mask_poly = mask_poly.astype('uint8') + + #new bounding box + x_n, y_n, w_n, h_n = get_contours_and_bounding_boxes(mask_poly[:,:,0]) + + mask_poly = mask_poly[y_n:y_n+h_n, x_n:x_n+w_n, :] + img_crop = img_crop[y_n:y_n+h_n, x_n:x_n+w_n, :] - if self.prediction_with_both_of_rgb_and_bin: - img_crop_bin = rotate_image_with_padding(img_crop_bin, better_des_slope ) - - mask_poly = rotate_image_with_padding(mask_poly, better_des_slope ) - mask_poly = mask_poly.astype('uint8') - - #new bounding box - x_n, y_n, w_n, h_n = get_contours_and_bounding_boxes(mask_poly[:,:,0]) - - mask_poly = mask_poly[y_n:y_n+h_n, x_n:x_n+w_n, :] - img_crop = img_crop[y_n:y_n+h_n, x_n:x_n+w_n, :] - + if not self.do_not_mask_with_textline_contour: img_crop[mask_poly==0] = 255 - - if self.prediction_with_both_of_rgb_and_bin: - img_crop_bin = img_crop_bin[y_n:y_n+h_n, x_n:x_n+w_n, :] + + if self.prediction_with_both_of_rgb_and_bin: + img_crop_bin = img_crop_bin[y_n:y_n+h_n, x_n:x_n+w_n, :] + if not self.do_not_mask_with_textline_contour: img_crop_bin[mask_poly==0] = 255 + + if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 90: + if self.prediction_with_both_of_rgb_and_bin: + img_crop, img_crop_bin = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly, img_crop_bin) + else: + img_crop, _ = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) + - if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 90: + else: + better_des_slope = 0 + if not self.do_not_mask_with_textline_contour: + img_crop[mask_poly==0] = 255 + if self.prediction_with_both_of_rgb_and_bin: + if not self.do_not_mask_with_textline_contour: + img_crop_bin[mask_poly==0] = 255 + if type_textregion=='drop-capital': + pass + else: + if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 90: if self.prediction_with_both_of_rgb_and_bin: img_crop, img_crop_bin = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly, img_crop_bin) else: img_crop, _ = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) - - - else: - better_des_slope = 0 - img_crop[mask_poly==0] = 255 - if self.prediction_with_both_of_rgb_and_bin: - img_crop_bin[mask_poly==0] = 255 - if type_textregion=='drop-capital': - pass - else: - if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 90: - if self.prediction_with_both_of_rgb_and_bin: - img_crop, img_crop_bin = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly, img_crop_bin) - else: - img_crop, _ = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) if not self.export_textline_images_and_text: if w_scaled < 750:#1.5*image_width: @@ -5716,9 +5734,12 @@ class Eynollah_ocr: for ib in range(imgs.shape[0]): pred_texts_ib = pred_texts[ib].replace("[UNK]", "") - extracted_texts.append(pred_texts_ib) - extracted_conf_value.append(masked_means[ib]) - + if masked_means[ib] >= self.min_conf_value_of_textline_text: + extracted_texts.append(pred_texts_ib) + extracted_conf_value.append(masked_means[ib]) + else: + extracted_texts.append("") + extracted_conf_value.append(0) del cropped_lines if self.prediction_with_both_of_rgb_and_bin: del cropped_lines_bin @@ -5790,14 +5811,14 @@ class Eynollah_ocr: ###id_to_order = {tid: ro for tid, ro in zip(tot_region_ref, index_tot_regions)} - id_textregions = [] - textregions_by_existing_ids = [] + #id_textregions = [] + #textregions_by_existing_ids = [] indexer = 0 indexer_textregion = 0 for nn in root1.iter(region_tags): - id_textregion = nn.attrib['id'] - id_textregions.append(id_textregion) - textregions_by_existing_ids.append(text_by_textregion[indexer_textregion]) + #id_textregion = nn.attrib['id'] + #id_textregions.append(id_textregion) + #textregions_by_existing_ids.append(text_by_textregion[indexer_textregion]) is_textregion_text = False for childtest in nn: From 322b04145f7b1460dfe9a3fbd702e3c65dd29ca3 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 5 Aug 2025 14:22:22 +0200 Subject: [PATCH 172/492] use the latest ocr model with balanced fraktur-antiqua training dataset --- src/eynollah/cli.py | 4 ++-- src/eynollah/eynollah.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index a313860..5135534 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -325,12 +325,12 @@ def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_low @click.option( "--threshold_art_class_layout", "-tharl", - help="threshold of artifical class in the case of layout detection", + help="threshold of artifical class in the case of layout detection. The default value is 0.1", ) @click.option( "--threshold_art_class_textline", "-thart", - help="threshold of artifical class in the case of textline detection", + help="threshold of artifical class in the case of textline detection. The default value is 0.1", ) @click.option( "--skip_layout_and_reading_order", diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index aa1b2e1..9e5ba51 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -318,7 +318,7 @@ class Eynollah: if self.ocr and self.tr: self.model_ocr_dir = dir_models + "/trocr_model_ens_of_3_checkpoints_201124" elif self.ocr and not self.tr: - self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250725" + self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250805" if self.tables: if self.light_version: self.model_table_dir = dir_models + "/modelens_table_0t4_201124" @@ -5144,7 +5144,7 @@ class Eynollah_ocr: self.b_s = int(batch_size) else: - self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250725"#"/model_step_1020000_ocr"#"/model_ens_ocrcnn_new10"#"/model_step_255000_ocr"#"/model_ens_ocrcnn_new9"#"/model_step_900000_ocr"#"/model_eynollah_ocr_cnnrnn_20250716"#"/model_ens_ocrcnn_new6"#"/model_ens_ocrcnn_new2"# + self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250805" model_ocr = load_model(self.model_ocr_dir , compile=False) self.prediction_model = tf.keras.models.Model( From 6462ea5b33cd6e4c1eaac1b2bf1fe072147e76f9 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 6 Aug 2025 22:33:42 +0200 Subject: [PATCH 173/492] adding visualization of ocr text of xml file --- train/generate_gt_for_training.py | 81 +++++++++++++++++++++++++++++++ train/gt_gen_utils.py | 71 +++++++++++++++++++++++++++ 2 files changed, 152 insertions(+) diff --git a/train/generate_gt_for_training.py b/train/generate_gt_for_training.py index 8ca5cd3..1971f68 100644 --- a/train/generate_gt_for_training.py +++ b/train/generate_gt_for_training.py @@ -3,6 +3,7 @@ import json from gt_gen_utils import * from tqdm import tqdm from pathlib import Path +from PIL import Image, ImageDraw, ImageFont @click.group() def main(): @@ -447,6 +448,86 @@ def visualize_layout_segmentation(xml_file, dir_xml, dir_out, dir_imgs): cv2.imwrite(os.path.join(dir_out, f_name+'.png'), added_image) + + +@main.command() +@click.option( + "--xml_file", + "-xml", + help="xml filename", + type=click.Path(exists=True, dir_okay=False), +) +@click.option( + "--dir_xml", + "-dx", + help="directory of GT page-xml files", + type=click.Path(exists=True, file_okay=False), +) + +@click.option( + "--dir_out", + "-do", + help="directory where plots will be written", + type=click.Path(exists=True, file_okay=False), +) + + +def visualize_ocr_text(xml_file, dir_xml, dir_out): + assert xml_file or dir_xml, "A single xml file -xml or a dir of xml files -dx is required not both of them" + if dir_xml: + xml_files_ind = os.listdir(dir_xml) + else: + xml_files_ind = [xml_file] + + font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! + font = ImageFont.truetype(font_path, 40) + + for ind_xml in tqdm(xml_files_ind): + indexer = 0 + #print(ind_xml) + #print('########################') + if dir_xml: + xml_file = os.path.join(dir_xml,ind_xml ) + f_name = Path(ind_xml).stem + else: + xml_file = os.path.join(ind_xml ) + f_name = Path(ind_xml).stem + print(f_name, 'f_name') + + co_tetxlines, y_len, x_len, ocr_texts = get_textline_contours_and_ocr_text(xml_file) + + total_bb_coordinates = [] + + image_text = Image.new("RGB", (x_len, y_len), "white") + draw = ImageDraw.Draw(image_text) + + + + for index, cnt in enumerate(co_tetxlines): + x,y,w,h = cv2.boundingRect(cnt) + #total_bb_coordinates.append([x,y,w,h]) + + #fit_text_single_line + + #x_bb = bb_ind[0] + #y_bb = bb_ind[1] + #w_bb = bb_ind[2] + #h_bb = bb_ind[3] + + font = fit_text_single_line(draw, ocr_texts[index], font_path, w, int(h*0.4) ) + + ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) + + text_bbox = draw.textbbox((0, 0), ocr_texts[index], font=font) + text_width = text_bbox[2] - text_bbox[0] + text_height = text_bbox[3] - text_bbox[1] + + text_x = x + (w - text_width) // 2 # Center horizontally + text_y = y + (h - text_height) // 2 # Center vertically + + # Draw the text + draw.text((text_x, text_y), ocr_texts[index], fill="black", font=font) + image_text.save(os.path.join(dir_out, f_name+'.png')) if __name__ == "__main__": main() diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 5076dd6..907e04d 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -9,6 +9,7 @@ import cv2 from shapely import geometry from pathlib import Path import matplotlib.pyplot as plt +from PIL import Image, ImageDraw, ImageFont KERNEL = np.ones((5, 5), np.uint8) @@ -283,6 +284,76 @@ def get_textline_contours_for_visualization(xml_file): return co_use_case, y_len, x_len +def get_textline_contours_and_ocr_text(xml_file): + tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding = 'iso-8859-5')) + root1=tree1.getroot() + alltags=[elem.tag for elem in root1.iter()] + link=alltags[0].split('}')[0]+'}' + + + + for jj in root1.iter(link+'Page'): + y_len=int(jj.attrib['imageHeight']) + x_len=int(jj.attrib['imageWidth']) + + region_tags = np.unique([x for x in alltags if x.endswith('TextLine')]) + tag_endings = ['}TextLine','}textline'] + co_use_case = [] + ocr_textlines = [] + + for tag in region_tags: + if tag.endswith(tag_endings[0]) or tag.endswith(tag_endings[1]): + for nn in root1.iter(tag): + c_t_in = [] + ocr_text_in = [''] + sumi = 0 + for vv in nn.iter(): + if vv.tag == link + 'Coords': + for childtest2 in nn: + if childtest2.tag.endswith("TextEquiv"): + for child_uc in childtest2: + if child_uc.tag.endswith("Unicode"): + text = child_uc.text + ocr_text_in[0]= text + + coords = bool(vv.attrib) + if coords: + p_h = vv.attrib['points'].split(' ') + c_t_in.append( + np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) + break + else: + pass + + + + if vv.tag == link + 'Point': + c_t_in.append([int(float(vv.attrib['x'])), int(float(vv.attrib['y']))]) + sumi += 1 + elif vv.tag != link + 'Point' and sumi >= 1: + break + + + co_use_case.append(np.array(c_t_in)) + ocr_textlines.append(ocr_text_in[0]) + return co_use_case, y_len, x_len, ocr_textlines + +def fit_text_single_line(draw, text, font_path, max_width, max_height): + initial_font_size = 50 + font_size = initial_font_size + while font_size > 10: # Minimum font size + font = ImageFont.truetype(font_path, font_size) + text_bbox = draw.textbbox((0, 0), text, font=font) # Get text bounding box + text_width = text_bbox[2] - text_bbox[0] + text_height = text_bbox[3] - text_bbox[1] + + if text_width <= max_width and text_height <= max_height: + return font # Return the best-fitting font + + font_size -= 2 # Reduce font size and retry + + return ImageFont.truetype(font_path, 10) # Smallest font fallback + def get_layout_contours_for_visualization(xml_file): tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding = 'iso-8859-5')) root1=tree1.getroot() From 263da755ef5d1a03f6398d090b02a094025a52aa Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 7 Aug 2025 10:32:49 +0200 Subject: [PATCH 174/492] loading xmls with UTF-8 encoding --- train/generate_gt_for_training.py | 26 +++++++++++++------------- train/gt_gen_utils.py | 10 +++++----- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/train/generate_gt_for_training.py b/train/generate_gt_for_training.py index 1971f68..d4b58dc 100644 --- a/train/generate_gt_for_training.py +++ b/train/generate_gt_for_training.py @@ -495,7 +495,7 @@ def visualize_ocr_text(xml_file, dir_xml, dir_out): print(f_name, 'f_name') co_tetxlines, y_len, x_len, ocr_texts = get_textline_contours_and_ocr_text(xml_file) - + total_bb_coordinates = [] image_text = Image.new("RGB", (x_len, y_len), "white") @@ -513,20 +513,20 @@ def visualize_ocr_text(xml_file, dir_xml, dir_out): #y_bb = bb_ind[1] #w_bb = bb_ind[2] #h_bb = bb_ind[3] - - font = fit_text_single_line(draw, ocr_texts[index], font_path, w, int(h*0.4) ) - - ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) - - text_bbox = draw.textbbox((0, 0), ocr_texts[index], font=font) - text_width = text_bbox[2] - text_bbox[0] - text_height = text_bbox[3] - text_bbox[1] + if ocr_texts[index]: + font = fit_text_single_line(draw, ocr_texts[index], font_path, w, int(h*0.4) ) + + ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) + + text_bbox = draw.textbbox((0, 0), ocr_texts[index], font=font) + text_width = text_bbox[2] - text_bbox[0] + text_height = text_bbox[3] - text_bbox[1] - text_x = x + (w - text_width) // 2 # Center horizontally - text_y = y + (h - text_height) // 2 # Center vertically + text_x = x + (w - text_width) // 2 # Center horizontally + text_y = y + (h - text_height) // 2 # Center vertically - # Draw the text - draw.text((text_x, text_y), ocr_texts[index], fill="black", font=font) + # Draw the text + draw.text((text_x, text_y), ocr_texts[index], fill="black", font=font) image_text.save(os.path.join(dir_out, f_name+'.png')) if __name__ == "__main__": diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 907e04d..753b0f5 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -244,7 +244,7 @@ def update_region_contours(co_text, img_boundary, erosion_rate, dilation_rate, y return co_text_eroded, img_boundary def get_textline_contours_for_visualization(xml_file): - tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding = 'iso-8859-5')) + tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) root1=tree1.getroot() alltags=[elem.tag for elem in root1.iter()] link=alltags[0].split('}')[0]+'}' @@ -285,7 +285,7 @@ def get_textline_contours_for_visualization(xml_file): def get_textline_contours_and_ocr_text(xml_file): - tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding = 'iso-8859-5')) + tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) root1=tree1.getroot() alltags=[elem.tag for elem in root1.iter()] link=alltags[0].split('}')[0]+'}' @@ -355,7 +355,7 @@ def fit_text_single_line(draw, text, font_path, max_width, max_height): return ImageFont.truetype(font_path, 10) # Smallest font fallback def get_layout_contours_for_visualization(xml_file): - tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding = 'iso-8859-5')) + tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) root1=tree1.getroot() alltags=[elem.tag for elem in root1.iter()] link=alltags[0].split('}')[0]+'}' @@ -630,7 +630,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ for index in tqdm(range(len(gt_list))): #try: print(gt_list[index]) - tree1 = ET.parse(dir_in+'/'+gt_list[index], parser = ET.XMLParser(encoding = 'iso-8859-5')) + tree1 = ET.parse(dir_in+'/'+gt_list[index], parser = ET.XMLParser(encoding='utf-8')) root1=tree1.getroot() alltags=[elem.tag for elem in root1.iter()] link=alltags[0].split('}')[0]+'}' @@ -1311,7 +1311,7 @@ def find_new_features_of_contours(contours_main): return cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin def read_xml(xml_file): file_name = Path(xml_file).stem - tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding = 'iso-8859-5')) + tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) root1=tree1.getroot() alltags=[elem.tag for elem in root1.iter()] link=alltags[0].split('}')[0]+'}' From 52d9cc9bafe5021d93999e975703fa0ad315337a Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 8 Aug 2025 11:32:02 +0200 Subject: [PATCH 175/492] deskewing with faster multiprocessing --- src/eynollah/eynollah.py | 9 +-- src/eynollah/utils/separate_lines.py | 103 +++++++++++++++++++++++++++ 2 files changed, 108 insertions(+), 4 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 9e5ba51..5299d3e 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -96,6 +96,7 @@ from .utils.separate_lines import ( textline_contours_postprocessing, separate_lines_new2, return_deskew_slop, + return_deskew_slop_old_mp, do_work_of_slopes_new, do_work_of_slopes_new_curved, do_work_of_slopes_new_light, @@ -1936,8 +1937,8 @@ class Eynollah: y_diff_mean = find_contours_mean_y_diff(textline_con_fil) sigma_des = max(1, int(y_diff_mean * (4.0 / 40.0))) crop_img[crop_img > 0] = 1 - slope_corresponding_textregion = return_deskew_slop(crop_img, sigma_des, - map=self.executor.map, logger=self.logger, plotter=self.plotter) + slope_corresponding_textregion = return_deskew_slop_old_mp(crop_img, sigma_des, + logger=self.logger, plotter=self.plotter) except Exception as why: self.logger.error(why) slope_corresponding_textregion = MAX_SLOPE @@ -3203,8 +3204,8 @@ class Eynollah: def run_deskew(self, textline_mask_tot_ea): #print(textline_mask_tot_ea.shape, 'textline_mask_tot_ea deskew') - slope_deskew = return_deskew_slop(cv2.erode(textline_mask_tot_ea, KERNEL, iterations=2), 2, 30, True, - map=self.executor.map, logger=self.logger, plotter=self.plotter) + slope_deskew = return_deskew_slop_old_mp(cv2.erode(textline_mask_tot_ea, KERNEL, iterations=2), 2, 30, True, + logger=self.logger, plotter=self.plotter) slope_first = 0 if self.plotter: diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 6289d4d..ead5cfb 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -5,6 +5,8 @@ import numpy as np import cv2 from scipy.signal import find_peaks from scipy.ndimage import gaussian_filter1d +from multiprocessing import Process, Queue, cpu_count +from multiprocessing import Pool from .rotate import rotate_image from .resize import resize_image from .contour import ( @@ -1526,6 +1528,107 @@ def get_smallest_skew(img, sigma_des, angles, logger=None, plotter=None, map=map angle = 0 return angle + +def return_deskew_slop_old_mp(img_patch_org, sigma_des,n_tot_angles=100, + main_page=False, logger=None, plotter=None): + if main_page and plotter: + plotter.save_plot_of_textline_density(img_patch_org) + + img_int=np.zeros((img_patch_org.shape[0],img_patch_org.shape[1])) + img_int[:,:]=img_patch_org[:,:]#img_patch_org[:,:,0] + + max_shape=np.max(img_int.shape) + img_resized=np.zeros((int( max_shape*(1.1) ) , int( max_shape*(1.1) ) )) + + onset_x=int((img_resized.shape[1]-img_int.shape[1])/2.) + onset_y=int((img_resized.shape[0]-img_int.shape[0])/2.) + + img_resized[ onset_y:onset_y+img_int.shape[0] , onset_x:onset_x+img_int.shape[1] ]=img_int[:,:] + + if main_page and img_patch_org.shape[1] > img_patch_org.shape[0]: + angles = np.array([-45, 0, 45, 90,]) + angle = get_smallest_skew_omp(img_resized, sigma_des, angles, plotter=plotter) + + angles = np.linspace(angle - 22.5, angle + 22.5, n_tot_angles) + angle = get_smallest_skew_omp(img_resized, sigma_des, angles, plotter=plotter) + elif main_page: + angles = np.linspace(-12, 12, n_tot_angles)#np.array([0 , 45 , 90 , -45]) + angle = get_smallest_skew_omp(img_resized, sigma_des, angles, plotter=plotter) + + early_slope_edge=11 + if abs(angle) > early_slope_edge: + if angle < 0: + angles = np.linspace(-90, -12, n_tot_angles) + else: + angles = np.linspace(90, 12, n_tot_angles) + angle = get_smallest_skew_omp(img_resized, sigma_des, angles, plotter=plotter) + else: + angles = np.linspace(-25, 25, int(0.5 * n_tot_angles) + 10) + angle = get_smallest_skew_omp(img_resized, sigma_des, angles, plotter=plotter) + + early_slope_edge=22 + if abs(angle) > early_slope_edge: + if angle < 0: + angles = np.linspace(-90, -25, int(0.5 * n_tot_angles) + 10) + else: + angles = np.linspace(90, 25, int(0.5 * n_tot_angles) + 10) + angle = get_smallest_skew_omp(img_resized, sigma_des, angles, plotter=plotter) + + return angle + +def do_image_rotation_omp(queue_of_all_params,angles_per_process, img_resized, sigma_des): + vars_per_each_subprocess = [] + angles_per_each_subprocess = [] + for mv in range(len(angles_per_process)): + img_rot=rotate_image(img_resized,angles_per_process[mv]) + img_rot[img_rot!=0]=1 + try: + var_spectrum=find_num_col_deskew(img_rot,sigma_des,20.3 ) + except: + var_spectrum=0 + vars_per_each_subprocess.append(var_spectrum) + angles_per_each_subprocess.append(angles_per_process[mv]) + + queue_of_all_params.put([vars_per_each_subprocess, angles_per_each_subprocess]) + +def get_smallest_skew_omp(img_resized, sigma_des, angles, plotter=None): + num_cores = cpu_count() + + queue_of_all_params = Queue() + processes = [] + nh = np.linspace(0, len(angles), num_cores + 1) + + for i in range(num_cores): + angles_per_process = angles[int(nh[i]) : int(nh[i + 1])] + processes.append(Process(target=do_image_rotation_omp, args=(queue_of_all_params, angles_per_process, img_resized, sigma_des))) + + for i in range(num_cores): + processes[i].start() + + var_res=[] + all_angles = [] + for i in range(num_cores): + list_all_par = queue_of_all_params.get(True) + vars_for_subprocess = list_all_par[0] + angles_sub_process = list_all_par[1] + for j in range(len(vars_for_subprocess)): + var_res.append(vars_for_subprocess[j]) + all_angles.append(angles_sub_process[j]) + + for i in range(num_cores): + processes[i].join() + + if plotter: + plotter.save_plot_of_rotation_angle(all_angles, var_res) + + + try: + var_res=np.array(var_res) + ang_int=all_angles[np.argmax(var_res)]#angels_sorted[arg_final]#angels[arg_sort_early[arg_sort[arg_final]]]#angels[arg_fin] + except: + ang_int=0 + return ang_int + def do_work_of_slopes_new( box_text, contour, contour_par, index_r_con, textline_mask_tot_ea, image_page_rotated, slope_deskew, From cf4983da54a1d8e0e5e382569a5502110b438189 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 8 Aug 2025 16:12:55 +0200 Subject: [PATCH 176/492] visualize vertical ocr text vertically --- train/generate_gt_for_training.py | 36 +++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 9 deletions(-) diff --git a/train/generate_gt_for_training.py b/train/generate_gt_for_training.py index d4b58dc..91ee2c8 100644 --- a/train/generate_gt_for_training.py +++ b/train/generate_gt_for_training.py @@ -514,19 +514,37 @@ def visualize_ocr_text(xml_file, dir_xml, dir_out): #w_bb = bb_ind[2] #h_bb = bb_ind[3] if ocr_texts[index]: + + + is_vertical = h > 2*w # Check orientation font = fit_text_single_line(draw, ocr_texts[index], font_path, w, int(h*0.4) ) - ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) - - text_bbox = draw.textbbox((0, 0), ocr_texts[index], font=font) - text_width = text_bbox[2] - text_bbox[0] - text_height = text_bbox[3] - text_bbox[1] + if is_vertical: + + vertical_font = fit_text_single_line(draw, ocr_texts[index], font_path, h, int(w * 0.8)) - text_x = x + (w - text_width) // 2 # Center horizontally - text_y = y + (h - text_height) // 2 # Center vertically + text_img = Image.new("RGBA", (h, w), (255, 255, 255, 0)) # Note: dimensions are swapped + text_draw = ImageDraw.Draw(text_img) + text_draw.text((0, 0), ocr_texts[index], font=vertical_font, fill="black") - # Draw the text - draw.text((text_x, text_y), ocr_texts[index], fill="black", font=font) + # Rotate text image by 90 degrees + rotated_text = text_img.rotate(90, expand=1) + + # Calculate paste position (centered in bbox) + paste_x = x + (w - rotated_text.width) // 2 + paste_y = y + (h - rotated_text.height) // 2 + + image_text.paste(rotated_text, (paste_x, paste_y), rotated_text) # Use rotated image as mask + else: + text_bbox = draw.textbbox((0, 0), ocr_texts[index], font=font) + text_width = text_bbox[2] - text_bbox[0] + text_height = text_bbox[3] - text_bbox[1] + + text_x = x + (w - text_width) // 2 # Center horizontally + text_y = y + (h - text_height) // 2 # Center vertically + + # Draw the text + draw.text((text_x, text_y), ocr_texts[index], fill="black", font=font) image_text.save(os.path.join(dir_out, f_name+'.png')) if __name__ == "__main__": From 268aa141d7b70a63e5b2ef317fda864249f8f17c Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 12 Aug 2025 12:50:15 +0200 Subject: [PATCH 177/492] avoiding float in range --- src/eynollah/utils/__init__.py | 41 +++++++++++++++++----------------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 7fa4a7b..ca86047 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1801,8 +1801,8 @@ def return_boxes_of_images_by_order_of_reading_new( #print(y_type_2_up,x_starting_up,x_ending_up,'didid') nodes_in = [] for ij in range(len(x_starting_up)): - nodes_in = nodes_in + list(range(x_starting_up[ij], - x_ending_up[ij])) + nodes_in = nodes_in + list(range(int(x_starting_up[ij]), + int(x_ending_up[ij]))) nodes_in = np.unique(nodes_in) #print(nodes_in,'nodes_in') @@ -1825,8 +1825,8 @@ def return_boxes_of_images_by_order_of_reading_new( elif len(y_diff_main_separator_up)==0: nodes_in = [] for ij in range(len(x_starting_up)): - nodes_in = nodes_in + list(range(x_starting_up[ij], - x_ending_up[ij])) + nodes_in = nodes_in + list(range(int(x_starting_up[ij]), + int(x_ending_up[ij]))) nodes_in = np.unique(nodes_in) #print(nodes_in,'nodes_in2') #print(np.array(range(len(peaks_neg_tot)-1)),'np.array(range(len(peaks_neg_tot)-1))') @@ -1866,8 +1866,8 @@ def return_boxes_of_images_by_order_of_reading_new( columns_covered_by_mothers = [] for dj in range(len(x_start_without_mother)): columns_covered_by_mothers = columns_covered_by_mothers + \ - list(range(x_start_without_mother[dj], - x_end_without_mother[dj])) + list(range(int(x_start_without_mother[dj]), + int(x_end_without_mother[dj]))) columns_covered_by_mothers = list(set(columns_covered_by_mothers)) all_columns=np.arange(len(peaks_neg_tot)-1) @@ -1909,8 +1909,8 @@ def return_boxes_of_images_by_order_of_reading_new( columns_covered_by_mothers = [] for dj in range(len(x_start_without_mother)): columns_covered_by_mothers = columns_covered_by_mothers + \ - list(range(x_start_without_mother[dj], - x_end_without_mother[dj])) + list(range(int(x_start_without_mother[dj]), + int(x_end_without_mother[dj]))) columns_covered_by_mothers = list(set(columns_covered_by_mothers)) all_columns=np.arange(len(peaks_neg_tot)-1) @@ -1926,8 +1926,8 @@ def return_boxes_of_images_by_order_of_reading_new( columns_covered_by_with_child_no_mothers = [] for dj in range(len(x_end_with_child_without_mother)): columns_covered_by_with_child_no_mothers = columns_covered_by_with_child_no_mothers + \ - list(range(x_start_with_child_without_mother[dj], - x_end_with_child_without_mother[dj])) + list(range(int(x_start_with_child_without_mother[dj]), + int(x_end_with_child_without_mother[dj]))) columns_covered_by_with_child_no_mothers = list(set(columns_covered_by_with_child_no_mothers)) all_columns = np.arange(len(peaks_neg_tot)-1) @@ -1970,8 +1970,8 @@ def return_boxes_of_images_by_order_of_reading_new( columns_covered_by_mothers = [] for dj in range(len(x_starting_all_between_nm_wc)): columns_covered_by_mothers = columns_covered_by_mothers + \ - list(range(x_starting_all_between_nm_wc[dj], - x_ending_all_between_nm_wc[dj])) + list(range(int(x_starting_all_between_nm_wc[dj]), + int(x_ending_all_between_nm_wc[dj]))) columns_covered_by_mothers = list(set(columns_covered_by_mothers)) all_columns=np.arange(i_s_nc, x_end_biggest_column) @@ -1979,8 +1979,8 @@ def return_boxes_of_images_by_order_of_reading_new( should_longest_line_be_extended=0 if (len(x_diff_all_between_nm_wc) > 0 and - set(list(range(x_starting_all_between_nm_wc[biggest], - x_ending_all_between_nm_wc[biggest])) + + set(list(range(int(x_starting_all_between_nm_wc[biggest]), + int(x_ending_all_between_nm_wc[biggest]))) + list(columns_not_covered)) != set(all_columns)): should_longest_line_be_extended=1 index_lines_so_close_to_top_separator = \ @@ -2012,7 +2012,7 @@ def return_boxes_of_images_by_order_of_reading_new( x_ending_all_between_nm_wc = np.append(x_ending_all_between_nm_wc, np.array(columns_not_covered) + 1) ind_args_between=np.arange(len(x_ending_all_between_nm_wc)) - for column in range(i_s_nc, x_end_biggest_column): + for column in range(int(i_s_nc), int(x_end_biggest_column)): ind_args_in_col=ind_args_between[x_starting_all_between_nm_wc==column] #print('babali2') #print(ind_args_in_col,'ind_args_in_col') @@ -2064,7 +2064,7 @@ def return_boxes_of_images_by_order_of_reading_new( x_end_itself=x_end_copy.pop(il) #print(y_copy,'y_copy2') - for column in range(x_start_itself, x_end_itself+1): + for column in range(int(x_start_itself), int(x_end_itself)+1): #print(column,'cols') y_in_cols=[] for yic in range(len(y_copy)): @@ -2095,11 +2095,11 @@ def return_boxes_of_images_by_order_of_reading_new( all_columns = np.arange(len(peaks_neg_tot)-1) columns_covered_by_lines_covered_more_than_2col = [] for dj in range(len(x_starting)): - if set(list(range(x_starting[dj],x_ending[dj]))) == set(all_columns): + if set(list(range(int(x_starting[dj]),int(x_ending[dj]) ))) == set(all_columns): pass else: columns_covered_by_lines_covered_more_than_2col = columns_covered_by_lines_covered_more_than_2col + \ - list(range(x_starting[dj],x_ending[dj])) + list(range(int(x_starting[dj]),int(x_ending[dj]) )) columns_covered_by_lines_covered_more_than_2col = list(set(columns_covered_by_lines_covered_more_than_2col)) columns_not_covered = list(set(all_columns) - set(columns_covered_by_lines_covered_more_than_2col)) @@ -2124,7 +2124,7 @@ def return_boxes_of_images_by_order_of_reading_new( x_ending = np.append(x_ending, np.array(columns_not_covered) + 1) ind_args=np.array(range(len(y_type_2))) - #ind_args=np.array(ind_args) + for column in range(len(peaks_neg_tot)-1): #print(column,'column') ind_args_in_col=ind_args[x_starting==column] @@ -2155,8 +2155,7 @@ def return_boxes_of_images_by_order_of_reading_new( x_start_itself=x_start_copy.pop(il) x_end_itself=x_end_copy.pop(il) - #print(y_copy,'y_copy2') - for column in range(x_start_itself, x_end_itself+1): + for column in range(int(x_start_itself), int(x_end_itself)+1): #print(column,'cols') y_in_cols=[] for yic in range(len(y_copy)): From 8ebba5ac046faff317e13455b94f79c6c510d782 Mon Sep 17 00:00:00 2001 From: michalbubula Date: Tue, 12 Aug 2025 16:21:15 +0200 Subject: [PATCH 178/492] add feedback to command line interface --- src/eynollah/eynollah.py | 305 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 290 insertions(+), 15 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index d47016b..d9939ca 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -203,6 +203,17 @@ class Eynollah: skip_layout_and_reading_order : bool = False, logger : Optional[Logger] = None, ): + if logger: + self.logger = logger + else: + self.logger = getLogger('eynollah') + console_handler = logging.StreamHandler(sys.stdout) + console_handler.setLevel(logging.INFO) + formatter = logging.Formatter('%(message)s') + console_handler.setFormatter(formatter) + self.logger.addHandler(console_handler) + self.logger.setLevel(logging.INFO) + if skip_layout_and_reading_order: textline_light = True self.light_version = light_version @@ -237,10 +248,7 @@ class Eynollah: self.num_col_lower = int(num_col_lower) else: self.num_col_lower = num_col_lower - self.logger = logger if logger else getLogger('eynollah') - # for parallelization of CPU-intensive tasks: - self.executor = ProcessPoolExecutor(max_workers=cpu_count(), timeout=1200) - atexit.register(self.executor.shutdown) + self.dir_models = dir_models self.model_dir_of_enhancement = dir_models + "/eynollah-enhancement_20210425" self.model_dir_of_binarization = dir_models + "/eynollah-binarization_20210425" @@ -293,7 +301,14 @@ class Eynollah: self.model_table_dir = dir_models + "/modelens_table_0t4_201124" else: self.model_table_dir = dir_models + "/eynollah-tables_20210319" + + + t_start = time.time() + # for parallelization of CPU-intensive tasks: + self.executor = ProcessPoolExecutor(max_workers=cpu_count(), timeout=1200) + atexit.register(self.executor.shutdown) + # #gpu_options = tf.compat.v1.GPUOptions(allow_growth=True) # #gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=7.7, allow_growth=True) # #session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options)) @@ -307,7 +322,11 @@ class Eynollah: tf.config.experimental.set_memory_growth(device, True) except: self.logger.warning("no GPU device available") - + + msg = "Loading models..." + print(msg) + self.logger.info(msg) + self.model_page = self.our_load_model(self.model_page_dir) self.model_classifier = self.our_load_model(self.model_dir_of_col_classifier) self.model_bin = self.our_load_model(self.model_dir_of_binarization) @@ -334,6 +353,10 @@ class Eynollah: self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten") if self.tables: self.model_table = self.our_load_model(self.model_table_dir) + + msg = f"Model initialization complete ({time.time() - t_start:.1f}s)" + print(msg) + self.logger.info(msg) def cache_images(self, image_filename=None, image_pil=None, dpi=None): ret = {} @@ -4294,21 +4317,81 @@ class Eynollah: def run_single(self): t0 = time.time() + + msg = f"Processing file: {self.writer.image_filename}" + print(msg) + self.logger.info(msg) + + # Log enabled features directly + enabled_modes = [] + if self.light_version: + enabled_modes.append("Light version") + if self.textline_light: + enabled_modes.append("Light textline detection") + if self.full_layout: + enabled_modes.append("Full layout analysis") + if self.ocr: + enabled_modes.append("OCR") + if self.tables: + enabled_modes.append("Table detection") + + if enabled_modes: + msg = "Enabled modes: " + ", ".join(enabled_modes) + print(msg) + self.logger.info(msg) + + + msg = "Step 1/5: Image Enhancement" + print(msg) + self.logger.info(msg) + img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement(self.light_version) - self.logger.info("Enhancing took %.1fs ", time.time() - t0) + + msg = f"Image: {self.image.shape[1]}x{self.image.shape[0]}, {self.dpi} DPI, {num_col_classifier} columns" + print(msg) + self.logger.info(msg) + if is_image_enhanced: + msg = "Enhancement applied" + print(msg) + self.logger.info(msg) + + msg = f"Enhancement complete ({time.time() - t0:.1f}s)" + print(msg) + self.logger.info(msg) + + + # Image Extraction Mode if self.extract_only_images: + msg = "Step 2/5: Image Extraction Mode" + print(msg) + self.logger.info(msg) + text_regions_p_1, erosion_hurts, polygons_lines_xml, polygons_of_images, image_page, page_coord, cont_page = \ self.get_regions_light_v_extract_only_images(img_res, is_image_enhanced, num_col_classifier) + ocr_all_textlines = None pcgts = self.writer.build_pagexml_no_full_layout( [], page_coord, [], [], [], [], polygons_of_images, [], [], [], [], [], cont_page, [], [], ocr_all_textlines, []) + if self.plotter: self.plotter.write_images_into_directory(polygons_of_images, image_page) + + msg = "Image extraction complete" + print(msg) + self.logger.info(msg) return pcgts + # Basic Processing Mode if self.skip_layout_and_reading_order: + msg = "Step 2/5: Basic Processing Mode" + print(msg) + self.logger.info(msg) + msg = "Skipping layout analysis and reading order detection" + print(msg) + self.logger.info(msg) + _ ,_, _, textline_mask_tot_ea, img_bin_light, _ = \ self.get_regions_light_v(img_res, is_image_enhanced, num_col_classifier, skip_layout_and_reading_order=self.skip_layout_and_reading_order) @@ -4349,11 +4432,21 @@ class Eynollah: all_found_textline_polygons, page_coord, polygons_of_images, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines, conf_contours_textregions) + msg = "Basic processing complete" + print(msg) + self.logger.info(msg) return pcgts #print("text region early -1 in %.1fs", time.time() - t0) t1 = time.time() + msg = "Step 2/5: Layout Analysis" + print(msg) + self.logger.info(msg) + if self.light_version: + msg = "Using light version processing" + print(msg) + self.logger.info(msg) text_regions_p_1 ,erosion_hurts, polygons_lines_xml, textline_mask_tot_ea, img_bin_light, confidence_matrix = \ self.get_regions_light_v(img_res, is_image_enhanced, num_col_classifier) #print("text region early -2 in %.1fs", time.time() - t0) @@ -4384,20 +4477,30 @@ class Eynollah: text_regions_p_1 ,erosion_hurts, polygons_lines_xml = \ self.get_regions_from_xy_2models(img_res, is_image_enhanced, num_col_classifier) - self.logger.info("Textregion detection took %.1fs ", time.time() - t1) + msg = f"Textregion detection took {time.time() - t1:.1f}s" + print(msg) + self.logger.info(msg) confidence_matrix = np.zeros((text_regions_p_1.shape[:2])) t1 = time.time() num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, \ text_regions_p_1, cont_page, table_prediction = \ self.run_graphics_and_columns(text_regions_p_1, num_col_classifier, num_column_is_classified, erosion_hurts) - self.logger.info("Graphics detection took %.1fs ", time.time() - t1) + msg = f"Graphics detection took {time.time() - t1:.1f}s" + print(msg) + self.logger.info(msg) #self.logger.info('cont_page %s', cont_page) #plt.imshow(table_prediction) #plt.show() + msg = f"Layout analysis complete ({time.time() - t1:.1f}s)" + print(msg) + self.logger.info(msg) if not num_col: - self.logger.info("No columns detected, outputting an empty PAGE-XML") + msg = "No columns detected - generating empty PAGE-XML" + print(msg) + self.logger.info(msg) + ocr_all_textlines = None pcgts = self.writer.build_pagexml_no_full_layout( [], page_coord, [], [], [], [], [], [], [], [], [], [], @@ -4408,10 +4511,18 @@ class Eynollah: t1 = time.time() if not self.light_version: textline_mask_tot_ea = self.run_textline(image_page) - self.logger.info("textline detection took %.1fs", time.time() - t1) + msg = f"Textline detection took {time.time() - t1:.1f}s" + print(msg) + self.logger.info(msg) t1 = time.time() slope_deskew, slope_first = self.run_deskew(textline_mask_tot_ea) - self.logger.info("deskewing took %.1fs", time.time() - t1) + if np.abs(slope_deskew) > 0.01: # Only log if there is significant skew + msg = f"Applied deskew correction: {slope_deskew:.2f} degrees" + print(msg) + self.logger.info(msg) + msg = f"Deskewing took {time.time() - t1:.1f}s" + print(msg) + self.logger.info(msg) elif num_col_classifier in (1,2): org_h_l_m = textline_mask_tot_ea.shape[0] org_w_l_m = textline_mask_tot_ea.shape[1] @@ -4431,6 +4542,19 @@ class Eynollah: textline_mask_tot, text_regions_p, image_page_rotated = \ self.run_marginals(image_page, textline_mask_tot_ea, mask_images, mask_lines, num_col_classifier, slope_deskew, text_regions_p_1, table_prediction) + + msg = "Step 3/5: Text Line Detection" + print(msg) + self.logger.info(msg) + + if self.curved_line: + msg = "Mode: Curved line detection" + print(msg) + self.logger.info(msg) + elif self.textline_light: + msg = "Mode: Light detection" + print(msg) + self.logger.info(msg) if self.light_version and num_col_classifier in (1,2): image_page = resize_image(image_page,org_h_l_m, org_w_l_m ) @@ -4441,7 +4565,9 @@ class Eynollah: table_prediction = resize_image(table_prediction,org_h_l_m, org_w_l_m ) image_page_rotated = resize_image(image_page_rotated,org_h_l_m, org_w_l_m ) - self.logger.info("detection of marginals took %.1fs", time.time() - t1) + msg = f"Detection of marginals took {time.time() - t1:.1f}s" + print(msg) + self.logger.info(msg) #print("text region early 2 marginal in %.1fs", time.time() - t0) ## birdan sora chock chakir t1 = time.time() @@ -4540,7 +4666,9 @@ class Eynollah: cx_bigest_d_big[0] = cx_bigest_d[ind_largest] cy_biggest_d_big[0] = cy_biggest_d[ind_largest] except Exception as why: - self.logger.error(why) + msg = str(why) + print(f"Error: {msg}") + self.logger.error(msg) (h, w) = text_only.shape[:2] center = (w // 2.0, h // 2.0) @@ -4758,6 +4886,23 @@ class Eynollah: t_order = time.time() if self.full_layout: + msg = "Step 4/5: Reading Order Detection" + print(msg) + self.logger.info(msg) + + if self.reading_order_machine_based: + msg = "Using machine-based detection" + print(msg) + self.logger.info(msg) + if self.right2left: + msg = "Right-to-left mode enabled" + print(msg) + self.logger.info(msg) + if self.headers_off: + msg = "Headers ignored in reading order" + print(msg) + self.logger.info(msg) + if self.reading_order_machine_based: order_text_new, id_of_texts_tot = self.do_order_of_regions_with_model( contours_only_text_parent, contours_only_text_parent_h, text_regions_p) @@ -4768,21 +4913,84 @@ class Eynollah: else: order_text_new, id_of_texts_tot = self.do_order_of_regions( contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered, boxes_d, textline_mask_tot_d) - self.logger.info("detection of reading order took %.1fs", time.time() - t_order) + msg = f"Detection of reading order took {time.time() - t_order:.1f}s" + print(msg) + self.logger.info(msg) if self.ocr: + msg = "Step 4.5/5: OCR Processing" + print(msg) + self.logger.info(msg) + + if torch.cuda.is_available(): + msg = "Using GPU acceleration" + print(msg) + self.logger.info(msg) + else: + msg = "Using CPU processing" + print(msg) + self.logger.info(msg) + ocr_all_textlines = [] else: ocr_all_textlines = None + + msg = "Step 5/5: Output Generation" + print(msg) + self.logger.info(msg) + + output_config = [] + if self.enable_plotting: + output_config.append("Saving debug plots") + if self.dir_of_cropped_images: + output_config.append(f"Saving cropped images to: {self.dir_of_cropped_images}") + if self.dir_of_layout: + output_config.append(f"Saving layout plots to: {self.dir_of_layout}") + if self.dir_of_deskewed: + output_config.append(f"Saving deskewed images to: {self.dir_of_deskewed}") + + if output_config: + self.logger.info("Output configuration:\n * %s", "\n * ".join(output_config)) + pcgts = self.writer.build_pagexml_full_layout( contours_only_text_parent, contours_only_text_parent_h, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, polygons_of_images, contours_tables, polygons_of_drop_capitals, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, cont_page, polygons_lines_xml, ocr_all_textlines, conf_contours_textregions, conf_contours_textregions_h) + + summary = [ + f"Total processing time: {time.time() - t0:.1f}s", + f"Output file: {self.writer.output_filename}" + ] + + if self.ocr: + summary.append("OCR processing completed") + if self.full_layout: + summary.append("Full layout analysis completed") + if self.tables: + summary.append("Table detection completed") + return pcgts contours_only_text_parent_h = None + msg = "Step 4/5: Reading Order Detection" + print(msg) + self.logger.info(msg) + + if self.reading_order_machine_based: + msg = "Using machine-based detection" + print(msg) + self.logger.info(msg) + if self.right2left: + msg = "Right-to-left mode enabled" + print(msg) + self.logger.info(msg) + if self.headers_off: + msg = "Headers ignored in reading order" + print(msg) + self.logger.info(msg) + if self.reading_order_machine_based: order_text_new, id_of_texts_tot = self.do_order_of_regions_with_model( contours_only_text_parent, contours_only_text_parent_h, text_regions_p) @@ -4803,6 +5011,33 @@ class Eynollah: contours_only_text_parent_d_ordered, contours_only_text_parent_h, boxes_d, textline_mask_tot_d) if self.ocr: + msg = "Step 4.5/5: OCR Processing" + print(msg) + self.logger.info(msg) + + if torch.cuda.is_available(): + msg = "Using GPU acceleration" + print(msg) + self.logger.info(msg) + else: + msg = "Using CPU processing" + print(msg) + self.logger.info(msg) + + if self.light_version: + msg = "Using light version OCR" + print(msg) + self.logger.info(msg) + + if self.textline_light: + msg = "Using light text line detection for OCR" + print(msg) + self.logger.info(msg) + + msg = "Processing text lines..." + print(msg) + self.logger.info(msg) + device = cuda.get_current_device() device.reset() gc.collect() @@ -4853,12 +5088,52 @@ class Eynollah: else: ocr_all_textlines = None #print(ocr_all_textlines) - self.logger.info("detection of reading order took %.1fs", time.time() - t_order) + msg = f"Detection of reading order took {time.time() - t_order:.1f}s" + print(msg) + self.logger.info(msg) + + msg = "Step 5/5: Output Generation" + print(msg) + self.logger.info(msg) + + msg = "Generating PAGE-XML output" + print(msg) + self.logger.info(msg) + + if self.enable_plotting: + msg = "Saving debug plots" + print(msg) + self.logger.info(msg) + + if self.dir_of_cropped_images: + msg = f"Saving cropped images to: {self.dir_of_cropped_images}" + print(msg) + self.logger.info(msg) + + if self.dir_of_layout: + msg = f"Saving layout plots to: {self.dir_of_layout}" + print(msg) + self.logger.info(msg) + + if self.dir_of_deskewed: + msg = f"Saving deskewed images to: {self.dir_of_deskewed}" + print(msg) + self.logger.info(msg) + pcgts = self.writer.build_pagexml_no_full_layout( txt_con_org, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, all_box_coord, polygons_of_images, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines, conf_contours_textregions) + + msg = f"\nProcessing completed in {time.time() - t0:.1f}s" + print(msg) + self.logger.info(msg) + + msg = f"Output file: {self.writer.output_filename}" + print(msg) + self.logger.info(msg) + return pcgts From 21615a986dbe2c6a2ddcf603b45ebe24e52f1e90 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 13 Aug 2025 14:14:37 +0200 Subject: [PATCH 179/492] OCR-D processor: expose reading_order_machine_based --- src/eynollah/ocrd-tool.json | 5 +++++ src/eynollah/processor.py | 3 +++ 2 files changed, 8 insertions(+) diff --git a/src/eynollah/ocrd-tool.json b/src/eynollah/ocrd-tool.json index ce15206..af5e03f 100644 --- a/src/eynollah/ocrd-tool.json +++ b/src/eynollah/ocrd-tool.json @@ -74,6 +74,11 @@ "type": "boolean", "default": false, "description": "ignore the special role of headings during reading order detection" + }, + "reading_order_machine_based": { + "type": "boolean", + "default": false, + "description": "use data-driven (rather than rule-based) reading order detection" } }, "resources": [ diff --git a/src/eynollah/processor.py b/src/eynollah/processor.py index a53fede..c2922c1 100644 --- a/src/eynollah/processor.py +++ b/src/eynollah/processor.py @@ -24,6 +24,7 @@ class EynollahProcessor(Processor): allow_enhancement=self.parameter['allow_enhancement'], curved_line=self.parameter['curved_line'], right2left=self.parameter['right_to_left'], + reading_order_machine_based=self.parameter['reading_order_machine_based'], ignore_page_extraction=self.parameter['ignore_page_extraction'], light_version=self.parameter['light_version'], textline_light=self.parameter['textline_light'], @@ -57,6 +58,8 @@ class EynollahProcessor(Processor): - If ``ignore_page_extraction``, then attempt no cropping of the page. - If ``curved_line``, then compute contour polygons for text lines instead of simple bounding boxes. + - If ``reading_order_machine_based``, then detect reading order via + data-driven model instead of geometrical heuristics. Produce a new output file by serialising the resulting hierarchy. """ From 77415028769c55d48e6583ffc267e8d86a4a7cf0 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 18 Aug 2025 02:31:13 +0200 Subject: [PATCH 180/492] reading order on given layout --- src/eynollah/cli.py | 48 +- src/eynollah/mb_ro_on_layout.py | 1134 +++++++++++++++++++++++++++++++ 2 files changed, 1158 insertions(+), 24 deletions(-) create mode 100644 src/eynollah/mb_ro_on_layout.py diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 5135534..67fd57e 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -4,6 +4,7 @@ from ocrd_utils import initLogging, getLevelName, getLogger from eynollah.eynollah import Eynollah, Eynollah_ocr from eynollah.sbb_binarize import SbbBinarizer from eynollah.image_enhancer import Enhancer +from eynollah.mb_ro_on_layout import machine_based_reading_order_on_layout @click.group() def main(): @@ -13,38 +14,37 @@ def main(): @click.option( "--dir_xml", "-dx", - help="directory of GT page-xml files", + help="directory of page-xml files", type=click.Path(exists=True, file_okay=False), ) @click.option( - "--dir_out_modal_image", - "-domi", - help="directory where ground truth images would be written", + "--xml_file", + "-xml", + help="xml filename", + type=click.Path(exists=True, dir_okay=False), +) +@click.option( + "--dir_out", + "-do", + help="directory for output images", type=click.Path(exists=True, file_okay=False), ) @click.option( - "--dir_out_classes", - "-docl", - help="directory where ground truth classes would be written", + "--model", + "-m", + help="directory of models", type=click.Path(exists=True, file_okay=False), + required=True, ) -@click.option( - "--input_height", - "-ih", - help="input height", -) -@click.option( - "--input_width", - "-iw", - help="input width", -) -@click.option( - "--min_area_size", - "-min", - help="min area size of regions considered for reading order training.", -) -def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, input_height, input_width, min_area_size): - xml_files_ind = os.listdir(dir_xml) + +def machine_based_reading_order(dir_xml, xml_file, dir_out, model): + raedingorder_object = machine_based_reading_order_on_layout(model, dir_out=dir_out, logger=getLogger('enhancement')) + + if dir_xml: + raedingorder_object.run(dir_in=dir_xml) + else: + raedingorder_object.run(xml_filename=xml_file) + @main.command() @click.option('--patches/--no-patches', default=True, help='by enabling this parameter you let the model to see the image in patches.') diff --git a/src/eynollah/mb_ro_on_layout.py b/src/eynollah/mb_ro_on_layout.py new file mode 100644 index 0000000..7625a90 --- /dev/null +++ b/src/eynollah/mb_ro_on_layout.py @@ -0,0 +1,1134 @@ +""" +Image enhancer. The output can be written as same scale of input or in new predicted scale. +""" + +from logging import Logger +from difflib import SequenceMatcher as sq +from PIL import Image, ImageDraw, ImageFont +import math +import os +import sys +import time +from typing import Optional +import atexit +import warnings +from functools import partial +from pathlib import Path +from multiprocessing import cpu_count +import gc +import copy +from loky import ProcessPoolExecutor +import xml.etree.ElementTree as ET +import cv2 +import numpy as np +from ocrd import OcrdPage +from ocrd_utils import getLogger, tf_disable_interactive_logs +import statistics +from tensorflow.keras.models import load_model +from .utils.resize import resize_image +from .utils import ( + crop_image_inside_box +) + +from .utils.contour import ( + filter_contours_area_of_image, + filter_contours_area_of_image_tables, + find_contours_mean_y_diff, + find_new_features_of_contours, + find_features_of_contours, + get_text_region_boxes_by_given_contours, + get_textregion_contours_in_org_image, + get_textregion_contours_in_org_image_light, + return_contours_of_image, + return_contours_of_interested_region, + return_contours_of_interested_region_by_min_size, + return_contours_of_interested_textline, + return_parent_contours, +) + +DPI_THRESHOLD = 298 +KERNEL = np.ones((5, 5), np.uint8) + + +class machine_based_reading_order_on_layout: + def __init__( + self, + dir_models : str, + dir_out : Optional[str] = None, + logger : Optional[Logger] = None, + ): + self.dir_out = dir_out + + self.logger = logger if logger else getLogger('mbro on layout') + # for parallelization of CPU-intensive tasks: + self.executor = ProcessPoolExecutor(max_workers=cpu_count(), timeout=1200) + atexit.register(self.executor.shutdown) + self.dir_models = dir_models + self.model_reading_order_dir = dir_models + "/model_step_5100000_mb_ro"#"/model_ens_reading_order_machine_based" + + try: + for device in tf.config.list_physical_devices('GPU'): + tf.config.experimental.set_memory_growth(device, True) + except: + self.logger.warning("no GPU device available") + + self.model_reading_order = self.our_load_model(self.model_reading_order_dir) + self.light_version = True + + + def cache_images(self, image_filename=None, image_pil=None, dpi=None): + ret = {} + t_c0 = time.time() + if image_filename: + ret['img'] = cv2.imread(image_filename) + if self.light_version: + self.dpi = 100 + else: + self.dpi = 0#check_dpi(image_filename) + else: + ret['img'] = pil2cv(image_pil) + if self.light_version: + self.dpi = 100 + else: + self.dpi = 0#check_dpi(image_pil) + ret['img_grayscale'] = cv2.cvtColor(ret['img'], cv2.COLOR_BGR2GRAY) + for prefix in ('', '_grayscale'): + ret[f'img{prefix}_uint8'] = ret[f'img{prefix}'].astype(np.uint8) + self._imgs = ret + if dpi is not None: + self.dpi = dpi + + def reset_file_name_dir(self, image_filename): + t_c = time.time() + self.cache_images(image_filename=image_filename) + self.output_filename = os.path.join(self.dir_out, Path(image_filename).stem +'.png') + + def imread(self, grayscale=False, uint8=True): + key = 'img' + if grayscale: + key += '_grayscale' + if uint8: + key += '_uint8' + return self._imgs[key].copy() + + def isNaN(self, num): + return num != num + + @staticmethod + def our_load_model(model_file): + if model_file.endswith('.h5') and Path(model_file[:-3]).exists(): + # prefer SavedModel over HDF5 format if it exists + model_file = model_file[:-3] + try: + model = load_model(model_file, compile=False) + except: + model = load_model(model_file, compile=False, custom_objects={ + "PatchEncoder": PatchEncoder, "Patches": Patches}) + return model + + def predict_enhancement(self, img): + self.logger.debug("enter predict_enhancement") + + img_height_model = self.model_enhancement.layers[-1].output_shape[1] + img_width_model = self.model_enhancement.layers[-1].output_shape[2] + if img.shape[0] < img_height_model: + img = cv2.resize(img, (img.shape[1], img_width_model), interpolation=cv2.INTER_NEAREST) + if img.shape[1] < img_width_model: + img = cv2.resize(img, (img_height_model, img.shape[0]), interpolation=cv2.INTER_NEAREST) + margin = int(0.1 * img_width_model) + width_mid = img_width_model - 2 * margin + height_mid = img_height_model - 2 * margin + img = img / 255. + img_h = img.shape[0] + img_w = img.shape[1] + + prediction_true = np.zeros((img_h, img_w, 3)) + nxf = img_w / float(width_mid) + nyf = img_h / float(height_mid) + nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf) + nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf) + + for i in range(nxf): + for j in range(nyf): + if i == 0: + index_x_d = i * width_mid + index_x_u = index_x_d + img_width_model + else: + index_x_d = i * width_mid + index_x_u = index_x_d + img_width_model + if j == 0: + index_y_d = j * height_mid + index_y_u = index_y_d + img_height_model + else: + index_y_d = j * height_mid + index_y_u = index_y_d + img_height_model + + if index_x_u > img_w: + index_x_u = img_w + index_x_d = img_w - img_width_model + if index_y_u > img_h: + index_y_u = img_h + index_y_d = img_h - img_height_model + + img_patch = img[np.newaxis, index_y_d:index_y_u, index_x_d:index_x_u, :] + label_p_pred = self.model_enhancement.predict(img_patch, verbose=0) + seg = label_p_pred[0, :, :, :] * 255 + + if i == 0 and j == 0: + prediction_true[index_y_d + 0:index_y_u - margin, + index_x_d + 0:index_x_u - margin] = \ + seg[0:-margin or None, + 0:-margin or None] + elif i == nxf - 1 and j == nyf - 1: + prediction_true[index_y_d + margin:index_y_u - 0, + index_x_d + margin:index_x_u - 0] = \ + seg[margin:, + margin:] + elif i == 0 and j == nyf - 1: + prediction_true[index_y_d + margin:index_y_u - 0, + index_x_d + 0:index_x_u - margin] = \ + seg[margin:, + 0:-margin or None] + elif i == nxf - 1 and j == 0: + prediction_true[index_y_d + 0:index_y_u - margin, + index_x_d + margin:index_x_u - 0] = \ + seg[0:-margin or None, + margin:] + elif i == 0 and j != 0 and j != nyf - 1: + prediction_true[index_y_d + margin:index_y_u - margin, + index_x_d + 0:index_x_u - margin] = \ + seg[margin:-margin or None, + 0:-margin or None] + elif i == nxf - 1 and j != 0 and j != nyf - 1: + prediction_true[index_y_d + margin:index_y_u - margin, + index_x_d + margin:index_x_u - 0] = \ + seg[margin:-margin or None, + margin:] + elif i != 0 and i != nxf - 1 and j == 0: + prediction_true[index_y_d + 0:index_y_u - margin, + index_x_d + margin:index_x_u - margin] = \ + seg[0:-margin or None, + margin:-margin or None] + elif i != 0 and i != nxf - 1 and j == nyf - 1: + prediction_true[index_y_d + margin:index_y_u - 0, + index_x_d + margin:index_x_u - margin] = \ + seg[margin:, + margin:-margin or None] + else: + prediction_true[index_y_d + margin:index_y_u - margin, + index_x_d + margin:index_x_u - margin] = \ + seg[margin:-margin or None, + margin:-margin or None] + + prediction_true = prediction_true.astype(int) + return prediction_true + + def calculate_width_height_by_columns(self, img, num_col, width_early, label_p_pred): + self.logger.debug("enter calculate_width_height_by_columns") + if num_col == 1: + img_w_new = 2000 + elif num_col == 2: + img_w_new = 2400 + elif num_col == 3: + img_w_new = 3000 + elif num_col == 4: + img_w_new = 4000 + elif num_col == 5: + img_w_new = 5000 + elif num_col == 6: + img_w_new = 6500 + else: + img_w_new = width_early + img_h_new = img_w_new * img.shape[0] // img.shape[1] + + if img_h_new >= 8000: + img_new = np.copy(img) + num_column_is_classified = False + else: + img_new = resize_image(img, img_h_new, img_w_new) + num_column_is_classified = True + + return img_new, num_column_is_classified + + def early_page_for_num_of_column_classification(self,img_bin): + self.logger.debug("enter early_page_for_num_of_column_classification") + if self.input_binary: + img = np.copy(img_bin).astype(np.uint8) + else: + img = self.imread() + img = cv2.GaussianBlur(img, (5, 5), 0) + img_page_prediction = self.do_prediction(False, img, self.model_page) + + imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY) + _, thresh = cv2.threshold(imgray, 0, 255, 0) + thresh = cv2.dilate(thresh, KERNEL, iterations=3) + contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + if len(contours)>0: + cnt_size = np.array([cv2.contourArea(contours[j]) + for j in range(len(contours))]) + cnt = contours[np.argmax(cnt_size)] + box = cv2.boundingRect(cnt) + else: + box = [0, 0, img.shape[1], img.shape[0]] + cropped_page, page_coord = crop_image_inside_box(box, img) + + self.logger.debug("exit early_page_for_num_of_column_classification") + return cropped_page, page_coord + + def calculate_width_height_by_columns_1_2(self, img, num_col, width_early, label_p_pred): + self.logger.debug("enter calculate_width_height_by_columns") + if num_col == 1: + img_w_new = 1000 + else: + img_w_new = 1300 + img_h_new = img_w_new * img.shape[0] // img.shape[1] + + if label_p_pred[0][int(num_col - 1)] < 0.9 and img_w_new < width_early: + img_new = np.copy(img) + num_column_is_classified = False + #elif label_p_pred[0][int(num_col - 1)] < 0.8 and img_h_new >= 8000: + elif img_h_new >= 8000: + img_new = np.copy(img) + num_column_is_classified = False + else: + img_new = resize_image(img, img_h_new, img_w_new) + num_column_is_classified = True + + return img_new, num_column_is_classified + + def resize_and_enhance_image_with_column_classifier(self, light_version): + self.logger.debug("enter resize_and_enhance_image_with_column_classifier") + dpi = 0#self.dpi + self.logger.info("Detected %s DPI", dpi) + if self.input_binary: + img = self.imread() + prediction_bin = self.do_prediction(True, img, self.model_bin, n_batch_inference=5) + prediction_bin = 255 * (prediction_bin[:,:,0]==0) + prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2).astype(np.uint8) + img= np.copy(prediction_bin) + img_bin = prediction_bin + else: + img = self.imread() + self.h_org, self.w_org = img.shape[:2] + img_bin = None + + width_early = img.shape[1] + t1 = time.time() + _, page_coord = self.early_page_for_num_of_column_classification(img_bin) + + self.image_page_org_size = img[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3], :] + self.page_coord = page_coord + + if self.num_col_upper and not self.num_col_lower: + num_col = self.num_col_upper + label_p_pred = [np.ones(6)] + elif self.num_col_lower and not self.num_col_upper: + num_col = self.num_col_lower + label_p_pred = [np.ones(6)] + elif not self.num_col_upper and not self.num_col_lower: + if self.input_binary: + img_in = np.copy(img) + img_in = img_in / 255.0 + img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST) + img_in = img_in.reshape(1, 448, 448, 3) + else: + img_1ch = self.imread(grayscale=True) + width_early = img_1ch.shape[1] + img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + + img_1ch = img_1ch / 255.0 + img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST) + img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3)) + img_in[0, :, :, 0] = img_1ch[:, :] + img_in[0, :, :, 1] = img_1ch[:, :] + img_in[0, :, :, 2] = img_1ch[:, :] + + label_p_pred = self.model_classifier.predict(img_in, verbose=0) + num_col = np.argmax(label_p_pred[0]) + 1 + elif (self.num_col_upper and self.num_col_lower) and (self.num_col_upper!=self.num_col_lower): + if self.input_binary: + img_in = np.copy(img) + img_in = img_in / 255.0 + img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST) + img_in = img_in.reshape(1, 448, 448, 3) + else: + img_1ch = self.imread(grayscale=True) + width_early = img_1ch.shape[1] + img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + + img_1ch = img_1ch / 255.0 + img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST) + img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3)) + img_in[0, :, :, 0] = img_1ch[:, :] + img_in[0, :, :, 1] = img_1ch[:, :] + img_in[0, :, :, 2] = img_1ch[:, :] + + label_p_pred = self.model_classifier.predict(img_in, verbose=0) + num_col = np.argmax(label_p_pred[0]) + 1 + + if num_col > self.num_col_upper: + num_col = self.num_col_upper + label_p_pred = [np.ones(6)] + if num_col < self.num_col_lower: + num_col = self.num_col_lower + label_p_pred = [np.ones(6)] + else: + num_col = self.num_col_upper + label_p_pred = [np.ones(6)] + + self.logger.info("Found %d columns (%s)", num_col, np.around(label_p_pred, decimals=5)) + + if dpi < DPI_THRESHOLD: + if light_version and num_col in (1,2): + img_new, num_column_is_classified = self.calculate_width_height_by_columns_1_2( + img, num_col, width_early, label_p_pred) + else: + img_new, num_column_is_classified = self.calculate_width_height_by_columns( + img, num_col, width_early, label_p_pred) + if light_version: + image_res = np.copy(img_new) + else: + image_res = self.predict_enhancement(img_new) + is_image_enhanced = True + + else: + num_column_is_classified = True + image_res = np.copy(img) + is_image_enhanced = False + + self.logger.debug("exit resize_and_enhance_image_with_column_classifier") + return is_image_enhanced, img, image_res, num_col, num_column_is_classified, img_bin + def read_xml(self, xml_file): + file_name = Path(xml_file).stem + tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) + root1=tree1.getroot() + alltags=[elem.tag for elem in root1.iter()] + link=alltags[0].split('}')[0]+'}' + + index_tot_regions = [] + tot_region_ref = [] + + for jj in root1.iter(link+'Page'): + y_len=int(jj.attrib['imageHeight']) + x_len=int(jj.attrib['imageWidth']) + + for jj in root1.iter(link+'RegionRefIndexed'): + index_tot_regions.append(jj.attrib['index']) + tot_region_ref.append(jj.attrib['regionRef']) + + if (link+'PrintSpace' in alltags) or (link+'Border' in alltags): + co_printspace = [] + if link+'PrintSpace' in alltags: + region_tags_printspace = np.unique([x for x in alltags if x.endswith('PrintSpace')]) + elif link+'Border' in alltags: + region_tags_printspace = np.unique([x for x in alltags if x.endswith('Border')]) + + for tag in region_tags_printspace: + if link+'PrintSpace' in alltags: + tag_endings_printspace = ['}PrintSpace','}printspace'] + elif link+'Border' in alltags: + tag_endings_printspace = ['}Border','}border'] + + if tag.endswith(tag_endings_printspace[0]) or tag.endswith(tag_endings_printspace[1]): + for nn in root1.iter(tag): + c_t_in = [] + sumi = 0 + for vv in nn.iter(): + # check the format of coords + if vv.tag == link + 'Coords': + coords = bool(vv.attrib) + if coords: + p_h = vv.attrib['points'].split(' ') + c_t_in.append( + np.array([[int(x.split(',')[0]), int(x.split(',')[1])] for x in p_h])) + break + else: + pass + + if vv.tag == link + 'Point': + c_t_in.append([int(float(vv.attrib['x'])), int(float(vv.attrib['y']))]) + sumi += 1 + elif vv.tag != link + 'Point' and sumi >= 1: + break + co_printspace.append(np.array(c_t_in)) + img_printspace = np.zeros( (y_len,x_len,3) ) + img_printspace=cv2.fillPoly(img_printspace, pts =co_printspace, color=(1,1,1)) + img_printspace = img_printspace.astype(np.uint8) + + imgray = cv2.cvtColor(img_printspace, cv2.COLOR_BGR2GRAY) + _, thresh = cv2.threshold(imgray, 0, 255, 0) + contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + cnt_size = np.array([cv2.contourArea(contours[j]) for j in range(len(contours))]) + cnt = contours[np.argmax(cnt_size)] + x, y, w, h = cv2.boundingRect(cnt) + + bb_coord_printspace = [x, y, w, h] + + else: + bb_coord_printspace = None + + + region_tags=np.unique([x for x in alltags if x.endswith('Region')]) + co_text_paragraph=[] + co_text_drop=[] + co_text_heading=[] + co_text_header=[] + co_text_marginalia=[] + co_text_catch=[] + co_text_page_number=[] + co_text_signature_mark=[] + co_sep=[] + co_img=[] + co_table=[] + co_graphic=[] + co_graphic_text_annotation=[] + co_graphic_decoration=[] + co_noise=[] + + co_text_paragraph_text=[] + co_text_drop_text=[] + co_text_heading_text=[] + co_text_header_text=[] + co_text_marginalia_text=[] + co_text_catch_text=[] + co_text_page_number_text=[] + co_text_signature_mark_text=[] + co_sep_text=[] + co_img_text=[] + co_table_text=[] + co_graphic_text=[] + co_graphic_text_annotation_text=[] + co_graphic_decoration_text=[] + co_noise_text=[] + + id_paragraph = [] + id_header = [] + id_heading = [] + id_marginalia = [] + + for tag in region_tags: + if tag.endswith('}TextRegion') or tag.endswith('}Textregion'): + for nn in root1.iter(tag): + for child2 in nn: + tag2 = child2.tag + if tag2.endswith('}TextEquiv') or tag2.endswith('}TextEquiv'): + for childtext2 in child2: + if childtext2.tag.endswith('}Unicode') or childtext2.tag.endswith('}Unicode'): + if "type" in nn.attrib and nn.attrib['type']=='drop-capital': + co_text_drop_text.append(childtext2.text) + elif "type" in nn.attrib and nn.attrib['type']=='heading': + co_text_heading_text.append(childtext2.text) + elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': + co_text_signature_mark_text.append(childtext2.text) + elif "type" in nn.attrib and nn.attrib['type']=='header': + co_text_header_text.append(childtext2.text) + ###elif "type" in nn.attrib and nn.attrib['type']=='catch-word': + ###co_text_catch_text.append(childtext2.text) + ###elif "type" in nn.attrib and nn.attrib['type']=='page-number': + ###co_text_page_number_text.append(childtext2.text) + elif "type" in nn.attrib and nn.attrib['type']=='marginalia': + co_text_marginalia_text.append(childtext2.text) + else: + co_text_paragraph_text.append(childtext2.text) + c_t_in_drop=[] + c_t_in_paragraph=[] + c_t_in_heading=[] + c_t_in_header=[] + c_t_in_page_number=[] + c_t_in_signature_mark=[] + c_t_in_catch=[] + c_t_in_marginalia=[] + + + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + + coords=bool(vv.attrib) + if coords: + #print('birda1') + p_h=vv.attrib['points'].split(' ') + + + + if "type" in nn.attrib and nn.attrib['type']=='drop-capital': + + c_t_in_drop.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + elif "type" in nn.attrib and nn.attrib['type']=='heading': + ##id_heading.append(nn.attrib['id']) + c_t_in_heading.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + + elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': + + c_t_in_signature_mark.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + #print(c_t_in_paragraph) + elif "type" in nn.attrib and nn.attrib['type']=='header': + #id_header.append(nn.attrib['id']) + c_t_in_header.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + + ###elif "type" in nn.attrib and nn.attrib['type']=='catch-word': + ###c_t_in_catch.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + + ###elif "type" in nn.attrib and nn.attrib['type']=='page-number': + + ###c_t_in_page_number.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + elif "type" in nn.attrib and nn.attrib['type']=='marginalia': + #id_marginalia.append(nn.attrib['id']) + + c_t_in_marginalia.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + else: + #id_paragraph.append(nn.attrib['id']) + + c_t_in_paragraph.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + break + else: + pass + + + if vv.tag==link+'Point': + if "type" in nn.attrib and nn.attrib['type']=='drop-capital': + + c_t_in_drop.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + sumi+=1 + + elif "type" in nn.attrib and nn.attrib['type']=='heading': + #id_heading.append(nn.attrib['id']) + c_t_in_heading.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + sumi+=1 + + + elif "type" in nn.attrib and nn.attrib['type']=='signature-mark': + + c_t_in_signature_mark.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + sumi+=1 + elif "type" in nn.attrib and nn.attrib['type']=='header': + #id_header.append(nn.attrib['id']) + c_t_in_header.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + sumi+=1 + + + ###elif "type" in nn.attrib and nn.attrib['type']=='catch-word': + ###c_t_in_catch.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + ###sumi+=1 + + ###elif "type" in nn.attrib and nn.attrib['type']=='page-number': + + ###c_t_in_page_number.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + ###sumi+=1 + + elif "type" in nn.attrib and nn.attrib['type']=='marginalia': + #id_marginalia.append(nn.attrib['id']) + + c_t_in_marginalia.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + sumi+=1 + + else: + #id_paragraph.append(nn.attrib['id']) + c_t_in_paragraph.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + sumi+=1 + + elif vv.tag!=link+'Point' and sumi>=1: + break + + if len(c_t_in_drop)>0: + co_text_drop.append(np.array(c_t_in_drop)) + if len(c_t_in_paragraph)>0: + co_text_paragraph.append(np.array(c_t_in_paragraph)) + id_paragraph.append(nn.attrib['id']) + if len(c_t_in_heading)>0: + co_text_heading.append(np.array(c_t_in_heading)) + id_heading.append(nn.attrib['id']) + + if len(c_t_in_header)>0: + co_text_header.append(np.array(c_t_in_header)) + id_header.append(nn.attrib['id']) + if len(c_t_in_page_number)>0: + co_text_page_number.append(np.array(c_t_in_page_number)) + if len(c_t_in_catch)>0: + co_text_catch.append(np.array(c_t_in_catch)) + + if len(c_t_in_signature_mark)>0: + co_text_signature_mark.append(np.array(c_t_in_signature_mark)) + + if len(c_t_in_marginalia)>0: + co_text_marginalia.append(np.array(c_t_in_marginalia)) + id_marginalia.append(nn.attrib['id']) + + + elif tag.endswith('}GraphicRegion') or tag.endswith('}graphicregion'): + for nn in root1.iter(tag): + c_t_in=[] + c_t_in_text_annotation=[] + c_t_in_decoration=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + + if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': + c_t_in_text_annotation.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + elif "type" in nn.attrib and nn.attrib['type']=='decoration': + c_t_in_decoration.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + else: + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + + + break + else: + pass + + + if vv.tag==link+'Point': + if "type" in nn.attrib and nn.attrib['type']=='handwritten-annotation': + c_t_in_text_annotation.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + sumi+=1 + + elif "type" in nn.attrib and nn.attrib['type']=='decoration': + c_t_in_decoration.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + sumi+=1 + + else: + c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + sumi+=1 + + if len(c_t_in_text_annotation)>0: + co_graphic_text_annotation.append(np.array(c_t_in_text_annotation)) + if len(c_t_in_decoration)>0: + co_graphic_decoration.append(np.array(c_t_in_decoration)) + if len(c_t_in)>0: + co_graphic.append(np.array(c_t_in)) + + + + elif tag.endswith('}ImageRegion') or tag.endswith('}imageregion'): + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + sumi+=1 + elif vv.tag!=link+'Point' and sumi>=1: + break + co_img.append(np.array(c_t_in)) + co_img_text.append(' ') + + + elif tag.endswith('}SeparatorRegion') or tag.endswith('}separatorregion'): + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + sumi+=1 + elif vv.tag!=link+'Point' and sumi>=1: + break + co_sep.append(np.array(c_t_in)) + + + + elif tag.endswith('}TableRegion') or tag.endswith('}tableregion'): + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + sumi+=1 + + elif vv.tag!=link+'Point' and sumi>=1: + break + co_table.append(np.array(c_t_in)) + co_table_text.append(' ') + + elif tag.endswith('}NoiseRegion') or tag.endswith('}noiseregion'): + for nn in root1.iter(tag): + c_t_in=[] + sumi=0 + for vv in nn.iter(): + # check the format of coords + if vv.tag==link+'Coords': + coords=bool(vv.attrib) + if coords: + p_h=vv.attrib['points'].split(' ') + c_t_in.append( np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) ) + break + else: + pass + + + if vv.tag==link+'Point': + c_t_in.append([ int(float(vv.attrib['x'])) , int(float(vv.attrib['y'])) ]) + sumi+=1 + + elif vv.tag!=link+'Point' and sumi>=1: + break + co_noise.append(np.array(c_t_in)) + co_noise_text.append(' ') + + img = np.zeros( (y_len,x_len,3) ) + img_poly=cv2.fillPoly(img, pts =co_text_paragraph, color=(1,1,1)) + + img_poly=cv2.fillPoly(img, pts =co_text_heading, color=(2,2,2)) + img_poly=cv2.fillPoly(img, pts =co_text_header, color=(2,2,2)) + img_poly=cv2.fillPoly(img, pts =co_text_marginalia, color=(3,3,3)) + img_poly=cv2.fillPoly(img, pts =co_img, color=(4,4,4)) + img_poly=cv2.fillPoly(img, pts =co_sep, color=(5,5,5)) + + return tree1, root1, bb_coord_printspace, file_name, id_paragraph, id_header+id_heading, co_text_paragraph, co_text_header+co_text_heading,\ + tot_region_ref,x_len, y_len,index_tot_regions, img_poly + + def return_indexes_of_contours_loctaed_inside_another_list_of_contours(self, contours, contours_loc, cx_main_loc, cy_main_loc, indexes_loc): + indexes_of_located_cont = [] + center_x_coordinates_of_located = [] + center_y_coordinates_of_located = [] + #M_main_tot = [cv2.moments(contours_loc[j]) + #for j in range(len(contours_loc))] + #cx_main_loc = [(M_main_tot[j]["m10"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] + #cy_main_loc = [(M_main_tot[j]["m01"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] + + for ij in range(len(contours)): + results = [cv2.pointPolygonTest(contours[ij], (cx_main_loc[ind], cy_main_loc[ind]), False) + for ind in range(len(cy_main_loc)) ] + results = np.array(results) + indexes_in = np.where((results == 0) | (results == 1)) + indexes = indexes_loc[indexes_in]# [(results == 0) | (results == 1)]#np.where((results == 0) | (results == 1)) + + indexes_of_located_cont.append(indexes) + center_x_coordinates_of_located.append(np.array(cx_main_loc)[indexes_in] ) + center_y_coordinates_of_located.append(np.array(cy_main_loc)[indexes_in] ) + + return indexes_of_located_cont, center_x_coordinates_of_located, center_y_coordinates_of_located + + def do_order_of_regions_with_model(self, contours_only_text_parent, contours_only_text_parent_h, text_regions_p): + height1 =672#448 + width1 = 448#224 + + height2 =672#448 + width2= 448#224 + + height3 =672#448 + width3 = 448#224 + + inference_bs = 3 + + ver_kernel = np.ones((5, 1), dtype=np.uint8) + hor_kernel = np.ones((1, 5), dtype=np.uint8) + + + min_cont_size_to_be_dilated = 10 + if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: + cx_conts, cy_conts, x_min_conts, x_max_conts, y_min_conts, y_max_conts, _ = find_new_features_of_contours(contours_only_text_parent) + args_cont_located = np.array(range(len(contours_only_text_parent))) + + diff_y_conts = np.abs(y_max_conts[:]-y_min_conts) + diff_x_conts = np.abs(x_max_conts[:]-x_min_conts) + + mean_x = statistics.mean(diff_x_conts) + median_x = statistics.median(diff_x_conts) + + + diff_x_ratio= diff_x_conts/mean_x + + args_cont_located_excluded = args_cont_located[diff_x_ratio>=1.3] + args_cont_located_included = args_cont_located[diff_x_ratio<1.3] + + contours_only_text_parent_excluded = [contours_only_text_parent[ind] for ind in range(len(contours_only_text_parent)) if diff_x_ratio[ind]>=1.3]#contours_only_text_parent[diff_x_ratio>=1.3] + contours_only_text_parent_included = [contours_only_text_parent[ind] for ind in range(len(contours_only_text_parent)) if diff_x_ratio[ind]<1.3]#contours_only_text_parent[diff_x_ratio<1.3] + + + cx_conts_excluded = [cx_conts[ind] for ind in range(len(cx_conts)) if diff_x_ratio[ind]>=1.3]#cx_conts[diff_x_ratio>=1.3] + cx_conts_included = [cx_conts[ind] for ind in range(len(cx_conts)) if diff_x_ratio[ind]<1.3]#cx_conts[diff_x_ratio<1.3] + + cy_conts_excluded = [cy_conts[ind] for ind in range(len(cy_conts)) if diff_x_ratio[ind]>=1.3]#cy_conts[diff_x_ratio>=1.3] + cy_conts_included = [cy_conts[ind] for ind in range(len(cy_conts)) if diff_x_ratio[ind]<1.3]#cy_conts[diff_x_ratio<1.3] + + #print(diff_x_ratio, 'ratio') + text_regions_p = text_regions_p.astype('uint8') + + if len(contours_only_text_parent_excluded)>0: + textregion_par = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1])).astype('uint8') + textregion_par = cv2.fillPoly(textregion_par, pts=contours_only_text_parent_included, color=(1,1)) + else: + textregion_par = (text_regions_p[:,:]==1)*1 + textregion_par = textregion_par.astype('uint8') + + text_regions_p_textregions_dilated = cv2.erode(textregion_par , hor_kernel, iterations=2) + text_regions_p_textregions_dilated = cv2.dilate(text_regions_p_textregions_dilated , ver_kernel, iterations=4) + text_regions_p_textregions_dilated = cv2.erode(text_regions_p_textregions_dilated , hor_kernel, iterations=1) + text_regions_p_textregions_dilated = cv2.dilate(text_regions_p_textregions_dilated , ver_kernel, iterations=5) + text_regions_p_textregions_dilated[text_regions_p[:,:]>1] = 0 + + + contours_only_dilated, hir_on_text_dilated = return_contours_of_image(text_regions_p_textregions_dilated) + contours_only_dilated = return_parent_contours(contours_only_dilated, hir_on_text_dilated) + + indexes_of_located_cont, center_x_coordinates_of_located, center_y_coordinates_of_located = self.return_indexes_of_contours_loctaed_inside_another_list_of_contours(contours_only_dilated, contours_only_text_parent_included, cx_conts_included, cy_conts_included, args_cont_located_included) + + + if len(args_cont_located_excluded)>0: + for ind in args_cont_located_excluded: + indexes_of_located_cont.append(np.array([ind])) + contours_only_dilated.append(contours_only_text_parent[ind]) + center_y_coordinates_of_located.append(0) + + array_list = [np.array([elem]) if isinstance(elem, int) else elem for elem in indexes_of_located_cont] + flattened_array = np.concatenate([arr.ravel() for arr in array_list]) + #print(len( np.unique(flattened_array)), 'indexes_of_located_cont uniques') + + missing_textregions = list( set(np.array(range(len(contours_only_text_parent))) ) - set(np.unique(flattened_array)) ) + #print(missing_textregions, 'missing_textregions') + + for ind in missing_textregions: + indexes_of_located_cont.append(np.array([ind])) + contours_only_dilated.append(contours_only_text_parent[ind]) + center_y_coordinates_of_located.append(0) + + + if contours_only_text_parent_h: + for vi in range(len(contours_only_text_parent_h)): + indexes_of_located_cont.append(int(vi+len(contours_only_text_parent))) + + array_list = [np.array([elem]) if isinstance(elem, int) else elem for elem in indexes_of_located_cont] + flattened_array = np.concatenate([arr.ravel() for arr in array_list]) + + y_len = text_regions_p.shape[0] + x_len = text_regions_p.shape[1] + + img_poly = np.zeros((y_len,x_len), dtype='uint8') + img_poly[text_regions_p[:,:]==1] = 1 + img_poly[text_regions_p[:,:]==2] = 2 + img_poly[text_regions_p[:,:]==3] = 4 + img_poly[text_regions_p[:,:]==6] = 5 + + img_header_and_sep = np.zeros((y_len,x_len), dtype='uint8') + if contours_only_text_parent_h: + _, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, _ = find_new_features_of_contours( + contours_only_text_parent_h) + for j in range(len(cy_main)): + img_header_and_sep[int(y_max_main[j]):int(y_max_main[j])+12, + int(x_min_main[j]):int(x_max_main[j])] = 1 + co_text_all_org = contours_only_text_parent + contours_only_text_parent_h + if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: + co_text_all = contours_only_dilated + contours_only_text_parent_h + else: + co_text_all = contours_only_text_parent + contours_only_text_parent_h + else: + co_text_all_org = contours_only_text_parent + if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: + co_text_all = contours_only_dilated + else: + co_text_all = contours_only_text_parent + + if not len(co_text_all): + return [], [] + + labels_con = np.zeros((int(y_len /6.), int(x_len/6.), len(co_text_all)), dtype=bool) + + co_text_all = [(i/6).astype(int) for i in co_text_all] + for i in range(len(co_text_all)): + img = labels_con[:,:,i].astype(np.uint8) + + #img = cv2.resize(img, (int(img.shape[1]/6), int(img.shape[0]/6)), interpolation=cv2.INTER_NEAREST) + + cv2.fillPoly(img, pts=[co_text_all[i]], color=(1,)) + labels_con[:,:,i] = img + + + labels_con = resize_image(labels_con.astype(np.uint8), height1, width1).astype(bool) + img_header_and_sep = resize_image(img_header_and_sep, height1, width1) + img_poly = resize_image(img_poly, height3, width3) + + + + input_1 = np.zeros((inference_bs, height1, width1, 3)) + ordered = [list(range(len(co_text_all)))] + index_update = 0 + #print(labels_con.shape[2],"number of regions for reading order") + while index_update>=0: + ij_list = ordered.pop(index_update) + i = ij_list.pop(0) + + ante_list = [] + post_list = [] + tot_counter = 0 + batch = [] + for j in ij_list: + img1 = labels_con[:,:,i].astype(float) + img2 = labels_con[:,:,j].astype(float) + img1[img_poly==5] = 2 + img2[img_poly==5] = 2 + img1[img_header_and_sep==1] = 3 + img2[img_header_and_sep==1] = 3 + + input_1[len(batch), :, :, 0] = img1 / 3. + input_1[len(batch), :, :, 2] = img2 / 3. + input_1[len(batch), :, :, 1] = img_poly / 5. + + tot_counter += 1 + batch.append(j) + if tot_counter % inference_bs == 0 or tot_counter == len(ij_list): + y_pr = self.model_reading_order.predict(input_1 , verbose=0) + for jb, j in enumerate(batch): + if y_pr[jb][0]>=0.5: + post_list.append(j) + else: + ante_list.append(j) + batch = [] + + if len(ante_list): + ordered.insert(index_update, ante_list) + index_update += 1 + ordered.insert(index_update, [i]) + if len(post_list): + ordered.insert(index_update + 1, post_list) + + index_update = -1 + for index_next, ij_list in enumerate(ordered): + if len(ij_list) > 1: + index_update = index_next + break + + ordered = [i[0] for i in ordered] + + ##id_all_text = np.array(id_all_text)[index_sort] + + + if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: + org_contours_indexes = [] + for ind in range(len(ordered)): + region_with_curr_order = ordered[ind] + if region_with_curr_order < len(contours_only_dilated): + if np.isscalar(indexes_of_located_cont[region_with_curr_order]): + org_contours_indexes = org_contours_indexes + [indexes_of_located_cont[region_with_curr_order]] + else: + arg_sort_located_cont = np.argsort(center_y_coordinates_of_located[region_with_curr_order]) + org_contours_indexes = org_contours_indexes + list(np.array(indexes_of_located_cont[region_with_curr_order])[arg_sort_located_cont]) ##org_contours_indexes + list ( + else: + org_contours_indexes = org_contours_indexes + [indexes_of_located_cont[region_with_curr_order]] + + region_ids = ['region_%04d' % i for i in range(len(co_text_all_org))] + return org_contours_indexes, region_ids + else: + region_ids = ['region_%04d' % i for i in range(len(co_text_all_org))] + return ordered, region_ids + + + + + def run(self, xml_filename : Optional[str] = None, dir_in : Optional[str] = None, overwrite : bool = False): + """ + Get image and scales, then extract the page of scanned image + """ + self.logger.debug("enter run") + t0_tot = time.time() + + if dir_in: + self.ls_xmls = os.listdir(dir_in) + elif xml_filename: + self.ls_xmls = [xml_filename] + else: + raise ValueError("run requires either a single image filename or a directory") + + for xml_filename in self.ls_xmls: + self.logger.info(xml_filename) + t0 = time.time() + + if dir_in: + xml_file = os.path.join(dir_in, xml_filename) + else: + xml_file = xml_filename + + tree_xml, root_xml, bb_coord_printspace, file_name, id_paragraph, id_header, co_text_paragraph, co_text_header, tot_region_ref, x_len, y_len, index_tot_regions, img_poly = self.read_xml(xml_file) + + id_all_text = id_paragraph + id_header + + order_text_new, id_of_texts_tot = self.do_order_of_regions_with_model(co_text_paragraph, co_text_header, img_poly[:,:,0]) + + id_all_text = np.array(id_all_text)[order_text_new] + + alltags=[elem.tag for elem in root_xml.iter()] + + + + link=alltags[0].split('}')[0]+'}' + name_space = alltags[0].split('}')[0] + name_space = name_space.split('{')[1] + + page_element = root_xml.find(link+'Page') + + + old_ro = root_xml.find(".//{*}ReadingOrder") + + if old_ro is not None: + page_element.remove(old_ro) + + #print(old_ro, 'old_ro') + ro_subelement = ET.Element('ReadingOrder') + + ro_subelement2 = ET.SubElement(ro_subelement, 'OrderedGroup') + ro_subelement2.set('id', "ro357564684568544579089") + + for index, id_text in enumerate(id_all_text): + new_element_2 = ET.SubElement(ro_subelement2, 'RegionRefIndexed') + new_element_2.set('regionRef', id_all_text[index]) + new_element_2.set('index', str(index)) + + if (link+'PrintSpace' in alltags) or (link+'Border' in alltags): + page_element.insert(1, ro_subelement) + else: + page_element.insert(0, ro_subelement) + + alltags=[elem.tag for elem in root_xml.iter()] + + ET.register_namespace("",name_space) + tree_xml.write(os.path.join(self.dir_out, file_name+'.xml'),xml_declaration=True,method='xml',encoding="utf8",default_namespace=None) + + #sys.exit() + From 41365645efd7690ace773a78e4334b31090f055c Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 26 Aug 2025 22:38:03 +0200 Subject: [PATCH 181/492] Marginals are divided into left and right, and written from top to bottom. --- src/eynollah/eynollah.py | 138 ++++++++++++++++++++++++-------- src/eynollah/mb_ro_on_layout.py | 18 +++-- src/eynollah/utils/utils_ocr.py | 88 ++++++++++---------- src/eynollah/utils/xml.py | 10 ++- src/eynollah/writer.py | 58 ++++++++++---- 5 files changed, 215 insertions(+), 97 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 5299d3e..30e180d 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -289,7 +289,7 @@ class Eynollah: self.model_region_dir_p_ens = dir_models + "/eynollah-main-regions-ensembled_20210425" self.model_region_dir_p_ens_light = dir_models + "/eynollah-main-regions_20220314" self.model_region_dir_p_ens_light_only_images_extraction = dir_models + "/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18" - self.model_reading_order_dir = dir_models + "/model_step_4800000_mb_ro"#"/model_ens_reading_order_machine_based" + self.model_reading_order_dir = dir_models + "/model_eynollah_reading_order_20250824"#"/model_mb_ro_aug_ens_11"#"/model_step_3200000_mb_ro"#"/model_ens_reading_order_machine_based"#"/model_mb_ro_aug_ens_8"#"/model_ens_reading_order_machine_based" #"/modelens_12sp_elay_0_3_4__3_6_n" #"/modelens_earlylayout_12spaltige_2_3_5_6_7_8" #"/modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18" @@ -725,6 +725,7 @@ class Eynollah: label_p_pred = self.model_classifier.predict(img_in, verbose=0) num_col = np.argmax(label_p_pred[0]) + 1 + elif (self.num_col_upper and self.num_col_lower) and (self.num_col_upper!=self.num_col_lower): if self.input_binary: img_in = np.copy(img) @@ -3090,6 +3091,26 @@ class Eynollah: num_col = num_col + 1 if not num_column_is_classified: num_col_classifier = num_col + 1 + if self.num_col_upper and self.num_col_lower: + if self.num_col_upper == self.num_col_lower: + num_col_classifier = self.num_col_upper + else: + if num_col_classifier < self.num_col_lower: + num_col_classifier = self.num_col_lower + if num_col_classifier > self.num_col_upper: + num_col_classifier = self.num_col_upper + + elif self.num_col_lower and not self.num_col_upper: + if num_col_classifier < self.num_col_lower: + num_col_classifier = self.num_col_lower + + elif self.num_col_upper and not self.num_col_lower: + if num_col_classifier > self.num_col_upper: + num_col_classifier = self.num_col_upper + + else: + pass + except Exception as why: self.logger.error(why) num_col = None @@ -3223,7 +3244,6 @@ class Eynollah: text_regions_p_1[mask_lines[:, :] == 1] = 3 text_regions_p = text_regions_p_1[:, :] text_regions_p = np.array(text_regions_p) - if num_col_classifier in (1, 2): try: regions_without_separators = (text_regions_p[:, :] == 1) * 1 @@ -4447,6 +4467,43 @@ class Eynollah: return (slopes_rem, all_found_textline_polygons_rem, boxes_text_rem, txt_con_org_rem, contours_only_text_parent_rem, index_by_text_par_con_rem_sort) + + def separate_marginals_to_left_and_right_and_order_from_top_to_down(self, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes_marginals, mid_point_of_page_width): + cx_marg, cy_marg, _, _, _, _, _ = find_new_features_of_contours( + polygons_of_marginals) + + cx_marg = np.array(cx_marg) + cy_marg = np.array(cy_marg) + + poly_marg_left = list( np.array(polygons_of_marginals)[cx_marg < mid_point_of_page_width] ) + poly_marg_right = list( np.array(polygons_of_marginals)[cx_marg >= mid_point_of_page_width] ) + + all_found_textline_polygons_marginals_left = list( np.array(all_found_textline_polygons_marginals)[cx_marg < mid_point_of_page_width] ) + all_found_textline_polygons_marginals_right = list( np.array(all_found_textline_polygons_marginals)[cx_marg >= mid_point_of_page_width] ) + + all_box_coord_marginals_left = list( np.array(all_box_coord_marginals)[cx_marg < mid_point_of_page_width] ) + all_box_coord_marginals_right = list( np.array(all_box_coord_marginals)[cx_marg >= mid_point_of_page_width] ) + + slopes_marg_left = list( np.array(slopes_marginals)[cx_marg < mid_point_of_page_width] ) + slopes_marg_right = list( np.array(slopes_marginals)[cx_marg >= mid_point_of_page_width] ) + + cy_marg_left = cy_marg[cx_marg < mid_point_of_page_width] + cy_marg_right = cy_marg[cx_marg >= mid_point_of_page_width] + + ordered_left_marginals = [poly for _, poly in sorted(zip(cy_marg_left, poly_marg_left), key=lambda x: x[0])] + ordered_right_marginals = [poly for _, poly in sorted(zip(cy_marg_right, poly_marg_right), key=lambda x: x[0])] + + ordered_left_marginals_textline = [poly for _, poly in sorted(zip(cy_marg_left, all_found_textline_polygons_marginals_left), key=lambda x: x[0])] + ordered_right_marginals_textline = [poly for _, poly in sorted(zip(cy_marg_right, all_found_textline_polygons_marginals_right), key=lambda x: x[0])] + + ordered_left_marginals_bbox = [poly for _, poly in sorted(zip(cy_marg_left, all_box_coord_marginals_left), key=lambda x: x[0])] + ordered_right_marginals_bbox = [poly for _, poly in sorted(zip(cy_marg_right, all_box_coord_marginals_right), key=lambda x: x[0])] + + ordered_left_slopes_marginals = [poly for _, poly in sorted(zip(cy_marg_left, slopes_marg_left), key=lambda x: x[0])] + ordered_right_slopes_marginals = [poly for _, poly in sorted(zip(cy_marg_right, slopes_marg_right), key=lambda x: x[0])] + + return ordered_left_marginals, ordered_right_marginals, ordered_left_marginals_textline, ordered_right_marginals_textline, ordered_left_marginals_bbox, ordered_right_marginals_bbox, ordered_left_slopes_marginals, ordered_right_slopes_marginals + def run(self, image_filename : Optional[str] = None, dir_in : Optional[str] = None, overwrite : bool = False): """ @@ -4489,12 +4546,13 @@ class Eynollah: t0 = time.time() img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement(self.light_version) self.logger.info("Enhancing took %.1fs ", time.time() - t0) + if self.extract_only_images: text_regions_p_1, erosion_hurts, polygons_lines_xml, polygons_of_images, image_page, page_coord, cont_page = \ self.get_regions_light_v_extract_only_images(img_res, is_image_enhanced, num_col_classifier) pcgts = self.writer.build_pagexml_no_full_layout( [], page_coord, [], [], [], [], - polygons_of_images, [], [], [], [], [], + polygons_of_images, [], [], [], [], [], [], [], [], [], cont_page, [], []) if self.plotter: self.plotter.write_images_into_directory(polygons_of_images, image_page) @@ -4508,7 +4566,6 @@ class Eynollah: page_coord, image_page, textline_mask_tot_ea, img_bin_light, cont_page = \ self.run_graphics_and_columns_without_layout(textline_mask_tot_ea, img_bin_light) - ##all_found_textline_polygons =self.scale_contours_new(textline_mask_tot_ea) cnt_clean_rot_raw, hir_on_cnt_clean_rot = return_contours_of_image(textline_mask_tot_ea) @@ -4530,10 +4587,14 @@ class Eynollah: id_of_texts_tot =['region_0001'] polygons_of_images = [] - slopes_marginals = [] - polygons_of_marginals = [] - all_found_textline_polygons_marginals = [] - all_box_coord_marginals = [] + slopes_marginals_left = [] + slopes_marginals_right = [] + polygons_of_marginals_left = [] + polygons_of_marginals_right = [] + all_found_textline_polygons_marginals_left = [] + all_found_textline_polygons_marginals_right = [] + all_box_coord_marginals_left = [] + all_box_coord_marginals_right = [] polygons_lines_xml = [] contours_tables = [] conf_contours_textregions =[0] @@ -4546,8 +4607,8 @@ class Eynollah: pcgts = self.writer.build_pagexml_no_full_layout( cont_page, page_coord, order_text_new, id_of_texts_tot, - all_found_textline_polygons, page_coord, polygons_of_images, polygons_of_marginals, - all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, + all_found_textline_polygons, page_coord, polygons_of_images, polygons_of_marginals_left, polygons_of_marginals_right, + all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes, slopes_marginals_left, slopes_marginals_right, cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines=ocr_all_textlines, conf_contours_textregion=conf_contours_textregions, skip_layout_reading_order=self.skip_layout_and_reading_order) return pcgts @@ -4595,11 +4656,10 @@ class Eynollah: #self.logger.info('cont_page %s', cont_page) #plt.imshow(table_prediction) #plt.show() - if not num_col: self.logger.info("No columns detected, outputting an empty PAGE-XML") pcgts = self.writer.build_pagexml_no_full_layout( - [], page_coord, [], [], [], [], [], [], [], [], [], [], + [], page_coord, [], [], [], [], [], [], [], [], [], [], [], [], [], [], cont_page, [], []) return pcgts @@ -4771,6 +4831,7 @@ class Eynollah: contours_only_text_parent_d_ordered = [] contours_only_text_parent_d = [] #contours_only_text_parent = [] + if not len(contours_only_text_parent): # stop early empty_marginals = [[]] * len(polygons_of_marginals) @@ -4778,13 +4839,13 @@ class Eynollah: pcgts = self.writer.build_pagexml_full_layout( [], [], page_coord, [], [], [], [], [], [], polygons_of_images, contours_tables, [], - polygons_of_marginals, empty_marginals, empty_marginals, [], [], [], + polygons_of_marginals, polygons_of_marginals, empty_marginals, empty_marginals, empty_marginals, empty_marginals, [], [], [], [], cont_page, polygons_lines_xml) else: pcgts = self.writer.build_pagexml_no_full_layout( [], page_coord, [], [], [], [], polygons_of_images, - polygons_of_marginals, empty_marginals, empty_marginals, [], [], + polygons_of_marginals, polygons_of_marginals, empty_marginals, empty_marginals, empty_marginals, empty_marginals, [], [], [], cont_page, polygons_lines_xml, contours_tables) return pcgts @@ -4877,8 +4938,11 @@ class Eynollah: num_col_classifier, scale_param, slope_deskew) all_found_textline_polygons_marginals = small_textlines_to_parent_adherence2( all_found_textline_polygons_marginals, textline_mask_tot_ea, num_col_classifier) - - #print("text region early 6 in %.1fs", time.time() - t0) + + mid_point_of_page_width = text_regions_p.shape[1] / 2. + polygons_of_marginals_left, polygons_of_marginals_right, all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes_marginals_left, slopes_marginals_right = self.separate_marginals_to_left_and_right_and_order_from_top_to_down(polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes_marginals, mid_point_of_page_width) + + #print(len(polygons_of_marginals), len(ordered_left_marginals), len(ordered_right_marginals), 'marginals ordred') if self.full_layout: if np.abs(slope_deskew) >= SLOPE_THRESHOLD: contours_only_text_parent_d_ordered = self.return_list_of_contours_with_desired_order( @@ -4961,7 +5025,6 @@ class Eynollah: tror = time.time() order_text_new, id_of_texts_tot = self.do_order_of_regions_with_model( contours_only_text_parent, contours_only_text_parent_h, text_regions_p) - print('time spend for mb ro', time.time()-tror) else: if np.abs(slope_deskew) < SLOPE_THRESHOLD: order_text_new, id_of_texts_tot = self.do_order_of_regions( @@ -4978,10 +5041,15 @@ class Eynollah: else: ocr_all_textlines = None - if all_found_textline_polygons_marginals and len(all_found_textline_polygons_marginals)>0: - ocr_all_textlines_marginals = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons_marginals, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + if all_found_textline_polygons_marginals_left and len(all_found_textline_polygons_marginals_left)>0: + ocr_all_textlines_marginals_left = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons_marginals_left, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) else: - ocr_all_textlines_marginals = None + ocr_all_textlines_marginals_left = None + + if all_found_textline_polygons_marginals_right and len(all_found_textline_polygons_marginals_right)>0: + ocr_all_textlines_marginals_right = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons_marginals_right, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + else: + ocr_all_textlines_marginals_right = None if all_found_textline_polygons_h and len(all_found_textline_polygons)>0: ocr_all_textlines_h = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons_h, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) @@ -4994,15 +5062,16 @@ class Eynollah: ocr_all_textlines_drop = None else: ocr_all_textlines = None - ocr_all_textlines_marginals = None + ocr_all_textlines_marginals_left = None + ocr_all_textlines_marginals_right = None ocr_all_textlines_h = None ocr_all_textlines_drop = None pcgts = self.writer.build_pagexml_full_layout( contours_only_text_parent, contours_only_text_parent_h, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, - polygons_of_images, contours_tables, polygons_of_drop_capitals, polygons_of_marginals, - all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, - cont_page, polygons_lines_xml, ocr_all_textlines, ocr_all_textlines_h, ocr_all_textlines_marginals, ocr_all_textlines_drop, conf_contours_textregions, conf_contours_textregions_h) + polygons_of_images, contours_tables, polygons_of_drop_capitals, polygons_of_marginals_left, polygons_of_marginals_right, + all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes, slopes_h, slopes_marginals_left, slopes_marginals_right, + cont_page, polygons_lines_xml, ocr_all_textlines, ocr_all_textlines_h, ocr_all_textlines_marginals_left, ocr_all_textlines_marginals_right, ocr_all_textlines_drop, conf_contours_textregions, conf_contours_textregions_h) return pcgts contours_only_text_parent_h = None @@ -5077,19 +5146,24 @@ class Eynollah: gc.collect() if len(all_found_textline_polygons)>0: ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) - if all_found_textline_polygons_marginals and len(all_found_textline_polygons_marginals)>0: - ocr_all_textlines_marginals = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons_marginals, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + + if all_found_textline_polygons_marginals_left and len(all_found_textline_polygons_marginals_left)>0: + ocr_all_textlines_marginals_left = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons_marginals_left, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + + if all_found_textline_polygons_marginals_right and len(all_found_textline_polygons_marginals_right)>0: + ocr_all_textlines_marginals_right = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons_marginals_right, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) else: ocr_all_textlines = None - ocr_all_textlines_marginals = None + ocr_all_textlines_marginals_left = None + ocr_all_textlines_marginals_right = None self.logger.info("detection of reading order took %.1fs", time.time() - t_order) pcgts = self.writer.build_pagexml_no_full_layout( txt_con_org, page_coord, order_text_new, id_of_texts_tot, - all_found_textline_polygons, all_box_coord, polygons_of_images, polygons_of_marginals, - all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, - cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines, ocr_all_textlines_marginals, conf_contours_textregions) + all_found_textline_polygons, all_box_coord, polygons_of_images, polygons_of_marginals_left, polygons_of_marginals_right, + all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes, slopes_marginals_left, slopes_marginals_right, + cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines, ocr_all_textlines_marginals_left, ocr_all_textlines_marginals_right, conf_contours_textregions) return pcgts @@ -5145,7 +5219,7 @@ class Eynollah_ocr: self.b_s = int(batch_size) else: - self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250805" + self.model_ocr_dir = dir_models + "/model_step_45000_ocr"#"/model_eynollah_ocr_cnnrnn_20250805"# model_ocr = load_model(self.model_ocr_dir , compile=False) self.prediction_model = tf.keras.models.Model( @@ -5156,7 +5230,7 @@ class Eynollah_ocr: else: self.b_s = int(batch_size) - with open(os.path.join(self.model_ocr_dir, "characters_org.txt"),"r") as config_file: + with open(os.path.join(self.model_ocr_dir, "characters_20250707_all_lang.txt"),"r") as config_file: characters = json.load(config_file) AUTOTUNE = tf.data.AUTOTUNE diff --git a/src/eynollah/mb_ro_on_layout.py b/src/eynollah/mb_ro_on_layout.py index 7625a90..c03d831 100644 --- a/src/eynollah/mb_ro_on_layout.py +++ b/src/eynollah/mb_ro_on_layout.py @@ -64,7 +64,7 @@ class machine_based_reading_order_on_layout: self.executor = ProcessPoolExecutor(max_workers=cpu_count(), timeout=1200) atexit.register(self.executor.shutdown) self.dir_models = dir_models - self.model_reading_order_dir = dir_models + "/model_step_5100000_mb_ro"#"/model_ens_reading_order_machine_based" + self.model_reading_order_dir = dir_models + "/model_eynollah_reading_order_20250824"#"/model_ens_reading_order_machine_based" try: for device in tf.config.list_physical_devices('GPU'): @@ -942,10 +942,18 @@ class machine_based_reading_order_on_layout: x_len = text_regions_p.shape[1] img_poly = np.zeros((y_len,x_len), dtype='uint8') - img_poly[text_regions_p[:,:]==1] = 1 - img_poly[text_regions_p[:,:]==2] = 2 - img_poly[text_regions_p[:,:]==3] = 4 - img_poly[text_regions_p[:,:]==6] = 5 + ###img_poly[text_regions_p[:,:]==1] = 1 + ###img_poly[text_regions_p[:,:]==2] = 2 + ###img_poly[text_regions_p[:,:]==3] = 4 + ###img_poly[text_regions_p[:,:]==6] = 5 + + ##img_poly[text_regions_p[:,:]==1] = 1 + ##img_poly[text_regions_p[:,:]==2] = 2 + ##img_poly[text_regions_p[:,:]==3] = 3 + ##img_poly[text_regions_p[:,:]==4] = 4 + ##img_poly[text_regions_p[:,:]==5] = 5 + + img_poly = np.copy(text_regions_p) img_header_and_sep = np.zeros((y_len,x_len), dtype='uint8') if contours_only_text_parent_h: diff --git a/src/eynollah/utils/utils_ocr.py b/src/eynollah/utils/utils_ocr.py index 1e9162a..d974650 100644 --- a/src/eynollah/utils/utils_ocr.py +++ b/src/eynollah/utils/utils_ocr.py @@ -384,57 +384,63 @@ def return_rnn_cnn_ocr_of_given_textlines(image, all_found_textline_polygons, pr for indexing, ind_poly_first in enumerate(all_found_textline_polygons): #ocr_textline_in_textregion = [] - for indexing2, ind_poly in enumerate(ind_poly_first): + if len(ind_poly_first)==0: cropped_lines_region_indexer.append(indexer_text_region) - if not (textline_light or curved_line): - ind_poly = copy.deepcopy(ind_poly) - box_ind = all_box_coord[indexing] + cropped_lines_meging_indexing.append(0) + img_fin = np.ones((image_height, image_width, 3))*1 + cropped_lines.append(img_fin) - ind_poly = return_textline_contour_with_added_box_coordinate(ind_poly, box_ind) - #print(ind_poly_copy) - ind_poly[ind_poly<0] = 0 - x, y, w, h = cv2.boundingRect(ind_poly) - - w_scaled = w * image_height/float(h) + else: + for indexing2, ind_poly in enumerate(ind_poly_first): + cropped_lines_region_indexer.append(indexer_text_region) + if not (textline_light or curved_line): + ind_poly = copy.deepcopy(ind_poly) + box_ind = all_box_coord[indexing] - mask_poly = np.zeros(image.shape) - - img_poly_on_img = np.copy(image) - - mask_poly = cv2.fillPoly(mask_poly, pts=[ind_poly], color=(1, 1, 1)) - - - - mask_poly = mask_poly[y:y+h, x:x+w, :] - img_crop = img_poly_on_img[y:y+h, x:x+w, :] - - img_crop[mask_poly==0] = 255 - - if w_scaled < 640:#1.5*image_width: - img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(0) - else: - splited_images, splited_images_bin = return_textlines_split_if_needed(img_crop, None) + ind_poly = return_textline_contour_with_added_box_coordinate(ind_poly, box_ind) + #print(ind_poly_copy) + ind_poly[ind_poly<0] = 0 + x, y, w, h = cv2.boundingRect(ind_poly) - if splited_images: - img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[0], image_height, image_width) - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(1) - - img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[1], image_height, image_width) - - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(-1) - - else: + w_scaled = w * image_height/float(h) + + mask_poly = np.zeros(image.shape) + + img_poly_on_img = np.copy(image) + + mask_poly = cv2.fillPoly(mask_poly, pts=[ind_poly], color=(1, 1, 1)) + + + + mask_poly = mask_poly[y:y+h, x:x+w, :] + img_crop = img_poly_on_img[y:y+h, x:x+w, :] + + img_crop[mask_poly==0] = 255 + + if w_scaled < 640:#1.5*image_width: img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) cropped_lines.append(img_fin) cropped_lines_meging_indexing.append(0) + else: + splited_images, splited_images_bin = return_textlines_split_if_needed(img_crop, None) + + if splited_images: + img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[0], image_height, image_width) + cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(1) + + img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[1], image_height, image_width) + + cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(-1) + + else: + img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) + cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(0) indexer_text_region+=1 - extracted_texts = [] n_iterations = math.ceil(len(cropped_lines) / b_s_ocr) diff --git a/src/eynollah/utils/xml.py b/src/eynollah/utils/xml.py index bd95702..13420df 100644 --- a/src/eynollah/utils/xml.py +++ b/src/eynollah/utils/xml.py @@ -46,16 +46,22 @@ def create_page_xml(imageFilename, height, width): )) return pcgts -def xml_reading_order(page, order_of_texts, id_of_marginalia): +def xml_reading_order(page, order_of_texts, id_of_marginalia_left, id_of_marginalia_right): region_order = ReadingOrderType() og = OrderedGroupType(id="ro357564684568544579089") page.set_ReadingOrder(region_order) region_order.set_OrderedGroup(og) region_counter = EynollahIdCounter() + + for id_marginal in id_of_marginalia_left: + og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=id_marginal)) + region_counter.inc('region') + for idx_textregion, _ in enumerate(order_of_texts): og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=region_counter.region_id(order_of_texts[idx_textregion] + 1))) region_counter.inc('region') - for id_marginal in id_of_marginalia: + + for id_marginal in id_of_marginalia_right: og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=id_marginal)) region_counter.inc('region') diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index 085ee6f..2f9caf3 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -170,7 +170,7 @@ class EynollahXmlWriter(): with open(self.output_filename, 'w') as f: f.write(to_xml(pcgts)) - def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, found_polygons_tables, ocr_all_textlines=None, ocr_all_textlines_marginals=None, conf_contours_textregion=None, skip_layout_reading_order=False): + def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals_left, found_polygons_marginals_right, all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes, slopes_marginals_left, slopes_marginals_right, cont_page, polygons_lines_to_be_written_in_xml, found_polygons_tables, ocr_all_textlines=None, ocr_all_textlines_marginals_left=None, ocr_all_textlines_marginals_right=None, conf_contours_textregion=None, skip_layout_reading_order=False): self.logger.debug('enter build_pagexml_no_full_layout') # create the file structure @@ -181,8 +181,9 @@ class EynollahXmlWriter(): counter = EynollahIdCounter() if len(found_polygons_text_region) > 0: _counter_marginals = EynollahIdCounter(region_idx=len(order_of_texts)) - id_of_marginalia = [_counter_marginals.next_region_id for _ in found_polygons_marginals] - xml_reading_order(page, order_of_texts, id_of_marginalia) + id_of_marginalia_left = [_counter_marginals.next_region_id for _ in found_polygons_marginals_left] + id_of_marginalia_right = [_counter_marginals.next_region_id for _ in found_polygons_marginals_right] + xml_reading_order(page, order_of_texts, id_of_marginalia_left, id_of_marginalia_right) for mm in range(len(found_polygons_text_region)): textregion = TextRegionType(id=counter.next_region_id, type_='paragraph', @@ -195,17 +196,29 @@ class EynollahXmlWriter(): else: ocr_textlines = None self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, all_box_coord, slopes, counter, ocr_textlines) - - for mm in range(len(found_polygons_marginals)): + + for mm in range(len(found_polygons_marginals_left)): marginal = TextRegionType(id=counter.next_region_id, type_='marginalia', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals[mm], page_coord))) + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals_left[mm], page_coord))) page.add_TextRegion(marginal) - if ocr_all_textlines_marginals: - ocr_textlines = ocr_all_textlines_marginals[mm] + if ocr_all_textlines_marginals_left: + ocr_textlines = ocr_all_textlines_marginals_left[mm] else: ocr_textlines = None - self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals, mm, page_coord, all_box_coord_marginals, slopes_marginals, counter, ocr_textlines) + #print(ocr_textlines, mm, len(all_found_textline_polygons_marginals_left[mm]) ) + self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals_left, mm, page_coord, all_box_coord_marginals_left, slopes_marginals_left, counter, ocr_textlines) + + for mm in range(len(found_polygons_marginals_right)): + marginal = TextRegionType(id=counter.next_region_id, type_='marginalia', + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals_right[mm], page_coord))) + page.add_TextRegion(marginal) + if ocr_all_textlines_marginals_right: + ocr_textlines = ocr_all_textlines_marginals_right[mm] + else: + ocr_textlines = None + + self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals_right, mm, page_coord, all_box_coord_marginals_right, slopes_marginals_right, counter, ocr_textlines) for mm in range(len(found_polygons_text_region_img)): img_region = ImageRegionType(id=counter.next_region_id, Coords=CoordsType()) @@ -249,7 +262,7 @@ class EynollahXmlWriter(): return pcgts - def build_pagexml_full_layout(self, found_polygons_text_region, found_polygons_text_region_h, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, found_polygons_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, cont_page, polygons_lines_to_be_written_in_xml, ocr_all_textlines=None, ocr_all_textlines_h=None, ocr_all_textlines_marginals=None, ocr_all_textlines_drop=None, conf_contours_textregion=None, conf_contours_textregion_h=None): + def build_pagexml_full_layout(self, found_polygons_text_region, found_polygons_text_region_h, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, found_polygons_marginals_left,found_polygons_marginals_right, all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes, slopes_h, slopes_marginals_left, slopes_marginals_right, cont_page, polygons_lines_to_be_written_in_xml, ocr_all_textlines=None, ocr_all_textlines_h=None, ocr_all_textlines_marginals_left=None, ocr_all_textlines_marginals_right=None, ocr_all_textlines_drop=None, conf_contours_textregion=None, conf_contours_textregion_h=None): self.logger.debug('enter build_pagexml_full_layout') # create the file structure @@ -259,8 +272,9 @@ class EynollahXmlWriter(): counter = EynollahIdCounter() _counter_marginals = EynollahIdCounter(region_idx=len(order_of_texts)) - id_of_marginalia = [_counter_marginals.next_region_id for _ in found_polygons_marginals] - xml_reading_order(page, order_of_texts, id_of_marginalia) + id_of_marginalia_left = [_counter_marginals.next_region_id for _ in found_polygons_marginals_left] + id_of_marginalia_right = [_counter_marginals.next_region_id for _ in found_polygons_marginals_right] + xml_reading_order(page, order_of_texts, id_of_marginalia_left, id_of_marginalia_right) for mm in range(len(found_polygons_text_region)): textregion = TextRegionType(id=counter.next_region_id, type_='paragraph', @@ -285,15 +299,25 @@ class EynollahXmlWriter(): ocr_textlines = None self.serialize_lines_in_region(textregion, all_found_textline_polygons_h, mm, page_coord, all_box_coord_h, slopes_h, counter, ocr_textlines) - for mm in range(len(found_polygons_marginals)): + for mm in range(len(found_polygons_marginals_left)): marginal = TextRegionType(id=counter.next_region_id, type_='marginalia', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals[mm], page_coord))) + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals_left[mm], page_coord))) page.add_TextRegion(marginal) - if ocr_all_textlines_marginals: - ocr_textlines = ocr_all_textlines_marginals[mm] + if ocr_all_textlines_marginals_left: + ocr_textlines = ocr_all_textlines_marginals_left[mm] else: ocr_textlines = None - self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals, mm, page_coord, all_box_coord_marginals, slopes_marginals, counter, ocr_textlines) + self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals_left, mm, page_coord, all_box_coord_marginals_left, slopes_marginals_left, counter, ocr_textlines) + + for mm in range(len(found_polygons_marginals_right)): + marginal = TextRegionType(id=counter.next_region_id, type_='marginalia', + Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals_right[mm], page_coord))) + page.add_TextRegion(marginal) + if ocr_all_textlines_marginals_right: + ocr_textlines = ocr_all_textlines_marginals_right[mm] + else: + ocr_textlines = None + self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals_right, mm, page_coord, all_box_coord_marginals_right, slopes_marginals_right, counter, ocr_textlines) for mm in range(len(found_polygons_drop_capitals)): dropcapital = TextRegionType(id=counter.next_region_id, type_='drop-capital', From 9b9d21d8acf4a32ae5eb888ddb33d53b701a535b Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 28 Aug 2025 11:30:59 +0200 Subject: [PATCH 182/492] eynollah ocr: support using either a specific model name or a models directory (default model) --- src/eynollah/cli.py | 18 +++++++++--------- src/eynollah/eynollah.py | 28 +++++++++++++++++----------- 2 files changed, 26 insertions(+), 20 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 67fd57e..9dc326d 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -456,6 +456,11 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ help="directory of models", type=click.Path(exists=True, file_okay=False), ) +@click.option( + "--model_name", + help="Specific model file path to use for OCR", + type=click.Path(exists=True, file_okay=False), +) @click.option( "--tr_ocr", "-trocr/-notrocr", @@ -474,12 +479,6 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ is_flag=True, help="if this parameter set to true, cropped textline images will not be masked with textline contour.", ) -@click.option( - "--draw_texts_on_image", - "-dtoi/-ndtoi", - is_flag=True, - help="if this parameter set to true, the predicted texts will be displayed on an image.", -) @click.option( "--prediction_with_both_of_rgb_and_bin", "-brb/-nbrb", @@ -508,16 +507,17 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ help="Override log level globally to this", ) -def ocr(image, overwrite, dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, draw_texts_on_image, prediction_with_both_of_rgb_and_bin, batch_size, dataset_abbrevation, min_conf_value_of_textline_text, log_level): +def ocr(image, overwrite, dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, model_name, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, prediction_with_both_of_rgb_and_bin, batch_size, dataset_abbrevation, min_conf_value_of_textline_text, log_level): initLogging() if log_level: getLogger('eynollah').setLevel(getLevelName(log_level)) + + assert not model or not model_name, "model directory -m can not be set alongside specific model name --model_name" assert not export_textline_images_and_text or not tr_ocr, "Exporting textline and text -etit can not be set alongside transformer ocr -tr_ocr" assert not export_textline_images_and_text or not model, "Exporting textline and text -etit can not be set alongside model -m" assert not export_textline_images_and_text or not batch_size, "Exporting textline and text -etit can not be set alongside batch size -bs" assert not export_textline_images_and_text or not dir_in_bin, "Exporting textline and text -etit can not be set alongside directory of bin images -dib" assert not export_textline_images_and_text or not dir_out_image_text, "Exporting textline and text -etit can not be set alongside directory of images with predicted text -doit" - assert not export_textline_images_and_text or not draw_texts_on_image, "Exporting textline and text -etit can not be set alongside draw text on image -dtoi" assert not export_textline_images_and_text or not prediction_with_both_of_rgb_and_bin, "Exporting textline and text -etit can not be set alongside prediction with both rgb and bin -brb" assert (bool(image) ^ bool(dir_in)), "Either -i (single image) or -di (directory) must be provided, but not both." eynollah_ocr = Eynollah_ocr( @@ -528,10 +528,10 @@ def ocr(image, overwrite, dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, dir_in_bin=dir_in_bin, dir_out=out, dir_models=model, + model_name=model_name, tr_ocr=tr_ocr, export_textline_images_and_text=export_textline_images_and_text, do_not_mask_with_textline_contour=do_not_mask_with_textline_contour, - draw_texts_on_image=draw_texts_on_image, prediction_with_both_of_rgb_and_bin=prediction_with_both_of_rgb_and_bin, batch_size=batch_size, pref_of_dataset=dataset_abbrevation, diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 30e180d..ec2900f 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5171,6 +5171,7 @@ class Eynollah_ocr: def __init__( self, dir_models, + model_name=None, dir_xmls=None, dir_in=None, image_filename=None, @@ -5181,7 +5182,6 @@ class Eynollah_ocr: batch_size=None, export_textline_images_and_text=False, do_not_mask_with_textline_contour=False, - draw_texts_on_image=False, prediction_with_both_of_rgb_and_bin=False, pref_of_dataset=None, min_conf_value_of_textline_text : Optional[float]=None, @@ -5193,10 +5193,10 @@ class Eynollah_ocr: self.dir_out = dir_out self.dir_xmls = dir_xmls self.dir_models = dir_models + self.model_name = model_name self.tr_ocr = tr_ocr self.export_textline_images_and_text = export_textline_images_and_text self.do_not_mask_with_textline_contour = do_not_mask_with_textline_contour - self.draw_texts_on_image = draw_texts_on_image self.dir_out_image_text = dir_out_image_text self.prediction_with_both_of_rgb_and_bin = prediction_with_both_of_rgb_and_bin self.pref_of_dataset = pref_of_dataset @@ -5210,7 +5210,10 @@ class Eynollah_ocr: if tr_ocr: self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed") self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - self.model_ocr_dir = dir_models + "/trocr_model_ens_of_3_checkpoints_201124" + if self.model_name: + self.model_ocr_dir = self.model_name + else: + self.model_ocr_dir = dir_models + "/trocr_model_ens_of_3_checkpoints_201124" self.model_ocr = VisionEncoderDecoderModel.from_pretrained(self.model_ocr_dir) self.model_ocr.to(self.device) if not batch_size: @@ -5219,7 +5222,10 @@ class Eynollah_ocr: self.b_s = int(batch_size) else: - self.model_ocr_dir = dir_models + "/model_step_45000_ocr"#"/model_eynollah_ocr_cnnrnn_20250805"# + if self.model_name: + self.model_ocr_dir = self.model_name + else: + self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250805" model_ocr = load_model(self.model_ocr_dir , compile=False) self.prediction_model = tf.keras.models.Model( @@ -5230,7 +5236,7 @@ class Eynollah_ocr: else: self.b_s = int(batch_size) - with open(os.path.join(self.model_ocr_dir, "characters_20250707_all_lang.txt"),"r") as config_file: + with open(os.path.join(self.model_ocr_dir, "characters_org.txt"),"r") as config_file: characters = json.load(config_file) AUTOTUNE = tf.data.AUTOTUNE @@ -5271,7 +5277,7 @@ class Eynollah_ocr: img = cv2.imread(dir_img) - if self.draw_texts_on_image: + if self.dir_out_image_text: out_image_with_text = os.path.join(self.dir_out_image_text, file_name+'.png') image_text = Image.new("RGB", (img.shape[1], img.shape[0]), "white") draw = ImageDraw.Draw(image_text) @@ -5306,7 +5312,7 @@ class Eynollah_ocr: textline_coords = np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) x,y,w,h = cv2.boundingRect(textline_coords) - if self.draw_texts_on_image: + if self.dir_out_image_text: total_bb_coordinates.append([x,y,w,h]) h2w_ratio = h/float(w) @@ -5363,7 +5369,7 @@ class Eynollah_ocr: unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) - if self.draw_texts_on_image: + if self.dir_out_image_text: font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! font = ImageFont.truetype(font_path, 40) @@ -5463,7 +5469,7 @@ class Eynollah_ocr: dir_img_bin = os.path.join(self.dir_in_bin, file_name+'.png') img_bin = cv2.imread(dir_img_bin) - if self.draw_texts_on_image: + if self.dir_out_image_text: out_image_with_text = os.path.join(self.dir_out_image_text, file_name+'.png') image_text = Image.new("RGB", (img.shape[1], img.shape[0]), "white") draw = ImageDraw.Draw(image_text) @@ -5508,7 +5514,7 @@ class Eynollah_ocr: if type_textregion=='drop-capital': angle_degrees = 0 - if self.draw_texts_on_image: + if self.dir_out_image_text: total_bb_coordinates.append([x,y,w,h]) w_scaled = w * image_height/float(h) @@ -5829,7 +5835,7 @@ class Eynollah_ocr: unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) - if self.draw_texts_on_image: + if self.dir_out_image_text: font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! font = ImageFont.truetype(font_path, 40) From 6a735daa606aa50e172c8cd6d82f18d94e8e9ea8 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Sun, 31 Aug 2025 23:30:54 +0200 Subject: [PATCH 183/492] Update README.md --- README.md | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 8a2c4a4..1adc3d7 100644 --- a/README.md +++ b/README.md @@ -118,10 +118,29 @@ eynollah binarization \ ``` ### OCR -Under development +The OCR module performs text recognition from images using two main families of pretrained models: CNN-RNN–based OCR and Transformer-based OCR. + +The command-line interface for ocr can be called like this: + +```sh +eynollah ocr \ + -m | --model_name \ + -i | -di \ + -dx \ + -o +``` ### Machine-based-reading-order -Under development +The machine-based reading-order module employs a pretrained model to identify the reading order from layouts represented in PAGE-XML files. + +The command-line interface for machine based reading order can be called like this: + +```sh +eynollah machine-based-reading-order \ + -m \ + -xml | -dx \ + -o +``` #### Use as OCR-D processor Eynollah ships with a CLI interface to be used as [OCR-D](https://ocr-d.de) [processor](https://ocr-d.de/en/spec/cli), From e15640aa8aa4a3dec9f694fcca82bde9c3f516d6 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 15 Sep 2025 13:36:58 +0200 Subject: [PATCH 184/492] new page extraction model integration --- src/eynollah/eynollah.py | 200 +++++++++++++++++++++++++++++++-------- 1 file changed, 160 insertions(+), 40 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index ec2900f..3288b75 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -285,7 +285,7 @@ class Eynollah: #"/eynollah-full-regions-1column_20210425" self.model_region_dir_fully_np = dir_models + "/modelens_full_lay_1__4_3_091124" #self.model_region_dir_fully = dir_models + "/eynollah-full-regions-3+column_20210425" - self.model_page_dir = dir_models + "/eynollah-page-extraction_20210425" + self.model_page_dir = dir_models + "/model_ens_page" self.model_region_dir_p_ens = dir_models + "/eynollah-main-regions-ensembled_20210425" self.model_region_dir_p_ens_light = dir_models + "/eynollah-main-regions_20220314" self.model_region_dir_p_ens_light_only_images_extraction = dir_models + "/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18" @@ -1591,11 +1591,11 @@ class Eynollah: self.logger.debug("enter extract_page") cont_page = [] if not self.ignore_page_extraction: - img = cv2.GaussianBlur(self.image, (5, 5), 0) + img = np.copy(self.image)#cv2.GaussianBlur(self.image, (5, 5), 0) img_page_prediction = self.do_prediction(False, img, self.model_page) imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY) _, thresh = cv2.threshold(imgray, 0, 255, 0) - thresh = cv2.dilate(thresh, KERNEL, iterations=3) + ##thresh = cv2.dilate(thresh, KERNEL, iterations=3) contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if len(contours)>0: @@ -1603,24 +1603,25 @@ class Eynollah: for j in range(len(contours))]) cnt = contours[np.argmax(cnt_size)] x, y, w, h = cv2.boundingRect(cnt) - if x <= 30: - w += x - x = 0 - if (self.image.shape[1] - (x + w)) <= 30: - w = w + (self.image.shape[1] - (x + w)) - if y <= 30: - h = h + y - y = 0 - if (self.image.shape[0] - (y + h)) <= 30: - h = h + (self.image.shape[0] - (y + h)) + #if x <= 30: + #w += x + #x = 0 + #if (self.image.shape[1] - (x + w)) <= 30: + #w = w + (self.image.shape[1] - (x + w)) + #if y <= 30: + #h = h + y + #y = 0 + #if (self.image.shape[0] - (y + h)) <= 30: + #h = h + (self.image.shape[0] - (y + h)) box = [x, y, w, h] else: box = [0, 0, img.shape[1], img.shape[0]] cropped_page, page_coord = crop_image_inside_box(box, self.image) - cont_page.append(np.array([[page_coord[2], page_coord[0]], - [page_coord[3], page_coord[0]], - [page_coord[3], page_coord[1]], - [page_coord[2], page_coord[1]]])) + cont_page = cnt + #cont_page.append(np.array([[page_coord[2], page_coord[0]], + #[page_coord[3], page_coord[0]], + #[page_coord[3], page_coord[1]], + #[page_coord[2], page_coord[1]]])) self.logger.debug("exit extract_page") else: box = [0, 0, self.image.shape[1], self.image.shape[0]] @@ -3063,10 +3064,20 @@ class Eynollah: if self.plotter: self.plotter.save_page_image(image_page) - + + mask_page = np.zeros((text_regions_p_1.shape[0], text_regions_p_1.shape[1])).astype(np.int8) + mask_page = cv2.fillPoly(mask_page, pts=[cont_page], color=(1,)) + + text_regions_p_1[mask_page==0] = 0 + textline_mask_tot_ea[mask_page==0] = 0 + text_regions_p_1 = text_regions_p_1[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] textline_mask_tot_ea = textline_mask_tot_ea[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] img_bin_light = img_bin_light[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + + ###text_regions_p_1 = text_regions_p_1[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + ###textline_mask_tot_ea = textline_mask_tot_ea[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] + ###img_bin_light = img_bin_light[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] mask_images = (text_regions_p_1[:, :] == 2) * 1 mask_images = mask_images.astype(np.uint8) @@ -5299,8 +5310,12 @@ class Eynollah_ocr: cropped_lines = [] cropped_lines_region_indexer = [] cropped_lines_meging_indexing = [] + + extracted_texts = [] indexer_text_region = 0 + indexer_b_s = 0 + for nn in root1.iter(region_tags): for child_textregion in nn: if child_textregion.tag.endswith("TextLine"): @@ -5325,40 +5340,105 @@ class Eynollah_ocr: img_crop = img_poly_on_img[y:y+h, x:x+w, :] img_crop[mask_poly==0] = 255 + if h2w_ratio > 0.1: cropped_lines.append(resize_image(img_crop, tr_ocr_input_height_and_width, tr_ocr_input_height_and_width) ) cropped_lines_meging_indexing.append(0) + indexer_b_s+=1 + if indexer_b_s==self.b_s: + imgs = cropped_lines[:] + cropped_lines = [] + indexer_b_s = 0 + + pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values + generated_ids_merged = self.model_ocr.generate(pixel_values_merged.to(self.device)) + generated_text_merged = self.processor.batch_decode(generated_ids_merged, skip_special_tokens=True) + + extracted_texts = extracted_texts + generated_text_merged + else: splited_images, _ = return_textlines_split_if_needed(img_crop, None) #print(splited_images) if splited_images: cropped_lines.append(resize_image(splited_images[0], tr_ocr_input_height_and_width, tr_ocr_input_height_and_width)) cropped_lines_meging_indexing.append(1) + indexer_b_s+=1 + + if indexer_b_s==self.b_s: + imgs = cropped_lines[:] + cropped_lines = [] + indexer_b_s = 0 + + pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values + generated_ids_merged = self.model_ocr.generate(pixel_values_merged.to(self.device)) + generated_text_merged = self.processor.batch_decode(generated_ids_merged, skip_special_tokens=True) + + extracted_texts = extracted_texts + generated_text_merged + + cropped_lines.append(resize_image(splited_images[1], tr_ocr_input_height_and_width, tr_ocr_input_height_and_width)) cropped_lines_meging_indexing.append(-1) + indexer_b_s+=1 + + if indexer_b_s==self.b_s: + imgs = cropped_lines[:] + cropped_lines = [] + indexer_b_s = 0 + + pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values + generated_ids_merged = self.model_ocr.generate(pixel_values_merged.to(self.device)) + generated_text_merged = self.processor.batch_decode(generated_ids_merged, skip_special_tokens=True) + + extracted_texts = extracted_texts + generated_text_merged + else: cropped_lines.append(img_crop) cropped_lines_meging_indexing.append(0) + indexer_b_s+=1 + + if indexer_b_s==self.b_s: + imgs = cropped_lines[:] + cropped_lines = [] + indexer_b_s = 0 + + pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values + generated_ids_merged = self.model_ocr.generate(pixel_values_merged.to(self.device)) + generated_text_merged = self.processor.batch_decode(generated_ids_merged, skip_special_tokens=True) + + extracted_texts = extracted_texts + generated_text_merged + + + indexer_text_region = indexer_text_region +1 - - extracted_texts = [] - n_iterations = math.ceil(len(cropped_lines) / self.b_s) - - for i in range(n_iterations): - if i==(n_iterations-1): - n_start = i*self.b_s - imgs = cropped_lines[n_start:] - else: - n_start = i*self.b_s - n_end = (i+1)*self.b_s - imgs = cropped_lines[n_start:n_end] + if indexer_b_s!=0: + imgs = cropped_lines[:] + cropped_lines = [] + indexer_b_s = 0 + pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values generated_ids_merged = self.model_ocr.generate(pixel_values_merged.to(self.device)) generated_text_merged = self.processor.batch_decode(generated_ids_merged, skip_special_tokens=True) extracted_texts = extracted_texts + generated_text_merged + ####extracted_texts = [] + ####n_iterations = math.ceil(len(cropped_lines) / self.b_s) + + ####for i in range(n_iterations): + ####if i==(n_iterations-1): + ####n_start = i*self.b_s + ####imgs = cropped_lines[n_start:] + ####else: + ####n_start = i*self.b_s + ####n_end = (i+1)*self.b_s + ####imgs = cropped_lines[n_start:n_end] + ####pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values + ####generated_ids_merged = self.model_ocr.generate(pixel_values_merged.to(self.device)) + ####generated_text_merged = self.processor.batch_decode(generated_ids_merged, skip_special_tokens=True) + + ####extracted_texts = extracted_texts + generated_text_merged + del cropped_lines gc.collect() @@ -5409,31 +5489,71 @@ class Eynollah_ocr: #print(time.time() - t0 ,'elapsed time') - indexer = 0 indexer_textregion = 0 for nn in root1.iter(region_tags): - text_subelement_textregion = ET.SubElement(nn, 'TextEquiv') - unicode_textregion = ET.SubElement(text_subelement_textregion, 'Unicode') + #id_textregion = nn.attrib['id'] + #id_textregions.append(id_textregion) + #textregions_by_existing_ids.append(text_by_textregion[indexer_textregion]) + + is_textregion_text = False + for childtest in nn: + if childtest.tag.endswith("TextEquiv"): + is_textregion_text = True + + if not is_textregion_text: + text_subelement_textregion = ET.SubElement(nn, 'TextEquiv') + unicode_textregion = ET.SubElement(text_subelement_textregion, 'Unicode') has_textline = False for child_textregion in nn: if child_textregion.tag.endswith("TextLine"): - text_subelement = ET.SubElement(child_textregion, 'TextEquiv') - unicode_textline = ET.SubElement(text_subelement, 'Unicode') - unicode_textline.text = extracted_texts_merged[indexer] + + is_textline_text = False + for childtest2 in child_textregion: + if childtest2.tag.endswith("TextEquiv"): + is_textline_text = True + + + if not is_textline_text: + text_subelement = ET.SubElement(child_textregion, 'TextEquiv') + ##text_subelement.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") + unicode_textline = ET.SubElement(text_subelement, 'Unicode') + unicode_textline.text = extracted_texts_merged[indexer] + else: + for childtest3 in child_textregion: + if childtest3.tag.endswith("TextEquiv"): + for child_uc in childtest3: + if child_uc.tag.endswith("Unicode"): + ##childtest3.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") + child_uc.text = extracted_texts_merged[indexer] + indexer = indexer + 1 has_textline = True if has_textline: - unicode_textregion.text = text_by_textregion[indexer_textregion] + if is_textregion_text: + for child4 in nn: + if child4.tag.endswith("TextEquiv"): + for childtr_uc in child4: + if childtr_uc.tag.endswith("Unicode"): + childtr_uc.text = text_by_textregion[indexer_textregion] + else: + unicode_textregion.text = text_by_textregion[indexer_textregion] indexer_textregion = indexer_textregion + 1 - - + ###sample_order = [(id_to_order[tid], text) for tid, text in zip(id_textregions, textregions_by_existing_ids) if tid in id_to_order] + + ##ordered_texts_sample = [text for _, text in sorted(sample_order)] + ##tot_page_text = ' '.join(ordered_texts_sample) + + ##for page_element in root1.iter(link+'Page'): + ##text_page = ET.SubElement(page_element, 'TextEquiv') + ##unicode_textpage = ET.SubElement(text_page, 'Unicode') + ##unicode_textpage.text = tot_page_text + ET.register_namespace("",name_space) tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf8",default_namespace=None) - #print("Job done in %.1fs", time.time() - t0) else: ###max_len = 280#512#280#512 ###padding_token = 1500#299#1500#299 From 0711166524fec03ccb91f564d688d127c287d75e Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 1 Sep 2025 11:37:22 +0200 Subject: [PATCH 185/492] changed the drop capitals bonding box to contour ratio threshold --- src/eynollah/utils/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index ca86047..05397d0 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -851,7 +851,8 @@ def putt_bb_of_drop_capitals_of_model_in_patches_in_layout(layout_in_patch, drop all_drop_capital_pixels = np.sum(mask_of_drop_cpaital_in_early_layout==1) percent_text_to_all_in_drop = all_drop_capital_pixels_which_is_text_in_early_lo / float(all_drop_capital_pixels) - if (areas_cnt_text[jj] * float(drop_only.shape[0] * drop_only.shape[1]) / float(w * h) > 0.6 and + + if (areas_cnt_text[jj] * float(drop_only.shape[0] * drop_only.shape[1]) / float(w * h) > 0.7 and percent_text_to_all_in_drop >= 0.3): layout_in_patch[box0] = drop_capital_label else: From 68a71be8bc77567984131dc5e16a733209bf32f2 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Sat, 13 Sep 2025 22:40:11 +0200 Subject: [PATCH 186/492] Running inference on files in a directory --- train/inference.py | 86 +++++++++++++++++++++++++++++++++------------- 1 file changed, 62 insertions(+), 24 deletions(-) diff --git a/train/inference.py b/train/inference.py index aecd0e6..094c528 100644 --- a/train/inference.py +++ b/train/inference.py @@ -28,8 +28,9 @@ Tool to load model and predict for given image. """ class sbb_predict: - def __init__(self,image, model, task, config_params_model, patches, save, save_layout, ground_truth, xml_file, out, min_area): + def __init__(self,image, dir_in, model, task, config_params_model, patches, save, save_layout, ground_truth, xml_file, out, min_area): self.image=image + self.dir_in=dir_in self.patches=patches self.save=save self.save_layout=save_layout @@ -223,11 +224,10 @@ class sbb_predict: return added_image, layout_only - def predict(self): - self.start_new_session_and_model() + def predict(self, image_dir): if self.task == 'classification': classes_names = self.config_params_model['classification_classes_name'] - img_1ch = img=cv2.imread(self.image, 0) + img_1ch = img=cv2.imread(image_dir, 0) img_1ch = img_1ch / 255.0 img_1ch = cv2.resize(img_1ch, (self.config_params_model['input_height'], self.config_params_model['input_width']), interpolation=cv2.INTER_NEAREST) @@ -438,7 +438,7 @@ class sbb_predict: if self.patches: #def textline_contours(img,input_width,input_height,n_classes,model): - img=cv2.imread(self.image) + img=cv2.imread(image_dir) self.img_org = np.copy(img) if img.shape[0] < self.img_height: @@ -529,7 +529,7 @@ class sbb_predict: else: - img=cv2.imread(self.image) + img=cv2.imread(image_dir) self.img_org = np.copy(img) width=self.img_width @@ -557,22 +557,50 @@ class sbb_predict: def run(self): - res=self.predict() - if (self.task == 'classification' or self.task == 'reading_order'): - pass - elif self.task == 'enhancement': - if self.save: - cv2.imwrite(self.save,res) + self.start_new_session_and_model() + if self.image: + res=self.predict(image_dir = self.image) + + if (self.task == 'classification' or self.task == 'reading_order'): + pass + elif self.task == 'enhancement': + if self.save: + cv2.imwrite(self.save,res) + else: + img_seg_overlayed, only_layout = self.visualize_model_output(res, self.img_org, self.task) + if self.save: + cv2.imwrite(self.save,img_seg_overlayed) + if self.save_layout: + cv2.imwrite(self.save_layout, only_layout) + + if self.ground_truth: + gt_img=cv2.imread(self.ground_truth) + self.IoU(gt_img[:,:,0],res[:,:,0]) + else: - img_seg_overlayed, only_layout = self.visualize_model_output(res, self.img_org, self.task) - if self.save: - cv2.imwrite(self.save,img_seg_overlayed) - if self.save_layout: - cv2.imwrite(self.save_layout, only_layout) + ls_images = os.listdir(self.dir_in) + for ind_image in ls_images: + f_name = ind_image.split('.')[0] + image_dir = os.path.join(self.dir_in, ind_image) + res=self.predict(image_dir) - if self.ground_truth: - gt_img=cv2.imread(self.ground_truth) - self.IoU(gt_img[:,:,0],res[:,:,0]) + if (self.task == 'classification' or self.task == 'reading_order'): + pass + elif self.task == 'enhancement': + self.save = os.path.join(self.out, f_name+'.png') + cv2.imwrite(self.save,res) + else: + img_seg_overlayed, only_layout = self.visualize_model_output(res, self.img_org, self.task) + self.save = os.path.join(self.out, f_name+'_overlayed.png') + cv2.imwrite(self.save,img_seg_overlayed) + self.save_layout = os.path.join(self.out, f_name+'_layout.png') + cv2.imwrite(self.save_layout, only_layout) + + if self.ground_truth: + gt_img=cv2.imread(self.ground_truth) + self.IoU(gt_img[:,:,0],res[:,:,0]) + + @click.command() @click.option( @@ -581,6 +609,12 @@ class sbb_predict: help="image filename", type=click.Path(exists=True, dir_okay=False), ) +@click.option( + "--dir_in", + "-di", + help="directory of images", + type=click.Path(exists=True, file_okay=False), +) @click.option( "--out", "-o", @@ -626,15 +660,19 @@ class sbb_predict: "-min", help="min area size of regions considered for reading order detection. The default value is zero and means that all text regions are considered for reading order.", ) -def main(image, model, patches, save, save_layout, ground_truth, xml_file, out, min_area): +def main(image, dir_in, model, patches, save, save_layout, ground_truth, xml_file, out, min_area): + assert image or dir_in, "Either a single image -i or a dir_in -di is required" with open(os.path.join(model,'config.json')) as f: config_params_model = json.load(f) task = config_params_model['task'] if (task != 'classification' and task != 'reading_order'): - if not save: - print("Error: You used one of segmentation or binarization task but not set -s, you need a filename to save visualized output with -s") + if image and not save: + print("Error: You used one of segmentation or binarization task with image input but not set -s, you need a filename to save visualized output with -s") sys.exit(1) - x=sbb_predict(image, model, task, config_params_model, patches, save, save_layout, ground_truth, xml_file, out, min_area) + if dir_in and not out: + print("Error: You used one of segmentation or binarization task with dir_in but not set -out") + sys.exit(1) + x=sbb_predict(image, dir_in, model, task, config_params_model, patches, save, save_layout, ground_truth, xml_file, out, min_area) x.run() if __name__=="__main__": From 542646791ded9d40ef238dfae595c40ac2a6adcc Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 3 Sep 2025 19:18:11 +0200 Subject: [PATCH 187/492] For TrOCR, the cropped text lines will no longer be added to a list before prediction. Instead, for each batch size, the text line images will be collected and predictions will be made directly on them. --- src/eynollah/utils/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 05397d0..ca86047 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -851,8 +851,7 @@ def putt_bb_of_drop_capitals_of_model_in_patches_in_layout(layout_in_patch, drop all_drop_capital_pixels = np.sum(mask_of_drop_cpaital_in_early_layout==1) percent_text_to_all_in_drop = all_drop_capital_pixels_which_is_text_in_early_lo / float(all_drop_capital_pixels) - - if (areas_cnt_text[jj] * float(drop_only.shape[0] * drop_only.shape[1]) / float(w * h) > 0.7 and + if (areas_cnt_text[jj] * float(drop_only.shape[0] * drop_only.shape[1]) / float(w * h) > 0.6 and percent_text_to_all_in_drop >= 0.3): layout_in_patch[box0] = drop_capital_label else: From 310679eeb8c97562cbcd0da6462356ec1d58aa8f Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Tue, 16 Sep 2025 14:27:15 +0200 Subject: [PATCH 188/492] page extraction model name is changed --- src/eynollah/eynollah.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 3288b75..7ef2361 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -285,7 +285,7 @@ class Eynollah: #"/eynollah-full-regions-1column_20210425" self.model_region_dir_fully_np = dir_models + "/modelens_full_lay_1__4_3_091124" #self.model_region_dir_fully = dir_models + "/eynollah-full-regions-3+column_20210425" - self.model_page_dir = dir_models + "/model_ens_page" + self.model_page_dir = dir_models + "/model_eynollah_page_extraction_20250915" self.model_region_dir_p_ens = dir_models + "/eynollah-main-regions-ensembled_20210425" self.model_region_dir_p_ens_light = dir_models + "/eynollah-main-regions_20220314" self.model_region_dir_p_ens_light_only_images_extraction = dir_models + "/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18" From c64d1026136161ee9c2ea71e9cb996390531c9de Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 18 Sep 2025 13:07:41 +0200 Subject: [PATCH 189/492] move logging to CLI and make initialization optional --- .gitignore | 1 + src/eynollah/cli.py | 54 +++++++++++++++++++++++++++++++++++++--- src/eynollah/eynollah.py | 13 +--------- 3 files changed, 52 insertions(+), 16 deletions(-) diff --git a/.gitignore b/.gitignore index 5236dde..0d5d834 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ models_eynollah* output.html /build /dist +*.tif diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index c189aca..b980e16 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -1,5 +1,6 @@ import sys import click +import logging from ocrd_utils import initLogging, getLevelName, getLogger from eynollah.eynollah import Eynollah, Eynollah_ocr from eynollah.sbb_binarize import SbbBinarizer @@ -241,15 +242,61 @@ def binarization(patches, model_dir, input_image, output_image, dir_in, dir_out) is_flag=True, help="if this parameter set to true, this tool will ignore layout detection and reading order. It means that textline detection will be done within printspace and contours of textline will be written in xml output file.", ) +# TODO move to top-level CLI context @click.option( "--log_level", "-l", type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), - help="Override log level globally to this", + help="Override 'eynollah' log level globally to this", +) +# +@click.option( + "--setup-logging", + is_flag=True, + help="Setup a basic console logger", ) -def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_deskewed, save_all, extract_only_images, save_page, enable_plotting, allow_enhancement, curved_line, textline_light, full_layout, tables, right2left, input_binary, allow_scaling, headers_off, light_version, reading_order_machine_based, do_ocr, num_col_upper, num_col_lower, skip_layout_and_reading_order, ignore_page_extraction, log_level): - initLogging() +def layout( + image, + out, + overwrite, + dir_in, + model, + save_images, + save_layout, + save_deskewed, + save_all, + extract_only_images, + save_page, + enable_plotting, + allow_enhancement, + curved_line, + textline_light, + full_layout, + tables, + right2left, + input_binary, + allow_scaling, + headers_off, + light_version, + reading_order_machine_based, + do_ocr, + num_col_upper, + num_col_lower, + skip_layout_and_reading_order, + ignore_page_extraction, + log_level, + setup_logging +): + if setup_logging: + console_handler = logging.StreamHandler(sys.stdout) + console_handler.setLevel(logging.INFO) + formatter = logging.Formatter('%(message)s') + console_handler.setFormatter(formatter) + getLogger('eynollah').addHandler(console_handler) + getLogger('eynollah').setLevel(logging.INFO) + else: + initLogging() if log_level: getLogger('eynollah').setLevel(getLevelName(log_level)) assert enable_plotting or not save_layout, "Plotting with -sl also requires -ep" @@ -273,7 +320,6 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ assert image or dir_in, "Either a single image -i or a dir_in -di is required" eynollah = Eynollah( model, - logger=getLogger('eynollah'), dir_out=out, dir_of_cropped_images=save_images, extract_only_images=extract_only_images, diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index d9939ca..e80b8d0 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -6,7 +6,6 @@ document layout analysis (segmentation) with output in PAGE-XML """ -from logging import Logger from difflib import SequenceMatcher as sq from PIL import Image, ImageDraw, ImageFont import math @@ -201,18 +200,8 @@ class Eynollah: num_col_upper : Optional[int] = None, num_col_lower : Optional[int] = None, skip_layout_and_reading_order : bool = False, - logger : Optional[Logger] = None, ): - if logger: - self.logger = logger - else: - self.logger = getLogger('eynollah') - console_handler = logging.StreamHandler(sys.stdout) - console_handler.setLevel(logging.INFO) - formatter = logging.Formatter('%(message)s') - console_handler.setFormatter(formatter) - self.logger.addHandler(console_handler) - self.logger.setLevel(logging.INFO) + self.logger = getLogger('eynollah') if skip_layout_and_reading_order: textline_light = True From 146102842aac15275647e2e565e5e2549b3ba1fd Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 18 Sep 2025 13:15:18 +0200 Subject: [PATCH 190/492] convert all print stmts to logger.info calls --- src/eynollah/eynollah.py | 223 ++++++++++----------------------------- 1 file changed, 56 insertions(+), 167 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index e80b8d0..39476e2 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -312,9 +312,7 @@ class Eynollah: except: self.logger.warning("no GPU device available") - msg = "Loading models..." - print(msg) - self.logger.info(msg) + self.logger.info("Loading models...") self.model_page = self.our_load_model(self.model_page_dir) self.model_classifier = self.our_load_model(self.model_dir_of_col_classifier) @@ -343,9 +341,7 @@ class Eynollah: if self.tables: self.model_table = self.our_load_model(self.model_table_dir) - msg = f"Model initialization complete ({time.time() - t_start:.1f}s)" - print(msg) - self.logger.info(msg) + self.logger.info(f"Model initialization complete ({time.time() - t_start:.1f}s)") def cache_images(self, image_filename=None, image_pil=None, dpi=None): ret = {} @@ -3453,7 +3449,7 @@ class Eynollah: peaks_real, _ = find_peaks(sum_smoothed, height=0) if len(peaks_real)>70: - print(len(peaks_real), 'len(peaks_real)') + self.logger.debug(f'len(peaks_real) = {len(peaks_real)}') peaks_real = peaks_real[(peaks_realwidth1)] @@ -4302,14 +4298,11 @@ class Eynollah: if dir_in: self.logger.info("All jobs done in %.1fs", time.time() - t0_tot) - print("all Job done in %.1fs", time.time() - t0_tot) def run_single(self): t0 = time.time() - msg = f"Processing file: {self.writer.image_filename}" - print(msg) - self.logger.info(msg) + self.logger.info(f"Processing file: {self.writer.image_filename}") # Log enabled features directly enabled_modes = [] @@ -4325,35 +4318,23 @@ class Eynollah: enabled_modes.append("Table detection") if enabled_modes: - msg = "Enabled modes: " + ", ".join(enabled_modes) - print(msg) - self.logger.info(msg) + self.logger.info("Enabled modes: " + ", ".join(enabled_modes)) - msg = "Step 1/5: Image Enhancement" - print(msg) - self.logger.info(msg) + self.logger.info("Step 1/5: Image Enhancement") img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement(self.light_version) - msg = f"Image: {self.image.shape[1]}x{self.image.shape[0]}, {self.dpi} DPI, {num_col_classifier} columns" - print(msg) - self.logger.info(msg) + self.logger.info(f"Image: {self.image.shape[1]}x{self.image.shape[0]}, {self.dpi} DPI, {num_col_classifier} columns") if is_image_enhanced: - msg = "Enhancement applied" - print(msg) - self.logger.info(msg) + self.logger.info("Enhancement applied") - msg = f"Enhancement complete ({time.time() - t0:.1f}s)" - print(msg) - self.logger.info(msg) + self.logger.info(f"Enhancement complete ({time.time() - t0:.1f}s)") # Image Extraction Mode if self.extract_only_images: - msg = "Step 2/5: Image Extraction Mode" - print(msg) - self.logger.info(msg) + self.logger.info("Step 2/5: Image Extraction Mode") text_regions_p_1, erosion_hurts, polygons_lines_xml, polygons_of_images, image_page, page_coord, cont_page = \ self.get_regions_light_v_extract_only_images(img_res, is_image_enhanced, num_col_classifier) @@ -4367,19 +4348,13 @@ class Eynollah: if self.plotter: self.plotter.write_images_into_directory(polygons_of_images, image_page) - msg = "Image extraction complete" - print(msg) - self.logger.info(msg) + self.logger.info("Image extraction complete") return pcgts # Basic Processing Mode if self.skip_layout_and_reading_order: - msg = "Step 2/5: Basic Processing Mode" - print(msg) - self.logger.info(msg) - msg = "Skipping layout analysis and reading order detection" - print(msg) - self.logger.info(msg) + self.logger.info("Step 2/5: Basic Processing Mode") + self.logger.info("Skipping layout analysis and reading order detection") _ ,_, _, textline_mask_tot_ea, img_bin_light, _ = \ self.get_regions_light_v(img_res, is_image_enhanced, num_col_classifier, @@ -4421,21 +4396,15 @@ class Eynollah: all_found_textline_polygons, page_coord, polygons_of_images, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines, conf_contours_textregions) - msg = "Basic processing complete" - print(msg) - self.logger.info(msg) + self.logger.info("Basic processing complete") return pcgts #print("text region early -1 in %.1fs", time.time() - t0) t1 = time.time() - msg = "Step 2/5: Layout Analysis" - print(msg) - self.logger.info(msg) + self.logger.info("Step 2/5: Layout Analysis") if self.light_version: - msg = "Using light version processing" - print(msg) - self.logger.info(msg) + self.logger.info("Using light version processing") text_regions_p_1 ,erosion_hurts, polygons_lines_xml, textline_mask_tot_ea, img_bin_light, confidence_matrix = \ self.get_regions_light_v(img_res, is_image_enhanced, num_col_classifier) #print("text region early -2 in %.1fs", time.time() - t0) @@ -4466,29 +4435,21 @@ class Eynollah: text_regions_p_1 ,erosion_hurts, polygons_lines_xml = \ self.get_regions_from_xy_2models(img_res, is_image_enhanced, num_col_classifier) - msg = f"Textregion detection took {time.time() - t1:.1f}s" - print(msg) - self.logger.info(msg) + self.logger.info(f"Textregion detection took {time.time() - t1:.1f}s") confidence_matrix = np.zeros((text_regions_p_1.shape[:2])) t1 = time.time() num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, \ text_regions_p_1, cont_page, table_prediction = \ self.run_graphics_and_columns(text_regions_p_1, num_col_classifier, num_column_is_classified, erosion_hurts) - msg = f"Graphics detection took {time.time() - t1:.1f}s" - print(msg) - self.logger.info(msg) + self.logger.info(f"Graphics detection took {time.time() - t1:.1f}s") #self.logger.info('cont_page %s', cont_page) #plt.imshow(table_prediction) #plt.show() - msg = f"Layout analysis complete ({time.time() - t1:.1f}s)" - print(msg) - self.logger.info(msg) + self.logger.info(f"Layout analysis complete ({time.time() - t1:.1f}s)") if not num_col: - msg = "No columns detected - generating empty PAGE-XML" - print(msg) - self.logger.info(msg) + self.logger.info("No columns detected - generating empty PAGE-XML") ocr_all_textlines = None pcgts = self.writer.build_pagexml_no_full_layout( @@ -4500,18 +4461,12 @@ class Eynollah: t1 = time.time() if not self.light_version: textline_mask_tot_ea = self.run_textline(image_page) - msg = f"Textline detection took {time.time() - t1:.1f}s" - print(msg) - self.logger.info(msg) + self.logger.info(f"Textline detection took {time.time() - t1:.1f}s") t1 = time.time() slope_deskew, slope_first = self.run_deskew(textline_mask_tot_ea) if np.abs(slope_deskew) > 0.01: # Only log if there is significant skew - msg = f"Applied deskew correction: {slope_deskew:.2f} degrees" - print(msg) - self.logger.info(msg) - msg = f"Deskewing took {time.time() - t1:.1f}s" - print(msg) - self.logger.info(msg) + self.logger.info(f"Applied deskew correction: {slope_deskew:.2f} degrees") + self.logger.info(f"Deskewing took {time.time() - t1:.1f}s") elif num_col_classifier in (1,2): org_h_l_m = textline_mask_tot_ea.shape[0] org_w_l_m = textline_mask_tot_ea.shape[1] @@ -4532,18 +4487,12 @@ class Eynollah: self.run_marginals(image_page, textline_mask_tot_ea, mask_images, mask_lines, num_col_classifier, slope_deskew, text_regions_p_1, table_prediction) - msg = "Step 3/5: Text Line Detection" - print(msg) - self.logger.info(msg) + self.logger.info("Step 3/5: Text Line Detection") if self.curved_line: - msg = "Mode: Curved line detection" - print(msg) - self.logger.info(msg) + self.logger.info("Mode: Curved line detection") elif self.textline_light: - msg = "Mode: Light detection" - print(msg) - self.logger.info(msg) + self.logger.info("Mode: Light detection") if self.light_version and num_col_classifier in (1,2): image_page = resize_image(image_page,org_h_l_m, org_w_l_m ) @@ -4554,9 +4503,7 @@ class Eynollah: table_prediction = resize_image(table_prediction,org_h_l_m, org_w_l_m ) image_page_rotated = resize_image(image_page_rotated,org_h_l_m, org_w_l_m ) - msg = f"Detection of marginals took {time.time() - t1:.1f}s" - print(msg) - self.logger.info(msg) + self.logger.info(f"Detection of marginals took {time.time() - t1:.1f}s") #print("text region early 2 marginal in %.1fs", time.time() - t0) ## birdan sora chock chakir t1 = time.time() @@ -4655,9 +4602,7 @@ class Eynollah: cx_bigest_d_big[0] = cx_bigest_d[ind_largest] cy_biggest_d_big[0] = cy_biggest_d[ind_largest] except Exception as why: - msg = str(why) - print(f"Error: {msg}") - self.logger.error(msg) + self.logger.error(str(why)) (h, w) = text_only.shape[:2] center = (w // 2.0, h // 2.0) @@ -4875,22 +4820,14 @@ class Eynollah: t_order = time.time() if self.full_layout: - msg = "Step 4/5: Reading Order Detection" - print(msg) - self.logger.info(msg) + self.logger.info(ep 4/5: Reading Order Detection") if self.reading_order_machine_based: - msg = "Using machine-based detection" - print(msg) - self.logger.info(msg) + self.logger.info("Using machine-based detection") if self.right2left: - msg = "Right-to-left mode enabled" - print(msg) - self.logger.info(msg) + self.logger.info("Right-to-left mode enabled") if self.headers_off: - msg = "Headers ignored in reading order" - print(msg) - self.logger.info(msg) + self.logger.info("Headers ignored in reading order") if self.reading_order_machine_based: order_text_new, id_of_texts_tot = self.do_order_of_regions_with_model( @@ -4902,31 +4839,21 @@ class Eynollah: else: order_text_new, id_of_texts_tot = self.do_order_of_regions( contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered, boxes_d, textline_mask_tot_d) - msg = f"Detection of reading order took {time.time() - t_order:.1f}s" - print(msg) - self.logger.info(msg) + self.logger.info(f"Detection of reading order took {time.time() - t_order:.1f}s") if self.ocr: - msg = "Step 4.5/5: OCR Processing" - print(msg) - self.logger.info(msg) + self.logger.info("Step 4.5/5: OCR Processing") if torch.cuda.is_available(): - msg = "Using GPU acceleration" - print(msg) - self.logger.info(msg) + self.logger.info("Using GPU acceleration") else: - msg = "Using CPU processing" - print(msg) - self.logger.info(msg) + self.logger.info("Using CPU processing") ocr_all_textlines = [] else: ocr_all_textlines = None - msg = "Step 5/5: Output Generation" - print(msg) - self.logger.info(msg) + self.logger.info("Step 5/5: Output Generation") output_config = [] if self.enable_plotting: @@ -4963,22 +4890,14 @@ class Eynollah: return pcgts contours_only_text_parent_h = None - msg = "Step 4/5: Reading Order Detection" - print(msg) - self.logger.info(msg) + self.logger.info("Step 4/5: Reading Order Detection") if self.reading_order_machine_based: - msg = "Using machine-based detection" - print(msg) - self.logger.info(msg) + self.logger.info("Using machine-based detection") if self.right2left: - msg = "Right-to-left mode enabled" - print(msg) - self.logger.info(msg) + self.logger.info("Right-to-left mode enabled") if self.headers_off: - msg = "Headers ignored in reading order" - print(msg) - self.logger.info(msg) + self.logger.info("Headers ignored in reading order") if self.reading_order_machine_based: order_text_new, id_of_texts_tot = self.do_order_of_regions_with_model( @@ -5000,32 +4919,20 @@ class Eynollah: contours_only_text_parent_d_ordered, contours_only_text_parent_h, boxes_d, textline_mask_tot_d) if self.ocr: - msg = "Step 4.5/5: OCR Processing" - print(msg) - self.logger.info(msg) + self.logger.info("Step 4.5/5: OCR Processing") if torch.cuda.is_available(): - msg = "Using GPU acceleration" - print(msg) - self.logger.info(msg) + self.logger.info("Using GPU acceleration") else: - msg = "Using CPU processing" - print(msg) - self.logger.info(msg) + self.logger.info("Using CPU processing") if self.light_version: - msg = "Using light version OCR" - print(msg) - self.logger.info(msg) + self.logger.info("Using light version OCR") if self.textline_light: - msg = "Using light text line detection for OCR" - print(msg) - self.logger.info(msg) + self.logger.info("Using light text line detection for OCR") - msg = "Processing text lines..." - print(msg) - self.logger.info(msg) + self.logger.info("Processing text lines...") device = cuda.get_current_device() device.reset() @@ -5077,37 +4984,23 @@ class Eynollah: else: ocr_all_textlines = None #print(ocr_all_textlines) - msg = f"Detection of reading order took {time.time() - t_order:.1f}s" - print(msg) - self.logger.info(msg) + self.logger.info(f"Detection of reading order took {time.time() - t_order:.1f}s") - msg = "Step 5/5: Output Generation" - print(msg) - self.logger.info(msg) + self.logger.info("Step 5/5: Output Generation") - msg = "Generating PAGE-XML output" - print(msg) - self.logger.info(msg) + self.logger.info("Generating PAGE-XML output") if self.enable_plotting: - msg = "Saving debug plots" - print(msg) - self.logger.info(msg) + self.logger.info("Saving debug plots") if self.dir_of_cropped_images: - msg = f"Saving cropped images to: {self.dir_of_cropped_images}" - print(msg) - self.logger.info(msg) + self.logger.info(f"Saving cropped images to: {self.dir_of_cropped_images}") if self.dir_of_layout: - msg = f"Saving layout plots to: {self.dir_of_layout}" - print(msg) - self.logger.info(msg) + self.logger.info(f"Saving layout plots to: {self.dir_of_layout}") if self.dir_of_deskewed: - msg = f"Saving deskewed images to: {self.dir_of_deskewed}" - print(msg) - self.logger.info(msg) + self.logger.info(f"Saving deskewed images to: {self.dir_of_deskewed}") pcgts = self.writer.build_pagexml_no_full_layout( txt_con_org, page_coord, order_text_new, id_of_texts_tot, @@ -5115,13 +5008,9 @@ class Eynollah: all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines, conf_contours_textregions) - msg = f"\nProcessing completed in {time.time() - t0:.1f}s" - print(msg) - self.logger.info(msg) + self.logger.info(f"\nProcessing completed in {time.time() - t0:.1f}s") - msg = f"Output file: {self.writer.output_filename}" - print(msg) - self.logger.info(msg) + self.logger.info(f"Output file: {self.writer.output_filename}") return pcgts From 5c9cf8472bc3c39827d751db8d1562afe02b13c3 Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 18 Sep 2025 13:19:57 +0200 Subject: [PATCH 191/492] remove redundant/brittle interval logging --- src/eynollah/eynollah.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 39476e2..14dfbb3 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4293,7 +4293,6 @@ class Eynollah: pcgts = self.run_single() self.logger.info("Job done in %.1fs", time.time() - t0) - #print("Job done in %.1fs" % (time.time() - t0)) self.writer.write_pagexml(pcgts) if dir_in: @@ -4504,7 +4503,6 @@ class Eynollah: image_page_rotated = resize_image(image_page_rotated,org_h_l_m, org_w_l_m ) self.logger.info(f"Detection of marginals took {time.time() - t1:.1f}s") - #print("text region early 2 marginal in %.1fs", time.time() - t0) ## birdan sora chock chakir t1 = time.time() if not self.full_layout: @@ -5008,8 +5006,6 @@ class Eynollah: all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines, conf_contours_textregions) - self.logger.info(f"\nProcessing completed in {time.time() - t0:.1f}s") - self.logger.info(f"Output file: {self.writer.output_filename}") return pcgts From 530897c6c2a9455d3c7713257f15351de8732b99 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 19 Sep 2025 13:20:26 +0200 Subject: [PATCH 192/492] renaming argument names --- train/generate_gt_for_training.py | 34 +++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/train/generate_gt_for_training.py b/train/generate_gt_for_training.py index 91ee2c8..7810cd7 100644 --- a/train/generate_gt_for_training.py +++ b/train/generate_gt_for_training.py @@ -157,6 +157,7 @@ def image_enhancement(dir_imgs, dir_out_images, dir_out_labels, scales): def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, input_height, input_width, min_area_size, min_area_early): xml_files_ind = os.listdir(dir_xml) + xml_files_ind = [ind_xml for ind_xml in xml_files_ind if ind_xml.endswith('.xml')] input_height = int(input_height) input_width = int(input_width) min_area = float(min_area_size) @@ -268,14 +269,14 @@ def machine_based_reading_order(dir_xml, dir_out_modal_image, dir_out_classes, i @click.option( "--dir_out", - "-do", + "-o", help="directory where plots will be written", type=click.Path(exists=True, file_okay=False), ) @click.option( "--dir_imgs", - "-dimg", + "-di", help="directory where the overlayed plots will be written", ) def visualize_reading_order(xml_file, dir_xml, dir_out, dir_imgs): @@ -283,6 +284,7 @@ def visualize_reading_order(xml_file, dir_xml, dir_out, dir_imgs): if dir_xml: xml_files_ind = os.listdir(dir_xml) + xml_files_ind = [ind_xml for ind_xml in xml_files_ind if ind_xml.endswith('.xml')] else: xml_files_ind = [xml_file] @@ -353,6 +355,12 @@ def visualize_reading_order(xml_file, dir_xml, dir_out, dir_imgs): @main.command() +@click.option( + "--xml_file", + "-xml", + help="xml filename", + type=click.Path(exists=True, dir_okay=False), +) @click.option( "--dir_xml", "-dx", @@ -362,18 +370,24 @@ def visualize_reading_order(xml_file, dir_xml, dir_out, dir_imgs): @click.option( "--dir_out", - "-do", + "-o", help="directory where plots will be written", type=click.Path(exists=True, file_okay=False), ) @click.option( "--dir_imgs", - "-dimg", + "-di", help="directory of images where textline segmentation will be overlayed", ) -def visualize_textline_segmentation(dir_xml, dir_out, dir_imgs): - xml_files_ind = os.listdir(dir_xml) +def visualize_textline_segmentation(xml_file, dir_xml, dir_out, dir_imgs): + assert xml_file or dir_xml, "A single xml file -xml or a dir of xml files -dx is required not both of them" + if dir_xml: + xml_files_ind = os.listdir(dir_xml) + xml_files_ind = [ind_xml for ind_xml in xml_files_ind if ind_xml.endswith('.xml')] + else: + xml_files_ind = [xml_file] + for ind_xml in tqdm(xml_files_ind): indexer = 0 #print(ind_xml) @@ -408,20 +422,21 @@ def visualize_textline_segmentation(dir_xml, dir_out, dir_imgs): @click.option( "--dir_out", - "-do", + "-o", help="directory where plots will be written", type=click.Path(exists=True, file_okay=False), ) @click.option( "--dir_imgs", - "-dimg", + "-di", help="directory of images where textline segmentation will be overlayed", ) def visualize_layout_segmentation(xml_file, dir_xml, dir_out, dir_imgs): assert xml_file or dir_xml, "A single xml file -xml or a dir of xml files -dx is required not both of them" if dir_xml: xml_files_ind = os.listdir(dir_xml) + xml_files_ind = [ind_xml for ind_xml in xml_files_ind if ind_xml.endswith('.xml')] else: xml_files_ind = [xml_file] @@ -466,7 +481,7 @@ def visualize_layout_segmentation(xml_file, dir_xml, dir_out, dir_imgs): @click.option( "--dir_out", - "-do", + "-o", help="directory where plots will be written", type=click.Path(exists=True, file_okay=False), ) @@ -476,6 +491,7 @@ def visualize_ocr_text(xml_file, dir_xml, dir_out): assert xml_file or dir_xml, "A single xml file -xml or a dir of xml files -dx is required not both of them" if dir_xml: xml_files_ind = os.listdir(dir_xml) + xml_files_ind = [ind_xml for ind_xml in xml_files_ind if ind_xml.endswith('.xml')] else: xml_files_ind = [xml_file] From 994bc8a1c07270cd390a59860a18e878fed1da1d Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 19 Sep 2025 15:24:34 +0200 Subject: [PATCH 193/492] debug new page extraction in the case of ignoring page extraction --- src/eynollah/eynollah.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 7ef2361..07cf8d9 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -3065,11 +3065,12 @@ class Eynollah: if self.plotter: self.plotter.save_page_image(image_page) - mask_page = np.zeros((text_regions_p_1.shape[0], text_regions_p_1.shape[1])).astype(np.int8) - mask_page = cv2.fillPoly(mask_page, pts=[cont_page], color=(1,)) - - text_regions_p_1[mask_page==0] = 0 - textline_mask_tot_ea[mask_page==0] = 0 + if not self.ignore_page_extraction: + mask_page = np.zeros((text_regions_p_1.shape[0], text_regions_p_1.shape[1])).astype(np.int8) + mask_page = cv2.fillPoly(mask_page, pts=[cont_page], color=(1,)) + + text_regions_p_1[mask_page==0] = 0 + textline_mask_tot_ea[mask_page==0] = 0 text_regions_p_1 = text_regions_p_1[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] textline_mask_tot_ea = textline_mask_tot_ea[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] From b38331b4aba9aa3db769e4f53ba9423beeb790ab Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 19 Sep 2025 18:06:18 +0200 Subject: [PATCH 194/492] writing page contour correctly in xml output + ignore unsupported file types when loading images --- src/eynollah/eynollah.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 07cf8d9..bd8f088 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -1617,7 +1617,7 @@ class Eynollah: else: box = [0, 0, img.shape[1], img.shape[0]] cropped_page, page_coord = crop_image_inside_box(box, self.image) - cont_page = cnt + cont_page = [cnt] #cont_page.append(np.array([[page_coord[2], page_coord[0]], #[page_coord[3], page_coord[0]], #[page_coord[3], page_coord[1]], @@ -3067,7 +3067,7 @@ class Eynollah: if not self.ignore_page_extraction: mask_page = np.zeros((text_regions_p_1.shape[0], text_regions_p_1.shape[1])).astype(np.int8) - mask_page = cv2.fillPoly(mask_page, pts=[cont_page], color=(1,)) + mask_page = cv2.fillPoly(mask_page, pts=[cont_page[0]], color=(1,)) text_regions_p_1[mask_page==0] = 0 textline_mask_tot_ea[mask_page==0] = 0 @@ -4526,6 +4526,7 @@ class Eynollah: if dir_in: self.ls_imgs = os.listdir(dir_in) + self.ls_imgs = [ind_img for ind_img in self.ls_imgs if ind_img.endswith('.jpg') or ind_img.endswith('.jpeg') or ind_img.endswith('.png') or ind_img.endswith('.tif') or ind_img.endswith('.tiff') or ind_img.endswith('.JPG') or ind_img.endswith('.JPEG')] elif image_filename: self.ls_imgs = [image_filename] else: @@ -5265,6 +5266,7 @@ class Eynollah_ocr: def run(self, overwrite : bool = False): if self.dir_in: ls_imgs = os.listdir(self.dir_in) + ls_imgs = [ind_img for ind_img in ls_imgs if ind_img.endswith('.jpg') or ind_img.endswith('.jpeg') or ind_img.endswith('.png') or ind_img.endswith('.tif') or ind_img.endswith('.tiff') or ind_img.endswith('.JPG') or ind_img.endswith('.JPEG')] else: ls_imgs = [self.image_filename] From e97e3ab192695d0b85395990709fb70d76a0881b Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Fri, 19 Sep 2025 23:23:30 +0200 Subject: [PATCH 195/492] Merge text of textlines and handle hyphenated words by joining them correctly --- src/eynollah/eynollah.py | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index bd8f088..1781c04 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5481,17 +5481,31 @@ class Eynollah_ocr: image_text.save(out_image_with_text) #print(len(unique_cropped_lines_region_indexer), 'unique_cropped_lines_region_indexer') + #######text_by_textregion = [] + #######for ind in unique_cropped_lines_region_indexer: + #######extracted_texts_merged_un = np.array(extracted_texts_merged)[np.array(cropped_lines_region_indexer)==ind] + + #######text_by_textregion.append(" ".join(extracted_texts_merged_un)) + text_by_textregion = [] for ind in unique_cropped_lines_region_indexer: extracted_texts_merged_un = np.array(extracted_texts_merged)[np.array(cropped_lines_region_indexer)==ind] - - text_by_textregion.append(" ".join(extracted_texts_merged_un)) - - #print(len(text_by_textregion) , indexer_text_region, "text_by_textregion") - - - #print(time.time() - t0 ,'elapsed time') - + if len(extracted_texts_merged_un)>1: + text_by_textregion_ind = "" + next_glue = "" + for indt in range(len(extracted_texts_merged_un)): + if extracted_texts_merged_un[indt].endswith('⸗') or extracted_texts_merged_un[indt].endswith('-') or extracted_texts_merged_un[indt].endswith('¬'): + text_by_textregion_ind = text_by_textregion_ind + next_glue + extracted_texts_merged_un[indt][:-1] + next_glue = "" + else: + text_by_textregion_ind = text_by_textregion_ind + next_glue + extracted_texts_merged_un[indt] + next_glue = " " + text_by_textregion.append(text_by_textregion_ind) + + else: + text_by_textregion.append(" ".join(extracted_texts_merged_un)) + + indexer = 0 indexer_textregion = 0 for nn in root1.iter(region_tags): @@ -5993,7 +6007,7 @@ class Eynollah_ocr: text_by_textregion_ind = "" next_glue = "" for indt in range(len(extracted_texts_merged_un)): - if extracted_texts_merged_un[indt].endswith('⸗') or extracted_texts_merged_un[indt].endswith('-'): + if extracted_texts_merged_un[indt].endswith('⸗') or extracted_texts_merged_un[indt].endswith('-') or extracted_texts_merged_un[indt].endswith('¬'): text_by_textregion_ind = text_by_textregion_ind + next_glue + extracted_texts_merged_un[indt][:-1] next_glue = "" else: From 6bbdfe10744dc1e9aaddd993bc8565e1b7739f7b Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Sun, 21 Sep 2025 02:32:40 +0200 Subject: [PATCH 196/492] extending image types --- src/eynollah/eynollah.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 1781c04..64e57a3 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4526,7 +4526,7 @@ class Eynollah: if dir_in: self.ls_imgs = os.listdir(dir_in) - self.ls_imgs = [ind_img for ind_img in self.ls_imgs if ind_img.endswith('.jpg') or ind_img.endswith('.jpeg') or ind_img.endswith('.png') or ind_img.endswith('.tif') or ind_img.endswith('.tiff') or ind_img.endswith('.JPG') or ind_img.endswith('.JPEG')] + self.ls_imgs = [ind_img for ind_img in self.ls_imgs if ind_img.endswith('.jpg') or ind_img.endswith('.jpeg') or ind_img.endswith('.png') or ind_img.endswith('.tif') or ind_img.endswith('.tiff') or ind_img.endswith('.JPG') or ind_img.endswith('.JPEG') or ind_img.endswith('.TIF') or ind_img.endswith('.TIFF') or ind_img.endswith('.PNG')] elif image_filename: self.ls_imgs = [image_filename] else: @@ -5266,7 +5266,7 @@ class Eynollah_ocr: def run(self, overwrite : bool = False): if self.dir_in: ls_imgs = os.listdir(self.dir_in) - ls_imgs = [ind_img for ind_img in ls_imgs if ind_img.endswith('.jpg') or ind_img.endswith('.jpeg') or ind_img.endswith('.png') or ind_img.endswith('.tif') or ind_img.endswith('.tiff') or ind_img.endswith('.JPG') or ind_img.endswith('.JPEG')] + ls_imgs = [ind_img for ind_img in ls_imgs if ind_img.endswith('.jpg') or ind_img.endswith('.jpeg') or ind_img.endswith('.png') or ind_img.endswith('.tif') or ind_img.endswith('.tiff') or ind_img.endswith('.JPG') or ind_img.endswith('.JPEG') or ind_img.endswith('.TIF') or ind_img.endswith('.TIFF') or ind_img.endswith('.PNG')] else: ls_imgs = [self.image_filename] From 554f3988c9d3c9d092712dd0998e8287b951cdeb Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Sun, 21 Sep 2025 16:33:14 +0200 Subject: [PATCH 197/492] default cnn-rnn and transformer ocr models have changed to model_eynollah_ocr_cnnrnn_20250904 and model_eynollah_ocr_trocr_20250919 respectively --- src/eynollah/eynollah.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 64e57a3..574d823 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -317,9 +317,9 @@ class Eynollah: #"/eynollah-textline_20210425" self.model_textline_dir = dir_models + "/modelens_textline_0_1__2_4_16092024" if self.ocr and self.tr: - self.model_ocr_dir = dir_models + "/trocr_model_ens_of_3_checkpoints_201124" + self.model_ocr_dir = dir_models + "/model_eynollah_ocr_trocr_20250919" elif self.ocr and not self.tr: - self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250805" + self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250904" if self.tables: if self.light_version: self.model_table_dir = dir_models + "/modelens_table_0t4_201124" @@ -5226,7 +5226,7 @@ class Eynollah_ocr: if self.model_name: self.model_ocr_dir = self.model_name else: - self.model_ocr_dir = dir_models + "/trocr_model_ens_of_3_checkpoints_201124" + self.model_ocr_dir = dir_models + "/model_eynollah_ocr_trocr_20250919" self.model_ocr = VisionEncoderDecoderModel.from_pretrained(self.model_ocr_dir) self.model_ocr.to(self.device) if not batch_size: @@ -5238,7 +5238,7 @@ class Eynollah_ocr: if self.model_name: self.model_ocr_dir = self.model_name else: - self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250805" + self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250904" model_ocr = load_model(self.model_ocr_dir , compile=False) self.prediction_model = tf.keras.models.Model( From a65405bead03f386cf3935df4dd58b1985cfcd21 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 22 Sep 2025 15:56:14 +0200 Subject: [PATCH 198/492] tables are visulaized within layout --- train/generate_gt_for_training.py | 2 +- train/gt_gen_utils.py | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/train/generate_gt_for_training.py b/train/generate_gt_for_training.py index 7810cd7..388fced 100644 --- a/train/generate_gt_for_training.py +++ b/train/generate_gt_for_training.py @@ -458,7 +458,7 @@ def visualize_layout_segmentation(xml_file, dir_xml, dir_out, dir_imgs): co_text, co_graphic, co_sep, co_img, co_table, co_noise, y_len, x_len = get_layout_contours_for_visualization(xml_file) - added_image = visualize_image_from_contours_layout(co_text['paragraph'], co_text['header']+co_text['heading'], co_text['drop-capital'], co_sep, co_img, co_text['marginalia'], img) + added_image = visualize_image_from_contours_layout(co_text['paragraph'], co_text['header']+co_text['heading'], co_text['drop-capital'], co_sep, co_img, co_text['marginalia'], co_table, img) cv2.imwrite(os.path.join(dir_out, f_name+'.png'), added_image) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 753b0f5..38d48ca 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -18,7 +18,7 @@ with warnings.catch_warnings(): warnings.simplefilter("ignore") -def visualize_image_from_contours_layout(co_par, co_header, co_drop, co_sep, co_image, co_marginal, img): +def visualize_image_from_contours_layout(co_par, co_header, co_drop, co_sep, co_image, co_marginal, co_table, img): alpha = 0.5 blank_image = np.ones( (img.shape[:]), dtype=np.uint8) * 255 @@ -30,6 +30,7 @@ def visualize_image_from_contours_layout(co_par, co_header, co_drop, co_sep, co_ col_image = (0, 100, 0) col_sep = (255, 0, 0) col_marginal = (106, 90, 205) + col_table = (0, 90, 205) if len(co_image)>0: cv2.drawContours(blank_image, co_image, -1, col_image, thickness=cv2.FILLED) # Fill the contour @@ -51,6 +52,9 @@ def visualize_image_from_contours_layout(co_par, co_header, co_drop, co_sep, co_ if len(co_marginal)>0: cv2.drawContours(blank_image, co_marginal, -1, col_marginal, thickness=cv2.FILLED) # Fill the contour + + if len(co_table)>0: + cv2.drawContours(blank_image, co_table, -1, col_table, thickness=cv2.FILLED) # Fill the contour img_final =cv2.cvtColor(blank_image, cv2.COLOR_BGR2RGB) From d0817f5744f4e78f3880d1ea87423e8260da9a81 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 24 Sep 2025 12:08:50 +0200 Subject: [PATCH 199/492] fix typo --- src/eynollah/eynollah.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 73d07b5..2813c56 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5091,7 +5091,7 @@ class Eynollah: t_order = time.time() if self.full_layout: - self.logger.info(ep 4/5: Reading Order Detection") + self.logger.info("Step 4/5: Reading Order Detection") if self.reading_order_machine_based: self.logger.info("Using machine-based detection") From 7933b103f5378f025eda2f5347095ee26e3eb159 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 24 Sep 2025 12:09:30 +0200 Subject: [PATCH 200/492] log modes only once (in run, not in run_single) --- src/eynollah/eynollah.py | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 2813c56..82073c3 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4531,6 +4531,21 @@ class Eynollah: self.logger.debug("enter run") t0_tot = time.time() + # Log enabled features directly + enabled_modes = [] + if self.light_version: + enabled_modes.append("Light version") + if self.textline_light: + enabled_modes.append("Light textline detection") + if self.full_layout: + enabled_modes.append("Full layout analysis") + if self.ocr: + enabled_modes.append("OCR") + if self.tables: + enabled_modes.append("Table detection") + if enabled_modes: + self.logger.info("Enabled modes: " + ", ".join(enabled_modes)) + if dir_in: self.ls_imgs = os.listdir(dir_in) self.ls_imgs = [ind_img for ind_img in self.ls_imgs if ind_img.endswith('.jpg') or ind_img.endswith('.jpeg') or ind_img.endswith('.png') or ind_img.endswith('.tif') or ind_img.endswith('.tiff') or ind_img.endswith('.JPG') or ind_img.endswith('.JPEG') or ind_img.endswith('.TIF') or ind_img.endswith('.TIFF') or ind_img.endswith('.PNG')] @@ -4563,25 +4578,7 @@ class Eynollah: def run_single(self): t0 = time.time() - self.logger.info(f"Processing file: {self.writer.image_filename}") - - # Log enabled features directly - enabled_modes = [] - if self.light_version: - enabled_modes.append("Light version") - if self.textline_light: - enabled_modes.append("Light textline detection") - if self.full_layout: - enabled_modes.append("Full layout analysis") - if self.ocr: - enabled_modes.append("OCR") - if self.tables: - enabled_modes.append("Table detection") - - if enabled_modes: - self.logger.info("Enabled modes: " + ", ".join(enabled_modes)) - - + self.logger.info(f"Processing file: {self.writer.image_filename}") self.logger.info("Step 1/5: Image Enhancement") img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement(self.light_version) From 90f1d7aa47e481731e0ec021f9af070b8bf9a0fd Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 24 Sep 2025 12:10:11 +0200 Subject: [PATCH 201/492] rm summary msg (info already logged elsewhere) --- src/eynollah/eynollah.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 82073c3..ed2c9fb 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5172,19 +5172,6 @@ class Eynollah: all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes, slopes_h, slopes_marginals_left, slopes_marginals_right, cont_page, polygons_lines_xml, ocr_all_textlines, ocr_all_textlines_h, ocr_all_textlines_marginals_left, ocr_all_textlines_marginals_right, ocr_all_textlines_drop, conf_contours_textregions, conf_contours_textregions_h) - summary = [ - f"Total processing time: {time.time() - t0:.1f}s", - f"Output file: {self.writer.output_filename}" - ] - - if self.ocr: - summary.append("OCR processing completed") - if self.full_layout: - summary.append("Full layout analysis completed") - if self.tables: - summary.append("Table detection completed") - self.logger.info(f"Summary: {summary}") - return pcgts contours_only_text_parent_h = None From 5bd318e6576858718f1953749cb448bd4a7dece0 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 24 Sep 2025 12:14:32 +0200 Subject: [PATCH 202/492] rm print statement (already log msg) --- src/eynollah/eynollah.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index ed2c9fb..27277ee 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4555,7 +4555,6 @@ class Eynollah: raise ValueError("run requires either a single image filename or a directory") for img_filename in self.ls_imgs: - print(img_filename, 'img_filename') self.logger.info(img_filename) t0 = time.time() From b75ca0d31fc4b8b2806569aebfa38e3203a0e7a0 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 24 Sep 2025 16:29:05 +0200 Subject: [PATCH 203/492] mb_ro_on_layout: remove copy-pasta code not actually used --- src/eynollah/mb_ro_on_layout.py | 333 +------------------------------- 1 file changed, 3 insertions(+), 330 deletions(-) diff --git a/src/eynollah/mb_ro_on_layout.py b/src/eynollah/mb_ro_on_layout.py index c03d831..c6c02cf 100644 --- a/src/eynollah/mb_ro_on_layout.py +++ b/src/eynollah/mb_ro_on_layout.py @@ -3,46 +3,26 @@ Image enhancer. The output can be written as same scale of input or in new predi """ from logging import Logger -from difflib import SequenceMatcher as sq -from PIL import Image, ImageDraw, ImageFont -import math import os -import sys import time from typing import Optional import atexit -import warnings from functools import partial from pathlib import Path from multiprocessing import cpu_count -import gc -import copy from loky import ProcessPoolExecutor import xml.etree.ElementTree as ET import cv2 import numpy as np -from ocrd import OcrdPage -from ocrd_utils import getLogger, tf_disable_interactive_logs +from ocrd_utils import getLogger import statistics +import tensorflow as tf from tensorflow.keras.models import load_model from .utils.resize import resize_image -from .utils import ( - crop_image_inside_box -) from .utils.contour import ( - filter_contours_area_of_image, - filter_contours_area_of_image_tables, - find_contours_mean_y_diff, find_new_features_of_contours, - find_features_of_contours, - get_text_region_boxes_by_given_contours, - get_textregion_contours_in_org_image, - get_textregion_contours_in_org_image_light, return_contours_of_image, - return_contours_of_interested_region, - return_contours_of_interested_region_by_min_size, - return_contours_of_interested_textline, return_parent_contours, ) @@ -64,7 +44,7 @@ class machine_based_reading_order_on_layout: self.executor = ProcessPoolExecutor(max_workers=cpu_count(), timeout=1200) atexit.register(self.executor.shutdown) self.dir_models = dir_models - self.model_reading_order_dir = dir_models + "/model_eynollah_reading_order_20250824"#"/model_ens_reading_order_machine_based" + self.model_reading_order_dir = dir_models + "/model_eynollah_reading_order_20250824" try: for device in tf.config.list_physical_devices('GPU'): @@ -76,43 +56,7 @@ class machine_based_reading_order_on_layout: self.light_version = True - def cache_images(self, image_filename=None, image_pil=None, dpi=None): - ret = {} - t_c0 = time.time() - if image_filename: - ret['img'] = cv2.imread(image_filename) - if self.light_version: - self.dpi = 100 - else: - self.dpi = 0#check_dpi(image_filename) - else: - ret['img'] = pil2cv(image_pil) - if self.light_version: - self.dpi = 100 - else: - self.dpi = 0#check_dpi(image_pil) - ret['img_grayscale'] = cv2.cvtColor(ret['img'], cv2.COLOR_BGR2GRAY) - for prefix in ('', '_grayscale'): - ret[f'img{prefix}_uint8'] = ret[f'img{prefix}'].astype(np.uint8) - self._imgs = ret - if dpi is not None: - self.dpi = dpi - def reset_file_name_dir(self, image_filename): - t_c = time.time() - self.cache_images(image_filename=image_filename) - self.output_filename = os.path.join(self.dir_out, Path(image_filename).stem +'.png') - - def imread(self, grayscale=False, uint8=True): - key = 'img' - if grayscale: - key += '_grayscale' - if uint8: - key += '_uint8' - return self._imgs[key].copy() - - def isNaN(self, num): - return num != num @staticmethod def our_load_model(model_file): @@ -126,278 +70,7 @@ class machine_based_reading_order_on_layout: "PatchEncoder": PatchEncoder, "Patches": Patches}) return model - def predict_enhancement(self, img): - self.logger.debug("enter predict_enhancement") - - img_height_model = self.model_enhancement.layers[-1].output_shape[1] - img_width_model = self.model_enhancement.layers[-1].output_shape[2] - if img.shape[0] < img_height_model: - img = cv2.resize(img, (img.shape[1], img_width_model), interpolation=cv2.INTER_NEAREST) - if img.shape[1] < img_width_model: - img = cv2.resize(img, (img_height_model, img.shape[0]), interpolation=cv2.INTER_NEAREST) - margin = int(0.1 * img_width_model) - width_mid = img_width_model - 2 * margin - height_mid = img_height_model - 2 * margin - img = img / 255. - img_h = img.shape[0] - img_w = img.shape[1] - - prediction_true = np.zeros((img_h, img_w, 3)) - nxf = img_w / float(width_mid) - nyf = img_h / float(height_mid) - nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf) - nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf) - - for i in range(nxf): - for j in range(nyf): - if i == 0: - index_x_d = i * width_mid - index_x_u = index_x_d + img_width_model - else: - index_x_d = i * width_mid - index_x_u = index_x_d + img_width_model - if j == 0: - index_y_d = j * height_mid - index_y_u = index_y_d + img_height_model - else: - index_y_d = j * height_mid - index_y_u = index_y_d + img_height_model - - if index_x_u > img_w: - index_x_u = img_w - index_x_d = img_w - img_width_model - if index_y_u > img_h: - index_y_u = img_h - index_y_d = img_h - img_height_model - - img_patch = img[np.newaxis, index_y_d:index_y_u, index_x_d:index_x_u, :] - label_p_pred = self.model_enhancement.predict(img_patch, verbose=0) - seg = label_p_pred[0, :, :, :] * 255 - - if i == 0 and j == 0: - prediction_true[index_y_d + 0:index_y_u - margin, - index_x_d + 0:index_x_u - margin] = \ - seg[0:-margin or None, - 0:-margin or None] - elif i == nxf - 1 and j == nyf - 1: - prediction_true[index_y_d + margin:index_y_u - 0, - index_x_d + margin:index_x_u - 0] = \ - seg[margin:, - margin:] - elif i == 0 and j == nyf - 1: - prediction_true[index_y_d + margin:index_y_u - 0, - index_x_d + 0:index_x_u - margin] = \ - seg[margin:, - 0:-margin or None] - elif i == nxf - 1 and j == 0: - prediction_true[index_y_d + 0:index_y_u - margin, - index_x_d + margin:index_x_u - 0] = \ - seg[0:-margin or None, - margin:] - elif i == 0 and j != 0 and j != nyf - 1: - prediction_true[index_y_d + margin:index_y_u - margin, - index_x_d + 0:index_x_u - margin] = \ - seg[margin:-margin or None, - 0:-margin or None] - elif i == nxf - 1 and j != 0 and j != nyf - 1: - prediction_true[index_y_d + margin:index_y_u - margin, - index_x_d + margin:index_x_u - 0] = \ - seg[margin:-margin or None, - margin:] - elif i != 0 and i != nxf - 1 and j == 0: - prediction_true[index_y_d + 0:index_y_u - margin, - index_x_d + margin:index_x_u - margin] = \ - seg[0:-margin or None, - margin:-margin or None] - elif i != 0 and i != nxf - 1 and j == nyf - 1: - prediction_true[index_y_d + margin:index_y_u - 0, - index_x_d + margin:index_x_u - margin] = \ - seg[margin:, - margin:-margin or None] - else: - prediction_true[index_y_d + margin:index_y_u - margin, - index_x_d + margin:index_x_u - margin] = \ - seg[margin:-margin or None, - margin:-margin or None] - - prediction_true = prediction_true.astype(int) - return prediction_true - def calculate_width_height_by_columns(self, img, num_col, width_early, label_p_pred): - self.logger.debug("enter calculate_width_height_by_columns") - if num_col == 1: - img_w_new = 2000 - elif num_col == 2: - img_w_new = 2400 - elif num_col == 3: - img_w_new = 3000 - elif num_col == 4: - img_w_new = 4000 - elif num_col == 5: - img_w_new = 5000 - elif num_col == 6: - img_w_new = 6500 - else: - img_w_new = width_early - img_h_new = img_w_new * img.shape[0] // img.shape[1] - - if img_h_new >= 8000: - img_new = np.copy(img) - num_column_is_classified = False - else: - img_new = resize_image(img, img_h_new, img_w_new) - num_column_is_classified = True - - return img_new, num_column_is_classified - - def early_page_for_num_of_column_classification(self,img_bin): - self.logger.debug("enter early_page_for_num_of_column_classification") - if self.input_binary: - img = np.copy(img_bin).astype(np.uint8) - else: - img = self.imread() - img = cv2.GaussianBlur(img, (5, 5), 0) - img_page_prediction = self.do_prediction(False, img, self.model_page) - - imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY) - _, thresh = cv2.threshold(imgray, 0, 255, 0) - thresh = cv2.dilate(thresh, KERNEL, iterations=3) - contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - if len(contours)>0: - cnt_size = np.array([cv2.contourArea(contours[j]) - for j in range(len(contours))]) - cnt = contours[np.argmax(cnt_size)] - box = cv2.boundingRect(cnt) - else: - box = [0, 0, img.shape[1], img.shape[0]] - cropped_page, page_coord = crop_image_inside_box(box, img) - - self.logger.debug("exit early_page_for_num_of_column_classification") - return cropped_page, page_coord - - def calculate_width_height_by_columns_1_2(self, img, num_col, width_early, label_p_pred): - self.logger.debug("enter calculate_width_height_by_columns") - if num_col == 1: - img_w_new = 1000 - else: - img_w_new = 1300 - img_h_new = img_w_new * img.shape[0] // img.shape[1] - - if label_p_pred[0][int(num_col - 1)] < 0.9 and img_w_new < width_early: - img_new = np.copy(img) - num_column_is_classified = False - #elif label_p_pred[0][int(num_col - 1)] < 0.8 and img_h_new >= 8000: - elif img_h_new >= 8000: - img_new = np.copy(img) - num_column_is_classified = False - else: - img_new = resize_image(img, img_h_new, img_w_new) - num_column_is_classified = True - - return img_new, num_column_is_classified - - def resize_and_enhance_image_with_column_classifier(self, light_version): - self.logger.debug("enter resize_and_enhance_image_with_column_classifier") - dpi = 0#self.dpi - self.logger.info("Detected %s DPI", dpi) - if self.input_binary: - img = self.imread() - prediction_bin = self.do_prediction(True, img, self.model_bin, n_batch_inference=5) - prediction_bin = 255 * (prediction_bin[:,:,0]==0) - prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2).astype(np.uint8) - img= np.copy(prediction_bin) - img_bin = prediction_bin - else: - img = self.imread() - self.h_org, self.w_org = img.shape[:2] - img_bin = None - - width_early = img.shape[1] - t1 = time.time() - _, page_coord = self.early_page_for_num_of_column_classification(img_bin) - - self.image_page_org_size = img[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3], :] - self.page_coord = page_coord - - if self.num_col_upper and not self.num_col_lower: - num_col = self.num_col_upper - label_p_pred = [np.ones(6)] - elif self.num_col_lower and not self.num_col_upper: - num_col = self.num_col_lower - label_p_pred = [np.ones(6)] - elif not self.num_col_upper and not self.num_col_lower: - if self.input_binary: - img_in = np.copy(img) - img_in = img_in / 255.0 - img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST) - img_in = img_in.reshape(1, 448, 448, 3) - else: - img_1ch = self.imread(grayscale=True) - width_early = img_1ch.shape[1] - img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - - img_1ch = img_1ch / 255.0 - img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST) - img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3)) - img_in[0, :, :, 0] = img_1ch[:, :] - img_in[0, :, :, 1] = img_1ch[:, :] - img_in[0, :, :, 2] = img_1ch[:, :] - - label_p_pred = self.model_classifier.predict(img_in, verbose=0) - num_col = np.argmax(label_p_pred[0]) + 1 - elif (self.num_col_upper and self.num_col_lower) and (self.num_col_upper!=self.num_col_lower): - if self.input_binary: - img_in = np.copy(img) - img_in = img_in / 255.0 - img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST) - img_in = img_in.reshape(1, 448, 448, 3) - else: - img_1ch = self.imread(grayscale=True) - width_early = img_1ch.shape[1] - img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]] - - img_1ch = img_1ch / 255.0 - img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST) - img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3)) - img_in[0, :, :, 0] = img_1ch[:, :] - img_in[0, :, :, 1] = img_1ch[:, :] - img_in[0, :, :, 2] = img_1ch[:, :] - - label_p_pred = self.model_classifier.predict(img_in, verbose=0) - num_col = np.argmax(label_p_pred[0]) + 1 - - if num_col > self.num_col_upper: - num_col = self.num_col_upper - label_p_pred = [np.ones(6)] - if num_col < self.num_col_lower: - num_col = self.num_col_lower - label_p_pred = [np.ones(6)] - else: - num_col = self.num_col_upper - label_p_pred = [np.ones(6)] - - self.logger.info("Found %d columns (%s)", num_col, np.around(label_p_pred, decimals=5)) - - if dpi < DPI_THRESHOLD: - if light_version and num_col in (1,2): - img_new, num_column_is_classified = self.calculate_width_height_by_columns_1_2( - img, num_col, width_early, label_p_pred) - else: - img_new, num_column_is_classified = self.calculate_width_height_by_columns( - img, num_col, width_early, label_p_pred) - if light_version: - image_res = np.copy(img_new) - else: - image_res = self.predict_enhancement(img_new) - is_image_enhanced = True - - else: - num_column_is_classified = True - image_res = np.copy(img) - is_image_enhanced = False - - self.logger.debug("exit resize_and_enhance_image_with_column_classifier") - return is_image_enhanced, img, image_res, num_col, num_column_is_classified, img_bin def read_xml(self, xml_file): file_name = Path(xml_file).stem tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) From c8ebe84697bd20568320526163933840123238b3 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 24 Sep 2025 16:36:18 +0200 Subject: [PATCH 204/492] image_enhancer: add missing models, remove dead code --- src/eynollah/image_enhancer.py | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/src/eynollah/image_enhancer.py b/src/eynollah/image_enhancer.py index 983712d..7383b91 100644 --- a/src/eynollah/image_enhancer.py +++ b/src/eynollah/image_enhancer.py @@ -3,29 +3,23 @@ Image enhancer. The output can be written as same scale of input or in new predi """ from logging import Logger -from difflib import SequenceMatcher as sq -from PIL import Image, ImageDraw, ImageFont -import math import os -import sys import time from typing import Optional import atexit -import warnings from functools import partial from pathlib import Path from multiprocessing import cpu_count import gc -import copy from loky import ProcessPoolExecutor -import xml.etree.ElementTree as ET import cv2 import numpy as np -from ocrd import OcrdPage from ocrd_utils import getLogger, tf_disable_interactive_logs -import statistics +import tensorflow as tf +from skimage.morphology import skeletonize from tensorflow.keras.models import load_model from .utils.resize import resize_image +from .utils.pil_cv2 import pil2cv from .utils import ( crop_image_inside_box ) @@ -62,6 +56,7 @@ class Enhancer: self.executor = ProcessPoolExecutor(max_workers=cpu_count(), timeout=1200) atexit.register(self.executor.shutdown) self.dir_models = dir_models + self.model_dir_of_binarization = dir_models + "/eynollah-binarization_20210425" self.model_dir_of_enhancement = dir_models + "/eynollah-enhancement_20210425" self.model_dir_of_col_classifier = dir_models + "/eynollah-column-classifier_20210425" self.model_page_dir = dir_models + "/eynollah-page-extraction_20210425" @@ -75,10 +70,10 @@ class Enhancer: self.model_page = self.our_load_model(self.model_page_dir) self.model_classifier = self.our_load_model(self.model_dir_of_col_classifier) self.model_enhancement = self.our_load_model(self.model_dir_of_enhancement) + self.model_bin = self.our_load_model(self.model_dir_of_binarization) def cache_images(self, image_filename=None, image_pil=None, dpi=None): ret = {} - t_c0 = time.time() if image_filename: ret['img'] = cv2.imread(image_filename) if self.light_version: @@ -99,7 +94,6 @@ class Enhancer: self.dpi = dpi def reset_file_name_dir(self, image_filename): - t_c = time.time() self.cache_images(image_filename=image_filename) self.output_filename = os.path.join(self.dir_out, Path(image_filename).stem +'.png') From 8b30bdbae2ad630b6f07cc0e82aa67cb01da3e50 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 24 Sep 2025 16:39:31 +0200 Subject: [PATCH 205/492] image_enhancer: use latest page extraction model --- src/eynollah/image_enhancer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eynollah/image_enhancer.py b/src/eynollah/image_enhancer.py index 7383b91..f577e52 100644 --- a/src/eynollah/image_enhancer.py +++ b/src/eynollah/image_enhancer.py @@ -59,7 +59,7 @@ class Enhancer: self.model_dir_of_binarization = dir_models + "/eynollah-binarization_20210425" self.model_dir_of_enhancement = dir_models + "/eynollah-enhancement_20210425" self.model_dir_of_col_classifier = dir_models + "/eynollah-column-classifier_20210425" - self.model_page_dir = dir_models + "/eynollah-page-extraction_20210425" + self.model_page_dir = dir_models + "/model_eynollah_page_extraction_20250915" try: for device in tf.config.list_physical_devices('GPU'): From ce13d8c5a329ebc6b6a32464a029801456320548 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 24 Sep 2025 01:22:07 +0200 Subject: [PATCH 206/492] get textlines inside textregion sorted --- src/eynollah/eynollah.py | 44 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 27277ee..93d1c8d 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -1753,7 +1753,45 @@ class Eynollah: prediction_regions = resize_image(prediction_regions, img_height_h, img_width_h) self.logger.debug("exit extract_text_regions") return prediction_regions, prediction_regions2 + + def get_textlines_of_a_textregion_sorted(self, textlines_textregion, cx_textline, cy_textline): + N = len(cy_textline) + if N==0: + return [] + + diff_matrix = np.abs(np.subtract.outer(cy_textline, cy_textline)) + + non_zero_diffs = diff_matrix[diff_matrix > 0] + if len(non_zero_diffs) == 0: + mean_y_diff = 0 + else: + mean_y_diff = np.mean(non_zero_diffs) + + row_threshold = mean_y_diff / 2 if mean_y_diff > 0 else 10 + indices_sorted_by_y = sorted(range(N), key=lambda i: cy_textline[i]) + + rows = [] + current_row = [indices_sorted_by_y[0]] + for i in range(1, N): + current_idx = indices_sorted_by_y[i] + prev_idx = current_row[0] + if abs(cy_textline[current_idx] - cy_textline[prev_idx]) <= row_threshold: + current_row.append(current_idx) + else: + rows.append(current_row) + current_row = [current_idx] + rows.append(current_row) + + sorted_textlines = [] + for row in rows: + row_sorted = sorted(row, key=lambda i: cx_textline[i]) + for idx in row_sorted: + sorted_textlines.append(textlines_textregion[idx]) + + return sorted_textlines + + def get_slopes_and_deskew_new_light2(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, slope_deskew): polygons_of_textlines = return_contours_of_interested_region(textline_mask_tot,1,0.00001) @@ -1773,8 +1811,12 @@ class Eynollah: results = np.array(results) indexes_in = args_textlines[results==1] textlines_ins = [polygons_of_textlines[ind] for ind in indexes_in] + cx_textline_in = [cx_main_tot[ind] for ind in indexes_in] + cy_textline_in = [cy_main_tot[ind] for ind in indexes_in] - all_found_textline_polygons.append(textlines_ins[::-1]) + textlines_ins = self.get_textlines_of_a_textregion_sorted(textlines_ins, cx_textline_in, cy_textline_in) + + all_found_textline_polygons.append(textlines_ins)#[::-1]) slopes.append(slope_deskew) _, crop_coor = crop_image_inside_box(boxes[index],image_page_rotated) From 6904a981828d70024ce5a97ca2823ea22ac581ad Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 24 Sep 2025 01:25:57 +0200 Subject: [PATCH 207/492] get textlines inside textregion sorted debugging --- src/eynollah/eynollah.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 93d1c8d..9acae80 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -1789,7 +1789,7 @@ class Eynollah: for idx in row_sorted: sorted_textlines.append(textlines_textregion[idx]) - return sorted_textlines + return sorted_textlines def get_slopes_and_deskew_new_light2(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, slope_deskew): From 6d8641a518ae9aa1934a95094413ff65c542b986 Mon Sep 17 00:00:00 2001 From: b-vr103 Date: Wed, 24 Sep 2025 03:43:36 +0200 Subject: [PATCH 208/492] get textlines sorted in textregion - verticals --- src/eynollah/eynollah.py | 74 +++++++++++++++++++++++++++------------- 1 file changed, 51 insertions(+), 23 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 9acae80..bbe80fe 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -1759,35 +1759,63 @@ class Eynollah: if N==0: return [] - diff_matrix = np.abs(np.subtract.outer(cy_textline, cy_textline)) + diff_cy = np.abs( np.diff(sorted(cy_textline)) ) + diff_cx = np.abs(np.diff(sorted(cx_textline)) ) + - non_zero_diffs = diff_matrix[diff_matrix > 0] - if len(non_zero_diffs) == 0: - mean_y_diff = 0 + if len(diff_cy)>0: + mean_y_diff = np.mean(diff_cy) + mean_x_diff = np.mean(diff_cx) else: - mean_y_diff = np.mean(non_zero_diffs) + mean_y_diff = 0 + mean_x_diff = 0 - row_threshold = mean_y_diff / 2 if mean_y_diff > 0 else 10 - indices_sorted_by_y = sorted(range(N), key=lambda i: cy_textline[i]) + if np.int(mean_y_diff) >= np.int(mean_x_diff): + row_threshold = mean_y_diff / 2 if mean_y_diff > 0 else 10 + + indices_sorted_by_y = sorted(range(N), key=lambda i: cy_textline[i]) - rows = [] - current_row = [indices_sorted_by_y[0]] - for i in range(1, N): - current_idx = indices_sorted_by_y[i] - prev_idx = current_row[0] - if abs(cy_textline[current_idx] - cy_textline[prev_idx]) <= row_threshold: - current_row.append(current_idx) - else: - rows.append(current_row) - current_row = [current_idx] - rows.append(current_row) + rows = [] + current_row = [indices_sorted_by_y[0]] + for i in range(1, N): + current_idx = indices_sorted_by_y[i] + prev_idx = current_row[0] + if abs(cy_textline[current_idx] - cy_textline[prev_idx]) <= row_threshold: + current_row.append(current_idx) + else: + rows.append(current_row) + current_row = [current_idx] + rows.append(current_row) - sorted_textlines = [] - for row in rows: - row_sorted = sorted(row, key=lambda i: cx_textline[i]) - for idx in row_sorted: - sorted_textlines.append(textlines_textregion[idx]) + sorted_textlines = [] + for row in rows: + row_sorted = sorted(row, key=lambda i: cx_textline[i]) + for idx in row_sorted: + sorted_textlines.append(textlines_textregion[idx]) + + else: + row_threshold = mean_x_diff / 2 if mean_x_diff > 0 else 10 + indices_sorted_by_x = sorted(range(N), key=lambda i: cx_textline[i]) + + rows = [] + current_row = [indices_sorted_by_x[0]] + + for i in range(1, N): + current_idy = indices_sorted_by_x[i] + prev_idy = current_row[0] + if abs(cx_textline[current_idy] - cx_textline[prev_idy] ) <= row_threshold: + current_row.append(current_idy) + else: + rows.append(current_row) + current_row = [current_idy] + rows.append(current_row) + + sorted_textlines = [] + for row in rows: + row_sorted = sorted(row , key=lambda i: cy_textline[i]) + for idy in row_sorted: + sorted_textlines.append(textlines_textregion[idy]) return sorted_textlines From 80d50d4bf6e7c1dfc211bc802728137ffd9f2ee6 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 24 Sep 2025 16:36:00 +0200 Subject: [PATCH 209/492] get textlines sorted in textregion - verticals --- src/eynollah/eynollah.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index bbe80fe..6b5b74e 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -1772,7 +1772,7 @@ class Eynollah: if np.int(mean_y_diff) >= np.int(mean_x_diff): - row_threshold = mean_y_diff / 2 if mean_y_diff > 0 else 10 + row_threshold = mean_y_diff / 1.5 if mean_y_diff > 0 else 10 indices_sorted_by_y = sorted(range(N), key=lambda i: cy_textline[i]) @@ -1795,7 +1795,7 @@ class Eynollah: sorted_textlines.append(textlines_textregion[idx]) else: - row_threshold = mean_x_diff / 2 if mean_x_diff > 0 else 10 + row_threshold = mean_x_diff / 1.5 if mean_x_diff > 0 else 10 indices_sorted_by_x = sorted(range(N), key=lambda i: cx_textline[i]) rows = [] @@ -4693,7 +4693,12 @@ class Eynollah: all_found_textline_polygons = filter_contours_area_of_image( textline_mask_tot_ea, cnt_clean_rot_raw, hir_on_cnt_clean_rot, max_area=1, min_area=0.00001) - all_found_textline_polygons = all_found_textline_polygons[::-1] + M_main_tot = [cv2.moments(all_found_textline_polygons[j]) + for j in range(len(all_found_textline_polygons))] + cx_main_tot = [(M_main_tot[j]["m10"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] + cy_main_tot = [(M_main_tot[j]["m01"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] + + all_found_textline_polygons = self.get_textlines_of_a_textregion_sorted(all_found_textline_polygons, cx_main_tot, cy_main_tot)#all_found_textline_polygons[::-1] all_found_textline_polygons=[ all_found_textline_polygons ] From 960b11f51f98518feaa5b1989a71bc368e6c9fa4 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 24 Sep 2025 22:58:57 +0200 Subject: [PATCH 210/492] machine-based-reading-order CLI: no foreign logger, add --log-level --- src/eynollah/cli.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 1170465..420373a 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -37,14 +37,22 @@ def main(): type=click.Path(exists=True, file_okay=False), required=True, ) +@click.option( + "--log_level", + "-l", + type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), + help="Override log level globally to this", +) + +def machine_based_reading_order(dir_xml, xml_file, dir_out, model, log_level): + orderer = machine_based_reading_order_on_layout(model, dir_out=dir_out) + if log_level: + orderer.logger.setLevel(getLevelName(log_level)) -def machine_based_reading_order(dir_xml, xml_file, dir_out, model): - raedingorder_object = machine_based_reading_order_on_layout(model, dir_out=dir_out, logger=getLogger('enhancement')) - if dir_xml: - raedingorder_object.run(dir_in=dir_xml) + orderer.run(dir_in=dir_xml) else: - raedingorder_object.run(xml_filename=xml_file) + orderer.run(xml_filename=xml_file) @main.command() From 8a1e5a895057aac0d0dd878e58c0ce3e70c891fe Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 24 Sep 2025 23:03:11 +0200 Subject: [PATCH 211/492] enhancement / layout CLI: do not override logger name --- src/eynollah/cli.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 420373a..ab157d1 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -137,21 +137,20 @@ def binarization(patches, model_dir, input_image, dir_in, output): def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_lower, save_org_scale, log_level): initLogging() - if log_level: - getLogger('enhancement').setLevel(getLevelName(log_level)) assert image or dir_in, "Either a single image -i or a dir_in -di is required" - enhancer_object = Enhancer( + enhancer = Enhancer( model, - logger=getLogger('enhancement'), dir_out=out, num_col_upper=num_col_upper, num_col_lower=num_col_lower, save_org_scale=save_org_scale, ) + if log_level: + enhancer.logger.setLevel(getLevelName(log_level)) if dir_in: - enhancer_object.run(dir_in=dir_in, overwrite=overwrite) + enhancer.run(dir_in=dir_in, overwrite=overwrite) else: - enhancer_object.run(image_filename=image, overwrite=overwrite) + enhancer.run(image_filename=image, overwrite=overwrite) @main.command() @click.option( @@ -368,8 +367,6 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ getLogger('eynollah').setLevel(logging.INFO) else: initLogging() - if log_level: - getLogger('eynollah').setLevel(getLevelName(log_level)) assert enable_plotting or not save_layout, "Plotting with -sl also requires -ep" assert enable_plotting or not save_deskewed, "Plotting with -sd also requires -ep" assert enable_plotting or not save_all, "Plotting with -sa also requires -ep" @@ -420,6 +417,8 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ threshold_art_class_textline=threshold_art_class_textline, threshold_art_class_layout=threshold_art_class_layout, ) + if log_level: + eynollah.logger.setLevel(getLevelName(log_level)) if dir_in: eynollah.run(dir_in=dir_in, overwrite=overwrite) else: @@ -529,8 +528,6 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ def ocr(image, overwrite, dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, model_name, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, prediction_with_both_of_rgb_and_bin, batch_size, dataset_abbrevation, min_conf_value_of_textline_text, log_level): initLogging() - if log_level: - getLogger('eynollah').setLevel(getLevelName(log_level)) assert not model or not model_name, "model directory -m can not be set alongside specific model name --model_name" assert not export_textline_images_and_text or not tr_ocr, "Exporting textline and text -etit can not be set alongside transformer ocr -tr_ocr" @@ -557,6 +554,8 @@ def ocr(image, overwrite, dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, pref_of_dataset=dataset_abbrevation, min_conf_value_of_textline_text=min_conf_value_of_textline_text, ) + if log_level: + eynollah_ocr.logger.setLevel(getLevelName(log_level)) eynollah_ocr.run(overwrite=overwrite) if __name__ == "__main__": From 93f7588bfa3787679fd5bb843176ea453c303f44 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 24 Sep 2025 23:08:50 +0200 Subject: [PATCH 212/492] binarizer CLI: add --log-level --- src/eynollah/cli.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index ab157d1..19beab5 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -71,10 +71,18 @@ def machine_based_reading_order(dir_xml, xml_file, dir_out, model, log_level): help="output image (if using -i) or output image directory (if using -di)", type=click.Path(file_okay=True, dir_okay=True), ) -def binarization(patches, model_dir, input_image, dir_in, output): +@click.option( + "--log_level", + "-l", + type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), + help="Override log level globally to this", +) +def binarization(patches, model_dir, input_image, dir_in, output, log_level): assert (dir_in is None) != (input_image is None), "Specify either -di and or -i not both" - SbbBinarizer(model_dir).run(image_path=input_image, use_patches=patches, output=output, dir_in=dir_in) - + binarizer = SbbBinarizer(model_dir) + if log_level: + binarizer.log.setLevel(getLevelName(log_level)) + binarizer.run(image_path=input_image, use_patches=patches, output=output, dir_in=dir_in) @main.command() From 96a0d22496eca2497abac64dcb931d9d45d3394c Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 24 Sep 2025 23:52:35 +0200 Subject: [PATCH 213/492] mbreorder CLI: change options to mimic other commands --- src/eynollah/cli.py | 49 +++++++++++++++++++++++++-------------------- 1 file changed, 27 insertions(+), 22 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 19beab5..71958df 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -13,20 +13,20 @@ def main(): @main.command() @click.option( - "--dir_xml", - "-dx", - help="directory of page-xml files", + "--dir_in", + "-di", + help="directory of PAGE-XML input files", type=click.Path(exists=True, file_okay=False), ) @click.option( - "--xml_file", - "-xml", - help="xml filename", + "--input", + "-i", + help="PAGE-XML input filename", type=click.Path(exists=True, dir_okay=False), ) @click.option( - "--dir_out", - "-do", + "--out", + "-o", help="directory for output images", type=click.Path(exists=True, file_okay=False), ) @@ -44,21 +44,26 @@ def main(): help="Override log level globally to this", ) -def machine_based_reading_order(dir_xml, xml_file, dir_out, model, log_level): - orderer = machine_based_reading_order_on_layout(model, dir_out=dir_out) +def machine_based_reading_order(dir_in, input, out, model, log_level): + orderer = machine_based_reading_order_on_layout(model, dir_out=out) if log_level: orderer.logger.setLevel(getLevelName(log_level)) - if dir_xml: - orderer.run(dir_in=dir_xml) + if dir_in: + orderer.run(dir_in=dir_in) else: - orderer.run(xml_filename=xml_file) + orderer.run(xml_filename=input) @main.command() @click.option('--patches/--no-patches', default=True, help='by enabling this parameter you let the model to see the image in patches.') @click.option('--model_dir', '-m', type=click.Path(exists=True, file_okay=False), required=True, help='directory containing models for prediction') -@click.option("--input-image", "-i", help="input image", type=click.Path(exists=True, dir_okay=False)) +@click.option( + "--input-image", "--image", + "-i", + help="input image filename", + type=click.Path(exists=True, dir_okay=False) +) @click.option( "--dir_in", "-di", @@ -89,14 +94,14 @@ def binarization(patches, model_dir, input_image, dir_in, output, log_level): @click.option( "--image", "-i", - help="image filename", + help="input image filename", type=click.Path(exists=True, dir_okay=False), ) @click.option( "--out", "-o", - help="directory to write output xml data", + help="directory for output PAGE-XML files", type=click.Path(exists=True, file_okay=False), required=True, ) @@ -109,7 +114,7 @@ def binarization(patches, model_dir, input_image, dir_in, output, log_level): @click.option( "--dir_in", "-di", - help="directory of images", + help="directory of input images", type=click.Path(exists=True, file_okay=False), ) @click.option( @@ -164,14 +169,14 @@ def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_low @click.option( "--image", "-i", - help="image filename", + help="input image filename", type=click.Path(exists=True, dir_okay=False), ) @click.option( "--out", "-o", - help="directory to write output xml data", + help="directory for output PAGE-XML files", type=click.Path(exists=True, file_okay=False), required=True, ) @@ -184,7 +189,7 @@ def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_low @click.option( "--dir_in", "-di", - help="directory of images", + help="directory of input images", type=click.Path(exists=True, file_okay=False), ) @click.option( @@ -437,7 +442,7 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ @click.option( "--image", "-i", - help="image filename", + help="input image filename", type=click.Path(exists=True, dir_okay=False), ) @click.option( @@ -449,7 +454,7 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ @click.option( "--dir_in", "-di", - help="directory of images", + help="directory of input images", type=click.Path(exists=True, file_okay=False), ) @click.option( From d6cdb69acbd1770c080ede18f52ed05c608a3693 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 25 Sep 2025 00:11:23 +0200 Subject: [PATCH 214/492] binarize/enhance/layout/ocr ls_imgs: use the same file name suffix filter for dir-in mode --- src/eynollah/eynollah.py | 11 +++++------ src/eynollah/image_enhancer.py | 7 ++++--- src/eynollah/sbb_binarize.py | 3 ++- src/eynollah/utils/__init__.py | 8 ++++++++ 4 files changed, 19 insertions(+), 10 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 27277ee..9071f7a 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -107,6 +107,7 @@ from .utils.drop_capitals import ( from .utils.marginals import get_marginals from .utils.resize import resize_image from .utils import ( + is_image_filename, boosting_headers_by_longshot_region_segmentation, crop_image_inside_box, find_num_col, @@ -4547,14 +4548,13 @@ class Eynollah: self.logger.info("Enabled modes: " + ", ".join(enabled_modes)) if dir_in: - self.ls_imgs = os.listdir(dir_in) - self.ls_imgs = [ind_img for ind_img in self.ls_imgs if ind_img.endswith('.jpg') or ind_img.endswith('.jpeg') or ind_img.endswith('.png') or ind_img.endswith('.tif') or ind_img.endswith('.tiff') or ind_img.endswith('.JPG') or ind_img.endswith('.JPEG') or ind_img.endswith('.TIF') or ind_img.endswith('.TIFF') or ind_img.endswith('.PNG')] + ls_imgs = list(filter(is_image_filename, os.listdir(self.dir_in))) elif image_filename: - self.ls_imgs = [image_filename] + ls_imgs = [image_filename] else: raise ValueError("run requires either a single image filename or a directory") - for img_filename in self.ls_imgs: + for img_filename in ls_imgs: self.logger.info(img_filename) t0 = time.time() @@ -5394,8 +5394,7 @@ class Eynollah_ocr: def run(self, overwrite : bool = False): if self.dir_in: - ls_imgs = os.listdir(self.dir_in) - ls_imgs = [ind_img for ind_img in ls_imgs if ind_img.endswith('.jpg') or ind_img.endswith('.jpeg') or ind_img.endswith('.png') or ind_img.endswith('.tif') or ind_img.endswith('.tiff') or ind_img.endswith('.JPG') or ind_img.endswith('.JPEG') or ind_img.endswith('.TIF') or ind_img.endswith('.TIFF') or ind_img.endswith('.PNG')] + ls_imgs = list(filter(is_image_filename, os.listdir(self.dir_in))) else: ls_imgs = [self.image_filename] diff --git a/src/eynollah/image_enhancer.py b/src/eynollah/image_enhancer.py index f577e52..5a06d59 100644 --- a/src/eynollah/image_enhancer.py +++ b/src/eynollah/image_enhancer.py @@ -21,6 +21,7 @@ from tensorflow.keras.models import load_model from .utils.resize import resize_image from .utils.pil_cv2 import pil2cv from .utils import ( + is_image_filename, crop_image_inside_box ) @@ -701,13 +702,13 @@ class Enhancer: t0_tot = time.time() if dir_in: - self.ls_imgs = os.listdir(dir_in) + ls_imgs = list(filter(is_image_filename, os.listdir(dir_in))) elif image_filename: - self.ls_imgs = [image_filename] + ls_imgs = [image_filename] else: raise ValueError("run requires either a single image filename or a directory") - for img_filename in self.ls_imgs: + for img_filename in ls_imgs: self.logger.info(img_filename) t0 = time.time() diff --git a/src/eynollah/sbb_binarize.py b/src/eynollah/sbb_binarize.py index 2d5035f..3716987 100644 --- a/src/eynollah/sbb_binarize.py +++ b/src/eynollah/sbb_binarize.py @@ -16,6 +16,7 @@ import tensorflow as tf from tensorflow.keras.models import load_model from tensorflow.python.keras import backend as tensorflow_backend +from .utils import is_image_filename def resize_image(img_in, input_height, input_width): return cv2.resize(img_in, (input_width, input_height), interpolation=cv2.INTER_NEAREST) @@ -347,7 +348,7 @@ class SbbBinarizer: cv2.imwrite(output, img_last) return img_last else: - ls_imgs = os.listdir(dir_in) + ls_imgs = list(filter(is_image_filename, os.listdir(dir_in))) for image_name in ls_imgs: image_stem = image_name.split('.')[0] print(image_name,'image_name') diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index ca86047..c154fe4 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -2194,3 +2194,11 @@ def return_boxes_of_images_by_order_of_reading_new( return boxes, peaks_neg_tot_tables_new else: return boxes, peaks_neg_tot_tables + +def is_image_filename(fname: str) -> bool: + return fname.lower().endswith(('.jpg', + '.jpeg', + '.png', + '.tif', + '.tiff', + )) From b094a6b77feb4e86f1ae07c1a5c96e5e88068523 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 25 Sep 2025 00:51:45 +0200 Subject: [PATCH 215/492] mbreorder: avoid spaces in logger name --- src/eynollah/mb_ro_on_layout.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eynollah/mb_ro_on_layout.py b/src/eynollah/mb_ro_on_layout.py index c6c02cf..70f1402 100644 --- a/src/eynollah/mb_ro_on_layout.py +++ b/src/eynollah/mb_ro_on_layout.py @@ -39,7 +39,7 @@ class machine_based_reading_order_on_layout: ): self.dir_out = dir_out - self.logger = logger if logger else getLogger('mbro on layout') + self.logger = logger if logger else getLogger('mbreorder') # for parallelization of CPU-intensive tasks: self.executor = ProcessPoolExecutor(max_workers=cpu_count(), timeout=1200) atexit.register(self.executor.shutdown) From 9967510327d33a49aa619ceba7a36f414fdc09e7 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 25 Sep 2025 00:52:16 +0200 Subject: [PATCH 216/492] mbreorder: filter by .xml suffix in dir-in mode --- src/eynollah/mb_ro_on_layout.py | 7 ++++--- src/eynollah/utils/__init__.py | 3 +++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/eynollah/mb_ro_on_layout.py b/src/eynollah/mb_ro_on_layout.py index 70f1402..6d72614 100644 --- a/src/eynollah/mb_ro_on_layout.py +++ b/src/eynollah/mb_ro_on_layout.py @@ -25,6 +25,7 @@ from .utils.contour import ( return_contours_of_image, return_parent_contours, ) +from .utils import is_xml_filename DPI_THRESHOLD = 298 KERNEL = np.ones((5, 5), np.uint8) @@ -751,13 +752,13 @@ class machine_based_reading_order_on_layout: t0_tot = time.time() if dir_in: - self.ls_xmls = os.listdir(dir_in) + ls_xmls = list(filter(is_xml_filename, os.listdir(dir_in))) elif xml_filename: - self.ls_xmls = [xml_filename] + ls_xmls = [xml_filename] else: raise ValueError("run requires either a single image filename or a directory") - for xml_filename in self.ls_xmls: + for xml_filename in ls_xmls: self.logger.info(xml_filename) t0 = time.time() diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index c154fe4..6eeabd0 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -2202,3 +2202,6 @@ def is_image_filename(fname: str) -> bool: '.tif', '.tiff', )) + +def is_xml_filename(fname: str) -> bool: + return fname.lower().endswith('.xml') From f07df080f08d93564eafa96c9d6299f181857fbe Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 25 Sep 2025 01:13:48 +0200 Subject: [PATCH 217/492] add tests for enhancement and mbreorder --- .../euler_rechenkunst01_1738_0025.xml | 1626 +++++++++++++ .../resources/kant_aufklaerung_1784_0020.xml | 2129 +++++++++++++++++ tests/test_run.py | 129 +- 3 files changed, 3875 insertions(+), 9 deletions(-) create mode 100644 tests/resources/euler_rechenkunst01_1738_0025.xml create mode 100644 tests/resources/kant_aufklaerung_1784_0020.xml diff --git a/tests/resources/euler_rechenkunst01_1738_0025.xml b/tests/resources/euler_rechenkunst01_1738_0025.xml new file mode 100644 index 0000000..1a92f73 --- /dev/null +++ b/tests/resources/euler_rechenkunst01_1738_0025.xml @@ -0,0 +1,1626 @@ + + + OCR-D + 2016-09-29T14:32:09 + 2018-04-25T08:56:33 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 9 + + + 9 + + + 9 + + + + + + + + + der + + + + + rechten + + + + + gegen + + + + + der + + + + + lincken + + + + + Hand + + + + + bedeutet + + + der rechten gegen der lincken Hand bedeutet + + + + + + + + wie + + + + + folget: + + + wie folget: + + + der rechten gegen der lincken Hand bedeutet +wie folget: + + + + + + + + + I. + + + I. + + + I. + + + + + + + + + 0 + + + + + - + + + + + nichts + + + 0 - nichts + + + + + + + + 1 + + + + + - + + + + + eins + + + 1 - eins + + + + + + + + 2 + + + + + - + + + + + zwey + + + 2 - zwey + + + + + + + + 3 + + + + + - + + + + + drey + + + 3 - drey + + + + + + + + 4 + + + + + - + + + + + vier + + + 4 - vier + + + 0 - nichts +1 - eins +2 - zwey +3 - drey +4 - vier + + + + + + + + + 5 + + + + + - + + + + + fuͤnf + + + 5 - fuͤnf + + + + + + + + 6 + + + + + - + + + + + Å¿echs + + + 6 - Å¿echs + + + + + + + 7 + + + + + - + + + + + Å¿ieben + + + 7 - Å¿ieben + + + + + + + + 8 + + + + + - + + + + + acht + + + 8 - acht + + + + + + + + 9 + + + + + - + + + + + neun + + + 9 - neun + + + 5 - fuͤnf +6 - Å¿echs +7 - Å¿ieben +8 - acht +9 - neun + + + + + + + + + Auf + + + + + der + + + + + zweyten + + + + + Stelle + + + + + aber + + + + + bedeutet. + + + Auf der zweyten Stelle aber bedeutet. + + + Auf der zweyten Stelle aber bedeutet. + + + + + + + + + II. + + + II. + + + II. + + + + + + + + + 0 + + + + + - + + + + + nichts + + + 0 - nichts + + + + + + + + 1 + + + + + - + + + + + zehen + + + 1 - zehen + + + + + + + + 2 + + + + + - + + + + + zwanzig + + + 2 - zwanzig + + + + + + + 3 + + + + + - + + + + + dreyßig + + + 3 - dreyßig + + + + + + + 4 + + + + + - + + + + + vierzig + + + 4 - vierzig + + 0 - nichts +1 - zehen +2 - zwanzig +3 - dreyßig +4 - vierzig + + + + + + + + + 5 + + + + + - + + + + + fuͤnfzig + + + 5 - fuͤnfzig + + + + + + + + 6 + + + + + - + + + + + Å¿echzig + + + 6 - Å¿echzig + + + + + + + 7 + + + + + - + + + + + Å¿iebenzig + + + 7 - Å¿iebenzig + + + + + + + 8 + + + + + - + + + + + achtzig + + + 8 - achtzig + + + + + + + 9 + + + + + - + + + + + neunzig + + + 9 - neunzig + + 5 - fuͤnfzig +6 - Å¿echzig +7 - Å¿iebenzig +8 - achtzig +9 - neunzig + + + + + + + + + Auf + + + + + der + + + + + dritten + + + + + Stelle + + + + + bedeutet. + + + Auf der dritten Stelle bedeutet. + + + Auf der dritten Stelle bedeutet. + + + + + + + + + III. + + + III. + + + III. + + + + + + + + + 0 + + + + + - + + + + + nichts + + + 0 - nichts + + + + + + + + 1 + + + + + - + + + + + hundert + + + 1 - hundert + + + + + + + + 2 + + + + + - + + + + + zwey + + + + + hundert + + + 2 - zwey hundert + + + + + + + + 3 + + + + + - + + + + + drey + + + + + hundert + + + 3 - drey hundert + + + + + + + + 4 + + + + + - + + + + + vier + + + + + hundert + + + 4 - vier hundert + + + 0 - nichts +1 - hundert +2 - zwey hundert +3 - drey hundert +4 - vier hundert + + + + + + + + + 5 + + + + + - + + + + + fuͤnf + + + + + hundert + + + 5 - fuͤnf hundert + + + + + + + + 6 + + + + + - + + + + + Å¿echs + + + + + hundert + + + 6 - Å¿echs hundert + + + + + + + 7 + + + + + - + + + + + Å¿ieben + + + + + hundert + + + 7 - Å¿ieben hundert + + + + + + + + 8 + + + + + - + + + + + acht + + + + + hundert + + + 8 - acht hundert + + + + + + + 9 + + + + + - + + + + + neun + + + + + hundert + + + 9 - neun hundert + + + 5 - fuͤnf hundert +6 - Å¿echs hundert +7 - Å¿ieben hundert +8 - acht hundert +9 - neun hundert + + + + + + + + + Auf + + + + + der + + + + + vierten + + + + + Stelle + + + + + bedeutet. + + + Auf der vierten Stelle bedeutet. + + + Auf der vierten Stelle bedeutet. + + + + + + + + + IV. + + + IV. + + + IV. + + + + + + + + + 0 + + + + + - + + + + + nichts + + + 0 - nichts + + + + + + + + 1 + + + + + - + + + + + tauÅ¿end + + + 1 - tauÅ¿end + + + + + + + + 2 + + + + + - + + + + + zwey + + + + + tauÅ¿end + + + 2 - zwey tauÅ¿end + + + + + + + + 3 + + + + + - + + + + + drey + + + + + tauÅ¿end + + + 3 - drey tauÅ¿end + + + + + + + + 4 + + + + + - + + + + + vier + + + + + tauÅ¿end + + + 4 - vier tauÅ¿end + + + 0 - nichts +1 - tauÅ¿end +2 - zwey tauÅ¿end +3 - drey tauÅ¿end +4 - vier tauÅ¿end + + + + + + + + + 5 + + + + + - + + + + + fuͤnf + + + + + tauÅ¿end + + + 5 - fuͤnf tauÅ¿end + + + + + + + + 6 + + + + + - + + + + + Å¿echs + + + + + tauÅ¿end + + + 6 - Å¿echs tauÅ¿end + + + + + + + 7 + + + + + - + + + + + Å¿ieben + + + + + tauÅ¿end + + + 7 - Å¿ieben tauÅ¿end + + + + + + + + 8 + + + + + - + + + + + acht + + + + + tauÅ¿end + + + 8 - acht tauÅ¿end + + + + + + + 9 + + + + + - + + + + + neun + + + + + tauÅ¿end + + + 9 - neun tauÅ¿end + + 5 - fuͤnf tauÅ¿end +6 - Å¿echs tauÅ¿end +7 - Å¿ieben tauÅ¿end +8 - acht tauÅ¿end +9 - neun tauÅ¿end + + + + + + + + + Auf + + + + + der + + + + + fuͤnften + + + + + Stelle + + + + + bedeutet. + + + Auf der fuͤnften Stelle bedeutet. + + + Auf der fuͤnften Stelle bedeutet. + + + + + + + + + V. + + + V. + + + V. + + + + + + + + + 0 + + + + + - + + + + + nichts + + + 0 - nichts + + + + + + + + 1 + + + + + - + + + + + zehen + + + + + tauÅ¿end + + + 1 - zehen tauÅ¿end + + + + + + + + 2 + + + + + - + + + + + zwanzig + + + + + tauÅ¿end + + + 2 - zwanzig tauÅ¿end + + + + + + + + 3 + + + + + - + + + + + dreyßig + + + + + tauÅ¿end + + + 3 - dreyßig tauÅ¿end + + + + + + + + 4 + + + + + - + + + + + vierzig + + + + + tauÅ¿end + + + 4 - vierzig tauÅ¿end + + + 0 - nichts +1 - zehen tauÅ¿end +2 - zwanzig tauÅ¿end +3 - dreyßig tauÅ¿end +4 - vierzig tauÅ¿end + + + + + + + + + 5 + + + + + - + + + + + fuͤnfzig + + + + + tauÅ¿end + + + 5 - fuͤnfzig tauÅ¿end + + + + + + + + 6 + + + + + - + + + + + Å¿echzig + + + + + tauÅ¿end + + + 6 - Å¿echzig tauÅ¿end + + + + + + + 7 + + + + + - + + + + + Å¿iebenzig + + + + + tauÅ¿end + + + 7 - Å¿iebenzig tauÅ¿end + + + + + + + + 8 + + + + + - + + + + + achtzig + + + + + tauÅ¿end + + + 8 - achtzig tauÅ¿end + + + + + + + 9 + + + + + - + + + + + neunzig + + + + + tauÅ¿end + + + 9 - neunzig tauÅ¿end + + + 5 - fuͤnfzig tauÅ¿end +6 - Å¿echzig tauÅ¿end +7 - Å¿iebenzig tauÅ¿end +8 - achtzig tauÅ¿end +9 - neunzig tauÅ¿end + + + + + + + + A + + + + + 5 + + + A 5 + + A 5 + + + + + + + + + Anf + + + Anf + + Anf + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tests/resources/kant_aufklaerung_1784_0020.xml b/tests/resources/kant_aufklaerung_1784_0020.xml new file mode 100644 index 0000000..47484cd --- /dev/null +++ b/tests/resources/kant_aufklaerung_1784_0020.xml @@ -0,0 +1,2129 @@ + + + OCR-D + 2016-09-20T11:09:27.431+02:00 + 2018-04-24T17:44:49.605+01:00 + + + + + + + + + + + + + + + + + + + + + + + ( + + + + + + + 484 + + + + + + + ) + + + + + ( 484 ) + + + + ( 484 ) + + + + + + + + + + + gewiegelt + + + + + + + worden + + + + + + + ; + + + + + + + Å¿o + + + + + + + Å¿chaͤdlich + + + + + + + iÅ¿t + + + + + + + es + + + + + + + Vorurtheile + + + + + + + zu + + + + + gewiegelt worden; Å¿o Å¿chaͤdlich iÅ¿t es Vorurtheile zu + + + + + + + + + + pflanzen + + + + + + + , + + + + + + + weil + + + + + + + Å¿ie + + + + + + + Å¿ich + + + + + + + zuletzt + + + + + + + an + + + + + + + denen + + + + + + + Å¿elbÅ¿t + + + + + + + raͤchen + + + + + + + , + + + + + pflanzen, weil Å¿ie Å¿ich zuletzt an denen Å¿elbÅ¿t raͤchen, + + + + + + + + + + die + + + + + + + , + + + + + + + oder + + + + + + + deren + + + + + + + Vorgaͤnger + + + + + + + , + + + + + + + ihre + + + + + + + Urheber + + + + + + + geweÅ¿en + + + + + die, oder deren Vorgaͤnger, ihre Urheber geweÅ¿en + + + + + + + + + + Å¿ind + + + + + + + . + + + + + + + Daher + + + + + + + kann + + + + + + + ein + + + + + + + Publikum + + + + + + + nur + + + + + + + langÅ¿am + + + + + + + zur + + + + + Å¿ind. Daher kann ein Publikum nur langÅ¿am zur + + + + + + + + + + Aufklaͤrung + + + + + + + gelangen + + + + + + + . + + + + + + + Durch + + + + + + + eine + + + + + + + Revolution + + + + + + + wird + + + + + Aufklaͤrung gelangen. Durch eine Revolution wird + + + + + + + + + + vielleicht + + + + + + + wohl + + + + + + + ein + + + + + + + Abfall + + + + + + + von + + + + + + + perÅ¿oͤnlichem + + + + + + + Despo- + + + + + vielleicht wohl ein Abfall von perÅ¿oͤnlichem Despo- + + + + + + + + + + tism + + + + + + + und + + + + + + + gewinnÅ¿uͤchtiger + + + + + + + oder + + + + + + + herrÅ¿chſüchtiger + + + + + + + Be + + + + + + + - + + + + + tism und gewinnÅ¿uͤchtiger oder herrÅ¿chſüchtiger Be- + + + + + + + + + + druͤkkung + + + + + + + , + + + + + + + aber + + + + + + + niemals + + + + + + + wahre + + + + + + + Reform + + + + + + + der + + + + + + + Den + + + + + + + - + + + + + druͤkkung, aber niemals wahre Reform der Den- + + + + + + + + + + kungsart + + + + + + + zu + + + + + + + Stande + + + + + + + kommen + + + + + + + ; + + + + + + + Å¿ondern + + + + + + + neue + + + + + + + Vor + + + + + + + - + + + + + kungsart zu Stande kommen; Å¿ondern neue Vor- + + + + + + + + + + urtheile + + + + + + + werden + + + + + + + , + + + + + + + eben + + + + + + + Å¿owohl + + + + + + + als + + + + + + + die + + + + + + + alten + + + + + + + , + + + + + + + zum + + + + + urtheile werden, eben Å¿owohl als die alten, zum + + + + + + + + + + Leitbande + + + + + + + des + + + + + + + gedankenloÅ¿en + + + + + + + großen + + + + + + + Haufens + + + + + Leitbande des gedankenloÅ¿en großen Haufens + + + + + + + + + + dienen + + + + + + + . + + + + + dienen. + + + + gewiegelt worden; Å¿o Å¿chaͤdlich iÅ¿t es Vorurtheile zu +pflanzen, weil Å¿ie Å¿ich zuletzt an denen Å¿elbÅ¿t raͤchen, +die, oder deren Vorgaͤnger, ihre Urheber geweÅ¿en +Å¿ind. Daher kann ein Publikum nur langÅ¿am zur +Aufklaͤrung gelangen. Durch eine Revolution wird +vielleicht wohl ein Abfall von perÅ¿oͤnlichem Despo- +tism und gewinnÅ¿uͤchtiger oder herrÅ¿chſüchtiger Be- +druͤkkung, aber niemals wahre Reform der Den- +kungsart zu Stande kommen; Å¿ondern neue Vor- +urtheile werden, eben Å¿owohl als die alten, zum +Leitbande des gedankenloÅ¿en großen Haufens +dienen. + + + + + + + + + + + Zu + + + + + + + dieÅ¿er + + + + + + + Aufklaͤrung + + + + + + + aber + + + + + + + wird + + + + + + + nichts + + + + + + + erfordert + + + + + Zu dieÅ¿er Aufklaͤrung aber wird nichts erfordert + + + + + + + + + + als + + + + + + + Freiheit + + + + + + + ; + + + + + + + und + + + + + + + zwar + + + + + + + die + + + + + + + unÅ¿chaͤdlichÅ¿te + + + + + + + unter + + + + + als Freiheit; und zwar die unÅ¿chaͤdlichÅ¿te unter + + + + + + + + + allem + + + + + + + , + + + + + + + was + + + + + + + nur + + + + + + + Freiheit + + + + + + + heißen + + + + + + + mag + + + + + + + , + + + + + + + naͤmlich + + + + + + + die + + + + + + + : + + + + + allem, was nur Freiheit heißen mag, naͤmlich die: + + + + + + + + + + von + + + + + + + Å¿einer + + + + + + + Vernunft + + + + + + + in + + + + + + + allen + + + + + + + Stuͤkken + + + + + + + oͤffentlichen + + + + + von Å¿einer Vernunft in allen Stuͤkken oͤffentlichen + + + + + + + + + + Gebrauch + + + + + + + zu + + + + + + + machen + + + + + + + . + + + + + + + Nun + + + + + + + hoͤre + + + + + + + ich + + + + + + + aber + + + + + + + von + + + + + + + al + + + + + + + - + + + + + Gebrauch zu machen. Nun hoͤre ich aber von al- + + + + + + + + + + len + + + + + + + Seiten + + + + + + + rufen + + + + + + + : + + + + + + + raͤſonnirt + + + + + + + nicht + + + + + + + ! + + + + + + + Der + + + + + + + Offi + + + + + + + - + + + + + len Seiten rufen: raͤſonnirt nicht! Der Offi- + + + + + + + + + + zier + + + + + + + Å¿agt + + + + + + + : + + + + + + + raͤſonnirt + + + + + + + nicht + + + + + + + , + + + + + + + Å¿ondern + + + + + + + exercirt + + + + + + + ! + + + + + + + Der + + + + + zier Å¿agt: raͤſonnirt nicht, Å¿ondern exercirt! Der + + + + + + + + + + Finanzrath + + + + + + + : + + + + + + + raͤſonnirt + + + + + + + nicht + + + + + + + , + + + + + + + Å¿ondern + + + + + + + bezahlt + + + + + + + ! + + + + + + + Der + + + + + Finanzrath: raͤſonnirt nicht, Å¿ondern bezahlt! Der + + + + + + + + + + GeiÅ¿tliche + + + + + + + : + + + + + + + raͤſonnirt + + + + + + + nicht + + + + + + + , + + + + + + + Å¿ondern + + + + + + + glaubt + + + + + + + ! + + + + + + + ( + + + + + + + Nur + + + + + GeiÅ¿tliche: raͤſonnirt nicht, Å¿ondern glaubt! (Nur + + + + + + + + + + ein + + + + + + + einziger + + + + + + + Herr + + + + + + + in + + + + + + + der + + + + + + + Welt + + + + + + + Å¿agt + + + + + + + : + + + + + + + raͤſonnirt + + + + + + + , + + + + + + + Å¿o + + + + + ein einziger Herr in der Welt Å¿agt: raͤſonnirt, Å¿o + + + + + + + + + + viel + + + + + + + ihr + + + + + + + wollt + + + + + + + , + + + + + + + und + + + + + + + woruͤber + + + + + + + ihr + + + + + + + wollt + + + + + + + ; + + + + + + + aber + + + + + + + ge + + + + + + + - + + + + + viel ihr wollt, und woruͤber ihr wollt; aber ge- + + + + + + + + + + horcht + + + + + + + ! + + + + + + + ) + + + + + + + Hier + + + + + + + iÅ¿t + + + + + + + uͤberall + + + + + + + EinÅ¿chraͤnkung + + + + + + + der + + + + + + + Frei + + + + + + + - + + + + + horcht!) Hier iÅ¿t uͤberall EinÅ¿chraͤnkung der Frei- + + + + + + + + + + heit + + + + + + + . + + + + + + + Welche + + + + + + + EinÅ¿chraͤnkung + + + + + + + aber + + + + + + + iÅ¿t + + + + + + + der + + + + + + + Aufklaͤ + + + + + + + - + + + + + heit. Welche EinÅ¿chraͤnkung aber iÅ¿t der Aufklaͤ- + + + + + + + + + + rung + + + + + + + hinderlich + + + + + + + ? + + + + + + + welche + + + + + + + nicht + + + + + + + , + + + + + + + Å¿ondern + + + + + + + ihr + + + + + + + wohl + + + + + + + gar + + + + + rung hinderlich? welche nicht, Å¿ondern ihr wohl gar + + + + + + + + + + befoͤrderlich + + + + + + + ? + + + + + + + — + + + + + + + Ich + + + + + + + antworte + + + + + + + : + + + + + + + der + + + + + + + oͤffentliche + + + + + befoͤrderlich? — Ich antworte: der oͤffentliche + + + + + + + + + + Gebrauch + + + + + + + Å¿einer + + + + + + + Vernunft + + + + + + + muß + + + + + + + jederzeit + + + + + + + frei + + + + + + + Å¿ein + + + + + + + , + + + + + Gebrauch Å¿einer Vernunft muß jederzeit frei Å¿ein, + + + + + + + + + + und + + + + + + + der + + + + + + + allein + + + + + + + kann + + + + + + + Aufklaͤrung + + + + + + + unter + + + + + + + MenÅ¿chen + + + + + + + zu + + + + + und der allein kann Aufklaͤrung unter MenÅ¿chen zu + + + + + Zu dieÅ¿er Aufklaͤrung aber wird nichts erfordert +als Freiheit; und zwar die unÅ¿chaͤdlichÅ¿te unter +allem, was nur Freiheit heißen mag, naͤmlich die: +von Å¿einer Vernunft in allen Stuͤkken oͤffentlichen +Gebrauch zu machen. Nun hoͤre ich aber von al- +len Seiten rufen: raͤſonnirt nicht! Der Offi- +zier Å¿agt: raͤſonnirt nicht, Å¿ondern exercirt! Der +Finanzrath: raͤſonnirt nicht, Å¿ondern bezahlt! Der +GeiÅ¿tliche: raͤſonnirt nicht, Å¿ondern glaubt! (Nur +ein einziger Herr in der Welt Å¿agt: raͤſonnirt, Å¿o +viel ihr wollt, und woruͤber ihr wollt; aber ge- +horcht!) Hier iÅ¿t uͤberall EinÅ¿chraͤnkung der Frei- +heit. Welche EinÅ¿chraͤnkung aber iÅ¿t der Aufklaͤ- +rung hinderlich? welche nicht, Å¿ondern ihr wohl gar +befoͤrderlich? — Ich antworte: der oͤffentliche +Gebrauch Å¿einer Vernunft muß jederzeit frei Å¿ein, +und der allein kann Aufklaͤrung unter MenÅ¿chen zu + + + + + + + + + + + Stan + + + + + + + - + + + + + Stan- + + + + + Stan- + + + + + + + + + + \ No newline at end of file diff --git a/tests/test_run.py b/tests/test_run.py index b4e2dbd..370deef 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -2,7 +2,12 @@ from os import environ from pathlib import Path import logging from PIL import Image -from eynollah.cli import layout as layout_cli, binarization as binarization_cli +from eynollah.cli import ( + layout as layout_cli, + binarization as binarization_cli, + enhancement as enhancement_cli, + machine_based_reading_order as mbreorder_cli, +) from click.testing import CliRunner from ocrd_modelfactory import page_from_file from ocrd_models.constants import NAMESPACES as NS @@ -44,8 +49,7 @@ def test_run_eynollah_layout_filename(tmp_path, subtests, pytestconfig, caplog): options=options): with caplog.filtering(only_eynollah): result = runner.invoke(layout_cli, args + options, catch_exceptions=False) - print(result) - assert result.exit_code == 0 + assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] assert str(infile) in logmsgs assert outfile.exists() @@ -73,8 +77,7 @@ def test_run_eynollah_layout_directory(tmp_path, pytestconfig, caplog): runner = CliRunner() with caplog.filtering(only_eynollah): result = runner.invoke(layout_cli, args) - print(result) - assert result.exit_code == 0 + assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Job done in')]) == 2 assert any(logmsg for logmsg in logmsgs if logmsg.startswith('All jobs done in')) @@ -88,6 +91,8 @@ def test_run_eynollah_binarization_filename(tmp_path, subtests, pytestconfig, ca '-i', str(infile), '-o', str(outfile), ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) caplog.set_level(logging.INFO) def only_eynollah(logrec): return logrec.name == 'SbbBinarizer' @@ -100,8 +105,7 @@ def test_run_eynollah_binarization_filename(tmp_path, subtests, pytestconfig, ca options=options): with caplog.filtering(only_eynollah): result = runner.invoke(binarization_cli, args + options) - print(result) - assert result.exit_code == 0 + assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] assert any(True for logmsg in logmsgs if logmsg.startswith('Predicting')) assert outfile.exists() @@ -119,14 +123,121 @@ def test_run_eynollah_binarization_directory(tmp_path, subtests, pytestconfig, c '-di', str(indir), '-o', str(outdir), ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) caplog.set_level(logging.INFO) def only_eynollah(logrec): return logrec.name == 'SbbBinarizer' runner = CliRunner() with caplog.filtering(only_eynollah): result = runner.invoke(binarization_cli, args) - print(result) - assert result.exit_code == 0 + assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Predicting')]) == 2 assert len(list(outdir.iterdir())) == 2 + +def test_run_eynollah_enhancement_filename(tmp_path, subtests, pytestconfig, caplog): + infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') + outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') + args = [ + '-m', EYNOLLAH_MODELS, + '-i', str(infile), + '-o', str(outfile.parent), + # subtests write to same location + '--overwrite', + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + def only_eynollah(logrec): + return logrec.name == 'enhancement' + runner = CliRunner() + for options in [ + [], # defaults + ["-sos"], + ]: + with subtests.test(#msg="test CLI", + options=options): + with caplog.filtering(only_eynollah): + result = runner.invoke(enhancement_cli, args + options) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + assert any(True for logmsg in logmsgs if logmsg.startswith('Image was enhanced')), logmsgs + assert outfile.exists() + with Image.open(infile) as original_img: + original_size = original_img.size + with Image.open(outfile) as enhanced_img: + enhanced_size = enhanced_img.size + assert (original_size == enhanced_size) == ("-sos" in options) + +def test_run_eynollah_enhancement_directory(tmp_path, subtests, pytestconfig, caplog): + indir = testdir.joinpath('resources') + outdir = tmp_path + args = [ + '-m', EYNOLLAH_MODELS, + '-di', str(indir), + '-o', str(outdir), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + def only_eynollah(logrec): + return logrec.name == 'enhancement' + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(enhancement_cli, args) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Image was enhanced')]) == 2 + assert len(list(outdir.iterdir())) == 2 + +def test_run_eynollah_mbreorder_filename(tmp_path, subtests, pytestconfig, caplog): + infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.xml') + outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') + args = [ + '-m', EYNOLLAH_MODELS, + '-i', str(infile), + '-o', str(outfile.parent), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + def only_eynollah(logrec): + return logrec.name == 'mbreorder' + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(mbreorder_cli, args) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + # FIXME: mbreorder has no logging! + #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs + assert outfile.exists() + #in_tree = page_from_file(str(infile)).etree + #in_order = in_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) + out_tree = page_from_file(str(outfile)).etree + out_order = out_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) + #assert len(out_order) >= 2, "result is inaccurate" + #assert in_order != out_order + assert out_order == ['r_1_1', 'r_2_1', 'r_2_2', 'r_2_3'] + +def test_run_eynollah_mbreorder_directory(tmp_path, subtests, pytestconfig, caplog): + indir = testdir.joinpath('resources') + outdir = tmp_path + args = [ + '-m', EYNOLLAH_MODELS, + '-di', str(indir), + '-o', str(outdir), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + def only_eynollah(logrec): + return logrec.name == 'mbreorder' + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(mbreorder_cli, args) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + # FIXME: mbreorder has no logging! + #assert len([logmsg for logmsg in logmsgs if logmsg.startswith('???')]) == 2 + assert len(list(outdir.iterdir())) == 2 From 369ef573f9efe520455fd0c3ba9eb64b37c2a819 Mon Sep 17 00:00:00 2001 From: b-vr103 Date: Thu, 25 Sep 2025 02:38:22 +0200 Subject: [PATCH 218/492] get textlines sorted in textregions - detection of vertical and horizontal regions improved --- src/eynollah/eynollah.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 6b5b74e..f5d7d8b 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -1754,7 +1754,7 @@ class Eynollah: self.logger.debug("exit extract_text_regions") return prediction_regions, prediction_regions2 - def get_textlines_of_a_textregion_sorted(self, textlines_textregion, cx_textline, cy_textline): + def get_textlines_of_a_textregion_sorted(self, textlines_textregion, cx_textline, cy_textline, w_h_textline): N = len(cy_textline) if N==0: return [] @@ -1766,12 +1766,17 @@ class Eynollah: if len(diff_cy)>0: mean_y_diff = np.mean(diff_cy) mean_x_diff = np.mean(diff_cx) + count_hor = np.count_nonzero(np.array(w_h_textline) > 1) + count_ver = len(w_h_textline) - count_hor + else: mean_y_diff = 0 mean_x_diff = 0 + count_hor = 1 + count_ver = 0 - if np.int(mean_y_diff) >= np.int(mean_x_diff): + if count_hor >= count_ver: row_threshold = mean_y_diff / 1.5 if mean_y_diff > 0 else 10 indices_sorted_by_y = sorted(range(N), key=lambda i: cy_textline[i]) @@ -1825,6 +1830,8 @@ class Eynollah: polygons_of_textlines = return_contours_of_interested_region(textline_mask_tot,1,0.00001) M_main_tot = [cv2.moments(polygons_of_textlines[j]) for j in range(len(polygons_of_textlines))] + + w_h_textlines = [cv2.boundingRect(polygons_of_textlines[i])[2:] for i in range(len(polygons_of_textlines))] cx_main_tot = [(M_main_tot[j]["m10"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] cy_main_tot = [(M_main_tot[j]["m01"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] @@ -1841,8 +1848,9 @@ class Eynollah: textlines_ins = [polygons_of_textlines[ind] for ind in indexes_in] cx_textline_in = [cx_main_tot[ind] for ind in indexes_in] cy_textline_in = [cy_main_tot[ind] for ind in indexes_in] + w_h_textlines_in = [w_h_textlines[ind][0] / float(w_h_textlines[ind][1]) for ind in indexes_in] - textlines_ins = self.get_textlines_of_a_textregion_sorted(textlines_ins, cx_textline_in, cy_textline_in) + textlines_ins = self.get_textlines_of_a_textregion_sorted(textlines_ins, cx_textline_in, cy_textline_in, w_h_textlines_in) all_found_textline_polygons.append(textlines_ins)#[::-1]) slopes.append(slope_deskew) @@ -4695,10 +4703,12 @@ class Eynollah: M_main_tot = [cv2.moments(all_found_textline_polygons[j]) for j in range(len(all_found_textline_polygons))] + w_h_textlines = [cv2.boundingRect(all_found_textline_polygons[j])[2:] for j in range(len(all_found_textline_polygons))] + w_h_textlines = [w_h_textlines[j][0] / float(w_h_textlines[j][1]) for j in range(len(w_h_textlines))] cx_main_tot = [(M_main_tot[j]["m10"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] cy_main_tot = [(M_main_tot[j]["m01"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] - all_found_textline_polygons = self.get_textlines_of_a_textregion_sorted(all_found_textline_polygons, cx_main_tot, cy_main_tot)#all_found_textline_polygons[::-1] + all_found_textline_polygons = self.get_textlines_of_a_textregion_sorted(all_found_textline_polygons, cx_main_tot, cy_main_tot, w_h_textlines)#all_found_textline_polygons[::-1] all_found_textline_polygons=[ all_found_textline_polygons ] From 58dd192fad4dedb4161e2ee9a695039c5d4db964 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 25 Sep 2025 16:05:45 +0200 Subject: [PATCH 219/492] smoke-test: also add enhancement and mbreorder here --- Makefile | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 177e87c..f78d7d1 100644 --- a/Makefile +++ b/Makefile @@ -82,13 +82,21 @@ smoke-test: tests/resources/kant_aufklaerung_1784_0020.tif eynollah layout -i $< -o $(TMPDIR) -m $(CURDIR)/models_eynollah fgrep -q http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15 $(TMPDIR)/$(basename $( Date: Thu, 25 Sep 2025 16:08:40 +0200 Subject: [PATCH 220/492] CLIs: add required=True where missing --- src/eynollah/cli.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 71958df..9744ecb 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -29,6 +29,7 @@ def main(): "-o", help="directory for output images", type=click.Path(exists=True, file_okay=False), + required=True, ) @click.option( "--model", @@ -75,6 +76,7 @@ def machine_based_reading_order(dir_in, input, out, model, log_level): "-o", help="output image (if using -i) or output image directory (if using -di)", type=click.Path(file_okay=True, dir_okay=True), + required=True, ) @click.option( "--log_level", @@ -475,6 +477,7 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ "-dx", help="directory of xmls", type=click.Path(exists=True, file_okay=False), + required=True, ) @click.option( "--dir_out_image_text", @@ -492,6 +495,7 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ "--model_name", help="Specific model file path to use for OCR", type=click.Path(exists=True, file_okay=False), + required=True, ) @click.option( "--tr_ocr", From ef1304a764530802b34c54b8e2a53fbe8a6809d9 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 25 Sep 2025 16:11:39 +0200 Subject: [PATCH 221/492] CLIs: reorder options, explain -i vs -di --- src/eynollah/cli.py | 72 +++++++++++++++++++++------------------------ 1 file changed, 33 insertions(+), 39 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 9744ecb..3e9fbe4 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -12,18 +12,18 @@ def main(): pass @main.command() -@click.option( - "--dir_in", - "-di", - help="directory of PAGE-XML input files", - type=click.Path(exists=True, file_okay=False), -) @click.option( "--input", "-i", help="PAGE-XML input filename", type=click.Path(exists=True, dir_okay=False), ) +@click.option( + "--dir_in", + "-di", + help="directory of PAGE-XML input files (instead of --input)", + type=click.Path(exists=True, file_okay=False), +) @click.option( "--out", "-o", @@ -45,7 +45,8 @@ def main(): help="Override log level globally to this", ) -def machine_based_reading_order(dir_in, input, out, model, log_level): +def machine_based_reading_order(input, dir_in, out, model, log_level): + assert bool(input) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." orderer = machine_based_reading_order_on_layout(model, dir_out=out) if log_level: orderer.logger.setLevel(getLevelName(log_level)) @@ -68,7 +69,7 @@ def machine_based_reading_order(dir_in, input, out, model, log_level): @click.option( "--dir_in", "-di", - help="directory of input images", + help="directory of input images (instead of --image)", type=click.Path(exists=True, file_okay=False), ) @click.option( @@ -85,7 +86,7 @@ def machine_based_reading_order(dir_in, input, out, model, log_level): help="Override log level globally to this", ) def binarization(patches, model_dir, input_image, dir_in, output, log_level): - assert (dir_in is None) != (input_image is None), "Specify either -di and or -i not both" + assert bool(input_image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." binarizer = SbbBinarizer(model_dir) if log_level: binarizer.log.setLevel(getLevelName(log_level)) @@ -116,7 +117,7 @@ def binarization(patches, model_dir, input_image, dir_in, output, log_level): @click.option( "--dir_in", "-di", - help="directory of input images", + help="directory of input images (instead of --image)", type=click.Path(exists=True, file_okay=False), ) @click.option( @@ -151,8 +152,8 @@ def binarization(patches, model_dir, input_image, dir_in, output, log_level): ) def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_lower, save_org_scale, log_level): + assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." initLogging() - assert image or dir_in, "Either a single image -i or a dir_in -di is required" enhancer = Enhancer( model, dir_out=out, @@ -191,7 +192,7 @@ def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_low @click.option( "--dir_in", "-di", - help="directory of input images", + help="directory of input images (instead of --image)", type=click.Path(exists=True, file_okay=False), ) @click.option( @@ -400,7 +401,7 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ assert not extract_only_images or not tables, "Image extraction -eoi can not be set alongside tables -tab" assert not extract_only_images or not right2left, "Image extraction -eoi can not be set alongside right2left -r2l" assert not extract_only_images or not headers_off, "Image extraction -eoi can not be set alongside headers_off -ho" - assert image or dir_in, "Either a single image -i or a dir_in -di is required" + assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." eynollah = Eynollah( model, dir_out=out, @@ -447,44 +448,44 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ help="input image filename", type=click.Path(exists=True, dir_okay=False), ) -@click.option( - "--overwrite", - "-O", - help="overwrite (instead of skipping) if output xml exists", - is_flag=True, -) @click.option( "--dir_in", "-di", - help="directory of input images", + help="directory of input images (instead of --image)", type=click.Path(exists=True, file_okay=False), ) @click.option( "--dir_in_bin", "-dib", - help="directory of binarized images. This should be given if you want to do prediction based on both rgb and bin images. And all bin images are png files", + help="directory of binarized images (in addition to --dir_in for RGB images; filename stems must match the RGB image files, with '.png' suffix).\nPerform prediction using both RGB and binary images. (This does not necessarily improve results, however it may be beneficial for certain document images.)", type=click.Path(exists=True, file_okay=False), ) -@click.option( - "--out", - "-o", - help="directory to write output xml data", - type=click.Path(exists=True, file_okay=False), - required=True, -) @click.option( "--dir_xmls", "-dx", - help="directory of xmls", + help="directory of input PAGE-XML files (in addition to --dir_in; filename stems must match the image files, with '.xml' suffix).", + type=click.Path(exists=True, file_okay=False), + required=True, +) +@click.option( + "--out", + "-o", + help="directory for output PAGE-XML files", type=click.Path(exists=True, file_okay=False), required=True, ) @click.option( "--dir_out_image_text", "-doit", - help="directory of images with predicted text", + help="directory for output images, newly rendered with predicted text", type=click.Path(exists=True, file_okay=False), ) +@click.option( + "--overwrite", + "-O", + help="overwrite (instead of skipping) if output xml exists", + is_flag=True, +) @click.option( "--model", "-m", @@ -515,12 +516,6 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ is_flag=True, help="if this parameter set to true, cropped textline images will not be masked with textline contour.", ) -@click.option( - "--prediction_with_both_of_rgb_and_bin", - "-brb/-nbrb", - is_flag=True, - help="If this parameter is set to True, the prediction will be performed using both RGB and binary images. However, this does not necessarily improve results; it may be beneficial for certain document images.", -) @click.option( "--batch_size", "-bs", @@ -543,7 +538,7 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ help="Override log level globally to this", ) -def ocr(image, overwrite, dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, model, model_name, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, prediction_with_both_of_rgb_and_bin, batch_size, dataset_abbrevation, min_conf_value_of_textline_text, log_level): +def ocr(image, dir_in, dir_in_bin, dir_xmls, out, dir_out_image_text, overwrite, model, model_name, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, batch_size, dataset_abbrevation, min_conf_value_of_textline_text, log_level): initLogging() assert not model or not model_name, "model directory -m can not be set alongside specific model name --model_name" @@ -552,8 +547,7 @@ def ocr(image, overwrite, dir_in, dir_in_bin, out, dir_xmls, dir_out_image_text, assert not export_textline_images_and_text or not batch_size, "Exporting textline and text -etit can not be set alongside batch size -bs" assert not export_textline_images_and_text or not dir_in_bin, "Exporting textline and text -etit can not be set alongside directory of bin images -dib" assert not export_textline_images_and_text or not dir_out_image_text, "Exporting textline and text -etit can not be set alongside directory of images with predicted text -doit" - assert not export_textline_images_and_text or not prediction_with_both_of_rgb_and_bin, "Exporting textline and text -etit can not be set alongside prediction with both rgb and bin -brb" - assert (bool(image) ^ bool(dir_in)), "Either -i (single image) or -di (directory) must be provided, but not both." + assert bool(image) != bool(dir_in), "Either -i (single image) or -di (directory) must be provided, but not both." eynollah_ocr = Eynollah_ocr( image_filename=image, dir_xmls=dir_xmls, From 5b1e0c13276db179f74770408fb805f9a7b84d87 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 25 Sep 2025 16:26:31 +0200 Subject: [PATCH 222/492] layout/ocr: make all path options kwargs to run() instead of attributes; ocr: drop redundant prediction_with_both_of_rgb_and_bin in favour of just bool(dir_in_bin) --- src/eynollah/cli.py | 37 +++-- src/eynollah/eynollah.py | 233 ++++++++++++-------------------- src/eynollah/utils/utils_ocr.py | 4 +- 3 files changed, 110 insertions(+), 164 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 3e9fbe4..a0608f9 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -404,13 +404,7 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." eynollah = Eynollah( model, - dir_out=out, - dir_of_cropped_images=save_images, extract_only_images=extract_only_images, - dir_of_layout=save_layout, - dir_of_deskewed=save_deskewed, - dir_of_all=save_all, - dir_save_page=save_page, enable_plotting=enable_plotting, allow_enhancement=allow_enhancement, curved_line=curved_line, @@ -435,11 +429,16 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ ) if log_level: eynollah.logger.setLevel(getLevelName(log_level)) - if dir_in: - eynollah.run(dir_in=dir_in, overwrite=overwrite) - else: - eynollah.run(image_filename=image, overwrite=overwrite) - + eynollah.run(overwrite=overwrite, + image_filename=image, + dir_in=dir_in, + dir_out=out, + dir_of_cropped_images=save_images, + dir_of_layout=save_layout, + dir_of_deskewed=save_deskewed, + dir_of_all=save_all, + dir_save_page=save_page, + ) @main.command() @click.option( @@ -549,25 +548,25 @@ def ocr(image, dir_in, dir_in_bin, dir_xmls, out, dir_out_image_text, overwrite, assert not export_textline_images_and_text or not dir_out_image_text, "Exporting textline and text -etit can not be set alongside directory of images with predicted text -doit" assert bool(image) != bool(dir_in), "Either -i (single image) or -di (directory) must be provided, but not both." eynollah_ocr = Eynollah_ocr( - image_filename=image, - dir_xmls=dir_xmls, - dir_out_image_text=dir_out_image_text, - dir_in=dir_in, - dir_in_bin=dir_in_bin, - dir_out=out, dir_models=model, model_name=model_name, tr_ocr=tr_ocr, export_textline_images_and_text=export_textline_images_and_text, do_not_mask_with_textline_contour=do_not_mask_with_textline_contour, - prediction_with_both_of_rgb_and_bin=prediction_with_both_of_rgb_and_bin, batch_size=batch_size, pref_of_dataset=dataset_abbrevation, min_conf_value_of_textline_text=min_conf_value_of_textline_text, ) if log_level: eynollah_ocr.logger.setLevel(getLevelName(log_level)) - eynollah_ocr.run(overwrite=overwrite) + eynollah_ocr.run(overwrite=overwrite, + dir_in=dir_in, + dir_in_bin=dir_in_bin, + image_filename=image, + dir_xmls=dir_xmls, + dir_out_image_text=dir_out_image_text, + dir_out=out, + ) if __name__ == "__main__": main() diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 9071f7a..533b38f 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -191,13 +191,7 @@ class Eynollah: def __init__( self, dir_models : str, - dir_out : Optional[str] = None, - dir_of_cropped_images : Optional[str] = None, extract_only_images : bool =False, - dir_of_layout : Optional[str] = None, - dir_of_deskewed : Optional[str] = None, - dir_of_all : Optional[str] = None, - dir_save_page : Optional[str] = None, enable_plotting : bool = False, allow_enhancement : bool = False, curved_line : bool = False, @@ -221,18 +215,12 @@ class Eynollah: skip_layout_and_reading_order : bool = False, ): self.logger = getLogger('eynollah') - + self.plotter = None + if skip_layout_and_reading_order: textline_light = True self.light_version = light_version - self.dir_out = dir_out - self.dir_of_all = dir_of_all - self.dir_save_page = dir_save_page self.reading_order_machine_based = reading_order_machine_based - self.dir_of_deskewed = dir_of_deskewed - self.dir_of_deskewed = dir_of_deskewed - self.dir_of_cropped_images=dir_of_cropped_images - self.dir_of_layout=dir_of_layout self.enable_plotting = enable_plotting self.allow_enhancement = allow_enhancement self.curved_line = curved_line @@ -423,21 +411,11 @@ class Eynollah: if dpi is not None: self.dpi = dpi - def reset_file_name_dir(self, image_filename): + def reset_file_name_dir(self, image_filename, dir_out): t_c = time.time() self.cache_images(image_filename=image_filename) - - self.plotter = None if not self.enable_plotting else EynollahPlotter( - dir_out=self.dir_out, - dir_of_all=self.dir_of_all, - dir_save_page=self.dir_save_page, - dir_of_deskewed=self.dir_of_deskewed, - dir_of_cropped_images=self.dir_of_cropped_images, - dir_of_layout=self.dir_of_layout, - image_filename_stem=Path(Path(image_filename).name).stem) - self.writer = EynollahXmlWriter( - dir_out=self.dir_out, + dir_out=dir_out, image_filename=image_filename, curved_line=self.curved_line, textline_light = self.textline_light) @@ -4525,7 +4503,17 @@ class Eynollah: return ordered_left_marginals, ordered_right_marginals, ordered_left_marginals_textline, ordered_right_marginals_textline, ordered_left_marginals_bbox, ordered_right_marginals_bbox, ordered_left_slopes_marginals, ordered_right_slopes_marginals - def run(self, image_filename : Optional[str] = None, dir_in : Optional[str] = None, overwrite : bool = False): + def run(self, + overwrite: bool = False, + image_filename: Optional[str] = None, + dir_in: Optional[str] = None, + dir_out: Optional[str] = None, + dir_of_cropped_images: Optional[str] = None, + dir_of_layout: Optional[str] = None, + dir_of_deskewed: Optional[str] = None, + dir_of_all: Optional[str] = None, + dir_save_page: Optional[str] = None, + ): """ Get image and scales, then extract the page of scanned image """ @@ -4546,9 +4534,19 @@ class Eynollah: enabled_modes.append("Table detection") if enabled_modes: self.logger.info("Enabled modes: " + ", ".join(enabled_modes)) + if self.enable_plotting: + self.logger.info("Saving debug plots") + if dir_of_cropped_images: + self.logger.info(f"Saving cropped images to: {dir_of_cropped_images}") + if dir_of_layout: + self.logger.info(f"Saving layout plots to: {dir_of_layout}") + if dir_of_deskewed: + self.logger.info(f"Saving deskewed images to: {dir_of_deskewed}") if dir_in: - ls_imgs = list(filter(is_image_filename, os.listdir(self.dir_in))) + ls_imgs = [os.path.join(dir_in, image_filename) + for image_filename in filter(is_image_filename, + os.listdir(dir_in))] elif image_filename: ls_imgs = [image_filename] else: @@ -4558,7 +4556,15 @@ class Eynollah: self.logger.info(img_filename) t0 = time.time() - self.reset_file_name_dir(os.path.join(dir_in or "", img_filename)) + self.reset_file_name_dir(img_filename, dir_out) + if self.enable_plotting: + self.plotter = EynollahPlotter(dir_out=dir_out, + dir_of_all=dir_of_all, + dir_save_page=dir_save_page, + dir_of_deskewed=dir_of_deskewed, + dir_of_cropped_images=dir_of_cropped_images, + dir_of_layout=dir_of_layout, + image_filename_stem=Path(image_filename).stem) #print("text region early -11 in %.1fs", time.time() - t0) if os.path.exists(self.writer.output_filename): if overwrite: @@ -5151,19 +5157,6 @@ class Eynollah: self.logger.info("Step 5/5: Output Generation") - output_config = [] - if self.enable_plotting: - output_config.append("Saving debug plots") - if self.dir_of_cropped_images: - output_config.append(f"Saving cropped images to: {self.dir_of_cropped_images}") - if self.dir_of_layout: - output_config.append(f"Saving layout plots to: {self.dir_of_layout}") - if self.dir_of_deskewed: - output_config.append(f"Saving deskewed images to: {self.dir_of_deskewed}") - - if output_config: - self.logger.info("Output configuration:\n * %s", "\n * ".join(output_config)) - pcgts = self.writer.build_pagexml_full_layout( contours_only_text_parent, contours_only_text_parent_h, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, @@ -5283,21 +5276,8 @@ class Eynollah: self.logger.info(f"Detection of reading order took {time.time() - t_order:.1f}s") self.logger.info("Step 5/5: Output Generation") - self.logger.info("Generating PAGE-XML output") - if self.enable_plotting: - self.logger.info("Saving debug plots") - - if self.dir_of_cropped_images: - self.logger.info(f"Saving cropped images to: {self.dir_of_cropped_images}") - - if self.dir_of_layout: - self.logger.info(f"Saving layout plots to: {self.dir_of_layout}") - - if self.dir_of_deskewed: - self.logger.info(f"Saving deskewed images to: {self.dir_of_deskewed}") - pcgts = self.writer.build_pagexml_no_full_layout( txt_con_org, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, all_box_coord, polygons_of_images, polygons_of_marginals_left, polygons_of_marginals_right, @@ -5315,32 +5295,19 @@ class Eynollah_ocr: dir_models, model_name=None, dir_xmls=None, - dir_in=None, - image_filename=None, - dir_in_bin=None, - dir_out=None, - dir_out_image_text=None, tr_ocr=False, batch_size=None, export_textline_images_and_text=False, do_not_mask_with_textline_contour=False, - prediction_with_both_of_rgb_and_bin=False, pref_of_dataset=None, min_conf_value_of_textline_text : Optional[float]=None, logger=None, ): - self.dir_in = dir_in - self.image_filename = image_filename - self.dir_in_bin = dir_in_bin - self.dir_out = dir_out - self.dir_xmls = dir_xmls self.dir_models = dir_models self.model_name = model_name self.tr_ocr = tr_ocr self.export_textline_images_and_text = export_textline_images_and_text self.do_not_mask_with_textline_contour = do_not_mask_with_textline_contour - self.dir_out_image_text = dir_out_image_text - self.prediction_with_both_of_rgb_and_bin = prediction_with_both_of_rgb_and_bin self.pref_of_dataset = pref_of_dataset self.logger = logger if logger else getLogger('eynollah') @@ -5392,23 +5359,27 @@ class Eynollah_ocr: ) self.end_character = len(characters) + 2 - def run(self, overwrite : bool = False): - if self.dir_in: - ls_imgs = list(filter(is_image_filename, os.listdir(self.dir_in))) + def run(self, overwrite: bool = False, + dir_in: Optional[str] = None, + dir_in_bin: Optional[str] = None, + image_filename: Optional[str] = None, + dir_xmls: Optional[str] = None, + dir_out_image_text: Optional[str] = None, + dir_out: Optional[str] = None, + ): + if dir_in: + ls_imgs = [os.path.join(dir_in, image_filename) + for image_filename in filter(is_image_filename, + os.listdir(dir_in))] else: - ls_imgs = [self.image_filename] - + ls_imgs = [image_filename] + if self.tr_ocr: tr_ocr_input_height_and_width = 384 - for ind_img in ls_imgs: - if self.dir_in: - file_name = Path(ind_img).stem - dir_img = os.path.join(self.dir_in, ind_img) - else: - file_name = Path(self.image_filename).stem - dir_img = self.image_filename - dir_xml = os.path.join(self.dir_xmls, file_name+'.xml') - out_file_ocr = os.path.join(self.dir_out, file_name+'.xml') + for dir_img in ls_imgs: + file_name = Path(dir_img).stem + dir_xml = os.path.join(dir_xmls, file_name+'.xml') + out_file_ocr = os.path.join(dir_out, file_name+'.xml') if os.path.exists(out_file_ocr): if overwrite: @@ -5419,8 +5390,8 @@ class Eynollah_ocr: img = cv2.imread(dir_img) - if self.dir_out_image_text: - out_image_with_text = os.path.join(self.dir_out_image_text, file_name+'.png') + if dir_out_image_text: + out_image_with_text = os.path.join(dir_out_image_text, file_name+'.png') image_text = Image.new("RGB", (img.shape[1], img.shape[0]), "white") draw = ImageDraw.Draw(image_text) total_bb_coordinates = [] @@ -5458,7 +5429,7 @@ class Eynollah_ocr: textline_coords = np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) x,y,w,h = cv2.boundingRect(textline_coords) - if self.dir_out_image_text: + if dir_out_image_text: total_bb_coordinates.append([x,y,w,h]) h2w_ratio = h/float(w) @@ -5580,7 +5551,7 @@ class Eynollah_ocr: unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) - if self.dir_out_image_text: + if dir_out_image_text: font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! font = ImageFont.truetype(font_path, 40) @@ -5708,18 +5679,10 @@ class Eynollah_ocr: img_size=(image_width, image_height) - for ind_img in ls_imgs: - if self.dir_in: - file_name = Path(ind_img).stem - dir_img = os.path.join(self.dir_in, ind_img) - else: - file_name = Path(self.image_filename).stem - dir_img = self.image_filename - - #file_name = Path(ind_img).stem - #dir_img = os.path.join(self.dir_in, ind_img) - dir_xml = os.path.join(self.dir_xmls, file_name+'.xml') - out_file_ocr = os.path.join(self.dir_out, file_name+'.xml') + for dir_img in ls_imgs: + file_name = Path(dir_img).stem + dir_xml = os.path.join(dir_xmls, file_name+'.xml') + out_file_ocr = os.path.join(dir_out, file_name+'.xml') if os.path.exists(out_file_ocr): if overwrite: @@ -5729,13 +5692,13 @@ class Eynollah_ocr: continue img = cv2.imread(dir_img) - if self.prediction_with_both_of_rgb_and_bin: + if dir_in_bin is not None: cropped_lines_bin = [] - dir_img_bin = os.path.join(self.dir_in_bin, file_name+'.png') + dir_img_bin = os.path.join(dir_in_bin, file_name+'.png') img_bin = cv2.imread(dir_img_bin) - if self.dir_out_image_text: - out_image_with_text = os.path.join(self.dir_out_image_text, file_name+'.png') + if dir_out_image_text: + out_image_with_text = os.path.join(dir_out_image_text, file_name+'.png') image_text = Image.new("RGB", (img.shape[1], img.shape[0]), "white") draw = ImageDraw.Draw(image_text) total_bb_coordinates = [] @@ -5779,13 +5742,13 @@ class Eynollah_ocr: if type_textregion=='drop-capital': angle_degrees = 0 - if self.dir_out_image_text: + if dir_out_image_text: total_bb_coordinates.append([x,y,w,h]) w_scaled = w * image_height/float(h) img_poly_on_img = np.copy(img) - if self.prediction_with_both_of_rgb_and_bin: + if dir_in_bin is not None: img_poly_on_img_bin = np.copy(img_bin) img_crop_bin = img_poly_on_img_bin[y:y+h, x:x+w, :] @@ -5808,7 +5771,7 @@ class Eynollah_ocr: img_crop = rotate_image_with_padding(img_crop, better_des_slope ) - if self.prediction_with_both_of_rgb_and_bin: + if dir_in_bin is not None: img_crop_bin = rotate_image_with_padding(img_crop_bin, better_des_slope ) mask_poly = rotate_image_with_padding(mask_poly, better_des_slope ) @@ -5823,13 +5786,13 @@ class Eynollah_ocr: if not self.do_not_mask_with_textline_contour: img_crop[mask_poly==0] = 255 - if self.prediction_with_both_of_rgb_and_bin: + if dir_in_bin is not None: img_crop_bin = img_crop_bin[y_n:y_n+h_n, x_n:x_n+w_n, :] if not self.do_not_mask_with_textline_contour: img_crop_bin[mask_poly==0] = 255 if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 90: - if self.prediction_with_both_of_rgb_and_bin: + if dir_in_bin is not None: img_crop, img_crop_bin = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly, img_crop_bin) else: img_crop, _ = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) @@ -5839,14 +5802,14 @@ class Eynollah_ocr: better_des_slope = 0 if not self.do_not_mask_with_textline_contour: img_crop[mask_poly==0] = 255 - if self.prediction_with_both_of_rgb_and_bin: + if dir_in_bin is not None: if not self.do_not_mask_with_textline_contour: img_crop_bin[mask_poly==0] = 255 if type_textregion=='drop-capital': pass else: if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 90: - if self.prediction_with_both_of_rgb_and_bin: + if dir_in_bin is not None: img_crop, img_crop_bin = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly, img_crop_bin) else: img_crop, _ = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) @@ -5861,14 +5824,12 @@ class Eynollah_ocr: cropped_lines_ver_index.append(0) cropped_lines_meging_indexing.append(0) - if self.prediction_with_both_of_rgb_and_bin: + if dir_in_bin is not None: img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop_bin, image_height, image_width) cropped_lines_bin.append(img_fin) else: - if self.prediction_with_both_of_rgb_and_bin: - splited_images, splited_images_bin = return_textlines_split_if_needed(img_crop, img_crop_bin, prediction_with_both_of_rgb_and_bin=self.prediction_with_both_of_rgb_and_bin) - else: - splited_images, splited_images_bin = return_textlines_split_if_needed(img_crop, None) + splited_images, splited_images_bin = return_textlines_split_if_needed( + img_crop, img_crop_bin if dir_in_bin is not None else None) if splited_images: img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[0], image_height, image_width) cropped_lines.append(img_fin) @@ -5889,7 +5850,7 @@ class Eynollah_ocr: else: cropped_lines_ver_index.append(0) - if self.prediction_with_both_of_rgb_and_bin: + if dir_in_bin is not None: img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images_bin[0], image_height, image_width) cropped_lines_bin.append(img_fin) img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images_bin[1], image_height, image_width) @@ -5905,7 +5866,7 @@ class Eynollah_ocr: else: cropped_lines_ver_index.append(0) - if self.prediction_with_both_of_rgb_and_bin: + if dir_in_bin is not None: img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop_bin, image_height, image_width) cropped_lines_bin.append(img_fin) @@ -5918,29 +5879,15 @@ class Eynollah_ocr: if cheild_text.tag.endswith("Unicode"): textline_text = cheild_text.text if textline_text: - if self.do_not_mask_with_textline_contour: - if self.pref_of_dataset: - with open(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_'+self.pref_of_dataset+'.txt'), 'w') as text_file: - text_file.write(textline_text) - - cv2.imwrite(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_'+self.pref_of_dataset+'.png'), img_crop ) - else: - with open(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'.txt'), 'w') as text_file: - text_file.write(textline_text) - - cv2.imwrite(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'.png'), img_crop ) - else: - if self.pref_of_dataset: - with open(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_'+self.pref_of_dataset+'_masked.txt'), 'w') as text_file: - text_file.write(textline_text) - - cv2.imwrite(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_'+self.pref_of_dataset+'_masked.png'), img_crop ) - else: - with open(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_masked.txt'), 'w') as text_file: - text_file.write(textline_text) - - cv2.imwrite(os.path.join(self.dir_out, file_name+'_line_'+str(indexer_textlines)+'_masked.png'), img_crop ) + base_name = os.path.join(dir_out, file_name + '_line_' + str(indexer_textlines)) + if self.pref_of_dataset: + base_name += '_' + self.pref_of_dataset + if not self.do_not_mask_with_textline_contour: + base_name += '_masked' + with open(base_name + '.txt', 'w') as text_file: + text_file.write(textline_text) + cv2.imwrite(base_name + '.png', img_crop) indexer_textlines+=1 if not self.export_textline_images_and_text: @@ -5971,7 +5918,7 @@ class Eynollah_ocr: else: imgs_ver_flipped = None - if self.prediction_with_both_of_rgb_and_bin: + if dir_in_bin is not None: imgs_bin = cropped_lines_bin[n_start:] imgs_bin = np.array(imgs_bin) imgs_bin = imgs_bin.reshape(imgs_bin.shape[0], image_height, image_width, 3) @@ -6001,7 +5948,7 @@ class Eynollah_ocr: imgs_ver_flipped = None - if self.prediction_with_both_of_rgb_and_bin: + if dir_in_bin is not None: imgs_bin = cropped_lines_bin[n_start:n_end] imgs_bin = np.array(imgs_bin).reshape(self.b_s, image_height, image_width, 3) @@ -6040,7 +5987,7 @@ class Eynollah_ocr: if len(indices_where_flipped_conf_value_is_higher)>0: indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] preds[indices_to_be_replaced,:,:] = preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] - if self.prediction_with_both_of_rgb_and_bin: + if dir_in_bin is not None: preds_bin = self.prediction_model.predict(imgs_bin, verbose=0) if len(indices_ver)>0: @@ -6087,7 +6034,7 @@ class Eynollah_ocr: extracted_texts.append("") extracted_conf_value.append(0) del cropped_lines - if self.prediction_with_both_of_rgb_and_bin: + if dir_in_bin is not None: del cropped_lines_bin gc.collect() @@ -6100,7 +6047,7 @@ class Eynollah_ocr: unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) - if self.dir_out_image_text: + if dir_out_image_text: font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! font = ImageFont.truetype(font_path, 40) diff --git a/src/eynollah/utils/utils_ocr.py b/src/eynollah/utils/utils_ocr.py index d974650..4fa99f7 100644 --- a/src/eynollah/utils/utils_ocr.py +++ b/src/eynollah/utils/utils_ocr.py @@ -109,13 +109,13 @@ def fit_text_single_line(draw, text, font_path, max_width, max_height): return ImageFont.truetype(font_path, 10) # Smallest font fallback -def return_textlines_split_if_needed(textline_image, textline_image_bin, prediction_with_both_of_rgb_and_bin=False): +def return_textlines_split_if_needed(textline_image, textline_image_bin=None): split_point = return_start_and_end_of_common_text_of_textline_ocr_without_common_section(textline_image) if split_point: image1 = textline_image[:, :split_point,:]# image.crop((0, 0, width2, height)) image2 = textline_image[:, split_point:,:]#image.crop((width1, 0, width, height)) - if prediction_with_both_of_rgb_and_bin: + if textline_image_bin is not None: image1_bin = textline_image_bin[:, :split_point,:]# image.crop((0, 0, width2, height)) image2_bin = textline_image_bin[:, split_point:,:]#image.crop((width1, 0, width, height)) return [image1, image2], [image1_bin, image2_bin] From 1dcc7b5795d92619cd87699e6030cea088441f3c Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 25 Sep 2025 16:38:43 +0200 Subject: [PATCH 223/492] ocr CLI: make --model vs --model_name xor --- src/eynollah/cli.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index a0608f9..3436250 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -495,7 +495,6 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ "--model_name", help="Specific model file path to use for OCR", type=click.Path(exists=True, file_okay=False), - required=True, ) @click.option( "--tr_ocr", @@ -540,7 +539,7 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ def ocr(image, dir_in, dir_in_bin, dir_xmls, out, dir_out_image_text, overwrite, model, model_name, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, batch_size, dataset_abbrevation, min_conf_value_of_textline_text, log_level): initLogging() - assert not model or not model_name, "model directory -m can not be set alongside specific model name --model_name" + assert bool(model) != bool(model_name), "Either -m (model directory) or --model_name (specific model name) must be provided." assert not export_textline_images_and_text or not tr_ocr, "Exporting textline and text -etit can not be set alongside transformer ocr -tr_ocr" assert not export_textline_images_and_text or not model, "Exporting textline and text -etit can not be set alongside model -m" assert not export_textline_images_and_text or not batch_size, "Exporting textline and text -etit can not be set alongside batch size -bs" From 2d14d57e4f42988e19cbc976e8b5174dec671b1b Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 25 Sep 2025 19:52:50 +0200 Subject: [PATCH 224/492] ocr: minimal debug logging --- src/eynollah/eynollah.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 533b38f..6191b8e 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5442,7 +5442,7 @@ class Eynollah_ocr: img_crop = img_poly_on_img[y:y+h, x:x+w, :] img_crop[mask_poly==0] = 255 - + self.logger.debug("processing %d lines for '%s'", len(cropped_lines), nn.attrib['id']) if h2w_ratio > 0.1: cropped_lines.append(resize_image(img_crop, tr_ocr_input_height_and_width, tr_ocr_input_height_and_width) ) cropped_lines_meging_indexing.append(0) @@ -5961,6 +5961,7 @@ class Eynollah_ocr: imgs_bin_ver_flipped = None + self.logger.debug("processing next %d lines", len(imgs)) preds = self.prediction_model.predict(imgs, verbose=0) if len(indices_ver)>0: From 5c7e1f21fb5c36c4012eb8b7231af47166da2820 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 25 Sep 2025 19:53:19 +0200 Subject: [PATCH 225/492] test_run: add tests for ocr --- tests/test_run.py | 80 ++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 73 insertions(+), 7 deletions(-) diff --git a/tests/test_run.py b/tests/test_run.py index 370deef..cd24225 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -7,6 +7,7 @@ from eynollah.cli import ( binarization as binarization_cli, enhancement as enhancement_cli, machine_based_reading_order as mbreorder_cli, + ocr as ocr_cli, ) from click.testing import CliRunner from ocrd_modelfactory import page_from_file @@ -76,7 +77,7 @@ def test_run_eynollah_layout_directory(tmp_path, pytestconfig, caplog): return logrec.name == 'eynollah' runner = CliRunner() with caplog.filtering(only_eynollah): - result = runner.invoke(layout_cli, args) + result = runner.invoke(layout_cli, args, catch_exceptions=False) assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Job done in')]) == 2 @@ -104,7 +105,7 @@ def test_run_eynollah_binarization_filename(tmp_path, subtests, pytestconfig, ca with subtests.test(#msg="test CLI", options=options): with caplog.filtering(only_eynollah): - result = runner.invoke(binarization_cli, args + options) + result = runner.invoke(binarization_cli, args + options, catch_exceptions=False) assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] assert any(True for logmsg in logmsgs if logmsg.startswith('Predicting')) @@ -130,7 +131,7 @@ def test_run_eynollah_binarization_directory(tmp_path, subtests, pytestconfig, c return logrec.name == 'SbbBinarizer' runner = CliRunner() with caplog.filtering(only_eynollah): - result = runner.invoke(binarization_cli, args) + result = runner.invoke(binarization_cli, args, catch_exceptions=False) assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Predicting')]) == 2 @@ -159,7 +160,7 @@ def test_run_eynollah_enhancement_filename(tmp_path, subtests, pytestconfig, cap with subtests.test(#msg="test CLI", options=options): with caplog.filtering(only_eynollah): - result = runner.invoke(enhancement_cli, args + options) + result = runner.invoke(enhancement_cli, args + options, catch_exceptions=False) assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] assert any(True for logmsg in logmsgs if logmsg.startswith('Image was enhanced')), logmsgs @@ -185,7 +186,7 @@ def test_run_eynollah_enhancement_directory(tmp_path, subtests, pytestconfig, ca return logrec.name == 'enhancement' runner = CliRunner() with caplog.filtering(only_eynollah): - result = runner.invoke(enhancement_cli, args) + result = runner.invoke(enhancement_cli, args, catch_exceptions=False) assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Image was enhanced')]) == 2 @@ -206,7 +207,7 @@ def test_run_eynollah_mbreorder_filename(tmp_path, subtests, pytestconfig, caplo return logrec.name == 'mbreorder' runner = CliRunner() with caplog.filtering(only_eynollah): - result = runner.invoke(mbreorder_cli, args) + result = runner.invoke(mbreorder_cli, args, catch_exceptions=False) assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] # FIXME: mbreorder has no logging! @@ -235,9 +236,74 @@ def test_run_eynollah_mbreorder_directory(tmp_path, subtests, pytestconfig, capl return logrec.name == 'mbreorder' runner = CliRunner() with caplog.filtering(only_eynollah): - result = runner.invoke(mbreorder_cli, args) + result = runner.invoke(mbreorder_cli, args, catch_exceptions=False) assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] # FIXME: mbreorder has no logging! #assert len([logmsg for logmsg in logmsgs if logmsg.startswith('???')]) == 2 assert len(list(outdir.iterdir())) == 2 + +def test_run_eynollah_ocr_filename(tmp_path, subtests, pytestconfig, caplog): + infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') + outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') + outrenderfile = tmp_path.joinpath('render').joinpath('kant_aufklaerung_1784_0020.xml') + outrenderfile.parent.mkdir() + args = [ + '-m', EYNOLLAH_MODELS, + '-i', str(infile), + '-dx', str(infile.parent), + '-o', str(outfile.parent), + # subtests write to same location + '--overwrite', + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.DEBUG) + def only_eynollah(logrec): + return logrec.name == 'eynollah' + runner = CliRunner() + for options in [ + [], # defaults + ["-doit", str(outrenderfile.parent)], + ["-trocr"], + ]: + with subtests.test(#msg="test CLI", + options=options): + with caplog.filtering(only_eynollah): + result = runner.invoke(ocr_cli, args + options, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + # FIXME: ocr has no logging! + #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs + assert outfile.exists() + if "-doit" in options: + assert outrenderfile.exists() + #in_tree = page_from_file(str(infile)).etree + #in_order = in_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) + out_tree = page_from_file(str(outfile)).etree + out_texts = out_tree.xpath("//page:TextLine/page:TextEquiv[last()]/page:Unicode/text()", namespaces=NS) + assert len(out_texts) >= 2, ("result is inaccurate", out_texts) + assert sum(map(len, out_texts)) > 100, ("result is inaccurate", out_texts) + +def test_run_eynollah_ocr_directory(tmp_path, subtests, pytestconfig, caplog): + indir = testdir.joinpath('resources') + outdir = tmp_path + args = [ + '-m', EYNOLLAH_MODELS, + '-di', str(indir), + '-dx', str(indir), + '-o', str(outdir), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + def only_eynollah(logrec): + return logrec.name == 'eynollah' + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(ocr_cli, args, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + # FIXME: ocr has no logging! + #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs + assert len(list(outdir.iterdir())) == 2 From 11de8a025d8e7dc5a3be54cd8009144f947c7a24 Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 25 Sep 2025 20:11:48 +0200 Subject: [PATCH 226/492] Adapt ocrd-eynollah-segment for release --- src/eynollah/ocrd-tool.json | 12 +++++++++++- src/eynollah/processor.py | 9 +++++---- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/src/eynollah/ocrd-tool.json b/src/eynollah/ocrd-tool.json index af5e03f..e5077e9 100644 --- a/src/eynollah/ocrd-tool.json +++ b/src/eynollah/ocrd-tool.json @@ -82,13 +82,23 @@ } }, "resources": [ + { + "url": "https://zenodo.org/records/17194824/files/models_layout_v0_5_0.tar.gz?download=1", + "name": "eynollah_layout_v0_5_0", + "type": "archive", + "path_in_archive": "eynollah_layout_v0_5_0", + "size": 3525684179, + "description": "Models for layout detection, reading order detection, textline detection, page extraction, column classification, table detection, binarization, image enhancement", + "version_range": ">= v0.5.0" + }, { "description": "models for eynollah (TensorFlow SavedModel format)", "url": "https://github.com/qurator-spk/eynollah/releases/download/v0.3.1/models_eynollah.tar.gz", "name": "default", "size": 1894627041, "type": "archive", - "path_in_archive": "models_eynollah" + "path_in_archive": "models_eynollah", + "version_range": ">= v0.3.0, < v0.5.0" } ] }, diff --git a/src/eynollah/processor.py b/src/eynollah/processor.py index c2922c1..12c7356 100644 --- a/src/eynollah/processor.py +++ b/src/eynollah/processor.py @@ -1,6 +1,7 @@ +from functools import cached_property from typing import Optional from ocrd_models import OcrdPage -from ocrd import Processor, OcrdPageResult +from ocrd import OcrdPageResultImage, Processor, OcrdPageResult from .eynollah import Eynollah, EynollahXmlWriter @@ -9,8 +10,8 @@ class EynollahProcessor(Processor): # already employs GPU (without singleton process atm) max_workers = 1 - @property - def executable(self): + @cached_property + def executable(self) -> str: return 'ocrd-eynollah-segment' def setup(self) -> None: @@ -20,7 +21,6 @@ class EynollahProcessor(Processor): "and parameter 'light_version' (faster+simpler method for main region detection and deskewing)") self.eynollah = Eynollah( self.resolve_resource(self.parameter['models']), - logger=self.logger, allow_enhancement=self.parameter['allow_enhancement'], curved_line=self.parameter['curved_line'], right2left=self.parameter['right_to_left'], @@ -33,6 +33,7 @@ class EynollahProcessor(Processor): headers_off=self.parameter['headers_off'], tables=self.parameter['tables'], ) + self.eynollah.logger = self.logger self.eynollah.plotter = None def shutdown(self): From e6ee26fde35d93584f295d47f3d8f85c1d65124a Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 25 Sep 2025 20:35:54 +0200 Subject: [PATCH 227/492] make models: adapt to zenodo/v0.5.0 --- Makefile | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/Makefile b/Makefile index f78d7d1..6566293 100644 --- a/Makefile +++ b/Makefile @@ -9,9 +9,10 @@ DOCKER ?= docker #SEG_MODEL := https://qurator-data.de/eynollah/2021-04-25/models_eynollah.tar.gz #SEG_MODEL := https://qurator-data.de/eynollah/2022-04-05/models_eynollah_renamed.tar.gz -SEG_MODEL := https://qurator-data.de/eynollah/2022-04-05/models_eynollah.tar.gz +# SEG_MODEL := https://qurator-data.de/eynollah/2022-04-05/models_eynollah.tar.gz #SEG_MODEL := https://github.com/qurator-spk/eynollah/releases/download/v0.3.0/models_eynollah.tar.gz #SEG_MODEL := https://github.com/qurator-spk/eynollah/releases/download/v0.3.1/models_eynollah.tar.gz +SEG_MODEL := https://zenodo.org/records/17194824/files/models_layout_v0_5_0.tar.gz?download=1 BIN_MODEL := https://github.com/qurator-spk/sbb_binarization/releases/download/v0.0.11/saved_model_2021_03_09.zip @@ -28,7 +29,7 @@ help: @echo " install Install package with pip" @echo " install-dev Install editable with pip" @echo " deps-test Install test dependencies with pip" - @echo " models Download and extract models to $(CURDIR)/models_eynollah" + @echo " models Download and extract models to $(CURDIR)/models_layout_v0_5_0" @echo " smoke-test Run simple CLI check" @echo " ocrd-test Run OCR-D CLI check" @echo " test Run unit tests" @@ -44,13 +45,13 @@ help: # END-EVAL -# Download and extract models to $(PWD)/models_eynollah -models: models_eynollah default-2021-03-09 +# Download and extract models to $(PWD)/models_layout_v0_5_0 +models: models_layout_v0_5_0 default-2021-03-09 -models_eynollah: models_eynollah.tar.gz - tar zxf models_eynollah.tar.gz +models_layout_v0_5_0: models_layout_v0_5_0.tar.gz + tar zxf models_layout_v0_5_0.tar.gz -models_eynollah.tar.gz: +models_layout_v0_5_0.tar.gz: wget $(SEG_MODEL) default-2021-03-09: $(notdir $(BIN_MODEL)) @@ -73,20 +74,20 @@ install: install-dev: $(PIP) install -e .$(and $(EXTRAS),[$(EXTRAS)]) -deps-test: models_eynollah +deps-test: models_layout_v0_5_0 $(PIP) install -r requirements-test.txt smoke-test: TMPDIR != mktemp -d smoke-test: tests/resources/kant_aufklaerung_1784_0020.tif # layout analysis: - eynollah layout -i $< -o $(TMPDIR) -m $(CURDIR)/models_eynollah + eynollah layout -i $< -o $(TMPDIR) -m $(CURDIR)/models_layout_v0_5_0 fgrep -q http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15 $(TMPDIR)/$(basename $( Date: Thu, 25 Sep 2025 20:51:02 +0200 Subject: [PATCH 228/492] enhancement/mbreorder: make all path options kwargs to run() instead of attributes --- src/eynollah/cli.py | 20 +++++++-------- src/eynollah/image_enhancer.py | 23 +++++++++-------- src/eynollah/mb_ro_on_layout.py | 45 +++++++++++++++------------------ 3 files changed, 43 insertions(+), 45 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 3436250..93bb676 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -47,14 +47,14 @@ def main(): def machine_based_reading_order(input, dir_in, out, model, log_level): assert bool(input) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." - orderer = machine_based_reading_order_on_layout(model, dir_out=out) + orderer = machine_based_reading_order_on_layout(model) if log_level: orderer.logger.setLevel(getLevelName(log_level)) - if dir_in: - orderer.run(dir_in=dir_in) - else: - orderer.run(xml_filename=input) + orderer.run(xml_filename=input, + dir_in=dir_in, + dir_out=out, + ) @main.command() @@ -156,17 +156,17 @@ def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_low initLogging() enhancer = Enhancer( model, - dir_out=out, num_col_upper=num_col_upper, num_col_lower=num_col_lower, save_org_scale=save_org_scale, ) if log_level: enhancer.logger.setLevel(getLevelName(log_level)) - if dir_in: - enhancer.run(dir_in=dir_in, overwrite=overwrite) - else: - enhancer.run(image_filename=image, overwrite=overwrite) + enhancer.run(overwrite=overwrite, + dir_in=dir_in, + image_filename=image, + dir_out=out, + ) @main.command() @click.option( diff --git a/src/eynollah/image_enhancer.py b/src/eynollah/image_enhancer.py index 5a06d59..89dde16 100644 --- a/src/eynollah/image_enhancer.py +++ b/src/eynollah/image_enhancer.py @@ -11,7 +11,6 @@ from functools import partial from pathlib import Path from multiprocessing import cpu_count import gc -from loky import ProcessPoolExecutor import cv2 import numpy as np from ocrd_utils import getLogger, tf_disable_interactive_logs @@ -33,13 +32,11 @@ class Enhancer: def __init__( self, dir_models : str, - dir_out : Optional[str] = None, num_col_upper : Optional[int] = None, num_col_lower : Optional[int] = None, save_org_scale : bool = False, logger : Optional[Logger] = None, ): - self.dir_out = dir_out self.input_binary = False self.light_version = False self.save_org_scale = save_org_scale @@ -53,9 +50,6 @@ class Enhancer: self.num_col_lower = num_col_lower self.logger = logger if logger else getLogger('enhancement') - # for parallelization of CPU-intensive tasks: - self.executor = ProcessPoolExecutor(max_workers=cpu_count(), timeout=1200) - atexit.register(self.executor.shutdown) self.dir_models = dir_models self.model_dir_of_binarization = dir_models + "/eynollah-binarization_20210425" self.model_dir_of_enhancement = dir_models + "/eynollah-enhancement_20210425" @@ -94,9 +88,9 @@ class Enhancer: if dpi is not None: self.dpi = dpi - def reset_file_name_dir(self, image_filename): + def reset_file_name_dir(self, image_filename, dir_out): self.cache_images(image_filename=image_filename) - self.output_filename = os.path.join(self.dir_out, Path(image_filename).stem +'.png') + self.output_filename = os.path.join(dir_out, Path(image_filename).stem +'.png') def imread(self, grayscale=False, uint8=True): key = 'img' @@ -694,7 +688,12 @@ class Enhancer: return img_res - def run(self, image_filename : Optional[str] = None, dir_in : Optional[str] = None, overwrite : bool = False): + def run(self, + overwrite: bool = False, + image_filename: Optional[str] = None, + dir_in: Optional[str] = None, + dir_out: Optional[str] = None, + ): """ Get image and scales, then extract the page of scanned image """ @@ -702,7 +701,9 @@ class Enhancer: t0_tot = time.time() if dir_in: - ls_imgs = list(filter(is_image_filename, os.listdir(dir_in))) + ls_imgs = [os.path.join(dir_in, image_filename) + for image_filename in filter(is_image_filename, + os.listdir(dir_in))] elif image_filename: ls_imgs = [image_filename] else: @@ -712,7 +713,7 @@ class Enhancer: self.logger.info(img_filename) t0 = time.time() - self.reset_file_name_dir(os.path.join(dir_in or "", img_filename)) + self.reset_file_name_dir(img_filename, dir_out) #print("text region early -11 in %.1fs", time.time() - t0) if os.path.exists(self.output_filename): diff --git a/src/eynollah/mb_ro_on_layout.py b/src/eynollah/mb_ro_on_layout.py index 6d72614..45db8e4 100644 --- a/src/eynollah/mb_ro_on_layout.py +++ b/src/eynollah/mb_ro_on_layout.py @@ -10,7 +10,6 @@ import atexit from functools import partial from pathlib import Path from multiprocessing import cpu_count -from loky import ProcessPoolExecutor import xml.etree.ElementTree as ET import cv2 import numpy as np @@ -35,15 +34,9 @@ class machine_based_reading_order_on_layout: def __init__( self, dir_models : str, - dir_out : Optional[str] = None, logger : Optional[Logger] = None, ): - self.dir_out = dir_out - self.logger = logger if logger else getLogger('mbreorder') - # for parallelization of CPU-intensive tasks: - self.executor = ProcessPoolExecutor(max_workers=cpu_count(), timeout=1200) - atexit.register(self.executor.shutdown) self.dir_models = dir_models self.model_reading_order_dir = dir_models + "/model_eynollah_reading_order_20250824" @@ -56,9 +49,6 @@ class machine_based_reading_order_on_layout: self.model_reading_order = self.our_load_model(self.model_reading_order_dir) self.light_version = True - - - @staticmethod def our_load_model(model_file): if model_file.endswith('.h5') and Path(model_file[:-3]).exists(): @@ -70,10 +60,8 @@ class machine_based_reading_order_on_layout: model = load_model(model_file, compile=False, custom_objects={ "PatchEncoder": PatchEncoder, "Patches": Patches}) return model - - + def read_xml(self, xml_file): - file_name = Path(xml_file).stem tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) root1=tree1.getroot() alltags=[elem.tag for elem in root1.iter()] @@ -495,7 +483,7 @@ class machine_based_reading_order_on_layout: img_poly=cv2.fillPoly(img, pts =co_img, color=(4,4,4)) img_poly=cv2.fillPoly(img, pts =co_sep, color=(5,5,5)) - return tree1, root1, bb_coord_printspace, file_name, id_paragraph, id_header+id_heading, co_text_paragraph, co_text_header+co_text_heading,\ + return tree1, root1, bb_coord_printspace, id_paragraph, id_header+id_heading, co_text_paragraph, co_text_header+co_text_heading,\ tot_region_ref,x_len, y_len,index_tot_regions, img_poly def return_indexes_of_contours_loctaed_inside_another_list_of_contours(self, contours, contours_loc, cx_main_loc, cy_main_loc, indexes_loc): @@ -744,7 +732,12 @@ class machine_based_reading_order_on_layout: - def run(self, xml_filename : Optional[str] = None, dir_in : Optional[str] = None, overwrite : bool = False): + def run(self, + overwrite: bool = False, + xml_filename: Optional[str] = None, + dir_in: Optional[str] = None, + dir_out: Optional[str] = None, + ): """ Get image and scales, then extract the page of scanned image """ @@ -752,7 +745,9 @@ class machine_based_reading_order_on_layout: t0_tot = time.time() if dir_in: - ls_xmls = list(filter(is_xml_filename, os.listdir(dir_in))) + ls_xmls = [os.path.join(dir_in, xml_filename) + for xml_filename in filter(is_xml_filename, + os.listdir(dir_in))] elif xml_filename: ls_xmls = [xml_filename] else: @@ -761,13 +756,11 @@ class machine_based_reading_order_on_layout: for xml_filename in ls_xmls: self.logger.info(xml_filename) t0 = time.time() - - if dir_in: - xml_file = os.path.join(dir_in, xml_filename) - else: - xml_file = xml_filename - - tree_xml, root_xml, bb_coord_printspace, file_name, id_paragraph, id_header, co_text_paragraph, co_text_header, tot_region_ref, x_len, y_len, index_tot_regions, img_poly = self.read_xml(xml_file) + + file_name = Path(xml_filename).stem + (tree_xml, root_xml, bb_coord_printspace, id_paragraph, id_header, + co_text_paragraph, co_text_header, tot_region_ref, + x_len, y_len, index_tot_regions, img_poly) = self.read_xml(xml_filename) id_all_text = id_paragraph + id_header @@ -810,7 +803,11 @@ class machine_based_reading_order_on_layout: alltags=[elem.tag for elem in root_xml.iter()] ET.register_namespace("",name_space) - tree_xml.write(os.path.join(self.dir_out, file_name+'.xml'),xml_declaration=True,method='xml',encoding="utf8",default_namespace=None) + tree_xml.write(os.path.join(dir_out, file_name+'.xml'), + xml_declaration=True, + method='xml', + encoding="utf8", + default_namespace=None) #sys.exit() From 9303ded11f98e0a04eb7f09b424d62812fb84d66 Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 25 Sep 2025 21:12:52 +0200 Subject: [PATCH 229/492] ocrd-tool.json: use models_layout instead of eynollah_layouts for consistency --- Makefile | 2 +- src/eynollah/ocrd-tool.json | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 6566293..20f4755 100644 --- a/Makefile +++ b/Makefile @@ -52,7 +52,7 @@ models_layout_v0_5_0: models_layout_v0_5_0.tar.gz tar zxf models_layout_v0_5_0.tar.gz models_layout_v0_5_0.tar.gz: - wget $(SEG_MODEL) + wget -O $@ $(SEG_MODEL) default-2021-03-09: $(notdir $(BIN_MODEL)) unzip $(notdir $(BIN_MODEL)) diff --git a/src/eynollah/ocrd-tool.json b/src/eynollah/ocrd-tool.json index e5077e9..fbc6c1a 100644 --- a/src/eynollah/ocrd-tool.json +++ b/src/eynollah/ocrd-tool.json @@ -84,9 +84,9 @@ "resources": [ { "url": "https://zenodo.org/records/17194824/files/models_layout_v0_5_0.tar.gz?download=1", - "name": "eynollah_layout_v0_5_0", + "name": "models_layout_v0_5_0", "type": "archive", - "path_in_archive": "eynollah_layout_v0_5_0", + "path_in_archive": "models_layout_v0_5_0", "size": 3525684179, "description": "Models for layout detection, reading order detection, textline detection, page extraction, column classification, table detection, binarization, image enhancement", "version_range": ">= v0.5.0" From 5c0ab509c4f3620f23d69655f339fae3cd2f02a4 Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 25 Sep 2025 21:17:32 +0200 Subject: [PATCH 230/492] CI: Update model name --- .github/workflows/test-eynollah.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-eynollah.yml b/.github/workflows/test-eynollah.yml index b27586c..b270ab1 100644 --- a/.github/workflows/test-eynollah.yml +++ b/.github/workflows/test-eynollah.yml @@ -27,7 +27,7 @@ jobs: - uses: actions/cache@v4 id: seg_model_cache with: - path: models_eynollah + path: models_layout_v0_5_0 key: ${{ runner.os }}-models - uses: actions/cache@v4 id: bin_model_cache From 0bb1fb1a053a464675c9c41ea21833763cd37b01 Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 25 Sep 2025 21:47:15 +0200 Subject: [PATCH 231/492] tests: adapt to layout/ocr model split --- Makefile | 5 +++-- tests/test_run.py | 25 +++++++++++++------------ 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/Makefile b/Makefile index 20f4755..41f46d6 100644 --- a/Makefile +++ b/Makefile @@ -115,8 +115,9 @@ ocrd-test: tests/resources/kant_aufklaerung_1784_0020.tif $(RM) -r $(TMPDIR) # Run unit tests -test: export EYNOLLAH_MODELS=$(CURDIR)/models_layout_v0_5_0 -test: export SBBBIN_MODELS=$(CURDIR)/default-2021-03-09 +test: export MODELS_LAYOUT=$(CURDIR)/models_layout_v0_5_0 +test: export MODELS_OCR=$(CURDIR)/models_ocr_v0_5_0 +test: export MODELS_BIN=$(CURDIR)/default-2021-03-09 test: $(PYTHON) -m pytest tests --durations=0 --continue-on-collection-errors $(PYTEST_ARGS) diff --git a/tests/test_run.py b/tests/test_run.py index cd24225..aea5808 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -15,14 +15,15 @@ from ocrd_models.constants import NAMESPACES as NS testdir = Path(__file__).parent.resolve() -EYNOLLAH_MODELS = environ.get('EYNOLLAH_MODELS', str(testdir.joinpath('..', 'models_eynollah').resolve())) -SBBBIN_MODELS = environ.get('SBBBIN_MODELS', str(testdir.joinpath('..', 'default-2021-03-09').resolve())) +MODELS_LAYOUT = environ.get('MODELS_LAYOUT', str(testdir.joinpath('..', 'models_layout_v0_5_0').resolve())) +MODELS_OCR = environ.get('MODELS_OCR', str(testdir.joinpath('..', 'models_ocr_v0_5_0').resolve())) +MODELS_BIN = environ.get('MODELS_BIN', str(testdir.joinpath('..', 'default-2021-03-09').resolve())) def test_run_eynollah_layout_filename(tmp_path, subtests, pytestconfig, caplog): infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') outfile = tmp_path / 'kant_aufklaerung_1784_0020.xml' args = [ - '-m', EYNOLLAH_MODELS, + '-m', MODELS_LAYOUT, '-i', str(infile), '-o', str(outfile.parent), # subtests write to same location @@ -66,7 +67,7 @@ def test_run_eynollah_layout_directory(tmp_path, pytestconfig, caplog): indir = testdir.joinpath('resources') outdir = tmp_path args = [ - '-m', EYNOLLAH_MODELS, + '-m', MODELS_LAYOUT, '-di', str(indir), '-o', str(outdir), ] @@ -88,7 +89,7 @@ def test_run_eynollah_binarization_filename(tmp_path, subtests, pytestconfig, ca infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') args = [ - '-m', SBBBIN_MODELS, + '-m', MODELS_BIN, '-i', str(infile), '-o', str(outfile), ] @@ -120,7 +121,7 @@ def test_run_eynollah_binarization_directory(tmp_path, subtests, pytestconfig, c indir = testdir.joinpath('resources') outdir = tmp_path args = [ - '-m', SBBBIN_MODELS, + '-m', MODELS_BIN, '-di', str(indir), '-o', str(outdir), ] @@ -141,7 +142,7 @@ def test_run_eynollah_enhancement_filename(tmp_path, subtests, pytestconfig, cap infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') args = [ - '-m', EYNOLLAH_MODELS, + '-m', MODELS_LAYOUT, '-i', str(infile), '-o', str(outfile.parent), # subtests write to same location @@ -175,7 +176,7 @@ def test_run_eynollah_enhancement_directory(tmp_path, subtests, pytestconfig, ca indir = testdir.joinpath('resources') outdir = tmp_path args = [ - '-m', EYNOLLAH_MODELS, + '-m', MODELS_LAYOUT, '-di', str(indir), '-o', str(outdir), ] @@ -196,7 +197,7 @@ def test_run_eynollah_mbreorder_filename(tmp_path, subtests, pytestconfig, caplo infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.xml') outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') args = [ - '-m', EYNOLLAH_MODELS, + '-m', MODELS_LAYOUT, '-i', str(infile), '-o', str(outfile.parent), ] @@ -225,7 +226,7 @@ def test_run_eynollah_mbreorder_directory(tmp_path, subtests, pytestconfig, capl indir = testdir.joinpath('resources') outdir = tmp_path args = [ - '-m', EYNOLLAH_MODELS, + '-m', MODELS_LAYOUT, '-di', str(indir), '-o', str(outdir), ] @@ -249,7 +250,7 @@ def test_run_eynollah_ocr_filename(tmp_path, subtests, pytestconfig, caplog): outrenderfile = tmp_path.joinpath('render').joinpath('kant_aufklaerung_1784_0020.xml') outrenderfile.parent.mkdir() args = [ - '-m', EYNOLLAH_MODELS, + '-m', MODELS_OCR, '-i', str(infile), '-dx', str(infile.parent), '-o', str(outfile.parent), @@ -289,7 +290,7 @@ def test_run_eynollah_ocr_directory(tmp_path, subtests, pytestconfig, caplog): indir = testdir.joinpath('resources') outdir = tmp_path args = [ - '-m', EYNOLLAH_MODELS, + '-m', MODELS_OCR, '-di', str(indir), '-dx', str(indir), '-o', str(outdir), From b4d460ca79c8b7361e2ff34477f33fcb62a7a319 Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 25 Sep 2025 22:16:38 +0200 Subject: [PATCH 232/492] makefile forgot the OCR models --- Makefile | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 41f46d6..a920615 100644 --- a/Makefile +++ b/Makefile @@ -16,6 +16,8 @@ SEG_MODEL := https://zenodo.org/records/17194824/files/models_layout_v0_5_0.tar. BIN_MODEL := https://github.com/qurator-spk/sbb_binarization/releases/download/v0.0.11/saved_model_2021_03_09.zip +OCR_MODEL := https://zenodo.org/records/17194824/files/models_ocr_v0_5_0.tar.gz?download=1 + PYTEST_ARGS ?= -vv # BEGIN-EVAL makefile-parser --make-help Makefile @@ -46,7 +48,7 @@ help: # Download and extract models to $(PWD)/models_layout_v0_5_0 -models: models_layout_v0_5_0 default-2021-03-09 +models: models_layout_v0_5_0 models_ocr_v0_5_0 default-2021-03-09 models_layout_v0_5_0: models_layout_v0_5_0.tar.gz tar zxf models_layout_v0_5_0.tar.gz @@ -54,6 +56,12 @@ models_layout_v0_5_0: models_layout_v0_5_0.tar.gz models_layout_v0_5_0.tar.gz: wget -O $@ $(SEG_MODEL) +models_ocr_v0_5_0: models_ocr_v0_5_0.tar.gz + tar zxf models_ocr_v0_5_0.tar.gz + +models_ocr_v0_5_0.tar.gz: + wget -O $@ $(OCR_MODEL) + default-2021-03-09: $(notdir $(BIN_MODEL)) unzip $(notdir $(BIN_MODEL)) mkdir $@ From 4c6405713a087781f6bdabf6a31d00d81f7b9a0b Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 25 Sep 2025 22:19:36 +0200 Subject: [PATCH 233/492] ci: ocr models --- .github/workflows/test-eynollah.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test-eynollah.yml b/.github/workflows/test-eynollah.yml index b270ab1..042e508 100644 --- a/.github/workflows/test-eynollah.yml +++ b/.github/workflows/test-eynollah.yml @@ -29,13 +29,18 @@ jobs: with: path: models_layout_v0_5_0 key: ${{ runner.os }}-models + - uses: actions/cache@v4 + id: ocr_model_cache + with: + path: models_ocr_v0_5_0 + key: ${{ runner.os }}-models - uses: actions/cache@v4 id: bin_model_cache with: path: default-2021-03-09 key: ${{ runner.os }}-modelbin - name: Download models - if: steps.seg_model_cache.outputs.cache-hit != 'true' || steps.bin_model_cache.outputs.cache-hit != 'true' + if: steps.seg_model_cache.outputs.cache-hit != 'true' || steps.bin_model_cache.outputs.cache-hit != 'true' || steps.ocr_model_cache.outputs.cache-hit != true run: make models - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 From 480daa4c7c92e22c16f1b7fc56cca48177953a3d Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 25 Sep 2025 22:25:05 +0200 Subject: [PATCH 234/492] test_run: make ocr -doit work (add truetype file) --- pyproject.toml | 2 +- src/eynollah/Charis-Regular.ttf | Bin 0 -> 878076 bytes src/eynollah/eynollah.py | 23 +++++++++++++++++------ tests/test_run.py | 2 +- 4 files changed, 19 insertions(+), 8 deletions(-) create mode 100644 src/eynollah/Charis-Regular.ttf diff --git a/pyproject.toml b/pyproject.toml index 4da39ef..8a63543 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,7 +46,7 @@ optional-dependencies.test = {file = ["requirements-test.txt"]} where = ["src"] [tool.setuptools.package-data] -"*" = ["*.json", '*.yml', '*.xml', '*.xsd'] +"*" = ["*.json", '*.yml', '*.xml', '*.xsd', '*.ttf'] [tool.coverage.run] branch = true diff --git a/src/eynollah/Charis-Regular.ttf b/src/eynollah/Charis-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..a4e75a450996c466d8eaa8528943c77590820bac GIT binary patch literal 878076 zcmdpf3!IJB{`a~(w|$vCV+_XJ4Na5eI1)qBnEULR*|X=q_a@DK5JGb#hK3|bk|arz zBuSEnG)a<>B!nZ$aU92yBsr3#nfJTa-ouQePMy>H{@?fAzt8%u-}*i4SZmDI_X2DLLgu%L|_pLc0*6O-XfT=e2!-UPp+&ijdEzq~;YQ>^`_|Ga-NL zLCB)Y?7Tm8FIhip5TOq&KqYi)$CSVG9u(`93- zqmBoLmAvUUp-B)RPtHa-rK0r+C z4i2s;DeqIfc`32{ScmOB2SX9s>V`U`Pa)lD@bK!f+NXaWLM)d82)V6(*xh9%*~WrO zV(q;H=^evM#*Pdu4Y-q7@12MIQ6ox*SDYFh^d-{k2#I%$y!)Q&@v3Jlv92=+c~u!X zx?*HlN#QHRufmG_-w;X|u|XodPK|>648$*?JxRVIoSdQ!gwjUZNH`;`KcOsvB|uJO zk3*i#Wtxzk3T%;92c4@;P57&l69-$qA z{Dt-fB;8}u6>ch>KRT&veYo}{ma{D%Gpkes z4DvyfAE73j$)9jjfGH4iYf}*9Hm3U^k28&fJl<3Td4g#INgorp=H)Fl~jr&9oizN2a}y_nGh%O=nC$K)z_Y2>D0TkB~2!EL{mrc*2bqH)w=uVY+}7L{ayv8bzd6(#3OURi4mrXc0Xfnf1-XOykC3~Y zZ-(5%d<*2B<~Yc`&9_62H=_=iQ_NYA9p)U!x#nERd1llHbAh=CvdcUW@*QSanE6g~ zG2{|+DdaM98RT+v739I@VUUNLheIA=9s&7o^GL{}%+-+pWd0N6G3GIl?={~Gd93+9 z$m7i8AdfeXhg@SueKX&0Mtw8an(9t9(HG1vPYk^+d^IDzP-n^FPwK1=C zd2PvSMPB>yT8`Idyw>8i2d^b~ZNOXOz4hB$v%U4$TZ2V?U4fdn61CP_U#V#aYAQ8- zi#qD9k)kd#+;IgpQq)C%@K)wl;G!;4b9*z|0$Jy%`DWBMYK}uaqvrmoVWMtjgXf@z zQFA`(7B#!gZtwx-0pNFVg0c5#ke!eOfQ?3n_e-k!2Nm6w953lX*Fzmi)kzF(Z{BprcX?} zOuJ2cOrM(e;(nbq{nK>L^quLv>3h=!(+|R$g%#tDwZffhFYXZTPaN(`f;rKgWKI_M z26rYOcVqzWL>cbDP+a*aT=7`*Gq}pTEn`rxviu$X+5S#{afR_(0b0V1Xwhb(WqQ%{ zD%zDZrXS7R9AFMKw?>N)Yz`4^0qnk~xfiV535#}_J?4StV)GF5U9i#7=4x2veXzd! zVQI6>kDF)1vgVqfGS4$VZJuvlU|t9-g6#;Kfvt?R+$*dBwLKU0TJ+au5<$8Xo|KZA zPn>1_0h?pZ#f_{*wc%pvj_xwo!9Y|PKMp6_Vn3tOX zPBRHhcRJH(W?_1Mlt^I@s-Ki+Nm;Zvrl^ggv}Umae}hsdxQO$8vC9{u)f43e&XxNy zobNWDWTS0~1`k2(0~bX~=;h>APP@_VzGJ8~ljh_6%ISW&O_lO6T1{)@HMOQY=}~$f zh84lOvlQlGBiLj%mn~--*dBJAUF0?%&3nqcH}eSI9lf=OkKmK}T)v!d;CuLSeo?V0 z(MnInt_)H}qhFq{tW-8B`<0VQgBqyDsC`t2TB?pwr>hIq)#?`Ypn6(kT3fBN)=$gT zDz$OiOl`5YR@<%}*3Rl0A&u}H45v0FB zT#a}qp`E(jw4BiEV;-y{G;(KWw8}A^N^tGrgZ_w?KD@FI?pb)>TVUbg3GwJ>!uO(_ z6vW~gbSJdqndq6==XeK{6*)5s@^#0etb|4cVLL28e2F(-_+a0DnTXNXg`dOmHzS_n zJ0Ed=XONCZ`ddQ7aGb^igoWFnSBZys=n@%Nct2hGHu-deP}cU2-a6Q%vlG@Sv!ks=zMjZe8x3Co z_uiK;Y$|j!qmQ@H;TYkEqxVRi*?Wwz*@Shx6uqHEzKO`!EgDbbN_nAspz9Ysw}ozl zJjUea<%KRsc`KqvNS(!dj8NQ@up`l)=DIKk_RS5)eZF#xus+y#WH@}gX1W-{I!p}r zT2Irp1!CX9;ixB9@`W}K7Be$^Q48Hk!Xg7u@0#b^k8@cRj{4AS-}BII=}<3q=#R05 zh8~5^(EJ+=*C5zZK2bkn-+Drxvu?L zT!#_0UFv+-*8VV#(Kl*s3*C0YuIm%ESn4$IG1{-i^C%V8g|pCo$HlV@ofeBbuD$hRS?ZHs*UP!GCAF{$(2 zm-d~Z8xwiDg|01hQz8$x(BTEAgA%z#>U{Sv_%x0Y7rDBH4$m!QdE`Q=^WCdp)Qynt zk<+ElcOAi~8&P$UW2DY^F2Sf9?N3COHrIuS>+nQ6nwJ-X>u7&8vQG;guA}{h$e0#7 zTt`@ZWMB&&t|M$}L_>33dt67@oQRVxbO&*MK@s~~=(gY(X%U-R=vKo9QzBNj&@F@= zjfj}vLWlC&&yJYdLMO@#i5T5Nhw|EYix?zzzI)Xk<%N!ouuEOjbDIv`hKQambYq~~ z5fR-&SBi7VjIc?a@42-{{R^%Qzt}>D`WHPh{CEo;>R-gt@I5{qOJ``O$>=&~HMxOA zqds5tL$7f8*Id4=ltLp$7SQt@KqzdvokfS6xd@ z%Q8@Ivpu{=YMEc0eWMtE_&2}i_n^7la+a-M;ni}yr0cejT4bqJ_W>=?^k1E zV=>Z=2qB`3mcJ|cy!$krA5zWk=9Oy^=i0av=RFNU)RzSBU2L3orQAl0`(V2~gBJSC zD|!Ai$$Pc(T71{}o6DICKU@5~yxH!}?_`s@`Q2&KwTzqY(&f0By*1_Z-o2*O&&8;} zm+O(Ze^>G~mxZ?J8AFPbX*`pLwMdOlDc$^>ME&}iU(36+ zT--J0qS3zGDKajHzbJkeC2z)>j@3-{8${If%XRpdce|3$cg>=%qYnPuuW`Y(Ex&Q; z&+YMB^;hbt*MeK_-*jIZC*n++J@ckqzSc)xfw1XomC=;9DQ=wIB(;p2>W;UjwA^-$ z)GxQ|#n;&5=Twt_ZiCP_o)RP_-+4FLkJJ^5974JhOSxsg^53L^}c7hN}i5nqCNc$zit0MFWS=0zV_1BMqjDZI0x}o>Z>QsYwoXnrt zZ-(rfe60^ZPjFX=DuqYXDSHsWlQ&- zyF9kkHKm(kv8Cm&Mfoi^U6pD&KO9e5knAg))!nA-O=pApUEd@(ZMzaT-F2^~ajuuH zof2BHy=Cl8HRG=}N)j66zbe(FYZ-e}mpSd@WIqzp3r{dkK^TKD9|2FdcQ@#35tcwk z?dpYIu@|0lTo^(;0$QawSbp3(ghL2;f^nyj7lrp84Ox`eyc|*1RY8`EGDW#fL6$Ab zZwjLPtAacR@f{0sFSL9jkGu{XGj5mf90DM>MG)uG6vQ<)4N;`s7>OA(Lr z$+iEF$AfYHBbr@9HRP$jw21#-2>+*^g}4_p5Ky;7=o52AZqoIdhx%fLejx(vq$!Ac z{Y#G6Jn8X5ke1yuD%#;K%5ADe4#1*KM{*_ zxYlu->zm{hJePjRpXrO6+B?x+=OVZe1|gIqG;RBL@z87J9qH56AQoZrF9dPy-x_9O zpE(Hge<3WwwxtLw5LP3s`=1Scl5o!v#CbHuQno|40bw)3_Wyyf3+E%wVL!qlUpR{R z1j6ax5Y8d*MQ^)zbwB*Oq%~jJfrx_A#e zgkpqBgkcDye`6Sn{1bam0T*Gq2)$>4&qY{(un}P~!ZL)F2z3bc2tvOFVF$tBNj1GF=FKVelh34O;%GY6R;PAiTQiXaFP2_7w@l!M#C(p8KYHe z$XfCy(x&F=%loPNRDC^4R%F>m^E}MTfyuypdmp-tzDi$H#_F%@Tl5e0?fOUh4*g?& zr@jl@jVmQwE;X3+;(hpSydS@v$MgO?LAUDudMiCpZ>Nbe7KwSuuN^t!JC~d_Yb7ls3v?GOjaXjO&dX zjLyc5Mi=8Iqbo}CXD?&U{T22qdyTE)IXssSx-f?vd9qR(ngIEpTjv`R;WzNg{=05jjf!MniEw!f6K@O*_&~^g7ywj-mImRxFUU zW5KK!i(?zvCiWrQp)JwYXm4ukw0dp5_Kvm{<#F*$3>C}X-EhrPIfF@1N*_sPYvWROMCMXlk%{7DbHKmSy*t&}TwC zC}l$Cgj`(z{|%U%8;P5Em`gdL7HWgFT5Xc{jP|W=(`)nzz(-EjAJC`h58~-h(;wES z>yPL&^hfoX`eXVmeU3g?pQk^suh!S-YxOtvb^2R+z5X_=f144CtMexVlqZxq$};^^ zeXo8{|6D($f1w}N|Dqq!|EgoI6c~wtAVTUvRG+WUN1ODH{tjx}8T|}V^mC|js-YP= zX=|7a6A31Fj<3H=JH{^_zXPddpp`!Dj`4L(+iUDK#`ulnw~XHb=`xK!Cgp)Opz(VI z2ieCT7=L*DvGFG%of&_A{3Yb+hroQXckg~RMvV;=1Z_x7R80)>h+L(Ty41wgbVtgL zIL^04q>`l0UgNAOtSQ7@x%0oqjxGP6M2+DJ93+`!fM?Ts`Zirp-=Q1myYxN!KHWw? z;#TelR8b_4;vInTodjIb1AH2Pm`~@A@JIPfK8HWapWzGnC8fRckTOkqSedRoqRdbp zRc0!WDYKNvmD$Py<$YzdvQ7C=*{+dB7LQtQodGB zD`&L`<33}8albLym|}=|=l>tkc{h_h)`qoZAE;TtUS+E}YOZF}0=3p!kk$sst6(iu z>!97C4btw^inS80LaWk-Xv2VrzFV81_t9_F`|7tD?Tthu$w)R*j8db_C^ssMDr2xQ z#29MaWehWh8$dmxex{IAl16e#KDnE`PCL_Fng_&L0WGAX={-Ol1!GSFtJEDh7=c9y|1SvGUB9F`~hUpE`T2C_TYAoRqgtejP{DmIu6V~?^G z=$Y5Bwd_sw&l}iwUdacm52%aPm(}I!Dxk~WRM)BX>U-)Y^?mgNV9mCwyELM8(qgn^ zEmcd?($R}&YEFHVzS$UWJVb6IWNhy57*c-6vc!31{*L7h@|ER6ohSG1{FyM~XY@ap z|3B3q!Er>Gfl%G-nBySVB24w^u1?Q}UIek9h{f^eAuL2#;!8K3)AFm*P5NIGuexgA zwMef=*o3g{s%^ifyr0Xn7iBdM2a!Gq%c=+3FNYKm7r6tt!-?d6QcEV0=jrt{7U<2L zbQj%6575t8C<_O6^L@6Jeavnp<4a5X}WR6D5AY9}B&Z%}Vk`>D69@oIlHK}}SX)MPb9O;yv?e6>h*sczMy4p0ZF zcdEr|iCU_))1tILU=4;_^JoLKfm*3nrj=_$wY#+O+C=Ss?E!6pwoqH7J+Bk2!ccTg zH*}M3!77X!^)C8NdRP4qdN=)#dUyS1y@!5_-c#?T$LYQGe){ctyxw0=)RT0(?$Gn} z0^OyLG6Ib@Mq8tu5p47}ZZ-NEw;BD6+l_dmzmZ_18tI1J$S^VuhmmbKja*}-aSyzV z$ALx@*tpgt3{RmGS_lig;ycK7q!jo6cJeUjPBIJmipkTUQmlS?hLpi;ejX^yrJzb6 ziC-sGc!F<};bbFu4=v+X@@F!N90pAWCj1C_06zAYz$u@i;baDlq(x){b~8iT{T}V+8_x_9Jh_?&G2S7B-9b;z{fkvHpg=C05?B ztypt&FWb+@@rmq9^v55w)BF?uSKgQZjUVIL!1{iPcau{}C@)Z=lvw_t(pl-xXDTVm zgM2Ca=Q@5|c@w>Hkn(}@jnY9mt9-9yDwotYO0F8LwpWIs4n-;>P=C58_o!XfIOTq| zuR2_ruHLPVSJtZ&)Q6N0)rZwOWv}|CW>Se})jFtoT1V|hb%=J87N?HZ`e=RBi9inb zSMS#nwOsW9Enmx5pAf5j)Hz~(kNPC~v8UAK@J#=#?$tim4r#5#`W~${>Xg6M6YqiT zv?RR$U8iN}*XyyG3-zl;^XL=x`?Xi$#XhRNra!JP)Yj{Z^d;JMyvZ%oKEb=(N^Ot6 zN?)z*6RUr;16cR7TRR4C^;7LTeV=|nI}fk*FWN=@sD2#v_k?~*SM_gumSBw?<3S*_QQeSDjVbtlb8EcKV^w*6I#=H6& z<2_@O{-&|j*rvZ_Y&SmA-!^s`yY+XBKN|=1ca6iwU-b8kzZ!qj-#30V8uTqDX43TS zruL=~{S%Yjl&kMH6`BgMF3Dwb>7Sb(H9e{yGR-&5*S|2;nKtW(P5VvX=-*&{kJT`V zl{!W%u@=XOHV-lnGCG?}&85bTVug*-#r&xGQKK7HuGoz~S~glfGWuBdTlO0%mM^T? zMyfT(?+v5gZ;fq+Io$S`ZLaw{+tapJ%s<&)^{18qtWD`-sqnwezn|qx;dWifLRyU2DBBxOXkkvr`p9MWh&VlBV^FYu%MJ|CBkOo-hv#`vY z$vgBGdJEY@d(qzHQ+g|Gbw6x%3i&6U3W}uD=ybf3&!9{3rYx+tAFTJEGzE6sKp&PC z`-HUEXJN5j=wjgPyVDJ@*#7igX{$S6s}IsYvxnFtuu@^GtzoNA;w}3rP$+wvJ;TD- zv+Oe#$vy`SWM8nqz&rXY?73Ll^H^!mk4bx8F70`pwC63b=doer%UU7URv*BX}xbs z>;0>C5Y`(Zt+#`;-Xv+gxv<_?%`H~vX+vPKOSE@krK`2SNy|JVE%OpA^MLNBAA|+A zmKHctTHq#Wft#fTZjlzaO6R?-m#NB>!W^;n_Bxl;rE0&*zZZdr_AmBp7wjg9476&m#}k-zp!&lJ89>grJdg|?fgyu zRR1)~Td?z&eRkeSj^f0USm?Tto1pCuQb`;}(KL*s^~D>063B%2i8PEIW|A!Qy<*ja zAKou=F%sy)=!3r;1!#qKp$zMDDnLQ#Ye$eaWF)zlgp>QwOJ65jgsx}{UL)Pm7r#z= z$QI!i*%tId-+L5C_y;+Gv8hwkN;0U8`eSuaE7~3_Dne-}xff$!;bbhVJDS`_Z=jvY z1dM>?k^AvxQ$${Z{ohTN(@}I3c~7pT*+eJNN#rB?0DX$=pik5JhyHBWGdPk>nig{&Dgho5P+X=h^e@MatMqY$es% zDz*ws{N7-1&;aq~LtC-8*xR%4rd$b+f%r?^ywvBD0q3k2JgNCtvY#)ta z2iV_eB>Ox2lHSO^V&Bqk?4RtPv==+i&eJ&2tJB{61pkuWinc(fw<#vYMBR!-u~H9K z^|Yb`mDWmYTCB8H+R+jvR0*YJ==q~*xpJKnL#xpL_ohRXTa{bseM&#&b~;Y!uk@!i zSPNvQ6BLJ%LnkVEN&%g$xD+>iP#K^Mpbsf`D1+!UtQ8tfrz_RUz4Q^ZHR1FzjH33U zv(@qHc>07oRh>%bpp{ubpTxSM#q?z{5=vL9uc#~N>*^b79eo4udh6&Kb+fveuEm?) zR{Ey;iTVj$r|wpd(YMs&>Ph;s`n7tR?$M}5>0XU%obJ>r(eNy zOrWRWH#+EP_>TGX8@)&`qTlLneKu)xbbYiwni&}9s$nLyI8&Gzqg-gOF@idq z1&L8#)<$>=teqH*Vj&of0_@!2hQ?ws0(=AOV*J7A#(E2zWVgZ^zhJ4BzgWIwX_oIT z7ulVbOIDSYS#_(K4Y#(k_GY83w_0yy^R0!}LiUVxqIDu$V6C-IV$WJ1us+6~vp#N} z%T`$DSr@X^)@DkJ>ta@Kt+TFSZ(G+{-(v6hsebL*yMCd5p=_^TgkKcf=hxA% zBRk+X-ETVk%x{L@5_ZsUso!$;Prq0FRvfXY=-U(K| zxrOD&pigJ8{#a-`qXke%SJ=N!Zh`$apxd;Bm{3gZW2WXfUbiNa0m7i)_y%~ zeFQM}BVpk;OAAky7M?0CJVV-crnK!lq*V`+R((ILI)qG;_WX#n=NTBK=uBq8o(sw2 zu;fu>J#4s|>=Ps0=-cn3HROP_oE!P6Nsa4=TyPihE5G( z$<&0g@pjY#pS%OLvW_f<`mr0?jj-6>tTzqBxX10V+eDU5gD~ci0b6we%Nr)fI%qhn zWs_j5uz%V?+W$?k{}r^mwEsA1`MufOu>4!4<@d$-`$l>j+W~9uC++-pY3CWR^KYn= zorRqjh!J;MBu3n+3w}W+^@zCzS_1E204?Jsyo3(rL-|O07rgzcbTr1=o}u?)UElk3 z9RHYqOl$crzKc$hEyZNC5hgkXt%HRwmaW4J@brV|OK2V1(q*!Bcp0rj6kU$>fYJ07 zv=Om%g>s{EBVDE3gf`-J<#x0Y8)X~u9{i3+=q8NhJxaG?+;a1wLAS*wi@T~9-2(QQw!BX`V-zd zJ+wisREN_>b(}h$>7pHBCbT0jv2e9ctz*}Vwu8lrR)gIj+6&eh?Zs)<-L#tYwv$@pzjJ*0yQ8*f8x=?NjzAZJ)M}jlpcj z-`Ty|SK3$Xe(hWBTULv?Cx12xo=Y1x8J%$hJ^~qz; z=^nH=i)34~R<<>7%C_ch+19)#+nP&l zWczXv?TeW;pk=w0`&kRD1w77LWcBdg);q0t^4moF!uyH#h2JjP7v5jAFWiClWdYB( zK4)FT-Dp|f<^!xBSau&3AewX!A>t}qJ^@#O2zsGvU`Yj)a7AKU~h_;45A=(-~ z*RQwV?ffafTt5$A=vVAl%wO^=_Z!NW`Q7a|mcQmV*>5s`%kN>o>AYUFM|?fnqm_K0 zXo>h?zt8=S@xS68l_=W|+YY6JXs48D(Kaa^c})4SitZKTD%Mw=s*J64RgS4# zRI#dJM8)iiO%)d^&6PbW%PS`%2l9yR*mi|RAfKSAUPA6B>Vf@06(N;oS$_A**z%K= zlTo%fmI&gAl2B@F`5Ad^7s{Vnv8w!hh0*LNSC02PO68X-SXDyRf{ImD3x03<56<3J z5mXUX5mME&A_TFZ7gwQu9fj#yZTkU4|1neO~H{fD+j@{e~EINZ5>qQ zsVw&f@S#;6NyU{T(Ka+u4QyG2E8LqBn#q;ZKr<^BU4!O8H(%B7=YrF$VYlyocUS=OVpx@2WZeaW$sOQj*DF{Op2{SazP=anum-Bx;}^ms|hf0hQ9jzGPsEggq2qZv)TEYF6G3R);> z3GB5cttwp$OKnN(k-lu1|Dm)KSNEUNL9{4nQ~rfcHNPufxuo;8sq}0~=h6$M4c8*H zR9BN3tz0u|YK?wn+}7I~{Twd~Yq6zo$z>{wE{iRTEq9dXmvt@c0qP?uUbdnsXi2X{ znPs_UW6E4*gUZUwhL(+lTvIl=Y+BjU--Q-kE@gh%%(6L`Q_V}Ce@$)k9M_gFmtB(3 zL}e?=R{zJeu54Y|2GC~EcF?Y0rt;vHRJOnD=)a{y*T`|AtT|mQyP8fzbFS>7_ro^e zm?XX?S<8P93T$~*ms1ht|4N<8JC}F+7wTL7>!@dodc5-2pv15$rpXw(mc%s0ucb0i-nx6Y9AMKuNav*cb&WKMr| z2=+>pc@~L0{FIaz$oyuR$6J1((6AJl=Yq_$N9I|stioP4A*+VaE0-j{S@PjtSzUyE z8^p1chlPg9@|oOIoYmG<=58urR?@RcMS= za<4r~CN#InJR9WoE|gq8YoPkX5f@8-NZyAea7Lc-J znW7}j+GxZMvS7406Qjn!70TJ6e9Q|KVSXr(2#jbDMv5yiBUFVMp|)~H=z2L`+zsQ! zD=_B#8tCTB^Fs9)ffgu@o@5&+j%+8pF(Umb5F7pFXmX02DN2=N%IR`Uc__w|Lor_@ z=7{c{uat#n>_V z3}eTw$w7v(WAcRi`di^-RArs$-cDf&u`6qE18 zOcA*xM~W594%Jf?=(zVVcO>S8EOK7R%C=*q*iX(21<3K@R&u;J9OK3Rq|xk0_9ML! zsO(m>iySHLCP#{U$&upTyo8t0cs`g9p^1D9A48Mnh%jsrb4O`%T-c6r;Z}4o=8J;p z-53!LrlXV)C4^SX`Jz9`vEZ?oFS-@8Lt>_ArkoL)B}aj0%TeH`(F9|nWEKlrsxeZ!b`WRFRQQ8-Kv-) z+Arsb4#+v8&%|giJt*ghK9_Suhs2mKJp+Uwr9a5op&!M_FTDh`UsZmgU&x*g-Tb6Gsl z&rh>teY5@nOV$6WpJVCzdHs8qq5q&?WLf%824mSkFKev82sJ`kA#kequ_7_s%Ut*l zY9Vu*pEEzl2FMZKfpSJ@u$&PZB4>o|k~2bMF=D%jO%!9b>>)84%N~<6KTGAz&&wEx z&11{`3j9251;$}3*=ji#v_{SatrcUh>=QW)bU=)yvd_e94?8Djd)RkkwuhY;qp0kH znC)Sh>#_a7E)qs>gKet7OqpSq0?Cc9-(oJO6=rzEoUe+Ro9kh>H-L1^`TPO%yxn1Q zX3`6DzBbYi6a?E&fyJf)D;x}~b^v|lB!e)YD`tMWz>@F5o?|fcbE~xEREhRTC$E6) zG#f#(wFdxgQ zm9C(#Qh%U7UPIfk8WadSe}}e~Sm$;?J8#9xjh&!KAoq6DDEc-1hDHl?EWHkGfl9AO zTVSF$F$?p1WBv&rAn z=h-FJKwpqEPD?T4*!iw6Fbv4flcgA>-jDG7Wy7AD{*ua zR_pYko3UaifqsD2CzpPNcIQrd0PRi*{TnYs>vN1(^J@Bc+4>yEY}G^bAHaz}Lcb9> zae9`|;`-vKlJ6g|(M<}cD81jdnRKsmm`3?Lm>F%yuEtFhi?4PV3j_&UCh z+4v41-Te6(-oRR62Heb|uYP$F3;AR{}n7$shbXV)u9N-~R8(v&pT z8ME7Y>_)U@F4jfNZnK-j>^AGF6e}gH8`j`dvF>914eN;&IKx?-ScSuSW7fNx-KvaL z#xb6xQ5FwW`x7ixATgO8SjQ_is}<-Bs94OV_s zeq=+GpOl~2P^D36WOs?Sl?_u3wG|sJ+E!Kryk|Q$L12K`M4&yR*#l^8JFzK1ea5f{ zf&9FIO$EZ|M)r`He`C`y|CY)gR_&^tJ)vf**=&wLowIpBg66aNn3r?2XMhDAz@7sh zw3ICZYWo59B3kVi*;1@R$#+pxF0Y{H*uTxM=g%~3v?j|Mo>%Up+Jpi@o>$lFL z53JzVYgO7XehW}aBlxXYO*NAD#fqwGej8SXjp6Y?vX1BdwF%lJo&Y@S13VQohckJa zHcOkubG6ypY@UaeVsm)DHdmX=3xIQ4zzczTUC3QPzG9vY-$Gr?J=zj&2_Jy@#?^eF zR;R7uRaj$H&xc@*)doIX+o)~iBehN1WQQ_V`ojKvvB2N*dLVG^e7%^@ zcO#`n!q(l>a70MUT5#(XeuGi=yk>^xcXfRv0OP0uGGsL~g>j;>0gn0-|WF#DRS6arJ6 zsgwaxoU2qB#YV9*%qTS~l;K!80lPQuG44^ui+)|fVj}}~k98>bE45gQGFh2~^(YT1 zld*=;s!YYI%2vul_-cF`Wtypjse|&c>3Y-k%5=+UOSSR{dfdIT#|=XtYDQ0K1%=9f zH(d6+;qZe4@m1Pzgechu>%dUNV4uz)6}_*(UG)H|@Opcq_w5C;NiW+9-?uk>>OP?M z=xuLBe=M+Bek2|giheo?y?ioALyw(?o?fi*GfS_$HG1I;=(9k9(l^&6ik(XQ7Dtbq zhrT}_#L+Joz*i^)S+mp^2hV&~%{JK@&q`AOp{j_c4J;;dp!K0{u-i2TwQ`=oAkB z1J_0G21cc|SWg3uK&nvbi&!L5MZp>yd?8+XBsx5j$)pvqD~pIu7X!7TOVo-E@8mVe zuhZ9wO5dPwKwd-FAm>{8CgghhE?nUE=zHMr)Azx*;Y-sR{fK@94zC4A+(mbR@1y&` zvF-r;I6Y2m!h<0K38P>YsmkyzO*9~Q1;uI~tnw5qe!vCF%_6)RV&E+&7;*^1KL!BX zij{xDvjLA}QA8E~4fGw^4UlhSUBI!@6!~vuw?f9cA#6=#NjO$A!?*K+oW(x|2)_p$ zeh;{KLox~P2Qs`L@M2aBj^T1}_(0&5Y%usRHVnLmO&~TljXjL=A7PJx!v})?3APMJ zd6~UJ+F{kv3TR$suR>mlH!UVs=|X;;p`Ktp(wpEw=0Lxmt%tmUZ2;fMHsZ`Tu}$Fb zv-fe0AFvN_2evY-kriGP^xN5XFKS!5sGlSfEij?mhe-@O$~a;5EDkyp}(JqfFrskpQgEdjvAPE^v5V(7@{g z$67sTp5jm8xWExX#+nnz3;9CG&+%oD;fI044+DoE1`a!U@_uDMGOP8XUeFID9qe;j57Vd_g^s_z7fa=Mxh4xfw!!Y9iF&r-9AAv`m1cxKoN#1t`Mwax&@@XWyBnTdZ;QJ=?a z$zpXe>i6J3s6T)=s;K{XFURl}z7@|Q!=D3(KSvDV&w;no za8KaX1w(GHg^&RFcA=2Nv~b7~S_ILs-aV4E!m6VvC!CVW2dKWKk|zNglc z*x>CYq5Nbm8TwSs4xXu@h0(IKEaV&6No5|ml0l|!!3Dj-*B zRp7vk5fj!_4uOpIP1rh28wMFGEWzP5;;i5`g2QVBAFGW8hwq4-6SN7)gY{65Yqd!z z2Yw{vXS8P^FVGf1hQA2DNJIZ6JVj!Gr}!#S@GXGVSWjQ4)q$_k)4R_3HjqI0k(_04jnzFDr+Hw$kPTYu6TNr;Zl6eE)848(Y=&Pkx~DIvqB1jo8AaEp%d0(_gn zN^I~fZN#JlDF+$;C3t&196VBwB)T4@cY@qm?~VL@^xN>&hkkm0$nZG9;c=5`HIS_?_VJJHf?@ zbARD?5boHC)^`(C_@<;SeAA=g$MtVWuw32lFITtw3$K*;%hm1vSlt$g zQP(y`FnD{TJu&dDi4fu^MvsUIzG)2P>y7J)&4|UtK-d|#rqaW_`}8dxni5_#a!g2SUF8a!J3 zi;jV1?>OQFV*)hrZo%Q*f=@A~fQxTAn2o7ed94`_Vg0T6f{2ww3V#1iT5%@QrTGGLCAN~&~)bf17Md!9A^0&eXy%yxJdc{Y2_6&-Q~X4ySGvh2y*Q}SKQQma#s zyKU(nccEiYhLYVav#0w=R!~;8W00rZGubmIyUueoabn`cq;albQK~1m=+G5PvS;}y zXP1{mDNW=U|T*5_BVn%G*XD7 zDI+!`Hlm>=#aC!I2+GUvE}FErd-}US96hW zNzU$x!Op!+dnG!Ok`f~lJEfgYJDu1$>v-1j#BTPz_Ps@i1ox6_L6g)xzP7e`jwac) zAhBm+Uy(mCL0pZXM8_50q`Nv!Ompr{bQB%Me)&Zw5E+n2wJeGJe z@eH2s`NT{1Qw}9%u85OZlHm$YvZahp3Q7t|igJy1jZTVjZ*mNB9&zqW>XOtwse4*^ zQe09$@FdjfSXV@H*QPp|WKVJ?6=vC8o$RNQNbxRtN zG@|IJJ1J$OXM4&<@Z6;8)F^L_gFG(nqGNSZZPL`V^5oUYtCMCV%@A=~x%b*UJ(6a7 zdU$%cdM3?t^#ot&>M7FB!Pv6I(<8kS@$#fqGOSHnkDPszHi5P!?G(X&Hfe9tk)(qN zN3x4O>pbhyXC)nXB_y44ElX+uT}V2Ma6!sQCo9SLjJCl7VyCy#V_ zT*bIg*5onCHOV!|Gt7A~d2;eJ@R`YTTq|9{$xD;x3oE{w(uZBG@ut&ledFUbR9_Ej+O&$2k2<>q2!}E z`*ZfYW85(*B>6=0=@c#bT=K;flA@(pQvzL6Qi4GdUh>2j9YUMg3Dnts+%YJnoBeo7 zPuKL6%9OqXtSJctNJ@SSN=tDdT`UOOWZcAEvr;^uVs|Yxl_|rrj;D-H8JiuEGBIUJ zGn#IX%W!4c9Imw3?1+qjlvycrQx>FLN@cEPUdpadS&XnOy{}I*H)Ul`0Jbkksq@TF zsdp{2U%k+mg&*9WLGJ4;|*#x>p2)jU{Jj9+w*$2{Ar>#rdc|dx~KyNuG3k1|;2z`vr1`xI@wd(*s59CD)Gh zU`SE!nDo8~5$T=MJ0snt=n(RE%PMqtcXvJDXaXa0G?!xpP z-~-clr0+>TkbcWf_I@r;#(sN}+-`T2+nx482jC}OvJdoJbkDX| z`)IJe)IJzgjWEJqow?XP&Jk~~wNJIr$nIvJZJ+0~lcvM&d%0L7hBy6-3w9TCi^z~PWL>K_U;2u;oxODVn1#_Rdf{fW{Lf*dz1Zw zJIUSv|80*9u^DCuyr*kW*K6pme?a>^U2Z8_UxL`Bcq3VNk*THc*hvW zn2Z!qW=3v?D`Sv*c}980(2SAU-Lkv6my3`wM#x^usL7a|F)d?e#+;1#8H?QOGnQto z$XK1RE@MO1diSP`&8}gtVeU;$l(9WySH}K~LpcG0GL9n0iOilEr!&rFoI`vulel-n z$JIQGGOdUMGlMfD+(()yvs30oLGFV-&B4sh?t_`#pzE30H!~qK4Kys%k(r-4%zZr5 zgHVigB~nK+M`w<8pF++lnbR|8W$tjz6>YAM=KAW@+04am-1p37nJY8vvUXFS7CT-7 zSnZ>wj@8KHa;#{c)3Msg9P2#gjt!2@I6}E+kfd@~f@8a5JJLfPyFmNJ(LK->9rcWK z1v(Bn$2pEVPB>0G&N(h-6TDYx*;a5t!JaYM5wca7oZT7kPM0aWlV@^vCwCHfXV^g+ z+UUO7j_d?Tj?3i9&d>J13!IZ(nLR9fboN-!9MDXJ`Or;>72J*)8!wIaLj3iqar0fb}O zCq0|9&v-WAO|cnepBJg@OM*LD6UkJNGstNm*bss|M?KpST97lu8RFUR+3$>U#vm5k z_InOFyC6Q{?4C1J_%4E+al(J`rB34^DFgU_~m)X?HrEh0cKhLsvTo zJ4b+za|JqUol~7l1_TZWcFur@zYt-zbDmFv$M0P3T;*I_bfV~lbG>uDj5j&A$=J(x zI(OoYZ?E&X6Ys;$gL2Ce=P4OOC*xDjv#z<$3(kfdCC7|5sVyiB6rB^B(>13@PM@6k zoRpl*oZK8&&Y+y~oS``*bH?PG`5-Cq`#J~Y1`FtW$xJAVY#Do$L1NieRC6X({df4e3a_BM#;s< z>5 zg`2KW?ncm-+#R`lau4M76O?-(_pqd6IJ1+vXYvfhmvYY|ToQ7gQFtMbA!d0tkRdsa z_X`yDv)iKb1OnPv@TlT@-(}jv$s7TMGgUf(s%FI)OaRs9Qnj zf^HyBLC=D|1qlUd1&)IJ0#Bi>pt$hD6)Gq$nD~n{96$`V|UFDzrmaSU3<1R|Xf3D4Z>6Tw!(LIMD3E zTA7|&xE%5F!Wl9(uW(`E5~P~h;V;Uo3b*-aE#ggu>zn1?R=BgMYhiWPsKrT=wLN3D4%QUE{ z9P&t+9$7S|s0K6#VRF&5qM4B97cDAUTC@VRx@cX|hN8`|@m)pxF-Cb5{m|*6b43@$ zxSJeh3wA}gI=MPyGz()-7=`NVN)Yc_t~8g!l`qC?#3+qxw0o0ltQa$KO>s>hKwPsh zQnvu(3>Zg{Z}xSrdc1k#&Dym`yeW&fVb>YgdGRibX=MX%sX^ZNLw6Uv3*s%$`#vY% z-Uhl$-GkjD;LDA3*Se>=XSiqM9cCWF63}w@D)(A9ynFXH_fGd-pErKoead|n-m~<7 zJpo>S*AwgMit)2P@K509Ne|BjuMEBzG$TD@;9E`hO!N6fi#$s`D}*N_y_M~rUDCTa zB>W2LM_dG)0v?0#2nIw9=!ALdcEBsBKs^Zj2+#?jFrXa#31-VckrFeaN!%uvxXt!J zUv|Y7fyr(o@gp1v>YITO5hxN4H0Le&f|o#%_)8RtpG1+EB#I=1-7(LAOXO36)q91t<~+uq5JJhziLCence#KSF^XDF8yGkQ8BG zfgDjJazrNrIpRfu>qLAzgpom@Fo_(|BxZz5%!r>vi!g~6F-x>au*8WN5*rdEu_59c zWlM2y1VSWIB1AMGL|()73WP|IM2Lh-gh+@)h?pcoBvK+oG>H(=BtnEsgb0@i5kH9# z5!jGWAUncCN5KZDiY`}xO2pq>Ux{QPZ4}$M80kOIqniX^f@dIx4Rp^0S z1qX5!TkB~(%6yx?gJa=ay^w)l1s6yVo4~Jv1HTGPia>ubCeR;NiT+@i@Y6{EzV>PW z#b9D4Y_%}V@&O5AB^~koS3hWgZiPnRK~#wZVG;?V0tpfhd_)9`fL|fd>f?-4nP#g4lor8GsT7GW>5l zyMx_@|vwgFK#%hb+({9EjOk>^q4~ zLOH#{X zm-rD?;zyXoj|52kh$8VL3fscAAdf(iC=x}YO8f|u_z^|oM-+j`#aRK73rvSVkyr#G z7d)7UfQRDS>r~=LsKk$0Bz}ZR{0IYnfIHkSRd$f(tB( zBC#Zj#F8jL@IDF6Ts{{$y@-;h`O_$2KA#Vyi9nYqK$k4Qv7QCSgh`Bv0*uLG$S?30 zpkKmYg!~eJ3Gy;vPng7>FknxB-@zA8UxR)Xa41aTP?*G_Fo{E90uPM)CU7Xh5{F`u zIFw-Vbu6Gi%!(Q43_ryW90+0X2qgmekw_&Hs1t!wQ6);H6>!4%nwf&B3Ls4|j7qpb z3!?;}g~0_@g$lGVu}GAPMWR%g_-;Q8s#2*WHhjIm3i1$T2;`wauP}*TVL-2pgggou zmOz0eMmfL|lTd*rCar-bt_7c@Ou}*Tl^x`nq2T|^#VQNPk1LOZ&sLs*M&Mi+a4t_1 zE57-<2(m!9Fo85i9w3cT0+7bw`;>ho)`wv^rX0gHdeJMu8>1ZHjlqF8#=Zi>!X$=; zN(>7Xcw?Y=fH%ff0B=le0&h&(0B>xN7_8{9LH1WM^REV|tssNI1?EK&U-|)$P=OJ{ zmuG>Y6KG_}0{@~k!@n?ze_;~;V#3#GvEB+_qD@5}fqzjX{zZ}a7Yh7K4z}h31!I#a zm;i}_QGkLef(GBM#a3XIfoc(m7^_6Ys1gxl01@*7@F`2wB|y*!EQ|^)%uC?QfQX?I z5u-{(j3E&*szk)7K*RuNhqWkgA*aB_bP}j$$RDU5fNxQ^VBf94$fyz{!zD&Wl^7W= zF*00YWVpo0nA8iv$lyCgz{PNZb_VW4pkug1$Eacj3-BXBS`e@y_}VwHDnLGi3v`SH z=$J^z0t=%^EQ~6#Fsi^mllB7t4Bk%z&Q9yE^@l8QFF=ZXMZ; z(#^`s8d+0=KPO>LjV9}~fwHDXaITvOxXni~1~NjI!R!J2KLtf>jXnwlukS+0E8 zDR31io$)WGLVQP|*i{TH!O9vhSJp&uWsQ?7Yc#H`(YUfkt=U*>6D41W0B5cXUu?&BBEARu`>yW;W%Z54 z)i*A#zHxE&jf<;qTwHw<%GEb6tiJg<*3RJFxjbtMkp zr8t7Kc*FH3)-)Y=9S4oo<5=B;_vix8;ae0g*98~WK)DsSiq$a~3@M%QCO}Sf!@8au6$5Xtz6rFf>j~w$9v9a2#DkU!Po8Gby(e_a&?a;tJ9S(T-~E$bAa%Gz_{WO#TP1vl}-p;C|g<{-jLHUvSC*GoQ4hl7XNzxHvj(Y z@a&yA)9U^8jg>|98w(2R+XFp|q5^@^EydGInoD*DdNwF!`x^R{FK(EcaWs8I;6ipw zVMWpKvYr)D9U*hyI$NP#=jcQe*L#5{=YNc!=D zuGpVK>0ApLlNv(O&!%4}+n3Rj(UN{y{$`sIk})-7s=p;;YT=2FkP*qyJtNZQ;K;xh zjt#sFuW&OGvGtQJqbMUSBL`HGSPF)Uj7k3%YBTIGDPs^M!?=yi7?aT~xsu?Yg*2p;_PK8@il>Q+3T}66m2QLlD#>5XMK$l9Q5?fsj|9QrenRl2e)EuTRMt!Z5sH zK~7UJG;%*7XL8Q8+++!m%?^ecvainwTn=2$nU}LLH#uiX&hng9W#e+zmTk&u&DjzR z8?oP>v#V%%&fc5@Ifu(O`B#?C%Q=>FGUrUr`P|MqmvY*3+H)Q7!e0z|xt@Vtb350U ziy;00;lTzxz)J^*s4o%1ar%`*_XBjhecSRqHbt8>@Y52?`Vhv&A{54YvH zo50_SZ8yRW;=bH{w$IN!h;StLNJk&=<$dn)+~dN{J(YW^Lz;WGgSxhFs&CB00GnY= z?uERN+{>U9Y(`#q!>oo`dEN6O^St$w>nGGt&P&Yemp3SHP(eXn8umH$GwP>x^c&b0 zVXKhiqDIlO19WERp;ycsS>etblh>R#DQ{|COWv%!xs^qE3-T5tEX!NgeM|Q(^$YV> zGOWp4Uq26FN#2IM&3W4pcING=U!J!=?@;}6?2lqQf$enOxx9;cSMs%dcfOS$f!?@B zepJ4%epUV2{G|HU{FM4uUu$*Ke=iRlm0&2AiY6Q_va52MW3Z z4;S>TKP*@fQ;<-QjL4~QhXn;?1=R(Cf}sT?3Py9AUoftCcm1*YlLZqCW@2n2VG6eC z*k;zBVK`s1vS3dA`TFzVEh<=Aq7@L`W?Q+4{f_()C3yvV14RkFyUT_MW3k8?)iG+|sqhs_D=y@%~2vry!NGJ?193&x- zZ0`%Z2T}_oZM{No>8`@W!hWUOB^0J%%PA}hWEED}p>*Eop|G}e9xMziol!Wla7>9( z*jzZNaB4$j;oQQO!dbutMNx%|3zr4T3RiMew7yWn8f+b-r1gaxaI_h?0leT=6oq)3 zgWEPa3Y5Xd(ZZdDdkXg#9+Kn2qtrT4c&_ktFq|vASa`)hD-Z}&7imTABC9B(s0Xxs z+!~9LiigUGstAJ*gcNLYWOMRxR8oY?2FD#+V^Je`_SOh~C1^jk;n;?7Z^v#dYATw5 zG$$8LE1FR>yJ%j~!lEUCwxZ=ls{(2WCo;een)#LeD9l zALv=U2w_F>Qfw=VR~N4f%qeaw-den=c&n`$m|wiRcwh0s;v*%I#m9?JT?-eAFBG3G zz7SZ1ZE5l4ie30CsN5bfO2SLJmqZ52O1veBCH+d$N^(kyN-9cfO9lm2l#DDHhRw!i z?3+uP1FHiou&omsVGOoOfwq#VB`twXfvtfZB`qbh5ayOF2+S{8R?)c?w6M?DP2{%oMBbzTJSb@T&<;BKpg}g3G^)8WnYcJU8Q?V50oA*J%&Hb8WKuR z22KTz2TqlqDLs!K`RwN-xw4+6?WLE>96*FJN0|q*&N9BcE?h3_Dn}i$K*f|LVD6Ju zmRyz!%#uImHYnwb%Z65<;y{O#FUCvciu54o2l(Ok% zGaI^>%?Sj`=9eukTZB!*in7&Z>&n_7*;KZ*A+jN|Y+u=tvK`mLZX8QE*x&{Kc-g74 zvt<{`!^KRn46`QN0xib6U&84Yv_jnEYB$~Dz7N7Egw`q4D-I6 z@{#2uu`dEOro6d)5^|3|rNO|g>+>+Hd=`)90&#Yj^;Jj!wXA%lY#621HVi6XQ@);I zTlt3a%@s2$Fcy)}FtU7S!?5!G!LUd6m~kSMA0i$tKUaRD{B-#_IjYdgFP2}a&=A}e zR>RbWF%=PC4B&Ny=4;fXiXNa_Fmv-&B-ug61fPeBl!mzsiyQ2)pdtg*vWCSKd8oU} z3V+3ris2QF6-^ZrDkfJE#itz99)n8#_-N1DN+YmM(h>eao`v&eCco5;pz~ci?4Lm#W!obUT z@l;5q@dX>*nd`*LewBkN(<*Z+iz+KBYe5gI99cQ0vbpjQ!k%kkO{Iio*o3#Hasjr* zLC(s`N#w34ZUAnm++4Y>awn+$v;$jJO_ftCTPkN&&Sf}MolN`7U}-Vzu7tcJH9IXk zB|9~nBebOPH7QgctvrEny7F9=ukvD*4`E}~#>y*r36;BQdF_xYf0b1gQPra=3XjQgK!?<5<+`bdo?~(R^7GQQ{5TZlOdrxD;QF-PpHPguQ~=Hp*r~sk}OKf zs;jF5)kCY7R*wjVrJxaN)>n_lejJY`0;g2Zte%d|me0X{e)S?At*Exc>gv_i>#7ej zY_0xcXyfrF!CEV*V{6*Dqk4DsKJbrJp9+TK+@GyJTYaI%s4=Q9*C_Z?E2x_An(j4` zHEA_ogtRY&#F~CJX>}88a%zg|Ce|(lRe^0-O>NB}U`x%&;MRiu7;Mc9Q)^GwOsbiR z&{8w2c2&)+ngum;5f;=eu31*I5@AoxnrmSXj_j}=`wcalYqr(wtl5Kjr|YKI?5{am za|qjsn$xvW2t7JN%|(WDH5WTLwLQSOf@2MMMYw{bB&HVJdf+Cg6|@yq8*!b@s9Ilb zQf&%uN9EO))K=CGLGUBUen{w4B+s=ZX# z(+(X}ZF`X8;NDZ$xvndy&#%0WE3m9Cp)RH_8P{O~Lvmdz=qz5Pa;4g#;JTRAb%DB} zbu$@807uu&uNzl4v2IG;bR5k`m~$=6uUk~Nv~C^33WRlatFg7!9k1I|cd%|Bwu5zB zvF)haeeL)-q{r)y5RccLsypRZ>ds=jP6We3osq!E4AC-GQ z{?q<*{)=*dNABv>M+C;z_mDd+^-1+94GZcs>htPLaF3!Ao7^uLjv0HC%)#rY$&4Da z70LLgLPjtR!!QCF(_oJ(Cdqh$s|t(X05MGWd9|rdqcBt5Xo`C)C{M`cS0qRH9c9&s_ww1o2c%=&^!syw?I8KkTdt zX@`a1396%9(ZK%rx4U|Eu@lc`uEx8bR@5sOoLhl=$ayVffyBjgAhF;ObWSBgNBKxJ zmHqO>5z=GC>BJ=JoFt0n*Fx~@8Fo5LiegzU7Hog@vt_WbRq*O-q<8l!pG_9T7qtZY3tnAJj2D|%E(m>PkD&6AVEeB~hZAok?#CM#kfP9d)1u%8 z;xQt=_>1St_@XSnwBiX!{)S z54GdJ^Xi-J4+7t4Z-kVks^uzCpl_lzgP0_u7j*Uz zsbjv-`fNc*4f!t%hHfE0gBEs@epb*`F6ithd>wDHK#JsRtTpX1IWP5}#5ak*5=~9x z`JN{}O->|{ZBKnmXw^@Al>B>%ZxF+YE0|U{q4Abk(N7}%Af?0)uF`+?MxwF5)AD_{74YF zl31%5>v=&no%~0L3h_lj^{OD=xg+T&liwjys9>Q)i@DbL0xGue6bx ze`dP>qGmNUhf1EctCT({9QA>#mjs!g@uW*gzef5sT41lL4k2dle3)m zFB9)0{+W15P#-FYv@o_&m#FW6lSrv4V;J~eDak?&qX?~A`!>NV7 zIV}3xRmOdh7*6SIN-LSGG339(y#0mJ43X*uVOhDRG*hYcla8K|LU-j%A6BrOk`Xxk(G%i2o2B)P{{Vmb9)1A^Si?0N{Y0wY zPCA|Z={%R|l8WA){32R-;;Q(ECn$Y_d59Jo-7ewn}LIeVZfbV2x|r zg?8T06erSVJgq$}e03`GfUj@E8cR=0pyuo3zs?l@Kxqmsua~&mdZx~+Pg^g1U89t3 zM*F!)^##IF4v8Iw{z2`_bnTL)A5gB+K0UeGME#FhKE*<-^dV|8)A}cIB>8=bix_tf z^EOzd>eCWe8_X8E zaE*P3I!Wy4^c8w1T0TL`f2V#WrH_dnJ&iVN1og)Rod#)oLiIahSu0_g+$?;RZBKiH z^sfaWg&%f?^X#5wjXW#%)#FU@D9`06&*d#5uO0OW^WbG#2Kiw^htvw~x|{YNpf7!x zd6+;ueW=;z>MKb1CemG)hkJ;Vh*dX4Ya-xWR1l9ML+rLu(P3qi2 zyn#ArY!sj*88;f4mr<=j{0rp{0XMZ`LfP7qgT>e9rT}s?7QteLG^_{eTr^M0koxTJ@Ks^q|CpTdOm`%4FOmLAeV?&z6&Z)aG41j9k5d ze-NqaGi(V>R|~;!Vk&mc-bMN%lx(yjksG-eN5U6YD#=B+nZKG={OsCUUur4)PF?M)$I{QBhQZ4 znmUEjw}j((nsG0(Bxg|PEc0NGk?v(Hy<7OI?FqXwE=OhR_sO5h7Wfg%WFSYa1IhUd zt$X5AhaG$sU`Zja-|&BQfDVEOeEhPiO~b-2N>&BiKUOB=1}Hy z1lwDK=&K_b_f}e7B&fFWT5e;VE+=O^ulMo7(Zi@2*^YJUI{l<(kG`Iu{tubTINBda z%UQzFdfLdiJy|cjho(I)96f~6UBs2dpHh=vNPU}SPCmY~{U&nA@tE399rpEFC~1xv zR9>4J{eWhVUfM*ehSGAOpnA4_C31B)V@)CU{0w&~F-jMF|FbuM=`0_ESV*+{8hSR3 z?LmE+{5u7;VYFXBoJ(wGx-XDU6;vOlX06Q;sX{-Xu+1oE#1|>69CXqo6*YnQDN|G~Z>D~C zYCes6=g7gKP$!Z)t4On7Q|+FPQiVQ58$hYep%>Ne5RN1KvyDJaP(Mi=YGeBZl;n?u z)+UqV`3(1~waHhXg65?52%+13pf6BoK>J*91_(#1B?&Zo|b ztF7QX$P~Ac-$GnWOd$TUy$L#9h1M-{9%YJ;GR1P6Bk1yzj-$W zVc}~(vN@zz5|>f`4dMZE))Kc!EH$2I^nKyzWyGf`ZDJV~(wbd&ql2*|#ox%8>VLI4 z#C6nMC$w%``Zv6py)oYkcNU`qu!_8|R+gJn zXng~5tf1pbO2dfXBSsK=2s#u=CB;7`78?8M-`YF;=h1MVA8AUSx z4^jFIW38|?ZJnT|wBGwk_n=NCb32%t_mZ}+np)bQM1C!?iu&W3N;5g*iT!z|{i)wS z$iZ2tf2C$Jv4nVoNHK;PgtLn$T_Uu)^RqL+PZ{@CLFF8!tZ~dWB-U0+Cz9?ZIZ>_% zs_o*XwQ73rDeN)d5!6RXKca6C)c#7HKM;qKGnDjT#>!&9=@Y)1PYZ8Qr&3UFq|SKi zXru>Ihj*XU7}ECLj5^Y9(eiL=_9Vxi#qXp|dz97ns@&U|EK=06q&tTr9L}}1PiSEt zEu@K5;e0{4OluqiD8C`ik&m)LXyr+2a4K)@={v+lh9hcjpRH@`+XVf753o$ zgrjUBZI873F^4^V{ zoFud(L{P0Gj#74kZjvX<+QagMT^qsY>~o0^5!?C9{as?atC{v+1imXz;nj9|p02jL zYKbxqY!BuE{y|TaboCNqSLWdovD*1-q_v&)|1OAc`Oj*zoiFl3C_4;dq|bi2*;4f!MJa<-o&p|yVG zR0~HRLp%3{d;op|>3yt`HaSzRjXC+0I8D&S)fcYGq(8DzH1z;=whKDy1=X=E=dp68 zS~s@vZnW8rZLb?^xf{#pA4HbFTE$p(a#gCwXs0jBEu5SyEIl7}d}3LBf$4ri3xnBW z2eZWvW?LQ1R0gxH4rbl`Q#AFrY56Ih>1tY7&Ag2zeHYPfBhPNR(CTGbSEF8Lx$SqN zuJyev)xlB=`fQe-d{04D7m-e3K2w;_-#Tyy4^PSEyb_(VqGKm*{+8#poafb@`ddg( zWUGi{u5M#a;;3(zRW#4#1M+Vq|9;x}gx=s2@?%&gZd$X;XAnz%5T%3ICg-#L=*;bT zYF;A!2Cs%j8;9pTxIFaD@s-y-NN&q@UnbiT}AnT1;F( zX*yGRLRz)@6!YLD=T4#3P0aZq;#A`6M3#)sl|@=3Iq59b=`7XhQmT4)@{0u3C!{>_ zypzatxr6+Vcy>$3`Gh68Txh*j@}?hU4(;|liJV^vI()>NDWzZ4**@{r+Xd*CU*)-^ zvHa6$Gc9Bg__igAuOwM82e@n+37L+?wcpl{rS{@*LbpUg< zS4vFzJ9B+29(>>-xq2coHG2x@JD>K;n>P`ZRQ^%869C8;UA z3r6&xqh>ktz*S+|b3Csl%;!GptYXgJ7j#S&bkY~86Qy)l#UrUeeW0GwyogTH)v#>7UyiL3|}p?$s=ylHU>DkFhk^wjDKMQ-6&14brSH{hZM1cUTMGmKKF?dg2`M zwNSxbf_N8<=sZiEXPMSGUQ@@JmVNCXm9(_CL{qy{VyQ<&s(YE1ouVqV%M^4sQS&f+ zgu{4$2kzC-!eL%b4=}BVZDfjl*jx3XP9K(DA9~K4n9nX!4%$7$Nlg7NsXMI?Q>h|y zJbNs9&@9})kutz(?9_Lr#T_OojcU!*11?A^nk`Op&VJ4Vev`OMI1@J!q$s^gHSlDF<~L%VZL3iY;7A zW;yhgbJw{_Q~MLoq6>50TlBRQmdr^(ygf}uXz#Op*z>B-(B?Bd$8Yl-iV*7cCIsA;+OuSvb z&aOT~TA}_Eq;20@LQW;GN*D17o>4cJ3|FhFKcK#kxLeLeoy;8G$uqi>HN`Qo=8&9d zA2L5S(wk|&B;O$x@O53O{d1%_zEVFX=dz$4L1g`^E6Kl^XWEzeyrigd)UC~?^m$@5 zbNge`l(m3cVm7Y5LB+HV}Kvin~e#18}=tU8-Qbo3#3$)4}?}$ zhZH0K)#O(aUlr81OIo@qG+y2#I_oKYMQHWu5bTdGcLb!@ z&{~AAEu{1ZEb|NG+{^seF#pSl_Y&!I^bFE}Bkm@$mscN?($h-Vs!M30gtb<}S!4+{ zOQavym@91@IpZWn-OfoSbMlC^7Tv|1+(6D0_PKAdg`cI(vn=Pcyo%4t9L@2xSW{<6 zTQAJKBDV90zyt&Z3r7VxEG(aEz>%16&5-;`R`EU~O(_6EA0xK~qLhQqu_(TT+YI^u?fjiMMo=Bf9F8D1FrOnB*Fzol=?=S2Z>9Z3wBJVkHqqCf zqUF~_A8$+pYRhSPDs`sPT7TAcf7Wk*>TpH6R!nP8h>kj*{_2OkQpPdvIL7VCbS>iJ ztkcJtVhHVAvXQ;aB}y-`gnvrpoLBoPbyg97$`tLH$UCIlg|D8Z{YJ)$q-H2Fn>w5+ zYf;qUs8Y4ZT5NUN-PA0kowL-rn|7v<-;@0D#P^-gAgy#>-3HIPkY4T$o;CkFpgqkL zUuIq0$(B5fb_$4diS(sfJZ-+f6jMn*%xiNBW7SeKRiw&?VqZB!T*Y!8$P{Bj_8>o< zNjs_eqU1_BF8@SOmWTWv($S>PlU^&d8b$gfY4)wk`^@u;w4j9KBCdjWW(msEep9rGot{l`zC;f0)r0>ab#4`OzE93wM2zlWGYt3;&pM6yw}=HS zVSD6hk!H(LKNYFQepyklY8V#m+&a$+9fJJm{}5{qg*Mp+6t)?aJ-f!06A-kTd4DWq4hH2)8setTnb3D-u2O>ecA{A{t>3W+(e2DxVr0=Jla;Zn$*MdIA)rhNJ6&fgcew&&Kah5F=3>hq_^IUZ1 zT+bwjwV>N=?>VORjPSL+#1q8T#6J+9Bd%auTERM6!B{Ioyl8uNdA73EwX)T$FUa0v?VjHcsQU3{|-OisP{{<&jQ=os8 zb9spP3}dYz{gj|SM>O?0q}f*W9@H633zara{d-BjMEWJxbuDxFQV?NBt7WW7)Tt#_ zG1hqUCy>)j98c^|et)4=`|3+3Kbiaz(l=20A?bM11-@smpvv2JWb`L_%V4>A4j<0--o6op! z2(49$j@~GUQPV1LO!75q4yNW_(lH!G{DyQL`EOBkIPLf3IAooSMb+iRos>p1CtVq9 zGVNC}tvSpO@0Mux-OhQmlP0L_7ER?cQ!J3U%5O+>{HAOm&GDY{jEql|ztNhV;&V*# zCP957>3EjIlR~SU`6{n4xBY~#Y~jqPiMi@WJ3W|llX15YGlUPXi=629Q0k#3p9tuW zQ>TP{j$zdKB2|YGcN4!uyo>2h5E@Szydj1=UF`r6StgE*zaRDZN=Z>a)~2 z&D>I|Jxk3^l$uQCN7PwC+)ONXxe)h$#$r2BE`)p+oUxqQRZ{11#+}H#^&{s|(r0Nu zUpU$=w(z%z|xuKcval2m)qayGGy zE$waUU!=80TDV;hBRwg@Q1badQ#FkxJWn_ZM`$YN5%`CaN7=1JDtbv-^&s{5sBcYb zR!=tHGkF636*pKtuU^(P&Ewdlh2uT%FZpPx@wUyJNq$TidT4){bkZwe#9#UDG{!xZVSAEl<=_ z@J8|?y;2Y8!}LaczkZTFO`oaH#e2k;>MQlNc)RyzeY?I}->)CmkL#!P^ZI3s_&gZ< z^>D;E5*;acSz?jW@1A>_Cp!;~N_l9MnFvgG_8T=N zEybAz%y8xaGo3}Frlj|CRsj1uYk{{pj{(!1gMjJIVZcl$QqSn;90~0290N>oHUm?g zlSWO+?B|>c?C(4WOmVgVGn})=OuA>3b8ho}WAAY;XrA!!{m#YB4~%MdE_*=el@HxN zYNB(^Ll5=q?_3W|b#4IO=G^?yL;d?Zw*ga}J0E(u@ge7)haR5zkaNEi|I&0?idIuq z66M_A5GVpV+7)e}-(TTqA2M)IlyfB7hN7qmA&M^hn=jkPmZr$x^;B0PzF&ZS2*%cO z-2Irncb-&2D92m6WsmQ$v-0u(WamP&L_=dc_-xJ*L4(@ja_dD%<^l5v4%S3zc z+7#Re_2n(TO!usvu5*oj92ybip9>xb_padi$Q=ra=Z*~S`vvz!!Tlh6?<@)Kixel% zD=R3+lL|S$F(tTP9+c0pkB!lG`8mf0_szk5=iq)*a6clrA8GF$mx9N`g8Sk29)5uJ zfqx!y?>H1ZE(`AQUV6!o6JI>wK6oDR6Fd$Hrt1inkI`i3(|N+KSIckLpL4xkzm7RU zJHd1vGwkDzJ)Vo%`Iu_k>3+uEcf*)L(l_#g_SXmH4%*jU6|P2CKhh4oZGD&XuyYfS zLfnhpZSKA9)1e`uF*e6F&$S9~eLmnigZC{<>k17$=}|(9JUu+Ap~F12o~F=AHs3QY zbW-TtFt2B}XJuH4XRGHhYs2G8b{%#{!sc98j_b6$pSwMD1!Hs#I}mmu{?C5edd|BriotJdF)Va0GrSS0ZO`UgjKGxale5tdyb5@tmowjt^*J*91 z6H=y1j~g}v2i&k7n0kX;0k_>C*HzjLhk@xg90z9HAXiN04d;PbH%M#Gj?jQP5guS} zL^v=nq6aWPA_iCxkq9h|kak=Ykp(P{C<2y5R02yQ0>H9}VZic;MqpLMIAC?eBw$U% zG+=DROki!qTwqGXLSP+p8%5+l-SKrA$$u=-{x|SV;fKNThaUnqgv(fOP`LPt`tbcg z{&O1Ndy!Z-1L=DrO0ah-A;_7g3{)y3{MaWe{UJAS^}Cf&1z&T(cVli;x+>k2?uu8D ze?nVdD}ybpUj7U=BpnTU!g0}(2VaaaV9&z0a5^hpVD$!kE2hKpO-c{st0&GY65o`G zQlga@oSj#RQ{t5bd}}9B>8&Io^<=!8;PVvp|HBmc-|_!P=ivK_vrv-~kbAYy5u~B^ zIrv#SaNlQFfQj$~Y9H@sR~5Cd)S#NY0cENtuNR%9Stt)RsYNMN)ZXwBz0YHVFYW{X z+Z%U;dT+!g{5EW-vB6*TK80;9wv*UaV>^LuCAQ<(mSa1HZ7Hl~C|c6KB($TXG)3!u zpf}o5QYxtZz0sbMQb1iwLO+ny57f@yyFn!*Mp_bDD)lz^-Wt@~)O$-%Z)5MxwqEZI zz0ubstyA>gkoMjN`l8}Uic0b(B_t)`2ygmN8k00TsX1vLj?UvKBB^^)&!i}vU1!Ca zGy}4ENeh#fC|aLDpB*SYrH`i%-v-bJ-*Z4dvXb(Wijwf%q&^Xf(|skhDzrAVK6I#* zXZA%!P29qEi5qmVn5u*%RVVqA1|?F&o!uWb4)z8>@y&$?H+f9ZbF zz0LiKd%yc1ZhWC3DFl)qxR<(r>RyY!c@e@%+Zq=J};(jc1+b zdCywUFFg33Owu$+e&^ZbdBL+DoL_rdJ--6=OGrjSveolj&jxVTdNz7q1oeWa4U#fQ ze(%`~PAe?E1nRf2@Eb^yAlU}aMp#-0>i4klJI@wK!Xer1dBwBK^C!XMH6uI8&e!=~`d!_rC&nYR}>9bLiv(WdQeojd~JG-Am)gVSqxGF}Upe28`=zwX54kA~n%TrYH=EkZ*kb)fe;lelmiJXXR1 zq5G^5`kHKC!lr3^5pOGG`})ihIw*7Y+K@O5Tjex(ym*wE>>-o$a`sx1Fr(L!#Jp*X zA%mBb{WaP2#Bsf*Cybsp4YD1OVc#J`ozLjAP(relft(Gl^Mv_*S|lXPdox%{rPVmc zp2$;1pNr#$GyV;bsM#C9$4IYNRG*`zsn_wB!O9u%-F2UpU9+$lF6UP$wKdtfH_M-S?|kz z93{i9lOT@?ogqm|I^FwN7LJe(&n{~$g=}K{9N$F9=EUv8u^mrKIIKkAlWcmJBg`2Vg3rac!$QM6VPRpN!#ago zVP=>Sc4JsX*bQOfVe}Z8`(eo!pXWnP;`0*EZEvC;*FngPQ6fLo* z(%tg|^=5Uj`k?wVtyTL7-`>i0{>-`B`M&e4E8q2iTk|Y5TzJ|43iD?3m*#p4y?3t} z_+D4NUZM}e*T{zGx9daoJ8)OvPWc|0?l=Bwyk;CQ{$?CB{%#yH{$U(8UN_)@(0<1# zTKwvG)D|dd?`p3>;LMLls{v(miolr?KLwnkAO~ZE_!03Vz>)XdNUTV3%HqpN2?zC< z;75zFuZy@K2dz6kfi;B^r4Ggf@jc0j4{~~d)0v!vAjb_3{0#JafuqJY;h68g0f*zL zgcXYBJLx-#G*^HJorL+|9QH{&o*(2)2WPJjJ|$s#kb`fE`L_GEgEKD384AuuAAD@W zP@5Bv>m_~>Xh~hpLXF229X~V3k+V?amtx}^fWVcrP~(?i8xhozvryy3H&q8Yau%={ zpJi{L6G6>XdinlTr53Clp8QvV>vremS~kX$B;_A(4=l+J2eoccr6ZE zd2}P<^ziM?JSgq!TAU4;TOg}|Oo`v>+b}TgnoJ$o1Ck`g8FM0jrEf{*3Ljd`wRqDr zB_HszF}r-z<7Zd2Uz4d7D66Ooh_}u+GJafUlW*iTndU30*a_( I(wn_2BExlWc) zu@Rj;9TJk*C@zAECb}MSC-L?#ATd9rU zZB!i5dwug#8{yk&F%|tUa_5NN6xWp9@OPv*|Bk~<9iWJQlI^En#FeV*q5 z+rjS&esbKI+@kCFYZdj@cJRB#9T)%_R(L+SxGHWvspwJd-nPb=>-49AzZd+I-laL0 zujAwVZ`}y~Uhm8Spo4a_xXDA|BX3=Vo!T7 zw)cDsGeGS|1)g1=93+y48x=Ub887&vmlaAwwaBexo=dTaL9(Lcc{Z~ByV_B)RF2vY9o$D@o2PquQ~=t z@&-pmZBp-3$Khx^kDAr{)d@IyfJYNC#>BOxKE$J`_=@s096idT>G+y*3yvP+kzcKs zcT=eiJQ}XvrH;VS-8`DCKCHrvtB>GFi(ZPmDjz5xDjz8yEB{h1DW57=RSos#R>M$6 zX(*X2H6LYAj5F8O5Y(ZeKB;~O?+N>^x8#fpc#*Ic-qnpv)xXI{Y%*I{mxyC$m zp}ELhY~o&0^b%#L+DA=Bt@TG4m!o8dD(@(#l=o1JDJp9Zd1JjHf2=Vz5BX$mA+M|_ z)K8^ag?gz~>r|GA+Mu#tj4nnv?uy@HL>jjmQOHj;aukC+#UfWeBi=|r-g+T-iO64X zgN4ZBij;|QM?f8Rhx;su2=$)70sBy4XqYqTir*)o;?1^?rE9_yL|9UeGVKN_B!@C_B-Bn&2-Ijeb1fa&T|*Ii`*sdGUG+#H^y&`myF*jTGUEq zqS{BV|Gz8$ubuh)AJog&%%;>{tCcHpE92J2wZ(0Y+a9+&Zhze2xZ`oB3z`ew!t#f)H~Zz<*jYkh5)4Q%)Ajvj~Ez~T5t-*MmR=!wx&qNhjC zjGhxcKYCI0(&!b@tE1ONw?%J?-Wt6ldUy1`=!4NmqK`+Piar~CA^LKR5)%?*#DvFm zkBN-&#w5n{i%E;gi7ASyh^dVk6f-PlWXzbD=9oz_Q)60UX2r~nSrD^0W?9V2m^Cr$ zV>ZNWj@cHoGiHzPe9ZosLor8VPQ;v!ITv#==1Qy<>yEWzBVv2RM#cJKlVVe1Gh*{% zOJXZy{jo!0hsQR?HpNbeog6zYc1C=2?CjWiu?u6D#4e9r6}vXJHFjg{me}pFyJGjo z9*8|0do1>3?3vi}v6p<8yRD6FkJr2ougBXt-qZDvx2w0OH^!UbP4=dGv%Ce~GH7C=9?_K0w>RsVo?Oo?>^KSBP_3rTQ_U`i@^d9jZ_nz{e z^hvE~EjTyv*+(0twe z&~o6(SH4wc4YuyGriEP&`^>o0m|;F-uCn5dT=T`SV_}!fXU%Qa*s$ZqtgttY#m3|K zNNSkb#SAs)nG4MM*3H(f)-7R2!`=!zfhTgO!cK?1ZFs}Zgq;mL7j{1ELfHFZ7sLJ) z_DR^MVOPSg%FBrG33}ad7)~R^a2uWQ1TWMGGYrEtETglLW8@k6e4bZi6dNT*nNe;G zG%AcrV~|m0R2wx$EnZMxZ`4_eF~qpt7;4;M3^RrscNtB_2;**Jq;Zci3eN)XHO3f| zjQfoV#skJg<3Zz5W3usx@vt$~_?9uv7-x((9y4YdPZ&=c-!YytzH5BXm~T97EHJ)r zEHr*#EHZv*{K!~h{20#@pD~sh&l<~(pBO(iRvOP4tBju+tBp0rFO0Ru^LX0$OJlw9 zE92KjoACmkJpSI;X1r`{H+C31jXxQ?j8}}^#-EKn#$Mwu#t~z`@rH5Kc+>dUcneP` z-!@Jd?-(bIca2lV8RLE9tnq9W@o4xQ1G|5ah`v+;a1*UU5X%>uK~EHaDD60_7SGt12yv%(x`R+`mjm04@n znSQh0Y%qtIx0^%FJIrC`NOO>Rr#alb%N$|eZQf%xnxo9o=Dp_qW|MiJInEr9C#Vz5 ziROdmBy+O)usOwi#GGn=%ba0OGpC!6n={QP%qPuR<{a}o=4|u3=J(8}&F`B(Fn?(N z$o#Rn)O^NVW-d2>Vy-ZMYOXY&Gk<2THh*reF@IsMHJ>-vnZGhy&0m`v%#G%6%uVKR z&CTXZ<`(mJczXML^JR0p`3Lh)<}ULUbGP|tbC0>#{HwX&e9e5-{ENBIJYfFK{JVMB zJY@dEJYv3K9yQ-IkC|_o$IZ9R6Xr?tUGtRrPxG|-o_WT6-~7NlXP!4dGB21Pn-|T0 znU~B@%**Dd<`qjf+s)4`)w6@J1tJE2x1O+OSx;KC@tpiS)*NfD^<8V8^|bYUYoYZ$YrgdZYmxOsYl-!&g}LAekM^gq zzlR+P`$yQ}|CQ%?J?u!>oA5Tr;e#&OUdVXdm}M-6w+W@cc?kYyK0MAU^I7F-iJ5(0RHGhc%+ZulRo~EPg1Y*N-p@N zQ23<|uha>C=|A<|asSIvRfn&$JzXh%9X{VgZ}$aXht&;(zSi3{Uh{Wj;n$k4^Ldlt z^B#iFn*z^g`@Qk-gVT*~U-Nyl;r-^o|ILL5ocCY(z!gC+`17D2{P|aS!WWDU7;$aH zm}?V8U7IoP+G70fI^Xz*ukenq!awfAXlDPv-5q`%_lJ)Jz2?9B%=hG)y3TWU_|1>6 z^PHc2rSJTA&lz!@@4Ok_Q|^S{67-<4@U-4*el&P*{7e2+ylLA1!k-TQFFfj)>pbfH zU+Gh)nUBJ!e%ov@AA^6r-nY(%cYW%bcU=hox(FV2F?{Tj|H{kao<2Qo+c)^y9p)du z;BQ}r#}!X&dtKY#inl%d75?^}|B=VN8uYpNOV@QiH|(0%?Q+fU-bl~e&FcO?@VlvB z>v;>TLipYidf$rw%Kwgl{~bvWJc>S8{B4IHzTcVvKRglsc#`$df8~Y0V=aIee(G!d z@M3u4A6Y-PmRiqP%WR)~-$7?6ee(ZiBnUr#ZTxtQBgZd~8#!tW6VLs{aU({MLErtg zK3g3se!F9IDgOJLMwp%-gdMeK$(UPyjCti%`0!7^;HSUnr90-~ws-EBjsK^)E@ryd z?gBh0BT>6Y3HBggH;VoGF>HrlFUO}N>DA?l_Vr^}`1OF%G0y$UQLZuQYsa~bT7=e> zzJH7{_N#jbEh8xZV!!a^KHqHGszr4 z`lf&H%{t}*U!Da7doRab|K5Mex(fDS|8M6D|HJu(W2ZToGmAUFG^_aH%;VqZ9b-82 z_a6BOUZ2N>*rI~diQUbab4f{cg(l0_53c^wVwa0 zW@i8UdD)jo2f6?C*y7)3bh(y2qOfOk-#oYb=K0 z|ij^_u)iqCtyuej^G5A&q=&_4{uOrXObiND!lp2ax1 zi}#Pv zXF8gC(kPc>?c2t~vUe?Z;S3c`|Dz$#LN(_jCe}$fr#K&Uev8Ksn|P|EXdw&CcV+MD zVRe$dJHfgiIT(rU9^?W~RW1B`*E18_6WAWdHV1l+(BFo>?(v0{E4n?~Qk+e@oAx)& zX`0`(NO1@@Ep5X3fTm?lE1T9dtw#!TTu%X4yMB%{e+6fab$uZ&Y@s;&9gz54^@=9G z0QPn5W-Z6KSJ50+=%>KWM9YJ9@{Z0#%Y(dW;n*Uub;Xv3Ed%9*XKqSJ=*yvRhrZ_t z^(1>Lajb?yTXDD_bWf6dI6}K>U3IQ$7D^UpGdA?;&}Tg;=}DNS9+wvRoHqa{uMAA4^Z-By-#4c~+JUP+e2%*^R9 zIt&goGc$7>W@ct)W@cvSFm)L1FoVBcCot)me%|%0kABT)&5)DbPU)=t2F`pWt27I#azW!=i=Z+P7E2(N&vf?pNYNZt9+^kwS$H;IJxe!B*7Pdg-^NYzH2H+D3N?MzbVbwlO$*^G zX8GJr`0MO&FT2;=o1Vm;WS*4XLf)cY^FDYd;B|%hjSTMHV|YxzmwuPci^&Vh(f57r z11kElA1uceu5x{@-;L--cBA_7X|x=+o0k-&L@ls19l_?p;cRhlY%i)Kyh*(&E&NsC z0{#bgu6h2S+|KmZ+UeeMA9|E0m8S@PPP@C^+wNnJ?MdT-8{AUndWE@OWv*9x&fq=d zp2uFii}YWF_l(}D9k_YjW_b7DBf#tIckjEQ9ID zK6antClr2{i=Rt{-tZ~f>-v=IJw_=4ZjWwBBlbbb# zSyR$gkv|&m3%xrY^X>%V*+MuaxD`w*KHc=VyjiQ7HHleMm^F=AGnzF!tt_p1e1}%- z3HrC?%$mWh)y66tNFkPprDB*esCZ-n!Fx961kdIcQko8Ua(gZGZ%uiD&dXLYi=TfME7 z)^Y2!b>6zj(zEI!ROMAo>`nGD`<)Zd`xlhnzFjV%gORkkEog|4w!)Cq$ikv^B24e6`|NJ;H&v7>ByXFzFMH3s%Lf{yS2kSW1#Tl z^v#AI0lwa!iGKT!Jx@C@rui(#`|VkC5}406ed>aAC40TS$L;Qpa;Lis-IeZf_l5h} zBRmm2kvx$-(LFIxM;6Bu*OSqc)sxMW&r{Y@!Bf>!-BZ(3*VD|?!qeK*-qX?3+0)Gv z;_2-f=b7l4o01sGxl+2dP+Uh_0er z=^Hk)Q`&v(D>$X}?kV@Qd&WKMo^#K;f4VQ-SMF=~jr-Pp=e~D8xF6lG?l%w0hdqIwh@PmPn4b8a z1fEQu?4JCd0-l1NLY~5?S^w2>1pTb;OXj_?#<$DA*69WdqCM{SCIl;1YNRm%ux?1U{jg7lWh5W*+z-vcUV01CED$tN^?YaFf6> z(2xyagV51?7^Yy2W#idIK9Nu6Q~7j$g({n1aJVwwi3bL6W2=Xz2UuK zrGwh>)=Cd$^pjP@`^D#2#eGS9NvtZkj#xjggLW}9SHsQKIOb|xb2T1wHC}TyK65n@ z%+*9PR};ltO*C^gvCY-QHCGegTunlAHHppDq%)^PdUHzrVXiEfxw72m$|{&Et75LK zp*ceu;fm_v`ikMiC}UN#YT^2tTP>`XRx7+~9dTlHwz^nztV_7EQ2dRbn`@)@iQc7e zRye{2TvWzAg?EXrimr(qQYe-1~x!!}^K=&3NCiN%&WqEyP-c?Lw41 z?kj=EMS;7yuO2WW_ca2FrpI(Wjew@PuRW*V-jSEW+6l;<`v!7)oxvQ=B;Qaz4eM|| z1M6%KCjq_-A2+^x9qUlcsHN}zg>^Q6jdiW?VBH|-eb|KQvD~*sM8>*JM8moR)04SK z>f41irEfQ$*(cIr-7o&YdO&2vdPvYaa9C8vdPLyU?>j20Vm&6RVLdLYV?7~iU_B{n zV?8a(V!dV_ze3i+#d6;_Jo5w2^9!iu^$v(oeP z9D079L(eZa&!cY6Jf`QDo9EY?=hvI(QCElO;T^>a|EO8%=XaUsQFn*O)4d$)Jg>mI z+>7gU)_d_@IlH{LT6`COS$x;Qy4;Ju4!(=eExzkDEB@Aa2A^Sk*N=6cHvsE&FDirZ zGk9dn03==Z%DQI;wKG_r5Oy)`z}?SReZmVSVaLjPr-ApSFD2I3zEoJ> z`cmUlYWs51r`LDV5~!yfi+*JSRh3hmnYf>i8p;K@Ux7->mAK!4ddiKs-;JuuJ-9!D z+RCH2KZ^=W)B>UY@=xaq?r%BQoWCsLy#YVy9o*l7v#lEL|01-A`#W$Mq6dNZ6TF5^ zaQ^|0LwHQR;qV={!2NBw4r}24F1&}eaDN|X*h}0$#3}X`_m6Rop}x@j6ernNOZbw& zk%*oL-e+)<*1>%!3u1L~{~QiQ)Tnu1!cU1BB=2iD6rQ7 z$(V{^Q|1xI5`+{0_iTH&6<-PjuUl9gu(aIfHVY=e87s*-T; zu*xhA?oXkgc}&Jl_mp!Bf3V>br1zfnoMi&$klqK@YxrZ|;L#)NJtj(j#G@zH zXSig);87T&Qyx|ik6yqjnh#!9i4)fW7HNP-ZxF8vv4)n!$s3!{uO$O|{Nxu{G(_n8 z)&oT7N7iGOl2w5lwJYnz`m*J01KY~>QD#9Z5IfDzvGeRAyTq=tYwQNQ#qP0p>^=L! zzOrvzb3gLlC_E14%H~IpmP)(|ufyx{2D}k(!CUb*d=wwe$MCUy3*U;~EW7z$et;k0 z$N4Fa>2&WU_!ndl^Xie6%b7%j$%@nQ<)H>6jHwPL;4 zAU283V!PNO4vQnm1|Et>;xV!Tsu_!pUM&e_5}5-1TGGfoCOar4%bNawP+BkRis zlqZpzAUntb=nOVlPLXrvJh=$5`H(y;FUjljmb@z;$mjC4d@J9}FY=qRR16hUC00pQ za+OM@Q@K@;Dxu1%N~)%+r5dP4s-qg92C6~m2fbXaP;1qCbwC|gC)H`qz&F@BP)E=a zbz+@VC)X)-DxF%V(?xV?T~SxojdeTSNq5oRbPwH6570C9LcK^Y*6a0ly<1;i*vah_b^*I2xDxO)c1632UCnN9_p*E2gYEHPp(fi??dkSRdzL-N z-eB*w588+A3-(3(l6~2}XWzG9+OO=_j^lV7pW}BTJ5iiCPEIG-DdjYFx;uTGz7F^$ zeBS=4LYmN3@&6C;RrJ3htbfPb6$x|wJKAOo*ly&-55UL71{;%`7vm*)&tDjP^-mPe zU;PcOB|`rE9kCS=MMbbEh7Lg`1o0@PMH%eZ>Bz1R{tsx*0iMf=y!&5>?t;9c9Mws6 z`Gxf<-#qdY@l)mr_9>PgqwgbAkA*ChxUSZA_wPuMxIrhy?|6`SF>qAG_Wa#SyKhH!zY`h!ZZI5&Q3-w&72(HG1%49M;HSX~#PP=U z#`DGp=aI~t+?&Fi@;|Z=?K>6FgV;CVvqDx8Mda&dr;6R?CV5i?Z+EE%wJ3YLnQx&WM%KfK^S9M$bv z2i6hOWV^se-<$Pg8`&v#mi@^tuq*5)yN~`*ird`bF8A^P%$|*iPEk>LG#;JD;4#rN zDmIUYzERnDc3zXWUWqdhb!B-;tU⋙N7Q=0fp6rS_-4L?@8rAq9=?z7 z=Lh*AewZIc@2C^}BtMG|Qs?NASWBKMEaMp;ylmKQZm z&1gN*Kr|LjMQ1TlOcImDRP>aZA!dr%Vvd-L?o#u`0Gog z#8$BrpQ1oh^%}+0yvC3*Wdd}ZN-R^$Ak@QFL;xPM0&~EIC`wkqhNwxkNseUuC!oRB=^26<;M(iBuBw^-ZZ#tMn?f zDxeCgLaMMTqKc|uRZJCEB~>X^T9r}dRC!fFRaBK#6;)MLQ#DjW)j{=F^T3}hREyD7 zXQ^7I)~I#pqP#_IQ@hn3wO8#^`_(~pNF7$k)CqM)TiVgC_UkA*s*a|k>liw=j;G`6 z1UjKksnhE}bYWdo2kYXxq%Ny#>Ds!kuBYqk2D+hcq?_obx|wdSTj-X$wQi%)0acci@ zR2vPZQJe4BuI;hCw%^Wa=R((>Ane1uc0N15UC=IM7qg4oCG1kzndR*Yb|t&2-OlcT zJZ*qI&>lpQ+a6{QN8g~4_E>uyBKIVFiapJqVb8T!+3W1>=q9w&-fi!*_uB{TBldaw zx_!gGY2UK%*mvz04rp43J80zWfY5bp$8{n((VXZ`3@4@&+sW?aKo(fYDeM$+iaN!d z;!X*tq*K}{QFr3j3{L_#A&d|3Pt|>Ge1rM#1`~xuh(LAf9&=VY%-h2=Htv>$X2uMYzN!P z4zk;rsx7h4ogaIg_IGlgf~Vw#c@bWe2lL{*EU(5p@Q%C_@62cZ*x@VrDzn2kU?-pD zw|OWJyDV-Z-&e@^ZLp3m_{soDyPNiQY?)o= zK-|a$_Oi4rW7x~zcX=&Y85Ke$aI0YWz z448y-^1Qq#Z_0b}KKA?*`Bc7YLfww6Ei0S0~g-bTaH}+Rr7hpG#p! zm(%6VuC8PDHSOqT*v+(;+hZSh#y$qeO8~-7xis@N8i==^aK4+hw10~wf?9->#zEo4mUgAu_M`$ z?I?CsJDMHGj%z3Q+paHWH@92ZE$vo_!fotsKlb@pv!mzRE3lt;+NbO@*u^)^4t{Ds zw_n(A?GN^6`^%5r{CywO4o>1^`L%zGVds{0$~je>s!lbh*00^$di(cazcZh>aB&D{>&Xs5diD#;$`B>SM09B{9=SD_?bcW<~ip_}{#<>U^u zlY3B49zZ{N1O?>@G?Zsy2B*ni5LvDf_9 z{kQyg{b4v~|8>eToU=O6#VH#Q7#J7tb{M7vtupyh;E zJ*=KqFK9Y_tiIM5YpgZZT4AlRwpn`&U;3RNMeQgU(FkBfqoKNv>I6~UiL?HDC14Tc z@4?9BOCT?%8oU~)!b1h5sj#Dpx+bWxY)#dZK=D0TA2#<_ELg;ru%&DnTftVcRfr60 z**eUFKf^Aw>l7W>UG{)IWRKWm_JloU&oCY3IeWogvX3BF7&?dw#19{j%j27867zeTY2va{?WyUK1d1QDwz zVpeb2NA{Kd5WA+z`EtRJ$hAtYmTTl%xlXQ^8{}rWRc@2pz+ETclR14Qi{}u6C%MYL_}? zB3`I^u3o5D>a}{K-m3TNgZikxsc_A;&`SGsBpn%9U0R(KRb5S2*EMuaT}M;o8>vU>(TIIxe~W&T^=v&~FVV~Na=j99@DL*2 z_jq?+|EVt^>RtL3_wJj>_gFvCPxUh$YGU9^{R(mLt$vSK7;gK1i-6JXn8-V0n@AYX zj*n=V&`xA0wv*UN?NoMZJB^*zPG_gLGuYYf!gdk6D7tA@wwwJeO18B-*c}loyW1hi zTl?AbiN~^+AX=`p*Vt?AjnIBK+gt3d_94X1(}fomhyVNu6X)a%90Nom5VG^wZ4XWO6biu4YAEoEu6>9%v=`pq3Q) z6HB7R=? zBt#@kZX#eqPh)QZZ(;8q?_Tdd?|$zA??LY&?_uu|?@{kD?{V+N-=g2&BOg`v{fv7* ztMO*T4fX#<%&Ttd)c!rP!;z0M@<+-I@EvtKaO+5O+D3o|3d#F|5E=> z|1LzgEmC02zyyH_0}}@(M@&nLc$PH~ z3WYTi-7ABjUe>poLP4b1k`T^;HRx)&9k=AxVe2e9Il@I?WkOWwV`XFg(cK_BRMkON zPAIEGtXxo6hgrGV2sYXZg33D9$`7S=yj6fr#3a6g$SM|Ch1ept$SRLWvdpT0NV3|h zj5xC1s?IjC%~mZ$lIxh1{1>}x)kQRUY&AqAd1W$!FnG~>gqeFEE zYY$>T7wZ_}Ko9FUqQExml-MN>TBi{OPFj}`0d8B@>3p`H%d`@#xXdbZS|4R@ncMnC z=d=}0=QM=t_Od+_vXkrvAu>evhn;Gv+`uB?1m4HuDW)9E)buDn%ZQUU63a&CEPB?b zQE6Ci^@qyDf>aijh2_Uto1GO0>`NT6;8z<{aOvZYn!qI!3Ysq@yBu&fu z>I^zN>xXkRCmV*dG(Q`TbF>f}jgzz#8-vrd45r&6$6(|1Ks^!@Lr3e0Y^I*9@hQOh zIh!rS89JXW!s&U4E!Ic$Ikp6+=bvmnou6z2&P5;FWFy&So9&2pbhd@gMz$MgV-mL4 zu5Pzx$LzLtTXqZQT?qTjo@Xy$k8#c|VbAESW1%?f*0Sd~o3ujs^&YZYTG7b-)laBjvqGjcgoeEAJ9+OTgo($-}D^KC{aC-2xbW-tj zIIH&ZKX6(d=NX*a&TXC%9H8KtJPkb!d1g;zPh)&GIjIxmmjOy7=M(KNFF3nez=!pL z-tt;QajRo?T3z6{sd#oeZVe1$*ATh-BC9db-CC;&@ZEOk<9n>V$l(uz$!kYkp4AVS zFU%SMP2z<$7&`eYYY0?{H+a=|P|Jsb9sXjChLRuG8Uq}d!de4C^n~XD+-Kw7MPRx!s0-SSD@5jpF^j)iCY=k?ZBe< z(PzFQdy89r_JMt34S*UIYYbk|Wi5$wU~RxG2D0|VF0u~b7h|(dgeF-JaEobJZ=48u z*)U#^mx8;a3@^v#@Cv*lo6jrr%IH2pE2eYT1>0MeYuPQoKHnVlh@2s;bhxtaY5oVklV2RFHgy(WH=y#cRrn7t#0 z5=i$1Kgm855@lb(QeFY-#fbww1`ZSam)r&#_27K5c_MsZBJy-#K4SB1U_TP^>>{a1 z%JTr1rs8?QhGgUgz-MOR#c&1{;3dF^l;>5@5w8w!NL&YRBN~auyghi$ro1E0Bd{)D zIJ@vZbRO}3qK_EP2Z99|#m9mP8N``ZVRCm=~2>lsc;RjG6Zqg= zih)nM1cL6SeyHN;$NITSreEr}DlIVcdzBHm`J>7N9_h2ntiRfp$^tG)svKaCwaN_! z*{6cQ9|x)e6ggBOM2@7Y1Q2w7RTV693DuSuWYvv0WEBDyxw7gBCb_EWjkwZI^+81G zp{4*m4^s2MDUVc3h)q_jz$Z^oYrrW_Qft90Pf_c@L`_rc!7tBH8^AFyP#b}^m#D3X zK O%v4HxT!3wGW*00dXgSjqo@~zGa3xkdS_g~M*j$BCGhejcOdFq=eQ%m4lQ*jf*o4!&HxX;&YcM^ev>;7 zT>KVyA=sjA?jmsVyWGX#jrP0Cz#AQOSAjb^;;u%w|5NT-aP?>1P2lU#yPLr$U2wO6 zx4-3X_22g2cK3o;y6f%(kN?QsPh7rx7%2ROdlXF57xx6=arZQsCf7YrOp|++xF+`o z@OfnS7MT6$?rk9Txb7XG^_1?zz|?`M-Iu`YY28=E@w>0V@@I435*y`y3d|Fj$A!xV zar{3qM`5clFh^0`l0g|R1}t9!-n$fFoT{KheSP@u(phbRNOBoOQpzBb(gu-~H7KN< z;h4%B6jH^YkZL9ys9~~!#s-BnHz=f)K_RUT3Tb0dNLzzKIst`5w>krb#0M5h2NW{a zppeN1g-ihoDQ8Uw3TbVv1ODg%L_!(H36o)*G#SQe!XMTd!XMUolRbo)?BS)!9^RVl z;RE3h>yyDBaI@j&0C}7Ng1L;F$6yYxK^s2g7O#L~2yMg#+W5%g0c}W@0Q{7XB?YR8 z%aT#H!ID$9!BP-U#WI4a%F8kvgptc2j3NeM6f+2;1Q13IRuUXmZC2LcjPeF&R4_QB zqQM!J49=*Ie5E_90dA`|YXoL%AZupOMr(sM+8MOb9%y3;@Xac|iuDEN*v|R^Rh(h{ zDVt#fD4Sse30trsge}-mV2d|w1TvTpz&$`1Y(60jwwe$I+X94Q+Yo*L(bs+fe#k&0hCwqL4*%5V;A^<&nA4pR}ntIRCnM5%x(id;G2OD z9`UVEYr^<;LJ52ip#;8{Py*ivWZ>ufk&Q*=XNmFRmw*OR@GF#)@vD@R@oUJ*vcp%L zN9E^t!F^Td1_$s*gaCLb5I}qWmJk3Yd?6b{!)_n|5LQ3{Fcbg*2oDgzT+A0|U!^Ln^BqtmoQc^xAQV|XiX$c32bjakwMFzqF zqB0=>QJpw3QA5Ynu|zE$N5>U)2nUFIgabr=t$X~=tS9`=uFw3=mJboQ*;A+R!8(A93XlV4iJ5S1GTaHJ}9xNwN%$8FqX3J@mkILzkkII=8v*j$xN97!f*>Wz$Y`K71VwJ8C~~WaP^0duNa)-Zsv?7zvsDymQhpT!x>S4>6WUaA6$|=Q zE)|=4bgB5zr)sH$;OUyGB;e}Wsif!;FjA$2N;OBN0#i3%Wgvx0Wh9MCWrj+%S7jxo zN@asqbwcHUUUg38gl6?nXoGmK*5Tw3PQa~r-GqiWmLtW zVr5aqp=4!OC7@>IQ63g{h*R&`aDxII+~dRA9eh5DnY8ql+Psanvq`l{MM6;o9m zXj`jPU2uK7R3m6xS5;%^TK7~7aD5L|8}NP4RNH{CfH2h&jNcd4DIh!`Ty=xGB~*7P zTpksIH^!%WqvuEj)dxCPB-IyMS4`CpdRJ!EA4*qlH56)BK{bpNFEt#RS9vuOx>t2I z3ffmqH5&R?3pEBBSZ6hsbTBmz-Iw~Q3Fy5vL`|fAA8IOa&uBFrir82+gK&?U>0j+% zrxp-rsFncV>{ZLa9qv~v2;Zocgm2VpaEQ0m20}AxJE0l118m|WwG%idOzrW1^M6x& z!74Jf4|zvAmoND*U-JJiUxJD2gurwY!lr6B72a6A|{ZlN0u`QxNvDQxf*FX~tq6 zJ2hc1I~`#!J3V19J2PP~J1b!?J11c;J2zo3JBYBCorkcOotLneU5K!kU6`F;DgiVxH_Z#5~#ShzEidZ#pQQP2g--BCCc-zn6l`i2{+vs9-j2EgNI48?$0L?0b)fir=NLF+Bp`uwW z;D3#4wKR%YdnjTVtsdZ?f~;WAJx&CO?sKNi1adR3F&3lO6X<%tkrN#ErQlY3il?%QEfFG)lO(+8?8O0h*^6{ z5wrG@A_gT62+ulXl87UP2K|WW9mUDvk9P+*NMt6WmpKV8W`&YrzK9mbYaJUaNjg zldFnFGn!Wna#yjKV4mi)*zi?tW{Hg2mD#9W*^Szj!>C=kjM|mQs9pJt+EoB**F{zo z{;L-(7|z}gV8YX6#S>XO>U8bIyJ%$kFj z%FSB9k5z!RBh8C-Ce4fWhvrp}je^Iw0UIqEi^gmWDPL?X99bRNI8wgYWW!EPg(Is! zn*p!?WHyt${%j7p{nqA#V!C(Vm(fabN6ZH8O;5I8d^ zUu-AwRcs&eRqO)#)PG@@;Kt(Y5xiJl_KZ|8_J&k2_K{RD_Jw#X%%X=1#==PjLpudH z05~TV3*nfM*BeHJ#@~oxvO)M2Z;CPn8DX)s}?wC5pZ;(>PZxTZWPVS64gGmfJY#BppqZCLK+b1(&u&R3TrQs0wG=I`DkZ z)I?pXu@Ft*v^Xr9!K-#kw18jj5?H<4VAnbtb}a<#+G{X#Z^5w*G#uL?!?6v9L(LLH zNM94fNM94c24LDoL0|KW@$jxi7ZVKAHVsT$da)SZvL&wG)TQvnI~Kwblb19&|Txk#sk4opd*Ghjce_k90S2pL93z zfOI$UkaRb2dvoo%;xT+~^TiY5@WfM6-$WRxZ{j65yshF5ImN|CRDSFR{|6-w{2%do z7L>RnXd7_~tR5#lPHNKQq)mF93@{8|AbfGpWCYUVWMnXWUu9Id;cOY7R5_Uf6(W&j zN>buvD){44$qewtWsuq7i_0o=kt!$ik}4IK#d#ZU?^^zx{%NN8}-dEur@E=QB2PL3fQDaQdp7Lb$RtSc-hLxT&JQwUAU zse~rwLPC&o5g|yqj8LOoPN-3?25PJ!*P?5FExC@cqg+qeQEmWgoGG`U*Zmy1gRrCA zMc7g9ChRDI9DRG`e!`BDJmx3lA;OOGFkwe|gs`JLPS{bNAnYhl5_XiQ2|LO&gdODt z!jAGHVMlq1u%o<8*il{~>?p4TJ64r9&;hc#{EHBzyiEvF-X#Pn9}G7jCt*sJi!i0i4NQ4W1;ImjQ{^W#sR|I9R7D6) zs$fEsssy1)RT^kgsq!G-T~&!NrK(D3Qq>?dscI3LRJ93Bss@JhYehIxwI&>?+7gab zod`#&E~r;J440J8%46k&SE?|OV^QEns%I@>P-AH*mu3G}?^xw;6_2&8y1%*TTLV%4 zZ=Cc)4Z>UmWVsC}@~**;VgIY%BR{?N5r6AH@_T)3LY9anW=U95mW(9_+WbDZ=er+2 z6T_UP|HY)9?|%K_K&B;WiqgN&>p^8^7ogA(sGn4;wF{NDd!d0IWQW*cII&I<)->KL zViMu30*V9>G;VyV=5pak@|lXw7^YG)E^0ONLgOs-tFDV`bxZMbq;>M@gdllS=$L(= zT#h8fh$`MmP%fuHx17eOW75&z{rL;{!XF;}?-idv>%o4n_k^bk7;``2NubHIRO2aJ z5kPnmom(QJcS~e+Z;6KfEir)}ff-HRCv|f9-&Tp$!tBsGK#%ZDLA~rN`T->lFm+-> z#V|3Pv`i?O6Mj^PPDkC?tbeQzUG-aa=)J#H9)J1KNk#w}YExI{MeS%r%t?(6G?*AJ z{p7%aR8>|M9kGD)Ow}m-P5)Lm+7?x#gXNGPHDx2^C^=e=kz?gJ;Jit|cT`6@<5wjq zdG{AW$vpO}Z_Ro6r@R1F^NPGGuR+(mBOd|fJ(bVk&nItwTvKOCRi;0E`2~Jdng;)^ z#AflmZ;t%!rapBakO(>QZ~UlCeepjy@~yvBszz`k0*gfbRj>M6-y&11`n^uI+;5d` zt$x&}hWw~aJ?7j&HM?^4&-ym1RK;QO{@*hDOgN#lz|9&PCv-9RTIpm?WtBz_o(67` z2B>Ii1W!pG<0%O?o|5XuQ&Pj^^tDV*U)!9xb&S8Yo^h5mG0u`^#^2h)9{*r#iU((Lq)fSvpC4_=OMq3Z!JKDdkSkI&fN^i z&NIPxveEcXHW}Z^X5{_3;fKuw_sI@)0jUgsNj3C3IZM@0)&+D9Yi(U(?UC!>H@W^J zlj}b*x&CwV##%3pH|34-ro3e%(NXCGn}YnGlgk~RS@OvSfCE?suL=2NBO9M=G@R7i z(4%S>&gobA(}bLX8)BS%QpnHV;mC!SaJAuvuZ8?=CB1d@V(db2b=N`LS7XbScAH;QTMgS{lDgE8}-*1HVgE))o<=Hfv}6E}f0v zrHk>qbTxjLZpQD@3w%R+_;b5}Y3OH|hOx%)GS2v2CK|uXB-C^*VN=l$UO9^;JJ%kS`K z>;P4RvD5J1eqiU|j}dUXD7a*CgRQ2;8`l5MxI1 zXvTpX9n8cG9ve|+K97&y150>Pi z<6}@)){~D%Wm!Kyfg%{6B1eFen1+}&lP{#c2Yi{~BvycvSjJb&Rp2DngOk|7x1qjl zKO8oQWBd@sFMibca*iW@-QcH_3maV*stN~v7iz$%L*8DG@%9G7+0#r!pvnso1(g>aL{zXE zokeu2YZEc4@8xygtc{*C#n@-R8ozgU$^iJ9&OZPU15}9^>`Ni+H(9 z$2>E^n*Sgu=w!&rfy5N*XubcN_3Rv8b_YVrb$HRun~ORO_Y#(MaH zhKdbTV<)zv#%`?GP8A|zhha5#!Y8yq>^2UegXkHtO&mwO-YL$4$v7#_>GS%6xQbf4 zOX3E_aB)-L*Z0M3@EM`v4)}~$;-P*6KI5_BGoB)rTjCj_xfG#>$p}MK_lf7^6c(@H z8Hy<0+R?#hyaS&RTfDbXfhRtp0xu~z1GtF56QB!)j0ip>7)${AP{^3XYRK5cYQRMV zRs${~uo^NUJVwpnC2DK8lNrHnbd{M=kJkg9A@Umqp;N^mnU@$2nGanm#>pbYaLA&> zaL8a{IAk%f8uMgvdjXtDC5?{RcJzA9(N(l`E)ghFnQqGvsP?&8RHbI@O(;auezU>&dO?meE%3L|2{mau+HD zJImcpH+Z4;kbhb3HD0I##tU_jlsI|FFeS$gQ*zRHp-vg5rUt6X=t;RpP4n&Z?Nf`7f918J(CRy5a7+nDp13$q{c29(e~%<-$FV`5TYGo1(% z`8w(hn8DXw=S2VVo;o+pi_$@uYB5&lCC{}kL!N707W4O(>I#_rv|cv|*ch-=H^o%F z3%VU( z;pv6wLLX5t!9=_mdMTz(rqio2YblpLK$GqCaZFY!q|f?``pfBam}XZ^-@pvJ8u}jQ zQa0E3F|nYNevKJ*z4TX1rt51fnoft#heQ2CZ3lDdM%XU;+mEq5m`*pz_L4)~jzFA> z9mBr_YF#Y<8vhzQj(;80y0}KIi)Yljghr=JW%V4y*j7FQwiViO??Cj*GvU3EYzl)s{Jc?uIMvoVtoyVwhd5tQU zA3RDdy8!iju?vAsNo*HE&zGcjad@hd+a-)nSJLQorO*Q=gIyZ@N+!Du)py!ujZRk{ zXnwijS1jQ6JgEB2hg)3mEWuVh;P?_&B6K1sjhfH0U{#W$k5o-~!)xP~0lZ3G^e>?5 z&`e-eIzX-K`m2hqn8EkOO_$k{=r=VNuR`@}rBOk;7K{zmuT?aj@yfBv1E zqU!a6sd~LkXx{qEsD5|B)Of7NgyXGe=&lvn3WeSm)q0K&TQRH`U~XbT^&{=?m8p+? zi*8%ltZ?eAz$_lbgFyM@g?n89-v=`ZKBpA(7)Luv~l{ zaC~my`0cC!c%EIXph5G6!1wG&enJSoh|v&(;cCCkO2Ntgl9i!~TvnGVa#=m#bq#$G zV@p|UFhYT>4LG5QtSwlf$WRHRiD;}nn4y@ggGelru#Vt|Qb9LNBhs)g#_iq}*gZSz zMs>XC2aGyiOouKY3bH<;uqe#>iL%fc`$K1}!3Gge#76$m842Hy0kZD^g^~R4GmOGG z6PSJgRK-DJFk2{w!VACHc;S~BFZ>F^_iQB?q{VD4^+senjM})%sEvD#+PDvxehb@g z-0=tCjz7Q-8ny9+Q5(;Sdr%wiq3^>Nc3(m|We=n)UG|W0Je0?X&>SBVj%QDd+W6F{ zjnBX_C1#<7=2;k_dG?a3j@c_z9p_-5sOlKHWImZ6x+JQOIWx+m03THvx+FL$NPZP$ z1<)Opph7xer>b$+sE{7?JgUvTREr1IvLVpFpSm>iC`OBnX59BNjr%?xaQ|?gT#key znFegtB%TTAe-6)VaDQ&&zRwH9zmJ!Y2jxLt1-#WUsF2{Sph8}jm!Us`wc<4m?yn8* z>JI$=;IDXHVz799pnRJ*q*`d+!np6-0_Der7D@QNo5An{Y3@5&cZ zhYG%!a6DgXG|DyL!$$CRV8ll8_29(DK*1cR=0U*(>%|X&_gco`sWnO^_^%&IZIEW_}86*lvCXEZAOD5`YQgm(@{q6xt=}mDk|tKZmRa{hgp-q8=NW4C=9=OOiVI z1^p{E^hY!->KY~H|F$~I& zi7M{GBCbZKC=nmM--?Tb#^ayJApXS0nGWQ1zjMr1L_ zJ{yqzNRb^?-=m>aj@1)I0fX#|8t;EFn7IX_7?AxsQHpxOL9ayJAN0yY@c!4LYH(4T zs==XGUIe~x4t#$TdgWjG9;y%?=m(-bQ2t|7Av^`v?})xq&rx|mN@Z7&9$a)s7fxh5 zMyc#!Y6AM2nt*;l_A$j!sv(D(naU1=f|=LOD;A@>Tz;_x$iA3Z3ZAZlSOWy#O01;{ zb8!#|z8jRv5IaPi1Zp29P8nVDtWhD)Q}qC<4N!S5E>ZP>xQxp4b>gZ~9!NDHnTWPtO@Ilf9_xyj+8>^LOQ1%hcbd`cQcnvv7yYd(c2<%lkm* z8{`9Yjo&06qI3Kf`3T+Px5>wtXuCr`0bV~TpJH7<<2aFIiw8%hzpc0q^ zok>Z5D}O7cs4o`O$tnIR%5gtp0@4@gcqpQD&YVOP6H5eXQaCp%px}nRmx&Qv` z_s6&fJTHIz#P{#~{EGg48cY57-p^P2*VhdHE{&ysegEfA{QeB42;pb&--vt5N{^pS zZ@z*x%=!ag{Q*+o`_+A4zhA-MpJVtPbnSHe{Z-QccJbx*hJZtdv#+ z%c@YUWCExI3BtpjDc^jUK$AkvOJK3dcuh-OQ?+rIz_Q~HrvX#=UwCNGTH~)}UH9&7 z*y_3;8{c}rFLETgd)c_4q`gv{x>9UMqbqqfhGaT8HhZkFvh(~0V;;V8tatDHOOuX? zlpttI$r;&V*E%{icR=Q8Ba3vI&w6~VlQ&(er%Rh&Y_#*sgY>5+evQ7oYU!udw$AAN zdUD|N!>jl2{&J~K_?J$#D`o3a_-3B02fGLMPWspCZ=W9K?O7`PaPNocjMyhW#ONXHGlmv+v-)*yM1{u zx5cwEb=ti6^J1}iZ&o$WmZbNm?%SqBJoswuvf9^?UBkS6Pn^x9TNei%@4^wQb|mR#aF{yfvTSLGEEmo~juyHkf21zWCaQ~yZO zeOFRfeV=#ZzOH*;?oOSve}xk(I_0Qy@6Q+87Z-aU|6%5&H=9rXurt!hlG}8$(1**i zuWp!oeYOR6E=`zFc*KCyCqge>P3RsJ%CLO+03I;MaE_D<8;{ z_e`6hu?_68W5OO3&6UzWXjjY^x$8W88+Pq=@kd}Zrp|M-s$ojV6 z+z(F*#5^|pQSaj$@0?uFs`RR8sn$*Y@Gk!FMQIoJyB~M|^htTMhF+O^l0W^Hwauz+ z(Xw8jT_@|5Hap9WiE(Oac(U4^O1xaR>1y{zD~B)Iac$_6-KURMDf}kVtDd`COt=~) zJg8aNspf5iW5s#3{LQ9mEn{~*^lI&l)Usk|;civ34ZqPe*E5$b9a{UqmuEGi#jjU+ z#o~kQD`)<^G~(*z_p3F!xAw)fhgH@#U6^X}!RaF#&$v)D&yyqz=AC=FEpp4`;?9fx zQSOHo6Tnd8)ALH=b2` zdgbkx!B4lp+feZ9(`JtbUs!f2p4yk@&=HYY~H+((ZEBD)_qhF?fIXBzY6jvvF>ijNo zreV{kgj^`M`}vU5pFWRB7OTM@W5;iMT=`k+=K1b*eY0|hNK`O+=oRY@?3#RAno8CBHf!^7Vu}6R z-zOi>Q_Y+Fwnq2md`w%neD$d3mbYkIsqWFN3m*3h95OO9g@eNUvp8C1HpG3Q3kR6I%sl`!jHAur1u95OrTdbT^! zD)E*m+%RmbY-9_Gr?a!&HirA7tUF3@rNm`)G#&7N4jK)>MY%dhnaT4`NL znsw~u>~84VB>vI`hRyzC%z(nT2dxe3l8^Vi`uu6NZdIa{kmG2zq0;T9oPhwq=#3e%X|1bWo>aPsaNnTi+sm8uxWeq*l?l2h9$j zIlQPP+N&%1bLUOqp19aJ(wnKDOM{;_J1egD-!2-*H>j8w>6}eV;AU-czS9?w;K^ z+tb6>W=D-RI9rAbS=#+IrQx+c%RcY?^g;9sbmI5jxpi%#rThU<^PuB<>~x8%5;r?V6>B$zf5?$V1(Hb^N9Nm#`kC$q0FnH`9HS2 zaXl>hf$`rC%=wmNL0)%BUiWbs{-jKSPviT1iah>HgRC+s|wR-Zwj}!J^`}iu` zu^eyehppc`B(z$$oiWc`*j^y}?Ybjcc4+a&mofReVGqX)to+WGG3%yd563jR*yVJB zk&3o6luzC8iF%^2fKAR+NjrGkAhejP4 z`poW-X?ey4!7E#2^4wZ~wSnhZvvEH_Z?yae+mNGLNr)VR`v2r{B(-(J}me3FmdSg>hzNOnB#gj_t2QnqAL& zaD1wl9cOO7-+oPniD_EjeNZgItZ82lH%y;sZrsg55ocD)dg*3ZU%X&gi;JZ`_k8`w zvg!qMR2x5UYiPxi zI>}nk4|R8Ud@U+;oY}J1AD+D#Me_Hr*1T1=BbqqUoBex9oh}|5{H^%ksA-AMbG` zT7~6N{pYKr*X741+rMN?y=PX!`S&Lzoe)sw&hnm-pR@D>k|vBeXwiVgiQ-I+RHRUh zl+N7MlhRfWZPKFFhGm!6w_G&3(oNsf;ONVXot@8ZxxcCVNWl)z`LtY*^l@L&EoYH_|Eo`3NA+Fbd&=4B8=LuquYyhY4tMPS!r(>@0K@CzvZ_5)>!L0@7^on zzZxg^k4@jMx@%|paep6=gjuI_5}hNTmH0hepsrr38h3VQ$vXS1U1nXn-G25)g+)in zslATYSFX9uFuuQJak}ZOW{bB2RUS6y&PY>rZx_35Uotf9lj*C86)EQ17hRVxowD|h z`-9Npi76d^!DIP5PlEC+LoH+X+_aMNQF`vDLrO-x+O2csLA_P{w5TP^M%4(+=R!T} z#F7a=o}XyaZ68_|mv}h$>$Lc<=0^3|4w4VkD|_%tS+>5@Zl}Jxy?(;S)hB{0FA2%} z+)HmKAIxGb?E>W6)BMiVhv&yuejGkor6YBRcdV21x#`JEe!0vKK2iPRkoD~9`O)#d zQ{GhDm}yxUKHcN+^J>JFN0o#79C(w;FMhk(yul%L-#AV42Z@{KWvaY$t+eRWc=ygkq-N!tO3M?4{Q`;j(AGj2~INJQTS)T(cdvs4I zg_mr35w%$-Lt8?2Sz@xWYzd3L3iL+u8!#}Ar&+pbQ809(S-14_sUsD})hj}bHYBs@bO-|nU z-{F1S*V~Xei?UG;dYG&-X-QTESyRlXJ9oO+23}YR?k2Xm+ ze6iuuV(+0-xKOvjk0txG<6mqre81n}o^WPc%IcVlqn21@PKv!isK)Pof5mLdzCABv z$80dEPD<wfmEUuX_$Jls4 zjK{~KA$*7XiqQQJ*RL;c94-twruXhgi9F~t?`fnnkIue8%>3c16n#@;<@k5InrH01 z5bvs4m+v{OYxxoFp|GcI-9E-R?702&Tui@F^Yj~se)H?PI#@R6(bDCq;V+Z-->toz z68L;h?(*{sGaXG{Cdc?z-p{OGGQX{FmSd@A%+QAHpkFqt78q+3$8@RgoNl!z%ZZxa zQu!ihS^ZX9uhrjI-_5x0HfY{E>&dB${J(y=8oAzXo>$7Z#JTIN{ajL#@|E1m54o+` zB_H>`@QH85>!OG6UvH?}U6gRj_2rDP_1%_VGg2D%{I)5#@0x#I_A>WD_r2K7jUReb zZ=(IuK1E*YGQwh;kLmTDO92mHF|bfsy-iQ~Y{$ZeuuJ-OWr8tn%RA;lYE{!(2<--Q~;k)64g}5%uGZ9`^oPXz1`Ox0upsHZ}-z)*h$7 zhiQASyqqcKsC>Db93F7(h#w2Dhn+jR(^B$Vrzo`Onw{6vKj9mqo4kHW-;v7G=LK04 z&8@kwdfrajGW`-?ao*r&Q~HEJS@zj`w^g4EcQI}-^Bc9|`vbF=ANGD}&VN22>$%x{ zz4cGc?6=R$sycmg;%)7L})#0uSHIi~+#OK@St*W-n$BBlE zT^M6$j{|1bQ%VLtJraJjBIaZ5YSnE`D%SiukEx}jsFC}HBfZ-SdfQ|snr{w$KE-UC zx7mR6{n;;Mz#pr#_WF+^RkFRtrcW4OrT6a9mwhXUvi3cnHgYcpbbOa));>zij0!k+ z^hM3wUz-Pv)v|rD zgRaYRnr7PiY;Ha~wN9cjg&k7uVv}%npP`m%Zp_7QZX4p8h7Xa~tw>frn&s}CbSJ^8 zPpjU9@MiaD-}7FLBNx`zSDTRmy4nUKpALLL{Fxi`dv1^AL!&O-?6XYGnLa-FL-!GW zmZ8sn9(iq2TvptF%9Z80Q3G{@8ucbCnV4L@-SiDEO#i!)C>tI3DW%KLyxk864;f%c zB!&`(=^b-}%wP4p<3WAszWhywU^}_*&b|{Xq^EQP*N*!g-m8DavP1MKog*91s>{d7 z4Tc|AHc0XJI(|pD$Aj%7Y%>}|zf_!(zwa@ndd4w}(H**dS3C6G*FJmHZ@(jMz2nHE zsdH-*X0{wQp1Gr7Ow#yo%_CxL&nTOYZLYqVWOn%MnaRepEE~sEeq5;Z{_e+i)29V% zikm6BzF+t4T2s(CdAqmFATeOfw`jeGDzah~*@)}oH#~k_JI&wKVzO3EsHLvk6BkG8 zk6m}#_0tuE#E><;JePeu=J_mA?`7|>9ToYH~|*whnMY|R4GP1U+Rrl{nE%S*^Z zOEeB|*POH5)$*5Hmw_L1E>?ujzUp*RI@ta~S^fl>V3D@C_=u8MS!@4|Iv?t0J$f^~ zIo+sg^zhvMOzX*6k`+GG0TaF`Vb$cj*=G`_MXL3zG<%R^{dh*^;BBu>&aSNvF@MHK=a~6D2x*@)*z@?^1!0>cNq=-@U*%@;GrQl2K4)+$ zg=%oWbX)FiZ%#RlKT^LpGDG{v%kQLiZC@4uK^ zakpnsh^6}Ek$2bJo!fG2^xe7I-^Q3{Du0RAch|jO8?|$Ap8d9+1s{z(ri}WdUwnAn zp45P|r*t0-aQB`VFjxI$<{9S$Jzu@9J+{2)$KE?fE}7^o5(}Qp%o*9!<>jxXiDtyp z{+;xs zR7iZ;+d554sYUx#=&a%7`I{<3lG*pJH@3_Q=GmwbgocWEytIr0^Ha!yh&uXX% zzoc!Lw`Fr*bARtiMFsoHLNogjvj-UQjhhYPCyg6A*Zf}HY1Q%5ojYP_wT9Kkf7uo0 z=hnTHF3Zx?a-MuW=$V$yldjX;wmq2mRiAx5ewy%LWK_xBo2&gst-4(0Ixpf@u&(B; zcK#KknEer6xR z_{1xHr#qu3#~X!o(YpCAz3rZb=}e`&s!B7&X)B8iyiO0v^l{kdabm&4QM+8uTcz0? zcdu-Zz8~<$);RgMimkJ$=N#XCgA=a{IhSlc&GB_sda=<_VEO<6UI-r8At+`ZZkbGIJ@VJjPT8GHszmd+Ii~f#D@h-(gMTBrn~nHYYuds zKfupEdiQfbjRj7{_nStT+|oB;UfE2m%XDpyn)v2aHoc%`!vv!beoD@JOMJ?W`n7IN zP8(Ongilw|3H-cedVryJiA_NIyQ(lpN13DJP}gUZlfajo;VmFma|;_g8fu{{F|p&L<6W`^8wgAFr%xRy#$CO15eFUQvg_=BMvgp5neI zu)SY>Q~sEp`SD_x>0@|N_Uk`wy&o6ju~^!x0mPYDP4Ekvc|bj{>N8q&Q2+iMm^p# zAnMS<8NGPRs@*>>FWWr-M38f7aC*mM=Qyq0Ce4wXHMscRjP{g`KdKM@OmM2-A5*tE zEPXm74&38*S}Ylv`{%F$Mpt3xv zQg%Vxz1q=?Em{1KwwGC5o%_DRh|<1ZJF;h_njP_eXTKBX1UotFs%e8&%#3_5H?y?m zgHJo!QBG}pb~Bqdl%0FA*&;1_ck=xF$z%8VZQPphgN_b;_=0|udb@7ps-}(4T?k(3 zJypZZ@BVsVP?6eJ2X^xyp?_bGT3^X)wY=u-=8tqNkA5!MJn`P~S@(`_E){w8(+zd4 zi_}5~crRT(b!NR(y+)!`UUc)b<)pnfMk#BI=B_^a$iQ28q4V zNmH&(IX&&r-kL*Ux@)IQv}m~3ctb~EFSXbAlPc2s^f|L2I4${wVeT0HemSL4VOarY z@hd7@3U(Q;o~a%v_Vt~;%J0-YwRQE~4qGR*)X07;8QZ({u1=0z^K-Yadxy9kjh_;w zGis#!WdlLD{9f0qaP(yx-MJAt8Sh`4cn!WgAiAK}C!O?7)Sp$4G}03Xoet1C`C^2c zTKbTnDf3dy4qwpTZ$Et8#a<`7I5+J3Ib%v(H?s|!cMSSn@F1ajd$-*yRFj@8wH;pN zAFQLjd3Shzh*fN4Sljc56*XF_z5(jRT}zYAj&#?H+x}+v{=KP7YOP*A`?-A1CiBEA zdhVu0)-Fb@Y$M^z${R#tt~JH9EgyX{)F3;Gur< z?4Ug-)DBFuz3pmST0Gm#FXljbkrHX`*|X5aD@=bZoM!T{lxsgPjVwR@!*jad%NrTSA!7={GK`P!=qt!m+*+pTaM2e^_h0PV<=mII-yU>4 zI&!ch-RR}&*d_9^)lsM@|Pu+_!G{&n7_cFS0k)#-5ym42pfIC3Do)pdmHnB)^rT7G&yvXWo4 zw@MrSH8tzcT>t(LZFP?Q_6vS0T)GvX5whh~RmF{si`0W&EFAo8@S8gu&33PMGd-If zSm_ek;Sg-tS?k|gmJqY>Nn23Mv0HAr$4s;rMm0XYVZPzy?Ch@lGgk~32QQ1u(dhE{ zsfYP68^&||%e5nZfBhxB?C4(}@uTqE809n0>?Zamclo zr_a9=-()}j)wm(f;qKGxQ_oh%y)l^mZOO`R{X1@CZEyXke{qOoqpu&yY&6*tr!@IS zw-t|Kf(E_XGU~&okJ)YsK(ByXK?Ct%vOo}~CA~k|O zl@}++J$Jd(&-z?4d2h}Aza2Zb&dsjtGibWpT(wHn>8E1X7Jl=X|NM*>f8GbTYHu&t zE?c<6Oy`w-kIfau*6yw|TD5l<*z~%gO+pl!7 z%kP{UT{CoYZOiiNHD8_-w%y1+o42S(+k4-?X4gw&+9eMz_&Zp6U%R(B+P|T6AoEsZ zZbN@-Z|6&&BFCvFf3J<5rDWpduVnIcHFy7`G<)HH2u41I^r$u&$FlBqJ_i?vnwv5!S7tTmo3 zztDT8^Rg+U&m1d~Ei4X_Jh{=jS~qAVoj-fRi%l0^&HVY`aQVUG?>|Z_?H`iKNp_LZ zZ z`u*-*pAE*>r~eJM(@0sk=)ompW^Gz{Zp)X(^+Qhlxw9zq!Sm1JG=nx9kGQV}Z!-eg zjt(y%44=QfA%B=WZcel9y*F`dA`awk^qy+;u}cPAI@Ksl`Vu*J(Ayho&i3A4tLjEA z4YPYQHD+S@f%PNoqc4ti^6HU$&E{?Djx)8Ng5-^AE03I+xOU$+hnBi0d4=cQN;G;z zynE^ug_njVex7(s`w_|LZ|6SCm-ah?e|CrwI zwfjeA8+C4W*}l;Cc=L*Im18;2%KX1MUA7^s;H=G(cduL4X_rI<%-*#0y2h$8 zNsg%}ul^Yxf8)62+f$JqLvpVd7G!-K>S9n>e*bhowyYrP=8)H}WiG8>_6@%~{~im7{`%}&!=j`KeOgj_2RGTMI&7ZbR9EP3 zIQp|=&}Z4N39?utc0s(pbLXX~=S5%F%~_jQr5WlUQ_wSb^3cnZnp#fJ@xDCD5Phql z-&y@=a5yyYOkFp^%4M1F=JvtnW^VfpsoJug-LG40=F2ta2C2x@S}gWdM)rWr)#RV?Xu@pwyBL+lg#9hnDvl;s{#B`H>E)1?s6gB7J@w*&W!h~&E}Z%?7efD!R$gkx16jMKUAZi#xHHi zjB7hLck9mfx21uzFN}?_`R4iOoKv34T6PLBDA#9PK=^I}fEU399py(y<`^11pmv)g7S2O2r%9!UNhHpy{n z*)h#QJ$uBrM%V6NSg*Tje4BFB+J%dJ9(+Iha?zRxb29Y*eEzSH@Fc3-q|4C2V|H)J4dayErk$T1UURYJ!M5oo-UnYa{qEa~ zciH{w^W=d)f9)RYIJhcr-kH{wb8>%|Z1|D=VX4yZm@Ri6EUoG>_I^y4v-g?DOUU}m z(zVY+f8O#P;o^4lzSV)X+m%fA>dr0q-!AW*)!skPA^T+Acei!Mp6p%y-)e^YO%fa1 zy?@^Lo4g=%?EdL5FE32JW?$_XdoikQQC4SyXNzO*jd}KOZd^Y8t$xPu0{QwuZHu!y z0-d|83S*pJ4gB`$-KSEmx(!KtpLVS~KJLcB37dL#jMR(m8t}_|$We#aZGUb_J`SBU zyTwv7pE$GJ%CTnj(aTrs{;YnkWqI~@zlxk;UJs)WWZ&_08Ln#Gc-kRIb;Y{*ul?8j zHpmOkxf^pO?qz*N>B&v~zx(fuKci7qsolzjTrvLZ^u3_$k!66<@bs!_U9alQ=o=tT z-{upTSFN;a9BgKfaDd+71pIr{_I=@Rnfwy0LWW`&b z8D)K1(;KvYm0qrB7%qF^nEboJ?|riRpp}Wi2hNij_Fv5wY_7{X)o@{MS>&N7v--O> zSNu^f9Nf?Be566itji(R`Pug_`WYVDt~+ID_^-7udv9|*SJgSJ$jGr?xAUmUN~?fx zVcW>|-hr+fv08TdMs$aR<^7R0CuAA-?tA7&J~v!!9esb^AjjETQ|_(HmOSq@d-#^R zUUBlplTOUOf2Yu4#=-qduY_wEm%j6^jWTbv~}2tJ%~zz<%_86WPG9nLVd&tXgH1R2W#VWPdJc+$G&fLsva5 zbTA~({CL{lKKhu_<6V5EUDKrl7Jjc^$@2CENFU2NtqTRzyQ*go3-?8xXNGmPaw$)F5|CC<1HgygA_3wqP z)9s`BYDYRwk1J}auAjOuDA+0cTbV$e7av>i@@xC>YBDQExLKXo8;LFSM(Zk3~ zHOu0|e{LMM;jE{@e9Cy|UzoGi=EYu8^fZQP=WPY;9Qfi`wQ1qLcZl$N{nC`F0t{nr#RD zXUhxz#9klq-`j!{J;rJ7@vZ9bIW6B2j?qVKi2UDU&T*YupS>9# zyZ=5MyT8fZ)4!?BE%{#E+W&L^MKAl?RQ#DII+?B!QeDT%YkWMj|67HPKAuo@+*aiJ z$MJVprTeK`7(%Jj{SrMR{?qzXu5CZ#I_~#>A4FVqE_PnsJKP{=-^G3T?e3Xl|9O39 z^x=Oy$-Z5_Q`-V<)!y?{pWlbo2mM}r!sCArkjQrWIEDPE`5IqY>asU0de@J}PBux) zLhRg)J|qdB>4Xn10)6mhH6Qp1-OvXNA9{qJDYbDDiBt!!WZ|=oo1_y+dvYbLYJGq_ z40Fa4U|vZ&%pti>7Ld2dJ1}>ooNT2QQSQ`Q+N4)bul!zRy?1ElY2MT<)qJ7(O0!L? zQ)iUU7@bKvvvizv+;vvz?AAG|ldtnpr%_i)x0`McU2R=`T?5@gx`TD2bmMf->Za;u z>0Z;V)MNDIdfoI?^?K=P>FMhA(;J{SSkG2(f}We+61|mrk$SoMqJB?(ZTqLTHCSob&9H~zRKuHwpN&HM{}~uEaJw-DVNR;XJ&gMr z8yVXe4^KRnq?DwV6q}TnRGRcEsWqwdobh?n^Rvz`IKS}xvgX0f>zX$jGfS8Eca!9)PyN5@}hk4^$qsNr9Q)SyCoiV98F-Se+P znYuZ;4zEB~{e-&D!=b^VkFI?}MJ_i-*t#70sr+-erTv4IB2FeCQ z|9=(w^shp$1GgDVjA`R;P$50ze#XO$?GleBNs_uG^+-xcx}EeQ>046!|EZ8ivuU$m z^M>Y7sL;XYhs~AEZ@+OZI=>(ksH3y9^8?ffzUV{n0b6Hoh-4J{N&5ZY_dn1X ze?qDRs^kCXtD#aN`MB&;a>HhHL)C`QmUXFh|9%(Wv38q8^8V(Bk8rPz63N?ucd*nq zx}|Pk-3fdTiR4X`L{c*w>|Zk;zOSk=sJZ=S5nMu_3I6v5Ly7S9@d*0vO?RjVgfPx$ce-9gyGlQ;kFE0Mh11gw22hwnr2y*;{N&32syp$5IjC;dX!Do>d{JTjaz$&Bo51QhAs}qHK$FEv+><Bq)!y!Y)2j+$@gN)eakSvn`^G{bnx(r$$do8Sx zy-u>8SPrvM17VHqjj%p;kR%&spoTzlOsFJ`ZDrIUU4sv(#f+M!kj^ zWy5J-dMxb)>mjeFr_%2sE$KC^aa>MsN4d7lR+w3}3zCes!3xC1FgxlV^uKcE5_6w9 z&#Ez>SUK~N?ZMQsi(n1l#gISb!VY4WLiUg|%w<}^4rlMPS)4Z8F1ZFXer&iounw;y zXHRNDA{eZt3Ta@qQb-RW2Er`TKv*X=gm9+M{#zfrleCs3l55yYq` zDll(uEu=s-!g|U%3_%yc+R43O7S~fImsbePH8Al)VTOFVvM^4uo{fG**?Vh82oel6hnp%=(+hP;@b@WIRoJOxl%bB2ws2 zkZ@H6iB~m{b5%$h~lbv;xH$#N`AfM zrR0_5HOvryE_o}&3*Ff`>JF2{KIOth4Y9YVDQbz@!X_bzZxn{|J;kNsa&eWoL2%-a z@+bI%{7;@2ABm5}3b9hyDMX3GVFuG4K8gP$XbTp6i7-PLDR>F<_(OcN;3F*OPx9q_ zDlZde^W6kTK91il1PB|2P+^X+Kv*aE3oH0QAzC;fI0z$zxxy@AH_T1*7LEu#`6m7h zf1gj`e+j(>bHR%L&2Qv`_+|WZ!Bsda92a^Cs)Ae?BE$*S!ZIO*|0_@eAx;s8hzG=V z!Xo~%&`tCc*NZ`dyD(GeDmV)&{7e3WU@RC2JYOJ=m(&yc$a;DkYb5Q2WSNlZIHeqhe1-$V=9j%U@mJ2%*qXbl`M1UGV&!eh|q?~aQ&GvFc}a{}g$C&G;Benc;r1q$<=VJ5OCd5VmpV(EqK4@hQil0Jr& z+%LnN${lnl?JE5$eF!VJ_kdLB{^SVAWO_s%gZ$z%Bt?`%n(+}>q5c@GQ-2gztB)fq z$rI#bc!m6$yhwM#eA<2l&FZimB$TkQb~z6zCvRCjR)kf{dq7638oVxygIVTRh;~@{ z%Ye#y6GiHBt1K2jsoKuC@^8+Eb z>lw^LH-mN8-$2UjYsliZC$v=Z=MWrI#T=DV_?Yyh->RKlut~oL9j6ga_mqYBQ`eWyPWS*&h4_VVW>s z94t&1dI-wGLE(^aScqk>u=#wjpf0Qu)(ETl7+CYmmzBbb^8#d?Wx#5{A0Z{okJt#w zT6ZXQNPJ47<&c=P1~Qbb>6?T)eTyz+ILITq2bn<$OdRCt%w;FDQ|W$?VN(nlFKf6+ zZX0O}nY>pSQ*H)kq&v&h4g5~WKGq=r*7skhWq>MyNN8`6_$M|uf;p6Sb6 zVe*(-b`(3CoySgR_jALz3EWi9lfJ~a@NN8e{u}>;Z)Y6&$9yaQnZGKK;z}`CdI}Qc zsvvdZJ!F2qlU^eX35r+^shw+y@q|Ayj#x)bfQ0w4L>=UB))J>7nf)B`l}IL!_BzX6@8QvX5zPJ!+s2z|TzMZD12Q*EU(t7qAP34xY#Y|f6G z%Z-Jk<jhZBo`w7m3DHQLCmM*e#3$k;tm}Lb66RuHP3Jhsn2UwPier$nAf<*< z45W&mr0XE}vI254Du$g^aJ4kdyfn5-)$z*Xcj>4f;2oOaFz{VSQOUcuz5e zT?=c}tz*Zr8`uf#dUiZyVNc|=*zb@jE#dTG_O}k(L90>?uQ;OB`K#e-sucu33=GsH|WUA!*latFv-?h1EDB<>=Y!`*^+Q5D<^!I$44*zkIMUr9A&#cyFNiEJ{M+Cml5J!xA;nMq_` z5+;N(q~BRlKWQC$1!Kn?W*>1;#57_MmC5#m1iB8!mJE=lG0zAys)>2dDl@TEGq;o5 z1uNk0mbP-y;z-d>v=v9tLCjFePWBA9gKOZva-X z`$!p4qo~o;7&e|g$0oDqsj+Mg`-S~V{UUUT74#tTBh{PIq_o&zb~hW%?t$#D$7~h* zj=C-VMU0?(5Zj1IVmquE+Ji6`Er=aN6j#k9a;c(?XeHW<=Hg^=s5nQQD>{ku#Hr#m z(M|La7mM!VEOCZ7TXYZ|#hH@BLaz#1G&H@g{tKejsnmn~JN&0DcTVnjg!LEMfug^-kyfH!&|M(Keved>`3#wZ2dFEs z9^nFRy7Url0ckj}dI=d|sFD!%yTVNGqfd#9iWEai_RP+%4`Cqs9HAn%G0^DXNRTge}5WAwq~0twl?& zlb7-mVY{#c)*oC72?0yUd*mKglj($%uoaN}mCa>wS0OR~4kT{sO3z94AfZQJngThR zsgR<15i)9q5j`Psa2J&ZdAb*=-Lwi-ON-QVNYY4y#Q!XYr0+16j2bfpQt+%HZ*~S` zV9aDDKz4->vyWK@No+gWhioq7l+9r5AxUEdqX}sbd$|pep|An+5FnME`v9ve)WaGH z6PXZ5p;rL zT*%^tQ;ggOIY5Qv5Xih)O-`pSQF|bVWCEmVct9#f7K~;|$n~@`HG*-V(phcDXlsPT zpdXO+b`!EHMneK=4_Ik}glzqxgc0+In+MsOlbK2;n@xk%yi7=_SLRF!UDAS?MolD( z>D#m=Ig}YqTT%A3wR8<560VT`zlJRk0)=3)R4fx8h!5c--ecu+Af}+ZTut^xt|cER*OnX0&E-R6$+9c5yRstL6Iqq4TGk+Ik+sVH z$OXBITvcu&x08LBJ(N9`_heVg`^%TfedNpKzVemw6>>lMD)~hDWcg_MER@^@S?CvJ zmxv%p+^UiNl6A_Zkgf$Q6vApZ){vy7BiEDbL-zP6`EXd(FauUXEQTEPB*;#`2&?WC z%3jIdLOxfc>?Z^ciE?GRhP)4?mJgN>L#gYK0G17D>Y1`K7*pvc?j({&722SzmlxEk;c42*qABzX$ZzU3=G$6BD^ zF8HQ`J7BaQ2vmdHKq24;6e-vPM)yN_0L2RKgVAFlP`lhyfNn>(A<%Ut3Lbz<0c3RD zLkx_?Nnjif;|YGGpaP6|450hM*ziA4`#e#w6#Nteqj(Z{w~yHiey#xB7q#U}06o?# z1zzCS7)}$6+5mv@KN$E`upL~bzz6(Jfj9WQf@R3N8odQ2_J@6S=4KyfN3;v?OAN&<)1l9pf7#N$BNKijOpxS;@5D0Ej5CCpfuo2v* zAQ=2zK@hlI0qUne6rl6ALqQ1mr-Du3U%+o56!?Q-(RTh~U~ClfG88}`m%_z=909|H zf5^dvg56*egMLp+DL4(L74!l#7(yS+V&EYpQVv7(0rMCFZAZWm24E3GpzSDOh`!)1 z7y|8!3`0O0NzwiR#6++%hJboXRWQU9a90cg?JVtvAx?q2V~7u6RSXdihQ5m+puHhe z{2xRTxF?2yHk3l`5X40=I%a@?{w3AG5GTOBF(mX!NG1OVIUcNqA)#+TPWeB`31A%z zxe~04At!_NFeLOtDXJ+zqT@n!1jrj;R3m^41*19wWFgoPLv9E6!%${mBMb?B3bGXc zL79UGV8{;eKn!I99)uyG&q|Fk)G#o*4WLllnqpu)TmqTv|3J?Nkka@MY9!bKL!mlZ zVkkI|r0915Y8u!IL#2SNF;oMZHVQ+%29L(jp5QSU8a>`v4DAaZhoRARsJ#K&3p@csqwP$> z(CfjdjRATp7_}=vzXzkA0s06S)dir@K1|2Zs16PoSVLI?N%sGM|0hVKGl5x>e}6Bf zv*9wpYy~@F(0{ui^Zp;qF0d1ZaRsB>0A?F_9)_6@cE&KpVDvM9{$mEI`Tt<>%(842vFnC5A=)5uJwsi|Vlo!}bTS#<1x3 z)?irFH~cXyI^MMyHVeEC!=dMZ^%(g7jYPTugI;S$127!4r8E%3*?>1 zU<`-ac@u_1eLMt1YJo!)M{5L!f%1z6j8$U+u;aKfuu#;x~8?hUf(E#lZitB+`8t z^!ik~A48&cIDlbM-#dsQkq=?0@!%K?g`P7GW2npESPX@>a|A;@1IJ-F74T6Ejp}d= zL$3uN$I$2;ipS86-~On5SS= zJAlarpTlsA!04O=n91Pt7^Vh%0mGulOu;Y>;8YCT8+;MNqQ0DlVaI?`O#v45M^rC> z4Fq4oaQ(pP7>)vGU^r)RCWe~?&cbl3z}XlM)$l5Y^9AQ%xaHt$7!K9rIsnJY6@YJG zIMh$`FpMrZAH$<+RPap=lFlrBgZ~@=J(A~ks7`hwy zE{6IHzK5YveePpuRG$(Ijp|UU0QI#p1v2mh3}p&_h#}j+bR^>2Vi{(_-TgTG=pe=s^%0d6U{2}4Z>H)Du!@HY&Bo>N;e!~$?D zhV%iWHUr3U;O`g`wSPN?843P@q0zR`^8!HIgMTWB2cv$8=nni=kO}^yAOQRqL!<3< zV(4_}uM!M;UPrIF5$JskdOiK`HT5nqi9zoV2ucC;34&H|0L&#1p?82~7#jK)A;-{YyUG|Q0u22L!OR19#W0az z=uZd+&M5+Fgka}`RWa-Uuo{MiHX?dp*n!}l7zWNWLLI}TfqP+C^cWf#2Kpw^8^f&! zYht*SU@Z)p2iC@rVPG8$y#cJN0PVjXhC%h#$I$Rxg5Dz`IJEzLG2Aq;0S5h@Aq+9- z{SDDi0rXSCNI?p?KZgDUM#m4(sJ+lJ19TM_wHZJk2BUTY=o+vIhK9aJm}2N!uo;F< z0GnetbW9c)7Wyn!bSl)4qFAN zf7xMZba^<2MQv!0VXlKmU^oZxNDLPQ9);mHf=6S}^AIrx!}@{8VpvqqaSB?&;}u*6 zPry*?!0>#8pwPA^VHkMcASPoNbR9Z2fY}Y6iecQqsEq**wK@77fFr@^u>kabj+lW# z@7su(81x#Gn1w-q+Xz%s1Ugq76?6g5QGjkk?SVj#H&?+8FzT0xOaPrrh+F`*4I&>{ zpx_qRML`jGp@MthMHmV_wkw82*Dc17Xy4H>1Ed$&9m5_0dtlf@;3XI`2JDF;QQeke z$bDe+`v4_^y)l#y*at(QKC=u%y#z1EQ18LM80rmp1%_$`uf))B9ulY@0JH&k6^7OW zug1`*P1j&(R9k-xjgDh2hCT;I{R3bm;Pn{xHh2SuLE8$zFld{B7zW*r`Y6Dl$3}e@ zV9>D!V;EGYO&A8BCt#R~;1d`QwcAMy7Y;t90M-1o0uS&R1?U(PF-$u+ z3B&vXpT#h3;By!f)h!uAeFdM#P~X58FzgL*3PwQfkP2Lc<3cq^!;qW6moOx1lgk(i z)&B~HnFCJ8FzEcqz%Z!qXJQysrz{MM?wgHaTfkQ_JZjGz43FCI8iq&havj5~f^#vv z3K$&&AfWo7J_HC0!TA^tokIl}4)x)i7;yy{^XWG71L6iS>T`hL1a8Ifa6S`l82$wK zJBB|9ZpZLH!9OrO5AMK-s7=sqfQZ`R7e++w^BW_gHu-}Qc7p$6geY()MjQ_39y9}j zKMs~+czFIJ2@DVQB;gJS9v+XRFoHIi#t0T*2E#+YBUy|v1I%HBkzhCmgy01hFv2{r zh~W=`l`uTilI(&Je84h{0Cgti82%(!8N-)@RWN)ixGRQ7zt;^T%mzarLh$H1RgB;W zR>SbnugD%4em5A}8zBUM)iJ_Ga4(Dy3WolK5axh;W6-DdNllEf4y=U{{K2SpfUp9r zgW&_gx)>oEtcMW}fb}tg1Go=H7y<5!5$1voFv2XbAx1#^-wz|8HZ{Ts-r)Wi;RtvD zMnK0p5W_ct2VwX#U}Fq_A8dl*Q^2Me{ukH`BcK|XV+3T>KLEiBY>DBapOS+CxQ<8l zv<8O2!x8uw5{nF5w#s^GeAUrUem!M*R{H*MsL^#2~N}MsNqu#R%x}(76N%UBS*60eL<~KU3egNqImZS#;y-p&RC_vYHV(4w) zr5M%-?1e$&2c$O!y-z}88UF|YqqYHP)Yj&bL=io>TkIu#Y z3Q#{nZ3Cd!nB-v${SAB^BR&BqD0mM(i{Vh8Ifr2c@Fffy^CmB2{*Av}fy)4k`fWM} zy+0tcF?1^U8iqrC^*V-~3C_nz;=riZ0HX)Kg`v^yMHmLv@D7Ik0KSW1(S6G?^hIzh zhHinsml6#6`%FnO=VuUOKy9cl7s;0M{Rk+5=$5fKjah_7m6uL%szYVmLE!KLGWM zDlqze0F5o8_oWClrbwZE1<-gT8b|v#mSzh!Rxklo_Z zzkl!l62UVuB&zK!4Emdo#<~8Db@c)}Vwlt5IT*4U?1UkIgXbzh`#Vp8C)gQ7o&uw` z0mvxu0tK;P7Yw}+tbqLimr?rxWE$8FBSp1!$4JpRxEe#Zf!APYR9k-xi`s52hTQ>H zK!?I*v>yQN3f_s4eg&gi0ODgXFH@*S0D50V9Z_%<9H#(XcT~Z3FnZkoa2zfp zt^ufjBhY;l6y$(W|3IL}JBcAH!Klpu65WQ{9w1T8(f$D>dJNQu0P;0B2}524qq+lh zC-@wOE&wNE&=@dv9)rg0sS6lZ2b_XoIdCe5>jg%S3vlRf1?m$3X8^v0;dn4=dw^{K zU%{|%!RZ)Q51fHvMQ|pDvjk^hIMlxA+ygiZFse1csew_w0Q9%{slCJ>mKdK7?U3XIfs{Jhm=<#kVKy6v5AP8KfUIs4pVWvA@Bf_n#E%2LKwYpsFz3YVbP@=K+3?p~Jx+F!W+D+CP8}1AoNO zZs1QC8eLa|p^t%UF?18S4nw29Uyq@i!JjcS>hGw(0_gt()E5k)2}aLH05=HSh+%($ z(c=P~F&I4-!2Sk*!*Hm*S}<%IxD~^hgVE0brwT^>6(H@wsE-2ZJskA|BSnwbfkFRs zrG8?_QDC$m0D9j>p=|>!I;KAu7S$fL6TqV5?!>UKp^g#^u>~x}p#Oo;1O|=c($HoI z!V64c&^Q4Nzl=cRNi_NyKw}B$b5RI1=7c^wg`h&f@VtPa(BlXg68aP^V#q|W5{3){ zLqA54p8t=t^8k#hO56WAw@wK&nUtBd8D=smKnf)!p%W8A?;s?IkU%J*_aY!oK)?c` zh>ZnX6ch(gu#2LL-F4Tpwgp#R+cwa3T_MB&dG9+30d@c1_pSTmxik0Nd(U~#d-|Ch zsEaW}P&Z@J{o)y#^UysE&6(*5jCM6NklNb&Dqr>imHXrI`jBZdLV+5fojByV% zl`-yyrZL74XgXt5LNgfiw@~;EVIG187^;W#)(o`)Iz0nuYoL@ipw&Y&85QMB4>M{5 zG>cI`hGsMB0cZ}RQeNaTGzQY!Fe>T{>SGb=eb9D{wjK%}wd$35Q1~2SD}ln-2^*yw z{!M5rpzRs0PKdM!V}A&GkZ}x!BF%*33g}_RSqy!YaSVe##yDW_v?GiI`I&YUJdSH` zfj$AA#PKfZbBsy%JI0u#ur*;{1wFwy5a+a)83*z=?G?tc5c(?PSO9&EvEK@X-w-OL z>kY=34Ska_r4SjL856e8*ut3bWn{C3Tn9xsLLz;Blp{hSZhn*@LVf|&7zO$4*BJ%& z_Zy4?JNr$B=3@OQLj=tw`|S*kY5eqTK>aK~$`L{Bzdw$lvf^*WP`>$Hj1D{a=^W61 z2Zh}UT3_{}91;2p&;*9oxBZEXejJK&MCdO;lNp2Jh;l^eu%jROO6adbQH}_M;z0KT z#vUlj5nvDu^}GF)e}MXe zeu^KU_Q2njp)&06#^{rw-5Grml+q9Mx1c>4gZ!};4gpPT!HQ0DU8L6r&_UM>F~x(90S9HRu?|K$-DVdVujU zlzbPMwT$@%bP}Wf z1xk4g42pXlW9CAqFecf2Dnt8>{L>ioF6eZ|2t(@`gZymv35=%2^vl;`sqBL%vEF(~fj)4-s3l8*x8G3a8(_y)R!u?>PQWoTa0 zzl@$tQq8`MiZO9))gY4Dx@v zFEHue*E8nF&>I-r0O*a3%z$oVB-wjALwh~^H!-w+>)*l9`ltV9MtKQ(3q$Lx{+$fX z3Honk6tdec#vp&VjWNrhw=*{KuR9pI7{H%zuE<$v+=qbn>4Fqfdd7t$|MWJH!~-(8G*DO8E_p=b(=<##hiI zj9Csn%9x{}k25CKJx?&^FzAzvNqPGeW9tT`yacxXQ1WSD8v=cnu}y{2vw=?WC7%Ee z%5%yOVE+#K68J5S{{=n4*v~*;X6!#gUt#S3hQ7)eWRuqz`#aFr8TUall~Yr%;oTuyMe~(EhuCouU1b0hAen_5=l-jM4`RA0?E<&{mAn4~p_Y z&|aZ{n^F2g;~Cl~6!0*#zbufzC<~#9i~_p_k{IP{Xfi{4Mgm?&`3mY|XkSSng;D+n zCA$FHGZIK+XiYtk&d~mmKnA0nhWZ)WClY|63AGs7nxVDiK#-yJp8(1vL2Gw`Oh$bk z8fMh>&@4th4$Wp%_&^|sQIA2%)<9hcZNsQ9LfbO5rW9z$s4qbC81))xKBIMk7BHFy zEo5k3JkXv|;ah-J)pU_0NDa)o1rBPjk5!#49)ih zC?0^u+kvhO&GiIOZV4Kn2f8yfZxrak=nAwaL-S05UW`uh?ak2mHBiRr6!*&*8p{UC z8J*Hm!O)mC(1+3Mp_PnI*O4y*eHOGIqf=P&MWD}w4q$Y;ALTL7XFvxrIz4kRqo0Hh zVRY0pfuW54DRda4-v%Ad=$}JJF#4U)k&ONsbQGiC0Uga4l-|o3gUZSn#-Q|$Weh4e zRg6J-RLvOUq2m~1EA$G+m;j}7z_<>2C1Xs4(m7y|571{|(D@q1puDSPjEA9<7~@Uo zWX3oEtz(Q=pi>y*LFiP*cpW;8F&=_WXAJVCdd7GJN_GdP2TFMZj8o8AjF|+T%@}0E zIgCj*nadb|hR$P5vf+HjpgdZ@m~_8|jM)Rah%xDziy4#rm+CKIQoNQjCi(a>#w0r} zXH4?ps~D5)w1P1y?^ZJA-OyExxfXggW8MQ@&6w+nhLR5f^CReWjQJJxddB<^dIMw2 zgx<)QjnHk3jr@8$W9tdMiLsIY?qF~5t=q|>55=!L_ zm{I8Mj7)~!!KkgFcQR@SdKaSxp}QIRALwrwh5T_3BPqZ3GBO29pMj+F_b}26y_b=c z5BD+R@6h`hN%_#gh%?X!7)kbikP$yY_c4<4XFnr;fIh@X$_FZkK>Q7QfRSW-DnCF< zXoQhu`-6IoaPczD2pwBQ0+45ON`2hMHqfp$AF|?mA@H|8NTmvsKv~MqPoKfz8zQ`z_ zKwn~%+n~Q?luw~27~1O^c$uMf0nfU`~gA$(UWBUoqxlDA^8}l~A%TFqc5TWz4=%@+V*}gOc9^vp3KJ=#Pvo z2>lOZl9Js4&Ep5i)_~S;0%S*EBcCE$0~`4o*%8>!|cd=Y9g&R3vF9N|P63fh4K$B0kR$v6)~;}|FEj9@Fq ziFzgIVw|@@-Hh`tXgu&>oHZAkz~BvDBA5u0kRZAb>_XTVL%ocB6x7GyJzpZ20#XtF z2sDim6#sMv?*J3Q4B$r?)LTL1B_Sy7tr@%@Oaz0BSO*O;;st0XBQ`;iuY|KBGz(-S zZe*t%M(l^?GIn}S8^-w*6nRECpM$nzoV%cTjPp)tKI6OtTEIAOhZZu<-O%<7-dQGs z9T-9RPH_S@_*JkIW8V%fV(dGhof$hlmuy#p`2Pl43cBF<9%xt44fc5!+MThFhxTCX z3bZFanN&CFm^h>l5voYS21?@S&(c2?Brjo z8J%puhOupfu4QbL_H~SHH*`H?Yk*$E*vM`h7~7N3jg0L$bQ5EH9eOQe`vY_{WBU-g zg>jJmwla1~>vfES{N;Mao(a8y!MpB6km3r&=TP!XpwM?WF*?O@2V;`|P#ysK%|wv$ z7^q|$%2%LKcuF&%y$r!!46S(v$yb0*_Pm`j;-OTofN?+cPR95Mx`$DyZ0%(fDsy); zN(bmYj9LP{m!Wm+;C+l9gwlP1PCi9;2L|QW1B^j&e2_7J58cPu$d>yVWhnF^Mxi?3 zVTSgY1P?GKrIGRyC}jHxLu>58gN#9Gr#cj<P)d5lW&nGY5s4jIrzjJXQB7%av8Z-6diG|I!}jQI}qDzE}!PC+TZf%YPl zt_PChgL;%u%Al(m+A9%S!zifpLa0j#`kh~B9iwc9u4feTpKBO}uHC>WouC^TC;16o zkNeSY0Yh{>uze1t&%ky+6lI>U9e{3QoZX?@8D|OfCdSztx`T1jvu zPWVvhcZ_os^lipTW#adYb2Ri1jB_}Y@((zv9K6fmO`sxEVFcoysR9l5{0~%Tbi^yu zU1G7dl<8pv(wdpT*q1;P83*i{natSX%bBSR-Zv^T(-?6dG{A_PpdrRS9hwbt z&@Lk_nYoM;S&Ya6QS3}n^^e)6q_%LCodtAfV>E0U{JKYaHOxWr9@L|G^ zJk3P@5>B$!(~J}4JM$UF37dp%jJ++?&e&mtFcM1GQQpH&#@-&9!Z`8#a0X*<2lX@d zd?@Ts*bAY^8^YcJO3wh!1yH&MIG01=%i#wQ2DT1A$k_Wp_c8Vg=zj1p!c;=xpJDi- z6KM{^2gC3`=N9ODjB~RPSz*R;7Zg56NaR-*{G5=8U-l~u-YqM#UuDD-(AOBeZ&qZ# z&WPU&krU52k&ij>Rl<2C6uwG0VgH=2j1xA@>Bcx=!<;h4xfWW^IFZje$PdDaJkEg+ z5KiQCE_|MFj)$VoBAoDt+#!q;w#|Kkv7>&-{R{XK_Q{2Q#W)v2zh<0QLBC;~i=f{! z&J{wmh5r%uF+#L!Wb8A9$VYkzJq`+=BeV~p@I^vz1%;mxMiCUgNEq9o@Nq)IFY>1{ z2A-KejZtCa{OOE}vXEcTXosQnY(V>|^Jg+P*dZV76k!_%oz2LDP?RG=r#QfVgoHok z&tv2v=zPY6&*U#)%(tMh5n-ZyMD#qLcy_zxJgRW+5@T>f58KpB6K20d_L1eusRFGe|u25y{ z>!2EAhmRHN0QqBwUlk@YcHFZt2_(ap#zEmzg#9{bCdkHdM`#{-xmpgY3A zUWGjw`v7P!#@=6u`|o7rAE0+J5_U$>vv6Jym5g&c6zL$Gh(iO?LO9_+4M+>&ya9@| z5Kj0*gTXj&gqno zM7>4WDeiPH*u#N5Xn?I6!Z?OsH^43pu$!X}n!~8CLQxJ0mC^!x5+*$d_9P5?Ry)SF z1e(X#;KvR5jO_@tfU&`s8j$gX?I^T8V?%w_(1D@fM>Zh;2;&5_6C?LQix`PCG*CDo zAA}Y&lG0Sd$cLb0BOsB^hAxbJ7>fFhkd$ut10fGU;TMF2EgE_-@)0Qfgpgz#_z9uV zb-fuS1zN^vbf3!@jr^pXp}qbM6%73jqoEI@(funK?FMLHM)yGbF|-%Ep+BQLp#vE0 z1L#0T{~n4mN$B*f!HoWI=nzIHn-66S_-Mm0MkhNDXAH9I2*!Z_HH>5ovfn7iAfFn| z7-Zwi8H4<83}ete#xe$_qlz);e$|XYX&T2E+o4x5#ti6q#-Q?YC1cElPGHP?p%WSN zI%o}J{tjBpn9o2bF*dqxGGn8-)iDO;;S|Q8@p&Y0v^^^8ezrnmyz zGAP9l*ycfJF&ce8o6#taa{$iUK84O_6tevSM*9Z3kkQG{7cn~J`C`Uc4PC+*yP-=N z<2xul4;U{&moxg0(5o2z3Fr#OxCOeBG3G*7fvZv8DJ(q$sAQWpjGh2p%V=Li*D=Np z=z7MWbYH_5H$yit2Bmu=V+Np`7*jy6W$gDuH#2qzbPMC4vbL46liyN)1G^5TG6q!2 zcPc+X`vOX34A_IvZH!JfraS|B6_k7nDD9w>2B6rX6mOu|pkyPUQXF?O@=NHgjC={Y zi&48kZ)4O}&^?Sw&)v(YROaqx=yyL2_b~K(tcH6To$}&7M(+%!N?$OJG(&pJXK2>?ub2pieVeDU`}K(8wR2 zWz2f$bBx&)`XZyWg;G8Nx}US z=o^edY5kC~wTFJh*vQTwGqw)UGmJuI{5wW1f_~2!L!tj>v;k1EBTy-h|6z1W-%pI* z8G4pcDedPNgY4PJ=q1o7qxFWKXLQ)}!9vE4wC+dz2$=^R#h8DFj%U>E&ZkOvZsa=``YU8n$u5#;1|a)5Su# z`#|S0?kebffcv?JKo>FY!O+Ewdjxa|;~ofI#<&&ea>hLzif0oJ3WIu*aF2wpVO(EB z*MfCIxK2a2GOj0}w=nMh(4F8`T-z7A3*3R@(a<{?H`0CjF2)UeoZbz7gD}XK(|f>P z+#j62n{lDMp1y~19fjV@xSoQdt{_~`LGNc=FG5jQ5U#hN4=^tH|LF%A*Za_Yj0^dD zdOzd(HxzXR;rbVpo{c)f1^+vZ`hjq}pb^HM1U<;Oz0gC9>u=D*j5`2*lyT=nA7k9D zp+^`ueDE}-4Y)f&A7|WMpk!a*qWC_^xai)Le&Biq`ZVK0`96)hfpEPCeU@>Zf|8AZ z>vQNa#`RYy{O|M&`2IWSamLjMB|8GQ1B&{CaC@Ni4B$?Nq7EV4DDS6VX52lY6er;N z0s1QA`W^H&#`O;Lb;g|neS_``U?u>(R zJqUF&u7{v;j0=8orWK>Cgu-tK7vgxv&A9OVGx3b;EEM)7T<4()j9Wqz8Mg+7&z*to z-4quuwydY zdnUxV;Ady(UciO?KNDtL$mcUzjH>~f&A9eMa~Rhn&|JoK4BCcq!OzaLWn3Ra;Rl2Z zd4493aUoC6*;d{RWEiNw^+`k{yBTaVUiYt|y`08P^eL561N-6y=0)9e|Rpft&KVH{(u& zmNBkF(90NPwKzlh1YBQ1;iH6`Y}SWylRYXyUwlu`?+5xL{5{YCjOzjDK*kk;4q{x- zLkBaiZ=pjN*FT{{8P_@JFvhJzhcj*yI)ZUiIT*>f6QH9QH`#tPia>h;mJBD#n zxf#p2DIcmB4e2>k&6pH+9OF6xy@GN59Xg(IMWI(RuD78R7`Gofk#T20YZ%wN&|1dz z0dx|W47+^-tz+EeBU2bR`NCAj^%-;;<0e0y&bYpS)-!GcI)iaje$HfEuR~`st`DKJ z8Tle~4&x39@m-K{9f#6q;Cca?$+$j(h8fo%q3~(K-3FS?xO1Q}RK(%CTt-D%`HsQ? zH+=_xBHVNx${*o+7K*eHuBV~-jOz_(0pogAh_mofLWK{Xl|aFVV^Eb*lb{--BHxis zA_~WLqD)1D0O_*73`M%4NRI>l5IxU04+?SqKL8JKqO6^VFA`3~?R+DHze^;|H5Nkd zhpx2{_B7}^3t`WIuD1|!8T1+pAx}a#SP1z7bdv?WXH=TkS_q{Uy4gY~lc8HIgfa!X z)q>uKDltAM=>4hEyxxM|g(|VGMhN9O=#3Uac@4VFLZ}qC?H0nk6?&6}Q0t*PEQDGE zz1c#ji=nqz2$i0>(?X~xpmhDM$P2n|mj%6>Lz=f)&~MYFdAkMu?p&I8SkQ0irFo}? z(9y-k@4E>4-Mln+ThMROCDtqn`u({y_gDy99CWXRu(_ajTL}Fy^d1XgNa(#5!h8vO zpM@~`LhrW_#&T$bg)o*vAFvR{ROo{i!dMC2XCVyAL(1>{7{B}geaJ!>KSCe25XMi? z0~W$K2Ytjs82^MuEQFQkhj5H~*G73k@ji^>tD%ot2pjp%V-~__1wCRR?1Q04Erfj- z^l=MeCm(piLfE3vCoP2Wl#uVhUO?#G@EUc^g5FOr-_f z`ICx;P?tbe3wl3_Jc&31dQX5ni8up#H-S89SO^>JbJDaBI^uBBW+4o^kKIBTe}^KC zK-e0gP77h644#C2fG}Q!(!Ee0nDl(u0SMbhDC_`)LJB(odXI@b>9L@9Gsu$(7DAZ@ zg?#|M8$zCheSpxBM<o0bx+O$fjt26tYLaLKyIcli3#Zj%0Zf`2mD>3Yu#n^fAyj7Q&bgZEGP6 z_|(aE7Q#3!>EUA`5z_pgh&tLTIy~#TN8VTzRU*LTD(v zr%Em8y`l0{7Ym_Z35A~ldgqNi1>Xhqjvsldy9K>7K%VMhA@ohqo)$vg2km7c4CL3T z-WK#;d3mbLLYVNQQHdTupvKB^|cTx%HpYh z7W8)r1*pjCPS} zj+iB8X*K9?+;ncGwj#QT6yL_aj~+PxPSY7a67S)^@nXEb8gcqauH`Gu+2>E{^W@rS zxpjRfVg0A0zo}2s9u(qyE`6_mf>57m51#*$KR4ft!ktjjd*kc%)d+V&{r>!y(LC#Z zTg6sUDoXV|$d?s*C*1Ed>wbIApD`cO_uzikXN2N#_u_tEXdj)wB6hzoa6kTR-7g~Y zgAs9gRb_QBIOuWV8Zjthj2c%J>6{*EQ(ZGPxaIPyh>~6V1pY3tm^?W=DLoX5i0X)_ z2=_T8q^PJVFNnzeNU&yVK}5+9hr*$Ph?*a)JFF%pi}H#{Vnwi~ru?9iR8f8~TdjyF z6{D90BXMCIRMgf*wBgGRDT;#IL_$+CLiFt+SF$Y62;wkYekf5U;#@c)hF49gKID}Y zvLK@6N7TGXazzzAFXFAJhy_Rw)&(QShDWrVafjN-Ruz?#Dhxfw3mtW-t_~jL(1=kRd=>i?jC7#$9Vq0n;Z;FcV@qu?;uv04gKvU# z&Ot{dbW~E4UQ=CNoepb7;wmOb#K@|M7({_X_?$i{5}>2NpxVdd#bgTcxGpADSJ%~6 zM`T`gbu0zd!8)WTTwYxe(es0q!HAY!i!_-P!>b}@xIAJDmm>#oNlih-u*?lh1nUl( zlgfj1l9HRw(WieSW=-Yfh~6%QlNG@&!7X^!!47&hEHt93W_Wt-$m*(ab*MTRDH~OV zbLnK4SX2rkW`4w8k#|TaoXs|T43~%DF5&Xph%#wvL{3ITB4)dSh&?|@v2`I?nwW$j zh;LaG6vuawd5A&iSe?MOsY>FDr;7<_{WCHE0xnnI6LTiPrZ?`3FTp#pP886A~&SvbH?p z$)h3%*D62grhnq`Pegi=7i#wKs)JMxVXpEm$YDG-zFjDc%bJei4|JX`K}n{w)ksM{ z#J?ZDy*RUeB?~doN({ru6%o1{qapHCLF zIGNLYasi*jZvrCjykAWx67?7-mD1TVeIcYThY zV}G$ghHJZosbc@HA>fV$kxorne9&c7lE+0lwmYcH;U! z<8FMP%AZ#gsC|Ahw)DO5J}T&ceR_qv9PBKUDXk^22NLj$*VLe})OIO|l;#)udK5&u z{QrW&u_q&VS7ed!W(Ny{{isTSX$NfC(l6W(RZtZgKGeEsk-A8koCFJZL*3<#_z+A( zb((DjI~XU*BhHGvDO(D|!C;RqxM%l^g9Quuei0*F-V{0*si8WhY(&*zEvN_64{JGk zT6Hi=-x+Air~JfSbr39+7n`~AX05D}`94OAk?3>^-K7UYtc;EuJ_T2Ula{2QxDFFXs* z(xb^1LBv4MiP<9D17_;kd@ABVWfBbb4fmsmQZDvtI%g$@Egcb~s|tfX(4bT775)-8 zZO)2_k&RCSaH_fQ!kPGU&#MW?atk$oL8N!n(=6-9va5zVIF}~5DT~Wce-x4x`$l{f zRm0QKrUrXd7ar^&lTdCiyXfr5^x+qsEx+h&)3q({cj*-s`H^mUEdw-N)bzkU`H}8< zTi|}=V_Q(te=Tmv(85RuM6uFJGP!XMTfP<@v~o^5xpNpLtq>)T6Vx~VpaX3UdB5fP z|2voN_e&1;fAM0nH`QZ3!d=osE&L@^9gAl_RKVTxnyk_vpStITD9^|)vD7r1U;s>z z%*7tP6O{18!bk~9`M_WKeh_XalM*AP2s1c8(iJj5SV#b zKIV}6;$S!qBpr;%KP0VhM&iKwW)y|#hf|{|3>{ofVd!8Cg`tD7`Ivta6*#KG5!!Yf zRp%eZ#0(vc!x4XX1%;FJ-FOOT9bHM`tfL7O&N`Y%&#XkS8hR!j)Y3EQU=lr(4klBW z{y3KBVAlab+*Q z-#R{|0PFdX0$hWzy_@g1!TMy~ZzCU4@J)P3!LP-Yz4?Bd`H%u^;X?|rHUE&)>e)t& z^h27WR$|bEwye55FJhY#QL~0GYiiI7;7juSdErKfzYe3uB-18TVT$%;7+333RfIZC zC{kkDT2bUg4M%bqz1KuDHPg@$R`nQ$FR#cG@<>ppMb56%2Aj4Ye>C3v&v@fr-1n37 zU&;?KUbcxOG2n=bZ=@VNh`RMnm$CRs#1&R%uAzF!GncsowM2(`QphU@96{Td77@C=odErq- zopgF)Q72_&|4`+as_3JYq5f54^pVjx`F&lT^)GIXd#^tKw!T>3C(=c0F{{j#nv$HP znFE~;)6kRwgCd=f?lh;aNKJ)FmB|i6!LWX!-KLo4WPy}t(jk2-O)-rEBG_0`sZ8W+ zhE9#10viH~!(&XXZElpIoX6D^8Ei6P)qEp(jI7e}A2kprXKADa4` z==%ri?u~vT`_x4rm2>Z%zFX$qJ^N1CIyyM|{uJ3(oe=$KYGX7cKW&VL<;kfsyIy`4 ziuSVpsgGi+LKRET7ibRsjK~+AMGx^xS!%kp8HLicbuDO*mrxs+w{3b%ofukOT^5&+K#&5VOyI`uZ6u)&=jS1h%il9S%`Y_xsC{;+jP~o1~;}z zou;-gz6Y+UYrbX{%vP2XAK$G@=T3$BZE`}bQ+@G?@rg+}VW%?{7Br|#QlX?0>XkAq zfA(GHoNy-Duc%WAEZBT(?5W&3qyDDu$o`QlG0YhxK}he&X#xH0e<0?FM7?DrlRN**f4-?I?5bVs&u( zr6-KoiE6&Atx$DMS2dVWx9YL!LktN=tHG0MtwU5QQqGc1o}G&e3|%!^TtJS=;ZYo9 zZ7g_Mc5_GtZ*Ed_%Yi8zvgATIc(SW+pU$1y7q-bQ$c{IW6QNLXu1ksKMJf3)=SVVg z$dn}|r6oDJxy7Mm6CZuP=6vw_N=gzXyEpPjpBvpYzIfS=n#C_HyluI6RK?VTbEe-j zp=!$@Srr^)Q+%$u^4=Z(DFyc*ZCo~C(x@qA)!Egx^P}T>jv2OT)P$=$PHQ)!WX93O zn;z-pt6nu^+8tHHudA-xG%hoGeY-6^?0y+PW{h|GqrIMUs3)s7%p2ZkUUgRf+D5m0 zEpO@g@!LlAMM@>r+0wA;?6fjptQnybWF@MtJ&H4hn}woI(m?GzrYcrDBNVP*9{ogl z7Vp$Z5XNE5EO*M2g$fLk04pRLiYF<->r@zvy72mjoUH^#w9>s39a!B2wKZ%CO zE?l^4is)R{(dSL@#J7qw(iI$ROW=m4}&Xd5*J4abtCB)2;$B`L+7=uS-2oxVJo zn_KGBOG|xbVm6LS)l#48!-1*%c*BI$x1vefTd5OzMt>N;{S#l`(PR3D9G`3-uN*4= zA@TlQyY5f?L1w!b zh_X@~&6Xd@J-1Q*uR_0OJL2aQiDg)gkXCJqnlURg7KsbplISYKnICkv47G zWLaG+@?)wqxO7kro?vz^L^a}4l9K{TQKw#t*1qS8r&g_cVnvT0E1p=j>ZuhyKS=96 zYvhPoz0;s0MnaVXJEG@K%%6WkYCCpF?d1gvUXGsIvG9{a_4S87S-9}yNPT_eV?^Xm zq-;Cl9WSmZv&Y$OrY05YAW~KAL9=op^m(wcS_^B!&`ni%a}Oq~MNp|z$LE*AmStWV zOcfHDbfK=d`YLvhFFU)9RgjRRlB|-FVypEuldU|vP{Aw043!pXp>w^D$@u-lA70jV zY+9?LtPX=#y}5Mbn~R^0KJ}(DF>A`^JHLJQz{H#PX2*YKPwUZh%$5;T4lmsp{W1F0 z?g;r6>;S(aJ6tBV94W|G485uMUMx@}>4vsO*ih(g)QeZitJa_jQ_=aWM?+}VqMu>) zI4*c!(XU=-qUUB(>lV8nH5_$OtnNmfH#C6E(ZM5N8PkemMN$wVHRKf z@lw+mW9ce1-8E+4e7R}mbF+ud@6@(Zw;%BZ#_t$eH?Po8R!dpBaNL+>y>b%Y(7YYW zhprnocK7Us59Mk7v3A*_Y&tOBs6G;BD4N>j;$A~nGZV6Pm5Kht1^d}fn$x^l|45eh~5+} zlDne)>iYj$%s8psC2%0_J>+!|4fWr)>h z#Oif(cEbzwX&6m$_?Fq`$nqp-BhyT$pZbMbtS@qLrcuq((SyN@2bqXdTKJmi`DbPv zoH4M}6EudVP72T7Iqa2NI?b=^t@;|TQb(-n^WJ~t(_7zPF?fD)m+q$Wv@dJguKo|- zmzp-S7geIQ-}vd)Eq_}7yqqJozUxly@?O4w z%-Uo1Z{K!p`OY4y?6z{!R)o+o2wOV#W(j1ES>xOEw?_{rG2og zZ|i%@CO05$Qe2Jv8-@I{i<{9rpiV(0sZjOQ3e}Sjl?kek1@!$yb}NCzQP-2|)f7H0 zP-K60o*LQMR8-fVmpBs}+&SVHq;f)N7*} zYd%o!ZQLB1uD+}N^=xS$?d>P!bZXG3%_>0M(i)@4ZlZsgy<@wKIO=dC85ED?7P%8^ zsx068#RlwXmy&|Quy)A+QRnjOCw+hO)V}pS4(;weCVS#zYlqCbvgpe7)qdaT z6(g2BH1WzklgF;BPV-&9MQMNa_2n~q&Ah&Q;l{@MN6j45somN+*E}+CS&RCB{Bp`; zhLl=Uid&ng2vELJ?>7%4kO^w7fk9loiX8Ccdi00%`U@eO&d*ZO)k;dkex$6-)V67D z(T7aQ$qw0I5?`^;i=J+Ctx3f!AvsCW5<6iOK#d61oMe=rD%XE{dosJ`hEKO_IkoQgR? zbt!%5-3>?lhF;RQRKANv6-md#Zx_SSk6wkE(m0Ify;EMcESAYq8dJ}9)?Pn*R2wi{ zzP}p(k4}%GW|Ftd*~V101|`K5nPsiv7$|boP9Q~8?yxOYQI%oTwYE%1-D(iK9d4<7 zx;okldsQxZwk~PBf@kCZR^S16_UlS+wC!qntK79Zimktte^bwYsg6PU%n)tFlp{7> zL7ZYmlX^9%VW^7GrO;dUYII$zMSC$zgSS$z=AvLty_ySwFgnP}&d&6CxjMCGUp&1~ zq2`3mm`jkideM9|JBT`F@X}4)7hbk`;%(QBPP3H`>$`pK^5^G8j}Ph`oA`^b-8Owd z+Gnz@cii?VH(vL)(z#p5@jb@fclPtn0|s?2?mv*yw=;Ue_!@iD28b#-X;35)RgT*$ z?TSrm_O4wNG-!iSVM&$k1Cgw(qE1?ZUea7op}sCPzOK3L;USj~8g^6vdE3TpKT?}NuONGY;%l85 zNc4oe70jB{tM;BZW<3)9=)J}J#<6Iy46&p#KhWaZUdC`Ye!6Z#+-Xvexx>f$j= z9fyIDDX~R2R#W6O&tB;Q-Fu@RE{(Zt)XL~lVivM_FrU>NP(cvPc%u#EE1H5)0?VAE zvnsN^nJHN|XTX}R4I@ox!aSX=$*fLZYj%q&46C6Jdpf|msG+9@jy};xZByNE9v#tm ze3myk**i{7WuKZ1PW|d9J zaBF?sercQT0o7$uSI|K-?Kc2Rkv> z>WpU7WM8bjP}!on>KyzZDOZx?1r?c+Qj7{8)fXSBe<=FH(Fu>riudY{u3Qn2sdnG& zAzM$blS$w8AEyS*_DkCH?#g*-u-=sGG>XB6)FCZPqJFl-5XedueNDN1G?arJ z)@f9O4z_kl7HgQ0P;$ay5Ar=hFCY3~^z`$MCuY$=aF!DPz8wA1SL^?A_nr6e{$`zW zXycE^=eNwc?U;glU#g4FPF0k}63>&CA7e5_w3SsDRUw`x@(U%+PO*0)ze>>Vq6fA> zI83|Uv4CdDP2D!_Vox?EVm5X%Rpva2YjwA&H(uJ}YSUpyrvHyGr`B6aG$AFP z>VBGlxll+g#|)#hf;pMeB78#Wv_6q{PCxtM49dzG>SrrjORp>1M;5t!$ z!3B2QSlD5|ORj8sNS#RwQm)*bHl8HQeDMxyH>nsm+o#EP7{6H6`30+LzU6*3!4(}W zySltGaq84W>1!1|k?h^v_!q09Tcv)kr9SQUHh!D#*cL6i(U}&Fb7v^Fs+QK|x=W`$ z&h5prvczN*&H_Z(Y%6v2^)Q8Xp<>56uxhSBZw6gJ6LprRDYS%(YE0Ly3ay?7ji}gT z0TomT3I^TH7sP^LRRk4#f>o2{c&v&YYZA$x4%AinuN7KRtlHPdPxmITcyaE2)M^tC z%OPJZIyPZNNKJA1=k?$5=hf2x&p!1gETMRn-VT`-AI)vscFAi}yWxFVakwBg`q<+M zsYYDe&Cws9i+&J&rnS5+K24(k)l@&|5%^J&SXt(6m#XMSI2k>m_I6AP7R6(!w#g)6 z^pO;4Q>HoXrmE}4L9|e}!x&A9l$j@urCO11V zl#t>{O5zpXrZN=54CbYIPir{ISc2z9kUvoFm=Kit5cisQhbS7oQk(B+;Z9(wJ-b zFo~+wbxfE8LN`L-sDwH zZu_&SA#O$IPpS_gZhleqm=6P$rl)2__OYNTQa8gdt8mw40=Lw*u)nb}t=7_5gElu{0ed+%46(fxm1xDA_*DXo*u2uZH zVUvD$WAVO!Z%p?qa>7roy&t@q|5N~Qr(-H3ivAE2a6HZn&J+HM#H8o5(%Qyu? zuqI~)ihdX)F)!y7D=(g@M0qBG*e_1-_!{RiC66w3^C>G+X-h+n$CK=fr`htR zh|942rHsWafVBjiY@IZYH=acrqHU)}yCPeom8dIFcgWY|Lj3psTxyx;a?}^}#7bUR zOu@HSU_)4m z(s{PMw&GmE&k|O2_9yirE~xMjh5GnN(`u|KXa3*T+%RHkx#pI2(UKc_xYTdQ$}~Fe z3Hs)Vkso%x68*yiS4MugZPyQxiGQnk?#7#+pD^LM?KdBrs2sQ@dhWOLo{@du+$pu+ z&YgE6diK@^nRxGg(ZAnwZ}i&+99wawKG8_z1C7}xm$|dB+NV;l#7(twEGtmF;HEG& zR(Mq{Hu)dxzSq+BtPD(wW6gjTd14_kc(Ho-bqZQ?M^@)h8;`eX*5Am@MD_UdDkZ4# zBzZ?qCvTCrL>sy7%q?wdZOPtE)L%iitdQTxOquv@!;2Y%^%4HP&#BMfdSVjB718xS z%bO?va!Z5KxWXy#Loz9EqSx!UBX0tD>EfI+XA;es!R@Rnger#&^yf7!#9+Nj!3-8A zyU=^#L6|jhb>TFPVkE5;;I`GcftX~4A6Z?N9BeH`o1E5#!NL@8D<}RGT)=dC^Cadt za+>o}qj^@tw1%Cf$u31AGiirFQ|4+(e>{9oGJK3>eQ{y4!y6bzaaJeR} zi{5?n^AoQ;cIz!KOsF{?z5dDFDNo1_FYlDbZ|5G1KJn_~(RW_-FT5r}t5Q8~dHg|{ zyl1aWdXTbwPxNd24jOUBJ|Vp$^1FlRCAOEPWFXZkSk}o#^_dtSr|Pnh8o5};w?PLF z3!BOs>>m=;a$uJEf(IzrH)KQ0VvS96nu21J5xYY3HLy{g70?==rg!QnMYocUy*l+M z$ZzdWPfPGPOwmDhaKKc0W}yy4{8EF{NwWs>0iy^DcrY4G2zJQK48vKpef29Ua@+L1 zdRDNjit&3?jdg6iU&~fCw~d`RvvXQn@vNFwgBwfsA8B|YzrCst2=!gKW%I(mTD0J2 zzO(nWnVW|u`7WQeXiin<@iI;M>gs#f-_ZCLdJR;@)sNsuMKq4jlsej>3r>_p-UY@JA3rDs+I3;n)y`4;?^?B9zSyAiW_!p zdGa;6cG|HOMX7S;S8*A7eCss}`b`;E)aHtf)317Ta(iF&`4^oDhBIsP^a(4A`#wVL zRS0#)KKM|o*of5%?n}|IxHbCy@E5Uw`f}DNiCZbF=U0o>3Lg6hW5JX)zZR}6&>RH6 z{Y7nrUQKhOU58~2j1D{rS=geN8E=gaFe1W$sAZKB$~3LjrhUu(W_t9aWbgXMch`H9 zqaOs-jHTLU&37z(c1MexpB+9X`yp+;Q9f26ZMkAhnKK>}ETK5``I;J~RP5SRG1Y}8 z*jk)zIjr$nevi|-ifq1UB;(1+31Ku7yfBhU(n6>eF6o|A#opXs)K{Pj5&bTDzVYA5 z-r0&B?QY4=8*aFjT4DJNw@c@KIQQ3opH>}K8^pb*yk$qG&U}2n@@{jxBt;U^)Ej9^ z5`!?Ra<#Hy9t{;V3OP+T#-ZyZ(dlj3Rr)WRPpTiwNK8;^SvEs3|**}6-*Dp5n6QTT==MnjeL$;AzCU#|6v0KTT)$p)aOe~NU-LN zD5fnUn}{jTMDDnn6IMk}?2PV<9=thvXsLYnDwjk3US76a4*X1wk6$bA){T^(PHS_| zZr9x@RaJ2^&2_F$YkT&H`V8i7<6`kgd5Bs*Gqfx@9P}n4BDvY=9(SD0RP30ei&duy z7&&0qE;?ry(oz4W8K4^QLaM7HvGU zBtF3&{b%$=(@b3v9kN2+8Qr=GL+@16{7EJ`6WsOk;cKGT$Q`Sz;*>PEd?-59k*?4@ zF19-LN75WJBQCmHuE4;~RaKSL_^-w-sgyTJ+uxD4Al74B1yYkdZfn%&G30V-dJ{AR8*VO)Z|3k-{(f_yvgeu-(QWM=hCI}FVj*` z=Z9oZdxCqR+@l*wjboyf7fP?ZQ+bJ!pr*w&eyw<{Bp~j?5%+9kxS3E1{+uiOG@x0WH#;Yk+CW*S;j^g1J4=CPmSNE*#pbf7aMh@pDRHPFCVAv z4nHgVO*lK<>;60Lg*@>^&!{%+K{rLOLG)ADGB_g5*H1=+V`bVTDn2Q&80KCTn)MRa zdLqrrs@PgWtN5`MMQd9|NVP@X@^tih**kj1czpa%=n7Feraf$5!``aM;hNq_-)=+< ztSxMIe2DE}dz5$4>0w!iS?};*vXaj1L;3kEFeS2O38z2cmDS6MR4Uk^Twpw_6=$-A@sHu~zy|tUtHGe)#7G zMRH(uOv0&3yS7;_1>FJ#JBV9+fwAUbtPD~x>>O5fO!T88s#q(hGqGJ5+fA$~a_dmi z+Q3CShp`4jJBOPuK=Ts|-(p8_dRZ=R{VO3a-8<}Po0A>R%ZiP3dE2nH@4a*9;?C%z zVNblZJYPa1F_PA8u!ma)@`gVHmqBY&;-qCi&cdtBGJj8uVMu#;oO<8?im(+yG z(IZ>#8J;>~K#!Z2x9-#a=CxfXRbQTOmZrU$WE@J)89lRT*w*UC7ZOvYF<(x~tgP&p z(|cw)>Uz8>Ma$E_z?yzQY%6nNn4&mo{R}0IhANpbUM4yqv?fBfz)uM@O{M-9min6v z(!39^H5^FUKr^u$emNNC3;s(8o)55BWQP+xO`SBXOL)Sxqwu0FKOSCT%DXCh?#<{2 zcSzgF@Ll6J9l7%DdvD3CZ=KpH^~fuV|Mq{~ppWb`tns*~`?~iVUU0?y8~?Cy^QY@K9UWAd*(tGo?iK6%4!(6>zjft#t zF|EQwEKNh^7=}`dY-*7?v;cUCbGJnzHT>vD!RELB&jlzs|0S&TdxfT(QwI_oO%kwB zW_2LZ(2|8(cBrkLp*|lb6Ua>L*V@qNvwe40PrGGe;exLs9T%ohZst`*pG})quIihDzMYXTG&IcyLCo-a-zEx{@`7jOrwFNe&JUY$w^6O$l8rt zg8l1gD`|Ly>4MP84>qp+aO-o&l*Z|yq1Q~VYN#A@B6@>-p?2z&8&c=~dHqjxzvHiss;7)n`#(T zPBb-w6+hFo9D6pks@cd^+T}s}Auqk+LO3ju^{ebs(y0Tc`Ev`r@JaNq!>Cv>n~d~& zeF1Cq%T;cvwUM3wrDfd^R<Vea*(g>WZ#Q$4^?*TOFBQ)qU2j z18ix%3RNRMv&WH@}+Fr?rZ4g8q259!kM&N zc;*lre%jDt4x4)xnbGCCoB^wN@5`E9GBMU(Pwlg7yGCF_cp>`6};Kip7K5|#L>g_)_1 zHK+sNXlgs{EmZN_eBBFn?QVQ2F$Ft{@ITZC_$L>A9w+vMpr@K7H67bAtla_BTMN^c zQ#5gF$e7ghqwdu8$><93mM?|o;0DoqYsy0ZkG=PRuj07&fOmH9-lnT^i+a~p@9pX$ z)UFCb2uTPbgd~JOAV3sRjcLX}0C$XV!8W#Wf=P@^9O5`GiPK!-U?*`B$Io^g$7zmJ z9VaizOKfTR{%3Y?Q=#O&@B6*)_j@*y?wy^Tojo&W=FB<&bB?bnRt>17XnszqNP?;p zCR-h(&jVO8@sHf^c>SO@FI|h&dss`&PEgR6o~H4rI!!S(;dAK5ffme3D0^T`zTQ$^ zJ|Cd0mUT+`6|WG~s|JBn5Pd zi^UE|oJhn0a#!0^{KVl*D2l{?Dp}%vmGGLd;*ez$e5zJK6F``Q2-rh$5`)&pTZv!C zLDCkNozAz>Aj}z9)4;b?fE3-)ba1ElWKXuZ)c1)BNpsdcIaIf-GWq4ooa5u&z8C!@ zwdTZ_duW+^cKIyxjVVuEEgfw_G%+sUlz>lCh0l3xR`5x3VqBEYU`(ZBtyjXy0-L~4 z2;V%-fTOaD(WoxIeq)cyDCrsx7{Mss8>ZHUO;%-oAi4Zn>%&=EU7D#?H0chEXr9%t$Pm+LEV*k;$BEQ)uN);YRmJ7P-neGCA z%cq^NmjoQZHRh)oEaoU-qhZPS;Z2f4H)Hci4AuP*+x&f@i8Nsu);K}`Y3v>K!Y!V# z@`7AR45>-9NX+b20^|vBgy`qd=tseOt+?|r_xVExYy=G`X&ekfveh-0-D2-Din|Z)?|DArxjOE-h#DdyNmLA zl)9e0u9dT|b>d?Q*!Z8_h1++z`ZmSoTzh@R#%;L}@i`}(mv2jnofz~EuNb{#G|S;% z4aJumJJux3&RJR(*I2UZc=N=0&h?yzx)q>dws5t_k{Bn7hV)Po0wqSA)~-ySl@31A z#KjyoxuM-8!!WBE&ADOyaw>AVg=J^a3T91*X^#*Vs@`Q9a&hW#DMQ-?Rw&gyw%Gm+ zv$vP4#5neGch8sSUUy5ZtzX>&B8OI|#!Y(Zl8%S&oOo5+0JuD;bKrf$u~QN*8mDqv z<~0Ej%ODiOnb_hrT(lnRt5=kdg2{x&)W6?5cD-}r&YxVp^WD4dd28DnRl5h4@1Bj{ zgS)HPACLdx#v6Wr`)z-`>6SmJ*no?wh=Zmif$$n;7v0qBp_fB92~oY8b);uHZILlPa}S+0{0bj+ znT+%uk<=Nf$|_~o{PuKVe_@;*a@)$hqvM^;r`y+mapU*u_YanDDd_IndVDS$|I9~+ zEe7Q^dz2WuU_q#ysXK^iR55SgFP~a}ZbN4DL|w{NUqxqVU&T8$u%hXIifVQhlo*jwk@Ijx}V?h2}`$y zfBaW_Bs}9k#L3+H>*v;eWjopB7_c^iWZ~nn-D6LR#4!r-w4ibog)qE^*XHj;RS{|% zD3Y+x)AFVt!f0SfGz&Lr-LZ{dq83Tow24Pq)6@8gp8@0^v{Gohn>uXKlBLt`$}Cyy z%DY|>AYk92bT2trk(#@8FssoMXk`;hl16G;#qQ ziezwhYD4cZyJ-7`+E7Q(ahW{R@e&$-2q7SHo%TGZ>M-gg^bA?q*M`DwgR`;$rv~$bOZ+@}=XtMbFF>6wLb)lyw zyJY9^U5{pkvWUkW87pf#msVD68@Tb)sR$`cUfNYJx5?8j3!Y%lrtg zZY})a!x=+m2cg4B0U6Q=9HO4U`Q*m}OU_N)+1)KZl5=n8_H~DD8R%!J%6na1`5pGW zsKU|R;@QTAxBt0j;>A6yV?aIXL(YW{__~-Z+yy;3z#kGEoFM+t!bs9zI`o;UHh{!9 zbi@m%B}(!o#ji9afc3CkZCJ=rmcf>w7)SrM>}kGr_G-3Jx6l~bCt=pSMFx=@6PdAuz^>EEIFzyB*jFDbJ&kFubz1Z%!_xW&UYvmeuYz2zX5wcFh!^%NkWq z9#Nj!%3fLtkCXSu4L0ilTO-|U89ylPx6ny3-a4^gJQ$IHb0duhbmRIW^eYZ1*HGZL zB+(uLaoVTvcaXhYTn?L&D6aBZFFaF=`6gtxrbnWGHZDXPm{SV2Sa`dsFB(xkIR01V zueT{5Z)M+F0spE;nDMqheNDDn2es~s%7p0e@*zo>5X602x7B__;(Z6JVxz~NmElZD zh>xt0LoQCkogq)(Y}ZRWTX(3mN{V| z`g7+E`p~t?o*gVh`TcJ8{M@im{plx+#;{(tWs~wJRR4r+jRR6Cnsjuzl=4 zTW6=4#fH59zA2uWJ3DR4X=M)ltOWN$9qbRo}^%^k!N?crfirHE~l=EbPcfeDb1 zt(hR;SfWYq?r4Pb@b%eCXI;()WoM{+eqKgj`MmaqZ>BZ5!<939azq+F{7+1dAvQ{r zqak(g!Mu!|xrI&c-n6T(DUkMg=gJvzz&BTL#*;?TPrLBE#|-%u601(8DNGiI>1`31 z%|W&Z!VWtzKAP1c(7rIa>cwmk!F3_|^A=^FEdqTb^M=0~jW!satPu`dfHgwTEv#O1 zgfu$-wwR>c7alD+)`|a^c#Yi;*i+=@Q7J4j>~7}l9)Bk-{4`(3_Fm9Q{qkw}2()|5 zNm#Sse8kzAyjHONkz7*Us2So+ea zb>ZT;JiRR^ds|)Iw(K06J}++ZLhE>jtN7}}jRjlQCOaJi4rlV(Ed@=V+F9(%=r+bB z6h7M1e5$;>EG*RJD!+ej&!dG2afk}d@+UrGXZ2(ov3kPr8qhk!N~r}Ca9YdQ*=w$u z_=u85GT|ubGs@}H%du|Q)?wMiTp{{6ut|#2bwnOa@O5ZqgmClmbR!Np>NOAtp`jT` zHcVXD=Ltu(S;DORyxg3`1R1KsXi$gfLT6We;1q+(5ae^ZAl)PZXT)hlbTg2nKd0VN zTW3#fzHG}?DUxfG^J9BDZJS;!Bl<y>qbP@u4l|g>bf4n) zGHOF>A2=!bMnf{{lu#YvZMJtx9ghFv{VY}cDaTIf1IHKeIjp?9gM?WW*p9(5}!B_vGpz`D1a?mZXonzi|;cekwfBX7QU-`QXf(JcUDI@y> z%CU+O-E@fUuJz}0yq_&vKhfW5%1`M6%c-Ag{fC0esh_+Y{nYA1h#%zbzp1t_t>W!R zpgb#}{FGXL;RBQ(k*R&P{zF>%f1-SkOzrb>bG_CM&g!}qGSLAcIG`PQTlj^y|2hY6 zze{_s{%Olk=>p5?xoZ7~g39T+yd3WpSf8HD%kkV-c>7$wTF2ERHPcl*wuYswk07736#|chvtS-*Oej5S#H^mul6!=MOQ-C3ihje zlwTm0*iq%Zd+uSW$NLld`x9g*mEEhv4|XPWZR#9c(WPu3KP9bX-DrOew(lI+XUR`E zms?}W*HvBaB^$<6CRn3?I{aebsF%%Iy!dcVhzUO42vy# z6zH^NgnHj_9lFLPZGL*;hp{2O7x+Z(B{SWI0}#xXq?q*Gr3MJOx5x(~f9grG>tN=tl{IB@g%jay#1Ze{z;3sxpdCQDRR zUHt-+MT#HR-FAGXYe(0euf7&><4)_&^TSFm5qNzjF%$~FDkF< z@?M2af3{`$V+Wsl9j}WSs3YB7DRloK2_PiGED1*1%t(2c1%0&$SDORSgW*(e6d9Sn zB(m8;$&GXh-01DI*}|BjR>#njr`)=L+X5<+(dAP0xKQJW5 zR<7w+Xdl#*{sQse)POfIw?wAWZU~toRhKbdcJ0vdTiCMa?=0TD+S6VdkslV}itZbo z<$x+`pK>U5us>%*lsLFA<<&DMhFv$M-TJ|eF=dOYN*fH)_vDayyN)%NT$ew5=xevk z)){FI?!KUxt_Qtph55ohPeeKphRw6$DS!%bfJU}7ydT6r=nyrWAyDnOmt2Td+R0$m z?8%ihnxUcwTqWD0C_p^0Ga(Q5{woBS*>PW1mZABf}{lH2ApwqT$b{ku2sh) zI*6;rDF{_0CP>^@JBw?i*!$}r&0GULk)??UOLui9 z6XFnZvWMRPu^#D?msO*HsNY~?15i?q1Ev*(nL`I3!oGD7UoN< z@c2`?S&;ka_(rUiD_})O92c4+SlY>_l&e)ZDgpuj@cbd@_DHv=;WD!)-+_ppdYP|D zlj>t`O={d<75agIrm-kRGNEp^t1KrwDIvt9mxWeDFVl?IC4RL{p+ur+F-fEMmBSG-q7<#PM^#_@ z!u4MSfQdSX;fQx$COF;X5HL=pc(dr5^>|DZW+@09$X-KBdO< zjA>>jLjyCL7BJadGrI!Vt8At-%0T?YPQ9dS%Vv5uAK)K~1v*PGB8rkxhXs*5fw^Ih z?9Sw_V}idYyzK?qXojEIq>V7%Q;S<+7t+IiN^zL3g+=R18o=(cYWcFh1RM=ADfSn6 zX{x&_Ha@53byq=d_Z0t2RJG1n9&v(#uf9ZQ=|a?-9=okNVJFFDA}yi#J8{M8~x_Q*x1CkHf==2qT|XpN55|BNa-KUkI!GWG&)y` z2=7qt?1%^tkLY0g&VHk5w63co*_Phlr<}d|rc|dWRYvSNAb#&b%2^-Ju{1FPd13lr#<5;H?Kt?I#N@RdQAe@b9({Xnvg2>} zg(vhsvU}gTfw<7eX}N2bO5`xP4LY+@;iM%FK-P)D-k7&JpQ|wCE8~*yXlnA;hcQj$Z~Vh>8LTwfKZMYZVClnigqr z!chvo%J6J1j*5;Hdp({vuD|~E`ug{3pDlU3X5zHHYNN?g#onA1X528_CY2PJ+1b#d zc;#+35?>Um^qLD$@|cpyex-EdGvbVVIizQ7tS3Z47)=O&be8%nu!`T~*upXCM<}8) zLHD?Ly?h)ev3h9>m0>qKDcp=$G7VUdpq~!4h`Lq+L(fB?2vYRwq_vz%A`uH45A9nB zWoYFQgCmT3L{3OGWVr!sGOva;O%h?da^ zEd!+>_sqgR2I1AtNX(!^k?Nm?s9=JG7$O;i2X`DIkGr0G8?G9Pt4TR<3;E$adpxCX zxmdFN1~}lk*lsxB&92(3{PgaUYfjI8dx+(Ii96wCroB{IFkG=xdG%@To!3<{!gfke zbdZDA?w=_C37@>vhkw)cni8d4$S=iP2omdMt@^XTJ|+tV@DnZb6lcX4v9B}IZbL>u z2tfE`$yopT@COm=z}Yf{+KkK$vKVvDhMa^WOr}5dZv|m?q4cId>Oq-eAYwnxfclSH z*6_JPf4247_NCWvudTi6x~1*cwk|x<-ge_&SNWcs+uDyT+^}vz)%KCR{J~9)Emu$M z%NyBNyjn%PTer~=~2h6{^S8-F59C*XTV!DJBu&7i~jU@-8I8g!oaT;w_(h2b8g{-?SJ z1;m(D17i8(%u=K+@m*z%ZQ0MZD#ymp>?gW=`?MPzXciD_i$~ifKcrS{=Rt^bYqY3G zlv#w%LgcQ1a9fDpMQwk?j_k)j{Z1bmvHsO?{_CeE6wbw&C0VZ>eb!s)!v8*BMW0WdO2YwyIwZNDcAJ> zuwOZAP8H2PJ!TQX6M9j=u8ZmE36tW(C;lt~eyEE;_C#T)QmG@GOQ~>N)!T$+GXsu9 zIs@#7Jcf5*0R#>R;iTS4g(?TMGU(u-MurgFfdD(;EJ8Sl;%ZWa zRuwbI0+?>kh{;F<{S1DdrD@+yN9=RZ(qIAoS9cze|URtS%asfxNYqh zZkH?*&)haEvufTP@%@R~flh11e9T3m^1|!zXFQEnw~DOtDL5=9V_rBorXK4ulu{5~ zK`uxVB*J2$A)#vsAB2V3*_k;M;}T(<9bpjuNjIP_E74?KP|pa4EW&yuNd{V6{4s;2 zcS18;wm-aM-&3mwZ$4UF*FV$}2H~XVrgm3DLq&Jt@dbyTx~eYb)u^hTh6Ov?Gefev znmc!Nl}arIXTI~wBO_OTq36NxZ_SUdb2sd+8TtI$@T8daj@*^^JUsU2SWj{P(N8xg zuNrRL^wdwm7Lb`bAvM9rOw=23>c;nEyuh3CL5P{Zq;G}5mwEdQ$CZU_T+C_KwjmuXJCMdtF-oB`+F1jf% zExNKit*5EhuCFdy*CYNSZMMF=sb?Qc&M#cR+z&mx_?p>bqmsA&(c#61o2%z1^?iPf ziFs~Y;Z=)MqS{6$-fLP_e$A)&J7+7y(hc%|0c#TOv7|6yonex~$p=BD!$iO+VEK{2 zFCsdp&&JyDMxo!ci!Yb{k6TlPYU~%7iDC&TJ}}w{A|!Qb%oyv8;j0 zCD9Tl8;xn6HEqQMjx>whX-$>lO7)qJgqyzc=7{p)e=2W%x+y%wY_FYfPm|+IjG6Y# zpR%&Ezj>1-Ji0jCWU{BsAsN9fZWd30aZM@hkK)OfY)xxj&1>R1PaWRtEDKp-OI~!KeqT>~gJI$^vHJ(Rwx+m>)7#3W zh?cy(B@5E3TxD@m>F!WzgC)A$leD%=IUA*ue^B%08}aU0NtvDmib7k0cYF-*XaU+W zQONh?1oEE5qmKah5fJS$!)djrr|Cn92Wf^ujY9jnM9VEl_tDyd{)&%1dfRP}eg4i> zx3uaej)_~3^!5)d$y!h~(5rKQ>PzR29DcI<)`?%%u$x!kd++Lot6ENCGJsED@d2Nl z#vGtH9CY^mK;91wPrcACED>H+bxkoT5zsst5GrvllTBp>h(S>f*cfR%ro9CGDy_Ao zrpm(#14t*GmN6_Q8JiGfGr(A2u^76yH=@5Vk) zYGVTv7B5=R(a_%5Ug<6=%FoSkCdNlu!z@BQs}EHbQe44F4o;ciNJC_?OAmSPlg~&=ahQl`;y4uYxxsxer~y6eIC%ZGi66{gTT&lxYW8Gj?tOOI(1wce zh~~`;_MB+aO`Ojk8y(p1P8>Si)pzrXlA5 K|DZ?>ggYV;{0D8ErN57pKMEyKQv; zfGaXLa`ig+N5wedAC)^R+EJOm<7=C&Zsnu=m(X}L=3n`9Ht552gv|IfLJTi&0rnCB^cL(5s3M@ zd4UMTESwj8FhmH@Q2#>Yq}PsgQ0|ByL|_$9ZfBuA_g7C` z<;+zvi$|3KX&>fdh>#-;Kb3$$Ox!J%L!mHLp_7ykz)paF*MbJ#H_||5d>2ohTn&qc zPF(G)4djEz%a9&wHwQY|E~1 zZRO!fJvS{sd2@G)G^<_Op+z12WN-06&C<_qUO6_>689VB&B$fP*W7y3Yq|>bKotH% z8DQ`7_j3r1D#R-W9VU?a0bdt=5=pf05_BWJ%Y@XiPZLvDilW}T?PcsuFJN6X&aJL) zF|@6u?oqm=)=xTno11&3%~*rF*dg5+X#hk`Qn3aw2|`XFJvOD+oS^+}Z*e7xA_M8u2K!q$dX${Eu12SefqR zTIJSSrFnnqN9-8uI3o{yd`23ntJyvNH{HwB{)I{3P3X2A+*>CG@v8EbUQeV_a#fPDOS2evR~rAv>IYi4Z^XM&9AGEn06z|1E)z05X=Pb)7C1Gqw%AZi zf!0i%kcr~t7;|+}Vwy<%Ip4yuhf_du7vN46gzmr&4)Y)O_GvEY*W}zkuyD;lo!HeEz29naZWq%s#ywnigFUliW7zXx!`~gWg$c%M9LuD^VE>RE^49@2zK-4x;*%f^L2jpfT!>hJED$7d(Hp9Vmv9lox2hfS+9k$# zqLH8y9tOw;NTXl~HH3zk(T?7(N2-ie9MZ}pyY>g32T!rq;R_d7jQ7K{YuQ&7n__3) z59kj1la2m#P3aHF>+O%)lU421FOED94=f=ajc2<4Ak83i+ND(W57d9H%2Vi%@>}hL z^o}xjtuhCD!Jm8|Xb=A0g%1p6Jl9AxR&(sJP?6xgF$A0;-zQ-d2gh^NX~2Rd&+yq$ z)=^d2*@1i{6Fr@kvlme8YWl=;lyqu2O1j*$J9(}W3bf60mSEgeTn^!8(lO!S{p`i% z1mI@WI)t0yb%2{`Q86(AbyVC8)q&Cj!rR;I#^5>_1Jp4*hqx<+LcM6E2&zzhFmniT zI4KwB2WSN~<)RVFMFgLf#UXJ22qXd+N2k>q2PieC zH0YT#r7cmg$fAV|AY{bnT27*YuWhYkuq0}OLi#)U&l6Q!jT~bI6bg|mIqjKFuac8? zV;*D)CaS1t`2IVw+Nn`8r^FOxJl>N3ENXaed~5TId%vUn{m4OP`tH6Lo29mZSfh@0 z>l|vVQA>>E+%(=@ec#yTW1Yz<3yzL%zI8#$*5)<}wDld<{H?Y*Tc!&?%6cgF*7Apb zvS;s4&kn74_$MF}m!NBh= z7vM8Y!VGkTS&-Qv0B5R_98{iOSK{c~f3RuukM213)y#@#M!xivIB{3qZTl9FRd~*A z9=LH;TIQ=MvcGAcvKKLSB1)K!gW>{w19- zPP)O*9mNfF4-nXwyvHc~6IA8*U`>&>N>2GPAqqY(iJmyD-dIKZXc1>a*jWU&Fry$N zlY8B8$v(2II6zC|mN~{@QlaEb?5q(FCO5e|`zzX89m?lwM)9?A)`F$ z+E|cQK7W00>)sU&wy>J}mYXAud70(&*7VNbH`HLmdbbaK>qOro(YLBECz$jSrM#l} z5O9UkI>=U%2xF(N3ak%*um{jf0MhYPMqx&t>MsPKGc+(5QkSz>I~~fD)G&(!HT;EA zCv~u`c9V{J-!Y)@{G)9mi-)qTi27g2k1d5GOmnCWU>+t<2xsL>XYwsc!lNVK6i z&DBD>BG8rX<8-C*<3z=LXG6FIl4kP!S{N0fcERZ1V5&rIT){1z1 z{p|K--gj-fx7QeDC~~@5=ujX?&{dy-oh=V%j&OMEs^@bQ!=)60Ird=~d7uR$9hq(j z3!iQwGL*tIcJk=~T$LJln$YcSDJyHCANB`Q?JAMp@6x&QxpT{LO=;S%>?~_;F2i@~ zi%4imz{OmU$xTA4Cy6J^l1VHfX+Q-SqaMtnHVHDINpv!1H;LjJc1l@!P+7(9Z(xJK z(SKU+8vk(oU0645AJ>ZK#8VS1aU1q`j;-go$pI7)?iQdED#vp_Jh^7l4 z-*!;CRX*|Ydh~&4)x}zLXK?;VgN!l9(-2Fi-?Rj42u{C7h)p>DB7Z947PZ2`6s(3f zm=e9`nBgx$!iM+>WZ7{-yn00oS19pz`?Gp(P5053J4fGceIu_Vzc9Z0Xiwkqo+Zb7mmFQhj?a6&xTLt`^?9$)x~^sJ zZ561TUpRoO6=N;)ZkWYPi;peo9_#Nt*1hC7-Vvb(@#{ahoLc!bq$A8quY^hnBN9(F zaoPWs%gRJOl94H$oL4yBHd!V{`$=g3y8-RfR8u8#K4>u4s)MVJA^*x{-@x|ms<8X1 zsUj|J#yC}B-n;-@O@+|Fnmyqd3>^ZYNF`2~D!lNwVt$+pV<91yAqXOvROHT$YF^I; za-_v8Ee|zAf286sNl?Ol7V*fH-ItI13i*A41$XK!A*%EE#h*r#wnV*47b0Pj9q`?K zu^X@S9*+8iSd=geN2c0pcf+g(5DsfSLS{Iu$%vdmzQ1KI_eGAYNDCKis!_i&uO<=m zxGFG)M&JpSef6I1Sle191r?Migxg@}G|hqP`|1m;5qy}<8b40HUGS(P55e~kiG#lB z9_LX2;5|=sndZSum^WLz=K+OSY9Zce!lzDJM zPa>iwh3a0&PL%w*1C7AwrvVu7ze#}@@K|uQQyBGRdy(LYvA}`@uauM!7ZVMIn&icV zr>4Y5=_G8nbdbTePB?H}h9`;|>U9;xc8?1v$8r;>AL@H531D$9lS~H|4*-uFvQ+GN z=insl_~{J^qoWDzjUYhzjVpj5p9W99Re6l{utZTIeYKD42BQ#yApeOBAqr#!+Q5#EQKR-nlu#QQugDdY}S z8{qp!tDz=_WL%aQlBG&qP51U0BOwKGDFm{i@`%*QpP4VXe6k@u7e0EL5U4&m;5$9r zClz|1{y!iUYF(_ocX^1-SOB4LO}|eloD&jdEP_xtLbNha>{;XMIK%sMRL_SE;VzXk z%MqZ@!h$kY!_#?crs_}}hJ+zABn%!))rCsIxvHnOgY9Ct-G3hsubL8pghZVk^XAsq zO_R4eoLm}w3SNF%LdFuvPm{_B&gcb91bv=VFVG!9FHrjGqs`4n*R8*4&L1hIHCE8T zl-A;7Eovg`mhq>(DXsNi3B2o+x5AZYx>lA2U{iqp|Kg;A)hYZ4rEBJod7>5^;v$utW(}P0lK3mEI50(?^ZtV z&zQ7dl|=*UU?UJe!~3k(d1#tC@Q@QvaG93Z0kTeOC!jyLBCZH{)+ukDfM=0M%XSK5RM8bg|ot!g>MPp z6JGUv=Q~DGN@7T=E9`cOM$$!jInj~C=IRn0wxl?F ziqaHb$`ZF;U7D1g! zveK(2p@n3dar_rr$D|hRgSr+?=A%=Sc?9c~>PtzyHcRDwS@Wd~;5D@l@tRuap&9G= z+c`D44&()F_hJ@wb>RYH5E+4z!8XHva>?2l7=uU+!!gNmX}VBD+)yyx1%9Y~+JS&H z{uo5QfC>mfl;w||6%a6sf+c_^Se>WFHU(bzW%(FnlyZ($&mrE|!B|5fP$0wkkSsdx z2pK6vrlr}^5X~!IMWL&@L2Pser@9s5EbA6uj@~|8(b3bGQ!RJV$M(eyIUb#StpECw zlA4UA*U@ivECt;;@o~8n8iZu-8RPBeO8IVhIeu5-t}N&YaLvN|IIo8U#bBKxmk}}^ z)>kjrN4?)zRNUBDT+}G1(%+J%Cdj6s1N#7B8c0iXDIdZNHMEZALr4wNN<$2#4>NhntZfrV#Z@S`R!o`8H%FKWZ1i?XkFff`OOEB;7 zm#v1rZ@1Kuot|mWbw(I0NJj2dm3ck{5Uq9rA&xl!%n$w&?|8|Yd7!doxcuxFu5p%{ zhiu8+`x~z5N$~u~t}Q9$#aYXh@_C6>eeH$wo191?zuTf)Z;1v>ZUl!=!dDf5Y&55hFtzDYkqLkOC6!%>}r~Ab6ct_F1hX#ZztY-%tsW)$4 zy+01?T(y!bo)T^YRQ%S@Wdlxsv8%ucZ0QZ<$+u)n6WSb|eRF z8uXlHF9Wpc=h+JJ)XTQU)#Y>7#4Wi0&~9f*h@WfU-nBWYyg0qJ3~ZZ|+ufd4Rl%{E zoNdeMlGcK4V?{69R>#A040GHK7b_KyS}J%DK@5{zCNwJI`uO%fL{$Y(9`G$goI(A3 zS;$|9qc<-LgBticGviO+ybS1Rj3_txfu~$HD8u`_E^mQf3HQqoswD$tNy6Ly9}iDi zu%?wcumb%b4^JzL`MCK1eIAyqm(RoF59szN@9LiyGK3Q0YA%mkvf#ETOBtMxRf!ic z(O^+_njM%{N>$vw1j}P92G{4N^GI4;!1r@L5P379&jUDLdG}eXXQ;GcdGQ0Ey(+aR zq~Ds@v8!fBS8VMoTh}L*7NoY6=&o>G@7v zS$Sf*x{L|p_(|RF3wF?8X8D7xXa95Zhjcme$Dh8Px7-b^bMw*liH)&snRb)w+qQ=I zWgCWiG8%Q)*CZ7z+F!rmXn(9dUkNpYGr5MXvnL-KS-vwC=gSNKqWl{?mJT$8IAxyp z#y(O;P);#(s65NuN#(d66f=qI`1p0ToX3R9<9bCYK)1lP|jExqY145W0n<>kDM zU(z$s1}x^h4aY^=pt@)Su*Kjug8PA(PgB}RqIV7G$HSNC$0^;^ei(dhsQq|2s2_yQ z3+e~OjGEF9l^#FW>KaUs1jUu2a%!8(4+p#>wH-t!w0(H`whi}9?uU6~8hWVxctxwL zH$B=CNK2dG8;`7a4{#|c5pKsoB|9taZr~3lt z8!x};e7oSIfxqr)mWEhoG&j?i+$UOUoZwy`wnrBo_?`$x=7BtWNb4Tm*Mdr@?lW9zK(HA3V zeC}-`v5!afR5ysmG@qugD?g69zv6xSkq$;N?^@woj|`^2c;^kDPo-Kvpg9XFr`TS+ z9oS6*`|`Y2KT9o#mIwE7UeZlox8xJvwuPUvTl{^|``h66GJ|@ZV(Cq8!x-v)9^&nV zXlq=aSd6-Oo>&|-7n9Yx=+g>ap8h_yJ^g(_bu8XKae8YQNvv%pz#P>8mGdigKc)n0 zdLa)!igmS>&S*et0pIaVE;XW5%c)cXR2EVu8xD|`5s)#;w53-gCBu)?0F9W)lUW)J z(hzh58-2M7i*Tb60a>QG5&p>9eE=NzZbNLaOWpxstN#wDsVu6R%9c^dQ`SPqhY%C4 z@+@Sxl60{mG^q*uChC*d<=1x`PeH3R8H}t5yQ!@jy*#8 zh^#&{*$6+La%L20BE<0l0i%z!*9U+{qi{$F!UFKDQNPAwBBw=o z1j<$I{IhCWZs0+YNG<6`3#xzG3@u>?n~?)=&LO7n*-RQYW~#W5+Wv@n0MSeBTT$X05}yf6yQ zN`brzV;c^;Op~CfV{6MqjF;J!vC z!mFFSI5bNERS(dlO{s~Y@z=zF_zVR+ulp4m3J}|xU3SJhB})9wl8NXmAhq%j-OnmN zNreWfS-DMrQZDE8ZV?8A^`5nUy2=vp{}hdW@-Y;F!*mol%x6{sx5#WAd<|&P!5^a@ za2hOz3tQ(^xVpRM4YUq;YF#ZAEoCJ<1)xPwnMkjYC{l35Fv@wYBi90+>pBzKbv2cF zlw73zdFE7sbCe%@Qw6R*tGx5xwb#DKGR~f58T3hc=j_-gUt7QaYoCm1e=kZ^EKLFF z1SdW^{5w{D?i?%s-QmN(Q@(rdobuh@9p1I|JTu;P7c-vUy7j#B&%5qY{&^m8HGFa! z#cbuWC7n^!a^GIZ%dyu{_!){1_kq3v>stj@w=Q9ji7IzQQ7D^1@|NydG{2@SFWm^= zHd^JTxd2#)SAHqOUII*>d?3&}I@Dx=Xdxp6JKJE=OE z8pzgbvMZkhg+evZJ1acP5@_IExkkiPfQp!W8b$p94hYs9-oc0D(PI)-_Z~vJsb7SS z+8TIv3(ZY6ZMAJh)y37>nLhd!vO>+lXQm9DUizxAw5%{GfUb!D6zuRbMIzXGB(3s+ z;d{P6vgwB>_J3uTW97AnGQ9L#D1{AfE-BeOuzYiI$>!;ZST}1{-7S9{L++X3C80G9 zMM2a{fM2&S6AkIqiEn0uZ>kaM$&Bm-V3!C?{YH05Zn}}OZ^If!UNH60vVkYW18~@e zpg>teq#;Usp}9zzc+7xyi&-=nI3##T{VWWrSGa&jkjdK1YY1Y6w;2ZiG=D2s+Q2Y8 z5d#|l|D@K|FrlWkwzZE2o(;sPqtFCd1~oE) zJJaQwS=j3ut=ppI;XrndwL5FRS{(<`FRA9Z+Y}`21#$+sLb+!&C{;%%W$`#;gYdVz zA)~nZBd@H-$5bXied2=;)et+@OfM@K#3V#hp(W6f30XY?gGn5s7VBsMq>wy-?o;v` z1EQUYr_hJzo2 z(TgX(B#z9}N6Yi8F}R~1$kRR#@c0V}vPBL)sA4+`SePFaW)dFdfV#^mJc z+X^|;vA4wH%NkW=P~P2;>^oYqN7t-T&(3-Be}U(6A(<~?1L~q_uZ%&sgZ#{J!bY^r zMcfbYojT+Jm-N0%o~TO18_T08{!7V4AQA0Yl`6B&6%aN7Hhgd?;>~PcZiZ zesBsi7)A}`k3oDr3*st=0!-h+G?Kmlepea8998?97izI*x1%xuzLQhj7b4l`M$;EC@p7}`bMMpIT2Q<{h z3hlj)NKHc+LG0)LnmDPYXIL|EEKyU9f`|!XiJD2vrJ{OO_@1>q3$f;+i+MCMEyiJq zwN{e8m$T=u=lfSyw1uBYC_eb&>itJ&<*j5T@zuk-syE&DYG-T3_RgD5FNhJ}<5|h2 zyxg^`d}+yV=@V-xn;|FfvAPY##mfE8ra3K>X!OO)rcs)gMo=0`fb>wXRs?=t=Y8b{ zp2C#$P);9(kh%%JO#}i0Qvr{yQHQE}vS+RZC{dqQS3pXb07?QJhwyFxODKuA5dbR} z_sLthO}5%oS(uDC9|Yyn=F=2f1;1K)x)mUiY(pr$7CfvRjuHonM*;H#0mU&r%Ayf& z9FxX7J&^WN+*fw?@jcEG^GX%awIreL+qT9v<#SgbDlc))FWb2}MfXa<{5ejs43Mr3 zq0yzD)DFPACk&4}KqH;nLplLso^ zlGJJ@9#qjQt<6}Gh{>7pnNgA9VId|V7Wu)r$Tfuq zkcahpgy?en0B(#n;F#ne)1^4$PO{hYy0H0gKvSxYN!K||8|d>T+juyr z4cOX(`vHB`Z)a!&c6nZx?DD@2=m+ej!EL}MH&Z`gYuDO%RJhaM2HDBA^2fb&q%|?1 z{JY+APG4_*KYe}I>AnEk@bZh&_kyqg{>cB>=y)XEI?}vP!=Qe6YRuuMn;3h5Vge=7HDK5&D z4EjRgxN(4LOEw4icOw!pA`~JvUIc!yG|0Km&rP0aNNyjMF}? zvOxl_I|R7yY$t5TMiT;=PH#KD49P%>on$X|UrJJ##!!r{ams!fC_K;_M|@7UVU31- z9aU|^LFK*Pxuon4Dj$;*)v;9yg351aN42pS)OErpMKV{eFye`vB6a035&6w%C}RDH zdO^St1o)(2m^2ulzs@A+u3!2Br2ev`FWMhp@i__9;6v8_rOumZS<@mi? zPt_FvflI!M<{CgTWG@GrN3H>DX7_D#wchD>4RFj+T?6JyR=5WAUf;avuH|u&a1Ag* ze;eQ$0Qs>DF>Zk~=KakUcFgGUkQP9Dcmx%fs9nNeO(*LF z(1-TCK)Mjliqi#qRA4#btkm-Qz;fD`)c#>_Lw&w4sn47qXT{sV9=5|j=KeO+@_;s` zVob5Osr?Y%75?ULgX6FGyQ}4VZ*xqyw*f-Uv_*;fChbw=e?$>Y_M&ooijOi-llssK!Ag%ajNw5nIX%!5+xQu;LejylY@%83Y zeN&H-zT@;1jK#&5b&zV3-oWiCr@-n$d>ry5B4Q0$_yxprg3>@UgZK{C&=;g%e zu$&=x9OBa<3SzKU!0I~!Lnk7Iv4)75QEO9&f3+L4w&aQ}-VOFn;VPiMtMsdz9Z@Ab3K{&tqy-!jPNH5gmkb7y&6HmH}y z)RYh?I&f;I4vWYm9@Js(;YDT|xZXzD)79u`T!)%va9R;Oni55jawbVswKE1aoVSJuivoN zYw_FK-jU-l&H7>CnyS976^F=+F3v&zbh`Q2D&a*Z+Kg7FE8@H7Qh3pYDf{hdHxut@ zG1u!56=E^dsqTO=NntQ1Q_S@_h`C;phnVZ7-k9stUD9H%lP4j>OBK_GDC+4w;AE_hbZlWV5C1G4v(s(5nuU(_NkPpV`3v_65vvJv?Vsiv3$N zRU_xW^Gu@izxzxYgn#FmG&=w8Gim7looCXR{(H~7^2d2+f+w}6nhEXmTjo|b*EGAz z3iETaoT*x_hbmSTs*lREtC8F}6p6rS!MPq(vLWQZ(jFRO z)C={#R38-K1BpC%d`^mpPohSNYA(dUlhsHK`r&rX7E5@zAZ}O$B#j41PQ}$I#v|xr z`shp8NlNG;y7P~1TDpH$(h6@vkAsa`3XoDyxOt8zCj%)!R%~=dM9kTe+t!_?jAgX5 zuTjE}P9*+_-SPENtD8mej2=-{T1Jn1w{1MIv?4hFM`e_KcK%TR&8@XHo9D1CISU4B z=Bz6!4%#mok&0-8XCxfMMHw~4CJa|9hN}`*LnPqTh|3pB#0nMHc?X3EO7R5(2#XK{ z#8dOvG>{$3-X6MeHWK;a? z^-J!4{Jz*FV~;Pn`Cs>U-`w4Lpu{;mRNruI&6WoW3)_a<_N-ZX?X8P$>KgdMt=+%) z%0m>FoageC`w^!v2C;LNdBPB{LW+q9HDOwS=~dNPBqEbbM@WTZqP-WcPqm?*}Es%J4Q?Nz{FqAzuaq1FtDAyURq258Lc881E>*tUD}@_6l)tyLiecuGjp5bGBZm%3 zR+m0P{>9G>R=vv=E*U4xp+?EX+^j+QIvxcfSl5p^NA%u4mEII#D$+-4%r<|b3Dt{X zn!(_~rKTz8v894W0a1p0qhJ7%f=4IykAkjWS+@qd=#9$p^(MoJa%KhFe*@dEm&3c1 zraO*x!jFcDiWF+>?2M4^7~dae5G`yITdI5(V}LzpaxB2p-dKPEarJl|;{^7lblgP# zvTw05uFH5!N)c>6zcX@hlv3a@=55y)qTYkLXN6a}&PTkAc9EyzvfF)l#&*9jxxbX# z;u7s{@%8r+Zyt=v{WaxJZWsEjOZB(k*Y2aj->1?+8Ik*W{6kJ(=my^+os~*w&2Mx} z^TSieE8ExJF!t&^@ZBRX5TBSd55y+|=YfCjc<;IN z7*Bo9Dc$fTp5q^L?Kzh*<_mnZeC;yETx<6-#(c+QI?+AY#)J7qT&K~A&jD>*xqeuk z1N?c@&VfqEg9pHm2Gi^E;Q(It#|Qn(OykaVn;1i`(*!T8zMd?2jv@9ZwU0XW-gYMQ zg0ai{j%ppXJ(b6_y5wjYv?drbz4R7-DppSa?0|NYiMPndmDlC8U zWFJ%$m`&$fI`bwhMm{sPb;$2VD*gH5o3ifx9iN8x=x~Y8d-PKKG|`vlro3OHuR&d( zsC?qSpz>Z?w|M>CLFHrmTCKbwsQh;JCr}J2$dOyRUmnM&1R+`2>9Hk2qCu?gP(7Du zApOE1m@;{nUExdT^zZJniHV8HiOHb8LyuiH8rHvzi^w~trSA@nmteoWsMIr}AFlk@ zy{9iB{OE*D%27i)WVkpX1^V_jVUuS=a)KH1svKtvM@)1Q z7?DcF>#myk$BrX9yQy1wVFBBHfNj)=*k>uvRlVBu{aP0KBjp=E*);c-*O(Q)Vb<4f znY-yH$~S(548qS0R9*eWKizT1pT2l?)xa|>YMzlr8Q76Uih<1-)pYsWLn*Z+RguAM(-Q4b8gasI&-D;_-m z#?S-G?|-}#a|^Qah4(PG2D$9E#}lT?$xMjjCkAa+Q3N(X7{ytvP0dUrjNF#+AA`hJj*hoRU@BI z7PH&U>;o=Gv$t=MqeFJH`*teMlS+mmGKYD;q4G^S}5Q?AaM<_KR_sQg5 zw>(^T8qS-7Y>?!2{`wT1o2!vQP(tE+BJ%U|3-b$cHn&5~PT7)~L*(g&vjPta_6nLi zHUm%Qq`DKrreVvFB;%U9lwY5}<9A=48+3Mfu$ei3{VGd5@u$vn$L@Gy!Gd$g@A%?^ zPhLO&viSJR%2%FW@y8$fj}uDAH~;#A^6Ys^+(Zf5&nSO*2%o$8@d9fK6xTlkO>+6# zJ?60&<%1@<&g6s`YnVwisYfMRRN~A=a1#aj1aZiywW6ITYEx65&;lQaQiFUXu%55R zj2Bf&KB}-VFOSyvyn?&}81o#E13fg=l7)nCk2CPvA;eZSPEfY3}n2^()&Y1iIPMLaqVVyiX z_vGrr;TdYfgQuV%CZ?dMpeQSoT;FH$;=Q$ybzpVRL@4s}C*`>UR)l0>5%f)|J3ja=r{RVRn? zT)_Z0f&=4*KtEJXi8`ByBBj$zKfV6DfC6yNX zB)DWuM;Q3E2$XDAWixH?y;Z#wfoqIfD`<9jYY|^*6|nc)kxn-|IVn2IZnK04NbhF= z>|IsAQU)sbv@<0V#{;k#@sU)$WNUT$Z{j-A-w2*HUuE0XS#z~w%Uw{PF^mA#W9OB; zxrMnciT!bV21QrEBpQ`I;FG8~>f;}7JyD;LqbpP81-J?5Hh`C+!Ar@)5lgH&+TCI^$_Gt1I z%3v&LiSh1<1o5)sM_R1>KzU1NHx4OJEoay6W!LDW zc{CW3IN8}Fvk*v6j_!cQ&I{|XaTPMWJ&GNUhzOp%NptytP|dZ;a+h+l_XpPvf9b}K zj(dN9-;xua8=9TgVdZ4`ZH-61HoX3`y-UCJWZyNLzWK?m$C&xY*R&Mvbl;);NoiAF zNJtbzo{&}+cR#Fr@W_dej#8v{QK-jzHuCo@gkMWU5{?PL+gOXu18@_bquueN(o^IlMmq$d~%*RiBUtwU@nB`TLKrnOBIH zWp?%?t4T;=Ay15-N~y_un8iJO;xD%h(peJ<5SKg%d;A!Gcj%OLPwM9(^KymoU!F)8 z!YN7^XxW5}8xaT`DVPbxM7Gx1*g%l$b(;`kG*e1iE!$}*v*4IzVFBq*F`>-}#cne3 z9r|VyEfo~ow<@3oo`LpCn($PscN-2+2d?yXtTHj><>eKW&!Vs(KNso5(yTGi+J}WA zTn(g2Y|vP0^_pzy^?P+@QOeb5c-kP<6~1ITkqw-G%K)_H$%)a1Fo*KJI}HX?oAPlp zdr2Q*su!Q{u*o(>Icp4OAq@>7EW&V>l^7zJWB$YsmA^uFE;F0)XFoHCGic9E;zOng zrZ0PXb&T?TF~ZQTeAr_MQy#bK_ncDNp|2BNqTXg!hf+Yl!jYVE7VXdM3<_sCd;4W-h5f&<% zOhbH#$Pf=tXW#v@5wyON258ejgeq!42Ty0rmMd+6P7fYm3v!4tghG`W4IxIuwf?RM zA+japBCqn_Y+~5Nv-48!FTo|aFkSMpvobRru$*AhSTnR7bis3kL_C@!g`#hYU>Qah zH5V<&j2j}1>tS(|O#{*cSH(-A%9no{5hjbn!=fA(@l$r4Y+;F`!e>0mxtSS}P{5@mIbb7X$w()OeNJj4)H#$Z zNS_vpF^hyPQ9J^NM(l2Il7$$k$&VC0I1W+-5_-eHNWz71Ed+Lmncf;Sb`;ojl+`NQKL>ukKvYMHNYwl=*4|3O9*_ijsiq-YX zM&95#;Sp>%56+6UGe*y z2gbS#a_x`zy)?S<%O@M}`8j)~wJSrei(Hmbm9cc;#N2{p3udoViV$DYpG4N9L;?Pu z28fqtC`_VZuY;8?kW!Q2cnJC=3kw=EoHlEkhQ`Be!Wpg1sv1^`v`4(KN5 z&>R6)_c!brO`rV?RK2{8@|rGBrZ|wOgLrrGq>-h2;p-4s-l?O3mQY?mSvLlrn{M}gXT~@c-q1R#Qce*mBrM>jL z^6}DLjnQW+YDU)WQO3WoynQb-b~*3wzy7IZKRk11+PdV}(%7fYzyIOl{Mu5}v5E%v zkFWQg{QIqPSJQ%t7pzsEdh?;@Uy2Hu0CAu;mq0r~w-q$}f7p8u=(wtLZTPh5MHfCBXOl?)uhm7G9R-oIQKL`|ULuFwEhO2dx$e;duBdv(T-8Y+x$z z$b)X7f&QokIJ6wv1n5_q4LVUY!hK*8(FM|EV$8jcR4;1Wc=3mM-ZW2r#l%m+{)jL+a*774Xi{AAbN_d8AUYs=-^ zc>Z==zQEu?I7KhahI0`tiq}ug*XG_`=ujzQNs>H7?`a_k^^Y|9M=Ry5i>6De~(p?pN`~0lYzE z4|wz0Z-6)R0jESRz*TU+2wF3t7r1z%p@=sbLy)TEbZ9836Zc|Hy$A-+MkGN?Gf3Pj zWR`;vjYdNcpoLmylS%2Mr41Q)Wstvmc&bF*!C0@yd^7lYUt*JAN#d3xy~*dv&dNwn zOTnR($xRkAlbN#2lG`GtHlaZ5Vq_}Z7jvkSAYO!IHDct%r~cI8h*sc$sZu;(p9Nd` zU=R*?N%VuqlSN4QnFT?|_kiWFfxe>Byq3hEg= zK7W$mqmJYza8y)5pHgF|@dLlVywN$2tUlKe&2oF7MlPY+{-U_g5L!uXy4m26w4x#fb?g?d2t9 zQzl+{@1hL^_c!i+@dbkWlLU9rzPwWU3HI)U#Hi%n3noKWNNx`4)5T~lr8NO>%n*1Q z;A5)FFGrBQPzeZOm$4~Ugu`&6s`45ias0C3#5QbT#Z2}`2w?SiOTUx$48OxK}48-^L3!9OoT>mH!rt6$lXDz@rC&M{95wHh4>}2*&Ht5sx>a zxK$u*3h`lR!o&MB)OalTK{#BY*^8Y1We+2 zCg2MiwvevB3CCk6V>+Iwus%I}_ENVF!M`4f&_%(okZmxE7sylygpCP69SBYX(qgVZ zx+i-jhG7^w$#L%Nv_QHKCrg3OS86~MvfNgdt=j!Aa8XqWhhZUkD8-Wv%hlORoadkC zuY7)ED!R_dH#id&m=chsbKhgC;5g0i<>$}GCuN!C$t1$B0E`zu_G?7}6bOGYL6j4w%wc-qv@+&o)2bvS&f}o~E(*`-&$1FW3I*=vV%-|F&Q88MilQXV-4*7{BBC ziIpqbj{oWS))%*JdvWWwm$q(wiJg56=7rtGpXOY!9#C(U#tuKKBY9D8U z?Hv?*srN3`3~b(EkF|L;-$ZTdZ$QgUsPQ+U7LUuzMZXpzWx_iz7(SFmnC&z^M$n+| z6@-q!QL82mt6XdW8AC!0{}@_{V|WgGSR(=d;OH+eb!R8UTg_UDN}qJ-`OPC!9d}X+ zzOK_UivlQ@DJ7zX6T44bXLXq8gXoGRx?w@;SU7%4eO^X}vu1Wn(du;-sm;YpZ+-aI zrPB}HG3By14(<5%s$lb;nd7##uDWIJ(c6yBU36OWdpHj52LH4%JfEL$FDWQlxODsJ z7x(ObVtZ}hYqwwZAGem*`j5Z7d(-`0FF(8L=54*bTRZ3N84DccIl|GC{I?)yw=4B( zCPZ*KuZICFEr_4^&`+oiVDs%bK|>k1)Eiw|#%n?6A~C`S>WGkrB@uIi(x-gPV5@%0 z;FO=6o*HK{8#N-_ew7NG7#)_vj{!Kb{OL?&m8h7*pu~t3u%w(50EwU9)YeduX16a#Ac?=K)85}(t zKhlD(0Tc^^C;FNH3$W4Ntb-eJFl-vG7!Ee*Ux1sw2L9Siu7KMSv}!4ojiym~$e0LO z@~{n%L4lAPF?1a=@K++51n*Bb3CTJQ(~n|30qbGjz>}PYtoH0Ijtls*3bG6EjcI;V z#M+F8wES}B)Q*ZemxPQ0kK647OIIXVDI{3=GFx1nZ65!f`NG{Re!`b* zUX``vP|B3bsanBq6h36m$f;W1VU+)^zw@@f1C6umd#aZ0)yp&C^W_R)2Q1L^K!WCQ z9l>_W?TN#=snS3>EbQ}v!wxwLlfkaEoOC}{Nb(`p#m-?TFRIf24kf^eiJ?Q_49K;Q zI72~8Vqz_yJ1(i(x&8E>J6aFFJh9CYoD*N}K}v!o#Pcu3i(*NMD8}EW>FHiHtHN8G z-O#C#KLbz0{94=@+U`0yH}g1PCbCAzhVo48kUZl)WCLmW4Xpo{K>fea@(d)r(jXCG z878Ham8}CkixE6GW7pu@a2kr>Iz2kE(!WyC2-q~ z49_PCw*}sb{_E&7%KS3%fRrDi8>|fxBIL5*<}tPtAqi~42vh;( zY0ch|9%qfbvFp=(nGL<0_Sn-bRK|( zWn~Nxxi@?5@$vQ>QzYkn;fA#d@iRP6FdF$OU$SLYM)#rA$>j=-d~8Te-T&v|X(Uyy z5MJR9z-Lg%9Sa(99^L`?5tdPLVc{Uyk$eclZpaap=H^i_x%dkJi7Ol2!k=B}%Eunk zL1lKY%LEHnC9_C~T!hwcY<3#WOKd`1E&t#+XO(mNO?&Pfdjte$Zv?@)6ES?qhVQv3acILMA;$3A5T1lA~JF0L$!z){+SK zXaK0EFn9QSNWWon2^?PhEu@jW7<_AaisV@MkC2@D$9#t_c{#DN@8&L>4=I%eamD9C zDk-FRWk&By?S@3e&p?iehW3|wGoI6*+FvlIMlWA3)%Sp(iI|cI|A;E9TPaVdT#IBk z64y<6!hpQ?1I{Lch@{sDk!{w;L5+pbj<;AvRcsdz{~!sx=vSEh_7@hC1TSnYY#rNN zS6ed%9Ri95eX-PpI61N9*28YXrbnQ{=3UI-C)rMpYIj;S8Ly1EAej&b?hvV`#XXE|Etd;zBsD0v_Ky|jCzT5E*G8Y zw*{^7(0R?!f05Z8w)VUTUTZb1t2d)k8KOp(_biE&W74N{4&D`q;Go7uPrw6P;@W@4tTD zb5+B_2RF*E-gYK?f~m&#-80kg`I>8bleW0Q{^nW!E4#ls&plT!q{m6EEbI8;o0sox zi*Gp2|MzA4wEh1Uu9d}O`?T8r--;sAOX#jkaMvFpzbu{6?+SvC2>g&lz=zRqDKwib zctaHLrWOVQw-nyvqm)O;yi7;Q8g?P22$8i-gC87t2HwrFp0QqMzE|m#t&(pHh=_#P zVSo($TllECZ)>G|tlf$~?R@W7k2lQ^)NShc&W{pHbUxF*zREXT)3ibRvGtN>Vs~XC z^0;gU;TnUD*Ho-r)G#T}=$*wEeMOSLFU*mDe5X!+Mw-oF(4g|LR9Kef-1^7yf|k6y z7tBd2{gBHY3G35C-#uDXoP2aMPwKzlUyt-&JVb=(){UO@?wM7jmVHG9}sb2 z9>$z&%!LiVwg>D>WF)Wn13V3jGBX$PFTQxbW=>)A)(JEApK9*7@6czQXJ&%6W}G?6 zeA|xgyPZW=N!4|q@>Me`Gb z<25jS(Mb`J12hK(4M9X~n`T$CRv_b9Yj;$>1x}$LW?&Ib+Q5HCE@QVjy=FGoFHBX) zYA9EIOHsTARa=a-oMB#*=t^`ke&dYfH|NeN9OoBHu8z%d_)_`RqI&zA4@PjEU#}1@ z#HAt6Kg@SNxon?6jE6rZ^Tc@`St0jBOD9-LTt4(H0lX)L3!u_#3^zSE#Rsl~&ilwp z(!!1+pdoE!CGr|L6r6CiM9$2pCVkB0zR|4yK|$c$(`EVv8cjPTQIq-bB26g z;wQDVw9Fhoc0N4xI5+tpzYCp-cy0ma;Jom-7UGCFP7ydxzIu9`kxq-gq9}}n&{B90 zfr68|0Sz@uM@$NtHae{i@@rJ@e2&Li_$}zl&CNt!+L+4H!rX$~g8V$MDdFcis%~Ndx?z#cVzDu1HtUA&h39rgGc$RYctihvksOV;n}iQd zc10oU3kuqjp2>69HzvOQMz-QEq$wNEK=7uqCHDgPPa;x)b7-PUW(_3wh06LsxT3r&{iPU7M zobXslu4GA;;0WPZsG?YiEHtV^ZzP-CmPX(CE=l z2uc}rG=&_%Lsk8Z@zCPI??5S0)VYY4LvJu9>Zqxy>8Ke$w%IeiRZ-UlvC&v8l!TP6 zM6oXXo;-n|NZJ~&!zYOd|KK935u?>)d@k*+|M%KoO?63>sjep>`%jj7Ik z%(S`wH=)b%a(Cx~U4_-FrcAkN7Qifg_BH_i0BR{KS7k?F)e60{nj&B6yRrodrDr=}bT zc(hTh30+kUb|B`*vN9KU4n%G=JTcG9HD#jEw5A$0TCmUJcFu za^~i6TuD(*Rc-}@pVwb*tAR2Y9TbYrZD4(^h^0#4r3el|7i6{qdErU{H;C)^_#ggW z$#S^YnONSQP9P+>?)->f_SSQNWnyDV3xlCF-kdJ#=Z2CVQZyRCa_yUsbzOTS0IJdS z|6EdRex?Iy`TruE;#%Yu2HR4uk-Ii%!!T*SPAllRk^&y(nIYu|E^?hlhcEzwD+nR# z$PEhhi;5aRugIZO+Uru3VHu(fkaz0ytQ!D$0K}d8V(4ia%EF$8f4sbm<7zQZvb-L5 zUFw|{fRSYanvtX*EP+A{&{K(jfkGAiS;vZ&5*0S6;$X{Q6N^aqzckg&32&ag=WAtK zZ7Gg*(}@3Nk4>=U$OOgoIIR$6WHI))S`vu?8ifnv<1or*uJrJ=2QJ$u3H|>DaRG*e zpEJP&2|pi?ji0^{eh!aB6oFK{$D=_x7O%v$43C@S%NxK^a(fPgqJ#XLF;G-V?2ig< zF;DR@IC}c;$2WZP6T;Aww+A1(XE+SKT%%d`@^#mopVKwz+TKU+0fufNdBS43(rmyp z8(tS!_i$H&-bEn+P8UK*ieP1e8WHA#^GSpSBSN? zK|l{_7krxmSFyVg-tgaTEN!^1(MT%lGW^(scC5fHes`nDkuK;dwv zSq`6n$%GTi`7fjMi<}+!G+Xm7_`DAujt3v7uqbJg6p;Ae5EAwKlM#}{Tw4t2&B{pf zC3~<{yLU)P^dj(LQBlC`Rj{-ez>)((H)Cp=+5PK7Xc7S?>4v^OfC{)meWZ-Q6(2m6 z9|nyS8%$;by0Cq8aFp2E;{_jnkEDkYkqnLG+A?!`> z?_2h_pX~e>NcLFakqCqhS~Hw+=lQmaqAczKRHVs*Zc>Q(tUn4`o$!*Pq*3cvGvcXd za1rA5f{|iraKuT>VNAXB5K8OAHJVWTxDu(e11@BR(u>u%jC)7MtvnE#7IlVm5gg|Yr^c)+A(FN z(QGyT3$WGULTZEA?Fajp|5Xt>&MVhmgUr{qjf^X?R>-!gK&==4rfs{(L>3 z7We5hOyo2F5reaidX9Zaz9(a|=Ahzx3iTWV1DN{RZ#5Ip6W5I_tyXSBFbyTgXcq}# zRFZ9ngDnW99pzG!V0)sDk1BMt!9K`j+zZwZ?cfULm6cF>DIFm7ipq+T@i}r<1P*#T zz5~iT^)Zp8ZRp|C(p*;)@TI0C*fdnZPeT~tP0@u*^H7+FRwF(|av|KHhEnh^c?;Z>dX#-G}E z&;D!W+F3mtb8?>L*G?|Vu;phoWaUrtSj3E~8M1!arV@)jxHzM~mFmNFT+3%3DP3!V zKO&E&3Uke{_B;5OnXey}&*ba3Fke5J{H-=qyoiQ52aOmt%k!wgWlsG~_P7MmqT_Uy zO;+B_apsL-<8g2-K5UpRTDDg&kX&@qj!!A|BJ|S0N=Gq>GQAoT1WqQD}h#bgPUQ@SSdiArT(Z%>0IX#YFGGol#`uU4o{c)Mvk`km~8ie1^yrXYj*Ni@^p?HyUJ|rW*gPhzk-1uNy ztn)$E!fk*&puy*a2NJ`2C7M1jX{LmZN)u;e9W`X=UXr6quEA&R8S$tpt}g+u*AT8( zMc{glPsMfOdhRghs%VaS6cvXtxEsnm#TK&xkC)kd8*FC`X0e=bA zm?wp=KR!WmmNHE5yfOx&PhS8&q@elh?=mERco34m3>?3MP<;0H9{_@<@L>#xc~KO` za2|5%9nC4|y*1q0U;?a9&hOL1TU1qE4BuSHeS-Qh$b125{ZOBXn9<5aqv@flB)$j0 zVQpKW2dEE$641~Awuc7Q)9ND_%!8IDsd;`!9;-U6w#9RM)C6Um1i=WDSF8|j$dH*+m*0uEKkIcGh@s(+U)0om#yXG4As@K2CAMqca zIQ9C?V-ua7vU`;?>9{bK@8ieUl*uVgmtDK8@7ufle!lpuIYnz(A~Cle>7!|b92mqJ8Gu-c4ImQb z-XP|{F`}VoBvjWtfuvl&TLTY7I(Ehu;QB6H(&Mc{v+om z7P-q3oCS^r2ihmyxN+Q-Et7n~L+_tD`-kl-d+QsL%#WDkRzJ0UQS38nLEX0ah1WcEcI9{>pId|IAq6Ouh-|+AX^H)FU7Y{RzrBZ(>>{%$Juq0}mZoaco>uz1M5it+E|oz5LpA6tQuo zs1In>tjC^ERq3z;-l2_&lny?MjZu&Ckv7&^xSn z(f@_66*z<(t~x z)-MP%czD05~(CJ!YgT&m(S*8cJGU zE*H0k*AFP`BW8X=s>SyS_{&N!%YeH3hqB1kaM za5Y{v|6b;drqYI_F-?;^JsB+}>-dt3F%)@6M*4JLS$Xr)RQtHawTJ+{K0J#dSdS6> z!URSm<=o9dj2@GO48%AvAB7`Hk_U!~0XixjOFEsj0q3Te1o8@SY^@HR1+?hVNcD#r zbY_G;KP@d-8u1F|P+T$cJLqQV%BGf!xq<9*S9xl(Bf)CcYq>16qQT%Bz;7^^2KW~0 zgQ+=;0$&DRW@z#&BY2rPD`l?2%L+FvcU0wO=friECN-ruj}wI$oUPp%_+^5v)L9>> zi??-F6cj0VStxw~ymZ6+J|Q^HnFs{Sg9H`OB!`oV0W#>8Ay|c=1o467VdT7ofdstL zOmlkuUN4HsQPJpA>YHO7FjSO@v}&68S519wU|Ne?!#HsG2@)#y;=qFakSQC+?c*O0 z4f@Nhe}3zr%TH6qi(F7Q|1W$~y}Yxf>Ka`&qq#yxb5@)UH9aVJlQgAPG&hd32!h($ z1qE(IU~r0Q#2ye93Y)cBp$BvZ4N6tF(9VSp7I3<#d)=UBD>j90#4tP2I?ck^50(3!71V;UWjP&?qb$Ed4{blaH3meT)y!6}L*4 zhxn&qwS+7cY`k60XLsf0FlCWpp*O7Kgz<04L2xqxuQ%XkrQ@pRf=&!ktcM)|NJi!z zu43#%s_GKPvibYR0n0k}w#dhQ4Ojl|6#o*Gm({)Xbq>p8w0*f|V23{$Z zAw?fjeTvl%=J-)KWskFf1-p4SHJa3bMa1ACArhl51NH#rJ+Pz)4<;rFX^P?&K7EMB zTG}3X^sakuo-;3}P z?Vot)gX`bOcw&A=PVd2rnui!}mJ6GOPRNr|Zi~`Ruqfhm0=E%{8ug|i z+gmv%8YnQ~L3~6jKz1O)5ebeFo<};jhZJ0|FJ5m!!ZQIS0;gkKxT5S<9!7en`jyqLFa`I7Q!<=K^9y*@$MT(l);`oTtD=aPpG^c^1; ztoM4%x;3^Ft5+Sb59HQO%W|(|wAo+I?zfg}2{yT1G46MS1Nn$Z>53_Rd*~HkjAAc# z$*@1dXbd8bzU2P$s7sDP)q^gY)s#zhz~eIR@Wh8xK)+RR5G!3rpcCk{o6u(*iHB$c z2ROgU=;xss5gDO+9`{`x{UZ6KNA)I6#dTMBypDh;-k?S4Q8lyIK?Ot< znL?6>O`p>0V6u|2LllC_BZawDiQT_6uKVeo`~G@*&3)gRI`Pa^^L4+}_#3B{H13(N z*A}p{8w!@@I3J5bm*#RswSfOA~dE+)MN&#BfOrX)B~#%LI&M1R3Ss89ct^L znrUD~Nr}XYRR^0kG@MM_dNr<-%*j%FVeSIr$s61EwX~gG35Lc0Q?VzBC8w-@dfTR_ z))nS&@i!fOpW&xsQ(}Lxk0CqItsEO9=U6Drokr>%_6f%U@maVIRne~6$3s5FvWgl^ z>%%@K7Ud!PSZ#Lz|5?N#bczxH!o=~Q$!vc8nw4kUTK2VXym8XF{R?sn?WOLbiRTXQ zfA?zs{Zk(t4Ena@7p{AHr|ttgO&_6WY9!dYmYcWTz$Z7_wi&6%n5maSh` zI4iGUL+i1-q=r5+IT;IxgUNYqT57w~|6tUfU=Q090AFNeV%`A3cPL<^L8ln%3gDP4 zVIbKg8frr8UB_BC6y(h+T(^GNywpr{wllMI+uT{F=1G+YAM_`+rKDfGeD(3THF~qh zTi-P9IKd0@SXOkrI8ZHgyaFie`Mjvf%+P6(PJ_54+V{aDPt6KR$7B9trU*socuTMn zGzr4i94F)|%{XAXX%f>-Q8qIasDsg0@Pt%UZ9xt{{UW%mjG?|Mlj*6VvrJ4L9=D^< zw{gY0O=i{fbPGAJ3o zFwg}wU!Wuf0D+yVngRC~aiuj9)8H8|s=f&6sxH9xmVM(keq~bo{sn^PwWHGdLl^21=nMOEE+#xHm0b59$Y? zEnq-^$}x!DnifGD|+6=fKA&LnX68rd;azn4}kr|JVQx) zS$TVVd0Bf(&^OFk1bf-Iar6{uznz;Q%@Fqi-bKNDbcqJNQx_G0U6_Lt@I5%7z)$13 zbf~6Qi?U@cMNpMUDQF&hsxA-F3#q0_9!0Mc+9NTaISIkleZhkti2E)=K%iFg7r!qq z0UiXnz0}ExK+LgDPUH@Sk)$fqs8d49GB855?-3QzN$78m^bnF!jQtPz0)~AW_HI-& zB2%K;RTYCN0{(y!3r6_><9DUhgjrlNegDM!ez*CG_ik-HJZoWs z-X1@$@#G^0ewTdEbl2TA?Mb3%Zbn+#>A&qi{r>)x)IVLGX4Gdq_1x^|zn@^XQy7Tu z1Lt1B&b{k#7e1g>vDk(d&gB@9A zi2l+_xXP253#b~sf|Wy40u7qhGEul<%kS=;aB#+amzdI4d-q9`e30K|yzOLDn=J|D z&~pxrz4iS){NMNg?Q~k&hqokH5?*?~_qj(h^v1Ny|CE}tAJU!9Onr)4AQOFDYp^++ z9eU)`M(gjR6od@kyU;S}`n4Tf>Jr8H0{@yW-A5m3mv`kBnyiwgzGhCr z>2p&3QD1*sr*PYC;9nHc?He_5% zyxwSsjHeC?)a9;Z+-8I zF#6cd2`@aKfe{rMZL?BSGoE`mLoYc^VMJoRp{2(VV+wF#8h$8_SPGIvS#Z~ zk2Y_gHr2t$YvbC>j+}q=OmQ8bsL7lx7V^nSyI$F_=Y_Q?N#9-%t5uhB{+nlCK<5$L zK^#rxiEo1^`nk3sde*}m!|pYFC^F%2p)nmRJm+?$r(z^Enh|q;-miF7yd<>PWhJ63 zx^h>ch!Sl(5sJsrr{w-4r+D21)30B#nb$g;Ih_S-A6owNXFW|>PBC|#uIKuc>;H22 zrVsXKr~gAYVWPq2=z8+VcYkR%8mxE!Tg*aiFLY0=?>X*?`krgjSS}PFG2@fnC7T(l zJaf+|p80UtXJ{rPTL*I%cMtt88HgC#4;L_=(CDJ}C4}|`eP^n?gV9qWvNqlhTK#Z0 zhQb+Scu^y6wbTe-Ks6vMFMt4|um;#dog01*ZmMhF-hVWkA|ev;&=JX>{yyz==L82u8Tm zS4|u>>^Shvk02w^{e!zYh^ApM9C<$(k#jOgEG0kkZ%*9Qv(QW zxJtp07ecMg4sr}*7b9bcK=`P!(?&vXZ^OZz;sQ7;Qx;mg&Qz_4?{-yfo= zT&Sd42qaP~z@{ji2(rRMJ-`frK(+Q(Qc*=9=Cc~GB@HUc)hA)I!U*>Syda)b)hVGZ zg$o~jFd&hJDiF3PASE%et92C=JYD3|#Z)Dtz_e|L^~~Mfz1o(-GRJXSeM~-O`_l=+t9q&~ctY8KvyI>H204V_LOQ(@A7RAgPpy|R8 zLxCsklyHzrMHbXgv2l1%i3h(LLYJXSP|gdz$S}y0pC1VDTz*kLhM)xU0(rSPilnz% zjCzvv7BzknrI&@FirgnfEdx~fYM0K3|AjZM`|T}LZ{qEb&$?yRid0Rqy{mcKv+Mrw zYD1@xuE|*@ExqN!eGbuVzwi%-Sn6;^t7+MB=G-VPjlojrShVQLF3QyvxpAMZmg?YE zqsr$J?mrbcqUxLJX>p*i3KV&w-N1%H6l6A-MEIwX7b&tdC{{|QfI`W~CF(4vcY&bM z3hW5421_sgmU0)XShf*CUMX<&6c*;@!jVx@SW;Z1Rz-U<(wq(k`W!xoK<{!FML}Lg zXl){;ol(4l#ui!8XSAICCw%{wzkYqzoxJDU^Y7ZQ9;sIP#FdSge{bt2Z`VvS8j{4k zHPWiD_us+4+9g+xXMb;e@#90^=PMq}@yS1a#F8$V{rf+AM*ge({S1x99``dDIUn8| zv|ssT75xwR3)~ziy?ManG-RQy4knN$=?8YYF-kRqx0)$MgSsiUhheDI87L}XF`CU5 zs>Wc^05mp3_i`h}#|@m(VlYPHw%Kf59LHO&Jyx>XXu{067^@jAW{}F~UXHGLeDrdL zy|lY8wBF!zMl2N~!Qr4~ru;^Qfgd64rY zj0(sM5F<+$S)(3N)&R2SP=_O&V2OD<89WsIBT1*d8$K7l>hmt z(Q0jNwZ`io2!5tGfyaL_tP}Zq6Awa59@QO}gkz`nmu@=Z~jGPQQ7HeP} zEAYc))|pMn6N+AoGt*M$i!T*y9%+@(4Tn|>ZwHx_3WJ>&bhHHP>uOM+;&s~`J}<0Y z3oescyZ$I^w+IRzW&$(9CbAB||A*h8)^s4*lm-PU69Qzkxh6;JWN&hHT(Oknjpn);g{dK?Yjvl;^IAf3d4aqj zThP=m(Mj=E{x{{dN0K{g(Q*m;eGK8)^sPf!edznpXLR^iBAGS4z?e01 zlyY()a+Dq+?l6}Q@BtbNI}DdrFO&H#z@&Aya+`2dlyKx7ddH%{((!?wjW7Q9p&#%S z4+OxJzX_&n%-#PP_l*2E`TH*3l&&!4*R?NyQU^r}c1%9AV{*84!8oEW_{kv)QFKnT zyZ&cXPQ`U$BaiOLE*qP~m2GVt(Pbm&^FKDmN8{yCr_F`mFx&03_oR0{HJqbNo(>L9+Qhs8| zR~Jp*G^K=pLD=<+t7d%tUw7R7m)jTRwC%WcPQli79X-D|H=)eq&aKQ9-j+YSykqNO z1|#O91HV}rG-Cb=ZGDXBi2TF29h&Ddh9XzTW>6R;)-1^AqhUniO1gAFaPQ99rH!E zrg(>D>k})^^2R5+gtPx4e+ynnzU664hQ^$Gh&R5(XY%7Sg}BsT^OImJZ2O^nLBWU6 zS^E&T5W^E9+=GufX#Xri6Bza(kW54r9Co9}>GL_f|??s$K?w@ z+~&_g3Hh&6E;#JPk(^dojM}-r=(|z|GwL5Fq(h)-6KzSIc#_RBRzZn2R9x5%k^|O z1N*&fhDqKq$N3RaYj8HDRfM@*pSG^ zPRUr6ny|nE0}4K}Q+|aUt)+^W_08u7ysR^y;>9DUi$O}h0d zzS*Dm1R)@seaz%$0Nu1MAIe#%++K$aW^fiQW=RR~WkG8|W2a$QO#YDPLnsh>PYowA zUSmQMFyl5XpU*KWZiD$JN}L$skqFKsIt@u{CWScwZWs6PPHnDu$n6tNakn?II|cO zfkbXBFzK@k9+m&~aNm=g*OiE#p6T!L8J7N6gc|+TzdAf?V+ugAdFPW|Z}H9F6dpMu z%dhVAdik+$;jnxGpV>oR^NksoyS8oqQ+g$>QI9&)B_xd&q_)SIv=h+Bw17YCy zki=6$uQ8y>k`ov_)!=I;O{}RZ9R-M!>|2`hNgY8jQpw{F_lQFAR%Z&@g%m;qPA*-SF{tPvtDex>J z&z&OWX{neHmjZr6eS?wibP$eK8rci0hl8qwxfWW?wLrL>X9*i*lWF;B-kbykCF5Ch zRZnGfwg#F(!j}L`G+<*^sS=zNe)d|y^168&t0vyKDSJX(@7eN z#}jp^|EW&04=6E0kdnyCNQz0LN1C98^xAH4sG z5Bq^f0qARN5-5(tNyw=QA0d=;0;>lTuqbyc^yI)ZbW8~dEMb5u(x8>&Qtp;|V&t;1 zqN>rITK?~s^A=anSY6hBVs%!Vwd>B3w5pfmf@zD^FP`H^f2+pK!kK%UCm)+<7Jud| zy6~?=QPap@C7JfD=-x?b{4Br!eQC087h;UKQ^Yerl(r9<(Mvh>li3_!_6%yMem?XW z*~oAqw9Y-!_D}9qX8nG0r#kDm9sNlOf7mm~u}I_oq3i+uNh5zhRobrnzW?p0-_M}m z!-Asy{8^*Sg)89xBk0I?Ab-dP>6o5IaWSw*^8Ha!ZaMD2Ac+Pv@G=-AOiocp0Si6Q z+@Z~(SroNIhSLVEBXAo$>P_M3BP|16`;=Gl65bXh@_s9&Cd0irx_97`8Tc|pG81xg zatd+^>|U=s0b(ZJ1g{J_uQ1yX#*Y$?SEk_t?R}AG&)drrMQO79Z2OsceK&L$ZTjHh z(#^><5I29W{A_Ap;pua?9&cRBH>W(*yS8fToNFJPbM%wj>{@<%>y&?=$rH7!geMJ& z?>=?UFN?(!{Nt_iHN>n?+d*(gbfB919tNAhE#?A8EXWu1Iy9q#BcBtfWP$)(ig+2K z^&%x}!mXml69FJ#H?>IvWc7LmZNwNOCoQ8%EE^e2JYh6%!BujR7XZ^iUjx9JRfwi} zlrSb^Bm~e>1Y0V>NJax!?>mvt{MEPUG2`R2vJynn)Nt7lad7RH^>uiCwT1bC}> zOmI$xMVZgN9&}RwU53U8wxnUUE@5&A&=p~BVnA-XL63fX%2ex7q4EIh4ZS@wDv#ow z!KiG_&r1lQF-;~OB^r|oF)|k4+;12DJOWgC&vIZH;V^i_e!$j^UTVJx+G$d85^t2e z9>IihOS~z>AgWM8h(rf!H;XZd7+P47QfiEOJQB;7UP8Y^LgG<=l=z~e)vQ?*SG%8mDpulg;mMrHZ z`GvIqJTZnSG?w4wbBSY|m#+#l3~Bm>QgDZ4@nY1rF*v)RotGk( z=tSkFK9x6Wp`;q~cnd|%Eri3QgH;TG3nJ2}(Hd7;G3i8OMs>VJ1IVkOuIuD176xic zFhO;OG8Gz7j{Ta27cEHtSKV#Os}>PDSE}1xQdE%d&UNPoe6H*SJHSPti(`||9U7&W%P0M6+4ngS!D6 z2@N|Z+}8C#=B?3%RE>JhXfPh+45Hy+tc6Bc197U)_d0H6U<+U@nH*PNTU9}nlAGi6 zq^Bl1!{gB7h8>44j!s39ejIZwdeI;*G(>077$Qfb|NFlqSy2r2h4LTU<@K5kxVJP! zUxzF^;v?THKd?0S3@6X;^Ww_4*M1w zLMb8V4q`<}DM8c^A!idpu?B<(H$ZkU9f@h0t2^~pEgH{h7RR9-G`gZFKXOYf=lPOH(QNF|5(M6MVZ>cflFo(RM%5=d|dPJOMS6b zdOIU70@+u}pI~8GS#EAdI?t6=mSJ91ZfS05NiiDSyVG+ra*~sr4qH4+=t$$!Y%G_C zLMu_$g|oaok*FFihN{DAgQnVr9}(^4k=Vh%8f|7I+nG9HYh0V7C=dK9AAg%QT}+%R zTlgD#<<%+CM#ug3)FmM0W5vlKHDdD;cii$%-);$qAh=TxNJl6tlQRh z_Y@SIM|FReb*}tO+ih`;>-{M!3U7Sk)&UT9?Ra71gv!D)`*GKVJ)15pw4g2XWPtNT zL5d2_UjTlnzYy2M590+FY!8k_k5+U_mju5Ls+JUQfj5F`?3f=0nQ@9A26F^hfh^Ca zC=)m}@Wc2p^Ey+DiH%f1QW`)@5T)bP5cdSDq>NHE#q}BKmlatY#+ug)`s(Iws+x3q zQ+7xErX7t<@@eLoS-v1=Jep=_m}kaMUf%zb4x?&{o0*?x<_%pYsb=k|f_^)kG?v2~ zdiSR)z6!z%D$!Q(M9$Y#15bRYCA>VCOx2f3Xc-*v!{-FGQ;X7P978o!Bm6dGH)%8> zyGe1w@L}^bNRhmdqiWd4$Y)asbKM`VKcu9X*cK(4Q=#pOGYUMK>Ync1cTA4b#7PEf z{UzrM3i^K*R>kG}D<*Due#hxS*83-a+qkdAe!^bL|29NO&?RyA{KxF>$?lmH>_Bu# zN(4+`_n@!}Fv-efFo`%K^ky`540*Be2vtrI0+1uF0FF4Hn>vJ0i6v^o*a^dB{zQ4lbQiMX_7m0#-}K_!halvCd3=EiE|jU zCQa#IG``bo2F9di74lu^U#PH*15B4s{Oo;AD|mzjI$ zR8dBDjo0JK&Pa14#98r;7Sv{IlhMsz5weMjIXODDInmLPU43Z5&PDzThxE=!%Cczu zfyuF9bk+Po zy*P>IDTjyDC`t}i6i10OXe9%MA;D0?>W+~qi3G7VohDwd=THF0!sThGVqi4KL~-cl zk>5kDM{P}BZm_YYt+uVAEO$)a7@vn&uNgsGO#qpv(asOWerGTz2@I|U!G#G4)fE+$ z16oa}u_X1%Tsu9kIX!=2F^S{N|MhU`M&?2-URp7A@{B{1W^GGvm_Kd8+~U+_g*QHV zMmSkNKexDKUM_DheK);qcYb1Wl#27|`lP}QO$*kSSJh6wpO5nwSyC**4#mq_+t*N> zo0VEPA3-d|M@9ZW7gxlm1nux*K+n+}gR|>4d?r(aNA}yciRn`5izN)lfAh%dZ%R*g#tnY*~m~9kaH|#sT z(fgzpx)~lf&*kN~i#$ar226#fnn=k8oQrpv2J5NFAjK3pN(MnMc$SSgSf`3mP@ReL z^+tz~zEKnvuh)Q@I-pYVd(2sO)AM{2^M{kiD?&FVeU$kFw0o3$;_8GvW%5cQS&j*2 zv{o47_la5qE)4C)RkL#FhoB?+kZh%)AF?-N{GMK7HYH&}Q<(0;^adtPC}1$KvTpp- ztT!R2j9RP0o>YDar`W4os~gQSAX{}26QgD|OJ6;}CD;iuk}tUvLeeo2422lUP=E|~ zlF#vKFW4B_y`fSCGPi=Bl43&Js*2*ek{T6x)59hfP**8e7_bQMhG zNAa{ID*+6RkizQ?J`dQKR}hfz93qRfp`Y_c9j2T>_5!!a>>qxK;KkmKppsC~6@GoK z4ug?M+cy|O+P;d|OTmV4r;BkG;+#-;mxP0Vl>HbCM_bhJz!zADyey3o>lpPq=o7>3 z2RvNe4zvnIS!JO}0?MV>4N=^e-whF6U4-$V(<`Tn0pQpN`n+$$S2+L*SB+JD*d;{qe@t zgB7a&e@?&fwma@lp`XBDDN;FeUK)|H9u+*Zrlwosi)Re>-8ym-=?Xqc8mJgk)dp+X)4$qOOib ze+S>)Fc(h3jaiwm5KV+DMsmF zzfPEcik@ixDcSn}Rn$vfy@|}L2iFe2TfUCB8WZ@qao_meZd*&nlFRxQQ_B?=zbVvx9Xf>-zN<2>NnEOE73m(B?AaJ@mBX9N6=j0a zgz6RKMx#|2QyT~;vW7mxTZ}f|V4=r!A?@PgOO3b+fT$?{Q^QqUMw7u{#Het@?l5{) zr-QO#HfyMcn1%fG-7uS$c3O~qBray&Y5J$M1Zn^rj%pq*`JEb;F1ivUG;Q`z6KXe`LR;sr(P@w*bqAJUmqSZ8SvzBF3$a3IqV5;7I}wT7oFmeQOUYG ztBhc<*%D?1gB2;=*F$aNG8*LLpGT>Z&sWU(3vZD>pzhDbeB7v&ED1jDPc#B_EGufR z;1AY(O%dnmifd1=34l+Omx51}Mezxe+F3)N8O$e0+Zt&p62YTbiUjRQOOZSs#Zn|S zM_P(8!jTV1B5`fdVMEs=)VPG(BP_a<+aq+l zgxe#`yp-D`6uyMpBW%Bv+at1wUe6FIfd^yDh{#7I)Dew*k%D*ddLv^W5!wVx#QlP?AcQKouku2DTnA|?Re;*1<*ljxOU-tiei?Q4fn1xl)`AKLpoS7Z!*+c z1-&V(f@a2igtwZkyxtntKrgybaD+9O{z;t)IuNc!nvw)H4zpJ`qH%>uZ!n>bf=b&} zeGU4j(WvW&PP(+yY8EIEBOo}?qx`_KE0si6qRa6S3-X7)+g zS*;P9BQ5h%ZjO}8OSU;u|5}vZKrA;MCGQM&?5r`aVYp*{!9}3*4Zunv!eDaS*rtXt zRft9w=I1INGP~UqM>&=h3-iFs;qh0Y`Dd({8PvRT7B_>Mr%}34!&#<@@|=zD+%a~G z#VpK)D@*=*OefkI|H&)r=gRNOcM7SwsqKqWC(K*a9|*hCc=M#4?cVBD^Po&IC9^PoF0eu#NKF1VU zM3q39?ic$R!D_OED3s}bu^$UB#dN>e&qOZ8bide-g_mNwU+iZhmtwkK?8m}OG2Jir zGm%R%-7ofI;iZ`F7yFsWrI_v)`!RK?U{>sp!>dFH4&@q~qKJt&n}mKik_Cf)kmIUD zYaW%;5kmtv6ZPCm#@mQv=tXj^(M?Cw7%o%2py|xC9#_e7Q6m zB9yuW8={^_mu5qRzs*rs2L2c0#zq7+VR+*x79t@x{M8c;T$1e&m3;Z_5FK5T?GUAX z+3j3{okURUykPR!=7u_mnG)oT2T0ENJ$Ac0PSx~6VrEF5AS>Psiy62ThY>SU_Uh(| zdIMhrw*H$Kp2ohEuIYiAr+&wGrP+$=ZKbvKvVXwm_(zPZcF`RVej z(EaMUCTK?~H0N_NG> zP&EcJ&(Qcx*o$%QqQ!`29;|}MY}WQbBV&F*LpPjBOFQBALm839!W@Ky9crXaj321Y z7_#W+Tki`j#au{3w}yzDb7Du^*v5w1n(8WwxjFp)L=Sn7d`f-|b>F4R8P>F;5n08^ zbV8S**addDeHywkI3l;|!235`{Y+0ogC)n(k~61t{JM$$BTLHG^iH(3Mrs+(n~)_b zTrO|A{KXA#%3}tibjICpADOtiG;fN|@LXahI<_>PzOk)um2ZfS@zGUs>zYS}$7JKK zNaX^tJN{#-3cAG@u8teeZ4YiK#{D-`;Qs6DaR11m#r;QIkYv$VBn@GDU-YxvvcpLPro5^SjcN=8Su=rcTlo5$S}JY5P% zHdO(VjSYY#=57I!MMOz4!5OFFT5H`~ADkR@^@0;aj-GsSZ6K@lPyu}7a7e5=F zP_+uPi@c(EO}v8f$`!@96C3eA+?iyH?}IB%!TdPRD#TfbP(atBg$w4+nKmT`cZs;0 zC{p-h+(k`Z#$byiB-v1Pr1nem8_m2O%NF!DWbF3W$jQRgFU5Z354P@@RZvx@$1uUa z%~x{+wNB`n44cThZEcz+aDWLyFOt`XM2KeG5^Bic>5g>Zrs)MRESR(=U2^- zPPVLFvUW|+SJCN1J9R21l^d_Q<^Jp-I4#ZHco*<1fETClT)7?-GHLV%ng3bHfKz4y ztKA!yAS;|oM#6cj)L%bpYH-x&^MZ**g=m*rQ&f|cL5-2P0=^*B`-!r^tHa?F%JicA zQ?)}(q9aqHl+*yr^rF_l4yTZM*&B!Uys~EUsqRIm=Ujb$VMC*(!jcaO=$l&IeZxIB zbRT(b_uhj$X0{#cx;AC*3%BoiYNO4X_Kcmlwv4#K9od1bhT_8cOSfJB)b`uo zT(j`Cdv`xI!M*yc8_zzh;!-SFkbN@*7BeGYF_$`sUj!ETJT@$h1CtK_x50wubzKb^ zPubv@j#sQ8MvVJLKzd&I+A(uG4;^xIEAuKb(h;M6ICRC;S=2&+@RdUcin?&zw>bE| z`K6PO&Y6DYxbnpf`(_(f(fvV0R~@3~_3+#^$G%${6bxS*j+6e_nYp0s9S zvM<=|n>w$%Fr@@poe9agd9LP(J$q*@zOuRcir$Iq3loFgt$h^!pqh6haNdJ1e&|J* zg>2fi)D)!C8hKRH349M20<=-35g|1&t0*hT^Lr#yGMqyKqf0_xoT9u)3}^z?pxWxB z>LgvJr~?U?uzKjwtja3Mxa+OsCx3C-zF!|NSjfkfRrm$@Lg63BpS^JXJH@Z1t-oyM zck=w@9ocJ-wN~$3x@*1@8pPhN#f{Z%eQk)~)mvK|^hOaL^nRUe0 z(OG?ET1tD?s$TDuV9OMb)OF_BN3L3M^<-OOV+p^gbXKX=T{wSg*`$OSTaPcFu-2b^ z{i^0|6Q}G#E@cbHX<9U0=$jK@XDShG;s$L8=ioB9l3*bciFx!cMr$uzO{i;R#G%lj zMC7KpeEA;9g07LIa=RkC>_YX!^`NsZI!1~c7XIKK`9wSQ&~4}E9Q)PY8=pC|Y|Twi zNe`|5*0xFwZ!-wj7*J_dG3W7XpTDEOffsIPd#VAxPZrP$crZhlFLL+@a?8c^+ zcA~_FS30E-Pl)*mwI>Z4Ts{~2owIo&p^aABDv14~O`)<)Kw6r-^|PPI#fvgC7x522 z$S21aRD_tpdNoV)O|XF@P!Gc^E1)~DtjHVY#> z-y-}B&C6iVU|vRW=rgjB-G%y$;U4LoA$@9E zNBw>V{T_Fw-aBfkvr&t;BxsFim5k`!hDH;iKJ=MTY%y9x(?{)6g>KNP(hi)pr6FLW z`UxLFC{vpx_9U!Gb{7odMw{K`cPD#89p|M%$RQ=@sEH!OjszO|q-mLapZwwT{jaPc zgKdov|0X~4SRIzkmZmv7n>& zkT#Nm7BUE9(3Hf`qoL1xX~u$s`*93dB5m+GlbWKnHM{* zaO;W{&JhmF?(+XX>;IegiZ)nbzL&+#3E`~LDY0)@4w$9(V2t?$qf3)5LE(i;UqzK^ z$KZuN*8CV%3|BnYMIHsv;76J+$ow+yNYG@9#|%R<-KZ@sr5@*pYKJ7$ zx*;`wB?@CvBLrE4ZdRz{nFCVObQ|%8W=dMHV4#3!aE-LVQu>p65x6I}= zY18t{74d|QDz*wt1Jmk~R54r*DUd)Kk>m+LS|wB}tO<=1KU(ETf99F|$yH^x+0~N@ zTOYWCKTnKl%KhL~uibm*kNfuj?aniNhkPDvDZ?z-R`9RpC#PK3FuiosYH+7N6A>?A z%;|MvRI+{rKr;ZJEJTbB1WlNR3gZcLI|x1$NrifgN@rmcRgN%O2j&!N3dYj%$Saqr zJfzt>%u~dpX%+Z_&;ghVu4r_2m`9UX^jCPRy&=l4BDrrUF+sFkC6+z8;KA#;hC%>`EUh8yUN%DkHRyg*Qvu1U^e+ zxG;tgQb*9DA{1hm8hX{Ss)S-ZiaKd}6mHLpJsZrs$YW3{P-LpC%<)s;X*5;%%WWxs zmxHyNRI1HgXzdNfl;(RmOKplV$!4k$^hBGYmnwY##r{O+<+Ae?`A^q=jW^wr(q5Ga z5%<={RKae&*_rriQA6@ePkrZgG{<}W)AQyGK|eS6^{?!|cN%Jl-#4d=`nmG+=i`&I zKHV?mXf#J|xosN?*$8gZT`Jtp2jfzZQA)LXIhle+M`DiPhVpWyMh`Q^H8eV1Ex7yt z*n1D?xTwR~4N40;2q)pZwIAIJzU)r#uWEnyF4t>FXl8R;~RG`%>ZIMu6^ z(P1PJI`%4^;U@wySVzaqas|*n^ezp7Q+R3u80Gt{P9biac+)1gi-7FSXjGsX2Ek-c z)DwJtuTe~hP{F%er$G|rsOV@$8L(vf7ntZO>H4tGUsOLwy%Bv*^+u+BBL%q~`vQ?p5ULf_>Wf7pG2J5}brO)7Mgh^x zF_gaQ*Bj9o0qPcEX*H>1zUda=!F{2HI&L9qR@1yx)#xouD@;sqMB5{A6)Aj@3C)we zq4^m>d=9&X)KvHf!tTK>9C_o@t=IkSbYe@X%Y8(MQ!ZfdKeuEw7S+1%y}jae&lAUQ zcxfGw_{xYRXlvmYFsKu*k)(Sc9Xd9j@qlxG)IS3llyVK+ivbT+{YC-?)u_SBh7g#O zO7--B1+}^a<^^FvwG2=|4d|$fS-2+VFCmmBm?)4dy1;5&XY~Tw$QmLw8Uf4v^KF4M z3o}`?4LTzYeTxdzRhIjr?Y=BKEIj6eBDYhzYPL>+=Jehtp1GweR7@8a)*V{%JNdJT z0N%cO$$Gcz3xa-w+w;qxuexFU%olIXSd+lu-JYBC=Fg_U`|OQ3Z{2$H+Q@j`xO};h zkB=Pt&bLbWD{lIFT$Hd0H-PhlmqdUEJEia7ZxUDjO&z2Wrg0JCML=vTCe{K%v(S%S zWLKF*^tcwKDADXeQ$6(YaDMuU@DPuVM{+O?PQ{msbYxSao@_#j|{`DH_1$4iU7YC!)?C*vX4bANV@I@!LC(JPoS~ zwlq+y6Exu9lHg2Ts=y-_@>@{PuQLd6lJw)KY$`~=gAfl%bzvmzH%!a(IGu0{WqOmH zNlsWO79;22qp3p#$t(yhOb`vJJU%<*6?}deSl|tvTXa7!e|&%EOMLgwgk(~A*lFc8 zx83#8l~?@vcK!?6nQ1`MfAIy^2elpDEtkmSKl_P1zEe9T;g}S5zC6(R9Bv7Bq|>bj#Xh4ITMWhxcnf0M1K(tVilP20U0k{ z0B>f3rXUIH9@)I|{_T+l6|nd0*s+hFGYwL$!`dmZez^bXBRjvnk^i~|^1lnmqMq1) z^eJIxOoLPi^8qA4LvKN6YLz0*8bum4iY!CG5G@fAC=8Mp()x53@W2pdvUoCDo%W-y zMPTN03Ec??GDmZY>Gwm$hucrv#Ap@fY)f-}9d4%-B>|ZkzN8?{S}#bm6obK(RYtJP z-98(SuC7OW!oc9Yi z?i{%xYJ9bB+jF}Yo!T>xpU=N|>*L?}PYvW@Zwl?ASiGY2*5}1FKf{F6~~@(caQr+f>_>S(90V zHeN+}SU z`GM~o92gxQ*fhAQe^p;kx-T{>Cq2E8rA;`{v;@KqU5G0XHZy2akNT8^BLDa${xF}r zghed3LScTWR;R8fT89$~Fxdo$5^G~wn?B~!21#Q3i>W<7z0B!z)ZDPb*Rs8E|2?Z- zzN>9*TGp!OqJiP%={0e#g0z&mxkkHb?1y}LR%4PWv#Wf|fwC3r@>}`4HdOd?Vzz9Z zzi-D-;o6Akxb%(fW&Zi|{blWE`zz*@q?gorVoQbZ3!9q@@o#lUZEj#`bA}&U62X9^)bRhN`M3Kt)F%Bk)VjFrvr? z32ipw#5?uZ5dKP-0E;k)iOn|gCMU$Z98or?=W%?TI@^TIO;sIn8oWw$puz>XdzB6i zg7?y29KQ6I*B(Hs>iB5wjaT*F+p_HY@-hB}f#G$>VmH6@;GG{G+VbpPi`O>#y@QJi z_WS1x*W|A|b{migJ-TF{uz@?t#c(NHU7*_SVu!lm>+)Q3VqC1lZi_JLxfnjiptN72 zcLg%JoD<#@-8mU5@MB%adLFsv(*3;Ll{K)eYEypY&_Qt~UIj$POwZT-Lfj-qg$cPs@QhyD-1F(MepI}aR3x~}UvGXor( zt{yq@wPTz0PGkMk!B){N?RBl5pQ&XKk^-JB#O6R{C$Vwjmmqv z!Z0&*-;1v6edNgA{c>04z_QBCc@+o$&aQg}Kza4>^>|G~G+uK-LBf2z=ApzIye8%! zJc0QK-`Bsu-g7>ifAH_UD?}6WGr#Zn+Z~4%-*fqnFUT8w%NP5Hb4vI8j(1eo z)h*NQt-EaCn!68=80?0ct!;U6tx4JZSVCpzGPOU6c#rgJ%mcX&G-WW|p=)M-f@tCw z^f@xP=<~0rpEEwf=A_f-nTY#fJ^TbGbj=g}1n^PN+E4T}6M@$1P4`vcf9mJ=O!EwU z_mpQ;(0hbEFIqkkoM zmInvav&f@>6K1NiGiI8*2p)hJgNt@n>2&bB9}b6O*ypcJ`26AEef(H=dFWk(-$U~y zlzU=lla6c8r}KxcFVQ|XCx@>$EDXLc`&_-h?U*w`exB{xoC$*cG;8Uerd=ytue~?s z%7nk)DeXNl^Wv+a_tQscpA*cL&t)^oDdls#7vMN0uGlJX3#|dHKMU@~U1)m|Z&dDr z^_`NsRoPd@wFO#BigRK-m`urImI|dT2sFi94eJ{aT|@gXJu-lx&~&D!@myhkT3LFT zBH$veihv_mAcl8o0#1<{Y{X2sd}I9$ku4RHe4502_BjYVZ|7BOd+%v&drOmeVoAwR zL$Ve5g%#TyY7fM2`R#*uet6}7eaq~%b>4be$KL+^JBEd$`Rl%T`s_zF8(ZDJg52SG z!#nn0wt~@>Tsq2bO!0XT*D_5E*8m^(dkFAWc@OPleYz{Td4ZOKeE6eb7{R*_ zpM?%JQfMg;GUwKz3Q8P6BpY@Rb$le>#wMZDdy}B1s0|Ei8qMa5&xGO0N#tpfKciL7mtp}28@w6CTvuR0W`+TsUNI_6ih#=p zIRzRprhx*@H-J6Ex?)B}78Ov#;G~2YcVu={wmX%I1An%jvL1aM^q| zxp!6astD%rLcv$cs%D@zF#<5+`TM`Px;S=D!bVF+Z^ox*rbim0ENgtkw(Z?L_L$ea z3Ey}lNMzyY0v;eg$6#Vbl~g_Ve*2kh@-d?OtImX=8`Aqa;A7N&#LS!ybhe-+h!;vG zNu81vQ8HkuJ zI;%YboARyf(e`#V7X z{>L%ZjY)?6ro_7P4`zKKy+x5}bFUa1yJBuyWW($(CnQHe%X;B)hQmxA0*B)fNDTyP zfy00zB?FO#gD9M!vabovt*O~}N^(M4Vwwy12de^pm4yma1jSqH-_g)rBhz6v~i z+V>CF#hU{Skv8T1LjeSK4!k{uN`i+#UR_))&m|_rro^Q<>=9;zM3#+F(a8N}WjF$} zfWwXuJVEZh{XGA5X4K?WT;9Csx}Hb&HLjB6w*#{8_?W?As#BG5rk>ZXSQuZsn87++ zCx_Iq0StTxWns00nQFnGi+W#0GspY-&-1>8c;7RbYnD}w!3Jx>>z>)yv`QzxT`vm9 z$MgaNO7r6565bT-GLbyY-@T~Yh)h_2tG{-a& zp6x==eScu3k2T=a!@z_jK+X}|*e0_e8U>MtSs7>y1-eF}`c^k!fxinSDIsZ4T!c)= zD0rYMiYu}+Q&SS+oQ1B!C`XjT?M}5?Vp(<_Qr{=H>e`AGxPWvoP>XS^tpx0e$>-7S{*aKlIM57^o0X&?mzafd@uiA^ff&T zzZl!uyMOhvuB*P>Y4e)*erMB_AKv-p-|Z2O9{eh~K5#FTf4dugg*eVjz7OqPl?(#r z$zkt|mxOYozAw)$;<=Qh1oTyouz<^vpGNV0D6b2z81gGZE*rgGR9|;a^ZwGxtE*bt z6XiC3!Q~x`uUWM6hIjcn5xrHV%SEBk-(FR^_|kf#Q{T3|Vd1egYp!YIuU@crHN7j( z?7+o-T&t6m&anKbvKys2nIX^ce)eF*b zIU=oG316bJXL?yU=hSy*PV|goKZ7x0k}EaCfN~!NshDE^;ahLDZmk{YAG~w*U9Yud zRrrOnhK_B!H|eTG|C*YuTZf*UxA6JcTTagH_Zw1=2Riq*j13mG7^*0t?$nXFACqRZ`3=NWwopbR43-gFB6i=%Ny(cf{i+3n97OJdJTCRA?lz3awaki!kM5w3T2K+ za>~mgBO$8M-sny>vxI3@?xQHh;C)QlyQnMGy%fd=$Fpa1zl;@X>M~d%Z<*zV^IS1_ zM6HI)33S)Eb4Go@Wo;s=CjE3et#XklFo??;=_yIE9!GSP6+{@qWt9lwZ_G%`(5Lx> zZERgdA$K-$5wzOIfB92OpIvP+R2FF_Vb_R~88vOlpPu`-?sSmiE0-nNfIUZ-r zndWSY2}H3qO~Pv&2IpfOhAue9jZ6(F99e#pkURmz5UoX=rQf zE)_Dw)`jiGIjMYVdR+ch>sKDBt*;png_7)fIW-NH^=UlsuWu-?^71`#d5P5xD_S#` zISqlpV>pz+70dbTzHG?T050bOkE?;tj%eVr2pgB-vOzDQyY4|~=}6T=WJ0gkhE0j$ zG`JiUiD(~k<5S@h@VFuyq8cXPvLeS(ni+oPSzW1M)=)XV{*u|;uG0ML<|JnK>F4m= zEN`3fWZiAR<5q4(ptlZ9(p&3mi;bY^NQAWGFlkl?4gug_VAbGb zlB8-pkoSyRL&`TdR#*8;^K%gcO-lk;-~#aE@4lGSRs_~n9qBMc8G+?`{~BPq3Y@tup5BqoD^@2YD1^{Ee>mkU#q;}XNmeC;zLNl# zA`V{WBFyL1BTz44{4P4~Ls$W41f+TZQD7DmqA-)66@zGiNJl=kFPDuoacH5XP-sex zGI(4{r33{RP`Ii!tR8Lqa?9hdY*{(5W77kxH-2+dOICK3*|@T}bWPpSO@W5dOPZIS zoGV^iQgQ87D^6HaclUH&)?U1-p{Ok(@_IvYVQ&pY)at&4s{_?^mRQhp1n4=RG`D;@ z3+zww>1fA*7?L>5Ki7_f{CF1Fp`(bHTHmzM#5rjkWG+E>NzGIqit|t z*Ol{X>f2kl)UH3B9~b8X)uj~otZ2>7Y3|6aT$3*q*DqOEvCQC{=P&5UZ>UbpvzvN! zN%iH8b^glo!m7+PMfViHfxB?y1ciaT2nrHo;0{EONNvRw)MK6~b_>uGd|qcpo|+!2 zpxy-ifC?u_{bH&C{;w(q<>EI2yYCr(>#de8^#lDQ4-8%N>b3*xZfXzg*u5zLt>Bv8 z_M6rTy3rSIUJg02#;;ctgDc;??)t};tzNNtq}S6jclFrNOHvLb1-KhQK+5=( z3GbYdZqLt5Ra08iQYX~Bfzz{B4kHr0J2XqY@>_2W-?KZg?bU0B?;l>hwth?N_aFxX zpofO}r`N4JJ-_!F-g)wtI zhT6f)yUVg$I@@xJ2OAp)i>rFYZ#}l^OWl_}$oRLup+1o6uWKx?DjQq2Y-de_rbo5{ zAF7dcYeUL_Kw~wkkj1Gb9nx%9%C3X1@()VxP9zz%7Owh36_OiVJhH(!GiCj_61$D5{#THY@%sQs9CY3Z9sefeJ>Y zh*448!14$oH4PQYECz(3sDpp^?NfW!45qIAk2PKQj1KCpdfsJ-T2i-o*RBoLgxvh% zk+}!<)D1ZF4nB5p==#{(k8Rtpk6W}lzhg^nLcu#p)kzk+J}P6;VE+nXgFbFy=g?-K zQFNMie2@^s^j5NUg0Rd5|My^CpGbvZYE-xkyn#HwfFYQk5ljyuH>7SIL}Ca@OC}9K zkwl0SU^(k;K)pU~v!KGVqP#FaC)=AGAM1>^T3}L!V43MJMeJS)-!mO4RbT8cB3dBs z1Mdv%aQsONbJ8emm%n!54MT&mLbTp7SiASKxg*8-xe3+{yLK(EU1B$&V9%-_9KCnp zz};(82iNSq@$J}{gb#L@oTAaUd8l(?9A-PO=pS5^5v8|V0M~a45^A?}7r{BNr z*zJUu6o<}3{ImjiN!g!pk>Xr{BNYx*hQm=X!mBqE7$?CaK2R0D_54>bX)nXe(vo~+ z5hf?by2z>kEGzg5vu5uBmJE~^hNPyG>G_DV{)Rx5&T6gCx~rfm1?jlP=FU!kmfxa7 z)`w9SXx>!Xe{IF8g#*WAUu4v^EjCGVUshNUCD!v5Rn zHzP(4_(4wPf&csu{DSZkh1tSSjWM<|Ki2D@ZTPXockzFL#dQ=KU&JQ|qC-?}5gK?i zNBMG@z|f`{(aa)nf(6}}ClS0Ey+X~Vb&Pf?w~?5r;=*n||D(96%wxJ|1&}B-1MgvP zG|gk+mgA6NgkiC0blQi8eI|_8#7k1Lb6RZ;&$Z8QX{uRNyJ$KFV^f&*=VmZa^fQVv znNDFvnWc((c9C4?k4a1Fi)%fV^&M#giH${@FNzDvHzX!3_LY=2tc`QbT{EYx1NYB! z%jD<9Hz6;}xoZQqR9=*9uy)Wvgx7d{E=fgbw4~5NauP2`ET1YQ4JM@Y8P{qGF3Z@* zi29jUqqPMJppXu%hm~M382j0FWbfPYBIR7U+mjZP?#qZao04+j*-L|iSJA{~!@(gb zJs!I_+JDuBSa+0TRDS-j^=Oqw){qJ-Gmq$}u@9aO4Q2)o&R>ljs4GDDUbCRb)Dkn#o40CDz*ba|De#`K z*EwnL#e6UI-1@xVOZoHRduRW>kX0ITTo6o}4n@E+AOcAL1QL0!wx$}5k*E_^dTMf# z8{GjAYH#A3B7(6%ro7JvWt1E~;8FbMV&bYb1-XTL<{ zh{wG_d~aVe@3zT}yx-<=jgGovtn#aZ(dyxSo5ORRU2AcK#1EQZ(XD1kImVHt?2q1I8GVx(|6 zJrG%MM7cVgNlNzv6P4x=G7m6~ff^`cg308)Ml*jIu>S&^Dfb)$NN6+AIrPo5+5NdT zvf4mkbB52GlIF%O*j@H03!1kvY*q`BL$^>)w}7y*b_wDgXKz}WoV-#jz9e3>%6EMf zg%R~bLxN;VkdDd)Ue@?2cqSNN^7n11Ch0*@*$hyCW>D&oW|11b;%!>tfF_jwAmAYdFfC!*j6qgzgG4g^PYfd)^~x3L&Oo zllEMrMcIS3_iCa@K{7<%q?tSVS*-Ua=TSK(M@)1EJ z|N4qXPx7|V8kUcU`zF{E9%y?Z%^4oB&mq^sK0ikIp?$950^r9!2mG}Ao`5&qFYf?Ds3;V21sgE){-^ zu$PCi}MMHu8cBnQ8?dnIEfToT) z)Tsz!dkwJg3NN2X)VLsUvUU z11t4XEh)T)c3ADDrFLZ~u3ubFqWbI`B&zxTk0K)#Q5_likvy!65E944w@Fx!M^bks z2y4E(w8q9Sv(=UI`$$*|=YD(c9XLr3aJgJLcbMf;ib8f4dQ*{KkW}#uPB2pb5M@4~ zd72TOA&@)3!w0*8S=~YJbnf{U1``Re9Ek*q->*17&zF{x6c@v?*>qekpR4$z(*--L z3RPn_aF3RjmZP*bg|j>L#a=3?MfWb`N$6OC4!o*1LHzX@`Bx9S+iiyqjnsH*<1e$! zsoNC)2yeQbPvD)pv#po@e>b#Wb*y)T{HO8vg*O-7x~=nTi<)#T|hVS+;8QT3|taQN|YY}QLlkpe1{5cN78OjIKr zfJQ0A9a!TjbtDGWuCL*g8d_x_V}(MnXnlg&8lsLUe74&aDWK8&#qXcUXeRB4vq3K! z!kC;=ps&3=q`>4a#?X5BNnw+#C(xdtota92Ct0{L(aa`be1;eeY*!boGV-;5*m1J`5YOXX&pOGw@)t?o7QiIJZ@9hrbu zEBvxo&51&3oaH+H3uJ=~;POYxs}i(F>MKNe4=##o z(*=H^o&JfU_?-i<;#dDYB`!Yuu<(uXLE(<^PO0nDGe`OV#2-8iI>f1SN#Y;$)p&j+ zhkkoD9u=LR>suU@&`-461>Oi;6`{GHNF!*sn~Zug z*b!kR^BtVQ#2Jy5iQYtd93OLCM1BZ2#_;u89SJkVZV&B~hz(zOBkXpz2MP+lY<8Oz zyWOj(2#Vc45jR*=OA$js+#opw)L0>0t(fiOopWRONE6@neN&{Zx!Gnje_wvrEx5Y$ zZDrJoaC{rJBJ{t$S8wMRN9mXH(cK0cUlOwphr+#eoqI=j7P2Ug^Kt7RS5u0W4hG2p z@r2Ci(FMVzVz_d_Koi3!-U0{(9Zczr3{TwEXw4cOUF2lE(eZ z?)LKCd~Q_WN4);hfxQp)E$w+k{^;9p$WJ}BZPm$p+UhTPX3OL=j)l%hlJ-0&VBz1i zX-)|5-GuDwna)YB$^ZY(3H_ekC9{3T$)<$u^Al#X2%7DK3#B!sxbHgd_JBDS`8*kj zlamxxmE;*>^!pn(r^I7vlQlL#u{@=~%P^};s!Px3!Do*BOPn(aS0_^bDF=fwDm zB=hPen@^uQ?n%)F3g(?$+jZO4@{Www$V_W}<CH=4n{~xLPj&8lzdHDX z?bqBp^7W1KKfIkgDze%H%hRzb0N46$dHOxVkpov9t( z*!bcHDn8l(!y4|!fXxPWoXQ)d3gDy`hbrY*4yEoO$c}^-3=HIN7{F)4GG`5x9gNJp zL3H3?U5_efzsqQ3`L?uIZ-j0ak><`!_h&dV945G<9TSp-Jj@@hBx$I58A_H0Ycow6 zGEfj+RrFCwOG)nzCiLZowXp zDMm=zcj>o=l%LH={RMcqLemNiuMc2o8i1qb7!ZdmoiScI9ZunSjPFgzOqgYQ0kK+wpF7R% zYs$=Uq!RpK1|S0{lW93heg-fQuFvoU4JHF%XhHqaIs#B0tIj5WEdOEX@%{Ufgcy@& zZ^!WM%j?%}E~xUSMIHWj?^VqUMm&Z%Xh2u~XlVDV`)+-7_{XQQVF8U!jyoti(CX2O^J?s6rfLtj?a#K&=_6t*mb+Fzw1Io6Op~NOGkFTWM(v)p;f}wH``riGvnQ~S8t~I35|j3Id)$L16n$T z6Zu&(8&!qS;*RhHk%dN5QEaLk96K-5DOpswq+e@f_oJWR@dUqV$(F{%8QIRF)7!UP zTC(?MJ~GtEPX6FK&wmFnMRKl~D7_D_^ZJ0z1bYTB64Q`9ufa4Pl1(^-lnlU5Eo~*) zg-$`tD#_$MM^9Y2e%$Mb5%g&BiPo87ln|l+3Mw#BnS>+>;00Nq$jn3`robP<`=fEU zz>PxMOS&G|W)gbDA>#)P<*+HuVNc9(8LaW>OQ`qc11(T{*QPpe2H+rlyG{STMs6_mI|jA!8sq&&i)LmKwPDx*hY!%*zpxpUuBXeD?* zzc}~3v#%-lbM`g59~f_Lm7ct?Z?n0tJ%(yt#?NU;Nd~8bYY>Qr4hH<-0(f||@CAbZ zoQUp>nunW!ok2Yw4+Xc(xO!F#nG-sYn30j1%5fRl8QEExslHTSdK%~DyeY}qc1u`? zxT!>`5F{0Y`tUCi<`l97QI7vn*fxG#*v5!c?3ji$&-xi*3Resdzc)NgF(Tlz?nUW+ z%u#qwqk$^em1P8--~D|uRJgGGtmMveDHhtiTe1p0cU3oL}vl7qx34U zfI{A;4Sp+&F?AQ}7^va)SpXOXl>FYr^ zyTzDt7C?Os!c7@Kp=hr?HQF1Up6UR_2B&ktFm@1a`t4pd#G(G3c_EdY8zR~|K{cTj zy7o@_0M?DuUatN&@@V6$ORmR zs9E9si;5Ho5!p40{V-86s>9AO5B!)gk5f}t2nTX$wE)y3A$_4OvtP|Zih-oA&i46J z@7f;Yv`0m2$tENeCopAlGHdrftGo8l4P&KYjcnf(mDn2@FyYQ*c{VrPxH#(N#mTl! zxVHqW?nSik)_C|uU2Sek`=_^A6TpSkJC&YbcZx{@3Qr0BtlTO6%r$WL2AqCL;LMr0 z8nh6H(v$_&Nxjk32;OWSTws!*Z*cvTjk$J#5tV+Ff&|a8s01_V7HK7CK6Kud7hI&U zcxRw)P8FK>`|Nfn5vD!Xs!(OP+`Q;3lAcfqX?B;=*CTBRQVx`{i@J(-a8ZIW=5}5W z7erbBFl z`u($i!o_V?bt&}#6bkb4TxUnyyt%Cv{(^i+d@$>jJI@xblEY&AC7W^kk8Q7%0euO3I*V`lV&M zU)pe;Gw!6^!x?u{!orL@DdBU*o$=?}nK1Lt8NQApg!GS+U{lO+lZ!A55q%&BLQ=^r zjDjXW5Evu@H5K4fS~dW!P*AOcs8ZQzi7-c4kVqb3!2`Hi?k6aIt}iAC|2tnym+-m1 zm@eqw`C_`m&-KN0zn}k$KgV|}$f0nKd96*24S~|)oNUtZQoYnnI?@VnQYYVOQ!2M8 zbtA|)rtpoRkO?!*sD6tX&8|sIL#&!pSZdsqTvtE0tKDg;kbe>|Ipv=uER>&aOjx?q z8JoGUp>j^SQK*?CB$V=9sTGw63YHBnKEQucnvjt@(zB5Nesyh{ywQN*V__izkp})L zc`l|v6cy=&SpM6&?%IWUt&Mq!zIe^{?ljdV77q2v^H=PkMvh`o<}Bn^aT~cW1h%go z>|ffws3BmCFqMHRw3UKm_&jb%pQL!O45`Cr6^Ola(^CREYfVE*#E}+_tn2Gp&<@tz zQe8zBuRF%)Nux>I66=+(G!Y5H_9B?FsznDmGEW`~r8O~tFHQJKfBpXNZk#Q%F3W{l z9puyFyPl3pu{HVk)b;e0@s_Y3ykNT*;Na6(*Ckoi7N)r=%v)$aWK3|)&C6+HzX<(Z z(G7w&_5j(}Jo!sWT1PXxO|hkQ6BI1`hT{)S{0%Li@S&nWS@9KQ6DMWcrCHd-Gugrz zTpH_`$p$6>D0-!sXy`;7APGo8{+?9+3Dw{81oI~d6Z{ERP4Fhfqjfd&CX9w1n`tsE zNsg3{c#hYR3qf6{oL^fq^KTVExX4uy<|F_>jI#g;ISFP03LFG$E(jQ1>BtD~0C{VF z2;Q@gTNf~a4JGLQ@7^{K2jh%~`><44Sb4$Y@qg>)Z|r7xT#EB87k`0Cdrt+-4j?1y zKsbtxQ~oMN25@295mtfupy$^I=ayBo`}JZCI{wod;owY*C?J=HC=eW;$VSoD0`K*F zXv!Hq4>>wBu7bm(NBeR?#6TyEF0p68zDSEQm=be&6njv$N4TO0O%laU;_~i9KE@URlULX793{buNEhqwLF(6mkCwem7JS@g=*>v9wPFf zFt-CRpIsxh0Vl4ddcr&2ZDY@SN`hF3W{Qi;w3D$*}9CxD99&j71gJW^91FdNP8^t#em zMwi6U11|lbK2S!uAyid3_(=V)JeBgh^=@~)U@cm2Fv=xpr2vVIAq+1@>xu{`P%Bj> zt@$6Em>z%s-^2-pWy}I6bYGhWDLCLZX5jn}@0^x*a;vBEPQns*7$+cJxfWn#rPLQ@ z6(M>?@b3Q=N|&nIS}?JY89J0RUa-VPQhtNm85%abI{0hskfHm{*cN(F8cZM<2yUSInCv z!cBTAf&d+${{!i|Dcm31&k%B>*@jMthn7ZNtLXdz~L3w0&AaNhXKk%8O zUA2KXEP*Dg1f`V{bb1h`o6<`D;Q5ylxnzU!p;bJvhdN`@4uA{;QKxx!Aa91XcmwbN zMC#~O42ap{wHWn8;hZU4iZjh_&xkHHk!H+NA5-D~C|A6z(0jB3bJCXW{Gqag+bkZ7 zrW%K-%g^t;wk}LNmVX*ln$ao(@I-|jgQu7KV*taW-6;;MO<(>0TD?6sHsMhyJk;NYP^{T5U2ePBm~Z4 zJ>_0#uZG+UvN56%mrg{9IsHjvrTUts5zG(?#{Myrc%sGe-g_F0y6L8n)ggalNInxX zJov65zFM_C{s*3_eopZ`n(_Q^!BgOpgY=$`Y%PtxO~mwxyh~9LfwqO9LSA5C_B$DhEVE95i_e~1o_M~U=#&s-q|Q6v-klB14%|xo4Tm zJ;9Ges}&b8l`UfAt0e-dv{oxj({NsjQdh;xarf}?*3oBJH?mtP4vCVfWH$7SRC&}s60|bx*>S6vp6J_d} zQ0xd(0b+tG=nQ(pT4ZOkG{rTna*lLfO=8G$jR$NdN5V! z=}EB**fKDgn)M7yjiQ`Hq6DS64HaH!-mS1Gxzb5U_~t@~6fM91DrBFvvC+y$8eg^9 z`sA;)+VIaJ4Ddd@%`HD4X;>!zWvL-bev6gOg?GMTdNb9|52MeGmwQraa8rmZT)6Oq z?GOkwcx$+GRr_%*26A(plHf3a{AQ)p3X{Cmeg(}qHQXg)dW zl<#nk$PWz1%4ew7U9-V;9@bj}Ri-S4@~Bcvq4g|*JVWu6t_1zBV!1@T+)o2GRK9ag z9$^tRA(nj0MZh&92iw3H!J`wB38-M#Ha~v9G-eQ(*4;!T` zKYDaj>WEC_jlGCrX>hXwa^HU}8HHvVJ}!$QTc6#@XYz3$Z%1(d`JVMu;M|o5p{C z#O;#5Z*>cVj)Kd^SIHf=Xi*o-w}cntGdkw~JI->PfBl3xUU0cld%>fN1*eN({O{WV zn-XUfIrLayaW(2I8Lts>C|JZIYJ%d6q#0rqj#LEg^lM0eh0i7|>=qtN3O()o0jHi& zD36p;6Xp=P5k?9+aVI+^simSWVnZ6A5+V1x++*Ya*h2u@BmPu;|0A2@vl}B4L}M?& zk{F?z$No<6@BtoG(&bz{cSP~Urjy@P!W>DMp`36TXn~bLbS}`sp5g))b5#yGaV={5 zQHmRi&eBRi;{1!cs3#1fsBXL6T^eODQxp|dR@9RLaa7ET@uFk7;xobls}fw51KqNm zS>7~|KYluYplN%*_-CDW;en3A($9Wc5b!1^djkbJSI<1SF?etqPP`sELo9b)z>J~- z=vk=y1&u`Fl9FJ$A#4iAnn(^-ek~5kajfViOsXX-t2mg0(=UVTl09eo6^hIP4n+;M4vNjMZs6|B%%o4 zW3fUCce#cyAUq(~Ef!4nwuY9S#~&Nk`huaklki+Y^XV{Lal79qgPwyJE_^E@g-_k6 z`{y(`?zDcycx46Z3W@HMxet{?+01aa#=+;B1PLy}>p&k!p%37BAn15i-z%!TIQ?PG zmq0};+yEY|xLyfJs*{#Kc;1x+t*ns4fO%fE4FDqiui;OIMiKNaOuH0U-lJ|nA{D`p z3e1wZE<^AzvNj zBCH6J`CS=Iz1;5uHpTBMiIIRJ*)Rk=Fc-!5s%k1o0z>I0b?89-NeuE4bV(4yc|@}qrR4;V{TWQHwAuw;!IGT!VG^e zTE~Q^cqmT72@II`%dcuE`t;KwHY`UBkN-8qi3R&G|E9u>$uFkiXc~7%5Fd>Sg0`PA z;n*Rsp8>u?lZuqgV>p6Nc4&QUK#vtt(!=l~9zH6~#fWW&%VxO`%{5X|I4&hE1v!r) ztg@gt%>}WF{8KYw*7&c5=gx~=x&R8I2p;(=eiie+a14tGVi>}X3Wi0114W>MPcfFm z(ZIr)E#`Go@lADY0pBieFJa%D$>^j#VhR#Y{q{g$)|(|xhgl0FoW*Rm^dVQCYHcm3 zwFR9!HQ!VaL9Mew(8->upvEu_!ph}gzA2_e%!s}JdS-%iO5}>s0QLdQU&}!ycQe-- zXi7m3wVF7DMKI?EZdGtnnpA=Ib{u&$;P`-t!+^oIREaGNkXE^|q!c}i)}3G@fnZrVZfQc+CHeh?lwWOFUPa=u;4 z*|T%u%G(BaUez*BZ}g@$4&Jc4XmQ*(-)X3+Y7NxaG_+Os_9Rs1Em_gOO0v02eSsAi zLR>EDtf@%8U0buX-QR6?M!7xJDYk+616$)PU#VK&Ik&d2PhOgl<1|Z-hUB~@OBB1D z|3ug?ZAD!ke&)opuj%%jOE5gG!lsi=qyP8e17n0H(u*M&c|ISEez)J|T(GY-O5$^N zjJ5TgT(NaupxK~L^46`oc5B{(*!zB5Q|WK4sjjSPEnl=ap(1Nx_wpXYfUCq?yEI$p zE)!8vSX){#RKL2lWT7QG(&4E{wk~g7xjx2nb9qlkm?<&$`AP<6{(;Ro@^Lruf!GpoTyHl=eCD zPOlDquimD8ZnDvOAZ#Sw(~rLNd0Z7LlNX?azaK0JfO-dgem5Sj~PNO6Fi4u)^RdFj z#XIHu^1ADmjSUX09Nf`&)!~A^rm9^l2M^YN{Mj#c8RBAJZ}p~|+Kh2^1)@GGwPvtw z;E`?jUfOr9GbJH4YoNk&*QIwomFti{YS=#a(5{h_UmC9pw5CRyQZ|{#Veob{SdRkepZ#pfif9emVm^VJ z)15mDc>BWOeMKOuy$!X1gyan9q}vgaXK7s9NM&OKu#GT1YkKTPTIWPYAHHP*S{LL` z0S;lI_D!AB_1T{$09hBEUN_LH0U3P^LP$LmkW*e6eHlG9 zKLCvH1B`Pq#khvMIuMZ#=W1Cg=)^!QgcJ;#=#Fj-!a)fWr7;rG0QbKUmMgaox|^PX zyOE6R+$rlww}Y>m%?wcoIne-kftrxU7Z+iwPkB*IaZQFVH6_+#i@-o?wGF9K{)PhN zVy#zD5ypX#hz(83qUuz1rp)wtco<@KtsR)qe$UGd1k*1S@4c~{SghkLI5 z*X;$VU1y_e?!0+O%p3V&RIbdA5e}qO{^rShe@XZ$kem=-2kf%YNq1);G6PYQoNU3M z_wZtH+*n#TQ1tLyQ|yFb2c8=OUG~&f20+d*0dlkrfU~5hJCND!OgCasiVq-A^bP#|O)}brWdgbg1-5&XI=uJMZgwB?Rvt?%BY_897 zHlK2r;b&8?@&ZSbA5W?uo;SQVGa=fRZ7Fco4CMA#F4;rsUI*JxA#{k4{-b{W+Jw(T>p!0GIrJZ8J@lXO=Rp5aK8ODE+tBxt{-ZwM zM(d%tgZ`w4K!4G2eaxfDtyL(NtmQ>>4Q~;1(1{7-Nzw5adIQ{L5Wb>-k{obUx2haAE4RY9+xcw zQ%p_g#UF5{9Ns!~1<(vIxf67o3A!bn4RRYg7U$0fJfh(Z7BR6N6&O14AUx{ObUDzg z9_j3QL=uso%nI710S;}Q7o}%lR2_yo81+fH6$lP${VJI@mN}&lE$OWlY$h)ZE^6j* zXWfCUvEql{-&U~Z)JWg$bLa1BNbRsU853=Vfgjwv^WM?oXS24}UU%D>(|H?{b>c@| zsRg(H>)M`&PxN$^uitv_N?YVZqM@~5`;wuX@BCX#{x9!+@;8+!BomV`v-Jl$l*z2he<67@y*!2B zv{9}iwKYU6j^%Re^+~U}y>JrPQI)AU3I6t_NlpU!FMMy_(%Q*Rf`QBHkH9@JcB;)7 zUmNBgxMzRJJ#goKau4uPfgN)X?}U3m_`c>IDB0ZExtZ{aY=M(3AE<~c2;|12z?bj9 ztT13wKM)T-7PVzEd@LN7lf@w$r@(I40{{ryyV9s}Y|u@j#OxVZDn!;&mL0o5*eSf3 zefU&O)A}#0Z`$*NJqI6Kz3SE#IsVh9PZta{9=Uw;{_>t-ao)+3xhn=M{N3|A2M+X% zJ->U^9Rp)uTAh$PvS`;6W#iX4n!d1V_@>Ppn^v|o5KWwY9lGp~4G(h(TsBu6D8Q6o zygOo#cze|KB5IEDAARck>jdb8>8V(okcZdT!|On=PyT}`9=-}ub9K@!o&7WaMC`tv zs_yQp`TOSYJh-nrF6GHpIm@~iHZ7=HY*?{miEm*+dU;88S9RT3)Aq$H2V-L96dbLQ zZ;P7ORkf_XwxkB__z2cZf#_cWjwf^Z+}LBWh?`OF1|h3QgYw{l5bTvfVw9ud!e)ey zb?Yboa=OozPD_ZCW@S*cfzFDn@*x0RN=R2As(8wLz$&IX>I8+niT=gad?_P2E z>J@ul-hKHuR}X%9rE*ilbB|m;c3ICACr_SiIJ!8nrg!wj=;r6bZ|PfpVYhZm%Qr4Y zTme0%KhP}$-zuQEX>Ja@AW=vZAevU*?L@=J!`23}&$A3e%BlYdJ($-~YgiW#NO_ZEJ&`tR1V(%o@C8Pu$7v`~ z0qO>(*aQRPk%Z#ukQf9%B5r?yAY8iYtHa;BV{@-0)otdV^kulrhW3YUy8FK_KfwzR zEdAs_@GnEk?%+!tmRF-h<2-oV6crP;FB8(cHjvIub!=W)a&!u`XnIzg8 zfmxYQ1`W_8$uPvyfu|J9Se z##Hw%{>e2?apcZe1^9{4XRmgfOrAHs_xd{)6Yp?{_O33w=#am`r_c}qn&JE&D$26C zs{+wTuHt>E$qDgL(K6U!l*F}63Nx!`qErCwa)Osa$I@|&;{z;q9e(yDjc?t?7qME$B)+z`#bGoq^;7o?YiT8VshQK z1$=UyRcIO&g}&p9kKSju+_cCl>1~(p+IQF>ebpkEi;B#A#LEr`vt!X;T3Si@5wIFq zeuR`eF+T!66lKkl;F^19TtlkY`{6x^e3*S_!GCmiwae3jQ7`x)Q#Xnw5x*b~vQ z`iSf!@+Z&AzmT6v=5LOQatBd z#xhuYwLo;lK8v!?dvy6@Y0em=}O%)M6H|7c>avA_itW1mYtJaFEjCNZlu~$6P>c z4g<@HT~AGvvnQ@LfEO7MNDJ-~1h#gQ9o2K~xZ|Gwwfe^V66baHx}Br) zvrpI(b(TEL()#VR$Q6XGHR_g^+Hol87E*`Q3)}m60P+}Bn;sZHQDOiK*2|J)TQ1lBDDr_-R9|{Y| zB(rjR^M$)_Td}4#?`fYqOnRj?hfA-Ij@zbH!*`LCK1q-A z69Nxt7kRrVh-cO};f}~JvU{MBWn>vpDd<2CY9wUR8Fi#f>e5J=#4If(aHN{ssAX_N zRr6-sD`K2|53Kn%FJ66v{P(Ap{RdPU$?aJdRB7ysopP&CFPJ4=G~bz+_N5Px-1>(D z6Ez$8zwDAOf=ALL&4b9*JqiAt%~f*80(SCQ734b*J+qk60*5?TB;j&kZ;Qw_180U0 z3UM^uXvk}|ev;QJOIsX_x6R<-g1;&^hvzUjr!u#a9DX$OMbU3me^pSwVTN;X&ctx8(qtc#nd}!nSgS|gGJoM<1&dyu^bX(7(g)F_~93Y z-qqFOU{Uw|@&^x``sh0HQ*@pCy>y2Z&v_7eoP*vNm^Lc#`RE`j3jT~_^nfPNQI=U8 zlyi7xFv$-hA6fWJ4*&@=9uu$p60CgcZ~MNLlN0SpmyqD9L{`!vwaP1augBnZ_>x5A z;G@)0eab*OnkEjKI5iUeSaAczc-j!AordbteQX2_=Bh9`tPGvLm8J+lMXv+fn- zMJ=3WAAk6~+o)kHY;Tc6;_a00#9r0IGx^&H^HGN~<2I$Dl}1?xU-4qrZ&D}gp#Y&E zpPZ7h7-kt^#`lSKlf{!O*-P!rU?Y2t;xR$pt^hGp0lkuOmOqj|kY9Bh`NNU1qLJE(;XW}RN_+#w{i)D{7ot}jxmOgv;0ZY* zm>>X*(Xr3zya9M5@^bK`)WT_;M?uTFT+KpLoJS0`Y1+e{(m_ivrs zee}VegU^nRd=>tqyAB;I*w``m@?~=lEN>jyu6ym|Nq5(9MM-CC`|8Vj7N6eL5^^E+ z-=91F1UV;~ceEC-nHN|ww~3t*_xuL4<9Sox~SvuvZ{Sc z>(_2MNoO1>*Uoqo&Um}#ChNa1Pnb9PjBLJ=v=?X0<1PtUoDf90@FhuFngkr6is4FQ zDws7Mv`1l!0}qmx6@lw0ehMqnZ{viLnJgH(q z+M>j`Ir3Y9grNt@V-|F$E*MR$k;_;&(Heh?Gg2PYx{C_G%`TL`XiJM<(Jw!M57`+o zcjN=~9M0nE12r`FE&&EqR=Oi90xmV=-a{|bp?W|PsVgPVfw$rJq@j`OA@PgSqh0Q- zD7_VF$7+`gdT%sYMYV)-jtQ@goDAUu`Dc|Dd-;Nejqz3TPwV3cAMnR?bf+yCahPjq z0*QcZ|GKi~RCyCk94VAUkMO&!BKe3lJ$@x#+-yVf3;yBHjv-gmYh?bP-ctTopav-QjZ$;K~E~;UJH2gc#|a#p14FxMDwN&nUS`L z7Os-sfH7~dIP?%}s-31b3+MU?yF%{W{P2BBizb6~XvR&DHDTX#{`HDGIJyYI>f9W6 zdNf+NyRxDvEx;Rw2GkEW3=e!E^aUR9X_=0d7#FyD;zHPzOj$G-X;_Gie+_*xT;kPDq|7z5@*gxP|*D z=6)D(zy}S>$ZW`q!+ZrVZ{oDuKv}wuIzl0T1cfy;2aR-f5UttFoGTj_Sd4g*soxqQ zm`nq~J3ziMW4VHOC~W79n?SX*UO!}_a=sWGqL?c#D$L1BNp`2(?a}z-wFT?nQ8D9B z^MhE?L&K%Xo3IMdFu{~NnEUwSM6ZATbC6=g$(d%Bh-XHgXkI8kHu9Y?I;D=P7>pH0 z&Y%|+u2AWZOoH?28Q@IO8}y%<23`iv4A1NOa1d7m!~qOMKmTfziR%g5#hLy$d+z}s zS9RtM-*fMsxznp=MqQd2jYgwU@3L&^x_4VP2HW6RmYZAwo7e^%z@~0Ou|pC^Ev1dRV$Aifmrg9 zk7NXstjHqyNk$QS1}{#vzA$(&E0*Y$&~s!Lx^q?9DS0jKT$}J-wIl{knL^%vX~+*x z6->391QIeN4FX0fkHDuJ(U=*DrFI!fZFXcM@lqW;TNq5$C|Zb-B-)6=F>L($+zI#w z>i0;Jj-4S=%hLk4CoW4U(on)v&Pqh6Qr$g00mJK_&+c>}A%{Au-=eU*_PUq;ZRd$s zj&8Vhxa-jD^o3e_XZqfI?|pkKyGA?Se)w?fC7<8JUU^M>;n|JDo^P|o&)xIr;^x}% zopp+Bc-W@YNw4nsh`Ln!*Bx(E_t#&6?=iv}2*eV1 zHo{wqK~#~2(8bEuAVN14N&2{#G%Hl*Nko|zQn1lw_GwC)5R`uoLAkPLWqi02g7Wu< zsv@)Zoxktubd4+6>}m|DTNBrItsJZ3GBdjN4R$RKdvj#w*`*g;G}O7EvQgP(vu@jQ zq@<6e=P(S3w7(j}XZ!^BsY&<}s!+g30E?nfjY?7`+O(i0C+z*gF^UQW%19CG9RL2AgE%uY(Gy&Ci6xbqsgP72vB{dn=;*K2nyLR0ORuG`=J1mteJ=;>Kx>S z$tXmyX^?f#=)jjtgu2Nf3WmlGC5~JNsq&X!+WW3_yY0TwrTIdq<#jGfK5*}N^!Z0-V4u5Vmn2UVISGD<?dvD!;tWt>gWTdMBp~h#U~Qd0bH;nTMDHoY}~~I zd=N!0JMLWxpOGX}-W2eGF^(kJPK<4tnN}c^)rM&QY36}2n_pIZiqS&|E5}y>Gdyz! zbjp&5#PoiQ)1bp7BnD0tUD%O+GAV`>&=|x%>FAW60$HSdLbLj=14L;axL1dH(!iks zvLAE-=CGjv<`edBhb6@>?gpR{U4oi5I4-l@4ISN@{@z4MlA}<}0VbVkZC343WzhQblM%Z30&z4#G*O&a$OzBibtZZPIeIfW=$Z}BM#`mkodgamra zmh3mqcqZ3_%$d9r1soWS2rFo8Yb3?G2?}2-)Zpiekq5-i6gKnqXl`cFYj7Ztk}KSl z0p8o|hjgB&OXo`FqHX(V*MAW+oS;Kbl+Cqgp1f(k8wFiK_s+gyod>?rA-tq|OCg6f zRUn?yiR5;ZakS-Wp(q5v6vQbg%HC|15>N`8O&FzCbL6(rZNJS~-E$_Bp!Z}<6qAkz zcyqo4M52npE}m{oCTgq8%Oc^DfZyl!z_;4LI`n`CgulT0jr))<$C==c!1P0Lhtymb zxmHMy_~eOCugK>iy5ZuZ?2`TO+IRBpbr(KSw)4TwY~Dl`VpuJ-dyZea>l)FsXn1_@ z&QJ3^KfNxB9kG)&!lgQB8epiRL^C9-Qc#u^rM_@&aHT}_cmojasP(0*kPygbGL#fS zH2^AvP`)5aprJZf2(493eI^_46zwqUpP{M(MEvyjnwstGxgBv9G>e~Uzm4zURZ!lf zVrTS|_En?&*@JrdGa^#0@C#mnJv9pNQG*CBma3d0YE8gchY#(fGgG``J<3Et*cOiE z_8;6vzHu&RAf8=7rWn+?ggFoot}il}LN+{&DI$ykf1=LM3v5~9h~XkpAuoP~A8giV zj);~{#DKoNHgV*5_u-G12RE@7*9FVZKiqv)b7}+o_O?LTUD_3^#DVZ^Sf0dzAdJ7* zIXdZ#|6#*r@lx?)x`7H0PH)#~eI021Zy|msg~jTE3YOhEOsvM9X)CCH%y$FN01i9f z)S9d(!3W#HR@@--N_HzrJrakC^5{OC%!5wxw=l1+BC~My088~&^{$Mqsjlc}X&8*f z`WAKp%&HfSu=S;q)Yc|RrP_1d3sJb~gJmN9&dSGUV_l5$A}tQ?tG-4JH6*++t44i@ zC?QWYPEavVuz_k$Gl-2q&Ky}0XJ|w&WzR5~8H$Y;9j*u71Og?AmO4G~Cie*IlS-Qz zwKm!AEe`E1+{xoThd<_x)jZYIv~BYjPMv=kw#M`Z_R`k$-P+M%u|GV}^wE3SRqIEZ zUALBWG<8ZHNN-y>YN|-Wu+1vv5!?K}Rv!f{bGY1M@^@N5-QWvzw*J>+AYHLT>=hnt&-`tOw*!YD|2mKyqTR5vc zqCH&A>6Kh1{kbiyCzu-`GVjO~=E^ohY63_xg-rOk;7}V0TvN;;&~^~y^xjCuR2&O>BTY3E zE%~ONgtZBfD#W=dNzo=r zBe;UI()Jcnx>2te|D;*!oC=*+X(i&i^ljg|e}8LtlwBVde5d~x7V(gBTW3^ z-%QqeNxKo{+`sd_r?hMR?xlA7g-_r4g*O~xMM}4|bJ-HM!w+(trP^C3fU}*#HL5Qj z67AMjJ7hovB5^^5fEsf$g-l;oI~5v)kj?DknBkIzV@}7}RIx!g#r4_2@ZzEJL%I>& zOB1nZl!Fc3hpFz1iNN_~iE^VSqXoPu5|K}6v$yyIS_+`j=8XEiUjHb24(0dR%K`ru z_WTxKFz7>zGIUm9+qHYLrQ*dm9Vk{yyr{&XeUo)(e;fDGD8NR9O+&EE<*=E}`t{D> zp-PBxVu@Asu$Ed;up{f8OXAt*uAJwP#i@Hm6WWc#ukAVLMw@Ze{gD{*&*57GEM@zc zgRI-i2pn;WGN6!@M=i*kjXCnM8BXHpk4Y}s<&lpO#9boWNRtJZM|4qi`lMf{_RnyV zq6q7A)+*>-5^B3pgQ#nE*KA}E_5zC#C)?cQukgo%{BRA^FBiKFUJIcibC490msf?# zIDAIdtY~l4-#g(d{CD1z>ylF&{NP7Y^srX<@)Rl7&uU=o9i;D(1~Ki)Qyd5o$8gkDc}gQbPu3-PM71>U7c4Q z=r`ds$^9^4nJIbCF4k_(!YB`t&#ncoaY9YapDR{A~rgCA=NR5Bx&DZ zJ;TG=Hw>EdN8n(uuu!;D_0^Y(Zfgg#yN7U_Ggz25iLIL`vRm+oZcm0XdkMID%!E#w zJEt8@G&*M*C(`Bf=gywh(*^Z9-PnL2K?5gaK4NhY#|-!NOcAoJ2c~Mhq*c4WjYk$) z{`mZVX*)K9d~cdSzE1`Go7q2J><_GqKk@4qM{g`&-CgzWy)`U#_iZ}puF!5Of~c>t z+AN}Kf3V$o?~cPZZL>Uklx}3NwuAG#KE!-`JL*J-Pzqxtf(UszHh#r%yPb9goeFJ~ z?+>{Rw;Lw|GvtnpO_#c@sLhj&ENa38-|Oq`=|au3G>2s{ndE#=50Iz0Br8RH>r%l? zZn}H|r0}xmE-#%`D%)jGr}xOIqKr_wnIm7xckx4x0WljN!QBG5=P1>2oVMEb%Q?iBHf(TsLXqdp;L-`J#hC z8qIt9@qEBZo*WdLM0`MqFX7tRUs*55U7iQZ=!ynKo25$uk1zwj+F{QlMB|w&5VpN; zd}d=~F5)D-sPVu`{U!J7Me+FNJ&r$;7uQzT?_>Ypet9eTtspi z{rvILl^@vk(6so+CA9IUw(j3I`q3=RPu%@5(MoNnHOA$$^Mp^SC6Hd-S?P5?^YRb_ zy3*|h6woKSeHnTwcmQu9LsyQ?mqOX^sOqbW=@op5nUe74K2k2V4YQ^G( zCVPg3?2*{$hh>iigAA(M?2opr7%7X@aDj*->Y^gMh(?)dx2Ce8S4^Ag zEs+_$rD~eBQ26rnYas-ywjruf6s=e?)T4>}QWKL6Ahw%DJL0r9=dfOCb26j%hR*H1R1bI~~XOBQrx(iUVI z2|w@CEwDaqHl7im?LutNcVRRBE1nj=E`P<-45t32@st9zW`w7I<%@;<{(m`N6fT>< z7d!sa^fez_N%{VNg{tP!)&GtAoZ+pceGb9yNAhYLyI6HMM?-Gp4I)H`hxL#=QbmJq zgvnolK5Cm_>eGeRIJ^Ur#M`LSSU4y#ba&RqXY`^VzCP{yNPL~)`$&eJ_I)JC&hULC z&BpCa!Cn+- z3JG^uJCNzLJH40-H6427khatEFQl&s%h{7^xI#f)=A>7Y9O)qSL>VxG)d>|6@D;&u ziPDhVrvwwQ>A|8XK1!A0+M|vVY3{j#Qa~+sFoip?iccZbnDP4wMb7d4geo(BKcUPy zzMoJh4f7XF;Apamc!lYpq_!6w4gFexNq91s8y7Wwixv#csZW!hRMxlzyu0%;c(UquMrqQHlqF+s^KO6L@o1M}XW_Np{%8z*bZpsW? z0i}wx8U3M>eMtB-VI<+ngpq^^1tSXv@e8Yed9`V+LvA7>oF#T1Xxju_uQ5lN)<1=9 zE4UK0zUf~^3!cHtXbaQ7j8Jmgm#K;TwWGEl`c?q}5wU?=u25Sgg&E4Z>LY_VRC8cn zJXlK(-zc;jYbc+RWz1p}X@c%*g<&+LssD0xq~@yX&7;54v5P&mg|LmSBg`9RU(|7J z@aDrAdY0p}9Iql3Ui^91Bxb7Zqz{cB;(B{CVu;+0wWS`Qj)S9)?vG4jjKB_<4jdZMS8~HFPfUIUHgN6753SjU{QeY<oTBUN|apq%@ob7vcq?d0~NvN5n~ zW!p-b(gSjztfJ0w&9SJkfN*&$Z|kLZvZ5tU`#xGSl8R^YKHA8%@8crDOx}l}x}x8T z^po|*UaXnBvU z`pOaqPzPZpkO$3^!6}Y8$hD4?8peM@atMhMt5Xf}nBTYtJ}18g#F2PxeGa~dJ14sV z3)Wn$?U)q*#}CaA`$=T$64!Ej$p4`Ai)+te{fMKownHA9!#=OyUr&FsA_!?hoWre; z#sxN>TCz{+dQ2Qk_hI5#LjA(A<^%JUIk&iyC?ld^tVrC4Xg_AhKYEEyy?q*SnZ1p+G2Pqf0?*`axWCMTXv%{VH-K#L_2xb-rlDFb zL9r7Ae54UOs$b*0Kpx<|9(ksde1i*4^2}hCdL)Ro{cXnuK_6nE*i)lE{9#dQAh?-5 zxxxG`PQRe}reV!~(^*21Jm@KCFvY^(&3M}ki^Lsi*ScgsY}>uUN_AORW31E%LJP7? z*6p0_(^0!&RU?MEp8voP9k%-39%P2XBUumEI1HX(dp-|ccjKxX+st8h6Q5-F4ROGK zrq;aqPREy=uE-o#J*&iF{@5Q1nFi74gA{QZ)C~5Q|67(B`cSw4`;`2$AGXo|P5U%d z_jB6kfc6Zh8S~gH2F(~GhK%^(dAUsAqd&D~pK_WpVJxQ^6UK6yQ7~35cx4XFh;see zWfNsYswgOSW3r7x?vL7)S#F&(9sMBYJ2;2;($;45ULqzlelL;_rra4x z1?C2`VnLW3*1bJ#Eji*rHWDf$9!x?>CZjmnTL%A_wS8CHl?P7WI#UXhD?AjEmBQZ( z$P7cJa6!Hbe5_8m@Nt9#0}^n6wgG_8}#Qsy-Mj z>V&!oqTA8Y(T>n)Dssa^!@82uqD~h1Dy@5)KUk-|Hy+=LAJLsvUa~bFAEpS%pi_Lk zQj#jiE6g2;yiqH{xhu#0mCy@7N9G|G@-g%t?G%owVU&xvSmAEBS@d909|R#Y3?!Nx zptx@~{&xCH zG>Tx;Ci$f&tV@v%jvUxDh0nGUM8p-j+r+-$zh=TVoV{%XDK|HZt5UojH?kU&{y1%p zvK5TeEC|HNATdHGGe48VS9(HwY=zsaEpvNUXn(iRiA)~u*xk5{f>!wagt-UX7s z+rs>IHb;BT?q`;6{+oEBRFr#qWK{ zKg#WV7wh*B*)jSCczCl_8Nue1Z7Xwjp{;Ab-bR^1S2H&?n@sd=)@eQ+_u?mP8MPw#y;Z25FLsam zE}ulvyjgTi)O?R)t~mxZVDjHnZ;n9?n42bkm$r`Rh(3mDz*y?_ zb!xkEpF4&#&pY#;-zM$H#>5&E4#B4`0pZs+gk_70jVKaP+h)P#Wp2sk zrf4)6>FpG&3#!IOC|GyPN_3pHDI*?=zaB;WwVK9bKEg!}e+uX3jNYz*B3JVO=-w_Q zPn$8P{?Y)2oCZpLMy6PS`6m&W?5Xe zV>6wmHcd4nLqhpCx+$T#ysN8=!@ht|c;6wPV5t2!GhQ)XJ(dsE8Jz^{7H=BA0&E}& z`WpTV($|uxPG2)B$PDRgSbKGDSbHQq5C4{w%w>srVYV)ef7M7tHR&r<2^1TFk2^zO zpL}uxeHC-|)GCasoBhb-XUPgtaEdFSTaopPg1#ve!RwK|V}eI;3LfVeFRPMUU&UwE zSK-OM$op?hq4IXLwnk?nC}rda8$pegjvT9EKIiD?C-PBkRN2U88%5Z=POzHvDe|!y z&7mT(^bPVc1IUm(sDiitbva+K-DXxbnj_4aJGU`hU(nQ2$nj{Gg|JzywYcu>*v24L=h;BqT&mx6*g zKq&-s^XP5>g$zMkRuU231_U-@l}GuHkeBpI`#L7i!64ZsyIp8duE;Ls>d9YK>?P_z z;e68-;Wn}-X<3=s+p|lc+Wp`TSWLfwY>>`&;t!lB`%8i`FP)j+M-|b|A)|&{Y>|hf zT&b=7;@66v6^`^ho6kFZ@@?rq#`kK+p8Oh9r#&cs0nU8*BUfJe5%!;ph6&H%jrk1D zDIt7D4fv7z;f8wvnh_OH;c?9rPnZ%cR7AyM8$)EK9x@IbR}@3HqyvZVij8K<&LlGv zkRPw0CQ;tRoZ)a3LoG#?pon7H#&?)g%-3#5%CTN-CLz=Tto%yo#b$~Ik)v=fY68S5 zy^DRSP#0r+Jz*ByxiiQjZt;WT|5Zc_OIA2eYMm#XI9=)$@eA~jXol{!TG*W3e1wx@6J9d2@Sv(kWEiPuQ%bQ0}M(u#yZ?I!l>B`t4Bk}?z^C7<82Qt(_XBJEc(K%6Gto3sjJ^B2egZ|o>;X~ zR>8`gt>98Fx683|&m|jzRa^FNnJZFARr_H&<@ zUtLwk?18}G$5&kS*#K&7F4A^z+UiFH_er&~2UXUstx;(C1GN<}088=^@Q~`Z5``f7 zuHKaE^xhi)sLiOlg6aeCby4Mo0qPCjH0eO(hLC*~*q&FW1vIt~jZ`|@fl*~8el&3< ztv}dHr6kN88M=#>tg7r0lXtZFCG2XB~U$Lkf%Vn znB0M+U)^$|TZPDAD>LpNl%SkQ5^l3zs8lV*ui)>flSr(c55q@X>vRN4lYwx`M~U)g z6a>e^+~N(~5RFRZdBq#(npz~ywmn$de#`r3tvY#8Y48&E*7o%JwarJbXh?5oZ(R~B z#WOH$G{k$mdR}YK^@rG_{xW6*G?Y5+$mq(t&Jg=1DzD)IEAyXz(pN@S5o7Q2D}3)` zYFV@z+5afXDJ77g-O&;Xpo$*&ifJ}N2QF04C5I;!x8U3+C_`*1z*>Ll1-#yn>7-;iE7it^y`vHXhW*bykZ0$3XAd2Yu zK)kyi-8_Wm1;_iz(!SBQC{Zv*et?qHrt_h zS3lQ>vt%1(u*d;436aHM*YUn!;=Mx=6_ylv5epAs+9N|Eu=$9e~@Awu$3k59)50)yE<0)KIH!u(i(eNHCxvK`m1d z5he5q$xv4bHS4IQ7fjRQZc=K=^R6cFlZa<}@x$ye*m0?rorqL5E2XMAI4;KgiXE0& zif%I#cV~=|@+-TZeJDO1N^a2PfBwcfq9yJm*ZHmY0WUjox&Du?^E*ibEU@GfR(1$Z zR1R`ljtOpP@$Q2zhiIijK(fVcX#c3yU0~YP`9f#@Oxv-UOjY}5JPT))Zzo356$}9b zU9;de=q@k?P{OK77N-}~R%T`@D!Q?Lgr?i)=z zAM|$gJ=9Huz|@R2kg74LZBvg`6x;foIW|;9Fie7ZY3Ny#Ah40@lby3kkSC$fz$hqI zy?E`5Q{Y_|c&0pKXeZhuY~2MHXpaycayp?@fll-bi-o(?NF^$mAs5|c>0mY&iGV>c zCQ}S-G^IOk6?6%1(dFjWL81>HMA8}VE$;itWW5#}OqV-5iRYr1E}AzuySJMVv$4VS z1;xn5$4caLxZ4~pIV%bqx#-|qJQ-c7(te};UaNU`I5hsp37F~$h2Jg4-Wv{eEULcq zjq85-H=hg*w#`d^_QUMHm%pap_{8|DMbwog>`BvURO!+7SS=TQ@%GQ%4mNhH{0Z~U z>vA{U`O=9t4N6WoL0vzorGlJIjHCo^u;97rO_L5RQeCsas)1MzlDIQgja;F72Kq9$ zYP{#0Ins7fUD>c}Jd`JAi;Je!mkqd`{fW&9994m#Wu`4dk>?=Ty2lP0Xc{v}U}`Zf z2Na*`W{QXEDWLRH#&|H0H6HLxYgA~>rnBcy&=aj`CYd{Unsw>WAkwV+3hfL8#dl_B z$VR^@@77j*@ykU)2>N+&^Le{XFQ#||jbx6Wv(dq8qG$!(eQl?5jLQh;313jFO?v7T zmN3birKx=&q9nl*csop5ij?Txlu>Q>q9TPsQw6sl<|Wq--F4zK3*D%FQ%%BP8tJw{ z9Co{0?u~-m?Hc7Tf}9oWM%kH_%NEX?-QRUy_jwaMFM)STX7lPY>5^@Cj_{gCfY+Kp zhXSdqlaGRQh^JZ(wgszJR^0c8n@GN0Ixx6~OR|5f{oozZ<(+ohQc2qXt0%eiyHyu) zf1&;8{lwVGcXun!DveIo)g{8IB_`HwO9DV zJ^n()ip%rG3_M(;ymkCPAis=4zW$81m~sWzxlbO%38yJ^2A@$5srQ*GN2ab~u4jdXnT z9xfL?qE-*|i56Q|t!THXsDRr*id1VURBBU?q+6JP;%KqkP(B%cZ5gF-1%|k9D3DaQ zL>i&EeX6ep?Xh4SKF5aznjoJF(7^VyaNewbssxjWWmLiCY`Jr0s^HYX%eh?gZwhY? zG(rQHwDvq)lHUX_W$%@ie@F5(#3>QY)EM>a#9^-ETeq-V4K{^faN)TZ7P;Fc!jfa$ zrwWW@D4GwGj10RNN>C!gGh?x+o$x6xDSGPYV<>!5b<)kMKmviyUJdOelR#j`@|ebo zywFJ_WWdl*EVpW(x@2GUf#|*i+Qa${6KzKbN@A)c5(-}-I`sRt@u}Nye`+JYSBAQ* z-?69N)zxlUHI(U`TbFIsw4m)6eYO$aiUIq^O%>o_AjpSYSD0=``iHmx* z6F;RK1D+Ow9|#V{wd2spT)T4s0oKa)OkE67RJOozvuY*J;5YQ8mAvLN5%!wQKyqfFzDVJ)sYeX!#oyD7em|lH zBfWl~!@-RrFbGp-5;z@EAeivNov@c0PvZz7Aln^1wY)rjbMxr@fs5LH(RR_m{4LGn zx8T={14@*lL23_ zu_k!+ehCsSA~xK|_E?YbH4dr+sH!iQgVLijAsO3o1jk^K$6tOD5R_n>$EVcPjyyXS zaWxuOW0kzAonD}~i`-&)(B-R)(<`K|=>9+CT zmx>g6h;m2Vwjp+-ONBBAy@eX!at8AgQA|0{_P_t#kwonowPh_5NE1 z7dBgdEY{1!-W2;uX&Zlz;9cN<_W6DK^Rw-Eem6ZAUgqQPFMMvi$9(<<#@}FhF7y6R z8t(`A#4*dAnfE+e_}qBUgy+WheyhlMB$GACgv=c`lA!2Anw!q31f>^yem> zw6+T+2=)Gm>V?o{5zsOPE+o_u2WzF^j3~P6kiYDhK_y89-G-hs2B{q7ZF|Y9ScIyM zd>&9Ynv$lBN-6zWW+rNMB?vbwAwe^Z;`JCZvX64Y= z6Z=2dwlcN8vCV6%i_~|mdb?^Oojv=Cc0l}?9;@R6SL160iJnV$froj)!_v&9df?l$ zCE?q186_mCcyAXKutFOb91c7vi2Xw6mrZSCC_;L zb{V%1*&p#(w3eC>`s3hb)Q2Ju$nmTXmV_I`*-!(H&a8BEHdE842gi zHtj1$Zw9H6&D+U3m#oqNRlqg@8Q~HxBgBLoRcAFYy~GcGO2DP?Msy*fxFDc9N=aHF zDp2)KYJdXgs(`}wBr%|FJ&6oZ5g{{FsI3#HNHr=&QqcG*R%1F_;1+o|7Cr{J(U9pp}jVrba~_zAp=m0>5Ys30Rs zMPmhBoM2r&E{>Z5c5^RIIGndX9=?zjRY6~)*~?QkXR^Cmu{| z{0Lbs0oo2=y) z6)|hn&k_2!44y-{C+r|iIukoWfhGmR@Ht>bAs_{~3L6z2R|q&tHmW=v1#!;>-2{Su zYxXn?=s2q@I9@stPQS6gh_Fv}1gVlH&dS8KS`Zfn!FgCW$OxHhEdmcZ$VQjd7a0Ph z)kd;t(?~}8Ag3dso+aGo!ivyKu{)nkvaf(~cOV~yoc@QrS;b{~y_6`*O)+K~mAQc}9{)a}FI|^oyojsNr0zWGZPRL5Qy1rSlKOJy zEh?D|=J4MX>zhj|edU5Tnrdp&>6)4*%P7((%Cx2FR?&}UN#kvmsZ?b(^=s4R>a-k% ztY*mb!b%bOTslpcKbZ4;MfSPEGS8C&e+~g+h5XOvJZtW}joEqD{h4`+a1lKxzIOHv z@e$mCpfK|H#L_4wm*l+|TW@nA0>^1dSm(pNQ6OH zV#?Ey@+k`;=UdscW&6^t>o$G7w?3RKmu_A(e{hst-q5NIbtif_e=%TEB9sbO%Tm zn@XJ8Q&<1!x?6v9>!IfkAAau8;pYz>dY*8g5dzZ_z`HuQ($7YUao@CU>4}p!-|{}? zy3joBWy%Kg{={iomk4LwN>Nx;trENyL; zz*kosUw!_q=Px?6c=4e{d+uCxnA3{|*z6YMpIXr&m*`I3?}jXrvV0!0$m+-4bg)Dq z$OIpP!3De-nmGCSvObK8+~<&6Hkuy`O$P1Xidm!cHRZJw$T1U#C@+H9|rC za0z0$8}ScU4~U8NvynBG9?u#hZ#|o-Ze*wH z@}dYH9ukJ0@Vgyg!e(IwKc`1}6a&D;>nRjGqmcQ>lcj#244T9B5W0kUD-lJ#TEaH= z&8=}Bh?}TR`+vYJJd$);BOIz)VGf#1LV{^;iuXrO08nChCv) zWFpVH;GDsIN^)n*klaoPj^xYmJjn->xQ~4`Ahx%Q0d2r1T43)24-%i?woBkGY?k*+ z*XX&`Ir-O7jJZx}fc~J5dDI+}wQCB6W4<-X7)YSv$9c}U{+>4{jyd~#jCowCF~3WA zr?JA$H4w-z|@2n=pyuD>21qRU{p=QR-_tK zg6$(`14MyLK2Zv+;6^o`slmvmv}+~BS#a%ytgxuB1`{CpwGfSgm!V}u=5sQts?IR= z)(Ir~ZPcf`#5&Qanz=7|Q31f4d!IacU^Hu772h@fiz_L_=1Q^hDsuE*CBJ^?W3aJq zZ_n6R^>D@9kYuBd1#s;5u(dwUbMpZ+Zd1KRr;(7EiDq(G3JPqe=JPA^o}ZNZjq%Om zTRG!DAn3j3TsZ4H;a9JLU)>Ra7HjHx9F&N+GqgK6J5h@P?$9kft2_(J8cKgdf`oN< zcCe}7i5!@onraxw{3R$u=;M7%^O-Ff#j=rgkh8@*Ujvvp*na$D6L7DI_l&=pjph`+ zyK?J(Xr0&wBx*sd0SyX*60dn2mLRBp22zxfM+%$Kg`<38ROBrjq;RJo&4Ej_dsOlVF2HqwRsW4eq zp9ZPo;a>!8D9aV)VBPqiKj`C4RC5?Nx3gs^W~h^Ges_4T_J$h1?6R=R%I1dOEw}wF zAWHT1k{I||^9wK+CX>jhet{^XP7~e-s4aaw2t_{wwGdA@1nWTmZk=c7$Gy~}Xa-x)zsQGYb1Kugq<6r$p{S)$5Pb;ao;M=CAVEUCRxZK*sWzxN6})Lz{l ze_DHrEq*#K-w}VBEv9q>qLF8RVjaewje9LLi>FMP2&Rkd^QP=`$u1Bxz)Po8-lg#0ri zd-Ci(RHlS*r~m9vq$PMysjx$Jc)(U26n13Xx>{&tJcR8SgmDOJFu!xd50dOLc{izV z)f0w5@rBX{j}qy3x&~J&l-30zT!TZK#(1nnqzDN@EDD4mr9@g%ee`|Zt(#Z3hQi0$ z4Xul!4GYScXZMY2$>246rN@C?jt!@uT0RH*<;o$@icY}gxaAtIlW3|V%xqjAp_Nma z<#x!{7W5Uh!eGLCK|$I^q8JjJa~eVMpK&7uM0}D3B8rJE#U|z|GEQgCED~f};dXFX z>?mAqg=TGJZu0LdG7T=NVlxEl*p;II>0joA0t9bGfK;G{dcF5vNifc1$`QASTv`#i zHN`?mqdkE-$`w<(MACwgLK`40D4-m_Mf=0opVj_!n@#&&D13DM^`pe=j*1odmj&D> zne8iIXTDp0tv+za9iLFuPuy|G1FHDw*R(G@`vmKJQgaxgrWAa-#8u5R7+JMTtNDA@ z@Yu83Q(u3Gg-)H)e*Ms=v|oSX6D$N!1Lr!}$1NwNFN0oPq&mD%`Y9@sPA&#ehGF=p zRLn7w`K1vnf}EsnglSduBaC0`%>n!)9wNwtzdU$%M2LVn#r=x20=fY*jd+wPU1?lz zUNpG3*K!g`l~rD_=yH*YP>JPUu^0IIP<;l93#f7!yn0q9MC%S#13(3wDJVeF~ zR?(t+TUxySSnAMFY9LW^JW;;+{$94qi1Qdv^qy>;H4u;2)LPe;>|Z2?>f7`+qDDvs z_bd`m7FU%U#>Z!Bw*#}B_an)596gn5I%)QdSbG6 zXMgp`_3JlWxbb~U$~z+B?Pu0`f(s5ck3ZVCs@2)jv3!4$Z6LsREnTXu>c6>7J$}Ir ztJYpNN4DBrR%z{mRcF4j@Wg_y`72Z&7@O>oGFz|lI#rVjlyoH=r1k<5CJteb z5rc9drN0mk!5lcSy`g!D>iy^U&pNu;U+Wb|PW9MbO)ILjs}mhHN_B1f;~jhwx$^ni7Z$NJ%H+P69BjyaH-M2*Tk&dA(#WODA~sfr4uQ+IL?b+5_pYE5LK<)&;BZ z9)mvP`#?uPHwu5pdWc@Tgxg`)z4A%ET!q)MDK8HHjm zPC=LqT938k%rqe#v>nIdA*MuDh%4x#M8?J zFI+K}3)thw0opM+{#-tAS<{@Sfa8BK_{#ynH!3s>%hbiCxXIA2S$#F#Z%O}a5OeUk|d!Vmq3zl;86?U=rACn*g2{Mqw2g~e;_`|=O7U4Gs8r( zHiuAG4qX_^U`3$zu0k_wwjoDH;v}`Fodh;g-brXPI1=#Cs(><+%Q0S3(vDxP8#c7I zeyMrcsZ+P!y8fvTZnz-TqI|h&#RH$*^93!vti7={cvIOe?;qMJo>`FSzUp7!x4bRY zS|Z*M`Or;gN_SdQyPkbd?tYU3E@>q&(%dbVn<1`{fmmd_z<#$PY*7%1S3{ttxQ*2qe)xK^N}L#0O(%kXaB$ciPJ{{MdiOGZclv;D8-Z_ep z!x>hX$%jDPVM24dzOJS!TtaAOw+V3;w`b3z6P1uSe>e9Akp@x_r_PKVJ7^mmk9ovU za>LU4CfkEG3))_L4JWf{XG3hw&3pD<{XAQAple}#)w=~YFePr%?Hb~ze zOW=q$-7vfJ@UHo1_I~&o)HYjsVAk@(OZM!mBbvbk$JsaJA+ z-J2RnlC}@y;VD#jBihU~D#3$>v_)r8HSu%# z7OK*H?%zViy3gfX$jWf;-$Hb$1$HffK<{CtLk^jR#o?y57n?v_Ufu}6LHeY&$vUuQ ziA)9bNA)xzJ*B~4l0YS7Y7WM2pui&pMwl`*VHQd!1iB$ja^E2lE?&jwxYoN3O>e)v zHMwhdGPS6)=R=X0Gg9qS9R6+6q3zUIXFKnGD~aQeJ-A?A$zUqIY}r6r+sn*zy%J&M zYhxu2vqJzA0$M_ROSoQW5SMb9MN9+{9YBbr$M!&D0|Hs0sln<*BJhNL!AL{BZ&NU` z0M{%B;ccFF0wHOf)Vt~5?LF6b6GG4M-P8m3oYqaV`X`cb~&+b#&Ho+J%axjU{!6H?(I8v!d5cMsVi{Jt$`J8R(3TxNm7M&kcW9O2)6c$hM zvy0M-3Y8ux*#{e!KSm*a{SY%z+SGnbkU>`^tUn9J@u58*nzQ05#|{y-JpQDj&^ z2q0<54PvgF*@<)ZPJDsj5Uk8$HC|MFI2pf?VMM6ZbxeoYHWt6p;(M#09>E`3> zDL%&rlNJ_EMayYHt`WXVYFno!;&I5&{c*^V?QM;zhWaX`@k1&L27Jg;u?R7SVmje; zL^r)m#$@4VA~uYo2$`w~gQ8JF&ws^a4EG;&W-MKShMj|G*y)KNes^r#gCaJT@jp=*lXigQJHmr+=<*#b^FbXCd~gLx<*?L=E81JA$Ww(%kL7k4u`S@VzPk;fTe*4bdUpd5OzC1$jM%U zG!fZOL12@;LO;goXCvf@Z3Jd0f`KODWoHyHp&=jt;pR=Bh4(%IsUWmv00q$*^ zFeoajrv_Iw%^dbVNaYldM*>ee(bx zypwMwY5kplD@pC|fA5ZCHf^0edz9;v+73=@+S$icM-tLM_KAdJ9c$}=wJ@(C0W+gZ z;-2E~j){6?V-{15DTW?7=?uconZBJc^4#7|SUA(S6Xu=U+X>r{f|Jo1jqU0><0QLP z+lmf`ixTcu9yA4tNNky)n^6~CI!XnTb#`85i<^Gylh6!#(Cl=2qjcu7=PxgvRf>AR zo=)$PQ&Xsp(#~t6+K!AC!H$AgA~A88k4A~^%_G;J@gpzbO$uBe!t~L302!s`3lKAc`Jj2z z&L_NjXWvX%_UC*v;oLj>X2QTf=bH&1`(Wb+(9n}Q12m*`&IIN&nu_q#jc~&<1^?C0 zOJNx-4fOX?o4^EufXTF>*VUVWNdsoGZkWk3Ru0%SDRx|%GXWcN&8Liqe=tbvgF&t3yz z_Mfo^LhnC&4TR%!a>%FwO@V+p!xK^GPB%Zta3@GEqRTF))3s4>xtybP))|ZsFJ7n) z0_$6I%z4zj?_A7z)AeWHsLQ6WPs;yS>ejxSr)kn|%Bn^55icnnMVqwi`sfzik2r3~ z)v62G;X-*B`hk^bJe^3Ke3^>N`nsk>6TUXyf#AR<2a;)nC}`e0D8Y!7%_JdNpO>Ge!gYwiiuyztog9@L zw>6d|YXXWrxKBE=CFo1}mu~&UmD(fkyZr_BZ`zlaH3h4+XV`oty>UhS#23G_&01!i zGshaXpLsq|110Rscb__SH+ulhNB>28U|Bq&(~1E4jsG0B(kS9vR;kOW<3cGFlL0Jd zyuEl#5E6B<#&{!M6Q#X1I;_|mDopfp16lsw^uFTQTQA}txi2+PvArQsTOwOsi^aKv zZbyZ)Ro%P##}|$qXM^8ut8@p)4?UywEU3En)N0u)*VV~xtL8%>zx>RmZQC}TW2?PT*nNcfAY^^Gy9*tA93wW_d~XmzvAkDFRs!({QncK{$=+wQ(Q&UD(xAY1+ff7 zSKEaB(j&UI6&rA%Tg1BadQsoxB8FsSX0>nY0L7&-?A!@C^*77{C_80pTAB-vq# zkvZ=qF=2|4IqxJ{V2Y7B?O0px4mzOvVE}a9V3OSc zg`r9}qDTcSX`suy1)H}7<>Zmd=W_=oVlE-S2Sr4Q8R_s_IMWv(2P!*#r4QYp@?SYzTh?DL13J?rZ-Xs!AjBK8KIc6i8IU91!(94GyT-g=uK@6!&#mnN=rRC+Dx|VI3_jDv2DfhL+>r>(1AJKmBgHSw% zmh?eeHfYPZT}ga1t3zp@@|CCRyEd$CZ0=jyvY>ZEWcm3G<%`ReXsvdib$Ea+ zstsxXz%z5Xjp^(g);7qIL&DE=6LsZ4UkmiqIbEGezXIz3#RLy|UC1r8$nbISkF7;M zHmuxKc>;zx0ak7*ctimUKq#Rw!hw)51!uG2-Zy9Hwwj3(V7ts8iy;)_el^K^JSIRl z2IhK@P+V4b=~w1dIZaBE72+uscVG<8&?&t8kw|Zkp#(D)v9@VT7J8i4$6i#d?lJAk zLoA{F`lU&g0Lx;J+j{?JZ8pyacIn01Z&~Xl!)V!qMg`j2cEr*T5Bu0v?5<+ku>;Mt zUVq(=M2_L%fOfyO&_}t#$m5V6;km;UK+1%Aaa`Pm{MQPhQ*8^8#LVT4o{CIkacWSt1*tlWag9Sh~9>v;!!5r*A?md8BZ+ZNg`OIRAhyGF_` zWzX)c_*7X*7;Ukh3V81LuqXrwnl{O=Ii7eGU@_qIN1i_lI3Z8-lkz|2=U!sH1k0Zy zmnR_{_P~p=LsxkrhaD%z^t*VMFyK z>Yyd~gYl9&BPozV#K@kK-E%jx>+(oZr}Wd8*MIfFc}Le;k2YKTYi>ELz06X`_z|m) zgu*Y2_uTRD|GlxIN@Sbgt_i>L@+ry@L}CVY!Ky;1aPn0}u*ZeiV*?_rkEvx|#DqEF zerigfuUagMa?3gCf8Y6!6bp{07TCEtX10-;5|8ipBJGvihiAW5o#2l_7lquz=?L@Hp(Yr zQVjwHO`8F-6c~C^kzLqH zQnL1HyT@N2d;fkcmSXrm09w{497GW+KkP76oSuM0_asXFwveQX!Xt3VO5B!$`aIx9 z{G#weEt8Ew{lc6vJXp3aUQu#q*|^m~Gk%uJ+NCeM`ub9g->9+D=4Yki=5 zT~G1>Uy|a?>NZN(P)aT@9H2ek)tcNjS~j;mzIb+XS=$ep`+Br9ASzio?$?{NvmnZf z!-{hCFoMxQD;(%^KzNZ523TbIW6jv8*NgauHU`VN} zu*Agp1dgJ~EP_&q0-}5W&`rt)CbYGI>vXqucXl+U zh`vN?a}KYSwN4C7C_KE0)P?cjgresb4&{;V~g;>o_%1PQwPoo6>@BdEus(-&}=Fy1Sap??baKK6$>Uo z@0(08iSY(x^HTiCbo2A#P$r$KpB60ur$zm9@=t4U4$|=F%~>|MthWR8C^?&~Fs~Xq zJQh8zSr*hKBcYXh<-tK@)2QFpD8~8aLq;2q?xu#2ZQ;VxyYq`z?mYIjkKG&&ZAstx ztLwGjqUBcgRpHS2M-Rt{Q^?ZeAAI<_(5$AuqGzdH<=FAn+j-Zm9Hw#ICqDeSPokyP z<+y#|ALwJN^Vlz|ju`y*NTc9>dm@$zh+X-9d$^c+)5`+KJ*h|O9Jfe;D|_OUyx8ab z(Gz{yd^$z-pv@*)Q$v|XkImAOfZGL;4J?_r*|gSke>OP-4dMo&HHSMHx5(tsR;yH- zZE>p;w#)OG^i6U<`18?kX*)A*ZRPt6&RdjCe-iDvI9zd0{)z61NzrgiY!do}+3Mi_ zG;WE_nSEf~k}i{)39=KnM6YHHAqB0g0eesq2lCtm_`9uE^JkvpPA4#n?upZx4Z1>p zg3~5DVH%nC4LVhuW-^brvcjiMeLVHAxAJhS8Wb8r<8WLM1k6Qiawr<^h73IvC+RZEBvYunT!vZl(PVPM->WH!QIERhijPiA zbbW{Sts0os)17LFMN1>340BmhWh(Wp%8zx;C8Uuc%dt7>B7YK9o--yZUCb;zPw5|a zl_|>Gti&1c?q+{`y>>nOz%}E4JnReDwLfUjD@xf>ZRJsRk9MQ>yP{f=gH^h;YuHhg z5b3VM?G`Z}r5rNdw@vDpW9h{Q8p?WFtIJU(u=@y_b%rn8 zy>$1E%JCP=x9?u|Yw0!qfz>I&&PfZlWZDo;N|_)Ysi zy0fEW=STO8F8Ma?O>L+4yM?j2RD*{6Q=1%ZsO3Vi?iCKR?K-?`7tRZy?99714LW6; zZ~(I6p?&=kxbSg#4A|!lQL;lk<`Cl?W^q%#;I`RqZu_yU2Zv4tX?lo>wfS@(&K?h9 z6u1w@n{nX~&s>pl22lLL#5d`_g`D@8&ISIOm4e4Zo*eU?IgW~%zs>{qg5Z&55B_Vu z%|p%wwWF`EtBa1j?{MGYOE1~GXZNlh+t;7JeA$BeUA^pkZ6+MaQlGgabstrBIZ(cZmUs&o(X}yjO^VqBLsHCObl^`Ib zpE99J>k^-1UlyXma&-x8$kv*w^02JfqQEjJJVsGx8xrqritPq4RpzA+@$x+RZ<=5EQbd%Sn`JQ~KAvnE|Nhe!55w=uDeW}{ONad*9aQ9Y zITALf|Wt+V{fKsJo#UQ+)y!0m6HtdQOhBO;4 zflVBP_P#aSB7~uZ1!J%^>bcUoIWh&_wx3^yVo{`aX`U``-ubmYRrZ;G< z+tYV5>$(&Q132MQy6rmcO*Xo(=JKx{S>Msz8M-TS`$ILu?6~&XMoF)CFTb>4^_e7F zXD?$gV0yHlXo&5^y@$faVH$kNAj=ZO1g5AX^ zDFX@rKvT(zTFoLrE6dwZE>jRJBe=m9%P6m|PFCI)NRuLpu-H+AWx~`+W}VZ7?G|cg zR2D)Hd;UD$Q;J&{3KN z;BW@Sa_1P;dP4c=9C)ULm(@~&Xrhi_YL#T9Z&^89O-%hC4cGd5L8wpF!>V!aaP3Z_ zz1noJoyGoDJySOm_`YW;dw7ZHA|Q)8T>>2-oP9(sC9ansl4^kgBaX0p&ihOVClQ;E z;&?Fa8N3hX0U#`OVIl_|XMadZps*RjSF-^JaZGc<#IZ9NM#U{rkc*=t?!J!-a>2|t z%~h8e@>AMP)SLX4pQqIhvVYB>UNPikFKZhYvWMZH`weh%xdE?FszHJmsy=Xd8DfHd z$^UnOiX{^fxo2unoqjPWFAGf%DrM~ZyKsHMW1t<#oq$yfWRno8&Pz>=$vA2Qx457S zTfmnMf>;I}f|m>g0|z=$(>Czv97HNZ!$tHIFh}JMP>Xz~ql`UVkqQUQf~bTqhQ>-( zC|+1Tr!Yz$o9NV@)Bf{NY4Ad@hg9>nj~(2&zOlvnrREiB)@pNS*#w7&uT1y%H%OgZ zzWRwBX7!g_PL6+ttwqEHg;r=k?dR&!Bm<4Q4`5eRn4g!6M zSV$XK2=B}QUxo$AniKd3!rY00*j@0#7#xJYhWw+%LC~zBU?#DTNhZ-XCPP>8Njj%8 zzKD7Rc9QOa|)Za$TtU94993=S2)Haeq9=YD4ux zSV8q?Ii1Vf+|r1l$VFtAck zWW@JTW|BoO!Ua@<1SDHl157a?#sxorB`NL$Ga*6X7V%xb`C*89UDvlfg5i%*uM6RB&@eKw49nFkJqu#qlvLsj8a+BnOE!lFnu`$J%P)xv- z5JDUR=~+S&!h%B}34xGGLY8IoX0s_QnW!Ufgef#^p z6kEFU+&lL<&w0*s%Kw}r^a*z)yv#eKN_lnEG8_k61EO^uV zW&h0dC2kmgUcC8D13HR`y$kY7pkuc%#FlC0tpgU6bYK9=S&@2Zl-eLOyBdH%P!NyE zd}f*R8MFpF=v@m!$#n2T@LG=}dkr0c(CmwRBSt}@@@5G6?aU)MMYkbSP&?};>C_VL zqNQ!+Uj@YhUtu|gue3-OE5BPne=&4}T=**!-Alg@kx7*LOTDpR;8i_}dO)Btfw3L{U|XrP~74 zqGO3Sp%)a+r_{ZI1-q^QOxq*$w6y{VUt8rwg?@CL0O3nhgePdB=8?fsy+q2lgWzBI z!UvdP!yo^+`0xJKr$?t6M@o%-aycG@>ybTnRIB7GAV zWv=0&=Ei756J~CBKD4w^2^362|5L4lGO5u-T~x2kp>eKnUmjGy|G)OQS|W3GfYR22 zJ&IMX2o(o{TcHP(RDQLf2%RtLD#9^M6L3GxTn2e@T|mr3`0;DTd}B%@2z3{y5&tQj zpk3W72SDo);e82TXGbw~f_^8?S0gav@Yh0H)=8|ZNJe)2G3&)2LSOU)IwdFD*ef1erY)+$0Vmjl3@Z}yYGbz}2`ee1-ik;9`M??qIZ+*MX? zgvC&`d9{DCZMbe_M{V_n$(Bg0(7`6(R2mUqV>aL|Q=g$)`b_YuEz-wegKQGsp9pj` zHC7e|@~jHhZy4*harKIXtgRqbvaHcog^Gt@1yLeHTR>Q*hWbI9_QQcHe~Bev^WRY2*RY~0V6O<2RQfwupu-OlAIc9zRy6d-Z}8d4>MjZ-=5Z7q!#)$~MG)mIeQ|1oV2*$Z@YsJbj= z4!J!)lp8|NdGek`kdDHaj%V{c&xN$Lket%TU?ctCwYb!^pz47CjcWlLj>|RWF)<=P zfVDB2zpu1#zlV1HOzOI2K>km@b`JGfE(d3XPbU03CdNm)I%8GkW)#;0I4Xd`JqVO* z>SrA!pv8oyuqbba4^NTeZ$_E31Np%9**Ds7N>j)Hij}S&H7|ApA4=owqOec9&%TpC z1>z8aRbfV$*}ZGS*s=j={XHnG<$=27@gPoskTkdZ=Bi3ukU^-uRUzqAO1B++agIDw zUwn%a^4Z7l$xeyPmSz7o8HZS5b&ah4g;h_( zjlou^zoiVo)>g_46z}2LfGza1`c<}E0cO3W1(}HI57i@pxTf^}TKIFr!nFxcc?qx+ zA>`n2D!^06i9mkv3aIxu$u%#oVJFogfzO=!QDp*O7ahd=v^m0+Wu@RzRKf)4``nx7Lifd2Dc^#}XhjveaS$TU{T6lN zgo%k3c(0`MYSa(1x0}bt^3+GvdK1kVU?bVe=RBHN1=EH=AR?x%m_Psnvhc8GG&jtD zABLM0W+Lx3I(|=(6Pr(8F|5m-2pwX%9%Wv;SHlY;cFZ9~NN|nD5Sk0^b2)dg&(b{9 zwL6@y2KDX(>U&PGb?PUslwNLDZ(-f(nI!YmGhutx8)+tNJNq_fLO$9-=w?N{4`>~x zMt-Q5b8{=xV>SXep^bTEp)u-rl_K7w5n~8j!6vlltNU}VkI6{tq-Vms%pSjyu48ZT z_W|~>7U+t1CJJe10jJ^=tukQXmh|Q#JDvC{220#xw8AL@zbExvhKYj)nv}*kH44d@ z&0bMl#m821DbKkpGm`U|i)w7u*7)4zt^y}yVcvW$o8vH^yW?1SYq?USen7qD9>4GE z0i>7`tq{cH%BDJ<$TJ~r1w_-W) z62NN0SFYwIr!3M*@&aYks#UlrQJh7s*0^Z2v@|O)y}2~{pFl7$H@h^ViLT`9eWKw~ zX{Yw9z9II5Is9eM;w^&W=<)cJ z1iJ`tQEYyxWcu2r(?=}Th$Ei=cgTi1?8R#A`)d*f8h6!JaoVJJ;(s$`s$=P0cL|iC zLyqF`7bDAEe8au*YqwJ4T@uqc_;zC-J$(KM#1n)$?YVn_*KH6^B)nw`#C_0&EI|ik zz263jpCU&k7)2DQV4BIapE6POuD&=L(CnMzzUc7BxyVCZaO5k%FzNSX2F*~ud0r)* z{4h@gdN(z=qxiY5!-p2-8y=-s?yasfJyWt=eVDy)zxqD)^gUikq&IYlWVqVrUBy0D zI&sss$w~Ru>UytqyM2SY6+&imK81=TN=TQA@{_-1vyq)6)E5>q8@Z;C<&fFU!gGm& zq5`zBXJsA(!gWmF+g z2@}Jw1|Fxs@NMwAC!*R|kQYw}6;Uoh`!YHZQeQA*AVf@I9S*9x=DOy_2E2GJsO#sP zXUBfbIJ>ndDOV(PAe^pJWs#oL+?iS)B!upy5Ao|K{Vo|>L{EVs|P&or5T$Dy5jQyr|zYY`fcm@>B7+x<@b1t(}*yWd!WyaFS4B{erG*Hx*X-MW9xNMWtBvGl|L=68o8 z^~m5}{=&-Qd|zN?U&)Yv+#e~dX>6{qX^i(qM>@8&?6~bU-*?)il5n1@tXTam9{RR8lQ1}4fjmx`8}il%`3!FXYDv7;zjkUZD5z4@JA zk38A*_m>@(z6KEAOk>RpkA;BLhv6A`n3*!gQa6wU z(XdMP#)g2;?Q#_WuyhH>M#?;bvL=tgJoxf;9`|l_WIF-Sx3gz`{ zrKlfax7`?@&6ECPzFX@&qiIy2~8J!z0oEtr;)8Gg6Qi67Yf?2>dn3)|xMQlav zT?+qI1Ihsau*pC~JcSxKke*r|Fp&$hH)nw$>Mu4Jm1!qZx0oruYwzg}#yF_}1JD8@ z;OWI3^kCskjzdn0gW^&@qdw-pz5&S!$v2R!fII}zfaHalGgaYeMOS-xx@G0`>L0W> zSCp0RZRl8A$j0u=NgYTf9-tcRoOT7si4?7CwBr}0%Z+AuS<(R#oWpwM zSQI|kjWD?uNpH^?j>{GlQ-jlT{(Ue2CxFSKsJewm)%*LpyL7T=N7dgn>G$8f|B`7> zC&^o)KjcI?`1n_xkB3+{ak3lm!iIX$s^oIA6`b6uF((W-1^KpC#h_S^r}#RLVL1>h zL*Nfyk9mx}`tfQ!plkjE48YQBJL95Hpv}~seu6A1r?hui@*@(7OTK&Vc7vkf8Co_t z(BGT2?QDy0yL51U@omTSKJCh{-o*BsEmVJ0CIwH3?gbd(6v{F;Eny?d4~Ee6o%CVN zUf1MWLU=GsvRN09Y)u+Dvn89ABpZxz1DWs(F+!q!sW;6feZoaJhAgt{;omp8<+w%v zW+Yy6Jo&F;WC3yyQ5EbZkeiHs{G**A_1JqO!efarmup=<(Ij^w&~7vh!2O{I+AYj! z$BAMw>L&{Ntt-F^nVArU!@DwJ-H1NH|T@;Z{x7 zjSROmx%m-;pgSRm$i79c(?yOMXEO&jERs;C)T@uFKY3f#n$o$)P0c{$ZFZj-M^0xZ zO|B_aTD-Nsc^GHTy@Tun=pgdWe01^EQq~qYcg&O#`+jS{mOg$`X>a`p0&;bPpFjL; z*{kGX4OR(vCcKdl&X%6;Qk*TzAPHSCYf>Kklwqfb+|kOBht2mO3dU=3b)zXDyjOF9 zjwpjB!7{=kur2tD3Da58_V(7AcvTctjmrZC79<~|yvV8rqU{J<<|yXbmAMucD>74-~_Vc$QLX6Mte2&Wy z7o4+p@b50+@kC*?68ziWS7x^XTf70io~E~qua&8IgQkk2`J$+Zr-56O@_^CRLokWS3k)Yu7~_-nXeH2(m3shR8L+I4u(2b8a9 zy0Z8*dpG4xLY}$Q?VKO>3SUeV4)lp;%a+Nms;FW@P*Pz94jW2;a^DT@v^E=_VmysH z6DEdJTnkH@5SB#r5!$d~Vh5bep*S|?!-XL1gmXbZOgb-qBW|Jb4isCYyB$0ElfV{b znA2Wi@1B|Isg3KGk1QKZK$#+*aQl2XWxyD`ix|?a=PxV5ly9A7oQ8+14$i3G&0Smd z9ugAV#aB=^;xQF|WS|M0tz&<*JjR_0f(a2L}XmFfL`Kh@=TJ| zqA3AEv^YVEf&KvySV05E>J8nfJTU_iNvtR}_b8rhj|8 z_U-IC^()Iw-o1~uc}yz^gW)oYzlzn*K4)~1A1(7;W4W{-=Y^YpC8kMlH3M{9FQMZW zA;c678p|TYrM(Jnu9Y0@O5~tBgr-D&YTocBBf%>(cZ?GdPt#Tglqa&wHgr_S#0izl z)8fe&$F5Yryu#$!`$)UTyh3u+D`LJ;{7qHE>^F@0qLQNfgz+ng<$>;GKTF%$&-7gC zpXs^Ov^IQ<{LRJ2oc|fd+-UrG`Z=%6f6n=zsTL9SIMZW3C2jw&2et3|uLrg7%8b#! zYk@IXukRA?YR^Gzu~^)t@lfhI-J5fLf6n+ZX}dmNzpjthub=-Hb-#F<(WO5>YT?%* z=}PrEiO4h8X_FbtpK-ylLTmEnocGw6dJkk~?89EihNHz!S|>a(LAfi zjJmDZro06RWv-RMZ3z|MaLS1AxrCnJl=7F_kU}PRv?MhSBQO;HqkTD}aMJ1zT3~?Y zP|F7d8JQw3zQ;_~;QSpfxtqOG?lw;ZK7*#nSzBHD8GCX_y@xhQs~YF>r&Ktd$S(l) z8f8o*s|A@vq%qU%N`U_hSpSrgst-VCb0}t2}Z2&Y7OzmpUWrH5pN+rM5S9nLp@x?<1vvloY2V8;VC@ zo>BHXJB#> z2u_#z0|k$Hc5-SBc)u`;f0m-`g#oEqvT{0WY}T*~kDzcC z)xF^8b|0@Fk)2q|MOm|6c|e@{9*z#xeucgG2S4~I4wCPQA5Q)jB}b_6sTj10fxY6` z(7Ggnm^fBf9OZk*BYY@fhf5jh83GV1$dS_-tOAxVPy9sVR;IwQx?igqfZfQ`3a5FW zc5Rk);_+0^%-HN-j-^Gw!2DzwC*{5*O>$?2;wN17q# zn>;7_`1B~x|Ba9G>B=p8_hzqSnebG?UVy~8x$9ViN<0P_hR8HAz`KSeB^Z5bMNK9o z2xl!-3PGV2WI@r11@Fqbf$UXYyyYTewN-Xe(bZrPmYPx-d>lkEf3cC&kXmqW$lIK= z==!pM?|Wigoy;sedwB2L_OYL0>EE^QZ7I6^9%~;H9!+??kS!t2B0LDMC4xops*}tyGm~lBl!`lsGLmNg_|#D3Dk4IHOiZXr3y+#H5i@`l z@Om+qtHR}Kaz!nA4lWp;$C5(SqM!*KmZ;_i4;wX8L2+H7ecR>svWmKRzcT5+@}R+I zB&{kGj}OS>zAFzp9VNCS+J?9(uQ$ByHjzCK(rlog?~s!(a=MWSK#GgxWPgTiiV1fo z{9eRU3J@~`okUiNO<%>#JpF}~AY|ZUhx4hy)B~v`kO;nRvtZs$ur$Loqa1Oe{PAkA{0Pm6(Glvq8dh;l;ZMntcEe*z` z&iqv)vRy{WA8!eM?Xa&Nwwc`KWE--- z&;JPm<7zGsuSqzmu~dQI>BvVnViVR;7Zym3N6!NZ&VAwxiZp4bQCbrE^o2(XZqBEG zC~F*%{Q=(*PXSEi^eR?JdSi}*#u#naR=!=l`gg^yLy&o{z~jxo{3y#?`?z%b2a@-> z3q*pYi3M)&>Y=?KE!wuxF>NdZ2Z@3gf4q))+i}uPCN}eu z|2R@n&5sqKb`yYy`bmY<18^g<_C@&=?gTv;+!rW@E^onKGCAX93A!)}Q3U)x7Z?H0 zw1-1xLjmu=gwT=S3k)nGz8Q5}R}#FQm1S*rY;C{1DbnH*@69%OB+Iw0o?{1gC03UE zXJ4DECy3`?Iv*BiIS+<~LkX_~ltS+?snlkGe??kjF^j@VO)n~=?S~v3VB>j14C?5~ zD?LEtKi`rtS5}5xg%zatgtZ0{z=cNPOVQYl_eas1;v&hJ zqSfN;!?$d_wzzw`Y&?8wGs&C!oy{v|cGT6>ZK*LeG*_>xo4w}#j-Hi&I~v~p!!7bn zL+j4n>@VIp)lgL$+`O}41aBsu|0Vu-1ak}_s{E-$uoye75Rw_W7G*MGKd9SErIIMR zhL#1S0)|rp8nv_ls?;|hidu-%nDlPS_|Is$75Y9S12r{#?#RT&25H)^xyt2=_yR;9 z>J5>zIqO8Kkc3R!6s_77>Wqh0aUP%eNW)bFrS^lW^KyIf>eCzhw$(JpKg`S<`p3fR z-HygUWAkzGp@!vlbr%YwlHLM!k$qU{IgI=Cm zzG0mDDWQ`M;(S3$g)NLqt1O=Ik4g2$>by)+tYZFLkuEY3GetG;i~OJ}Xj$ z!Tw>di$3h1N?;bR2Nq4&1|wl?Akf92fEBnxNINJzSA-k~-`$EnI|^FxilQgC@Q(!Q@J^IdPnxIDJxy` zkiKyy)hF~5u#C<`Hk7+}DvRSasqK?%4_!Uq9wKk<1#K7}pQ_Z)6_mWmvV#EYBb|;L zO@aiSZUIZj{8lU-+OqILNtCPY5w1+wk!UYLK{FZ2qM`}?F!yOnwfulrXX!j-vhLRux9$>u1a+)!Drw-} zP2Jal`~ZirAz{gbSVA!p{@zsACIFhE`G|BX?UKQ{CP{lJ9$1k23>R+Ad=8^oceUcx z0Kzju)*wR}1!;gpi?*^?kJ!pSjLI{`_M_6vV}@e;xtXDLO1-_Rbyx+4D<7|kdU+BOFHx=x>Dwm5jpQ#d9OJa6Nh*;*!B&Uei(GQqKvp~I zAmDJLn-xkT*WZ+u{*vzCJfRu#)YJuq5CR^}A!fA}r&&a|{V%EYTo;y#{NkvOAnQMfqtBY!5ixS+jO6Bs zkhUzsZbhYu(?aT=`J0WbzkoPU$S{P5e>-As( z<+$WT54_|Q5#N{bx}AG0^K2dL0fIj)2^%jlGali!rd4}Z>!w({`8incTQYB z+FksKNPEc~-CvBd{Fbhbk%m6&J#{CswvZR-Pjn03vzsLncB)GeV-{0BQo+b`ML?>4 zD1FeDu^G*%nP7%e1EU?gkxWydHcZn1nI9T8Du4vDDeg1nKMT9y0;Be9*qyY^5(_*A zHBoN7l;;rNbm_;T37U82h4_=wFTi5PBqj)%Lx}2 zPziRs&1!+;QUdL|^R&iu(Yf4|Zid$a0E&JJ!=?*S^&_se`TRZtK!~&5mse-maFP9i z(rrDHQ{Bhb9Xs*$C$`>QddT4$Z`ibpn@ukN+;a$C5D|#ZWS8l)a zfpYt+_c%*Nd+HLsRrM!l-uEb$8f#sPli&^LC!ONc+FDo5S?l%%iA$?(5-etux@m)%F-F8pk4)bM{y#>&!Fw z;?$WUy~p&}MAjZ#aEoa>jLk-u2Q#7Bz>ve!9qsHNA6+?m!Ve27t^fUL*V018l$C$^%zv?N%B@67j> z^XNpmgkpeAeNt<)2VGQIEAuM&Kb(aiV`c1#j-zKb$MQbXee)0AuLh;!*j^US1f*yg4azCa0z96%jrb$=MVEAgreGq_i{v9xG^m_< z%JSf9CGvqaoe~aXh8kM(&U62wD?;MulHY|+S;D+Wlap2KdG#In{?u`ld~MV0%N6+# zibd~No5V8+kcl{VoZ>HV?ttEsOC#Vy^wtiMIiC>G^}}J#Cwz@GgJI4m`aLAi=X|2y zL!x@lC;B}kL+5;=-$T-I&L{kyxff}8N}OqPjpwW*XBRlX46<<__yp%Ns-bD@0yu|$ zk~ZYrg7mTDsg>97Bo8I8{ZFf}@cZceyu`1fGxN=S6;bvQzluoyX1>aO>0d=_Sd&9Q zY8`!wJdTSJomRIo=boJXwiwkf?R!#e0S{+-+mVMe`#{Utc2E(;UnEDha69{kdN0ef zxS4H*I#S50{%XmmvLYJ1NPWIZE%unF+0Zui59}_AbWvZ7xO_L{T$k|Ygr^aLt&Pbh zcVtGa;FdIe9R6l<_Cu>xZUoCAeN8+CX#?RV%e-?d90rPlV+(rV zE_?~O?X+V7*$%vSfXM*GeBtLT{s_vIxRkqbFlJr=T7$i#-HB?so@m5dNjX?yIzqH2 z!4yM*27r?V&U7u75@qE%ZuXSCYx4QzyEmNq#?iaJJl)ru7tc$?#v4~$wXtUUu7Oa< z)Q%q8FBUrvtNDi=#UEm(4dPGLD{uJDb(YJ{-o9@2p~mWUM)TLaB`dGKy>IV3S2zuw z?3>AP8*k8U5(_z0Bl+5$>d@iOVQos->nQ&GLDc#BOu|!#wQ0uMAWC75R+i~&L)}06 z7NN^A2moYfVU2Lo!iqAXnVCu62Vsli9Xf4DF^BoKC}EfM1kkp%h0`_?@q~Eo*0hT{ zO^*d?`>lNj=&9(Y)WACOKi(J9-(E-jutyC1|;Ltkt`3>r81Bbzl z#PiALInVb9Q;Ey$JhEk$T9`@hfxv`QIuon{!OSxEF`2oK$!InhZzN$#9(!;w`neeu z#kd9RPtI7Qvc9b~Qdx<@doE9e6M|cAVtg?huaFPyylcXxg9`Oxx`g0b*m=lYvh@4ThGF^1%d4<>(1p-=TbL#!;`5v0)P zXW{h&FC89#-|pMKaM|IN)$1H*ZN~NYT(93jzK!;lu^)oW*%vF5oyQ*bZ~aPIDIellttfyoPepMS}9!97Q>Erly z3p7hM4fqyYAEQPu1^_SHsEHW44^XMlr}#NtF2NMywH-x-S(&(g%NMGv;tt7* z0C>oa5ExC0$3SV(kiJZRB6J4>41Ie0cUPjdXz#Ab#=*A1s*=V-tM9tb zvV7U5j^dtz(N|fpu&kgm=qqG1?4GBa4z|ubd!&APuy5V;-|yY=^s|>AKIW<{ykgDG zKiIML$!*^fzt9jIY*~Hh!1%|pW|DwbQ*sQs3pH4?7UBMc6-ENwX#^V7?INn}Z$vT^ z&P%i-Lm!TP=zI>43)NP@VKt4%pcn(CB%{nG_k==WPu&7qz^#IEacXv*_?=|4_?;WdzW$-c>31A? z*KhAEXx`Y}G-#B*qgYp7d(X(4_Y6;b?4Dku@~{|~nR#huhJ49nSW=B0gHX?9Pz*M} zOHl?}^fm&CY72 zb&u0lvWGo29%8HgUXv(Z{W{H<16Nea&XNAvRjJoV@NU8A6LC(f&& z(2{8MQ6!d8!F~`G#e?yZuouztOn+B_SLhE4e(hy|1J&VB>AHoyegNVM)pB&uYW57W zk`Dh%9-AmGzxDpHrXEs2nbW|&>J+KMy69AEn!C(rHT;G2ShVc4ekRd$Zeq75T0w$j45YZUb!sSeam^_}jZukZ;`E(sPd%8}Td|CT`!sMsFAAQ=gpRqCZ3~aFx_|&v#0B0}T zXuWm0_-n|)65$sKJ2ZZy9j+%-`Q{Ebt>Yh6urM6QW)KM?CQ&splgYS~8)7MEL6&c+ zTF`T%mUIUl$5p92kgx^JdZ6x1auyaAmK2t_;4>sS zOh8j|Dd9}Ysa>nPl?cNx7tIFIlG5utAzF_>>v{}MrScQ%rvT=kSE%OPy*}G(;^%Z! zfBK9uQf)YG?O)N?hkEm+a?C(hrad<8P)U!bJ zkW*7Hmjnc1wil1+7NhZ^g#oO7u^WJ6)yMJ@4SMFH{w?W8bIK(gMl^qoZaNx0(l zx`Lc;giy{o!{98{=%bYQuXKTaU$T=TK$^*{p>qvzRT0Ri}Kax{mH)v z1;K+T_|9p9h`jJn!s>N_D#)u#QzZgs2g4tymB(3a+y~=7tAvy|1$|~3Dx9X;MQJnw zJMe{GcsL+0sgaOrh@7GPf_{gD6Nx`o>we<^BNZ1G`vHk?deAit?rv?;@OIB-CsJhC z!}`QC+ysKC$HXURFk|(W6fbGSKnG&={C>UHRf<1i==^8#VX{a7iH0CLY zRaF9_mqNVbC?_V$*Gjvkn21B&=P;5Ny%FXzlg>`G7lrdMr}4kGCpih(nnNEy#BR!X z)76`CLA{IE3WJH3cVe%#2>+CD6ME1CA4QoPWdg8WcsWiYYJ-4$UX}-<%H$J=JTX~7 zRg*-=Jql$3-90r5D|PPPYoz-?eUNmiHzw*+_rQAOISJ^SF!$#4S83$*v^3Y(q2y8o zMX@U@T##e7EK+I?Bo3L+Ey&HWtL+>)k6ciFi{3d~dcs@aZ!T?IW!M$G<7B{B;5{K) zF86u;O{GoC4Ku;pPuidx5sAz6T{~v7u88gXkb#Y7k$KFRw+a`Tl>E6)YfoCPy*uHC zToa{2)SI#*N((cY1Ad6sCAxOKm~_{6FcNDt^?I>k0QoMUfNKk+QO)9xx?F|Oy>)a7 z$$WyAXR{n?P^Ab{G+TJm=RMQ%Esy`C7&z(kf1~3K_K?3w3K12G9PBH*H{BuHv!vJC zZXSC$c~3C^Pmo`BT^}!imOGX(10Pd>H&1Eh{`4&LKn!J+a`4rHpV0d!pm9JOPxYJA zM9dt0996}bG{e=sT%BC~S@QKdQC?6fXEW+Y*05c0Jp)$C_eKCGQH{_hypSkLo%<3Vncr@MfdQ46sOAFG!!jTA+!JsJg71P}+uE zgBQFDF6_*=@Eg+Synbi;OSGl7H#I~G(?>inurXIA>&qO5IZCQEOQSMLvDEZMS=$?_ z{h)7wv-hYwbZDmfKnjitc?26&X@EQ`7Vb*e z9ni9TUThDD_tf$K|K0ZRcwNq_-0h*{Y>u!_Go^u_Vd@{#I*~V90*gia#CK;2hJw9G zFP8`;e|4K1y5%hi3-DDq)HUoMiGpHyj76qEtAu2wErFn|&PSQ#vl4BPm;ngpjzTUn ze+q&E0J_1BLu!ux5H0b{?D08$<$NO%SkN%1P9N{Z6Ka=!r73;M|AT`@-C!#b%TFjf zCILdu&#u5HAMx5><2rs^`K$6sT9+P``!zlN{7;R4%+j%I={n>C;B3mHxE_G@`H_T& z_E`*YY_SZxNLo_~0_Yp8Oc#$PUejFHwbSgL*;C~y@wfOuAX?KRl8@4T8QDV8QCwIU zFOKVbht!%2sK?PX@$}uLJo?5x*O9J#o%H^Ut~`5h#WF8En()PYy{T=2&Xrp=YLd<@ z-HaGPkw#frMrA`P{|FI^T86)orxo~*gcay!`29o~oCK1XfZGv;3sK>vxk)k_woF1h zRMre=R3qeaj7FV?dyjqxzY`ceF@4gO550o*s_g%IFm zhhKtwbVYKx14?8>vpu44zFF|FKSsk+S0LcQ*ntuHZVTm9~#(vx-_t9OIbl@^6}n@4c=l;OLX~;;NLX8B^VhgkpHdZ=!STEJlHh4Y{izI z>YdwG`AhvP8teD%-&IvNJRI*>+tE>w+}f~vyw_UlXdNi4U9Kg?5@L>rnNFy~FN%^8`8_?TPeT5gzP*76dV7QtppyKG!1`((c z`fKiBY)X_}f&You=1}y?bRBIWSUvuzKkBgNp_~RO_+&1E;>d;Z^Q@1BE|51LO*JS4 z#VgJFc*SygyZX-^!RBQZYfFP2>W@2ut%=BbKYskdKydrV+H0?E>00hAT0a>YSrrT> zs5*lfV4a;q?tJxdK;-={0&JjVpunmgN&o&xRef)uxvkU@E$N=9R?p%CkPfsfl^pU0 zC9%GQ<^cBwyUFK!!%jQR275BiI@KiFP#vZZr{qiDlKB_{#Z zX^{M^t7EwjB=Ad0J^y{MWmzCE>-R@uM1t1R{Ho&aNsz!NL-e0}T0VLHQN!njKH;e+ z3^PMo0Zz9@h`ux-kPujun0FQA0hWfs&xIz)W-4|Kt~o$I8#E3BeA@YE!)#(hC{t1IHB)th*>j4&G&duuYnu3k} zr7MdAZR($!gDpLkt9|(K*WJO1_qSAE(b%>uKl1jb!I8qkUioB=B6-mgV42sh-in?L zHT?Jb#(pqc=Jzv^nofUXbI{w>u`#NC9pke0E%j{V`xdn`lrx$AtlXcvF8o-0RlBb4 z=lfhAZyBTUI2&VXEnOG#h4sSc6ZWCrE=jSE)&?cnmI`F_2q?Ux*yK|{alt-2F4*n6 z;ek?={AmZv&p$Nk%(I}IIOQ)|k=Y~x4FHv58jt5mJePVqV)>cdQHg2&x|PcZ6FuFX z9c>75G}gys<<82=a;Mv0?!vMuo^qZa1^xo;NbX2Aq!s-WN1~qORQmVlm#y6!?22t`E?m~Q`^-r4 z1?s&^>rx20`~|E_k#HGo1LxSv5y|B2tU~U|7C22(*t6BLykxPU#_@DqN z9?BFVUg&i09z`)ElWW?|T&_c-o_w3ttQd`kDZ2%`YMMm!TX_K0u7jH*7YJhflolH)}ujwO_tt@YJL+acQe4u3vG*U}arpMO(?rD+aV5 zlAn1it4Zq_J%32vD&L2*VHxVF=;ANubkJK`iggSK+EQa@)a)!#03 z&R&+R+9m#Y%pk{7`y(b(N;!Vd*&q0w_D5_yvpq0S9)9DW6@)${$gZPC+9Ly~fVh_Z zNjn^zPK!}8i9K=A;VMMGHwmXr?llG1^vt3`M3r17<~k*~9443JM!-)6$1&)f+Rl-t zWk6UDjnc+}oroPt(X#nZGC82yUgD$Zcw6LA_&$T1Lgp8L zH{SG>jQ{oX-byh!lp7a&6#3h-W=OM!YGc@$o-~P!xAM%H(Rt7074UHw*R5VXI4B6K z*REbWx@K_IAiCiVEyL0dB>K8KTAI)Wr4~ZpAFgz{%Kec06eiWO^}X}t!IBPvREiX1 z%?z#O7ZT_T3l3=}l|iN2*nLQ%;<90`B!3Zg5_VloYVd?B^{D)JNVy{2J8p1+pGSmu zCtQ8)Ehr8h^@*aHl8U92$gdTELPjet6uh%&4lV0+9-0&!8a1IbHyPh>uF@H-AHPc* zk3*M_M*`)tfu63m*5;;ir#Flm5jBr6 zcT}(6({fd#cB+oebX_0LoT|r9v~D?87`y&-+u^HfZrS(4Ck}UXwsctTta$Qj@#!5` zRs@n$5hIBUV?^|hAEV<``yIz^_9D!j877?^I?HR&$%llkiHV-JX4o?kFYKAx>L9wD zDOB}|>NCtSR7hgqmN9Lhyo<^f#L7i<%Rm*%bp!n&m&*key(VO#4p?aj+$B*b$6S$K zi(hjr)*Er(94>AhDqBO^`pfObEnHjgP+!!v^%E`K(AL+FSB|VMmX2!n8dfASFuffLx22_lge<2dhbjk1$IA=E2h+d*Ve&|oTYP|&H83IzxRaeYsEWFw$OV=)X||vUTW|{J{rKA&cY27lGrs~&Bg&oajh0ZbDi!qVfH#Jys=yc&@E8;~?drfW+k#$<{50@|Lk^O6&J@!u znL7HuW=A>b@%Yr*Th8jNOf#&v- z{Hl`f$r#%l(2zv|_3t`|;L5!({rlHi1`Dk0y#*2>k62-{tEv_>899Gk?oxgaMdCLJ zJBo*xTOhy_%$7&;tprTgiP?(aJ_NO)Qpystx9rR&*mhe{n8UmiDdq4#8jU7Yjp9Yi z5USiM;e(QJaFrb(pARAynS#D$xu0PtOk|Cr;;;#OWyk{vinG!S7>ZT|o_o<@ zx#S@K_3$Jk5JB4CuAER;ZFT0W5A0GObUCKg5A1a2=R0?zrBIAjuy<{Bx?Ik!2=}=y z;v2JPq#a4W#jTad!j)He+v8ZWRD8J}Cxyq7%%@NJ-0RV|^ZnSb5n(tn7%arxLq#5> zRAcVqn)IahS&%XhNyjGm5Bk(Gtw@-fPkI1QCXfi=Y`U{l3Q62ksvB|GdCgw8o6m?l zMWy$vzv}Y-I`KBVPmP)KTDXjyYsUi`wN*FLH(^did&WneBFsQUit zfGk1>Yr`{$L)Abh@P*1s00q{WT~umBD55EDF96C0ov+{q<-uL*gaG}Dw?Zh%`3S23 z^d@fQBSd2LJY?^I)4hI&`BE9(@Qf-`04}yy?L@CEbr}8F(4UQcn_Y>2|M7B4oVi?! zDIGXxk$-vak$F8L+h<+U?|QQ$cNAlwd4H2*e+lCVy;E|^2Z8;qA->Pu^I3dB)6hUtq9 zk#;}FQll{rH{hH;kQuAxjp<{zBphV-^4;SJS4IeThvUJHXrG3D^)+hQJBxB!O3oOf zzV_+&9(?Gzkvd<+GIvkKuG5Qbs+W%X{POqCg-eg!bmgl3SJaPK&N_;o{98r1=>F2> zNh5f!mqoRB3k6s0ozWsU6({P&@rh8&>c_pAt4ii7={|GErY@w0AwhBVeoRmX=bp|BpmF0vy;~a|TC-J? zGuoM?oy9nOQJ*(jS&)y6V1^3WKqsdBA4MdriOEd>DG*2okO^L}fe^3IOL3KtKuv9` z+^c;?uvM5F)lkkNPEUc>2#{szlpq9SDMcikN^nMUs>xp!703p^@2DD6@5aBa`eFPf zG_GXN?`1FfeMfRJGQHC8W?k(m%tr4tXEQI|ochF1OE^sJuw!ncM1&#nSCaQo6^q=156jAea>J{qW6`WLkC#@Dq zJ*z$=yKPsf2d-gHeHiIm+ds_iVIzv$eg%8qHR{T8`HtC@LmsrV5(~}J&t@z0il}X; zPjQ}mLpGedR{C)s?{?*TUvod`g{jnXdsiN$d*vbMXn}Wmd7lLB}_vUtx#xnMK-$75Yl6_hjKMGkgLXe{RLcuvzbfMsX*E;N}VnwvQjk5I*K$cwtFF+ zEk~`BPQLK=7qYozp3bKAJC(C_JoPNwG(N7<87%@Q^lRfT;DkKF4C~RjB{qY6tnsz0 zR>FK?Vt1Pds!xAk4{CkMD27P@D`r^L3s=udxCm+NDwF{kUyI?$I?f)h%o&aYh%iRQ zlp|(9M-V7hr@_MPGy-H2U`B9t^HS+_N>$=xvYj}v2oQlAX^cUTZp6*N5U0lydtdZ{ zBwow4Z-s{#Uy2GQ3w-m%#&8%M&LsD?EgRQ&bs#DlMs?o;H%grN;$emRmfG6G1T5l5 zQBylGRys%tJzW7~p~`xIzu;%2>jAL@>%Ixl*>G#0MEIm=Ydh6;{~9Q&Sf3zT_@7c$cpm@l-5#bsJu*Tvl7x+n(?2pV|}tMMGI#OYC>* z^4fiub$3l{E&Em7wectV4h8F%H|9UkI%TO0ebiDWT3wFT2Of6a{{%bK*6(|zVbZs? zL#=7)vPrK!l5a1Kl}2pwy0Y12(%znS zE35*tBQ-}r2AhYD=P6G?W(b#GPz$-tUTg$c2DO9((zc^Qw4xlHn4xb&=O%4C4ADfI zf@Wv~&Bpc11{&&$0wKlVrvl4hB~o4~LpX|EKqxj!fDVy? zgliPgiKdd^C><0@p*UZ1vTSh)dM`esXGNk$^th4}y~~|;Dc~7ep@u@P<=dCZh|YXX49M+k3*6AO_BRD@7u%n|M*hVt>Gv)>9_CF8LyHVvJc zt>+LZhrxy_v5SB(=DzmJ>W{h#yM`kxE7B-3y!JR=TSt#2y8Cj_V-$JfuYDGPG7WPm zPjT!LxtXbN?iCtF2_WL^>mSXz{!Q45+W1#;t`pXYUl)GNv^>5Tasa1*Y2?=-4L&7b zF)s%&h8)237@h<8@7g;c2Qd5kSn9iw*Oz^LBVFgb)=JlfPT`q^qb`m@rL84Mwa+7p zLue7wI*!WL$mm1F)?`A|Op?sg@G2h~weWZ?L&Yqnb(YzaE}pXFTR6gMDWlT?URb%E z9Ze00qC342XD~!oZl(cETO+_w=?*mWV3#?p03}48s(<_SJ9fq6&qP}a)lZDO0^1LD z9&bS~w|Q6mp4koKPcFahodMU@TPE+@wz{RZ$#b`(W@79X_!#_d(LH@2R5&Mi+qm{q z_a(pW_T$(V&#T~vn?PH?kPtqX$Zu^DO*RA!^LkKtJEfj?3V9~Ml&3K_YA?zM1$2F* z1WGh>wb?*#lgT~}$Qn=_RjqC6w}qeno5aEWC&!ju zX_CIJ_JGn`4|QEhlm-~>o;Nn7D9sM;zOyV|wybK`JMh=bpcYGyInr(J-1;g^Jka; zcK>fc1CA+Di07(MzoM0WOT!f9qrR{aHR!B1Cv`3XqA2@{j4L{xs246Vn~~X02{x5M%djvD#Kv$+5dHH5w>8D4XdI(c1T--PRAx5hXC0g232t~BW zQ}9)aT|*IF;8}3RYwNvw-n+;~t9;|aPk{?AYf{`^R(7T)N#6Z8{WN$h$oft{jS44_ zr>C;1DFy_4Q)?4EKC#9aP(tx)tX&nj$>}4$B8X@fREVp|+<;1DtV4UtFxnhUP}aW6 z<}+5i7yyXxsFyV4&zxPC4fR3amldeRMSNBR8{Aq^DvAblR|5!IBRe|TV4HFE1kgWh zGvhQ?3lJY16+Br7PEPnciJy*8fyIlChxpHoN05Nj6@Mh`L^7?u<+uZk>nkapv8|aS zYIF7-ME*@`_jr8kK?t84pF1?UGE!rDsBW55YUH-N;|PQSm%~N-?3ds7?DmPe>fW-m zXKs}q0YE|%I6&)=ykY0>g8KRw=q_>6UCME65)H&S^fa8EFuf?Xh&uN`ra-PBKue8M z^m)h%@4heppjufT=-TBejMI)2yQT2vPJwcol0}CtQ=TSA~HY zDk<{&2s5oOYZTyIaxI+#l!z~@VH(KwL)D>z=S zt)TeY^gbLRJE)1%+O@pv*$eubLZ{Q`4q5<7HurbX1~@G7eynwk(3|K&uWC^V2D}~u ztWo)Eo(%+75)62R;`xg6B0w}vCSav~1(m)KFXWOoY4jOtn|I*}iuZ50X{4*J$#}Rl zaXPxO!fk0s+JnWt;}h*2H`Gl|OP0*?vM*kPtdE1#n-Vz>=vB!+AFEwP>%G8yfag`? z%CGraClK-3&sW>WET|W1w!mW~nLCgeqpX1u0g^!VlTi&_Mx%2HRmP}zHuM`*xJRFJ z!7N!&S_}{Z^F4 zacqe^F#El$Pj0Sh>ox)X{uj;1dImCXJEzgw#A+O&G7G@bh&8WdP#4e;1Rg>rpVHC_ zuqpI1gV^o7LM+vKAaFs*Q0x{J(2;5l&C2KJ5QZFY7HRyW)Rlj}IwbWOL3>qGix6s8$JO!++g;}i7fJLx0FjGb}H!xZbP^$t4>TKaZXsR%x zVo^kp0KjOhsrH53nwD$BHqHt}<@yLxS!|klQ;)!z+*aWwppk??7DvGfI1i${GUBu7r|EUbzJaN$Eige{M;APBBLE z$U=ufp(6#d5qah!^4h^7eDmnKj|GSy#C3_tnKl9)OSws+qB+x}`U5am?SAs`wqE3Z z97sSN^4o#8=KGezT$g+wc5&uDQdu29SGOSXjO_1PZhh-sIt0axQB?X$bHDE6*Fo-@ z<|+|8WVc6CMmFC*y7O>g#k6~C-m?A67O=m?9S7DurYw+!t5`Rc=6ZB^Tz ztO-@$@quCXy+zD$^@H_Ww#Dkpg53vK>inTT%Y7YHsIT*zM4o08IianQeiH#T>tQYM z(l2(Fr{vAGG7uf%cF#_W&5SS!)kV{^-R=wCMzD5L{q?&>1*=t=$~*$4VlVWixp!kV zGIw($A`v75B8HxR1VGL)9SBXgpy{9`q2_w{(Dxl>W1UDOSj1=$g_&tqn+DHiY;VF0 z8bAWF&p-yb2sh zFVFE`{~~;Z!l-;FUW^HY&<^ee=Al_QoN(5MD$1#Z1H?u%9B#yh1T-t8=CLMV%3wkq zfYczHiNpp6P?gX95Ko=R4B~J>2OK(8RfHU%A`RAT92Ggkv({h72SDXugtttC&uNr= zuexFf{I}@yGNrBaxLl5LtUh3ZGsz#c&qEDrc)=&d+7b>T z%!Lnv7Hw(^8iU>n`w2-hRmDLUsy3sKWE^pS&;mq3tq{aWP^O4TfTIb2X{5j`!<7`; z@KbdLUKdq!{xWY?jOaW$Dvtq~JpiNr(~07(Th@=s`SxD8F+%9I0GmKW zp#=fu6-7}8QPw$_J>QN%q{wbV(yq;vZ##fuipZ8g+eMS{AcQ5Pr3k08D0UMFDl(g^O@KE7$~n&rcQ#{(A?uhzDMd&J?yh6p!%8|+eJLq$cSyIB_wV4fv! zj&yfduE~2*$rUJi#G?4Ga?!P+rshyj&!g8aWecgVZ@zWK^3$8!>+3s?TI$BS``5+I zmv_`Pw~USTUADa+0L(YD$>i7j_jY&83^ulR$40Exp-{DTMWhGdHnMNEHEl|*DQW>` zul1Sq^#Qt$y&3_Je-xOdaC-zNwyh%EfJ z&KV}USEB40Asu;`coEPt6bIU+(nnX>?fX@yor4<*(6|nI)tB%qD>iu1k=UFJqUh-- z=fN6FlJ|=(bhQQVi?W*XEs(P%!nTAJ#lH*%NK?VfYA1Ffy)XcbRP=23X!>>8G+GiT z7WiB1qA5Uop4W?2^2;r-Bjw~QMkMr6t8?+3>T9r*x>HG6^4p})ny^!T=`*ZX^4 z0gk%=r)-nA_b*Dl=t}+{_9OL3zE(XUpMA?EHWsQo*a`eiywePI=1z{GY{Y94hSE9@ zA31l-M;9GK^$xY?yt3#R!ea62y=eN2JNo938&Ip_LDy3XUPf?SGtc1VbV$8Jivi(x<8ET|SERFv2e)lU0B%LMd| zH_8U%e$=vub|WkM=oFh8Ma@Z3i{zK#0(`1sESwNOs|YqqK+HB zJLD@UvZBW{sdrqDgBi1x;R=|jA%8H5pO}nQF&RX#g2r7q(U^zHtQCD-t$0Tzny~Cl7eul)e^Fn1pJR zBEqC_fSIr_p{yE>-9`wLN}Pcxip@^tjKNd4a4#qyynagpyBkeadF~+EX4}msAufN1x2&(KGd(?eHX9Ko2=m z4j_3G6{|oc3yWosC1o;J;8%m?w#CO2rk-GW7=+`A#!<#e_9J=Xc_Kj40Yx5Q{gGVo z!bd~<#)ew8OA-!*t7(VY)9h@Yvt7~~Nt#%7RsW3N_rrxZik2*q*-q;A_dGsfJq=-@ z;%p**f{RMNJ#`kF!6=0$1R&A^0y_tzNL^sNN41)nbT0F>g2F{UBVph@;upxa&CVS7 zo7w*@?M0R)6T;Aa-V5EQMCeGgX1zb&m|hPky?uCXczXe=p^Be)dzALhyc`sti@e$>Dr<2agx17K29)m_&SmH zYQF?-T*|jGM-6F=X^qlYqgF~Q8--gj#}Bo$0ykD)8j#w(d@pXBbFQN6`IVh@u_?NH znhLx<*7D2XzwLQ-wV!8KjT1WBiUC_sROWxbXZ>?+%^C5~I4^E*QK&D-}N8^CWGHH{wdYeD|XFR`k5TjbGdM%-qy*Au$>L{JCvyLhFoelPmLY zPIUDboG8sT*i}_x1qyP@0S+8o7I4Ck;lwJ7S0peq#>X4S9E>b5u{2zU2ssa;6IHCE6CUj`)uVVnmhKVTj3V{jewy7;<#Bc4YC*zAHm zR=E5aSp2%K_I1pNNzxr}DtL6uobwgPqaXMdIrcJ8US84ja$hV%invjMgcPcPE$0A{ z@ph$LV$cXkytv^W>0id?LuGjrBYa;EONaebEMXhjHcMuj?|<+>-rt*8n{P=cCLB9bg_#B^U32j4R^Nl1JI6VM~U zVXh0WM+68SF`yfG`7~gqs=*1Ut5!`e9Fv-okic^j#*HZ}EE}DbQjl7ZkemQ7Or$x$ z&*1Lr!o~CPt{6A!!&IwG%8|3GP*5fr0NEfK3{_#~qKiPYR7?s_!#S|@*+^ED%_5YZ z-s`g_r1~B3j}P?r>f_h)e|E}TIUkpN5BkLgY#DeKzpADQEz+w?&d!*AcG;4%GaJq= z>YSg}RMOqCz?hzqGRbL~K4f~6c+ds43{z5XW(tnAPaSJY&q$l-vP>I1qv`w4RGkAG zr$2p9ld-Y!?T#mU7CzP0_0+(9+PhVePjB}LC}o;$0duK3qBu7I|GVi>*IYjoB zMwi%uW8OZ!1OMot%G3_Qp%gso&)kgOUajKV-P5Ic5+mFc}|>z#>oNw$f*ZD zf{ks6lMiv^rTpk>@RMnScQGfPx7?gb=e~2PtF?LWs;i4;w-)nL{-Z+1ZTOFnre< zt!M2}U_EPLaE=FCqq$~V6JlLsnmR$kcIfve!#Q*gxd~3__I!u^qRmT~%9(+E_CUKR3yz&9D4Fn=elk3>E`u9M$WHJ$NT(MnU+wVD<1MbcW0{sB@Vt=DhyjAK9H z8E1w(BSd+&?LD{;xmRxU9-k^}wfeK%D|YX3_wZ*pwB7bSUa{@v!CqxAs4?(VQ#}KN z2j&S;z7*9d*uhJE*!JBI-{$&jL#|t&r`7+}{`w%yXrDg*QzQ0vYue- zfOi@^H{$2=JImME3@3Sis^K`udg#jd{W1Uat)78;6I{1@#z;JNvh5rAAwPr8p4FzX zXDeqfawXC-CQH0f9h=T2SYk0&M$J=>9R6#CohZu5mZZK43KlO$B&26M70#Q%u4i zZYy(C@?O*_$f=^eWO4HzlQ$!$w_}=+yeH(l&fl3Imbp#OE#5fHXJy8pkebbI+wn;Y3+^n$@>~+ z9y(mKy{9|Hg3QcZ45fT-Wmyl39YsvnnmmGh;qvqQ2RgbNV$!TxTt6g0v zoC#|`KL6bLw;x#f6!wNQvgv}poRPy6R}EZVDwL?wuum$r)c) z+}Mn^bjXFne=PiGHj59m%>XZFf= zx)W1ZwQA#Veeiw{4)R*Y$LW0) zKF;9$8}V!S13aK}$8vpgb$IaLODn)GvjKZ5E2^Wkpuk&fQ9ck_bBB*>&>jPcd!C+A zW@D&lh-V05idqK(03s6rff=}FklnNfEe(fo5dnx`70nnIzm9Le)ZhQuy!iNekM;Lo zYL9S?{&G1H(DNcR;GPFSyO(8Z80MF)Gq% ziZP)`!yyg zH3UxFKEG`5OoHWl2H(yHE&-*;>j;{CgV}FHvnDQ@8*3@jX&gk+N$;qIm*{rXYc_|4 zxw)C6&CyYjVWu!sM7SL_JO2b~HX(<=i2qfS?M3eEx1j2qYhMwwUm3z?htT#M`UAR1 z5e9_4nwgLh{@4lh2P?AEKGiZ?#W-3~+@=Ea2}9ElH-9((NH8Cnk{KRbk3dFZOkpvy z!xDtO85M+?RG5)oXm(RQ)OKU-+8b?iZ>(8!qcm|z z_qcK0OA@madg+H=nMaBegSQdBrcTN$(TMS3@HbgQq9)s0V?en|)KEti=0VP-9*m56 zGJ`s7#5!aRYS}L3<){y{I7WoyY)s~GaygCQud;bf!;j^eFB!JkFdAi0wkR!mZ_mDW zdltNRaQ`oRUiR4DGh;=8DzLaXYf?!a{WmH~FVYai!j>Yw>^7(Ief4yzn z*SmLoy<^AM#w|~FKGqc)Gb$@)R7;d4EIzE*f5+7x=5rIdc;Q9iy!tS7q+s|wQ4>Sn zbcC=SQ5u0xs36x;FP@*^phudu-DZb)4}gCrHUS)MBFJu!#7G*Oe9Y9c9U`C4lorV| zi}rlkga`M|8apGm+8p=LfwUQ>vMt`ZQFYV2_;PpY%Y@>(8g^PvS98h!J0Fx`|#`xhtOT>w9uVPR@!a`}&?e;*ag_enQzELd=ZSMA%!tA4(4;m@U;`+9zTVeZ@uzwYVz z`25_t=Ran9`uc}+T2GA=rWiy5XY(whec|9w4Gr=0cEoHF(Sg*jQoQ2?#5L)l5NIaP zKDe*!t0%pu!kiEn!43+X4VeJwp@4&8LL-8F^yuc{ZGa;H$Aojr6L>l~+MB^g>$&_x z>GP9ozid7^<$(p8mVd|x-?!n<%^NHGyVf>$HQn4h<<+j8|9-Yz`fSRp3+ac2O~SV) zrQd$Id~)%P+*dpJ=m$^oK_4!!nDDcLd)uX7pV@V@{`=qO6t+M6Z{g{@V*Imn?+;u( zwkp%7dZOab%I2~x842-56t9IM3kQm(3hqml7W24C#9{y)DKv_c3kr=XGJQ;MLF{`S(XKL%doa*YF?5Rp#Nj&6L zlzJAD)Z;AC|1%g{Z-!la$aBb&R(J{+p@Vyj3_i4rCG(<&M{QPotSKDJL&D6_5jqFBH(B#SbMj#O7ORK{0cMVX z-^091SZ)#qcNLG5fw<)3L$$kh)r^f6r8jbx*VQjoT+S*V*TQkBZf+$v7RVsn_w^Qc zLqyoX``uTU_FkRO{$_hf01i9?o|1^{d-N2@^7e9rUaAd%bz~L#7&3@l2{nX02G_u! zGFQ-ne~N7O#u%z@Hr;9gSWoTXLQa(9p=l{l$;=@^)I#1t$0hQK6c06}3^ueOPNOCM zgGrRciV3|G<4U7f(DoZ$V=~KvaE1#I%E^CR220 zV#kX|YPbDquUqr3va0?3BZW!HMa7B9g=4e@udMso>&^Ao53l)Xtj=uC>T+{+bt~I` zrF+-c+e^e%uP$D4t%rZ$i8JS(seLqW)I;)FL*J<1=bMbfG%3QoIC;sp`OoS`q+wihSpx$|o z#T!vpylz5pkSB73F)+{z2W%}+@fq9+i4P7(i`*G9x^DRi+lxm4rnkh$`bPvrxWJju z`;q;xL{u{)Gi7$m4%0`ALqu#f6PX2cekSWKwZg;$U-ugW?v6WF{L9q`|Fqf9^GNJn z#lKxuaVTqGMMNC7`D_1>r7!jF>K0dgdT7kz@c~O)T;2G(rrGa?82L3J zRio-w<}D4HkN_AYf$ma~vlj?|zX>tvEN-S{Iuh`;;ep_4L718?Qfe{E0ujce=(DKO ziQsh_^z=uwZ_q6Lg9=bJ3g}S%7-{CYQDDCDvF7B+WMhcGuNU%R(Qwhko1Cmo{blwn zZ<)~7D2FIAy%=wlcW-ldpuY%1V7{rbf9%*TjWd_~h4&UM>g9Q(&93gZ5A1x# zobkkz=3Ymst5#G`Uzm~AQ$1~AX66FFub*%T1FfJyl9)eYi%OZ7_0C(aNB_x`B=>MbnAzlI z3U{C!U>R%D(G-=WCYtz%MIz~gxFActf_PqK#rhRA+SC z<;8Lovir=Iyp+~46@G@fQbCb1wHDc&nrHokL46W#?R8cGnwi?MtOS<1v4B?qy%3Y>L)4b|C-&QidSJ z6>YN_dqo8T)M~jn71MbK{km-Bkk66toQQmtjI@O8#B6lv33EpXnh&9tf=cR7M&_VH zg_JzSE$YEI1LKl_g(#DUEbIi=t#2%teqj2THB-8tnBQ}GUghed8T)H{Umx3EKW=(V zRPBV8*)LaiWb}6}TG#8h@VP}T_jVc_tM<%Ues=owv&-k~s&w({JU*-E`Gu+tYxbkD zM(M_ivp1V+4^BUH>hX;`pO*C*z~ps~{J%{W7ZY-!;kOD7azi^i#ZdIZzL^O%C?}Y~ zA7FQ#Y%ANxED_2hA)b82@mUF3zTV7NJeUNfK!WB@lW@Sc!5$;w;I;z`ga=(60f?Pl z$(af9Gm7gvlT%X`ZLvaRojn-3PP8yEtbaPHJ8WSC?Rsg$!m#eBr`I!>v>fXKOgabQ zz{|d3{yWZ{L$6c;8)khhnW*pJ%P=d?jY742s+BfV<@e4I^WT-#viFt1|940JvSrhs zk1hWBvSl{_Ajl=*oT>nMD4`Z~CXkAjS2Ku;I>iZDZ%8;g76;h>##JQ@%YjN2%- zkMMe3wW?s-rSrSy-E%j|dAuwC9kZ9L@5&>!3Xf#whTXe{EADDG)Q^G(@AA35GiEN! z7Ty<^eRSa9dyDw*Ip&Y<4XaQ+_$UzXl^VCMOM+;#XL=Ntai_}s4jSL^Ga+q3`q2I1iY(#@CVU*SvM zILNDB>Yjf?`u@;q-s9vc>6`mbN?)DE&jbsB%Mu1qA4dMW5bh3n3lz*z@)m~h?crQc zJl1i2-TD_g+pllfaJ}P5USC~PUvBQo`o>kcs)u@CSiJE1%B9yAEq-BH>#62hC!1&8 zk7xmVkCGq$qS`3lWbY9IpZK6TJZS5)mmmzaBB-pNjLcct*#GzY?)%4!`{yq_ID!9m=U3bMzuLd+tL;0!-nIDI1q+{D*zJv;3A@zpp-S_;VfFe)N5(+n)O% zpE2^eRQsi+`ZES!^ziqi^OE7L#PD}3F?{*FqHNE5*m_=earfbCUc2oYx9}CNGvq#%= z(9s}(=w&BCW7?V4nkr ztvo`0M*NUx>=P1{XW;rRp20r1e+DEn!*M2`0$@4U$d>-d_$4ytm0&Sz;D7tjB{XA% zCOtgDLW2T50z3jBy5w;3NYOPcqI?U{rJMa^|MsVwbz;NK6T0Rnx9oVjHK+c52iasa@FVXc3 zDu;@nLuGM1I%A>fn@-32zaVa@*Ru%bR1^=$Q3Vs+MLG-!p+M#E+w2y@fApj4YB0+f z7E@xpIm+ao?CzfI=HLK}DLgIQ80Ho3p6ZDi5$UPwTLpaG5Qq@m6@5S@=Hy7{a`1wxBlhw>^V>VbI-=FF1G&mSD(H1Y4h{j*WFkk1Y&`2PR^zy()ZUpW?$j8 z#}D(ub939C{eio{XY$jfN2HIRmp(qVkvEWzFdTPw$}kJX2V^0jgo->XWI>d%0T4`| zoC<4Q1{QD(ty$1EgmP2mJ7~tBZI$It3*cM?cmk--kmrn{fe}FwzCIoSo&k<}&cGY= z4oJ;_^=J*|3)|2Yh zq$C{}sy8j(@y*`--)#H7yu$)CHhF2u#XX=)YdkLBP#$G55Ob`HEoW8h_R+e+r?OE<4TIM zHO9)RwUi=YA7RD7mD{a?3%piYtEDm&M{~F-+#DGdA;(!6;~A0@fd=EGGi#|%Le6Fx zjJ1+w$`t7p^$&l0@ZjGcuCIUi?*|Wl`*8hd9^H?v+I@YtIxutUxb#Jf+hfzpvVzS` z(=!+MF3QNPN^|t+e0=SJ7p*L6{!6^-z(HR9(){@^N#ASMzrN%yNje^u5|EgeII47W z){LaQq$G=9|9dO%<=qrMRj<_H?_qii^g{QXF@*wKE`sRw zj`ZP?Jzs8qXwz#wP0#dq-`A18@Y?c$7hSvrkJ+<51aD^{>s3DPevqK?BA@!)Io|t) zM7N+9q|Z+skffirKKRS7)4baWf5+YY>$@BRo{(O=u~qu{Vh|d$Gd0zkU&9BS&P}`Q zD~Gn^tp@>ZsPUaG!Zl2o2iQJ`&_Z$!a`tR_VYL>$*<|*Lj2#jJ8;TVwc)BPEW{ZLi zHAywq4SOc0kCxf&FV57T|L0wIVLp5H+NXkoXOC%HEsax<;`u%MuGXtxp0uwwqWay% zLmBS!&a{IKk$DjXm3wYBjxbndnXjoP8%_Q1A+$3@UjmP|Ujtoqq`t=1gjk;=>)rM` zlkK~p_sQRNJH3z2LtcY?F{bw^=adinO@W6R@uytWxl#Nn@c&W#DKni~M}-Fa`+6E6 z8&Q#OjXwcT8Lg6xr1%rzFg!3U{uE(N2Jmqa$+N(_k&^*#ScISqB6|3=Z=Ss7(73i2 zRv%g)pWHD!C3j0(Mqf(S?!rUT_b+zOd79_$@>zIkRblT3!kVt~@}9Js^r`LTuC;u~ zllTAq*rfH+Uw?U7dac&m}p5ObG(Q|Ma;~d96U;ot!Xc+Xe%I z{Qu=cTQE7<{-Nm3{Ua}le)>$8La@SiQr1@-_B86wU2BOXzidWYd`?0Rg=Hhcj3Lyb z!^_hHHI>d7`$@i9+k9^&%7wzo@JMiUcZj*n%v5*$Vh#^?H>S`pMvSXgytPCk%%v+N zp8T+*?Ss?xcV!Eg7Wt{%r2GG*apB!sTHJV7^`H11fueusfM@oh=7q}>la_TYSRS9S z)Nkw8`*wc0Jvp}UzxoW)IT!Uj>1?OkSy~^oOS)MUo6A@IUENRjz&Qn4u*UqN zxD4LU;-Ci^m>C&NJnx6ID6+#x!g7-MNB{y#BUVTi&O?x%&>K}dddD_~=nT2_TqSTj z1kWnAdltjN|LkFwk`d?mA3cc9F$SGolM|yeVlt5H7#0#76zJ#cBo#^nIQ~v8?9De6seRs5f zaBks(;`Uu*78K3fE6ZqUjMf{xKN4B8>nu*uj)=kOBH&=Qw$6yb!3daPh&ZiI1>HlX zf~cloB~-MzlhEH1Y>;Nk$gLCcIj0j;FgLAtDN8$sF2w*sj$$)$_~J-Ic$5hpHXNyI zFYBg(QfzErj=L#*Kf^X9f+BCcyR0rNE@rLw!0(oOdqr)Gp7-*ulah3?o;=N)?(3Q} zeo|xdOjN`+WtNp?!R5T-optk9OGo7CUFzrcYVFOd&8KE9ew7z@GG6a*^pA@FcR8xr zg;&IB!NHE?g?>T$>$XKU+d;n(@CriADxa&k?2VWen+HcM%x^F<*Jc-)W4fFV;&u2~ z4OG`!XNVKCru<(hw+Pz}QEwwD367adNPmMiLrHMn2K6va*TjCerubzT^c!m*_{+hI z(qF48rN5tlS^D^%BYfCv7kKB&O5XX>;lG@z`R>ovkL}w3_>?J+?b&~Mswx7%tvE5m z9N&0%H-1;N+{5eF9wD3R=X}Z4BWu?lxs^-)Ueb8FF1|Z^$wv>)A{V{LO+>zRGhjpU zyc}qJfldhbTa{hX_$XW07S9`@@eLYN^12}!pUG^BPZ*J2HZp=|OH3gPAOo>HD+{Dw zRn7ap*|O))J7%A%FW%X>_5CHw-rH2ab8O?u7U{1Py3gyGRJtHPqI~_NiuIHB?W$fo zj$g3i(yUpRRxCMO;p9-(-?--PapUe@)3mYF*`s`~Fsg6G@>OG(PnfWL;7t9xqDdR7 zHat?bozayHizev&c@)QslH+)xA?|Mfw{g7WB$fe?mz?M4LrTA9L>!OOcC3LsIg%$A zd?*3VVX{G}u|cGILs3fXBpC`7AsHpB%4O&PYJi0T7NZ?Fs{3jJmIgIWVlWt6G0S^t zP+ecS0-hxRU3b;?DOSh;E+UtK9PCdJ!wKeMxC}1d5)9YViSi!CN|14 zd`kC=)*mpEMk;Z8%c5vo5|auXac2)siqWSVBvG=Mrwm)HDaA%Wd z`i*nXPQY#~$)lae`8SPIb~zRRDC{VecgXe!{Z^cczDm(JEo8fTq8kJ(dk)>k7%S6Y zf9R-WXPsfT2sRdz5u1T~qRl~ZI?DBD=J#N#B4yYq+f22gD(zEbNo90y_)Rpa`K{GG zhgu$8c=;E_ODAT|wg{?gA|FsVdU7c*9-1*Hwz{BU)*SIyhFHgyhh3b{FK@c5_Uhis z&DC1nxW1{5u?F{ON6Y-;jdNV$J*2inZ3Vp(@1EVUU)I~jc=7k}0HqUsh7CD4*&FyD z#@R^^NJ@xKk4eYLLgw4i=j~5c0S!pgcCg<3Q;Sa59$J81_cq2vMiy6$tw=P+H7z2L zG@S07Ry(t*YKCc&U-^bftve06(ai;QtNdoVRiupRoilz^h35?ajaBu_UA(rpOxjRB zeeLAxr7J5^nkdB=cv%86dtZGoc7N=02wIji8D@YCGijg!K{s271bYZ`eif%jpCUa; zEocRe@H&f=*fdEh7c#TO7)7w=&Z~dY!g7)Mms685&e1o@-NWd~{L3`Y5n2)Kd8oGT zl!QRCpQR!C-~tQ$8`Bd|En^^2X;8mkfAp!De7?UWF?QhULvNL=z0}?P*qY)uA0Bu< zHmSgm&!749k@~ZI{%=0z^UsQvE3a=Y)vON4OO>AC+cLg?y!GnQy1JuRTOa>ELpsbC zrRD{!)|76&zVZU^uz$bw4;gkUf7pD9+%afOTDX)$+)->u8WWA!3HN- z(+9JdC`S&8CxI0o6IkJoOhT)KzACAs#F*f|XU|hTzq#^FUwa{9;CG|4J?hQx9Phf)({rWk_&er$kL*#u8;H2DwtRmh;h5};{{s4e2b8>2 zZnD+$gT7aoVqvcbh~l6#kg{0dB7?qZU8j7?bR~U+8C4Gvvgd~t3fTMtDJm2QWLR;7 zJf?Yq$y-Ai9IRD?`irg?q=5tbrJFBwcfZK|pu+Xe9em9_Z}l#H`&e!5&)!9HC$CyKe?a>F27INwdjA1lePjN@8`91Fy}vj!XVa(u?CrI= z)2HX=l$XnU&PR^Ir%H}OEajk&+zvov*WN-d3HT|+rzX3;6XK^U-&wo%oxZ+bu3h)` z%DR>Z=Cqw{X+1M%&Y2eJkLgj7m=qk9u6nrl#l?$XT!LJNB`+*#IMz^qtg+!(ef_ak zq<@+sl9Ip&W6?W36}U_~+fIw0uNQ!d(pfEr!*NK@6gXI4a9IMiiQZ$N!fDhh*yy4L zeY3UXQ@}W1q$N{3ZGM*3J45)lci@YfpsU-Lg#1AIkahJovGBvJvX>(j%w1LN3^PI zK7uYONhkOSm!cAL{Zfw{X;|&&B(-Xt4-#JDoCl(WSI}!D5K?)g#f$AHB7Eq6CZ4CU zpC;S3tY>T64((zVSleRCIKSiZ-fH{ocohg>0w~&<+!g{#TNUMSg;z#^ptz^R4hBaF z1dk5`!NmTqhHZdBt;Pz2aOf^q(f1?(FxbjqphkTqrhMQ`H8@kant|tzfI$M>0u(S@ z16G95M8J1&Xnq0w^|0R%1!6hMNHHb0g#=~FI~{ognvdURJzD!QT=21*=<*X#Aw#%& z;h5i8!i6jOL3Q`0hb|ZeT&M>wcpGwqCe+8P#sCuHWnWR3n5Ywdg$o$t;+J~gJq!@j zwJs$KC`Qj{p7kK90&Y;PMjeaw$k%CBD7%i{4Oj5Fi1qP|MKI z5_KwHUeNgg#WCkch(mSXgs@1-oUxXPb;Tz=> zMlE<{oM74^o2!`SxjTZjJkEELpdK)OjDBe0%?!VokFZR*V(#DnN5 z(LjWoXN5idO&q_Y&bDXe`29!644%KU$D*eP&aztKLAe=@J#S``Q!U2Dh8Z!kjCLK4 zZKH&PI0{M5s2^bvjFoUKkN1DhOJW#=?AnCh4njO~CSWMrL+Q2lsXr>7;Othz8dM_oxj zPMSI<+M*Jiew@ctPj85fj5f`vr@w{DpT=THUK7k%ZhQi_%HrpRtQ22#%_X8jmoO{r2=bz@0ZBEJHGUhCqJ}Zl{}JQlgpIdfM)FCfI%Sd?L2v}9+;2&Bx5X!g z^WF}TG}{Wx26m4iH>SG)7Rl-sR6%5;8@``AJkH)o7#NiaS!{x}8e@ia#dK71!Ww~d z=$E5bD{&aA3Q%MZ)%LfN?t(Qu63Xwe1`cX;yGFu%Ty}gmzB$&Mh~}z(u^#Dc!U4k) zmQfH1OVmq-gq&eu1(ZA4R)e$_+Z@~Z@Kma zs%M%iCIC5WBUE4RzS>xt9G&X3!>Mrl@=PeHeZRd6_~Wcmc$A=8^z=Ls8`Rc(2h_U} z&Zy(iQz#hG$XxEUB`^zJ;S(H4XN`}I5!D>(veiL;=(mcgi!xhEM1GH!x+`L2F|-^S zBFL#>k0LRM+JmBtu?FoH)oPKPajeiV{iod_ifYV#lW$bB!o$!IatuY8L*hc?@QT4C z_Z?8aE<C2McMvGwRijWt)PMtn7t(HJ;@NrTTVC|$j z0n;Y$t-xerUij$Vr!LE|cTcD}YLjuGN0B3bf6U_N2M1UnMh3*i3OcPLlpu_hSFn&M zlC)!FkWmW|J-T6Yd>gP0q$WK>(v{fGpaTY-gMjbwH9BF{7jL?kN|&QAA>t0 zg_w8c?%?PHkQ%*i$KY;FwQdlAdu{m7kn#YUF3^d$7D>b7i#W_xCdGoIfk1Eq(pSvP)a$ zxAlyVDRZmT7l)-3gj8-Ut6tqUXXX8i-u9Ny?t5QgsR#V@tN}`Pu?}5ec!5Iu%d}U3ara`8bPey(Y-uDhm2w zOq?-=K?rLUX%Y_yC(3zwn133bUZ>Hop;S`z+-C7$Sk$usH-S|yvbkHScUZ4Syi~0x zHz3;Eux4%O1dm44NU`O)+|kIGfU<{t38;JSZVq}bnNN147z*_Y$w0CoNEuI8#?VEw z6e6og83JZGR%EpqgmYUy417qZbE)CycS(QZ6T78x#V(GT&B-^P8SY*B*3Ppee*40O zPaH5gpuF55-76Klm6z)TKZlo(T)3sXX+W~~HYqR{{|T4}!QX^95|kD`Dmc*B2UB~H zDOU>ru!v+qU?fFb6osaJ61kP3&~O}D8U%4cFatC;9c2g_kq@0wuZTt#vU#YLg{P^r zFT1xtYyMN8_W+OX=P&wAt2b3o4j2@chs~8)ttT6{{^iYcWBXmiZ2KO zzPL(x;xh43&KH?}_2^@WcUKU?pFBpJ&(*~e=_oxTK05n_oiG1+o^OO(+4E=PGd|&c`G~$ZK4|&X$H1!`9l9M|;F{XkSdlYz zM(Eh&uJ+Aq9@`h({@()AO$`pfikK=KWp1_1gLF z+Wy}}-~24**V4a5W7MZxHh;qQnhO8zMa)w%K^IP?-hB!2G11{PYu-&IqSYqFt3Xyb zDQLg22Bu-hl*fWb>_LI5)*RrksmYlr2f9B;Es75b3Yj{HM#?uUS-cA36;dJlfGlo5SS6-8V zmWZzUy`HBRRc!Q=jD8#L;Xn25uc##8v-4B;Bc74~?Y@-TWeGAt2aO7Y6E{CEGef1* zdSWJ)j=5WyWym6Qic%>;Il`7V0ZXaxf~cMA!0U8e6AWzG>kxORg<#^fw)HwnXm^`9 zzOVp&!weqgDBo}z|2a}|Vre{Nds~Gb{8Xk?0E=>223>b#j{`qst^^#R-_`x?*VaZ? z`50a1Nv>TkK|WQ{Yp-^6Tw51i>3x*fRnqU*+F#A@EKV+QaT%9f)R8~%s&h$7VOK#x zS7AyC|INX#Ho5Dh>vjl~t8?G<)xm>b;yUkMST5D!_b(5&Kfm11@uuYDxBQy)M^SL} zTmF3eylcz-0Pj%)QR0348pQUqxN%&)Wkwh}u$bV=8l5WWb;!QPNCkm&g!5Aacb{4V zgAxinST{hWM-G%D4cs> zJ~1S2Y_zmTpAZ^1HYR3lTxi0{{_~EiX9rR9S(W4Y{{Hh0>XT7PH`Na3`z?#cyK1Er zt?PtE5AbhjUB@r7@x6{y%{gQ0@PBSgEr9Q(%FX5r3C*)w?0@V>$Fe z4dcZLNDj&vW3-mZ zy7Zu!=(8tZubjPbR6&UT*#p-ed3M5P>7DPslYYIb)YAf$qH4(22d6)uvP$PyE13w;mEv1C} zftP}z$h?$Bh{63WK*|qes1dQA(e8d4r$F-W--4xr9l(-+gN`JGkHRc~CIcq*`Y+r6{9N5u#p5y1|zrlyjN`rKu8o8n``Q);HJUU${|!XWNm@IHRuf}d4H z2D$oG)y&@>bYsSSvyUxWP_=i%jqv@-et@%T#Q6Th{W!CJ!T-^bj#{nApa6zb5j>*l=!p?(?QrJJ>=h-yLe9sB zTH+!oNmXFqLYMTN^hxoa1=$w=kc=X5E!+F%r^RD3CiSoPS7c9yb9KJJ^lL_e$2vOo|(1m&9xnGo^BS3p4s~MkELf*zXfi=@=#S`z6!-B zXIoq&P(=v5;yJX-!NefhSrrHa0Hc&>79av}aH;^)i8U_Fln%$%pYC@1B0aE6+<^ z5p?1?@#>+Mnzg*N-fl=#5gUZ~)b9_R=J|mYyu(Qu?=a_9_$71`+AF$R4%_RkcVQPs zj!456B2wAxRV6%TFEx~*JD@2>LCNkZ-l^Uh!Y}LZY4XokySUUy_m)s-`PN2~bXjTm{`y|uQ5m-;k_?&F*=kx1Nwd@z4RzZO|sO+0C@l=}i_ zjdi+Js18@DZfPz8tC+fn1Ax&&v%JU`+oXc8>&{DBRAd1J25?+>Xh39OB)-kx+hjEB z(NH0ka#iHfx7Ln`%&znH@j?wHluvMta3lrfMohz0#=U?1qW8v9|9to4BLCbqd)FLw zi+6HKE*G7hMZt^Lly5lQwP&naK(xzA!uZv*Ui=-hWggfHDK{3(lh=t6Xa{9QcnTU(sYR3(&&keH z(Ihk|>g$Zc++;*xzTj8c_hEBpp%Gusf^|rNOW?iP*rf z%{!b8NCmb<4haN|AQ%PPBGD}VPDB$}%4p`V1E+;r5YCNe%=Wwu}-XYASKY+d+Xc9R~TykX!7*U~!L$iHVPi_fSA#jYR%5 zP_QWE9r%X=iktTfmwpl?2L8q3k3(^2uEh`+8{vtD)bSYT^}o(Em5p1O=1<$Wh*;xK zzyU3bGXA8kOFb;Y$aE3>h0F|^g_g?QZ$TqGWDzHz39CH`>46P6V-Xdl3ppi;g{<_! zT1yslx2GSr7->y+UQP5vPsKtt9*A^x8d#^~{3rwZ82vbg7o|mHLf+W5e6@?C5&49J zUujvJMN;&KOCGgm7bH)!&f9DAz!&p817piFQrxREOXG_kI>E1(MbLGx{M(425GPeh ztgqkR+{)yI?e=1b=a8L;^fQXhNuqRmlWGy3u z0qk592j@{RFgB^d&CzO_mZ6cI2xD}fZ9NzkThv0WrZIlFAU`iBEj1-MK03mPqb!>{P&nXDRw>?R0?`mxgY04m#S#}$c(?txS9>R|*t}v=l-Ko)o`Tk0 zh4YroE9Ch%>RvoMk4fl3!Cj7Stq|NzCtv$e5TiR@yr=WtWsTL-Gowcd69dPMpS*T` zX?t~b*OB?xKXlI9wT>kB_XmcFboGp)ifmdcN&-QU8JUsk z%ZTLwv5z7B#?@wFt>^J*5DA3!X=PasMTp$M@f}hhYV8@%8Fg8r711eCxg;Q^{}_8 zV>UCGfO`fiuc7o&l+!Rf1clSVEP+(9G;e`?ihdOq4*iNe7Mji#A4;E{JR$w=gB7d= z^$GNpURm4u{J`$r16R8`o*&q=M|!?fdh3nn4TYkcYngPQjP*R_`yP1i_>!$&iGmOr z=eS_8@cMnyXCJLt@ev<#-$_3BSNI9f-N$p+IyZy@A(c&)Yh+LE5dRSvs<=p|R@Kx!iV(@2GSjZFUmsi`1MC=rt#n;=$H5 z6(gZ4PqPc)A5xS!rnx!l^rA{NnshhwE!SZB&a}amB01`VVq${P<~SxdCKoRGu%PJR zXz09w(Bz_=5cnN3J9-ckWmwyEIW6GJlm9J}=k5LaV}>E3te)p9n5eE0ik7%}I7koj z`yCCgbNPohNE`Tlt4AQCP45ueWR!%uF>siTJQaKeGe&i~)YNzm{P)0CzZ%ki!8ehM zP|uk_Rs9x2AUYTL!mmaiMGrvJ$~dDaf~&!dLZp1b8zdmxNd*JUDtVA0)&O8NQP|fE zG=`z0M!L(0d*Cr*3tNyHZ}BmkIZhs^fUl1T^L6)1GdhB#5$?5WzPDqOAaXS#BIW$0 zlh1%u6I{R%C2kg(GM)S-a74##-=CIBMNdJPJ%lue0_@034X?Mf5PcTluL7pShH^A_ z^dWS&1spWI9$pHqhA<0WZx^M=^Xu!0c_qP~jIJ)VlCkX}zgnQld zAW&R*-c>Bl7gVlVMUKqi^qd=~cU!}`>(n}Th!eDP5M0DHo*#c_jEdA;3+rP_BB89Yz%K6 z^6Xkmdf}K@#u=I#>SokbmrfX4GN!$-JtrHfHBb){6JiQt3+(wtn6J<9LkqRWMiq5x zD8GQ@qMS%1MH;<~U?||v)Ckx}#!D*ozhLwd$vo2(jiuUK=5B)jiIpmiZuZ2VZvIJL z{m-=@ZHlPTdU`C++H>yEd5=s_^>}gAoQ5TLu+h?dOhHtPW^&@hFR%IH6{7$;0| zT26<>85|ftP0h+|4h(zrinlkxZRj=R)**Q2nU zf=KUvsTSY^U8W=EIu9|sAm|Id78f0_5u$^^&eRlsv>90%1jvypCJ5lvN)AukBr*ro zaG9~R66)|Th$y#2HVQnSY&qutzMCZC$z+xftZpGm*}?{}w5dz^zcOZSn z5v!aD$_uJVP6}3OH39xUI&^t*=LOCUsGuq>f<&1pgCfPI(xbQtB}M=VDffuM0ezaR z{x4Q6X3Y|(LKLeVF#IN@95BwMH0`q9QfqOon?AK_TuDKGLPR8NbAM~_l01o4dx&HN zg1TDx8#BSYWbqJ;-ZSzw0vQt^a_*-O0Ty}87_|pLddXEu>OaqXpnKf%h53m!mgJd1 z0X~`K4I885?g>s(KEAQltE*~v%#DbjlM~vvcjMF*NtJWv zx5vkqEq=LtV=e#Mj*nKxWqU~Pj$Tsc?>R-6m%&fe#(G!BWN3wZrf#olUO1|1Txml= zbwiALls+RPVti#{ct_5pdp1-wZpbWIHuupFfA_DDt&cu&Pg8BxK%RR(t5-v3XvmHE zivH1Taq&h(8S_b0E^r1|Clk=@30AR318rEZ2d1dmd?%JbIvfu#~_o!3&S)V!OUz?^JuDlL!vcHes$s<9MukUFB&qAbJ3Ci;NLvo{!; zAvk82T)|Kd2^Opuh7zifgTX9%Lq=)tl&z;KGizeKyjKi-vBKLccG{?l`?pS+vi1Io zQBx9dRS3Y<#3`ATr?yV{udsrKyqv=5=)#;88wa-FpWMd6u>KV}g~G!N9$#1D=T@go zjK=`^Iyb+Pb&oGt{?saquX~+Suq=dwo$B0uEvuedUc6^zX@LJgvVY(w4+wAi2b8Yd zQ~c~Fz!dn^DG#MY{8rF#Qy8B>>{$2(;&9$v1Y$p)2G|b1KJe!OsZ?;IAZZ4ilFG7z zA&_~FTFp@3LSz#SO{@v9K=G#r*|+(;_Hk28M5LQq6A+655D+2^xJr2_W_tSgh%)X$ zjF5^b{6c!REYQ>zzhcL0kD0n+0xcgH68$?DMc0n=D-_g;8o~Dyx6p@|A9}=5fAe2n za0N#3Z>Su$EXeOJb_CwhKARAa^Mel~mYZN1>xrE)PbDm9l^@0up}8Qu7+NrN7(_%g zO^^Ysbr9c#t+L!rB;CMg5gQfjVTv_Hx#?(@t>VOy$tFFGGeU=D1f`@=C>vlhu0(QM zX7I~8QM~H0W_}d!JL;^(&oE26E zDlI9|*+CCym`EwA=#YZ4IrvKLQ}PuiPA)}cuw`a&TwYE_VP+wonjU5L^T9Yo@(q&Z zPv^wguzX%PFnPoHBrQ^_oF9cK73&ubB{K&PQmDrECqG-0_w$A!Kv;OpYUKBu44hT%wxHGRs7Fq?Cq+06s+8UdV+Y`jDqN)2s_6eGqg(Md46_xK6Gt zD3FIYBrrNC+Q-|K(j)@-0ERoT0*7G?%)3k$H5|H4)wb(z48e``ocmMqW(>@>!-&|Y zUf(!~2H*dte?#EOvh>7CA)St0QhD;!c< zta*{D8+EmP4^NM79kt}4`uc~K_CC~LEWAH8Z{Vev>Y|cqQ4K96)1swwiIv?~mkVEb z2eh1PZ$CGC_PO@&-#&WS=VX~Wyu81%vcG)kBNdxypNzk07UAroVHuR5k7}0(G`R#% zL8mDyj2G29R4a1Ys0gSN#0B6yMz$Fni3f93`H}L}IpiiFD*>Jf($nOh(K}4lsw&GG zMUL!({81%YC2(eE#%AKnqRnO#T-i|C7;~`go-u`6mf{ROZMB>}L$D#C$@s&N(^o3T zsWzuQd>CgC*2(21N~O*Srde-ar}HW?P=zrl7~*fu!KRuW>f0#oP#>Yb(6CMkck+c4 z@e$-v(ae_~i=zRdbrG4cthoes?m-t8rCc&Z_37Rk``Agp~OWF5QnWUHZbJo)?zg|D}KNWJe)gH1Mvo z?{926)rzSKji=|ZeV0qInl0+}TnKVk+AJQ>Lj_|f1P3G~%51Pv#aKc&SsT$PY*66_ zcnl+-ph5DH<;{@Ksd;FWccjUVfMg~iw;x9l@DHKxM90vSGpIXu#|FhFgCBCln!z&$KM^n6 z5227|Zhv#3Je;EA#r~yBN6m8bI6CF=!~C~)11Y41h8@4^m*-Th{$RIP(|i}zM2*ie zyi>U}jqn@xC?h!>+S5w1@S>vd^X$KPE*rc!EU?iNZtq8bg+L1`@^a{Ke%|GB<*i_^j}Bb;*(GJ z4_w%}V`7Jk_rn7p#R*;UQtbY#4KtrVbl_Tj<8^7rQ^$Ot;R;!+RY+!F!&ePIAYkKf+zd;Idlt7=ud+e0SR{o(L0)o}hL$ zgN#@jj9ICQe*oz;DB4tmoMF(y)`boRpB_Ppcq;V7Hne-U#0)M&zCEfnAYZJ@Z7hP_ zS6OTEOG`yx;_TEhX=CDJBh6Nb$>CUY`Ur?IeVAOR>U0wo1Vx71)Gm$sCqPixu7EIH zc;);PMcRGZhT`hN0qHYo#!rT(OZ7i<>&wEOoC5rd#x)eyUvIr}3(x|#mC{Stkc~gqYwgbBIYA@Xrn^n3j=P9zJ#Ez|nxR2`M?5O(-f`^@Q~P zw7~rETgURY;Jpv^pW|_j+d} zK92t&Of+eeW?R=5@y^q z%DEI9>4nP!EU}zMiEd5NX{K?S$j!!&i;8fsreLTGma$VfAAN;3M6ayYcz%Z05+9^!cUd!;u;C-`2J z>kdHzKBxDQ+J;;&7bn}U8`LSv`bY-P-&YRG+6bRS4HQHrPQnC%$P-DB~I zk3ijJBve-v?qdR|poUSTv4#!OhW5r-$jU6q*t6DIQf>=)1frBUI-8yJQ*T26a_}|` zFo}&ec!P7edw3gZg6FW}b6I5*22WDt$L&X_oP1MsC}OfF{Swz|@Y^p`)-3476pL z0>d)2OiNqZ>DPs!Q%c*JDQ%~volZX>vGP6VzOObfA(`p__xpDSNTmC|y7%03&pqpN z)V&L}g2N)bHGARn>o;c^Kqj0+JMjVN6y$D+Sc2$Cr^{D_OlDFZRvCsWf(Dr`q(4!^ z&~@lxf(VR)iJ|0Lv4pHE7|(LEGW@>0T(}|4po%xcz@TPik!l4laa^o2QYdIMvh)$t z6dHW%TM8APdJ1&NUB00bgb?+#_XM9}De+*=jot&5JiFCtp zE5L&1-8pu7>M&@abwPiXmq1fKj_V`V#t^GP-$kH$WM!I7X@CKjI4^cr0n3)rjbu{G z3m`uqR3J8ge#iBzaiym>&7n&VcVTvOx5kybJ*!{||M3IY@c$RLdRAk3n);y0(i81< ztyr8fqRG$qR$|&r?lGDp$9p|K5K!yMi64U3a$tWCL>A}fSYUe=*x-crC%BD*mhe!G z>kv1P&n501SgI_Ehb|tn$CV4x?RLY&Mx~scxJggYZ}Ck~v=tnyxO`C|+{7{xbXcSlG48?^U=KK8&U&y1*|_!QN9KaW zVh)dbe7VMe15}Bi20STl2qgUCJb-<0Sdi`~JJFc`d@Bq`m#e~6VYd1UX4N4r!ChjO zY0EN4G-_XMHP8)|G#?(O4f)a#(F?+LW`lZew}l?NcK>th$KSuDp+!~exao`6jm^DR z_@%QZe{_F`qu{RQhrOHD*`49pK_PmrZ%O~H%lF>crV-~KJh*Pt?1{`CgQ;;{%c?%f zlRK|tPKSZ?N)gy$J0uZnqt0f9-0vcmG#m+JJdsNfhuj5rLM)WpHxyAY4WUh(8U#Og z;%R2J%DG9T>1%;my@5s`vmp%(>#X?gUcSwDhCi^ z1^z*yzYrm47kQv_O2P>siCT7;m3>faJf5%zF8gqZ=R&bLMjRxMq0e<*w8h9w8i zt@!-J9hviO;x{ie`o(lpj%e+T8u`0Mqg%G^f8~X(6-#P+LgQKCk_Ct79{TXtdrciL0e=o^J%t7ez~?k3sJupg))qK9~5QB@>KN2t9<<_)_`Wn6|ne47C;FRf)gj z+K@-#6H#?NbPD8j#4GV>EG6lgKs~r93oCq?W(~#OW$DEZHC*k?bd%*Su^ZG5c+{99 zQuE^JPnQflH|P4Bb!O*ddG)Q!iw++y(k%0h~32 z7;woTE&=DkIIaFTeImOUMHvF;IdXJsu+p^E9ZA#Kuc(JcaB93Dv?x6#)GQ z%ZVx!ew&FZAWDKJqUOS{m)%oSyS=1;K&{Uh&GPmJ_Oz+P?FF6r{CufT_?5q`F{gBR zN$;W3lA5%Yn%um+@&$vX+dJ#)Gt(z-S&sdOyGGPw|LxqCNCt|2D1A-jYy?mj+kVz* znik8p6f5L}5gaeNS(UAa^`A@*+WEvVg7ByH**Uw{QjoIi$#)if&6?M7&AOIvO1fs1 zolUx1FK8bm8JL*2i#ryv#Aj{=9t3n~#0_QM4Pt5}8*2Vu2{){@Z zJksb0ODWTg+!<06bXYyhN0v1@dkL<3tF9QOPlRU^)S=f-VFPfRE;?H9}vQG*@J!1Z98)t2xB~@v(62(g0I^yv~-nIm`G3@NtNd zif#2BX_^Bj%L0BVn-?o9B+(HiTTtM#qGby0%Y_pW#}w-83W3i^#u)`1>f^IUt4UD@ z(SWHEEI2M3nmZjQ#CdW3BGXVi1lvtBf{X}FA3SGic)m6Myjs+WfP_I}DPDuyDdja_ z{ckc0x|07lv>i66!Hr9$m?vT@5)Er1by<3xPHy zk7FtR0xX5VT9u_hR$m_zR7g?52&$+d*$XjcHfMP)fg}{;+LYBKODe@;6RsX8Jh8^o z)Sf>{WHFn~FU<2+K5)$zOH+AFb~!{@c1`?MG2JkIx<6a(1W*5Jh&^00;stig^o0CxIH z*5KoOSVN#hSwq}BCWUJ13c~zdGQF1;omgXT>c~e$NR5fy$lmqm)Mqd3NSMj--7sbq z6$m#J!EwpxPxm+I@8muou~_N;R_;1|?D{4YL_Z}ms3c(=^arXdE)Bv!oN|H7aV|s! zCT-W0i+-9RlWuuj^X5!Yq9A_FPdZ)OsNZOI`8oJNrLlN7C z3*SrGhX8kH#GGk1;HQ1CA@hNUDv*LpyO4*o3J(l+L6p&9N1(pKbv8nN7`a~{i^7sx za)KNhgp4|Q46;0vzaC9}1!Ow#aL~IBT^)7gzfn9;c2ry>BwM+FbS+Zwl{}S@l~LhL zLZ;EVg4?NWdGQVRw91Y|ry|Tm$v3uRdDer5x|(2{|KN{rx^NP118f(g133!+!0hVQ zh%Up3Vl~*+SU)W+10xib9vUMx;F$Cgwb6L4$eATls~SL*1SxAq&WBPm?c^y7e`s<) z8VKj+8%LwQwZ`04U)r;Mfq1C0)o{A}z@8`~lL7$X^nxpm|I41p;vV$`qOal_yfisG{Nu;5H%TD-Kxp9A)g{ z@B%-QoD2j~Cc>*IiG3EE4+K%#Qp`vYanij!9ew^iaVKAbROb&SF6DCgg?|ZWi5vSF zP#>ybck;zwPWB74n8-oths-bhD;Y7tlH}IXDltb_B>pLGx!9+$cnM;m1XUgY&BEEx z0OU%8aL^@O$FU@J+C%chPWLD{Y&xfNb5j-406h%W(nxXi4Bx`G9*d@qZ7(X`BLU#w+qLjrUNC(Qz#h)sG&wI7Vnz$ z;~ZR(;DSo@ZdngpmNMW>-x|qw!MkWjt)dS*3V>gp@{-ZDOQagAe6WrW-ZqU|LcKBS zx5Z`XS%XSO7>SGnQ2nXCi6l)RHVU6Wwjz*?f$Rws#gGtY55Y&`swhVz&`^0}MSXEm zZjLzvzI-)T#+T`hl$OQn3J{8k^+J)?3_FnJGiq{GAuIA^WPfI*m#UIQ_oEy&C|G0ifZKkb$WT~aV zkd;0dT{390`Ud&uU;ahMwziS^c9U<>B6I1)nBwQVRSSnZ=j5l^B8$`PT6;tEx26)E$=<-b51turZL3Px zW`-IK?&!^fL%r)gzyuV9(%IhGt^7-7L8`380*osDjdW2}**O%MZOZJZ$*FXg^Co^y zXm(9K&wl`pB73YI{nD!#&Q2}F0paZ2kcy!*&6tN{V}uv^+sI~%k10fKQGUcZtQh>yE)L9jVQ$5KA;m2OCUu6{!qkGML0&Lgg_ z{Jxkx$BFk+-m#d*Vw?3uC)Bl2-|o**ez{hKWEOi7M@9z$8YfE9M;3s2pb;=%p)!^j4p<7ClAjTTQ8tn_~GKS)K`?kXPgIo@TU5V1alp@nfK)8NP>Ctke7k%BW+*3O8D0)KfZdgI7& zL-A)I_eR-3MoU*+Euf7XYiHH9m6hhXUD-^ARq~ax3h^T)x$)GV zE-9I$3X;b|ZxcQ3OSfLNeXwRFpDng;y|^v<#F0mj-8%6ObW7sTHaLKZKZ&9@M*wAv zj4t?|QE*Nvy+TDfL{}^aBp7oz!1E|O7AYx=puA=QK`O2ZBqXVj=9qYb z;x)=Rq&N~aTNyQr0{OYFY$S!&@HKJ7ndz7iIt>w--4b$mIaXH=QX*CS@X^Os7FccD z1m|yfQ!*beEZuQ!hu`0Rc$>W@-P!O~ZvBd<&pr1TF{ZsWS~C3LH#bU$re#E##DXxf zckc`Nr}T9-jSbQljy!tZ>4|r6F)%uE=&M_ST+uw_*#oU79np0Y$jj$}n`#jljparM zkkW*bqTSH!;R7MKWsDpFkDqLW)IA1*NFS%0R*s?KK9%e4zpl>r-u25PYabdp z^A8=(_dVXd<{$1~{b2N`-zk_M6#j7h-u~>Og@<>2|LP-cc3boAjRuD?KOzkB!tkS; zx85{+Rp~8(#{9x{Up{{Q_pg?uiC@g#*0yncmNqzNM{~o52+yxHkcB75R$l_0=1Si`iD%Zsf}|HNmIz3SJz>-egNHthL_wX6Sf^`n12aaZ*1KW22#%aUfDf7LJE zm>gW){yJa98*Z4tud8eJEe&%q`k&FS{@7%*7?J|4pgZNQw5ti1xCT{D*@RwE zp6xW^j8-7r2;r%c;E9uBN}!(}QOBW?~Z!#3YQg#=@SKv=U z8pm>&v}2Jiun5Jx;z7s)Vq=&e3sPvPj3_=#4n_JnAH^b3(7}qolSq;hzmw&h z-8v5xz=gnXDBQiTGJmsweO_}Qf6ZthY2^ZwT|Zi|y(zo&N2U3J{`ODbRn{patux5o z9dRU9itbKAE(8=UHYVsxF@3p=T@4^=Nv+v9-Krte<)~#@(yEo}cV*S_$7%V>c+}Pw zfKCO$0(4FT-DNW9FOLjVEqN(Y{3ri(j1guehnPk1IN1Ak*wOc3?}HpNSS&g;J_kmS zD0w|Oh@v4B2yZF0dU)g{a5p!OV~&g{e8L<;O5B|1oOWxLIo+V=f_yOEAvzYRoVu4b z3Kqa|E$sQyGZ3XycT|(-CGL)vjQ{d>=_^zA^!$G{oZN1aSsKT#{o*c_lilBK-0vgl zaII+3YDD0xc}`Eh#Q==pD)=*~Yu6?dPZiXH0k?_XhymPsLM~u=n`@Y_kO3JH;*sn{ zsVArmJVIWS*b6|F%XmIg7k?Ca)hgt>5gOX`?;t-X`(fFi;wPP;xv{pbuBEc9FzC&9 z=OEH;XZBGVDhHt-q{r+<7)aA8TPR+qzPT)U#Yb5}R4GiA5sK19dV+Ln&5<>=P3MNZ zHtXSuUmvttt+sEpS%xB$bMD00eMe8{7zFzRbs zxiJChLln2oCk58Q8^mS2(Gb^Rb?wN#g{3TLdaw`HOODgW$5D3CXTR?fLzGt`^e0cQ zol7&SjcIBGgHaHxQ;h@rp5sw4oT5b=jfN34ePijHBdnzjHVa=ph%hR7b`Ar<~ z3sm|em+?YiAf>*Kmif1t87TmNyO~A0W>_RMC=7s|iLVwNB@;A5;&Wn&WMNR~wItFO z!T}y$IW)L*arc~#*==KyvBq$9YpA)rOg1`TzM-3nIcC1iV06goTe&La$tAd7HaVaL zUM9|x6k5H##piF`J(5!+SS%yax-|s6w1)rSpZ`2?R-A$o+|B3=y|KuiM89@m8y$d&*dgDOjaK=_h%^HC!)Tcz&n zp38B43+68A?#Hjp>1c0jtqlR~E1af=XV;>5?P7XL%xlLWFk-Y2>=x)ZsBc7A31XSJ z5*bpAC&?fYQ*2@3^&eSWY4*smBh%Z$tHx?g=loR;r!B9z($eDlt#j+O#-8%I8+=bk zf2BAYRNc`Xf+^5n(s$Re$j@jR+ZK=2^AE4MceuE%wJ@)#X<6~LzaE@;rE|qfmCctI zT__Ev*vfhca_YH6zMPvQ7B5si#45xplnIGFsEUuN&jHALtwuycC6=Ma6iMnlwGQ$T z<4foXZ(FO4(lST9g7dg+pz%ZDi{O3i91nAi~BxIC61B9n3$MP9laLlL5 zKgE2i(VI|xU@AAkle*`wxoeuz;VLcdaip7j1iLNMw}afO@c)MmgxlS#=56{33;9)- z^ZSH>9ZS)>g?JW$FZiSX!j%H;WnU!2poIwn7Y;Jg!I`AAkZATnK_fQZj7Qc{@jbJW zanGP>S^}f|GeYpcregC&+gr<1^h2-s3_rh+O~__J_`>yZD-61x#w1gSL4 zX7&XWImlj3)q0zC1OL=!v!TG!x2DOFS+#w$ztlXm&}FpGS>Lv;;}C{u5Xl&8`xwt;vob&U?b3&bF)9hJ8 zorU!^?cjg>PvnY`oBar48RP?u@V)b&DFa2a+`LW-@Vu5n8s(M@MAD@mSMtYj3@RT( zyCEGS#Hn>!omxAd2C+x&F{pp7MDXn?@eWwZ3hDtG^Ff`VUyDBqFa%C>{bjs>X+p%* zjj>;aj72AIaXjf2D7Q+Q12McDa$!>-X>sKbk3!(E>;Y0Le08-|5LNJ9M(SE+V7s!? z{9Kzg=D{?^wCdC-3{C|zPN$8-AQ@Z{-byHDr zK=cik9v;2_{Udr!0_Nxn=$7SE8`%^_%Tk zS@z9QajQKmqcMNy!DfGL*u8P3aGSlu<_u+@I@@|SBTYRZner+QUAKSO?!EWUx1Ju< ziaBZLUrdAG24*Av%>P6X<{$dp-4RcYTKHFJNM*AQ4nA~~Rctv=2+(zU-v#6LHRJ`1}T_;_576=ggZ zswxXtgo7xNgSqI;%s_feDPKzA4MtK>3VbXF!3o^el+}RhvQv=NbNsxC=WV>zxk1r)zWjjn0h5kvVtm=>^p$ zEP5Ne1cfXE9~N5H)?ahA;K3EQZy1bAv_p*PKg7Q%w0mDCP>Yrq1dHK9pm0BC-vI@uSt#L z3|gyxEPle(0}pFH_tQvv7O#UN1Ch7@lHW+_pd((63l$x~w1RGv-Ut+PT%&|hPWU%! zU=eWba+#|9XGZU8+N2qtf&@TI7NRk8YXX1J& zgEH59JaVmzvL)K$zKG2Jf!gB>UwulLxXsmdWZmE$odaJTJMiY!Dnoi^*Q$nmEjtS? zVQET6}%;LpjzIcpIN8=z53fHc)LAP)*5PrBhJf2_yH zivJ=EaSZ<5&Eb~U>VU1l$5YnLU3sBj+>svElOt7dtZmw_7e zF0H#-t6F)kXCuW(V>n&(F2{cUW*qD{K z8>8PfWbnT;+64dh^Z&BhoP!H**MVk#O?}RoeBqfh(1+bskyZ-*df0ULJ+1G#fe$q$L zqvPTqgulR+DdP4=^Z-Ie+BLKSMVuh}NG?K!GXe4CBalOoHE@Me76@qxB(ouaXBvgW zz)+)TEHXw|{$`##+iAkq)*jt*%MUX1jeO=aS$V@P^H(*-+$(1k9P6qBqjx__Yi^4c@qYz=NDe8F)^Xuwn zE$Lc*GU|6|9vMrsHTr8p@tF_kGs_E#s|s>3^NpZ=6=c|lzy$eQ(H?ngm%oDFH*d-6 zGG(?N?-Vq!~f`x>-v*mtPM|P|c%T(yYyJFZl91!u=yw?U_NHxy)Zw1==^Q z*ue5?x{#OtA;ZrgJ&15Mewz4PLpeP3c~Jfwy5rCIhAFu`^o~zegsCCFGgS#gbmTmU@+62yiIeNvDYcA=$Ql}-)Pj{Q|KA6hH@%z;oI18EvX3>&#Ga9h8@?U>OgS>>+6cj``F0MiF6cW zeSkkjNb!=J$h=%^6WMm69Emp3;Q#HV0H_DbV7tU7Z&hPB3|Sq@~d1GSs+Bdh_z;RZ#l}T;Se|dxQ_A?YLXY(b>BR zo>G;QV$3~AQkw@Nf>P{c!({=I@QITxGlj6Qn0O(J0LttZ4MLaDM;`C9eFiy``ityOpUk=$uP{ zD3jEg>2(2R!8|3>rS77#kd&29{FUHcQui&U-NaW~0rlof{9O#^1-0E+TQ=54d(|0f zFGrjRHMnXj%7C+)a8h79pyqBI=bVB{D7zCiD5}+9foe4+2S~`zVjE^QGng<0GtUZBvy+)fTKt~ zlsP~?W!x_)iBcaSP&BpX2<7-lLzFDZBO49TGL=d*q?}fbst=Cin&Lu#fd`JPbQ4~0 zEb!^o4rKZ;+tY4^I*)cjm%nzP))8>Z!Bqj^rabnUBoOs7gvyN`d-$_S!E=Qdzs>H~ zMs9W_(g;sgF4S|O2bF!@@l%LUH=H3 zUGY;G6FvyQXyk|tJEn4+FrnNEZ!2qCcXZ3m|MKMGGs~~-8T;-Wb4b&r58K1H)>PMS z>R-AkemaF8vgOq+Xs)_#&#ezuuZ#?@*uO1%B)izVCU4$gvA1I3ijLkL4Rt$~9=+cK zzyqH13x7hb`%dzEL?X!FRuiz70OZOmSgWt3;1X+9F4Ux@l8(V5qKTFAKZ|R+ohxqH z-gjhiRmX{PgI6k77o_`Ed1rUjg*)O}vGkAZ9`%$LPn?;qT?Vg-1+g?k`kLQa!Zh1gs@`k%y-U}|w#fqn$l9u-6y zEjJ!Jrc5HSY%XAzDTjC~OP*#>wV^4`;RrA&8|9ee`N>Zqv6XJE%JKqlUare&O4H%) z<68I@y0uuDNJ1lF>@h_ti3{{#uB7N|!cF~$h8J}Ag%mZcdB>tuS-18&66*QZ%uq{P zqy6FRn&r`wxx0?+nya)XYMj$l*sx=o-b8A{{E<~H*J|pkYvvQr%)wp$d(g6!Bp)yp z#bO+Pl6>%|3o1xHc*)rfB`OkKlHzrWKyxYd`{0xU%oTZH6^V`oQgBUnE@?Eru;P_c z!;LMo3%mm&Tv2a+_1i-?&YjciS+uya$7b82%|E{U%P;NS(?emh=9T$*WBR;@oF~up z?^{_^SQ9R7nwM)n{b!Ts#C01Fb(A%AG`9@o&jiPC9s&3Y8c64=H&;A;cSoiPuJ2pJDs)hDJ1^xKvsA&yyZd5us>=eXcg9%?k$z}{H~0@?3F|-)AvX9*c@7onITD zcTiL$C}j#T^4l%4f+HI+EHKNfqn|Po>&{3opBrAnsINOMef?ZnUQ6AhXgb53cDcRxe5a*|Ho@63uYB(L(>dOIzN@{pz3wz`t0^yCO1?*65oxhTGuONJrILE{AjuK!j-Ti z7Lzzj<;a16&4z?n6%UXF>@>0^Rdjj;?t0|=A$q2gHbR9!X5TU@uBNb$C@iX|C@HC^ zu0TdjNqI?mSt*?|Emb?kw)wM{ECM{%kAFV%v<*!>D6E-y@JbHdg%3Au_$AtBK)3x+ zksB4L^;#RTVx_zQlzhI@fxsW~m7(7$oZtWlfVWvR5KK?9P!=Mth=Yt%n12geRdIED8#H3?EvW6#>N*0`q5Y-j3*pb?Louj6< zfcNyl;XUnJS~}YN-F|85gS%EQjvi}ku9#I@)zBrA#Mw13|IuGZFGMFkxIZV~(Rj3~ z$T?8dy=?cQmh~-mzCh^W3vxUg9v*J_4E0w8DQm8W{m?v{Pt<{=&iu zbk&62j$Y?fOhZ{kbXsIu!H{RA%tS1S>uYOkYHDk5Yo8TqYHez5X--gw6~q0{slz9; z%N0?HFHUBlE2ozDxXeua#T%l%;tAF75qUcNd|N9{3td2j^5TLxXQl%9CLZsHF#!Qe zu{_Eca8e)+Bx3~zu^LQyz|`-k;*x!baFH1~|7H2PZWr$1+5BuZBrF_#V4ut|VX0FP z84Sd347CqZhcymh_XY7630vIFGK@9-rV`n_GDq0ry>^rfqXU`jZC%!D0 zR-KqT@9JY$&s%zCaMt*ezGFRm9~mt#a(>meGunIL(7@05jz52%_aE@hui4+ix#rMTu zAV0g5+Zxg5!n#D=P~5lfoorynJ+=ZnfXuZXoN*A&xa?Xdkp<7X&Sxq0S$t|8I`?4d zc04k9e;d%#q%qUx};lIA3OK*Qwz_my5`3x_rBP?H&5KX z>d>us-1zmk`J)?OJLD_kxBSM}Fur^JiA4n~ZX7%KwRM%Y=<9EqXPp`wzNWV8?-V^2 zb|>kvm85&2YmX97n7ob|tZAReo#V7KYZ@hK(^}Jsm7t*}WqL5(26eTx(BzAyHR~*FZS_0Rx4TuwT5a%*D!xB@MGh3plpox zdH7Z_hJ3#?hWTe-7xTGH!kT4cpB4BQ#e5Wx#@|bIq4IklpXt4t#F#(LG$!e{*_g}v zZSqyK--fUE^{HzDUh{kC8zQesgZfe6V9>njbLNX?Yn4Bje{YJPyBFBa$?sSke@7_g z^IN5C(2e@}mC)zFbZ#e0sEWt+ibgh&3&?blMM@paA4#i`#-Y?}CE*B+ew9{G9YNer zO&JC#YEjq=^*-3JwUAQVs7$_QrU%uqsnke!R33x-GVvIaT@Y4nK+9N2BZnQ~LK9~S zSo8oW2vZy|EGyM;M*EY`9nO|)LsF?)%GrZaDKjmGyiptTO^Y|-ARhQe`B9f()8}vK zK6Y;TL)(_zHi*Nx<)zts^Y|=XMt{G29^Z;~#@*EXGhfHy3^{Q)({fZ9dE>k1tY01~ zTyZ@e&yPLE~>jBMNidpb6uY8kl5)LO4&Rals4+7PKYTCg{V; zPCrYCv653=z)}{l1mkYMD<{(&v2xvo?}{~-Pof6382aQq|Hmt)4b?#wGnrjUv>~Zo zflH1gcZ~o3rnFJSHZ@#Jri2i6;N~%8p?^5=%%V!ti(1z%PSIjHuJ<4QN71se= zige0VJ6bW?ofPBSE$8XE(TS(q9WM%mt~=3*BRGAL0Iz89$6?c4S}y6 zZk0vSjz8YE;>6Cqw{PHWWrHi$hT*;0?~$dFORWu#@9tQ)BCJRyrZ#ATF`0B0bdL@# zJb2@wK&fThHOmuzoIc#UKL;OH16Nm1ew<3?$C0@ZCEC-va~Siv##e8b3=ZCR)KvbKXXBSfw}1XX!>p6*M|V{C z-sIcdMRV@FYUjgOmAPBz54UbVRF1AxX}^iykecGg{QaqWjWRdLZ8i?iT?O zDmcP{js-tq8mgVNx{Vx=We|f!5hO@Bf_qdU5#`o064gz4G)AMTuOO<$l5!(HEE|cS z;l^-dLw%?&1Tv*kF_w}`7vN=6kw=ZBAO8q9Jo(QfLyzn@eDk5LtA-`tCVlIJoA$lD zb?M0kp{q8|x@*tCT1lTJeh{>sD&@wy8)h}Wf1wK{($s{TCsfHNd?kBFnPQU7L^Nq7hAKg4}cT?kHtIO`px7$2RE23}l znvzg<^f}%btjg3zpM&CsPR^Y41bQnqazBrld) zP;wf0t2AI^jX>bU48MsgROH4=5U3~zmJsVwtAKe3gg}j`LR2XBfGDCOI^`*{Trc}a zE^j2&!N9CFge?JIQH|HCR@65hoG)VwAeMYl}tu6LkD?hxq|dW zQ;+L|iG{WY`G4Yv+7`Q`8|NRM-*D|%_Clv^Hm_KFPmOS%Gc=B}Ie+RPpG-c`%_ z<;Ck)HVHXhhlX9hob{z=i)x)cS2fMQ_KuC&ZMlJ5gWmmPy_8p!SJvV2ENrmWViRaD z!pJlF9`6KBbA6;1jzo0Nfi4cNM}!m}G3FM~iSa{uCEzu{tTTB|7 zLlK-;ud4`OyD;LXIRL5)E``L;!rveWHzHp|{!Qa0e)IL54Yl1JOO`j$Z`QbLjbe#0 zCxYKB3Ld?)C3t=m{%9P)oVa;o4&Hc*nLz!bOeP&p9^mz72 zRcG6xCG~aBJ{ziXRT-rR(z2Vn_-lg&2XAhOzSeogMX@nqHs-$A8184>U#7gr6V(f^N}8PJHolx7~l`Ng-4h@7)D-E^5?zcee&nX8%q9sF?|kM$mRndwF6z3 zjzmnT`BUq$t1?%~p^YN6O#;HIW*jXTG_1gTEegCb2pyqBlvBxUCg&s?6Z6b{fGtL^4;e^E( z5f;uxFRa`bEKz^4t|@C}bMi_`7g~}MWj@wrrL4$dVCN3;o7Yr-<$T=sh~A1hr(wRa zhLHs{V_t(QE=nhWOvTJKDiKNsJxa|LZ=^_qKen(-j)Poet1Ga1&NDU#6G9{>lqQ_GrOH+076!WSRHY?JlX=Y~P2&8Ay%2%o(iUk(UNw5> zY7hlU>;@Kr3ui2OH#8b%(!fZO*(YJ4Sf=NpuyA(bK4D!((dT<7QJ#Nm~ha!NPNHgc5C5 zgAO*17ae@y7)HtsWB#OO(+GJc&H>eq-?eA#*s)k^=>n^Ko`VZqwO?@WVu$&wFUYN= zy{8W0R393%XY}Wk%5qFzm!MYv9{eMpo za|pwXt-#0ol6w#(tpI`LsVg`hTLGG5CoLgbvh#p6GP^yt23zZnbsH|E#TUVIX^=lh z!Si;+k*kxrMY+c(??;lzk=xj&a!X83hJ~ZGU+4m9Z(TVUpHFm8PN_T}2uOJrjBXal zlcyk0{-?Tx`V#m0Q{D~b^E#HUY;GW3!daau_{`a}=L!!oU1H*)d96m2KeHSn@ix#Q z3tT!B8Gvt5$bmBhUH>EvwiZz_$|#!8qJg+g)Bul>#_^n}rhaJ1nSv$7(m0c&KUS;N zYxNccaJaI(I^@*E&Q^*v=2Oy%#M_u$Sg+hHkjEEJP}@=B8RfC3r^jr5YkaMPI4^MZ6!(!gUI zCF`b%^JW3gt#B?Oxj&5B`}?s+#e{In_9p56cNnArLbin$4C@)z8&fZV%X?4cieE80X)3UC9GxAl&Gq5h znrc8#SPNx>2gr`4+a)}MEK$Y57hviFAJ3j>wCWF;S!ugm_sCx*erM!D)lU=q!?ZK( zk92jkeo3Zl$-cV7rA33$(AtuFd3C@4>g#H+e$bV)r(@B?ZT6s9rSKQLl+q!}{?Oon4ah1V{Tb%2jQb`==PyWCAD-#>Qp z@4L&r)s~9jieq!;EwOa%>|d~PN1?xNPO-^W=qOoK{nz=2x_Q3kP(#D@v;7;I*CuHT zeZ9`Q@||}+_1k@0Mw^?mj9+tYeQDqRx2k)tTQoM;TiLO(Mx`$8WOAWG*drdpT``O6 z;dVr}+Cb;#0wd@<7xYE6!U!#ADt)y8jne87$N^n}hqWp=32 z8SW&uJ-99HyO%BoL9PPKw=np%jawNRs!ApGv~B>aJ@X393tF!H`UYQAx9`)8Z%$?~}Ez;JuYPjw2?xt0! zMKjjthih9`$<-njt=^o>dF`4*m#7&b`IyW2E}iS+eiX5^=LsfFDX&82)X?(1~bwGtQYzX(PA?w*a>msoJX%?pU_~bC+9Z;YST61>e46R*gPTy7UjcG5Q-M0Ez(4UU~PIu9$O42dm&e zV!UhO7IOa-u|#sQXC>G(nEdfQBe=J&=?Sfa{lA+_<4meF6LRaZWk!u^Ben~Bg*tIm zi{eLEvI@3L;QC#WIWxV8ZT{uI3EMQMvwc=;b9H4|X>lRjrT}eI?4;DzhA7D_Y_nbN zHcfYMgs&}XI@exUuti;a>JI`2*-ky=wW!D?3gk$C1x@7QV7p?uH1m ze+8AM5=rm@*eF3z*MXkr4BnU&NdUY85Q|DWG#XihWR5fDND?XTgL4zdH`b^$aGR@D z%uO)EIE2Mong~Fwg4OEh8o?DNNo-Frc410q zr`?ivG zJh!`|OMHcVa1KL%LaW>8_N`ZPkSq9)MICaG+ql~zIgEZ`m`PQjVGuMd1Pv+wll5EB zgN8+@4N(XKZC>Pdv8MF!V{C*mrS#kdDFqQQ2!$zQQJGHt>!%wEI<_{0Hla#Xp8I|I zd6SbyfjtF}1`{Mngp4_4QVAJC{s;y^qZ4%=^B%7ofN7Nu@!t^D}kJ}-! zfAF*Rx?Ow-WM%e5iRx{3zIzen-ko&9^#(fOb3s=-FeA{5T&qc@E#OFHjx!K*g3^LP z4r&}sX67cE3UarUgq$2pa-|D)q9t7nXDmHh7*Z7=tL^HnKH;7P>2 z!t?&g2g&K-L|<8co`fFUgdEIGsHxF~k<60|i!F%oQ6WSDI_iN!@EYhO9l3h+D6!B( zZ$JXLh)^MQE9POFOOkFi=*dD4EXyoK32CUWtF>7CzN|pN>Z8{7$ciWFr<_zk;WPV+ z1^Hmw2KgD-)ygAvRXTa=pYFQp<&oBww34();j*fcTNaGmG|T7J*7Ij4{xwxh3O|YN zIsDy&rfc50X5N3kXlB5gA0itVbD}ofRNl7{8yiEV!Mp%Ula+;b zefBH}2BZsHczEP-PqB1i+)K>fO-yRy|DL&S!QnZE!Hy-Hmh~5Jjy}TwNjh-jaOeJw zMUL6G%~`ZH)NU={f3ui+Yo%S(3P+L8YUfr(OzFsEM$870D*Q`H3vdya0fkbOM!+Pp zeZ;WGSr4!ozom9y5gk~@Y({=w4!TpKO&QXdS#(~lkzIx8y(%2NY2}LOFZlfazLH*R z@fZ5wYxPm!NoYPa$}YXF2PX#EezM&5moe88oSK zlo`%=02%`wNJLtjgv2c%DCq#w1f*P^i4A@PsyN>VoJ^WwroR+TK--T)K;+}pk9ThH=oSuQJ z9FbxTN?kR(;Zp$`8;5T06kE^b_zy$P=XfA5mQEVYp$W;1!?DV= zG>%Jiq#>-4X*L=0^9GOEqNi4KNq#&(Qnmv=;OP*v^S9YLqu=ec^Q-DCq9r=6P8Tx6 zs+QSnBW> zoraj1<8fQ@d}7yu$d~dA2i(d_JQS%>##8zM`;qA$h_e9w7{?R5T)x#xr=5a9p8T9_ zyUmnk%E~m;nFlWzWhy0|atoEQ_|29XSmIYTwI$zIDY+t)=>&OGhD`iN(S^CqTKeynH zG2v)o5+%WpAjNjjk+`vv`vvi%ipys;=<}bUuHQA_JWg(1E$g>UbP{*(6M`!*hLMWh9{Rq=%vnn&u^k87h#B=J}h#o3)6eO>yH zVdS<756BsRl$UEjj%6OaN+ouvG3XnB)?=U}vGk;G#l*LSK{@H$q-X6@zfF49J@wn9 zXUSDQ>CN$HlfTeoy0I?^kB-_CP!40E5h|I)qrEpEdD@2Ve{}rOrmHZ0P?aF_J00l!t0llc7 zOaWZbdI9hrpdb9m0<7R9GDPYx|0yVk%amtD4!RrrUnHjrc;8j6RjrNnr$RfBs|+zCG z?2*nLaBE!A_pG*K6F)j;vqs;|7jyPWrzHDpzVmk^buroIdySt1#^iWo|mozanEt_fn$-`pP_hI^k*G1D$?c##`YO?0g95qvAe$`}J8ohhK zWO~giWG)o`OLbz8bz&%~{psV@J#VtPJ7LSb1G(qqj=bQ=LRK2Pv+0t_ri)p{@w>?i zS0IR~Y10TZU@9@frHr8Th$%y6{&rMFEr6l3;76TaIM8j>&q?8DwhWnFV+-WdMy$49 zr;E;=;`4hPV&?l++ijfrLjVGFfhY!H5P{37+(jZO?cT3C37I{9H&*p=c2DwFO}W#YJ*0h*ud1wwrNe zBtJ0-aL#a*Bb_JqT0vla&%l>3#{)KOEOJ9;+E}wE3`1Qi-{@31KFPG~h!*Xzm_K!i zj`iYu{DlJXXX32ad>1w)*i*cl*c1IO;xwVUOGXpaUH(@pO=wJ1PiHhXM)$aM`;=T< zPxPI{7}g6Eo5-FWOZmK4x?TQ!;=Pp57so!2hEhJil|E0B6CUnM&*{lg0QH;XL=lQV z*q)%)1kM==&KNiic*AN$h}R&UL9wh8j9IKaAV8P?tS2Ic@xky9W6KRZ;-ayq;xCt( zHq(=zmz$Gq&VnE@o4pw1TOD;-}MJBXTy7^!L-R8e$Z(=`8lz%RLz};Sf+TRC2>Yie;vu z?>5eYM4c=^k0Z1umY_}t0SUzD7^l&Uu)YfptgQ%AxX&2Z`9xr)7Z zR6~NBdQy5hZ6!ATBLEJQPJEPF%f=*~d?sA1kF3%?_EC~@wB?_^7jG0RuTS*~ z);Ehk88Nl+k`eI}lLzsW*g06l84yD;0IJ-m)$Qa=2%(s!giz3DM5|woU>`1TNiwcR z9@aW^(pCER0lO@begWc;iPW3?S|mpPE#6B9u}^-l4$$V9@89Ib5rqVw^-fL?GGN0t zPD28;QcI8kwnxO#*4o_Eh|m)C@FIOVi^5ARi}EtUOH+iQ!VVr5WKz9?Ak&Sn%n)Qc zpL!0YQ!=v%e}Cy<)5O}uttl~`Jo>>szUT)jF{k&XmvALg?Ltq)L`+TC18KPIV-XcA z$e@T(5;-GqU2P)x2B336auQ^f^66H@P@XqC{6PsgRD3-6R41n%Q0;tXoga6<;BP*? z`|!J`4*pYP-S_W%D*AZ;mVXo-=WmmCsS(TwPj z8>new_ZnG+eq}7A)TIVTN@gC1)K`Q}aJR8{a~g65W^o)4`m@O}!Tm-CqXG6d8o<=S ztX8CVLc-Zm*;<1#Nk89roy8G-H&;c5qI|JAEXOv@+OVgZe~w?K6`UE;j+mS~e!T`Bs)gG>4!&3FFBIZ0%Na7Rv<{@;yJGeonl-zbZ(^!K&MqpT!0T4WH|k) z^dY}#3g2XiSHJX1>Q%6L!{SXg>s~>P-tC?j-x9wNqtbNtJcCP zriD{l&8e@~z^6l94uvT$G5VokY%~vV^reRrpO&%VqNI&`(55_<;yJ3mobgk9MiEzK zCC61ZRW>!$k=zDlD$r5NY4#cko2e$CQOPU}FZN}7ix4vdw#ILM{N#lrH`u8qlAzs3 z{}qa|>{i?~HhQbu2cygh2JVsCLxi7}{U6x7lGCjcRvT9KXuQ6kWT- zzwUJLSsA~Y_tKosGwU3d9Z-s$8T_fW3+g>l56KJ2Qj#KDZ0RrZoza(GOt(#ZT)MFC z_}*wgea-HUyo(>I_JV#j+&Sb|VGZbDl@9pbIQ8IanudA;HBqCkTzE-BlAr>Hjh#fn z$%7;1iE%_mfi!FG_$6sc^*^9x&_c9CE|1z*8;3bfv8W~!lgbBj1=Zf`{`tVRZGo>o zQZwG{%v>MUZ?!nyBs%UCzrHIQZBE__9Xe34@c!!-@h`vlcJGjG+ZTs#`n&r4iT|9Q zlF_2vhQO?Az5-VmmxufJ6VT9xs@V|t9-`SQW@4v-set7K)LM$V=#X@_8xau1Qq-dE zv%)TAc(yj>Sr&D(>8W8g)Oyrl56ur}0{bMJp#xBjE=AN=owhjHnL&)i&g!{gT0N;C#f-U5{` zT838V-?;tsi_5^e?lkG_`IdRNj(+uVm1<(YB;A={WX8Kg7yn)Qoyvv229=RAD^#?K zU?4ZsOi=eSemIO+1waMkk^3L2%FGHDsF)T85M1CDh_xAh;+UYtW7l{XMy{PTtOQaD za)WRNaWv(mDCw$w=QsEY#%N8K zqh<9Nzo57>x~siA+rWRmxN_nnCsJ=1y?uZgeIGqh+*HXY&kYoYqXEU?a2W9OFK3QO z-jnCZT+)}G;}5(CnwO3YKN@aeb9}Vc-5_RkIh%$z@$*Y6qdOzr2oj$xshs$uLr}3f zUf3fYxTsRUjyWD+cnDd^^F;qKHcz?Nl?TKGK8GNU)4VCBf_2K}l53$gOOyfbjgP_K zL34}*(M6f)Yl-OnLlM2QBn|u;1gIchlJw`lk+-{{cg5h@n%ef@eB0pJYFC|feY+z( zxQ-7OmPc=B=_uCmJMkxz4Xc8$vmHVNC4Ej<3v9gZ z=@~x%PRi%h<3%3-rXFZd1n+W+7Wg{JmH| zTgQ~K>Nh5jrFqXVwl}tB?q7L8_s8}TJ?#`a>&Ivv&`sH#)m6}CYPi=UnUq`0m3ah7 zRatBo0p$Z*rx4dcDYeN%Q6>`Sp|=3t1U_eKc8qjS7HWZOLai1@pc`&wkuY*0lw$}u zS0#<(VKtCwkSQuZJN;9a@c_*K#LwW?Fx7+@{}*KX8}xGB38e)h#U8!IVwt6A%3uNp zrd;?pNg6>D5}nVT+O)bRzdxWc&2Qqv0>Z6l1$<0uHFl$SrsIsIbsxRST7;$ zF?i;2Y1fn(MG>Np@i9oSJs;yy^uSH}Tpx*lK9=%%ukv|G^nkP$d5zd#0q0bIn+nNp zwC~Zfl<{|(1X*z*72>eDI8FVRzClE!7^NOtCbc>J!@|Btyh0gtOX^2P7DOYf3;Z|zd6 z)oMwteYa&>Em@K!%a&w$lWb#4US*7TY%s>y##`9IfZ2>$;s7BDTM}bJ0wF7zKnTeY zhLD6LJRsyH1TwsY*y`)|t8;I+TC%(_GvELJzHc%{x6kc9r%s)!I#s`_x&^0+9l!Zh z-=RY|OrY@kUFA4i>XL!92fl`yc(v`_YQ6UVSF7K3;*QX}HKV!W<{~HatXqF2(KBWP z2MhMNtC3F$V}jNT7z5T@55r;U(&M$v zzT|lAf(2QVVDMOrdcEq$jgq)@?SiS#7gnf$<^ON$i|NL4m=*I%Er%l!^d{jld8s@o zZ-;Fk5bldOQf;urGEk(ROtCy(ew0LVe%w3&7f`()IEV!?n zVf1yiD>>*%@IoXd&kI5Xb8L!?O0;y%=IRM^M};6sfx8-AL(Qhx`N8WzlQGalYwIGS zG>uM^GVqE?GRYE|U<#@RToZeje)R0OA{TxE)EfO6Qco3SI=vjJx_Y9VeeL>v4$^s` zz5*3`QiNh(eDf^rxQy3GGqxWM&#CKLFSkrTv_30(CuyDDiRk%M!OnT@;ku?BTpwt( z{H<7EY{fFkj=5{X=f}o8haIMk7v6%^PkM!a57`sibJ!F5_o&0PSMle)^qlm~nGX!x zAqy+erRJ1Y)WPX-8YHV&7D~Y_wGd7st#|jBhbBCP--}ZB;2Om_w=xTK5v+a-YBgG| z;xhE75XBWpf(4*y$V?4I-;UJ8U!<=`h+RY$a_m>n^%^!j z2gx+GRh5*%mXnp{bGsb&B&!LPCIyu0)(buI`U7Lk5NtWe6OM1+3y>7_7OyG7-WX$! zOun`6#qWyIHU6IcEBYU8TkzxIO)ocZ`2+hN``*y%RX0PT#*C6$r+B=*cSU*4U!S%3 zle_P`X4afC^yQs9Fk_W?W9h2v@5cG6KJ%I!frXaCnj;o;wJ}gB4mw(t?H2;32E|pk zAR|IB0Qv=&2|{cLzw5DE2p&07aI=ezlhAhbG*bIe>j#g_M?u6j0o`07Br%H}zSr7PugcYVDa+LkfB;(@P4-)Qd@?|fv%CmQ25H1;?6cq0BeeuKhz6r|4*vhs+KL5DdBpg~VSs~R37UIHQO z>&26^(p%PSR_`vWVNcKK$+W61Wi{f|3|w4xem`epeb3_gcU9Ik7j$|T9KjtI-;UL8 zIP;qFAn`fuv3m9|;&l^v-O730jAd4%Iv$uj3Auz!L52Ob$qWVqv*7TL;d`J;k!&C5 zdpahQKu38oE{q5b+MLo;Qu6lJ>>F|KpTH>pky71;l@RQIJe)!n<#~(lU0S%fi_Iu5SJ%#%c5HLupxc<9oE03{k9j$f&$S!#$`MvXtY!l; z2w>UsL+uAoW`msofdSY?5)MMILwzm6KTLwRf>Dlo0IoExA+%OPi^c(5B*}|Fc^V~4 za|WKxcVxO;3Z+=UEgO(HU^&m=b7_$PqlO^@Qbyc;=kf(_cCvydYm)q6QD1$sFOptb zo72>mDZjk+mz%{jV?~9MVo`?+*YBK~(NLD>%P*+zCi?Pq7(AeFStOidF+%2WF2Dlx zCyE?jRLSm)%}ZHbeSMeu z_4v$ovy!3(8IG!gioDc7s2&a^QCP1ol5@bH8A72jjP84xKDP+_JC6Y%4NHMySh;Y0 zQ*AK=Gaxpl@XN?-YI;K+V%AUago%dqCJuo^GZigbJo$E%PNldNS;+*~i5pDxA3h-3 z+}R?oT~kE6oTENX{+irnmo&Y|{WULSI9HBzulM=`N&ik3@ubaP-Yj-%E*hy74jL$1 zHuc5Y!}s4@)myRtafU!1(I5S$Vn>R*F>gY(-*AEWVO3&7E9i& zI`-Pt@7#3LJ6F$tU~hrd!>ny>zx(!mKUlo@2lrk7%(k*NEAv-CQ2RHVE>8aEGjDVcPIV6^AU0+9&MT1;SRzHH(guO@pwfI!W&1=`+ z+}4$6FqryYPOZDVsd;l%=1=>~24h}_9O^naZ{ESKt}j6BFjG;{flW8=-hJcMJBo@- z%xVaH0Y81_1JLj)PQx-`N@P-w4JQjsjG%!6-8`H?PQo%kj)lXx;PFOb*;!sU(jtpl zF;&n&y-F=%nork%4%$)@4x=RM@(U4o)7w#>e0$mjNjx9>z<$4G4emHpgZG3yB80@} zgocs#0CJdTIEp?sf_QS|J@|*IPtBR5;8B>vtd`z&GYhg`5^%nzhCi9z0i@!FR`zxNOyi#n?9oc>QHbPt z&}E0P6aBPN;w%E`GF+;lZNT+Z$R4j?mz>@Lr_&RnC<8O1oe<5IJgTH*Kz;*%q$lv} z59O6Blak+g{gq!8HTc-KQ>T`Mw{6(4t)yg18rve>U`jSb-+16qBtD_ZQ) zfAw9}(tcI9XylLhne>6zf3FM)xir^odlLHDunda?fiBJhXHhzyNY>dHr32OIEl`pm ztBrXYVL7maPZWQ1wkM+2R*S!HcHZ^{^^-{+mYX`I^|EzsQ&K-sDixdh_4O-XXK;>* z6%}HN`jeUci)XNU`V-RvZ!`e|vy;aK8X!6l7hzw}FzSdM+^WY8#*HyQj1s!HQoZQ2 z#uUOY&%ej~L~Kk#TtWMu(J`-_a}323wD0-koMUh|*Te}guk@xNt?YGc45d#WB5~#FLT;0>)@OIu{yC{h~(aqSNtd z1WRK5qWD}Uc;{Gb{^xw>-T2&MzXKX-Yd+^YedFJub)EkmHXfzHQ23tXW9iVHJv5SoZ{h=yuelcD4#`ir6YF1wkPkV#rZg55_?@0$-g#m% zNGT!VA^SDq5#nYPe>5sQ=pnoRwtD=TB6nQ8*^jr?QeFaP{RvW!CeqTwq~tX_9=v-k#o#?P^8QFih? z67AePs$`rMqiI6!aC{8KXb|_;-rbn^+!BdDA0G3(H~xIbnCG`bF=*dYI_CN9T*!z* zC1lKAU^@zd?AU?48-(C8lE@7VgNp)eL90=e;2ai`5EKT$S)#-eZOZ{1=G(|Yp#grI z#mA`EvNrLZ*eC*tM!c2f;o`zjex5%!;PN|NC;@AyDw+%)ZoXgS{MJMVCtvOqbnZdKrV}M;#ndrhd)Ji6-R7Dw%bnvHZ`)FLlSJD+Cj)wLgccj;2BDI41C zkFe(gLJ3vNhQA*`SM+Iz3qsqpHDfo8F3m5ttSPB}0Uh13rr)j?ayVN6X z8#m67D;BR`^^3)&KXQsvK>-Tj{Al#QwzRd)9$X2jfps*USqeUey#)WtWf2!@YSG4! zSrKO9%pxXqYJ8iH8u%~-H8X=F-Gw3Q*~A;|q{8;#pU3et^bxiWP3D-8hkPa^WV@1p z=hXJiJi2eAyg1Hs(%984D$NE;o>Fw-K9u|@t;>{aEvgCmc5fvD3`<~*#d^$ zh7n;hx}l{M2JJ9OO8qt&fs@!t^;2j8)n(R9uyT9_UwBSz%vl7$2)_R0N%hpAnuKg9 zA)IhQVak`U#})Lzf?@ai7j{l8PY2oU%e|i4cD|62xu|p5P09y*etu^t`j-pHQcjkm zQ(k>2)D)P~uf+m5UC2fFtT^4a4q)G0@ZJ7(+1y{ypciTkt*5o7$MO^(NU9 z>M-q-HU_eS%Vq3eBjoG?^ff9%{US>W7}E-DO>FCOpf^Qo%E}oPmZ+jYjZzFCws9F? zeGTmexC8Y>j(y5E%}kCOUw+I{BWu@LO>n{#C0mrYYK{9r02o}*_n4|aHRGXM7J0l| z+4|)VF5UL;2OH)D-*OjkZk&7j=4SEl52^3Ib#>MHCwEC-A@P)GS zwfnAq6(7Ct5@gLex`5byR|)mp|EGKI`QW~u_Mt+n$$kr4J!y8~WlyeL_2l~OS?z4? z;S`g#aQO^z_wG+#+`Re4Pj+AV{>d7&5wtDY`19ZH|CRcmyLYpsU+sJK)qP8BEIYa8 zmVqKyF;5PzvIKj1DKiP;R?@mhcB3{~)bJ(zb{)sS zUHsaSQ_VX9#n+!O2Q}LKn>qj1&b(7BHtD8&C99?3*M=(;)yV#(l$FuV_XxWb>o4Q> z6yFPluw%3r=no=%YwUC5)nlKl!^Bsy@#xh$QyU+9u8r58gQMKQ)DRy!q3PB|tSE|Z zP;znCkBN*tBQqEjWHxey6qNfih!YVpXxvvxYVf?Zi3cHqJUVC~Vk5SICFaXBYX$SP zTt(DtgYT3|in%7@Ug;zi3k;PQsb&1H*x`*6GV^;>2(Sz#GHc`?WKt z?QvbKoJt6}>x{h?KX||UTiatpy^9M=jbF~~ELC6UXU~-Gh#nSy@XEcs7s=YxZ3jl^ z*O`!tx(1cXBlr>Y;qp0C0B2z6f`t<=iF+U@_TwT1>Y`2oM#FeQK_Vs~LTfXdgmjj! zP(3`Yu1h6NWb9d}6Q7+?ot3#RGvED{_))z1O5Z%?kxE~>KY0P0(x7I&R^0VW^pv&$E=Tr#On_;A)$Fl@;0hi)Dw2QJ00EK* zJjVg0B7DU%M4sltcIz%|_^?NJV7w1odhOD$t{N48|7gBut~jrf9hvrpS@4QJ17>ZK(wK8Pi2`z(J8`|`7?uMC`eoy%by90nkGtTHq0(5IbPC`{io5j zynSz4`!?y*xdkP)skewLmPV`M>p0GSH){{#4kAEMg7kgN-482aFp?4u<)m5Q#g|d5 zHf{zHr-G%xd>m_}w{k>8N`OeiTF&!1b@h8TkW~zQ3mK1O=m7BV#`?N?tx7K)ehRo5 zb%olVnT%;<`y|S{63#8!?bZUK4#eoZaosO$n4s zHzbej6Oqo%S|sa($w7PB+`8i4hSgW*s<%T=XPA2L$!=)1PM@2fzJO%-nISnOZ^QXa zg*B0qprL(?=?$KpB+eLwHxB{m3myNEuTIn6`s%b@8P(o?MnI^EEsBvN?4BWY)j14{ z(_fW8%0G?R_}KY&PcUB!!2C~IK>zJIYc`Q#x$-!C;+h_wB{#2# zJQn-hI_0d-F^)c`ab^_g0!_kdi}XD7uob5cJ<42kjMBYH5;l{>K5!ZoHUn6hYShAr zCoQ}JWe1Swp>*49whUW_)9c80QaKl5%X+3+f2@|$F!a;~pU)#bzh2G0rG-6Va*-7{ zvT1qvrjg!s*ACjY!o|`8_$~~+r`Gf&DFyQ0>D{M3a(Tt19?UV(W^IkwpxY#OijA-Y zx(mml9VdoLRuG8VQ5yG*80>^A-C-vy91$_(c7`O$pn=HhSvB*?A+>RZ!)<(z9}4xJ z^=$P)*0~u4kTUGz-so;59HV}lP1-%O7e`%-GoHjvhTDgjlk83*vl&quCLe(RA7fDH zlIBE26nb1>Z##FVkNI^F$W`s~i6_-xN32Q89mi#>t?|PdA1?e=r{q9!O1p*aQTMk?2}9N(IwXK&LlAmZIoUqA6OFWOn#lA}ReWq4kDDl@s#yz#4^b;% z^!eC$3JS%RChmBP(ZM!Q^l-7@o9nv1+}G$@A;3yMFSV zTNho$`4`O-#g`DbE(G>I5}BHZypIx>5qC(e*8t!f)Pq5g40{y1T_Ow(KLMJ>@kCdm zi2=T!{6JnP6fjbet$6dPICx=n!5m=57wq*)xO;*MJPIPdR5z*BTkPvzecSMwyH|E+ zPVSFnlhqGiYzAtTuGIWr0g;S;^~eUUpmKncURMrL9{w2eLl!B^GgS^Qvu^s;^}= zxN{q7bKFgSc1_8$IrB4{>@7QH-#Dl)ujy(myJXttSyh9j?8>c=>|dC*n(v_dGM(QF( zq5MF0rq`X444iTetFgpuGuM-v#Ns`1oP-ZqV)}B8fijT@1AU6{EjB)+VJbN7*uKu0 zHgCm%V{%p~r%04*nwpjc+v=;A=S^ucNMXs5omSYJXLg%{S*bHFNzW{>T8lCzv9;7X zC8y;;`s7F;Ib>~Iv7lk~tQkW+HAxL#TY2I9iloe>`fznoxVt6S@lk zktOany5f$%uOnqR&eL-DM)eyNiz=43ZP`*CajExjW8YtpVo^RkWiX-Gf^E3?}wyr@sD?GI|O+=Bhd2gg=L%JWh^6iqG8&T@$gVav&*0+G+H9X<4m zt7>VQGcBxJZopjV>6a9ouOHnA7E_%y;8wak>4#Uo!<1yKmY*zogo?0u=#MDsP%b0)67Y!87&oai!)LWFcoeTnWY%jd)aCu zs&iLH)6n4W{GJV9T(BxDCQ8r+XFMi5PBv zxbP)hh6?h+rEbLn*Anh|)$U};7}txCoptqP@JVXvnp`nD#lqm4o?}xkSvkjPa@ahf ztZ+km$RU-sb$4ZC}6khapURJ8yg?{&bJ?~FS&eUsOHl4T`MYfY%8Ap z$TRm1PHPJ1J7@MZs09DU{HvqI;t&*AAyv}I!_>We{#{Q|qB82)>dSy20QQPsBLOZb`r5!ZF9*@#!;&qSRV{OLd!uy|@v9$j}GbZ~UzU3yw zP7MMHN7$9Xz-#~p#w}z=GEfM?gtnSsNRo=0gJ^rE%PA!xm3u<3)rQ&XuU4x+?_txr zjRyATKS(z7OysM3l1Kg_*=#j624*)z9}+Jy*_ok+evfR7nUH60p*dnh?OO}L`bJo? zup$Zl4-~tQ=#xsqbxsfu@~0F|B7YRB`et%}loG#7$3&S2mhs2xp%v`jVJb1_LmrXI zw4C*?P!Fd`cauCaOVL^4Q%I^e_4X!@T&mq&h$cqDSb!$b_iwSKXT>wz$dMqr@i7#K zK%Zhwe=fuG5lkvo=!v8xP*A&*k_QFJOx$g+-4!%fJ~!$Q@(;*Lwv5tyVBniXa_cyf z$CgN|Qku4_wMW(P4)*6ANJ?YFY?0zh-puYgeZQExF8Ut4VyFJ5xZM5y7M5Wiu}L4> z5O3%|6*TPd|7$#0moJ{#ra}GN*pCa zGLIbwu>QCa^acBA7`J}NvX^)b55mNk2qj1)LB{}>!R$k0&={H6G`aFDkS9cxu_9SE zikGSZP92-fc6(d9T8dLk?V-T7%D(lJwjP*Nm{rs^Z((ycV2p=R@>t;J%7MTZQ(q@NHU)>7{sEpJ1W%3WoHcO5 zTkNuv(Ws0@rx=90cFi${VDwAQ#}f9LO;2yXeCdYSjad%|8VeHlR{9bAd~*vbrde;9 zyNSoDfFJq>VuHH-5lWPKG1+D5k3SD-&*5(=F@6^N+%|I7=NLzy(>Tjf{3{BzYKuI8 zadwoZUl~bC_d3ilf{?JH#M~Z!Py!eYWe>bFsQYvcGVmm=-#JGOucB)~CLTg|pq~vy z2Rae(E^M)nffM#2-`DN1b4Vim61e!lWrH5LxY+tgti-%I;^b&_eR9rGW>}S-v{B62 z9{tYIE7>nKA*Z(V8nTlNrq-T+txC!^7%umodi{O5go`+h?_n!8AvWw5W+VAA#fJVc zXmLxs%cU_)%)!~DQOF4zlg1?MV$UKr5JdZv961BubYh^l{gpuPnrv+;-IMn0SeLPLZqWV;8cS5RlZkUyIL}e>htOk%*J8$ z_#nIg3+#S_lF|$1hn%>h(P1eGy3a1{9JxCgnior5aOPd5SSi8Imt*hUPH29FCE?jA z2^aV%lUxD_gz{ZdVp>XqQjnzWpdQtrc41NQI^n!X(*C$g8$Wnp;6$O@RTjfx8(EMl z@&LMna?GK6=nWsE!aG##3I75&-l4o_t`;LG^8fi<`=^>jUO zXr8LJu3Y6W3fXtN(~q;4*X^j5Jd3}6Y^wU~?<14?FPqP7t5M{@FzbX z*xw(v+6L9{XBDL$`Qg&T$F=<+o2Ui+5)l?^?2tQw^8)|^QFDH*HGtEz<3r#XfvEv* zrx-!ZI|56kI9%Wu?;t^paTLML$VZUi9{~a4Mn36M+?iuO1EsDPE|S*8rRuw78Fht) z6)7d^lhaxQJ?35jWac)vtyw;W4U`s`8n0NiXiJ?fkk8gs-8ckKiEL|LwSRH+#ih#v zg*A}^H&WQPUcTxP%~8^H{pRSdxl3v?O4rSvc1dYY^Cg|)TPyF>oF(MHCb}#Ya-l)i zMv_zE5h=`1wQjH7>MtFdenmLT z>ay>G%LcIbzHPVG*6%)Bh*ha^~qX8&J8gHd1ARI20l7C0I+du>Vj90FH}@ z951AQQRxw45>??HZVylkPNN|!Y^3mHEJDd+4b`}!h7hmsiF{?z?Oz&rsExTA5iK8n zXyCTX4XKmfe$jw%`BQ2y`53w#@+b8@DfdrnA6 z!kp$f^4)w+l(IKwP5=^ZQ+ ziQjW1_FVXl_{!+_8>o&7{~qDDVwd(D7$Vw7{W&m12%(V;fXS63M(!7uM(hZj5i1M1 zNfsdaVl_Kcv9Hi)c+P$Oh(sVILLP$Y7@7E^l`?KA$@qG|;LiwR>v`SzxDDuHpl{3~ zFa*f!LdG<^K|QsZy)@`>%8xVC;lI98IJkZakD))BH8uZ?aNFP2zw;m<$FS^^gs33? z=GG(idNh64u8{+x8pSmVIEOH;75OliMqGI*;L&s+ZU_?YS!At32a=9S10Mk5z!`M~ z!54;5u~r8;ob}nt*k=O~4-RcE8MiJc#W-VmA5>*-z)vv>Jgwdf4aZnB80 zf>hQyyXCviP<8$p_3wwrtobi5C=SO`wt86orL}%`I5?n&XOy+p_dDy>Etd+S;@IVj z;!Y0Z>9vitD3+G5i3pHvc5)@)j(bror z@1aH-HfAxJEw~rN$wJc1$g7@43gSbFU>HL-VZvK9kgA$Syr^hfSy~dpbqZMAe17mb z95?Zhargv3WdWRg$GJa}&C{P zX99>>3x1vTlq{8q+37;@Qagd@Vi7Q~dO{Ek{DI=OH47QrzrOkfN75vD04 zHl|O^kQtU>IPuV2Y+_n`j6!5R>PM3yiq||0Q7l?4lYbNW>cH)f^gq-Von0z3lucg) zIsCMG4?G)kDeA%eOP#}>RDo~hu{ti3Q-y&@lFJU@EE*6(BgCla1Pd7n!7qs`!CJx~ zna&x+WC9AdVUp59&WMBhpk9CqutBLps+;7t%LJ0TYAKLM6D8x1qKmWy1U{WHsjdDd zCbdl&QxV)lI8Uu%uOOrcp)LE^Zo+Juco?eXVHL8Wywz2(9;xqbHttvYwHkJ#NB4?yPlWP9 z;Uy1Ux$?@%X3G@(^Ux)QYo5F6(B}HG^aiUX)m^%1O8HP%Lq}oCS5gW)8oGwcrwo+2 z;rMMxFRR~t=&I+|uwT!9;_ksYmxe-@&Y6GfGfN)$`*o|PmA0h2_Mj#B&+gvv+{Ttm zcQzCiHSD~!W#e-j?*7@@WZNEBdQ0iFRR=!dXA1d=%9EgL9rv>!<{)N*P(0LxD+Zl9 z6a<1>bqG2~>X*6;eFdRZEt8T_k8i9~TU@A?yH?I7jwc=ot*aI9d(qw5+qI;4@{Ym% z?;Hva92;Hzv4P;BclIy4YI5`^*EM9H0N>9gDSYjjZadE8pu~#OET8t6QbwOACl+Iw1-D~eEDCopL*5J&h+LJcyQ@0kYh2BA>KYh`%FsG;}SWtj9!{*a7sN-wq-0Vf% zm*Ru^Wgx+M26vFo-F4Eu^2N=q8&Vt_r#CODnET9)H$2gI?d0u?`mbngKm}gA(^;98 zR_=79RM*us$fnjSXHIWxZ|S;n`hrI;yX=w0oi{ICdP_&=#>&abwPa{0U3BA9yn7Q}}`9*Wd?E6Y3(>KC6i`!_k`zYl@q<6mKH)mi`r`A@UN=@j)Cp zoq8N5!MKeXl2CDe`L#K(488N#&|9-vWslqNz|YtG>JBt@o(I>kCuQWH;+LZfC38wm zjfwf9Z-~328^mcQy9wT6u9LuXBd}-E;4e zo7i&c2K&fmQp(6D(j9ih8tu{B#J8d@ai3zfXgFf{W|s22LeP>h^QfA^a|RP~^l+CN zGv?Si$51|?_C1fDbBsSW#?l-claQaN{TB7&;ol?tMp||DeAHnh?LQyGarLJ-y*+2% zF)db33VvZ%#G0F$1lTB~e$01*DdQyXVa=#YCG%L4xxYSP7~(-(zd_-_ zOQ4U5e~FffhF|v!ez!O1<0kW{+my_NxXDaTGQ7mU={eTz%TFq@?!u+gN(?$V$2B^a*M5jrTWq)=g?lJ6O8w%F}Pj z8&BV+^O+hZdLW-*2kd4>{HnJbPQ;{*zm3_x$?6Wjm{yCTC8yS<}5$TY9JO=$kgD-2J?}yl-mX zj+WldRo--~ZEDt}rmCHn9sG|y;_d4`{`Uh-gTqBd!-GxRU;f9Q->^9^G%l^J@A12i zBqiOfzW>X6&ItDo-12Dmq)FY6-ZF466YlvXOTRfO>4?kUQ(wEZ@ud0`@tg!))jMd_ zuoSjywvZ=06|twG6gkeDQAk4BK~8RBI=^5<>b_z$4GW3|AS1 z(u$Q4!+0_}0HfrsB`DA_KE|FPVk%wTDq$ZS3Q($XJY2RnN9>8@`tdi5vjUGm`R z2j#)D4rq)UPklmXQ|w&1^39bi!IQYFLlsf}dvnAU3I=jBGrVXML%!W)O&JiHQVSPy zM=nn$!Ob`3$=yD1BGa46-Se^Q$mm@perMHT9f)fo&L#_LM_aPh@YGL?hNLy>Rohrl z{qqj?y&1_#hKHXqnUee2rVZ*}S@}k^ic168)Klhk)IjXDvt8_{lVB-n*7x2sX8@Ga z>r@|B+Ylbo^G>i9_>~re4{%?AE9C)UKh}oY5EHB|VG(=@Hq?gDE-vta5vCBgmU_66 zv}jxmnc^`T0rY!7{bLC(a$B!eaGg|Vz4*-@7A&)vDk`kMQ2%McJX0~xFMW=A8a~H7zchB9FMMNko(9wB zoG0bd+zUUz#nO{%ZGfI|?GruWPCpMl?^XX$j^c1z_RET;S?zxD* zdH%3NvX>i^4abhjc4K+D_1(`b8dL<-1gx^M((=0c4+>-R+z0VX@ZDYPJpYI3i96v% z?fb|L$PfBE@`FOcRGuGH707kL0tf{&Gqe+EAtR9j#^f16ojF{ym%&mHg`RW=&K%iZ zJWc4Li0t#J@OG7;YV5kJ6@z)|-)4UOt6j`fVlv8u8&KJ{_V?2td#+X9QDRIo+;NAJ zY%D3U{OUt$Dnkv~M?l{hpX zzpsjC1!-=Gcwp&V0_o7FZiQ(bgGWDs8lTClww4<^;r?|{3AF6ME^Wr5H{Qq&8 z8x~eX(lnV1>n-TO#V@4C)p*@ulD!^KgNOYfs1!SRWopmyw~Uynf_!&UGxDH- z;-Bzrn86|75rP0;tXziI#((j$`6MnBhc9~B?20uyAFP)C0ew>=td4j}!-ar)W}rBv zJ%uNFW8c9Ev}!lOo5@2A%Z>@FU=%k}nnzmvH-*O}Sto86aPGU?L`=&AJfn#SwY)k$)vXegha z6~4S1n2b|UPl+l^OdX@LR;}LETDoPPPLr4T*$AV@u&*MUeN_5KOcLbt{ z6H8~H2v*ARE!3`_(j4d^IoNsH+4g~FvFmwup4k*Y3=krBEJl99RYN7O-g~&|;uv3j z>8V>Tdf#L5o<^R-pvi<;GERV|uT77|pHuDwp2LWmW!Q^3a5(H3>zjVOm7*6xTVsPQ7C96?dpPTwgB=>al>a5gip6CD;=hQkrM21hX=wEJ{y3qB;y`*c87Z&85ig_geweAs+Q7* zKlC0=D8ebsad_PM_+OsMf_@QPhK0!CFO`X-Wy`ed3Y<0w=_>BW_6=_eUZOtt27BX* zoSI10-hw$_**Cl)xQunap@z5S)HYY_E9h3=o3r-D?JOMqv-mHzbNSM&?@cZ0TX*f1 z>Z_tVYEW+-T9yC9sUS7h1ed5lvWy56%I=vuPDl-}?EjovBmaxSmyZXIE9sORL#9zqP|R?N>Lb zug%PQv53oo7@|xw&DH4Nx7(d&^<(u1Mq}De zb@onnRK4MlC=RC?ji;E~>P%V7zP4Z8&u-t{Zxho~*jLqV3x{H)iM3(}+yWMsX+sY7 zPPG3@?(cU;|4;P#wAgu)N!@8|!m{ zHAMPUqaD@S$k3^mfh!#}s$z&~RE0uRBuUXQ7c?{w0LcIVv_TnFsyU#g!f6>BfSVgC z-x^@{qUQtjs>R{uRHZzVi=`@8te=OnKNxvfTKYN2D^6R(WzuT-FW{kCVQa(}F3iqA z$;Q$E*RF7}%9MDaQvlYK1O%}pqka)&QG$Za1D2LjZ1yCKz~*RbmJfxRMJv(bBE&06 z(JDeMt2LgjXfMQSq-c{x=}`WvpfhiFUT05seS`Y*3v%6(+J)&e_Nl9FN~feu?MW}$ z+0TA=VGev}?&D3Nl}kw2k?dZ=`6VnYk9b1)=|0dYT$H0}T(pmioi)IUpldlH5Trqu zGRC;l&B1@^382Wg#pigRN0#IvxOZg`$V@Df$Bi#Gs)a8^5}&LNfAPzUF8chTW81%d z4kn46IpY@a2tQBIwIzx(_T1u)KL?JDp3~>X6ZlsG&03Vn_*~P)O_3DnVxOjrEeQUB zuPFG-Va9_z^&L7nga0iG2gshs-vcM&TAE!zOFzoy%IRFt^hBRK#HLCEyVW17-$x^m zEH0Bi5c|ZPCR3_J5@zIoOODk3f6Y(pm$pUSMzdQ2Ff2{lq3yM1L*!E}Lh2OCvRo<2 z(8H83FmXW)q(EdAasx__uEQ&)hv7$srtUbDi`J=EBvrMCgZ;G~-7VkEYpP07kL#r} zKKf6T${1qnDK)aizj0eQSlmIO_q^dvrP8ig<&5Lv60LFu=grh;q4*+v0e+gVljfTa zK){9On^B+`s9XkcLPmo=U3fba97!7XM0~OBjF8k=T-{t&9lknmQp&q}an^f}p^iq= zUr|TnrL3(56(t$PxwSc$7K*79)TEjkcd~Wyni^>GB)+H&{g>tlG~$1Ajs%mr&?I5c zu+PNjFvmQiBeG#pDG^O3FG$CbRw=ES7~%UMGBs4^BMOXegOS> zb9_9_hx`EA_y2~yIQsn@OQb#jEt{6`oO&{8zdM}xJse}CJ?~W-_&MQp!`+}2_R^e& zLL_Zf6o_WU%S1D(F-tNEj>^bxHp|Ld3qx2-FeBMXVkY3qAr{bg5`RgiApu=4=HN-I z&{{yLPeJ5~TG821;c-*Mh%0>+vB-B_y)juv=-B72!<}k0-%AH}k#Cbnkr8Cgmu^jM ztButArdBrf<&dGx_T%D3+&5hLMmNB;Nrs}ZcY~caC4!iuWnJYT>Z%5J(*KuL_80a$%8T~W- z4n&C3QbeOP4Y|cNuyJI{$;90ex0JPz@|2wcC?Sp}8^AxzFof1a%rIxlWLHq5ONE6~ zUypGKMJpjpI+qhx4g=HUz%2b9KDCA4bhbszw#BEYm$H0Qg@5wttvxv zBA#mn+mOx|=pZ4J202FT|uaksAuM)M65ykalH|c<)IH1~J ze_>4^{d1&8db*0#L46lsRyBmk%4@AZTOEj7*E~%MVJE?>I2J4K@~bCS2--a}2rON( zfIKMNWC0x)nH#7xx=K=JKRf~uZq%Y>1ut%iEJY@yr&60Df{`hRf4L^oU*o`XJpA>` zaVpGX{NUrNHCrMYVlKC%Tz%X>P#5giA!K~oyKUXw2C4w{q7GHu@mV^;{uPv$N$2M$+`wzMWXlEl&N#w$tg93aLomGZ2OtCC*DXH*{#q*2H z%DMy1S6te;p?>$etfm~rs+@I5M_#V#sjIJ@*Ih&^z1&riIVY>ce)k>gmR4_^Wii>( zlFmMvr=DP2b|)>VEuX^AWisUY4)9~HfV$e*Y3Rp{+UCfZ#r`GchNY!SW}XqmD|~Y( z5-|Ov=7*jIB=6Xv-kagw7kzuLH$(l#UiS9tOiBKjSqzSp&Fo?6I`_y4^}%FZ&*sgu zh#ASOR@#`}*JqIc)ZHg;OG#t)K1_?`1$6pq$fV8b`hXz%RgFW3CpsU&q@Ye(^uMKg^x4-ZwAzKjyL8 z7MCa;{hDmGbg~qs!loL3iM~D6)dn&3IWfgvSxHH50MlJ%MuUS&{7{T`1*pto&xel; zS2YEVqk%7tne3>Th-Wwwj2Dr?1f`hOo;~&>ECF|AS^`F34Y=85x*T4w!>LseV1AE( z4DsO{osj;=S+9CuCidX3dwrSe6RbjgWukp}Z>9Q1!bXHwN}kwG+$|33J2AQs`KT|| z&a;wDM#Kjt!DJ(cC@NtZ(KXp<0z3xLc-&*D{|sIHQOOAx|7C<8>B&d;H;2>d$a6R? z22?FIO1w9h<^sdMpbX|jTl4PQf5PTGI?w<14_}^OdjeCw9^0R4=9w6plV5pyRWJIN3bLakl>& zM_Pxk8G!&YBBWTPmHqhZr>KQ9sNTMgUFYtIKng>my;RleV@q_%>5MnqW^PIU!^ObozkqJ4tqbPOxVm@7Rf{(U`rUV~4o;i3q6ODEq6_Mti7#UR{HV4uKhl?p8n_uK z*IZQ@%mKU-bTRsg8;LF^&;|4`fF492l(e6SE>kAg)s&YOhC;-R=5y{c&qd?R=x+*4~?G}Rj{YYdfzYpO2k2zQ{{4`d3kUOt@3pzyU5Mp-oQkOF~Eiy$rF2IGKI z#jpv61bHyzZo=W9TLoYw%L%^T4}gQ_IV((f0fqiXi!pa?-0G+B7t1MUY85iyNu;6af+(9j1pUtLvCxlO?J$-&_(%YLHX@I&^;*23nz zwA{sM^#vQRU3+B(TVB+Xmyy$-j=$3mUe5j=EtMX5eART%69o+!+m>8;`N-i_Jx_l&zk5asep0){;-ZL?!w4%TNA5x0T zpEyqy3Ezr1ilCfA1x8thGM-h)WK<114}(oahX0odhwLZM#j)8)COdT@R+DDm&Q9Kr za|w^Oq{0>~5gv@7RJ_{(k8Cy)Ljhb?U|pHXUtNG>>PPboUT_|1qNv!R6@i*c{f2o8 zCcGLto zDux;&VDe4%^}814EoM*m1*R-3nOhO)W6vxJ1eWv~= zDvP@th_@C7Ds`RbS6VczLv0~Itc^mC8&!v$sUf?`fS?X(>00zCg-k{l)A&;8!b)9B z;}0B&8H!(2->>xrm*nU7PVzOf^jcqNT4r62ua>1Z`h1Om_{gH;N3KBXBc4;LQj)P$u;J()T|e zy&iQxHeDPgP5sC3W7DlXzg~hoehEVMZD279geKw6h_9*~mA-^2d7_0)Wd@7a30F-# z%35HhyawtXDwvHZkwGq33rcY*76U0Ux~ifc34%o7^A5x}60V7cD-v$$S%o1%nB0Jb zz>2ccP*Y)(KNl0qsY-KXB%wrv%P~!=uhS7Apa{RhJ9rEZf4Te)A7oKIN&d6wi4>>G zzQZ@%Hb-_9ux}2!>_v{wsi&`rB{ZN}6t-uIYY6nr?P(BjhE;tv^CcEf=_jB<>4> zC&&b!o*tzo#1FD$V~!TagO3Pa>kn~J?Y8_t&Jt_v>d<&CZmq>_NrgjxJE!a8ow*_nXAb6n0enA%*s(bX)X8 z(ZNIeTt?9vOq?f>>+5ry;EPF5v!}p_=TI&R3Z~*eW3|-CXXgWZT+n zup3z}`^etgXAqMS%33Q~5q(QmL+VeAcGRrLUL*tCdb_e5{x4)Cq4hZsX%?emhVazg za$s~>78c?kL54RH@n0%xnk{4lW(T_noX5=yNoy#Aj6oE#3-CWQkJt;XzMn=$YHgBq z=W&C{H0`a%H+<72dr+2~&#+2cFeo#d^5;JpET&-4qQ3Ql6P>*^jFkKl&w+vekaA$; zC&uC*CKz+_XqGDW7Kz*~&#{V=(e zC?-uChp3R2WuA$r3LtZ_L8Bk@OmmOlF9`m8KWd5!xk9cFN6SnlX9-Q0;D?Q!Cyfi? zUn77%8=m-3X-974EAsW{JW@c*#EzZx`WlYu&}Bpcb(++Do^XqgdOS0cB`p|{?5Mi~ zhhu&~O}OSw@Npa_qVdc%Imp`7uTCgS zZXZ@RB?yyShsCvLNfYc3WWy}@DN2NEBd&ZuN=qdtL3D%)GO@97>`D+A3t0PX4~1^v z00r0%|M22d0Ha}t4TeUlBoaQ)I7ATu*wBXyNXp=ez$h@~OGm=2!$hE2eXA?rdq71| zx-IBAvtB^);%P&goMnHdEq^>etDwxWuzq$)>#VCgW~`i*2f&ocK>~H7dIQK5+xKSz zscDC&_EipSs!i|gj0@C+-3KYSeo#&;--6#}Rm28NsND)5o~9m1u4Y0wB_xCK=ud)d zMY9&DUnYS*5;p~xCya=T*q{*fd5hvAc0702!QFzqC`u3Gf|gi(JbPEpeBjz;NVskd z-%~k1qMl4xXoAiVuD*6&ZT0lDgFWk_zt&X?on_?e&{;dSH((tBz@!?HH_Tpu5~+;EPb_g5x(yh z)Q365?E+lHGttxx0Y9XyYZ{dRYz|<#uwHC1Dk}&AuN;f=5+El~fFRDe1}~HomjRvT zpqOAT%OzMeT8e_5wVb$EhD&_ebcWb%LklMr{k~vYj%s^QoVC}RYG#XA#FXyY#r|-Y zYN$gfurRADr$1|To%9S6-FOgirFfJIfcN(&pZ;Y1{m@N(Eu_Cp#iYN`8}hKG3lcDX z+L%e&7+@fNsl3iH0sJ}X8tq;56AYfEN3?f=A=Jk-$Hv6IUpq&-oz11B1@2KA=^GY4;cZ-j+=M%cTt&BG6OdmqfHvgsB7t!opwLuTN&1Twx=4a>uxK$$;Z;Ww2Oiwsiu{#)XE!go7{ct;#Aj0_Swqt!oJd- zI|{=q*=HMFsdxq{o@QsKvRlgQ_HScPI8cHYMzS$qy<%uKa1ZRK`9^xc(j2Ex5@ek7 zGG9MoT}_pA4lK>`&&Hku?01Yt5~>>96v-$>MO=tKHHO1fW+$jbwf}%f>wx53K^OKdBp-ElB`KnIX9!cdCljH(w z#|Er@12FwG^fUCYjjwjqv;AjN|R=Zgh;KY+n0dfla6NjNT4V6eHC90r# zOvI|u1~uBva3{v`ou7v^inR9O$EWM8)_Qn_&T2i2#0e+>*Qec080@7U?R@9a?6M`HeW!MbBM!{`L9S4j?!;r}$>Lyk3xyQjq z7>JKH8bGDA>S{r#o>o0=@}zpy*cPfZs@XiInh$R9i%#gLw05T@uD+BkG^jZhG+>-`%2EJ#)p%D^P;lyJ&6a+EtnAy!4?} zGwu_IithN^cWxOkC?PyS#pA30$gAo=vJ+lfI&k->--EEG0X`}!&H_9`wFfRLG?-O5$k1C| zE$@jwU)fiXs=lUv*s^rdC7Y@`p14PGxLO01Vq0Zzp*8v5Cp)S(U9xCti~1+^XEv*4 zcA&DLGHchdq}TuQ(!q^sRrekH_N#M^;=cBXGc_YWztS;lYIK*e@73=dyRRy3` zWj1ED4q0R2&jNC$Y8rgpn`1{uE@t3LD+NBi*v-kpIL{Yzz`7X&oBoesA%mJiDOxX>8`4tOj4xZ~7UOY2A860_B# zDL%=)R{TfwHFiI`Po@0Gu zCxn2d4}%Lv2g7PfSmU_S|CNO%0gdLbWpsNt2FNX=EmSOze#!!lhDf@+&_v z{})gZfKDAdQ8oc~;(TvKF8l&u6r(?bvtV`c!{Ev@Sz}3l)IsvBLqVQpV3_4smoC0- zceF)6AI^39`N$A`8-uwo0jjJ%Q#P<})2a26Ei3s*?65@NpHX*6_G&t;8aayji2wS9 zgJ;uQ$$-E`-^%9vzZ-YN=K?^m!Ved%&d(}Khn(p-GOscrDnO!L+T6R z+&OY4HdNyeE(h}vfwN>lUps)YipjxJxaW+Lyaga%iBe<{=UK*;3v3*bdJKa%pmN6Z z27k%8=OW(%RWJ0y*lGeGlExZ7&Kp`m0aDJfE~97hh_wFD;l-Drbij9W%5@e=72_f_ zlleBD+O%$y@UWP(y4jgk6A zk}bJrEX4+iV!ZJO9lR3F_sL%YsrFpx9dakqhG>3O{LY16`aI*Qc0gILRjHp)&{VB=H7Qu=t zIo2&EM9;Xl7;+oUt&QYkkPXW~Sx6C9tn(ZJ#E2?qV)bX>2n7*G25)G z+BDWIdrgj#L{PytU3nJ;c`0a=1FdtCkcC0Swu1^4 zql6=3D**Q^5@{MvbdVYW5_D+ODULb?y46-b`W}dP5wFF5DMs%LzXEBI913bQbb^LS z2uo3wOO1y0fkeA*JPE}`4fDgjM#E(5yQI48EUA@L*NXJZ=7c6SXV@pT+Uq8FNY9jY z7Ki8MENayiShbqnzp6T9clFdGY^mw5-%1C-tEs{dk@gKc9??S{)h132z;uPfWD8{# z2+C&YDvoG?6oDyEch6BNLZ*CdsB-XJBZ)8dFO~Q3T!nKCz3`|Q7h)DDZcBXV;e9<3 z-KhzUVLv}1XpQ0{xgihAxg^^EVvnzPw))%bt@V@g5+rV@zWu>w@K!tUH5)*iWK{7) z7b`1*51;_g!yv&K#KD57+=6rk*?181;3}OOj!Kd|q?q(#-x}x>jyD=C?YY2cTo*1p zkixeE5noD*Af$LwJZ@Cf#w3%82o^+ss>fJ)Mi<@8gO2ubOFvS!a)^~CEPJ{7(#b<} z94a=J+r82wT!uY>xFJpzyg#B0oC}Xp((?0hLSVja!MZ6-vu9L*#SEMEsNSNjp5mEg z>g_rIV372DkYfms|)8QCitb^=xccc%NBR}9!{@W zG_&uH{o5}?>EBTnkPeN!ZCkN__q^T4hN_ybWjEZoEJhIQ))ZBdGUPXK`a|$AULDCq zr4t!lufXXp6b^-0Rp1t@Dk@ZD1zj#Q=7!%4J_Jw$*E{kqf?{ywlQRrbN{5652*Y{4 zQ9N{MK`BF&f-R)-xAm;vgG)y(1!{E=Cwy0k*Tw?2U~&8<)e`&jXfDh-E_SIZ>>#`S zGj-8>>|}Xm+ve@78urdFzG_Fw)yHNHJh|u47ds2`sy?{!+JRNmADmONK&jif=E&h) zZ_E;N_AH2ge~0wJv5Y;#8?Ma_U*erqJb2H%MUSpqv!tfccdBdM+-qjnb-2>QbJ0F~ z=duO&T!%wIyaQY15O~KSe2-2I`36vkLBB8Qcm9}fV2nr^#OsEWo(vw(MK3?ESX!o zrAqs)Bi$pfxt^kYKmcy!nHjkDAoAJ0`#ft{iA~VRaD74j%yJ|sH*>EFp<~dwmzZq7l;`ZxN zY1D25)HcRlqh2a`rh01emD7F4>!uEm7{pW2|DA5m-XZ-Us)#d^ol;=Odmk;VsVleK zQrCiJLW|^Mffv|}rKg9zDId|k5#M3ooPt+)f9&ZajUL890i+zK&j8mPzdI>L13);M zX2-7SPCD%v(p~{_9DfCAtGJfJ)p4G8h|HzhJFtcDI~RT_Hm!v5+8u-pHSNdrY3b+F z3LpU2g4JH_z(3?Yy1BL)es0k?qLT)Z0gyiz2klc_E2+1WN9slpzXPQPc zmJGSi|2*G!bB&~#v-eqh?X}lhdsV;K+c#b=et!biSn*nrJor|p{EVs-xQi4{(+!n$ zA%$-d8vH9b3c@_9kRk)PD&(u+OnlUl3=~0OazEu-uv4?*3+2r~B85m=s;_qAvTSnY zMp~Pe7BlwL1Y-FlYF@YeOxB*##uU$T^D0{&MgtQ%-~B4|BoxfBaK7aTg3TJb4TNti z;#4pyrSA*1zfYCLOTOIPkA+hkQDQ|QyId6nBrl=ko#eBS7_X-(}elS zX=GCXkZ>pE&xzP2ky{0zUl9~s5kkh01zl4tp_Wi(gsAk7R7 z@qnQPfax@Ri)UqZ}gF!wAZvHc+HV;r~6OD7H4!J*5Q3E>V)B_tQ#6 zCmKfK=b>@2$fnOsv)htm%i_w?vUSENMX^$=-FlNoYj>$l%oPoXqOLqWG)jSO+O zm~}}ml<#HV>lcl_A?_AYCe(`VPBiTFh(+acZ%=lI0%#pfH7o`4kJC^OOnfa@YsWF|sb9Y1(WS)vn zNAG4?A zP13VF6I52qxXkv;ZDPO03USvbZWK$G%HR5qC4Bs+<6k8)hYj`M$w89qgBEg>;Frq00>~UurFkgq4ag;`UlGLd#B@6k-^d%k$U_0rd*ZM*a&&fiec78uqB^4T4n^SJSYxv~U|) zMPYEJyNWB(m6q+*geBbz0q`bE@89^*tw+B-P+eupGC4AO@&}H$4ct&`OVO3FJ7imM zxEMYxmC9T8zP8J<=k-0!BLy=$wEFKx#BX}2YwhU;R!up3Dc*7?+kQlD*zsK~0y?h~ zC%_R_HPzq1VKVA0-H6*;mheM|Assl3cjAyVD$;^|aX9Kip-VZ0;1YXj^YKDa=z=I9 zHzH}lzC%6e4>Xo5z!(;22Lg-(y*YOw%%)R(BeV;!`%|`YfK4EmA~r$1vHz~+hmU7u z=;E|9V#*V1`U`3nXGTV09g; zr4$wvqFef2m;BGBEiQbk$yZ2JrPY6=LJBw0>ezZttChloj*v1~Omk7(2qp&=m0Y7Z z9x^*rz-+H47l9rHo{^ocme8FHq>HRvuqhPMUMA>tiiL`1Ur#bvTmjs#rDAQB%4q%m zFESkVx>@BQCl4@gXRI??D7o`e`Y4@xQO0dWvG(v#@A>*g0Xj(oMG zs^mUnRw~s2Qb;_rJHKgbS9SmzH!L}KxFg>#zg}Ne_6(=^@PxTr4_B`_ z)--|S*L@T826|UN&+p^aGTF2+%bp_li;v}D2A>BX$u}fA$6(&3fO_eu2GrC2zS7jnFtpC%8K%`(h_1C zskl3(m{767?WuA(s*4<_()rvx0tOdHEvp;G91P4eMzES8-;Jy%} zA~6Gs=|NEokLysUgGhHW<3eSS2G`JOkHU@AZYO@6Vb7);sU5~qKP81CBFU2Ek8~OT z6jmtW+tT}24+cf*7ieNbO7?U={Nv`$mCH_T%v=+Ie&wy@w|8gNF4~-t-(wj4UTsu{ zwaAo*koEoB8W!i)tuEbiWHJfM-Lp59W|q}Nr()d-Jp+Bzr?4CvXJ|C!RS-1uWF6`d z7XZO7R#B#?&TjRbQf%Rcbt%qp0Ky4?SSduW>U!}*C#=vjB&T)7I?8>u?rD!j_Fi@t zcv*RNd8#cbJ~jpwhS18+WS~k_Mx}-*wXafQ?tR!oBy>+y(!hR@*)ClXi)dj2WV4^W z@r&)Re6TI$`m#yFZ~G6*XD&!G$2n{!%Z(qMfQU5e2flm9l9h7Sb;s7<)RU!es+hnQ z1_wm36CJ3M>TICqrI3}0rBOzEWp7RI9>TF=|Gg79o+aFdqCvp0m5cyR9$esySmZtu z(@+XQdmt;@(5O|!DwJsQOKE$stKDihmSxG@-gMP7}IiOTt{7 z3%V;{Pl<3=V|64>RU=}%EiBg>siZTMpfeG6MyUsbn!;WeRz!aBD=RHgwc~?NEBo{9 z1NW}n@SRUGUVV6bS7uKYi(WDKUHBcP9o035e1aW(Oa z0ccqxLc28~HH|9NyTQ}CLG!?jHBeb(^>3eD-M;x?!+rC5Zd*M1^so2*@qc!%vcL4> z%`g9pwa#t6uy$T+OGoz1f;Anz$JWgof4!mn%`JoT`i^hvJH2Be=TXpWR3~va&J*5n z7}B7Z$9d`H&U*?>oR_(#;Fb)f+|8pD3yo8w52RmvEXCeK9nQN{piyqE-19nUj924$(NG?93z^6-yi9Uo#nc~Kv%(y7+Q$S7>=u? zmS<)t$ffKpC2&03eSMtH7%!{_d>oX@uU52+A~*B^tEsl+q{M`n=>H~&O0$>Vbklbh9B53c06Om+|EGe^lk(PIeEfj?*JKwi=R9`T zH3xHnNA^_(kMjDTyngoOK04A_@;9UlkOh?UIq43b>%yU5Iu(h3tZ`q($tXJ0jlt=S zaNqls+c(s`E4Y1MxbeS_T>V&YX}KxGP+@P&>Ahvq+M6m9lXZpc7CF}($wHP&CV9_Q zukP;L^qtKOD|6DDw7SPalZMW;EWNreNK?pOh&A8D8iwVj756#s+NzUU_`Gz8>x5tI zBh|5qzA8l~s2<_x`ac{NBV=bdWM_m^cG8`DTCx*o+gtx|oo!#S;!o`Q3vhW6&dN+piHQnB<4Lp0p!b|@>PP2C>bg6YdQ07x_P_tlwY_(|vQCbbUf&wQ z;;rM)uzx!AnHgo*ZEvmFcyfN(^4w-iUhtfTjc?0Y3%~N?ZG+F;*q0^kENfl=OcIIjyLs-3q2+0*LTIGC3r+BiXq$E zH0>~E;GUHjZ;e*NA{|;>EqTd$T&^Y8CyHgzCG3U0dp`K~X#1{)nLEC>?Y1%b)wuo( zH+L_(uA%LCSmN$$-&nQdN4w>d@>4rAHe7$#!k*1#gA2YgwE4$hKPdinXUdW-hnoAh zm3L%6%gSo)_~K>xPbBlt%Wp^-SZ@U4soEVDvZtavL{uc|ii^$|hY*+CcZRMGQj?x2 zq9{0va8pIzL@(~EBB(<`D%nMAOsj;`8Lu7ccQVe4~Sy+uB1cg*lm5=v-B??Dr4%$n2Y~buu zToG6X8O)K3;rdE(%u#wKS+g(N?K!c$2@g~lt&$nVRu z>h0~zi-Y91wZRN6>$D7IND3`wiz??WGnlW^n6qOjhm@#h z-iSmu+|G>{@yg@pRa7qfggeteT}Vg!RpqOsKM%h4DWikwJcTW1fAg2L9z;vgvO*5O0*>xxSwa-H<`$~KiS zaJUD%WHO^+7v=57n#J&lMJHAtK4rgP3I%nIw66NP_jxH4h8^Y@wJ2z;=#q;Iic=`0 zz#k8%K3dg+|G~0~+Pv`NX$KJh07;<4XM^QAAAs-lyXJ*gY3M%$JTT6!Jw>_ter4MiPoUfqZ3FoT@b?oX< zUo{u~e9fq)E@*HtmbhYZ_A0Y)4IoN>djUCRXC`okbZTa- zCIO(xxhOD8-~mp4y87TuW6LFpmlB{B#>!WaB9Iznv7~|m7vt@A&iz9v`_FR^R~)MR z5*|5$i{?t5qc7|jePD5JbwsJ9G_EqSbY;!*8yrdIAvtWsk~m{zWvwb?^uFFXYw~iM z!VMR~taGmG1FMXX1+V@6YiP~Dajd5ZymFSNUq}0fPML z43tT@TQK= zl`Xc|L^h;VHO{JUvHZY*1pGYGjoh>{Qy6iW(_`Q(0L36i#KkR=fn)d()I8Rg9LZDx z(T8%Vh6{fN;B(5M#VF4zB!ohp+1WF*XT}jJQH<~ONr~a$OifObYJBE&NlIQhJWPA! z;Su{C=2QcABnS0jwJ?sfoF^WE>bQ#XOtpyw?Ixg$E(xe*f(>IR%!8lUHq6vPVGLdm zBa}GsvuJec zoRXgE#WNmzEL>lQZC7|hS#bHv_SM^ZvtqMN+L4H5^S~<1^-lf)7yxzKSnM;f7h*KP`O|%TL4@S&M#3@+o#{2p>r-;yBus2=Xx8{yD z;i7g5Pp0ik0H zRg~-Jq-t88!YWf-&*A|*38O7KaATDBlZYsQ+6F`@vwA)0oKC5SY$Tx&1>7j2(&zf_ z#!WGjjV9m+@-*BVN0GS^ZXA)cbm57l=tNwFnBBPAILI=+u=Unpzc;>K4JvYd+7p?hjs1@$Pb!vr0=+l44^b!h~8@tK}80`S~p=rSgtM@WiaOBxhV#39hzU zBxrwJ<{)#E3sw494C=V++K$rE?xZL~WMc2q?c0ozrUgNvr5%koZ`}FZNY%<&$x)`n z8KVoDw>QMZEYOAM%i0=miq2oWySaPAg1nHq^BXOh-JLC4P|M*Bsc2Syf93pZ<}4U# zv4ypEw}cfB&uSW~(o3(4Vo60Fy38>l=hFMCkEL%4g~EdlBXZ$|tQaVYbXu%9Ep}jw zeD9mdNRJ}|&3*-m+I3_wkOug#C_^~2I^4X<#JCuZ1^7C*6=v5gynZm7+-Kl zM~~T*gt({(w3#*tg{+Xf1PXY!9{AS^kq4lNaW2PQ>U1F<0L_&u78>zurH6L4e`8PE z;l#8kV{&d~epY{T)4QWry*ImKTg|MD^6>RROWZFd;qSram%oVZIQ zPUBf66A?{<5-)JGyX)5gcPL;3TJt<6;-vX~isQnbiZ;}sv|x)-FU({!b!Irw6lH7z zE=Z+uqQZp>mrgeQ|j9CZ*S{3GqYsg{MbliXmrTB>YjZi z^ZV7c<@3{XHdUk*MW;k3Mn%u9$(vbInm#KrJuxq>;!sX}O-V9B+Jt47KGkd&Z$_>_ zC%R~_6qY&`ujuP&C@;#eMd~%iVDvE3z~O)>HxyP?n@yroqc&3g3zR?XwHT0xA*lFI zoITQqFeP-hWlI*$Z<&ip8y(eXk&|u>OHH)~A#}iw%KFO6JpYAz6;4>UI>!+)@dKg+ z&l7k+;#WyjRw%JMe}kLcUSvz5k2KrE*4aouOKw?e={ztqKB29& zbZ%;@ZQ;z;rkLoi!n}_3Z(0}VITwAz#K$I?zjMjgs8 zXI(WJ88xldj=AMCb1LlhiH_P08|o_1gA6Ap2i*5{>7-y1qJ=>hXM=|X>4k{0MY$ZU zun(2Tfp`XG4I`H0sEL$y>###hnc_KqwF@?z&C%v)#Q0flAxU~igO>ni2P4ziDXTiL-%@y+qj-Zt@wyqUdKIrg!1lAho) zTDbI%dW-r`!b0J0M^Irtl8OrQk`j@K!=-cr@g0B(n;BvvoN5#}4}!PU>J^GX7}I#f zw9Wmnn$%F3uUv!3nCpBLJl!>AfTt8eZCTLO1j0k}q|8(;t_}7`B}E5U2J+r1H-*Ru z6@sn6v9;;+EeSd>^mGytw~Qwt!4U58NR3SDS+a?TC%2Y-0)($MU6WfjZIZ5nD^zF?Z)d`h(s8z1lzi`W`OBFXI*5{VZElVjX$xt`5^k%?-lm&-`q!tzkAy!+k zDA*8hYLz~TeJXbBg9CqJ70-Oiw!Qhz9IjglgO3${TB7|&$3x98)vJZl`oEe zMV`+Nh_hIQ{L1+EW@n{_IVI%+woRS7%oq{-94)ITy-C7+rd~(OBuez{3eg&X?n2?s4mYI>BVo!k|GCnRkl0wH3B%l?NS+a!( zT9CF85}Ax{B!yNenIs!&Q*h_CrRR$2be8c?{I6}V?zm}))m0l4R*$7Kp>}lky)6F8 zh4-!)j8Qc#d1e)>Sfw`g{PLCW%g^twdPF{Ws`EcS*!&;NdfkjP`Mb{~|v>A z>=CIOqyg$_#a=Cd4wedA^lpbC2x>HR1o!0LoTO!-*G&9y!j){`Y7OOH+Pczj$w=?lBEdAX( z<8xvVzq;jThb$>^FN|+Lg|!#U<*4p>LI^_P?-U2_$o|#nG-!w_Q;c^^EoT}?wkCe# z22p&v@y7M*PRyBeV%_>18;>Wp&zaMal+-b2PJ3ef))%*Id2#Eu7q7bN#cfL-8XkUV zN&kaGLl0sL5Uc5g1F^C<@jWqI`lonYYUOa%C@$Sl4{%(&=dsnWtp@UG>t|Z7*!u^1`gWF@nA^l=@eTFYm{xa-P%kbJ^p6YT z)8c~j#V965bX5%2^t7gXTDUb&1I)m`Qyzn?wJ%=bUQ z40qls|MP(Z?;JYx&Vl{EzW(}O(>-~W+$45!+MMA?H|h|wMEo91N<0X=8lLFI4_=%M z)(u;DaxhUS>J*hi3(ywZp$MRux!5IFT_5*4OU@59?UkEKRq^8C$Sl5f0upE!_cJYy}&oBz*%mEC(g2AHqVrYal zSOwZc(ijStIl+mHWS`1ahvNyF#J#0zDN;O=1XwH)DmAC41!9HzPSDd=T=c{|^aOms z#@NN_X}b6zs=6FLq~J1DjCeG;xNAJv5X|wRyaV_^Z(A${GFO@nS_u~BIP@kxf*$>1 zgdDvAFoLhJJ-rug5!fIXMt~BvvO{QbGkbi_>!C67TkL^`n9$ezbu1=CDro6{a4aGu zh9PK@Ca3`|JO)}w7DgQgGhl#7=Nvj3GR;}~N=;cLoHDT}X;d@Uu?xQ$oDTt9U@>1YrP_Ak>@|pVzYCnav08X+=TBAF&To zS06dg#OE6Fmpkmiuj_*=t6KLqFFMkEt$gmSJL!y*&fKrw4j=lnRC`E^i#hl&*e_-2 zqY|iwd|LYA(wX&&&55+4bg%q4qOfr@oRU z{?=Am)qSY#=NGyko(J45hfGg~job#_VMK zdSvsXgq04iMVF1;=_&wt*4yo2 zHob{wlsM%EU1uT*1w8a7MU!7#P@wz;3du*?9B=WWO%TP$mY!XH_*hzoE zKS}>7o*Een>d+?3c@5_p!IfgZ3Y3E|u~d6%syz+pqmtPQohT+WXAYTyKW%Y=6PXj3 zl45Zs^sQffX8H98(=rTk+RT{3r2cgkz1c-kgukm+HDpv-r1k>VC5C3_tjR@@0Vq+YyFp2W2Tk5UF^CCz zoV}iMLcJ)-X^r(wM=W-m#wJh`;lPOm>8YFke02AteU(+F9CKlGeOdR>u9e4YqHCGH0vx_e3W2pX^#SnyV47 zW~_2Y-+}99M1D&ZkyqEgdrr^U^;^zmshUo>`8oN8j$X{k7E#0yWJCP981_RlM1zr` zAhr$+4G9aVeX$=#C(2EvGWFaB265;Nm>JT7cnSlK1IL0OCC`~M-Z^TWmd+utm!@D3 zDO`aa;>6R#Czt{zv3PQJEWhRNr;ub@Q4Q=>W%TARySa1VM14jBVXynP{?^@%6{{5V z?fJ>>ZQtF<(I>qH{B>;N_`AsQw{nw$ztSyz-8-t&LSIvd1A*7J_k4BTRgVIJWI_NN zcG)%zDAs^ZR3=nag4-B2R&ojp>=4pF_zi)6LLidDC3F~#Mzhgu4MAx|ttmE34dXH- zIV-dl+EAL|>CwINFneOz=wIZYy&}s@>}Ikou$40P=kVAtzQNybxb$=JyHXAAlxAV! zMd$04V!aXcPP`C;nu<^I{CM7yEEeD~sb})n617#gcteM}w^%&63lJwJZGcfFE>!_N9#E45 zK|NG=!0BSAlqqjBKL7`DDUNop=jlwk9>s)HJf)cV5v@$A1(eqp8e%cQv>MOiDQN@I z$`eE(f^QTG4Yxs1GEl->5B#t$Hz-!yXl+d?A8i@GS^V0G7hgn@+0E>=i;>^j`EsRNg+s1 znt-}Qmgr7?B=P5#lx|9+)T+RwRyy)k`3d%#oaIey`xIg;Yr5^~HgP>rLi7flY~?t~ z7m9@MJ4{7{mgFSF#*v~#xXE!5n-__#Yy;&@TsF~;2S=#D>qTy^kpTzcC6|BRi8JR* zfHRO>VL|o`d#W$qh~$)#E&=gozXF;ACRv*g(FI0pz7OG*5|&B7uYkUPo*jUXquf!F z2){n5_Q-y8>7FVCjH=WKn<8_6>j>tI!h1U$s+?y$i@>12hx}*1=+jm!1ZBmL&wZ&|$?9E!c zD&E4*4gR+ne=U~>?xsZlg`UC{q#AlH_iDb}e48;+Dh#fU zZX76buQz^m-qCxRao?G&+QgJpEI6dHH=8f`k+pv^gdKFR^8{XPox3;*fLMz@MnY!k-rY(20MI;4@ z-+NfzrjCjDJ)M~>$U+-zDh13yx#=2J6r>$-C34~-4^sn+)Mf$I+q68Z7VWp-!Bx7- zkb0oiuJkZkMG#Uo79@<)KrIyZD;Pjl@DL-qX@wT&WJ!~y(PpdTMH6U zP5>^O_l6%Ic9dFYXl6%`*w)|HvFSU1+7TYHPI_`vj2LQ>w?%|Ee)Hm2mR=`cO0L^j zTTrJHpN+Jyy}fDWiT)h+nhE_u7NKBlaAcxM{$tQh z?G6fFA^xa8L>2M7py1lGH!T@{E-H87tb(~(@tI)#hT-Kq%2=*GOw{y$xG63|e=eo# z#JW{yU_}KpbKQNy!8x)WnZaC_2hKzuB&kM#0l2)=POw$go}QYD4m#xRP`hn%0o`|;7S(py@5HCOPxkfQypTnW=Un|tSn0;{eYYK6-IFztp<;c@mMuBkcd&WxJ%@h9 z^etykRAefGt@%8Bc6E`t0K-n)~Y_QiHbmDP8srRko-*q&BVb1ydN=DmHZTpI-J zPPmhOpYPEY@WKMz3nGPd#L&SzC5cwh6mdf`4hGyP?nj>1M%hzCB5YKn%(+G-4kA1~ zMNmjE!ixq&U~ec<6F#Rw z^ILzrZ#N71uYa)7pX^u5AO54cbmI%Ro_@G&W>R_NPvuWnlKB&K)QA|cTci21*gKU#e5 zUw4I_dUkC_<^DA*w-s$Uq&AvVul}C{zlvt9to9fS`{>E6k(zJ+`cLbw`|GV+zqRJT z3%S~yd{f^4eD&799sxY*RI@B(09o8>Th=aF8UZVzxWL6_-xi_&7-gWV%_M^uHSO5=fKxYh7(NmXyEA038 zHPSV$))4NnXW=#m<9~6ocu{V?fnAi>gf+rCRxzHQDgM`{w=o*@=!64$#gFj;`RSQ> zM4AJ*;1BkOi~7HWD{qEu##0h9#}tO4kg9LSrR&=%;B^8_;Upe_kI1bzqMqQI@YzsO zB?9<2qi;F+z(VoI>NDU2KXLJa33^>A^ty}C>n7oq;E#2||Ap~Nj(-oB-gwoBDJzQ7(hbQIF%$|Yxq-dA) zf_cO%pcekP1kn{zi$K~ERH{dKuHuRkFD|%g@u|nQHkYpc+SR+hRx~#yYjO2m%j@cU zuUb2!JmUI&`AusEx>vk4dwunFgA13(g!MNK@7&s3)Ls%~Dd=MDt({$^j&57rJo$8U zOKx;WUHghYr;NiIfY+Y^uM+pK9EX2JSyPKc?_QHal~)?v^pvt>J)%F|&XH*^`S`+{ z`%gZ$b#Cd3yRP1KZ*fb?j3th*Ew7us;HtG5Wzom>6gI3J=wAJs*&|ig4lZ049ns%3 zvU6K+QAbIzxnMq<*V5im=2*y=+|-&I)mhiRYBAALo7f21NAW8^chS7$iC@9Ka$4&~ zM6g}Rr+v+|Md&ZPuX(xIGg4G$#Ax!%*cEN-#UoSis`R&^!v}{JEQ=0XI(K;cRkTFD ztDP+!oz5lR+T2FF+OZNPHk@+tqNW@BN(V?1Hb5@MgHI>IYh5m2U-L5KOnglN_H~ND zs;5eebCAZ4ef5ZX$Qp`;^mWr?>l~D|H$!*+(Y9J%pd74m~mjkb>C>-;iX?jU@xJ^p#Kj!XFU1~f>q#B zfb>K{0SQv#+4JCsN=*(WEt3;%ab!eEskg_|DdL?z;*t{a>3ttQJahlcT){luLn`Nj zBR$KOEnU4RYgvZGl5hFdu2xZf@5$_xlj|2BoIB_4(S2w3_Ag$O8W)y??Qxz%#5ZEU zeu)o6oI^#|B)EH@RKRwrFq5F?aJzKPN#9Of8+1+Q@?0v+)XO`5`Q@=+-}26`c2RxL zsqB=S*Y_R5`Znx4v#-BzO-gK-Oj_#Z_csE2Tm14;3I)=2iEj9LlFQOI4L z7ZWWAw&duHm<)`LO0$RP=rk2aLP6uiMt3r@>4b!bp+FWKSERoo@|H^kW|)2LuWk)r zcz5^qXV+c(tAhvMy82u49SwEIpiy3P3>xKG`JHPu>RrFRXKvYs=Xc%q+pBlWAHMgA zeBvkK!SdXsSrI>Fx|I*CJ^e6%>x2&10^X1z%ziSIpob`>dwdAKLvY4w4l^{SLAriM z;qbsv1&cYtHu#|t2k0pjwm=+f1{G9Lr=tt4Gj6~caa)Rnc9)sTy(YFN?3H)>Dz9F< zVtbK&%|KPN%4AdzFWr29@2=mSJ^B8@Z^_R^k1Twj#ofE<&FeRRy=TsmMPa0TW^eh{ z@ZsMdJ0}1A#n~ZLm-y0fH+f=l#lCGfo9 z6RfzcY5*VCj5&b24&_!6d_~K$Hs9HK;N7F|GRxv~pUTjVFaG@htULJYL(q<|K5roa zLqj$_J9^t+j>vDy-@QTp_>ZV0Kro^op!f}*$DHA?hj2s(LMD8{87e48p-|VoFbPP( zRVTz=Xf$Ky{fF|?=hr*hQaTnTq^nG-+KP==&A+Md#QDJ&*{6=eyPoVu=tbkks@7q> zw!j#1%j&)xyPkYu!}(#M*!>U$AWQ7yC!OWDbm^J%pG>CB==Y>LGdVWDb` zo$#TNY_qD?-j?&Jc)ZqM#EM0$9u`ouNTaf7D0)$eLNS{S1EwJKW`pmSY(OsChVtIo zUu+`1-syzEn#fS1(D1wApyfRQ^xF{@zm0`R3KAe>h;q9l;@feP1;!%`W9)T2Vuv$qRq!xe-IZXnDIPE?a zm5iVdC5H4!=R@vq$-)_UEYWk|vBVo=cs4KCy`R|weyNmy$6kjB$l$S{zMUFJLXc3O zC6eDcCqu*uI8Vy|R= zLiLr*Psm3rKcrGyP`7e=Fcn`M? z7v|+=q$MRpMNHW6pa2{0Hk&C*wC_3fp%2yFW1k#Ay}1N0`dA(COv4*Qi*aV7Qc|#)Gm#sXM#*6zO5nX2>6}zR3i%VB0BrBO z0>1|#%$Bp^iN{-WpC<}kYl}?G_YOmse>UmtcMU#rXIF1;r0THN>T%odX`8ZU6RO(+h2o#zszdea={V4m2`euB96IWKu%}vt z1aO`6yn2xpF-NEZkQ5{8Bq|p|F~GF%g?f{yku=~eP`lL(3<6zzd#JKGwMZL;)S{p^ z2wzuF8#e@(3VMwm9O6nxqQrS(qS2U`oS2*t7mePV!9hmonrOR(;5-Xr%85aEg}_Zh zGQw6E`edJVhsC`=w)I-+_;{;}VIw6>s`n0+Q}s?d@&=R;mLK6Ubl`_{?!OLvkAOU2f0FEqMimOL9aFGhg68qC$);o z?1Kq=X$3{}m4O|catfcZbh^LjduemO=zHlbS8}nA{u!5nd(gFXQWk09ycN?r%9*{= z;W)XT;fiLEBnYL&1^Icf1ly5c92q`AFR=L63ly`W(@7gJ#>0=gs2Z3e?8Co4Denjz z459*U{g38=leRL1QntZiQnXAPGw8|FMwFF}`xL2|!!8wbEo}7$quwy2LPk7Ml+ox* zQQ7nrbh>~;eZ~+aUFmy>g0A#EL^)`j44u=&bxvRS23<>=0N)i3abFG+1firTCoA2a zjGzV|z0n+yKInD`0=bOO*dKWmU*3%zfI5y}&bh4UhIcyYBaZD+c#ocig72V8th*sq z8t4NZG|{;R5g&T3!g_F%TdP9(Fc8Q@Zj(lz>OmU;26>()s+j)VJU4IJa})KXdp(Ui z6BDi&OCsCQ|0mW={K z8tj`tI0m7xLNpNI5E48rXR5~`3OtW1q97_ZwxO;dFDE-QBL(HzV-xVd88#zTN(tyI zo1{BLMylPVR3>^1H4pf4pULm|s1}KPrDvw=Qd8}iq;()8Jk`t3HmA+yXA=}TagUP^ zpf;~dPUu-2(-}Q-0%AeZ8x%QVB+bK!PPRs!TTW=Te&qxmr$9py)8;)!krRO)cFCiO zW88A$3f?n8PWTx&=vvwY_&joAT0>k1-vK#MRFIpUZcj>}raBYOegHYagQli*llT)9 z-IsR(`DYbRUf!KTe#vU*{ZxV-VL7|#h@@zpHl{JRYeZ2~%S@t!>5X(fB}Ux|gF=v` z3POxvkeQmEL9I0f!=|e@l4`LZr6SClAoDP6y-Y+R%P>?qquvB#F4aI$TOn6qayA&0 zBnQP#NEf#(pf(?J{k_f8H5 z<(V7|%8?-mOP4I_?&@rrJG;K7s;ndr-9o83M_dfGk#L$WLp)Mu3Im9nr%kqCU;a|+ zqQ`Gt##|zO=+}ZB=$7iD*RH{b@A79-8UOb5Gb|wi6gsB+7TLpE*m*~Yt1dz#Gg@jv z!qkL|dNtI-S*FxSaMG*^;?S&8o4L`+q7nvi!!_#30RiLkK#&$PlGCgPi8)H?o|f}e z*XxT+1Yv71cuz#cU*iiAy^{wd4F!g6;xt6+%|R4j3|sSl-}!(Z3U9j1Y*?2!^~HY4 z$$Uw<*O8Et(%dw=p}tO`;~5!VCcI#n@L*V-f_fSEqD&{!{u`GeQ}I=W8?!#45D1A98z1UqB5@Z!Mg4v>hvkZJy;(Qeykb3YIoywpa3PBI*$#xZ` zxo1BN96{zF4S4GmZy^q73A)mEka+lCT7ct!6?S5^=|Mq8WAogGy6Vc(Vrt4v%|9Z; zgRDVTk0mM8za=RYBIXOTR?QQql~l~r@*g>6o{*oH8?T6!dh$0FqtxdQk_}1>YS~mh z%@lRwpKaV;^$>AV)DL>a5t*HZEWJcDJxyRi>SfGg?E}?%C>1r2Q0xMRy`bPADR{qN zLY+txIe7(jkOqCPgM?sJP%!aS(W-(c!75lxRtucGS~j2$ap?hQn`Sl(15}C5<23At zYhJHKsXMrezwlf115o!lyPfn7(nfo~=+uCXY$sanWnQ2hg)a{KWigZj=>NAdL{h15 z>RrIS8@5aI&Y5D*4Ted3K=BPr|IKgU`kC2m9Do&QozWG5FfjQoXu6^?!1?4$yVG5d z?~5G&EAN01^(=;8Scy~35QLQjOPBO7UerCWV{TJ@ZDmDCQC@Clx-B_AHX_VoMxjLV zXotGg!Y|C4rfKuMQ?X&(dPO`-8lOV`D*EC0#c4CK@!gjb$ZF2t9)Ub2{??0>mQ7Nw zBf^WvnbbooyXV5sPI@8{cl%*k6DK5=^>14-v8aFdiissb!wrN*ucPaCk2njO zxMX5M?O$?6&Iv3QAC`moBwsk)EXTR)Q^D&yF~vN6`8mN-JixkHN~$1?Zdlsi+1^kW zfSCmUJD7V=SNfvAInSS-*1j(lL!7caQN+CD^$>bwBux1-ehE znCb|!LOVx}sL_&x>!`~CSm83AAWjw6Qj>~%{>&N^?xW6oC>rEwgk=(RB(Be}0Ua?L zs#&ZAS(OK#0)sHxA|P+mb*VKNCQLw_jHg5?qeC%Ebb8?}Jnp6_qtM=yj;qK?2)~GnNeh zu;i>F6ONK7sL(O3Td$nTCiNgiXwu;!r*JfNIw~^ayV|PsKwnKjXw%h`9e9&=6sLS!ll01WubX zd@Vv-3*A!75ZN{}huSnIqo+IReL7^ugN(db^)zfH&Wr2x{Fcv5K~d6Ar{l~Ve=8u3 zjaOaHuc-$8k*`akzXtSa=x0wltS$;%&|X^vPlAgG%Ur}0kYu2?Ez>xID@-O|;wy2D zr$?qDz6qm==)TO`i5q_Lw-c$(x&qj_qG2*|U2~ltM(ryb41VVs%ry!}k{~SZoja!j z%^v*Fr#rx6dW_9|dzrS{a8OYj&P_$E|GW}KAu!F3ja`vqAdt^?r<;z=$oVJ|9PEO_ zifUeDm)4aP*rQZ>HyzJ}-vWJ=QIS)pM`$D*FNlytR0mI>RV$8q?$~sBuoayEWojl2 zqbDI?nY7kD0eM3`gNSOsz`N+)aK-N;3eQylgpJ3!uqh!V$}X73C{EFOy(egBdV@g8 zo25g`jK| zs-Ng|>H$<@;F%}`Z9!&{+`z#G(b=1=9FM6{Hw+PSYC#8WW60lJ2nSd-VJt!dpm+rb zZ`wGtcE$4kzU~E`?Q`eU*32p+mu*H`GU}lQhX#j+rP)HMwTHrITp3>x5}TZ}!CY>B zB=S%A-Dj*j901R`yYkTQKF?`{*+++mjyEL=FW8*RUOgz^# ze||^D{P|5|Pt8?+o2x(WJ!k08^uHVLg!nSbUXpq~yGOlL z-opRb#s52AclYzp-+lKBFNnXt`@7%08~@}bUL$rnzrEY_)9X7<7;Dg%Y&rIx@|9-_ z*FKdIjcw)GA#`3dsRSHR@KYRnYlVs&&*yKHdE+_8$2U zuq%4Tcs+?so%Hm$SV2ginT{Ynv@A=DOT+tP>?sixyTP0-cD>WIlx)~=$yE>!LE)05 z^v#sm9Rs)Bdu&0BPU%ecy{&_Hc9a&)om*7g+%lS%S6-fGXPa1N(qp%uIQCPK_aW=M zfApG1*V%q6Z)+(kZfY(rn#+!gN2}+|sUF`(If<9vXVl|aCxi)eDLWpXMCitZ5VWNR zpBI1-?g>}AQj6=Tol!V^Jah&n;UJhhM-e{ny@)svT4dN?>uZ zXw;pIbYO|n_uZI4>23*E1}fN36jA0ULHq+%;IK`lgii^*7e zcgJG+vo+PF_r=!uAgp9glN?{v^H<6AhTr z?P3o1eUkG$G~u6ZmkB_)sk-PB(3XnjWNa7d=cJpNlT zzH{uOgxuVOq^vCY*NH_Zz8CUrMpC!R1~lG^@&{#3II|oXPU7=M=)`vD2q&%lhXkhlmvV=3>at~`BMZb$w zN3xORWh*{rJi(ZuL@Yk&z=Od;aJqz>eQ28KT{=?KerQgJ4xa6(Z3>P~ut?G#X8qe; zdg37ZyZ$DBxJP*h?&tK55Fy?XgUDCq9l}A(&bHx+5FsRk#4;wII6I~{A|S~?^BY1U zqKsn2UKY&6H?P&wE40_XDa-QT_R=~|;vL7azF?v9No5%dqau}n;WCb%%G4aoI%#o= zX5e05uB0HC$~|h|HN`|&Djc=dmdF^BSh4OeKRc?d?dZ?`vJO+yd)XHL-Vmyzt&NgLP+rIP1Pe!q4d%^2&7CysX0c_=O zRR#V|7p&qVnA!a~dyn0${;Q-#HCKUsru5}upSk<;Y!HUnHPU9#TBM-#JQBdW5zFLr zgov(@cI{1P)LYI)?CjL{7KRBEuH?BbrwMK~l zh;URXZDK>}bEtQf#G?noJ)1Cquh~3%nrMvT*$vDhf^&oy^2$QWn@dXBP+CW^c)$=| zU)Hg>dtIXXtbNJm20dDwXU7!HUERCtj=@NJPp9|_dyBsZv}{6-STjCEqZ*=T3JgU> z2G|iTLO2VT^vsJgUtXzDJ^pGibf%wXGTUOk=wNHsO);o}AvE)a^$=k=7Z&MW7%H?Nr`EHw@vfWKch&7w z$283(MxM zQkPwKZ*)uYzy?)bW@+s@!V&gjFK`qp`{wzoBLUH6i<3P-r%fVf&Z5FEAU%&OkiO~ui<#xU!mGeol)c;7<2 zPooe11t!HFy@)+}7_<=Qhz@6LE=QTy3-LNymggMeZDI7fNOo1iz>4~vt6CPsnjVs_ zSuDO7*|YcF6s~;BaQW&`%^f*^{j0yj>|D>LygPs0L&GD*#elu05dqsss=|=EUsO`T!PKjLCeVu z?78+C#PBGsif&GmHbYH9K&hPI*&)e7J5;?v1%}j6+Ob&fD{O6!YE9`Vr#)*d?d);x zS@j*ujGf`zTTm7Q{fL{ z(8wtRgQgN=;xf`*A&1?G-fGBV4mLC}mJGGy!kQb`us->LH8)Cc%g?YzGzx5_`5Vyl z-mYHG=SLTyqr!jzDM$PKia0bw#E*q|JOAobvf#2cpx9Xg%Ps52s85#0o+ z33)PA4z?yUbg7jp#z(|k<#(BF{I}Q0!&1{BSkC!`zsiUzw;gJN}`*TA}#f_&Sy%|5`k?g><6_s4ZjQN`QeDnd~?=$gR>PG;dLrIIZV)GfWsO*U%31DmD zpZQ=GEk7}bT2NbY`G+{-^zbVCNPdBRCY|peJBRr{!~B2d^Oreu|LLtXJ)u+NiV4YT zd<0Ix@k0A1LexA-DiA( zj5V8g0eqq%lT?JYnuYtG(88KGm#_EAzUC>O2M#6^}KG&vE)x)~L{@``sW={%-ePy1#L)QTfK-8kKMS ztx@^L-x}$g%dfG>Af}f_#9)o0E`0ulvwV$X&8nF9*jwy%cH!B^N9ApDjjWa5|NIHQ zMo~v={J)oG#pk?ewo^9owYvC?LbERZ;p8`NigkVNn z4e0{#i$K|q5k;NCZ_ZwrAFggx?fbk_ejmG5BX4`O@j3Q5_VO)-e}X>Op#VdM!f(P! zC_+BCm^q{b6FU@M>abu0%&;OE=oKQPS|S055G$HIgt$?_A;g&`4{3ru0Dt@cRU*W3I0bA$xIrdP)vXkL@|>_CTnPzojdOOahyd!&(UwJoih_}4+ z4Dpe76Nh-qJI@dwc{g#0x4iQV@sW2Ehj`07*AQ=c=k>G~B9rY_y16sD#Ds*D6!IP( zNcq0-MC!(Op6=`1bGkiN{N=w!HTU5v^BA+8+Ocg+-b6Z~C=@^^ykN+Hy(W>}qDYAZ z7~Z6VXfGp(hZNI8FE>35pcOLQ2Tqhn@Tc$vHAsdh1<#9T-1Iy39JB|4=b*iqdJfu@ zz;n>%Og#tfQs6mgx2B$hwk^;cQw{g*BRS^cVxl8Mtk{j9WV@E`Vz`o_yr``hPKOYf zlq6Cdk<8COL1kbjM>U_?)7SU(I~!A}no1`^4C`9jE2DkoJVxOwzGH||1CLP{%fvB(9`~Y1 zvRiRlR4KF=g4QR*UErj~z4JnB`{_NMoc8oD|H}l*+5~z##cW_Q(OMlxALlQekC%Jn z92XY=L+p(F(jKl7M4uBoA*6fQiLx+%g9zz<26?dVZxCVK&ma%l{S6|t`x)fHyT3t% z_eq1Ch@bTQWe`7s2{74%__1cqP0oXUN`4m+i0}UkmOD|ApH9Ah!pYY)2uf9h+Hr4$ zCx|MJlP60AU-uHF5z15ehCFHv;X2+THDSs*IA0Gq2j}Zk&cXS5z&SWypK=b)*8|SM z`TCS|aK0X34#yOz@Y+mEaBy&9aAI;M1U2^v_y}rPV0{jj7bo}dcX`+N8{#Z%_Twy_ z__w^taO;@tJawm8IzEq`>fb8 z+@=D5BWQakji6oScX)pzXfr2`pgjyYg0^nb2->lLBWR<1Mg(}+vn@}$)h1t&>a8}t zq50&~Pl4ku`CU?*NM;H@lXKWVH2K1AA=8momYo!aQe4zz8qrdy%`xO;Y62RQqDV%q zHI$70j_LkXzmPk7c5c?}hEKog{6&4e^A~xo^Ov#DoWHPI=Py|GV{#LVL_DY@ zz&1z$`DE%}L9U0@FhJ5@if00U^^%RB$dPRPkwZgrllBak(~m(4N2-61IntCka`&hT zn#1F{;4>hoBH1|u#*hHtBkG0cHDAGaE4SDwt`Nnc5Q;~iZ)gzpD)KRl1g{v&;nh;q zIzh~1Dps^n2nC0k{sz#_zj+_7U>=u6Z^b_`7O;;g@PE3RuL1L7@>sqGt&pv(!fPJB z4x?w@ZO&N{0L$0nHLLpfpV2%7E7rw^_`0-0xFf`CIC)@sv?T^g1lCYP!^6rdKOe

*ISXQgFWhA5#^yI`n=X3I^fZxcdGG^b|#zsNseTH`dLGR>bjbm`>U(v8Q33@ z1&#kr3f3&;`{QSCDGZSI75}8*y?f=|d-ja~O}F8HZqaQ3X-qwyB5^M>UJ{D%{{MEq z-$*fT*jAKxVyywFjL*g*nF|{EVQE#@*qtt*K66N`yT<4^V?{V)QlgvoqS2oL!NfRG zD)`fdZC>#MP6+ub6P<_3>tHE|OB7HbPx`s^J@C*F0kH`|denNye67gymAqsAlvsk$ zcV%a`^fYW2#-Leg-k6 zX7)E=4?l(A@nWVG%+ed<#X`vAYW36O#R#7J|G0Y-_^7J0fBc@iP40s^A41+i*zM?^)Xh)PjgH?-2Kb+1*lYFpb{m$sJI zSINws|Mxlf&df~+BDMYfKW_uc+`0GMbDr~@=RD^*&v}-ILyQZ;v6vuiRsIMYPkkLC ziw&?ZvMPl*KxU=*khpX$i7om9!!0(<=A@=;7_|7!OyzAp^HS|@-<5dOcO@tLfT&U;bda zmdDB@AVs#t4oZwl3=NhemB^?;LA16*p=$1kxr55Y z-ZJ@R&t3!SDx(+dx9iv*A3&Z}J|NNXUZFwllD7jMTEilR^4gLJc+nz!hdj<8ci8B0 z#}2GxaO{|l)l8mCjvcVr!A~D6C9)1-7!QC zSku*3`7%g)y45zfxk7_5xdJ8(iz{bZRw^yUX=&+Xt%i)?IQa9W}mwRsBeq!m;6Whhb3+{Po$vS#aTHuWj1&+8Q(mp3d7-wxWeZ%2tR^%0Y~- z;h7>X;`4_aQ|mf>w4A1Q5lJiXZ{YTO+vNQN!*JnkXFvfMyff5RB^`ZE6fk>{8H$X! z|An3_gzB^O-kmPlJKZPyHUF`4efjBSeee#?6B=MLL^JiCn7=TI36aPLC5wU$!Xr#C z2nl*>T&JBblVQn@Fg%H#J>p^=?Vg*(<;GxKxQ6h4*@X-4d3ni_m+o0`2L3^7!~!IQ zHx}S|)*l2CJ0~f!6?p-IR2U5=lD!=SQ#9-t5DvAK55zAAhUT_`;W3RxVQpRllNx^@ zHfT`Jf)USLJ|_JES)~sO$4|btl-+2o>uuu?C+NFkZo@|+KNcJ^7hA{`@g!JB6SR^@ z$R&F*HlL*V!8DKqS=Y|&D``VdQOa&yf*#&rS-rJSj{Y*`DP)?R5DqI>O{XR5OjCZeqj1m`Dwr&1zmoD9&((1fx z?PcFy1X)7x)=8f$utS8LM0SMuuIyLZVP}i);Gxm%7~#QP=$+whm9GFE)JGuhmE=lT zr4D%X~5oc5e|iel|s?m>^E5#|2+?Qe5c=b7cOxuE)7>}!vsNqkp( zPR+{x_B|>GyE|>3R3x25eHNkmD8v-ZZ1^)$+!ySXu&PV|3XMG&uDGfYcj3HP8!F4R z=ZL{Zj5d&xi{fXCPp$i9F5B+8ahh8>i?WvYeHOb@EEk6Y7lY3I9kfxx z4#<nq3GY7Tafm0`3tiN1?-KR`$nSf-OF}~L z68R?mRY7#$ColF+u#ClOm?Dg>t&1NV8zWm9LqjYO6vx4s)IurNAgwP7RiMa*aVveo zZWb;>_&y1l9}v^u3R%R8KZC;5lw8Sf!7+v<)5U+wOIFw zceGW^xaGl$ho7JBnsVo~s>wh8%N-SWsF}M<+=D)MM}1bZyY$OJzr+91-RPsxD89u$ z!CY5_k-Fnu6mF7XG)$stZp|a!YvcFu6?do8UBkS&pOG*lE9@xtiT3`bO8Xkmm*RD( zhUTxojVNK3-bw;OO-Qgrmf@T&$`aQ13!HWr+ghuu-;Pdh0gIE}z>Q{GA%vE!z7ZB<1ENrDSD^Z;96dD0fa$N_KWi zQVzieKOew$M7bKZKnH{&;2T3>wUt3V!E6@l&%D-utWmC>{ReG3Bkw)EKYzzNq44j6 zHWhj3z|o_^@c@j;FjD89Q5}T0me`DRZtAT2d-EMrlwNZO2P`Ei7cIy(9jqL--QN^m z?*x3WoCQ9si;mAbTbXT*k~>@b2H}@3A^2vYt1JLt6o6n&Tmr^JGXF-$O~+UUd}eRp zOkVS+SpbbdY@sG(TT%|k-T(NTLQNQ3DH!wv&g`Qf!U2{CZuhSbD5bJD@S+> zj!4!f6`5|pAC=Z9J`Lj0-q*<|TE{i7KNRcdM7@Q0i1?(`;nYF#@*mNhsOt{Rz6Wb%Y!ogDno6Ykh-i$lN% zN)M`gu5#Z$Q*Xtp8IcjWg-++r*-b4;MFlg|u!<~)WBdFG%}GUDeHf5$>z0P+INKMM zw z0=_TI@Q&I z+vVbWbVENvVE<}HK~Yjm)BNoYM^P51LL_be7 zcs@i~;ff=^=DA@7yk}M^?Vg*&C7Qx^5`L1UIPqf8xC(g>Ml!pZxElbJUM-u-dYn41>`R8xiN;t6RFW(9%?J%$BQ5KEx^ zWY7h(*s0>ho*m-F?CSk&$=#ks^q1-vvr-|^x|(*>rIH&PF!NNscamQr8imwSWBfkQG}m6Jh2`l(0u_p zV68v~iipNRE1&tuLhl#V&+Jy-Cs7QwXiu&EDx$&R%2#ruhJJA^C5iz;dcK;YVYuUOk^+hetbZtQchMCbqj1lDYiv&R@-<$eGN!2<%r z7M}&O)Buo#Q&6P57;q6zZ9Wg|0h;$UZg_b5F>D@TX=!4N_RDT&jm^uh$kHw}kGBc; zsew`8<1DV1mP)7KbN?Y|%BK6=Yc^dr1#K*c-1ZL9f$*VwIe{!ly%))gh0zAgb^$sa zuDk`t=o8Bv>8F7+01cdxx)e+4fFPT}9}QX|=;Lw;`YmSYwM}QCBcm^gLuvxO(#KwY z17q#vW1ktk8{>+Z9=uz8jng)0L9IIDIm*#VRje`jZJ&DTl-~UPFY@doEGt7k}IV;2{qCD64$en(kb|M-!4to@y?C~@NSxa zcy6@)5a!=C!29=Y`x<}S^5Pd$N>gj7ZH!D{?%|w}_&h%+Bng7ke=wDSr7K%K^U;`e zVkTC;R>l=u?aW!4?{ZWmf3;S7Dq6`+ElW!ScRUj3!z-U-&fq&xmnHZP!n@vAqA~P! zpIp+P#ssnV*PyY=bIt*$)yZWe!mwm__||fp`TE2~+ArgV6As(`Z>S_ip(fVhR&!|JGL!(lJe zVz-uxQJ3%9dHKtiY2Up1Dsy1oY}P`&>E4Cd^W7^5-|o}*eBbW#%`4O$BHr#@2%Ow2 zt?U0Bbha5dNmq`e?KIBc&(^+<4Zy%kf4BC{x7RXpue|uBm)LCZmObDt&#G&2-ZMx@ zt4$8Wz5xrf98@?7Tkd*~C}vOOmUS3VYVR{pMOG z{aCF|NP02%&042sq##`xfdyBB9SQ96IIc2*EZ&4(v0};e%?~M` zS1s<PG@%EzsU^FjIrjiy*X#j0eF8_JgrP zIu(I(Z3IDJq+61TBTdA*g0nd#gR{lq?b!76==AiEUf+*I4*jp^8XXhF%e#Q@E7e~CF1s+Q zwl*RZLLESo#3snGqO5Q^3g;q_qp*h)t|Z)S7F1bQX9}uHqOdBrWH{sEfG$fglCbFT zILJ#n)~@?G6UZ!@qGy0+G9Q>HaHPaC_ft!*3T%0A2$S-JV&Siih@#>9y;#%^zG+fMvTUM$W> z{g&WEQad;W=V{;P9`qjjX=OMF6+UE3 z_P(gF@NJF0k?Dt)r$-eQjT%){I7$hb-qJFC?Djcxwy&i-#r5^X&kUF&X>XbZzK{H0 zd>|;X5~OA>auK5$U~nJ+8NDADce7bHCPGEfu*}fe$__2&X>N z`QWL~e0=bSKX?CFD)RhVum9_Ddi`II3)KHZpKAqDk#~V!|Ns6@um6947pVV-K9~1O zMarvs{aT}5zt$M2|6I@25UI#ALQ;X>C-k?yPte;~gI+t^Gt8OS)MmW51$61;RGAop z#6{-dD;*>Sv^X<-n+axdIuV>qh9y8nMp~*Y6_wX~q5b;6e(jAv*OYg%>)8lai;!$r z-E#(}B7o?xm6F|zp~ zs#${+Wv6MXJ=*U zdg1`zE#PBphcXrWd0LnSZbT|zkVT;d91RBI=~g>w8Hm4w#n1}lEJyq*S{U8+Tf4fh z8Py!Ud3@8x=qtt$UmP=|xp@ZO?8AT0vmNO>+fi=j40tTDVn5G)P4z-jdztt!WTzhK zZI%uiV(b`8_tB8|CgO8_%H1yhwSPH60?NIpJ`KshAvD&G2@Mvp{}zz^JEX5nVId_{ zkg5hDOHmNTu?xC z>eFUfLwnbR5Z}6_eiFb($YS8-UBroE?C`rl2j&}J8`TQY8T>L~OnBcjPqJgK2gI81*l$I%DM|4$JcGeZUs%YMqW$4d~w z!RHLl;1~&(IKB?d8d#`e+AvDo78(pQRU~!5avi73My7cKC910Sz7m)^;+395=~v5* z?VE;?8wZ{0&S`Y?=E^O>w?yZf>yNXUt7L zWjMd>orCqa2mJOV+A;PantvaH_4fxpcRz{s_e=11)KC1~{GHYr{d-`YiL|Ja5zx-0 zb9eH6CYLQ$IW0AVj&_5OtzNqm$d)4O6SLN3= zCMP%6QM+0R)@j;r0SHpE|53=a>2U!C3pX<2__$&r|!3KQ6 zCiu?zd=1`zKyN?z=X(3WKM%C8p5wXY6xQ8L{rym+2ca{_(4m3%&+&YQe5-UoosIWD z109_yr=0V-8YbcN6>`dFXUHi~w-5Xb^aCpz=|E5v_EbiUE=)%-C(IhQg-*T&3cLx0 zY*aLzXcGYii`WY@@mr6@qdX+d+AGacTUnv@I(u3hiL#&-qfXE)`a<6=zm^UJ@6h`; zfJ}0Z=Y0O&&h@8%mnc#w2oU@KZ0!Fx1>gqHAf`$iLS0XbVgxr2C&U+BoWz&fT))$_ zy_$wi-4)saHtVIAs4l-Qz`(&d>yrA_r5xApVY5*ghixwE8>@T_y?`P65jGK%2*Q9V zR#S!^k%&N((iFRuEV7Pi%e=yrM~66ao$PL<{cBcWPl37!Fb3+(<9ss>2L=OpKcj50 z5FC}^8!SnR5Xs8OaqQ7VRI0VH8?ih3SbH-)EqLQM;K!@Mk7YAI<{nLuV_;4MMFt%Q z2_3+PMEn?bOi15?t#x01oQVc>L8cVk)AQznEq2c#@yh6p`mSZG+&FvKRm6{nZX&V5 z)E7T~d@G>|KT$EMbd0Iv2TVUxDpi1>InG?ikVmHgqd$@t2dAa8{BH)X6G%TJ4ixOr z{B_#70TirTj!pIouo&8piWB9B z=>kL0G?U-1h4P2C^}0SLv38Jtu1@~!IyLHtO^9G?kMWCvP@>RUlZnkfSGe13uFIYY zFN=yRwY~W6v}YqrqI1ihKdHQ~JrQS1wm$eKtB;S)KlC=IhX8ED3AUqQx>7+01{VA# zSR(arz>=YLJZg<00~I}>At|GDFeJky-cb*P}Z8 zT6D-^CEgJ6TkmI(uHF?&PPgg2^>mxgTR~5&v<+u# zN3B+9=}KWTNxLS5$+g3l6u3LQ;)T}nH{P&wRO1tCS3fdiT+j74PMY{Udu7YSQQ5BS z(KENS4$rB`fj*AHw!E%(fwN&tQ5zi}4!+ii_NndF>D&ebyFeAp#_7Zbax}Xt$o>tI zx7w9`TU$$4+o&rKZ@ge=!k9}tns#rgT=&p*4dTU_GgKu!-IhN%rKxg9!q9>s)m|L! zjBFi_YQ0`xo1z+m@0d5e7^4T(7$F%U=I3y1v{G*)(fMH!umnSZb+v%OKdUL3iH-SW zMT`R<@MJpJ7gUKK*(-2tf%_KB&1NDaBPlm6P;O4OPsML*P-wEwIH@~@q zG{tpxO@0U8@pww=nla%Cp(7{PZJpY*WN6ZY3oYT6<{wpOk4~9zVO8qdF=26`qgv_* zdZ^r+RF5OvBwNu)*9FP-L9Egm9y+p2dpvnGZm0CS?e+HddLHsV=!G7OGGV|y7xI&M z>|5U)mq6>?UYB}3RyV7VEsW(@8wPa1@7Bj}}$2gfnOOWpu7;&(ph{Y!@K zX$tm18MSGcNstAxix@!TmYGf(pC~@QMnB?gSN!8@Kj^ETZ&NSHs5b!4;s=w2zY^5H z*jGOq(Ccp`reZev(d%c%KG{Y%b7s1iT=CTw-U6X&Gr6zmSw28O`p|Rct z+66*A*2EFQg*4vd4+ldZofd2nl~6DeRbfklmV~@rk?i$L)HG`+{DP+DjP8$sF!`E8#bzH2j`hFwH zm7V2Ej>}E8XW0@fa&ugXae1kE6T%8Jn_MMT@kkt)mKPUamF297kIkp+gqniNrmVuS zNqH%G+7piKEJs3GdLq(A;E!u~Nk?s7@{shjAt`w|sq~qiUE5JI+?5KOivoG3wnmM@ z8rrJkiTt#O@IhS#s~kzZ5S&-yhTGCS%owRTu@)pjq7*kM?DsHWV#c{ksReG2F1u$( zXU*}s_*J70YG1*qPjNr)xk>xddlVu`un+;BHrTf=B9B4TpztsYYeYU?$l4-QW;`f0 zGyKwGVj?4B2FDDJv_@JJ{PCSLGhH^t1^Y}FUAgdUvC$vxs?NIT-P*mk7SW!NE-e-JDSzef220$I$7b=pF7EsI zZqd8$r=TnQRQV9^8oEyI`a}{pNwI!&P|P`myN!;T4pnQN6T(z z@3MEBwd@_u?0qf6_~u|IQG(>GOilK#=lHR~rYJg0C7UAV>m+cbn65qJOekVUOkesS z&yP}$Nans(OYnwZkDDVbBiei{gk02Mh)!WyQW0cS1a+*!NvFJp%nz|yBilq_K5!!m ztuSllMPNC0mJ%~-BY~0}A;)fY#5x^8#^Iylx9rR0%nMw{Rmyo0gs$@PQZOaDKuBrr zl?40L30Za7l88S|QyuZmG0FMqB_%Dz#rA^am@9@%$!pqJK6>!r(d8SO@}~?*7;P^Y zQ}1kOaEu;XU>_Zi@x~dtH%6(3PSRCdX5nkn*}(4L9V2b}5Mc-wa%+Y?2*xjlk~>ph zwS6jeL$m&QcJr{Z(bniu6*bM^4-@hnwSx)=4R_>C3R9}V7m6K@V(@}tl}`Lr4ny4p zleXR)1Kv4AsOD>BQ5-{55X3G3*B6BikfMdn6!C&7Gx4Et(x90s{+0APvJSyJ1<;ex#I zs(8rTg3U)O1`dUNouB_z>p=ukZG;odX6zO(ugkd1n+mcaf* z)u0~e^bd=NloHgF0$W8LU+d(<(CL&K25P#2 zKDSzhKLWm*+-CgF`KSm`0W#EtTn??hH;RYcgCSQrfu}R{us{5{`xNlG8uAM2r}oU> z!7q*Pz`^&9bsw8nE*?6)-?REELIu^SEyH+yq4(iT9qSK&F29GdYeW471IeDqRdSB! zYNL2aeOXfRem~jM(|WdNgd4R-JY*p+tMkQ;CEAZ0y$;|XuoJF=!o)*CJN3StDKCEa zbI3;y?NwnTr~U5>lMUmw@$~+m5Jar^TI3TZ5-c5Q0)^0~hGL3Nk^7G#m~dhk@7gs*ik(=$|BdieR0LUupPND|8s)ZxjL_ ztyg)Sx`*v|_L=IU{)*)q9D<&(o z->a=~7?t_jO4h>JKj~LK)ug=*8#E|;_Oj(_m-0BTD-CC`4Yi|?bPj3Pn_v%v*aSfv zAjc~*=@D=&%YrQ2d3C_yjCN$%2>-!w4Dz+f;IE}GoT&q_kTbR4gPPMHyUy!XGv(VM4@L?VwWV}~3n^p+ z$xK%RJFs!HEW)}Exe94ZKQm}lJ3xf+YZ8}OppInZVb0Z$X6+D2G$X^|wENVJJb)LS&l^^S7E=mY zhly;>!=$}9az*{9m34J1N7b(w**UqUX3CVBn#t^tkF?#LKGIfZ)vc(*|N51qMy;%` znKrGaYUA)Mo@v>P>)X4L;LD?POtnePPMy(R_}1I^;uI17s1hJn5jR zPAf9jzfWLK8QN!`Fkl>o>p%q(JnjY zt<-6k`l1vuQT@%o-aZpe?W;vRf8N9yl94~p+a!J~Y*Lb8BWCoE6bone&+qdi@y-<) zor4RU`e^ft-T5=ho7!jW6u)iCTRW*fw|vB55;+|d{?F;H8(9638zz{Qi3fAu^35lX1GGk#2g5BY5l%ua>W8P?@%%zNCw0T z{aX8xpF@mXp$#>CsYi#1Ww57>3(u|o{+j!aYvL{78N3|-#B$#IlswD(7d6}9e8zr1 zJ=Owt$5l??dU1YOQY32IXBjvbLeMs&Tn8 zs%BYT$EZ;qb<1jM7Z1Ons_KH_i^rEwpMhME<45Gpm|pH0KaB>Hc~*K(X_Xkuh;va` zOjN92s%H9d7*P~UwaVf-)t5DwO=vFmoGP7FJ#)p_hN0CYP%xnle_jz^0S%vnf@7lM z{Pp{XTMyG%%3fJ@VD{_-tCnosxTJkwSJ&O`SFB&QY$MH^S>p5TC1p4G!W^$AKCkly z@p<2x=2!P&nP2lEEP0p9;ujD;UooKX5`3^Fhxzl-6G!~|L^xIOwHbi_Eo(BqQL ze6-$fI31M zD;Kza!|~H8hO;LT_bZm-R9K_YI5k79E@KnxO+jq!Y1sH!XKQLgLfh(_4@+fJ zE+})R+ATSQi|05e6ww~9%lo0xuY942TVCKO86i$Y(deN$pi2kZ_4s~Y<-2eCv~EG= zo=G{i!5Kr!hZc0WAMSq#_<*l1^57D#VsNsFh%M0SQh#u4k27&ll_V}8lu}2Q2@@UF zj-h>EB~X2ftsheJP4?umo(nt5Qd^yti1EXk*Z2JoE&7q0lZ&Du_$D}Ci8(&8g+uN}dhxrk`i@u1;i|6w$ymcYz2%;ww?XKtTT6_|N zE>R**1qj@5XEm!Tc+^0I_|ew4x1ZLb(Dnfm(NhCBmT@@FyRKzzZLhbt6I9w=Y%X|3 zt~UYELmmaMz)tJT7$_cxX4N(VZvVLoc(_cdI1vsOYr2H3Ka&xXjQK;7`FxQ|q4Gx{ z5N!1A9>jU+O?~7*Dl~vXkejY5cpuAUU)Rgq-UaCtOU)FEK^V=nfXGU`Re60 zmh;-hpd;-^@Spvh{vM%vbEV(07m$lFRHz$&*ap3@4fW~UZ)w{E16cVQa$=5w1cLe( zmt3SkHI3h2Y+F~!<-hMm&%j@Xx?clQ>hdf8ysJsH)fw!xM?WJn?h(KN&r&+yo zIPPquBfxcqY!@o<83R$d8@v*8EB>(G&2n{;y987#~-FJ)fAgWU>9QY_`=Hp?~*Vn z5!oFu;_y1__n_~QsG<)=nzj)gW^HaXNpn)TRQOyxBEEyR3@^EXD+5OT(gY?J{z1=n z*U>_f$zOZL=i8=W!^_hW(@nI>{My28r&kJ9-ocg?l?bW&ncKI-95jvCqB<%8G zAMBpVTC_G9v~ALsB1AXLFo7Cw1nP?)Xa=5v7Z5VE$*qSDwQA8lt(+HJD!e2Pm7p}E zRSa)a>|2_&q#uTQ7%?NTvcX{#1uUqJykQ(%?OGbe{;c(|Rjn`mv-NAw3&80BaMAt< z8fH8st^yM=a22HX>c7;P-zI@H>FNZZ%LE~HrG0l0j!>i4NDjn|bX4%K>ETiGi1 z)IY`Quen@i)Q7VOI!R-~#lmqhL;Mt*0-r7)fazX9+@2M5h9udbT%YZ!LrN&*&A!VK(`7OO+0v5$)qb zTAfM2ka=`PJxKV~=9@Erim8lftrTO3$3~*~Nb)St?^bJnS}kWOv$S8a3j7}8@RfMc zjzH}gct9K)5q$-N=fL7aVDkVF111L3Gve_5ADP-zNP`bOK3H<4nBM!_)huCk=Nfj1 zTBUwzw?b3!Yr?y}rcee5#F?Td1L*NJh`e5Cum6327GhF`*m!&QNk`Ds7niaUQyc#E zmfEoC>ymkZsd5@$dP3Ia{*mVo`mS0wT4&Iy`=pxH)Y6mMuhEt$;Pgp)Qn?DOI8r!d zSchRWzzKwlaFdKFNhFl!VxKf`0<<7<2cn#hKRg5=r5gn;8@{U~Z8cK^C3REmMZPzD zHJ+nLr#unaC~8BAH1LDiy*O_HT@Y=6|L{-g;YptCNm6%+{s9J)qJ7MMboxsDlhd=O zN-Nc#7RsRKS#!q$ikAc0^iBXiU`bsoK|b zJ7b%p9Eg(@#^N6n)lpanH7(!!4&aUu{C*!87TrIGk37v+aXyWFYq~YYx56OSn9BwQ z8YnsjS{IKtI(C2MurBTN>^0h<;iW;lD(YB<_UA9jCfd`fU8a4E8e)VAgqz1=z>dkz zh|mHif~6LwaA<|{q6ol-NZ2r?j5OV!`B-cmk3Po{z)f=KFd$$+fcqG;5R z4trCrE2<<)J7RSAnxnjK z{hA{CaQ642>+;s_s%p)+IB&wuv2`|WVti?5N8`G-`1FlaH>O6Cj%AycFLnxLSj+ct zzG8d_zR_;j+1s^7r%S9#?lh9NBa|zC2)TS^mMmC{)!MvI9+B!ykS@oL1}076)!{x5q2vmO!td1 zdJUo?n=F@hOGs;O)`YH9YjU)JbFyNrlA%;q=o~sm52Fl3*kZXBlMbcz zK-an!*$70UU7TH>os^k`|7DZPE}E3&Ov3*hS5B&JNV(JyKW+51^3NTaHXCi>GMp)? zv55&oawBcloDBPrNE=WwXh=#%_TYrrgxL5&LmMDRxJA!Lo@GKe*n%NXIp8JXS*HD= zi6u_<>~PQufidWP#q(36T%-UF(OFN-kM|Os94-*(tND<|=?e`&)tOI#QT3@N7EjVEm}5MPuMG12<@ue3v%DX0yspJWc& z%1T?cuUW{kV|*;#uV``Fd%!Q*nH2M}bfR_IS1iKh#o{_0AHKbl9uydMz5a;{0}%;Q zpZ1>TpJMm~ZJQRymTMoq&3(JP5n>`6CLJVC2VQfsHH?y(sAtkh4%gIjUaQ+`mcvLF zq)RJ~iw#pU$5&6BbUf+n!sMFa#nIy5OXt_Nj=FK0a4EuJ`g66}y(M0bWNCy>qI zoH=bA=^~_lmwqZ4rTe81O}E+7>31l7DAVXKYGmFM_;VVfM7U6)=cGW=EnPi^v<6Vx zL~(N0^>v)~kQ*fw;e{0Bmk5)B<8n)N@jFe=Jiw@8_6+^?wMgT@yua{j%*}p>2$t9oM*$ zMg7xg-wXZDS>AV9a-dDJuxEmDz}SK^ag8hBIW`DS`t?6d0sInx2kdY_fJIJbx>bpY zD5U=DpY!nrm)b%7Q^(;6HjSNTL<1Nr(zOM~YS{N0E9=?DicJfQ7RHKAVQwjv_mz*8 z^}EK3&F?o@e7v7$txS{G1n^}=FV8w&Y*oJj<8>ToEvygD`_|EK%-9-Z&;(=9bT~|O z6Do7OSHM>cc!x`sGqW%})EZMjbQ2O|EescbfiJl9Ou-jC((!J_$GOK&JT~Cii4U^V zQZn$7Qaj{~Ic1(uXB{N*LH{6OjKqV?EwBEbV+5G`PADBW;(gFly!<`KNc@E{MhA^a zrH_$mUJZcX(vYaQxLlf7{8P@r7hLvg`lpUxGp!6zN53(WhYUDICVZTGjAWW@XU;Fg zw>T|d@eYaaN@w~qC$Zjo&h)`+H^H0E}T5-vy=${5|K)=2@FaU1KEwOR7 z9O65%wz!-i`a?f=7BC=6N(g9sR~gS)-4QTj(=JCR!Pwj(%gLtTM)ETAz7j z!p8t(lx9^UA_@o}{8O2aFSyhm)IasUoKV06`8xP7&m$$k?3nifR6A8T2)(^ee`Xf# z(#Z$SSV{RpI<>@ZFQMP5`bP<^Z4a=q!c_LvchzCG-|^SMZu3=wI>a7fs#JB}b%>vs z>X3dbOqH)X?>gjsQys9enF{=V2R;Vui*m712lh`sS;ZN0FT`mzYTy0C#{H?RMKrhju&;bS_>Y{W{zr`av&09*?O6MK^*`z>FCG+kVh!`%f6Q0E zc%RsVb;)=CsBynPetzmJ-w*y{XQ}^jyb0$;Xd~R@E_@OKT!W8eeRzyWGZI}<>N8IW5@!|kjMJmf8JQboFPa1 z+z;UAr+w}R;HS;xK=nV~=l%&pesG37(dYgzjCIc$@?@X;0sQ~0&;0;?e%|MP0RE@? z+z-_Mbf5dawbB{#jQKvU`H#+-p_vLSy zpvHdB`|z7gP)EP#edd2AMyucRKJzgX*z5b8l4%^^{2u)i2tH%}h_FilJ}&|PJ;3>| z{`^q?=Ya13=ZE_9NBchqdjX9dfDOUpT6`UIC1N4b;J6HMV&$ImT^DO_|d6s{6 zfbwQK_Vu?vJySmiD%XEroUL5{`E|B({pab~%JrG=&@481kE+X&+tmuMj7DK%?f4o! zGBJb{qD3a`!Efxxt4ZBb>3?;`Kr@DOY zZ?)g=->3cIw`(tdmnGkQ4@-XMvY8h=u&z`$? zdTz5Vc~1S#Nk9Jm72O}*HGWg8`0Cx-NAIm$_a5Hg&yr8Bz2cM3&s)h=(V`|BX-v3-;h8q0qwP>;oSnLSfL^M+;?6jg5hwz?hLE z%-K_u5@VnP4@DRnW{rTQfa1$$Y2EN6>2mf^K9sjd^`~Tc=+t)jGe4U%=qNolow#2ybJYsF@ znLBUSoam`BI~Oh58I!!|=-;lr_RFJPT}QvX_S(N4T{P_$rrdEmlW)02bKic4)_aS1 z8?S<@F@Cj`jhiP=-rN||wrLY=s0YB`2w`UJwC}*%qVK@lh~dL({v&w9vH5u->*wNq zo*DlVhO}t|;>Ax_!SE62kDp+ot6(8W3=aDLgRb;|Gdyi0odKL5s#ZqN*)?x&Pn-1z zkkvB=Rn6YKnQ|nI_I{@BR4x}1gaQFsQYm%<%qGFD=%k6PeT*Y0SbS!ur9=h`33`@P zM*Jw2^Qf3~%%%XJs$e+Rv#0B4e#E1sm-c{9EMIY4^X{pdp1()ah*vOi&$vt3(fJSk zanq(x9-2S@p-(n#`r||MmCIMYfO5PLW4zcN#*VL$JeBm=R2)MO_^%KS$~Vd{!b&2A z+be;Kz&rq1z&OXi2RH^lEo~wH7qicc3hn@_9=|gj@k?+AA0y?#eZ;GGf3a=b7rU?j z`_7$zzy9(QYu22&{IVCiyI(lC<8Rkr|F@kx>FxTJTF=ZGJyWOl%$V6T z?d;uP$7WKYnKO1zow^$U&kUr^9Hdblx{alQ5ua`D@ z{)nWlczrBn_-@#QtK{{H6Em_?J0lV+dKoN|`KZ{feX9NS-L=wj?NeqGWse(mcyeJ^ zz78>sRG)?V+3GPOx-v^9Ri>@G|Gw@;O4R-LGXa~oe)vW(A|(0ii*G%gyG4REj+Dtt zrLlzSxeQ8~dIvl3?T2cbHp?M`iJJRmtHtVvJbr#;k-;LFaBy>8CU(EO_FWbyANIIK znc*-I{qek@t@Exxe^U5)=m78gj1qVqE^S@+qVD^&7rbbgR2ine@EFjM6j)jhuN2y+ zM>aDBQI{Ax7oa2FHzdtG=%**8iTBWR5dA&~Pp9H!ud-~opd zTnjRmV8zgFI|hi}YC}oN(M=xql1|0}AU=0zNx$81P;>wiEqlHAu0h@M3jt_)t2v!- zVJVii(jm|}>{&2|hz(6vOosiVe*Q}MnPeM8lZT}#7Gy=UDQT9qG*cY@c_U^XY-i#& zCbmNf#N5|wk9%Xidod=ktONsusRryx8ew8Lj!Xm*vjrbKB_rwvRwxv&1Pe|}Y+7ul zEA}%@+_L4+q{a(ZjpgIxp2zEJg(<(iHubEv$buZG1$F!sRs5Kbui&{=UI~07dK{;b zbM@6n`slBt+=2eeE8XqvM!V-3vDU6FmG)_S@1FhbmTza@&EXUJ;1MGUJQ6QB6OTrY z1C~kQWgRYNS<8vTKN-X{O=~~6{$RVN>3Dn+c)S((an{-~FAO|lM05*P(+EXz;$+-n z+NSluKuX)9JvkOgce)9upz?zbgN=$L_G=lt;J zy+4!g*Gff|>ZkV1-vPVv9b;$MK!LL(E+;0aWaN;TPGbz;8sLXuCcf&6ivoN&TuQ8x z=|J4+&Dv7CSnGMl&Te!+vDQC;)TXCQy5EcV-UI_%quZ#kH9mjBANCyh_R$CYA!{?i zUvzs7&NMN$XN;vyXPrbPz6lqg6=Ed5iDxhvoof6uP^TW9erO)T|IufK5)8jA5K$1g zbt43=Gn>XL=_7tvpkOYhl@m2>-Li#UM)X8;5+sFJHwdpHejReM;N&v1HX{_hky&Fn zg*sNwthYY{K|HlgOX=ys?6A?7X5}xQR~Kr#7MfTIt1R_ol<~GcMO$vnsBq#am|;vt zXx7jWv^4+`z@jEx4v!^{LhW!OKt9Jzhjetgwrjaot^7r6#?nJ`@37={c`_=Q$|v16 zntn8=DY51o;Ef+cwzE>3QTnhU@}4zXA2>wb!_fALKQ-{c@xHG0Jmp~5WALtbc%BmL z9NG#D-U^3U=dti!^NHVQ2w6GrJz5ZkXd9)RdWCOfmYtZ zAeg!$tQky|F|(+CD_@8$nOM*>DHpMYR`ugX)pOa+)1AbDqmUz;!ov`4naujQ?It@ZTq@R zjc4lJCJ@}W`noQ45)ixJ?7iU4H{~02VEjlRh;n-f6Tk&)^Bz|ZC}H3gh!<(ZRkT_p z7nWD`z^vYbN80{AU!0SzwI|Jzr)llkV%z+`wH?vlorQPz_k9;Eow=CgQuojI+?~z# z^&Z4)N7@|pk~|Idv3>BFLA&O6(SPh4q)aOq0@ed{|KHn=@E57QIr2fz{yj$&` zE`ubHX@$w4dVn|KxqCk9kf+J0=SbV%=8J9UfjkZ4;DWAcg#0>aD4JGo#E=G!htf#+ zSflB@-|ixoYlBmX)glUM(4ryGda4i=bNy`TuM?h`9ihFgy*;=zLEd#ai(}Erlce4` zT4V?-W0NJZEk*marjbXMxy{Pjh%j0XeQh@Se7k{^4#2X6JR^B5J^FS5n>h1P9SUMq zk}Fz3cm+xfnf%Au?l=O2<*^(c4DGMlm-6eA+{zsGj}Yw@?LmTrm5>J(axZ(o zk`F6`DRbi^iDYwzNV1}Y97-_6LI@~VvS_P_j&>n)HrES z4C$t}3wO%T%a0*9rxkItGlh}D@Y?F+q=cw&3t~UBp%wWM860WxF+mL3tR;!`GMJF? z6zdn6%$j0Nq}k`ni~@i*xD~Oq%nDZ;qCQ(AM6e=sz$W=`sj6s!j?U;A6m}JQ$Mz2n zO-hA5V3E|}-X0$oEp^AMQMT~pGSjO<0@cN(K zY@IwMDtv#mRHHaDqmS4TuEkJT$v|a&34K*X-!b59OYi7-;_=+_(XgA<>;BO zG^s-@sb$uVU$~C2Q4fU1M~grYq&~t=rQGDlPi1X+06PJ^AXVbO1ux&f``^El=!YoS zL0b>NN6IQV5mz;BaJWRdnlUDE(Lg@5Tb2SjlMhsd02-sgQbWYK<*iL+Kl$CMHNp8c z!-~>&{rsh;#{4{P($r+z)hzUXepO$xv@&(U-`$_fqf_&vw`-sEzFb$G;a|eC$dD=g;7d1Loh4j`Rw=SVUsI46q!0)K)lENJ2KS6(t z&nD2Or2u@5hgcJMrz6DgKLnn{%2sF3!!s1eh^+Wk*IiwBZFF1w;5|$pKcq1wY3xG} zeE%^LUr4hq`o&#E&cTDjGS=O2L$wwgo*ZPqndPsJinpXS{PZ=9A@uO-6nJMP3hAWt zOG`~jPKu4OsKI>tZ~%AdguFNy`won|K@oj%N3+3% zJ!E+z++j(?a%h!){-pNDJJN>EDNfq*#Vt2~xHe>A#EqbH;ieqzoV%+pqg(e^qJx*oL2JC+`)Pv-wf`I^NaNX?%gZDF*KHYO_r+V(*XDMwp2Y zANrUO01`@FV2{TM4B!t(AcR?FwPEp<#HXKRLkP%{Apr8Gk2sKQ5cA%Y!)6z1jyIH$ z2eh|zDDP^x_w@Gb{`VdoO6IuNvq!s~pk%+J;S<&fPs#;yB`ow|(}zt9#53}#A;&zP z*2r)}CfyT6YSPci1=^|`wN*W=dkR}ESGqSzSGez%=Af=Fsa0C6mVz4?@j}m;BS^24 zNsF}yXK4@4VHwP!mU>dek3Gx9JsiGLEsQW?sZbg2b!r^EZS@;V zee*_5(5SN2=zU-;U52K04ePm);plPO6z#evF^-$0Iqthr(ma;u&ixGd2t;Xdn|4xr ze-4{6OKKJOUfO*oJbtnaC?F7~977tMRQ_1GurVd8=f1biCr00oud%Qcn z8<-_@MeifuUZd+=zP-lh7X;iTvoF1KP3=P!hZ@&p1f)Tn5fHq=Vk}OPj=FD}hXVi*er>>5x4cXH6X8qygz#nH7x?P#y^mEHI;twajw;@h z1AIQjpFi^LV+KAS8xWs*dkVFu<8!-q32^$rj?Bw0&)jh)K7H^pR!#^Nj{>1h?%hYl z$Hk+^`r#6%0FcQ*D!@pixZQJ!6r_Fdy!eMz;_sh#lS$JVxD2x>JU+hu4ql^I*?xFe zw~Bf0%k@v~gHz&xY2rPn+}FEz`!BuT(?4>5E&LN+u$iIVrwhM*4EwCsFYHD?l3+?o`1A4 z2g{g4y!!>;Xk`=U_N1_h{y{_R2dz%hq?1CJFlzi^?qdhm2T#?DunuzP=>-#jkA>t$ zLlH>u1ZuT@kKoxBL;3Zf5|Co;R@xt0;ASUjt#?fK92*ob#x9YzNlSXiUnGB;9KIPR zrXt(xeN^p&O|=S%Kj^WLAe=$Pl8g;JMwnr!{ftOqJXv@PPIvS z4|_9yA~{IGK_0Y_2%jrhRCF24`f5yEsGTfJ_;G;%2RTgAxi|-gQa3dv%b8M`T9`v# z-f_VQ>rc^zExxe2SSSi43N%c)pP2s;mCk_U;?$B_k6pX}v3vQmd#3b!C3Rf+{=r4T+QZAVf3omptR;BagYR9o>5o5|zu?d( zo+Um1*z==*>}tLK*_C^~>LD85@2$2(Dt8Nc@RqNxt;7@&6IBtZ=9|)()QDx)=O~Vl z$~g2_C_Ci$%)&uU> zX@3#FP~L!iox@HxhzqZDDjKUvmcwY0Y7sdgT=z;4LW#ZFLAo>tak7S;(9gc*p7xJZ~)J00N-B0<L9aOQXmH%ZRrd3spY%CedxP5S}Gqrd1BP zj(cu8U3&hGa#$!u;j+}I4IZgXe}bT!cDZDnd(6FX)<&W9S zAFpg&)6nOhb}zqo6&1%@<7&3j72C;XYWK2}bd6i`#aqYYs?j|Qud|tB)->XCPxscX z-Fr}RoA5HPU)qjIZWT&vi%haKxE9wd!I)N_iFhV75oECi!mU-dA}NVUknZ0kiB$cY z_3rqssbNn0&B82c75YYP7uDwbXC{;8-h$f%2P& zoQdbOexUS6`=XK!*oD!xbw~~!0&x|hGHr5f^-5>7XaJYN=fq4ep%{$mT0cZnP#NS}iBrjrj z!3H%-&@&(OJ4NZBQeWq0-LUbJX;&u#OE51)UVyr`qS zy<>FE@ZktQgSx#V*+`v9Cbf!sv32t0q*-b+|WP)Mp~*(Cox3Dh_R29wxJ;-QKXtj5=ylTWySs z-X_Flife$janvM0(2-?jIs_dG=cG`)NE&}?Q?c}KFG>NK-qo`XKRq|iudknfleV2* z^0YX0>C)+A${HFL)^I2y-Nwg@`dMCEg8q?38``9);S0xO;(CgVeg*4B*f({?g+rIkT$ z%WLRwFqkN1ZA}s=S4XyxFBA*q!cgQ09Vv_!#>0wcG6H4A??oz;-ma4k8*t=3FFdF^il`Dwn0{72`5T$#PY>JbfrD7ph{*tCk>mLrq+D z&5|4zJ3e=`6gMI^Zc*(p(KWnMEF0`9Taw3YP5BMd;CS2MMFlK=Lg5%GL5)vXRGN{V znmk0cD2ipe@7kiwDHXG zH#K3CT2fGuljC$EQMz#C;X@A|y#L;N?z-dlTYr4hjW=w&_L{3VuD^WEB^NDUwy0}i z=Yshi^V;UN&267EWA+T_a;Hq6GJV?A=E=>ITU(kYHBFj0p<#T(_{MQ#$BeEYRX4Jh z{3NTZhE}>NO3F*h%SsE13yO;hbMkZY^KzZp&g`sAM}|F}o6r~vaFcm$h#IWoR12F& zB`Rtn_P|ygnAl?_WNfhV_=prpx!mr6KF0hDvtwR{%F!?4I+iM?Uk9RwmzGEME5%Eq zCeo3Z+`*tG@uzv?Mmv<;jjNtvCleQ!N!nymTHDI>Pa_Xpsjh2|D(z%>CptS%Xm53% zP-|IU=Lye!C-BYmyQ)+FVW&=NBu(EXB?gwvH7H zCuL18oND7V8fP*51d|(CAc!keG75rs%Gk8SrBd#I@_W5-U zR-c{UFgDjYM(T1Gxr?P&<)RzzYuz+{WPN;IuB~cBV*d1sH47)*TfXA;OXqA@(`=u& z&V6CY@Daro)m7pf^n&ynx(Yp7zVikRJWM%nMazwTm+*KKF0 znQG^7cWQO)Zs-`Tq|b%^23A@@6yO^A43#u=h6W4u_R&4gCAcP;5`=Jfd}yyISikR= zynNW*+OsSTKJVt=5nA2<$KHFu$5CDT<8xVP{-CZEvRlxcSHWX-koWdsY_rQc#dNYdWfMTcx zl`wxrpf&*m$W@^`07kr8tX4~hV6j*+t;A}vQaz1Za8uru|6@m%$whfr`XA4~YX4=E zhP+5foub@}H&AaYIJ%}l?ZK-c9rsVhv)B`J9;ZmdMb-vAcs2ec!K${hrev_HHQFf2 zUW7#vmYPt}ETL7VZvdgFiB&DhP}HRAabpfeB@P&`;ohsg#|b>q|AQ~mmAGXzJ@o&s zu1WkC>YD##1@V7E%HuxQiL=FN^4qxe#te{=qzRR}>ZPh~x>n&j(XCGHkl*e&_6n{K zp3|WIM4W-?P=n8*1RO520O&o$VGl|E)LA0h^kH$vv5!00bNKL8^d#WY@IKSi-UrVa zCOOmI2U+q^z3A>xr^;_1+aWgd_i4l5j^SZM#L9Gl=*yXZ8wP}r5sA{7b z<8ts53@93riJ!t`WC1mVKEX5*0ksPBlcFp{@tH3}d^p3;O1B<;dcD{0WaFKF@A~!L zoDB8lj2y4}uhL)6DP%t9u`8Tjv5=kpLtB2n?T6|?_Gk9Bv+X%K_OsPFd+dRLeGht4 zFIB(#U}jEE=7Tu$UB{P-`%QM_?#xfw2oGk`06NWHv*^mo_KW+kdF7mzmUCXY<~w)Z z`JJyi_q}@bl1sjPWv{a{s6LMBlHaRG@`-h3;5ke6=Vb6&62kp7HUW<+MjHzxaVdYs z?YDo2o+s8`a`e@G&dz*v0rug43xl1`eXo20#*mK>h~JZsLHmxFr-aT$wHFjzmTU=B zm~_P^eovjmUQ~a@if%F8xctikyiXUNlb?Lfu)LwBy9b}6DQq!cZDV2em+ZT@ESGW%9*deZY=BX~I97qJfxc#oLV2#dER!V^5} zpX|+=HP0V0XO)%?w6qMAmS&kF{&};quBfVLy?t@%%B8vanR8shz><|EOKxwitXyLD z`AaXKF>YUZMVZ}RQBl5c-1I9-(MB6Deh~X9;`?b%G~f=>esswMB1s!?y=8$=^D?IQ zpiFxa{@5fmRe-7UgCVoUU*X4IXdBvQn)&{)&1hVtS)1Bk=s&bgg^*pe%Y(FosL54Y zvc9EdeMzay6r~;X(QX-b+rMa0KX%J#w^#7@zhXMQe}&y%M(;o4a(e$Im6ffxFDY5M zBoK7X$;{7Py0Ucf?X4Arw+D{-SaatX)*Lq0 zh(n($7;93$OWiNOYu4AGSK30X3E;i7wORCD0Ro;%sh=TUQa;n3D@3)m+Frzf z;4=1t&wN9q4+Hb7?OQP=-{nE!dTrlB-*<%PV*g`)?n9$K_o1QhE4;&8CwcD1$ahEqR+6t zA$00XrIWcHpFtfD&f!hdJoO8p-9`2d^^3GMgLnATk$8dbqdC{u6YYOwnPPg*SYs<3 z^*MRTHIt1sL+_}W*6??>jk-pet33z2$=5K2zEitz+Vh@}hsp`=C>IBx ztF1|Wt|p^~%N_H($>;u(?Hl>NS#C1kSKw>(d+|*=yOj6+X4<*vyoYz_k?&C=+Sw_Q zv^6&x-%Y%bz9$$-=qPMDQ{IP}R8>NaFeNdm8YTUTP{3rVs?;xuvIKBBO73MvS`QQw z!TkE5m>Cd%jFkxUNS(W_3+0JRBk)es}bWL2(^|G#_;bgFn?%%(z zvpRoPSaGyB&pboAN}ajUnju}FE;~o|TDPiKY+{dH&K_0l-X&`3Gfzv$x-X1Q3N4OB z9p(aeVR_=qZ_Tv$%&bm*)8@C#oavDZzx=}GkTTf)>=|`7$$ru?@;OO5a2=!RJgy$N zemM3_+L}(#DnM&Qu77yH&Mb9b+M26P@1V0G*PRw3Ie(5iBW=xf>>2GjnjQn`Bq8hg z1z7X1_)`$5>n(|#LKL(6Ve4gOpl=Xrs)2jO>_nO)PIi$W-jOJoU=T@WbSRc^pLNTq zkYafVN*aF+mq}~&rme%MmrPkM$d`k1Fbso8Q3AX;o~2nxDdz$U>5pB~87sbg>_m3b zzFJef_RA%@xnvW`T*}a_C8usI5uz3WFxY$LoUhJIcx$Rei%{w_NoJ`aFVg`~omm2s ziZ+;MWvHu=%oebi*`@$xY!>?%(>IC2Fi`p8)HR}L?h;Tj-mNdjMp2zz9j~mw_mzjq zy3Kam!Hk+Mm|_m{%_{hN%{olqI}1tf&Ft}m7}K)g0DH{j0K?7Nq-2N}sHd&UGdX+dL9?gH7v)sq)J$iR>>g8dkNi?Os9>y62>IxbNqef4$vnLl^k*5 zjyaE4fe@JmXg!4aLn6xnH%N2y(tZPQ``j*VVF{v)Of+NQIynU$Hbq41O<1V?h&OV_ zKrXSsaxP6SLy!^+2W*fkhybLo>q_Jo6{1sHq%d9-&kvGA45u8hf2w)4D^xEGCxVP< zXLbxoESrHJeW;EYpwH(SCRiMe|TD zV^+EaMJY0aN-;z~bb=Rxj0~m& z!u^V(Fo2v&jE)D*%=fp2iu8MBEbx*dE``#cOv|d)*t^HJjY-_m3)|RAAnr zzB2}?bM)N$o~2iJiPNpw%-n(chivPyLqC3z2nDGCfbH@3%@^r@*0w}ep3j{LAgj}+ zNI>G!3`V+eMFL7DFw1){O^3V)@wV({GJ>#b(;<4Q!5-ZSQB>HmUQEHQ^Ek*ps@y#U;yh zsa(lqN=Jw5%RlkmNy)uwa&vDYt1y5rD}pK8X@}5Y#_qzX8!$th?6u;UFT$WrRHIR& zLFg}>J@B+eCTuWFnA(d+UCLK=B?9oa1xhSLVfpMCPX8J1yl^=f=m!fS*Zo`0QFQ9T<{~zz_)LRg_IL_ia2xeuM7H{ zLa*meFZ@>e*L9d|suo;Ou)W$*03?QM04td~zp@_{lnAR2cxO^4` zfglFyrr%D#JLGPPg!xYle-@k7#U{IdF?(hsA^{h$M<`gZh3(v|Eam4HhFOeii2#PVW3Z>AH?kaJls9l&5SWS<{#NotLNSO;%= z+U(+uj(HAJbga1NoaxiAc;m`>mySPoVqxYA_4x(g@h!TyA%4+>vmRdFeM6w@j@QoE z`R-TyTO(_Z2D06vwg0-Q8?KvMB|aLNa`C?%-2K8f!*_=LWTu#b@$*<>2xIcM2YvF< zedjUw!O9qXT4lI@clAmA$l;$kxlcF-|4unYUrn);u-w!iO~%D?K}pAn{{%( zUH5~8WGv|9KKMTqH%W%6Jf@({zbqOIEf^t% z+IbY)@20dsT!fjHKe%LQ^r2Rm-<8O!s;a84s@4J!WmZc{00Mw01Av2M=}F=b{+vrs zG5p{u9835?iwXR9ia&_LWY{F%(`^z&d`vL@Bc-U0l%WeL;6t_uy1B_>KLr}dVy5Jo zW{?O%I79{s44T|n7zR{sh2L;*XkLXj9vbymKHsZ_S<6}bf|t%9qh#{M zuU$F!*4Fdb`Dfj;sQdbbiw`$c-w=>~m+fINL8fg;u}gMd{Hp`IpWikoer#!Eiss{> zApBC;eholueVOny!~2CD&%|n`SnA>QDP#vzMM^7NKtxP-VBD>uSZL(DIYR(wiIe%(?2VA7;44tmVtIn9u$pyWin0SbucO3Qtg7_PW_>-FQ9Q){!Ay zVsSFdGWE+V1M1E!2 z0vCYSrJ5C;qcKuItKe6P&0$xaPcnX{A13#Dva>yPHSABn{LbXQ`Z*UbS$_4svlhK{ z+l|*1E(|!Fl4Y^~=|fi?%AV@B#6d5YRoV&%-o!et**m+pEz3M^d)L5)vw~~3Z{OUH zOsBd^7BjXDtn4?b+W|=dE%}})KJCDtu*qMr7dh=x^J0_LkBBbD8e5+53m$vKnq-`F zGHi>~H48yUigTu}Db?tUbDNHXM7uqm-i5$OCWO-xOEQ>E@-eejoSVVWh!o)$jC;Xw z9JtzSzyZqJIpqN+(dcfQ-`Ef;5`+ok8WN2YYpUZ_vC^XYP%WaL1*PC;XOi}f0KK{z z$Q>I(eP0UoIc5$gz<-D1Q0N5Kc-;5h7hQuB_}-Mh2K@vUzjV8H0-31PR6n*X^zu_^ zF49lo(LQl?j-(7d1$Mf23>Wmhh;v{&)~)JCG=v`o=yCi*(Bp24n%v=RE8vSyu8wr7oAa6?OagUS`?dj`!($DUwm)!+WOg?GiNS(>GqqiE1aL_ zvsf4TpE|U6f5mL4!I<0fva)s;9{&LJm<)a56H0{Di7W&U z(G@ro+P(mI5hEby10$hBo{W(maKsskYY}cD4PU55n$rHZ3yX}V_FW0U@)=PQ+{k@VyK{BFKkv&OykT7M%2| z#PW9BSvNDAD#}IZ$hi(^O2I4(2sN1yDa4?t0rW`)`y&|BXTu&uAZkS}cM9?aValY| zagFuymg=Thq%@pgRZxXz=Y{;Z035f#g%2e8Q0~Wg6P&+M?+=DBX-ChG9Xr+q(<`_G z(VALZftL~xLKiJre(3P5W8Z3@nmD-WYu}&Md$oBU``$~p-gMnptc#YnKXoL?>PbNqe>XbC}q2~cPZ1$8SZeWp~goR^4d3(b2^va&0YQxX<1#3o!_QiX30 zmDT|z!8)1{2d@yT)Sa#gZK-W-uAHa7>TxS8O9r05p@Z1R z`kk+K-rC>VY8sCiD;g|sw{UT-esWum+oJAcmb@vumOpv@%I&*Wbp@^JGGKuXnn)jm zCZYdYnuLo%lUQ+exC-A~6e$KxNTu-%$fz{ov&D!e1_KdMaBJM2nmTDhe-4Q9irZsZ zQF7^f7l0(T3$Gr3$4%~8dFFAX@`uvo4W=x<_pB%HUD>*Q{uTRik7R=;4G@}t0!{J} zub+{aYUX#yVu~TJg8&$ye}c)f0XYmPY}}?kv=tc?l%%esB?T3QWu&M}L$ohD1VOs0 z)*|a942JKi&eyP=4!Wj}y?-ukCw$qv7kr5A?7yV$${Rhi0;$`PMIKYPsK#!($8Xhk zqewVk2{MlM@+!Jf7eU&cm#|ro$;xMfiK5R`$5tXB>ddy|dLS)l+CXChEV?*AO8_+C zGZ!g5{ET}+c+(TC>PiGdArCTXXf}c3lwhvA$WwrCrqHE*P-19OaHNe>gqk-0>`0qN zOSmvUM|ttFPxQ0Xb}P%OB1w1k_Tu#rO&E-8aol zKYG|;jYzlthOh??pze@!pKO|}*9|DtkWTNa8GOo_w~^{J6b09lcWhw5%_ zDu4`{)YjDAJh`B;ps~J&z?RWSQQ@FO%07Wa(l}E$TDWNo3Ly@-U`N3Vs)fmBgPI$sa-r{*MPHlrE)DLGUyYX0*aDsIk?(V>c!`whEwYz`GS z<8haD+XX%jHT4yRsp<`KA^4?Mm`EOunOTHNhPgNli8YMwu$U~O2}V6s6((A4fN=;c z28w>nFlNk3ACl^5v4|K60IT(Q^!VnwiS=!GSgo&QQPdZTkt%j4y~{!OhU+V|#Uf}f z*{-@Jr=@g~E+(v%x~VYk+#d38zB_+M^OEaVl85=e+&0zq(rq_gSG6(I=kA;J^r0&c z<+r&qOVk@qQ$(Lb{(&P0Y2vHTTe5y!M%@{`>-NnIu9L(a+qSN;h{rM=K2bSMdS}bP ziUFbvgIPWwv7vUzuMz=~AqT)FAqF5dC?LTtH9>Tf3^`vgl2w53@X(Mnp9w{UfgHCB z^D!(!2`f>Oc`EAW?elARc_6DXt&`#u9AEDg#W!#J-?x2BF!!Rwd9UpH z{Jkl&fYz$qAaDFiV9ot+o`GJh*B2M6f4(hHv*pd(exn}unX(+;RNohI3IF@Nb9>ea z-`S&NW@zsW{^NA!v5;k?OMt`uH?B)q3eHtq1MHncTl2se)_gkZ8j2n8=K!~s82(*q zE1kQs#&(tb8pk%_eUjLw`LxD(XCbPsNxpj)n#1z72L6`ctNH57oS!^ssB?G18G@74 z1pgTR9dJ%_9xsDlLlY{1UQ8Cm$P}|hLHle)QeaLfHn5)p$UDRJV#th|&GH&Vhf4^w zQ9t5`q`fEl6o4*1q_i6ws;gq~0SfcI<9y>XomNHgFb^R(7=SLd%{BT(jH0a{tO_^p zBzj^d-V<2RR*%6F#8&o7BlGk^sldc}T75Y;XKr3eX-!8UcjK%?f)$O(j2)9!<;bIEdJd8@kycgIj2rep4#4G?Tpro?21`#mi zl(9}I2^R$Y-fWlCA;(NHXaEh+;D+)Z!*0#u_Ejg@pqSg+d+fE7Z%V{HxIkXOv2Zbp z=miE26LwB2c}#Npen|4r3>bFugL|hFXgBqulk7$Vyp22gcEF_zkH2ra z%5)y|3+6}zAjL$)!dNvN1X3eZ(k02z95^Ek`7&dwG{%y_4k28UCP`&TQpCq1p~AdC zCdtw|RyUaQCpHA`LhG5QL{Yv0xr)k;;d1q@+djE(*Swo`N{Tdb>#{kW1}6@Yw*dyt zfWRMdA;60=+l!Dlz&NN1DIpaciw;c)V#?_6t-miaQOE|Y!6xAP41aYC z$Ho0~uMWCzs{Z`O3{a28SQ64V zCqI)9;)z7Oa?+&JhM!IP2HEQqDy!PsswyX_IYW+VguQC~H32qAOjwXWh)rS*fXtAbQpj43ek@mZ526oc%Pb1jy2# z-u21h#dopd?=F3C>p-3<;On1%_V>@VitO0&%;T@e7Dchcz9laN`W|{?+gF)#LolfR z^;)mfp1<4zS% zwVeC%*>eZ{0aM<(tq&~wE(_f))^B^`p}rCyb3dJx!8{+lv-0ZaSLNF?J=d~~VDJX@ zb8JDkS0-@Zw;x-A?<+6UY>1@qJ9usrg>TP!JOs_2-NG!kDrh7(1Osha2BbCRm$0Sz zFM(>M^VGd>9O6mZA12Y|$Q z07y%w#7Lh<61i=Bb5lu4adt^5sBE{O|1)nPoAYl+#8a4yU`=G9!fuGfN~is4 zvtIu)o7K-Li6!#$+A2e>_JORkI>${9_-bP%3xnB5pFZJ%y#ERd2J^CAfmmqC&Y;Wb zDfIl{^&#hjgI%azD9^y1aUv%H+n@+`7IY~%jqEJLRsx0^_+k>xnAitH%b*H*FT%PM z`zkXtvnjKwG~&iM8(+u6g5?{43kH<%vE^#>Qz7=3l%wc@Yewif3P%QBfIHc2+jB=o{*%_Co)f z``_B}D6^2||HV!A9N7}y{_Z^=s-Fd!HRl=Zc?x1J--XZS5kf*NQ3`+`V4s52N67;t z@Q&bFVXhZT+#v#~YGHN&a(So%v(I_2c!Z63^(Rg;Jf$}lg9HAHmstH1Gj9Idk>?6K z3NQKDo-==b?G+tnDOcK>i*^qkyZ`axmLJ}^sv8N8?pF>!^4bczvY0?1Tkt*wh;zb= zw&N|}9Zk@bSMF`IF-NBK*{Lk-+|6i>BT(3Ks&1=;!=99CEfcHlIHeq3>h zI^D_~e_T0qCCF!HU9{fLrG4u1t^}e_OlTXA^kM=Heage3AT_afJ82+BJ*&^)vOf|< zh)_?Z7__8hixx%H8s#*CGxR8y*PDgt%ph%L$Ks3X#+^0m^*<7Qrxv;0g_8={@8FjY zESudO4KxPWn?13k*M(2ZRhH)6bmh5j%%u-Ml0t1%uJRD$u_?PdI(9Fh@*39FrAYox z^+y{$8SJ-2``wq2yyd)2_$LQSemVZ>K`w7o>om1BGoU+E2$4b&`R_J|@H%?u%P=0Xunqz-YTeD=OHFRfBvacfTUL!WM z6B1q$^q&PkswH9qdcaUD4|UNd8^U-cF`qSmVJzIkh=ZZQDFa6osyWhU{Dm`>KnC=}e z)PP^cSy1jK47>B;L?%!ck{enCekUbwG?-0P09;5i@4%JCq+81d1(o*_4_;T!OGd`x z7?i1@zGB*P7(6FipFjZ%J$Y~`f1JSjQoe#ZJS3boRXpkCjfA{5(%)P*0u?UfYk%;N@=5Hw#+xpM2n{7ktgq zhg?)bZZ;k~ck|`#2oYYO$x)sfJi3iNBAYhsIeUc&*2TR^=VjROpl1{4S%To;!9*t2 z`IO~DDTg7{jHGEP25o?$Fxo~yF%VCU!HP% zbz`tG!qe))f`qb|=%tYe^FuDMxN~k}S#(`2oPOAL$4ZGkg57~un!K}$pbM8}g5um` zLUEh%d-FW~_fhTlBoD3Uaaotdzof_s>h2v8#=)M13xxJiCbT)xlW=H$k~9r`$mK;! z3Ie`dA83M};2vb+ZG(BWbQ@tL=@6@~JE?5Q0i1CZ$&lRN|D$~~VKRi|aXcd?yrOk$ zsfV=-Q)q`^Qv|`5Z1w^UQfz2V1zvJcl4&i{97$^6a#F&+A?2fr4Iw4HGs}rTn`qQN zX5LF*Ip)vd;^GYO-bkkuhcslHD}qt8T5gG(X{4aFRK5Xd)7r4mPt>Aia!)LlQ2a@G<@Pz$Kd`qP9v1G^%r+ltB0D&KZC57Y*Y+q`I(`jrFi6Ab|4{u?Q1JKcXDJlS#gLMbDTsT@d6373&*%2a)$(G^ zKTL^d4_$NT3D;b}pEJ@L$W-Zg%6A<$o@4l;$@d}qNqe6Mj5R~whx$Fx8i8sD@XkD* zy_naD>#=yMhfnsAz#e3@*H1q8LCg%GIsi8GyGM8~_Hv@4`_QP*1@=DueT8>~ zmrn9r)X(U2cvyI8UK@<_nVNR-An9k(Z^|m~ZGZuG$=(JUKMwN9r=vaxy$!T| z60Nm;4&@_`S>?Qj%+)Boue_r@2eja8G+tvzQRAP&YwW0Qm-F`<;WOZ)6Mu%sX|#8l zY`n`*{^V;oZ4k@R-*?y=@-6stfH!?=Xde_IJ&gQ<1O>o>x2wegX1@Ke? z-@u84`M!~oWE6nlweyr;)Jo^E%pyvJwHbX~S0WeLn#!^mrmi5SPMHm$L8B~6VH-YX zJFG!>5pF;uv3mHH^+l9aR6;!5fG%#R2G^4QX6eGa)X(o&d-j{VTo!0tsJ-K)P z`AQ~yE{aFL^4WLS9ofC+OEq7-^xm8M4lM~>$TEI>(aJdk-oPul`G-4C{bj5o|qDZ|U5?~h?O-4g&fZ<2}@N}SGjD0JJ zxS;#-Mg(591gwoGVDTv%2$b?M%YhSr3UQiPj0xjg8XIEuvHEz`P_k)p8cZiYIoTxm z=+S71I+&dr1;3ruIyuOG>zgq#L!CVqVyfRg^VZX{qS36=Zaq^vt`QdYf|iq^TKj-d zDxd}#yar$w#rfdzm=KjPKoW@Z@#L#GKZphbp`yIfU@5T*Vp9PY;K{urWTl&G^)P-T zMVG1UKr~0|&@vbk1rgZWr2b9)vq@UPHXRul_|3+yx6j}BvnMuP`jpf@EY~wQK|5}V8tcj~cGOGzOJQzbeV2&YEOKt{owIY?(AQvUU%`lrS-MCei z^9TxU?}5`{kJ~8>K%Pc(%BNACPoBnDKkmoJF>ai`T;5CHeoCK*vvQJ;;&t(^%)WMT z3ufL&lDw5Np)6WjRN%_y4oldTfpCM-HPXeqL!v7;?;44pQvGO-_nO=rX6_93ruqlF zj|3f}eqMDOTC{AF_V>8G4i5pM(x=ftgHVnK9TSs+iG(=aQ8RmY&)56w$7L2| zOb$)0zxnX;iznyLVg;W42QTfLzQo0v0)>lSd->5{_Z(P!+0st}lY8Rfgk-rRE9csQ zMIFVa)7i$}fz2`7ow+sR*PnOGWlIiLSlf3jy88vNntslYV$Y32M`EdqvS2vpVy0k> zG71ky%ZZMB-gUKA6_Ju6QdHTQ8A2mQ7;ABHDlX8@n+`G+szv8ppLW{Z=|+Zh;<3Li zyK?d6XK(w)rmp9AZ_STfP&eVs1rMBU3s`M+k(L8%p18DZ(_$9fy8SDGr|w&@bA~N` z z7xtB1`sA7eEu{^1Yo6`w2Ns+;p?bv>TfvqypY7WG^=*4DTXMy+zXt+@MX<#}*KA(W zRcsPZXPei}SXmjZvfj>YjRn&$IBoV#aodcY3+{W0%V4_8k8&A|Zd_aj+ci7du*bo~ z^VF9m;)5nks5?Eeb&x7Hu?;m?;Dn*sWSa=RzC%wcgFsN?RuQlOD-^udlA2y_LAU$q zn1b6pr)AD?ni;v1AvCK^-VCDPNHmj1LQ^q~kly(#G(y(bhB7oVc1+GpC*xo9<^xpa zuN$`{x+%J^@zL$^s5%;CT0a;+eCPF4pB0~ ztpHho7_(r;2yKPJ(#$L}euoI$nF3$|dsYk8`C(7#l#*=T?F`XUL`*>frO{V+LLLIz z*CjhIyawFU7`Nxx&q`sQ=Q<7M@h|F7k34>uO#5aEEb$#+XNH-K~^?pCd@XGP4?(ex+yOEGX zXOTF!2e+LCasG{MdoEpa#nPU{><*7-1ADW(EO7fH%X-U}PWlb8 zq=V^kB7q&BuO5`1293&ugI_~cCHyFjJYW}55w!wBi`hB=%nHIa82^C^8i+rj1_m({ zdBlsf8ZsR9h9_Ij+qp$bsGA$W0zMC+E}T}2P{zuLtK1q=Km)v-h*D;8&5~%Pa;S5B zwueX+MBUsjmJd?7R;+Llseb!&_2ANY6sG;vn-|JckS-odtz*AE{5+UNOwu6_l(;gD z60|o9Q34h2RvLU_!8CiQ@=-`pr7Z`ldMxX^5{1Da6M|(yl(vM6Lxm~y$YXhq6Vd}V zFlQQ+t#a#dBzpW@j1Vb~9Td;zgc+G2U%r3%dGU|no;93}G~O{6gD*Jm2nrfxe#vbn z@YHC(SBQ^{^@|7BkM?_8esX=(@i?ug?@|6?e9y`L-u~DKpI?0kF9p98JKhDAUA7zV_ET+FSzLB?SE0t4jm3FNTv7lYu7tN{5(Xj*;^snqtU;Kf^ar7K7CEPd- z`8)G`4V)St^DZ8G|M|Wan%eLZUDy{YY4m;NA;&FB+eg_}0Dmi8JmOy__k>cbk@lpq zDg>RjCsbIuJzK;x>_Jx4tv18}`$*lB!}0_5McNZJpC`MkNj<{Pp+MO9HJdK4=p2f1 z?Et$)Oa!QuEK!Ov(Ylh(tt6=c!_0H}(Q#m`?i$KH;hH8_Jf|D%F2PKr{6+FcFB(vXy| zr}o(xn525xXEjC`^6i6P$rT%fKh?+>Qr}=5f*7VSSf$7#RXK&gPZSmx7Ke&bn8-o> zk>IkNh>3uQ8_GyTZX&hn7SA~$GfA6zdqFXHP$Bgaem{y)3GtPLjo7M;Ic#M0&^2k& zS&PbEg^Y9#b26JluwfoeCbK*2{pqaMY_QrXeiGHJ)Hk4}y0Sc6S5haWGhu2q>j{_; z9xXy8%`{=75mWm-Bnd}mO5k3ju^a1}R`Yu|DtD@v%#K!7xOR!L+Asd{1ClG$FY*`g z=VHRF#B}tLU}2#9qo|0HDAofs6d)D=y`gjjaaM>Jc?j+u=UtfVce^qi!1n<4u9c1; zqCN=JF(uoWoH!l}w8v|6T%4TlJ67C<=> zen#h36mQu_D&8tX?~sz>{5-!GAYr>4H^uQ_33Ak5QA&5Wcua>~gAm?5;;dsIC6D~U zwId$+_9s3@gWAuHa}W84oO=K*pD->Zpq>{B!G8T6@^4f4UaKfdn+XmkH#-V0M$Z75 zM=W(NrbwolYH1OrO=n}1JYuw~Fi;V~@z8{FvJ}`klqk;2X$3_%iY=(4Y)mV^FITW{MoIa!Y2#Zi6j@ zBS+d1QVmUG%&=d~C`&K`)rumo!B`);M44Yvj$vmtJ@@&eEAQRVFg3L)+n3+m^PSV#Us4;z zV(ZB}O+CJ}JAL)2_iXUK{*So#B$v%MC->zXWnamAKTHm2wSjk`Hn34xmROup8;HT6 zlrtnmFs~0pGi!LTaND%`mbf0n3j`}GO2ZThK@vk5jN}hq4I?F^0P{{Yf_nCb3;kxT z!=sEA{}bLz%72@4 z&)s^hVE@_bXSYU5HBI1Psj7f33~K8U-`**&fzE9dDiX0E3L=pvm1%wxH9Lcu)eYw1 zsOQRZ(#+K^7fo`adeksYsG`|hyih|&^J=+@X35L{lJpqGf3U}G;x{6lH!R;ced0Nb z=A3`qf}M}7?mV=pIJUcQVcD8#SM6*#W6}7|s@!qXdj}3=Pnus=Ju4DgxP3|cdA;N2 zUAVC8?79A;#fcgFB5EbeY+Ny8!Oqgz%@xzf=QxY_K42&B#Jw&?KR&(>Y-BENfE0Hh zgqyeqbzLPgFurN~C`EH-4ONa(P5p)v2!LjbQ5wnJ$RXzxMyI{{d{Wb~$t7KP_N|}R zcGkjKdv02=^YOk_*Dluf(Lej9UG>}M?wk>5JaFKEoA07Dv|z{Lsps~No4bEe*V*&@ zMT-(M_eI!Sd>0FMMrJin?}}m*ntkyZ_7D@MCMJ`8K|plFt{7b+Gl2|?0;J8d(F9#H zEwp6HAj=JM8J9^EX2Stu${ecvfB2mG>wCCmL29O;Z|(Pw?tbS8k3nIn|) zXB-C~SAqpRCWeq$kH-`1VQ+1Wian8y6czM(y49+Ukz;pz+#6x;QE$H-z2}B{1KVW$ z8T~iNJ0>(LJFKqeKWH+O?}_8&e#LF22jq{2*&RdId^+kH>QBR;16-H>93zhenWR03 zY#hD@HqPwf-=%IP8XIe@Kg4r}+Bm2ONnHax)<|nw)7D%iEltNwK{gDYXPwZQSZ184 z)5S7nXC`3@29FjqC`UNj>S`oq>Z_Pu>tVc{x?-L)hS}DJOibpMF_x6H+ zbHTq^;NLoQOtZO}&0NI1GmU*+hDh%?G)-ofMP=kVX!afM`iI;6 z>XvY#YRek+{v6ahf&XPcYn?N`Dpx&#t3mOH;^QC7e*sNugzwIIoKgx-405zlH-AKS z_!yLwUmzku)nr62^sF_TY$g<*U@|63?KXfWlqMGX0sA*;13u71UwdXmUu*+J{Y0x1 z6@v?5SRY7PjUO>XCH2_=uxG_9V-XOv7)?Z@7&=3>qp-!2BAuv)pqWO;bV_qm%_sk5 z*YjI?dyDV6E`DZxq$;kqt%*JnGzHd5tNL?I!KdQex0J2^mpyFDO%F`zv@F{{YtyAw zIqFKEiDZeGn?mOs{PDuokKwR~+^b(dL)R+P2(G^ zZf>BSsPOy<3}?lav5Etz*5utkk*vtzc^f)&7(m_Vld50}5B0pDeMU+(JA+D;+}XTz zzkxM4;rlTo9YetvLm3b2s+D%`aDWQdEQypW*LJjW?W|S~Q{HQj7@3G`#mx)F-MV5v z!Rdc_eDfWr(K%zkNBDm02!mB+hxM*)7|Hhm6eBwk07kw-Vwspsr>C#;avefCB-u%Bv13J(LPSRW_Yq-LxT z!~px828_Y?!OsoC`b2*X%ivuA!Ol)ohRK<+SIDrKGA!pqwNvzWfJ@0v2f8?Bk}O_F zI?QA;BYcY1hbEkKU2S!oG!dP2FppQGp-BU}#u7*er=5k<#>lBg;D=MpuhEn@WprJL zXV$y2OzjUng>&|-SL5AfW!>zJ^)ayr+ecOI_GPm-UHF^1ft&98B6-48%xxHFsqB5M z{mky}Guh=cCm;KF^}E=A0(O5F_FpU{5);tt(acRwUB=Z(<~RVJ93&Mm7BX+cadbCITPJ{)o(4Blwa;*X9gAeg1z9ERG*P4zt|*PpTN{(i<#si{9oP|nR?V$qIA<{Mss=_B&AJ6 zd*q~~1Y!#rZo;4r4lAqDmUAoS<&Rw2P!T{_u3W|{qa{0x{hOA3E@?5Ud)GoxK-w6SS_<9>4$H8@9HOukYuNKmFHpu;od>YSG!(QVhHV zNS#dFDV#lY2}CQbJUD$qmyJX#hw1jgVoQ<;WJlY*n(DHs3&-#Bmb!|mIAKttrku5I z+!|;QV~0cpm#jQ?b0I(YEe+A->iFSo!NnYrKutVQY{^!S>22l%KST5Lt46LCUIobwT6ex}`$No_ZIGysk3 zY}($3M|v93ZMtn-!{o+EcuqYa&onoq1U-Z);)B+;BPrpee#6OA!&++D8rE$~{ru4h z1OY7y(bb~8{!|nlYdZSf=z8|YbI>|`Md@oy{EvUA^VfUaYQ*E-AoksJ$)20CTPmqZ zYnrX`0PE6}ICUF-$@eoG$ng5&hr!7uo}&8T#_ONm_6==o zht)f0O1qP`OIlovt^>BH%#);h@BQ}$>W4SHxSFh#sp^xa3hcW{n3tGcQw%@J2fdjA z7YaEP8=9TjBvkgJ3z#S)8wwbq$ohH2M6h%N;fZ2x1|83jUL+qQ~vq_KnTK_R`G4y1Y%-lTWcht?Kr85dwt8=Jtp^ZVc~y zOIFOx=Qw)t_1|74)2?oN2n8PlycH{`;de+Y9T=a{1$YR`)bC`y{3p#`fUT{}!Jfwn z%K@&#j@VobUdhaWC^T&A4zjJYc${OHZQa-amgjMv5v}WSY@F+qt!uQz(cNlqmZ|lj zhxKh6SHLi_*Dagf16T5`p4cf^+3y$}p@AhuVVe4k`8Di64xIb;#FoiSb|siSt1J(Z z-g*c?WJz2?mfe+=6b3wckgdx>A=AyAna=(p@~{>$!f8`5Zlj@kPVF2+CX!L4hl0Tl zCln0$dm|DItzqbdB>U*HLE)8KJg6^EOS&LAsZXIK(_lw1EG+Xiafh240wuk&lsj*@ z2HXrrKNOM|!~j!zqS@Xa`Q=`>JJPF`Z}zw$?VaB2Z11KH2o;Qp&@>$Hb24b_qQAV` za`Z;tV@tc|6g!}It`a6DTJy7QlqR1G%ND5CE-TqW0RJ)y3&9-HtbY+{B`k+?qrG{P ztC+A;hG9GOO?BHgeNQg=;m2;SO?_*%B5g-Wq*&@3kGM0#p4InJFI+h1@!Ex*UoY@! zd&%Q}!dO;{*5WS`MZD+80V{rmO~Qg77^0!5p$-0$nNWg?z$9u(^@Zz)EY!M&K)cr< zXav9_WV_E5)~$Tt6!nm`F17g4VRjBDP|EMf`J;@RxU!&ko%$nA2KKW}be8NdI9AVQ z_J3)|{A&N2eD%n#8)pn7#Eq8hY>TvU=%LTJahGxbv@_nL(I($(<>#FDb^22ZZGhDR zO*)7+#<`~{Z<@z!!WxqRML+Tq4O^%4hei^EKa??31pM*h%||~y0WFR^elHs%8926Z z8gRNl0-sM34ry%QtSZfQnP9a{WEn7O^Ah?mWc?R9x8yr4xW7)U$>@WYGf^=z=DjDq zgD_f|${DhNvJV-n1p`pNo}o~UIe3Plri#thzg`$5Z?%FGScsm8S#6kbwd<% zBN6D!&i1io)cj~G`}rFBFFTF?e-ryzAH+mXUG6Y~-C}dQVZ5a94Sq9(ldkdt2_i0o z9G|8h;WTWgp2D?d;2jhgPr;Zis(lndgboL=fum6YQlnjHpFC+CFQCv#kW)+~AlWfS z-WZJxF7wuxZvL`%?6e^K;4H3a9ZnO?PM9gIO{_t&bcPvYn=&HV zE>yBpcm;L>*h|U~d8kMNmmtD`=re+{h^YXGXg1rs1-l)wXEr?_tQpLI)9g0!0Oi{C?|Q&k zlhV%xAxLq(8JI(T&5<#z`O)w-!@ld&5!Sqg@4C&LYkZgR&hfW~?VadXPS{25eQZU- z)S+vR{|#%-p&C+s&1B=dQuaRPHPpL^o`e1<{5jg0Xg!9s=V4ypU{54|zEt0%eK)2K zesAdeX*@u^V)XZ0Z@iyDcld8J5NE$j-iKzbl|o;_8VjPCFKjtLh6`Xngy6{HMvHx@ zD*&6U0qd5`aj|w^2(Dmu={T*GvY9M)_&3GHX`^C? zUSDE%cHaH_`#YH?{I@Wz*bW&(>Eb2&3hUMPg z9`f&I&*nbi0f%2S14HVCSb#v0qge|pWnwC7wcxzceM zbSKpr;xCimWwu>+cIw+8nFan@4!Mypj88P@aJd2R6a|-v$qpp5AE`}QLHB$i-{V5q z#SNUlVP``!CTBeIjAcEVTPLN-_rI^nlUFf~O_M0>?!G|`je0|$+M^{q@Xq=>L9PTx zl`BL)Gtrp#2yE(0*aN3IYJ5t?t;8Hc^YQn^FYtT6P@SlR(gi9?mi8i#1%^ZSBjF;j zT8j7J7yKnIUx^t_IZFX=LJ+~okYFEFN8T)ck+=Qa>Xz-ZqPtj6#l(WriN25Qr)_O0 z+O}IfO7kXu)!>Zl(poEt$vvtj!kVcn6xQQEz+_pa>1PK=C0s7$^E@qGt3@L0Rfw2bdW0dFc6z zbRH7);W#iXO$}9T@iuf9DCC`Yy_r0thh{jHtdi2VNa#hP#z~K$A?v+7y+Gam(e#8a zWwOPDPrm=LPxmRGc|2Ry05ndvXB&HetH<-1WB;GN`t(1}KmQ+3AG~hdxa$sHa>MxX zHw13@;zo34R8`BXb6zZJD@4SINPB)aTFrjgrvYtG*}An$xq7wVL5-Kb?8}|mkZoT zh|oAiHdte&p*kwS#28-yMnXpO5H0^A9%9yjDX`_s4>#dI%&z_&|JfwxY{7rj&(;L5 zQvc5ER|TazgICdCUWE(6gnHmEm*V|{)OFSgbOG{~Frnar>=jTOPF$3FVVU5k^bndQ zm`y^E1+@u2I5v7A-X9^|S~j!F@!Mvbx0IXfgXeGZcy_W6H@Q8}1blwR%6{y2J^YY3 zo0mHB-X+Ig`srUlWBr|QCrkO=`|tgZQ7$*=9iQVi1Kt_B@B#VObp58&`XbP}gr=CF zueNEB*04R%76~F{01X*9)l)AJ%C7*d%p;4_{ zPU=5W)rwQtTBg+JosS+t%;EOzVjpdGyI*`U^(@YX7#%zf2-5Az^T2!KJj!t%q7XU$ zJCSlZr9daZ+W?#wjU&vgthgW#Jyy`9oN&r87){K)0izF5fdXtYL`<@gc{GAnv*nSx zXdQ23?nFP(QdUY>QNNDfC4Bl2P680tuYl8>BDG>q-u&0b&kipMes5sfjK;=jV^Id7Q(`rKM(7kN)aM)c+KzeK>RK*b zeXQv4^YY=d+sCbFS+W$@9j}xH{l;*1RdjO5nM!#``NKuzm_qYj2rmMa1GJ#mP9xkf zSjETCgjLHD=TMdykX%TCDpp0M&^S5j1%%fB%@ZZU^=k28BPgbe^H3JX3YagC&#Q}l zHE*KJUFdW8vd@uzvYo7>?P>tll6p;3W!sU!#CX}b=K0ae``&tg^Pq6nd(dl6&Bf}A z>Vl5l%ZVqm5M!c!g%K4WNUY7o^yxS=12u`AVaAD-5PfDqDNz!Qs&AYTAV$!18&E0d zWDctXVBnOEmXt7|WLyaxK9pCW&ZHn8Ae6x^hgp~$zR|ag!RmaYDS1_lY-xi*v>7YOnPO@2Rfe&`lq?LQUp${1qX@OEmKWTZsk%T1 zO6cbgyU(~3O-8{BlgdQEfXghw`4&#NzgIMC(}M7%_UpKNn` zwtroH5>hmyHj}bM($nhK)XrtQyV}Yl?afPL&3|N7HzA+St(P3z(F?DCVI}sLjU15| z`{VdRU}H$H>vx=2kuiSI$cJ?SLyVc8XoX@=J#SbBb=63Uqhl~11(ZDN7S=-Denvm* zJT+H4N@WlG9beN zC6$zVi2We!ya8xeX0zMU??4SQ%~K~&Y;CHK&#sw0vM3~A+KFi6*Wg6(aC8ZAqWsWR z)${9Pj|K*>#M7Xb$Gwf@q(}WKzYUki)>ZfdGjgwbCz&L=?4zr0SN}T7}#$#$nl8z7|M8`B45(oX__3`@J znvrRDVj05yzce*s;&_7*rD?h{5#M=X*E6Gt(h=yfd--QN;6S@aG~cA1&<5cmACzHh zO(d9?39a8!5ktWqrvm&j1?536%EbWPgGn9e#;G;Y@ujb6sQVJ=FCU92^$T~_(mpPQZbZnl57`Y2n!Lyc^-XUYFi ze=AvCTh+6+v+e4U&qldy3Ueyzk64WbmDU{{nUdPA+A=#jZ1^=AeBUc)P)VmpW-qa~ z<>d9#^sxLl&?^S5gGp9Yuj<8&A!KZ*s8NB8Rgj05FyIElw$CC`m_{Fpii#+pd-2Iw zg9|8{3L9EXxKU~_f;|CO!xe5ok#dOaBg`8P!6k%Yk=1#Bjjy&M&+k4<`XStnYPYA% z(>VP@eC48jKyvbSK)n=8gLLakPcW!F& zqB@VZOtY+p|5cXuSIA;i!3l@gWzF`AHpOb&ki`_A+lA?=oJbyxNJ3_2$-E3Z!uLcg zSlbMsg}lW0l+OuDmPzRlva}Cp5rt3m;jTn2yhyZ1Y2MYmYx}m18`cjjTQY0rwDu_g zQr0)s!}3fXMYbPD;Xn1GpaL0bSe6aeAr6%P3v<`&QKCTQY5&5!=Nt|l3U`u{tKO~$ z<8+!i?lYx3;5NqHo^p5hl+`%RIGyHi#F+YRoTf7_UM+flvo7lPtYdHXyFK$}L~}#7 ztft1|uK486Y2S{=oEA@3?u2mVA|I13QorARWqY_Q+D}YSbk5@FU1MZ|+!nU8w^wze zhTSi11>2zPjMzrq(9{y{ENwZvwlFxUY*PKqvYz_FN%^%4d`e}ZdX3yMZq@fz)SA>Q z{V0GZK0@9d%ybX#NtrM+F)c3#(VTpb8Iuvwwo$-E(IBh?xF&>P#khpETHu|!KWNZ@ zU<-RzR{)XUgy7pu>y z5A82AYSdDk!7lcs3?P0-hgIEi?DwXQCib*?h)_A$4tyoPA0Ag9hwag<+WXS|-{cyK zt7~gM9cc~b41q4j8q66g8W!`UIYX&y7UDUnkx2eNsKHNNvt;NR$SUIusMj_!M1?`L z_{%{{L@5MsHUbza+*lx{U>pFkjtVN#Y#YEe6rB#yDq7cSaS^oeCFw5Xa%vr~V^IPU z7UcVJ8R7Lg;384&eI{z}saTZ_&-{#bO6+H9#Yscjg|YJ*W5fLehZwTs=5Shpxv5zE>#e%X?Z z^OKTSOK^Z+frE#fgC407x)P_QXp-t;=nVqgN{h z+t3F^DDg{@*&2f_&f6XfP@h&Lp3I~|(WDh+_+&xh7DK5JQVJU-kSp?iVO&xDL>VK; zl^JcOv@mMY`9Nk$v%a^U&vvQn4zPCCxz7tg=g&B*V~tv|G1jhbl^x1z7|U0Rez~Jj zox@Hal+39$-VdttWE(7LybJi0_H+yQv`xTd$Z#<%W@JVbskNR-W`eUy7!E0+`HRU4 zN6TcUZUQCqL`Hi;%u(A?n*b*R@XqRT*aa9dprz&0F)SxC!ZaIrU7(vqPL--Ef`FH)G^yDnS;zr~ zuMczxtR#~F9F`Tcbb+3+D3(4X088ejhW@#ol#mmxRrKM~vNX4EjlUSrn*;P-0H>VS&%7Ho;%KRP#4eFRx zn%F8@tkcHB9X2BdB_R$ifmD#pW=Ljqa?+#}S3N^+^>ktS0&$d_KWFM zqN2Nc@|cZ(4PB%=S(HN2bP}?x#ucB^55K9Oy@@Lxo)+BhbSr^UyQ{f5+!>$T6)F05 zJf4YrUOzY7;APbpt3Ou1*j_j*IMPMOxL4Mx{;S1fD_t6G-Ca|VKdG#+HD_jde_j5> zyxtn6B%to28$a_yHqmZngZ@xDk3mZlkKyu4LJb5UvDw%JAdHbDZ;=2*3?Z+Al{TBm z7)htqn1rBq1En9Rv&-1iM;D(mSBYQ0tMksIpNw|>PQ+Hm^@CnOje~p>=P7iG3KhUk zZ)pyoAIl^!s-I0FYVFWX63yKyz$+s7#B)#mnvmlWC@2aZfU2uOg;H@*VSW(BC?;V# zn@-6n#zT-=zKO)X9$P|wJuL&r6FcB6gQw861#HE~8<@qv#=8AkEj5RB-u~qwk9(b3 z)laY&jjLAdFAaqY8|p$`@hP1pMc=BcC@d^kUUB(fZe0D4`lL;sZN6~ny5}C5qaL#R zL~0Z!`s~t5yI*S>=C`XG6ZP@6>w2pS@>(PLGje8D_HJxx?sUz*|Jh9)H&a_6(w(q_ zNOw-6y7Jl@bkdT>!*k1Y038I4fcu2(h=4Q@oH-24gf2iWZc-JbPzg^18g$|Wu0JsY z(@yi$$*4){M2bNUBTd?kOO0ya!nx$uovU^As8)`diD2)fD#ue~B5o-0ULxjx(4|}G zO?0=l7G-53%0JC4+QiizOXneCCC!D3bfAkgCN6_QK210~op4sP?a7bRdJ=DG<{wDyk6X|Be<1V!8>U zk=v{=Z|*_HJWDEy+X7i$iz8P3t?8YUatwp`WNcKANP{JW|ym;i?6iM&te}s zXQ?l%ABQ_5{c6>^Xh&H6Lx;?^<;NBO!s`5qg>`GnXVxXkCIt)sZ}Q#)Jg%!+8$SD- zGfks5nyRjmrl|L3%aX@cmL=J8Q*o2kEV;@p?nUk)j#F(XIN-!ib*d9^5&{H563FES zNJxW_kPj{aa)ApW5Fn9G{&($jW;AM!;okpw?)U$>xxtq9oH={%wbxpEt#`evXvgH5 zeT&#_;US*Z{gst_^GTan{Uf5O?Zs zpPy^7+U-_rEQ||8p3A)I1`h85{uJC|+GGQ8O}qn)^Pk>gi{9i}wN&|or)79Ml<#U= zJSibPnszwd6Wrq1ik?NmEne7FX|a@*k%by(iyvKlN7p2_zfg;}h3!Xhw-**vSJ7r$ zrl)GVJ!xO-c)L9&oX2-Ncn2ZQ;;-Qj;^I8Z0G|VpWky6di2yxmI|ijQa@6&7h0{11d+Nm}(Gy(wW7Mf|WQ01tw?# zByH4g8&dq>?kK|#6zUTLS>*4_ed~pqZ=F5Tw+@ccbwU>Tn204}l5#zIfm2V!>$RSP z>T@WLr#|OHd7S5(I?_8f3XWsg6GV_)$; zz@zdp=#i4j-)sCHlI25x9r4^nqo0d22SlLsH1;=DSfg&N7coE-oKRmlNZ|^WQ&b}-2_^V%PFZ3GMO4C#IB_r|DDo}{-4#!g^28!q-EJ^_2dCd)e}jM65u~S6)v!C0R&QO+a)%=Z%Io-n^1gPG2^}WLl#?t0UqnnC zY8Fx~BKT7XgQTrUJ}T>RDZ`2Y>i{qxE@S|~00Ihf56nDLh#a-a1@r;YOzUw~pHK=frNUoJ)XLi6ntW+}mc1Y`ccyMx;@%Aj7Ke2` zd#ek%?);?uY4VE1z3U_G5vq!VoP(a`b-ig)dfRawGlwnH!#Y!yyX`p6#PZNup(cQz z;S|`-C_=0dX36+Js;j}jZUvDr7}d2z{;FMEEX)(L}2NHlfk0 zpel?U^%J^*tNBPEH*B;?2ekKB)I&vL*6K6pa{~`OVopUP}`4RQXbLQpu4NjSpT3oPREeRez48fH> zC>OTWRL=FqDSx{d6PcejxpZYqv_GXHy9P@Qol0^=@&9Dp4g!^Gu zsK+3QT@wBX^*6HLh18B-U78=pe?6+-Mez3_KMc`fj@C%ZQN3UkafNqmq}ww4MOUV> zH!@L7StlI^s@7^%_v3h!@ds==_sN2#PTUUzHfsvjocm8yV;~VJoQR%E9^TbXo0uPg2$m{uV#U%lBEW`5{BCZ&l~1I-Dysic=6sx%kv|EuUa#dw|q{)XLc8#e{cD8Yyz z1xH!I+t5v&b^~MFp|@@JDfXSMlV)VFATYohgj%@ZgMT24Io$0q~%^JK-2z zPSEI4o*LaAHNq$DQiod|#f2=vRSPhTR*-)K>eJN@|7?C3BKDx1fw zbK2tDmE*1Lnbmsn#t7^6M`dYvCBqFw__D{R?LD-N>#G@%p<5v7Y#zBEXa}Esf$7;8-(#=%YqS3n_g30P8=8o)a4Vv`Xm53iCWGe)Ui4O_7bLk zq^ee%n2!6y5z0*E1mFcfegJXBHN8)6vf8$=pRBZ6@7z;)a~@FYTiH)nqL0kpg*)r4 z>R+thwaSx?8@g+j-%_yh{)8pxw}@XH)^9b}^HFHR}|6hNuAh$Kj@3^e%z|w?>y4{NCXSUyljYh1)i0G9J&!8S44I?{-%|^T( zyg#^f2}aPq5OQ5Z-|11XsV~rTq9?#JuBU z@+@KlYAPrR2&Kr=SR#^|L9LiBq!S{}i~plLn8qfkO=g6!k}4$gRk=13byKWg>;+d1 zyRO%2y9P@v0Pl)& zg9hBd3<3~|RG8a?R79}m7UB60$`7)6=#T1k`uwp}aI3LOQt1n^N%LzWxyRu>_Ut?h zLHyGYUtP9R*pYdDU+cXyyB^upxmazpXSH0z7WX`}w(qGe&bY3Y2%TL#ux?*%?%)nE zv#RAX)Msy&F$S2nW3@{gzqoCvl>{8~X#+>CguU8Tq^W_EJIGBpa&-7W_-zIRiVSQZn6wEWOHG;rP9|ie zBVm)6;6m@g7?cqCncrkYpA%kMfPx!dLZms=V>eqLwV=+EssD0Lv(2`FZ4_5qhmZKv z%JF%b-m;Wi#ZA4k8gCeHpJzUE7fdhg$z=2_xfA73eI(PZrF8gZbitEK;lw*(zYXn9k#;wy@x@}-^NKp~Qa#T*zpdeni^#sm*k_*h9hP#l)%CCATZMN$xg zNC%1q4-}TGOtXxm{M`|%6mZ>=rn-zw!! z#eTQTzv99gR<>n-ZSE&dNs}0M4VRdiE1K@yr5xS!!dCcurKsb}g1n?7@`K(`=XB~D2Ab%&(HgG z#FIYc=N&xQxMRX-A?%1?C9YSJ0HS;362Q(OVY(s=XwYfpbi6(_!w_ltykxO?7dxAS_zYFn89B>eiap;wi=8#=F2LmjF4usi6n8091BstEYT4w6%M5|D)lg}t0 zUB0aR@wQVe=IN(c^yxR6A3k{OvBgUsK791iC1L@wQk-R`2kvI3b9aKJ;>b@A9{LFg zDvlm`8~XEq#d~0#_;dT7H+$6ut&w?0s7DkN`-eYQ?Au%ZD<2>iw=Re1EAvk8t zbLeiVzR&%^F{9sy^BQ4{;K!X1?~JpKcjvrD+Pjb7HR5xClM#IG1A=<@9W#eZ&y_}q)?cMrcW?!=9s^W3$e_kEBxs{1l}t&G0W&yDE&-1l7^dhSEQCu5&WcV*6z z!Pgge<#&YytgpHkv5#YY6QC!tzQg)bm67z&b03{(-}J*{UY>Bw@cw@|;TVc7a2cU> z9A9RK_fj2mRWi~LT+iTVp!+{>{~sFjnQ5axBMx(H@wMW%g}*;Ne}eb_Y4Ew~81?sF9m;apCZ(;{H$pEbSdmJwIiN; zEBiB#MUDJ!j2Y*->evy_<tKdL8wka053L;1UFGCDYR_pd9pN`EXxJ_ogFgKrJlowpf?N zg$R8Ni{YJqie;jK3RzMoSXvG4e31bjOIFkiCVHe(p z5!^B8U-$GiPyBjt_SWZj?S7t`DR9{h?T3$8knvLvmcd)AL{C7TtKN!k9=-4AbsyO# zT3vsl;i9)>SbHMA*Og`19OA>_?hD+RXZAm^%)nP$`3J52y*u|`Y@;hcLYO1NSAH#) z8H+8(VcEAlH$c?P61}@j{U~E_D=ZTh1)B1FqRx#(uph>+DNAzP~T4G$-DX z0at{t!2z7@p97GQf#4F#v{9{sdU+JGNn$)gQ&I!( z1e`6e$CK~P_vU2k&^J}*=M7ThJaqL3K!vD>=s4l>%P3Mr{-_R{OE)ft*+;is)3=p8 z(uR}OeaqKx-a}sMqT8o2_Sc0w;hh$;#qdrmixpk=#)Czd8b7+3{L|FrJB!ZG`{Xno zDtG$R;-4qMRbBbm>qp)m=zDhZkTK1t>KDw7`-+!G_=+Jv0P|k}E}O4%8AwH*yf}bF zjvL#Lh`h$agfaLRWM$-XqjpvGxjk^ZeHki|r^9uNxfw{SAp=6}T$1?QPB$rWlg)R&d;D4DuP4OC%a7jKdT}?+?UERM9}^m44SU$(-G)zUS@U=sPUueR+KdU<>c#sjw3uHTF5gf!+=tAUJr0(%A zB>>b@DWlzl&*95PlPBH|F(iV)yOD!~U@{7W_e2g!RwYxtDLuP*U`Bqirrd8o4dML4 z?-tJ8tiLBIc}3OAEy_0R(lPAPqlb?^vP6EgW@ksr%m-G6EBNjBk5wDPpd*}I0U z;WsIgOFkbimtPwpmzs~nZQDvqeV9&wXo`qNSp zTqt|sabzkF0Z9{A6(#vY&tdh41~S`}H}~U%|W= z0v8}*-g!bb`=v?-8(~J6GzSPlFo>(X;@VFl=_djDOH>ZHzr=<%P;pS7tyze=BN&1{ zTLzOUf&gVkAnNGw86t=i%`BMAtud(nk4E}cSREBK@u26xA65EK;D75c#o}SJ!yaac zK;_B5{4`>=tyQ0<2alP-^xu0N5$(_U+{c;tZ2MwZBqkdv^h`9_;5@JJW}_b^L`A78 zpb^R-xN2?qC*Uv02i0m>esw`LiDE{&8{PlN`?FezEEEPCY;#)gKa#p~vR+_3%5WeW#D8o^k&~58=$~h|u3Tw#n31o4 z4TVT)U4!2bJhAEM4doRl4<5U*to&riS(#e7T>125f^toaVW%hZl(YKvkjqT;y*d~$=Ruj9GT@$2X32$($TS1RAfjy@XaoUR1VXb51sYNZ zH?nmmE)&Q@eMRo1rV zE&vbj;If`AGrH=JJ-WF6S%i|Q3_-pPT^@$kH(k5%UKV+lj&5R%Io9p@!;$LEgT+c@v;gU9Y|Xt+nNIeKqHg}hAJg3Cm&d_?`Oxke^&?o&`&V;RLs4CtY=arTsv+(>9T~PZ`FDZ z2NXRXPhNgT(0W1{Q#iG}A_yi`xg=4aCj}KU?nTzHKTDq+eQWRGw+_r7sHLaja2N)U0z@k7=0 zi0Pqu&D&mGl37QSX-F=w*)m6+A!feqMn1WOuvyji)jHO>8YW{Zi>jL}>zwtyGqgD_ z#r)S?->J@zWVH%+l6U02f))OFfslp6iCi3ro7MzFzR19#exF=#%+2HV`7=FwBY8UH zkI^QPhnXUd4*z=5Rg5Phq1ohjmS1>l&z`sLUcUV9pX}ND*7;@k_I~Tgk>~pQo;!Tx zTYbvwvnwlS)BlUbd)_?1bm{rm_t5_?yYTv+121jc`24{BAMD)v(!t6(b1EzA>hLDm z7vOND-yuJfgc~_dm|u^(A20?;#S&&*67&)wr@`7vEpc$lRQ1f@Yzl%eJvl2S3m;9g zm$`jF7Y%d%b9fPoxwVE_suIB!R^1Y$s~mw&8~)kIzQ*)_KlHO>>CI-Fb@!xO9=`2y zYlY2O6ON6#{p)-0Y0@+04W6=RpADSYc8Dn^S@%Ej{+aato`357(emG)qKinV`50^7>Zxov5qbUA{VAAZt&ENJHf(q)dzBT%dYxB@-5S`3idfOV)e?W3z&RWyY_EvMw#*ru44k2 z+y~Gx#1D;%b)Y&}VF9QQQkl+}22?v|+%Xwt6Zd4`7VCzcA6SWy&hh)8JadR98jtdN zy)!Kw=A9hsG@^nU3E^#b6hmPK)eqf1knRquAAWpSTH@m^E0G(HjZd7DIQaWLZ=Xz< zpBNvzF7xy)`|eJfl4xvz*J6o~HkxBqGSyh~687aMt~tL*FZM-P=pvFnKyTgKI207X2 z@m-Kzo>NY6b`SbYl!dBA2&0oh7kbpz^6M6%veAw%IL&di?04Ja68yD!1<`TY*3>C! znblvqnXO>@za4!0Sb7T@3hkME%R^@_Su3qv<5~?;e`0KG($vJ1_}MwD+9r@#fbT=+ z@n@K?2f5%fLgN-NqbUWON|fK2a0Hr4$`R&XOqcI2I3nrN!*@55ySCxGTU_kJ0WU5q z1`kMnwztSvga>>0E_+c;KD^5u|8-TH%n#{Giz`drXRxCF{nxwRI1%5FfPK!n`KEp6 zl5+jVxk{4=Y$M<61m32Z#hRaGH{HGNRJ~rRQn%W$WpCd#*y^cFC$_Z{$$+Yl)cI#( z{vCmpCB z(+jHI&s~6G_&%ZSSEZaZZ^-x2Bz^n^P@={5+i4MW^AnA8M=wB$c;h|mPBrMI7x)6) z-QNCRTlTI|mq6QpAUt%<`cA|krwDV1ODj7|6b)3GWmAogK{Ol%cm=>~a4wJ^VdfSG z4aGxeS`cbyqT0NwFdtQBR9)t9d%<3qs1?_!I7Sk2xJlYoBp-C*aBS6BWROw-XTZQ6 zb*Hnq2Hq5Tei`hMRoM<(ExVQJ{@(S_ySuHn-O9(SzV~2X`^g!#8$H%bMSYW39N0L! za^swLzq)3|Edx#DeON;{9$oA0U*B-kq73Cd)$=$fX0)B(zxvkonK`XR$=PCca#hXD z^&6J-75J78w4c5wLXIu#Z-;ZDo(}``!Lv9YiSP)g2=@kDI%t5&%w+J{Mah_#E9xZ& z(cEu^IJG2m84AsSkPsHpUC{OEW&ya@s z*)i|*M5}f8(5*9OeDU_Oz8ur6u7>IrHzDC`y5*tUE@7daC84F(w{T^HW?gUbwZ3cV zjLzE~k=uNZ%9&l+rL0|b`hbU3*R~9@_Y7a#Ore^rN)eJhVrjMykzxL^TRZ6xVNY=+N6syuVjaJf^@WZi{f7sY0-~fjj=wjUdmz5j^W^qLftle z;WFhc>RtArb}x(2WGMU+vr=46UO0vFXYdokPC@mkL<6)GxSXUlBIYM`z)3{R4=4vO zb>PX#h_j(W4OQgi#jVy!Rr4=QR?ar*;*9dEwNsBx?Ob}I{@|}(dU%fVo-t0>Bu4eV_4bC` z&RJV#ah=+PeSHA?S}x2K77FVEeRklj!RBigo90(dhBYZf#3Z5!C^;Pwm4IkELNu?5 zjf{~Za6N&?$4N_Ya7rW^W@J=D zC0h;C-ILaxUUBTn752z!!+j5?M>tB zjMBh3zeZteN-{FDJm=uFM@9tuzc8o?Ee6aZEXc&FIpUmKenXhlJe%61NPfLK7S93IQJ#xulL^i z=krO;t763#gPrX@_V$5+w~r!fbl~0N5FyITtMf&E`b{oV^Oc7~^Dh+E1fmKHte7<# zLa1@K{}l@X{?iE-Bjwf+i=itJOA9Al5wm6FiYUt)St1oj!e41pUe?<+Ihcy*qa8$o zx_E-^OZf>X_m>*-m{6w*MmlA%5osdI3dqMMt@+PL`N#+#1Ton^&M1^DkeE!a94RQ2 zJpQKyrOXR$;}1dvZQ%d9{Oo<2=zN1&&m8#m7Z2?VEOVyc7+S4Im$W?guz2|F1Jk}> zY_4gneqT|>U$-dT7)et0F8W^K+$jm>_=L$d3ntG$55+`hsR{V?-^=@j6pCa8%F$e0 zOiM*x3(7~ANEMb1aB-;t%o;2>{-yku*a127AitGq_j{o%lX7%wR0pAG75c}si%!z2 z6=?dxhBXSswsgPWe%*=33%6H&NBPCMhI4W z|6lT3E}dPUnRSp!&#!;=Voz53r{|VlchXtN)=D$uJ!^mV;Qeo`&Zjv{N37~HbS!x# zsG0<^md*pvF~GPYg9L!3oFdAUHm6C55*Rl`tqT!#tqQjBDk@~cB=RIC=jNm&C4v|L z<#RFU-BZ8{sBDx94*dC0mQ=SBEZ_*%A*pUFVs%6Wr78yQN2F71NG|H#RAIAL?%d|c zLQ~xirEC={(pIrwy!Pk0yXzJ&@tGt2J$*?thfdsa&zZ;UIrgNYgod5d0@pM!QMpMg z>_GXP%6oJ4{>2-n#km&sl$0*7TxyCGd%6y+-k>xx23^6|g2!Kero~@cN5)n##z*TF zHs%8AHVDTXj_FgMla3gT`kY6DW5Td&>KMYDsbfBza16zI_!z``zBcB4rh^k3`xhJo zo}Aam;ChG?JA6zNzKiOH!?8bn49EW9yMQU;V>srC<~Kh6ZaR3h!=Jlo^m8F!F<7s! zLcU6cX{ea8CNaGs3&mZ;rztfQ{g zLI+4&C~BG*$Qu5HDw(4{7aH3Zz#VK_^`x?*!VGs>3RN|$@@X~*rK~hc3np{X0owDil~C>@1`5%s?9qq(T`yJRy&+tYqwIo%4U1!8Isdqaygy9Br_^HtGmZj zIh5nAP*thCH&qf@{ z&py)5tOS;q0QH_oM8T0R6UR0-2FDf-(bXQ?vQnK~J*hNMR#R9&+<(-z)@F@1%O$!J zI<{0XDE-Sv7u?N{*pK{^$2Yl9JktAe@8FrMJ;cVS;X|zI<*BGeBb`R8gF2Bo^wLX2 zxqE&dvP4n?06?;U=HPn7CG@z5qeKnswI)2ET~^8jby+gKgmeNV-YAr?65h&(Ttbzu zXtYSc6%1E$0FB@#8jLhSu!X|q+wCu)&jmCn! zl49wFF35?}NS7l~&>?s9@38YJ$Q*&Q+zJmm4#zB0LR+6Q2Fi7CL&AZ^MH8_aQ9$i5 z=L5no2p*)~1;lwUYv`7q4NAY%inI!+9hK3tkRqn23*z&a4TDEW#e<-P@)wvrnii;Q zg1dvt0#{sIK`Op9ZDxkfXFvY$TUu{vIeM{r(Qi7NAHJdaCiEff*>gijQSvz5tg>vHAf z>M#A}^>xkLE6WZPOnUs(>i3k79GIx(LE^ z4;H+I_L>7bRG|-MvA7uzn*3Vu99S)CS<$Q;cGek9Y2L3U&FHS&cWSN^N!(ER5mLm^ zgz{U#`dd%iU3&}9-cizeB6@>qc3w%*^li5;aIW3gXQP6oPw(L=W6XD^a#os+`MT-G zn}9uz0_&j-7Kd&##QLqrG!Y>YVL~7YT`()^osjBubsXwqn3ry)XCk;nhWlugk-j#83s?pvm5g3&Mzo{T;)NO(V7&cGPZxxA4vob! zxV`FZD7`;+(@@r@PC|3ZBt#2q$D2tB=JO>x(%RgjjT9|fO8Pkswl zda80%nk65?8ARPzMglB9E&(jk45AtoAwl^_Ds6td}Y=uHnZ|!+8gO)Mnc`(!NZd` zJ2skT=a&|({n{?+-xE;t`}m4}hZpY)QTgm6c`NKkKG1|;RQu>g!?`e`>K81UP#!=C zh5Z1rg$96xsNHTFIm-r==&eC;3b9`x^w290u?IC(DnWyILqq6tpaYXXViF4 ziVFaDDJiHbuF6DLT^K3 z_L061>o`YfW-qA=6xWDj8I4Ou9=H-|Ezt!@ZFMy2HvnK^mBW5$!f^T_;jyBy)RmAM z4UdIKZLvg)I^+}p$yyVHWC6X!%(1iut7FDOuqK6l-T*qrf9e6W*OS!;=m~WNNBN(8 z0(EzqdKFIq(u$c|X(|9IkDf^&5cUL){2FH#K%)^htqE)WmrBgjQ4p&S7)nf?`Kk(QwIAK~@m1Q8+7HVpZD)s1>&v zTm&gP&x(u5v!)alBywBo6icMNVPTFX%C~6#HK%{|2kGR-WMHpO#zr%HwAH?1h20*h z{LUQ3zF|grVCUeB_Skq=p{uXI%$hRo8uo{TlK9x@nrDulAXyc2SJaiSX?7&<+VG_t z365noD0`yyyOe{w3^B@1y@f-vAjZ*A?0D^_n$>$M#7*p&(Xz39`*NrNEOaCMrUS4g z`2vs-Nz|c?2Z*4JQB)0O#SkM4#W;Gx3?>sY+$qTkt_WytI;vV1rO#W6I$Ja6%_xdUn0rlY_o;;*yYi=d%!O;3W_M+m-?nGTmwWy{H5b?xzAya# z{UN4C^>-!wP8?oG%f}cl;ZBS(f?pj27v~*zir2nrV{!i-u8&fmOZRd0x%W@>TwbH5 zj`?t+F=5}$$6VES(>*ioWOMpc?^!h{oFlcA#O1d?CR3o z(WgdpFEt_p>=7>XK5L%O>L(Z!Skb}dttFdEa4PVxdex@e)rf-=es<=Fzi1mu7Jt?K zlSemvCF>fOXpKl-K7ISy1-EwAA8Ok4-0qGCs@Ehjt1+s6ZvVj1y$?KAjc$#ULD?o<``%&0*^jwGQXC$6j(aqDzi2S)zlAiq&Za?$WgL`z)Utl`Fl`O*$MNF<@=3i5cp&_vBlh`RkRFSF<3>&HZ&Xn{#44QFF7(R<70;<>pOe zORjQ#R2^hkg8f(Ziy`VmiE;}}m{c5`p|5s&)Kcm%|zt6GahI&1R^eO`aryg+S?smD>!SdAmVYg+=Q9oUsS?LYU3fey!Dr2$Tc1w1}G#}Va&LITOGnzYWy*Pft%t` z1!#+<$T{esh-OfAJ{WTs;}R`>GT6*cDz{1P8xxIArI0OEcA`m}A$4Ob`vd#da^<&Q zOjc@@)Rgbg{3wQh2zqOe5G4etM;1^kdh{d(juT)reH;IRytX=TE9AD$L7Cs!&hcB*^s zhMs-g@{@Qfjl6;sDZ(rkZ2N8tGE`m zJpT{+pOCa^A5E4 z{x~gRo$QqAF4zZmX}4-(vp;UXz?^jF5j6wW7e9WTp#?W4T-n^8m z5zn~+&lzo>Gd??jF+YdkrV5>b=tOKHh|saCK%&k4pW0cE#ZsCMp5PSKhKF_+CY-Y) z;zp|NZ@sj2+2D23ZNXhmo{}8C&rQ1gqECJ-eV^~P1ule0JW`h6euv3uhOjF@5G%|E zNk>gaeRQFQjsnLyXmcpN3zQWF_;g($emK};5NU~$hJTn>P^y8l0fKg zDYaPAW2qsWCcZ?#&%O8INkS=&5@aIdw^>8e9Kzp+MOxY0rL01#Dl|``>OaCon^cTV zJCAPRu0S;AN&W07Is{{^vRtR5Vhc7HiF-_Y6HcHS(4&(X#1cXaQx#K2_();@_(KC8 zbri*7@W2U%K^MnD1G)vbE#2brdJq~+)_@Z@-+DtBEK$|aYA}tJ;yAySJK1zNdcHqv z;qgAHLKOn5ZM(L|lL|VLOJ+8#8BjjZ1b{lpLN6h%kMjahN=E=KqD1Bb7?xMd(A45- z4e+{KFe%~4h`9$6wBPY)=g42S1wad}mJpdEjCvuNC6oDOJj}0c;_&;cZi+OI!uZ+- zs&5}(^3}xRg<7coE3|d=@}K@X6qX+$v*!ab$>65~1WeFJlb*yzQc%Mi493G}5XAi< z)tWopCoP{_RH7R6se^&|y7FjjIw@s>hN%O`Xy#l->}O^-SIsDiHG(=yvr!ck!X23t z(-s9a2$O9^d3go8&5iAMjZoB5Me}viEve1ZCnX*=I`YSA>=oQ}dXYab!G2C-34!Qp zRNSR7DcVhUZS5S-0gIQ(Xx<{2qKu|ZLNrha(Gna_DyZL&8DRw>qfvq!9VMf{UWa+= z^t}*T{JT05bjA_wLPP{Q+Ccg(Sh2eB=SA)R?>sOtSA7<0_vq#RTMxt9yQbDmshV7g z`~bLz$w1nCK6?hW*91I(oCOW+;8fj(Aa2IsXI%FTYaVnIg!!~{)oVv5jTBK=GVs%5 z`yOhktc>tQRAtQ1YZ;i;vcD?cW0=I&DpfY?4#m9P1`hZXF=4Ij+>oG4RQ@hrqujso ziM7(|2UpiD%ks|D8P8grt%n0mJEleI%2?;nA4VJDza4bEwMV%qci(}%MWH!zlyhN+ z9I$#hflR^^0N@Bwha|$W!V#sb0)EH~mc@A%YkoRWAbDxF95IA~o?ulnXyD7-^4y|t zUVq2?x6OU?v1!NGZGK_TrmvI^!i}7oRK5JvrNwVCd#ATU`SjN9FK(|+lf1|98u*TM z=*~&pmuW+11jxT8TI9yy(k6fao;vXux^XB%FOwKZ=D)_<4vhEvQErSCm(&t>FdxE6 z#knsE8$hecj8@wY_Sfy8>5rf_{`3RpykL`#4X@zfZrcUr1OC3;N8#`Le~IsfHTI&m zyA1E_74}??r*w<9#$M98Fsx`lVPXb7qT+%c23Jp&_+geQ;M2xYlH&>k+XC=fh^8<( zk$Q?n5iK{`BMM&TRSWu%2XL$MA5z#9H-HAyfO~d(tmM1+&`445ST?tvh);jKDl_4> zd&KvGx_hW}e`ouSpPGQ~Z*A2}u3suI!h7;So{nLzcKoT3=`I86BhY+FdlyEarH|ey zZJ|lvn!E)&ilTho8fvknAsWntXy8{PaDPB8ks>b|^*K`1pZh_gkR0QLd;rRVzoC9Wd&S|e8ut@acbn$(Ivs+5Dj<~IdJBC8sYwoxGaV~nx6CSuGREgH zOCeoMDW8eMn+|{|r3W>5A6x-_k)_bV!gfY1$e$YjBPK5Y$u~CseB)-DmBk-Ekh$kJ zYh}(9r8Q9>-y^M9?KC9FZsj&v{>E3XIb93az!@Q37_6SOq2a_)SyoJv^yQQ@%%%MH zObYak58ROG1;r~QQR7W@%;>!n0~tYrC4oiAzBuGrD04IB#}a<56?tc(eR~AbQK;{R z4<(>cl8AJikr}(pdbGV3+fBIpifO!#o4z6|IG)m1SN=dCn~#;`2niMpqa^_*Yy78U z)su5`1tGU0x1y}L5PTMxR}Pv@_CT=O|qDvJbLK@ z$DK;&6SJF}vl9nYeaiflVCs@%ox2u4(PxY2f})~T!P?AaRJ(cFLhecw>d5Yl>@3j z8oG1@wSp6HWvm}sQM0n`{t<4Re|!5aUu4%IE9aE14QhhHoi32D(`uyTD&07n(0Nl~ z-~NU<`h?0rnG0Mws9;4CZ64V{CuS70i0$xB2v!cIMd`Z)TO8p?&}GNRd_Cp|Rg(W@Bq@`PiCzw5&Z$AE`JeIM{}%pZS!h~JII2!3^px|S2> z75E$oSEoJ~TwN2am*Hgax#}3C6sO7WaSpXmzCs-4T~pTveId2yV(j>PHT=HxJdLyP zJ@tLn{TRVn$e%mTXRyzo^O<);?;rdgtn=r51{`bZS_a>Z=6>bR=-&>`S$LOyYy3Iu z!Ih$|OJ8tZf^r4@RQuWc`mORo+^egE^#M~^8txU~K7kcagiiOOfJQP%J#Z~V8E2&f zfm%~Dkt0wz==zaz~#-s=e7@#l-*eDe8 z!Ab^QdY!7N^%+-I2W;Pchk9BIlICXUqh?mj*(!g$&T9MUw@P(LH(aaghBfSFX{i0M zf2w<7raw}jVol8sWa*hJa_HC(o?|zNyCYppKW7gA$>W`;7PXvNF3#sa<9Y(|FX*-4 z`XT=H-kA8;ROQ5oF`r_MN5{6NDqjd66A=iG5qS=Z<3y$+Mi%y6$Z3uDoP#6A+$29S zGPXrJNvRz%=2rHXk?(^&FM+-ug1+`6X9FCZ-2iW&*ds&{ecxFmXp};1Ce&AmkU_-K zhH{atOmxJ_%Ph_+_8+T;tw;xJ>=T0bp zvQ45e?X$h|r}8f_*BnCJ#1kPnYsg_DI#r`N&O}xU08qHrc54*(t7%%?N}+<|YRr@l zMGN8$@4sgS$?}_PR8#c@AcxknTfngVm!W?wkv6m@{@aV4xQE zAna$SfAHLz0$q6l&4md8+%0~kdqk+DOk0`9C4nLeW(i0r(FFD#8TSd~QpQ-^f;_)( zl+~?dmHe7;F=rQBjUv&)GGX9)#ZJ9cT z^duhxPN}hWDF99>tbJ(AnDdGKLC#Bl80U)fTzJ!?DHDo}7wS8~vPUJossof3?~IM1 z)RvAMAFZkit+ILBPnA@K`#z|c@)M2;4D*B^jTXhd*S&JGHqhdcV-K>r(mHR)xuwg_ zb-L$NvHByiI!{}m_J&szmVfDmzK&;3=g3B*9MQJo&c}CLe51Sjjf;CP-qGC_0hivG zbNZQ%z8Ag(bO#fr;GN$@&L>mI59Clh6>kiWoC-9^Z9`N7g>Xn_aYUdz&+GJ9{2G}G z@x*cbFZbIJ(NtYFdhMV>PZ8xek0^h9EpW@h1#z}MR@Zv-k~QDjSzPCS&YHQptp4cg z$>QqAly@J^YP+#lyq?-fFCKEYADFuHXQx_@u85B+ORJjJdDk;Bk69g`5?{ z9yD44`fUrn24X$3T-2z(5k8ySh+v#9Uq_c*H^!zTS%miDz!~F>`R}pL+J}N85vk;Z zbOToyh09SiKA$F}c`OMQtHGEUnky!Y6e&e44Z-qM%DJJ`zFe8No;|bCfYQgMe{=cL z3)bhY=_LgZ=j#Ug{VVOYtuvC5XoZ8lJ8;hIi$|m*u%r zsa(Ng1Nsotw5xlHd6w14MSQBG=NgS>G~u(gO|J`NIUTx$v<*3P+Dd!364%4rXq6W)t)+YJMf^uV6-XCm|5PeA&GC5f7`^iNr@#Slc%Q|F$aQWo{Gg{Q6#f2X43jdpM zmB1-tF2_ghyKGB}l|WEReV~fV{iq|-u&#D2aG6tnPO05sG>OVL zm6xPgbB}V%T6X>m?BefL#e{^<-j~0JcbO_w1WGN4 zKClM*Pd%&wR*DT^&jlHrp`_bzO1VvdQp?KmSu!&-3`Q)I21!PY4?Z0Q9I?}kKz#<) zBp@_~1Slf$YJ80Q{a9D}F@7R{v(8jdShFBo`7Fm-su1c#l&5?8&^;I=1 zS}RyZR)(Q${ep$Pg$YG$X5RkR*^7))b=jicxk^pLyyUc;($v_h0Q=pF`PVv2qsrFS zmTq1)q|B(#ad?-{tX$ynwX}(I7VO?r>so~6q_g}21F8JmR4}-BV^L zyAHG8_Ql3XsLsz9Bcj)`Bm0#vB-fP}^={7a_|yEJtc3PS2|K#Q)5fMIG;-D{nd~QW zB-1rDnT8(E+tX+DF0AZNwwq^^PC_Mg8nzPK6%SwWM%bzZ(kys+$sR$ia5P&9K!?}? zsp8CeTqCPCGeJmjWRA3%WHfmhCK(Sua+yr5ROXT8YBhDcTSmU>!MkMi}-sbf}fUjOT3U9$a2{%AS={~i2tEM7E;rK1T&756)u$+ z4cZH<(=pP5k5nHSFesPDR~hdgQT|PNHDLlZ@#&wW(}U}v%eD6r_0jd*@8o~yekcA; zey8s87~MX4%()52P@F}5&ixaPp|}Mf6EQ6~CM-6jeiz|1_!!|G$uahQ&;+4@30y`j1Y3Q_(pL$67_78?C1>=?buU%$2P{RNJ9D^&0Sue<<%3=F`1s z`ZPDiFc6y16EzeE#_I>bCL-%~C{dym7T9&^J}Y(r^+EN=jrFr<`aCwD*X#Fr%se*1 zxeR=M`X8Qv<9Z7xfS^ih^J!9Wq~VZL!R2Sz_l2rMO&7*#;LJGPhXsB2?&#W6V~hyw z>DqB$Z{fBVcXn>ft1OGJFh@9TdA)U0HqM>9$bHehaPHg^-i zeybSY^1{O#7H-PT-L$ad!oT%>mBrmrGCwD`J|T9u*}UQB5AXf{_HBCX5hvi8x4YalO&{|GMl=K!P69_HXJ}C zO4_xp#nPgKQnn-6NggyfKiIIxWi7LY{Ai}-Tm5KN!%dK@pKVgUvWxv_B`SS>@pnUv zxt$%}p*+I+wkW=}W{d8x$}c68rN70-o@KAfu;K<5ZBYK5u|kc&L+MmvY%MJj_%Ul` z%H614GPZ=xBa`Ogc2ajVqmJg06o18efZH?Mk>m)?BdDFN|5LO0!SE!Ct~3Yo9ceG9 zmqYXt(l3bh9U2q2Q^(Mm=VO4)`j3%kp6r&kr

oP#>71t_4~XJ0(%Cb2CY=t_hXh z!-`2deIEjO*kDQQ(l$6N3!R~JvyjTd<&Q5A)wA)9zM2K13aP8z>etc9jCMP;PA^}1 zuSHuI+%X|Rc|zU|xhf)akW}#l^~hDZexr9sSCVezr|{Y&Uy# zrNe0W{SEhNNvZk8`SEh*j+0sU2A)xkQ7Aay~B;xUXA zSQ|NF5?a|}(h?cULZL9pWywZP3sqr+Q7J>#F?1l|AlgHR*BN$p2{G!l$eFj^z98&e zDmz05*liwleklny@>sUmWcD5H7)#dhQ!FX%N`%fv?c88H5|q>Ow=q96Zab-hC*TGO z+7gARAqs67n!3TUJ*Ek!%j$6Q4p2;siP(`Fgh-O6ftwp_xW~i|EL(Opv%?3JgF_!5 zV5t(@q5N9;g>p_x=v8*?V3*jh*xw9#t3(SvbiKIQ+VaU=_GaW??I%rEu5IZtdgbaoR&@O@oSaJqO%E&=^0bwYMpcP#Sd}Gl39#t z=r>}#F&3PMEfIsyEgjJJN}I|0+gV|IH+>MiI$J|8}2C5=H|ZOOR$Z)ezZ&f!in z8o#Q37x`D}cilhE81=hKFou5@deNO4(~C}h4&CMXbA+D?zZ?I%c;1SSL0)F;`|$7L zWB7Mr%x}kk4%hwabEXBK6Xx%!?@abVea^JtbHc``?@abVedl-BI}>~t#WdCL>I;5X zF!w{Y2r&xCeP~P*#*D~aXm*CqMw;NEdo<1lj)soKyP{Fj1rLjx7Xju4C&fS~rAs?0 zZnq$~J#OR)@Y%EqoES1MS3EHUjU{SAHu1RuXms#1+4Xs+hrpv!TxX|xhT?%B*(N^- zOwQ6kBpR9{n4w2rE9m?JEJLHGYeM+){&PcO(QgHvDuX`uVYWGa6={hqpEXzYjayw@}2q93zX*pKcjL|(+It= zt#w23oc!EC^o}XBlq~25B1z@*`s{J8xAq{ju`^)HNJEGXea~@I^?4JaA~+(JAT=Ju z6DVXefS@G0#^Ns6fjLq`UO~{WLe;)LpYSm5;4u6gKNR%cBgfOEWAMd5UpV7EUcbeN z`lR5jdEH(Z%oWvqVRluU{05r*%Z5~2dRFxOl#{DuS63TA?kq7z_UFE^VoGC{YUY7q^=N<_H$6;{J29WU?fRss#z ztym7neZrM4*eZXQ}|1O+Wo$QeomIH$M*0H|1L z)`y4*xGmAWbl5)uU#ZPvO|)99Aj3xMa&9&OV;r@r2+=bQKn_JfO5j13s=P7QfL>;K z%0odTI=P#@H^z!8$=%9#RWk}d66a(u*I{!4Q7Q4j%c0nw#~f758PHI$MeR5lge?ZY z4xRis{E3Gng17=0j=VZV*()kWwh#~9f`W0lgl@LYYTM5KveoKfUF(&)6%L(V z`BeF((Gu0CT-y>WCWD(Jl>Jk_HAK!aIf?9EWmTkIG}9p$L+?&#yM}-zJ=ui*Rzj~2fF_lhunbk+91^V2 z9w1#tL0R0(E5d)`blSximiclI94@Inw9wbhzPr>ve{N>~29IwU`(C%--wl)EVmG?V zeR&%@SX(^jyA|V=7wf8$vMg*3p`M_c;sN#Y4DNEbt9CeOtFeDm z;n%D4tso}YG~C4yS=1L5!tIqBBAHn*A47yu1b`c@xgdwA6zv_AbuTC=s3@q&&&~1& zXCF&_e<|_{i*uFpXBM5ES~kchlu1`y2xSu6;#%51{hwJ7rG@QFjLlp)&`LVC9CJQ} zJ>c0f$JYT;AAVmaT{69e{Q12I6XipUi8%DL8og_*Pr>`7_SpOmk@DSe%UMtP*J zDy1NPcWRYC7t*yEB1C6<_$(9)w^8Me&a9q>`b%cm?LxtfETLI95Y0gJ#UiQC4vW;W zwKybE;~kR3K+&j=z~+C>XW9Zu1%MY66c-d174kEdi!17h{XZpRcCQ6ltSg_h zp?mtrlPsMhbkv|IwWH<)%hLt%CqtPyZ?KKQvs5h91!ia9B%$twTu|-!@UznfS;MJf z=B`nP2u~;$Fvr~N;Plez5CyNxSAC8wa_+6SSF0ze`p8mMmDDZ_S0yS|bgVj=Xpa1f zjQIgDCH)7^jYT*Whya)a=RnObM?xCXFaY;H^e7}+CIMPRC&6Qd9PuPQP*`eJlY4j^ zhVCD4U?5!`Bf}#^RCnA#T4vcISmM(0FS$r1WlV#V+tvC~OQU$7lI4gW9UAoj=BFT#(X&87`hwr z=OA{b;o`#XJ&no-w9dgXs9Ah__76J7&O^ zZ4vaqnak833wm(;y~_#vgh7@NwZ6hf0=epF1h-Lq{&~Z(pAH*}=WI?J(pChHK&4t- zy4nh1#J<9EmH-UwwvX9eW1N2dP_%e3Y>)HT=PBP3v%0!SpEWAia{09i=`5P6^GJe? z5@;8|dao8p7pjsMlwu>QrHxwz9AuqA()FQz7&2~p5Os`}>Z0M#ae=+64-)pESpDFz zs9(h=(ejg@wNW3^4P5C(sXV}C#|o#R6_+jDo*r6b^VqY)*I42?f(j?Hq?AG!ce#JR zaDA@lbWz2M(QB?;JJ(;39lJHN=#bI3G8kDQCIpRat9)tghCBn?kHr*!J|0SLzb@8)_Iki}I(ny@br z6Ng$?E9j4;M1mS&b7{}KaGStIL8OHsSD=D?u(?w;t3Y`?mDG*E13q;esxz)Qkk^1F z#G_s$I+AKY)6^&;Bkn@ni8I2s5G1t-tyiejjvJ?)I5A_%xfu&+FP+BZbyD|GYs#uk zr89TMpK|6eRL)LWURbj#w4qD&H7)s7OIPa)bMyJWNM$kDg}3)Dq(J*HWugu z$eS#tBYYvlKLT-GA()tCT^;W9xFE>TeF$R6AZi4KrC!&Azy|ApTzjN>{(Mh{&Fe7` zdL!uep)UyG52z#%m;?=@z!>7=1}g_6(@(+Lj!=%8wbYBRORYOgue+!2m4}NOTc)me z8y0((ZT>=MMeESt*qLuBJ6?YC(C+pb`P(*lUDon%Z`p9}nwQ0$^TaIw>N}U*@|4+S z``~0s%9{TD`=f_`#r|_jjpACSoNcYxG^x}ZWi!t*B~&eFp1XG~$#pJjdVSdIJlOwe z50mxa?K7xNYt^Ev0vua|tV1IN;v0c&C$cyce^O>29Md@RB4;XBnTS!D10Rxq!hlW) zSN9RnQZvEl1-dsaDbeMOi9+=jnv4;+fkXh2WQvZ^zVmhx@U;!7yX1@c$s(YNkf#M9$1=DXBlwJ!#gu9mNZ3*f)nhi*6ps?lo)wG+jU4ul{P=!-|_I_dh#Q*^+B%wc*w8#71SY#r(un&u*L}VZjai z1>Cs}`u&0)YpmyMth!z58V6bXuJR*%jj787H4vp(eT0eNpz0b^x*0TfFu+uzgUTjm zAXyFJLc?YA@YYw)1LpEbyV5N6qa#3k?2y8qtuc6f#XHu|>bkG1v?X(vIX7zR)SefV z{LUHw{h2!CHHMewY_9CEh#A7g61V$$A0!3=_1Zz zj<7H=Ki*}O2o0nz4c8!7UYia=5XiuU`Ml|Ai3v^z_^(Znq#Tx`!DL91D!dG*aX72) zH|PvFekyXP0wk&Um0m2r_UDIxbWXX%9_)H-{rtQrf3&MI<*#gIQ^&pC_g$0ToV2-U zM&|?ZwWl^8y325xt@Sj`$eF7b?~INU-}zy8X7lXLC!!;sm{L~IT!G_(9@)~nm{%e+ zD%cXxlt#vN6cGrGv=Fy)iWUw4DF9>`{Ip)}ToGY86}ts8!D=KQ9d8`GmNraW=->x& z0pDgcGv_?Q0LGb7?u3Sm!#JUm;!p`QEMtiUn0QPWRm}L$qo<#mlARPCh@F{QvUW$>8Hs@k( z&J%wkGZ0Ab z$s2ckqiy|D{mN6y-#4eVuG=$xMr&@@tOG48?mxL+T;A_pcdVgyd2Vjemsmtu%IpKp z7yg5vRct!+Am0B6ftXmly~Tj61*o36k3g44!V+WaQOSwW3gD5dTfmb6x>a!GG;A{I zKj4^yLlL5aRheKcWfsWwz+;60JogHNao-#@j^R5QZm+?Ii{M;PQ!2Qaz-dgk5R@4w z!ii#X2K%B`@*|7gxFJnS5?#u9HciQj*OQx}Jk5I8wC_3CFW58u?x|EOF^kc6?2_^i z@m0B3hy%bOJK#s%C9(nysIZ08P&d4r5~V{BWAx-(T$Fc~%2aYb@em93b5 z8Z9u{=V|d#mw}}4U6u5bn%H`{EX+m9cbC;u%uxG-2yEvRp>rgm+q z?p)vDVdwL{U-{D;6K%SWQfikR7`XpEeiu*uN&E=9BwghVWpTSXgP*j1x_L`!KUH&? zDj{B#4!lD2i4(Ge;j!QxQ3lYg;#F&ev;6p*1)+7`^Sp=F7Q6~793~|j<)FxYqFPI--{>O_};De$e(|Y^zHUj zpR$#Zkq%vlEl-l60n%dTBVK3}2R-;wyyjzJ1@a?`D0oSLB1W>99$fi0sxePyXM3!Y zE;*lx9+CT{wwR%?YRCpviU`wE8kX9Bq;|B8bB)jg%Ko3#`>C?CS!sP zlOW}OHvWJS!u}gNFXlxsvlCL6z!A{`B8TsA|fq3 z3KZ*5k%sgLKtO-+H0{D?6Bgk!`Td6<7VbZAKz;{*s1S%B##-@vNM7_z0cEuoPF{&3 zAe=ZwZv0=c;K;v#a}P_#qKTX{y6BjN$1FZ90HzueX0SitciC0$Pztb2xQ7WKxPl3S zN`T~{&b=Oid8OWgN36_ zS@;KQei1N?c!FHnqTVCXHMXjzsrH0+mViXO(rgk1E$kfd7@$N!kSb`j!cGiC0eF{U zFM~#H*r_t;H3t1om0r^5i7ZisYqb*}s?n)5m<5OvDfZ)w$Oh5TB(g3Wni^E9w${e_ z3b1YF=A?rPAz_+rnq*a5@%u$=vZhZh3VB8o2b+0 z+cEtFkOvgh)Os?iMA8T!POBCuhD9XH1&B^v2oOG)UPVJcNPv_$8AR;_@>8jT;Jo(x zh^N#Vk1`5y-;kOyA~h_X$Tg-)%M*=KF}H{a))B@;5Ebjx3uT3X}Ii`N3JUvQzXPb`|T%_Qfbf$sb+E0?5u)?H@!I0`{3c8 zu1evj6T1@g-@NDBA6Jn~bizl%N9x1)TMGOr%s`Ga3R=;Vg#(2~Dh*@;mzbD2@x#FW z4CC5UL=Tn?v^+}v_#!cMIpY-qV4pX`O^wZHPQcG%?fUTAL6t#=LL2ZnkOFa7DfvYf z+ovGI6(3(Gyyvdmu%Ko|@f%foyMJ!-w2Tc`&$w{o^0an)a!2mMm3Y+qCVJR#`+>fa zjHY6VS#=Yi6x&mlT-&hjk}MnB6PLUTWpb5W_(*(`uPF|^@m=C$N?p)dTOjBS&CH}n zw%nANl8izs5@V?UkgK0`+INk>{wCu>sp$tOUwHPn6E*2SeLL}no&D_zU-x!m6gd0a zsfqHR{PwfGlUUp7KSee*D!l>e6w_{^pwSvYqXIZVJTqwwreX8|podmOpn{ z#j%Ik$FUE~uknmeP~P548Af2M=>tJiCmPQFF(l>x>_KQJ{tq66_Uk`;5ZcrKg9o9V z|IZ$TPRD=zApglTD5nfjYQgmB@fNZ$)4Qg3bsg+5KQ-YBI+~_!Ab%^?naR=Aou#3kg zmR6JwT~XB1Upv-Xy{5BkeVVtsu6=PD+aRvo+Swa#XI?u&imPoiQhn9QcVrosT?=a73U{d75w}V*HP)A91ZOq3 zE@C@dHuYB_)K?YB>onJ^0gyz*+K+m~`IXG9o82jx%xz3-f{HckOgb~}o=K}Sk-ahK zv@qY2L2Z!KxREMqZA7LX@G!GRM;8gq!8HDP(gMYWEl^)ich~f3Elo95fr`@N+#Gi* z^l^eg$K`5?YGWY>_)>|u@e-U8Wu$}Pg7#!(=O7SHsLTcrR{}-qWQeSag`)JLVxg22 zHjA}WMVc}9is$7&KlqURvyU&_{Shns*6vpi&yhBI*5AMVCix#Pt~vK7Onc>XEa?VE z@tm6c;@S&x*Ofi=yN$(5VZ?52*KBOBziemM#C?<2Y@Oy;UuKE--NMq|+qL62@~h9j zAiwmhEwff)Sh{;!`TAFw>887wle4u!_05pQbnL{|(77R~y4LDSQBS^ma*qn`R6t3ljVyqmk-WmoilX_ z;!0MxN;K+bvC+|MyT`A*@C3xO?TZ8Av(K|Grn>y{Q!4q9=bw|mb?WloTUR&q7j1o$ zId8k2x!&Bm^-cK?w=aHZolQzT`Eo)>hYq}I%pfx72E3JuEoNH!4tL6R4*j!3`UPgP|q| zE8p+QOi8kWk}d39k%QraOX}s~nLMnJ>lA+q4pjmrb;{HY@(#9g$m(goICM?#O@F*` z_Uwa?4PEqlU(I6!eRr-J`tJUgD;m}BoycxnWfamkPN5A;$ z_${@IFZ=YG`&iPw^RPw_*0_|fF$*{uuzzcSNW<1*S;&uT$$vt94gQlxtJcu55>c@y zFPxM5VWr%jk{3>1lt^AwNl|W2hMVXKPg^7)S>!z>3U+&G#DhxUi5loYYcx?RkOP_H zQ8$dY%Qq-1y|4GCkFM;Peels0`(N*`eUw(Z;IaJ$tA_Ye)iB{*iy03qD;}>Dm(?!5^s{U3m4A1yuO9n)>XdAerX$-Xs&X(NZ(C?f911_U5kgTm zeCcEiVU%V$z<4(kF^2%=RFOkEn?#%o~6~Jc9hfP80cb+2Mvf6abyG_6vf9Z zRm8)j8SX_-9$0X*r(6EwM&{f0o%Z_5c>_!nsCeU*C)1{-k7iwcT()Tc_Y0F0 zxAX12wN{~iPV0$>%8TTOn=6lqPJrs>f5Pr=|5oj<9~Xo#jLsMF^pML7r#_U<<8q0| zB-A6JOADAmU+hO|ppI$GX(%Ls3_!#00%XnpwIDC1%rZe^gn3_#DkT&VU;GSRAbc8YjvI9ksa^WWPbVnPDMsP`1Rw1hyQj1 z7wC3&V1c(@&uZti&SmVP0dGAr5%>@M+K}O$>K%CMY*i^xkefp5tvFo`Ol<}Oqs9%5 zjov&#XVwW2VEE90&IEysgt1L#^ml8_YUpPT6uHKLL4-I@z5F~uTKqhTw`|Q*>{G02VGE$+E1nywZvE8D8L3Cdx2S z%+NDx={EO|i;xs1>~7(D>*0e*!@ zeg&}C{TSNL1|cgA-8j@9lN>fAh!vUWAQOuFeTwJBV^f!dJ7bCi7A`&7ggNf~bBflj zTiKtSX3lV>m7do-|H=h-UDCE5Pj>G_Z-0AI^5KXM!!c9S66e=%B zP2nbCmI#<`D19spQtq}ZCp$X_!Pwc3cay|XRMFzhq!rU zQd_vkuNKGL&3mVss|TmSgLmA}6z1QC!7PSrpLH`3o=OIC)KyVC$kNl%3!N)sRm zpVbIJoI#CX41-z)4G^55qt{3Pa}b?1X$~afxN$+uHHuX;URY&7gQJ4OO-ojW%V9AC zpF(PZHkMn@Aiw=#$C^dA))(|?Nef_qU0PdIGP&Z|xV&q_4I>Y>Vfp9m%wt)6?Irm= zGZwVYg#Zp-^fw=u$xG^`CvJ8BYEHYgv+?sY?$Jk-(YOTqWT^E{m>B#z9 zlXAPVvSwtn!FErTwzww^dLeSRL`+ad)+-J6#*G;z1^I%OS`qpDRwTYOl=#B%JMvPL zj29%s4-yPBHUT($exj)RpMk@Wjugdn6vdIouz{39s!&((qZJW}H#Xx%r;I1SNJa!)3^)I~T? zSN(o}W6rtxg=?SLxaHZgq{q$n+B$g{**`_cn5EU+KGRgj9ieT&v50ZZpe`v!VknIX z`fQJ&)4)I*)Oxi+4_bCXt&hnAlAR_j3q&>Pu)t;^OdCbsgz5S*$Aza;b)-gNW6BEh zkVZ{P0KE@@97?v`6jrAqJbHMnqye7n>0Y+>QbCFkualfOrC)J;!bS2!%2wSWJ@z}CO15%bi`=CsNOHMVGs zwC2Jct8|Agg@qOK$8MHF6Vnol-$Sf~S`z7^SEN6MeVI0945j*+(NK^B!n|sxjjLfI z@Hfyyn2ZWNH1iQX?qnbQyEub>qbg3Ti^CxjEdm;bc3YvTPy&VA)E0O-3Fz(utb(qr z!Kg9}tBfGo*Bf=nNNZIVQEP$U50FeR`Ta}c$?u<&66&Xq*6HJR|DS($f2b(IN~gG? zE(i~*q!=EQ&+DNP-!!z)+Lq8})JyST`XPmBizv)UhSr6hL2e?*!?S>utI@MMQHo2& z1L<7pg^5mOi;{EK+=KmN&3*a*%$166c2HCE7nY0B5a1)T%2bW2OF|cC z0|4a9M8q1Tu2CZx`2~0;A{<}QxEqZm8l$k&4Aw7=V1O5IP-_h8orsVkryi~fph6(2 zM_-R7_OOGr3Wq&Vt_ZQVy1X&anC~Y{Ax1$ttYryhG2$eTo}2D7PhyN$f4X*=;{WGr zHlMsHDolO`W8C^F3PyN2;`@Kq>5F*v6eh(em~BzNA7>9adEti|n(b$U6#{!IO$N>4 zO#G=vH2`jeC5xJ35o}3tS4TlNgLBnStTPOqO%&}o>~vd;3+)w-3Pd5MQ*sF2cg@@$_Lvyl+RwY*_RCwmJAABU(*^zK@ z`q9DT>x-{XyI069HnZBafh*=efU3e<;{`o4w=|7i8Lyjtq-pz{s_)&>dD-T35@wZk zEhx;2E1h4uh?Uekt+Nje9DFovb1dp0uQ?T9-K#>wFu3YGG&Eqf=sf@&m2MfjhMRRBEym5A1rs1hSU--#w_kXy~mm&Z86RCaQb%$0ILSJy-_66?S z>VSdx+WDD{P$Qk4#7XtqlB0;?gK^&pubZ}RO5zkN{ zBgG^?m+h*%XJ@rpP45+eII< zjocHL7I_wjBUij>Vt0Dd8}iT2+i>Z~%RP|IoQ=6JvXw$h^U4`Rohu-qdF?wMgK)Yl zE?U|X{}Q>q?Dg79+b5ddXr2+cplEJs9_xdA#vZPE)c=qdLfZes#&?8I_gz=^$7>4f zAS2Sx7WGMfR*dj{*MwHV9RY9_?g;wX49EnlC4t7?sVLk*fD_gs2zEu%H@GiQGjw0@ z1u7Z(1S;8VqN7p;<5pOZ3qIQ~RyB;3*tqGTM z>v1slT{rPT)PWP8FeRIDa=5f+?Yse}$++!%Ykv0V#A9LqjdaL0JT`EvysCMjxv43T z2jm0cA)tY`kD=zgQwTnp0H2KJJ0S$Z7dWi2P;gk&(@7lZ-gMY_N{wUGuUqZrVu)BM zxU3N;mDcIXRMN0Mghe9vSzUHvuY7#&(S>sloHJ|BY#=?JN}JhMeW0rRf?3x7^6EYl z9u$>I_wN9KOH^c%k4t(1+ zGp0?zs31N;l!BZzGzFje5YZHf%+dLYy8(|BHiT*g5=MmZ<>Ctz1#0?G6tvh-12SlN zUQarT0bEVAs$2iLnouIWN@sZ>YF!*9wBM7OLxEi>>LXWrHmWu{o*7!dzHTreR!ghT z+c2xCLI?zN+kSHU;@g%jSz5oo{jIlZ_cflOKYrJ?V$I_AwnRbb8(A~3xKcQpKfPqx znzr+2hmK@v152h|d4u08|Bjyn0ceMhga&>N$}q)iF?<#^jUs^;ucL%3LSm4X?WR)! zhnu8BIRlawX^<&V%pi|qxReU>eE{#%SeW=Yny5PUNMvB;2-cMKD_(hNxXy$$f}^a` z#ZlKtywWkz3PJg2%(k?yvHsw~(S3zU-Yg+AuWZJgb}>gNpErGa)3)OBo#8|BL@1=S z_xRcenrc%ElY3cab!Af{D26o%+( zdbZShlE~0N{XpCsDsEIfe6pE=Xkt+YrIZ_c`1}6ntBY7Vph`2 z;oJycrUTB0aA@L?u*JQ!p{f4z<@=7!*w@GGd-|$NmNz#qFUgzU+3v5HNBb}PLrogK z`}gmhch#aNvE`e4d)Jg#S2i~UvPegqdPN-K`(CeF9U3h}dqxUF-h%ui7zXfdl3_rO zIHC}c^@L?WGF^vcIurvmpA1qPvBUC)nw#8zUng8>gv zu~UGtrGXOK%ZQhSu9k8odgoSwk{ottu2?7E+;wzlSrW5pY)c!q?V2`RT9}_|UbS^= zcf&dH0GA{!8@a1%@c8nqWrN$U`9)Bge)1`IV(RZV>FuJyoMg=^VPMI!xjFF~ zt8wf5MXB}cW)&`;o2*1S`N)P*zGYDQZf@{e^Tj zWve;8zLHLe#H5Ve8Z^JqUS$SsID`&YIbP!8d2IAIxWM65wpQyX*D`A+4$;1e><0Oo zLGjR9FLRkEPCUQKoah)Cak%0tKecVQ*DYN<{`e}Vt7Tql13Sd-mEU+?^X`{9!e2(m zUpmHmer!nljI|^i53^S;Ghzz%3Rb$J`j$Q&OS7DmH)#4sP-K*4VBM(}2=|m~uOZdB zp$+9Y$H*gbFIZ2C38^OyCUGH4K&Ulf*VKB1O%S=Bmqa8z9}`W9z@%>|eO0I)E8fanG`X=Aun>;R}HIn5*n(l1M z3=_hO;Nki*+|(I{x57X*}gq5BiiClV6z{m~0AztqKi zWx1kYFZP#BUw7{G{VTj36PdO|LDThVv@8JGoVw@Khhf>YtKJADmbjsjFfd(L;Y8-Y z2{SF5AtW_)S2D6}&1A=cP1uc{H>=S!fZG5j5V$p3Fv-KyK@m(6&HW}OK}StJ6N5B; zos!@!Q$K`P(*E5~=?`VARPAkz^{Ax<%ggfpq?;Tl(#9F1M=e@+RLb)K9y5gVV>owb z*@h{VN5a)=`@}0JRP1wXgf>!b^tr!sPo|U~u|d27>F%OU`68Sj8$8S2$p46B&E!XE zm-xAL$aje6iT}V!$|7IWOg+F?*u!+Su5H~1rmWOuRx}Q8=itw zphV%bg|B5K{18#bc91*CS5D4^6PJ;n+)St@hq!#>yzzGiZSnE8!NLu@D@J>4@vK{) z2*OwOyb}*dMwZ{Du^T6>1{+RV!zEkT5shGjFBoUUJxr1tq&!|<%~UN5Eur|NnT z?%(CX16Ujy&;o~m%7#u4_$onqHRw9jkU>XxqMl$>C=E$e8bXW+OAp5wE8u_w$|Gp0 zN>*7>0^m+&dNLpfG@pm7k?8is0HTx_IPBW`N}~R6gCNFc?CrtIDN7D4%ft7@GW$%iQkI!P&!lDv;>f)>CwOzcRB%^V~wmAK; zCNCw}RyQyxSmiyIlnaN}AIQWJ#lulv_f_sAHmGK*ejjosGqa`<74r~iZ~RP$5fcU- z=@05Xn*-DVmB^7zKtrKvr^+N~OxSOOPSjC|iKA_#>^x}Q%%pVz{{_J~xs*uKBj$wr zXza)AYc_~_x{;Jg*Vlg{zbyaqNA`!ZRH_*rt<*q}0g`=(^%r8%}f2^9+=-W24MzJ6ki#B@itBIBe4DCe=Hw|5Xo48>T zlqQ_|Soka4SIUb9p!*Z@naP-igNqrPk{zaA3fERzP75mC z(Kyd&g1@RVf#OJSG7Q82Bv$}m;2)umg4{<+s4ezm3_1>K|Mx$qKZKe+@P#x~vH$}; zQ7=hPqiTsQA znXI5DIc=_|Gw7~lxpiqHomulb6RUa+*H_**v~_E02y%OIyw+VJp=NQV#oiW>Uq}+9 z68^ep$v}!HZezns#s0Mo6R+amxh`k2S)AZLNQr7oXcH*X(3qt)*irLHXF7nEC}svi znJ|DD+CgPd0lZ@^@rm=ggW8ntn!$|z}!(oO>~HbBe^JCRTW zUXR1rC_k%!AbWW$1d@)t6@q}h$nK}5BqLz4hNtY3_Ze}=81;yT?Lhb_oK-GBgGu}4 zD;m~7b^5BKl-f~@Gt?R1AZKw}0FW(!7)a}_Ya6~aKJm*}4qwmHNrB6ZV{ zTI|Ia=tYQt>%~fXpT;2U{`cMp?ME$IQofXrehQlUiJTPqwB*8bQxrjw`VgF3D}q5N zL0rDD#fkZc2%jIkk{{hSlp2zMUY)|PR9aAeyVgC@>A{u4`tKYca2NT77ZOFWq(l@G z<<)VH_6k<-lIXSMlJCWR$~MdQ;kQqo9dv)Gf2>q~?Zr@A1-MFSNeQBe?s%jtscnL! z=ZbtUDM2u60fIBrw|Q3uwOz!1TSMPQ!aMwJ>Z*?Y_G#}XtbgpcPkVR#Y2QBW-2@`} zC-1hM_U+T&?Kth*r~Pgk4H5g{r@cGzv~QpG?xfScJ>}g~J~#G_5uCl!(?Rw4W)+T% zQGqn$>=5?U2<$1^et{w~BA(VktqEv?Azh{zk7)Er#+@PsWe+>f_#}`rU|hXfG@8Yo zdZ>bb{qtzk&-^?MVg@`J0BNYA0y^-oe;%EZGe7T}KOY^dGe7T}KOddLGe7T}KOY_3 zGe7T}KOdd+Ge7T}KObFShGD*F0 zCl)%Kn)|`o)#y|KO;Z^e_C$e5};dEBzF3U}fPGOxwujtV zi&=u`PJNK0456QZ9w!}o+ zI`+koEwOHM#nyGmUU{aRiKKX;=G=Y3oN6~)n61_IAp`|HG{VVL?BWHyiBc*%L4aUR4aFaOo9E_?As45!5 z{=L%b3_q9q_XT-5*{Db0G&t-r&Cbyn2XQ%4?WmW=wuV!XM*$v@$3{wpx#H#HhuQYY zCg;(Fq`4<9%O7IrbyQT$oLNzU0-*NWUrd|Y+RPii#SODNCcFrFq+bR)Is*7<+yTJ9 zh0ob#nyc|Q3p!M8!hAri*^_}0t`TC#GIR{64F--vq$rN+ej4&6!T0(rN#D4$6yFe2RIDIA!d`5EAZbQm1&Ju?QpR%kkMegv9OgC9{%!3pJe;BsN(aW4>fplFDmU%;h#QvhMd zHIx-2$~CH}gtcO{DZr!QhC$g|O7Pm*Puw-hK4(tw*rIvG`Ky&W_uUwTl^tN;FV2-O zUfR$?x(c;TLCITXPe-ZA1=vMtd({G5#-FCc=)7BjM4b_0TA(7k~_3bF;rtKP4(gld2v^p11L*15ZXLm6W_5c@cb=LSsNJMBrPC9!shnMfIJ->XC$|iC-Si zUe12J%yaCRXPF#Wo_*YnNtu6OAItZrDyfN7Hh(dzOBFGd5s6&+1?9Q_Ec})2;Id4o zvw-4{K*(?lj9cxh)VNT!z(l?okzWEn*Mn7p9Dr53RelmO%H3QqRs=enx86E${OC(B`Kw<}uU^!+^zlYdmJ!LLXTNCEW`xz} z`0Fd#i#Xq}t&~k$p3FFCsH&(5E?OGA{F-`|bZRGimwlV-)p)G4hH_|`a2>L-S`5U~ zCfWgqMI{gycoyWs>q(eEhZ2Gg`cy;Bo&c?(QIPHi%6J^AhA)72Dkra`Sb@G)RutEi z)Z_t=laX#MN+>c-0WK8ML-reqGyRikl6bt+Xtw#LPtkAHJR%OBoL#1I+au+hd62`rtUgK zuvbXhKRg_~WAUCn$vJ!M-um3e&Hk(erL-VSuV?Yax!pbm049&dN_zj z!n6NL&X#WF`lEt!gHRm$B2*7(PzjRwC=TpK@my-oTTf}7Z-$0i;cdvRC1 zb!hyOn&`!0VVqdu>M&kPb&j#J+kPjMg9uUZjWM*1m}8TB;l z78zsYjgd6gi=1x9hzbfBQYg+?wKZ8eA!TKx9nQ%ne;T^|>J~hAHQyggr zFL(e+*e4Lvbq3Y!Wzp z1Tgc%uHg{lMr+hmia@qb$pTaSpb`nufvZhJ+kisF7hG+eW*8OaVwaav5Rsi_txl*m zqdPU+RS_}Mz;2@=%mEm@t76Kvj+KHve7!l~NE8PrcI6e7&dN!zNwq9mxa`1yyLx5D zjKa;Wwz!B?1Xo#d$Dw{-Hy6gnp8WioMa;PS`guKjLM4|}2WK9uXg$&K!HXiAjSxv6LSKjG4v@gYp8jDm(>;APLDf9crtP7J3!w z-smMF7C@Daj)@UDcb-z|x9uB!6f(;= zld2MczUbt+)2LueB?>91MnfHpd{n&>1U7O3fsS$Fhzh6O!C+-V_zv=BYrFcXi7PI6 z9@X3JCtvn2wkO3UWjUP9Y2PWTo?mg^_gczo*|kkdIPu zWJ4%|WrM0olb!y7JY@~ep&PI)Oc zUSv@A9hEI|+>Hh7OT`xDk=g8-4j3nd;ZUhG+8g(ZsU1INY}N);bfaHJNenlvtucen5ODw#36`%e%VL z@4doxgQL8#tat0nq`yz0wxh_$PrcKaA)oC?rcvj?NGfBwH-GkgQ{#ifP?5jkCb-EI{DNBb& z{+Uc7HJt-bvm;+83v8>BFRJEuG(h&t9gl*Q3povqhl$2gABYGf8c!Yu1*nY@%3J8S zfO6B5cZ@=lh8%V*A6h&XPep7;xQ+nj@g^~UE{a=_fYV`#i>mEtVFsBZn*Lonc|4}5 zub6yVInGI$Gh6MrXyht6z>aBJo8;M}Mzc%7-qos{$XmklB?saB2j{i;R zackJ2n(Ai@3Wk>K*i#fe`5L!bJ%r<4Y<6q9((k*#XP zTNFoIN46NVV($bpi=bqz+>-npbSuDFnO3;dR#r}y@ZGu!{TiIt?Xi86US4`WAyZ#|! zy`ACxL}n)rcReF5Ni@OHgp)0TBaryKkk}yO97${-Ai>#&od#5ZF;lO~WMY5;VfYUm zl5{sGo2K9oR2vliO*z{X|B*|gM9-w~D0~VB9!gz5C*T%t`pX;m51V_-oA@s+ZUg%q zo6V6}!t|BfhlP#k6U!XlzEbWpgVzAs4K#Jio(d+ur!G}vsLq1NM9hjEp~5^OYl&8) z6y3Rl!8tHM62pU5@TDN)<9bLF4ySzpYxB-RoIvPi%1!76(e1UZbEjwn_pe;E-`}GM zdVNNQc4_=M11aLsV*mIXJ8mtZxb8*Z1zNa#3xO?y*GEs3fEJTtgd{q(TINN z*$1u@fQgVeLdhJ;g(p&!Cq?{^5dtwR3)G0h6DpMz7*?FZ5p&9O)Aix{6I>-h#Bo~X z263DOB`{oA11*BmYFL;kqC-+aVb_W~<-JYkmXvzC8oT9x+>~ElFwk(${+adHUYs}3 zP@Q(4$Cn^IH)~^gX;NwUhW%D3j+<6epIdGbS7STYISc!mr;paCCw_bA=jVGHaw-F+ zWA=e{qvE087+Yr-G+H2g+9Slny}VwYLiq*oN2(F=@hu9s)hm$HVR(tCq5dbK``6jc2&tAHrw^aW;_h#LLSgp zSFpF@mGX7G+^hj*2<;zmm9Ha~$p^Nkn)nnlnO++x0Nn@L>p<*^3crQZ)Bt}BNab!M z#*zM3p@$O=gpvF1+*tGs6-j2n}UvQ zbClsO@yK;Do*~1?`DY^dbfQX$_?N_K6vIA-Yr@MvT>xzJ*d1eR*QC*x_uwDo$HPKU_WEw_3stEu z4($iViT959l40-5&4Suo0cjvtqty@*5$K2r7l=Jq(-EK|JRJelj3P&XGCITX3qh`f zuC&;D%`0iJA63E9VnD*cG7kR`2q?G*v3GnD4vgdlwJBH?1SLlVqd6$nNuqlaVISgk zCAakAI1@kEoIxj}+vqZHJCHi0nO2rgTrF!S{=U_b=uFynaq2S7jMDUSSE6IeVZo5| z8;AWfOj`CxjKJ>G;2%4Tx`{^=p~2f0K9|;W4xJX#7%7Pf;Hqk=4wU6)XJY0$pWKWT zj(Q{HKn>hzSRgDE;(c&=0p~ReE;)WR=xa=80%kFm7vy=esA7WhYoxXqsh31DzX}nA zlDq<2YPen!M$AP@NT?tw5W2~KVA4AI+sq$IO`Xx6-P@fK-%(w$tsr=8VOk?AtVwot zzihRv?RvH0!*)zS^{0OM0L@slM1R_e4qu>?%`IONIQs z$=4R=v?}=}yymvRh|a9!S5sM0UdGRu-|KOw zg1-TrhI9ya@+Nt?ukMnM0O@?2QJ|AuIdn#W`pTRutr1D@i%jjJmU?DQwE?GK!{ zsc7C8RId72Prheo&9d(N9`^m7{Niop!*lX`l-@P=*(5wP(jgFRKf1iMCAoaSUz56W zcFRQdBy!L0Thlx7hv@5LNf{fLO#B_Mk%L8kU-K51c|2Ow#=wwsclVqQQM3*rzDrm> zzfmu#9l}6WfiD#-&hGI(&fKn9Gum3}aK14m zvM3+>LYF@otpGH$BwXuC3Ts0~@(&M#z%)dZ%0bbF00^#4z|pcY){>}^$R=j z*B@guYdceWq`ZtX_OxnNa*w_-e@N|I^K3WYPq$QZ@^zsz&Tje@-=md;V_+RD*(!e+ z*{;vc38wdz7l1Ud)U1Jk%^Ux%sk<#do7ETaX2>7o2iYZwR+dS7{3!e@#(pQ+x&G-; zeJkX2+RSK6tSBk;q^D{$>c$3SoQVU8kaaa`uf0{m9-M$8S@QieiXZ<}~uZai>&gDLCi|X5<_-q?cz%BKHMT@E#oFqm1 zRk%pt>9b%8Bj@W=sQ_Q&DPKPS4GtvJS|k)aqK?ua_a2CXioaEBAvf#advAXzSEXud ztf|6P1Na*K{FKH^bvcwu0oa?AObj_63S{MLAfhnB6sbqu(@2DYf(n(8L}|l6DvvEr zZIR!sN_EZ62?bNDpU;%2O-l|AxVt+3xAD0d))WPoj>Xwqt65F-_5J?7S~sjuWqA_Lf0}I8e87E{TGhJH>NpdnX1rNWvl`U) zd|WyxLj}!l%G~kD90*6Ff#O`)Uzm_#+y-QCw8l{$-u6>?OQ{i=!Ak@C)vY)Q=~Mp9Gb*$rx;p z)N?XT`uj=My-%dXqkW)w!^n(HyKu274l~LwX=k`E7jj(t#&64r%BYJ`vZ3!N9L=9Ra;GnsBCyFkQFX^cmh=L2uCmaeqZ$W-;PDZ+tP9eg*@Xe&I2qhBY zwhtwo2&<$b%Sl^D{;%SuKy;wtRBT)kLgO&;fJJ)Xk`?HZ8s)ANUf|LxwfwbNd)sb*{ew_|9EgYoJ#mgfDQ_at%%9&=?Zt8x0` z9}de|em{Lhtt-zWze1MkRzqpQYsr}_mhv-rY7I1Q9Jf`)svAO9oI$l%?8OmNg9A1S zpD&@Qy>$E#Ut>zedJeQf9VFr~!v%~mz9Kz<#3IIV$G&AsE;8(7pYa~dkX5MwZshbP zbW+3OjucG8uEvy;8e2~!Cxf!q%n$&^5BJ}4TU)-3!N>@=@)3$T2XNIq> z4#r2ZG~?I&Q& zf+VCRI;_YV`C!1s$Q%Ox9r5(<5H>Lk>KT( zO1cnD{gK8U?Uc9k93qlBXZ~6e^Edjnv973; zzcF&0_$4nY$U$o+)t#bT(2>+3d7hJ6GuCX=<)-%27z6YqbMo+Lf;qWR`P73|LzQdXpB^eOm93A9%mf;i&(t-o^GURN!_( ztpc<=$c`mP0Cxnhw6H5>pI{^)n8f*)U*-EB4@96_)r`p-SQktHr)a{{#K53{qveR& zDD8V1%Bj>s9i?54R@0QY7Zm^q-IvL?Kn)Hqc%e4oDff&^mtXzh16N#e|0VKmn)&nR z_Q}WDa;dLp{(Jy0LAuP&={Y>_@qPOrdF<}~y?d`&c<2y4s}tY6Q*$|X-=Rtao7TVg ztRCRxsVy-nH;){$K_mnPBloh@Vq%r;fOn}{k(7yV{dicyt}h||4q|JUb%4ZE0?|aaQ*GydH1o<19*0e zaJTSN^^W;DShVtoH0Yf3`-Gn$LVn$O=glPaugK)7P~ulBTC6_?Xj%Y`ySE?QHKr#nlfM z?|PafHhI+ zMlB3&Tt>9tBlQTfALQDA7a`mY#`N&y8GS{%2IW0e#wshV@ooec z?^)-~T)&`WW1eSG)!Ze1c9d;7w{@#0>HI~52Rk|rup`>eP*t^p4DP*gc-b{quUd9} zb!TD0?9!B(i3Kw^*X!(>b(b|(=FgaSSr7it4s;C7uS>yw;+#%k_aIqWA*nxx3;`y_ z0SN*nG4qiu*rqNBF)j|FIl?=G5py;5dfoaM;@LP+6*p{RA{b~z!>|f{?t&R54ZYTi z07?(~=rb8;yQh34p?T1ct&e51{kK09)U(RSHE>EJcZOc-e5xu?_KbiP3H7#m5L`xv zuMy1;#axphDIQZ{bC3-7AhIeY+8-K?i~_9|oIl~eXDT$R93+=BQL(ziJ!ksZ%=9@O z4a>aj_tSKG(JIR~7(`=J6NGWYuZ@Crrrh8is=wogTa^77xMEe?`sO2JZ5vwZ-O2u& z^7E}98Jb^fH2uUd^H6Sb+7r9?KhF2XjJXhVd8}25*@cL;;&Fl-175VM6K{VWQ)|(i zp}|NkIB4jv#r{zLAx*jgY!KkVaMv}@W+4Q?EIZ`a^_ZvuQej+T4%EjK9F4;0jq}af z>RX%ZYRXCoQb_qID9||214ZyCJWS;ZG!Af#DG7C$x$s0G?inZ}BZxzQxVcGNm0H0@ zi>u%?gK~NGw^r4y>n-mZzYM|Z=i6+_b@RO&o@!37ya=oT3kLQLRlGLs=!VAOhQad1 zyPA}|ulVQdKfEBPJO=^m$z8dkdCnZO;Iy$)Od+e6?~8K4fHB~g8@8-^_S_8N`|0kn zN1&fieFz-pwOp3@;0>)OY1)`sU4#Z%FEd&)(gcMwM>5k~d!q$GD?u|2h+)ZlAbb;u zU^F0tQE5zC%rVr6!*PJ1BEA(su{_z`0BQhqH>gy#)tDfGCaAnz@C0&X56Lp7KpW}j zCjfw=E!nr_Mw*7qWCqPaNH}Njr3GdEM%HCFt;Y-~kBov-4~z zRbAfSOgt}_R}Yk5aDm*&-jG83S2r#KQuC7&3F7T5FMVyjB18Ful@rnwF+#zG#?#$T z{Bgy@V;OV?PhADMDQB=jHJeGHlmceawXitwqiEt>K#PI8+SGtll9_0$$!+gNY15+B zA@bL2EmWn5)9T}PT3MV19tylO$t0;wyTP$(GV@8$Ve(CIO%j9(nG=;6lbmAne4K3o zVQk;O{>dhhbMPpT^Ejrp8XNP9j*Hs4tnAk|eKfE3yFvJyz%*UZbAjeB9 zT}Xj4GHT&59|7g`^Clg#+2QTXi5A{)n|M||dJjnBtd_oe@?Cxb)CP5U@`on_m*LpQ zJ0|alE#oro_I-ML^rnn2VT%X6e*GHkG>b-Y;fj@1`&D#S#Z1o z9s$R(EdgSL&)K0m#c7BUqW2?GqA;${;ou2r$YBz(h2#s=gBF!b9|*$m!*=1j?nB#v z-;uvFU1#5R=+dor-E`JEP3PEp>4AoZy3jxBni|>frQIiY3-ivs$94r0PMV@34U5+u zO}J;vmb-0FgTj%*3Kp+_+IH``-TRZ+`$_w6l{1p|cXwZe6=1avcHhQ`Ebv{a`B*@r}wElBWjyiYU}9WL4DcvULi_UQKpasre{ zrrN#59QB(c!%%p266z93VJD$avNM&`XtZ>V(hFv}q8X%w^d-dQkOofvmymqG>Ci3B zz54UZ8e6PF<5}f0rXR&O@+}Q=o8X;AcDn=klh4V zK>US!OmnMHQ+0UwcLfYfg z#!|Z*Bm{||5{o9dnDIhXE<*jav4I3Kt*xQ6u@jvcg*n;oR9ixfXA(g@PF*?-fDfVw zo7fm7Geu&&vX3`TQ8BriRVSaGlut7I6jiz@8fJv_Fux2tbGQ0-?7at!)!>!_!cO#NFHnMn=iX1%S$4MRbGLO~Mck+;SK!BL-j$pyy zM^4%9DKJunrICBzTCp@%3J*L0IS8r)`LDdf*UxJiVs3Q#a1!P3aS8DHM)S(+qU*jr zy6#%l(a_;s5YHRDz!h~os4H`{(h&)hLMcNv-Tpg#@#9;)9 zq>keyA%m7<$Fwwa47`ixFbQX@D;S{|fNKr*1?r*C$^Zlibp6O*q8t~#0+YZfN=C)f zBr3I@D8sIDY#cE@8rPsp#Df!h5{j1lT^PpZPvg?D#hjlwUDzk{WfCzWVHtQ0NZ~~^ zJP$d!qGJ3^ps8#|&V;DLnfW`gu0A^q!BfPkg2B2n&$H)coGzjW!^@?qc5KaOxjR-T>0_8 zgki4%f=ddE2|Qjgf*WA(vQN(_#T{w5{-A$BLsj~{o?M%n?HPB8e^CDx5b~GnS~6Te zHaKQ2&v%YmouJX)c? zVjjvAOkAC~bt0Ypa>$$!Gdji{%sF0A(7ATg{fnlsVYLGmwxjlqVRDvZAL5u_o} ziK=U*8a0M+QxeGt(=394tRl6KXj?JlP^r#OQ5(>lVI=r{z`I;Ugd+MP|=+0DBr)-3+P-A~@n8^qihY+hi& z+RdxyIqkC#j=gdRC@3+A@5Go~NFFJ%Eu>o5n{AseKDen(_>)j5sIS_4{CqraCVW#x zURA0V6=qx%11Fe+?pP!6Swz_ly8-%c#FGSHASn@PD218a3yWec2^9woOD;Pm>;Oqf zbQCyAwbdjo3{P&UZ7C|CEUe8URjI4Eq#{;EA!(;zDjnOk!mB~OYzw= z)kPCUjRRHgm#dR%GS*zs?De+pT30Z8iugYNc;ffy@ycF!XF}ra#Xq?72?fgFuCA%5 zG#t!$YO`>O)V}H9p-t^yDZ-QchhMsFv2>Fllpfi8oGuP*vmG3sTAt6VLEXe)LpC!2 z_cTJ*&;V6JAWPD~P|}1Wgw^4CnfmE)t}qgW(z{6A#qxQEy^a)w%4dFnno}t%)6Tx` z6T*Yvh*;W}ZveTg%1fkz?&v7$%}W4R-$8UtuWI)X~8rX3r=)fdn<9N-3)gul8Q z7A|V2tjws2Gql?m)^6I-xM)#hL$$BY9^d*v?d%H8n8$A~E!lcN>D*<12!!%-%KX_y z35K4dI{RREMN^)yBHvr%G>ypLzpNzHpDOlb=3b|b8|HFFoP;}=fG2HGtq-lCVTz@g z`35t`=Oo}b!eiZy00HhRjptJ0B%1vOU5$~8)M!W%1Bk8v;UU+%=q#vY&Phk8>=vdNRSv|Y9t~1qk^z*pPV+W=$YpJa1tgT<> zu|Fbz5|>kwE5Ftpyqr&PJtutjz_0J(GHRwtwwciTek-bIaBg%V#7c-HBT8R~>J#xU zgL=Tgo5kSwQ@{k&FDBYTu>nG&H6R4%mlj{5@Lzsq8)~ZwxTwdfbAlVL@YsNAV{#9tn>Iwgv;_RA#0-Y?yQuD?Cc^ zh9@1BYO{(&A2|zj*`WST7oc;B+{)ZYCwhM{V}Z|i%vIA^U!LH|c`1y(+HdgtQp;fS5j5!+S2Xz5I&=KgDk&MT=sr)^Sh3)fZW)=rB^tYgE@4IS{K zeviJjao*#UuBr}ICMTJY0|#daP9wqBM$vdiDj#2PdQi6ziVD+GQ9@Glhs47`$0`{m(9ZLR z#TPuR!fZkh>-F%_A(2s##1s>LU_2-fa05QYwW9@0adK!4d=VN4DNuAQDx*Rw>ji7B zW5+$S;*Xg0MTL*%r&`7sO+&u(~NGp9+771)Yv2Wr;J751U7#fnFAK6ToBhHM}Q^c}I za7@IE;Pb@-+^JxZ`A@`Xce5%zT=Ni6K%DJ&_idDer;!k>vc>-o!1o&kLS)P{HsXcQlDl zF-^Yk5EIH;BHa*e5G-}rJrNkPk-h1z@C7Qr^PQTyf%&a71GO_&t1CL|?!_glIr-B4 zZ#(Lzv)=lv_vYrxBh#C#S55B`()=lb^v01AkrV zUkOtJEBweXX>g@r%TS`^J-3rYRj5^R+O#52pPbz+Jc0Xc_%LSp5M5p|Nm3>Ch%2NSO51q!@V~NA@fAYNM;hIB#m<*GF!U_GtA zws}lk@n!SPJ9&Ej*Z1@o$Ob!igR}43?C7yXB459%x-nOG8(D=|@klzX5i#_^;jP>0 z9G}+<>ajbHQX`yG*lfM>mBS^`>^su2v5(BS4P5CugUvCQQSTU6#HOHbc^pX-O{gn76UEj}enpzo5_3 zPOtS&pD>~4=BeHz=yFb*Hj}SIE#{)3)z!B~bQ#yDLv1%t>Qj9kvuVzMeJ9MF2B~2- zCgeV6U6Loms)uyM?4(})z*>aQgBs}}an_D$?17oxl9uF@oO|Y=rC3uNc5&6r)k7&2m&+Lo`>&du zmmZ&%FuiEuJ^gEqJGyoDJ~n(x(X>0Y?0OgcpXjH$PsUm5j;M@ST6hE&=;Bg%o95Jg zu@>>PPT<@>DpHV_>f94YTq`zcnoM`b@_S*JET^To?31Y$Rg%ZqBy$QKZ~i``t`j`p zm={vZ7Y*pYxL3+^W5V3-PQwS^G=J_*brkB(=JSj}!&m3HBg2Y{maSNG<&|p|UR_ia z7U|AeJ$(2zq7hZoZ>x>G&|SlV{wL{`b7S$ecbmep*pPMC9e(7gEae?dJE~!*jha zUEV%CqUh{|In&L#2P#zRwrup))vLFTKI{5-FPnYtmTv75jdp{JFSuvv=I5&iw=)vk zb=z|8?91LoO^bQtTdK_KoJGmiKB$UsIq|xO1;?nzQD{O=|Ih<5+^Wmnm=sW!f9jg zAWm>J*QsIBZxDLifB!elT=v+0S?@b;x@mo{Q`=Q)V`b^}FTXk5^!lbnJ1;nI$I?wS zDSi7Z`u2@Z-;SbQkP(#>WneWuFPW~b?W$*yvC+plJj)ZswjI3Z>}VPpvZ`ucP+sli zIJTkWls&qbXq{~RT15WK!h+UG65^vPVk+Pyqg?*AxsuahXXmz0{c8HVpOms%(k{T# zlA0^;RT}(3f2WqB?7N!Zyzr7@Gri=JyI=k5_18zWZ}8sKp{k_$qmsTU-kRjfqN}cY z!|CC-47_a2tXXGXR(tXNzq@9dF~l8(l?T&^h~|*$keCQF_+#2Nzj;XOMCHrT_F$(@ zigd3jkOyN3Q+A> zIDNlmuSq|R9spJJ5Aj3};VT*TU;LyC$k^-%d(;i{y?0KHjq@~p>`sWCiVHy5*Jjt~%QVofv#WY`Ou8f7ztFMo2T6r@29QaQ5@xQj`QJ74*wYps_ykG~! zR*Yx=TB94&iwXf7VJ1v_A>u*EHQ$SwF=wt(a_zqxo!JI*tu{+SV>lAUAC7_&u=WIcYPK`~a&8Y>eaN0ShU&2h^{{lJt2M&*w&z*hT1Kl~DeL<4H0mu-Uh9z zYBnw3p%nRV$=Bur-7NAEbeU8Whb+(PPfU3P;xv; zNfq&B7HS;bRmg()GtT-4P5o1v+TC~O(h8vCE z#t7^jP)OPa)m#60o@l92>(Qm?RrD)P#%?ZjDGQcnI3_)?r)(ny(<2p}C}SAXL+PM= zhKdHzlY-U7_8ZqDs>m9yH#SCXD5GD-wMtmFOpU3Dt+AtRzuu{etR3M1GY|}g!(1dY z`d~K-xR#ElNlGqjxfI^xL8ueto1BaLxY65b*ElQ7J0(7?s9n4KG-Ke9l=Qs3oFUya z(<7~7+tt6S)2y-AfURliK?n#lun9Se>XJjWN^lcyAKQ2CN;F-le zii<0Hb(>JwEi$*HUHPb)Lq?oeIpLf>G2NrO<)4vy^}x)kc3o;5nuV?Hvt4?pVT#If zQWD~$+PUElTckNBHzh;YY08jhR9TKgjA=%LHKKE_KOalENp#W(*X-tRjZc?)i#K}9 zE-}_GY9DJGQ;hShuy)Hb)TUp?q(oQT@gvC?z+gkiI?@ywo))3B)OR3It{NR-SYolT z^I$j9c)B&yYO;WpsG2N5V@B7MICMD3XF%q3Mr9e-Xy@m`!J<%a2X75-*|LI5a^yLN zY6JQQSD7|gvf;F%@)WeFUp+XP18f8)oX2n!EtB-?8m27i6PDk_`^&0Xvj-0tTi~+m z!$;-R^fN}6M0YPN?A5WWk>2I3nw~u)>dlC=N6uVR>iubMPGql(3SE1T_wkIppi9T{ zsbkHn7G2+er&sxe}PxF?qddRhloF3dQ7`+c3MJM@SU`9O~xD-Xyb*lgf@ zG}SZ_hZ=MAh9Jbnfxs&F{ISrE-6$45nYtg?a%?w9f%D3Y_%iH=RkU2ms00}cX2H2C z!n5|c>sBM`t6Pl7f~sEWJ9087)lIu$X6Av<@6M^8P_((EX3o%tD@t>_c218f=$xD6-SIQ;Q%9|csLQ;?OZvML zzx^!3UU1?TD{XD_Jy&!X%w@X+YD?$eHn05Wzqxa!J?zAr*2bt*BXZIt4i!4QNqalP z;Z6F;*y{SuOgJ3YNJ2XnLkUr~*#moW%D4GX&Chm0PBF2^s<&l9sJdm20t{Q6SYK2# zTLE)|+T@P%i&~5eG9^&CfDR(j0yM!-+7m;V6kTye*o7GjyssMZ7kbxDiV6#Rs-S zn9vFT_~UjRjEMUBIB$dZsu(rPm5L<8dprUI^Yw4mR$OZ*b?uy4+hdu3`~>Qxhw^J( ziwp9zGvFJN6JnyV`(vStuTl^sC}yMbYOwPT7vrqhVCR?_nBjDifZ#ZyksjTaS1xEmVK( z{b}28u8%Qh&qmbIwt9X`w{ZGJuAS)gi%O?oAlJ$0n4HAsAV*NRQ9{?+?1cmlzp~V!El%~|vH zkaMbMbS=)#!0Ks1zUdB2PM~LKZD24B63ax{sB1pedB*%PikcjNW5#Ul8mb`RXzFU$ z=v?%j&Jmo6jO>=qp))7KT%Arawbar%S}CWI9pTD9vD+BrUAWwMba@A~)2h(Bjf=U| zjJneMb0gLJj4^MOckJBgxOU#d$3317bDF)^8hyQ=I*O|1_U3mwIP-0U@rZX^bgBuJ z=wRl}HvX9~X;PHhp}N{w8JVV5sZzXtn`x|Va?zDu0d1{NyNii&aJ-gTk&U`-lpZH{ z%*d%iT|hPyIQY;aqM?(x9b5b}ASlj}haTO~cgB7}2-E8jkstvH`QFf~Q9+aT5nb~7 zc`PHsm2mHOKlc)ELu>6+SB{J8_OYFrKD+lXpBdfLi;BJ4Z*~9ayIIt8; zs4%NKYo1k5o_xlczrC&hEn`jYd*H~S<&*P@hgY;W zI{xxRJ3PqX>VcV?_IB}hVDA%X;0R)L*Rou$w1Ei zJX_s?wE%Npn5&xGD4X;c@)1>L2OEK zq&u}(=|^iFy0sEp#!+A*wt@>0r|)#n&+oy)(qVa(1@6^Lo5C(y|H9qN7QK97YKQ5Mj9&jl-?ZW}1|w~j72>#`yBD~61lkefSV;5j4u z<(+qX!lb5H*Y9PzYhmrSMd|qAkV5DX2J7ALy;~)l0PkI`ph>=EjLx>n^>keAu zELS>ZcT9y$!O)S2727f~QAa8(^B<{A(v_L=EJ%do_1H*^`LwsakvS;&^*(R)3k!2^ zzTL7Sdp1?SkX%EFZfd?LBacgZo-_&~^76tW+=u?=Vv8#;FVg$+BXL+H3HB>jq(e8m zaMt62>X=TM9TQ^F8!RephgEGAnW&NNa!jqEdgf;IagNN{f?@f|45M5s+Gvh#a))1c z?nQ@e@Le6Ndj_M~jsHTt$<f_gbhu*l7nx>jXS0yHW{`Jm93%oNfj*bhT;vF^C?M|8POB_x~6B~8S zZ@4SYVjMYk`{TGIg{@j07(rby$LcJEAYkG!q&}2+S89=9bM2CN@c-P2!Js(*8XGmqx0 zw1%@k81u7CjQpzsAeOC+iAGur&Ly^iU3XRD zP~j=twQ3+fc<*w{H@1FUtkc-pM(E0fZ!EOV$yktVq(ynpF?%#e%uO&;qK$^;56vgL zW$tDfa`sulRmityw|k>IS$X5U?`??8Z2D^c$3uEuYbAB>+;#E={_l)MX6XOUG9#lp z0+XL%Wr04S)5#F5;)kp09>Si>OLRf@zjgV@shr7>5S=(8Z&IvKh z)v}MS7BT8HIj&kp#HDsI|Bdvjhn<{Mi;2zuM&Z~{@5E^>-A*si?#Bs(w$?RUPiqBQ zC*H4J7qG5LM+~%;f9mop5|<`hvDRfxzqbkkt@U#3prnSyNn`8*g^@kkjcRGTSEI)! z_;=MihYybMI*X=Z-sM$!gFw(C^uba>jOWciw+hmnSE5P?xz?xK%-)*MosM zIgZdW!EDH=EUKTEdX5h6!R?J`9oz>ajWbY^t1lj3IiT*0Ua^CE$EKR&tle;gPm{Y^YS7V<)j6u?X8bMudL zk7;_;Vwk@US|-5&J@1GWmliV7CZ3IS_j@BcLYvlF3a|#jxuC!i_7&(<&`KS2?NMkg zZyq?ZKDqf}KTWWb{WRg$!~*YAt}~ARA!ER%+KK`{Er?2o7If-eaWSRv>l*HHzCTwc zS5Hvx7z@3+4H{&V13M*1r5*iMwT+-3U^=HiTbEGu9cKxJZLTCWMcT4f2$o(AzqJ$_ z-D!)KRXL}lTUcmqY(DFx!uKnkcj>BiWBfXV!_S&N*}oK^SsjYze8uz|)=c*+3y!;T z)j1SO&z42obKq^FC7v@Coaf~BGf$qu?8JF&sCUpLH5WHJ>L#jB35%?}GGC=9Ptc$g zy-S3Xpb?m@jWAGwW{-ce?w?F&EoqHPNJz*}$mhBhPFKXn7R1M@eQFn{H0h>+RbW=K zt{6_H=4B9bd)S;-4cm+pf3$mLEX%RtqM9EwPc&66%Lq?36Pw*1pYR@UImMpgnSXqh zvHXkZBzwa&?_*EqoR~F*Po?cjy<d9X_t3ww;dtL`FDm_uuM#BAiln?oVqM(Wd9+ zo!s;EOR}}&mD*L_ZwF2oozff`T7GM2YQA@)`~7dTGR}Npcvbh-f_JK|Sm7@_Ynv66 zJyANyS=EzRop9V+&1}&5XeNyQFpscCv-wk6Bi!AQ6-Q{lGqU|^<1Sr9O!F=>7HXy2aqZFTyxq8=d2{K(Stk4RNN^E|$`JSZcFT7X7BvLf&SejcMy4y{rR!?So9R(h@nV^=75m!3J zWJ`^$H5XbeHcKfRR)9Vhu4mi6%Mx@c=0bALW+>CsVmL9#5;z(yEp##4UrL;=EsWvLi~*>eu7knz;5Cxm?{+yPQ4ja=y2H(?{N;+ONX_ z+0yf*&dKf4er9{?SjJ^9U2fdi^!)jWW@3!-p7*@UuueGxZkSmz=90P|6N;)MizEB? z3o4XhGv=NP7Cdvyn5I#l2aip#en0=wdB(2myw`1vO855d?T)Qmb6&q|FBzJV_*A%O zRA+DPXJfl|?U7*JTsg?-k=J9?wOh6N(5q|j`U~w6OOGEYA63V7bZA`!=VGX3OmhZX zW<1=eo9kpaVCBXhu{3gE%q_?QR4qJ`2E%Mfdo>%<9v2-vda4WAVK`y+79X|fZ_tz5 z+je_Z8L?*w<}u(7CY(mPrqvF^|Nm0L%!1YZdRLToSA*xVaS5?;`SG#bp#+hQ%?|Lx z&b}CZUX2EE?dhpouFVk9H9{}f?&fXAAyf_0`!<|)?y`*I-CVDF!@sBXM&3BH$HYQ^ z;jn^a#JT!S>q!@rog58*^biaKnMz`Jtr(q(a}2}3;b`Yt&wB3| zwV|P)d-S!L{qiQRn{vq$^GA8>jHvt>Y2`_!BbPn9zVCkso#ojUA&!*i!|N8BE@f+H^$SP+#b&$+NTkbB|vU2 z$xCj2Zhj`__4Qn>QV2W8mE+W6kVfP&*aO;;{)L&J26xZ*@10vlY`NW~`O>-fTu|}M zGdDNSFc%D8*t^q+*xK}jt^@zr*LW#VQzNfjUsrj9HMQyf8?P93QQxR&%+PkdXskKk zoY4FrMK&dorTj#0AA8m{=ccrq6Fj^d!^T+3 zp!pN8xv$Uog0AJ}ws!8tGZ(H%@jen8f6PH69Oa8jhxv98Y zH*-KlR8++1(GeW~HV;~NG|kQEnxWTZW_11H{JS?T)Y}&qZo2z?%6WwcC*b_!F2lau7zEt%FTz84DkNe#i2#enW=z z3tPV9`G;AvnQ{rDoL>d>OQ@0mP#G>Udj09#cCqoX?fMO`O^!G9^Ggc_Sy9?nX{RycX$)b ziq-U-&9Q)9PFY^BUT8XJ6!k1FPWB#IY*Z$6B-Mw#ql}8Is&sZn_eEiTJ~XeWkg|so z!eH53YF3zAfKjD%_Y_& z-n+;rK}xH;MMoi55GSYBI`rG>4Avoo$1n8N?=rM^v!1qdn4SwfNp^NtE)4TI89nnK zJna43ZO?e`_WDYDjyK*k<{9&i7n@6r1>ReX+%Ba9^J6lG4eauhRjd5p>utyPs=b*A z)hFXFLeAjjaNz3&_5 z^4DHpZkXP7#(MAh8OD?sUhv-U&(9u)0a_n1+skzSd890r6Wh*deSd#+^3;pgW;j{G zyku(-=8Q(tICYrG)iKf;4Q1!4}MoYze5W`Hv;0mj6yGv6$0HOwDkgR$}p|kyzsCCYE#>iKU!wVjX@o zvD5g%7DL_aR>X;L_8^Ld`N^cIARf}B{19ZI0%axGfedT&Uv^J;XHR)?Txs;v6W-dF zT$aQFrCN!?aHID1hq}ad@a}qTYkWp>pl^rvp(O6RWL{|LLue2&lb+0@(KU3>x1|rM zG6H=l5EK(fD04cS!z}>=edq@h3-qBMPb|=felW2>ANujc0)6NQ6ASdAA5SdMhkh`z zKp*e%ZzY$IkGtq-Ali&f<-LMfoiTGrwUS1m7S`+3^fJ84QEKAT4bleedpoZ z^oVX{NOTN{H3lg0X~eJYtx-{?RfQM5eed++G8R)s`t{+`Cd7)FD4oT$4>=6&a4&xQ z&W?<>lPbF;>Uz{aI9Zw||M2S*?^&N`oUv$R=HQ|}x7T~xdlsFO-h1wd@z)LAFOj5CN*4$;=j#$NZ0PG0eHp?}8DXR2gfdJ!Gkl^c2_xM1$`&IYR)Ao)sF67X^1MnS6$;1n#M5RHvv?e}0p2)4M**v4b zh!0uzhnZ)4GmJfxy_FaHOVX7br#0W3VLUg*TWPqvWvX{(Hxh}qGt8G<`vYa^!%*-G zybwJ2B@m%YA3qgtEkw%_NR^MYLXe|m`|YLpW3hh;RL z-BJcjAEU7x<5m-8;mYsozG;@9;R%l9&_V>cwjbb$0&7f%Ckp9W!Nq?bi_F;UXv;v| zZf|4r?kmS@Ar~3sw?uKKJfb|bFk0`Wra?<>X;)CT+!Fk zGRnR>yD!YCW&KRVNUGtf_F?=dE!28U&vF$x>0zgSW`?mmqx~5*gC}&TwTgQdcCP50 zo{!y_^ida;k19_uNawGpXHnA^=A+Fc&HN5!Ikml`lHCdEiK!_?TxSuJoSK##otBf^ zIX*eMI4!kfVpe8;Zd`mqVsf&xj+J`kXj5JPrLE500#(($5iT)Slz6pFxzw)TDrP`x zpiFNXXCXoaP1@MHS~o&^9D++nmg2w7x!#QQ&dw<<%>l;GIwdD}Qe?R8Z<}0IjxTRc zbaYM77snsue`j2rTt2<|ARtAcoJu>KeQ#!i@5(m*)1|b&4wkZMW@ssqyPQ;q|C?nr z9}1SS`GJ4CjH>X?@&EsHJsRf)%jo^`hsp>I47N2rL3eBCqA#Fo5tG$D$%@VQ^u|!K zlXtqkF(X*K-l3&a8a?6V`M z{U0o)wWGC^ZR_y=N*T=y|IIS$Sm9*M|DUaG*nT$|{aqOSHmCnRt`JhEEuN;*l0j4am zYUP0GBs)i$2xgS==7Q!!$(@YVI$d}NSq82%Kr%WdyVCt*M`sagBcv>;(zlvZ4FNgy zk4zxWz{LWNr*%>66sM&uCs7m%|`W+Kh>Pb%Xh*aYCPrG zQ%h8?4?mJ=u@*P`6kCisKH0-YRBR`@5}hoA=L-r@ocAB6&cPL0 zsS7?`<*P2Xe~qDPdO@3cB8lS{+s#xd-#v$@BjYR>@D93t1-|AJZQ|Z-%OG8e2-%r8GUr@ z!}FCe4xvje=X8Oy=;YFH7QkU0!aKCA`GV$Zbj_z$dA`#ryvvNa-aFn3=^9p}c~$e} z8MRl1b`Hwsoz&7df@M3YhjJYRg*8zrVrui>GNgO@Zodsi)!A#)S%YOO;NZ*2Y#fBYY|!5fM36FGiqQ{NPhp`H9M`U2NdX;42XlbDmhc6rZyP$s#) zk}K_%vc&y?40LUfdSjYQLg2Iv)Dm`nm>74OePy6|vrNIg+&WAeD`gDW2xbJJzo!Cq zuL!tfG`z;uG=i``lJh*5%X(J>226rTsQOkEArbcHP#3G?udVX zsqmQ6$F-N=Z%Ms%qHXe!n2uu1BQ5Yn&u_E|vsW9N^52eW%o0vkdH|)8w-# z|Ihet4D*;T@eOf&-y%u39>={(YWc2ObEEXJ%lN%PGR^t?p5U8i*GjrMLwdSjk|O&$ zFopNaq}XmCPPTNjx)E-tRMKGvc%4o-u6SZ$=8x; zKP^SpUMVymmkRSe>ETW%y}kZ&k=Thno+OQdz3RDeQy?@QW8 zTk48@JzlAI-%J|xU+vx2CfatBTy7kv&mNb{owAIOI<0r7?W1Yq4`hsSR0=_cD~z^n z0(`%*QtC90#t%=R4YV(6TLfHv*8T@95DiN7+nv7GS%y2$+TQ|j+Cux*zCiz)MFBr% zSbhTDlrh%%xR=X^+W(xk?LgiBO3KW?OR4#&R5|tZ1;2R~X}Sm8X~+4#ZQBn*exo!^ zUBp2{+w_(H=x@r|l3wWZzmTW<{nj+x=hL@VgLCN*w}AV2Zs)wuA)g%cK1tKDXx|a2 zCw=~Z0H(GbXsN3>^j7|bI{KHww$_K{nXltYVeN({2A)G*$G-<*=-bvxiL_^7>N7}# z{)b(6lx#m$-kw|KLiG>Z0S+-1e1`0j?e`!q%UwzMJ>q+a_dB8UwdSi*sjv%p-o-Pp zUZ-uZl@i=`_Hs!!{~@Ug=A90lLFi#^lwyTGqqoxEykE){I=5MQ(jC9P{4UeqA;__M zFo)Q**$vRS`SkZ@-#fhT5x_uuGtV0Deu;NYf%YztZ1+P_rr;DrJ@qlren7fdNzzH7 z&#sTi^H~{TF<+R=p({(Ihq};{ed>2An3wX6fp@kCx*80p^Npr6Ps-FKzn1vw7xZrl zuVH>ln9Y>oA>v&~y14q=nd6!7*qiBZ=g=2*&i_8tI?F5Py}i*~)grrXCge)!Uy)efS_5sE8=`!~QMv=@IIXc~{d6yIV#ng3s9klr-r~Y;Z_>j0zeuU?WHos5eSN_#^%71)c9L9(!Lf;3=r)5-so4JPX zuC*hm$4&HM`hYos@rnz2m{TNAU2R|7OtVfpSjTt<<~qXPz%xLBqEZq3ZM`ZJ=nE5+ zN3g=AJ9MF^KL0NuOY?S5^nIvs^1njXb7*0`mdiDQwhaJ&wI0;k({Mxafi!s9NrUpe z#!l&?ZYz9$;5kY8rWW1_{>gKH2sZE>fUAOV0eA}Gv$jbObz9TpZtjh~L)3MdIYvqp zZJ+hGRmS@a|2u7$Hb~@mqWL!OZ=>E>FF+U0w8!}>{E)tOZ+P+ZY1;>&uRK#n6^>tT z_|gE(XKDZU1zqf>7ae|IdHz!)8h^3Nd`m6ZaHoathCFAWY>1P|^ov$NIlT~x#yEd1!EXE&kssZT{~LuF)nkY8;Z}x`uoRVb(I2z`vK@> zAbeN~>AXeUt<>#TzFOBn`2CKQ<8AW5bwAabu4lggIjo-y^t{6J%YK~n{50>L#Gmz1 zVC`0`u5p``Iry3MFn~yNxgunJ*Ms$)f^{B}wv*tj{dE2JnjbQV_rUj$lR@9oetQyp zZxIHx^>=<(P^LWIRkVE{EN_T^XjpwNvY(|7zAd$$A>G(+b^xr!e18wb(KVSu*Jw3%Khke~=lJ0t zNjj;5}7;ApxPTz(g^mhwZ@@|uF zW6SSg*j255hCTKJ8OQg)#9xta5BD9m>lo|)unD*x*4h;c^99-t1b;K{R`G1wWBl*c z-?d(1tY1l@LZ5@MM#I|-B-}vg7_y!pM7HnqkoN;zUph3ehbx_UiURO=WI_7eJK)E& zCtKn|q4FGs@o)f}nEL{L<_zEat?-8nd3S(!Kzdrsc}ILFog1V8S7?_C$3@;`d_ljP zf$dq%hxf>d63r&xo92VQkBq&(W9qg%6VGb!r{&CRJDA&^k|Zk&_j&TlmoCuupMYHW z&Ytj~UF_G8U6n{i(6ze2+uY>H;3lcO%zTFaaV=>MBk$MA>ssG2^*)L_7Sz$dU-PCm-hqplHXWs-3zgN2Gb10PFhy6_Yt6xM3ayJeC5^|yp z=Gp6|%8Ev|bewreWp+ObpI7I1*Ix*`#aCxV`fgB{cm+WO(kgI$>-gQG{_5viM1Stl znuq(Zr1d&;M_1{k(C4;bO`dN*!n_(qe3fbb0I0WfTzl#Fdwl~4*VFYmJkKN0&nG3% zb;Q@xb)0pnglQ=pfBg^v6M-WK_sy6_0MsXr(T zxcdA*2VMIF@^i{Wxt!m~F5q1Y?LRTT9+OhLhO)m$|JSf9r8Ba?!!G6-=l2@c!0qTq zrvQDe)_y~}YCp5@@qJ_uVl92M3}!B>^;r0I41jda{nT;5Rau|)68zaqq#KX{{;#^v z$q>!ot?l~)ya~^q0sMAJ6_zyWPl@UHtL&6-_H<<}_&Rj+*D_pTMbJ+bcf*HX%v_DE z)}DdelQ41cJIK1tub?3@xX0iFRrcKq;W@_t+xJL||9U5k`p3B5ky2Nz?>2WFetqfl zzh&N@gU(_D_53z{pLMnM3hOI?o!(mLSgFGPh;nRTelwZt-h{RRmHUSv$Jqy%z&f{# zdRW7m`bQ}DdCr~uCXaf?)EF~Nav3vw%_zyi{m9u1$oB2@ z;A?$v#z|)Gv9V&%s&HNW&h+f5aH?p{pmqp=tjAo){Q69dylqK zD1AG^9D6pjn!O*_5`K4s579h!N*-=!7iRr{j zzTHUcG-1ZlNBm*c?PPWHHQ{!l{St8t&5o@9^nIxeF_9A(2jH9Ep$z1$zs)y&pDLIa zS5w#HtxITg-A6io3?Zz56emtM8O^s}2F|jN$=Sxc@UJgu`!&0$gJh5cDnNF?)#sC7 z>|wrg{CvMNclvgY-7j+ZhZdK8l?uxPKZfi2 zD|7Zs{C-}B)1TfV-AMCgzWsCLgLhDmA+G%o__pm*;=axv&0gldp`edUHvdT*n$9@7*bd#R`p-NU z2hvoxSo6`}uCDa|Q^36oI&}|pM_M+L(9I0=3| z*V$J{Fh3%l&4jbr%lnc#x)~k=z&9pXOQ^f4v^&qCu)iWt;I3xx_Lw9@Y?OrdJTsPi zYB=QIk*>>-xlJG*U=5T2kNHpLrZ|26Ufk?|2Q7~cjl;Rn0j$AmNvoFqkV@SzMwS@o zj`8mu>+iM*J4+JP&;1iCA>ySA=-r6h!XL0)}*Cm62@ z))Z(raQ3Qozd970G1K3yPZJ`h@T_~v>}#^dN^$Tsb;<943s)0gq3OWinJ+IBi{cK8g>jq-w=+}!$<#ydHQUgRYzc@ zj9{;^rvu~&@NFZ5zggQ%;km)VI^L~w-Z{S+U(@mL@1QlF^P6u_9!=Ap(Gunl#Q%d6 z-w9(g&j;T7^ZtVOUxeg6+!>FmbJ7Cd_i@mgpYz)u3N62wy1SCK{`Y;$CiVuWQ}1`l znS`luu!}r*Ie9w2;b-)FPhA%`$c5kg?mqIri*F`a)l!$It() zzk}$g7zgA+<3qXFajVdQcv@};YZdky^nPmOE(h6i zo2eKhm$P4{x;mE|M~JI<4E`VvI~meE2ILyY(X)6A-fEoV$LgawHV~$drMei?sP_n& zWSosmE#NwO7OHCzfPJehLl$U%2m!Tk1+!gxzo zSmP-dFw3RH&NQO^kaFn3L)Jf#>EnjQsUC`%s$f2O*Y%40B%mwv5bk$iK5KL_ek=DY z*n5p;-%4DY(H)T{IBh?yJ7gGh=#(Jn>!p|>(8)$TfAF2q^!1&=eusj1&0scj z=1}4*l4K~dpkZb*V|BPpVGS`%VQz(12Vj-+?iR^V!269i;F(F|zx-}5x(2(wVgu_B+ z)4d9F7XASQf8)OL?;@C{x}-HnVXu}9+>m^IcbNP9`3L+j!DR__d}AQqzg-T}L)+>5 z5v(J^tOC>hIDTpCbCd4zsO;@N^IIw9c_Fwk;Pz9!vp_%O63r7Q{BXi(A5q*M!1w%a zK8HAjBg4-0VOSL{%5bo{Zv+-Z*=g<?yv(uBn=){%*pE&WSwwZ>cj*hQhOh;5U07HLfSjS_Qfs5`=M|&nx16;)ii!y+3aA zOv*jeNN)WO!{qs4-w4K&*7^GZ(0@)mN!SHunM^f%2c1ZtH1wa{!SZc zJA^{prVXq@=7{fNF@7h(d<&kZ4Nehn3Slm83&LI87AnJYZI^~1-20Ky#>?dj>n%Ck z7TL+Vun?ZCj(%3GzeAt-oq$aEYWf{Lt!V~wU)2{4{#L&qq_z4!7~b=!G%EBr>Glo6 z^RbLk;17(cU`hZ}U6&$r0rno&Q+s4h06FTqS4aaW44{Fv?NZj%=NX?Mv*bC5ll%rM zy=aO89pKfTqvCR)wF!_#Aho|Gi4? zsHX^H9CUIVe!q|gAexr)r=T2^fu6Rmx%OexZl4U{R#zLoX|lB*y1QPkMaMbj{~}!1 z%CAmUuCP*xXRkQkvjBO>HsmG&XjoUaj5<}E{|kPn`X0(V!2K7#Aq4hf8PEoK{LZsa z_zu~(pts^h=VdW-;9xKWJ(MAmWFMAjcOJ4R%#*4ch+m3)s>Z$>-J>7F+ob6aQ-4y} z^~4Ru9qcP)vYsC=Bi*z7&-(j&kcEGi>zBSyaYyMhdTeKgpM{Pe(BHvl-&@!gWNKk{kW8_J=TxPnL%Pi{zYYqp#Gp!T8GqFZfrto~^ zM?`~^Dcb$T56{>51M4I9wiMR}+>elJ2jEJl?oI~-eIL6NgtdKg=4r)AhpzsHGc4QD zIng|p(k?%SY3LM>l1>`VZPK=Z@Sclguywsu+b>g3$bN05J1QsEa6M&^Q_R z^_}S&<(rI7#~H3hFp7NYa2JyX@{6z!aQo6HP66lLO#15oRXiac*Fd&65+x;YVkKRB=_OIYI&UJD3t;X?eCGK0$+@a8=*C^v$_Q6gc3Bjx22;UPh0Bm4>;5DDSI8e8SnW25YX#PcXyM1=o6%T2e*-q`oD^M7wKif z1BSTY_Z$qT??ceRzRUM;$nO;UcG6Y6LBD84ua@U3=Ffit%qtVnc_?vkCUNx{-3fT& zk#-*Q9Y8KQ(&insc$>9UM%XEY>5Cs=(Z3wo;>`}?rxxqwSTho;m1++~$BYJ3gPi$+w4nI2Sy1JbMy_3UeO&WVjj^xoshPi-p83 zM7EefnFxsKmA@a{o$5T{A0u@jw>jp#dW^ljmNqTO3MnD4V^ zw3|3hjPHeF*c-TP#;@~ixV}P{S<#Vw-}#&$yp3xg*^4vp<#_>D#P{|!8Pk2AqX$}z zKIj$bd|qKc<~z)uO`?TWV%I+Sn_9jJkVdXKLz0afBpiLf4;}YG-(GW=grk43*Y1z* zzG5p_fDX$`($4)7`sNLk=>qOGGts@gj7qxihh07R z-dO4m9SY#M#sT)Y-yyv;%DxAk#+|;OGM3u2kM2SSXqg{KJp16!o1go>WnVg;Gdo|| zeSBZrhtY3&6aB}h;1|5UFI?k%U%MvwzV-*Y#+IQ4( z@AVyVai6(4j(pCeERRs8MaOuIrqXE_(27-_tgg9r!ar!`;c|^D0R(GPlh#zdIQL~-b2P%h5Rg< z^uws*Xyj+n25pFZqy7{akKai^<}to4kYRLIAn!WiQ+wB(>-W1XPW}IoTSzF_EpS$` z-I2s^WA%Cz?V3&CeO9mdc^X;qrh&DfiPbB9;S0fTY+)?LY+#H?WC|9Ik)pOgA`*o} zD0-Vn%wmjJ%oB-YE+Jg_C5Ub4xw7XyMhBEMdaU^@STrwyTFSgC4}ozBGQ$mde=Ru z8R6HRINg_mcfk=Zi{pKdYLU`)B4um1CzsznrwXc;ph={1gh=msB4?0ypT#0ogy~z! z_Sp)N{`mJNjcU>!&?qu+oXDUUKw33~8=MQsr*@0Tkb03i(jAIBl(@qbgc-IC><5QM zh7)&qIhX`^Kb&-iZv~`3{9SNVWJDw&-4Rt_9GC}IfsJ50co7@~O_1CKPy%YeRInJV z0h_@tuwUfN72qh_hjk*Osh2UtA4|NklzDuX$iy_d3FVp0_fOskDDRZ+B2yQNoQ>bK zd7{_s%JivVr^t*wA~O%M_4o>S9~==mhw?P66FHak&kF-7BC~lvhjPqaC31cYs0Z6b zE+CJ2q&aT|<^m!Czc0)UpbnRJ7p@gq)F5&ZVJ;%hMF&MLrv4XK0m5HG`j>1Lxzq+} zpc=d)vSho+(gZ*`E+hQ%10pLZw;Bk>zat8oTc1-5_}!4Z*_#9vtn zrUKGfr68|W&w;~;_F}+JK=^A$0MfW-G1x6~Z4H0Hk@+%QjMO|{6) zxqx?TZG_v0L~bL^b;MmqoOQ=UZs+~&l5wcgCAGMYa(4fkPs{+AZ=>q{wf& zi)>rLW(e;eNfUXL-`h8VZQwcZKA>E`C9U7`?zdCHVnCSR?gR%xlgJM8*-;H9frVf# z*agVzcL{)ZzoU%5TLrd(cfnCaz?5NUE~o-?KqJ@+_JAWIj}iB=5;h)4_i@sFd^I5N z$I1KmgxfV$ih|AnjKdgDv1ifd8NI|8q6K|IhgU`CXCMVn7X; z12%$JKoc7R)Y1M*Fcma{?cjjO>ydyoUT+Y2gM8mu1&H?s@82MwH+PG?RVeZ{;oiv- zd3T1$d%XY45s|-w55hz~tP=UC97`r?7)Idt$0;KJwM*pCL6J}S{b`BF-$?s2opl*Rz!mXZ$hqESw~o|L^8s37l3+}>H>uo!3X+$Rk*iP1L_Y!Rd13NiX8h*8b& z0Rpyz7sVJK1k#>(08=7+#9)3hrpyrIEYg}v8fVvwF>Ra}(-lj_m_fK1q%(uK zGl@5AD|iJQ72})~K%8@kdk$$d>=xs^LNR8O{~Yo;pLZAFH=lGCRf};^qZk)Y0&~Pz z%=cbG9+$2bW62sZmTnf~GU6^L?JLOR3d(lHK{h<8qiZY0xM7AEx9k$*_NijrwNs3{ z!960zrj26UTf-*7Dls;%72`gBLnDn}ju2x@49EhM`vLNL0RIPg|G;i=02~qH!7y-0 zj9={+V=Ml@Chv#liSe5?Vr<(W#>157(KJvFNaIo7J&ON!(%FIkjum43Zj%^08^w4$ zQjFi@KEdy&Mu-93F?RDEPtOtK8Qwj!U5sZN0CApO2X+I>vBw6awWkiu0BgWzK-@j= zf}>*WCH~%APz5N*-bSz$5N0o7_7dg~DWE$boqf3b)FtodCV_=wJdgW4?jKi)@gixx zSPl*X;=Z&=j6c-_^8eFPF0C^tX3Gn|M|IbSR`F}nK5Cd=?%mGUQ@4`2LEr9Uw{$?a$A}hsg ze;7208AbZhq#e6U%*2CYCa)1QWgJ*4W{1V7HSye$eA4-yxmL`qO=4!(0pfN_0Hl>u zC1zf=nEAXfcwfw-?qYW4ckzp2cA*U2YQ%(}H+yUVcqUXP|PKB#9Ue_=CZ9~UPf6~P_E1OhQJ8)FYw`{=oytP%#w?}}ffN*b?-J(SsbC&h1vZNL9_4#)H#h)}i20W=K-j-jiuu2#5{CR%uknswSYW6eGa@2c=xvr zVty704v6_V>3)vi7vu}gGr!m=CUnjGJ8Au$e2(BA*#`Cl!hM+rszC!-4aoaT(*4Q? zg<}3A0h9pZ{bMRvEap+dA3Z4MKbMO6HTiu**l$SZ8}j*P9GC}If$d@*dYf3WJH?8hBUVzbSSf_-$h(Ycu`;oBpOpp6|st^f=01Q8pP_dO02F+#pYDkZ)0NU?gpD^>+@DoLx4h*kBX zSp6!+8o)FBnKf{eSc6DsP$Sq3c7gqX@Pn(xswJ%QYcR)BSYbgtMW)|I4vWi{9;R%0a~4C{TXk@B$Kw^rh}aw;J2 zm883p?^(GEkmgT#|C16>Bi2>qy($cB6zl3nv92NCYsqUh;jbg@>z9gk1MhEGDApSC zypgy!)rocUII-5^cgt$AZe1(ZZS`WUiv*;zp7-l#h;_$0u{KnRb!VYicdrubXO#CI z(%7^~tj#H6-M3w=UlIrY(0X8-SPzo#ugHHZ@wSrxuSw$}-ao|e-|P}=+X2ud7PQfN znD2X-JRc^`!@Pf(vOaQ5EcirgJ9+%JN~|4}jkU4G`qz4l^nOpCyTZWxfUr-*i1j4t z>@ER>e|o7{&j9G7^=y+^dlCTe;P0$`Pka;nnZ+8^Vx4KdhTs0J zV!ggatT*O}_2ve#4y*;de`~Q=&_nC(MnE~Bh1NTy^&akDbH#c;ORNtl^M^CUI=EY` zk6#q)69I?CI7bc_gj?W@I(suw$Ys@O4vkJ%1h6g!T0@#SC=SSWU479iimIbtV~R}$$bHHn?fyJYf* z2H7b~!CJ5j>=(O(fD}Ny4y4~2Xc&@75|S{4AqpXc z5JDKj@A2AsA6uD=@8|o+@Amt(+r9Veyk5`O^Ywf^U)Sfnch(eg%~lg@jK#;y?y~-foERhWKu~h+*Dk z-BSVLyRQV$)BOOk^dx|C(iZ^OkiHWfBzAH9*{c)ay?;3FUa@ma!&n7kmX+x6;-H!0( zA!5VRiH(4sktqKR$d5_{`-z_gea z6k?O|h~*&;b2gitKrDY5u_=(B+7v+7wB5uC%853z2W$W?~nCi;38ROkxX5h+TrR7D4{9p2RL+Ol)x_u`3a` zB#GG4JYrYp61!G`eZ;PVp6iwpyB={jq!TN`=Z%Yr-87om%^Ads5iTw!wj6fc0=X5) zzj6VwTXzw=9d?u?61#)IYGSJpzY4Oe)`AMK8&na)y-#*$637H&h^<~s>@GtYvAc_i zt;r@icRf0_VN0pvcN4WR4O zQcwYq|5N1s44->96Z^c3*cX+=zAPcOZyvF)vWa~?k{I@oY(L7x{LQ{Y_po%!69MwzcRAR>m0&a2N$fWUVB2r7?e}70f0Pra z4aBY8#GRgGzNN!?8F86LTx}(;Gr=a}F4EjwuoxiStpxa1k4K;(I6&O@!Diw?G4Uf( z!AP*5c)bd+lX%=x;z!1TBI5N&6F;gcK)$256F(-Mc*A+bkKIT7IK(BC5^tPA{Deis z6Som>0@;%gZdyXTSt{5{ymMOBHju*TO%zQdfFhpT^jKYMZ`NIJvD`R zC)nDlDe=zn0BM~e-x+D0VOQtXU=#5!+li;4ysoH6H-x*Rj_I)f6y!MtaXnWO?^RA5 z=W3n-`Mw8<_v=YKa|3alsrl(i0Qviu0L1mL1kjO1pdo-AS))NAfJ_!-v#N*>fL#N! zK^|B@eBds^7nT6>3__kknP3c?0G2GBQb1E>TjbHpOzXCx6H zl~4RE1#LD?tVE@vtj55kL?2uzUh+n1C{{f8`UPXTmbD z73?KGF#%))lrs_X6Za9H6bGO)Zvj{hHi7K`Y3DM4JUGkq$p}y0Mm&Ej@hK>0D$2op z%BP~7sVHaaBI47=5HCmuu&ZDv@$+*)CGqL_oDP}kkeR*`loOw^ns{Lr*iC#U%DVu0 zW+8p{Y~pj00LqwifcV^akPaYsVIkO0eBMFg7eW90O~fzWMts3q;tQew66m`Wc`w~d z{4yUP{W7Fqj{KLyj?1C%3dCQrmH6VN#IGzReigz?pmzzbmz06+#Fv&3zq%(to@>Ch z(6`J`MI7@szdjM9fozZm7Jy=~7E}3O&Oj;gN3y^H;_H3T6r=;Bug?dI0OZ$~gKc0BI7l4NzWAfb#5ZISe+;r4alH{X zZG?@FLuS)F;!h+Jf0BW%#GhJ39Q%6ybO!M)BZ)t=fcVxj;?Kg)=TV2}cM-3EEft%I zzldwB1$k&oCYmK>YVb0CxWlfBarf zf*433VHJ|F_mW^4B(Ucf0$~*g4v^4gB-|_#-dYkqu8$zlltjIyB;pWvWC@A-1tg9_ z-lOm>r1#ZzYk8^yD;94v?=6!fi4^C5g7H0pi=0 z0hE(c0-&os;yUan(Xom|r$mrKqBC@NhHmV+MHlGBo?CQdpb#Lwdpt-5(A9ktiFD*S z8D;e-B5_J7iC%GF1E?U;8`r(@*=IIbOd=zLMBhCm`Vm0g`$0!0(lU!loYoU$lQ?JW2;h}56 zW)eBDe^@5iM`Ad_XEY@-DvQME4J6J+zOhLp#z9YRIf;qeN#r5VdAmqVArjLnNK8lC zOz4}lm&AO87bcUq6m_^9HY`3s;>rvXOL~%6ngbwnHR7*H0;tEeh`$zgEK3Aw0QJ8P zdas)ewu3$3Ac^Y}z*2zp8yF}6$X^6Km`g=bDTy0>fa@C#wM^GK=1Z^0J-fI0CI0a?#(Q)mc(1}0J1xv zdk5rqrh+0+3J|}ulEmAif%)7|;+;5<0xdW0P*kcCb5eE z>c0#6cVzR5o zHiJqMAE7UN1V4Q=8x(^LU?->|@o__t21bI}pcrfbI{|d=i39N29+b7G2$X{g5;!M` zPZ0kJWIu(>r;7mcdp>hM(w z*bH`oDiU8W21v*Kc!6h*;+r&p>u>gw_!eb;i}df30POekAa?GDcA|BNc!2Jlw<%~f>f}UMLc9U$B309MAjC}t{9?~mGo&dQM zvcWQd>qL~B2)&80@kH2vBJ`aI-<*iFCa|MPE+_@4%SkB!<({;aWK#vQ0Q5B7PO@1; zfHInuf&C<#r-IpF6F_=%Zczk~uLbnB*aZ%dO!7ffkPdP{ zK3D`+f^x78>;VT!wu}eKAOnmBgq=Oug4;F!ypd4%id%!`G$?+f=WPs5CvN-F@@JU9PA<4-Uq2* zG(dd&GLjv3kxWe>*$MHT3rKcByLQ=1G7bGKZ4=3^XxpwO0QtM2tZrz>ZqV1Qie&d( zfc8z#1dvbPNfP@*dGa!VaF2KZy*+Y3AwZcuia-^~QyK!uo-z_tlI#iHJ&Q^9f{ne* zXBElb$k%%($v#;CeKKPtK>iHK_k~4`$;)qDL|bELgzr_8N@&~C?=T=o!RLiA3!b}y0iC@9Gn0UH+UsL+~8d#hxh>c zhU9^Au!rPO=owl;GA9mz*4Z6 zJuT!MT{p=0Sbu#4o?kh{7m$OQRd z8Q2P-=Njm_26?VQo@-FnH7M&E6A0%0XJVhx0HW$qXDC0)R-Hbdp$AdJ0>*8!s2#Ucb0NZd5 zk+yh?oLx8-GW|7>0{2MArZbUg75&w7!$OMHXHzfh+*pyH5iM1r3+)whU zc#sB0g9TtU*a;4j+>E@NbHHMN@;2`#`7{HNe;V>nL;h*VKV1R#k=zmoGQe!G5^M&0 zNIs)LGROw7 zUR_4AGLPhTpX8f{6q0YHk=y~f9mul-`gTBO2lVWy0DDR9R3HiT1S3hljeKt--`l%M zzEei>-K8YoBa-h!&jxqlnUZ^3u7Nq&zq4=9p9q?7y+@;~k)S(Qfe=TwrvWRd(egXBS6{|0cM zN&b;fiW0$I{H77ZZ_g=GEDL|ZYYZt-jK6;6g9MNavOys*@teSIQVMxg5IlTw!=IY0hq!tuuiiGW zpHy5aKp97t0O+a@9YAQPaRjwPg0^GS6|A=MdryFf=8@^)=V3iq&7 zcgS_GB9)G^)Ay4)8M;s21`x*nMD;+PQ&LIw97C!X^!82!6<{B!KFHGtX?;cll+_1i z_1OyclFIM_=?Mw}${T`wL-vyz3i+Xs9}4-QkRJ-2Lm{67 z`J8M3`J7S!{llPt803ecF2m-5wO~7_A~hWH!y!K$^4P1W;Tr(*jKJqe=*FH!jY=kU zrULjJ9SWcUNlgJ$!L&+J=cByoJxR@AqzXrpnwbgkdBGS`vsROu zokwa8^v)>;C}YlUQgb0YHwnOwxdi}nb1O*AOCWU-%A5}!7iW-K0I-Iug?mU{g1nb( zBX#LKQj4~dx?%yT#mIkULx4P2LeG^&U<24k>Z&+^yjP+8tCoVbq?W8CwG{cU#^=@L zq^?;^3Tv=hhPdleKn|Dd_6JlIa^CZrcbZh0oCB6Uj{ zsTIYfZiTMfHj%o$gj5Ogl@tQRmmscWH>o=qNCH`)02F}=u#eO#l(8xUz>ZbWw+ixi zqP#oPz-Usd5x*Mtt=0c0MB%qGZeN(0cpX#rRbD!@Kc zPs9O~|HMczkJOXU|5QU#n-c)ipI%IA3*xpwC)QN8wHRz7h5Lf)xt*jcU`xebQZH-< zu;E2qznBcNK_P(rOQobf!(CuUPcP*sCpOI?^cq+y)(57WxfZ!@8to6-_HT~#M-Jp*h=a{l=l(BSUc5c zi%ETs>n|3N`ZAZ)zLBK9Dj@ZBIjL{xO~9j>v~sUINwV>+oSeEyt4>X)UY zeqBWBAU=PC-0zj7{wN_$J4svH@LSOgY$eThlNL)!V=dDPpSqN^mqFT(2YW~ddr8+@ zP5Q_<()AmXKB_6{_#D#5Z6$sD0n!ObZ_Gdy=|lxk#))G9(wdZ!J}HlM(`?esW|PMK z1l=NwbW#fGmWXS$opfu+C4;um-*y-2cCaU9E$Q|>Nq5L0jdeq(#)E^TJ0sjBm2}z| z(p|Tc?p{tB=T_YVpQj*iFWAu=X?+%x?pHuM6M9a=C+2p2`cBgQVNVu52O@6JCeqml zNDqOYq1#C3AbuDa(UbH@aE4EMR728d#*rQko6kxneRc-va|%h1DJ4CQNRMwyIv3?l zC?Y*Ejr62M(s|H*?mW_yQ%L6{ZVGfwIY@dc%A2;8bio4B=NFTnUPgKb@)d%aOG)FL zq-R0btSZuT5=hTQc^77qo;RBGMTnn|axX@`79j7!64ICCkzR!QF4|1`as|psU$L9? z;tJANL2k)5(m0D~%xN0)nZ6dbFC(y@^!2zdf^9b2!g=qfHFy*wR2=PgLP zWjpB=kXr#AEB29IIT9f4)+Df&^lgb?6Y1L#fBQkwC8*1)ouuzdAbs~P(rYpa-3cru zeNP-HA${*`()Tq4$X7Oo^!+IJ{?()(KzR@BCH){|*TNp``SnAi!G6-~7Lk4!c^*do za>$n#fCHo-fv!iYNUtv>{b(G3{TrZX!#iXV)tgSQ;4PyIs2PctE!g%lcpE8_|quXNf=K17?SXfdfZI6W& z#ajunu(k$TDY39ibM5)Du!nafRK>zR-rENM^NWxFNR@MAfb88X3L@l z&13peOS}2Xn2bXWn#XLj805=i@to#0uZ)H9*Ud2^tLtk?s^4N@ER4Uq(IQr^r75ok zzTIhjWx0^YH%6<&9>ur#Iu`b+L6RK{2Q)XSVJv(Ev83x`;d+#ibWu>TVE23Z$>s|y-=n(CA5z9L#qY($@OFLNi;gSaV*@D z+~m2jaI0E&wkDPwwM#N(Bu8~=gPnO)_qG(59P5WxJ4#I66VsDI=e1$6aC@@bOpS#* zP<)$$Shyo4w2AbmQeK;qSbQh4+p1W&GbOZjW8p3cM{Sfw6WjKV#dn2#X)N5Wmj3SK zx4kzOpH3ax;@uL}emfaY!uQ6)Jt(KUcF2x} zbI8YuD%3NKdUu!?3lFFG4o}9yBPg!J>#^`iayxt!3!g!m9ilNi3Ma3qf1XJVJEq5E zMpI(PoLKlQn%l837CyU{J?B6^s^b`nPZcqlu{0_*YL{{NhH#X3JT*?89h1qWwyDwB zpFs0dx5nZp;y2PF|4*tlj`Ap>Q$|eYT>4qfwjAfK24KLTcD0YSWoCuuGGe`~?)> zC2E&h6xU@@EPggM?@|;C&!O|WY>b8H){?)F{IsUA_<2-6EhQGdh=!y^KABH;TBPS< z$V5I|KuKw(G5Lj*kX9KBUxILC^QF|JtBS=hqPAUIQ6HK{1vH1I)44Q>@~Ds!DT&5Y zOMJGcuK2;<62wh_-7dod@TQ?bT zcndWCuglK1b;gcB&iraG^+G)-;3paR2cQ(=!LiV6u1wtvaXqyCbH#O341)PF`yTW!mX#5AN#{?nSFv33l7ZFGqq?UTB0Ph-Q9Jz1 zs{UzhdQc-i(Oaax;)J5izq+lJU)7bIgK{LZpmG-gec#`oA4tW<(xt$KTm& z#_cTB!FVYgS;j}x;r|KMeyQ>JK-6FgVvSFm95SAoASUu-lh`O|f||Pc$${Q*MAc{- zdYDh5F=zVTc$6>|WrU^AtZp4+PuPy8^`fyi2(4jc6KnN~tV1-IK6vnUf5^Gs@ zXt|L!<6&h1^o5p;!>5_S!X6scHkvEU7@Y>4rr(Z59@7uQ9$(wLW=x(+y=ZW?WFdM| z;$Paylxb>ZdbAlWhuRkPGviqo^X&f{KW?oS$fmwDu)0=*@R@^97Uugt)v-hI|KIsP z5tf@-yK{Bk3GmhQ>M=XLdNiBaJOy+2|9drbF?SAu{kartg?p;9QKba4j@5g6TVEUdzWwG-K4PM2Xe^ zo6%`{V?p(ZH2oy9#q?S;hnfCo@`f{=kq>9dP__Um)95_Jn$=`R^{g9NF%wz~s`DDH zQQe~cm{>h4PKfCZ=l2QdttQ>9VKrVE3(08pG~>Evjxc?CYW1pb#*eABNi~*Fsjk0S z`%J8vYs?sqMq6lMXnVLmm|B@pH4$Z-wY5ez8b@JTICF*GG^@BN-+0%|O=jM$@r|+H ztoCMAF=O7eOSoGJ=Yhx;(^8QoVM?g80C|lc%!+JwBBq1_XsMZFryTF}fj#+s-F4xI_iniaK4C}sLkXmPmCg{!;q?@aWWa2_+>G;683o*0X(@tN_V zNiliM$~O)+ zWKuvEKOB z^og+V)Lz9S&(w?pASaor*PfAjkn`-5rY6sY$I}(9mi>*6caW{><1k z5tvyg@=auoX{VYM#LNq({fwN+b!c1G9>3wv{_x$ZIVG5}Zpy;n4XIw~{+r*!ab;G5 zuuQY^8IMP&uh5_2Tr&ecH#2wCJHnQXX71WEMmUF0gZy;l2xph5hGup#^MScGWAV^E zf9)}GXe)uHo~KQlL^U_o znYqRIIh;jI-NIe#q33fmpBsy7Moic)k?n(^!JOue*UZ{;_^$2m?2h_RwC4zS6Vcf_ z9LqIcIJ{RzE56xlnAOtw%G57x-@jXj{+Iq2)pGz!I&|e5UhTc;e!<_>Gisl@X2!60 znv`h%HG3XYN;Kl9RnL@0!t`phMwt}T^P=@*Ci-Bw4>D_YBw_Z*W|wT_&AJipE23Os ztl7bs(Pl~yr6XGp-I<1atHV8gsLa3hf^e*u@o2`KDJSePX8kww+4Smh8tnj0pA6e7 z+Dq1WB-}0kQ_l$JZnLv9J!2L=P5&@^KC^F~`sZvLwQR3yzZqXl$4prFY!`XPw1jE# zN!7Z;b}p%gy%TevCmOBc{2A^`&3VkUb69s{VYDl%*=L7+C+rE+AsOD+irOt)8xO6uxd&wC zA=3|{zF=B7nz5qQ|KICvdSX~oxc4#F(LOi43l`}#DPhY+7S`l60bNY~%@ws1FO#&tB3!aiWMn0ecjY|0ByX66o@ z@sY8^oRo|O(Hw8QX{61G=kS#$96SFuQ-}M}XtfXbon~~^K4XM-nYNl3TctZx*ZhAk z^S`%QxGI=2^mk{Dzw2qHhlRc9e>q0m|ij!^2Xz4ZLYa9XKV@YP=veUubb+1uQmH@1a4#;BY_{cLJ%X8mwyZN{ouUqg={y0;4F*6Fda(=j${n04#lM(6)B zPQub^`rZ)O^l$s=+NTS%2QurK*(uCMTZLyjGY8bH&Zgx|j|vwM*Su&C6ZHl& z3c_a`W`Azxw3?k>pXxqx=m@EO7K-|RI5tdgjPA3TdvMV@Y;;EVSi&=Bv|b##D+qg| z>Dy5cF*!|bqf~J>{-bo5a+<&=CVM>U6Z0^;EB^}xe|E|Q^`lH@e`;>6# zeW+-@Ful}_;Qv3|_xk%j5%!>*KhH7)sn?(Pw`xB{Jk&>F8=H}2R$DXsPeq%AGegu$ z7a%o!mTY=k*m`DMA3Bd6zSjw7_Gn%4Q3iHur*V^X(9$ z>x}C1&7I90XfnA>KI1uKX)lnC|A!#eJYO7!T;|He4#Q_QxM38=s-?jn~W*$=coteP7f6 zjMW(^#n{^qvNdg9+g{`OzqWy~`|v&xwXV@+TF3Y!tJ(`g;lI9+H=4p2li&0n)4%(~ zJQ(T>d$;jwBpdoLyiZ};Z!lUj^n0l9@LHI9ji~lnjn_jz8w*W8F(pR#Kx*39lxb{^ z>@j`T^rdJd)Of{=GGmD;$+We(G9{RiV0zLZ_|&9DqcAKZYR6&Kc_XbR-=Y4EMoT0Y z<@=`@BJ@#Iw$WWvchftJFGCAWD~I0rcfDg_B!}|u|_s?SGTMOa+x^f*}rkNl9Z)~oezqZZhuBf@&U{hj)sf2*-} zP0ZQXe2c=gz8U!w?M0Xc4~zSYfBRO?Apfmp5BFk zs%-8>)R~I)$fQh#g@gW6502Iu)1zx!@Mqg1Hk=KjdjZw0Z(VNZG>U69w9(DgYwtg` zYBW~NGvC@J)RbJU(Qaa&)ad9&C)cV$*w5-}FVp+}Z~J;}9kuO>^cuU3AOBq2th+F? znR)Dxb?z_y#Ox2vdTj3M{TII+-aBgNuWeUd<2BYNl5^`E8BvcvyeFHTR#eOX#lqV8 zYg-uU{Hy26FuVP?GHYh{S~;wB_L4@AVIB{^176bxHFr%y@6?p>PqOAdv1!E`>BBu_ z&Q4Kn{ud9`&i_vyip?ihIoj{=nWYc z;qaMxiV)q?HGVWJk9iIeemgx}z3XnTx^qVs)%1I_hMAQp`j(XW)`{6Snr9~yQCm~1 znw_`#u5P&7s>_n-iC^^mqUH|D4Cpsc0*$_NP@2(Nh*4Ts4NZ&Bsg@jsabvta;cw0t zVc#&8N3 zo_-6)o8Go#jS?cu~u8Fo%N;lg*Dxp@s}}fMrQaegSvW}Igv!a#}cmG zp`Cy2b!L{W)??MPdRQl06L5yCJ8yqT&H-lb|F3Op##rrdozz^9MJdJ`HQ$nmJP|(I zFuTH<)-?Sn{FY}_19RsnvS;Yul=9c#mN4hgzn=cg_%WwAGiFSmH8reh+o(kIJ~XB$ zhySig&6ujow!g|BZtb7>9Z`q&NIKMG&Q8WJW`u?DY*6=%S67aI?r)L4@QLnpv-a5) z_6zol_Dgk^U!yH2W=Xh5H}_JESL?RR+?5W$hcXr;p>`dkeqUpUnIHeuW`Az)wLh~z zwLe+YV9n8M;@2FtratZ})z&tn`Xpq063)HGSK+rE!?9+5C*|+_6}9NUSOLw^zQ<@b zYr9#s{;s4Zw@K%$#p#b7PIE*+2EH$PRNFIy6^ZBWy18VTNtOX;c_O8BvYQ z%oW`~YJ-xEPr@gq=6*udODAD0{Jq@Yr%kHfyM#}I&36%M?)OCHn{~zPO2VFNMnrVa z%O;0bV`;Df4&HI0?KjRpDJ6E_ryDK!EoQ z)T1~$lIjzF=betG26PNHq+{`Jf#dN`fkt?X!3ik!L_AkGiJDR~d_SxOzMb2WT2X7< z&uoLI{O#~NH|_B|IUOk#zq#2Nzkid4-{$N_-6@?;rXF+(-d|wuX7;JRCw*%5{qocB zJAVDKUmAcuY3`&C#(8BZ`r|OH>m%^Y;S99QnKYWt!kZ7yp)siIIMms!TNA6->vQp) zrpc)P6!^*P@6N9tLFQYK7of+@Mz5Jm7t%bsi00G9cn`xux`ZyJMRXZmPFK)kx{|J< zCA1XpWw?f}rDb#-T~9Yq5#2~P;m>jx({j3nR?tei6>nI$ol58qT19u#YPyT=rZrSb z_t3p`pJmhi^Z-3bYw00cM-NjuJwog0QM`%aG1^Fv(+%0=-Bt(KdP+?}>PoD(N-Lp*Qf>hBxUg+JX0^yiM=WyR?hmqxb0p+D#u;??8kl zEoI?N2$pC0R$v{09YUOSq*dQK%8Iv+wi;N+RPR!bvyQhCtVUL2>jW#&I?-xkon$q& znpw@Q7FLqg(rRV3wvw$j)w`J#tG(61>S(1}ovhAQ7c0%`YIU=^Tj|(&^sr8`dRo1# z-c}zg!|H3DYW1@+t<$X2t^QV)H309E7-VHzgRLRfP%Fn8W(~JSSR<`7tWnmP)@bW2 z>ul>BYm7D48fT5Sa;*v0L~D|jXPs-EXHB;9ttr-2YnoMHoo{wiR-rZ1y1<%c&9>%P zbFB-ldDcbNeCuLsfwj=O#Jbd4WL;)mZe3w5wyw0UvX)p&t*fnTtZS`h)^*nP)(uvX zb)$8Yb+c7$Ew^s5R#+>oTdmuy+pQAo4r`Tlr?uL;%evcIW0hL>Sod1@S!LG!)&tgq z)>`W!Yn}D5Rc<|Et+yVvHdv2Y8?DE!P1Y0Elh#wwsl(c8J!?H@J#ST5FIX>HFIn5H zm#tT-TYIcetWT}a zti9If*gt-0?X$kJzP7%x_FLat-&x;V2dp2gAFZFPD(h$K7wcE+p!J*eyY&ZNs$to- z?byubwy>qGY;C)?XZvceT6O z-R*SyWV?rbirv%hW%s7v>^}5|oniO2Pqq8mnf7V+>2`lR%N}44v8yR7uqxJ3+!3;Y`U!M_GR|v_7(PG`og}_ zzRF%=FSW0>ud%PSm)Y0Z*W>Q<=XQ~OqkWTovt4X2w{Njm*emT@?c40z?GpPAdzF2s zz1qIZzS~}7m)iH(_uBW_W%m8{1NMXVTKge;o&B&~Za-qLw;#1P*pJy8akkrJKVd&< zKV@&WpSHKy&)8e-XYJ?g=c5(h-e$jSzhb{?SK6=HuiJ0f+wC{)x9lDEPWx^99s6B- zm;IjozWsr{+y2o0$o|;gV}F9R`7^B1pW9#9U)uZZuk5ewZ|wc{xAu4T_x1t%2m43+ zC%el2+5W};)jnwdX8&&g;gDlFw&OU=;f`>mqa5wHj_3GJ;2h!9bK;yMo%+sEPP}up z)4(~#Y3Lm59OoSGBsh(n#?A>&qI06t#5u`n>NInjJ1v|fr=`=%Y3(FCZJf4FJ150y z?{siFI;l=4r?b<=Npre7-JI@Dx^uGA!#Tz2>GX1XJAIrCr>}FW)6dCtPIFFo`a4<9 z0B4{x$jPRkoWafzXQ-3o40DD%Bb<@W8O|u@OlP!nmUFgqjx)v?OFui~obgVsGr^hY zOmgy^bDi^?$xgmA#hL0%a|)dEo$1aDr_h<{T;R-dW;=77xz2^oJm(^3zH_m&z**>A z;#`V1dRIA@IhQ+EIE$StovWNB&Qj-U=Nji)XPI-IbG>teQ{>#}+~nNs6g$hETbvcn zO6OMRHs^My#JR&+<=pA4cJ6ZScGfth&OOe(&V5dqbHDR|^PscVdB|DkJnWP^k2vd{ zN1YAMW6nnBac7hBg!82Hl(X4++S%ef<7{=Fb)IvccPgA0oEM#!oNdm_&MVHVPNnmj z^Sbkfv)y^qdCS@1>~!9C-f`Y_b~*1k?>iqjyPXf6kDQO4JrWt}JFVhcU*PV3H}O znae!pvw$7J>ajR>B&*MkV)5)~)_@(u8nR>AaqM`Oz#6f}>;#s`PGn8kNvtVr#+tJh zEQz&btypW8j1yB^){dpH_N)Wz$WmD+)|qu-X{;;j#=5g~b~5Y1PGLP+FV>s&VHvD1 zJC*fgnd~%nI_r=3R{p{Uuz_q4%VvYw5H^(MuwiUC8^K1hGuSA0CL7JpVrR2+*cdjJ zjbr0kE}Ot6vPmqDoy*Q+lUY8S!ltrmtbm=*rn4EWkj=!Oo14XEvpH-oyO7Oe7qR*5 zVzz)SWS6i@*&=osyPRFY7PBkaRcr}c%C2VDuxr^eb{)H(-N1_2jqE0NGb?7x*)41Z zTgh%^x3Sw<3A=->Vt2CD>@IdUTf<7(J?vg~A1h<`vj^CNY%P0;tz!?fa`p&Y&mLtP z*kf!Xdz@`zPp~K1Q*1MPnr&gvu&wM__8fbjRj?P>i|i$~jlIlXVXv}E_8NPgy}`D# zH`!Zk2iwWsX78|f*)H}Td!K#4cC!!JN9<#^hke36WuLLV>~r=7`;zTrU$L**H*7!q zmVL**r;phI_5=Ho{lu!+&+HfWD?5lkb@n^^gA=#7%^l7-=YmVFxaKbRxX%NA1h2>A z_>sImKZ?imqj>{<3~$Jf<;U^kc>-_58}k!*B0rHg;V02YyeV(SoAVYtiMQmfcx#@_ z+wiu$9Z%uyc?aH+r}9p`Gw;IFcvs$ycjxK+WZr|H!h7;wyf^Q|Gk9NqD(}ZL`Dy%g z-k)dj0em1I#IyNeK7UBxp35ijiF^{z z{Gb_;vhxegiM!H}aeK&AgZ|=eO_`d?mk?-^Op}CHxM) zir>js^Sk)nd<`$<_wal9eY}j{&mZ6q^0oXSzK%c4%lRXGJ%5yM;E(Z*{BgdCKf#~m zPw~zCX}*O&!?*Hh`E&evUcq1BFY=f8HvTeyg}=%x`D^@j{s!O9-{f!c9egK$o4>=~ z<-7QM{C)lb-_1YdAMua*9{vgclz+ze^3VAf{7b%%f5pG%-|+qXTmBvYo*&>p@E`e4 zyo&$Kf8oFKgZwxCJO4wFu!Jof!2}mVNTGxluJD8}0&#?>C*s7BqP{pv#EYXv196OK zD2^4!iQ`3rXe1ho6GWmoQ8W=JiKe2NXf9faB+*i|60JqDXhUC%wxXR#5$#0>(NUy| zPNK8uBGN=x(M@z0>EdM3L!2Udie93(=p!;jUvaAFCo;up;&jnpWQhS{pco{w#bEkM z3=u;`ju<9}ixFa^I75sQXNu9{EOE9tM~o3;#W*oup(*pC}XeiwDGmVy$>ctP>B5a`A{*FCG;e#A9NkcwB4}PtYggN%54}ES?ry=%9E; zY!%On=fv}(LcAbe6fcQw;$`uQcvV!2*Tn1M4Y6IkDc%x0#7^KQ;zRL~_*m=_pNLPzXJW7TTznzE6#K+i;%o7Z*e|{n--++V0r7+QQT!yT#Lwav z@vAr}eiOfoKO{*@+R~9saw(*gN^0p!Px>;DN630IP97=i%cEqxJX$u8$H<2ASb3a0 zUM9#!vavitCdw0K6M2$sDx1mXvV}~NEoCd&S|-ajvaM_vXksAyT~-z zRd$ozWx70B_K>H@p0bzhE&Ip}*;k$_`^ijsnmk?hmsxUv94H6LY&lpCkwayU943d$ z5ptwFLynSX%F*&HdA2-9j*(;KI5}SC$_a9!oFwz)x$-TyBz2$S37fa4OD|vwi>L4sG%xH4O7F_2sKijp+?~q z!K2k#>TGq68bhC|v1*(euX5D{HBn7cdFotso|>%k)f6>VO;ZKxd^KIoP=#uyxa=->MFHFEmc>mYt*%BnYs?I&%Hqv zsTSk4}maALT3bj(*s%}%as}gmGTBYt(tJPift-2ept}Rvf;03k!(RZp$-LD={ z53055A+=6DOy8?=^$1=t`>5KW9#b3D<7$(7LOrRTQk&J&YKwYCZB@^z=hX9f@#_nC z+3QPcn|fKjqFz;%>NUL3^$oRMy{X<(JJe3~wt7dst9GgP)cfiKwOf6tK2jg6J?azn zsrpRqRiCRb)R$_X`bvFG2h=z8gW9jYRo|)a@dDN#)Q|L|`bkx(pVcqwS9MVRrhZp{ zXuNDy+uG4gb1k&gN^9+EPy0I1N9cMwP9Lf3>!WnMK3X@>$LNOoSbdy6UMJ{Ay0Jb% zC+ZV*6Md3ys+;NNx`j^CEp;p1S|{r^x~*=fQ*?XXL3h-tx|8m#yXZ9CRd>_fb-F%T z_t2;4p1POrt^4Q<-B+Kg`{_)5nm%3k*I9ai9;gTDY&}>H(L;5P9;S!u5qhLPLyyvD z>e2cveYQSFkI`fGI6YqH>Ir(Ho}}~ixwKcGrzh)tJw;E|({zD8Ur*OFbfKQ9FVM5} zY(0m*(R1~MdY-;W&(|001$v>rL|>{G>C5!x`U<^RU#YLsOY~BGwZ2AQtC#8P^!54% zU8HZ+H|d*ov0kok(JS;yeXG7r->ysa9eS0%Q?J%{>AUqBU8?WV_v-s}nZ92?pdZw0 z^+S4{epr|5NA!CAsNSF-(;M~UdXs)aKdGP6oAuLri+)CL)z9kZ^z*txzo1{#FX?Ui zW&MhNRaffQ^y~T!yG$;e`UAaNf2cpwAL~8(6aA_FOz+j7 z>o4?|dY}GEf33gK`}Mc_JN>;rpnuRm>YsF#{#pN`f7J)|Z~Axr2YzkOa&6afnaf?_ zN>{nsbzRT(-M~G?zMN4oXhquhA+Xt#lTjN6d*yT`i6xyQQ+ZX>s`dxD$jp6E7l zPjZ{O&D`d03pdGa>9%rPyUA`Fx2@aGO>x`19o&v?s@uu!>~?X}+^%jnx4WC}p6vE; zPjP#?z1-ezA2-A8>z?ZNb2Hu3+|%9uZk9X19q0~nv)#e&5O=7X;|_C&yCd9@?iubV z_e^)RdzO2)dyYHC9qW#B$Gf@i1b3o4$<1@mb6}#?#1o`ccFWUd#Ss~z0AGby~17WUg=)tE^(K-SG(7^*SgEx z>)h+z8{8uIM)xN7X1CZ~?%v|Aa96swy0^KvyCv=&?ke|AceQ($d$+sBEp_j4?{)8U z%iR0j2iynUweCaiI`?6>+_2oa36Cwx{te?+$Y>8-KX5m?$hoT_ZfGq`>gw% z`@CD>zTm#-zT|FmUv^({Uv(?p*WB0LH{9*+o99zuDi>9&wbzhz}@YB z=zipW?CxuXn1~&&%{q^G^5rds*H9Z=g5G%k~C) zL%gA0jyKF3?v3z9dS`f}yfeMg-dWz+-Z|bFZ>%@Y8}H?M6TFGuBrnf9*E`Rf?B#n? zys6$aufRLso9@l<3cZ=$1>P)gwl~L{>s{#0^Dgq{dl!2PyoKH+-lg6m?=tUl?+S0R zccpigx5QiOUF}`tUF$9LuJf+fPqu?v;3Vc&ofS zz17}b-re3BuhhH8yVtwVEA#I69`GLY)_M%51(a_Fx8r^1k-I@%DS)df$29 zdk4H9ydS-vyejW!?-%b^@1Xaa_q+FpPrl{bzT-2W`@)yL^0n{!p6~mCe}rGpkMoc8 z>-$If@&3_%1OFJmp?|D@oPWHZ;5YIc`zQE`{)v7Q|0KVu-^_3BxA2qvmVPV0wV&*_ z@!R_C{1m^v-@)(br}~}z&VCm^&F|`W^Sk@${>gq1{}jKc-^=gq_wh6QzW%9xKR?qy z%|G4m?`Qb~{DJ-;KiePd5AlckIsPzzxIe-l>7U_`^3U`~`)B!Q`{(#${IUKxf4raT zPw*%Dll(mYT>m_OvY+oy@u&LJ`~v@cf4V=zFZ5^n7x=UM+5Q}Vu79CF&%el@?_caM z@E7`*_?P;N{LB2y{VV*%{+0e!{t|zwf3<&&f33gFzs|qjzrio^Z}e~SZ}yA*<^C=H z3V)@4tACq+yI_6>q@t^Ux`p^2$`Oo_m{tNz#{!9Kg|7HIb|5d-z zf6ag0f5YGIzv;i_@9=l}Z~O1~@A|v^_x$($5B%N!hyF+Y$NnDw6aQ2HGk>rDx&MX# zrN7Vr%KzH`#^3LM>wo8e?;r4g@PG7w@~ixx{a^fF{e%8*{_p-D0R>iI2Ts5O9*95& zD$s!&c!3`T!4W~dATBsEs2>~^#0N(Q4T58WhQYDHal!FHLeMB^9Gnm&1}6qhf|G)# zL9?KF&>~0*S_Z9x)VN? zL9d{9&?m?U`Ua;4{esNkwBYohe~=Xn2nGg&g6v>$FeDfnDmXJ3 z9h?=M9h?)43C0HFg7HCaFd>*2ObYUXbA$7O$w7WFC72pa3krhsgXzJHpfH#jToB9( zW(RYExxt0Oyx^i>esFQHAXpe&5?mTA3N8yS53UFn2UiAH1xtdZ!PUVv!L`A%;JV=Y z;D(?mxG}gXxH%{emIt>4D}t55t-)=gww5nlI^nY46K=U*3DE_Z7Xb?0r@5t9xJ5``X^u z^}fFM4ZUydeN*q7d*9Og*50@EzPU!UL8I#d{Fq{@S5-;;X}iR zg%1xO5ndZUGJI6{=&%(&CZr(?c_=~|`e6`;VHC#UW5dUVj}M;^J~4b!cqmN5G|WO3 z>M#!vheyJr;dSBl;c|FGcw=}hyeYgnJRYuuC&H8AE#ax~*6_CQ_VCH!Q^Kc)PYX|n zPY>JSYS;<8VG*u{>)}S&3-1Weg#B18$g})sBO8D&XIpME{cZa_g{(ATu;cte|4WAeOR`~qzx5M8FUl9Io z_WGnfYBu%m;Pl}{W`pF;}CZlAWd~EV@$;T(3kbGkDNy$UWB$+0& zq)O^!o;;j9l02HcE_r=&IeA0!#^kZ&P05>+$CE3`6UmdwTau@ewN9mzAvesVK8NDh-*$vcyyrm?hbld6OAoufQt2gFTY9R} zQ-G9n+AZByx&&6+W0fAa*~Z<{s&NUXw$nT<*0!%6EuLMlvguHj&Q2J ztSg4y`iV+e#t`2O`N_1fyb-l--V58!Y)H0^6rJ?-qwQxGO*&35a7oqx9@yU5JK8zD zd2N63t_En65Oe7)px*$RDdp1`Bdb!wvzb~zn`wGBQwwM_jn8Ik0m>8^zR8r3^P4pB znYPUf!FiKIy=VbV+u(Wc88dYNZKkvI0RMZbHdBkBYH9)A3-EhWFcGhLMVB-Ynhf`C z@9r&*7RP(X4JdXj#pGB9V3sTcFkKozuzAxY?{BA310%V{lQV!xi+{`!XV}>sZ8NN+ z!*7>-(s}=0aTd@{$5_~S-ScWv^dC4p+P${8xp%O4(6aV@V*QQ;x^J7@-vPxjunEwC zy}UaII&iT^)tzf9za+ly6XV@h6ufsEY_cSG#S%+mM6jhPF4$7DhDv0eZX3=m9oT~{ z!*rl54W2m*i)?ci7CEN_yO+$}k^ivTBqjQSxM?bZfeO zG(CKdnlaKg?A1V{*Dr{DKbWBE7h6vp-a?6(Yn~Iy$#5+FXJoabR%SSsY!x$WLuta2u<#Gz2DtG1ZcCq_CV=T~vnfnRu21wOCI1d0+vlkd|}P zbqu}%RWvPYR&2-1p8#$y!56eKBYO{F@ukwZTWp5`S2$twb#bz-(Icu= z--w1bIO_G7JuFLWE6;|Nk*ROstQyZOSx~Dj2BV8Rtu}UHXZP@AduL~HaAJ+rsfaoe z$zxE>Gc`H3ZhI?WGf@lz&3l=*u}oK zX5`4uhw;!B`D#2|Dv@U#2h22$7l%D9`x=0f#3+eaSyv`q%d*;f!{Nd8!~H8K&+jkD zoVn;Xs8(lN$EUZ5Qd(Y|b?7xaOD|PC(Ttxtg*dQz!GX<2!QL-3}H+Ofov3c5>#lJ?ZI@LZX<1DH<@R(i3KUm@rl-S@${r*Q&Ey(!bE9`Qznvi;;a9KTpOefKEUkZVgEy7b0HLIA;`y^jkv89xQ1hR6yEh(JJ z%BPDaNS~`y-jAyhrVqtN-d8pj0~T5Z`eeXr?EFmGn{%N8=UM;Is^%N5Czz}?t0&TZ zUn`3^!@&`;ajfSYl~Puja?CFEsr!KcM0e6Zs|szRn?hP)mf{m z&ZMf&T2*x>Rdv>?sxBzf!CKwr7$mzKGh45hIEL$ZooR_mvkUcEj`68Z%GH_DQ|AY{q^tH_6)jjQ02Sl+O>U@yoQp()2T8@#|4d3%3A4X^lTPRe?Df7-@If;2!=dRR15#WZP&cp>2x1gOo?X*hi#3jccEpfdpDV2bS(sOccRaS9Zg86T@KHru=NQsZ; z{fpOI6>IH!XYSQOM<-WKj?7$-?SydmMPBRgfSjIBjp&mhTbxX}B7&$?R=GXG_Md_*$eO5IP1|`)zV>ax$wE~u0>Iamb zT(a%tCWB5+Xpoi@ zA)#w~c2u^>KsooQWAzjgn(Vdb7!8JHpR%#erEug@I4*=;?-@OIO2e4DgmqUxD{yi^{-Y}DkBy#!vK4z|~wcrtPq#!Az-tnc1? zM+AY*jJ4qi_+zW!BIY7)F3q-@j5glU9Z=ZBhvKUxK8THYKKqWwJZ&xH{W;;~xC=-B zO-F#Ssw%NB zx6M#8IR$G@9-KxMo6nrl@NmvxYtCdyi!(x7`w-fE1}9UG$+%a?TFJ4<$8jD*^+TQ4 zj~0uA{q2L@y&WT7SO^n(PqN@-Or3+fqWECesO zz|h($^GNA4k~RhmkgVMlAhj+8Wwp^3w6Ja?%r{?uh9>B{6+-B=(ST}Q&&aKyg%KD7 zZrjT~jd+knvfyy153%DNC+SKl)1@2xed9SJsr%Z=m6>-{gyQ&uV1sq-d?UFba;<*w zgY6w-*e+RVUT16jw#aWX;J3|v?s8_W!MD(g7j?jP#VfSp6~5xd1)(misTeNh)rwkZ zMMWd+CU0dQ3*8!}#xjSpykk_uZwo8w?rhAC%I{9qEJ;1C08in5y0D69PlZ^X`2JiEmdla!*?k>=D!z!IEl zIrE`@je)Dhy6#ssvPB1KvXS5#88{PGFV3jUtDy)(@1-2_@CE2ex|Bv8P)W-_ttC(s zM6J1~HJ7NH*&#+hGr1x8kY|m-Tzoa0uN^c43@iS4?ZC#%W=~B)4IH!+))uQShnokT z@wUtrE%wmDB-tAJamSI9;X%Ke>-#YG-RV7xgX@>@5B7@V#Xj!O#CvZSBcQW=1);O` z2z&kE%{bokjC!sNw>gIi)kp2Fnrn}nv%}b|b6#ZYB7!Y>b_4pCwv~)IOA2aU$AvO#FFsf*x zgP@Yf5}*nBzN|UEWv$hcjEf~g=j_Ix7(FI+1Y8UTy~7))xFvaXdUJpK^n^G$^H(~Q zsoF!CN+&Q?djeDGN^BLc#2ORUm5y$z_UML(8PMP7Ylj{Ey`r?KOPe=1<D^z*2(k7 zi<7M@5%%`Q(cyUqMXXVc^#V$}4K~bylhwl;herpZHXEs_AZ2xJjb}9vUyv?VkN2+A z9t>qn==0zP=gvl1>_Qw)M#`uTqHRjDJg0#Z;nrif76&+ewAL#frB!}=fD8*x6GKwR z!4fA9Oy|AF?!a>VT30&NfzhC0CLQ5a?Ga9;n^0AI6ROhLPt~6N=;R|$>$S(aEGP2# z!Itj@%XC6*IwYUL_x&cs}|KytsM;%0d+DcDn5SHNFA8Xkt=D{~>9V{|leXPSGRst*`GS^zpc-R9Lk<^$- z4;k{*+_waNB?}~qvnS8Z{2s$)UMO^C0L5~dN6g?4Uh8+^*zFn5;XAmjwhd?4#H~*_(3C9I1|Zjp z-p?khZ}!s2WdjVA(?kwx(c)`|%0R&|1Iex9StSE`&U<>;FV5DE+GQUdCbMS6+EHiu zCu$(0RtTMs*Zpeq=*;qujf?t00Sp#0S^`5YKXz*i@_Zy^s@l3tQ-7q{Tvpti+cxya zsDMxm${Rk}go_kZYw?>KfOra&=2Plev`}ESILnc!`7~}cuvy^bX;9I3XMmfvZ9`90`Q=-KzS4cy$7FJ%fdDc}V}d)N#|?$DdeFakylg}Q%yNmb}J+lj&^jO1HP=#Vfb<*654gYUR;3vvy`CJ<%J@0YCX9qJVS!W;$BT z$mSSV;{${eQxuLV_hXP-oT$a1+B`AV>|T76C8_p3srHt&HiNZnlzBE?V_~uu2P(~Y zg90MIzjA_;C`xyoD9v1aPAXp5!jqZDPvTNTLTWaSG-tJ2vXRBs+-LG*xc# z*hGso2W6^bOWCCI2~RO6G)820U_0j;fFn<9AG3%pOtc-&M@GfDZ!$_*pi|mY%e=Lr zmN?aEEYR((eLm29&ybJR8}xk}80; zNNbX8`^5m}vfH&=*_L4ySv&1&qLAx6yojnq9Yu6k0oCmv%y@P|RzYm8l^(2~#=G;z zcr|xKFi~n<(^Z0+42jygYSwYntRoMF%StF@fHS9s;BP0@c=fbZDc$DGofT9_wOiZb z273_2TBXTV`9ZMO-%7U6+;4qO$Mz(on!;Xpa}VYbUQXC2JY>2)?<}mKM4j zmAhEf!^VnB7fxN&3mKX!T71x}QSZq+jFgQhgzsSO$?ixMYfpAZYPA03(n$G;P}-n! zjSwjbF|_(*XQZ5-#%dTDOCuyN)iTl4*wR0#ky5xkQdl&_eH4oG`04`m0uPKq=gid2X+}09=Mqq?CJWz3+!AXE>I-&X& zb%-OocZ9Q1m~}Hx+Gyr>(;VKxmcmg}(*_BQ zU>GB5?{4R%6$Vn>m=#K}aGDNR4{t86Z;PjwJ{ z>I{OKmAd*obJd>;4CWYVms0J*X4RR^5Q8RIV5P@pK=q?$l4fqq9Z5@dt9B{q7f;-N zOnY)#>Mc@6RRFAPLxGc$;+~ymV`)+dYZ99*d8-GbY#4#$n(w|k^9UO^0S%5ev{B?T zjB;#asQp!0vJ1K#M0pCunX6MdFV>zXQJ-&#YNJy620(9ra@}4lxnuwVE)}&l>NOW2 z2qy8!ynp`rrzr{V^T=(Ya85l7&(x@AS<{*H+S=)9c4=HnF!H0zwBfm;r=z2ZS?lMi z^s9KRcJu14vDjqjvfU3i%gAyVXN|9M0qR%CZ3i0S%23KU)b~dmuDy(+2AJYNdF^c^ z;@g&q2x5@VIq&ykG$nm`l?mU;^SNv^rS95g(?WzPI)p)XD-Npz1yre*1EuRfTqMvD z&bJ|z4s>74;Jt?lJtUF}Ir93+H2*h(TVhqndrmjB<*7s7aE+6Db{9ll@leOrLdxl278U2Xj#qO4oI=sg89y?qJbjMC|a;DF{H4$;n=0*E~Kq|o)XDC9iPDgrZ@KtgDAMJOGZ1{^fd-?+91KNm({i?M+S>om~hiVehXZNDk;GPjqG2Ir6W z-bH%Hh|w-aI<=v{-Y_+$?+A!CFb&BH@D4zW?f6$d`tK)7{v`*>#`VgWWV+ErbaxeTLS zaWc&6jm5impfK!SwTC&dG)ymIp2jdr#n=+!Gr?vN7SLO)47ggg*xp?n1$4EOj3v_C zZ#0j_(H4Um##>MA?JoFnYE3{FYns3d?>sgU!`MXcou_CZy8k&kUKrVtyRsYz9WmrC zRm!$Ps@;%9enU3G;3kF&R4&n+S2PT}HMOFo9pxL_2WWSHvA4}~R2%S^(}o(|uzGS_ z>E?IpzmRt}1b~wzQm)B>Q+f`07?%%e^~ej}+QacLCsx6mc1W|HLN65xS{;y|!mMCF z&<=H9y*faeJEdA{d_ipw=N77;SJ;pyK80@i6g=>NCjwhj5VGEaCuvBt-h$UMkR}@! zGG`VtR~K46bV7!F!k612q(vW{jnxgy>e4k{#W~AjC6ih)Go1>2opnEQ(amN#uWRF_ zT&JXlMQ28?^2mJy&g(eEhb$b^EX={wWf|Sq!pSkWNU+)1_gYt?@QTIKvUKOLHY-hH zIGnC_9E4Z5w7)JxVCrw z^k}h*<9Ms8<`t8?7V4>6^eUp1a{mVYw64LQfmMk&Wkj{m9jys9JeUL6WUYtV@@xRO z_>Ql&kS40~cz`t9tHtkn`42c3WxeG_ntZA^+_)OZQOLUlMKV1lg;2IZ24#)&Avu&z^9M}MrTNoMPjGjj0XEHbfo98#H~{!QUi=2E^V?Ss zpGCLrL&Oh8evKsc3G~P=#l&#y$tR;96h<#M(aV=(LG+7Uja#8KY|dv}SNEUWKYVWM z6n}1m-Q3%a8#Syjo-Yb|OUC?d0vQc6WQ?8k0ym%mSk1QsQ6d_+x;ZK32iX>?w#x_w z<6HZ&p5`k`HeV6vS9WheVPfTC89O3~shBVenI~9^^uc$hT8xTnh>~janiB`13DC({ zw=4<|(}6~IaHruOJbj1Z9z1=A;T}AFr{Ts}rnUhp(!tj3`71D@p1k4cnaTdiD?q^X zHYhHGJ#xa)XlBCP+%gn>AI4_5!4fSD)U8qVSvl@pdKs6HL<-Q3&f$sj}{x19w~x-l6|kU?@%c`F8KJ35TFw1UAf zZAXXkPOtIR7H_M=X=P=cR!yQUg*j#t3ZXF5%@Y_`#c^89sF!2xnJ?F?{(?#eM5IJE z_w`s5xCj*PXhxUtib5hIdKZc_%Ua3xcCJWE_94aVjQCa4Xns~|0*@93p3yXNn5NOZ zJB^S)Ms;dV0m4cmeHy9hH4}&unnqT21affubvsaucLxd(l+-k`cuR6LP_vVs%za#Xr=F4d7%IU2b$6@ot3FYyYPzDj9 zJl=LWLL#A9&Ix6yn25_ulW0qU6K$pk8FMx{+U&B8_3WuyzpIv_x3+ zmewfpub%%FI-@V#wU56^k#%1?8(?LV8JekKkI89z6GBg$z{A2vtnx~|<<1G^B`{^t zE_6|2Dx+KB*Hm~84B0`Uk?Z)QWF5#i!dx zp-Eg>T+g!QWWJm%mXqajvcHtfm#fN`tIC(F%9pFkm#Zq4t16bODweA%ma8h3t16eP zDwnG&m#Zq5t16eP>MvK-U#_aZTvdO$s(v31%ETHNm24@MFQtm5RJoLrU%`63b##2P zeeIg9t9dvAl^DO{{EU zeZ@viiP4sq7;Slp(UzAOZFz}dQv!ZYRm-p`F>FcXUv*4Fcs&Zv;+e`9NqZB0}jU z{$hmpu_NxR2Y04U;c-p}FN0!y9#a>tHe_BKXle7;EPd^m`?}hRdygC2M@NUx?H)d7 zFI#YOV3?x?Zr0%CeBE-K-cpO&pp-RBp5EF$*i??)gR932g7>jd#saYX+2W6od#`*x zhnILXc@$SoWuZknm3zek4j9guZLpVWVNhYY0cl_|gyfwK2)?x&kbw+VED+V%&IY6l zMAh7Yv_SUfDo#Qove-LXKVVHa7C>kO$+JmKGGK2IsmV?vLBiqSwcmV?#OaalSNDw$ z%_naM^-_uS_t5f+0JyOb%!{TZA~aUOBTZAC;jYP82cfYN;HDKJZLEZgwZfY_PNaoD zwI{qUp^-1w1Nxi?&v^C4e&fKy2F<cX1^9e=8m3pUA@$Ly6w6U{#vI-kInQ_2- z1GrB{q6~N&B_Hq{4QCie1U%R!F$?Y^kS37}Y4`;-b-*rum$2fDw`zadT00lCa@8;=9v@lO~nz9|k7o_Y-T`_EYv7^yB1$ zB}()rpQLUL(3wE0GcWw&O3a7$*#C^?s(HraO@yR(R!|$99=;1&l4JLl=WAINr`_A>`A zO-+;;boJVB4KmU$A_S=8hU;(^b(j3dI}doY**>cj?lwgl#0v0?q^-8-KpjbBh~uHW z-Y%d%_Ha%b`NznEA4WkGzc}Q-5P_8c4nzl}pUg3*efFb6B(}iquVgHNaWPpxQM3fc z#q~RZaV7Z<;N@Qp0dmdnW(Y(_6kqtDo^qUrE@?sEi ziQ8K`-ihUH8r=@-(5A`PAxs17P{oG^zs$nVcn;p_SnPq_xxXEQ&QGw0Z!1ebn`C>Z%rdXs@AI}(w)v%v26lE*V;<%}W)(lGp&>8f z2>?<5{R~0j1q9HQtI8Fh`nFM@^?=5*cB0t%jRa+cN9@j&_u_!I@&itCyuDGAA^!4# z<5t3PJcR~++MtP+F79v#*2NB7|F<=iE`i{nB6inIgn9F`45|o}kEpEdv=zseH+Jl> z8nMuE>~9}<&lR$(_3{rm{Qoa@_xAUFtpr(GtR{{MoKQ3f!p}d-nJ|R^ZjVOy{TyqI zQtQcIpCL?})dAl4y-@VL?g(TH8rbwRIB=#KXR zu7tf0fI2#eIu6KSBvdt^{IVZAfjQ>Ui8+N=pY?YYAWUUXG|Yq%P>KGvJE9sMD5hDJ3lGvMu)k7DjoWFR84!9&5g&F6Yks&wpd z3*T7?QD-cSzgLS+ccP90ezc}325;(3y*Q~CVsL`)(Xwb;YZWc{Klco9f!;=qb*?Fm zPDPwHuI(SS0-Rt)MkA7QrB9z-8zZ=!P$zT(|(F|n|tXN zWFwd3b^v>imkZ8zxQyHId*1}4N*Rg(Uar?OLU= zgb}3U))p0B+;zvg5%W&9t9$RjXH5&o4SGx5Ig`e-W_;jr7SNSgV6*X|+T`oU5z|;S zo=yP@9oMX$8D3dEbBw~c621O5U*SSbRdt-G>MJhMJF&n~9hPg*EZfxzK3Tj{I7Drt zCOOr+hUMVi)HA?Ysz>4c6w#Gul!iXu$an1bTd0>shS+4%buOx0@I2&Y5%mIyI*t&) z9)-I+uN?EdnR)Ous>;y!1`2PY4YaW^L|>u})Cg)F?l-avbGPXw{E#+zi-sa4E;>6G zvu*09JT1X5zv*J&8J3o!3&<3MYI_& z819u9__Ty?HyEWEqgJBUvO!OVo*&K(4d@iyz)zOMAIx>yHLQa696>}|v`u|W4sNF% z7|PiUZXx4fQZ%$44FOtPz_}jwJ2B`w-~%i~n4I|`c_Aj@hA&1rvC69j%v{5{v0YwJfJiKjRQ9IYe8CgSDcmq@&I8$`a#fsxlM7`Q+RFj3~(XnQFsDGbc0?15eprEfXahP980KgQB0gS zr+@kkY`qOcJty8uwjFLZAb>V=84dtZ!|I_=LA)aMZ)nj;1ad|wXB47x=ccx!r#3`a zZh0E6+7!i5c_$I|(Sa)J7{ZHzV~DvB+dF>}eN}i>aL=YF7Q;@|t3vq}+Hgp8&eqh> z?(NDl5`X2L{t52Nn#vpGP-8QO@KhEH9O2If4lM8-!i&OnR2#LyqgOEj(OItqQ70p! z-XNlp1WDJ)SW{tVq)iJnp^VAVNTv@3_9K3lmh(_VF5?9Kk>-b4ofrb+l-ZeUSwgcBH zVmOvYu?JUpu$9@VmI@?pc{)W8Tj6H{4*+Td8cl1vDJfU=dsAs^ozo`*^P-2bGw>8G}#`8W4OoYHii2UWg!zp2^@P<5aKz;1^+-S12L)e%D z;WNOuTbT4tSg01B7oxL4DsNN>@NGlx=%zx<3f~ln+H1WSU2=Hgc`#^AwCNe7tdj{* z?>eGQUR;Rss&Dg)q56Q7R|1C;4N~w5pbYVGf19_dIzAr9oUzfu9K==fCQ~~lGd$N- zFXQ9fChE=D!&*Xtwo?z$ShrpFkk7z5e(%N786P6gGd@I~>qkcs^70H^53rmr(a6;Z zzVlKI{8z9ZzH^`Jn_kGTcJDZ*@(mw!Bh5=q^6!q{YZpGe$%gWzd%?F@ z_|D5w*x?&p3gMOW`3Jre&zzUCfVYnqFFWCiBgc;q$&sJ^hVNq1nXbO(u=X;|%Tb`4 z`p=i$s8{snt2MZobG+3)oxvfWkGJ|wPXHK4)kh~QD6cdhvLnrYsP;d39Uex&m!q=}cDA%d?s9{N=@q}5;gSw>ppNrwO^ulY%TcW6(1&zrkQ zi~s2~WsV!VY@e4qkrqC528y)G(=Qj&Do_7hNUPuU@Q$?jj+aQ0);#1TQlv!}I<^2l zoL4w@7apW54k?+Y$9VKh<@lQ%NDJ?JaPIZ*J@@sGzxBnt?)tp9z2~kkI)Co`xt&`t zo?D+jwDAR>`OH(Z?A-df*IN40=bS$}|7qu6cYd_~+SOOBtX~lL^VdFO?Z(;@Ym?RN zp4Z%M;4qzReBev(_tMK>y6>g%(1n-QUqUSxk^ez_IQP)f|M9~YoxA7Ux$4E&-1Y7^ zzx6#2z5A^(y<5Hb;W7Q*^XCXSSH10(FMe1AeewCvI(OH~L+{>s!gAt6{O_Sl?>>KW r{?pH2KL4@nAGP}GmG$u2_ulnqpZ%Vd&wcUkbJZ8F?yjAys&oGzvQ3ZV literal 0 HcmV?d00001 diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index e954ada..20954a0 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -6,6 +6,13 @@ document layout analysis (segmentation) with output in PAGE-XML """ +# cannot use importlib.resources until we move to 3.9+ forimportlib.resources.files +import sys +if sys.version_info < (3, 10): + import importlib_resources +else: + import importlib.resources as importlib_resources + from difflib import SequenceMatcher as sq from PIL import Image, ImageDraw, ImageFont import math @@ -5638,8 +5645,10 @@ class Eynollah_ocr: if dir_out_image_text: - font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! - font = ImageFont.truetype(font_path, 40) + #font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! + font = importlib_resources.files(__package__) / "Charis-Regular.ttf" + with importlib_resources.as_file(font) as font: + font = ImageFont.truetype(font=font, size=40) for indexer_text, bb_ind in enumerate(total_bb_coordinates): @@ -5649,7 +5658,7 @@ class Eynollah_ocr: w_bb = bb_ind[2] h_bb = bb_ind[3] - font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], font_path, w_bb, int(h_bb*0.4) ) + font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], font.path, w_bb, int(h_bb*0.4) ) ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) @@ -6135,8 +6144,10 @@ class Eynollah_ocr: if dir_out_image_text: - font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! - font = ImageFont.truetype(font_path, 40) + #font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! + font = importlib_resources.files(__package__) / "Charis-Regular.ttf" + with importlib_resources.as_file(font) as font: + font = ImageFont.truetype(font=font, size=40) for indexer_text, bb_ind in enumerate(total_bb_coordinates): @@ -6146,7 +6157,7 @@ class Eynollah_ocr: w_bb = bb_ind[2] h_bb = bb_ind[3] - font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], font_path, w_bb, int(h_bb*0.4) ) + font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], font.path, w_bb, int(h_bb*0.4) ) ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) diff --git a/tests/test_run.py b/tests/test_run.py index aea5808..d42bc0f 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -247,7 +247,7 @@ def test_run_eynollah_mbreorder_directory(tmp_path, subtests, pytestconfig, capl def test_run_eynollah_ocr_filename(tmp_path, subtests, pytestconfig, caplog): infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') - outrenderfile = tmp_path.joinpath('render').joinpath('kant_aufklaerung_1784_0020.xml') + outrenderfile = tmp_path.joinpath('render').joinpath('kant_aufklaerung_1784_0020.png') outrenderfile.parent.mkdir() args = [ '-m', MODELS_OCR, From 42fb452a7ea60fab52997ca0f9e58a755b1de08b Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 26 Sep 2025 12:54:29 +0200 Subject: [PATCH 235/492] disable the -doit OCR test --- tests/test_run.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/test_run.py b/tests/test_run.py index d42bc0f..b8baf7b 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -264,8 +264,10 @@ def test_run_eynollah_ocr_filename(tmp_path, subtests, pytestconfig, caplog): return logrec.name == 'eynollah' runner = CliRunner() for options in [ - [], # defaults - ["-doit", str(outrenderfile.parent)], + # kba Fri Sep 26 12:53:49 CEST 2025 + # disabled until error in https://github.com/qurator-spk/eynollah/actions/runs/18019655200/job/51273541895 debugged + # [], # defaults + # ["-doit", str(outrenderfile.parent)], ["-trocr"], ]: with subtests.test(#msg="test CLI", From eb8d4573a823c7674c524b1b19d37fc5d9b062e9 Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 26 Sep 2025 13:57:08 +0200 Subject: [PATCH 236/492] tests: also disable ...ocr_directory test --- tests/test_run.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/test_run.py b/tests/test_run.py index b8baf7b..40ec6cc 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -1,5 +1,6 @@ from os import environ from pathlib import Path +import pytest import logging from PIL import Image from eynollah.cli import ( @@ -265,7 +266,7 @@ def test_run_eynollah_ocr_filename(tmp_path, subtests, pytestconfig, caplog): runner = CliRunner() for options in [ # kba Fri Sep 26 12:53:49 CEST 2025 - # disabled until error in https://github.com/qurator-spk/eynollah/actions/runs/18019655200/job/51273541895 debugged + # Disabled until NHWC/NCHW error in https://github.com/qurator-spk/eynollah/actions/runs/18019655200/job/51273541895 debugged # [], # defaults # ["-doit", str(outrenderfile.parent)], ["-trocr"], @@ -288,6 +289,7 @@ def test_run_eynollah_ocr_filename(tmp_path, subtests, pytestconfig, caplog): assert len(out_texts) >= 2, ("result is inaccurate", out_texts) assert sum(map(len, out_texts)) > 100, ("result is inaccurate", out_texts) +@pytest.skip("Disabled until NHWC/NCHW error in https://github.com/qurator-spk/eynollah/actions/runs/18019655200/job/51273541895 debugged") def test_run_eynollah_ocr_directory(tmp_path, subtests, pytestconfig, caplog): indir = testdir.joinpath('resources') outdir = tmp_path From 830cc2c30a3f183f939126ec2bbc4cc264974a8a Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 26 Sep 2025 14:37:04 +0200 Subject: [PATCH 237/492] comment out the offending test outright --- tests/test_run.py | 47 ++++++++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/tests/test_run.py b/tests/test_run.py index 40ec6cc..da0455a 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -289,26 +289,27 @@ def test_run_eynollah_ocr_filename(tmp_path, subtests, pytestconfig, caplog): assert len(out_texts) >= 2, ("result is inaccurate", out_texts) assert sum(map(len, out_texts)) > 100, ("result is inaccurate", out_texts) -@pytest.skip("Disabled until NHWC/NCHW error in https://github.com/qurator-spk/eynollah/actions/runs/18019655200/job/51273541895 debugged") -def test_run_eynollah_ocr_directory(tmp_path, subtests, pytestconfig, caplog): - indir = testdir.joinpath('resources') - outdir = tmp_path - args = [ - '-m', MODELS_OCR, - '-di', str(indir), - '-dx', str(indir), - '-o', str(outdir), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'eynollah' - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(ocr_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - # FIXME: ocr has no logging! - #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs - assert len(list(outdir.iterdir())) == 2 +# kba Fri Sep 26 12:53:49 CEST 2025 +# Disabled until NHWC/NCHW error in https://github.com/qurator-spk/eynollah/actions/runs/18019655200/job/51273541895 debugged +# def test_run_eynollah_ocr_directory(tmp_path, subtests, pytestconfig, caplog): +# indir = testdir.joinpath('resources') +# outdir = tmp_path +# args = [ +# '-m', MODELS_OCR, +# '-di', str(indir), +# '-dx', str(indir), +# '-o', str(outdir), +# ] +# if pytestconfig.getoption('verbose') > 0: +# args.extend(['-l', 'DEBUG']) +# caplog.set_level(logging.INFO) +# def only_eynollah(logrec): +# return logrec.name == 'eynollah' +# runner = CliRunner() +# with caplog.filtering(only_eynollah): +# result = runner.invoke(ocr_cli, args, catch_exceptions=False) +# assert result.exit_code == 0, result.stdout +# logmsgs = [logrec.message for logrec in caplog.records] +# # FIXME: ocr has no logging! +# #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs +# assert len(list(outdir.iterdir())) == 2 From 3123add815f4fc610f90ade8bb5dc9ad6bd634c5 Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 26 Sep 2025 15:07:32 +0200 Subject: [PATCH 238/492] :memo: update README --- README.md | 55 ++++++++++++++++++++------------ docs/models.md | 20 +++++++++++- docs/train.md | 81 +++++++++++++++++++++++++++++++++++++---------- tests/test_run.py | 47 ++++++++++++++------------- 4 files changed, 141 insertions(+), 62 deletions(-) diff --git a/README.md b/README.md index 1adc3d7..4683eb7 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ # Eynollah -> Document Layout Analysis with Deep Learning and Heuristics + +> Document Layout Analysis, Binarization and OCR with Deep Learning and Heuristics [![PyPI Version](https://img.shields.io/pypi/v/eynollah)](https://pypi.org/project/eynollah/) [![GH Actions Test](https://github.com/qurator-spk/eynollah/actions/workflows/test-eynollah.yml/badge.svg)](https://github.com/qurator-spk/eynollah/actions/workflows/test-eynollah.yml) @@ -23,6 +24,7 @@ historical documents and therefore processing can be very slow. We aim to improve this, but contributions are welcome. ## Installation + Python `3.8-3.11` with Tensorflow `<2.13` on Linux are currently supported. For (limited) GPU support the CUDA toolkit needs to be installed. @@ -42,19 +44,30 @@ cd eynollah; pip install -e . Alternatively, you can run `make install` or `make install-dev` for editable installation. +To also install the dependencies for the OCR engines: + +``` +pip install "eynollah[OCR]" +# or +make install EXTRAS=OCR +``` + ## Models -Pretrained models can be downloaded from [qurator-data.de](https://qurator-data.de/eynollah/) or [huggingface](https://huggingface.co/SBB?search_models=eynollah). +Pretrained models can be downloaded from [zenodo](https://zenodo.org/records/17194824) or [huggingface](https://huggingface.co/SBB?search_models=eynollah). For documentation on methods and models, have a look at [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). ## Train + In case you want to train your own model with Eynollah, have a look at [`train.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/train.md). ## Usage -Eynollah supports four use cases: layout analysis (segmentation), binarization, text recognition (OCR), -and (trainable) reading order detection. + +Eynollah supports five use cases: layout analysis (segmentation), binarization, +image enhancement, text recognition (OCR), and (trainable) reading order detection. ### Layout Analysis + The layout analysis module is responsible for detecting layouts, identifying text lines, and determining reading order using both heuristic methods or a machine-based reading order detection model. @@ -97,58 +110,54 @@ and marginals). The best output quality is produced when RGB images are used as input rather than greyscale or binarized images. ### Binarization + The binarization module performs document image binarization using pretrained pixelwise segmentation models. The command-line interface for binarization of single image can be called like this: ```sh eynollah binarization \ + -i | -di \ + -o \ -m \ - \ - -``` - -and for flowing from a directory like this: - -```sh -eynollah binarization \ - -m \ - -di \ - -do ``` ### OCR + The OCR module performs text recognition from images using two main families of pretrained models: CNN-RNN–based OCR and Transformer-based OCR. The command-line interface for ocr can be called like this: ```sh eynollah ocr \ - -m | --model_name \ -i | -di \ -dx \ - -o + -o \ + -m | --model_name \ ``` ### Machine-based-reading-order + The machine-based reading-order module employs a pretrained model to identify the reading order from layouts represented in PAGE-XML files. The command-line interface for machine based reading order can be called like this: ```sh eynollah machine-based-reading-order \ - -m \ + -i | -di \ -xml | -dx \ + -m \ -o ``` #### Use as OCR-D processor + Eynollah ships with a CLI interface to be used as [OCR-D](https://ocr-d.de) [processor](https://ocr-d.de/en/spec/cli), formally described in [`ocrd-tool.json`](https://github.com/qurator-spk/eynollah/tree/main/src/eynollah/ocrd-tool.json). In this case, the source image file group with (preferably) RGB images should be used as input like this: - ocrd-eynollah-segment -I OCR-D-IMG -O OCR-D-SEG -P models 2022-04-05 + ocrd-eynollah-segment -I OCR-D-IMG -O OCR-D-SEG -P models eynollah_layout_v0_5_0 If the input file group is PAGE-XML (from a previous OCR-D workflow step), Eynollah behaves as follows: - existing regions are kept and ignored (i.e. in effect they might overlap segments from Eynollah results) @@ -160,14 +169,20 @@ If the input file group is PAGE-XML (from a previous OCR-D workflow step), Eynol (because some other preprocessing step was in effect like `denoised`), then the output PAGE-XML will be based on that as new top-level (`@imageFilename`) - ocrd-eynollah-segment -I OCR-D-XYZ -O OCR-D-SEG -P models 2022-04-05 + ocrd-eynollah-segment -I OCR-D-XYZ -O OCR-D-SEG -P models eynollah_layout_v0_5_0 Still, in general, it makes more sense to add other workflow steps **after** Eynollah. +There is also an OCR-D processor for the binarization: + + ocrd-sbb-binarize -I OCR-D-IMG -O OCR-D-BIN -P models default-2021-03-09 + #### Additional documentation + Please check the [wiki](https://github.com/qurator-spk/eynollah/wiki). ## How to cite + If you find this tool useful in your work, please consider citing our paper: ```bibtex diff --git a/docs/models.md b/docs/models.md index ac563b0..3d296d5 100644 --- a/docs/models.md +++ b/docs/models.md @@ -1,5 +1,6 @@ # Models documentation -This suite of 14 models presents a document layout analysis (DLA) system for historical documents implemented by + +This suite of 15 models presents a document layout analysis (DLA) system for historical documents implemented by pixel-wise segmentation using a combination of a ResNet50 encoder with various U-Net decoders. In addition, heuristic methods are applied to detect marginals and to determine the reading order of text regions. @@ -23,6 +24,7 @@ See the flowchart below for the different stages and how they interact: ## Models ### Image enhancement + Model card: [Image Enhancement](https://huggingface.co/SBB/eynollah-enhancement) This model addresses image resolution, specifically targeting documents with suboptimal resolution. In instances where @@ -30,12 +32,14 @@ the detection of document layout exhibits inadequate performance, the proposed e the quality and clarity of the images, thus facilitating enhanced visual interpretation and analysis. ### Page extraction / border detection + Model card: [Page Extraction/Border Detection](https://huggingface.co/SBB/eynollah-page-extraction) A problem that can negatively affect OCR are black margins around a page caused by document scanning. A deep learning model helps to crop to the page borders by using a pixel-wise segmentation method. ### Column classification + Model card: [Column Classification](https://huggingface.co/SBB/eynollah-column-classifier) This model is a trained classifier that recognizes the number of columns in a document by use of a training set with @@ -43,6 +47,7 @@ manual classification of all documents into six classes with either one, two, th respectively. ### Binarization + Model card: [Binarization](https://huggingface.co/SBB/eynollah-binarization) This model is designed to tackle the intricate task of document image binarization, which involves segmentation of the @@ -52,6 +57,7 @@ capability of the model enables improved accuracy and reliability in subsequent enhanced document understanding and interpretation. ### Main region detection + Model card: [Main Region Detection](https://huggingface.co/SBB/eynollah-main-regions) This model has employed a different set of labels, including an artificial class specifically designed to encompass the @@ -61,6 +67,7 @@ during the inference phase. By incorporating this methodology, improved efficien model's ability to accurately identify and classify text regions within documents. ### Main region detection (with scaling augmentation) + Model card: [Main Region Detection (with scaling augmentation)](https://huggingface.co/SBB/eynollah-main-regions-aug-scaling) Utilizing scaling augmentation, this model leverages the capability to effectively segment elements of extremely high or @@ -69,12 +76,14 @@ categorizing and isolating such elements, thereby enhancing its overall performa documents with varying scale characteristics. ### Main region detection (with rotation augmentation) + Model card: [Main Region Detection (with rotation augmentation)](https://huggingface.co/SBB/eynollah-main-regions-aug-rotation) This model takes advantage of rotation augmentation. This helps the tool to segment the vertical text regions in a robust way. ### Main region detection (ensembled) + Model card: [Main Region Detection (ensembled)](https://huggingface.co/SBB/eynollah-main-regions-ensembled) The robustness of this model is attained through an ensembling technique that combines the weights from various epochs. @@ -82,16 +91,19 @@ By employing this approach, the model achieves a high level of resilience and st strengths of multiple epochs to enhance its overall performance and deliver consistent and reliable results. ### Full region detection (1,2-column documents) + Model card: [Full Region Detection (1,2-column documents)](https://huggingface.co/SBB/eynollah-full-regions-1column) This model deals with documents comprising of one and two columns. ### Full region detection (3,n-column documents) + Model card: [Full Region Detection (3,n-column documents)](https://huggingface.co/SBB/eynollah-full-regions-3pluscolumn) This model is responsible for detecting headers and drop capitals in documents with three or more columns. ### Textline detection + Model card: [Textline Detection](https://huggingface.co/SBB/eynollah-textline) The method for textline detection combines deep learning and heuristics. In the deep learning part, an image-to-image @@ -106,6 +118,7 @@ segmentation is first deskewed and then the textlines are separated with the sam textline bounding boxes. Later, the strap is rotated back into its original orientation. ### Textline detection (light) + Model card: [Textline Detection Light (simpler but faster method)](https://huggingface.co/SBB/eynollah-textline_light) The method for textline detection combines deep learning and heuristics. In the deep learning part, an image-to-image @@ -119,6 +132,7 @@ enhancing the model's ability to accurately identify and delineate individual te eliminates the need for additional heuristics in extracting textline contours. ### Table detection + Model card: [Table Detection](https://huggingface.co/SBB/eynollah-tables) The objective of this model is to perform table segmentation in historical document images. Due to the pixel-wise @@ -128,17 +142,21 @@ effectively identify and delineate tables within the historical document images, enabling subsequent analysis and interpretation. ### Image detection + Model card: [Image Detection](https://huggingface.co/SBB/eynollah-image-extraction) This model is used for the task of illustration detection only. ### Reading order detection + Model card: [Reading Order Detection]() TODO ## Heuristic methods + Additionally, some heuristic methods are employed to further improve the model predictions: + * After border detection, the largest contour is determined by a bounding box, and the image cropped to these coordinates. * For text region detection, the image is scaled up to make it easier for the model to detect background space between text regions. * A minimum area is defined for text regions in relation to the overall image dimensions, so that very small regions that are noise can be filtered out. diff --git a/docs/train.md b/docs/train.md index 9f44a63..47ad67b 100644 --- a/docs/train.md +++ b/docs/train.md @@ -1,4 +1,5 @@ # Training documentation + This aims to assist users in preparing training datasets, training models, and performing inference with trained models. We cover various use cases including pixel-wise segmentation, image classification, image enhancement, and machine-based reading order detection. For each use case, we provide guidance on how to generate the corresponding training dataset. @@ -11,6 +12,7 @@ The following three tasks can all be accomplished using the code in the * inference with the trained model ## Generate training dataset + The script `generate_gt_for_training.py` is used for generating training datasets. As the results of the following command demonstrates, the dataset generator provides three different commands: @@ -23,14 +25,19 @@ These three commands are: * pagexml2label ### image-enhancement + Generating a training dataset for image enhancement is quite straightforward. All that is needed is a set of high-resolution images. The training dataset can then be generated using the following command: -`python generate_gt_for_training.py image-enhancement -dis "dir of high resolution images" -dois "dir where degraded -images will be written" -dols "dir where the corresponding high resolution image will be written as label" -scs -"degrading scales json file"` +```sh +python generate_gt_for_training.py image-enhancement \ + -dis "dir of high resolution images" \ + -dois "dir where degraded images will be written" \ + -dols "dir where the corresponding high resolution image will be written as label" \ + -scs "degrading scales json file" +``` -The scales JSON file is a dictionary with a key named 'scales' and values representing scales smaller than 1. Images are +The scales JSON file is a dictionary with a key named `scales` and values representing scales smaller than 1. Images are downscaled based on these scales and then upscaled again to their original size. This process causes the images to lose resolution at different scales. The degraded images are used as input images, and the original high-resolution images serve as labels. The enhancement model can be trained with this generated dataset. The scales JSON file looks like this: @@ -42,6 +49,7 @@ serve as labels. The enhancement model can be trained with this generated datase ``` ### machine-based-reading-order + For machine-based reading order, we aim to determine the reading priority between two sets of text regions. The model's input is a three-channel image: the first and last channels contain information about each of the two text regions, while the middle channel encodes prominent layout elements necessary for reading order, such as separators and headers. @@ -52,10 +60,18 @@ For output images, it is necessary to specify the width and height. Additionally to filter out regions smaller than this minimum size. This minimum size is defined as the ratio of the text region area to the image area, with a default value of zero. To run the dataset generator, use the following command: -`python generate_gt_for_training.py machine-based-reading-order -dx "dir of GT xml files" -domi "dir where output images -will be written" -docl "dir where the labels will be written" -ih "height" -iw "width" -min "min area ratio"` +```shell +python generate_gt_for_training.py machine-based-reading-order \ + -dx "dir of GT xml files" \ + -domi "dir where output images will be written" \ + -docl "dir where the labels will be written" \ + -ih "height" \ + -iw "width" \ + -min "min area ratio" +``` ### pagexml2label + pagexml2label is designed to generate labels from GT page XML files for various pixel-wise segmentation use cases, including 'layout,' 'textline,' 'printspace,' 'glyph,' and 'word' segmentation. To train a pixel-wise segmentation model, we require images along with their corresponding labels. Our training script @@ -119,9 +135,13 @@ graphic region, "stamp" has its own class, while all other types are classified region" are also present in the label. However, other regions like "noise region" and "table region" will not be included in the label PNG file, even if they have information in the page XML files, as we chose not to include them. -`python generate_gt_for_training.py pagexml2label -dx "dir of GT xml files" -do "dir where output label png files will -be written" -cfg "custom config json file" -to "output type which has 2d and 3d. 2d is used for training and 3d is just -to visualise the labels" "` +```sh +python generate_gt_for_training.py pagexml2label \ + -dx "dir of GT xml files" \ + -do "dir where output label png files will be written" \ + -cfg "custom config json file" \ + -to "output type which has 2d and 3d. 2d is used for training and 3d is just to visualise the labels" +``` We have also defined an artificial class that can be added to the boundary of text region types or text lines. This key is called "artificial_class_on_boundary." If users want to apply this to certain text regions in the layout use case, @@ -169,12 +189,19 @@ in this scenario, since cropping will be applied to the label files, the directo provided to ensure that they are cropped in sync with the labels. This ensures that the correct images and labels required for training are obtained. The command should resemble the following: -`python generate_gt_for_training.py pagexml2label -dx "dir of GT xml files" -do "dir where output label png files will -be written" -cfg "custom config json file" -to "output type which has 2d and 3d. 2d is used for training and 3d is just -to visualise the labels" -ps -di "dir where the org images are located" -doi "dir where the cropped output images will -be written" ` +```sh +python generate_gt_for_training.py pagexml2label \ + -dx "dir of GT xml files" \ + -do "dir where output label png files will be written" \ + -cfg "custom config json file" \ + -to "output type which has 2d and 3d. 2d is used for training and 3d is just to visualise the labels" \ + -ps \ + -di "dir where the org images are located" \ + -doi "dir where the cropped output images will be written" +``` ## Train a model + ### classification For the classification use case, we haven't provided a ground truth generator, as it's unnecessary. For classification, @@ -225,7 +252,9 @@ And the "dir_eval" the same structure as train directory: The classification model can be trained using the following command line: -`python train.py with config_classification.json` +```sh +python train.py with config_classification.json +``` As evident in the example JSON file above, for classification, we utilize a "f1_threshold_classification" parameter. This parameter is employed to gather all models with an evaluation f1 score surpassing this threshold. Subsequently, @@ -276,6 +305,7 @@ The classification model can be trained like the classification case command lin ### Segmentation (Textline, Binarization, Page extraction and layout) and enhancement #### Parameter configuration for segmentation or enhancement usecases + The following parameter configuration can be applied to all segmentation use cases and enhancements. The augmentation, its sub-parameters, and continued training are defined only for segmentation use cases and enhancements, not for classification and machine-based reading order, as you can see in their example config files. @@ -355,6 +385,7 @@ command, similar to the process for classification and reading order: `python train.py with config_classification.json` #### Binarization + An example config json file for binarization can be like this: ```yaml @@ -550,6 +581,7 @@ For page segmentation (or printspace or border segmentation), the model needs to hence the patches parameter should be set to false. #### layout segmentation + An example config json file for layout segmentation with 5 classes (including background) can be like this: ```yaml @@ -605,26 +637,41 @@ An example config json file for layout segmentation with 5 classes (including ba ## Inference with the trained model ### classification + For conducting inference with a trained model, you simply need to execute the following command line, specifying the directory of the model and the image on which to perform inference: -`python inference.py -m "model dir" -i "image" ` +```sh +python inference.py -m "model dir" -i "image" +``` This will straightforwardly return the class of the image. ### machine based reading order + To infer the reading order using a reading order model, we need a page XML file containing layout information but without the reading order. We simply need to provide the model directory, the XML file, and the output directory. The new XML file with the added reading order will be written to the output directory with the same name. We need to run: -`python inference.py -m "model dir" -xml "page xml file" -o "output dir to write new xml with reading order" ` +```sh +python inference.py \ + -m "model dir" \ + -xml "page xml file" \ + -o "output dir to write new xml with reading order" +``` ### Segmentation (Textline, Binarization, Page extraction and layout) and enhancement For conducting inference with a trained model for segmentation and enhancement you need to run the following command line: -`python inference.py -m "model dir" -i "image" -p -s "output image" ` +```sh +python inference.py \ + -m "model dir" \ + -i "image" \ + -p \ + -s "output image" +``` Note that in the case of page extraction the -p flag is not needed. diff --git a/tests/test_run.py b/tests/test_run.py index da0455a..be928a0 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -289,27 +289,26 @@ def test_run_eynollah_ocr_filename(tmp_path, subtests, pytestconfig, caplog): assert len(out_texts) >= 2, ("result is inaccurate", out_texts) assert sum(map(len, out_texts)) > 100, ("result is inaccurate", out_texts) -# kba Fri Sep 26 12:53:49 CEST 2025 -# Disabled until NHWC/NCHW error in https://github.com/qurator-spk/eynollah/actions/runs/18019655200/job/51273541895 debugged -# def test_run_eynollah_ocr_directory(tmp_path, subtests, pytestconfig, caplog): -# indir = testdir.joinpath('resources') -# outdir = tmp_path -# args = [ -# '-m', MODELS_OCR, -# '-di', str(indir), -# '-dx', str(indir), -# '-o', str(outdir), -# ] -# if pytestconfig.getoption('verbose') > 0: -# args.extend(['-l', 'DEBUG']) -# caplog.set_level(logging.INFO) -# def only_eynollah(logrec): -# return logrec.name == 'eynollah' -# runner = CliRunner() -# with caplog.filtering(only_eynollah): -# result = runner.invoke(ocr_cli, args, catch_exceptions=False) -# assert result.exit_code == 0, result.stdout -# logmsgs = [logrec.message for logrec in caplog.records] -# # FIXME: ocr has no logging! -# #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs -# assert len(list(outdir.iterdir())) == 2 +@pytest.mark.skip("Disabled until NHWC/NCHW error in https://github.com/qurator-spk/eynollah/actions/runs/18019655200/job/51273541895 debugged") +def test_run_eynollah_ocr_directory(tmp_path, subtests, pytestconfig, caplog): + indir = testdir.joinpath('resources') + outdir = tmp_path + args = [ + '-m', MODELS_OCR, + '-di', str(indir), + '-dx', str(indir), + '-o', str(outdir), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + def only_eynollah(logrec): + return logrec.name == 'eynollah' + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(ocr_cli, args, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + # FIXME: ocr has no logging! + #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs + assert len(list(outdir.iterdir())) == 2 From 37e64b4e458613a433f4837a120a66378ea6668a Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 26 Sep 2025 16:19:04 +0200 Subject: [PATCH 239/492] :memo: changelog --- CHANGELOG.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ad86fe5..a05919e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,13 @@ Versioned according to [Semantic Versioning](http://semver.org/). Fixed: - * restoring the contour in the original image caused an error due to an empty tuple + * restoring the contour in the original image caused an error due to an empty tuple, #154 + +Added: + + * `eynollah machine-based-reading-order` CLI to run reading order detection, #175 + * `eynollah enhancement` CLI to run image enhancement, #175 + * Improved models for page extraction and reading order detection, #175 ## [0.4.0] - 2025-04-07 From 6ea6a6280165ff040b63abe8dc9e917b4150a40b Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 26 Sep 2025 16:23:46 +0200 Subject: [PATCH 240/492] :memo: v0.5.0 --- CHANGELOG.md | 4 ++++ src/eynollah/ocrd-tool.json | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a05919e..0ad9a09 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,8 @@ Versioned according to [Semantic Versioning](http://semver.org/). ## Unreleased +## [0.5.0] - 2025-09-26 + Fixed: * restoring the contour in the original image caused an error due to an empty tuple, #154 @@ -193,6 +195,8 @@ Fixed: Initial release +[0.5.0]: ../../compare/v0.5.0...v0.4.0 +[0.4.0]: ../../compare/v0.4.0...v0.3.1 [0.3.1]: ../../compare/v0.3.1...v0.3.0 [0.3.0]: ../../compare/v0.3.0...v0.2.0 [0.2.0]: ../../compare/v0.2.0...v0.1.0 diff --git a/src/eynollah/ocrd-tool.json b/src/eynollah/ocrd-tool.json index fbc6c1a..5d89c92 100644 --- a/src/eynollah/ocrd-tool.json +++ b/src/eynollah/ocrd-tool.json @@ -1,5 +1,5 @@ { - "version": "0.4.0", + "version": "0.5.0", "git_url": "https://github.com/qurator-spk/eynollah", "dockerhub": "ocrd/eynollah", "tools": { From 92c1e824dc0683fc74eaa037cabcdb41f49cf677 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky <38561704+bertsky@users.noreply.github.com> Date: Fri, 26 Sep 2025 23:05:47 +0200 Subject: [PATCH 241/492] CD: master is now main --- .github/workflows/build-docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-docker.yml b/.github/workflows/build-docker.yml index d77958b..d2869ed 100644 --- a/.github/workflows/build-docker.yml +++ b/.github/workflows/build-docker.yml @@ -2,7 +2,7 @@ name: CD on: push: - branches: [ "master" ] + branches: [ "main" ] workflow_dispatch: # run manually jobs: From a48e52c00eef1b1e8c85b25bf4d95e46ecaf0cf1 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky <38561704+bertsky@users.noreply.github.com> Date: Mon, 29 Sep 2025 13:49:18 +0200 Subject: [PATCH 242/492] :memo: extend changelog for v0.5.0 --- CHANGELOG.md | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0ad9a09..bfdd1ce 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,12 +11,37 @@ Fixed: * restoring the contour in the original image caused an error due to an empty tuple, #154 +Changed + + * CLIs: read only allowed filename suffixes (image or XML) with `--dir_in` + * CLIs: make all output option required, and `-i` / `-di` required but mutually exclusive + * ocr CLI: drop redundant `-brb` in favour of just `-dib` + * APIs: move all input/output path options from class (kwarg and attribute) ro `run` kwarg + * layout textlines: polygonal also without `-cl` + Added: * `eynollah machine-based-reading-order` CLI to run reading order detection, #175 * `eynollah enhancement` CLI to run image enhancement, #175 * Improved models for page extraction and reading order detection, #175 +Merged PRs: + + * better machine based reading order + layout and textline + ocr by @vahidrezanezhad in https://github.com/qurator-spk/eynollah/pull/175 + * CI: pypi by @kba in https://github.com/qurator-spk/eynollah/pull/154 + * CI: Use most recent actions/setup-python@v5 by @kba in https://github.com/qurator-spk/eynollah/pull/157 + * update docker by @bertsky in https://github.com/qurator-spk/eynollah/pull/159 + * Ocrd fixes by @kba in https://github.com/qurator-spk/eynollah/pull/167 + * Updating readme for eynollah use cases cli by @kba in https://github.com/qurator-spk/eynollah/pull/166 + * OCR-D processor: expose reading_order_machine_based by @bertsky in https://github.com/qurator-spk/eynollah/pull/171 + * prepare release v0.5.0: fix logging by @bertsky in https://github.com/qurator-spk/eynollah/pull/180 + * mb_ro_on_layout: remove copy-pasta code not actually used by @kba in https://github.com/qurator-spk/eynollah/pull/181 + * prepare release v0.5.0: improve CLI docstring, refactor I/O path options from class to run kwargs, increase test coverage @bertsky in #182 + * prepare release v0.5.0: fix for OCR doit subtest by @bertsky in https://github.com/qurator-spk/eynollah/pull/183 + * Prepare release v0.5.0 by @kba in https://github.com/qurator-spk/eynollah/pull/178 + * updating eynollah README, how to use it for use cases by @vahidrezanezhad in https://github.com/qurator-spk/eynollah/pull/156 + * add feedback to command line interface by @michalbubula in https://github.com/qurator-spk/eynollah/pull/170 + ## [0.4.0] - 2025-04-07 Fixed: From 56c4b7af8872514527965c0249553771fa4417d5 Mon Sep 17 00:00:00 2001 From: kba Date: Mon, 29 Sep 2025 14:59:41 +0200 Subject: [PATCH 243/492] :memo: align pre-merge docs/train.md with former upstream train.md syntactically --- docs/train.md | 167 ++++++++++++++++++++++++++----------------------- train/train.md | 135 ++++++++++++++++++++++++++++----------- 2 files changed, 187 insertions(+), 115 deletions(-) diff --git a/docs/train.md b/docs/train.md index 47ad67b..b920a07 100644 --- a/docs/train.md +++ b/docs/train.md @@ -1,10 +1,12 @@ # Training documentation -This aims to assist users in preparing training datasets, training models, and performing inference with trained models. -We cover various use cases including pixel-wise segmentation, image classification, image enhancement, and machine-based -reading order detection. For each use case, we provide guidance on how to generate the corresponding training dataset. +This aims to assist users in preparing training datasets, training models, and +performing inference with trained models. We cover various use cases including +pixel-wise segmentation, image classification, image enhancement, and +machine-based reading order detection. For each use case, we provide guidance +on how to generate the corresponding training dataset. -The following three tasks can all be accomplished using the code in the +The following three tasks can all be accomplished using the code in the [`train`](https://github.com/qurator-spk/sbb_pixelwise_segmentation/tree/unifying-training-models) directory: * generate training dataset @@ -13,7 +15,7 @@ The following three tasks can all be accomplished using the code in the ## Generate training dataset -The script `generate_gt_for_training.py` is used for generating training datasets. As the results of the following +The script `generate_gt_for_training.py` is used for generating training datasets. As the results of the following command demonstrates, the dataset generator provides three different commands: `python generate_gt_for_training.py --help` @@ -26,7 +28,7 @@ These three commands are: ### image-enhancement -Generating a training dataset for image enhancement is quite straightforward. All that is needed is a set of +Generating a training dataset for image enhancement is quite straightforward. All that is needed is a set of high-resolution images. The training dataset can then be generated using the following command: ```sh @@ -37,9 +39,9 @@ python generate_gt_for_training.py image-enhancement \ -scs "degrading scales json file" ``` -The scales JSON file is a dictionary with a key named `scales` and values representing scales smaller than 1. Images are -downscaled based on these scales and then upscaled again to their original size. This process causes the images to lose -resolution at different scales. The degraded images are used as input images, and the original high-resolution images +The scales JSON file is a dictionary with a key named `scales` and values representing scales smaller than 1. Images are +downscaled based on these scales and then upscaled again to their original size. This process causes the images to lose +resolution at different scales. The degraded images are used as input images, and the original high-resolution images serve as labels. The enhancement model can be trained with this generated dataset. The scales JSON file looks like this: ```yaml @@ -50,14 +52,14 @@ serve as labels. The enhancement model can be trained with this generated datase ### machine-based-reading-order -For machine-based reading order, we aim to determine the reading priority between two sets of text regions. The model's -input is a three-channel image: the first and last channels contain information about each of the two text regions, -while the middle channel encodes prominent layout elements necessary for reading order, such as separators and headers. -To generate the training dataset, our script requires a page XML file that specifies the image layout with the correct +For machine-based reading order, we aim to determine the reading priority between two sets of text regions. The model's +input is a three-channel image: the first and last channels contain information about each of the two text regions, +while the middle channel encodes prominent layout elements necessary for reading order, such as separators and headers. +To generate the training dataset, our script requires a page XML file that specifies the image layout with the correct reading order. -For output images, it is necessary to specify the width and height. Additionally, a minimum text region size can be set -to filter out regions smaller than this minimum size. This minimum size is defined as the ratio of the text region area +For output images, it is necessary to specify the width and height. Additionally, a minimum text region size can be set +to filter out regions smaller than this minimum size. This minimum size is defined as the ratio of the text region area to the image area, with a default value of zero. To run the dataset generator, use the following command: ```shell @@ -74,15 +76,15 @@ python generate_gt_for_training.py machine-based-reading-order \ pagexml2label is designed to generate labels from GT page XML files for various pixel-wise segmentation use cases, including 'layout,' 'textline,' 'printspace,' 'glyph,' and 'word' segmentation. -To train a pixel-wise segmentation model, we require images along with their corresponding labels. Our training script -expects a PNG image where each pixel corresponds to a label, represented by an integer. The background is always labeled -as zero, while other elements are assigned different integers. For instance, if we have ground truth data with four +To train a pixel-wise segmentation model, we require images along with their corresponding labels. Our training script +expects a PNG image where each pixel corresponds to a label, represented by an integer. The background is always labeled +as zero, while other elements are assigned different integers. For instance, if we have ground truth data with four elements including the background, the classes would be labeled as 0, 1, 2, and 3 respectively. -In binary segmentation scenarios such as textline or page extraction, the background is encoded as 0, and the desired +In binary segmentation scenarios such as textline or page extraction, the background is encoded as 0, and the desired element is automatically encoded as 1 in the PNG label. -To specify the desired use case and the elements to be extracted in the PNG labels, a custom JSON file can be passed. +To specify the desired use case and the elements to be extracted in the PNG labels, a custom JSON file can be passed. For example, in the case of 'textline' detection, the JSON file would resemble this: ```yaml @@ -116,23 +118,23 @@ A possible custom config json file for layout segmentation where the "printspace } ``` -For the layout use case, it is beneficial to first understand the structure of the page XML file and its elements. -In a given image, the annotations of elements are recorded in a page XML file, including their contours and classes. -For an image document, the known regions are 'textregion', 'separatorregion', 'imageregion', 'graphicregion', +For the layout use case, it is beneficial to first understand the structure of the page XML file and its elements. +In a given image, the annotations of elements are recorded in a page XML file, including their contours and classes. +For an image document, the known regions are 'textregion', 'separatorregion', 'imageregion', 'graphicregion', 'noiseregion', and 'tableregion'. -Text regions and graphic regions also have their own specific types. The known types for text regions are 'paragraph', -'header', 'heading', 'marginalia', 'drop-capital', 'footnote', 'footnote-continued', 'signature-mark', 'page-number', -and 'catch-word'. The known types for graphic regions are 'handwritten-annotation', 'decoration', 'stamp', and +Text regions and graphic regions also have their own specific types. The known types for text regions are 'paragraph', +'header', 'heading', 'marginalia', 'drop-capital', 'footnote', 'footnote-continued', 'signature-mark', 'page-number', +and 'catch-word'. The known types for graphic regions are 'handwritten-annotation', 'decoration', 'stamp', and 'signature'. -Since we don't know all types of text and graphic regions, unknown cases can arise. To handle these, we have defined -two additional types, "rest_as_paragraph" and "rest_as_decoration", to ensure that no unknown types are missed. +Since we don't know all types of text and graphic regions, unknown cases can arise. To handle these, we have defined +two additional types, "rest_as_paragraph" and "rest_as_decoration", to ensure that no unknown types are missed. This way, users can extract all known types from the labels and be confident that no unknown types are overlooked. -In the custom JSON file shown above, "header" and "heading" are extracted as the same class, while "marginalia" is shown -as a different class. All other text region types, including "drop-capital," are grouped into the same class. For the -graphic region, "stamp" has its own class, while all other types are classified together. "Image region" and "separator -region" are also present in the label. However, other regions like "noise region" and "table region" will not be +In the custom JSON file shown above, "header" and "heading" are extracted as the same class, while "marginalia" is shown +as a different class. All other text region types, including "drop-capital," are grouped into the same class. For the +graphic region, "stamp" has its own class, while all other types are classified together. "Image region" and "separator +region" are also present in the label. However, other regions like "noise region" and "table region" will not be included in the label PNG file, even if they have information in the page XML files, as we chose not to include them. ```sh @@ -143,8 +145,8 @@ python generate_gt_for_training.py pagexml2label \ -to "output type which has 2d and 3d. 2d is used for training and 3d is just to visualise the labels" ``` -We have also defined an artificial class that can be added to the boundary of text region types or text lines. This key -is called "artificial_class_on_boundary." If users want to apply this to certain text regions in the layout use case, +We have also defined an artificial class that can be added to the boundary of text region types or text lines. This key +is called "artificial_class_on_boundary." If users want to apply this to certain text regions in the layout use case, the example JSON config file should look like this: ```yaml @@ -167,13 +169,13 @@ the example JSON config file should look like this: } ``` -This implies that the artificial class label, denoted by 7, will be present on PNG files and will only be added to the +This implies that the artificial class label, denoted by 7, will be present on PNG files and will only be added to the elements labeled as "paragraph," "header," "heading," and "marginalia." -For "textline", "word", and "glyph", the artificial class on the boundaries will be activated only if the -"artificial_class_label" key is specified in the config file. Its value should be set as 2 since these elements -represent binary cases. For example, if the background and textline are denoted as 0 and 1 respectively, then the -artificial class should be assigned the value 2. The example JSON config file should look like this for "textline" use +For "textline", "word", and "glyph", the artificial class on the boundaries will be activated only if the +"artificial_class_label" key is specified in the config file. Its value should be set as 2 since these elements +represent binary cases. For example, if the background and textline are denoted as 0 and 1 respectively, then the +artificial class should be assigned the value 2. The example JSON config file should look like this for "textline" use case: ```yaml @@ -183,10 +185,10 @@ case: } ``` -If the coordinates of "PrintSpace" or "Border" are present in the page XML ground truth files, and the user wishes to -crop only the print space area, this can be achieved by activating the "-ps" argument. However, it should be noted that -in this scenario, since cropping will be applied to the label files, the directory of the original images must be -provided to ensure that they are cropped in sync with the labels. This ensures that the correct images and labels +If the coordinates of "PrintSpace" or "Border" are present in the page XML ground truth files, and the user wishes to +crop only the print space area, this can be achieved by activating the "-ps" argument. However, it should be noted that +in this scenario, since cropping will be applied to the label files, the directory of the original images must be +provided to ensure that they are cropped in sync with the labels. This ensures that the correct images and labels required for training are obtained. The command should resemble the following: ```sh @@ -204,11 +206,11 @@ python generate_gt_for_training.py pagexml2label \ ### classification -For the classification use case, we haven't provided a ground truth generator, as it's unnecessary. For classification, -all we require is a training directory with subdirectories, each containing images of its respective classes. We need -separate directories for training and evaluation, and the class names (subdirectories) must be consistent across both -directories. Additionally, the class names should be specified in the config JSON file, as shown in the following -example. If, for instance, we aim to classify "apple" and "orange," with a total of 2 classes, the +For the classification use case, we haven't provided a ground truth generator, as it's unnecessary. For classification, +all we require is a training directory with subdirectories, each containing images of its respective classes. We need +separate directories for training and evaluation, and the class names (subdirectories) must be consistent across both +directories. Additionally, the class names should be specified in the config JSON file, as shown in the following +example. If, for instance, we aim to classify "apple" and "orange," with a total of 2 classes, the "classification_classes_name" key in the config file should appear as follows: ```yaml @@ -233,7 +235,7 @@ example. If, for instance, we aim to classify "apple" and "orange," with a total The "dir_train" should be like this: -``` +``` . └── train # train directory ├── apple # directory of images for apple class @@ -242,7 +244,7 @@ The "dir_train" should be like this: And the "dir_eval" the same structure as train directory: -``` +``` . └── eval # evaluation directory ├── apple # directory of images for apple class @@ -256,9 +258,9 @@ The classification model can be trained using the following command line: python train.py with config_classification.json ``` -As evident in the example JSON file above, for classification, we utilize a "f1_threshold_classification" parameter. -This parameter is employed to gather all models with an evaluation f1 score surpassing this threshold. Subsequently, -an ensemble of these model weights is executed, and a model is saved in the output directory as "model_ens_avg". +As evident in the example JSON file above, for classification, we utilize a "f1_threshold_classification" parameter. +This parameter is employed to gather all models with an evaluation f1 score surpassing this threshold. Subsequently, +an ensemble of these model weights is executed, and a model is saved in the output directory as "model_ens_avg". Additionally, the weight of the best model based on the evaluation f1 score is saved as "model_best". ### reading order @@ -306,25 +308,25 @@ The classification model can be trained like the classification case command lin #### Parameter configuration for segmentation or enhancement usecases -The following parameter configuration can be applied to all segmentation use cases and enhancements. The augmentation, -its sub-parameters, and continued training are defined only for segmentation use cases and enhancements, not for +The following parameter configuration can be applied to all segmentation use cases and enhancements. The augmentation, +its sub-parameters, and continued training are defined only for segmentation use cases and enhancements, not for classification and machine-based reading order, as you can see in their example config files. -* backbone_type: For segmentation tasks (such as text line, binarization, and layout detection) and enhancement, we -* offer two backbone options: a "nontransformer" and a "transformer" backbone. For the "transformer" backbone, we first +* backbone_type: For segmentation tasks (such as text line, binarization, and layout detection) and enhancement, we +* offer two backbone options: a "nontransformer" and a "transformer" backbone. For the "transformer" backbone, we first * apply a CNN followed by a transformer. In contrast, the "nontransformer" backbone utilizes only a CNN ResNet-50. * task : The task parameter can have values such as "segmentation", "enhancement", "classification", and "reading_order". -* patches: If you want to break input images into smaller patches (input size of the model) you need to set this -* parameter to ``true``. In the case that the model should see the image once, like page extraction, patches should be +* patches: If you want to break input images into smaller patches (input size of the model) you need to set this +* parameter to ``true``. In the case that the model should see the image once, like page extraction, patches should be * set to ``false``. * n_batch: Number of batches at each iteration. -* n_classes: Number of classes. In the case of binary classification this should be 2. In the case of reading_order it +* n_classes: Number of classes. In the case of binary classification this should be 2. In the case of reading_order it * should set to 1. And for the case of layout detection just the unique number of classes should be given. * n_epochs: Number of epochs. * input_height: This indicates the height of model's input. * input_width: This indicates the width of model's input. * weight_decay: Weight decay of l2 regularization of model layers. -* pretraining: Set to ``true`` to load pretrained weights of ResNet50 encoder. The downloaded weights should be saved +* pretraining: Set to ``true`` to load pretrained weights of ResNet50 encoder. The downloaded weights should be saved * in a folder named "pretrained_model" in the same directory of "train.py" script. * augmentation: If you want to apply any kind of augmentation this parameter should first set to ``true``. * flip_aug: If ``true``, different types of filp will be applied on image. Type of flips is given with "flip_index" parameter. @@ -344,9 +346,15 @@ classification and machine-based reading order, as you can see in their example * brightness: The amount of brightenings. * thetha: Rotation angles. * degrade_scales: The amount of degradings. -* continue_training: If ``true``, it means that you have already trained a model and you would like to continue the training. So it is needed to provide the dir of trained model with "dir_of_start_model" and index for naming the models. For example if you have already trained for 3 epochs then your last index is 2 and if you want to continue from model_1.h5, you can set ``index_start`` to 3 to start naming model with index 3. +* continue_training: If ``true``, it means that you have already trained a model and you would like to continue the + training. So it is needed to providethe dir of trained model with "dir_of_start_model" and index for naming + themodels. For example if you have already trained for 3 epochs then your lastindex is 2 and if you want to continue + from model_1.h5, you can set +``index_start`` to 3 to start naming model with index 3. * weighted_loss: If ``true``, this means that you want to apply weighted categorical_crossentropy as loss fucntion. Be carefull if you set to ``true``the parameter "is_loss_soft_dice" should be ``false`` -* data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train and eval data are in "dir_output". Since when once we provide training data we resize and augment them and then we write them in sub-directories train and eval in "dir_output". +* data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train + and eval data are in"dir_output".Since when once we provide training data we resize and augmentthem and then wewrite + them in sub-directories train and eval in "dir_output". * dir_train: This is the directory of "images" and "labels" (dir_train should include two subdirectories with names of images and labels ) for raw images and labels. Namely they are not prepared (not resized and not augmented) yet for training the model. When we run this tool these raw data will be transformed to suitable size needed for the model and they will be written in "dir_output" in train and eval directories. Each of train and eval include "images" and "labels" sub-directories. * index_start: Starting index for saved models in the case that "continue_training" is ``true``. * dir_of_start_model: Directory containing pretrained model to continue training the model in the case that "continue_training" is ``true``. @@ -379,7 +387,7 @@ And the "dir_eval" the same structure as train directory: └── labels # directory of labels ``` -After configuring the JSON file for segmentation or enhancement, training can be initiated by running the following +After configuring the JSON file for segmentation or enhancement, training can be initiated by running the following command, similar to the process for classification and reading order: `python train.py with config_classification.json` @@ -429,7 +437,7 @@ An example config json file for binarization can be like this: "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -474,7 +482,7 @@ An example config json file for binarization can be like this: "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -519,7 +527,7 @@ An example config json file for binarization can be like this: "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -529,7 +537,7 @@ An example config json file for binarization can be like this: } ``` -It's important to mention that the value of n_classes for enhancement should be 3, as the model's output is a 3-channel +It's important to mention that the value of n_classes for enhancement should be 3, as the model's output is a 3-channel image. #### Page extraction @@ -567,7 +575,7 @@ image. "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -577,8 +585,8 @@ image. } ``` -For page segmentation (or printspace or border segmentation), the model needs to view the input image in its entirety, -hence the patches parameter should be set to false. +For page segmentation (or print space or border segmentation), the model needs to view the input image in its +entirety,hence the patches parameter should be set to false. #### layout segmentation @@ -625,7 +633,7 @@ An example config json file for layout segmentation with 5 classes (including ba "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -638,7 +646,7 @@ An example config json file for layout segmentation with 5 classes (including ba ### classification -For conducting inference with a trained model, you simply need to execute the following command line, specifying the +For conducting inference with a trained model, you simply need to execute the following command line, specifying the directory of the model and the image on which to perform inference: ```sh @@ -649,10 +657,9 @@ This will straightforwardly return the class of the image. ### machine based reading order -To infer the reading order using a reading order model, we need a page XML file containing layout information but -without the reading order. We simply need to provide the model directory, the XML file, and the output directory. -The new XML file with the added reading order will be written to the output directory with the same name. -We need to run: +To infer the reading order using a reading order model, we need a page XML file containing layout information but +without the reading order. We simply need to provide the model directory, the XML file, and the output directory. The +new XML file with the added reading order will be written to the output directory with the same name. We need to run: ```sh python inference.py \ @@ -662,8 +669,8 @@ python inference.py \ ``` ### Segmentation (Textline, Binarization, Page extraction and layout) and enhancement -For conducting inference with a trained model for segmentation and enhancement you need to run the following command -line: + +For conducting inference with a trained model for segmentation and enhancement you need to run the following command line: ```sh python inference.py \ @@ -675,5 +682,5 @@ python inference.py \ Note that in the case of page extraction the -p flag is not needed. -For segmentation or binarization tasks, if a ground truth (GT) label is available, the IoU evaluation metric can be +For segmentation or binarization tasks, if a ground truth (GT) label is available, the IoU evaluation metric can be calculated for the output. To do this, you need to provide the GT label using the argument -gt. diff --git a/train/train.md b/train/train.md index 553522b..3eeb715 100644 --- a/train/train.md +++ b/train/train.md @@ -1,6 +1,9 @@ # Documentation for Training Models -This repository assists users in preparing training datasets, training models, and performing inference with trained models. We cover various use cases including pixel-wise segmentation, image classification, image enhancement, and machine-based reading order. For each use case, we provide guidance on how to generate the corresponding training dataset. +This repository assists users in preparing training datasets, training models, and performing inference with trained +models. We cover various use cases including pixel-wise segmentation, image classification, image enhancement, and +machine-based reading order. For each use case, we provide guidance on how to generate the corresponding training +dataset. All these use cases are now utilized in the Eynollah workflow. As mentioned, the following three tasks can be accomplished using this repository: @@ -23,11 +26,15 @@ These three commands are: ### image-enhancement -Generating a training dataset for image enhancement is quite straightforward. All that is needed is a set of high-resolution images. The training dataset can then be generated using the following command: +Generating a training dataset for image enhancement is quite straightforward. All that is needed is a set of +high-resolution images. The training dataset can then be generated using the following command: `python generate_gt_for_training.py image-enhancement -dis "dir of high resolution images" -dois "dir where degraded images will be written" -dols "dir where the corresponding high resolution image will be written as label" -scs "degrading scales json file"` -The scales JSON file is a dictionary with a key named 'scales' and values representing scales smaller than 1. Images are downscaled based on these scales and then upscaled again to their original size. This process causes the images to lose resolution at different scales. The degraded images are used as input images, and the original high-resolution images serve as labels. The enhancement model can be trained with this generated dataset. The scales JSON file looks like this: +The scales JSON file is a dictionary with a key named 'scales' and values representing scales smaller than 1. Images are +downscaled based on these scales and then upscaled again to their original size. This process causes the images to lose +resolution at different scales. The degraded images are used as input images, and the original high-resolution images +serve as labels. The enhancement model can be trained with this generated dataset. The scales JSON file looks like this: ```yaml { @@ -37,21 +44,33 @@ The scales JSON file is a dictionary with a key named 'scales' and values repres ### machine-based-reading-order -For machine-based reading order, we aim to determine the reading priority between two sets of text regions. The model's input is a three-channel image: the first and last channels contain information about each of the two text regions, while the middle channel encodes prominent layout elements necessary for reading order, such as separators and headers. To generate the training dataset, our script requires a page XML file that specifies the image layout with the correct reading order. +For machine-based reading order, we aim to determine the reading priority between two sets of text regions. The model's +input is a three-channel image: the first and last channels contain information about each of the two text regions, +while the middle channel encodes prominent layout elements necessary for reading order, such as separators and headers. +To generate the training dataset, our script requires a page XML file that specifies the image layout with the correct +reading order. -For output images, it is necessary to specify the width and height. Additionally, a minimum text region size can be set to filter out regions smaller than this minimum size. This minimum size is defined as the ratio of the text region area to the image area, with a default value of zero. To run the dataset generator, use the following command: +For output images, it is necessary to specify the width and height. Additionally, a minimum text region size can be set +to filter out regions smaller than this minimum size. This minimum size is defined as the ratio of the text region area +to the image area, with a default value of zero. To run the dataset generator, use the following command: `python generate_gt_for_training.py machine-based-reading-order -dx "dir of GT xml files" -domi "dir where output images will be written" -docl "dir where the labels will be written" -ih "height" -iw "width" -min "min area ratio"` ### pagexml2label -pagexml2label is designed to generate labels from GT page XML files for various pixel-wise segmentation use cases, including 'layout,' 'textline,' 'printspace,' 'glyph,' and 'word' segmentation. -To train a pixel-wise segmentation model, we require images along with their corresponding labels. Our training script expects a PNG image where each pixel corresponds to a label, represented by an integer. The background is always labeled as zero, while other elements are assigned different integers. For instance, if we have ground truth data with four elements including the background, the classes would be labeled as 0, 1, 2, and 3 respectively. +pagexml2label is designed to generate labels from GT page XML files for various pixel-wise segmentation use cases, +including 'layout,' 'textline,' 'printspace,' 'glyph,' and 'word' segmentation. +To train a pixel-wise segmentation model, we require images along with their corresponding labels. Our training script +expects a PNG image where each pixel corresponds to a label, represented by an integer. The background is always labeled +as zero, while other elements are assigned different integers. For instance, if we have ground truth data with four +elements including the background, the classes would be labeled as 0, 1, 2, and 3 respectively. -In binary segmentation scenarios such as textline or page extraction, the background is encoded as 0, and the desired element is automatically encoded as 1 in the PNG label. +In binary segmentation scenarios such as textline or page extraction, the background is encoded as 0, and the desired +element is automatically encoded as 1 in the PNG label. -To specify the desired use case and the elements to be extracted in the PNG labels, a custom JSON file can be passed. For example, in the case of 'textline' detection, the JSON file would resemble this: +To specify the desired use case and the elements to be extracted in the PNG labels, a custom JSON file can be passed. +For example, in the case of 'textline' detection, the JSON file would resemble this: ```yaml { @@ -83,16 +102,31 @@ A possible custom config json file for layout segmentation where the "printspac "printspace_as_class_in_layout" : 8 } ``` -For the layout use case, it is beneficial to first understand the structure of the page XML file and its elements. In a given image, the annotations of elements are recorded in a page XML file, including their contours and classes. For an image document, the known regions are 'textregion', 'separatorregion', 'imageregion', 'graphicregion', 'noiseregion', and 'tableregion'. +For the layout use case, it is beneficial to first understand the structure of the page XML file and its elements. In a +given image, the annotations of elements are recorded in a page XML file, including their contours and classes. For an +image document, the known regions are 'textregion', 'separatorregion', 'imageregion', 'graphicregion', 'noiseregion', +and 'tableregion'. -Text regions and graphic regions also have their own specific types. The known types for us for text regions are 'paragraph', 'header', 'heading', 'marginalia', 'drop-capital', 'footnote', 'footnote-continued', 'signature-mark', 'page-number', and 'catch-word'. The known types for graphic regions are 'handwritten-annotation', 'decoration', 'stamp', and 'signature'. -Since we don't know all types of text and graphic regions, unknown cases can arise. To handle these, we have defined two additional types: "rest_as_paragraph" and "rest_as_decoration" to ensure that no unknown types are missed. This way, users can extract all known types from the labels and be confident that no unknown types are overlooked. +Text regions and graphic regions also have their own specific types. The known types for us for text regions are +'paragraph', 'header', 'heading', 'marginalia', 'drop-capital', 'footnote', 'footnote-continued', 'signature-mark', +'page-number', and 'catch-word'. The known types for graphic regions are 'handwritten-annotation', 'decoration', +'stamp', and 'signature'. -In the custom JSON file shown above, "header" and "heading" are extracted as the same class, while "marginalia" is shown as a different class. All other text region types, including "drop-capital," are grouped into the same class. For the graphic region, "stamp" has its own class, while all other types are classified together. "Image region" and "separator region" are also present in the label. However, other regions like "noise region" and "table region" will not be included in the label PNG file, even if they have information in the page XML files, as we chose not to include them. +Since we don't know all types of text and graphic regions, unknown cases can arise. To handle these, we have defined two +additional types: "rest_as_paragraph" and "rest_as_decoration" to ensure that no unknown types are missed. This way, +users can extract all known types from the labels and be confident that no unknown types are overlooked. + +In the custom JSON file shown above, "header" and "heading" are extracted as the same class, while "marginalia" is shown +as a different class. All other text region types, including "drop-capital," are grouped into the same class. For the +graphic region, "stamp" has its own class, while all other types are classified together. "Image region" and "separator +region" are also present in the label. However, other regions like "noise region" and "table region" will not be +included in the label PNG file, even if they have information in the page XML files, as we chose not to include them. `python generate_gt_for_training.py pagexml2label -dx "dir of GT xml files" -do "dir where output label png files will be written" -cfg "custom config json file" -to "output type which has 2d and 3d. 2d is used for training and 3d is just to visualise the labels" "` -We have also defined an artificial class that can be added to the boundary of text region types or text lines. This key is called "artificial_class_on_boundary." If users want to apply this to certain text regions in the layout use case, the example JSON config file should look like this: +We have also defined an artificial class that can be added to the boundary of text region types or text lines. This key +is called "artificial_class_on_boundary." If users want to apply this to certain text regions in the layout use case, +the example JSON config file should look like this: ```yaml { @@ -114,9 +148,14 @@ We have also defined an artificial class that can be added to the boundary of te } ``` -This implies that the artificial class label, denoted by 7, will be present on PNG files and will only be added to the elements labeled as "paragraph," "header," "heading," and "marginalia." +This implies that the artificial class label, denoted by 7, will be present on PNG files and will only be added to the +elements labeled as "paragraph," "header," "heading," and "marginalia." -For "textline," "word," and "glyph," the artificial class on the boundaries will be activated only if the "artificial_class_label" key is specified in the config file. Its value should be set as 2 since these elements represent binary cases. For example, if the background and textline are denoted as 0 and 1 respectively, then the artificial class should be assigned the value 2. The example JSON config file should look like this for "textline" use case: +For "textline," "word," and "glyph," the artificial class on the boundaries will be activated only if the +"artificial_class_label" key is specified in the config file. Its value should be set as 2 since these elements +represent binary cases. For example, if the background and textline are denoted as 0 and 1 respectively, then the +artificial class should be assigned the value 2. The example JSON config file should look like this for "textline" use +case: ```yaml { @@ -125,7 +164,11 @@ For "textline," "word," and "glyph," the artificial class on the boundaries will } ``` -If the coordinates of "PrintSpace" or "Border" are present in the page XML ground truth files, and the user wishes to crop only the print space area, this can be achieved by activating the "-ps" argument. However, it should be noted that in this scenario, since cropping will be applied to the label files, the directory of the original images must be provided to ensure that they are cropped in sync with the labels. This ensures that the correct images and labels required for training are obtained. The command should resemble the following: +If the coordinates of "PrintSpace" or "Border" are present in the page XML ground truth files, and the user wishes to +crop only the print space area, this can be achieved by activating the "-ps" argument. However, it should be noted that +in this scenario, since cropping will be applied to the label files, the directory of the original images must be +provided to ensure that they are cropped in sync with the labels. This ensures that the correct images and labels +required for training are obtained. The command should resemble the following: `python generate_gt_for_training.py pagexml2label -dx "dir of GT xml files" -do "dir where output label png files will be written" -cfg "custom config json file" -to "output type which has 2d and 3d. 2d is used for training and 3d is just to visualise the labels" -ps -di "dir where the org images are located" -doi "dir where the cropped output images will be written" ` @@ -178,7 +221,10 @@ The classification model can be trained using the following command line: `python train.py with config_classification.json` -As evident in the example JSON file above, for classification, we utilize a "f1_threshold_classification" parameter. This parameter is employed to gather all models with an evaluation f1 score surpassing this threshold. Subsequently, an ensemble of these model weights is executed, and a model is saved in the output directory as "model_ens_avg". Additionally, the weight of the best model based on the evaluation f1 score is saved as "model_best". +As evident in the example JSON file above, for classification, we utilize a "f1_threshold_classification" parameter. +This parameter is employed to gather all models with an evaluation f1 score surpassing this threshold. Subsequently, +an ensemble of these model weights is executed, and a model is saved in the output directory as "model_ens_avg". +Additionally, the weight of the best model based on the evaluation f1 score is saved as "model_best". ### reading order An example config json file for machine based reading order should be like this: @@ -225,18 +271,25 @@ The classification model can be trained like the classification case command lin #### Parameter configuration for segmentation or enhancement usecases -The following parameter configuration can be applied to all segmentation use cases and enhancements. The augmentation, its sub-parameters, and continued training are defined only for segmentation use cases and enhancements, not for classification and machine-based reading order, as you can see in their example config files. +The following parameter configuration can be applied to all segmentation use cases and enhancements. The augmentation, +its sub-parameters, and continued training are defined only for segmentation use cases and enhancements, not for +classification and machine-based reading order, as you can see in their example config files. -* backbone_type: For segmentation tasks (such as text line, binarization, and layout detection) and enhancement, we offer two backbone options: a "nontransformer" and a "transformer" backbone. For the "transformer" backbone, we first apply a CNN followed by a transformer. In contrast, the "nontransformer" backbone utilizes only a CNN ResNet-50. +* backbone_type: For segmentation tasks (such as text line, binarization, and layout detection) and enhancement, we + offer two backbone options: a "nontransformer" and a "transformer" backbone. For the "transformer" backbone, we first +apply a CNN followed by a transformer. In contrast, the "nontransformer" backbone utilizes only a CNN ResNet-50. * task : The task parameter can have values such as "segmentation", "enhancement", "classification", and "reading_order". -* patches: If you want to break input images into smaller patches (input size of the model) you need to set this parameter to ``true``. In the case that the model should see the image once, like page extraction, patches should be set to ``false``. +* patches: If you want to break input images into smaller patches (input size of the model) you need to set this + parameter to ``true``. In the case that the model should see the image once, like page extraction, patches should be +set to ``false``. * n_batch: Number of batches at each iteration. * n_classes: Number of classes. In the case of binary classification this should be 2. In the case of reading_order it should set to 1. And for the case of layout detection just the unique number of classes should be given. * n_epochs: Number of epochs. * input_height: This indicates the height of model's input. * input_width: This indicates the width of model's input. * weight_decay: Weight decay of l2 regularization of model layers. -* pretraining: Set to ``true`` to load pretrained weights of ResNet50 encoder. The downloaded weights should be saved in a folder named "pretrained_model" in the same directory of "train.py" script. +* pretraining: Set to ``true`` to load pretrained weights of ResNet50 encoder. The downloaded weights should be saved + in a folder named "pretrained_model" in the same directory of "train.py" script. * augmentation: If you want to apply any kind of augmentation this parameter should first set to ``true``. * flip_aug: If ``true``, different types of filp will be applied on image. Type of flips is given with "flip_index" parameter. * blur_aug: If ``true``, different types of blurring will be applied on image. Type of blurrings is given with "blur_k" parameter. @@ -255,9 +308,14 @@ The following parameter configuration can be applied to all segmentation use cas * brightness: The amount of brightenings. * thetha: Rotation angles. * degrade_scales: The amount of degradings. -* continue_training: If ``true``, it means that you have already trained a model and you would like to continue the training. So it is needed to provide the dir of trained model with "dir_of_start_model" and index for naming the models. For example if you have already trained for 3 epochs then your last index is 2 and if you want to continue from model_1.h5, you can set ``index_start`` to 3 to start naming model with index 3. +* continue_training: If ``true``, it means that you have already trained a model and you would like to continue the + training. So it is needed to provide the dir of trained model with "dir_of_start_model" and index for naming the + models. For example if you have already trained for 3 epochs then your last index is 2 and if you want to continue from + model_1.h5, you can set ``index_start`` to 3 to start naming model with index 3. * weighted_loss: If ``true``, this means that you want to apply weighted categorical_crossentropy as loss fucntion. Be carefull if you set to ``true``the parameter "is_loss_soft_dice" should be ``false`` -* data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train and eval data are in "dir_output". Since when once we provide training data we resize and augment them and then we write them in sub-directories train and eval in "dir_output". +* data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train + and eval data are in "dir_output". Since when once we provide training data we resize and augment them and then we + write them in sub-directories train and eval in "dir_output". * dir_train: This is the directory of "images" and "labels" (dir_train should include two subdirectories with names of images and labels ) for raw images and labels. Namely they are not prepared (not resized and not augmented) yet for training the model. When we run this tool these raw data will be transformed to suitable size needed for the model and they will be written in "dir_output" in train and eval directories. Each of train and eval include "images" and "labels" sub-directories. * index_start: Starting index for saved models in the case that "continue_training" is ``true``. * dir_of_start_model: Directory containing pretrained model to continue training the model in the case that "continue_training" is ``true``. @@ -290,7 +348,8 @@ And the "dir_eval" the same structure as train directory: └── labels # directory of labels ``` -After configuring the JSON file for segmentation or enhancement, training can be initiated by running the following command, similar to the process for classification and reading order: +After configuring the JSON file for segmentation or enhancement, training can be initiated by running the following +command, similar to the process for classification and reading order: `python train.py with config_classification.json` @@ -339,7 +398,7 @@ An example config json file for binarization can be like this: "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -384,7 +443,7 @@ An example config json file for binarization can be like this: "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -429,7 +488,7 @@ An example config json file for binarization can be like this: "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -439,7 +498,8 @@ An example config json file for binarization can be like this: } ``` -It's important to mention that the value of n_classes for enhancement should be 3, as the model's output is a 3-channel image. +It's important to mention that the value of n_classes for enhancement should be 3, as the model's output is a 3-channel +image. #### Page extraction @@ -486,7 +546,8 @@ It's important to mention that the value of n_classes for enhancement should be } ``` -For page segmentation (or print space or border segmentation), the model needs to view the input image in its entirety, hence the patches parameter should be set to false. +For page segmentation (or print space or border segmentation), the model needs to view the input image in its entirety, +hence the patches parameter should be set to false. #### layout segmentation @@ -533,7 +594,7 @@ An example config json file for layout segmentation with 5 classes (including ba "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -543,9 +604,11 @@ An example config json file for layout segmentation with 5 classes (including ba } ``` ## Inference with the trained model + ### classification -For conducting inference with a trained model, you simply need to execute the following command line, specifying the directory of the model and the image on which to perform inference: +For conducting inference with a trained model, you simply need to execute the following command line, specifying the +directory of the model and the image on which to perform inference: `python inference.py -m "model dir" -i "image" ` @@ -554,8 +617,9 @@ This will straightforwardly return the class of the image. ### machine based reading order - -To infer the reading order using an reading order model, we need a page XML file containing layout information but without the reading order. We simply need to provide the model directory, the XML file, and the output directory. The new XML file with the added reading order will be written to the output directory with the same name. We need to run: +To infer the reading order using an reading order model, we need a page XML file containing layout information but +without the reading order. We simply need to provide the model directory, the XML file, and the output directory. The +new XML file with the added reading order will be written to the output directory with the same name. We need to run: `python inference.py -m "model dir" -xml "page xml file" -o "output dir to write new xml with reading order" ` @@ -570,7 +634,8 @@ For conducting inference with a trained model for segmentation and enhancement y Note that in the case of page extraction the -p flag is not needed. -For segmentation or binarization tasks, if a ground truth (GT) label is available, the IOU evaluation metric can be calculated for the output. To do this, you need to provide the GT label using the argument -gt. +For segmentation or binarization tasks, if a ground truth (GT) label is available, the IOU evaluation metric can be +calculated for the output. To do this, you need to provide the GT label using the argument -gt. From ea05461dfeb9551f2e333d03a708e01295ccfb2d Mon Sep 17 00:00:00 2001 From: kba Date: Mon, 29 Sep 2025 15:04:46 +0200 Subject: [PATCH 244/492] add documentation on eynollah layout from eynollah wiki --- docs/eynollah-layout.md | 100 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 docs/eynollah-layout.md diff --git a/docs/eynollah-layout.md b/docs/eynollah-layout.md new file mode 100644 index 0000000..e76ed51 --- /dev/null +++ b/docs/eynollah-layout.md @@ -0,0 +1,100 @@ +# `eynollah layout` documentation + +Eynollah can currently be used to detect the following region types/elements: +* Background +* [Border](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_BorderType.html) +* [Textregion](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_TextRegionType.html) +* [Textline](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_TextLineType.html) +* [Header](https://ocr-d.de/en/gt-guidelines/trans/lyUeberschrift.html) +* [Image](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_ImageRegionType.html) +* [Separator](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_SeparatorRegionType.html) +* [Marginalia](https://ocr-d.de/en/gt-guidelines/trans/lyMarginalie.html) +* [Initial (Drop Capital)](https://ocr-d.de/en/gt-guidelines/trans/lyInitiale.html) +* [Table](https://ocr-d.de/en/gt-guidelines/trans/lyTabellen.html) + +In addition, the tool can detect the [ReadingOrder](https://ocr-d.de/en/gt-guidelines/trans/lyLeserichtung.html) of text regions, both from left-to-right or from right-to-left. The final goal is to feed the output to an OCR model. + +## Method description + +Eynollah is based on pixelwise segmentation using a combination of a ResNet50 encoder with various U-Net decoders. +It uses a combination of multiple models and heuristics (see the flowchart below for the different stages and how they interact): +* [Border detection](https://github.com/qurator-spk/eynollah#border-detection) +* [Layout detection](https://github.com/qurator-spk/eynollah#layout-detection) +* [Textline detection](https://github.com/qurator-spk/eynollah#textline-detection) +* [Image enhancement](https://github.com/qurator-spk/eynollah#Image_enhancement) +* [Scale classification](https://github.com/qurator-spk/eynollah#Scale_classification) +* [Heuristic methods](https://https://github.com/qurator-spk/eynollah#heuristic-methods) + +![](https://user-images.githubusercontent.com/952378/100619946-1936f680-331e-11eb-9297-6e8b4cab3c16.png) + +### Border detection +For the purpose of text recognition (OCR) and in order to avoid noise being introduced from texts outside the printspace, one first needs to detect the border of the printed frame. This is done by a binary pixel-wise-segmentation model trained on a dataset of 2,000 documents where about 1,200 of them come from the [dhSegment](https://github.com/dhlab-epfl/dhSegment/) project (you can download the dataset from [here](https://github.com/dhlab-epfl/dhSegment/releases/download/v0.2/pages.zip)) and the remainder having been annotated in SBB. For border detection, the model needs to be fed with the whole image at once rather than separated in patches. + +### Layout detection +As a next step, text regions need to be identified by means of layout detection. Again a pixel-wise segmentation model was trained on 131 labeled images from the SBB digital collections, including some data augmentation. Since the target of this tool are historical documents, we consider as main region types text regions, separators, images, tables and background - each with their own subclasses, e.g. in the case of text regions, subclasses like header/heading, drop capital, main body text etc. While it would be desirable to detect and classify each of these classes in a granular way, there are also limitations due to having a suitably large and balanced training set. Accordingly, the current version of this tool is focussed on the main region types background, text region, image and separator. + +### Textline detection +In a subsequent step, binary pixel-wise segmentation is used again to classify pixels in a document that constitute textlines. For textline segmentation, a model was initially trained on documents with only one column/block of text and some augmentation with regard to scaling. By fine-tuning the parameters also for multi-column documents, additional training data was produced that resulted in a much more robust textline detection model. + +### Image enhancement +This is an image to image model which input was low quality of an image and label was actually the original image. For this one we did not have any GT, so we decreased the quality of documents in SBB and then feed them into model. + +### Scale classification +This is simply an image classifier which classifies images based on their scales or better to say based on their number of columns. + +### Heuristic methods +Some heuristic methods are also employed to further improve the model predictions: +* After border detection, the largest contour is determined by a bounding box, and the image cropped to these coordinates. +* For text region detection, the image is scaled up to make it easier for the model to detect background space between text regions. +* A minimum area is defined for text regions in relation to the overall image dimensions, so that very small regions that are noise can be filtered out. +* Deskewing is applied on the text region level (due to regions having different degrees of skew) in order to improve the textline segmentation result. +* After deskewing, a calculation of the pixel distribution on the X-axis allows the separation of textlines (foreground) and background pixels. +* Finally, using the derived coordinates, bounding boxes are determined for each textline. + +## Models + +TODO + +## How to use + +First, this model makes use of up to 9 trained models which are responsible for different operations like size detection, column classification, image enhancement, page extraction, main layout detection, full layout detection and textline detection.That does not mean that all 9 models are always required for every document. Based on the document characteristics and parameters specified, different scenarios can be applied. + +* If none of the parameters is set to `true`, the tool will perform a layout detection of main regions (background, text, images, separators and marginals). An advantage of this tool is that it tries to extract main text regions separately as much as possible. + +* If you set `-ae` (**a**llow image **e**nhancement) parameter to `true`, the tool will first check the ppi (pixel-per-inch) of the image and when it is less than 300, the tool will resize it and only then image enhancement will occur. Image enhancement can also take place without this option, but by setting this option to `true`, the layout xml data (e.g. coordinates) will be based on the resized and enhanced image instead of the original image. + +* For some documents, while the quality is good, their scale is very large, and the performance of tool decreases. In such cases you can set `-as` (**a**llow **s**caling) to `true`. With this option enabled, the tool will try to rescale the image and only then the layout detection process will begin. + +* If you care about drop capitals (initials) and headings, you can set `-fl` (**f**ull **l**ayout) to `true`. With this setting, the tool can currently distinguish 7 document layout classes/elements. + +* In cases where the document includes curved headers or curved lines, rectangular bounding boxes for textlines will not be a great option. In such cases it is strongly recommended setting the flag `-cl` (**c**urved **l**ines) to `true` to find contours of curved lines instead of rectangular bounding boxes. Be advised that enabling this option increases the processing time of the tool. + +* To crop and save image regions inside the document, set the parameter `-si` (**s**ave **i**mages) to true and provide a directory path to store the extracted images. + +* To extract only images from a document, set the parameter `-eoi` (**e**xtract **o**nly **i**mages). Choosing this option disables any other processing. To save the cropped images add `-ep` and `-si`. + +* This tool is actively being developed. If problems occur, or the performance does not meet your expectations, we welcome your feedback via [issues](https://github.com/qurator-spk/eynollah/issues). + + +### `--full-layout` vs `--no-full-layout` + +Here are the difference in elements detected depending on the `--full-layout`/`--no-full-layout` command line flags: + +| | `--full-layout` | `--no-full-layout` | +| --- | --- | --- | +| reading order | x | x | +| header regions | x | - | +| text regions | x | x | +| text regions / text line | x | x | +| drop-capitals | x | - | +| marginals | x | x | +| marginals / text line | x | x | +| image region | x | x | + +### Use as OCR-D processor + +Eynollah ships with a CLI interface to be used as [OCR-D](https://ocr-d.de) processor. In this case, the source image file group with (preferably) RGB images should be used as input (the image provided by `@imageFilename` is passed on directly): + +`ocrd-eynollah-segment -I OCR-D-IMG -O SEG-LINE -P models` + +## Examples From 52a7c93319d094c47fc1376171ca890cc80f5936 Mon Sep 17 00:00:00 2001 From: kba Date: Mon, 29 Sep 2025 15:05:05 +0200 Subject: [PATCH 245/492] add documentation on training eynollah from sbb_pixelwise_segmentation wiki --- docs/train_wiki.md | 576 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 576 insertions(+) create mode 100644 docs/train_wiki.md diff --git a/docs/train_wiki.md b/docs/train_wiki.md new file mode 100644 index 0000000..d1c0875 --- /dev/null +++ b/docs/train_wiki.md @@ -0,0 +1,576 @@ +# Documentation + +This repository assists users in preparing training datasets, training models, and performing inference with trained models. We cover various use cases including pixel-wise segmentation, image classification, image enhancement, and machine-based reading order. For each use case, we provide guidance on how to generate the corresponding training dataset. +All these use cases are now utilized in the Eynollah workflow. +As mentioned, the following three tasks can be accomplished using this repository: + +* Generate training dataset +* Train a model +* Inference with the trained model + +## Generate training dataset +The script generate_gt_for_training.py is used for generating training datasets. As the results of the following command demonstrate, the dataset generator provides three different commands: + +`python generate_gt_for_training.py --help` + + +These three commands are: + +* image-enhancement +* machine-based-reading-order +* pagexml2label + + +### image-enhancement + +Generating a training dataset for image enhancement is quite straightforward. All that is needed is a set of high-resolution images. The training dataset can then be generated using the following command: + +`python generate_gt_for_training.py image-enhancement -dis "dir of high resolution images" -dois "dir where degraded images will be written" -dols "dir where the corresponding high resolution image will be written as label" -scs "degrading scales json file"` + +The scales JSON file is a dictionary with a key named 'scales' and values representing scales smaller than 1. Images are downscaled based on these scales and then upscaled again to their original size. This process causes the images to lose resolution at different scales. The degraded images are used as input images, and the original high-resolution images serve as labels. The enhancement model can be trained with this generated dataset. The scales JSON file looks like this: + +```yaml +{ + "scales": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9] +} +``` + +### machine-based-reading-order + +For machine-based reading order, we aim to determine the reading priority between two sets of text regions. The model's input is a three-channel image: the first and last channels contain information about each of the two text regions, while the middle channel encodes prominent layout elements necessary for reading order, such as separators and headers. To generate the training dataset, our script requires a page XML file that specifies the image layout with the correct reading order. + +For output images, it is necessary to specify the width and height. Additionally, a minimum text region size can be set to filter out regions smaller than this minimum size. This minimum size is defined as the ratio of the text region area to the image area, with a default value of zero. To run the dataset generator, use the following command: + + +`python generate_gt_for_training.py machine-based-reading-order -dx "dir of GT xml files" -domi "dir where output images will be written" -docl "dir where the labels will be written" -ih "height" -iw "width" -min "min area ratio"` + +### pagexml2label + +pagexml2label is designed to generate labels from GT page XML files for various pixel-wise segmentation use cases, including 'layout,' 'textline,' 'printspace,' 'glyph,' and 'word' segmentation. +To train a pixel-wise segmentation model, we require images along with their corresponding labels. Our training script expects a PNG image where each pixel corresponds to a label, represented by an integer. The background is always labeled as zero, while other elements are assigned different integers. For instance, if we have ground truth data with four elements including the background, the classes would be labeled as 0, 1, 2, and 3 respectively. + +In binary segmentation scenarios such as textline or page extraction, the background is encoded as 0, and the desired element is automatically encoded as 1 in the PNG label. + +To specify the desired use case and the elements to be extracted in the PNG labels, a custom JSON file can be passed. For example, in the case of 'textline' detection, the JSON file would resemble this: + +```yaml +{ +"use_case": "textline" +} +``` + +In the case of layout segmentation a possible custom config json file can be like this: + +```yaml +{ +"use_case": "layout", +"textregions":{"rest_as_paragraph":1 , "drop-capital": 1, "header":2, "heading":2, "marginalia":3}, +"imageregion":4, +"separatorregion":5, +"graphicregions" :{"rest_as_decoration":6 ,"stamp":7} +} +``` + +A possible custom config json file for layout segmentation where the "printspace" is wished to be a class: + +```yaml +{ +"use_case": "layout", +"textregions":{"rest_as_paragraph":1 , "drop-capital": 1, "header":2, "heading":2, "marginalia":3}, +"imageregion":4, +"separatorregion":5, +"graphicregions" :{"rest_as_decoration":6 ,"stamp":7} +"printspace_as_class_in_layout" : 8 +} +``` +For the layout use case, it is beneficial to first understand the structure of the page XML file and its elements. In a given image, the annotations of elements are recorded in a page XML file, including their contours and classes. For an image document, the known regions are 'textregion', 'separatorregion', 'imageregion', 'graphicregion', 'noiseregion', and 'tableregion'. + +Text regions and graphic regions also have their own specific types. The known types for us for text regions are 'paragraph', 'header', 'heading', 'marginalia', 'drop-capital', 'footnote', 'footnote-continued', 'signature-mark', 'page-number', and 'catch-word'. The known types for graphic regions are 'handwritten-annotation', 'decoration', 'stamp', and 'signature'. +Since we don't know all types of text and graphic regions, unknown cases can arise. To handle these, we have defined two additional types: "rest_as_paragraph" and "rest_as_decoration" to ensure that no unknown types are missed. This way, users can extract all known types from the labels and be confident that no unknown types are overlooked. + +In the custom JSON file shown above, "header" and "heading" are extracted as the same class, while "marginalia" is shown as a different class. All other text region types, including "drop-capital," are grouped into the same class. For the graphic region, "stamp" has its own class, while all other types are classified together. "Image region" and "separator region" are also present in the label. However, other regions like "noise region" and "table region" will not be included in the label PNG file, even if they have information in the page XML files, as we chose not to include them. + +`python generate_gt_for_training.py pagexml2label -dx "dir of GT xml files" -do "dir where output label png files will be written" -cfg "custom config json file" -to "output type which has 2d and 3d. 2d is used for training and 3d is just to visualise the labels" "` + +We have also defined an artificial class that can be added to the boundary of text region types or text lines. This key is called "artificial_class_on_boundary." If users want to apply this to certain text regions in the layout use case, the example JSON config file should look like this: + +```yaml +{ + "use_case": "layout", + "textregions": { + "paragraph": 1, + "drop-capital": 1, + "header": 2, + "heading": 2, + "marginalia": 3 + }, + "imageregion": 4, + "separatorregion": 5, + "graphicregions": { + "rest_as_decoration": 6 + }, + "artificial_class_on_boundary": ["paragraph", "header", "heading", "marginalia"], + "artificial_class_label": 7 +} +``` + +This implies that the artificial class label, denoted by 7, will be present on PNG files and will only be added to the elements labeled as "paragraph," "header," "heading," and "marginalia." + +For "textline," "word," and "glyph," the artificial class on the boundaries will be activated only if the "artificial_class_label" key is specified in the config file. Its value should be set as 2 since these elements represent binary cases. For example, if the background and textline are denoted as 0 and 1 respectively, then the artificial class should be assigned the value 2. The example JSON config file should look like this for "textline" use case: + +```yaml +{ + "use_case": "textline", + "artificial_class_label": 2 +} +``` + +If the coordinates of "PrintSpace" or "Border" are present in the page XML ground truth files, and the user wishes to crop only the print space area, this can be achieved by activating the "-ps" argument. However, it should be noted that in this scenario, since cropping will be applied to the label files, the directory of the original images must be provided to ensure that they are cropped in sync with the labels. This ensures that the correct images and labels required for training are obtained. The command should resemble the following: + +`python generate_gt_for_training.py pagexml2label -dx "dir of GT xml files" -do "dir where output label png files will be written" -cfg "custom config json file" -to "output type which has 2d and 3d. 2d is used for training and 3d is just to visualise the labels" -ps -di "dir where the org images are located" -doi "dir where the cropped output images will be written" ` + +## Train a model +### classification + +For the classification use case, we haven't provided a ground truth generator, as it's unnecessary. For classification, all we require is a training directory with subdirectories, each containing images of its respective classes. We need separate directories for training and evaluation, and the class names (subdirectories) must be consistent across both directories. Additionally, the class names should be specified in the config JSON file, as shown in the following example. If, for instance, we aim to classify "apple" and "orange," with a total of 2 classes, the "classification_classes_name" key in the config file should appear as follows: + +```yaml +{ + "backbone_type" : "nontransformer", + "task": "classification", + "n_classes" : 2, + "n_epochs" : 10, + "input_height" : 448, + "input_width" : 448, + "weight_decay" : 1e-6, + "n_batch" : 4, + "learning_rate": 1e-4, + "f1_threshold_classification": 0.8, + "pretraining" : true, + "classification_classes_name" : {"0":"apple", "1":"orange"}, + "dir_train": "./train", + "dir_eval": "./eval", + "dir_output": "./output" +} +``` + +The "dir_train" should be like this: + +``` +. +└── train # train directory + ├── apple # directory of images for apple class + └── orange # directory of images for orange class +``` + +And the "dir_eval" the same structure as train directory: + +``` +. +└── eval # evaluation directory + ├── apple # directory of images for apple class + └── orange # directory of images for orange class + +``` + +The classification model can be trained using the following command line: + +`python train.py with config_classification.json` + + +As evident in the example JSON file above, for classification, we utilize a "f1_threshold_classification" parameter. This parameter is employed to gather all models with an evaluation f1 score surpassing this threshold. Subsequently, an ensemble of these model weights is executed, and a model is saved in the output directory as "model_ens_avg". Additionally, the weight of the best model based on the evaluation f1 score is saved as "model_best". + +### reading order +An example config json file for machine based reading order should be like this: + +```yaml +{ + "backbone_type" : "nontransformer", + "task": "reading_order", + "n_classes" : 1, + "n_epochs" : 5, + "input_height" : 672, + "input_width" : 448, + "weight_decay" : 1e-6, + "n_batch" : 4, + "learning_rate": 1e-4, + "pretraining" : true, + "dir_train": "./train", + "dir_eval": "./eval", + "dir_output": "./output" +} +``` + +The "dir_train" should be like this: + +``` +. +└── train # train directory + ├── images # directory of images + └── labels # directory of labels +``` + +And the "dir_eval" the same structure as train directory: + +``` +. +└── eval # evaluation directory + ├── images # directory of images + └── labels # directory of labels +``` + +The classification model can be trained like the classification case command line. + +### Segmentation (Textline, Binarization, Page extraction and layout) and enhancement + +#### Parameter configuration for segmentation or enhancement usecases + +The following parameter configuration can be applied to all segmentation use cases and enhancements. The augmentation, its sub-parameters, and continued training are defined only for segmentation use cases and enhancements, not for classification and machine-based reading order, as you can see in their example config files. + +* backbone_type: For segmentation tasks (such as text line, binarization, and layout detection) and enhancement, we offer two backbone options: a "nontransformer" and a "transformer" backbone. For the "transformer" backbone, we first apply a CNN followed by a transformer. In contrast, the "nontransformer" backbone utilizes only a CNN ResNet-50. +* task : The task parameter can have values such as "segmentation", "enhancement", "classification", and "reading_order". +* patches: If you want to break input images into smaller patches (input size of the model) you need to set this parameter to ``true``. In the case that the model should see the image once, like page extraction, patches should be set to ``false``. +* n_batch: Number of batches at each iteration. +* n_classes: Number of classes. In the case of binary classification this should be 2. In the case of reading_order it should set to 1. And for the case of layout detection just the unique number of classes should be given. +* n_epochs: Number of epochs. +* input_height: This indicates the height of model's input. +* input_width: This indicates the width of model's input. +* weight_decay: Weight decay of l2 regularization of model layers. +* pretraining: Set to ``true`` to load pretrained weights of ResNet50 encoder. The downloaded weights should be saved in a folder named "pretrained_model" in the same directory of "train.py" script. +* augmentation: If you want to apply any kind of augmentation this parameter should first set to ``true``. +* flip_aug: If ``true``, different types of filp will be applied on image. Type of flips is given with "flip_index" parameter. +* blur_aug: If ``true``, different types of blurring will be applied on image. Type of blurrings is given with "blur_k" parameter. +* scaling: If ``true``, scaling will be applied on image. Scale of scaling is given with "scales" parameter. +* degrading: If ``true``, degrading will be applied to the image. The amount of degrading is defined with "degrade_scales" parameter. +* brightening: If ``true``, brightening will be applied to the image. The amount of brightening is defined with "brightness" parameter. +* rotation_not_90: If ``true``, rotation (not 90 degree) will be applied on image. Rotation angles are given with "thetha" parameter. +* rotation: If ``true``, 90 degree rotation will be applied on image. +* binarization: If ``true``,Otsu thresholding will be applied to augment the input data with binarized images. +* scaling_bluring: If ``true``, combination of scaling and blurring will be applied on image. +* scaling_binarization: If ``true``, combination of scaling and binarization will be applied on image. +* scaling_flip: If ``true``, combination of scaling and flip will be applied on image. +* flip_index: Type of flips. +* blur_k: Type of blurrings. +* scales: Scales of scaling. +* brightness: The amount of brightenings. +* thetha: Rotation angles. +* degrade_scales: The amount of degradings. +* continue_training: If ``true``, it means that you have already trained a model and you would like to continue the training. So it is needed to provide the dir of trained model with "dir_of_start_model" and index for naming the models. For example if you have already trained for 3 epochs then your last index is 2 and if you want to continue from model_1.h5, you can set ``index_start`` to 3 to start naming model with index 3. +* weighted_loss: If ``true``, this means that you want to apply weighted categorical_crossentropy as loss fucntion. Be carefull if you set to ``true``the parameter "is_loss_soft_dice" should be ``false`` +* data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train and eval data are in "dir_output". Since when once we provide training data we resize and augment them and then we write them in sub-directories train and eval in "dir_output". +* dir_train: This is the directory of "images" and "labels" (dir_train should include two subdirectories with names of images and labels ) for raw images and labels. Namely they are not prepared (not resized and not augmented) yet for training the model. When we run this tool these raw data will be transformed to suitable size needed for the model and they will be written in "dir_output" in train and eval directories. Each of train and eval include "images" and "labels" sub-directories. +* index_start: Starting index for saved models in the case that "continue_training" is ``true``. +* dir_of_start_model: Directory containing pretrained model to continue training the model in the case that "continue_training" is ``true``. +* transformer_num_patches_xy: Number of patches for vision transformer in x and y direction respectively. +* transformer_patchsize_x: Patch size of vision transformer patches in x direction. +* transformer_patchsize_y: Patch size of vision transformer patches in y direction. +* transformer_projection_dim: Transformer projection dimension. Default value is 64. +* transformer_mlp_head_units: Transformer Multilayer Perceptron (MLP) head units. Default value is [128, 64]. +* transformer_layers: transformer layers. Default value is 8. +* transformer_num_heads: Transformer number of heads. Default value is 4. +* transformer_cnn_first: We have two types of vision transformers. In one type, a CNN is applied first, followed by a transformer. In the other type, this order is reversed. If transformer_cnn_first is true, it means the CNN will be applied before the transformer. Default value is true. + +In the case of segmentation and enhancement the train and evaluation directory should be as following. + +The "dir_train" should be like this: + +``` +. +└── train # train directory + ├── images # directory of images + └── labels # directory of labels +``` + +And the "dir_eval" the same structure as train directory: + +``` +. +└── eval # evaluation directory + ├── images # directory of images + └── labels # directory of labels +``` + +After configuring the JSON file for segmentation or enhancement, training can be initiated by running the following command, similar to the process for classification and reading order: + +`python train.py with config_classification.json` + +#### Binarization + +An example config json file for binarization can be like this: + +```yaml +{ + "backbone_type" : "transformer", + "task": "binarization", + "n_classes" : 2, + "n_epochs" : 4, + "input_height" : 224, + "input_width" : 672, + "weight_decay" : 1e-6, + "n_batch" : 1, + "learning_rate": 1e-4, + "patches" : true, + "pretraining" : true, + "augmentation" : true, + "flip_aug" : false, + "blur_aug" : false, + "scaling" : true, + "degrading": false, + "brightening": false, + "binarization" : false, + "scaling_bluring" : false, + "scaling_binarization" : false, + "scaling_flip" : false, + "rotation": false, + "rotation_not_90": false, + "transformer_num_patches_xy": [7, 7], + "transformer_patchsize_x": 3, + "transformer_patchsize_y": 1, + "transformer_projection_dim": 192, + "transformer_mlp_head_units": [128, 64], + "transformer_layers": 8, + "transformer_num_heads": 4, + "transformer_cnn_first": true, + "blur_k" : ["blur","guass","median"], + "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], + "brightness" : [1.3, 1.5, 1.7, 2], + "degrade_scales" : [0.2, 0.4], + "flip_index" : [0, 1, -1], + "thetha" : [10, -10], + "continue_training": false, + "index_start" : 0, + "dir_of_start_model" : " ", + "weighted_loss": false, + "is_loss_soft_dice": false, + "data_is_provided": false, + "dir_train": "./train", + "dir_eval": "./eval", + "dir_output": "./output" +} +``` + +#### Textline + +```yaml +{ + "backbone_type" : "nontransformer", + "task": "segmentation", + "n_classes" : 2, + "n_epochs" : 4, + "input_height" : 448, + "input_width" : 224, + "weight_decay" : 1e-6, + "n_batch" : 1, + "learning_rate": 1e-4, + "patches" : true, + "pretraining" : true, + "augmentation" : true, + "flip_aug" : false, + "blur_aug" : false, + "scaling" : true, + "degrading": false, + "brightening": false, + "binarization" : false, + "scaling_bluring" : false, + "scaling_binarization" : false, + "scaling_flip" : false, + "rotation": false, + "rotation_not_90": false, + "blur_k" : ["blur","guass","median"], + "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], + "brightness" : [1.3, 1.5, 1.7, 2], + "degrade_scales" : [0.2, 0.4], + "flip_index" : [0, 1, -1], + "thetha" : [10, -10], + "continue_training": false, + "index_start" : 0, + "dir_of_start_model" : " ", + "weighted_loss": false, + "is_loss_soft_dice": false, + "data_is_provided": false, + "dir_train": "./train", + "dir_eval": "./eval", + "dir_output": "./output" +} +``` + +#### Enhancement + +```yaml +{ + "backbone_type" : "nontransformer", + "task": "enhancement", + "n_classes" : 3, + "n_epochs" : 4, + "input_height" : 448, + "input_width" : 224, + "weight_decay" : 1e-6, + "n_batch" : 4, + "learning_rate": 1e-4, + "patches" : true, + "pretraining" : true, + "augmentation" : true, + "flip_aug" : false, + "blur_aug" : false, + "scaling" : true, + "degrading": false, + "brightening": false, + "binarization" : false, + "scaling_bluring" : false, + "scaling_binarization" : false, + "scaling_flip" : false, + "rotation": false, + "rotation_not_90": false, + "blur_k" : ["blur","guass","median"], + "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], + "brightness" : [1.3, 1.5, 1.7, 2], + "degrade_scales" : [0.2, 0.4], + "flip_index" : [0, 1, -1], + "thetha" : [10, -10], + "continue_training": false, + "index_start" : 0, + "dir_of_start_model" : " ", + "weighted_loss": false, + "is_loss_soft_dice": false, + "data_is_provided": false, + "dir_train": "./train", + "dir_eval": "./eval", + "dir_output": "./output" +} +``` + +It's important to mention that the value of n_classes for enhancement should be 3, as the model's output is a 3-channel image. + +#### Page extraction + +```yaml +{ + "backbone_type" : "nontransformer", + "task": "segmentation", + "n_classes" : 2, + "n_epochs" : 4, + "input_height" : 448, + "input_width" : 224, + "weight_decay" : 1e-6, + "n_batch" : 1, + "learning_rate": 1e-4, + "patches" : false, + "pretraining" : true, + "augmentation" : false, + "flip_aug" : false, + "blur_aug" : false, + "scaling" : true, + "degrading": false, + "brightening": false, + "binarization" : false, + "scaling_bluring" : false, + "scaling_binarization" : false, + "scaling_flip" : false, + "rotation": false, + "rotation_not_90": false, + "blur_k" : ["blur","guass","median"], + "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], + "brightness" : [1.3, 1.5, 1.7, 2], + "degrade_scales" : [0.2, 0.4], + "flip_index" : [0, 1, -1], + "thetha" : [10, -10], + "continue_training": false, + "index_start" : 0, + "dir_of_start_model" : " ", + "weighted_loss": false, + "is_loss_soft_dice": false, + "data_is_provided": false, + "dir_train": "./train", + "dir_eval": "./eval", + "dir_output": "./output" +} +``` + +For page segmentation (or print space or border segmentation), the model needs to view the input image in its entirety, hence the patches parameter should be set to false. + +#### layout segmentation + +An example config json file for layout segmentation with 5 classes (including background) can be like this: + +```yaml +{ + "backbone_type" : "transformer", + "task": "segmentation", + "n_classes" : 5, + "n_epochs" : 4, + "input_height" : 448, + "input_width" : 224, + "weight_decay" : 1e-6, + "n_batch" : 1, + "learning_rate": 1e-4, + "patches" : true, + "pretraining" : true, + "augmentation" : true, + "flip_aug" : false, + "blur_aug" : false, + "scaling" : true, + "degrading": false, + "brightening": false, + "binarization" : false, + "scaling_bluring" : false, + "scaling_binarization" : false, + "scaling_flip" : false, + "rotation": false, + "rotation_not_90": false, + "transformer_num_patches_xy": [7, 14], + "transformer_patchsize_x": 1, + "transformer_patchsize_y": 1, + "transformer_projection_dim": 64, + "transformer_mlp_head_units": [128, 64], + "transformer_layers": 8, + "transformer_num_heads": 4, + "transformer_cnn_first": true, + "blur_k" : ["blur","guass","median"], + "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], + "brightness" : [1.3, 1.5, 1.7, 2], + "degrade_scales" : [0.2, 0.4], + "flip_index" : [0, 1, -1], + "thetha" : [10, -10], + "continue_training": false, + "index_start" : 0, + "dir_of_start_model" : " ", + "weighted_loss": false, + "is_loss_soft_dice": false, + "data_is_provided": false, + "dir_train": "./train", + "dir_eval": "./eval", + "dir_output": "./output" +} +``` +## Inference with the trained model +### classification + +For conducting inference with a trained model, you simply need to execute the following command line, specifying the directory of the model and the image on which to perform inference: + + +`python inference.py -m "model dir" -i "image" ` + +This will straightforwardly return the class of the image. + +### machine based reading order + + +To infer the reading order using an reading order model, we need a page XML file containing layout information but without the reading order. We simply need to provide the model directory, the XML file, and the output directory. The new XML file with the added reading order will be written to the output directory with the same name. We need to run: + +`python inference.py -m "model dir" -xml "page xml file" -o "output dir to write new xml with reading order" ` + + +### Segmentation (Textline, Binarization, Page extraction and layout) and enhancement + +For conducting inference with a trained model for segmentation and enhancement you need to run the following command line: + + +`python inference.py -m "model dir" -i "image" -p -s "output image" ` + + +Note that in the case of page extraction the -p flag is not needed. + +For segmentation or binarization tasks, if a ground truth (GT) label is available, the IOU evaluation metric can be calculated for the output. To do this, you need to provide the GT label using the argument -gt. + + + From 6d379782abf6de1574912878a2dc61d2cfa0d18c Mon Sep 17 00:00:00 2001 From: kba Date: Mon, 29 Sep 2025 15:11:02 +0200 Subject: [PATCH 246/492] :memo: align former upstream train.md with wiki train.md syntactically --- docs/train_wiki.md | 145 +++++++++++++++++++++++++++++++++------------ train/train.md | 18 +++--- 2 files changed, 116 insertions(+), 47 deletions(-) diff --git a/docs/train_wiki.md b/docs/train_wiki.md index d1c0875..5158a80 100644 --- a/docs/train_wiki.md +++ b/docs/train_wiki.md @@ -1,7 +1,10 @@ # Documentation -This repository assists users in preparing training datasets, training models, and performing inference with trained models. We cover various use cases including pixel-wise segmentation, image classification, image enhancement, and machine-based reading order. For each use case, we provide guidance on how to generate the corresponding training dataset. -All these use cases are now utilized in the Eynollah workflow. +This repository assists users in preparing training datasets, training models, and performing inference with trained +models. We cover various use cases including pixel-wise segmentation, image classification, image enhancement, and +machine-based reading order. For each use case, we provide guidance on how to generate the corresponding training +dataset. +All these use cases are now utilized in the Eynollah workflow. As mentioned, the following three tasks can be accomplished using this repository: * Generate training dataset @@ -23,11 +26,15 @@ These three commands are: ### image-enhancement -Generating a training dataset for image enhancement is quite straightforward. All that is needed is a set of high-resolution images. The training dataset can then be generated using the following command: +Generating a training dataset for image enhancement is quite straightforward. All that is needed is a set of +high-resolution images. The training dataset can then be generated using the following command: `python generate_gt_for_training.py image-enhancement -dis "dir of high resolution images" -dois "dir where degraded images will be written" -dols "dir where the corresponding high resolution image will be written as label" -scs "degrading scales json file"` -The scales JSON file is a dictionary with a key named 'scales' and values representing scales smaller than 1. Images are downscaled based on these scales and then upscaled again to their original size. This process causes the images to lose resolution at different scales. The degraded images are used as input images, and the original high-resolution images serve as labels. The enhancement model can be trained with this generated dataset. The scales JSON file looks like this: +The scales JSON file is a dictionary with a key named 'scales' and values representing scales smaller than 1. Images are +downscaled based on these scales and then upscaled again to their original size. This process causes the images to lose +resolution at different scales. The degraded images are used as input images, and the original high-resolution images +serve as labels. The enhancement model can be trained with this generated dataset. The scales JSON file looks like this: ```yaml { @@ -37,21 +44,34 @@ The scales JSON file is a dictionary with a key named 'scales' and values repres ### machine-based-reading-order -For machine-based reading order, we aim to determine the reading priority between two sets of text regions. The model's input is a three-channel image: the first and last channels contain information about each of the two text regions, while the middle channel encodes prominent layout elements necessary for reading order, such as separators and headers. To generate the training dataset, our script requires a page XML file that specifies the image layout with the correct reading order. +For machine-based reading order, we aim to determine the reading priority between two sets of text regions. The model's +input is a three-channel image: the first and last channels contain information about each of the two text regions, +while the middle channel encodes prominent layout elements necessary for reading order, such as separators and headers. +To generate the training dataset, our script requires a page XML file that specifies the image layout with the correct +reading order. -For output images, it is necessary to specify the width and height. Additionally, a minimum text region size can be set to filter out regions smaller than this minimum size. This minimum size is defined as the ratio of the text region area to the image area, with a default value of zero. To run the dataset generator, use the following command: +For output images, it is necessary to specify the width and height. Additionally, a minimum text region size can be set +to filter out regions smaller than this minimum size. This minimum size is defined as the ratio of the text region area +to the image area, with a default value of zero. To run the dataset generator, use the following command: `python generate_gt_for_training.py machine-based-reading-order -dx "dir of GT xml files" -domi "dir where output images will be written" -docl "dir where the labels will be written" -ih "height" -iw "width" -min "min area ratio"` ### pagexml2label -pagexml2label is designed to generate labels from GT page XML files for various pixel-wise segmentation use cases, including 'layout,' 'textline,' 'printspace,' 'glyph,' and 'word' segmentation. -To train a pixel-wise segmentation model, we require images along with their corresponding labels. Our training script expects a PNG image where each pixel corresponds to a label, represented by an integer. The background is always labeled as zero, while other elements are assigned different integers. For instance, if we have ground truth data with four elements including the background, the classes would be labeled as 0, 1, 2, and 3 respectively. +pagexml2label is designed to generate labels from GT page XML files for various pixel-wise segmentation use cases, +including 'layout,' 'textline,' 'printspace,' 'glyph,' and 'word' segmentation. -In binary segmentation scenarios such as textline or page extraction, the background is encoded as 0, and the desired element is automatically encoded as 1 in the PNG label. +To train a pixel-wise segmentation model, we require images along with their corresponding labels. Our training script +expects a PNG image where each pixel corresponds to a label, represented by an integer. The background is always labeled +as zero, while other elements are assigned different integers. For instance, if we have ground truth data with four +elements including the background, the classes would be labeled as 0, 1, 2, and 3 respectively. -To specify the desired use case and the elements to be extracted in the PNG labels, a custom JSON file can be passed. For example, in the case of 'textline' detection, the JSON file would resemble this: +In binary segmentation scenarios such as textline or page extraction, the background is encoded as 0, and the desired +element is automatically encoded as 1 in the PNG label. + +To specify the desired use case and the elements to be extracted in the PNG labels, a custom JSON file can be passed. +For example, in the case of 'textline' detection, the JSON file would resemble this: ```yaml { @@ -83,16 +103,32 @@ A possible custom config json file for layout segmentation where the "printspac "printspace_as_class_in_layout" : 8 } ``` -For the layout use case, it is beneficial to first understand the structure of the page XML file and its elements. In a given image, the annotations of elements are recorded in a page XML file, including their contours and classes. For an image document, the known regions are 'textregion', 'separatorregion', 'imageregion', 'graphicregion', 'noiseregion', and 'tableregion'. -Text regions and graphic regions also have their own specific types. The known types for us for text regions are 'paragraph', 'header', 'heading', 'marginalia', 'drop-capital', 'footnote', 'footnote-continued', 'signature-mark', 'page-number', and 'catch-word'. The known types for graphic regions are 'handwritten-annotation', 'decoration', 'stamp', and 'signature'. -Since we don't know all types of text and graphic regions, unknown cases can arise. To handle these, we have defined two additional types: "rest_as_paragraph" and "rest_as_decoration" to ensure that no unknown types are missed. This way, users can extract all known types from the labels and be confident that no unknown types are overlooked. +For the layout use case, it is beneficial to first understand the structure of the page XML file and its elements. In a +given image, the annotations of elements are recorded in a page XML file, including their contours and classes. For an +image document, the known regions are 'textregion', 'separatorregion', 'imageregion', 'graphicregion', 'noiseregion', +and 'tableregion'. -In the custom JSON file shown above, "header" and "heading" are extracted as the same class, while "marginalia" is shown as a different class. All other text region types, including "drop-capital," are grouped into the same class. For the graphic region, "stamp" has its own class, while all other types are classified together. "Image region" and "separator region" are also present in the label. However, other regions like "noise region" and "table region" will not be included in the label PNG file, even if they have information in the page XML files, as we chose not to include them. +Text regions and graphic regions also have their own specific types. The known types for us for text regions are +'paragraph', 'header', 'heading', 'marginalia', 'drop-capital', 'footnote', 'footnote-continued', 'signature-mark', +'page-number', and 'catch-word'. The known types for graphic regions are 'handwritten-annotation', 'decoration', +'stamp', and 'signature'. + +Since we don't know all types of text and graphic regions, unknown cases can arise. To handle these, we have defined two +additional types: "rest_as_paragraph" and "rest_as_decoration" to ensure that no unknown types are missed. This way, +users can extract all known types from the labels and be confident that no unknown types are overlooked. + +In the custom JSON file shown above, "header" and "heading" are extracted as the same class, while "marginalia" is shown +as a different class. All other text region types, including "drop-capital," are grouped into the same class. For the +graphic region, "stamp" has its own class, while all other types are classified together. "Image region" and "separator +region" are also present in the label. However, other regions like "noise region" and "table region" will not be +included in the label PNG file, even if they have information in the page XML files, as we chose not to include them. `python generate_gt_for_training.py pagexml2label -dx "dir of GT xml files" -do "dir where output label png files will be written" -cfg "custom config json file" -to "output type which has 2d and 3d. 2d is used for training and 3d is just to visualise the labels" "` -We have also defined an artificial class that can be added to the boundary of text region types or text lines. This key is called "artificial_class_on_boundary." If users want to apply this to certain text regions in the layout use case, the example JSON config file should look like this: +We have also defined an artificial class that can be added to the boundary of text region types or text lines. This key +is called "artificial_class_on_boundary." If users want to apply this to certain text regions in the layout use case, +the example JSON config file should look like this: ```yaml { @@ -114,9 +150,14 @@ We have also defined an artificial class that can be added to the boundary of te } ``` -This implies that the artificial class label, denoted by 7, will be present on PNG files and will only be added to the elements labeled as "paragraph," "header," "heading," and "marginalia." +This implies that the artificial class label, denoted by 7, will be present on PNG files and will only be added to the +elements labeled as "paragraph," "header," "heading," and "marginalia." -For "textline," "word," and "glyph," the artificial class on the boundaries will be activated only if the "artificial_class_label" key is specified in the config file. Its value should be set as 2 since these elements represent binary cases. For example, if the background and textline are denoted as 0 and 1 respectively, then the artificial class should be assigned the value 2. The example JSON config file should look like this for "textline" use case: +For "textline," "word," and "glyph," the artificial class on the boundaries will be activated only if the +"artificial_class_label" key is specified in the config file. Its value should be set as 2 since these elements +represent binary cases. For example, if the background and textline are denoted as 0 and 1 respectively, then the +artificial class should be assigned the value 2. The example JSON config file should look like this for "textline" use +case: ```yaml { @@ -125,7 +166,11 @@ For "textline," "word," and "glyph," the artificial class on the boundaries will } ``` -If the coordinates of "PrintSpace" or "Border" are present in the page XML ground truth files, and the user wishes to crop only the print space area, this can be achieved by activating the "-ps" argument. However, it should be noted that in this scenario, since cropping will be applied to the label files, the directory of the original images must be provided to ensure that they are cropped in sync with the labels. This ensures that the correct images and labels required for training are obtained. The command should resemble the following: +If the coordinates of "PrintSpace" or "Border" are present in the page XML ground truth files, and the user wishes to +crop only the print space area, this can be achieved by activating the "-ps" argument. However, it should be noted that +in this scenario, since cropping will be applied to the label files, the directory of the original images must be +provided to ensure that they are cropped in sync with the labels. This ensures that the correct images and labels +required for training are obtained. The command should resemble the following: `python generate_gt_for_training.py pagexml2label -dx "dir of GT xml files" -do "dir where output label png files will be written" -cfg "custom config json file" -to "output type which has 2d and 3d. 2d is used for training and 3d is just to visualise the labels" -ps -di "dir where the org images are located" -doi "dir where the cropped output images will be written" ` @@ -156,7 +201,7 @@ For the classification use case, we haven't provided a ground truth generator, a The "dir_train" should be like this: -``` +``` . └── train # train directory ├── apple # directory of images for apple class @@ -165,7 +210,7 @@ The "dir_train" should be like this: And the "dir_eval" the same structure as train directory: -``` +``` . └── eval # evaluation directory ├── apple # directory of images for apple class @@ -178,7 +223,10 @@ The classification model can be trained using the following command line: `python train.py with config_classification.json` -As evident in the example JSON file above, for classification, we utilize a "f1_threshold_classification" parameter. This parameter is employed to gather all models with an evaluation f1 score surpassing this threshold. Subsequently, an ensemble of these model weights is executed, and a model is saved in the output directory as "model_ens_avg". Additionally, the weight of the best model based on the evaluation f1 score is saved as "model_best". +As evident in the example JSON file above, for classification, we utilize a "f1_threshold_classification" parameter. +This parameter is employed to gather all models with an evaluation f1 score surpassing this threshold. Subsequently, +an ensemble of these model weights is executed, and a model is saved in the output directory as "model_ens_avg". +Additionally, the weight of the best model based on the evaluation f1 score is saved as "model_best". ### reading order An example config json file for machine based reading order should be like this: @@ -225,18 +273,25 @@ The classification model can be trained like the classification case command lin #### Parameter configuration for segmentation or enhancement usecases -The following parameter configuration can be applied to all segmentation use cases and enhancements. The augmentation, its sub-parameters, and continued training are defined only for segmentation use cases and enhancements, not for classification and machine-based reading order, as you can see in their example config files. +The following parameter configuration can be applied to all segmentation use cases and enhancements. The augmentation, +its sub-parameters, and continued training are defined only for segmentation use cases and enhancements, not for +classification and machine-based reading order, as you can see in their example config files. -* backbone_type: For segmentation tasks (such as text line, binarization, and layout detection) and enhancement, we offer two backbone options: a "nontransformer" and a "transformer" backbone. For the "transformer" backbone, we first apply a CNN followed by a transformer. In contrast, the "nontransformer" backbone utilizes only a CNN ResNet-50. +* backbone_type: For segmentation tasks (such as text line, binarization, and layout detection) and enhancement, we + offer two backbone options: a "nontransformer" and a "transformer" backbone. For the "transformer" backbone, we first + apply a CNN followed by a transformer. In contrast, the "nontransformer" backbone utilizes only a CNN ResNet-50. * task : The task parameter can have values such as "segmentation", "enhancement", "classification", and "reading_order". -* patches: If you want to break input images into smaller patches (input size of the model) you need to set this parameter to ``true``. In the case that the model should see the image once, like page extraction, patches should be set to ``false``. +* patches: If you want to break input images into smaller patches (input size of the model) you need to set this + parameter to ``true``. In the case that the model should see the image once, like page extraction, patches should be + set to ``false``. * n_batch: Number of batches at each iteration. * n_classes: Number of classes. In the case of binary classification this should be 2. In the case of reading_order it should set to 1. And for the case of layout detection just the unique number of classes should be given. * n_epochs: Number of epochs. * input_height: This indicates the height of model's input. * input_width: This indicates the width of model's input. * weight_decay: Weight decay of l2 regularization of model layers. -* pretraining: Set to ``true`` to load pretrained weights of ResNet50 encoder. The downloaded weights should be saved in a folder named "pretrained_model" in the same directory of "train.py" script. +* pretraining: Set to ``true`` to load pretrained weights of ResNet50 encoder. The downloaded weights should be saved + in a folder named "pretrained_model" in the same directory of "train.py" script. * augmentation: If you want to apply any kind of augmentation this parameter should first set to ``true``. * flip_aug: If ``true``, different types of filp will be applied on image. Type of flips is given with "flip_index" parameter. * blur_aug: If ``true``, different types of blurring will be applied on image. Type of blurrings is given with "blur_k" parameter. @@ -255,9 +310,14 @@ The following parameter configuration can be applied to all segmentation use cas * brightness: The amount of brightenings. * thetha: Rotation angles. * degrade_scales: The amount of degradings. -* continue_training: If ``true``, it means that you have already trained a model and you would like to continue the training. So it is needed to provide the dir of trained model with "dir_of_start_model" and index for naming the models. For example if you have already trained for 3 epochs then your last index is 2 and if you want to continue from model_1.h5, you can set ``index_start`` to 3 to start naming model with index 3. +* continue_training: If ``true``, it means that you have already trained a model and you would like to continue the + training. So it is needed to provide the dir of trained model with "dir_of_start_model" and index for naming the + models. For example if you have already trained for 3 epochs then your last index is 2 and if you want to continue from + model_1.h5, you can set ``index_start`` to 3 to start naming model with index 3. * weighted_loss: If ``true``, this means that you want to apply weighted categorical_crossentropy as loss fucntion. Be carefull if you set to ``true``the parameter "is_loss_soft_dice" should be ``false`` -* data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train and eval data are in "dir_output". Since when once we provide training data we resize and augment them and then we write them in sub-directories train and eval in "dir_output". +* data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train + and eval data are in "dir_output". Since when once we provide training data we resize and augment them and then we + write them in sub-directories train and eval in "dir_output". * dir_train: This is the directory of "images" and "labels" (dir_train should include two subdirectories with names of images and labels ) for raw images and labels. Namely they are not prepared (not resized and not augmented) yet for training the model. When we run this tool these raw data will be transformed to suitable size needed for the model and they will be written in "dir_output" in train and eval directories. Each of train and eval include "images" and "labels" sub-directories. * index_start: Starting index for saved models in the case that "continue_training" is ``true``. * dir_of_start_model: Directory containing pretrained model to continue training the model in the case that "continue_training" is ``true``. @@ -290,7 +350,8 @@ And the "dir_eval" the same structure as train directory: └── labels # directory of labels ``` -After configuring the JSON file for segmentation or enhancement, training can be initiated by running the following command, similar to the process for classification and reading order: +After configuring the JSON file for segmentation or enhancement, training can be initiated by running the following +command, similar to the process for classification and reading order: `python train.py with config_classification.json` @@ -339,7 +400,7 @@ An example config json file for binarization can be like this: "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -384,7 +445,7 @@ An example config json file for binarization can be like this: "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -429,7 +490,7 @@ An example config json file for binarization can be like this: "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -439,7 +500,8 @@ An example config json file for binarization can be like this: } ``` -It's important to mention that the value of n_classes for enhancement should be 3, as the model's output is a 3-channel image. +It's important to mention that the value of n_classes for enhancement should be 3, as the model's output is a 3-channel +image. #### Page extraction @@ -476,7 +538,7 @@ It's important to mention that the value of n_classes for enhancement should be "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -486,7 +548,8 @@ It's important to mention that the value of n_classes for enhancement should be } ``` -For page segmentation (or print space or border segmentation), the model needs to view the input image in its entirety, hence the patches parameter should be set to false. +For page segmentation (or print space or border segmentation), the model needs to view the input image in its entirety, +hence the patches parameter should be set to false. #### layout segmentation @@ -533,7 +596,7 @@ An example config json file for layout segmentation with 5 classes (including ba "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, @@ -543,9 +606,11 @@ An example config json file for layout segmentation with 5 classes (including ba } ``` ## Inference with the trained model + ### classification -For conducting inference with a trained model, you simply need to execute the following command line, specifying the directory of the model and the image on which to perform inference: +For conducting inference with a trained model, you simply need to execute the following command line, specifying the +directory of the model and the image on which to perform inference: `python inference.py -m "model dir" -i "image" ` @@ -554,8 +619,9 @@ This will straightforwardly return the class of the image. ### machine based reading order - -To infer the reading order using an reading order model, we need a page XML file containing layout information but without the reading order. We simply need to provide the model directory, the XML file, and the output directory. The new XML file with the added reading order will be written to the output directory with the same name. We need to run: +To infer the reading order using an reading order model, we need a page XML file containing layout information but +without the reading order. We simply need to provide the model directory, the XML file, and the output directory. The +new XML file with the added reading order will be written to the output directory with the same name. We need to run: `python inference.py -m "model dir" -xml "page xml file" -o "output dir to write new xml with reading order" ` @@ -570,7 +636,8 @@ For conducting inference with a trained model for segmentation and enhancement y Note that in the case of page extraction the -p flag is not needed. -For segmentation or binarization tasks, if a ground truth (GT) label is available, the IOU evaluation metric can be calculated for the output. To do this, you need to provide the GT label using the argument -gt. +For segmentation or binarization tasks, if a ground truth (GT) label is available, the IOU evaluation metric can be +calculated for the output. To do this, you need to provide the GT label using the argument -gt. diff --git a/train/train.md b/train/train.md index 3eeb715..7e7ab63 100644 --- a/train/train.md +++ b/train/train.md @@ -4,7 +4,7 @@ This repository assists users in preparing training datasets, training models, a models. We cover various use cases including pixel-wise segmentation, image classification, image enhancement, and machine-based reading order. For each use case, we provide guidance on how to generate the corresponding training dataset. -All these use cases are now utilized in the Eynollah workflow. +All these use cases are now utilized in the Eynollah workflow. As mentioned, the following three tasks can be accomplished using this repository: * Generate training dataset @@ -61,6 +61,7 @@ to the image area, with a default value of zero. To run the dataset generator, u pagexml2label is designed to generate labels from GT page XML files for various pixel-wise segmentation use cases, including 'layout,' 'textline,' 'printspace,' 'glyph,' and 'word' segmentation. + To train a pixel-wise segmentation model, we require images along with their corresponding labels. Our training script expects a PNG image where each pixel corresponds to a label, represented by an integer. The background is always labeled as zero, while other elements are assigned different integers. For instance, if we have ground truth data with four @@ -102,6 +103,7 @@ A possible custom config json file for layout segmentation where the "printspac "printspace_as_class_in_layout" : 8 } ``` + For the layout use case, it is beneficial to first understand the structure of the page XML file and its elements. In a given image, the annotations of elements are recorded in a page XML file, including their contours and classes. For an image document, the known regions are 'textregion', 'separatorregion', 'imageregion', 'graphicregion', 'noiseregion', @@ -199,7 +201,7 @@ For the classification use case, we haven't provided a ground truth generator, a The "dir_train" should be like this: -``` +``` . └── train # train directory ├── apple # directory of images for apple class @@ -208,7 +210,7 @@ The "dir_train" should be like this: And the "dir_eval" the same structure as train directory: -``` +``` . └── eval # evaluation directory ├── apple # directory of images for apple class @@ -277,11 +279,11 @@ classification and machine-based reading order, as you can see in their example * backbone_type: For segmentation tasks (such as text line, binarization, and layout detection) and enhancement, we offer two backbone options: a "nontransformer" and a "transformer" backbone. For the "transformer" backbone, we first -apply a CNN followed by a transformer. In contrast, the "nontransformer" backbone utilizes only a CNN ResNet-50. + apply a CNN followed by a transformer. In contrast, the "nontransformer" backbone utilizes only a CNN ResNet-50. * task : The task parameter can have values such as "segmentation", "enhancement", "classification", and "reading_order". * patches: If you want to break input images into smaller patches (input size of the model) you need to set this parameter to ``true``. In the case that the model should see the image once, like page extraction, patches should be -set to ``false``. + set to ``false``. * n_batch: Number of batches at each iteration. * n_classes: Number of classes. In the case of binary classification this should be 2. In the case of reading_order it should set to 1. And for the case of layout detection just the unique number of classes should be given. * n_epochs: Number of epochs. @@ -311,11 +313,11 @@ set to ``false``. * continue_training: If ``true``, it means that you have already trained a model and you would like to continue the training. So it is needed to provide the dir of trained model with "dir_of_start_model" and index for naming the models. For example if you have already trained for 3 epochs then your last index is 2 and if you want to continue from - model_1.h5, you can set ``index_start`` to 3 to start naming model with index 3. + model_1.h5, you can set ``index_start`` to 3 to start naming model with index 3. * weighted_loss: If ``true``, this means that you want to apply weighted categorical_crossentropy as loss fucntion. Be carefull if you set to ``true``the parameter "is_loss_soft_dice" should be ``false`` * data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train and eval data are in "dir_output". Since when once we provide training data we resize and augment them and then we - write them in sub-directories train and eval in "dir_output". + write them in sub-directories train and eval in "dir_output". * dir_train: This is the directory of "images" and "labels" (dir_train should include two subdirectories with names of images and labels ) for raw images and labels. Namely they are not prepared (not resized and not augmented) yet for training the model. When we run this tool these raw data will be transformed to suitable size needed for the model and they will be written in "dir_output" in train and eval directories. Each of train and eval include "images" and "labels" sub-directories. * index_start: Starting index for saved models in the case that "continue_training" is ``true``. * dir_of_start_model: Directory containing pretrained model to continue training the model in the case that "continue_training" is ``true``. @@ -536,7 +538,7 @@ image. "thetha" : [10, -10], "continue_training": false, "index_start" : 0, - "dir_of_start_model" : " ", + "dir_of_start_model" : " ", "weighted_loss": false, "is_loss_soft_dice": false, "data_is_provided": false, From ce02a3553b084f9d30ade931a640e1d9711cf3e9 Mon Sep 17 00:00:00 2001 From: kba Date: Mon, 29 Sep 2025 15:18:21 +0200 Subject: [PATCH 247/492] :fire: remove obsolete versions of the training document --- docs/train_wiki.md | 643 --------------------------------------------- train/train.md | 643 --------------------------------------------- 2 files changed, 1286 deletions(-) delete mode 100644 docs/train_wiki.md delete mode 100644 train/train.md diff --git a/docs/train_wiki.md b/docs/train_wiki.md deleted file mode 100644 index 5158a80..0000000 --- a/docs/train_wiki.md +++ /dev/null @@ -1,643 +0,0 @@ -# Documentation - -This repository assists users in preparing training datasets, training models, and performing inference with trained -models. We cover various use cases including pixel-wise segmentation, image classification, image enhancement, and -machine-based reading order. For each use case, we provide guidance on how to generate the corresponding training -dataset. -All these use cases are now utilized in the Eynollah workflow. -As mentioned, the following three tasks can be accomplished using this repository: - -* Generate training dataset -* Train a model -* Inference with the trained model - -## Generate training dataset -The script generate_gt_for_training.py is used for generating training datasets. As the results of the following command demonstrate, the dataset generator provides three different commands: - -`python generate_gt_for_training.py --help` - - -These three commands are: - -* image-enhancement -* machine-based-reading-order -* pagexml2label - - -### image-enhancement - -Generating a training dataset for image enhancement is quite straightforward. All that is needed is a set of -high-resolution images. The training dataset can then be generated using the following command: - -`python generate_gt_for_training.py image-enhancement -dis "dir of high resolution images" -dois "dir where degraded images will be written" -dols "dir where the corresponding high resolution image will be written as label" -scs "degrading scales json file"` - -The scales JSON file is a dictionary with a key named 'scales' and values representing scales smaller than 1. Images are -downscaled based on these scales and then upscaled again to their original size. This process causes the images to lose -resolution at different scales. The degraded images are used as input images, and the original high-resolution images -serve as labels. The enhancement model can be trained with this generated dataset. The scales JSON file looks like this: - -```yaml -{ - "scales": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9] -} -``` - -### machine-based-reading-order - -For machine-based reading order, we aim to determine the reading priority between two sets of text regions. The model's -input is a three-channel image: the first and last channels contain information about each of the two text regions, -while the middle channel encodes prominent layout elements necessary for reading order, such as separators and headers. -To generate the training dataset, our script requires a page XML file that specifies the image layout with the correct -reading order. - -For output images, it is necessary to specify the width and height. Additionally, a minimum text region size can be set -to filter out regions smaller than this minimum size. This minimum size is defined as the ratio of the text region area -to the image area, with a default value of zero. To run the dataset generator, use the following command: - - -`python generate_gt_for_training.py machine-based-reading-order -dx "dir of GT xml files" -domi "dir where output images will be written" -docl "dir where the labels will be written" -ih "height" -iw "width" -min "min area ratio"` - -### pagexml2label - -pagexml2label is designed to generate labels from GT page XML files for various pixel-wise segmentation use cases, -including 'layout,' 'textline,' 'printspace,' 'glyph,' and 'word' segmentation. - -To train a pixel-wise segmentation model, we require images along with their corresponding labels. Our training script -expects a PNG image where each pixel corresponds to a label, represented by an integer. The background is always labeled -as zero, while other elements are assigned different integers. For instance, if we have ground truth data with four -elements including the background, the classes would be labeled as 0, 1, 2, and 3 respectively. - -In binary segmentation scenarios such as textline or page extraction, the background is encoded as 0, and the desired -element is automatically encoded as 1 in the PNG label. - -To specify the desired use case and the elements to be extracted in the PNG labels, a custom JSON file can be passed. -For example, in the case of 'textline' detection, the JSON file would resemble this: - -```yaml -{ -"use_case": "textline" -} -``` - -In the case of layout segmentation a possible custom config json file can be like this: - -```yaml -{ -"use_case": "layout", -"textregions":{"rest_as_paragraph":1 , "drop-capital": 1, "header":2, "heading":2, "marginalia":3}, -"imageregion":4, -"separatorregion":5, -"graphicregions" :{"rest_as_decoration":6 ,"stamp":7} -} -``` - -A possible custom config json file for layout segmentation where the "printspace" is wished to be a class: - -```yaml -{ -"use_case": "layout", -"textregions":{"rest_as_paragraph":1 , "drop-capital": 1, "header":2, "heading":2, "marginalia":3}, -"imageregion":4, -"separatorregion":5, -"graphicregions" :{"rest_as_decoration":6 ,"stamp":7} -"printspace_as_class_in_layout" : 8 -} -``` - -For the layout use case, it is beneficial to first understand the structure of the page XML file and its elements. In a -given image, the annotations of elements are recorded in a page XML file, including their contours and classes. For an -image document, the known regions are 'textregion', 'separatorregion', 'imageregion', 'graphicregion', 'noiseregion', -and 'tableregion'. - -Text regions and graphic regions also have their own specific types. The known types for us for text regions are -'paragraph', 'header', 'heading', 'marginalia', 'drop-capital', 'footnote', 'footnote-continued', 'signature-mark', -'page-number', and 'catch-word'. The known types for graphic regions are 'handwritten-annotation', 'decoration', -'stamp', and 'signature'. - -Since we don't know all types of text and graphic regions, unknown cases can arise. To handle these, we have defined two -additional types: "rest_as_paragraph" and "rest_as_decoration" to ensure that no unknown types are missed. This way, -users can extract all known types from the labels and be confident that no unknown types are overlooked. - -In the custom JSON file shown above, "header" and "heading" are extracted as the same class, while "marginalia" is shown -as a different class. All other text region types, including "drop-capital," are grouped into the same class. For the -graphic region, "stamp" has its own class, while all other types are classified together. "Image region" and "separator -region" are also present in the label. However, other regions like "noise region" and "table region" will not be -included in the label PNG file, even if they have information in the page XML files, as we chose not to include them. - -`python generate_gt_for_training.py pagexml2label -dx "dir of GT xml files" -do "dir where output label png files will be written" -cfg "custom config json file" -to "output type which has 2d and 3d. 2d is used for training and 3d is just to visualise the labels" "` - -We have also defined an artificial class that can be added to the boundary of text region types or text lines. This key -is called "artificial_class_on_boundary." If users want to apply this to certain text regions in the layout use case, -the example JSON config file should look like this: - -```yaml -{ - "use_case": "layout", - "textregions": { - "paragraph": 1, - "drop-capital": 1, - "header": 2, - "heading": 2, - "marginalia": 3 - }, - "imageregion": 4, - "separatorregion": 5, - "graphicregions": { - "rest_as_decoration": 6 - }, - "artificial_class_on_boundary": ["paragraph", "header", "heading", "marginalia"], - "artificial_class_label": 7 -} -``` - -This implies that the artificial class label, denoted by 7, will be present on PNG files and will only be added to the -elements labeled as "paragraph," "header," "heading," and "marginalia." - -For "textline," "word," and "glyph," the artificial class on the boundaries will be activated only if the -"artificial_class_label" key is specified in the config file. Its value should be set as 2 since these elements -represent binary cases. For example, if the background and textline are denoted as 0 and 1 respectively, then the -artificial class should be assigned the value 2. The example JSON config file should look like this for "textline" use -case: - -```yaml -{ - "use_case": "textline", - "artificial_class_label": 2 -} -``` - -If the coordinates of "PrintSpace" or "Border" are present in the page XML ground truth files, and the user wishes to -crop only the print space area, this can be achieved by activating the "-ps" argument. However, it should be noted that -in this scenario, since cropping will be applied to the label files, the directory of the original images must be -provided to ensure that they are cropped in sync with the labels. This ensures that the correct images and labels -required for training are obtained. The command should resemble the following: - -`python generate_gt_for_training.py pagexml2label -dx "dir of GT xml files" -do "dir where output label png files will be written" -cfg "custom config json file" -to "output type which has 2d and 3d. 2d is used for training and 3d is just to visualise the labels" -ps -di "dir where the org images are located" -doi "dir where the cropped output images will be written" ` - -## Train a model -### classification - -For the classification use case, we haven't provided a ground truth generator, as it's unnecessary. For classification, all we require is a training directory with subdirectories, each containing images of its respective classes. We need separate directories for training and evaluation, and the class names (subdirectories) must be consistent across both directories. Additionally, the class names should be specified in the config JSON file, as shown in the following example. If, for instance, we aim to classify "apple" and "orange," with a total of 2 classes, the "classification_classes_name" key in the config file should appear as follows: - -```yaml -{ - "backbone_type" : "nontransformer", - "task": "classification", - "n_classes" : 2, - "n_epochs" : 10, - "input_height" : 448, - "input_width" : 448, - "weight_decay" : 1e-6, - "n_batch" : 4, - "learning_rate": 1e-4, - "f1_threshold_classification": 0.8, - "pretraining" : true, - "classification_classes_name" : {"0":"apple", "1":"orange"}, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" -} -``` - -The "dir_train" should be like this: - -``` -. -└── train # train directory - ├── apple # directory of images for apple class - └── orange # directory of images for orange class -``` - -And the "dir_eval" the same structure as train directory: - -``` -. -└── eval # evaluation directory - ├── apple # directory of images for apple class - └── orange # directory of images for orange class - -``` - -The classification model can be trained using the following command line: - -`python train.py with config_classification.json` - - -As evident in the example JSON file above, for classification, we utilize a "f1_threshold_classification" parameter. -This parameter is employed to gather all models with an evaluation f1 score surpassing this threshold. Subsequently, -an ensemble of these model weights is executed, and a model is saved in the output directory as "model_ens_avg". -Additionally, the weight of the best model based on the evaluation f1 score is saved as "model_best". - -### reading order -An example config json file for machine based reading order should be like this: - -```yaml -{ - "backbone_type" : "nontransformer", - "task": "reading_order", - "n_classes" : 1, - "n_epochs" : 5, - "input_height" : 672, - "input_width" : 448, - "weight_decay" : 1e-6, - "n_batch" : 4, - "learning_rate": 1e-4, - "pretraining" : true, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" -} -``` - -The "dir_train" should be like this: - -``` -. -└── train # train directory - ├── images # directory of images - └── labels # directory of labels -``` - -And the "dir_eval" the same structure as train directory: - -``` -. -└── eval # evaluation directory - ├── images # directory of images - └── labels # directory of labels -``` - -The classification model can be trained like the classification case command line. - -### Segmentation (Textline, Binarization, Page extraction and layout) and enhancement - -#### Parameter configuration for segmentation or enhancement usecases - -The following parameter configuration can be applied to all segmentation use cases and enhancements. The augmentation, -its sub-parameters, and continued training are defined only for segmentation use cases and enhancements, not for -classification and machine-based reading order, as you can see in their example config files. - -* backbone_type: For segmentation tasks (such as text line, binarization, and layout detection) and enhancement, we - offer two backbone options: a "nontransformer" and a "transformer" backbone. For the "transformer" backbone, we first - apply a CNN followed by a transformer. In contrast, the "nontransformer" backbone utilizes only a CNN ResNet-50. -* task : The task parameter can have values such as "segmentation", "enhancement", "classification", and "reading_order". -* patches: If you want to break input images into smaller patches (input size of the model) you need to set this - parameter to ``true``. In the case that the model should see the image once, like page extraction, patches should be - set to ``false``. -* n_batch: Number of batches at each iteration. -* n_classes: Number of classes. In the case of binary classification this should be 2. In the case of reading_order it should set to 1. And for the case of layout detection just the unique number of classes should be given. -* n_epochs: Number of epochs. -* input_height: This indicates the height of model's input. -* input_width: This indicates the width of model's input. -* weight_decay: Weight decay of l2 regularization of model layers. -* pretraining: Set to ``true`` to load pretrained weights of ResNet50 encoder. The downloaded weights should be saved - in a folder named "pretrained_model" in the same directory of "train.py" script. -* augmentation: If you want to apply any kind of augmentation this parameter should first set to ``true``. -* flip_aug: If ``true``, different types of filp will be applied on image. Type of flips is given with "flip_index" parameter. -* blur_aug: If ``true``, different types of blurring will be applied on image. Type of blurrings is given with "blur_k" parameter. -* scaling: If ``true``, scaling will be applied on image. Scale of scaling is given with "scales" parameter. -* degrading: If ``true``, degrading will be applied to the image. The amount of degrading is defined with "degrade_scales" parameter. -* brightening: If ``true``, brightening will be applied to the image. The amount of brightening is defined with "brightness" parameter. -* rotation_not_90: If ``true``, rotation (not 90 degree) will be applied on image. Rotation angles are given with "thetha" parameter. -* rotation: If ``true``, 90 degree rotation will be applied on image. -* binarization: If ``true``,Otsu thresholding will be applied to augment the input data with binarized images. -* scaling_bluring: If ``true``, combination of scaling and blurring will be applied on image. -* scaling_binarization: If ``true``, combination of scaling and binarization will be applied on image. -* scaling_flip: If ``true``, combination of scaling and flip will be applied on image. -* flip_index: Type of flips. -* blur_k: Type of blurrings. -* scales: Scales of scaling. -* brightness: The amount of brightenings. -* thetha: Rotation angles. -* degrade_scales: The amount of degradings. -* continue_training: If ``true``, it means that you have already trained a model and you would like to continue the - training. So it is needed to provide the dir of trained model with "dir_of_start_model" and index for naming the - models. For example if you have already trained for 3 epochs then your last index is 2 and if you want to continue from - model_1.h5, you can set ``index_start`` to 3 to start naming model with index 3. -* weighted_loss: If ``true``, this means that you want to apply weighted categorical_crossentropy as loss fucntion. Be carefull if you set to ``true``the parameter "is_loss_soft_dice" should be ``false`` -* data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train - and eval data are in "dir_output". Since when once we provide training data we resize and augment them and then we - write them in sub-directories train and eval in "dir_output". -* dir_train: This is the directory of "images" and "labels" (dir_train should include two subdirectories with names of images and labels ) for raw images and labels. Namely they are not prepared (not resized and not augmented) yet for training the model. When we run this tool these raw data will be transformed to suitable size needed for the model and they will be written in "dir_output" in train and eval directories. Each of train and eval include "images" and "labels" sub-directories. -* index_start: Starting index for saved models in the case that "continue_training" is ``true``. -* dir_of_start_model: Directory containing pretrained model to continue training the model in the case that "continue_training" is ``true``. -* transformer_num_patches_xy: Number of patches for vision transformer in x and y direction respectively. -* transformer_patchsize_x: Patch size of vision transformer patches in x direction. -* transformer_patchsize_y: Patch size of vision transformer patches in y direction. -* transformer_projection_dim: Transformer projection dimension. Default value is 64. -* transformer_mlp_head_units: Transformer Multilayer Perceptron (MLP) head units. Default value is [128, 64]. -* transformer_layers: transformer layers. Default value is 8. -* transformer_num_heads: Transformer number of heads. Default value is 4. -* transformer_cnn_first: We have two types of vision transformers. In one type, a CNN is applied first, followed by a transformer. In the other type, this order is reversed. If transformer_cnn_first is true, it means the CNN will be applied before the transformer. Default value is true. - -In the case of segmentation and enhancement the train and evaluation directory should be as following. - -The "dir_train" should be like this: - -``` -. -└── train # train directory - ├── images # directory of images - └── labels # directory of labels -``` - -And the "dir_eval" the same structure as train directory: - -``` -. -└── eval # evaluation directory - ├── images # directory of images - └── labels # directory of labels -``` - -After configuring the JSON file for segmentation or enhancement, training can be initiated by running the following -command, similar to the process for classification and reading order: - -`python train.py with config_classification.json` - -#### Binarization - -An example config json file for binarization can be like this: - -```yaml -{ - "backbone_type" : "transformer", - "task": "binarization", - "n_classes" : 2, - "n_epochs" : 4, - "input_height" : 224, - "input_width" : 672, - "weight_decay" : 1e-6, - "n_batch" : 1, - "learning_rate": 1e-4, - "patches" : true, - "pretraining" : true, - "augmentation" : true, - "flip_aug" : false, - "blur_aug" : false, - "scaling" : true, - "degrading": false, - "brightening": false, - "binarization" : false, - "scaling_bluring" : false, - "scaling_binarization" : false, - "scaling_flip" : false, - "rotation": false, - "rotation_not_90": false, - "transformer_num_patches_xy": [7, 7], - "transformer_patchsize_x": 3, - "transformer_patchsize_y": 1, - "transformer_projection_dim": 192, - "transformer_mlp_head_units": [128, 64], - "transformer_layers": 8, - "transformer_num_heads": 4, - "transformer_cnn_first": true, - "blur_k" : ["blur","guass","median"], - "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], - "brightness" : [1.3, 1.5, 1.7, 2], - "degrade_scales" : [0.2, 0.4], - "flip_index" : [0, 1, -1], - "thetha" : [10, -10], - "continue_training": false, - "index_start" : 0, - "dir_of_start_model" : " ", - "weighted_loss": false, - "is_loss_soft_dice": false, - "data_is_provided": false, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" -} -``` - -#### Textline - -```yaml -{ - "backbone_type" : "nontransformer", - "task": "segmentation", - "n_classes" : 2, - "n_epochs" : 4, - "input_height" : 448, - "input_width" : 224, - "weight_decay" : 1e-6, - "n_batch" : 1, - "learning_rate": 1e-4, - "patches" : true, - "pretraining" : true, - "augmentation" : true, - "flip_aug" : false, - "blur_aug" : false, - "scaling" : true, - "degrading": false, - "brightening": false, - "binarization" : false, - "scaling_bluring" : false, - "scaling_binarization" : false, - "scaling_flip" : false, - "rotation": false, - "rotation_not_90": false, - "blur_k" : ["blur","guass","median"], - "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], - "brightness" : [1.3, 1.5, 1.7, 2], - "degrade_scales" : [0.2, 0.4], - "flip_index" : [0, 1, -1], - "thetha" : [10, -10], - "continue_training": false, - "index_start" : 0, - "dir_of_start_model" : " ", - "weighted_loss": false, - "is_loss_soft_dice": false, - "data_is_provided": false, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" -} -``` - -#### Enhancement - -```yaml -{ - "backbone_type" : "nontransformer", - "task": "enhancement", - "n_classes" : 3, - "n_epochs" : 4, - "input_height" : 448, - "input_width" : 224, - "weight_decay" : 1e-6, - "n_batch" : 4, - "learning_rate": 1e-4, - "patches" : true, - "pretraining" : true, - "augmentation" : true, - "flip_aug" : false, - "blur_aug" : false, - "scaling" : true, - "degrading": false, - "brightening": false, - "binarization" : false, - "scaling_bluring" : false, - "scaling_binarization" : false, - "scaling_flip" : false, - "rotation": false, - "rotation_not_90": false, - "blur_k" : ["blur","guass","median"], - "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], - "brightness" : [1.3, 1.5, 1.7, 2], - "degrade_scales" : [0.2, 0.4], - "flip_index" : [0, 1, -1], - "thetha" : [10, -10], - "continue_training": false, - "index_start" : 0, - "dir_of_start_model" : " ", - "weighted_loss": false, - "is_loss_soft_dice": false, - "data_is_provided": false, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" -} -``` - -It's important to mention that the value of n_classes for enhancement should be 3, as the model's output is a 3-channel -image. - -#### Page extraction - -```yaml -{ - "backbone_type" : "nontransformer", - "task": "segmentation", - "n_classes" : 2, - "n_epochs" : 4, - "input_height" : 448, - "input_width" : 224, - "weight_decay" : 1e-6, - "n_batch" : 1, - "learning_rate": 1e-4, - "patches" : false, - "pretraining" : true, - "augmentation" : false, - "flip_aug" : false, - "blur_aug" : false, - "scaling" : true, - "degrading": false, - "brightening": false, - "binarization" : false, - "scaling_bluring" : false, - "scaling_binarization" : false, - "scaling_flip" : false, - "rotation": false, - "rotation_not_90": false, - "blur_k" : ["blur","guass","median"], - "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], - "brightness" : [1.3, 1.5, 1.7, 2], - "degrade_scales" : [0.2, 0.4], - "flip_index" : [0, 1, -1], - "thetha" : [10, -10], - "continue_training": false, - "index_start" : 0, - "dir_of_start_model" : " ", - "weighted_loss": false, - "is_loss_soft_dice": false, - "data_is_provided": false, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" -} -``` - -For page segmentation (or print space or border segmentation), the model needs to view the input image in its entirety, -hence the patches parameter should be set to false. - -#### layout segmentation - -An example config json file for layout segmentation with 5 classes (including background) can be like this: - -```yaml -{ - "backbone_type" : "transformer", - "task": "segmentation", - "n_classes" : 5, - "n_epochs" : 4, - "input_height" : 448, - "input_width" : 224, - "weight_decay" : 1e-6, - "n_batch" : 1, - "learning_rate": 1e-4, - "patches" : true, - "pretraining" : true, - "augmentation" : true, - "flip_aug" : false, - "blur_aug" : false, - "scaling" : true, - "degrading": false, - "brightening": false, - "binarization" : false, - "scaling_bluring" : false, - "scaling_binarization" : false, - "scaling_flip" : false, - "rotation": false, - "rotation_not_90": false, - "transformer_num_patches_xy": [7, 14], - "transformer_patchsize_x": 1, - "transformer_patchsize_y": 1, - "transformer_projection_dim": 64, - "transformer_mlp_head_units": [128, 64], - "transformer_layers": 8, - "transformer_num_heads": 4, - "transformer_cnn_first": true, - "blur_k" : ["blur","guass","median"], - "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], - "brightness" : [1.3, 1.5, 1.7, 2], - "degrade_scales" : [0.2, 0.4], - "flip_index" : [0, 1, -1], - "thetha" : [10, -10], - "continue_training": false, - "index_start" : 0, - "dir_of_start_model" : " ", - "weighted_loss": false, - "is_loss_soft_dice": false, - "data_is_provided": false, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" -} -``` -## Inference with the trained model - -### classification - -For conducting inference with a trained model, you simply need to execute the following command line, specifying the -directory of the model and the image on which to perform inference: - - -`python inference.py -m "model dir" -i "image" ` - -This will straightforwardly return the class of the image. - -### machine based reading order - -To infer the reading order using an reading order model, we need a page XML file containing layout information but -without the reading order. We simply need to provide the model directory, the XML file, and the output directory. The -new XML file with the added reading order will be written to the output directory with the same name. We need to run: - -`python inference.py -m "model dir" -xml "page xml file" -o "output dir to write new xml with reading order" ` - - -### Segmentation (Textline, Binarization, Page extraction and layout) and enhancement - -For conducting inference with a trained model for segmentation and enhancement you need to run the following command line: - - -`python inference.py -m "model dir" -i "image" -p -s "output image" ` - - -Note that in the case of page extraction the -p flag is not needed. - -For segmentation or binarization tasks, if a ground truth (GT) label is available, the IOU evaluation metric can be -calculated for the output. To do this, you need to provide the GT label using the argument -gt. - - - diff --git a/train/train.md b/train/train.md deleted file mode 100644 index 7e7ab63..0000000 --- a/train/train.md +++ /dev/null @@ -1,643 +0,0 @@ -# Documentation for Training Models - -This repository assists users in preparing training datasets, training models, and performing inference with trained -models. We cover various use cases including pixel-wise segmentation, image classification, image enhancement, and -machine-based reading order. For each use case, we provide guidance on how to generate the corresponding training -dataset. -All these use cases are now utilized in the Eynollah workflow. -As mentioned, the following three tasks can be accomplished using this repository: - -* Generate training dataset -* Train a model -* Inference with the trained model - -## Generate training dataset -The script generate_gt_for_training.py is used for generating training datasets. As the results of the following command demonstrate, the dataset generator provides three different commands: - -`python generate_gt_for_training.py --help` - - -These three commands are: - -* image-enhancement -* machine-based-reading-order -* pagexml2label - - -### image-enhancement - -Generating a training dataset for image enhancement is quite straightforward. All that is needed is a set of -high-resolution images. The training dataset can then be generated using the following command: - -`python generate_gt_for_training.py image-enhancement -dis "dir of high resolution images" -dois "dir where degraded images will be written" -dols "dir where the corresponding high resolution image will be written as label" -scs "degrading scales json file"` - -The scales JSON file is a dictionary with a key named 'scales' and values representing scales smaller than 1. Images are -downscaled based on these scales and then upscaled again to their original size. This process causes the images to lose -resolution at different scales. The degraded images are used as input images, and the original high-resolution images -serve as labels. The enhancement model can be trained with this generated dataset. The scales JSON file looks like this: - -```yaml -{ - "scales": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9] -} -``` - -### machine-based-reading-order - -For machine-based reading order, we aim to determine the reading priority between two sets of text regions. The model's -input is a three-channel image: the first and last channels contain information about each of the two text regions, -while the middle channel encodes prominent layout elements necessary for reading order, such as separators and headers. -To generate the training dataset, our script requires a page XML file that specifies the image layout with the correct -reading order. - -For output images, it is necessary to specify the width and height. Additionally, a minimum text region size can be set -to filter out regions smaller than this minimum size. This minimum size is defined as the ratio of the text region area -to the image area, with a default value of zero. To run the dataset generator, use the following command: - - -`python generate_gt_for_training.py machine-based-reading-order -dx "dir of GT xml files" -domi "dir where output images will be written" -docl "dir where the labels will be written" -ih "height" -iw "width" -min "min area ratio"` - -### pagexml2label - -pagexml2label is designed to generate labels from GT page XML files for various pixel-wise segmentation use cases, -including 'layout,' 'textline,' 'printspace,' 'glyph,' and 'word' segmentation. - -To train a pixel-wise segmentation model, we require images along with their corresponding labels. Our training script -expects a PNG image where each pixel corresponds to a label, represented by an integer. The background is always labeled -as zero, while other elements are assigned different integers. For instance, if we have ground truth data with four -elements including the background, the classes would be labeled as 0, 1, 2, and 3 respectively. - -In binary segmentation scenarios such as textline or page extraction, the background is encoded as 0, and the desired -element is automatically encoded as 1 in the PNG label. - -To specify the desired use case and the elements to be extracted in the PNG labels, a custom JSON file can be passed. -For example, in the case of 'textline' detection, the JSON file would resemble this: - -```yaml -{ -"use_case": "textline" -} -``` - -In the case of layout segmentation a possible custom config json file can be like this: - -```yaml -{ -"use_case": "layout", -"textregions":{"rest_as_paragraph":1 , "drop-capital": 1, "header":2, "heading":2, "marginalia":3}, -"imageregion":4, -"separatorregion":5, -"graphicregions" :{"rest_as_decoration":6 ,"stamp":7} -} -``` - -A possible custom config json file for layout segmentation where the "printspace" is wished to be a class: - -```yaml -{ -"use_case": "layout", -"textregions":{"rest_as_paragraph":1 , "drop-capital": 1, "header":2, "heading":2, "marginalia":3}, -"imageregion":4, -"separatorregion":5, -"graphicregions" :{"rest_as_decoration":6 ,"stamp":7} -"printspace_as_class_in_layout" : 8 -} -``` - -For the layout use case, it is beneficial to first understand the structure of the page XML file and its elements. In a -given image, the annotations of elements are recorded in a page XML file, including their contours and classes. For an -image document, the known regions are 'textregion', 'separatorregion', 'imageregion', 'graphicregion', 'noiseregion', -and 'tableregion'. - -Text regions and graphic regions also have their own specific types. The known types for us for text regions are -'paragraph', 'header', 'heading', 'marginalia', 'drop-capital', 'footnote', 'footnote-continued', 'signature-mark', -'page-number', and 'catch-word'. The known types for graphic regions are 'handwritten-annotation', 'decoration', -'stamp', and 'signature'. - -Since we don't know all types of text and graphic regions, unknown cases can arise. To handle these, we have defined two -additional types: "rest_as_paragraph" and "rest_as_decoration" to ensure that no unknown types are missed. This way, -users can extract all known types from the labels and be confident that no unknown types are overlooked. - -In the custom JSON file shown above, "header" and "heading" are extracted as the same class, while "marginalia" is shown -as a different class. All other text region types, including "drop-capital," are grouped into the same class. For the -graphic region, "stamp" has its own class, while all other types are classified together. "Image region" and "separator -region" are also present in the label. However, other regions like "noise region" and "table region" will not be -included in the label PNG file, even if they have information in the page XML files, as we chose not to include them. - -`python generate_gt_for_training.py pagexml2label -dx "dir of GT xml files" -do "dir where output label png files will be written" -cfg "custom config json file" -to "output type which has 2d and 3d. 2d is used for training and 3d is just to visualise the labels" "` - -We have also defined an artificial class that can be added to the boundary of text region types or text lines. This key -is called "artificial_class_on_boundary." If users want to apply this to certain text regions in the layout use case, -the example JSON config file should look like this: - -```yaml -{ - "use_case": "layout", - "textregions": { - "paragraph": 1, - "drop-capital": 1, - "header": 2, - "heading": 2, - "marginalia": 3 - }, - "imageregion": 4, - "separatorregion": 5, - "graphicregions": { - "rest_as_decoration": 6 - }, - "artificial_class_on_boundary": ["paragraph", "header", "heading", "marginalia"], - "artificial_class_label": 7 -} -``` - -This implies that the artificial class label, denoted by 7, will be present on PNG files and will only be added to the -elements labeled as "paragraph," "header," "heading," and "marginalia." - -For "textline," "word," and "glyph," the artificial class on the boundaries will be activated only if the -"artificial_class_label" key is specified in the config file. Its value should be set as 2 since these elements -represent binary cases. For example, if the background and textline are denoted as 0 and 1 respectively, then the -artificial class should be assigned the value 2. The example JSON config file should look like this for "textline" use -case: - -```yaml -{ - "use_case": "textline", - "artificial_class_label": 2 -} -``` - -If the coordinates of "PrintSpace" or "Border" are present in the page XML ground truth files, and the user wishes to -crop only the print space area, this can be achieved by activating the "-ps" argument. However, it should be noted that -in this scenario, since cropping will be applied to the label files, the directory of the original images must be -provided to ensure that they are cropped in sync with the labels. This ensures that the correct images and labels -required for training are obtained. The command should resemble the following: - -`python generate_gt_for_training.py pagexml2label -dx "dir of GT xml files" -do "dir where output label png files will be written" -cfg "custom config json file" -to "output type which has 2d and 3d. 2d is used for training and 3d is just to visualise the labels" -ps -di "dir where the org images are located" -doi "dir where the cropped output images will be written" ` - -## Train a model -### classification - -For the classification use case, we haven't provided a ground truth generator, as it's unnecessary. For classification, all we require is a training directory with subdirectories, each containing images of its respective classes. We need separate directories for training and evaluation, and the class names (subdirectories) must be consistent across both directories. Additionally, the class names should be specified in the config JSON file, as shown in the following example. If, for instance, we aim to classify "apple" and "orange," with a total of 2 classes, the "classification_classes_name" key in the config file should appear as follows: - -```yaml -{ - "backbone_type" : "nontransformer", - "task": "classification", - "n_classes" : 2, - "n_epochs" : 10, - "input_height" : 448, - "input_width" : 448, - "weight_decay" : 1e-6, - "n_batch" : 4, - "learning_rate": 1e-4, - "f1_threshold_classification": 0.8, - "pretraining" : true, - "classification_classes_name" : {"0":"apple", "1":"orange"}, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" -} -``` - -The "dir_train" should be like this: - -``` -. -└── train # train directory - ├── apple # directory of images for apple class - └── orange # directory of images for orange class -``` - -And the "dir_eval" the same structure as train directory: - -``` -. -└── eval # evaluation directory - ├── apple # directory of images for apple class - └── orange # directory of images for orange class - -``` - -The classification model can be trained using the following command line: - -`python train.py with config_classification.json` - - -As evident in the example JSON file above, for classification, we utilize a "f1_threshold_classification" parameter. -This parameter is employed to gather all models with an evaluation f1 score surpassing this threshold. Subsequently, -an ensemble of these model weights is executed, and a model is saved in the output directory as "model_ens_avg". -Additionally, the weight of the best model based on the evaluation f1 score is saved as "model_best". - -### reading order -An example config json file for machine based reading order should be like this: - -```yaml -{ - "backbone_type" : "nontransformer", - "task": "reading_order", - "n_classes" : 1, - "n_epochs" : 5, - "input_height" : 672, - "input_width" : 448, - "weight_decay" : 1e-6, - "n_batch" : 4, - "learning_rate": 1e-4, - "pretraining" : true, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" -} -``` - -The "dir_train" should be like this: - -``` -. -└── train # train directory - ├── images # directory of images - └── labels # directory of labels -``` - -And the "dir_eval" the same structure as train directory: - -``` -. -└── eval # evaluation directory - ├── images # directory of images - └── labels # directory of labels -``` - -The classification model can be trained like the classification case command line. - -### Segmentation (Textline, Binarization, Page extraction and layout) and enhancement - -#### Parameter configuration for segmentation or enhancement usecases - -The following parameter configuration can be applied to all segmentation use cases and enhancements. The augmentation, -its sub-parameters, and continued training are defined only for segmentation use cases and enhancements, not for -classification and machine-based reading order, as you can see in their example config files. - -* backbone_type: For segmentation tasks (such as text line, binarization, and layout detection) and enhancement, we - offer two backbone options: a "nontransformer" and a "transformer" backbone. For the "transformer" backbone, we first - apply a CNN followed by a transformer. In contrast, the "nontransformer" backbone utilizes only a CNN ResNet-50. -* task : The task parameter can have values such as "segmentation", "enhancement", "classification", and "reading_order". -* patches: If you want to break input images into smaller patches (input size of the model) you need to set this - parameter to ``true``. In the case that the model should see the image once, like page extraction, patches should be - set to ``false``. -* n_batch: Number of batches at each iteration. -* n_classes: Number of classes. In the case of binary classification this should be 2. In the case of reading_order it should set to 1. And for the case of layout detection just the unique number of classes should be given. -* n_epochs: Number of epochs. -* input_height: This indicates the height of model's input. -* input_width: This indicates the width of model's input. -* weight_decay: Weight decay of l2 regularization of model layers. -* pretraining: Set to ``true`` to load pretrained weights of ResNet50 encoder. The downloaded weights should be saved - in a folder named "pretrained_model" in the same directory of "train.py" script. -* augmentation: If you want to apply any kind of augmentation this parameter should first set to ``true``. -* flip_aug: If ``true``, different types of filp will be applied on image. Type of flips is given with "flip_index" parameter. -* blur_aug: If ``true``, different types of blurring will be applied on image. Type of blurrings is given with "blur_k" parameter. -* scaling: If ``true``, scaling will be applied on image. Scale of scaling is given with "scales" parameter. -* degrading: If ``true``, degrading will be applied to the image. The amount of degrading is defined with "degrade_scales" parameter. -* brightening: If ``true``, brightening will be applied to the image. The amount of brightening is defined with "brightness" parameter. -* rotation_not_90: If ``true``, rotation (not 90 degree) will be applied on image. Rotation angles are given with "thetha" parameter. -* rotation: If ``true``, 90 degree rotation will be applied on image. -* binarization: If ``true``,Otsu thresholding will be applied to augment the input data with binarized images. -* scaling_bluring: If ``true``, combination of scaling and blurring will be applied on image. -* scaling_binarization: If ``true``, combination of scaling and binarization will be applied on image. -* scaling_flip: If ``true``, combination of scaling and flip will be applied on image. -* flip_index: Type of flips. -* blur_k: Type of blurrings. -* scales: Scales of scaling. -* brightness: The amount of brightenings. -* thetha: Rotation angles. -* degrade_scales: The amount of degradings. -* continue_training: If ``true``, it means that you have already trained a model and you would like to continue the - training. So it is needed to provide the dir of trained model with "dir_of_start_model" and index for naming the - models. For example if you have already trained for 3 epochs then your last index is 2 and if you want to continue from - model_1.h5, you can set ``index_start`` to 3 to start naming model with index 3. -* weighted_loss: If ``true``, this means that you want to apply weighted categorical_crossentropy as loss fucntion. Be carefull if you set to ``true``the parameter "is_loss_soft_dice" should be ``false`` -* data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train - and eval data are in "dir_output". Since when once we provide training data we resize and augment them and then we - write them in sub-directories train and eval in "dir_output". -* dir_train: This is the directory of "images" and "labels" (dir_train should include two subdirectories with names of images and labels ) for raw images and labels. Namely they are not prepared (not resized and not augmented) yet for training the model. When we run this tool these raw data will be transformed to suitable size needed for the model and they will be written in "dir_output" in train and eval directories. Each of train and eval include "images" and "labels" sub-directories. -* index_start: Starting index for saved models in the case that "continue_training" is ``true``. -* dir_of_start_model: Directory containing pretrained model to continue training the model in the case that "continue_training" is ``true``. -* transformer_num_patches_xy: Number of patches for vision transformer in x and y direction respectively. -* transformer_patchsize_x: Patch size of vision transformer patches in x direction. -* transformer_patchsize_y: Patch size of vision transformer patches in y direction. -* transformer_projection_dim: Transformer projection dimension. Default value is 64. -* transformer_mlp_head_units: Transformer Multilayer Perceptron (MLP) head units. Default value is [128, 64]. -* transformer_layers: transformer layers. Default value is 8. -* transformer_num_heads: Transformer number of heads. Default value is 4. -* transformer_cnn_first: We have two types of vision transformers. In one type, a CNN is applied first, followed by a transformer. In the other type, this order is reversed. If transformer_cnn_first is true, it means the CNN will be applied before the transformer. Default value is true. - -In the case of segmentation and enhancement the train and evaluation directory should be as following. - -The "dir_train" should be like this: - -``` -. -└── train # train directory - ├── images # directory of images - └── labels # directory of labels -``` - -And the "dir_eval" the same structure as train directory: - -``` -. -└── eval # evaluation directory - ├── images # directory of images - └── labels # directory of labels -``` - -After configuring the JSON file for segmentation or enhancement, training can be initiated by running the following -command, similar to the process for classification and reading order: - -`python train.py with config_classification.json` - -#### Binarization - -An example config json file for binarization can be like this: - -```yaml -{ - "backbone_type" : "transformer", - "task": "binarization", - "n_classes" : 2, - "n_epochs" : 4, - "input_height" : 224, - "input_width" : 672, - "weight_decay" : 1e-6, - "n_batch" : 1, - "learning_rate": 1e-4, - "patches" : true, - "pretraining" : true, - "augmentation" : true, - "flip_aug" : false, - "blur_aug" : false, - "scaling" : true, - "degrading": false, - "brightening": false, - "binarization" : false, - "scaling_bluring" : false, - "scaling_binarization" : false, - "scaling_flip" : false, - "rotation": false, - "rotation_not_90": false, - "transformer_num_patches_xy": [7, 7], - "transformer_patchsize_x": 3, - "transformer_patchsize_y": 1, - "transformer_projection_dim": 192, - "transformer_mlp_head_units": [128, 64], - "transformer_layers": 8, - "transformer_num_heads": 4, - "transformer_cnn_first": true, - "blur_k" : ["blur","guass","median"], - "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], - "brightness" : [1.3, 1.5, 1.7, 2], - "degrade_scales" : [0.2, 0.4], - "flip_index" : [0, 1, -1], - "thetha" : [10, -10], - "continue_training": false, - "index_start" : 0, - "dir_of_start_model" : " ", - "weighted_loss": false, - "is_loss_soft_dice": false, - "data_is_provided": false, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" -} -``` - -#### Textline - -```yaml -{ - "backbone_type" : "nontransformer", - "task": "segmentation", - "n_classes" : 2, - "n_epochs" : 4, - "input_height" : 448, - "input_width" : 224, - "weight_decay" : 1e-6, - "n_batch" : 1, - "learning_rate": 1e-4, - "patches" : true, - "pretraining" : true, - "augmentation" : true, - "flip_aug" : false, - "blur_aug" : false, - "scaling" : true, - "degrading": false, - "brightening": false, - "binarization" : false, - "scaling_bluring" : false, - "scaling_binarization" : false, - "scaling_flip" : false, - "rotation": false, - "rotation_not_90": false, - "blur_k" : ["blur","guass","median"], - "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], - "brightness" : [1.3, 1.5, 1.7, 2], - "degrade_scales" : [0.2, 0.4], - "flip_index" : [0, 1, -1], - "thetha" : [10, -10], - "continue_training": false, - "index_start" : 0, - "dir_of_start_model" : " ", - "weighted_loss": false, - "is_loss_soft_dice": false, - "data_is_provided": false, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" -} -``` - -#### Enhancement - -```yaml -{ - "backbone_type" : "nontransformer", - "task": "enhancement", - "n_classes" : 3, - "n_epochs" : 4, - "input_height" : 448, - "input_width" : 224, - "weight_decay" : 1e-6, - "n_batch" : 4, - "learning_rate": 1e-4, - "patches" : true, - "pretraining" : true, - "augmentation" : true, - "flip_aug" : false, - "blur_aug" : false, - "scaling" : true, - "degrading": false, - "brightening": false, - "binarization" : false, - "scaling_bluring" : false, - "scaling_binarization" : false, - "scaling_flip" : false, - "rotation": false, - "rotation_not_90": false, - "blur_k" : ["blur","guass","median"], - "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], - "brightness" : [1.3, 1.5, 1.7, 2], - "degrade_scales" : [0.2, 0.4], - "flip_index" : [0, 1, -1], - "thetha" : [10, -10], - "continue_training": false, - "index_start" : 0, - "dir_of_start_model" : " ", - "weighted_loss": false, - "is_loss_soft_dice": false, - "data_is_provided": false, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" -} -``` - -It's important to mention that the value of n_classes for enhancement should be 3, as the model's output is a 3-channel -image. - -#### Page extraction - -```yaml -{ - "backbone_type" : "nontransformer", - "task": "segmentation", - "n_classes" : 2, - "n_epochs" : 4, - "input_height" : 448, - "input_width" : 224, - "weight_decay" : 1e-6, - "n_batch" : 1, - "learning_rate": 1e-4, - "patches" : false, - "pretraining" : true, - "augmentation" : false, - "flip_aug" : false, - "blur_aug" : false, - "scaling" : true, - "degrading": false, - "brightening": false, - "binarization" : false, - "scaling_bluring" : false, - "scaling_binarization" : false, - "scaling_flip" : false, - "rotation": false, - "rotation_not_90": false, - "blur_k" : ["blur","guass","median"], - "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], - "brightness" : [1.3, 1.5, 1.7, 2], - "degrade_scales" : [0.2, 0.4], - "flip_index" : [0, 1, -1], - "thetha" : [10, -10], - "continue_training": false, - "index_start" : 0, - "dir_of_start_model" : " ", - "weighted_loss": false, - "is_loss_soft_dice": false, - "data_is_provided": false, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" -} -``` - -For page segmentation (or print space or border segmentation), the model needs to view the input image in its entirety, -hence the patches parameter should be set to false. - -#### layout segmentation - -An example config json file for layout segmentation with 5 classes (including background) can be like this: - -```yaml -{ - "backbone_type" : "transformer", - "task": "segmentation", - "n_classes" : 5, - "n_epochs" : 4, - "input_height" : 448, - "input_width" : 224, - "weight_decay" : 1e-6, - "n_batch" : 1, - "learning_rate": 1e-4, - "patches" : true, - "pretraining" : true, - "augmentation" : true, - "flip_aug" : false, - "blur_aug" : false, - "scaling" : true, - "degrading": false, - "brightening": false, - "binarization" : false, - "scaling_bluring" : false, - "scaling_binarization" : false, - "scaling_flip" : false, - "rotation": false, - "rotation_not_90": false, - "transformer_num_patches_xy": [7, 14], - "transformer_patchsize_x": 1, - "transformer_patchsize_y": 1, - "transformer_projection_dim": 64, - "transformer_mlp_head_units": [128, 64], - "transformer_layers": 8, - "transformer_num_heads": 4, - "transformer_cnn_first": true, - "blur_k" : ["blur","guass","median"], - "scales" : [0.6, 0.7, 0.8, 0.9, 1.1, 1.2, 1.4], - "brightness" : [1.3, 1.5, 1.7, 2], - "degrade_scales" : [0.2, 0.4], - "flip_index" : [0, 1, -1], - "thetha" : [10, -10], - "continue_training": false, - "index_start" : 0, - "dir_of_start_model" : " ", - "weighted_loss": false, - "is_loss_soft_dice": false, - "data_is_provided": false, - "dir_train": "./train", - "dir_eval": "./eval", - "dir_output": "./output" -} -``` -## Inference with the trained model - -### classification - -For conducting inference with a trained model, you simply need to execute the following command line, specifying the -directory of the model and the image on which to perform inference: - - -`python inference.py -m "model dir" -i "image" ` - -This will straightforwardly return the class of the image. - -### machine based reading order - -To infer the reading order using an reading order model, we need a page XML file containing layout information but -without the reading order. We simply need to provide the model directory, the XML file, and the output directory. The -new XML file with the added reading order will be written to the output directory with the same name. We need to run: - -`python inference.py -m "model dir" -xml "page xml file" -o "output dir to write new xml with reading order" ` - - -### Segmentation (Textline, Binarization, Page extraction and layout) and enhancement - -For conducting inference with a trained model for segmentation and enhancement you need to run the following command line: - - -`python inference.py -m "model dir" -i "image" -p -s "output image" ` - - -Note that in the case of page extraction the -p flag is not needed. - -For segmentation or binarization tasks, if a ground truth (GT) label is available, the IOU evaluation metric can be -calculated for the output. To do this, you need to provide the GT label using the argument -gt. - - - From 2bcd20ebc740ba17fd1af11910cb9a2983da68e6 Mon Sep 17 00:00:00 2001 From: kba Date: Mon, 29 Sep 2025 15:21:42 +0200 Subject: [PATCH 248/492] reference the now-merged training tools in README.md --- README.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 4683eb7..e576f4d 100644 --- a/README.md +++ b/README.md @@ -53,13 +53,16 @@ make install EXTRAS=OCR ``` ## Models + Pretrained models can be downloaded from [zenodo](https://zenodo.org/records/17194824) or [huggingface](https://huggingface.co/SBB?search_models=eynollah). For documentation on methods and models, have a look at [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). -## Train +## Training -In case you want to train your own model with Eynollah, have a look at [`train.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/train.md). +In case you want to train your own model with Eynollah, have see the +documentation in [`train.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/train.md) and use the +tools in the [`train` folder](https://github.com/qurator-spk/eynollah/tree/main/train). ## Usage From 9d8b858dfc9099f25c928adf39d4096309ced200 Mon Sep 17 00:00:00 2001 From: kba Date: Mon, 29 Sep 2025 16:01:29 +0200 Subject: [PATCH 249/492] remove docs/eynollah-layout, superseded by docs/model.md and docs/usage.md --- .gitignore | 1 + docs/eynollah-layout.md | 100 ---------------------------------------- 2 files changed, 1 insertion(+), 100 deletions(-) delete mode 100644 docs/eynollah-layout.md diff --git a/.gitignore b/.gitignore index 0d5d834..da03449 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ output.html /build /dist *.tif +*.sw? diff --git a/docs/eynollah-layout.md b/docs/eynollah-layout.md deleted file mode 100644 index e76ed51..0000000 --- a/docs/eynollah-layout.md +++ /dev/null @@ -1,100 +0,0 @@ -# `eynollah layout` documentation - -Eynollah can currently be used to detect the following region types/elements: -* Background -* [Border](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_BorderType.html) -* [Textregion](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_TextRegionType.html) -* [Textline](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_TextLineType.html) -* [Header](https://ocr-d.de/en/gt-guidelines/trans/lyUeberschrift.html) -* [Image](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_ImageRegionType.html) -* [Separator](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_SeparatorRegionType.html) -* [Marginalia](https://ocr-d.de/en/gt-guidelines/trans/lyMarginalie.html) -* [Initial (Drop Capital)](https://ocr-d.de/en/gt-guidelines/trans/lyInitiale.html) -* [Table](https://ocr-d.de/en/gt-guidelines/trans/lyTabellen.html) - -In addition, the tool can detect the [ReadingOrder](https://ocr-d.de/en/gt-guidelines/trans/lyLeserichtung.html) of text regions, both from left-to-right or from right-to-left. The final goal is to feed the output to an OCR model. - -## Method description - -Eynollah is based on pixelwise segmentation using a combination of a ResNet50 encoder with various U-Net decoders. -It uses a combination of multiple models and heuristics (see the flowchart below for the different stages and how they interact): -* [Border detection](https://github.com/qurator-spk/eynollah#border-detection) -* [Layout detection](https://github.com/qurator-spk/eynollah#layout-detection) -* [Textline detection](https://github.com/qurator-spk/eynollah#textline-detection) -* [Image enhancement](https://github.com/qurator-spk/eynollah#Image_enhancement) -* [Scale classification](https://github.com/qurator-spk/eynollah#Scale_classification) -* [Heuristic methods](https://https://github.com/qurator-spk/eynollah#heuristic-methods) - -![](https://user-images.githubusercontent.com/952378/100619946-1936f680-331e-11eb-9297-6e8b4cab3c16.png) - -### Border detection -For the purpose of text recognition (OCR) and in order to avoid noise being introduced from texts outside the printspace, one first needs to detect the border of the printed frame. This is done by a binary pixel-wise-segmentation model trained on a dataset of 2,000 documents where about 1,200 of them come from the [dhSegment](https://github.com/dhlab-epfl/dhSegment/) project (you can download the dataset from [here](https://github.com/dhlab-epfl/dhSegment/releases/download/v0.2/pages.zip)) and the remainder having been annotated in SBB. For border detection, the model needs to be fed with the whole image at once rather than separated in patches. - -### Layout detection -As a next step, text regions need to be identified by means of layout detection. Again a pixel-wise segmentation model was trained on 131 labeled images from the SBB digital collections, including some data augmentation. Since the target of this tool are historical documents, we consider as main region types text regions, separators, images, tables and background - each with their own subclasses, e.g. in the case of text regions, subclasses like header/heading, drop capital, main body text etc. While it would be desirable to detect and classify each of these classes in a granular way, there are also limitations due to having a suitably large and balanced training set. Accordingly, the current version of this tool is focussed on the main region types background, text region, image and separator. - -### Textline detection -In a subsequent step, binary pixel-wise segmentation is used again to classify pixels in a document that constitute textlines. For textline segmentation, a model was initially trained on documents with only one column/block of text and some augmentation with regard to scaling. By fine-tuning the parameters also for multi-column documents, additional training data was produced that resulted in a much more robust textline detection model. - -### Image enhancement -This is an image to image model which input was low quality of an image and label was actually the original image. For this one we did not have any GT, so we decreased the quality of documents in SBB and then feed them into model. - -### Scale classification -This is simply an image classifier which classifies images based on their scales or better to say based on their number of columns. - -### Heuristic methods -Some heuristic methods are also employed to further improve the model predictions: -* After border detection, the largest contour is determined by a bounding box, and the image cropped to these coordinates. -* For text region detection, the image is scaled up to make it easier for the model to detect background space between text regions. -* A minimum area is defined for text regions in relation to the overall image dimensions, so that very small regions that are noise can be filtered out. -* Deskewing is applied on the text region level (due to regions having different degrees of skew) in order to improve the textline segmentation result. -* After deskewing, a calculation of the pixel distribution on the X-axis allows the separation of textlines (foreground) and background pixels. -* Finally, using the derived coordinates, bounding boxes are determined for each textline. - -## Models - -TODO - -## How to use - -First, this model makes use of up to 9 trained models which are responsible for different operations like size detection, column classification, image enhancement, page extraction, main layout detection, full layout detection and textline detection.That does not mean that all 9 models are always required for every document. Based on the document characteristics and parameters specified, different scenarios can be applied. - -* If none of the parameters is set to `true`, the tool will perform a layout detection of main regions (background, text, images, separators and marginals). An advantage of this tool is that it tries to extract main text regions separately as much as possible. - -* If you set `-ae` (**a**llow image **e**nhancement) parameter to `true`, the tool will first check the ppi (pixel-per-inch) of the image and when it is less than 300, the tool will resize it and only then image enhancement will occur. Image enhancement can also take place without this option, but by setting this option to `true`, the layout xml data (e.g. coordinates) will be based on the resized and enhanced image instead of the original image. - -* For some documents, while the quality is good, their scale is very large, and the performance of tool decreases. In such cases you can set `-as` (**a**llow **s**caling) to `true`. With this option enabled, the tool will try to rescale the image and only then the layout detection process will begin. - -* If you care about drop capitals (initials) and headings, you can set `-fl` (**f**ull **l**ayout) to `true`. With this setting, the tool can currently distinguish 7 document layout classes/elements. - -* In cases where the document includes curved headers or curved lines, rectangular bounding boxes for textlines will not be a great option. In such cases it is strongly recommended setting the flag `-cl` (**c**urved **l**ines) to `true` to find contours of curved lines instead of rectangular bounding boxes. Be advised that enabling this option increases the processing time of the tool. - -* To crop and save image regions inside the document, set the parameter `-si` (**s**ave **i**mages) to true and provide a directory path to store the extracted images. - -* To extract only images from a document, set the parameter `-eoi` (**e**xtract **o**nly **i**mages). Choosing this option disables any other processing. To save the cropped images add `-ep` and `-si`. - -* This tool is actively being developed. If problems occur, or the performance does not meet your expectations, we welcome your feedback via [issues](https://github.com/qurator-spk/eynollah/issues). - - -### `--full-layout` vs `--no-full-layout` - -Here are the difference in elements detected depending on the `--full-layout`/`--no-full-layout` command line flags: - -| | `--full-layout` | `--no-full-layout` | -| --- | --- | --- | -| reading order | x | x | -| header regions | x | - | -| text regions | x | x | -| text regions / text line | x | x | -| drop-capitals | x | - | -| marginals | x | x | -| marginals / text line | x | x | -| image region | x | x | - -### Use as OCR-D processor - -Eynollah ships with a CLI interface to be used as [OCR-D](https://ocr-d.de) processor. In this case, the source image file group with (preferably) RGB images should be used as input (the image provided by `@imageFilename` is passed on directly): - -`ocrd-eynollah-segment -I OCR-D-IMG -O SEG-LINE -P models` - -## Examples From 09ece86f0dcb860eef978319b2350ccf7df13c2c Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 19 Aug 2025 11:58:45 +0200 Subject: [PATCH 250/492] dilate_textregions_contours: simplify (via shapely's Polygon.buffer()), ensure validity --- src/eynollah/eynollah.py | 212 ++-------------------------------- src/eynollah/utils/contour.py | 30 ++++- 2 files changed, 36 insertions(+), 206 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index d47016b..55789ae 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -27,6 +27,7 @@ from loky import ProcessPoolExecutor import xml.etree.ElementTree as ET import cv2 import numpy as np +from shapely.geometry import Polygon from scipy.signal import find_peaks from scipy.ndimage import gaussian_filter1d from numba import cuda @@ -68,6 +69,7 @@ from .utils.contour import ( get_text_region_boxes_by_given_contours, get_textregion_contours_in_org_image, get_textregion_contours_in_org_image_light, + make_valid, return_contours_of_image, return_contours_of_interested_region, return_contours_of_interested_region_by_min_size, @@ -3670,211 +3672,15 @@ class Eynollah: return x_differential_new def dilate_textregions_contours_textline_version(self, all_found_textline_polygons): - #print(all_found_textline_polygons) - for j in range(len(all_found_textline_polygons)): - for ij in range(len(all_found_textline_polygons[j])): - con_ind = all_found_textline_polygons[j][ij] - area = cv2.contourArea(con_ind) - con_ind = con_ind.astype(float) - - x_differential = np.diff( con_ind[:,0,0]) - y_differential = np.diff( con_ind[:,0,1]) - - x_differential = gaussian_filter1d(x_differential, 0.1) - y_differential = gaussian_filter1d(y_differential, 0.1) - - x_min = float(np.min( con_ind[:,0,0] )) - y_min = float(np.min( con_ind[:,0,1] )) - - x_max = float(np.max( con_ind[:,0,0] )) - y_max = float(np.max( con_ind[:,0,1] )) - - x_differential_mask_nonzeros = [ ind/abs(ind) if ind!=0 else ind for ind in x_differential] - y_differential_mask_nonzeros = [ ind/abs(ind) if ind!=0 else ind for ind in y_differential] - - abs_diff=abs(abs(x_differential)- abs(y_differential) ) - - inc_x = np.zeros(len(x_differential)+1) - inc_y = np.zeros(len(x_differential)+1) - - if (y_max-y_min) <= (x_max-x_min): - dilation_m1 = round(area / (x_max-x_min) * 0.12) - else: - dilation_m1 = round(area / (y_max-y_min) * 0.12) - - if dilation_m1>8: - dilation_m1 = 8 - if dilation_m1<6: - dilation_m1 = 6 - #print(dilation_m1, 'dilation_m1') - dilation_m1 = 6 - dilation_m2 = int(dilation_m1/2.) +1 - - for i in range(len(x_differential)): - if abs_diff[i]==0: - inc_x[i+1] = dilation_m2*(-1*y_differential_mask_nonzeros[i]) - inc_y[i+1] = dilation_m2*(x_differential_mask_nonzeros[i]) - elif abs_diff[i]!=0 and x_differential_mask_nonzeros[i]==0 and y_differential_mask_nonzeros[i]!=0: - inc_x[i+1]= dilation_m1*(-1*y_differential_mask_nonzeros[i]) - elif abs_diff[i]!=0 and x_differential_mask_nonzeros[i]!=0 and y_differential_mask_nonzeros[i]==0: - inc_y[i+1] = dilation_m1*(x_differential_mask_nonzeros[i]) - - elif abs_diff[i]!=0 and abs_diff[i]>=3: - if abs(x_differential[i])>abs(y_differential[i]): - inc_y[i+1] = dilation_m1*(x_differential_mask_nonzeros[i]) - else: - inc_x[i+1]= dilation_m1*(-1*y_differential_mask_nonzeros[i]) - else: - inc_x[i+1] = dilation_m2*(-1*y_differential_mask_nonzeros[i]) - inc_y[i+1] = dilation_m2*(x_differential_mask_nonzeros[i]) - - inc_x[0] = inc_x[-1] - inc_y[0] = inc_y[-1] - - con_scaled = con_ind*1 - - con_scaled[:,0, 0] = con_ind[:,0,0] + np.array(inc_x)[:] - con_scaled[:,0, 1] = con_ind[:,0,1] + np.array(inc_y)[:] - - con_scaled[:,0, 1][con_scaled[:,0, 1]<0] = 0 - con_scaled[:,0, 0][con_scaled[:,0, 0]<0] = 0 - - area_scaled = cv2.contourArea(con_scaled.astype(np.int32)) - - con_ind = con_ind.astype(np.int32) - - results = [cv2.pointPolygonTest(con_ind, (con_scaled[ind,0, 0], con_scaled[ind,0, 1]), False) - for ind in range(len(con_scaled[:,0, 1])) ] - results = np.array(results) - #print(results,'results') - results[results==0] = 1 - - diff_result = np.diff(results) - - indices_2 = [ind for ind in range(len(diff_result)) if diff_result[ind]==2] - indices_m2 = [ind for ind in range(len(diff_result)) if diff_result[ind]==-2] - - if results[0]==1: - con_scaled[:indices_m2[0]+1,0, 1] = con_ind[:indices_m2[0]+1,0,1] - con_scaled[:indices_m2[0]+1,0, 0] = con_ind[:indices_m2[0]+1,0,0] - #indices_2 = indices_2[1:] - indices_m2 = indices_m2[1:] - - if len(indices_2)>len(indices_m2): - con_scaled[indices_2[-1]+1:,0, 1] = con_ind[indices_2[-1]+1:,0,1] - con_scaled[indices_2[-1]+1:,0, 0] = con_ind[indices_2[-1]+1:,0,0] - indices_2 = indices_2[:-1] - - for ii in range(len(indices_2)): - con_scaled[indices_2[ii]+1:indices_m2[ii]+1,0, 1] = con_scaled[indices_2[ii],0, 1] - con_scaled[indices_2[ii]+1:indices_m2[ii]+1,0, 0] = con_scaled[indices_2[ii],0, 0] - - all_found_textline_polygons[j][ij][:,0,1] = con_scaled[:,0, 1] - all_found_textline_polygons[j][ij][:,0,0] = con_scaled[:,0, 0] - return all_found_textline_polygons + return [[np.array(make_valid(Polygon(poly[:, 0]).buffer(5)).exterior.coords, + dtype=int)[:, np.newaxis] + for poly in region] + for region in all_found_textline_polygons] def dilate_textregions_contours(self, all_found_textline_polygons): - #print(all_found_textline_polygons) - for j in range(len(all_found_textline_polygons)): - con_ind = all_found_textline_polygons[j] - #print(len(con_ind[:,0,0]),'con_ind[:,0,0]') - area = cv2.contourArea(con_ind) - con_ind = con_ind.astype(float) - - x_differential = np.diff( con_ind[:,0,0]) - y_differential = np.diff( con_ind[:,0,1]) - - x_differential = gaussian_filter1d(x_differential, 0.1) - y_differential = gaussian_filter1d(y_differential, 0.1) - - x_min = float(np.min( con_ind[:,0,0] )) - y_min = float(np.min( con_ind[:,0,1] )) - - x_max = float(np.max( con_ind[:,0,0] )) - y_max = float(np.max( con_ind[:,0,1] )) - - x_differential_mask_nonzeros = [ ind/abs(ind) if ind!=0 else ind for ind in x_differential] - y_differential_mask_nonzeros = [ ind/abs(ind) if ind!=0 else ind for ind in y_differential] - - abs_diff=abs(abs(x_differential)- abs(y_differential) ) - - inc_x = np.zeros(len(x_differential)+1) - inc_y = np.zeros(len(x_differential)+1) - - if (y_max-y_min) <= (x_max-x_min): - dilation_m1 = round(area / (x_max-x_min) * 0.12) - else: - dilation_m1 = round(area / (y_max-y_min) * 0.12) - - if dilation_m1>8: - dilation_m1 = 8 - if dilation_m1<6: - dilation_m1 = 6 - #print(dilation_m1, 'dilation_m1') - dilation_m1 = 6 - dilation_m2 = int(dilation_m1/2.) +1 - - for i in range(len(x_differential)): - if abs_diff[i]==0: - inc_x[i+1] = dilation_m2*(-1*y_differential_mask_nonzeros[i]) - inc_y[i+1] = dilation_m2*(x_differential_mask_nonzeros[i]) - elif abs_diff[i]!=0 and x_differential_mask_nonzeros[i]==0 and y_differential_mask_nonzeros[i]!=0: - inc_x[i+1]= dilation_m1*(-1*y_differential_mask_nonzeros[i]) - elif abs_diff[i]!=0 and x_differential_mask_nonzeros[i]!=0 and y_differential_mask_nonzeros[i]==0: - inc_y[i+1] = dilation_m1*(x_differential_mask_nonzeros[i]) - - elif abs_diff[i]!=0 and abs_diff[i]>=3: - if abs(x_differential[i])>abs(y_differential[i]): - inc_y[i+1] = dilation_m1*(x_differential_mask_nonzeros[i]) - else: - inc_x[i+1]= dilation_m1*(-1*y_differential_mask_nonzeros[i]) - else: - inc_x[i+1] = dilation_m2*(-1*y_differential_mask_nonzeros[i]) - inc_y[i+1] = dilation_m2*(x_differential_mask_nonzeros[i]) - - inc_x[0] = inc_x[-1] - inc_y[0] = inc_y[-1] - - con_scaled = con_ind*1 - - con_scaled[:,0, 0] = con_ind[:,0,0] + np.array(inc_x)[:] - con_scaled[:,0, 1] = con_ind[:,0,1] + np.array(inc_y)[:] - - con_scaled[:,0, 1][con_scaled[:,0, 1]<0] = 0 - con_scaled[:,0, 0][con_scaled[:,0, 0]<0] = 0 - - area_scaled = cv2.contourArea(con_scaled.astype(np.int32)) - - con_ind = con_ind.astype(np.int32) - - results = [cv2.pointPolygonTest(con_ind, (con_scaled[ind,0, 0], con_scaled[ind,0, 1]), False) - for ind in range(len(con_scaled[:,0, 1])) ] - results = np.array(results) - #print(results,'results') - results[results==0] = 1 - - diff_result = np.diff(results) - indices_2 = [ind for ind in range(len(diff_result)) if diff_result[ind]==2] - indices_m2 = [ind for ind in range(len(diff_result)) if diff_result[ind]==-2] - - if results[0]==1: - con_scaled[:indices_m2[0]+1,0, 1] = con_ind[:indices_m2[0]+1,0,1] - con_scaled[:indices_m2[0]+1,0, 0] = con_ind[:indices_m2[0]+1,0,0] - #indices_2 = indices_2[1:] - indices_m2 = indices_m2[1:] - - if len(indices_2)>len(indices_m2): - con_scaled[indices_2[-1]+1:,0, 1] = con_ind[indices_2[-1]+1:,0,1] - con_scaled[indices_2[-1]+1:,0, 0] = con_ind[indices_2[-1]+1:,0,0] - indices_2 = indices_2[:-1] - - for ii in range(len(indices_2)): - con_scaled[indices_2[ii]+1:indices_m2[ii]+1,0, 1] = con_scaled[indices_2[ii],0, 1] - con_scaled[indices_2[ii]+1:indices_m2[ii]+1,0, 0] = con_scaled[indices_2[ii],0, 0] - - all_found_textline_polygons[j][:,0,1] = con_scaled[:,0, 1] - all_found_textline_polygons[j][:,0,0] = con_scaled[:,0, 0] - return all_found_textline_polygons + return [np.array(make_valid(Polygon(poly[:, 0])).buffer(5).exterior.coords, + dtype=int)[:, np.newaxis] + for poly in all_found_textline_polygons] def dilate_textline_contours(self, all_found_textline_polygons): for j in range(len(all_found_textline_polygons)): diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index 0e84153..3d7e5c8 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -1,7 +1,7 @@ from functools import partial import cv2 import numpy as np -from shapely import geometry +from shapely.geometry import Polygon from .rotate import rotate_image, rotation_image_new @@ -43,7 +43,7 @@ def filter_contours_area_of_image(image, contours, hierarchy, max_area, min_area if len(c) < 3: # A polygon cannot have less than 3 points continue - polygon = geometry.Polygon([point[0] for point in c]) + polygon = Polygon([point[0] for point in c]) area = polygon.area if (area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]) and @@ -58,7 +58,7 @@ def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, m if len(c) < 3: # A polygon cannot have less than 3 points continue - polygon = geometry.Polygon([point[0] for point in c]) + polygon = Polygon([point[0] for point in c]) # area = cv2.contourArea(c) area = polygon.area ##print(np.prod(thresh.shape[:2])) @@ -332,3 +332,27 @@ def return_contours_of_interested_region_by_size(region_pre_p, pixel, min_area, return img_ret[:, :, 0] +def make_valid(polygon: Polygon) -> Polygon: + """Ensures shapely.geometry.Polygon object is valid by repeated rearrangement/simplification/enlargement.""" + points = list(polygon.exterior.coords) + # try by re-arranging points + for split in range(1, len(points)): + if polygon.is_valid or polygon.simplify(polygon.area).is_valid: + break + # simplification may not be possible (at all) due to ordering + # in that case, try another starting point + polygon = Polygon(points[-split:]+points[:-split]) + # try by simplification + for tolerance in range(int(polygon.area + 1.5)): + if polygon.is_valid: + break + # simplification may require a larger tolerance + polygon = polygon.simplify(tolerance + 1) + # try by enlarging + for tolerance in range(1, int(polygon.area + 2.5)): + if polygon.is_valid: + break + # enlargement may require a larger tolerance + polygon = polygon.buffer(tolerance) + assert polygon.is_valid, polygon.wkt + return polygon From b48c41e68ff59d8cff97a59a534fee20d2d32408 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 19 Aug 2025 20:09:09 +0200 Subject: [PATCH 251/492] return_boxes_of_images_by_order_of_reading_new: simplify, avoid changing dtype during np.append --- src/eynollah/eynollah.py | 2 +- src/eynollah/utils/__init__.py | 214 +++++++++++++++------------------ 2 files changed, 97 insertions(+), 119 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 55789ae..959e9a6 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -3678,7 +3678,7 @@ class Eynollah: for region in all_found_textline_polygons] def dilate_textregions_contours(self, all_found_textline_polygons): - return [np.array(make_valid(Polygon(poly[:, 0])).buffer(5).exterior.coords, + return [np.array(make_valid(Polygon(poly[:, 0]).buffer(5)).exterior.coords, dtype=int)[:, np.newaxis] for poly in all_found_textline_polygons] diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index c5962f8..7168d95 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1632,6 +1632,7 @@ def return_boxes_of_images_by_order_of_reading_new( regions_without_separators = cv2.flip(regions_without_separators,1) boxes=[] peaks_neg_tot_tables = [] + splitter_y_new = np.array(splitter_y_new, dtype=int) for i in range(len(splitter_y_new)-1): #print(splitter_y_new[i],splitter_y_new[i+1]) matrix_new = matrix_of_lines_ch[:,:][(matrix_of_lines_ch[:,6]> splitter_y_new[i] ) & @@ -1644,14 +1645,9 @@ def return_boxes_of_images_by_order_of_reading_new( # 0.1 * (np.abs(splitter_y_new[i+1]-splitter_y_new[i]))): if True: try: - if erosion_hurts: - num_col, peaks_neg_fin = find_num_col( - regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:], - num_col_classifier, tables, multiplier=6.) - else: - num_col, peaks_neg_fin = find_num_col( - regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:], - num_col_classifier, tables, multiplier=7.) + num_col, peaks_neg_fin = find_num_col( + regions_without_separators[splitter_y_new[i]:splitter_y_new[i+1], :], + num_col_classifier, tables, multiplier=6. if erosion_hurts else 7.) except: peaks_neg_fin=[] num_col = 0 @@ -1661,7 +1657,7 @@ def return_boxes_of_images_by_order_of_reading_new( #print('burda') if len(peaks_neg_fin)==0: num_col, peaks_neg_fin = find_num_col( - regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:], + regions_without_separators[splitter_y_new[i]:splitter_y_new[i+1], :], num_col_classifier, tables, multiplier=3.) peaks_neg_fin_early=[] peaks_neg_fin_early.append(0) @@ -1674,21 +1670,21 @@ def return_boxes_of_images_by_order_of_reading_new( peaks_neg_fin_rev=[] for i_n in range(len(peaks_neg_fin_early)-1): #print(i_n,'i_n') - #plt.plot(regions_without_separators[int(splitter_y_new[i]): - # int(splitter_y_new[i+1]), + #plt.plot(regions_without_separators[splitter_y_new[i]: + # splitter_y_new[i+1], # peaks_neg_fin_early[i_n]: # peaks_neg_fin_early[i_n+1]].sum(axis=0) ) #plt.show() try: num_col, peaks_neg_fin1 = find_num_col( - regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]), + regions_without_separators[splitter_y_new[i]:splitter_y_new[i+1], peaks_neg_fin_early[i_n]:peaks_neg_fin_early[i_n+1]], num_col_classifier,tables, multiplier=7.) except: peaks_neg_fin1=[] try: num_col, peaks_neg_fin2 = find_num_col( - regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]), + regions_without_separators[splitter_y_new[i]:splitter_y_new[i+1], peaks_neg_fin_early[i_n]:peaks_neg_fin_early[i_n+1]], num_col_classifier,tables, multiplier=5.) except: @@ -1716,7 +1712,7 @@ def return_boxes_of_images_by_order_of_reading_new( except: pass #num_col, peaks_neg_fin = find_num_col( - # regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:], + # regions_without_separators[splitter_y_new[i]:splitter_y_new[i+1],:], # multiplier=7.0) x_min_hor_some=matrix_new[:,2][ (matrix_new[:,9]==0) ] x_max_hor_some=matrix_new[:,3][ (matrix_new[:,9]==0) ] @@ -1738,31 +1734,28 @@ def return_boxes_of_images_by_order_of_reading_new( y_lines_with_child_without_mother, x_start_with_child_without_mother, x_end_with_child_without_mother, \ new_main_sep_y = return_x_start_end_mothers_childs_and_type_of_reading_order( x_min_hor_some, x_max_hor_some, cy_hor_some, peaks_neg_tot, cy_hor_diff) - x_starting = np.array(x_starting) - x_ending = np.array(x_ending) - y_type_2 = np.array(y_type_2) - y_diff_type_2 = np.array(y_diff_type_2) + all_columns = set(range(len(peaks_neg_tot) - 1)) if ((reading_order_type==1) or (reading_order_type==0 and (len(y_lines_without_mother)>=2 or there_is_sep_with_child==1))): try: - y_grenze=int(splitter_y_new[i])+300 + y_grenze = splitter_y_new[i] + 300 #check if there is a big separator in this y_mains_sep_ohne_grenzen args_early_ys=np.arange(len(y_type_2)) #print(args_early_ys,'args_early_ys') - #print(int(splitter_y_new[i]),int(splitter_y_new[i+1])) + #print(splitter_y_new[i], splitter_y_new[i+1]) - x_starting_up = x_starting[(y_type_2 > int(splitter_y_new[i])) & + x_starting_up = x_starting[(y_type_2 > splitter_y_new[i]) & (y_type_2 <= y_grenze)] - x_ending_up = x_ending[(y_type_2 > int(splitter_y_new[i])) & + x_ending_up = x_ending[(y_type_2 > splitter_y_new[i]) & (y_type_2 <= y_grenze)] - y_type_2_up = y_type_2[(y_type_2 > int(splitter_y_new[i])) & + y_type_2_up = y_type_2[(y_type_2 > splitter_y_new[i]) & (y_type_2 <= y_grenze)] - y_diff_type_2_up = y_diff_type_2[(y_type_2 > int(splitter_y_new[i])) & + y_diff_type_2_up = y_diff_type_2[(y_type_2 > splitter_y_new[i]) & (y_type_2 <= y_grenze)] - args_up = args_early_ys[(y_type_2 > int(splitter_y_new[i])) & + args_up = args_early_ys[(y_type_2 > splitter_y_new[i]) & (y_type_2 <= y_grenze)] if len(y_type_2_up) > 0: y_main_separator_up = y_type_2_up [(x_starting_up==0) & @@ -1776,8 +1769,8 @@ def return_boxes_of_images_by_order_of_reading_new( args_to_be_kept = np.array(list( set(args_early_ys) - set(args_main_to_deleted) )) #print(args_to_be_kept,'args_to_be_kept') boxes.append([0, peaks_neg_tot[len(peaks_neg_tot)-1], - int(splitter_y_new[i]), int( np.max(y_diff_main_separator_up))]) - splitter_y_new[i]=[ np.max(y_diff_main_separator_up) ][0] + splitter_y_new[i], y_diff_main_separator_up.max()]) + splitter_y_new[i] = y_diff_main_separator_up.max() #print(splitter_y_new[i],'splitter_y_new[i]') y_type_2 = y_type_2[args_to_be_kept] @@ -1786,29 +1779,28 @@ def return_boxes_of_images_by_order_of_reading_new( y_diff_type_2 = y_diff_type_2[args_to_be_kept] #print('galdiha') - y_grenze=int(splitter_y_new[i])+200 + y_grenze = splitter_y_new[i] + 200 args_early_ys2=np.arange(len(y_type_2)) - y_type_2_up=y_type_2[(y_type_2 > int(splitter_y_new[i])) & + y_type_2_up=y_type_2[(y_type_2 > splitter_y_new[i]) & (y_type_2 <= y_grenze)] - x_starting_up=x_starting[(y_type_2 > int(splitter_y_new[i])) & + x_starting_up=x_starting[(y_type_2 > splitter_y_new[i]) & (y_type_2 <= y_grenze)] - x_ending_up=x_ending[(y_type_2 > int(splitter_y_new[i])) & + x_ending_up=x_ending[(y_type_2 > splitter_y_new[i]) & (y_type_2 <= y_grenze)] - y_diff_type_2_up=y_diff_type_2[(y_type_2 > int(splitter_y_new[i])) & + y_diff_type_2_up=y_diff_type_2[(y_type_2 > splitter_y_new[i]) & (y_type_2 <= y_grenze)] - args_up2=args_early_ys2[(y_type_2 > int(splitter_y_new[i])) & + args_up2=args_early_ys2[(y_type_2 > splitter_y_new[i]) & (y_type_2 <= y_grenze)] #print(y_type_2_up,x_starting_up,x_ending_up,'didid') - nodes_in = [] + nodes_in = set() for ij in range(len(x_starting_up)): - nodes_in = nodes_in + list(range(x_starting_up[ij], - x_ending_up[ij])) - nodes_in = np.unique(nodes_in) + nodes_in.update(range(x_starting_up[ij], + x_ending_up[ij])) #print(nodes_in,'nodes_in') - if set(nodes_in)==set(range(len(peaks_neg_tot)-1)): + if nodes_in == set(range(len(peaks_neg_tot)-1)): pass - elif set(nodes_in)==set(range(1, len(peaks_neg_tot)-1)): + elif nodes_in == set(range(1, len(peaks_neg_tot)-1)): pass else: #print('burdaydikh') @@ -1823,17 +1815,16 @@ def return_boxes_of_images_by_order_of_reading_new( pass #print('burdaydikh2') elif len(y_diff_main_separator_up)==0: - nodes_in = [] + nodes_in = set() for ij in range(len(x_starting_up)): - nodes_in = nodes_in + list(range(x_starting_up[ij], - x_ending_up[ij])) - nodes_in = np.unique(nodes_in) + nodes_in.update(range(x_starting_up[ij], + x_ending_up[ij])) #print(nodes_in,'nodes_in2') #print(np.array(range(len(peaks_neg_tot)-1)),'np.array(range(len(peaks_neg_tot)-1))') - if set(nodes_in)==set(range(len(peaks_neg_tot)-1)): + if nodes_in == set(range(len(peaks_neg_tot)-1)): pass - elif set(nodes_in)==set(range(1,len(peaks_neg_tot)-1)): + elif nodes_in == set(range(1,len(peaks_neg_tot)-1)): pass else: #print('burdaydikh') @@ -1858,26 +1849,24 @@ def return_boxes_of_images_by_order_of_reading_new( x_end_by_order=[] if (len(x_end_with_child_without_mother)==0 and reading_order_type==0) or reading_order_type==1: if reading_order_type==1: - y_lines_by_order.append(int(splitter_y_new[i])) + y_lines_by_order.append(splitter_y_new[i]) x_start_by_order.append(0) x_end_by_order.append(len(peaks_neg_tot)-2) else: #print(x_start_without_mother,x_end_without_mother,peaks_neg_tot,'dodo') - columns_covered_by_mothers = [] + columns_covered_by_mothers = set() for dj in range(len(x_start_without_mother)): - columns_covered_by_mothers = columns_covered_by_mothers + \ - list(range(x_start_without_mother[dj], - x_end_without_mother[dj])) - columns_covered_by_mothers = list(set(columns_covered_by_mothers)) - - all_columns=np.arange(len(peaks_neg_tot)-1) - columns_not_covered=list(set(all_columns) - set(columns_covered_by_mothers)) - y_type_2 = np.append(y_type_2, [int(splitter_y_new[i])] * (len(columns_not_covered) + len(x_start_without_mother))) - ##y_lines_by_order = np.append(y_lines_by_order, [int(splitter_y_new[i])] * len(columns_not_covered)) + columns_covered_by_mothers.update( + range(x_start_without_mother[dj], + x_end_without_mother[dj])) + columns_not_covered = list(all_columns - columns_covered_by_mothers) + y_type_2 = np.append(y_type_2, np.ones(len(columns_not_covered) + len(x_start_without_mother), + dtype=int) * splitter_y_new[i]) + ##y_lines_by_order = np.append(y_lines_by_order, [splitter_y_new[i]] * len(columns_not_covered)) ##x_start_by_order = np.append(x_start_by_order, [0] * len(columns_not_covered)) - x_starting = np.append(x_starting, columns_not_covered) + x_starting = np.append(x_starting, np.array(columns_not_covered, int)) x_starting = np.append(x_starting, x_start_without_mother) - x_ending = np.append(x_ending, np.array(columns_not_covered) + 1) + x_ending = np.append(x_ending, np.array(columns_not_covered, int) + 1) x_ending = np.append(x_ending, x_end_without_mother) ind_args=np.arange(len(y_type_2)) @@ -1906,39 +1895,34 @@ def return_boxes_of_images_by_order_of_reading_new( x_end_by_order.append(x_end_column_sort[ii]-1) else: #print(x_start_without_mother,x_end_without_mother,peaks_neg_tot,'dodo') - columns_covered_by_mothers = [] + columns_covered_by_mothers = set() for dj in range(len(x_start_without_mother)): - columns_covered_by_mothers = columns_covered_by_mothers + \ - list(range(x_start_without_mother[dj], - x_end_without_mother[dj])) - columns_covered_by_mothers = list(set(columns_covered_by_mothers)) - - all_columns=np.arange(len(peaks_neg_tot)-1) - columns_not_covered=list(set(all_columns) - set(columns_covered_by_mothers)) - y_type_2 = np.append(y_type_2, [int(splitter_y_new[i])] * (len(columns_not_covered) + len(x_start_without_mother))) - ##y_lines_by_order = np.append(y_lines_by_order, [int(splitter_y_new[i])] * len(columns_not_covered)) + columns_covered_by_mothers.update( + range(x_start_without_mother[dj], + x_end_without_mother[dj])) + columns_not_covered = list(all_columns - columns_covered_by_mothers) + y_type_2 = np.append(y_type_2, np.ones(len(columns_not_covered) + len(x_start_without_mother), + dtype=int) * splitter_y_new[i]) + ##y_lines_by_order = np.append(y_lines_by_order, [splitter_y_new[i]] * len(columns_not_covered)) ##x_start_by_order = np.append(x_start_by_order, [0] * len(columns_not_covered)) - x_starting = np.append(x_starting, columns_not_covered) + x_starting = np.append(x_starting, np.array(columns_not_covered, int)) x_starting = np.append(x_starting, x_start_without_mother) - x_ending = np.append(x_ending, np.array(columns_not_covered) + 1) + x_ending = np.append(x_ending, np.array(columns_not_covered, int) + 1) x_ending = np.append(x_ending, x_end_without_mother) - columns_covered_by_with_child_no_mothers = [] + columns_covered_by_with_child_no_mothers = set() for dj in range(len(x_end_with_child_without_mother)): - columns_covered_by_with_child_no_mothers = columns_covered_by_with_child_no_mothers + \ - list(range(x_start_with_child_without_mother[dj], - x_end_with_child_without_mother[dj])) - columns_covered_by_with_child_no_mothers = list(set(columns_covered_by_with_child_no_mothers)) - - all_columns = np.arange(len(peaks_neg_tot)-1) - columns_not_covered_child_no_mother = list(set(all_columns) - set(columns_covered_by_with_child_no_mothers)) + columns_covered_by_with_child_no_mothers.update( + range(x_start_with_child_without_mother[dj], + x_end_with_child_without_mother[dj])) + columns_not_covered_child_no_mother = list(all_columns - columns_covered_by_with_child_no_mothers) #indexes_to_be_spanned=[] for i_s in range(len(x_end_with_child_without_mother)): columns_not_covered_child_no_mother.append(x_start_with_child_without_mother[i_s]) columns_not_covered_child_no_mother = np.sort(columns_not_covered_child_no_mother) ind_args = np.arange(len(y_type_2)) - x_end_with_child_without_mother = np.array(x_end_with_child_without_mother) - x_start_with_child_without_mother = np.array(x_start_with_child_without_mother) + x_end_with_child_without_mother = np.array(x_end_with_child_without_mother, int) + x_start_with_child_without_mother = np.array(x_start_with_child_without_mother, int) for i_s_nc in columns_not_covered_child_no_mother: if i_s_nc in x_start_with_child_without_mother: x_end_biggest_column = x_end_with_child_without_mother[x_start_with_child_without_mother==i_s_nc][0] @@ -1951,7 +1935,7 @@ def return_boxes_of_images_by_order_of_reading_new( for i_c in range(len(y_column_nc)): if i_c==(len(y_column_nc)-1): ind_all_lines_between_nm_wc=ind_args[(y_type_2>y_column_nc[i_c]) & - (y_type_2=i_s_nc) & (x_ending<=x_end_biggest_column)] else: @@ -1967,21 +1951,19 @@ def return_boxes_of_images_by_order_of_reading_new( if len(x_diff_all_between_nm_wc)>0: biggest=np.argmax(x_diff_all_between_nm_wc) - columns_covered_by_mothers = [] + columns_covered_by_mothers = set() for dj in range(len(x_starting_all_between_nm_wc)): - columns_covered_by_mothers = columns_covered_by_mothers + \ - list(range(x_starting_all_between_nm_wc[dj], - x_ending_all_between_nm_wc[dj])) - columns_covered_by_mothers = list(set(columns_covered_by_mothers)) - - all_columns=np.arange(i_s_nc, x_end_biggest_column) - columns_not_covered = list(set(all_columns) - set(columns_covered_by_mothers)) + columns_covered_by_mothers.update( + range(x_starting_all_between_nm_wc[dj], + x_ending_all_between_nm_wc[dj])) + child_columns = set(range(i_s_nc, x_end_biggest_column)) + columns_not_covered = list(child_columns - columns_covered_by_mothers) should_longest_line_be_extended=0 if (len(x_diff_all_between_nm_wc) > 0 and set(list(range(x_starting_all_between_nm_wc[biggest], x_ending_all_between_nm_wc[biggest])) + - list(columns_not_covered)) != set(all_columns)): + list(columns_not_covered)) != child_columns): should_longest_line_be_extended=1 index_lines_so_close_to_top_separator = \ np.arange(len(y_all_between_nm_wc))[(y_all_between_nm_wc>y_column_nc[i_c]) & @@ -2008,8 +1990,8 @@ def return_boxes_of_images_by_order_of_reading_new( pass y_all_between_nm_wc = np.append(y_all_between_nm_wc, [y_column_nc[i_c]] * len(columns_not_covered)) - x_starting_all_between_nm_wc = np.append(x_starting_all_between_nm_wc, columns_not_covered) - x_ending_all_between_nm_wc = np.append(x_ending_all_between_nm_wc, np.array(columns_not_covered) + 1) + x_starting_all_between_nm_wc = np.append(x_starting_all_between_nm_wc, np.array(columns_not_covered, int)) + x_ending_all_between_nm_wc = np.append(x_ending_all_between_nm_wc, np.array(columns_not_covered, int) + 1) ind_args_between=np.arange(len(x_ending_all_between_nm_wc)) for column in range(i_s_nc, x_end_biggest_column): @@ -2078,7 +2060,7 @@ def return_boxes_of_images_by_order_of_reading_new( if len(y_in_cols)>0: y_down=np.min(y_in_cols) else: - y_down=[int(splitter_y_new[i+1])][0] + y_down=splitter_y_new[i+1] #print(y_itself,'y_itself') boxes.append([peaks_neg_tot[column], peaks_neg_tot[column+1], @@ -2086,45 +2068,42 @@ def return_boxes_of_images_by_order_of_reading_new( y_down]) except: boxes.append([0, peaks_neg_tot[len(peaks_neg_tot)-1], - int(splitter_y_new[i]), int(splitter_y_new[i+1])]) + splitter_y_new[i], splitter_y_new[i+1]]) else: y_lines_by_order=[] x_start_by_order=[] x_end_by_order=[] if len(x_starting)>0: - all_columns = np.arange(len(peaks_neg_tot)-1) - columns_covered_by_lines_covered_more_than_2col = [] + columns_covered_by_lines_covered_more_than_2col = set() for dj in range(len(x_starting)): - if set(list(range(x_starting[dj],x_ending[dj]))) == set(all_columns): - pass - else: - columns_covered_by_lines_covered_more_than_2col = columns_covered_by_lines_covered_more_than_2col + \ - list(range(x_starting[dj],x_ending[dj])) - columns_covered_by_lines_covered_more_than_2col = list(set(columns_covered_by_lines_covered_more_than_2col)) - columns_not_covered = list(set(all_columns) - set(columns_covered_by_lines_covered_more_than_2col)) + if set(range(x_starting[dj], x_ending[dj])) != all_columns: + columns_covered_by_lines_covered_more_than_2col.update( + range(x_starting[dj], x_ending[dj])) + columns_not_covered = list(all_columns - columns_covered_by_lines_covered_more_than_2col) - y_type_2 = np.append(y_type_2, [int(splitter_y_new[i])] * (len(columns_not_covered) + 1)) - ##y_lines_by_order = np.append(y_lines_by_order, [int(splitter_y_new[i])] * len(columns_not_covered)) + y_type_2 = np.append(y_type_2, np.ones(len(columns_not_covered) + 1, + dtype=int) * splitter_y_new[i]) + ##y_lines_by_order = np.append(y_lines_by_order, [splitter_y_new[i]] * len(columns_not_covered)) ##x_start_by_order = np.append(x_start_by_order, [0] * len(columns_not_covered)) - x_starting = np.append(x_starting, columns_not_covered) - x_ending = np.append(x_ending, np.array(columns_not_covered) + 1) + x_starting = np.append(x_starting, np.array(columns_not_covered, x_starting.dtype)) + x_ending = np.append(x_ending, np.array(columns_not_covered, x_ending.dtype) + 1) if len(new_main_sep_y) > 0: x_starting = np.append(x_starting, 0) - x_ending = np.append(x_ending, len(peaks_neg_tot)-1) + x_ending = np.append(x_ending, len(peaks_neg_tot) - 1) else: x_starting = np.append(x_starting, x_starting[0]) x_ending = np.append(x_ending, x_ending[0]) else: - all_columns = np.arange(len(peaks_neg_tot)-1) - columns_not_covered = list(set(all_columns)) - y_type_2 = np.append(y_type_2, [int(splitter_y_new[i])] * len(columns_not_covered)) - ##y_lines_by_order = np.append(y_lines_by_order, [int(splitter_y_new[i])] * len(columns_not_covered)) + columns_not_covered = list(all_columns) + y_type_2 = np.append(y_type_2, np.ones(len(columns_not_covered), + dtype=int) * splitter_y_new[i]) + ##y_lines_by_order = np.append(y_lines_by_order, [splitter_y_new[i]] * len(columns_not_covered)) ##x_start_by_order = np.append(x_start_by_order, [0] * len(columns_not_covered)) - x_starting = np.append(x_starting, columns_not_covered) - x_ending = np.append(x_ending, np.array(columns_not_covered) + 1) + x_starting = np.append(x_starting, np.array(columns_not_covered, x_starting.dtype)) + x_ending = np.append(x_ending, np.array(columns_not_covered, x_ending.dtype) + 1) - ind_args=np.array(range(len(y_type_2))) - #ind_args=np.array(ind_args) + ind_args = np.arange(len(y_type_2)) + for column in range(len(peaks_neg_tot)-1): #print(column,'column') ind_args_in_col=ind_args[x_starting==column] @@ -2155,7 +2134,6 @@ def return_boxes_of_images_by_order_of_reading_new( x_start_itself=x_start_copy.pop(il) x_end_itself=x_end_copy.pop(il) - #print(y_copy,'y_copy2') for column in range(x_start_itself, x_end_itself+1): #print(column,'cols') y_in_cols=[] @@ -2170,7 +2148,7 @@ def return_boxes_of_images_by_order_of_reading_new( if len(y_in_cols)>0: y_down=np.min(y_in_cols) else: - y_down=[int(splitter_y_new[i+1])][0] + y_down=splitter_y_new[i+1] #print(y_itself,'y_itself') boxes.append([peaks_neg_tot[column], peaks_neg_tot[column+1], From 66b2bce8b9f420895b8c47ebf46faf1ca3bbdd03 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Fri, 19 Sep 2025 12:19:58 +0200 Subject: [PATCH 252/492] return_boxes_of_images_by_order_of_reading_new: log any exceptions --- src/eynollah/eynollah.py | 6 ++++-- src/eynollah/utils/__init__.py | 22 ++++++++++++++++------ 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 959e9a6..8080035 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4553,11 +4553,13 @@ class Eynollah: if np.abs(slope_deskew) < SLOPE_THRESHOLD: boxes, peaks_neg_tot_tables = return_boxes_of_images_by_order_of_reading_new( splitter_y_new, regions_without_separators, matrix_of_lines_ch, - num_col_classifier, erosion_hurts, self.tables, self.right2left) + num_col_classifier, erosion_hurts, self.tables, self.right2left, + logger=self.logger) else: boxes_d, peaks_neg_tot_tables_d = return_boxes_of_images_by_order_of_reading_new( splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, - num_col_classifier, erosion_hurts, self.tables, self.right2left) + num_col_classifier, erosion_hurts, self.tables, self.right2left, + logger=self.logger) if self.plotter: self.plotter.write_images_into_directory(polygons_of_images, image_page) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 7168d95..3c130d7 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1,3 +1,5 @@ +from typing import Tuple +from logging import getLogger import time import math @@ -1626,10 +1628,16 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, def return_boxes_of_images_by_order_of_reading_new( splitter_y_new, regions_without_separators, matrix_of_lines_ch, - num_col_classifier, erosion_hurts, tables, right2left_readingorder): + num_col_classifier, erosion_hurts, tables, + right2left_readingorder, + logger=None): if right2left_readingorder: regions_without_separators = cv2.flip(regions_without_separators,1) + if logger is None: + logger = getLogger(__package__) + logger.debug('enter return_boxes_of_images_by_order_of_reading_new') + boxes=[] peaks_neg_tot_tables = [] splitter_y_new = np.array(splitter_y_new, dtype=int) @@ -1710,7 +1718,7 @@ def return_boxes_of_images_by_order_of_reading_new( #print(peaks_neg_fin,'peaks_neg_fin') except: - pass + logger.exception("cannot find peaks consistent with columns") #num_col, peaks_neg_fin = find_num_col( # regions_without_separators[splitter_y_new[i]:splitter_y_new[i+1],:], # multiplier=7.0) @@ -1987,7 +1995,7 @@ def return_boxes_of_images_by_order_of_reading_new( x_starting_all_between_nm_wc = np.append(x_starting_all_between_nm_wc, x_starting_all_between_nm_wc[biggest]) x_ending_all_between_nm_wc = np.append(x_ending_all_between_nm_wc, x_ending_all_between_nm_wc[biggest]) except: - pass + logger.exception("cannot append") y_all_between_nm_wc = np.append(y_all_between_nm_wc, [y_column_nc[i_c]] * len(columns_not_covered)) x_starting_all_between_nm_wc = np.append(x_starting_all_between_nm_wc, np.array(columns_not_covered, int)) @@ -2067,6 +2075,7 @@ def return_boxes_of_images_by_order_of_reading_new( y_itself, y_down]) except: + logger.exception("cannot assign boxes") boxes.append([0, peaks_neg_tot[len(peaks_neg_tot)-1], splitter_y_new[i], splitter_y_new[i+1]]) else: @@ -2170,6 +2179,7 @@ def return_boxes_of_images_by_order_of_reading_new( x_end_new = regions_without_separators.shape[1] - boxes[i][0] boxes[i][0] = x_start_new boxes[i][1] = x_end_new - return boxes, peaks_neg_tot_tables_new - else: - return boxes, peaks_neg_tot_tables + peaks_neg_tot_tables = peaks_neg_tot_tables_new + + logger.debug('exit return_boxes_of_images_by_order_of_reading_new') + return boxes, peaks_neg_tot_tables From afba70c920b4f1dc80bd70511a07df82439e6db3 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 19 Aug 2025 22:56:36 +0200 Subject: [PATCH 253/492] separate_lines/do_work_of_slopes: skip if crop is empty --- src/eynollah/utils/separate_lines.py | 46 +++++++++++++++------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 0322579..ffbfff7 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1345,24 +1345,26 @@ def textline_contours_postprocessing(textline_mask, slope, contour_text_interest return contours_rotated_clean -def separate_lines_new2(img_path, thetha, num_col, slope_region, logger=None, plotter=None): +def separate_lines_new2(img_crop, thetha, num_col, slope_region, logger=None, plotter=None): if logger is None: logger = getLogger(__package__) + if not np.prod(img_crop.shape): + return img_crop if num_col == 1: - num_patches = int(img_path.shape[1] / 200.0) + num_patches = int(img_crop.shape[1] / 200.0) else: - num_patches = int(img_path.shape[1] / 140.0) - # num_patches=int(img_path.shape[1]/200.) + num_patches = int(img_crop.shape[1] / 140.0) + # num_patches=int(img_crop.shape[1]/200.) if num_patches == 0: num_patches = 1 - img_patch_ineterst = img_path[:, :] # [peaks_neg_true[14]-dis_up:peaks_neg_true[15]+dis_down ,:] + img_patch_interest = img_crop[:, :] # [peaks_neg_true[14]-dis_up:peaks_neg_true[15]+dis_down ,:] - # plt.imshow(img_patch_ineterst) + # plt.imshow(img_patch_interest) # plt.show() - length_x = int(img_path.shape[1] / float(num_patches)) + length_x = int(img_crop.shape[1] / float(num_patches)) # margin = int(0.04 * length_x) just recently this was changed because it break lines into 2 margin = int(0.04 * length_x) # if margin<=4: @@ -1370,7 +1372,7 @@ def separate_lines_new2(img_path, thetha, num_col, slope_region, logger=None, pl # margin=0 width_mid = length_x - 2 * margin - nxf = img_path.shape[1] / float(width_mid) + nxf = img_crop.shape[1] / float(width_mid) if nxf > int(nxf): nxf = int(nxf) + 1 @@ -1386,12 +1388,12 @@ def separate_lines_new2(img_path, thetha, num_col, slope_region, logger=None, pl index_x_d = i * width_mid index_x_u = index_x_d + length_x - if index_x_u > img_path.shape[1]: - index_x_u = img_path.shape[1] - index_x_d = img_path.shape[1] - length_x + if index_x_u > img_crop.shape[1]: + index_x_u = img_crop.shape[1] + index_x_d = img_crop.shape[1] - length_x # img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] - img_xline = img_patch_ineterst[:, index_x_d:index_x_u] + img_xline = img_patch_interest[:, index_x_d:index_x_u] try: assert img_xline.any() @@ -1407,9 +1409,9 @@ def separate_lines_new2(img_path, thetha, num_col, slope_region, logger=None, pl img_line_rotated = rotate_image(img_xline, slope_xline) img_line_rotated[:, :][img_line_rotated[:, :] != 0] = 1 - img_patch_ineterst = img_path[:, :] # [peaks_neg_true[14]-dis_up:peaks_neg_true[14]+dis_down ,:] + img_patch_interest = img_crop[:, :] # [peaks_neg_true[14]-dis_up:peaks_neg_true[14]+dis_down ,:] - img_patch_ineterst_revised = np.zeros(img_patch_ineterst.shape) + img_patch_interest_revised = np.zeros(img_patch_interest.shape) for i in range(nxf): if i == 0: @@ -1419,11 +1421,11 @@ def separate_lines_new2(img_path, thetha, num_col, slope_region, logger=None, pl index_x_d = i * width_mid index_x_u = index_x_d + length_x - if index_x_u > img_path.shape[1]: - index_x_u = img_path.shape[1] - index_x_d = img_path.shape[1] - length_x + if index_x_u > img_crop.shape[1]: + index_x_u = img_crop.shape[1] + index_x_d = img_crop.shape[1] - length_x - img_xline = img_patch_ineterst[:, index_x_d:index_x_u] + img_xline = img_patch_interest[:, index_x_d:index_x_u] img_int = np.zeros((img_xline.shape[0], img_xline.shape[1])) img_int[:, :] = img_xline[:, :] # img_patch_org[:,:,0] @@ -1446,9 +1448,9 @@ def separate_lines_new2(img_path, thetha, num_col, slope_region, logger=None, pl int(img_int.shape[1] * (1.0)) : int(img_int.shape[1] * (1.0)) + img_int.shape[1]] img_patch_separated_returned_true_size = img_patch_separated_returned_true_size[:, margin : length_x - margin] - img_patch_ineterst_revised[:, index_x_d + margin : index_x_u - margin] = img_patch_separated_returned_true_size + img_patch_interest_revised[:, index_x_d + margin : index_x_u - margin] = img_patch_separated_returned_true_size - return img_patch_ineterst_revised + return img_patch_interest_revised def do_image_rotation(angle, img, sigma_des, logger=None): if logger is None: @@ -1546,7 +1548,7 @@ def do_work_of_slopes_new( img_int_p = all_text_region_raw[:,:] img_int_p = cv2.erode(img_int_p, KERNEL, iterations=2) - if img_int_p.shape[0] /img_int_p.shape[1] < 0.1: + if not np.prod(img_int_p.shape) or img_int_p.shape[0] /img_int_p.shape[1] < 0.1: slope = 0 slope_for_all = slope_deskew all_text_region_raw = textline_mask_tot_ea[y: y + h, x: x + w] @@ -1603,7 +1605,7 @@ def do_work_of_slopes_new_curved( # plt.imshow(img_int_p) # plt.show() - if img_int_p.shape[0] / img_int_p.shape[1] < 0.1: + if not np.prod(img_int_p.shape) or img_int_p.shape[0] / img_int_p.shape[1] < 0.1: slope = 0 slope_for_all = slope_deskew else: From 41cc38c51aaa74fb27854a101e9fbe727478f86b Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 20 Aug 2025 14:28:14 +0200 Subject: [PATCH 254/492] get_textregion_contours_in_org_image_light: no back rotation, drop slope_first (always 0) --- src/eynollah/eynollah.py | 14 ++++++-------- src/eynollah/utils/contour.py | 26 +++++++++++--------------- 2 files changed, 17 insertions(+), 23 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 8080035..49f6b33 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -2927,12 +2927,10 @@ class Eynollah: #print(textline_mask_tot_ea.shape, 'textline_mask_tot_ea deskew') slope_deskew = return_deskew_slop(cv2.erode(textline_mask_tot_ea, KERNEL, iterations=2), 2, 30, True, map=self.executor.map, logger=self.logger, plotter=self.plotter) - slope_first = 0 - if self.plotter: self.plotter.save_deskewed_image(slope_deskew) self.logger.info("slope_deskew: %.2f°", slope_deskew) - return slope_deskew, slope_first + return slope_deskew def run_marginals( self, image_page, textline_mask_tot_ea, mask_images, mask_lines, @@ -4173,9 +4171,9 @@ class Eynollah: textline_mask_tot_ea_deskew = resize_image(textline_mask_tot_ea,img_h_new, img_w_new ) - slope_deskew, slope_first = self.run_deskew(textline_mask_tot_ea_deskew) + slope_deskew = self.run_deskew(textline_mask_tot_ea_deskew) else: - slope_deskew, slope_first = self.run_deskew(textline_mask_tot_ea) + slope_deskew = self.run_deskew(textline_mask_tot_ea) #print("text region early -2,5 in %.1fs", time.time() - t0) #self.logger.info("Textregion detection took %.1fs ", time.time() - t1t) num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, \ @@ -4216,7 +4214,7 @@ class Eynollah: textline_mask_tot_ea = self.run_textline(image_page) self.logger.info("textline detection took %.1fs", time.time() - t1) t1 = time.time() - slope_deskew, slope_first = self.run_deskew(textline_mask_tot_ea) + slope_deskew = self.run_deskew(textline_mask_tot_ea) self.logger.info("deskewing took %.1fs", time.time() - t1) elif num_col_classifier in (1,2): org_h_l_m = textline_mask_tot_ea.shape[0] @@ -4405,12 +4403,12 @@ class Eynollah: contours_only_text_parent, contours_only_text_parent_d_ordered, text_only, marginal_cnts=polygons_of_marginals) #print("text region early 3.5 in %.1fs", time.time() - t0) txt_con_org , conf_contours_textregions = get_textregion_contours_in_org_image_light( - contours_only_text_parent, self.image, slope_first, confidence_matrix, map=self.executor.map) + contours_only_text_parent, self.image, confidence_matrix) #txt_con_org = self.dilate_textregions_contours(txt_con_org) #contours_only_text_parent = self.dilate_textregions_contours(contours_only_text_parent) else: txt_con_org , conf_contours_textregions = get_textregion_contours_in_org_image_light( - contours_only_text_parent, self.image, slope_first, confidence_matrix, map=self.executor.map) + contours_only_text_parent, self.image, confidence_matrix) #print("text region early 4 in %.1fs", time.time() - t0) boxes_text, _ = get_text_region_boxes_by_given_contours(contours_only_text_parent) boxes_marginals, _ = get_text_region_boxes_by_given_contours(polygons_of_marginals) diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index 3d7e5c8..249748a 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -247,23 +247,19 @@ def do_back_rotation_and_get_cnt_back(contour_par, index_r_con, img, slope_first cont_int[0][:, 0, 1] = cont_int[0][:, 0, 1] + np.abs(img_copy.shape[0] - img.shape[0]) return cont_int[0], index_r_con, confidence_contour -def get_textregion_contours_in_org_image_light(cnts, img, slope_first, confidence_matrix, map=map): +def get_textregion_contours_in_org_image_light(cnts, img, confidence_matrix): if not len(cnts): return [], [] - - confidence_matrix = cv2.resize(confidence_matrix, (int(img.shape[1]/6), int(img.shape[0]/6)), interpolation=cv2.INTER_NEAREST) - img = cv2.resize(img, (int(img.shape[1]/6), int(img.shape[0]/6)), interpolation=cv2.INTER_NEAREST) - ##cnts = list( (np.array(cnts)/2).astype(np.int16) ) - #cnts = cnts/2 - cnts = [(i/6).astype(int) for i in cnts] - results = map(partial(do_back_rotation_and_get_cnt_back, - img=img, - slope_first=slope_first, - confidence_matrix=confidence_matrix, - ), - cnts, range(len(cnts))) - contours, indexes, conf_contours = tuple(zip(*results)) - return [i*6 for i in contours], list(conf_contours) + + confidence_matrix = cv2.resize(confidence_matrix, + (img.shape[1] // 6, img.shape[0] // 6), + interpolation=cv2.INTER_NEAREST) + confs = [] + for cnt in cnts: + cnt_mask = np.zeros(confidence_matrix.shape) + cnt_mask = cv2.fillPoly(cnt_mask, pts=[cnt // 6], color=1.0) + confs.append(np.sum(confidence_matrix * cnt_mask) / np.sum(cnt_mask)) + return cnts, confs def return_contours_of_interested_textline(region_pre_p, pixel): # pixels of images are identified by 5 From 7b51fd662497ecd7c35b09764df2ed5c6b651a76 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 21 Aug 2025 01:03:46 +0200 Subject: [PATCH 255/492] avoid creating invalid polygons via rounding --- src/eynollah/eynollah.py | 5 +++-- src/eynollah/utils/contour.py | 9 +++++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 49f6b33..0f458b4 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -3670,16 +3670,17 @@ class Eynollah: return x_differential_new def dilate_textregions_contours_textline_version(self, all_found_textline_polygons): - return [[np.array(make_valid(Polygon(poly[:, 0]).buffer(5)).exterior.coords, + return [[np.array(make_valid(Polygon(poly[:, 0]).buffer(5)).exterior.coords[:-1], dtype=int)[:, np.newaxis] for poly in region] for region in all_found_textline_polygons] def dilate_textregions_contours(self, all_found_textline_polygons): - return [np.array(make_valid(Polygon(poly[:, 0]).buffer(5)).exterior.coords, + return [np.array(make_valid(Polygon(poly[:, 0]).buffer(5)).exterior.coords[:-1], dtype=int)[:, np.newaxis] for poly in all_found_textline_polygons] + def dilate_textline_contours(self, all_found_textline_polygons): for j in range(len(all_found_textline_polygons)): for ij in range(len(all_found_textline_polygons[j])): diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index 249748a..8205c2b 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -49,7 +49,7 @@ def filter_contours_area_of_image(image, contours, hierarchy, max_area, min_area area <= max_area * np.prod(image.shape[:2]) and hierarchy[0][jv][3] == -1): found_polygons_early.append(np.array([[point] - for point in polygon.exterior.coords], dtype=np.uint)) + for point in polygon.exterior.coords[:-1]], dtype=np.uint)) return found_polygons_early def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, min_area): @@ -70,7 +70,7 @@ def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, m True): # print(c[0][0][1]) found_polygons_early.append(np.array([[point] - for point in polygon.exterior.coords], dtype=np.int32)) + for point in polygon.exterior.coords[:-1]], dtype=np.int32)) return found_polygons_early def find_new_features_of_contours(contours_main): @@ -330,6 +330,11 @@ def return_contours_of_interested_region_by_size(region_pre_p, pixel, min_area, def make_valid(polygon: Polygon) -> Polygon: """Ensures shapely.geometry.Polygon object is valid by repeated rearrangement/simplification/enlargement.""" + def isint(x): + return isinstance(x, int) or int(x) == x + # make sure rounding does not invalidate + if not all(map(isint, np.array(polygon.exterior.coords).flat)) and polygon.minimum_clearance < 1.0: + polygon = Polygon(np.round(polygon.exterior.coords)) points = list(polygon.exterior.coords) # try by re-arranging points for split in range(1, len(points)): From e730725da3d40cfbd20f857c36843190713725ca Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 21 Aug 2025 01:05:15 +0200 Subject: [PATCH 256/492] check_any_text_region_in_model_one_is_main_or_header_light: return original instead of resampled contours --- src/eynollah/utils/__init__.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 3c130d7..c479744 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -957,11 +957,11 @@ def check_any_text_region_in_model_one_is_main_or_header_light( regions_model_full = cv2.resize(regions_model_full, (regions_model_full.shape[1] // zoom, regions_model_full.shape[0] // zoom), interpolation=cv2.INTER_NEAREST) - contours_only_text_parent = [(i / zoom).astype(int) for i in contours_only_text_parent] + contours_only_text_parent_z = [(cnt / zoom).astype(int) for cnt in contours_only_text_parent] ### cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin = \ - find_new_features_of_contours(contours_only_text_parent) + find_new_features_of_contours(contours_only_text_parent_z) length_con=x_max_main-x_min_main height_con=y_max_main-y_min_main @@ -984,8 +984,7 @@ def check_any_text_region_in_model_one_is_main_or_header_light( contours_only_text_parent_main_d=[] contours_only_text_parent_head_d=[] - for ii in range(len(contours_only_text_parent)): - con=contours_only_text_parent[ii] + for ii, con in enumerate(contours_only_text_parent_z): img=np.zeros((regions_model_1.shape[0], regions_model_1.shape[1], 3)) img = cv2.fillPoly(img, pts=[con], color=(255, 255, 255)) @@ -996,23 +995,22 @@ def check_any_text_region_in_model_one_is_main_or_header_light( if (pixels_header/float(pixels_main)>=0.3) and ( (length_con[ii]/float(height_con[ii]) )>=1.3 ): regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=2 - contours_only_text_parent_head.append(con) + contours_only_text_parent_head.append(contours_only_text_parent[ii]) + conf_contours_head.append(None) # why not conf_contours[ii], too? if contours_only_text_parent_d_ordered is not None: contours_only_text_parent_head_d.append(contours_only_text_parent_d_ordered[ii]) all_box_coord_head.append(all_box_coord[ii]) slopes_head.append(slopes[ii]) all_found_textline_polygons_head.append(all_found_textline_polygons[ii]) - conf_contours_head.append(None) else: regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=1 - contours_only_text_parent_main.append(con) + contours_only_text_parent_main.append(contours_only_text_parent[ii]) conf_contours_main.append(conf_contours[ii]) if contours_only_text_parent_d_ordered is not None: contours_only_text_parent_main_d.append(contours_only_text_parent_d_ordered[ii]) all_box_coord_main.append(all_box_coord[ii]) slopes_main.append(slopes[ii]) all_found_textline_polygons_main.append(all_found_textline_polygons[ii]) - #print(all_pixels,pixels_main,pixels_header) ### to make it faster @@ -1020,8 +1018,6 @@ def check_any_text_region_in_model_one_is_main_or_header_light( # regions_model_full = cv2.resize(img, (regions_model_full.shape[1] // zoom, # regions_model_full.shape[0] // zoom), # interpolation=cv2.INTER_NEAREST) - contours_only_text_parent_head = [(i * zoom).astype(int) for i in contours_only_text_parent_head] - contours_only_text_parent_main = [(i * zoom).astype(int) for i in contours_only_text_parent_main] ### return (regions_model_1, From 17bcf1af71802d790f7508d52221d64ea4fff939 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 21 Aug 2025 01:32:32 +0200 Subject: [PATCH 257/492] =?UTF-8?q?rename=20*lines=5Fxml=20=E2=86=92=20*se?= =?UTF-8?q?plines=20for=20clarity?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/eynollah/eynollah.py | 58 ++++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 0f458b4..c04c481 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -1713,9 +1713,9 @@ class Eynollah: mask_texts_only = (prediction_regions_org[:,:] ==1)*1 mask_images_only=(prediction_regions_org[:,:] ==2)*1 - polygons_lines_xml, hir_lines_xml = return_contours_of_image(mask_lines_only) - polygons_lines_xml = textline_con_fil = filter_contours_area_of_image( - mask_lines_only, polygons_lines_xml, hir_lines_xml, max_area=1, min_area=0.00001) + polygons_seplines, hir_seplines = return_contours_of_image(mask_lines_only) + polygons_seplines = textline_con_fil = filter_contours_area_of_image( + mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001) polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) @@ -1779,7 +1779,7 @@ class Eynollah: [page_coord_img[2], page_coord_img[1]]])) self.logger.debug("exit get_regions_extract_images_only") - return text_regions_p_true, erosion_hurts, polygons_lines_xml, polygons_of_images_fin, image_page, page_coord, cont_page + return text_regions_p_true, erosion_hurts, polygons_seplines, polygons_of_images_fin, image_page, page_coord, cont_page def get_regions_light_v(self,img,is_image_enhanced, num_col_classifier, skip_layout_and_reading_order=False): self.logger.debug("enter get_regions_light_v") @@ -1895,24 +1895,24 @@ class Eynollah: mask_texts_only = cv2.dilate(mask_texts_only, kernel=np.ones((2,2), np.uint8), iterations=1) mask_images_only=(prediction_regions_org[:,:] ==2)*1 - polygons_lines_xml, hir_lines_xml = return_contours_of_image(mask_lines_only) + polygons_seplines, hir_seplines = return_contours_of_image(mask_lines_only) test_khat = np.zeros(prediction_regions_org.shape) - test_khat = cv2.fillPoly(test_khat, pts=polygons_lines_xml, color=(1,1,1)) + test_khat = cv2.fillPoly(test_khat, pts=polygons_seplines, color=(1,1,1)) #plt.imshow(test_khat[:,:]) #plt.show() #for jv in range(1): - #print(jv, hir_lines_xml[0][232][3]) + #print(jv, hir_seplines[0][232][3]) #test_khat = np.zeros(prediction_regions_org.shape) - #test_khat = cv2.fillPoly(test_khat, pts = [polygons_lines_xml[232]], color=(1,1,1)) + #test_khat = cv2.fillPoly(test_khat, pts = [polygons_seplines[232]], color=(1,1,1)) #plt.imshow(test_khat[:,:]) #plt.show() - polygons_lines_xml = filter_contours_area_of_image( - mask_lines_only, polygons_lines_xml, hir_lines_xml, max_area=1, min_area=0.00001) + polygons_seplines = filter_contours_area_of_image( + mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001) test_khat = np.zeros(prediction_regions_org.shape) - test_khat = cv2.fillPoly(test_khat, pts = polygons_lines_xml, color=(1,1,1)) + test_khat = cv2.fillPoly(test_khat, pts = polygons_seplines, color=(1,1,1)) #plt.imshow(test_khat[:,:]) #plt.show() @@ -1937,7 +1937,7 @@ class Eynollah: #plt.show() #print("inside 4 ", time.time()-t_in) self.logger.debug("exit get_regions_light_v") - return text_regions_p_true, erosion_hurts, polygons_lines_xml, textline_mask_tot_ea, img_bin, confidence_matrix + return text_regions_p_true, erosion_hurts, polygons_seplines, textline_mask_tot_ea, img_bin, confidence_matrix else: img_bin = resize_image(img_bin,img_height_h, img_width_h ) self.logger.debug("exit get_regions_light_v") @@ -2020,9 +2020,9 @@ class Eynollah: mask_texts_only=(prediction_regions_org[:,:]==1)*1 mask_images_only=(prediction_regions_org[:,:]==2)*1 - polygons_lines_xml, hir_lines_xml = return_contours_of_image(mask_lines_only) - polygons_lines_xml = filter_contours_area_of_image( - mask_lines_only, polygons_lines_xml, hir_lines_xml, max_area=1, min_area=0.00001) + polygons_seplines, hir_seplines = return_contours_of_image(mask_lines_only) + polygons_seplines = filter_contours_area_of_image( + mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001) polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only, 1, 0.00001) polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only, 1, 0.00001) @@ -2034,7 +2034,7 @@ class Eynollah: text_regions_p_true=cv2.fillPoly(text_regions_p_true,pts=polygons_of_only_texts, color=(1,1,1)) self.logger.debug("exit get_regions_from_xy_2models") - return text_regions_p_true, erosion_hurts, polygons_lines_xml + return text_regions_p_true, erosion_hurts, polygons_seplines except: if self.input_binary: prediction_bin = np.copy(img_org) @@ -2069,9 +2069,9 @@ class Eynollah: mask_texts_only = (prediction_regions_org == 1)*1 mask_images_only= (prediction_regions_org == 2)*1 - polygons_lines_xml, hir_lines_xml = return_contours_of_image(mask_lines_only) - polygons_lines_xml = filter_contours_area_of_image( - mask_lines_only, polygons_lines_xml, hir_lines_xml, max_area=1, min_area=0.00001) + polygons_seplines, hir_seplines = return_contours_of_image(mask_lines_only) + polygons_seplines = filter_contours_area_of_image( + mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001) polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) @@ -2084,7 +2084,7 @@ class Eynollah: erosion_hurts = True self.logger.debug("exit get_regions_from_xy_2models") - return text_regions_p_true, erosion_hurts, polygons_lines_xml + return text_regions_p_true, erosion_hurts, polygons_seplines def do_order_of_regions_full_layout( self, contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot): @@ -4102,7 +4102,7 @@ class Eynollah: img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement(self.light_version) self.logger.info("Enhancing took %.1fs ", time.time() - t0) if self.extract_only_images: - text_regions_p_1, erosion_hurts, polygons_lines_xml, polygons_of_images, image_page, page_coord, cont_page = \ + text_regions_p_1, erosion_hurts, polygons_seplines, polygons_of_images, image_page, page_coord, cont_page = \ self.get_regions_light_v_extract_only_images(img_res, is_image_enhanced, num_col_classifier) ocr_all_textlines = None pcgts = self.writer.build_pagexml_no_full_layout( @@ -4145,7 +4145,7 @@ class Eynollah: polygons_of_marginals = [] all_found_textline_polygons_marginals = [] all_box_coord_marginals = [] - polygons_lines_xml = [] + polygons_seplines = [] contours_tables = [] ocr_all_textlines = None conf_contours_textregions =None @@ -4153,13 +4153,13 @@ class Eynollah: cont_page, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, page_coord, polygons_of_images, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, - cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines, conf_contours_textregions) + cont_page, polygons_seplines, contours_tables, ocr_all_textlines, conf_contours_textregions) return pcgts #print("text region early -1 in %.1fs", time.time() - t0) t1 = time.time() if self.light_version: - text_regions_p_1 ,erosion_hurts, polygons_lines_xml, textline_mask_tot_ea, img_bin_light, confidence_matrix = \ + text_regions_p_1, erosion_hurts, polygons_seplines, textline_mask_tot_ea, img_bin_light, confidence_matrix = \ self.get_regions_light_v(img_res, is_image_enhanced, num_col_classifier) #print("text region early -2 in %.1fs", time.time() - t0) @@ -4186,7 +4186,7 @@ class Eynollah: textline_mask_tot_ea_org = np.copy(textline_mask_tot_ea) #print("text region early -4 in %.1fs", time.time() - t0) else: - text_regions_p_1 ,erosion_hurts, polygons_lines_xml = \ + text_regions_p_1, erosion_hurts, polygons_seplines = \ self.get_regions_from_xy_2models(img_res, is_image_enhanced, num_col_classifier) self.logger.info("Textregion detection took %.1fs ", time.time() - t1) @@ -4385,13 +4385,13 @@ class Eynollah: [], [], page_coord, [], [], [], [], [], [], polygons_of_images, contours_tables, [], polygons_of_marginals, empty_marginals, empty_marginals, [], [], [], - cont_page, polygons_lines_xml, [], [], []) + cont_page, polygons_seplines, [], [], []) else: pcgts = self.writer.build_pagexml_no_full_layout( [], page_coord, [], [], [], [], polygons_of_images, polygons_of_marginals, empty_marginals, empty_marginals, [], [], - cont_page, polygons_lines_xml, contours_tables, [], []) + cont_page, polygons_seplines, contours_tables, [], []) return pcgts @@ -4586,7 +4586,7 @@ class Eynollah: all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, polygons_of_images, contours_tables, polygons_of_drop_capitals, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_h, slopes_marginals, - cont_page, polygons_lines_xml, ocr_all_textlines, conf_contours_textregions, conf_contours_textregions_h) + cont_page, polygons_seplines, ocr_all_textlines, conf_contours_textregions, conf_contours_textregions_h) return pcgts contours_only_text_parent_h = None @@ -4665,7 +4665,7 @@ class Eynollah: txt_con_org, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, all_box_coord, polygons_of_images, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, - cont_page, polygons_lines_xml, contours_tables, ocr_all_textlines, conf_contours_textregions) + cont_page, polygons_seplines, contours_tables, ocr_all_textlines, conf_contours_textregions) return pcgts From a433c736281dcf86630f80bfa686064814b313d9 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 21 Aug 2025 01:33:16 +0200 Subject: [PATCH 258/492] filter_contours_area_of_image*: also ensure validity here --- src/eynollah/eynollah.py | 4 ++-- src/eynollah/utils/contour.py | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index c04c481..7b3b81a 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -3671,13 +3671,13 @@ class Eynollah: def dilate_textregions_contours_textline_version(self, all_found_textline_polygons): return [[np.array(make_valid(Polygon(poly[:, 0]).buffer(5)).exterior.coords[:-1], - dtype=int)[:, np.newaxis] + dtype=np.uint)[:, np.newaxis] for poly in region] for region in all_found_textline_polygons] def dilate_textregions_contours(self, all_found_textline_polygons): return [np.array(make_valid(Polygon(poly[:, 0]).buffer(5)).exterior.coords[:-1], - dtype=int)[:, np.newaxis] + dtype=np.uint)[:, np.newaxis] for poly in all_found_textline_polygons] diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index 8205c2b..03d45b7 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -48,8 +48,8 @@ def filter_contours_area_of_image(image, contours, hierarchy, max_area, min_area if (area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]) and hierarchy[0][jv][3] == -1): - found_polygons_early.append(np.array([[point] - for point in polygon.exterior.coords[:-1]], dtype=np.uint)) + found_polygons_early.append(np.array(make_valid(polygon).exterior.coords[:-1], + dtype=np.uint)[:, np.newaxis]) return found_polygons_early def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, min_area): @@ -69,8 +69,8 @@ def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, m # hierarchy[0][jv][3]==-1 True): # print(c[0][0][1]) - found_polygons_early.append(np.array([[point] - for point in polygon.exterior.coords[:-1]], dtype=np.int32)) + found_polygons_early.append(np.array(make_valid(polygon).exterior.coords[:-1], + dtype=np.uint)[:, np.newaxis]) return found_polygons_early def find_new_features_of_contours(contours_main): From 0650274ffad576acde6048822b5f74b6303ef689 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 21 Aug 2025 01:42:46 +0200 Subject: [PATCH 259/492] =?UTF-8?q?move=20dilate=5F*=5Fcontours=20to=20.ut?= =?UTF-8?q?ils.contour,=20rename=20dilate=5Ftextregions=5Fcontours=5Ftextl?= =?UTF-8?q?ine=5Fversion=20=E2=86=92=20dilate=5Ftextline=5Fcontours?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/eynollah/eynollah.py | 253 ++-------------------------------- src/eynollah/utils/contour.py | 11 ++ 2 files changed, 22 insertions(+), 242 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 7b3b81a..fe233cb 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -69,12 +69,13 @@ from .utils.contour import ( get_text_region_boxes_by_given_contours, get_textregion_contours_in_org_image, get_textregion_contours_in_org_image_light, - make_valid, return_contours_of_image, return_contours_of_interested_region, return_contours_of_interested_region_by_min_size, return_contours_of_interested_textline, return_parent_contours, + dilate_textregion_contours, + dilate_textline_contours, ) from .utils.rotate import ( rotate_image, @@ -1919,7 +1920,7 @@ class Eynollah: #sys.exit() polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) - ##polygons_of_only_texts = self.dilate_textregions_contours(polygons_of_only_texts) + ##polygons_of_only_texts = dilate_textregion_contours(polygons_of_only_texts) polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) text_regions_p_true = np.zeros(prediction_regions_org.shape) @@ -3669,117 +3670,6 @@ class Eynollah: return x_differential_new - def dilate_textregions_contours_textline_version(self, all_found_textline_polygons): - return [[np.array(make_valid(Polygon(poly[:, 0]).buffer(5)).exterior.coords[:-1], - dtype=np.uint)[:, np.newaxis] - for poly in region] - for region in all_found_textline_polygons] - - def dilate_textregions_contours(self, all_found_textline_polygons): - return [np.array(make_valid(Polygon(poly[:, 0]).buffer(5)).exterior.coords[:-1], - dtype=np.uint)[:, np.newaxis] - for poly in all_found_textline_polygons] - - - def dilate_textline_contours(self, all_found_textline_polygons): - for j in range(len(all_found_textline_polygons)): - for ij in range(len(all_found_textline_polygons[j])): - con_ind = all_found_textline_polygons[j][ij] - area = cv2.contourArea(con_ind) - - con_ind = con_ind.astype(float) - - x_differential = np.diff( con_ind[:,0,0]) - y_differential = np.diff( con_ind[:,0,1]) - - x_differential = gaussian_filter1d(x_differential, 3) - y_differential = gaussian_filter1d(y_differential, 3) - - x_min = float(np.min( con_ind[:,0,0] )) - y_min = float(np.min( con_ind[:,0,1] )) - - x_max = float(np.max( con_ind[:,0,0] )) - y_max = float(np.max( con_ind[:,0,1] )) - - x_differential_mask_nonzeros = [ ind/abs(ind) if ind!=0 else ind for ind in x_differential] - y_differential_mask_nonzeros = [ ind/abs(ind) if ind!=0 else ind for ind in y_differential] - - abs_diff=abs(abs(x_differential)- abs(y_differential) ) - - inc_x = np.zeros(len(x_differential)+1) - inc_y = np.zeros(len(x_differential)+1) - - if (y_max-y_min) <= (x_max-x_min): - dilation_m1 = round(area / (x_max-x_min) * 0.35) - else: - dilation_m1 = round(area / (y_max-y_min) * 0.35) - - if dilation_m1>12: - dilation_m1 = 12 - if dilation_m1<4: - dilation_m1 = 4 - #print(dilation_m1, 'dilation_m1') - dilation_m2 = int(dilation_m1/2.) +1 - - for i in range(len(x_differential)): - if abs_diff[i]==0: - inc_x[i+1] = dilation_m2*(-1*y_differential_mask_nonzeros[i]) - inc_y[i+1] = dilation_m2*(x_differential_mask_nonzeros[i]) - elif abs_diff[i]!=0 and x_differential_mask_nonzeros[i]==0 and y_differential_mask_nonzeros[i]!=0: - inc_x[i+1]= dilation_m1*(-1*y_differential_mask_nonzeros[i]) - elif abs_diff[i]!=0 and x_differential_mask_nonzeros[i]!=0 and y_differential_mask_nonzeros[i]==0: - inc_y[i+1] = dilation_m1*(x_differential_mask_nonzeros[i]) - - elif abs_diff[i]!=0 and abs_diff[i]>=3: - if abs(x_differential[i])>abs(y_differential[i]): - inc_y[i+1] = dilation_m1*(x_differential_mask_nonzeros[i]) - else: - inc_x[i+1]= dilation_m1*(-1*y_differential_mask_nonzeros[i]) - else: - inc_x[i+1] = dilation_m2*(-1*y_differential_mask_nonzeros[i]) - inc_y[i+1] = dilation_m2*(x_differential_mask_nonzeros[i]) - - inc_x[0] = inc_x[-1] - inc_y[0] = inc_y[-1] - - con_scaled = con_ind*1 - - con_scaled[:,0, 0] = con_ind[:,0,0] + np.array(inc_x)[:] - con_scaled[:,0, 1] = con_ind[:,0,1] + np.array(inc_y)[:] - - con_scaled[:,0, 1][con_scaled[:,0, 1]<0] = 0 - con_scaled[:,0, 0][con_scaled[:,0, 0]<0] = 0 - - con_ind = con_ind.astype(np.int32) - - results = [cv2.pointPolygonTest(con_ind, (con_scaled[ind,0, 0], con_scaled[ind,0, 1]), False) - for ind in range(len(con_scaled[:,0, 1])) ] - results = np.array(results) - results[results==0] = 1 - - diff_result = np.diff(results) - - indices_2 = [ind for ind in range(len(diff_result)) if diff_result[ind]==2] - indices_m2 = [ind for ind in range(len(diff_result)) if diff_result[ind]==-2] - - if results[0]==1: - con_scaled[:indices_m2[0]+1,0, 1] = con_ind[:indices_m2[0]+1,0,1] - con_scaled[:indices_m2[0]+1,0, 0] = con_ind[:indices_m2[0]+1,0,0] - indices_m2 = indices_m2[1:] - - if len(indices_2)>len(indices_m2): - con_scaled[indices_2[-1]+1:,0, 1] = con_ind[indices_2[-1]+1:,0,1] - con_scaled[indices_2[-1]+1:,0, 0] = con_ind[indices_2[-1]+1:,0,0] - indices_2 = indices_2[:-1] - - for ii in range(len(indices_2)): - con_scaled[indices_2[ii]+1:indices_m2[ii]+1,0, 1] = con_scaled[indices_2[ii],0, 1] - con_scaled[indices_2[ii]+1:indices_m2[ii]+1,0, 0] = con_scaled[indices_2[ii],0, 0] - - all_found_textline_polygons[j][ij][:,0,1] = con_scaled[:,0, 1] - all_found_textline_polygons[j][ij][:,0,0] = con_scaled[:,0, 0] - return all_found_textline_polygons - def filter_contours_inside_a_bigger_one(self,contours, contours_d_ordered, image, marginal_cnts=None, type_contour="textregion"): if type_contour=="textregion": areas = [cv2.contourArea(contours[j]) for j in range(len(contours))] @@ -3917,121 +3807,6 @@ class Eynollah: return contours, text_con_org, conf_contours_textregions, contours_textline, contours_only_text_parent_d_ordered, np.array(range(len(contours))) - def dilate_textlines(self, all_found_textline_polygons): - for j in range(len(all_found_textline_polygons)): - for i in range(len(all_found_textline_polygons[j])): - con_ind = all_found_textline_polygons[j][i] - con_ind = con_ind.astype(float) - - x_differential = np.diff( con_ind[:,0,0]) - y_differential = np.diff( con_ind[:,0,1]) - - x_min = float(np.min( con_ind[:,0,0] )) - y_min = float(np.min( con_ind[:,0,1] )) - - x_max = float(np.max( con_ind[:,0,0] )) - y_max = float(np.max( con_ind[:,0,1] )) - - if (y_max - y_min) > (x_max - x_min) and (x_max - x_min)<70: - x_biger_than_x = np.abs(x_differential) > np.abs(y_differential) - mult = x_biger_than_x*x_differential - - arg_min_mult = np.argmin(mult) - arg_max_mult = np.argmax(mult) - - if y_differential[0]==0: - y_differential[0] = 0.1 - if y_differential[-1]==0: - y_differential[-1]= 0.1 - y_differential = [y_differential[ind] if y_differential[ind] != 0 - else 0.5 * (y_differential[ind-1] + y_differential[ind+1]) - for ind in range(len(y_differential))] - - if y_differential[0]==0.1: - y_differential[0] = y_differential[1] - if y_differential[-1]==0.1: - y_differential[-1] = y_differential[-2] - y_differential.append(y_differential[0]) - - y_differential = [-1 if y_differential[ind] < 0 else 1 - for ind in range(len(y_differential))] - y_differential = self.return_it_in_two_groups(y_differential) - y_differential = np.array(y_differential) - - con_scaled = con_ind*1 - con_scaled[:,0, 0] = con_ind[:,0,0] - 8*y_differential - con_scaled[arg_min_mult,0, 1] = con_ind[arg_min_mult,0,1] + 8 - con_scaled[arg_min_mult+1,0, 1] = con_ind[arg_min_mult+1,0,1] + 8 - - try: - con_scaled[arg_min_mult-1,0, 1] = con_ind[arg_min_mult-1,0,1] + 5 - con_scaled[arg_min_mult+2,0, 1] = con_ind[arg_min_mult+2,0,1] + 5 - except: - pass - - con_scaled[arg_max_mult,0, 1] = con_ind[arg_max_mult,0,1] - 8 - con_scaled[arg_max_mult+1,0, 1] = con_ind[arg_max_mult+1,0,1] - 8 - - try: - con_scaled[arg_max_mult-1,0, 1] = con_ind[arg_max_mult-1,0,1] - 5 - con_scaled[arg_max_mult+2,0, 1] = con_ind[arg_max_mult+2,0,1] - 5 - except: - pass - - else: - y_biger_than_x = np.abs(y_differential) > np.abs(x_differential) - mult = y_biger_than_x*y_differential - - arg_min_mult = np.argmin(mult) - arg_max_mult = np.argmax(mult) - - if x_differential[0]==0: - x_differential[0] = 0.1 - if x_differential[-1]==0: - x_differential[-1]= 0.1 - x_differential = [x_differential[ind] if x_differential[ind] != 0 - else 0.5 * (x_differential[ind-1] + x_differential[ind+1]) - for ind in range(len(x_differential))] - - if x_differential[0]==0.1: - x_differential[0] = x_differential[1] - if x_differential[-1]==0.1: - x_differential[-1] = x_differential[-2] - x_differential.append(x_differential[0]) - - x_differential = [-1 if x_differential[ind] < 0 else 1 - for ind in range(len(x_differential))] - x_differential = self.return_it_in_two_groups(x_differential) - x_differential = np.array(x_differential) - - con_scaled = con_ind*1 - con_scaled[:,0, 1] = con_ind[:,0,1] + 8*x_differential - con_scaled[arg_min_mult,0, 0] = con_ind[arg_min_mult,0,0] + 8 - con_scaled[arg_min_mult+1,0, 0] = con_ind[arg_min_mult+1,0,0] + 8 - - try: - con_scaled[arg_min_mult-1,0, 0] = con_ind[arg_min_mult-1,0,0] + 5 - con_scaled[arg_min_mult+2,0, 0] = con_ind[arg_min_mult+2,0,0] + 5 - except: - pass - - con_scaled[arg_max_mult,0, 0] = con_ind[arg_max_mult,0,0] - 8 - con_scaled[arg_max_mult+1,0, 0] = con_ind[arg_max_mult+1,0,0] - 8 - - try: - con_scaled[arg_max_mult-1,0, 0] = con_ind[arg_max_mult-1,0,0] - 5 - con_scaled[arg_max_mult+2,0, 0] = con_ind[arg_max_mult+2,0,0] - 5 - except: - pass - - con_scaled[:,0, 1][con_scaled[:,0, 1]<0] = 0 - con_scaled[:,0, 0][con_scaled[:,0, 0]<0] = 0 - - all_found_textline_polygons[j][i][:,0,1] = con_scaled[:,0, 1] - all_found_textline_polygons[j][i][:,0,0] = con_scaled[:,0, 0] - - return all_found_textline_polygons - def delete_regions_without_textlines( self, slopes, all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, index_by_text_par_con): @@ -4130,8 +3905,7 @@ class Eynollah: all_found_textline_polygons=[ all_found_textline_polygons ] - all_found_textline_polygons = self.dilate_textregions_contours_textline_version( - all_found_textline_polygons) + all_found_textline_polygons = dilate_textline_contours(all_found_textline_polygons) all_found_textline_polygons = self.filter_contours_inside_a_bigger_one( all_found_textline_polygons, None, textline_mask_tot_ea, type_contour="textline") @@ -4255,14 +4029,14 @@ class Eynollah: boxes, boxes_d, polygons_of_marginals, contours_tables = \ self.run_boxes_no_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, table_prediction, erosion_hurts) - ###polygons_of_marginals = self.dilate_textregions_contours(polygons_of_marginals) + ###polygons_of_marginals = dilate_textregion_contours(polygons_of_marginals) else: polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, \ regions_fully, regions_without_separators, polygons_of_marginals, contours_tables = \ self.run_boxes_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, img_only_regions, table_prediction, erosion_hurts, img_bin_light if self.light_version else None) - ###polygons_of_marginals = self.dilate_textregions_contours(polygons_of_marginals) + ###polygons_of_marginals = dilate_textregion_contours(polygons_of_marginals) if self.light_version: drop_label_in_full_layout = 4 textline_mask_tot_ea_org[img_revised_tab==drop_label_in_full_layout] = 0 @@ -4398,15 +4172,14 @@ class Eynollah: #print("text region early 3 in %.1fs", time.time() - t0) if self.light_version: - contours_only_text_parent = self.dilate_textregions_contours( - contours_only_text_parent) + contours_only_text_parent = dilate_textregion_contours(contours_only_text_parent) contours_only_text_parent , contours_only_text_parent_d_ordered = self.filter_contours_inside_a_bigger_one( contours_only_text_parent, contours_only_text_parent_d_ordered, text_only, marginal_cnts=polygons_of_marginals) #print("text region early 3.5 in %.1fs", time.time() - t0) txt_con_org , conf_contours_textregions = get_textregion_contours_in_org_image_light( contours_only_text_parent, self.image, confidence_matrix) - #txt_con_org = self.dilate_textregions_contours(txt_con_org) - #contours_only_text_parent = self.dilate_textregions_contours(contours_only_text_parent) + #txt_con_org = dilate_textregion_contours(txt_con_org) + #contours_only_text_parent = dilate_textregion_contours(contours_only_text_parent) else: txt_con_org , conf_contours_textregions = get_textregion_contours_in_org_image_light( contours_only_text_parent, self.image, confidence_matrix) @@ -4433,14 +4206,10 @@ class Eynollah: #slopes_marginals, all_found_textline_polygons_marginals, boxes_marginals, polygons_of_marginals, polygons_of_marginals, _ = \ # self.delete_regions_without_textlines(slopes_marginals, all_found_textline_polygons_marginals, # boxes_marginals, polygons_of_marginals, polygons_of_marginals, np.array(range(len(polygons_of_marginals)))) - #all_found_textline_polygons = self.dilate_textlines(all_found_textline_polygons) - #####all_found_textline_polygons = self.dilate_textline_contours(all_found_textline_polygons) - all_found_textline_polygons = self.dilate_textregions_contours_textline_version( - all_found_textline_polygons) + all_found_textline_polygons = dilate_textline_contours(all_found_textline_polygons) all_found_textline_polygons = self.filter_contours_inside_a_bigger_one( all_found_textline_polygons, None, textline_mask_tot_ea_org, type_contour="textline") - all_found_textline_polygons_marginals = self.dilate_textregions_contours_textline_version( - all_found_textline_polygons_marginals) + all_found_textline_polygons_marginals = dilate_textline_contours(all_found_textline_polygons_marginals) contours_only_text_parent, txt_con_org, conf_contours_textregions, all_found_textline_polygons, contours_only_text_parent_d_ordered, \ index_by_text_par_con = self.filter_contours_without_textline_inside( contours_only_text_parent, txt_con_org, all_found_textline_polygons, contours_only_text_parent_d_ordered, conf_contours_textregions) diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index 03d45b7..f228e53 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -328,6 +328,17 @@ def return_contours_of_interested_region_by_size(region_pre_p, pixel, min_area, return img_ret[:, :, 0] +def dilate_textline_contours(self, all_found_textline_polygons): + return [[np.array(make_valid(Polygon(poly[:, 0]).buffer(5)).exterior.coords[:-1], + dtype=np.uint)[:, np.newaxis] + for poly in region] + for region in all_found_textline_polygons] + +def dilate_textregion_contours(self, all_found_textline_polygons): + return [np.array(make_valid(Polygon(poly[:, 0]).buffer(5)).exterior.coords[:-1], + dtype=np.uint)[:, np.newaxis] + for poly in all_found_textline_polygons] + def make_valid(polygon: Polygon) -> Polygon: """Ensures shapely.geometry.Polygon object is valid by repeated rearrangement/simplification/enlargement.""" def isint(x): From f3faa29528ce7acdafa0c02fc2a9ec4732d91e4a Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 21 Aug 2025 12:59:03 +0200 Subject: [PATCH 260/492] refactor shapely converisons into contour2polygon / polygon2contour, also handle heterogeneous geometries --- src/eynollah/eynollah.py | 1 - src/eynollah/utils/contour.py | 107 ++++++++++++++++++++++++++-------- 2 files changed, 83 insertions(+), 25 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index fe233cb..54ace30 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -27,7 +27,6 @@ from loky import ProcessPoolExecutor import xml.etree.ElementTree as ET import cv2 import numpy as np -from shapely.geometry import Polygon from scipy.signal import find_peaks from scipy.ndimage import gaussian_filter1d from numba import cuda diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index f228e53..1123241 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -1,7 +1,15 @@ +from typing import Sequence, Union +from numbers import Number from functools import partial +import itertools + import cv2 import numpy as np -from shapely.geometry import Polygon +from scipy.sparse.csgraph import minimum_spanning_tree +from shapely.geometry import Polygon, LineString +from shapely.geometry.polygon import orient +from shapely import set_precision +from shapely.ops import unary_union, nearest_points from .rotate import rotate_image, rotation_image_new @@ -37,29 +45,28 @@ def get_text_region_boxes_by_given_contours(contours): return boxes, contours_new -def filter_contours_area_of_image(image, contours, hierarchy, max_area, min_area): +def filter_contours_area_of_image(image, contours, hierarchy, max_area=1.0, min_area=0.0, dilate=0): found_polygons_early = [] - for jv,c in enumerate(contours): - if len(c) < 3: # A polygon cannot have less than 3 points + for jv, contour in enumerate(contours): + if len(contour) < 3: # A polygon cannot have less than 3 points continue - polygon = Polygon([point[0] for point in c]) + polygon = contour2polygon(contour, dilate=dilate) area = polygon.area if (area >= min_area * np.prod(image.shape[:2]) and area <= max_area * np.prod(image.shape[:2]) and hierarchy[0][jv][3] == -1): - found_polygons_early.append(np.array(make_valid(polygon).exterior.coords[:-1], - dtype=np.uint)[:, np.newaxis]) + found_polygons_early.append(polygon2contour(polygon)) return found_polygons_early -def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, min_area): +def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area=1.0, min_area=0.0, dilate=0): found_polygons_early = [] - for jv,c in enumerate(contours): - if len(c) < 3: # A polygon cannot have less than 3 points + for jv, contour in enumerate(contours): + if len(contour) < 3: # A polygon cannot have less than 3 points continue - polygon = Polygon([point[0] for point in c]) - # area = cv2.contourArea(c) + polygon = contour2polygon(contour, dilate=dilate) + # area = cv2.contourArea(contour) area = polygon.area ##print(np.prod(thresh.shape[:2])) # Check that polygon has area greater than minimal area @@ -68,9 +75,8 @@ def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area, m area <= max_area * np.prod(image.shape[:2]) and # hierarchy[0][jv][3]==-1 True): - # print(c[0][0][1]) - found_polygons_early.append(np.array(make_valid(polygon).exterior.coords[:-1], - dtype=np.uint)[:, np.newaxis]) + # print(contour[0][0][1]) + found_polygons_early.append(polygon2contour(polygon)) return found_polygons_early def find_new_features_of_contours(contours_main): @@ -328,16 +334,29 @@ def return_contours_of_interested_region_by_size(region_pre_p, pixel, min_area, return img_ret[:, :, 0] -def dilate_textline_contours(self, all_found_textline_polygons): - return [[np.array(make_valid(Polygon(poly[:, 0]).buffer(5)).exterior.coords[:-1], - dtype=np.uint)[:, np.newaxis] - for poly in region] +def dilate_textline_contours(all_found_textline_polygons): + return [[polygon2contour(contour2polygon(contour, dilate=5)) + for contour in region] for region in all_found_textline_polygons] -def dilate_textregion_contours(self, all_found_textline_polygons): - return [np.array(make_valid(Polygon(poly[:, 0]).buffer(5)).exterior.coords[:-1], - dtype=np.uint)[:, np.newaxis] - for poly in all_found_textline_polygons] +def dilate_textregion_contours(all_found_textline_polygons): + return [polygon2contour(contour2polygon(contour, dilate=5)) + for contour in all_found_textline_polygons] + +def contour2polygon(contour: Union[np.ndarray, Sequence[Sequence[Sequence[Number]]]], dilate=0): + polygon = Polygon([point[0] for point in contour]) + if dilate: + polygon = polygon.buffer(dilate) + if polygon.geom_type == 'GeometryCollection': + # heterogeneous result: filter zero-area shapes (LineString, Point) + polygon = unary_union([geom for geom in polygon.geoms if geom.area > 0]) + if polygon.geom_type == 'MultiPolygon': + # homogeneous result: construct convex hull to connect + polygon = join_polygons(polygon.geoms) + return make_valid(polygon) + +def polygon2contour(polygon: Polygon) -> np.ndarray: + return np.array(polygon.exterior.coords[:-1], dtype=np.uint)[:, np.newaxis] def make_valid(polygon: Polygon) -> Polygon: """Ensures shapely.geometry.Polygon object is valid by repeated rearrangement/simplification/enlargement.""" @@ -346,7 +365,7 @@ def make_valid(polygon: Polygon) -> Polygon: # make sure rounding does not invalidate if not all(map(isint, np.array(polygon.exterior.coords).flat)) and polygon.minimum_clearance < 1.0: polygon = Polygon(np.round(polygon.exterior.coords)) - points = list(polygon.exterior.coords) + points = list(polygon.exterior.coords[:-1]) # try by re-arranging points for split in range(1, len(points)): if polygon.is_valid or polygon.simplify(polygon.area).is_valid: @@ -368,3 +387,43 @@ def make_valid(polygon: Polygon) -> Polygon: polygon = polygon.buffer(tolerance) assert polygon.is_valid, polygon.wkt return polygon + +def join_polygons(polygons: Sequence[Polygon], scale=20) -> Polygon: + """construct concave hull (alpha shape) from input polygons by connecting their pairwise nearest points""" + # ensure input polygons are simply typed and all oriented equally + polygons = [orient(poly) + for poly in itertools.chain.from_iterable( + [poly.geoms + if poly.geom_type in ['MultiPolygon', 'GeometryCollection'] + else [poly] + for poly in polygons])] + npoly = len(polygons) + if npoly == 1: + return polygons[0] + # find min-dist path through all polygons (travelling salesman) + pairs = itertools.combinations(range(npoly), 2) + dists = np.zeros((npoly, npoly), dtype=float) + for i, j in pairs: + dist = polygons[i].distance(polygons[j]) + if dist < 1e-5: + dist = 1e-5 # if pair merely touches, we still need to get an edge + dists[i, j] = dist + dists[j, i] = dist + dists = minimum_spanning_tree(dists, overwrite=True) + # add bridge polygons (where necessary) + for prevp, nextp in zip(*dists.nonzero()): + prevp = polygons[prevp] + nextp = polygons[nextp] + nearest = nearest_points(prevp, nextp) + bridgep = orient(LineString(nearest).buffer(max(1, scale/5), resolution=1), -1) + polygons.append(bridgep) + jointp = unary_union(polygons) + assert jointp.geom_type == 'Polygon', jointp.wkt + # follow-up calculations will necessarily be integer; + # so anticipate rounding here and then ensure validity + jointp2 = set_precision(jointp, 1.0) + if jointp2.geom_type != 'Polygon' or not jointp2.is_valid: + jointp2 = Polygon(np.round(jointp.exterior.coords)) + jointp2 = make_valid(jointp2) + assert jointp2.geom_type == 'Polygon', jointp2.wkt + return jointp2 From 7a9e8256ee8a4c777baa0bd972697cece3e269a5 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 21 Aug 2025 13:00:31 +0200 Subject: [PATCH 261/492] =?UTF-8?q?increase=20dilatation:=20textregions/li?= =?UTF-8?q?nes=20(5=E2=86=926),=20seplines=20(0=E2=86=921)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/eynollah/eynollah.py | 10 +++++----- src/eynollah/utils/contour.py | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 54ace30..8cb1d52 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -1714,8 +1714,8 @@ class Eynollah: mask_images_only=(prediction_regions_org[:,:] ==2)*1 polygons_seplines, hir_seplines = return_contours_of_image(mask_lines_only) - polygons_seplines = textline_con_fil = filter_contours_area_of_image( - mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001) + polygons_seplines = filter_contours_area_of_image( + mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001, dilate=1) polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) @@ -1909,7 +1909,7 @@ class Eynollah: #plt.show() polygons_seplines = filter_contours_area_of_image( - mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001) + mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001, dilate=1) test_khat = np.zeros(prediction_regions_org.shape) test_khat = cv2.fillPoly(test_khat, pts = polygons_seplines, color=(1,1,1)) @@ -2022,7 +2022,7 @@ class Eynollah: polygons_seplines, hir_seplines = return_contours_of_image(mask_lines_only) polygons_seplines = filter_contours_area_of_image( - mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001) + mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001, dilate=1) polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only, 1, 0.00001) polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only, 1, 0.00001) @@ -2071,7 +2071,7 @@ class Eynollah: polygons_seplines, hir_seplines = return_contours_of_image(mask_lines_only) polygons_seplines = filter_contours_area_of_image( - mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001) + mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001, dilate=1) polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index 1123241..c571be6 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -335,12 +335,12 @@ def return_contours_of_interested_region_by_size(region_pre_p, pixel, min_area, return img_ret[:, :, 0] def dilate_textline_contours(all_found_textline_polygons): - return [[polygon2contour(contour2polygon(contour, dilate=5)) + return [[polygon2contour(contour2polygon(contour, dilate=6)) for contour in region] for region in all_found_textline_polygons] def dilate_textregion_contours(all_found_textline_polygons): - return [polygon2contour(contour2polygon(contour, dilate=5)) + return [polygon2contour(contour2polygon(contour, dilate=6)) for contour in all_found_textline_polygons] def contour2polygon(contour: Union[np.ndarray, Sequence[Sequence[Sequence[Number]]]], dilate=0): From 11e143afee1f446bfef7c6b19ba720e5cddb981d Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Fri, 29 Aug 2025 12:16:56 +0200 Subject: [PATCH 262/492] polygon2contour: avoid overflow --- src/eynollah/utils/contour.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index c571be6..2cd7080 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -356,7 +356,8 @@ def contour2polygon(contour: Union[np.ndarray, Sequence[Sequence[Sequence[Number return make_valid(polygon) def polygon2contour(polygon: Polygon) -> np.ndarray: - return np.array(polygon.exterior.coords[:-1], dtype=np.uint)[:, np.newaxis] + polygon = np.array(polygon.exterior.coords[:-1], dtype=int) + return np.maximum(0, polygon).astype(np.uint)[:, np.newaxis] def make_valid(polygon: Polygon) -> Polygon: """Ensures shapely.geometry.Polygon object is valid by repeated rearrangement/simplification/enlargement.""" From 235539a35071559f8929bfcda9cb47d506c23d58 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Fri, 29 Aug 2025 12:19:37 +0200 Subject: [PATCH 263/492] filter_contours_without_textline_inside: avoid removing from identical lists twice --- src/eynollah/eynollah.py | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 8cb1d52..b636b09 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -3764,7 +3764,9 @@ class Eynollah: return contours def filter_contours_without_textline_inside( - self, contours,text_con_org, contours_textline, contours_only_text_parent_d_ordered, conf_contours_textregions): + self, contours, text_con_org, contours_textline, + contours_only_text_parent_d_ordered, + conf_contours_textregions): ###contours_txtline_of_all_textregions = [] ###for jj in range(len(contours_textline)): ###contours_txtline_of_all_textregions = contours_txtline_of_all_textregions + contours_textline[jj] @@ -3788,23 +3790,23 @@ class Eynollah: ###if np.any(results==1): ###contours_with_textline.append(con_tr) - textregion_index_to_del = [] + textregion_index_to_del = set() for index_textregion, textlines_textregion in enumerate(contours_textline): - if len(textlines_textregion)==0: - textregion_index_to_del.append(index_textregion) + if len(textlines_textregion) == 0: + textregion_index_to_del.add(index_textregion) + def filterfun(lis): + if len(lis) == 0: + return [] + if len(textregion_index_to_del) == 0: + return lis + return list(np.delete(lis, list(textregion_index_to_del))) - uniqe_args_trs = np.unique(textregion_index_to_del) - uniqe_args_trs_sorted = np.sort(uniqe_args_trs)[::-1] - - for ind_u_a_trs in uniqe_args_trs_sorted: - conf_contours_textregions.pop(ind_u_a_trs) - contours.pop(ind_u_a_trs) - contours_textline.pop(ind_u_a_trs) - text_con_org.pop(ind_u_a_trs) - if len(contours_only_text_parent_d_ordered) > 0: - contours_only_text_parent_d_ordered.pop(ind_u_a_trs) - - return contours, text_con_org, conf_contours_textregions, contours_textline, contours_only_text_parent_d_ordered, np.array(range(len(contours))) + return (filterfun(contours), + filterfun(text_con_org), + filterfun(conf_contours_textregions), + filterfun(contours_textline), + filterfun(contours_only_text_parent_d_ordered), + np.arange(len(contours) - len(textregion_index_to_del))) def delete_regions_without_textlines( self, slopes, all_found_textline_polygons, boxes_text, txt_con_org, From bca2ae3d78fcc6536c5365c9b93a0143ebbbf658 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Fri, 29 Aug 2025 12:37:44 +0200 Subject: [PATCH 264/492] get_marginals: exit early if no peaks found to avoid spurious overlap mask --- src/eynollah/utils/marginals.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/eynollah/utils/marginals.py b/src/eynollah/utils/marginals.py index a29e50d..22ada4e 100644 --- a/src/eynollah/utils/marginals.py +++ b/src/eynollah/utils/marginals.py @@ -94,6 +94,8 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_ve except: point_left=first_nonzero + if point_left == first_nonzero and point_right == last_nonzero: + return text_regions if point_right>=mask_marginals.shape[1]: From 9b5182c1c07ebbdb65ea81978f9c667917b82743 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 26 Aug 2025 21:00:33 +0200 Subject: [PATCH 265/492] utils: introduce box2rect and box2slice --- src/eynollah/utils/__init__.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index c479744..bbf30a8 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -300,9 +300,17 @@ def return_x_start_end_mothers_childs_and_type_of_reading_order( x_end_with_child_without_mother, new_main_sep_y) +def box2rect(box: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]: + return (box[1], box[1] + box[3], + box[0], box[0] + box[2]) + +def box2slice(box: Tuple[int, int, int, int]) -> Tuple[slice, slice]: + return (slice(box[1], box[1] + box[3]), + slice(box[0], box[0] + box[2])) + def crop_image_inside_box(box, img_org_copy): - image_box = img_org_copy[box[1] : box[1] + box[3], box[0] : box[0] + box[2]] - return image_box, [box[1], box[1] + box[3], box[0], box[0] + box[2]] + image_box = img_org_copy[box2slice(box)] + return image_box, box2rect(box) def otsu_copy_binary(img): img_r = np.zeros((img.shape[0], img.shape[1], 3)) From 5bff2d156ab32b72470b547870874da3053a3d7b Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 26 Aug 2025 21:02:43 +0200 Subject: [PATCH 266/492] use box2rect instead of crop_image_inside_box when no image needed --- src/eynollah/eynollah.py | 8 +++++--- src/eynollah/utils/separate_lines.py | 8 +++++--- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index b636b09..6847c1f 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -98,6 +98,8 @@ from .utils.resize import resize_image from .utils import ( boosting_headers_by_longshot_region_segmentation, crop_image_inside_box, + box2rect, + box2slice, find_num_col, otsu_copy_binary, put_drop_out_from_only_drop_model, @@ -1542,7 +1544,7 @@ class Eynollah: all_found_textline_polygons.append(textlines_ins[::-1]) slopes.append(slope_deskew) - _, crop_coor = crop_image_inside_box(boxes[index],image_page_rotated) + crop_coor = box2rect(boxes[index]) all_box_coord.append(crop_coor) return all_found_textline_polygons, boxes, contours, contours_par, all_box_coord, np.array(range(len(contours_par))), slopes @@ -1754,7 +1756,7 @@ class Eynollah: ##polygons_of_images_fin.append(ploy_img_ind) box = cv2.boundingRect(ploy_img_ind) - _, page_coord_img = crop_image_inside_box(box, text_regions_p_true) + page_coord_img = box2rect(box) # cont_page.append(np.array([[page_coord[2], page_coord[0]], # [page_coord[3], page_coord[0]], # [page_coord[3], page_coord[1]], @@ -1768,7 +1770,7 @@ class Eynollah: if h < 150 or w < 150: pass else: - _, page_coord_img = crop_image_inside_box(box, text_regions_p_true) + page_coord_img = box2rect(box) # cont_page.append(np.array([[page_coord[2], page_coord[0]], # [page_coord[3], page_coord[0]], # [page_coord[3], page_coord[1]], diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index ffbfff7..b1a90b5 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -18,6 +18,8 @@ from .contour import ( from . import ( find_num_col_deskew, crop_image_inside_box, + box2rect, + box2slice, ) def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): @@ -1540,7 +1542,7 @@ def do_work_of_slopes_new( logger.debug('enter do_work_of_slopes_new') x, y, w, h = box_text - _, crop_coor = crop_image_inside_box(box_text, image_page_rotated) + crop_coor = box2rect(box_text) mask_textline = np.zeros(textline_mask_tot_ea.shape) mask_textline = cv2.fillPoly(mask_textline, pts=[contour], color=(1,1,1)) all_text_region_raw = textline_mask_tot_ea * mask_textline @@ -1631,7 +1633,7 @@ def do_work_of_slopes_new_curved( slope_for_all = slope_deskew slope = slope_for_all - _, crop_coor = crop_image_inside_box(box_text, image_page_rotated) + crop_coor = box2rect(box_text) if abs(slope_for_all) < 45: textline_region_in_image = np.zeros(textline_mask_tot_ea.shape) @@ -1685,7 +1687,7 @@ def do_work_of_slopes_new_light( logger.debug('enter do_work_of_slopes_new_light') x, y, w, h = box_text - _, crop_coor = crop_image_inside_box(box_text, image_page_rotated) + crop_coor = box2rect(box_text) mask_textline = np.zeros(textline_mask_tot_ea.shape) mask_textline = cv2.fillPoly(mask_textline, pts=[contour], color=(1,1,1)) all_text_region_raw = textline_mask_tot_ea * mask_textline From 5b16c2fc0066f3e1542dfdf7a1fe9f9241401c38 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 26 Aug 2025 21:05:40 +0200 Subject: [PATCH 267/492] avoid pulling unused 'image_page_rotated' through functions --- src/eynollah/eynollah.py | 48 +++++++++++++--------------- src/eynollah/utils/separate_lines.py | 6 ++-- 2 files changed, 25 insertions(+), 29 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 6847c1f..8f66af5 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -1521,7 +1521,7 @@ class Eynollah: self.logger.debug("exit extract_text_regions") return prediction_regions, prediction_regions2 - def get_slopes_and_deskew_new_light2(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, slope_deskew): + def get_slopes_and_deskew_new_light2(self, contours, contours_par, textline_mask_tot, boxes, slope_deskew): polygons_of_textlines = return_contours_of_interested_region(textline_mask_tot,1,0.00001) M_main_tot = [cv2.moments(polygons_of_textlines[j]) @@ -1549,13 +1549,12 @@ class Eynollah: return all_found_textline_polygons, boxes, contours, contours_par, all_box_coord, np.array(range(len(contours_par))), slopes - def get_slopes_and_deskew_new_light(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, slope_deskew): + def get_slopes_and_deskew_new_light(self, contours, contours_par, textline_mask_tot, boxes, slope_deskew): if not len(contours): return [], [], [], [], [], [], [] self.logger.debug("enter get_slopes_and_deskew_new_light") results = self.executor.map(partial(do_work_of_slopes_new_light, textline_mask_tot_ea=textline_mask_tot, - image_page_rotated=image_page_rotated, slope_deskew=slope_deskew,textline_light=self.textline_light, logger=self.logger,), boxes, contours, contours_par, range(len(contours_par))) @@ -1563,13 +1562,12 @@ class Eynollah: self.logger.debug("exit get_slopes_and_deskew_new_light") return tuple(zip(*results)) - def get_slopes_and_deskew_new(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, slope_deskew): + def get_slopes_and_deskew_new(self, contours, contours_par, textline_mask_tot, boxes, slope_deskew): if not len(contours): return [], [], [], [], [], [], [] self.logger.debug("enter get_slopes_and_deskew_new") results = self.executor.map(partial(do_work_of_slopes_new, textline_mask_tot_ea=textline_mask_tot, - image_page_rotated=image_page_rotated, slope_deskew=slope_deskew, MAX_SLOPE=MAX_SLOPE, KERNEL=KERNEL, @@ -1580,13 +1578,12 @@ class Eynollah: self.logger.debug("exit get_slopes_and_deskew_new") return tuple(zip(*results)) - def get_slopes_and_deskew_new_curved(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, mask_texts_only, num_col, scale_par, slope_deskew): + def get_slopes_and_deskew_new_curved(self, contours, contours_par, textline_mask_tot, boxes, mask_texts_only, num_col, scale_par, slope_deskew): if not len(contours): return [], [], [], [], [], [], [] self.logger.debug("enter get_slopes_and_deskew_new_curved") results = self.executor.map(partial(do_work_of_slopes_new_curved, textline_mask_tot_ea=textline_mask_tot, - image_page_rotated=image_page_rotated, mask_texts_only=mask_texts_only, num_col=num_col, scale_par=scale_par, @@ -2935,10 +2932,10 @@ class Eynollah: return slope_deskew def run_marginals( - self, image_page, textline_mask_tot_ea, mask_images, mask_lines, + self, textline_mask_tot_ea, mask_images, mask_lines, num_col_classifier, slope_deskew, text_regions_p_1, table_prediction): - image_page_rotated, textline_mask_tot = image_page[:, :], textline_mask_tot_ea[:, :] + textline_mask_tot = textline_mask_tot_ea[:, :] textline_mask_tot[mask_images[:, :] == 1] = 0 text_regions_p_1[mask_lines[:, :] == 1] = 3 @@ -2957,10 +2954,7 @@ class Eynollah: except Exception as e: self.logger.error("exception %s", e) - if self.plotter: - self.plotter.save_plot_of_layout_main_all(text_regions_p, image_page) - self.plotter.save_plot_of_layout_main(text_regions_p, image_page) - return textline_mask_tot, text_regions_p, image_page_rotated + return textline_mask_tot, text_regions_p def run_boxes_no_full_layout( self, image_page, textline_mask_tot, text_regions_p, @@ -3112,7 +3106,7 @@ class Eynollah: text_regions_p[:,:][table_prediction[:,:]==1] = 10 img_revised_tab = text_regions_p[:,:] if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - image_page_rotated_n, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = \ + _, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = \ rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, table_prediction, slope_deskew) text_regions_p_1_n = resize_image(text_regions_p_1_n,text_regions_p.shape[0],text_regions_p.shape[1]) @@ -3132,7 +3126,7 @@ class Eynollah: else: if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - image_page_rotated_n, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = \ + _, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = \ rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, table_prediction, slope_deskew) text_regions_p_1_n = resize_image(text_regions_p_1_n,text_regions_p.shape[0],text_regions_p.shape[1]) @@ -4010,9 +4004,12 @@ class Eynollah: text_regions_p_1 = resize_image(text_regions_p_1,img_h_new, img_w_new ) table_prediction = resize_image(table_prediction,img_h_new, img_w_new ) - textline_mask_tot, text_regions_p, image_page_rotated = \ - self.run_marginals(image_page, textline_mask_tot_ea, mask_images, mask_lines, + textline_mask_tot, text_regions_p = \ + self.run_marginals(textline_mask_tot_ea, mask_images, mask_lines, num_col_classifier, slope_deskew, text_regions_p_1, table_prediction) + if self.plotter: + self.plotter.save_plot_of_layout_main_all(text_regions_p, image_page) + self.plotter.save_plot_of_layout_main(text_regions_p, image_page) if self.light_version and num_col_classifier in (1,2): image_page = resize_image(image_page,org_h_l_m, org_w_l_m ) @@ -4021,7 +4018,6 @@ class Eynollah: textline_mask_tot = resize_image(textline_mask_tot,org_h_l_m, org_w_l_m ) text_regions_p_1 = resize_image(text_regions_p_1,org_h_l_m, org_w_l_m ) table_prediction = resize_image(table_prediction,org_h_l_m, org_w_l_m ) - image_page_rotated = resize_image(image_page_rotated,org_h_l_m, org_w_l_m ) self.logger.info("detection of marginals took %.1fs", time.time() - t1) #print("text region early 2 marginal in %.1fs", time.time() - t0) @@ -4197,11 +4193,11 @@ class Eynollah: all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, \ all_box_coord, index_by_text_par_con, slopes = self.get_slopes_and_deskew_new_light2( txt_con_org, contours_only_text_parent, textline_mask_tot_ea_org, - image_page_rotated, boxes_text, slope_deskew) + boxes_text, slope_deskew) all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, \ all_box_coord_marginals, _, slopes_marginals = self.get_slopes_and_deskew_new_light2( polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea_org, - image_page_rotated, boxes_marginals, slope_deskew) + boxes_marginals, slope_deskew) #slopes, all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, index_by_text_par_con = \ # self.delete_regions_without_textlines(slopes, all_found_textline_polygons, @@ -4221,11 +4217,11 @@ class Eynollah: all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, all_box_coord, \ index_by_text_par_con, slopes = self.get_slopes_and_deskew_new_light( txt_con_org, contours_only_text_parent, textline_mask_tot_ea, - image_page_rotated, boxes_text, slope_deskew) + boxes_text, slope_deskew) all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, \ all_box_coord_marginals, _, slopes_marginals = self.get_slopes_and_deskew_new_light( polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea, - image_page_rotated, boxes_marginals, slope_deskew) + boxes_marginals, slope_deskew) #all_found_textline_polygons = self.filter_contours_inside_a_bigger_one( # all_found_textline_polygons, textline_mask_tot_ea_org, type_contour="textline") else: @@ -4233,25 +4229,25 @@ class Eynollah: all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, \ all_box_coord, index_by_text_par_con, slopes = self.get_slopes_and_deskew_new( txt_con_org, contours_only_text_parent, textline_mask_tot_ea, - image_page_rotated, boxes_text, slope_deskew) + boxes_text, slope_deskew) all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, \ all_box_coord_marginals, _, slopes_marginals = self.get_slopes_and_deskew_new( polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea, - image_page_rotated, boxes_marginals, slope_deskew) + boxes_marginals, slope_deskew) else: scale_param = 1 textline_mask_tot_ea_erode = cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=2) all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, \ all_box_coord, index_by_text_par_con, slopes = self.get_slopes_and_deskew_new_curved( txt_con_org, contours_only_text_parent, textline_mask_tot_ea_erode, - image_page_rotated, boxes_text, text_only, + boxes_text, text_only, num_col_classifier, scale_param, slope_deskew) all_found_textline_polygons = small_textlines_to_parent_adherence2( all_found_textline_polygons, textline_mask_tot_ea, num_col_classifier) all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, \ all_box_coord_marginals, _, slopes_marginals = self.get_slopes_and_deskew_new_curved( polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea_erode, - image_page_rotated, boxes_marginals, text_only, + boxes_marginals, text_only, num_col_classifier, scale_param, slope_deskew) all_found_textline_polygons_marginals = small_textlines_to_parent_adherence2( all_found_textline_polygons_marginals, textline_mask_tot_ea, num_col_classifier) diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index b1a90b5..dcddc65 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1532,7 +1532,7 @@ def get_smallest_skew(img, sigma_des, angles, logger=None, plotter=None, map=map def do_work_of_slopes_new( box_text, contour, contour_par, index_r_con, - textline_mask_tot_ea, image_page_rotated, slope_deskew, + textline_mask_tot_ea, slope_deskew, logger=None, MAX_SLOPE=999, KERNEL=None, plotter=None ): if KERNEL is None: @@ -1590,7 +1590,7 @@ def do_work_of_slopes_new( def do_work_of_slopes_new_curved( box_text, contour, contour_par, index_r_con, - textline_mask_tot_ea, image_page_rotated, mask_texts_only, num_col, scale_par, slope_deskew, + textline_mask_tot_ea, mask_texts_only, num_col, scale_par, slope_deskew, logger=None, MAX_SLOPE=999, KERNEL=None, plotter=None ): if KERNEL is None: @@ -1679,7 +1679,7 @@ def do_work_of_slopes_new_curved( def do_work_of_slopes_new_light( box_text, contour, contour_par, index_r_con, - textline_mask_tot_ea, image_page_rotated, slope_deskew, textline_light, + textline_mask_tot_ea, slope_deskew, textline_light, logger=None ): if logger is None: From 4337d6298596b1272c35b909a0ec0ee50adc4ba2 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 26 Aug 2025 21:06:36 +0200 Subject: [PATCH 268/492] =?UTF-8?q?contours:=20rename=20'pixel'=20?= =?UTF-8?q?=E2=86=92=20'label'=20for=20clarity?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/eynollah/utils/contour.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index 2cd7080..0700ed4 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -141,12 +141,12 @@ def return_parent_contours(contours, hierarchy): if hierarchy[0][i][3] == -1] return contours_parent -def return_contours_of_interested_region(region_pre_p, pixel, min_area=0.0002): +def return_contours_of_interested_region(region_pre_p, label, min_area=0.0002): # pixels of images are identified by 5 if len(region_pre_p.shape) == 3: - cnts_images = (region_pre_p[:, :, 0] == pixel) * 1 + cnts_images = (region_pre_p[:, :, 0] == label) * 1 else: - cnts_images = (region_pre_p[:, :] == pixel) * 1 + cnts_images = (region_pre_p[:, :] == label) * 1 cnts_images = cnts_images.astype(np.uint8) cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) @@ -267,12 +267,12 @@ def get_textregion_contours_in_org_image_light(cnts, img, confidence_matrix): confs.append(np.sum(confidence_matrix * cnt_mask) / np.sum(cnt_mask)) return cnts, confs -def return_contours_of_interested_textline(region_pre_p, pixel): +def return_contours_of_interested_textline(region_pre_p, label): # pixels of images are identified by 5 if len(region_pre_p.shape) == 3: - cnts_images = (region_pre_p[:, :, 0] == pixel) * 1 + cnts_images = (region_pre_p[:, :, 0] == label) * 1 else: - cnts_images = (region_pre_p[:, :] == pixel) * 1 + cnts_images = (region_pre_p[:, :] == label) * 1 cnts_images = cnts_images.astype(np.uint8) cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) @@ -295,12 +295,12 @@ def return_contours_of_image(image): contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) return contours, hierarchy -def return_contours_of_interested_region_by_min_size(region_pre_p, pixel, min_size=0.00003): +def return_contours_of_interested_region_by_min_size(region_pre_p, label, min_size=0.00003): # pixels of images are identified by 5 if len(region_pre_p.shape) == 3: - cnts_images = (region_pre_p[:, :, 0] == pixel) * 1 + cnts_images = (region_pre_p[:, :, 0] == label) * 1 else: - cnts_images = (region_pre_p[:, :] == pixel) * 1 + cnts_images = (region_pre_p[:, :] == label) * 1 cnts_images = cnts_images.astype(np.uint8) cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) @@ -313,12 +313,12 @@ def return_contours_of_interested_region_by_min_size(region_pre_p, pixel, min_si return contours_imgs -def return_contours_of_interested_region_by_size(region_pre_p, pixel, min_area, max_area): +def return_contours_of_interested_region_by_size(region_pre_p, label, min_area, max_area): # pixels of images are identified by 5 if len(region_pre_p.shape) == 3: - cnts_images = (region_pre_p[:, :, 0] == pixel) * 1 + cnts_images = (region_pre_p[:, :, 0] == label) * 1 else: - cnts_images = (region_pre_p[:, :] == pixel) * 1 + cnts_images = (region_pre_p[:, :] == label) * 1 cnts_images = cnts_images.astype(np.uint8) cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) From f458e3ece01aa7142c77b930dbdf1843c6835d85 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 26 Aug 2025 21:07:18 +0200 Subject: [PATCH 269/492] writer: SeparatorRegion needs SeparatorRegionType (not ImageRegionType) --- src/eynollah/writer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index 92e353f..01c86de 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -296,7 +296,7 @@ class EynollahXmlWriter(): page.add_ImageRegion(ImageRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_img[mm], page_coord)))) for mm in range(len(polygons_lines_to_be_written_in_xml)): - page.add_SeparatorRegion(ImageRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(polygons_lines_to_be_written_in_xml[mm], [0 , 0, 0, 0])))) + page.add_SeparatorRegion(SeparatorRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(polygons_lines_to_be_written_in_xml[mm], [0 , 0, 0, 0])))) for mm in range(len(found_polygons_tables)): page.add_TableRegion(TableRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_tables[mm], page_coord)))) From dc0caad512219a2e08da3841c215167eed1526bb Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 26 Aug 2025 21:07:50 +0200 Subject: [PATCH 270/492] writer: use @type='heading' instead of 'header' --- src/eynollah/writer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index 01c86de..b9e906a 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -268,7 +268,7 @@ class EynollahXmlWriter(): self.logger.debug('len(found_polygons_text_region_h) %s', len(found_polygons_text_region_h)) for mm in range(len(found_polygons_text_region_h)): - textregion = TextRegionType(id=counter.next_region_id, type_='header', + textregion = TextRegionType(id=counter.next_region_id, type_='heading', Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_h[mm], page_coord))) page.add_TextRegion(textregion) From abf5c0f845255f247ce4991d18a5b3b8a3808f4e Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 2 Sep 2025 15:01:52 +0200 Subject: [PATCH 271/492] get_smallest_skew: when shifting search range of rotation angle, compare resulting (maximum) variances instead of blindly assuming the new range is better --- src/eynollah/utils/separate_lines.py | 32 +++++++++++++++++----------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index dcddc65..3363367 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1486,33 +1486,36 @@ def return_deskew_slop(img_patch_org, sigma_des,n_tot_angles=100, if main_page and img_patch_org.shape[1] > img_patch_org.shape[0]: angles = np.array([-45, 0, 45, 90,]) - angle = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) + angle, _ = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) angles = np.linspace(angle - 22.5, angle + 22.5, n_tot_angles) - angle = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) + angle, _ = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) elif main_page: angles = np.linspace(-12, 12, n_tot_angles)#np.array([0 , 45 , 90 , -45]) - angle = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) + angle, var = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) early_slope_edge=11 if abs(angle) > early_slope_edge: if angle < 0: - angles = np.linspace(-90, -12, n_tot_angles) + angles2 = np.linspace(-90, -12, n_tot_angles) else: - angles = np.linspace(90, 12, n_tot_angles) - angle = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) + angles2 = np.linspace(90, 12, n_tot_angles) + angle2, var2 = get_smallest_skew(img_resized, sigma_des, angles2, map=map, logger=logger, plotter=plotter) + if var2 > var: + angle = angle2 else: angles = np.linspace(-25, 25, int(0.5 * n_tot_angles) + 10) - angle = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) + angle, var = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) early_slope_edge=22 if abs(angle) > early_slope_edge: if angle < 0: - angles = np.linspace(-90, -25, int(0.5 * n_tot_angles) + 10) + angles2 = np.linspace(-90, -25, int(0.5 * n_tot_angles) + 10) else: - angles = np.linspace(90, 25, int(0.5 * n_tot_angles) + 10) - angle = get_smallest_skew(img_resized, sigma_des, angles, map=map, logger=logger, plotter=plotter) - + angles2 = np.linspace(90, 25, int(0.5 * n_tot_angles) + 10) + angle2, var2 = get_smallest_skew(img_resized, sigma_des, angles2, map=map, logger=logger, plotter=plotter) + if var2 > var: + angle = angle2 return angle def get_smallest_skew(img, sigma_des, angles, logger=None, plotter=None, map=map): @@ -1524,11 +1527,14 @@ def get_smallest_skew(img, sigma_des, angles, logger=None, plotter=None, map=map try: var_res = np.array(results) assert var_res.any() - angle = angles[np.argmax(var_res)] + idx = np.argmax(var_res) + angle = angles[idx] + var = var_res[idx] except: logger.exception("cannot determine best angle among %s", str(angles)) angle = 0 - return angle + var = 0 + return angle, var def do_work_of_slopes_new( box_text, contour, contour_par, index_r_con, From 8be2c7977101080856e4d6e43660a0de055b86c9 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 3 Sep 2025 09:01:18 +0200 Subject: [PATCH 272/492] Revert "deskewing with faster multiprocessing" This reverts commit 5db3e9fa64d39c128bd9bee27c9d0fb73b3459d2. --- src/eynollah/eynollah.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 8f66af5..b450b17 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -2926,6 +2926,7 @@ class Eynollah: #print(textline_mask_tot_ea.shape, 'textline_mask_tot_ea deskew') slope_deskew = return_deskew_slop(cv2.erode(textline_mask_tot_ea, KERNEL, iterations=2), 2, 30, True, map=self.executor.map, logger=self.logger, plotter=self.plotter) + if self.plotter: self.plotter.save_deskewed_image(slope_deskew) self.logger.info("slope_deskew: %.2f°", slope_deskew) From 31f240c3b8a6eaa034b5ae02cf009930e8275725 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 2 Sep 2025 15:04:04 +0200 Subject: [PATCH 273/492] do_image_rotation, do_work_of_slopes_new_curved: pass arrays via shared memory --- src/eynollah/eynollah.py | 12 +++++--- src/eynollah/utils/separate_lines.py | 12 ++++++-- src/eynollah/utils/shm.py | 45 ++++++++++++++++++++++++++++ 3 files changed, 62 insertions(+), 7 deletions(-) create mode 100644 src/eynollah/utils/shm.py diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index b450b17..42af8e4 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -95,6 +95,7 @@ from .utils.drop_capitals import ( ) from .utils.marginals import get_marginals from .utils.resize import resize_image +from .utils.shm import share_ndarray from .utils import ( boosting_headers_by_longshot_region_segmentation, crop_image_inside_box, @@ -1582,9 +1583,11 @@ class Eynollah: if not len(contours): return [], [], [], [], [], [], [] self.logger.debug("enter get_slopes_and_deskew_new_curved") - results = self.executor.map(partial(do_work_of_slopes_new_curved, - textline_mask_tot_ea=textline_mask_tot, - mask_texts_only=mask_texts_only, + with share_ndarray(textline_mask_tot) as textline_mask_tot_shared: + with share_ndarray(mask_texts_only) as mask_texts_only_shared: + results = self.executor.map(partial(do_work_of_slopes_new_curved, + textline_mask_tot_ea=textline_mask_tot_shared, + mask_texts_only=mask_texts_only_shared, num_col=num_col, scale_par=scale_par, slope_deskew=slope_deskew, @@ -1593,7 +1596,8 @@ class Eynollah: logger=self.logger, plotter=self.plotter,), boxes, contours, contours_par, range(len(contours_par))) - #textline_polygons, boxes, text_regions, text_regions_par, box_coord, index_text_con, slopes = zip(*results) + #textline_polygons, boxes, text_regions, text_regions_par, box_coord, index_text_con, slopes = zip(*results) + results = list(results) # exhaust prior to release self.logger.debug("exit get_slopes_and_deskew_new_curved") return tuple(zip(*results)) diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 3363367..e4bb953 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -15,6 +15,7 @@ from .contour import ( return_contours_of_interested_textline, find_contours_mean_y_diff, ) +from .shm import share_ndarray, wrap_ndarray_shared from . import ( find_num_col_deskew, crop_image_inside_box, @@ -1454,7 +1455,8 @@ def separate_lines_new2(img_crop, thetha, num_col, slope_region, logger=None, pl return img_patch_interest_revised -def do_image_rotation(angle, img, sigma_des, logger=None): +@wrap_ndarray_shared(kw='img') +def do_image_rotation(angle, img=None, sigma_des=1.0, logger=None): if logger is None: logger = getLogger(__package__) img_rot = rotate_image(img, angle) @@ -1521,7 +1523,8 @@ def return_deskew_slop(img_patch_org, sigma_des,n_tot_angles=100, def get_smallest_skew(img, sigma_des, angles, logger=None, plotter=None, map=map): if logger is None: logger = getLogger(__package__) - results = list(map(partial(do_image_rotation, img=img, sigma_des=sigma_des, logger=logger), angles)) + with share_ndarray(img) as img_shared: + results = list(map(partial(do_image_rotation, img=img_shared, sigma_des=sigma_des, logger=logger), angles)) if plotter: plotter.save_plot_of_rotation_angle(angles, results) try: @@ -1594,9 +1597,12 @@ def do_work_of_slopes_new( return cnt_clean_rot, box_text, contour, contour_par, crop_coor, index_r_con, slope +@wrap_ndarray_shared(kw='textline_mask_tot_ea') +@wrap_ndarray_shared(kw='mask_texts_only') def do_work_of_slopes_new_curved( box_text, contour, contour_par, index_r_con, - textline_mask_tot_ea, mask_texts_only, num_col, scale_par, slope_deskew, + textline_mask_tot_ea=None, mask_texts_only=None, + num_col=1, scale_par=1.0, slope_deskew=0.0, logger=None, MAX_SLOPE=999, KERNEL=None, plotter=None ): if KERNEL is None: diff --git a/src/eynollah/utils/shm.py b/src/eynollah/utils/shm.py new file mode 100644 index 0000000..4b51053 --- /dev/null +++ b/src/eynollah/utils/shm.py @@ -0,0 +1,45 @@ +from multiprocessing import shared_memory +from contextlib import contextmanager +from functools import wraps +import numpy as np + +@contextmanager +def share_ndarray(array: np.ndarray): + size = np.dtype(array.dtype).itemsize * np.prod(array.shape) + shm = shared_memory.SharedMemory(create=True, size=size) + try: + shared_array = np.ndarray(array.shape, dtype=array.dtype, buffer=shm.buf) + shared_array[:] = array[:] + shared_array.flags["WRITEABLE"] = False + yield dict(shape=array.shape, dtype=array.dtype, name=shm.name) + finally: + shm.close() + shm.unlink() + +@contextmanager +def ndarray_shared(array: dict): + shm = shared_memory.SharedMemory(name=array['name']) + try: + array = np.ndarray(array['shape'], dtype=array['dtype'], buffer=shm.buf) + yield array + finally: + shm.close() + +def wrap_ndarray_shared(kw=None): + def wrapper(f): + if kw is None: + @wraps(f) + def shared_func(array, *args, **kwargs): + with ndarray_shared(array) as ndarray: + return f(ndarray, *args, **kwargs) + return shared_func + else: + @wraps(f) + def shared_func(*args, **kwargs): + array = kwargs.pop(kw) + with ndarray_shared(array) as ndarray: + kwargs[kw] = ndarray + return f(*args, **kwargs) + return shared_func + return wrapper + From 0662ece536e090989ad4e2281317336129eae468 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 4 Sep 2025 15:18:55 +0200 Subject: [PATCH 274/492] do_work_of_slopes*: use shm also in non-light mode(s) --- src/eynollah/eynollah.py | 33 ++++++++++++++++------------ src/eynollah/utils/separate_lines.py | 6 +++-- 2 files changed, 23 insertions(+), 16 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 42af8e4..6333ca5 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -1554,11 +1554,14 @@ class Eynollah: if not len(contours): return [], [], [], [], [], [], [] self.logger.debug("enter get_slopes_and_deskew_new_light") - results = self.executor.map(partial(do_work_of_slopes_new_light, - textline_mask_tot_ea=textline_mask_tot, - slope_deskew=slope_deskew,textline_light=self.textline_light, - logger=self.logger,), - boxes, contours, contours_par, range(len(contours_par))) + with share_ndarray(textline_mask_tot) as textline_mask_tot_shared: + results = self.executor.map(partial(do_work_of_slopes_new_light, + textline_mask_tot_ea=textline_mask_tot_shared, + slope_deskew=slope_deskew, + textline_light=self.textline_light, + logger=self.logger,), + boxes, contours, contours_par, range(len(contours_par))) + results = list(results) # exhaust prior to release #textline_polygons, boxes, text_regions, text_regions_par, box_coord, index_text_con, slopes = zip(*results) self.logger.debug("exit get_slopes_and_deskew_new_light") return tuple(zip(*results)) @@ -1567,14 +1570,16 @@ class Eynollah: if not len(contours): return [], [], [], [], [], [], [] self.logger.debug("enter get_slopes_and_deskew_new") - results = self.executor.map(partial(do_work_of_slopes_new, - textline_mask_tot_ea=textline_mask_tot, - slope_deskew=slope_deskew, - MAX_SLOPE=MAX_SLOPE, - KERNEL=KERNEL, - logger=self.logger, - plotter=self.plotter,), - boxes, contours, contours_par, range(len(contours_par))) + with share_ndarray(textline_mask_tot) as textline_mask_tot_shared: + results = self.executor.map(partial(do_work_of_slopes_new, + textline_mask_tot_ea=textline_mask_tot_shared, + slope_deskew=slope_deskew, + MAX_SLOPE=MAX_SLOPE, + KERNEL=KERNEL, + logger=self.logger, + plotter=self.plotter,), + boxes, contours, contours_par, range(len(contours_par))) + results = list(results) # exhaust prior to release #textline_polygons, boxes, text_regions, text_regions_par, box_coord, index_text_con, slopes = zip(*results) self.logger.debug("exit get_slopes_and_deskew_new") return tuple(zip(*results)) @@ -1596,8 +1601,8 @@ class Eynollah: logger=self.logger, plotter=self.plotter,), boxes, contours, contours_par, range(len(contours_par))) - #textline_polygons, boxes, text_regions, text_regions_par, box_coord, index_text_con, slopes = zip(*results) results = list(results) # exhaust prior to release + #textline_polygons, boxes, text_regions, text_regions_par, box_coord, index_text_con, slopes = zip(*results) self.logger.debug("exit get_slopes_and_deskew_new_curved") return tuple(zip(*results)) diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index e4bb953..1a2f511 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1539,9 +1539,10 @@ def get_smallest_skew(img, sigma_des, angles, logger=None, plotter=None, map=map var = 0 return angle, var +@wrap_ndarray_shared(kw='textline_mask_tot_ea') def do_work_of_slopes_new( box_text, contour, contour_par, index_r_con, - textline_mask_tot_ea, slope_deskew, + textline_mask_tot_ea=None, slope_deskew=0.0, logger=None, MAX_SLOPE=999, KERNEL=None, plotter=None ): if KERNEL is None: @@ -1689,9 +1690,10 @@ def do_work_of_slopes_new_curved( return textlines_cnt_per_region[::-1], box_text, contour, contour_par, crop_coor, index_r_con, slope +@wrap_ndarray_shared(kw='textline_mask_tot_ea') def do_work_of_slopes_new_light( box_text, contour, contour_par, index_r_con, - textline_mask_tot_ea, slope_deskew, textline_light, + textline_mask_tot_ea=None, slope_deskew=0, textline_light=True, logger=None ): if logger is None: From 04c3d7dd1b98b01adf2b8ccd72830ad5fd9a4e95 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 18 Sep 2025 20:07:54 +0200 Subject: [PATCH 275/492] get_smallest_skew: avoid shm if no ProcessPoolExecutor is passed --- src/eynollah/utils/separate_lines.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 1a2f511..4d8badb 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1469,7 +1469,7 @@ def do_image_rotation(angle, img=None, sigma_des=1.0, logger=None): return var def return_deskew_slop(img_patch_org, sigma_des,n_tot_angles=100, - main_page=False, logger=None, plotter=None, map=map): + main_page=False, logger=None, plotter=None, map=None): if main_page and plotter: plotter.save_plot_of_textline_density(img_patch_org) @@ -1523,8 +1523,13 @@ def return_deskew_slop(img_patch_org, sigma_des,n_tot_angles=100, def get_smallest_skew(img, sigma_des, angles, logger=None, plotter=None, map=map): if logger is None: logger = getLogger(__package__) - with share_ndarray(img) as img_shared: - results = list(map(partial(do_image_rotation, img=img_shared, sigma_des=sigma_des, logger=logger), angles)) + if map is None: + results = [do_image_rotation.__wrapped__(angle, img=img, sigma_des=sigma_des, logger=logger) + for angle in angles] + else: + with share_ndarray(img) as img_shared: + results = list(map(partial(do_image_rotation, img=img_shared, sigma_des=sigma_des, logger=logger), + angles)) if plotter: plotter.save_plot_of_rotation_angle(angles, results) try: From b94c96fcbbb5bbce72bc9cdc9b334953abd774ad Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Sat, 20 Sep 2025 00:56:33 +0200 Subject: [PATCH 276/492] find_num_col: exit early if empty (avoiding exceptions) --- src/eynollah/utils/__init__.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index bbf30a8..9daec7d 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -383,6 +383,10 @@ def find_num_col_deskew(regions_without_separators, sigma_, multiplier=3.8): return np.std(z) def find_num_col(regions_without_separators, num_col_classifier, tables, multiplier=3.8): + if not regions_without_separators.any(): + return 0, [] + #plt.imshow(regions_without_separators) + #plt.show() regions_without_separators_0 = regions_without_separators.sum(axis=0) ##plt.plot(regions_without_separators_0) ##plt.show() @@ -402,6 +406,9 @@ def find_num_col(regions_without_separators, num_col_classifier, tables, multipl zneg = gaussian_filter1d(zneg, sigma_) peaks_neg, _ = find_peaks(zneg, height=0) + #plt.plot(zneg) + #plt.plot(peaks_neg, zneg[peaks_neg], 'rx') + #plt.show() peaks, _ = find_peaks(z, height=0) peaks_neg = peaks_neg - 10 - 10 @@ -416,9 +423,13 @@ def find_num_col(regions_without_separators, num_col_classifier, tables, multipl (peaks_neg < (regions_without_separators.shape[1] - 370))] interest_pos = z[peaks] interest_pos = interest_pos[interest_pos > 10] + if not interest_pos.any(): + return 0, [] # plt.plot(z) # plt.show() interest_neg = z[peaks_neg] + if not interest_neg.any(): + return 0, [] min_peaks_pos = np.min(interest_pos) max_peaks_pos = np.max(interest_pos) From 0366707136568241c42bac2f3bf675dda5989fe2 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Sat, 20 Sep 2025 00:57:00 +0200 Subject: [PATCH 277/492] get_smallest_skew: do not pass logger --- src/eynollah/utils/separate_lines.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 4d8badb..1d27a17 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1528,7 +1528,7 @@ def get_smallest_skew(img, sigma_des, angles, logger=None, plotter=None, map=map for angle in angles] else: with share_ndarray(img) as img_shared: - results = list(map(partial(do_image_rotation, img=img_shared, sigma_des=sigma_des, logger=logger), + results = list(map(partial(do_image_rotation, img=img_shared, sigma_des=sigma_des, logger=None), angles)) if plotter: plotter.save_plot_of_rotation_angle(angles, results) From 758602403eb92625608d04e7d77fcbf896c55e2d Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Sun, 21 Sep 2025 21:35:22 +0200 Subject: [PATCH 278/492] replace loky with concurrent.futures.ProcessPoolExecutor (faster) --- src/eynollah/eynollah.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 6333ca5..1c70498 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -23,7 +23,7 @@ import gc import copy import json -from loky import ProcessPoolExecutor +from concurrent.futures import ProcessPoolExecutor import xml.etree.ElementTree as ET import cv2 import numpy as np @@ -244,7 +244,7 @@ class Eynollah: self.num_col_lower = num_col_lower self.logger = logger if logger else getLogger('eynollah') # for parallelization of CPU-intensive tasks: - self.executor = ProcessPoolExecutor(max_workers=cpu_count(), timeout=1200) + self.executor = ProcessPoolExecutor(max_workers=cpu_count()) atexit.register(self.executor.shutdown) self.dir_models = dir_models self.model_dir_of_enhancement = dir_models + "/eynollah-enhancement_20210425" From 53c1ca11fc57c86276fc307fff01050f78517e24 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Mon, 29 Sep 2025 22:15:17 +0200 Subject: [PATCH 279/492] Update README.md --- README.md | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index e576f4d..9dc4824 100644 --- a/README.md +++ b/README.md @@ -11,23 +11,24 @@ ![](https://user-images.githubusercontent.com/952378/102350683-8a74db80-3fa5-11eb-8c7e-f743f7d6eae2.jpg) ## Features -* Support for up to 10 segmentation classes: +* Support for 10 distinct segmentation classes: * background, [page border](https://ocr-d.de/en/gt-guidelines/trans/lyRand.html), [text region](https://ocr-d.de/en/gt-guidelines/trans/lytextregion.html#textregionen__textregion_), [text line](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_TextLineType.html), [header](https://ocr-d.de/en/gt-guidelines/trans/lyUeberschrift.html), [image](https://ocr-d.de/en/gt-guidelines/trans/lyBildbereiche.html), [separator](https://ocr-d.de/en/gt-guidelines/trans/lySeparatoren.html), [marginalia](https://ocr-d.de/en/gt-guidelines/trans/lyMarginalie.html), [initial](https://ocr-d.de/en/gt-guidelines/trans/lyInitiale.html), [table](https://ocr-d.de/en/gt-guidelines/trans/lyTabellen.html) * Support for various image optimization operations: * cropping (border detection), binarization, deskewing, dewarping, scaling, enhancing, resizing -* Text line segmentation to bounding boxes or polygons (contours) including for curved lines and vertical text -* Detection of reading order (left-to-right or right-to-left) +* Textline segmentation to bounding boxes or polygons (contours) including for curved lines and vertical text +* Text recognition (OCR) using either CNN-RNN or Transformer models +* Detection of reading order (left-to-right or right-to-left) using either heuristics or trainable models * Output in [PAGE-XML](https://github.com/PRImA-Research-Lab/PAGE-XML) * [OCR-D](https://github.com/qurator-spk/eynollah#use-as-ocr-d-processor) interface -:warning: Development is currently focused on achieving the best possible quality of results for a wide variety of -historical documents and therefore processing can be very slow. We aim to improve this, but contributions are welcome. +:warning: Development is focused on achieving the best quality of results for a wide variety of historical +documents and therefore processing can be very slow. We aim to improve this, but contributions are welcome. ## Installation Python `3.8-3.11` with Tensorflow `<2.13` on Linux are currently supported. -For (limited) GPU support the CUDA toolkit needs to be installed. +For (limited) GPU support the CUDA toolkit needs to be installed. A known working config is CUDA `11` with cuDNN `8.6`. You can either install from PyPI @@ -56,26 +57,27 @@ make install EXTRAS=OCR Pretrained models can be downloaded from [zenodo](https://zenodo.org/records/17194824) or [huggingface](https://huggingface.co/SBB?search_models=eynollah). -For documentation on methods and models, have a look at [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). +For documentation on methods and models, have a look at [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). +Model cards are also provided for our trained models. ## Training -In case you want to train your own model with Eynollah, have see the +In case you want to train your own model with Eynollah, see the documentation in [`train.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/train.md) and use the tools in the [`train` folder](https://github.com/qurator-spk/eynollah/tree/main/train). ## Usage Eynollah supports five use cases: layout analysis (segmentation), binarization, -image enhancement, text recognition (OCR), and (trainable) reading order detection. +image enhancement, text recognition (OCR), and reading order detection. ### Layout Analysis -The layout analysis module is responsible for detecting layouts, identifying text lines, and determining reading order -using both heuristic methods or a machine-based reading order detection model. +The layout analysis module is responsible for detecting layout elements, identifying text lines, and determining reading +order using either heuristic methods or a reading order detection model. -Note that there are currently two supported ways for reading order detection: either as part of layout analysis based -on image input, or, currently under development, for given layout analysis results based on PAGE-XML data as input. +Reading order detection can be performed either as part of layout analysis based on image input, or, currently under +development, based on pre-existing layout analysis results in PAGE-XML format as input. The command-line interface for layout analysis can be called like this: @@ -108,15 +110,15 @@ The following options can be used to further configure the processing: | `-sp ` | save cropped page image to this directory | | `-sa ` | save all (plot, enhanced/binary image, layout) to this directory | -If no option is set, the tool performs layout detection of main regions (background, text, images, separators +If no further option is set, the tool performs layout detection of main regions (background, text, images, separators and marginals). -The best output quality is produced when RGB images are used as input rather than greyscale or binarized images. +The best output quality is achieved when RGB images are used as input rather than greyscale or binarized images. ### Binarization The binarization module performs document image binarization using pretrained pixelwise segmentation models. -The command-line interface for binarization of single image can be called like this: +The command-line interface for binarization can be called like this: ```sh eynollah binarization \ @@ -127,16 +129,16 @@ eynollah binarization \ ### OCR -The OCR module performs text recognition from images using two main families of pretrained models: CNN-RNN–based OCR and Transformer-based OCR. +The OCR module performs text recognition using either a CNN-RNN model or a Transformer model. -The command-line interface for ocr can be called like this: +The command-line interface for OCR can be called like this: ```sh eynollah ocr \ -i | -di \ -dx \ -o \ - -m | --model_name \ + -m | --model_name \ ``` ### Machine-based-reading-order @@ -174,20 +176,18 @@ If the input file group is PAGE-XML (from a previous OCR-D workflow step), Eynol ocrd-eynollah-segment -I OCR-D-XYZ -O OCR-D-SEG -P models eynollah_layout_v0_5_0 -Still, in general, it makes more sense to add other workflow steps **after** Eynollah. +In general, it makes more sense to add other workflow steps **after** Eynollah. -There is also an OCR-D processor for the binarization: +There is also an OCR-D processor for binarization: ocrd-sbb-binarize -I OCR-D-IMG -O OCR-D-BIN -P models default-2021-03-09 #### Additional documentation -Please check the [wiki](https://github.com/qurator-spk/eynollah/wiki). +Additional documentation is available in the [docs](https://github.com/qurator-spk/eynollah/tree/main/docs) directory. ## How to cite -If you find this tool useful in your work, please consider citing our paper: - ```bibtex @inproceedings{hip23rezanezhad, title = {Document Layout Analysis with Deep Learning and Heuristics}, From 070dafca759f568a7d4bfa6ddfd9cb62324c87f3 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Mon, 29 Sep 2025 22:17:27 +0200 Subject: [PATCH 280/492] remove duplicate LICENSE --- train/LICENSE | 201 -------------------------------------------------- 1 file changed, 201 deletions(-) delete mode 100644 train/LICENSE diff --git a/train/LICENSE b/train/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/train/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. From c0137c29ad46adf2096664632e9a20a30afbfe09 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 30 Sep 2025 02:23:43 +0200 Subject: [PATCH 281/492] try to fix the failed outsourcing of utils_ocr --- src/eynollah/eynollah.py | 63 ++------------------------------- src/eynollah/utils/utils_ocr.py | 1 + 2 files changed, 3 insertions(+), 61 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 32490a2..192f6f4 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -3917,34 +3917,6 @@ class Eynollah: region_ids = ['region_%04d' % i for i in range(len(co_text_all_org))] return ordered, region_ids - def return_list_of_contours_with_desired_order(self, ls_cons, sorted_indexes): - return [ls_cons[sorted_indexes[index]] for index in range(len(sorted_indexes))] - - def return_it_in_two_groups(self, x_differential): - split = [ind if x_differential[ind]!=x_differential[ind+1] else -1 - for ind in range(len(x_differential)-1)] - split_masked = list( np.array(split[:])[np.array(split[:])!=-1] ) - if 0 not in split_masked: - split_masked.insert(0, -1) - split_masked.append(len(x_differential)-1) - - split_masked = np.array(split_masked) +1 - - sums = [np.sum(x_differential[split_masked[ind]:split_masked[ind+1]]) - for ind in range(len(split_masked)-1)] - - indexes_to_bec_changed = [ind if (np.abs(sums[ind-1]) > np.abs(sums[ind]) and - np.abs(sums[ind+1]) > np.abs(sums[ind])) else -1 - for ind in range(1,len(sums)-1)] - indexes_to_bec_changed_filtered = np.array(indexes_to_bec_changed)[np.array(indexes_to_bec_changed)!=-1] - - x_differential_new = np.copy(x_differential) - for i in indexes_to_bec_changed_filtered: - i_slice = slice(split_masked[i], split_masked[i+1]) - x_differential_new[i_slice] = -1 * np.array(x_differential)[i_slice] - - return x_differential_new - def return_start_and_end_of_common_text_of_textline_ocr(self,textline_image, ind_tot): width = np.shape(textline_image)[1] height = np.shape(textline_image)[0] @@ -3988,36 +3960,6 @@ class Eynollah: else: pass - def return_start_and_end_of_common_text_of_textline_ocr_without_common_section(self, textline_image, ind_tot): - width = np.shape(textline_image)[1] - height = np.shape(textline_image)[0] - common_window = int(0.06*width) - - width1 = int ( width/2. - common_window ) - width2 = int ( width/2. + common_window ) - - img_sum = np.sum(textline_image[:,:,0], axis=0) - sum_smoothed = gaussian_filter1d(img_sum, 3) - - peaks_real, _ = find_peaks(sum_smoothed, height=0) - if len(peaks_real)>70: - #print(len(peaks_real), 'len(peaks_real)') - - peaks_real = peaks_real[(peaks_realwidth1)] - - arg_max = np.argmax(sum_smoothed[peaks_real]) - peaks_final = peaks_real[arg_max] - - #plt.figure(ind_tot) - #plt.imshow(textline_image) - #plt.plot([peaks_final, peaks_final], [0, height-1]) - ##plt.plot([peaks_final[1], peaks_final[1]], [0, height-1]) - #plt.savefig('./'+str(ind_tot)+'.png') - - return peaks_final - else: - return None - def return_start_and_end_of_common_text_of_textline_ocr_new_splitted( self, peaks_real, sum_smoothed, start_split, end_split): @@ -4079,8 +4021,7 @@ class Eynollah: #width1 = int ( width/2. - common_window ) #width2 = int ( width/2. + common_window ) - split_point = self.return_start_and_end_of_common_text_of_textline_ocr_without_common_section( - textline_image, ind_tot) + split_point = return_start_and_end_of_common_text_of_textline_ocr_without_common_section(textline_image) if split_point: image1 = textline_image[:, :split_point,:]# image.crop((0, 0, width2, height)) image2 = textline_image[:, split_point:,:]#image.crop((width1, 0, width, height)) @@ -5144,7 +5085,7 @@ class Eynollah: box_ind = all_box_coord[indexing] #print(ind_poly,np.shape(ind_poly), 'ind_poly') #print(box_ind) - ind_poly = self.return_textline_contour_with_added_box_coordinate(ind_poly, box_ind) + ind_poly = return_textline_contour_with_added_box_coordinate(ind_poly, box_ind) #print(ind_poly_copy) ind_poly[ind_poly<0] = 0 x, y, w, h = cv2.boundingRect(ind_poly) diff --git a/src/eynollah/utils/utils_ocr.py b/src/eynollah/utils/utils_ocr.py index 4fa99f7..5f19387 100644 --- a/src/eynollah/utils/utils_ocr.py +++ b/src/eynollah/utils/utils_ocr.py @@ -92,6 +92,7 @@ def return_start_and_end_of_common_text_of_textline_ocr_without_common_section(t return peaks_final else: return None + # Function to fit text inside the given area def fit_text_single_line(draw, text, font_path, max_width, max_height): initial_font_size = 50 From f857ee7b518e23c62b28aab32cd64d396da836fe Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Fri, 19 Sep 2025 02:12:18 +0200 Subject: [PATCH 282/492] simplify --- src/eynollah/eynollah.py | 23 +++-------------------- src/eynollah/utils/__init__.py | 2 +- 2 files changed, 4 insertions(+), 21 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 192f6f4..0c9692e 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -3182,26 +3182,9 @@ class Eynollah: num_col = num_col + 1 if not num_column_is_classified: num_col_classifier = num_col + 1 - if self.num_col_upper and self.num_col_lower: - if self.num_col_upper == self.num_col_lower: - num_col_classifier = self.num_col_upper - else: - if num_col_classifier < self.num_col_lower: - num_col_classifier = self.num_col_lower - if num_col_classifier > self.num_col_upper: - num_col_classifier = self.num_col_upper - - elif self.num_col_lower and not self.num_col_upper: - if num_col_classifier < self.num_col_lower: - num_col_classifier = self.num_col_lower - - elif self.num_col_upper and not self.num_col_lower: - if num_col_classifier > self.num_col_upper: - num_col_classifier = self.num_col_upper - - else: - pass - + num_col_classifier = min(self.num_col_upper or num_col_classifier, + max(self.num_col_lower or num_col_classifier, + num_col_classifier)) except Exception as why: self.logger.error(why) num_col = None diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 243430e..f8926cf 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1675,9 +1675,9 @@ def return_boxes_of_images_by_order_of_reading_new( peaks_neg_fin=[] num_col = 0 try: - peaks_neg_fin_org=np.copy(peaks_neg_fin) if (len(peaks_neg_fin)+1) Date: Tue, 30 Sep 2025 03:52:19 +0200 Subject: [PATCH 283/492] indent extremely long lines --- src/eynollah/eynollah.py | 750 ++++++++++++++++++--------- src/eynollah/utils/__init__.py | 30 +- src/eynollah/utils/separate_lines.py | 136 +++-- src/eynollah/utils/utils_ocr.py | 25 +- 4 files changed, 652 insertions(+), 289 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 0c9692e..2e31433 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -272,7 +272,6 @@ class Eynollah: else: self.threshold_art_class_textline = 0.1 - self.dir_models = dir_models self.model_dir_of_enhancement = dir_models + "/eynollah-enhancement_20210425" self.model_dir_of_binarization = dir_models + "/eynollah-binarization_20210425" self.model_dir_of_col_classifier = dir_models + "/eynollah-column-classifier_20210425" @@ -289,8 +288,17 @@ class Eynollah: self.model_page_dir = dir_models + "/model_eynollah_page_extraction_20250915" self.model_region_dir_p_ens = dir_models + "/eynollah-main-regions-ensembled_20210425" self.model_region_dir_p_ens_light = dir_models + "/eynollah-main-regions_20220314" - self.model_region_dir_p_ens_light_only_images_extraction = dir_models + "/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18" - self.model_reading_order_dir = dir_models + "/model_eynollah_reading_order_20250824"#"/model_mb_ro_aug_ens_11"#"/model_step_3200000_mb_ro"#"/model_ens_reading_order_machine_based"#"/model_mb_ro_aug_ens_8"#"/model_ens_reading_order_machine_based" + self.model_region_dir_p_ens_light_only_images_extraction = (dir_models + + "/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18" + ) + self.model_reading_order_dir = (dir_models + + "/model_eynollah_reading_order_20250824" + #"/model_mb_ro_aug_ens_11" + #"/model_step_3200000_mb_ro" + #"/model_ens_reading_order_machine_based" + #"/model_mb_ro_aug_ens_8" + #"/model_ens_reading_order_machine_based" + ) #"/modelens_12sp_elay_0_3_4__3_6_n" #"/modelens_earlylayout_12spaltige_2_3_5_6_7_8" #"/modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18" @@ -379,11 +387,9 @@ class Eynollah: self.b_s_ocr = 8 else: self.b_s_ocr = int(batch_size_ocr) - with open(os.path.join(self.model_ocr_dir, "characters_org.txt"),"r") as config_file: characters = json.load(config_file) - AUTOTUNE = tf.data.AUTOTUNE @@ -840,7 +846,9 @@ class Eynollah: self, patches, img, model, n_batch_inference=1, marginal_of_patch_percent=0.1, thresholding_for_some_classes_in_light_version=False, - thresholding_for_artificial_class_in_light_version=False, thresholding_for_fl_light_version=False, threshold_art_class_textline=0.1): + thresholding_for_artificial_class_in_light_version=False, + thresholding_for_fl_light_version=False, + threshold_art_class_textline=0.1): self.logger.debug("enter do_prediction") img_height_model = model.layers[-1].output_shape[1] @@ -1254,7 +1262,9 @@ class Eynollah: self, patches, img, model, n_batch_inference=1, marginal_of_patch_percent=0.1, thresholding_for_some_classes_in_light_version=False, - thresholding_for_artificial_class_in_light_version=False, threshold_art_class_textline=0.1, threshold_art_class_layout=0.1): + thresholding_for_artificial_class_in_light_version=False, + threshold_art_class_textline=0.1, + threshold_art_class_layout=0.1): self.logger.debug("enter do_prediction_new_concept") img_height_model = model.layers[-1].output_shape[1] @@ -1384,7 +1394,8 @@ class Eynollah: for i_batch, j_batch in zip(list_i_s, list_j_s): seg_in = seg[indexer_inside_batch] - if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + if (thresholding_for_artificial_class_in_light_version or + thresholding_for_some_classes_in_light_version): seg_in_art = seg_art[indexer_inside_batch] index_y_u_in = list_y_u[indexer_inside_batch] @@ -1404,7 +1415,8 @@ class Eynollah: label_p_pred[0, 0:-margin or None, 0:-margin or None, 1] - if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + if (thresholding_for_artificial_class_in_light_version or + thresholding_for_some_classes_in_light_version): prediction_true[index_y_d_in + 0:index_y_u_in - margin, index_x_d_in + 0:index_x_u_in - margin, 1] = \ seg_in_art[0:-margin or None, @@ -1421,7 +1433,8 @@ class Eynollah: label_p_pred[0, margin:, margin:, 1] - if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + if (thresholding_for_artificial_class_in_light_version or + thresholding_for_some_classes_in_light_version): prediction_true[index_y_d_in + margin:index_y_u_in - 0, index_x_d_in + margin:index_x_u_in - 0, 1] = \ seg_in_art[margin:, @@ -1439,7 +1452,8 @@ class Eynollah: 0:-margin or None, 1] - if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + if (thresholding_for_artificial_class_in_light_version or + thresholding_for_some_classes_in_light_version): prediction_true[index_y_d_in + margin:index_y_u_in - 0, index_x_d_in + 0:index_x_u_in - margin, 1] = \ seg_in_art[margin:, @@ -1456,7 +1470,8 @@ class Eynollah: label_p_pred[0, 0:-margin or None, margin:, 1] - if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + if (thresholding_for_artificial_class_in_light_version or + thresholding_for_some_classes_in_light_version): prediction_true[index_y_d_in + 0:index_y_u_in - margin, index_x_d_in + margin:index_x_u_in - 0, 1] = \ seg_in_art[0:-margin or None, @@ -1473,7 +1488,8 @@ class Eynollah: label_p_pred[0, margin:-margin or None, 0:-margin or None, 1] - if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + if (thresholding_for_artificial_class_in_light_version or + thresholding_for_some_classes_in_light_version): prediction_true[index_y_d_in + margin:index_y_u_in - margin, index_x_d_in + 0:index_x_u_in - margin, 1] = \ seg_in_art[margin:-margin or None, @@ -1489,7 +1505,8 @@ class Eynollah: label_p_pred[0, margin:-margin or None, margin:, 1] - if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + if (thresholding_for_artificial_class_in_light_version or + thresholding_for_some_classes_in_light_version): prediction_true[index_y_d_in + margin:index_y_u_in - margin, index_x_d_in + margin:index_x_u_in - 0, 1] = \ seg_in_art[margin:-margin or None, @@ -1505,7 +1522,8 @@ class Eynollah: label_p_pred[0, 0:-margin or None, margin:-margin or None, 1] - if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + if (thresholding_for_artificial_class_in_light_version or + thresholding_for_some_classes_in_light_version): prediction_true[index_y_d_in + 0:index_y_u_in - margin, index_x_d_in + margin:index_x_u_in - margin, 1] = \ seg_in_art[0:-margin or None, @@ -1521,7 +1539,8 @@ class Eynollah: label_p_pred[0, margin:, margin:-margin or None, 1] - if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + if (thresholding_for_artificial_class_in_light_version or + thresholding_for_some_classes_in_light_version): prediction_true[index_y_d_in + margin:index_y_u_in - 0, index_x_d_in + margin:index_x_u_in - margin, 1] = \ seg_in_art[margin:, @@ -1537,7 +1556,8 @@ class Eynollah: label_p_pred[0, margin:-margin or None, margin:-margin or None, 1] - if thresholding_for_artificial_class_in_light_version or thresholding_for_some_classes_in_light_version: + if (thresholding_for_artificial_class_in_light_version or + thresholding_for_some_classes_in_light_version): prediction_true[index_y_d_in + margin:index_y_u_in - margin, index_x_d_in + margin:index_x_u_in - margin, 1] = \ seg_in_art[margin:-margin or None, @@ -1686,7 +1706,10 @@ class Eynollah: else: img = resize_image(img, int(img_height_h * 2500 / float(img_width_h)), 2500).astype(np.uint8) - prediction_regions = self.do_prediction(patches, img, model_region, marginal_of_patch_percent=0.1, n_batch_inference=3, thresholding_for_fl_light_version=thresholding_for_fl_light_version) + prediction_regions = self.do_prediction(patches, img, model_region, + marginal_of_patch_percent=0.1, + n_batch_inference=3, + thresholding_for_fl_light_version=thresholding_for_fl_light_version) prediction_regions = resize_image(prediction_regions, img_height_h, img_width_h) self.logger.debug("exit extract_text_regions") return prediction_regions, prediction_regions @@ -1839,7 +1862,10 @@ class Eynollah: cy_textline_in = [cy_main_tot[ind] for ind in indexes_in] w_h_textlines_in = [w_h_textlines[ind][0] / float(w_h_textlines[ind][1]) for ind in indexes_in] - textlines_ins = self.get_textlines_of_a_textregion_sorted(textlines_ins, cx_textline_in, cy_textline_in, w_h_textlines_in) + textlines_ins = self.get_textlines_of_a_textregion_sorted(textlines_ins, + cx_textline_in, + cy_textline_in, + w_h_textlines_in) all_found_textline_polygons.append(textlines_ins)#[::-1]) slopes.append(slope_deskew) @@ -1847,7 +1873,13 @@ class Eynollah: crop_coor = box2rect(boxes[index]) all_box_coord.append(crop_coor) - return all_found_textline_polygons, boxes, contours, contours_par, all_box_coord, np.array(range(len(contours_par))), slopes + return (all_found_textline_polygons, + boxes, + contours, + contours_par, + all_box_coord, + np.array(range(len(contours_par))), + slopes) def get_slopes_and_deskew_new_light(self, contours, contours_par, textline_mask_tot, boxes, slope_deskew): if not len(contours): @@ -1883,7 +1915,8 @@ class Eynollah: self.logger.debug("exit get_slopes_and_deskew_new") return tuple(zip(*results)) - def get_slopes_and_deskew_new_curved(self, contours, contours_par, textline_mask_tot, boxes, mask_texts_only, num_col, scale_par, slope_deskew): + def get_slopes_and_deskew_new_curved(self, contours, contours_par, textline_mask_tot, boxes, + mask_texts_only, num_col, scale_par, slope_deskew): if not len(contours): return [], [], [], [], [], [], [] self.logger.debug("enter get_slopes_and_deskew_new_curved") @@ -1914,10 +1947,11 @@ class Eynollah: img_w = img_org.shape[1] img = resize_image(img_org, int(img_org.shape[0] * scaler_h), int(img_org.shape[1] * scaler_w)) - prediction_textline = self.do_prediction( - use_patches, img, self.model_textline, - marginal_of_patch_percent=0.15, n_batch_inference=3, - thresholding_for_artificial_class_in_light_version=self.textline_light, threshold_art_class_textline=self.threshold_art_class_textline) + prediction_textline = self.do_prediction(use_patches, img, self.model_textline, + marginal_of_patch_percent=0.15, + n_batch_inference=3, + thresholding_for_artificial_class_in_light_version=self.textline_light, + threshold_art_class_textline=self.threshold_art_class_textline) #if not self.textline_light: #if num_col_classifier==1: #prediction_textline_nopatch = self.do_prediction(False, img, self.model_textline) @@ -2009,12 +2043,14 @@ class Eynollah: boxes_sub_new = [] poly_sub = [] for mv in range(len(boxes_per_process)): - crop_img, _ = crop_image_inside_box(boxes_per_process[mv], np.repeat(textline_mask_tot[:, :, np.newaxis], 3, axis=2)) + crop_img, _ = crop_image_inside_box(boxes_per_process[mv], + np.repeat(textline_mask_tot[:, :, np.newaxis], 3, axis=2)) crop_img = crop_img[:, :, 0] crop_img = cv2.erode(crop_img, KERNEL, iterations=2) try: textline_con, hierarchy = return_contours_of_image(crop_img) - textline_con_fil = filter_contours_area_of_image(crop_img, textline_con, hierarchy, max_area=1, min_area=0.0008) + textline_con_fil = filter_contours_area_of_image(crop_img, textline_con, hierarchy, + max_area=1, min_area=0.0008) y_diff_mean = find_contours_mean_y_diff(textline_con_fil) sigma_des = max(1, int(y_diff_mean * (4.0 / 40.0))) crop_img[crop_img > 0] = 1 @@ -2139,7 +2175,13 @@ class Eynollah: [page_coord_img[2], page_coord_img[1]]])) self.logger.debug("exit get_regions_extract_images_only") - return text_regions_p_true, erosion_hurts, polygons_seplines, polygons_of_images_fin, image_page, page_coord, cont_page + return (text_regions_p_true, + erosion_hurts, + polygons_seplines, + polygons_of_images_fin, + image_page, + page_coord, + cont_page) def get_regions_light_v(self,img,is_image_enhanced, num_col_classifier, skip_layout_and_reading_order=False): self.logger.debug("enter get_regions_light_v") @@ -2197,7 +2239,8 @@ class Eynollah: #print("inside 1 ", time.time()-t_in) ###textline_mask_tot_ea = self.run_textline(img_bin) - self.logger.debug("detecting textlines on %s with %d colors", str(img_resized.shape), len(np.unique(img_resized))) + self.logger.debug("detecting textlines on %s with %d colors", + str(img_resized.shape), len(np.unique(img_resized))) textline_mask_tot_ea = self.run_textline(img_resized, num_col_classifier) textline_mask_tot_ea = resize_image(textline_mask_tot_ea,img_height_h, img_width_h ) @@ -2214,13 +2257,15 @@ class Eynollah: img_resized.shape[1], img_resized.shape[0], num_col_classifier) prediction_regions_org, confidence_matrix = self.do_prediction_new_concept( True, img_resized, self.model_region_1_2, n_batch_inference=1, - thresholding_for_some_classes_in_light_version=True, threshold_art_class_layout=self.threshold_art_class_layout) + thresholding_for_some_classes_in_light_version=True, + threshold_art_class_layout=self.threshold_art_class_layout) else: prediction_regions_org = np.zeros((self.image_org.shape[0], self.image_org.shape[1], 3)) confidence_matrix = np.zeros((self.image_org.shape[0], self.image_org.shape[1])) prediction_regions_page, confidence_matrix_page = self.do_prediction_new_concept( False, self.image_page_org_size, self.model_region_1_2, n_batch_inference=1, - thresholding_for_artificial_class_in_light_version=True, threshold_art_class_layout=self.threshold_art_class_layout) + thresholding_for_artificial_class_in_light_version=True, + threshold_art_class_layout=self.threshold_art_class_layout) ys = slice(*self.page_coord[0:2]) xs = slice(*self.page_coord[2:4]) prediction_regions_org[ys, xs] = prediction_regions_page @@ -2233,8 +2278,11 @@ class Eynollah: img_resized.shape[1], img_resized.shape[0], new_h, num_col_classifier) prediction_regions_org, confidence_matrix = self.do_prediction_new_concept( True, img_resized, self.model_region_1_2, n_batch_inference=2, - thresholding_for_some_classes_in_light_version=True, threshold_art_class_layout=self.threshold_art_class_layout) - ###prediction_regions_org = self.do_prediction(True, img_bin, self.model_region, n_batch_inference=3, thresholding_for_some_classes_in_light_version=True) + thresholding_for_some_classes_in_light_version=True, + threshold_art_class_layout=self.threshold_art_class_layout) + ###prediction_regions_org = self.do_prediction(True, img_bin, self.model_region, + ###n_batch_inference=3, + ###thresholding_for_some_classes_in_light_version=True) #print("inside 3 ", time.time()-t_in) #plt.imshow(prediction_regions_org[:,:,0]) #plt.show() @@ -2297,7 +2345,12 @@ class Eynollah: #plt.show() #print("inside 4 ", time.time()-t_in) self.logger.debug("exit get_regions_light_v") - return text_regions_p_true, erosion_hurts, polygons_seplines, textline_mask_tot_ea, img_bin, confidence_matrix + return (text_regions_p_true, + erosion_hurts, + polygons_seplines, + textline_mask_tot_ea, + img_bin, + confidence_matrix) else: img_bin = resize_image(img_bin,img_height_h, img_width_h ) self.logger.debug("exit get_regions_light_v") @@ -2417,14 +2470,10 @@ class Eynollah: #img = resize_image(img_org, int(img_org.shape[0]*1), int(img_org.shape[1]*1)) #prediction_regions_org = self.do_prediction(True, img, self.model_region) - #prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) - #prediction_regions_org = prediction_regions_org[:,:,0] - #prediction_regions_org[(prediction_regions_org[:,:] == 1) & (mask_zeros_y[:,:] == 1)]=0 - mask_lines_only = (prediction_regions_org == 3)*1 mask_texts_only = (prediction_regions_org == 1)*1 mask_images_only= (prediction_regions_org == 2)*1 @@ -2843,7 +2892,8 @@ class Eynollah: contours_new.append(contours_sep[ji]) if num_col_classifier>=2: only_recent_contour_image = np.zeros((layout.shape[0],layout.shape[1])) - only_recent_contour_image= cv2.fillPoly(only_recent_contour_image, pts=[contours_sep[ji]], color=(1,1,1)) + only_recent_contour_image= cv2.fillPoly(only_recent_contour_image, + pts=[contours_sep[ji]], color=(1,1,1)) table_pixels_masked_from_early_pre = only_recent_contour_image * table_prediction_early iou_in = 100. * table_pixels_masked_from_early_pre.sum() / only_recent_contour_image.sum() #print(iou_in,'iou_in_in1') @@ -2928,9 +2978,11 @@ class Eynollah: contours,hirarchy=cv2.findContours(thresh.copy(), cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) if indiv==pixel_table: - main_contours = filter_contours_area_of_image_tables(thresh, contours, hirarchy, max_area = 1, min_area = 0.001) + main_contours = filter_contours_area_of_image_tables(thresh, contours, hirarchy, + max_area=1, min_area=0.001) else: - main_contours = filter_contours_area_of_image_tables(thresh, contours, hirarchy, max_area = 1, min_area = min_area) + main_contours = filter_contours_area_of_image_tables(thresh, contours, hirarchy, + max_area=1, min_area=min_area) img_comm = cv2.fillPoly(img_comm, pts = main_contours, color = (indiv, indiv, indiv)) img_comm = img_comm.astype(np.uint8) @@ -2965,8 +3017,14 @@ class Eynollah: y_min_main_line ,y_max_main_line=find_features_of_contours(contours_line) y_min_main_tab ,y_max_main_tab=find_features_of_contours(contours_tab) - cx_tab_m_text,cy_tab_m_text ,x_min_tab_m_text , x_max_tab_m_text, y_min_tab_m_text ,y_max_tab_m_text, _= find_new_features_of_contours(contours_table_m_text) - cx_tabl,cy_tabl ,x_min_tabl , x_max_tabl, y_min_tabl ,y_max_tabl,_= find_new_features_of_contours(contours_tab) + (cx_tab_m_text, cy_tab_m_text, + x_min_tab_m_text, x_max_tab_m_text, + y_min_tab_m_text, y_max_tab_m_text, + _) = find_new_features_of_contours(contours_table_m_text) + (cx_tabl, cy_tabl, + x_min_tabl, x_max_tabl, + y_min_tabl, y_max_tabl, + _) = find_new_features_of_contours(contours_tab) if len(y_min_main_tab )>0: y_down_tabs=[] @@ -2976,9 +3034,15 @@ class Eynollah: y_down_tab=[] y_up_tab=[] for i_l in range(len(y_min_main_line)): - if y_min_main_tab[i_t]>y_min_main_line[i_l] and y_max_main_tab[i_t]>y_min_main_line[i_l] and y_min_main_tab[i_t]>y_max_main_line[i_l] and y_max_main_tab[i_t]>y_min_main_line[i_l]: + if (y_min_main_tab[i_t] > y_min_main_line[i_l] and + y_max_main_tab[i_t] > y_min_main_line[i_l] and + y_min_main_tab[i_t] > y_max_main_line[i_l] and + y_max_main_tab[i_t] > y_min_main_line[i_l]): pass - elif y_min_main_tab[i_t]= SLOPE_THRESHOLD: _, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = \ - rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, table_prediction, slope_deskew) + rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, + table_prediction, slope_deskew) - text_regions_p_1_n = resize_image(text_regions_p_1_n,text_regions_p.shape[0],text_regions_p.shape[1]) - textline_mask_tot_d = resize_image(textline_mask_tot_d,text_regions_p.shape[0],text_regions_p.shape[1]) - table_prediction_n = resize_image(table_prediction_n,text_regions_p.shape[0],text_regions_p.shape[1]) + text_regions_p_1_n = resize_image(text_regions_p_1_n, + text_regions_p.shape[0], + text_regions_p.shape[1]) + textline_mask_tot_d = resize_image(textline_mask_tot_d, + text_regions_p.shape[0], + text_regions_p.shape[1]) + table_prediction_n = resize_image(table_prediction_n, + text_regions_p.shape[0], + text_regions_p.shape[1]) regions_without_separators_d = (text_regions_p_1_n[:,:] == 1)*1 regions_without_separators_d[table_prediction_n[:,:] == 1] = 1 @@ -3502,11 +3580,18 @@ class Eynollah: else: if np.abs(slope_deskew) >= SLOPE_THRESHOLD: _, textline_mask_tot_d, text_regions_p_1_n, table_prediction_n = \ - rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, table_prediction, slope_deskew) + rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, + table_prediction, slope_deskew) - text_regions_p_1_n = resize_image(text_regions_p_1_n,text_regions_p.shape[0],text_regions_p.shape[1]) - textline_mask_tot_d = resize_image(textline_mask_tot_d,text_regions_p.shape[0],text_regions_p.shape[1]) - table_prediction_n = resize_image(table_prediction_n,text_regions_p.shape[0],text_regions_p.shape[1]) + text_regions_p_1_n = resize_image(text_regions_p_1_n, + text_regions_p.shape[0], + text_regions_p.shape[1]) + textline_mask_tot_d = resize_image(textline_mask_tot_d, + text_regions_p.shape[0], + text_regions_p.shape[1]) + table_prediction_n = resize_image(table_prediction_n, + text_regions_p.shape[0], + text_regions_p.shape[1]) regions_without_separators_d = (text_regions_p_1_n[:,:] == 1)*1 regions_without_separators_d[table_prediction_n[:,:] == 1] = 1 @@ -3565,7 +3650,8 @@ class Eynollah: pixel_line = 3 img_revised_tab2 = self.add_tables_heuristic_to_layout( - text_regions_p_tables, boxes_d, 0, splitter_y_new_d, peaks_neg_tot_tables_d, text_regions_p_tables, + text_regions_p_tables, boxes_d, 0, splitter_y_new_d, + peaks_neg_tot_tables_d, text_regions_p_tables, num_col_classifier, 0.000005, pixel_line) img_revised_tab2_d,_ = self.check_iou_of_bounding_box_and_contour_for_tables( @@ -3574,8 +3660,9 @@ class Eynollah: img_revised_tab2_d_rotated = np.round(img_revised_tab2_d_rotated) img_revised_tab2_d_rotated = img_revised_tab2_d_rotated.astype(np.int8) - - img_revised_tab2_d_rotated = resize_image(img_revised_tab2_d_rotated, text_regions_p.shape[0], text_regions_p.shape[1]) + img_revised_tab2_d_rotated = resize_image(img_revised_tab2_d_rotated, + text_regions_p.shape[0], + text_regions_p.shape[1]) if np.abs(slope_deskew) < 0.13: img_revised_tab = np.copy(img_revised_tab2[:,:,0]) @@ -3646,7 +3733,8 @@ class Eynollah: ##else: ##regions_fully_np = filter_small_drop_capitals_from_no_patch_layout(regions_fully_np, text_regions_p) - ###regions_fully = boosting_headers_by_longshot_region_segmentation(regions_fully, regions_fully_np, img_only_regions) + ###regions_fully = boosting_headers_by_longshot_region_segmentation(regions_fully, + ### regions_fully_np, img_only_regions) # plt.imshow(regions_fully[:,:,0]) # plt.show() text_regions_p[:, :][regions_fully[:, :, 0] == drop_capital_label_in_full_layout_model] = 4 @@ -3709,7 +3797,10 @@ class Eynollah: min_cont_size_to_be_dilated = 10 if len(contours_only_text_parent)>min_cont_size_to_be_dilated and self.light_version: - cx_conts, cy_conts, x_min_conts, x_max_conts, y_min_conts, y_max_conts, _ = find_new_features_of_contours(contours_only_text_parent) + (cx_conts, cy_conts, + x_min_conts, x_max_conts, + y_min_conts, y_max_conts, + _) = find_new_features_of_contours(contours_only_text_parent) args_cont_located = np.array(range(len(contours_only_text_parent))) diff_y_conts = np.abs(y_max_conts[:]-y_min_conts) @@ -3724,15 +3815,31 @@ class Eynollah: args_cont_located_excluded = args_cont_located[diff_x_ratio>=1.3] args_cont_located_included = args_cont_located[diff_x_ratio<1.3] - contours_only_text_parent_excluded = [contours_only_text_parent[ind] for ind in range(len(contours_only_text_parent)) if diff_x_ratio[ind]>=1.3]#contours_only_text_parent[diff_x_ratio>=1.3] - contours_only_text_parent_included = [contours_only_text_parent[ind] for ind in range(len(contours_only_text_parent)) if diff_x_ratio[ind]<1.3]#contours_only_text_parent[diff_x_ratio<1.3] + contours_only_text_parent_excluded = [contours_only_text_parent[ind] + #contours_only_text_parent[diff_x_ratio>=1.3] + for ind in range(len(contours_only_text_parent)) + if diff_x_ratio[ind]>=1.3] + contours_only_text_parent_included = [contours_only_text_parent[ind] + #contours_only_text_parent[diff_x_ratio<1.3] + for ind in range(len(contours_only_text_parent)) + if diff_x_ratio[ind]<1.3] - - cx_conts_excluded = [cx_conts[ind] for ind in range(len(cx_conts)) if diff_x_ratio[ind]>=1.3]#cx_conts[diff_x_ratio>=1.3] - cx_conts_included = [cx_conts[ind] for ind in range(len(cx_conts)) if diff_x_ratio[ind]<1.3]#cx_conts[diff_x_ratio<1.3] - - cy_conts_excluded = [cy_conts[ind] for ind in range(len(cy_conts)) if diff_x_ratio[ind]>=1.3]#cy_conts[diff_x_ratio>=1.3] - cy_conts_included = [cy_conts[ind] for ind in range(len(cy_conts)) if diff_x_ratio[ind]<1.3]#cy_conts[diff_x_ratio<1.3] + cx_conts_excluded = [cx_conts[ind] + #cx_conts[diff_x_ratio>=1.3] + for ind in range(len(cx_conts)) + if diff_x_ratio[ind]>=1.3] + cx_conts_included = [cx_conts[ind] + #cx_conts[diff_x_ratio<1.3] + for ind in range(len(cx_conts)) + if diff_x_ratio[ind]<1.3] + cy_conts_excluded = [cy_conts[ind] + #cy_conts[diff_x_ratio>=1.3] + for ind in range(len(cy_conts)) + if diff_x_ratio[ind]>=1.3] + cy_conts_included = [cy_conts[ind] + #cy_conts[diff_x_ratio<1.3] + for ind in range(len(cy_conts)) + if diff_x_ratio[ind]<1.3] #print(diff_x_ratio, 'ratio') text_regions_p = text_regions_p.astype('uint8') @@ -3754,7 +3861,10 @@ class Eynollah: contours_only_dilated, hir_on_text_dilated = return_contours_of_image(text_regions_p_textregions_dilated) contours_only_dilated = return_parent_contours(contours_only_dilated, hir_on_text_dilated) - indexes_of_located_cont, center_x_coordinates_of_located, center_y_coordinates_of_located = self.return_indexes_of_contours_loctaed_inside_another_list_of_contours(contours_only_dilated, contours_only_text_parent_included, cx_conts_included, cy_conts_included, args_cont_located_included) + indexes_of_located_cont, center_x_coordinates_of_located, center_y_coordinates_of_located = \ + self.return_indexes_of_contours_located_inside_another_list_of_contours( + contours_only_dilated, contours_only_text_parent_included, + cx_conts_included, cy_conts_included, args_cont_located_included) if len(args_cont_located_excluded)>0: @@ -3767,7 +3877,7 @@ class Eynollah: flattened_array = np.concatenate([arr.ravel() for arr in array_list]) #print(len( np.unique(flattened_array)), 'indexes_of_located_cont uniques') - missing_textregions = list( set(np.array(range(len(contours_only_text_parent))) ) - set(np.unique(flattened_array)) ) + missing_textregions = list( set(range(len(contours_only_text_parent))) - set(flattened_array) ) #print(missing_textregions, 'missing_textregions') for ind in missing_textregions: @@ -3887,12 +3997,13 @@ class Eynollah: region_with_curr_order = ordered[ind] if region_with_curr_order < len(contours_only_dilated): if np.isscalar(indexes_of_located_cont[region_with_curr_order]): - org_contours_indexes = org_contours_indexes + [indexes_of_located_cont[region_with_curr_order]] + org_contours_indexes.extend([indexes_of_located_cont[region_with_curr_order]]) else: arg_sort_located_cont = np.argsort(center_y_coordinates_of_located[region_with_curr_order]) - org_contours_indexes = org_contours_indexes + list(np.array(indexes_of_located_cont[region_with_curr_order])[arg_sort_located_cont]) ##org_contours_indexes + list ( + org_contours_indexes.extend( + np.array(indexes_of_located_cont[region_with_curr_order])[arg_sort_located_cont]) else: - org_contours_indexes = org_contours_indexes + [indexes_of_located_cont[region_with_curr_order]] + org_contours_indexes.extend([indexes_of_located_cont[region_with_curr_order]]) region_ids = ['region_%04d' % i for i in range(len(co_text_all_org))] return org_contours_indexes, region_ids @@ -3915,17 +4026,13 @@ class Eynollah: if len(peaks_real)>70: print(len(peaks_real), 'len(peaks_real)') - peaks_real = peaks_real[(peaks_realwidth1)] arg_sort = np.argsort(sum_smoothed[peaks_real]) - arg_sort4 =arg_sort[::-1][:4] - peaks_sort_4 = peaks_real[arg_sort][::-1][:4] argsort_sorted = np.argsort(peaks_sort_4) - first_4_sorted = peaks_sort_4[argsort_sorted] y_4_sorted = sum_smoothed[peaks_real][arg_sort4[argsort_sorted]] #print(first_4_sorted,'first_4_sorted') @@ -4109,7 +4216,8 @@ class Eynollah: return x_differential_new - def filter_contours_inside_a_bigger_one(self,contours, contours_d_ordered, image, marginal_cnts=None, type_contour="textregion"): + def filter_contours_inside_a_bigger_one(self, contours, contours_d_ordered, image, + marginal_cnts=None, type_contour="textregion"): if type_contour=="textregion": areas = [cv2.contourArea(contours[j]) for j in range(len(contours))] area_tot = image.shape[0]*image.shape[1] @@ -4129,7 +4237,10 @@ class Eynollah: results = [cv2.pointPolygonTest(contours[ind], (cx_main[ind_small], cy_main[ind_small]), False) for ind in contours_index_big] if marginal_cnts: - results_marginal = [cv2.pointPolygonTest(marginal_cnts[ind], (cx_main[ind_small], cy_main[ind_small]), False) + results_marginal = [cv2.pointPolygonTest(marginal_cnts[ind], + (cx_main[ind_small], + cy_main[ind_small]), + False) for ind in range(len(marginal_cnts))] results_marginal = np.array(results_marginal) @@ -4184,7 +4295,10 @@ class Eynollah: args_with_bigger_area = np.array(args_all)[areas_without > 1.5*area_of_con_interest] if len(args_with_bigger_area)>0: - results = [cv2.pointPolygonTest(contours_txtline_of_all_textregions[ind], (cx_main_tot[ij], cy_main_tot[ij]), False) + results = [cv2.pointPolygonTest(contours_txtline_of_all_textregions[ind], + (cx_main_tot[ij], + cy_main_tot[ij]), + False) for ind in args_with_bigger_area ] results = np.array(results) if np.any(results==1): @@ -4196,14 +4310,16 @@ class Eynollah: textregion_index_to_del = np.array(textregion_index_to_del) textline_in_textregion_index_to_del = np.array(textline_in_textregion_index_to_del) for ind_u_a_trs in np.unique(textregion_index_to_del): - textline_in_textregion_index_to_del_ind = textline_in_textregion_index_to_del[textregion_index_to_del==ind_u_a_trs] + textline_in_textregion_index_to_del_ind = \ + textline_in_textregion_index_to_del[textregion_index_to_del==ind_u_a_trs] textline_in_textregion_index_to_del_ind = np.sort(textline_in_textregion_index_to_del_ind)[::-1] for ittrd in textline_in_textregion_index_to_del_ind: contours[ind_u_a_trs].pop(ittrd) return contours - def return_indexes_of_contours_loctaed_inside_another_list_of_contours(self, contours, contours_loc, cx_main_loc, cy_main_loc, indexes_loc): + def return_indexes_of_contours_located_inside_another_list_of_contours( + self, contours, contours_loc, cx_main_loc, cy_main_loc, indexes_loc): indexes_of_located_cont = [] center_x_coordinates_of_located = [] center_y_coordinates_of_located = [] @@ -4217,7 +4333,8 @@ class Eynollah: for ind in range(len(cy_main_loc)) ] results = np.array(results) indexes_in = np.where((results == 0) | (results == 1)) - indexes = indexes_loc[indexes_in]# [(results == 0) | (results == 1)]#np.where((results == 0) | (results == 1)) + # [(results == 0) | (results == 1)]#np.where((results == 0) | (results == 1)) + indexes = indexes_loc[indexes_in] indexes_of_located_cont.append(indexes) center_x_coordinates_of_located.append(np.array(cx_main_loc)[indexes_in] ) @@ -4247,7 +4364,10 @@ class Eynollah: ###contours_with_textline = [] ###for ind_tr, con_tr in enumerate(contours): - ###results = [cv2.pointPolygonTest(con_tr, (cx_main_textline[index_textline_con], cy_main_textline[index_textline_con]), False) + ###results = [cv2.pointPolygonTest(con_tr, + ### (cx_main_textline[index_textline_con], + ### cy_main_textline[index_textline_con]), + ### False) ### for index_textline_con in range(len(contours_txtline_of_all_textregions)) ] ###results = np.array(results) ###if np.any(results==1): @@ -4300,7 +4420,9 @@ class Eynollah: return (slopes_rem, all_found_textline_polygons_rem, boxes_text_rem, txt_con_org_rem, contours_only_text_parent_rem, index_by_text_par_con_rem_sort) - def separate_marginals_to_left_and_right_and_order_from_top_to_down(self, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes_marginals, mid_point_of_page_width): + def separate_marginals_to_left_and_right_and_order_from_top_to_down( + self, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, + slopes_marginals, mid_point_of_page_width): cx_marg, cy_marg, _, _, _, _, _ = find_new_features_of_contours( polygons_of_marginals) @@ -4310,8 +4432,10 @@ class Eynollah: poly_marg_left = list( np.array(polygons_of_marginals)[cx_marg < mid_point_of_page_width] ) poly_marg_right = list( np.array(polygons_of_marginals)[cx_marg >= mid_point_of_page_width] ) - all_found_textline_polygons_marginals_left = list( np.array(all_found_textline_polygons_marginals)[cx_marg < mid_point_of_page_width] ) - all_found_textline_polygons_marginals_right = list( np.array(all_found_textline_polygons_marginals)[cx_marg >= mid_point_of_page_width] ) + all_found_textline_polygons_marginals_left = \ + list( np.array(all_found_textline_polygons_marginals)[cx_marg < mid_point_of_page_width] ) + all_found_textline_polygons_marginals_right = \ + list( np.array(all_found_textline_polygons_marginals)[cx_marg >= mid_point_of_page_width] ) all_box_coord_marginals_left = list( np.array(all_box_coord_marginals)[cx_marg < mid_point_of_page_width] ) all_box_coord_marginals_right = list( np.array(all_box_coord_marginals)[cx_marg >= mid_point_of_page_width] ) @@ -4322,20 +4446,38 @@ class Eynollah: cy_marg_left = cy_marg[cx_marg < mid_point_of_page_width] cy_marg_right = cy_marg[cx_marg >= mid_point_of_page_width] - ordered_left_marginals = [poly for _, poly in sorted(zip(cy_marg_left, poly_marg_left), key=lambda x: x[0])] - ordered_right_marginals = [poly for _, poly in sorted(zip(cy_marg_right, poly_marg_right), key=lambda x: x[0])] + ordered_left_marginals = [poly for _, poly in sorted(zip(cy_marg_left, poly_marg_left), + key=lambda x: x[0])] + ordered_right_marginals = [poly for _, poly in sorted(zip(cy_marg_right, poly_marg_right), + key=lambda x: x[0])] - ordered_left_marginals_textline = [poly for _, poly in sorted(zip(cy_marg_left, all_found_textline_polygons_marginals_left), key=lambda x: x[0])] - ordered_right_marginals_textline = [poly for _, poly in sorted(zip(cy_marg_right, all_found_textline_polygons_marginals_right), key=lambda x: x[0])] + ordered_left_marginals_textline = [poly for _, poly in sorted(zip(cy_marg_left, + all_found_textline_polygons_marginals_left), + key=lambda x: x[0])] + ordered_right_marginals_textline = [poly for _, poly in sorted(zip(cy_marg_right, + all_found_textline_polygons_marginals_right), + key=lambda x: x[0])] - ordered_left_marginals_bbox = [poly for _, poly in sorted(zip(cy_marg_left, all_box_coord_marginals_left), key=lambda x: x[0])] - ordered_right_marginals_bbox = [poly for _, poly in sorted(zip(cy_marg_right, all_box_coord_marginals_right), key=lambda x: x[0])] + ordered_left_marginals_bbox = [poly for _, poly in sorted(zip(cy_marg_left, + all_box_coord_marginals_left), + key=lambda x: x[0])] + ordered_right_marginals_bbox = [poly for _, poly in sorted(zip(cy_marg_right, + all_box_coord_marginals_right), + key=lambda x: x[0])] - ordered_left_slopes_marginals = [poly for _, poly in sorted(zip(cy_marg_left, slopes_marg_left), key=lambda x: x[0])] - ordered_right_slopes_marginals = [poly for _, poly in sorted(zip(cy_marg_right, slopes_marg_right), key=lambda x: x[0])] + ordered_left_slopes_marginals = [poly for _, poly in sorted(zip(cy_marg_left, slopes_marg_left), + key=lambda x: x[0])] + ordered_right_slopes_marginals = [poly for _, poly in sorted(zip(cy_marg_right, slopes_marg_right), + key=lambda x: x[0])] - return ordered_left_marginals, ordered_right_marginals, ordered_left_marginals_textline, ordered_right_marginals_textline, ordered_left_marginals_bbox, ordered_right_marginals_bbox, ordered_left_slopes_marginals, ordered_right_slopes_marginals - + return (ordered_left_marginals, + ordered_right_marginals, + ordered_left_marginals_textline, + ordered_right_marginals_textline, + ordered_left_marginals_bbox, + ordered_right_marginals_bbox, + ordered_left_slopes_marginals, + ordered_right_slopes_marginals) def run(self, overwrite: bool = False, @@ -4420,9 +4562,11 @@ class Eynollah: self.logger.info(f"Processing file: {self.writer.image_filename}") self.logger.info("Step 1/5: Image Enhancement") - img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement(self.light_version) + img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = \ + self.run_enhancement(self.light_version) - self.logger.info(f"Image: {self.image.shape[1]}x{self.image.shape[0]}, {self.dpi} DPI, {num_col_classifier} columns") + self.logger.info(f"Image: {self.image.shape[1]}x{self.image.shape[0]}, " + f"{self.dpi} DPI, {num_col_classifier} columns") if is_image_enhanced: self.logger.info("Enhancement applied") @@ -4433,7 +4577,8 @@ class Eynollah: if self.extract_only_images: self.logger.info("Step 2/5: Image Extraction Mode") - text_regions_p_1, erosion_hurts, polygons_seplines, polygons_of_images, image_page, page_coord, cont_page = \ + text_regions_p_1, erosion_hurts, polygons_seplines, polygons_of_images, \ + image_page, page_coord, cont_page = \ self.get_regions_light_v_extract_only_images(img_res, is_image_enhanced, num_col_classifier) pcgts = self.writer.build_pagexml_no_full_layout( [], page_coord, [], [], [], [], @@ -4465,20 +4610,20 @@ class Eynollah: M_main_tot = [cv2.moments(all_found_textline_polygons[j]) for j in range(len(all_found_textline_polygons))] - w_h_textlines = [cv2.boundingRect(all_found_textline_polygons[j])[2:] for j in range(len(all_found_textline_polygons))] + w_h_textlines = [cv2.boundingRect(all_found_textline_polygons[j])[2:] + for j in range(len(all_found_textline_polygons))] w_h_textlines = [w_h_textlines[j][0] / float(w_h_textlines[j][1]) for j in range(len(w_h_textlines))] cx_main_tot = [(M_main_tot[j]["m10"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] cy_main_tot = [(M_main_tot[j]["m01"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] - all_found_textline_polygons = self.get_textlines_of_a_textregion_sorted(all_found_textline_polygons, cx_main_tot, cy_main_tot, w_h_textlines)#all_found_textline_polygons[::-1] - - all_found_textline_polygons=[ all_found_textline_polygons ] - + all_found_textline_polygons = self.get_textlines_of_a_textregion_sorted( + #all_found_textline_polygons[::-1] + all_found_textline_polygons, cx_main_tot, cy_main_tot, w_h_textlines) + all_found_textline_polygons = [ all_found_textline_polygons ] all_found_textline_polygons = dilate_textline_contours(all_found_textline_polygons) all_found_textline_polygons = self.filter_contours_inside_a_bigger_one( all_found_textline_polygons, None, textline_mask_tot_ea, type_contour="textline") - order_text_new = [0] slopes =[0] id_of_texts_tot =['region_0001'] @@ -4498,15 +4643,23 @@ class Eynollah: if self.ocr and not self.tr: gc.collect() - ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons, self.prediction_model, self.b_s_ocr, self.num_to_char, textline_light=True) + ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines( + image_page, all_found_textline_polygons, self.prediction_model, + self.b_s_ocr, self.num_to_char, textline_light=True) else: ocr_all_textlines = None pcgts = self.writer.build_pagexml_no_full_layout( cont_page, page_coord, order_text_new, id_of_texts_tot, - all_found_textline_polygons, page_coord, polygons_of_images, polygons_of_marginals_left, polygons_of_marginals_right, - all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes, slopes_marginals_left, slopes_marginals_right, - cont_page, polygons_seplines, contours_tables, ocr_all_textlines=ocr_all_textlines, conf_contours_textregion=conf_contours_textregions, skip_layout_reading_order=self.skip_layout_and_reading_order) + all_found_textline_polygons, page_coord, polygons_of_images, + polygons_of_marginals_left, polygons_of_marginals_right, + all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, + all_box_coord_marginals_left, all_box_coord_marginals_right, + slopes, slopes_marginals_left, slopes_marginals_right, + cont_page, polygons_seplines, contours_tables, + ocr_all_textlines=ocr_all_textlines, + conf_contours_textregion=conf_contours_textregions, + skip_layout_reading_order=self.skip_layout_and_reading_order) self.logger.info("Basic processing complete") return pcgts @@ -4516,7 +4669,8 @@ class Eynollah: if self.light_version: self.logger.info("Using light version processing") - text_regions_p_1 ,erosion_hurts, polygons_seplines, textline_mask_tot_ea, img_bin_light, confidence_matrix = \ + text_regions_p_1 ,erosion_hurts, polygons_seplines, textline_mask_tot_ea, \ + img_bin_light, confidence_matrix = \ self.get_regions_light_v(img_res, is_image_enhanced, num_col_classifier) #print("text region early -2 in %.1fs", time.time() - t0) @@ -4528,7 +4682,6 @@ class Eynollah: img_h_new = img_w_new * textline_mask_tot_ea.shape[0] // textline_mask_tot_ea.shape[1] textline_mask_tot_ea_deskew = resize_image(textline_mask_tot_ea,img_h_new, img_w_new ) - slope_deskew = self.run_deskew(textline_mask_tot_ea_deskew) else: slope_deskew = self.run_deskew(textline_mask_tot_ea) @@ -4537,7 +4690,8 @@ class Eynollah: num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, \ text_regions_p_1, cont_page, table_prediction, textline_mask_tot_ea, img_bin_light = \ self.run_graphics_and_columns_light(text_regions_p_1, textline_mask_tot_ea, - num_col_classifier, num_column_is_classified, erosion_hurts, img_bin_light) + num_col_classifier, num_column_is_classified, + erosion_hurts, img_bin_light) #self.logger.info("run graphics %.1fs ", time.time() - t1t) #print("text region early -3 in %.1fs", time.time() - t0) textline_mask_tot_ea_org = np.copy(textline_mask_tot_ea) @@ -4552,7 +4706,8 @@ class Eynollah: t1 = time.time() num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, \ text_regions_p_1, cont_page, table_prediction = \ - self.run_graphics_and_columns(text_regions_p_1, num_col_classifier, num_column_is_classified, erosion_hurts) + self.run_graphics_and_columns(text_regions_p_1, num_col_classifier, num_column_is_classified, + erosion_hurts) self.logger.info(f"Graphics detection took {time.time() - t1:.1f}s") #self.logger.info('cont_page %s', cont_page) #plt.imshow(table_prediction) @@ -4617,13 +4772,15 @@ class Eynollah: ## birdan sora chock chakir t1 = time.time() if not self.full_layout: - polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, \ + polygons_of_images, img_revised_tab, text_regions_p_1_n, \ + textline_mask_tot_d, regions_without_separators_d, \ boxes, boxes_d, polygons_of_marginals, contours_tables = \ self.run_boxes_no_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, table_prediction, erosion_hurts) ###polygons_of_marginals = dilate_textregion_contours(polygons_of_marginals) else: - polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, \ + polygons_of_images, img_revised_tab, text_regions_p_1_n, \ + textline_mask_tot_d, regions_without_separators_d, \ regions_fully, regions_without_separators, polygons_of_marginals, contours_tables = \ self.run_boxes_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, img_only_regions, table_prediction, erosion_hurts, @@ -4690,8 +4847,10 @@ class Eynollah: areas_cnt_text_d = self.return_list_of_contours_with_desired_order( areas_cnt_text_d, index_con_parents_d) - cx_bigest_d_big, cy_biggest_d_big, _, _, _, _, _ = find_new_features_of_contours([contours_biggest_d]) - cx_bigest_d, cy_biggest_d, _, _, _, _, _ = find_new_features_of_contours(contours_only_text_parent_d) + cx_bigest_d_big, cy_biggest_d_big, _, _, _, _, _ = \ + find_new_features_of_contours([contours_biggest_d]) + cx_bigest_d, cy_biggest_d, _, _, _, _, _ = \ + find_new_features_of_contours(contours_only_text_parent_d) try: if len(cx_bigest_d) >= 5: cx_bigest_d_last5 = cx_bigest_d[-5:] @@ -4751,13 +4910,19 @@ class Eynollah: pcgts = self.writer.build_pagexml_full_layout( [], [], page_coord, [], [], [], [], [], [], polygons_of_images, contours_tables, [], - polygons_of_marginals, polygons_of_marginals, empty_marginals, empty_marginals, empty_marginals, empty_marginals, [], [], [], [], + polygons_of_marginals, polygons_of_marginals, + empty_marginals, empty_marginals, + empty_marginals, empty_marginals, + [], [], [], [], cont_page, polygons_seplines) else: pcgts = self.writer.build_pagexml_no_full_layout( [], page_coord, [], [], [], [], polygons_of_images, - polygons_of_marginals, polygons_of_marginals, empty_marginals, empty_marginals, empty_marginals, empty_marginals, [], [], [], + polygons_of_marginals, polygons_of_marginals, + empty_marginals, empty_marginals, + empty_marginals, empty_marginals, + [], [], [], cont_page, polygons_seplines, contours_tables) return pcgts @@ -4767,7 +4932,8 @@ class Eynollah: if self.light_version: contours_only_text_parent = dilate_textregion_contours(contours_only_text_parent) contours_only_text_parent , contours_only_text_parent_d_ordered = self.filter_contours_inside_a_bigger_one( - contours_only_text_parent, contours_only_text_parent_d_ordered, text_only, marginal_cnts=polygons_of_marginals) + contours_only_text_parent, contours_only_text_parent_d_ordered, text_only, + marginal_cnts=polygons_of_marginals) #print("text region early 3.5 in %.1fs", time.time() - t0) txt_con_org , conf_contours_textregions = get_textregion_contours_in_org_image_light( contours_only_text_parent, self.image, confidence_matrix) @@ -4793,19 +4959,26 @@ class Eynollah: polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea_org, boxes_marginals, slope_deskew) - #slopes, all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, index_by_text_par_con = \ + #slopes, all_found_textline_polygons, boxes_text, txt_con_org, \ + # contours_only_text_parent, index_by_text_par_con = \ # self.delete_regions_without_textlines(slopes, all_found_textline_polygons, # boxes_text, txt_con_org, contours_only_text_parent, index_by_text_par_con) - #slopes_marginals, all_found_textline_polygons_marginals, boxes_marginals, polygons_of_marginals, polygons_of_marginals, _ = \ + #slopes_marginals, all_found_textline_polygons_marginals, boxes_marginals, \ + # polygons_of_marginals, polygons_of_marginals, _ = \ # self.delete_regions_without_textlines(slopes_marginals, all_found_textline_polygons_marginals, - # boxes_marginals, polygons_of_marginals, polygons_of_marginals, np.array(range(len(polygons_of_marginals)))) - all_found_textline_polygons = dilate_textline_contours(all_found_textline_polygons) + # boxes_marginals, polygons_of_marginals, polygons_of_marginals, + # np.array(range(len(polygons_of_marginals)))) + all_found_textline_polygons = dilate_textline_contours( + all_found_textline_polygons) all_found_textline_polygons = self.filter_contours_inside_a_bigger_one( all_found_textline_polygons, None, textline_mask_tot_ea_org, type_contour="textline") - all_found_textline_polygons_marginals = dilate_textline_contours(all_found_textline_polygons_marginals) - contours_only_text_parent, txt_con_org, conf_contours_textregions, all_found_textline_polygons, contours_only_text_parent_d_ordered, \ + all_found_textline_polygons_marginals = dilate_textline_contours( + all_found_textline_polygons_marginals) + contours_only_text_parent, txt_con_org, conf_contours_textregions, \ + all_found_textline_polygons, contours_only_text_parent_d_ordered, \ index_by_text_par_con = self.filter_contours_without_textline_inside( - contours_only_text_parent, txt_con_org, all_found_textline_polygons, contours_only_text_parent_d_ordered, conf_contours_textregions) + contours_only_text_parent, txt_con_org, all_found_textline_polygons, + contours_only_text_parent_d_ordered, conf_contours_textregions) else: textline_mask_tot_ea = cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=1) all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, all_box_coord, \ @@ -4847,7 +5020,13 @@ class Eynollah: all_found_textline_polygons_marginals, textline_mask_tot_ea, num_col_classifier) mid_point_of_page_width = text_regions_p.shape[1] / 2. - polygons_of_marginals_left, polygons_of_marginals_right, all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes_marginals_left, slopes_marginals_right = self.separate_marginals_to_left_and_right_and_order_from_top_to_down(polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes_marginals, mid_point_of_page_width) + (polygons_of_marginals_left, polygons_of_marginals_right, + all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, + all_box_coord_marginals_left, all_box_coord_marginals_right, + slopes_marginals_left, slopes_marginals_right) = \ + self.separate_marginals_to_left_and_right_and_order_from_top_to_down( + polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, + slopes_marginals, mid_point_of_page_width) #print(len(polygons_of_marginals), len(ordered_left_marginals), len(ordered_right_marginals), 'marginals ordred') if self.full_layout: @@ -4871,40 +5050,41 @@ class Eynollah: all_found_textline_polygons, all_found_textline_polygons_h, slopes, slopes_h, \ contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered, \ conf_contours_textregions, conf_contours_textregions_h = fun( - text_regions_p, regions_fully, contours_only_text_parent, - all_box_coord, all_found_textline_polygons, slopes, contours_only_text_parent_d_ordered, conf_contours_textregions) + text_regions_p, regions_fully, contours_only_text_parent, + all_box_coord, all_found_textline_polygons, + slopes, contours_only_text_parent_d_ordered, conf_contours_textregions) if self.plotter: self.plotter.save_plot_of_layout(text_regions_p, image_page) self.plotter.save_plot_of_layout_all(text_regions_p, image_page) - pixel_img = 4 - polygons_of_drop_capitals = return_contours_of_interested_region_by_min_size(text_regions_p, pixel_img) + label_img = 4 + polygons_of_drop_capitals = return_contours_of_interested_region_by_min_size(text_regions_p, label_img) ##all_found_textline_polygons = adhere_drop_capital_region_into_corresponding_textline( ##text_regions_p, polygons_of_drop_capitals, contours_only_text_parent, contours_only_text_parent_h, ##all_box_coord, all_box_coord_h, all_found_textline_polygons, all_found_textline_polygons_h, ##kernel=KERNEL, curved_line=self.curved_line, textline_light=self.textline_light) if not self.reading_order_machine_based: - pixel_seps = 6 + label_seps = 6 if not self.headers_off: if np.abs(slope_deskew) < SLOPE_THRESHOLD: num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document( np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), - num_col_classifier, self.tables, pixel_seps, contours_only_text_parent_h) + num_col_classifier, self.tables, label_seps, contours_only_text_parent_h) else: _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document( np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), - num_col_classifier, self.tables, pixel_seps, contours_only_text_parent_h_d_ordered) + num_col_classifier, self.tables, label_seps, contours_only_text_parent_h_d_ordered) elif self.headers_off: if np.abs(slope_deskew) < SLOPE_THRESHOLD: num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document( np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), - num_col_classifier, self.tables, pixel_seps) + num_col_classifier, self.tables, label_seps) else: _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document( np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), - num_col_classifier, self.tables, pixel_seps) + num_col_classifier, self.tables, label_seps) if num_col_classifier >= 3: if np.abs(slope_deskew) < SLOPE_THRESHOLD: @@ -4949,7 +5129,8 @@ class Eynollah: contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot) else: order_text_new, id_of_texts_tot = self.do_order_of_regions( - contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered, boxes_d, textline_mask_tot_d) + contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered, + boxes_d, textline_mask_tot_d) self.logger.info(f"Detection of reading order took {time.time() - t_order:.1f}s") if self.ocr and not self.tr: @@ -4962,27 +5143,37 @@ class Eynollah: gc.collect() if len(all_found_textline_polygons)>0: - ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines( + image_page, all_found_textline_polygons, self.prediction_model, + self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) else: ocr_all_textlines = None if all_found_textline_polygons_marginals_left and len(all_found_textline_polygons_marginals_left)>0: - ocr_all_textlines_marginals_left = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons_marginals_left, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + ocr_all_textlines_marginals_left = return_rnn_cnn_ocr_of_given_textlines( + image_page, all_found_textline_polygons_marginals_left, self.prediction_model, + self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) else: ocr_all_textlines_marginals_left = None if all_found_textline_polygons_marginals_right and len(all_found_textline_polygons_marginals_right)>0: - ocr_all_textlines_marginals_right = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons_marginals_right, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + ocr_all_textlines_marginals_right = return_rnn_cnn_ocr_of_given_textlines( + image_page, all_found_textline_polygons_marginals_right, self.prediction_model, + self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) else: ocr_all_textlines_marginals_right = None if all_found_textline_polygons_h and len(all_found_textline_polygons)>0: - ocr_all_textlines_h = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons_h, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + ocr_all_textlines_h = return_rnn_cnn_ocr_of_given_textlines( + image_page, all_found_textline_polygons_h, self.prediction_model, + self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) else: ocr_all_textlines_h = None if polygons_of_drop_capitals and len(polygons_of_drop_capitals)>0: - ocr_all_textlines_drop = return_rnn_cnn_ocr_of_given_textlines(image_page, polygons_of_drop_capitals, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + ocr_all_textlines_drop = return_rnn_cnn_ocr_of_given_textlines( + image_page, polygons_of_drop_capitals, self.prediction_model, + self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) else: ocr_all_textlines_drop = None else: @@ -4997,9 +5188,15 @@ class Eynollah: pcgts = self.writer.build_pagexml_full_layout( contours_only_text_parent, contours_only_text_parent_h, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, - polygons_of_images, contours_tables, polygons_of_drop_capitals, polygons_of_marginals_left, polygons_of_marginals_right, - all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes, slopes_h, slopes_marginals_left, slopes_marginals_right, - cont_page, polygons_seplines, ocr_all_textlines, ocr_all_textlines_h, ocr_all_textlines_marginals_left, ocr_all_textlines_marginals_right, ocr_all_textlines_drop, conf_contours_textregions, conf_contours_textregions_h) + polygons_of_images, contours_tables, polygons_of_drop_capitals, + polygons_of_marginals_left, polygons_of_marginals_right, + all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, + all_box_coord_marginals_left, all_box_coord_marginals_right, + slopes, slopes_h, slopes_marginals_left, slopes_marginals_right, + cont_page, polygons_seplines, ocr_all_textlines, ocr_all_textlines_h, + ocr_all_textlines_marginals_left, ocr_all_textlines_marginals_right, + ocr_all_textlines_drop, + conf_contours_textregions, conf_contours_textregions_h) return pcgts @@ -5034,18 +5231,14 @@ class Eynollah: if self.ocr and self.tr: self.logger.info("Step 4.5/5: OCR Processing") - if torch.cuda.is_available(): self.logger.info("Using GPU acceleration") else: self.logger.info("Using CPU processing") - if self.light_version: self.logger.info("Using light version OCR") - if self.textline_light: self.logger.info("Using light text line detection for OCR") - self.logger.info("Processing text lines...") device = cuda.get_current_device() @@ -5090,7 +5283,8 @@ class Eynollah: img_croped = img_poly_on_img[y:y+h, x:x+w, :] #cv2.imwrite('./extracted_lines/'+str(ind_tot)+'.jpg', img_croped) - text_ocr = self.return_ocr_of_textline_without_common_section(img_croped, model_ocr, processor, device, w, h2w_ratio, ind_tot) + text_ocr = self.return_ocr_of_textline_without_common_section( + img_croped, model_ocr, processor, device, w, h2w_ratio, ind_tot) ocr_textline_in_textregion.append(text_ocr) ind_tot = ind_tot +1 ocr_all_textlines.append(ocr_textline_in_textregion) @@ -5098,13 +5292,19 @@ class Eynollah: elif self.ocr and not self.tr: gc.collect() if len(all_found_textline_polygons)>0: - ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines( + image_page, all_found_textline_polygons, self.prediction_model, + self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) if all_found_textline_polygons_marginals_left and len(all_found_textline_polygons_marginals_left)>0: - ocr_all_textlines_marginals_left = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons_marginals_left, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + ocr_all_textlines_marginals_left = return_rnn_cnn_ocr_of_given_textlines( + image_page, all_found_textline_polygons_marginals_left, self.prediction_model, + self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) if all_found_textline_polygons_marginals_right and len(all_found_textline_polygons_marginals_right)>0: - ocr_all_textlines_marginals_right = return_rnn_cnn_ocr_of_given_textlines(image_page, all_found_textline_polygons_marginals_right, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + ocr_all_textlines_marginals_right = return_rnn_cnn_ocr_of_given_textlines( + image_page, all_found_textline_polygons_marginals_right, self.prediction_model, + self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) else: ocr_all_textlines = None @@ -5117,9 +5317,14 @@ class Eynollah: pcgts = self.writer.build_pagexml_no_full_layout( txt_con_org, page_coord, order_text_new, id_of_texts_tot, - all_found_textline_polygons, all_box_coord, polygons_of_images, polygons_of_marginals_left, polygons_of_marginals_right, - all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes, slopes_marginals_left, slopes_marginals_right, - cont_page, polygons_seplines, contours_tables, ocr_all_textlines, ocr_all_textlines_marginals_left, ocr_all_textlines_marginals_right, conf_contours_textregions) + all_found_textline_polygons, all_box_coord, polygons_of_images, + polygons_of_marginals_left, polygons_of_marginals_right, + all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, + all_box_coord_marginals_left, all_box_coord_marginals_right, + slopes, slopes_marginals_left, slopes_marginals_right, + cont_page, polygons_seplines, contours_tables, ocr_all_textlines, + ocr_all_textlines_marginals_left, ocr_all_textlines_marginals_right, + conf_contours_textregions) return pcgts @@ -5138,7 +5343,6 @@ class Eynollah_ocr: min_conf_value_of_textline_text : Optional[float]=None, logger=None, ): - self.dir_models = dir_models self.model_name = model_name self.tr_ocr = tr_ocr self.export_textline_images_and_text = export_textline_images_and_text @@ -5261,7 +5465,9 @@ class Eynollah_ocr: if child_textlines.tag.endswith("Coords"): cropped_lines_region_indexer.append(indexer_text_region) p_h=child_textlines.attrib['points'].split(' ') - textline_coords = np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) + textline_coords = np.array( [ [int(x.split(',')[0]), + int(x.split(',')[1]) ] + for x in p_h] ) x,y,w,h = cv2.boundingRect(textline_coords) if dir_out_image_text: @@ -5277,9 +5483,12 @@ class Eynollah_ocr: img_crop = img_poly_on_img[y:y+h, x:x+w, :] img_crop[mask_poly==0] = 255 - self.logger.debug("processing %d lines for '%s'", len(cropped_lines), nn.attrib['id']) + self.logger.debug("processing %d lines for '%s'", + len(cropped_lines), nn.attrib['id']) if h2w_ratio > 0.1: - cropped_lines.append(resize_image(img_crop, tr_ocr_input_height_and_width, tr_ocr_input_height_and_width) ) + cropped_lines.append(resize_image(img_crop, + tr_ocr_input_height_and_width, + tr_ocr_input_height_and_width) ) cropped_lines_meging_indexing.append(0) indexer_b_s+=1 if indexer_b_s==self.b_s: @@ -5288,8 +5497,10 @@ class Eynollah_ocr: indexer_b_s = 0 pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate(pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode(generated_ids_merged, skip_special_tokens=True) + generated_ids_merged = self.model_ocr.generate( + pixel_values_merged.to(self.device)) + generated_text_merged = self.processor.batch_decode( + generated_ids_merged, skip_special_tokens=True) extracted_texts = extracted_texts + generated_text_merged @@ -5297,7 +5508,9 @@ class Eynollah_ocr: splited_images, _ = return_textlines_split_if_needed(img_crop, None) #print(splited_images) if splited_images: - cropped_lines.append(resize_image(splited_images[0], tr_ocr_input_height_and_width, tr_ocr_input_height_and_width)) + cropped_lines.append(resize_image(splited_images[0], + tr_ocr_input_height_and_width, + tr_ocr_input_height_and_width)) cropped_lines_meging_indexing.append(1) indexer_b_s+=1 @@ -5307,13 +5520,17 @@ class Eynollah_ocr: indexer_b_s = 0 pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate(pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode(generated_ids_merged, skip_special_tokens=True) + generated_ids_merged = self.model_ocr.generate( + pixel_values_merged.to(self.device)) + generated_text_merged = self.processor.batch_decode( + generated_ids_merged, skip_special_tokens=True) extracted_texts = extracted_texts + generated_text_merged - cropped_lines.append(resize_image(splited_images[1], tr_ocr_input_height_and_width, tr_ocr_input_height_and_width)) + cropped_lines.append(resize_image(splited_images[1], + tr_ocr_input_height_and_width, + tr_ocr_input_height_and_width)) cropped_lines_meging_indexing.append(-1) indexer_b_s+=1 @@ -5323,8 +5540,10 @@ class Eynollah_ocr: indexer_b_s = 0 pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate(pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode(generated_ids_merged, skip_special_tokens=True) + generated_ids_merged = self.model_ocr.generate( + pixel_values_merged.to(self.device)) + generated_text_merged = self.processor.batch_decode( + generated_ids_merged, skip_special_tokens=True) extracted_texts = extracted_texts + generated_text_merged @@ -5339,8 +5558,10 @@ class Eynollah_ocr: indexer_b_s = 0 pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate(pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode(generated_ids_merged, skip_special_tokens=True) + generated_ids_merged = self.model_ocr.generate( + pixel_values_merged.to(self.device)) + generated_text_merged = self.processor.batch_decode( + generated_ids_merged, skip_special_tokens=True) extracted_texts = extracted_texts + generated_text_merged @@ -5371,15 +5592,22 @@ class Eynollah_ocr: ####n_end = (i+1)*self.b_s ####imgs = cropped_lines[n_start:n_end] ####pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values - ####generated_ids_merged = self.model_ocr.generate(pixel_values_merged.to(self.device)) - ####generated_text_merged = self.processor.batch_decode(generated_ids_merged, skip_special_tokens=True) + ####generated_ids_merged = self.model_ocr.generate( + #### pixel_values_merged.to(self.device)) + ####generated_text_merged = self.processor.batch_decode( + #### generated_ids_merged, skip_special_tokens=True) ####extracted_texts = extracted_texts + generated_text_merged del cropped_lines gc.collect() - extracted_texts_merged = [extracted_texts[ind] if cropped_lines_meging_indexing[ind]==0 else extracted_texts[ind]+" "+extracted_texts[ind+1] if cropped_lines_meging_indexing[ind]==1 else None for ind in range(len(cropped_lines_meging_indexing))] + extracted_texts_merged = [extracted_texts[ind] + if cropped_lines_meging_indexing[ind]==0 + else extracted_texts[ind]+" "+extracted_texts[ind+1] + if cropped_lines_meging_indexing[ind]==1 + else None + for ind in range(len(cropped_lines_meging_indexing))] extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] #print(extracted_texts_merged, len(extracted_texts_merged)) @@ -5401,7 +5629,8 @@ class Eynollah_ocr: w_bb = bb_ind[2] h_bb = bb_ind[3] - font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], font.path, w_bb, int(h_bb*0.4) ) + font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], + font.path, w_bb, int(h_bb*0.4) ) ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) @@ -5419,25 +5648,27 @@ class Eynollah_ocr: #print(len(unique_cropped_lines_region_indexer), 'unique_cropped_lines_region_indexer') #######text_by_textregion = [] #######for ind in unique_cropped_lines_region_indexer: - #######extracted_texts_merged_un = np.array(extracted_texts_merged)[np.array(cropped_lines_region_indexer)==ind] - + #######ind = np.array(cropped_lines_region_indexer)==ind + #######extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] #######text_by_textregion.append(" ".join(extracted_texts_merged_un)) text_by_textregion = [] for ind in unique_cropped_lines_region_indexer: - extracted_texts_merged_un = np.array(extracted_texts_merged)[np.array(cropped_lines_region_indexer)==ind] + ind = np.array(cropped_lines_region_indexer) == ind + extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] if len(extracted_texts_merged_un)>1: text_by_textregion_ind = "" next_glue = "" for indt in range(len(extracted_texts_merged_un)): - if extracted_texts_merged_un[indt].endswith('⸗') or extracted_texts_merged_un[indt].endswith('-') or extracted_texts_merged_un[indt].endswith('¬'): - text_by_textregion_ind = text_by_textregion_ind + next_glue + extracted_texts_merged_un[indt][:-1] + if (extracted_texts_merged_un[indt].endswith('⸗') or + extracted_texts_merged_un[indt].endswith('-') or + extracted_texts_merged_un[indt].endswith('¬')): + text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt][:-1] next_glue = "" else: - text_by_textregion_ind = text_by_textregion_ind + next_glue + extracted_texts_merged_un[indt] + text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt] next_glue = " " text_by_textregion.append(text_by_textregion_ind) - else: text_by_textregion.append(" ".join(extracted_texts_merged_un)) @@ -5495,7 +5726,9 @@ class Eynollah_ocr: unicode_textregion.text = text_by_textregion[indexer_textregion] indexer_textregion = indexer_textregion + 1 - ###sample_order = [(id_to_order[tid], text) for tid, text in zip(id_textregions, textregions_by_existing_ids) if tid in id_to_order] + ###sample_order = [(id_to_order[tid], text) + ### for tid, text in zip(id_textregions, textregions_by_existing_ids) + ### if tid in id_to_order] ##ordered_texts_sample = [text for _, text in sorted(sample_order)] ##tot_page_text = ' '.join(ordered_texts_sample) @@ -5569,7 +5802,9 @@ class Eynollah_ocr: if child_textlines.tag.endswith("Coords"): cropped_lines_region_indexer.append(indexer_text_region) p_h=child_textlines.attrib['points'].split(' ') - textline_coords = np.array( [ [ int(x.split(',')[0]) , int(x.split(',')[1]) ] for x in p_h] ) + textline_coords = np.array( [ [int(x.split(',')[0]), + int(x.split(',')[1]) ] + for x in p_h] ) x,y,w,h = cv2.boundingRect(textline_coords) @@ -5601,17 +5836,19 @@ class Eynollah_ocr: img_crop[mask_poly==0] = 255 else: - #print(file_name, angle_degrees,w*h , mask_poly[:,:,0].sum(), mask_poly[:,:,0].sum() /float(w*h) , 'didi') + # print(file_name, angle_degrees, w*h, + # mask_poly[:,:,0].sum(), + # mask_poly[:,:,0].sum() /float(w*h) , + # 'didi') if angle_degrees > 3: better_des_slope = get_orientation_moments(textline_coords) - img_crop = rotate_image_with_padding(img_crop, better_des_slope ) - + img_crop = rotate_image_with_padding(img_crop, better_des_slope) if dir_in_bin is not None: - img_crop_bin = rotate_image_with_padding(img_crop_bin, better_des_slope ) + img_crop_bin = rotate_image_with_padding(img_crop_bin, better_des_slope) - mask_poly = rotate_image_with_padding(mask_poly, better_des_slope ) + mask_poly = rotate_image_with_padding(mask_poly, better_des_slope) mask_poly = mask_poly.astype('uint8') #new bounding box @@ -5622,7 +5859,6 @@ class Eynollah_ocr: if not self.do_not_mask_with_textline_contour: img_crop[mask_poly==0] = 255 - if dir_in_bin is not None: img_crop_bin = img_crop_bin[y_n:y_n+h_n, x_n:x_n+w_n, :] if not self.do_not_mask_with_textline_contour: @@ -5630,11 +5866,14 @@ class Eynollah_ocr: if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 90: if dir_in_bin is not None: - img_crop, img_crop_bin = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly, img_crop_bin) + img_crop, img_crop_bin = \ + break_curved_line_into_small_pieces_and_then_merge( + img_crop, mask_poly, img_crop_bin) else: - img_crop, _ = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) + img_crop, _ = \ + break_curved_line_into_small_pieces_and_then_merge( + img_crop, mask_poly) - else: better_des_slope = 0 if not self.do_not_mask_with_textline_contour: @@ -5647,13 +5886,18 @@ class Eynollah_ocr: else: if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 90: if dir_in_bin is not None: - img_crop, img_crop_bin = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly, img_crop_bin) + img_crop, img_crop_bin = \ + break_curved_line_into_small_pieces_and_then_merge( + img_crop, mask_poly, img_crop_bin) else: - img_crop, _ = break_curved_line_into_small_pieces_and_then_merge(img_crop, mask_poly) + img_crop, _ = \ + break_curved_line_into_small_pieces_and_then_merge( + img_crop, mask_poly) if not self.export_textline_images_and_text: if w_scaled < 750:#1.5*image_width: - img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + img_crop, image_height, image_width) cropped_lines.append(img_fin) if abs(better_des_slope) > 45: cropped_lines_ver_index.append(1) @@ -5662,13 +5906,15 @@ class Eynollah_ocr: cropped_lines_meging_indexing.append(0) if dir_in_bin is not None: - img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop_bin, image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + img_crop_bin, image_height, image_width) cropped_lines_bin.append(img_fin) else: splited_images, splited_images_bin = return_textlines_split_if_needed( img_crop, img_crop_bin if dir_in_bin is not None else None) if splited_images: - img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[0], image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + splited_images[0], image_height, image_width) cropped_lines.append(img_fin) cropped_lines_meging_indexing.append(1) @@ -5677,7 +5923,8 @@ class Eynollah_ocr: else: cropped_lines_ver_index.append(0) - img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[1], image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + splited_images[1], image_height, image_width) cropped_lines.append(img_fin) cropped_lines_meging_indexing.append(-1) @@ -5688,13 +5935,16 @@ class Eynollah_ocr: cropped_lines_ver_index.append(0) if dir_in_bin is not None: - img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images_bin[0], image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + splited_images_bin[0], image_height, image_width) cropped_lines_bin.append(img_fin) - img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images_bin[1], image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + splited_images_bin[1], image_height, image_width) cropped_lines_bin.append(img_fin) else: - img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + img_crop, image_height, image_width) cropped_lines.append(img_fin) cropped_lines_meging_indexing.append(0) @@ -5704,7 +5954,8 @@ class Eynollah_ocr: cropped_lines_ver_index.append(0) if dir_in_bin is not None: - img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop_bin, image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + img_crop_bin, image_height, image_width) cropped_lines_bin.append(img_fin) if self.export_textline_images_and_text: @@ -5716,7 +5967,8 @@ class Eynollah_ocr: if cheild_text.tag.endswith("Unicode"): textline_text = cheild_text.text if textline_text: - base_name = os.path.join(dir_out, file_name + '_line_' + str(indexer_textlines)) + base_name = os.path.join( + dir_out, file_name + '_line_' + str(indexer_textlines)) if self.pref_of_dataset: base_name += '_' + self.pref_of_dataset if not self.do_not_mask_with_textline_contour: @@ -5806,25 +6058,31 @@ class Eynollah_ocr: preds_max_fliped = np.max(preds_flipped, axis=2 ) preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character - masked_means_flipped = np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) + masked_means_flipped = \ + np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / \ + np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) masked_means_flipped[np.isnan(masked_means_flipped)] = 0 preds_max = np.max(preds, axis=2 ) preds_max_args = np.argmax(preds, axis=2 ) pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character - masked_means = np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / np.sum(pred_max_not_unk_mask_bool, axis=1) + masked_means = \ + np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ + np.sum(pred_max_not_unk_mask_bool, axis=1) masked_means[np.isnan(masked_means)] = 0 masked_means_ver = masked_means[indices_ver] #print(masked_means_ver, 'pred_max_not_unk') - indices_where_flipped_conf_value_is_higher = np.where(masked_means_flipped > masked_means_ver)[0] + indices_where_flipped_conf_value_is_higher = \ + np.where(masked_means_flipped > masked_means_ver)[0] #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') if len(indices_where_flipped_conf_value_is_higher)>0: indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] - preds[indices_to_be_replaced,:,:] = preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] + preds[indices_to_be_replaced,:,:] = \ + preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] if dir_in_bin is not None: preds_bin = self.prediction_model.predict(imgs_bin, verbose=0) @@ -5833,35 +6091,42 @@ class Eynollah_ocr: preds_max_fliped = np.max(preds_flipped, axis=2 ) preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character - masked_means_flipped = np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) + masked_means_flipped = \ + np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / \ + np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) masked_means_flipped[np.isnan(masked_means_flipped)] = 0 preds_max = np.max(preds, axis=2 ) preds_max_args = np.argmax(preds, axis=2 ) pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character - masked_means = np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / np.sum(pred_max_not_unk_mask_bool, axis=1) + masked_means = \ + np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ + np.sum(pred_max_not_unk_mask_bool, axis=1) masked_means[np.isnan(masked_means)] = 0 masked_means_ver = masked_means[indices_ver] #print(masked_means_ver, 'pred_max_not_unk') - indices_where_flipped_conf_value_is_higher = np.where(masked_means_flipped > masked_means_ver)[0] + indices_where_flipped_conf_value_is_higher = \ + np.where(masked_means_flipped > masked_means_ver)[0] #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') if len(indices_where_flipped_conf_value_is_higher)>0: indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] - preds_bin[indices_to_be_replaced,:,:] = preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] + preds_bin[indices_to_be_replaced,:,:] = \ + preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] preds = (preds + preds_bin) / 2. - pred_texts = decode_batch_predictions(preds, self.num_to_char) preds_max = np.max(preds, axis=2 ) preds_max_args = np.argmax(preds, axis=2 ) pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character - masked_means = np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / np.sum(pred_max_not_unk_mask_bool, axis=1) + masked_means = \ + np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ + np.sum(pred_max_not_unk_mask_bool, axis=1) for ib in range(imgs.shape[0]): pred_texts_ib = pred_texts[ib].replace("[UNK]", "") @@ -5876,31 +6141,40 @@ class Eynollah_ocr: del cropped_lines_bin gc.collect() - extracted_texts_merged = [extracted_texts[ind] if cropped_lines_meging_indexing[ind]==0 else extracted_texts[ind]+" "+extracted_texts[ind+1] if cropped_lines_meging_indexing[ind]==1 else None for ind in range(len(cropped_lines_meging_indexing))] + extracted_texts_merged = [extracted_texts[ind] + if cropped_lines_meging_indexing[ind]==0 + else extracted_texts[ind]+" "+extracted_texts[ind+1] + if cropped_lines_meging_indexing[ind]==1 + else None + for ind in range(len(cropped_lines_meging_indexing))] - extracted_conf_value_merged = [extracted_conf_value[ind] if cropped_lines_meging_indexing[ind]==0 else (extracted_conf_value[ind]+extracted_conf_value[ind+1])/2. if cropped_lines_meging_indexing[ind]==1 else None for ind in range(len(cropped_lines_meging_indexing))] + extracted_conf_value_merged = [extracted_conf_value[ind] + if cropped_lines_meging_indexing[ind]==0 + else (extracted_conf_value[ind]+extracted_conf_value[ind+1])/2. + if cropped_lines_meging_indexing[ind]==1 + else None + for ind in range(len(cropped_lines_meging_indexing))] - extracted_conf_value_merged = [extracted_conf_value_merged[ind_cfm] for ind_cfm in range(len(extracted_texts_merged)) if extracted_texts_merged[ind_cfm] is not None] + extracted_conf_value_merged = [extracted_conf_value_merged[ind_cfm] + for ind_cfm in range(len(extracted_texts_merged)) + if extracted_texts_merged[ind_cfm] is not None] extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) - if dir_out_image_text: - #font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! font = importlib_resources.files(__package__) / "Charis-Regular.ttf" with importlib_resources.as_file(font) as font: font = ImageFont.truetype(font=font, size=40) for indexer_text, bb_ind in enumerate(total_bb_coordinates): - - x_bb = bb_ind[0] y_bb = bb_ind[1] w_bb = bb_ind[2] h_bb = bb_ind[3] - font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], font.path, w_bb, int(h_bb*0.4) ) + font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], + font.path, w_bb, int(h_bb*0.4) ) ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) @@ -5917,24 +6191,25 @@ class Eynollah_ocr: text_by_textregion = [] for ind in unique_cropped_lines_region_indexer: - extracted_texts_merged_un = np.array(extracted_texts_merged)[np.array(cropped_lines_region_indexer)==ind] + ind = np.array(cropped_lines_region_indexer)==ind + extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] if len(extracted_texts_merged_un)>1: text_by_textregion_ind = "" next_glue = "" for indt in range(len(extracted_texts_merged_un)): - if extracted_texts_merged_un[indt].endswith('⸗') or extracted_texts_merged_un[indt].endswith('-') or extracted_texts_merged_un[indt].endswith('¬'): - text_by_textregion_ind = text_by_textregion_ind + next_glue + extracted_texts_merged_un[indt][:-1] + if (extracted_texts_merged_un[indt].endswith('⸗') or + extracted_texts_merged_un[indt].endswith('-') or + extracted_texts_merged_un[indt].endswith('¬')): + text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt][:-1] next_glue = "" else: - text_by_textregion_ind = text_by_textregion_ind + next_glue + extracted_texts_merged_un[indt] + text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt] next_glue = " " text_by_textregion.append(text_by_textregion_ind) - else: text_by_textregion.append(" ".join(extracted_texts_merged_un)) #print(text_by_textregion, 'text_by_textregiontext_by_textregiontext_by_textregiontext_by_textregiontext_by_textregion') - - + ###index_tot_regions = [] ###tot_region_ref = [] @@ -5983,7 +6258,8 @@ class Eynollah_ocr: if childtest3.tag.endswith("TextEquiv"): for child_uc in childtest3: if child_uc.tag.endswith("Unicode"): - childtest3.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") + childtest3.set('conf', + f"{extracted_conf_value_merged[indexer]:.2f}") child_uc.text = extracted_texts_merged[indexer] indexer = indexer + 1 @@ -5999,7 +6275,9 @@ class Eynollah_ocr: unicode_textregion.text = text_by_textregion[indexer_textregion] indexer_textregion = indexer_textregion + 1 - ###sample_order = [(id_to_order[tid], text) for tid, text in zip(id_textregions, textregions_by_existing_ids) if tid in id_to_order] + ###sample_order = [(id_to_order[tid], text) + ### for tid, text in zip(id_textregions, textregions_by_existing_ids) + ### if tid in id_to_order] ##ordered_texts_sample = [text for _, text in sorted(sample_order)] ##tot_page_text = ' '.join(ordered_texts_sample) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index f8926cf..52bf3ef 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1012,8 +1012,13 @@ def check_any_text_region_in_model_one_is_main_or_header_light( (regions_model_full[:,:,0]==2)).sum() pixels_main = all_pixels - pixels_header - if ( (pixels_header/float(pixels_main)>=0.6) and ( (length_con[ii]/float(height_con[ii]) )>=1.3 ) and ( (length_con[ii]/float(height_con[ii]) )<=3 )) or ( (pixels_header/float(pixels_main)>=0.3) and ( (length_con[ii]/float(height_con[ii]) )>=3 ) ): - regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=2 + if (( pixels_header / float(pixels_main) >= 0.6 and + length_con[ii] / float(height_con[ii]) >= 1.3 and + length_con[ii] / float(height_con[ii]) <= 3 ) or + ( pixels_header / float(pixels_main) >= 0.3 and + length_con[ii] / float(height_con[ii]) >=3 )): + + regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ] = 2 contours_only_text_parent_head.append(contours_only_text_parent[ii]) conf_contours_head.append(None) # why not conf_contours[ii], too? if contours_only_text_parent_d_ordered is not None: @@ -1021,8 +1026,9 @@ def check_any_text_region_in_model_one_is_main_or_header_light( all_box_coord_head.append(all_box_coord[ii]) slopes_head.append(slopes[ii]) all_found_textline_polygons_head.append(all_found_textline_polygons[ii]) + else: - regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=1 + regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ] = 1 contours_only_text_parent_main.append(contours_only_text_parent[ii]) conf_contours_main.append(conf_contours[ii]) if contours_only_text_parent_d_ordered is not None: @@ -1883,7 +1889,8 @@ def return_boxes_of_images_by_order_of_reading_new( range(x_start_without_mother[dj], x_end_without_mother[dj])) columns_not_covered = list(all_columns - columns_covered_by_mothers) - y_type_2 = np.append(y_type_2, np.ones(len(columns_not_covered) + len(x_start_without_mother), + y_type_2 = np.append(y_type_2, np.ones(len(columns_not_covered) + + len(x_start_without_mother), dtype=int) * splitter_y_new[i]) ##y_lines_by_order = np.append(y_lines_by_order, [splitter_y_new[i]] * len(columns_not_covered)) ##x_start_by_order = np.append(x_start_by_order, [0] * len(columns_not_covered)) @@ -1938,7 +1945,8 @@ def return_boxes_of_images_by_order_of_reading_new( columns_covered_by_with_child_no_mothers.update( range(x_start_with_child_without_mother[dj], x_end_with_child_without_mother[dj])) - columns_not_covered_child_no_mother = list(all_columns - columns_covered_by_with_child_no_mothers) + columns_not_covered_child_no_mother = list( + all_columns - columns_covered_by_with_child_no_mothers) #indexes_to_be_spanned=[] for i_s in range(len(x_end_with_child_without_mother)): columns_not_covered_child_no_mother.append(x_start_with_child_without_mother[i_s]) @@ -1948,7 +1956,8 @@ def return_boxes_of_images_by_order_of_reading_new( x_start_with_child_without_mother = np.array(x_start_with_child_without_mother, int) for i_s_nc in columns_not_covered_child_no_mother: if i_s_nc in x_start_with_child_without_mother: - x_end_biggest_column = x_end_with_child_without_mother[x_start_with_child_without_mother==i_s_nc][0] + x_end_biggest_column = \ + x_end_with_child_without_mother[x_start_with_child_without_mother==i_s_nc][0] args_all_biggest_lines = ind_args[(x_starting==i_s_nc) & (x_ending==x_end_biggest_column)] y_column_nc = y_type_2[args_all_biggest_lines] @@ -1996,9 +2005,12 @@ def return_boxes_of_images_by_order_of_reading_new( np.array(list(set(list(range(len(y_all_between_nm_wc)))) - set(list(index_lines_so_close_to_top_separator)))) if len(indexes_remained_after_deleting_closed_lines) > 0: - y_all_between_nm_wc = y_all_between_nm_wc[indexes_remained_after_deleting_closed_lines] - x_starting_all_between_nm_wc = x_starting_all_between_nm_wc[indexes_remained_after_deleting_closed_lines] - x_ending_all_between_nm_wc = x_ending_all_between_nm_wc[indexes_remained_after_deleting_closed_lines] + y_all_between_nm_wc = \ + y_all_between_nm_wc[indexes_remained_after_deleting_closed_lines] + x_starting_all_between_nm_wc = \ + x_starting_all_between_nm_wc[indexes_remained_after_deleting_closed_lines] + x_ending_all_between_nm_wc = \ + x_ending_all_between_nm_wc[indexes_remained_after_deleting_closed_lines] y_all_between_nm_wc = np.append(y_all_between_nm_wc, y_column_nc[i_c]) x_starting_all_between_nm_wc = np.append(x_starting_all_between_nm_wc, i_s_nc) diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 7a8926d..d41dda1 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -67,7 +67,8 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): peaks_neg_e, _ = find_peaks(y_padded_up_to_down_padded_e, height=0) neg_peaks_max = np.max(y_padded_up_to_down_padded_e[peaks_neg_e]) - arg_neg_must_be_deleted = np.arange(len(peaks_neg_e))[y_padded_up_to_down_padded_e[peaks_neg_e] / float(neg_peaks_max) < 0.3] + arg_neg_must_be_deleted = np.arange(len(peaks_neg_e))[ + y_padded_up_to_down_padded_e[peaks_neg_e] / float(neg_peaks_max) < 0.3] diff_arg_neg_must_be_deleted = np.diff(arg_neg_must_be_deleted) arg_diff = np.array(range(len(diff_arg_neg_must_be_deleted))) @@ -78,11 +79,14 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): clusters_to_be_deleted = [] if len(arg_diff_cluster) > 0: - clusters_to_be_deleted.append(arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) + clusters_to_be_deleted.append( + arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) for i in range(len(arg_diff_cluster) - 1): - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : - arg_diff_cluster[i + 1] + 1]) - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) + clusters_to_be_deleted.append( + arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : + arg_diff_cluster[i + 1] + 1]) + clusters_to_be_deleted.append( + arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) if len(clusters_to_be_deleted) > 0: peaks_new_extra = [] for m in range(len(clusters_to_be_deleted)): @@ -179,7 +183,8 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): peaks_neg_e, _ = find_peaks(y_padded_up_to_down_padded_e, height=0) neg_peaks_max=np.max(y_padded_up_to_down_padded_e[peaks_neg_e]) - arg_neg_must_be_deleted= np.arange(len(peaks_neg_e))[y_padded_up_to_down_padded_e[peaks_neg_e]/float(neg_peaks_max)<0.3] + arg_neg_must_be_deleted = np.arange(len(peaks_neg_e))[ + y_padded_up_to_down_padded_e[peaks_neg_e]/float(neg_peaks_max)<0.3] diff_arg_neg_must_be_deleted=np.diff(arg_neg_must_be_deleted) arg_diff=np.array(range(len(diff_arg_neg_must_be_deleted))) @@ -239,7 +244,8 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): try: neg_peaks_max=np.max(y_padded_smoothed[peaks]) - arg_neg_must_be_deleted= np.arange(len(peaks_neg))[y_padded_up_to_down_padded[peaks_neg]/float(neg_peaks_max)<0.42] + arg_neg_must_be_deleted = np.arange(len(peaks_neg))[ + y_padded_up_to_down_padded[peaks_neg]/float(neg_peaks_max)<0.42] diff_arg_neg_must_be_deleted=np.diff(arg_neg_must_be_deleted) arg_diff=np.array(range(len(diff_arg_neg_must_be_deleted))) @@ -316,23 +322,36 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): if peaks_values[jj]>mean_value_of_peaks-std_value_of_peaks/2.: point_up = peaks[jj] + first_nonzero - int(1.3 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down =y_max_cont-1##peaks[jj] + first_nonzero + int(1.3 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) + point_down =y_max_cont-1 + ##peaks[jj] + first_nonzero + int(1.3 * dis_to_next_down) + #point_up + # np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) + ###-int(dis_to_next_down*1./4.0) else: point_up = peaks[jj] + first_nonzero - int(1.4 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down =y_max_cont-1##peaks[jj] + first_nonzero + int(1.6 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) + point_down =y_max_cont-1 + ##peaks[jj] + first_nonzero + int(1.6 * dis_to_next_down) + #point_up + # np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) + ###-int(dis_to_next_down*1./4.0) point_down_narrow = peaks[jj] + first_nonzero + int( - 1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./2) + 1.4 * dis_to_next_down) + ###-int(dis_to_next_down*1./2) else: dis_to_next_up = abs(peaks[jj] - peaks_neg[jj]) dis_to_next_down = abs(peaks[jj] - peaks_neg[jj + 1]) if peaks_values[jj]>mean_value_of_peaks-std_value_of_peaks/2.: - point_up = peaks[jj] + first_nonzero - int(1.1 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.1 * dis_to_next_up) + ##+int(dis_to_next_up*1./4.0) + point_down = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) + ###-int(dis_to_next_down*1./4.0) else: - point_up = peaks[jj] + first_nonzero - int(1.23 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down = peaks[jj] + first_nonzero + int(1.33 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.23 * dis_to_next_up) + ##+int(dis_to_next_up*1./4.0) + point_down = peaks[jj] + first_nonzero + int(1.33 * dis_to_next_down) + ###-int(dis_to_next_down*1./4.0) point_down_narrow = peaks[jj] + first_nonzero + int( 1.1 * dis_to_next_down) ###-int(dis_to_next_down*1./2) @@ -341,7 +360,9 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): point_down_narrow = img_patch.shape[0] - 2 - distances = [cv2.pointPolygonTest(contour_text_interest_copy, tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) + distances = [cv2.pointPolygonTest(contour_text_interest_copy, + tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), + True) for mj in range(len(xv))] distances = np.array(distances) @@ -468,7 +489,8 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): point_up =peaks[jj] + first_nonzero - int(1. / 1.8 * dis_to_next) distances = [cv2.pointPolygonTest(contour_text_interest_copy, - tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) + tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), + True) for mj in range(len(xv))] distances = np.array(distances) @@ -543,7 +565,8 @@ def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): point_down = peaks[jj] + first_nonzero + int(1. / 1.9 * dis_to_next_down) distances = [cv2.pointPolygonTest(contour_text_interest_copy, - tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) + tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), + True) for mj in range(len(xv))] distances = np.array(distances) @@ -613,7 +636,8 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): neg_peaks_max = np.max(y_padded_up_to_down_padded[peaks_neg]) - arg_neg_must_be_deleted = np.arange(len(peaks_neg))[y_padded_up_to_down_padded[peaks_neg] / float(neg_peaks_max) < 0.42] + arg_neg_must_be_deleted = np.arange(len(peaks_neg))[ + y_padded_up_to_down_padded[peaks_neg] / float(neg_peaks_max) < 0.42] diff_arg_neg_must_be_deleted = np.diff(arg_neg_must_be_deleted) arg_diff = np.array(range(len(diff_arg_neg_must_be_deleted))) @@ -689,30 +713,50 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): dis_to_next_down = abs(peaks[jj] - peaks_neg[jj + 1]) if peaks_values[jj] > mean_value_of_peaks - std_value_of_peaks / 2.0: - point_up = peaks[jj] + first_nonzero - int(1.3 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down = x_max_cont - 1 ##peaks[jj] + first_nonzero + int(1.3 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.3 * dis_to_next_up) + ##+int(dis_to_next_up*1./4.0) + point_down = x_max_cont - 1 + ##peaks[jj] + first_nonzero + int(1.3 * dis_to_next_down) + #point_up + # np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) + ###-int(dis_to_next_down*1./4.0) else: - point_up = peaks[jj] + first_nonzero - int(1.4 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down = x_max_cont - 1 ##peaks[jj] + first_nonzero + int(1.6 * dis_to_next_down) #point_up# np.max(y_cont)#peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.4 * dis_to_next_up) + ##+int(dis_to_next_up*1./4.0) + point_down = x_max_cont - 1 + ##peaks[jj] + first_nonzero + int(1.6 * dis_to_next_down) + #point_up + # np.max(y_cont) + #peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) + ###-int(dis_to_next_down*1./4.0) - point_down_narrow = peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) ###-int(dis_to_next_down*1./2) + point_down_narrow = peaks[jj] + first_nonzero + int(1.4 * dis_to_next_down) + ###-int(dis_to_next_down*1./2) else: dis_to_next_up = abs(peaks[jj] - peaks_neg[jj]) dis_to_next_down = abs(peaks[jj] - peaks_neg[jj + 1]) if peaks_values[jj] > mean_value_of_peaks - std_value_of_peaks / 2.0: - point_up = peaks[jj] + first_nonzero - int(1.1 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.1 * dis_to_next_up) + ##+int(dis_to_next_up*1./4.0) + point_down = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) + ###-int(dis_to_next_down*1./4.0) else: - point_up = peaks[jj] + first_nonzero - int(1.23 * dis_to_next_up) ##+int(dis_to_next_up*1./4.0) - point_down = peaks[jj] + first_nonzero + int(1.33 * dis_to_next_down) ###-int(dis_to_next_down*1./4.0) + point_up = peaks[jj] + first_nonzero - int(1.23 * dis_to_next_up) + ##+int(dis_to_next_up*1./4.0) + point_down = peaks[jj] + first_nonzero + int(1.33 * dis_to_next_down) + ###-int(dis_to_next_down*1./4.0) - point_down_narrow = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) ###-int(dis_to_next_down*1./2) + point_down_narrow = peaks[jj] + first_nonzero + int(1.1 * dis_to_next_down) + ###-int(dis_to_next_down*1./2) if point_down_narrow >= img_patch.shape[0]: point_down_narrow = img_patch.shape[0] - 2 - distances = [cv2.pointPolygonTest(contour_text_interest_copy, tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) for mj in range(len(xv))] + distances = [cv2.pointPolygonTest(contour_text_interest_copy, + tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), + True) + for mj in range(len(xv))] distances = np.array(distances) xvinside = xv[distances >= 0] @@ -801,7 +845,8 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): point_up = peaks[jj] + first_nonzero - int(1.0 / 1.8 * dis_to_next) distances = [cv2.pointPolygonTest(contour_text_interest_copy, - tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) + tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), + True) for mj in range(len(xv))] distances = np.array(distances) @@ -866,7 +911,8 @@ def separate_lines_vertical(img_patch, contour_text_interest, thetha): point_down = peaks[jj] + first_nonzero + int(1.0 / 1.9 * dis_to_next_down) distances = [cv2.pointPolygonTest(contour_text_interest_copy, - tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), True) + tuple(int(x) for x in np.array([xv[mj], peaks[jj] + first_nonzero])), + True) for mj in range(len(xv))] distances = np.array(distances) @@ -950,7 +996,8 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): peaks_neg_e, _ = find_peaks(y_padded_up_to_down_padded_e, height=0) neg_peaks_max = np.max(y_padded_up_to_down_padded_e[peaks_neg_e]) - arg_neg_must_be_deleted = np.arange(len(peaks_neg_e))[y_padded_up_to_down_padded_e[peaks_neg_e] / float(neg_peaks_max) < 0.3] + arg_neg_must_be_deleted = np.arange(len(peaks_neg_e))[ + y_padded_up_to_down_padded_e[peaks_neg_e] / float(neg_peaks_max) < 0.3] diff_arg_neg_must_be_deleted = np.diff(arg_neg_must_be_deleted) arg_diff = np.array(range(len(diff_arg_neg_must_be_deleted))) @@ -963,8 +1010,11 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): if len(arg_diff_cluster) > 0: clusters_to_be_deleted.append(arg_neg_must_be_deleted[0 : arg_diff_cluster[0] + 1]) for i in range(len(arg_diff_cluster) - 1): - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[i] + 1 : arg_diff_cluster[i + 1] + 1]) - clusters_to_be_deleted.append(arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) + clusters_to_be_deleted.append( + arg_neg_must_be_deleted[arg_diff_cluster[i] + 1: + arg_diff_cluster[i + 1] + 1]) + clusters_to_be_deleted.append( + arg_neg_must_be_deleted[arg_diff_cluster[len(arg_diff_cluster) - 1] + 1 :]) if len(clusters_to_be_deleted) > 0: peaks_new_extra = [] for m in range(len(clusters_to_be_deleted)): @@ -1014,7 +1064,8 @@ def separate_lines_new_inside_tiles2(img_patch, thetha): try: neg_peaks_max = np.max(y_padded_smoothed[peaks]) - arg_neg_must_be_deleted = np.arange(len(peaks_neg))[y_padded_up_to_down_padded[peaks_neg] / float(neg_peaks_max) < 0.24] + arg_neg_must_be_deleted = np.arange(len(peaks_neg))[ + y_padded_up_to_down_padded[peaks_neg] / float(neg_peaks_max) < 0.24] diff_arg_neg_must_be_deleted = np.diff(arg_neg_must_be_deleted) arg_diff = np.array(range(len(diff_arg_neg_must_be_deleted))) @@ -1290,7 +1341,9 @@ def separate_lines_vertical_cont(img_patch, contour_text_interest, thetha, box_i return None, cont_final -def textline_contours_postprocessing(textline_mask, slope, contour_text_interest, box_ind, add_boxes_coor_into_textlines=False): +def textline_contours_postprocessing(textline_mask, slope, + contour_text_interest, box_ind, + add_boxes_coor_into_textlines=False): textline_mask = np.repeat(textline_mask[:, :, np.newaxis], 3, axis=2) * 255 textline_mask = textline_mask.astype(np.uint8) kernel = np.ones((5, 5), np.uint8) @@ -1485,7 +1538,8 @@ def return_deskew_slop(img_patch_org, sigma_des,n_tot_angles=100, onset_y=int((img_resized.shape[0]-img_int.shape[0])/2.) #img_resized=np.zeros((int( img_int.shape[0]*(1.8) ) , int( img_int.shape[1]*(2.6) ) )) - #img_resized[ int( img_int.shape[0]*(.4)):int( img_int.shape[0]*(.4))+img_int.shape[0] , int( img_int.shape[1]*(.8)):int( img_int.shape[1]*(.8))+img_int.shape[1] ]=img_int[:,:] + #img_resized[ int( img_int.shape[0]*(.4)):int( img_int.shape[0]*(.4))+img_int.shape[0], + # int( img_int.shape[1]*(.8)):int( img_int.shape[1]*(.8))+img_int.shape[1] ]=img_int[:,:] img_resized[ onset_y:onset_y+img_int.shape[0] , onset_x:onset_x+img_int.shape[1] ]=img_int[:,:] if main_page and img_patch_org.shape[1] > img_patch_org.shape[0]: @@ -1689,14 +1743,18 @@ def do_work_of_slopes_new_curved( mask_biggest2 = cv2.dilate(mask_biggest2, KERNEL, iterations=4) pixel_img = 1 - mask_biggest2 = resize_image(mask_biggest2, int(mask_biggest2.shape[0] * scale_par), int(mask_biggest2.shape[1] * scale_par)) + mask_biggest2 = resize_image(mask_biggest2, + int(mask_biggest2.shape[0] * scale_par), + int(mask_biggest2.shape[1] * scale_par)) cnt_textlines_in_image_ind = return_contours_of_interested_textline(mask_biggest2, pixel_img) try: textlines_cnt_per_region.append(cnt_textlines_in_image_ind[0]) except Exception as why: logger.error(why) else: - textlines_cnt_per_region = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contour_par, box_text, True) + textlines_cnt_per_region = textline_contours_postprocessing(all_text_region_raw, + slope_for_all, contour_par, + box_text, True) return textlines_cnt_per_region[::-1], box_text, contour, contour_par, crop_coor, index_r_con, slope diff --git a/src/eynollah/utils/utils_ocr.py b/src/eynollah/utils/utils_ocr.py index 5f19387..602ad6e 100644 --- a/src/eynollah/utils/utils_ocr.py +++ b/src/eynollah/utils/utils_ocr.py @@ -370,7 +370,11 @@ def return_textline_contour_with_added_box_coordinate(textline_contour, box_ind return textline_contour -def return_rnn_cnn_ocr_of_given_textlines(image, all_found_textline_polygons, prediction_model, b_s_ocr, num_to_char, textline_light=False, curved_line=False): +def return_rnn_cnn_ocr_of_given_textlines(image, all_found_textline_polygons, + prediction_model, + b_s_ocr, num_to_char, + textline_light=False, + curved_line=False): max_len = 512 padding_token = 299 image_width = 512#max_len * 4 @@ -426,17 +430,23 @@ def return_rnn_cnn_ocr_of_given_textlines(image, all_found_textline_polygons, pr splited_images, splited_images_bin = return_textlines_split_if_needed(img_crop, None) if splited_images: - img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[0], image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[0], + image_height, + image_width) cropped_lines.append(img_fin) cropped_lines_meging_indexing.append(1) - img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[1], image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model(splited_images[1], + image_height, + image_width) cropped_lines.append(img_fin) cropped_lines_meging_indexing.append(-1) else: - img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, image_height, image_width) + img_fin = preprocess_and_resize_image_for_ocrcnn_model(img_crop, + image_height, + image_width) cropped_lines.append(img_fin) cropped_lines_meging_indexing.append(0) @@ -469,7 +479,12 @@ def return_rnn_cnn_ocr_of_given_textlines(image, all_found_textline_polygons, pr pred_texts_ib = pred_texts[ib].replace("[UNK]", "") extracted_texts.append(pred_texts_ib) - extracted_texts_merged = [extracted_texts[ind] if cropped_lines_meging_indexing[ind]==0 else extracted_texts[ind]+" "+extracted_texts[ind+1] if cropped_lines_meging_indexing[ind]==1 else None for ind in range(len(cropped_lines_meging_indexing))] + extracted_texts_merged = [extracted_texts[ind] + if cropped_lines_meging_indexing[ind]==0 + else extracted_texts[ind]+" "+extracted_texts[ind+1] + if cropped_lines_meging_indexing[ind]==1 + else None + for ind in range(len(cropped_lines_meging_indexing))] extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) From b21051db21cf4c0f0e1bbf288cd4e985cc01cb7f Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 30 Sep 2025 19:16:00 +0200 Subject: [PATCH 284/492] ProcessPoolExecutor: shutdown during del() instead of atexit() --- src/eynollah/eynollah.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 2e31433..7a28478 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -260,7 +260,6 @@ class Eynollah: # for parallelization of CPU-intensive tasks: self.executor = ProcessPoolExecutor(max_workers=cpu_count()) - atexit.register(self.executor.shutdown) if threshold_art_class_layout: self.threshold_art_class_layout = float(threshold_art_class_layout) @@ -406,6 +405,26 @@ class Eynollah: self.logger.info(f"Model initialization complete ({time.time() - t_start:.1f}s)") + def __del__(self): + if hasattr(self, 'executor') and getattr(self, 'executor'): + self.executor.shutdown() + for model_name in ['model_page', + 'model_classifier', + 'model_bin', + 'model_enhancement', + 'model_region', + 'model_region_1_2', + 'model_region_p2', + 'model_region_fl_np', + 'model_region_fl', + 'model_textline', + 'model_reading_order', + 'model_table', + 'model_ocr', + 'processor']: + if hasattr(self, model_name) and getattr(self, model_name): + delattr(self, model_name) + def cache_images(self, image_filename=None, image_pil=None, dpi=None): ret = {} t_c0 = time.time() From 375e0263d4188ff5ca43037a6176544009c74e17 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 30 Sep 2025 19:16:50 +0200 Subject: [PATCH 285/492] CNN-RNN OCR model: switch to 20250930 version (compatible with TF 2.12 on CPU as well) --- src/eynollah/eynollah.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 7a28478..62ce002 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -327,7 +327,7 @@ class Eynollah: if self.ocr and self.tr: self.model_ocr_dir = dir_models + "/model_eynollah_ocr_trocr_20250919" elif self.ocr and not self.tr: - self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250904" + self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250930" if self.tables: if self.light_version: self.model_table_dir = dir_models + "/modelens_table_0t4_201124" @@ -5392,7 +5392,7 @@ class Eynollah_ocr: if self.model_name: self.model_ocr_dir = self.model_name else: - self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250904" + self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250930" model_ocr = load_model(self.model_ocr_dir , compile=False) self.prediction_model = tf.keras.models.Model( From 61b20cc83d153aa0df2f5b75d6059ac80c730b3c Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 30 Sep 2025 19:20:35 +0200 Subject: [PATCH 286/492] tests: switch from subtests to parametrize, use --isolate everywhere to free CUDA memory in between --- Makefile | 2 +- requirements-test.txt | 2 +- tests/test_run.py | 202 ++++++++++++++++++++---------------------- 3 files changed, 100 insertions(+), 106 deletions(-) diff --git a/Makefile b/Makefile index a920615..dd95c0a 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,7 @@ BIN_MODEL := https://github.com/qurator-spk/sbb_binarization/releases/download/v OCR_MODEL := https://zenodo.org/records/17194824/files/models_ocr_v0_5_0.tar.gz?download=1 -PYTEST_ARGS ?= -vv +PYTEST_ARGS ?= -vv --isolate # BEGIN-EVAL makefile-parser --make-help Makefile diff --git a/requirements-test.txt b/requirements-test.txt index cce9428..3ebcf71 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -1,4 +1,4 @@ pytest -pytest-subtests +pytest-isolate coverage[toml] black diff --git a/tests/test_run.py b/tests/test_run.py index be928a0..59e5099 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -20,23 +20,9 @@ MODELS_LAYOUT = environ.get('MODELS_LAYOUT', str(testdir.joinpath('..', 'models_ MODELS_OCR = environ.get('MODELS_OCR', str(testdir.joinpath('..', 'models_ocr_v0_5_0').resolve())) MODELS_BIN = environ.get('MODELS_BIN', str(testdir.joinpath('..', 'default-2021-03-09').resolve())) -def test_run_eynollah_layout_filename(tmp_path, subtests, pytestconfig, caplog): - infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') - outfile = tmp_path / 'kant_aufklaerung_1784_0020.xml' - args = [ - '-m', MODELS_LAYOUT, - '-i', str(infile), - '-o', str(outfile.parent), - # subtests write to same location - '--overwrite', - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'eynollah' - runner = CliRunner() - for options in [ +@pytest.mark.parametrize( + "options", + [ [], # defaults ["--allow_scaling", "--curved-line"], ["--allow_scaling", "--curved-line", "--full-layout"], @@ -47,22 +33,34 @@ def test_run_eynollah_layout_filename(tmp_path, subtests, pytestconfig, caplog): # -eoi ... # --do_ocr # --skip_layout_and_reading_order - ]: - with subtests.test(#msg="test CLI", - options=options): - with caplog.filtering(only_eynollah): - result = runner.invoke(layout_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert str(infile) in logmsgs - assert outfile.exists() - tree = page_from_file(str(outfile)).etree - regions = tree.xpath("//page:TextRegion", namespaces=NS) - assert len(regions) >= 2, "result is inaccurate" - regions = tree.xpath("//page:SeparatorRegion", namespaces=NS) - assert len(regions) >= 2, "result is inaccurate" - lines = tree.xpath("//page:TextLine", namespaces=NS) - assert len(lines) == 31, "result is inaccurate" # 29 paragraph lines, 1 page and 1 catch-word line + ], ids=str) +def test_run_eynollah_layout_filename(tmp_path, pytestconfig, caplog, options): + infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') + outfile = tmp_path / 'kant_aufklaerung_1784_0020.xml' + args = [ + '-m', MODELS_LAYOUT, + '-i', str(infile), + '-o', str(outfile.parent), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + def only_eynollah(logrec): + return logrec.name == 'eynollah' + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(layout_cli, args + options, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + assert str(infile) in logmsgs + assert outfile.exists() + tree = page_from_file(str(outfile)).etree + regions = tree.xpath("//page:TextRegion", namespaces=NS) + assert len(regions) >= 2, "result is inaccurate" + regions = tree.xpath("//page:SeparatorRegion", namespaces=NS) + assert len(regions) >= 2, "result is inaccurate" + lines = tree.xpath("//page:TextLine", namespaces=NS) + assert len(lines) == 31, "result is inaccurate" # 29 paragraph lines, 1 page and 1 catch-word line def test_run_eynollah_layout_directory(tmp_path, pytestconfig, caplog): indir = testdir.joinpath('resources') @@ -86,7 +84,13 @@ def test_run_eynollah_layout_directory(tmp_path, pytestconfig, caplog): assert any(logmsg for logmsg in logmsgs if logmsg.startswith('All jobs done in')) assert len(list(outdir.iterdir())) == 2 -def test_run_eynollah_binarization_filename(tmp_path, subtests, pytestconfig, caplog): +@pytest.mark.parametrize( + "options", + [ + [], # defaults + ["--no-patches"], + ], ids=str) +def test_run_eynollah_binarization_filename(tmp_path, pytestconfig, caplog, options): infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') args = [ @@ -100,25 +104,19 @@ def test_run_eynollah_binarization_filename(tmp_path, subtests, pytestconfig, ca def only_eynollah(logrec): return logrec.name == 'SbbBinarizer' runner = CliRunner() - for options in [ - [], # defaults - ["--no-patches"], - ]: - with subtests.test(#msg="test CLI", - options=options): - with caplog.filtering(only_eynollah): - result = runner.invoke(binarization_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert any(True for logmsg in logmsgs if logmsg.startswith('Predicting')) - assert outfile.exists() - with Image.open(infile) as original_img: - original_size = original_img.size - with Image.open(outfile) as binarized_img: - binarized_size = binarized_img.size - assert original_size == binarized_size + with caplog.filtering(only_eynollah): + result = runner.invoke(binarization_cli, args + options, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + assert any(True for logmsg in logmsgs if logmsg.startswith('Predicting')) + assert outfile.exists() + with Image.open(infile) as original_img: + original_size = original_img.size + with Image.open(outfile) as binarized_img: + binarized_size = binarized_img.size + assert original_size == binarized_size -def test_run_eynollah_binarization_directory(tmp_path, subtests, pytestconfig, caplog): +def test_run_eynollah_binarization_directory(tmp_path, pytestconfig, caplog): indir = testdir.joinpath('resources') outdir = tmp_path args = [ @@ -139,15 +137,19 @@ def test_run_eynollah_binarization_directory(tmp_path, subtests, pytestconfig, c assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Predicting')]) == 2 assert len(list(outdir.iterdir())) == 2 -def test_run_eynollah_enhancement_filename(tmp_path, subtests, pytestconfig, caplog): +@pytest.mark.parametrize( + "options", + [ + [], # defaults + ["-sos"], + ], ids=str) +def test_run_eynollah_enhancement_filename(tmp_path, pytestconfig, caplog, options): infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') args = [ '-m', MODELS_LAYOUT, '-i', str(infile), '-o', str(outfile.parent), - # subtests write to same location - '--overwrite', ] if pytestconfig.getoption('verbose') > 0: args.extend(['-l', 'DEBUG']) @@ -155,25 +157,19 @@ def test_run_eynollah_enhancement_filename(tmp_path, subtests, pytestconfig, cap def only_eynollah(logrec): return logrec.name == 'enhancement' runner = CliRunner() - for options in [ - [], # defaults - ["-sos"], - ]: - with subtests.test(#msg="test CLI", - options=options): - with caplog.filtering(only_eynollah): - result = runner.invoke(enhancement_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert any(True for logmsg in logmsgs if logmsg.startswith('Image was enhanced')), logmsgs - assert outfile.exists() - with Image.open(infile) as original_img: - original_size = original_img.size - with Image.open(outfile) as enhanced_img: - enhanced_size = enhanced_img.size - assert (original_size == enhanced_size) == ("-sos" in options) + with caplog.filtering(only_eynollah): + result = runner.invoke(enhancement_cli, args + options, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + assert any(True for logmsg in logmsgs if logmsg.startswith('Image was enhanced')), logmsgs + assert outfile.exists() + with Image.open(infile) as original_img: + original_size = original_img.size + with Image.open(outfile) as enhanced_img: + enhanced_size = enhanced_img.size + assert (original_size == enhanced_size) == ("-sos" in options) -def test_run_eynollah_enhancement_directory(tmp_path, subtests, pytestconfig, caplog): +def test_run_eynollah_enhancement_directory(tmp_path, pytestconfig, caplog): indir = testdir.joinpath('resources') outdir = tmp_path args = [ @@ -194,7 +190,7 @@ def test_run_eynollah_enhancement_directory(tmp_path, subtests, pytestconfig, ca assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Image was enhanced')]) == 2 assert len(list(outdir.iterdir())) == 2 -def test_run_eynollah_mbreorder_filename(tmp_path, subtests, pytestconfig, caplog): +def test_run_eynollah_mbreorder_filename(tmp_path, pytestconfig, caplog): infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.xml') outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') args = [ @@ -223,7 +219,7 @@ def test_run_eynollah_mbreorder_filename(tmp_path, subtests, pytestconfig, caplo #assert in_order != out_order assert out_order == ['r_1_1', 'r_2_1', 'r_2_2', 'r_2_3'] -def test_run_eynollah_mbreorder_directory(tmp_path, subtests, pytestconfig, caplog): +def test_run_eynollah_mbreorder_directory(tmp_path, pytestconfig, caplog): indir = testdir.joinpath('resources') outdir = tmp_path args = [ @@ -245,7 +241,15 @@ def test_run_eynollah_mbreorder_directory(tmp_path, subtests, pytestconfig, capl #assert len([logmsg for logmsg in logmsgs if logmsg.startswith('???')]) == 2 assert len(list(outdir.iterdir())) == 2 -def test_run_eynollah_ocr_filename(tmp_path, subtests, pytestconfig, caplog): +@pytest.mark.parametrize( + "options", + [ + [], # defaults + ["-doit", #str(outrenderfile.parent)], + ], + ["-trocr"], + ], ids=str) +def test_run_eynollah_ocr_filename(tmp_path, pytestconfig, caplog, options): infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') outrenderfile = tmp_path.joinpath('render').joinpath('kant_aufklaerung_1784_0020.png') @@ -255,8 +259,6 @@ def test_run_eynollah_ocr_filename(tmp_path, subtests, pytestconfig, caplog): '-i', str(infile), '-dx', str(infile.parent), '-o', str(outfile.parent), - # subtests write to same location - '--overwrite', ] if pytestconfig.getoption('verbose') > 0: args.extend(['-l', 'DEBUG']) @@ -264,33 +266,25 @@ def test_run_eynollah_ocr_filename(tmp_path, subtests, pytestconfig, caplog): def only_eynollah(logrec): return logrec.name == 'eynollah' runner = CliRunner() - for options in [ - # kba Fri Sep 26 12:53:49 CEST 2025 - # Disabled until NHWC/NCHW error in https://github.com/qurator-spk/eynollah/actions/runs/18019655200/job/51273541895 debugged - # [], # defaults - # ["-doit", str(outrenderfile.parent)], - ["-trocr"], - ]: - with subtests.test(#msg="test CLI", - options=options): - with caplog.filtering(only_eynollah): - result = runner.invoke(ocr_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - # FIXME: ocr has no logging! - #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs - assert outfile.exists() - if "-doit" in options: - assert outrenderfile.exists() - #in_tree = page_from_file(str(infile)).etree - #in_order = in_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) - out_tree = page_from_file(str(outfile)).etree - out_texts = out_tree.xpath("//page:TextLine/page:TextEquiv[last()]/page:Unicode/text()", namespaces=NS) - assert len(out_texts) >= 2, ("result is inaccurate", out_texts) - assert sum(map(len, out_texts)) > 100, ("result is inaccurate", out_texts) + if "-doit" in options: + options.insert(options.index("-doit") + 1, str(outrenderfile.parent)) + with caplog.filtering(only_eynollah): + result = runner.invoke(ocr_cli, args + options, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + # FIXME: ocr has no logging! + #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs + assert outfile.exists() + if "-doit" in options: + assert outrenderfile.exists() + #in_tree = page_from_file(str(infile)).etree + #in_order = in_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) + out_tree = page_from_file(str(outfile)).etree + out_texts = out_tree.xpath("//page:TextLine/page:TextEquiv[last()]/page:Unicode/text()", namespaces=NS) + assert len(out_texts) >= 2, ("result is inaccurate", out_texts) + assert sum(map(len, out_texts)) > 100, ("result is inaccurate", out_texts) -@pytest.mark.skip("Disabled until NHWC/NCHW error in https://github.com/qurator-spk/eynollah/actions/runs/18019655200/job/51273541895 debugged") -def test_run_eynollah_ocr_directory(tmp_path, subtests, pytestconfig, caplog): +def test_run_eynollah_ocr_directory(tmp_path, pytestconfig, caplog): indir = testdir.joinpath('resources') outdir = tmp_path args = [ From a3d8197930b9e2c07862186d23ee192dc0347ff4 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 30 Sep 2025 21:50:21 +0200 Subject: [PATCH 287/492] makefile: update model URL --- Makefile | 50 ++++++++++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 22 deletions(-) diff --git a/Makefile b/Makefile index dd95c0a..357aa47 100644 --- a/Makefile +++ b/Makefile @@ -13,10 +13,16 @@ DOCKER ?= docker #SEG_MODEL := https://github.com/qurator-spk/eynollah/releases/download/v0.3.0/models_eynollah.tar.gz #SEG_MODEL := https://github.com/qurator-spk/eynollah/releases/download/v0.3.1/models_eynollah.tar.gz SEG_MODEL := https://zenodo.org/records/17194824/files/models_layout_v0_5_0.tar.gz?download=1 +SEG_MODELFILE = $(notdir $(patsubst %?download=1,%,$(SEG_MODEL))) +SEG_MODELNAME = $(SEG_MODELFILE:%.tar.gz=%) BIN_MODEL := https://github.com/qurator-spk/sbb_binarization/releases/download/v0.0.11/saved_model_2021_03_09.zip +BIN_MODELFILE = $(notdir $(BIN_MODEL)) +BIN_MODELNAME := default-2021-03-09 -OCR_MODEL := https://zenodo.org/records/17194824/files/models_ocr_v0_5_0.tar.gz?download=1 +OCR_MODEL := https://zenodo.org/records/17236998/files/models_ocr_v0_5_1.tar.gz?download=1 +OCR_MODELFILE = $(notdir $(patsubst %?download=1,%,$(OCR_MODEL))) +OCR_MODELNAME = $(OCR_MODELFILE:%.tar.gz=%) PYTEST_ARGS ?= -vv --isolate @@ -31,7 +37,8 @@ help: @echo " install Install package with pip" @echo " install-dev Install editable with pip" @echo " deps-test Install test dependencies with pip" - @echo " models Download and extract models to $(CURDIR)/models_layout_v0_5_0" + @echo " models Download and extract models to $(CURDIR):" + @echo " $(BIN_MODELNAME) $(SEG_MODELNAME) $(OCR_MODELNAME)" @echo " smoke-test Run simple CLI check" @echo " ocrd-test Run OCR-D CLI check" @echo " test Run unit tests" @@ -42,33 +49,29 @@ help: @echo " PYTEST_ARGS pytest args for 'test' (Set to '-s' to see log output during test execution, '-vv' to see individual tests. [$(PYTEST_ARGS)]" @echo " SEG_MODEL URL of 'models' archive to download for segmentation 'test' [$(SEG_MODEL)]" @echo " BIN_MODEL URL of 'models' archive to download for binarization 'test' [$(BIN_MODEL)]" + @echo " OCR_MODEL URL of 'models' archive to download for binarization 'test' [$(OCR_MODEL)]" @echo "" # END-EVAL # Download and extract models to $(PWD)/models_layout_v0_5_0 -models: models_layout_v0_5_0 models_ocr_v0_5_0 default-2021-03-09 +models: $(BIN_MODELNAME) $(SEG_MODELNAME) $(OCR_MODELNAME) -models_layout_v0_5_0: models_layout_v0_5_0.tar.gz - tar zxf models_layout_v0_5_0.tar.gz - -models_layout_v0_5_0.tar.gz: +$(BIN_MODELFILE): + wget -O $@ $(BIN_MODEL) +$(SEG_MODELFILE): wget -O $@ $(SEG_MODEL) - -models_ocr_v0_5_0: models_ocr_v0_5_0.tar.gz - tar zxf models_ocr_v0_5_0.tar.gz - -models_ocr_v0_5_0.tar.gz: +$(OCR_MODELFILE): wget -O $@ $(OCR_MODEL) -default-2021-03-09: $(notdir $(BIN_MODEL)) - unzip $(notdir $(BIN_MODEL)) +$(BIN_MODELNAME): $(BIN_MODELFILE) mkdir $@ - mv $(basename $(notdir $(BIN_MODEL))) $@ - -$(notdir $(BIN_MODEL)): - wget $(BIN_MODEL) + unzip -d $@ $< +$(SEG_MODELNAME): $(SEG_MODELFILE) + tar zxf $< +$(OCR_MODELNAME): $(OCR_MODELFILE) + tar zxf $< build: $(PIP) install build @@ -82,7 +85,10 @@ install: install-dev: $(PIP) install -e .$(and $(EXTRAS),[$(EXTRAS)]) -deps-test: models_layout_v0_5_0 +ifeq (OCR,$(findstring OCR, $(EXTRAS))) +deps-test: $(OCR_MODELNAME) +endif +deps-test: $(BIN_MODELNAME) $(SEG_MODELNAME) $(PIP) install -r requirements-test.txt smoke-test: TMPDIR != mktemp -d @@ -123,9 +129,9 @@ ocrd-test: tests/resources/kant_aufklaerung_1784_0020.tif $(RM) -r $(TMPDIR) # Run unit tests -test: export MODELS_LAYOUT=$(CURDIR)/models_layout_v0_5_0 -test: export MODELS_OCR=$(CURDIR)/models_ocr_v0_5_0 -test: export MODELS_BIN=$(CURDIR)/default-2021-03-09 +test: export MODELS_LAYOUT=$(CURDIR)/$(SEG_MODELNAME) +test: export MODELS_OCR=$(CURDIR)/$(OCR_MODELNAME) +test: export MODELS_BIN=$(CURDIR)/$(BIN_MODELNAME) test: $(PYTHON) -m pytest tests --durations=0 --continue-on-collection-errors $(PYTEST_ARGS) From c86e59f481ee47ccb9938b7f6105f95f626c5f17 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 30 Sep 2025 22:03:46 +0200 Subject: [PATCH 288/492] CI: update model key, split up cache restore/save --- .github/workflows/test-eynollah.yml | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/.github/workflows/test-eynollah.yml b/.github/workflows/test-eynollah.yml index 042e508..ca213cb 100644 --- a/.github/workflows/test-eynollah.yml +++ b/.github/workflows/test-eynollah.yml @@ -24,17 +24,17 @@ jobs: sudo rm -rf "$AGENT_TOOLSDIRECTORY" df -h - uses: actions/checkout@v4 - - uses: actions/cache@v4 + - uses: actions/cache/restore@v4 id: seg_model_cache with: path: models_layout_v0_5_0 - key: ${{ runner.os }}-models - - uses: actions/cache@v4 + key: ${{ runner.os }}-seg-models + - uses: actions/cache/restore@v4 id: ocr_model_cache with: - path: models_ocr_v0_5_0 - key: ${{ runner.os }}-models - - uses: actions/cache@v4 + path: models_ocr_v0_5_1 + key: ${{ runner.os }}-ocr-models + - uses: actions/cache/restore@v4 id: bin_model_cache with: path: default-2021-03-09 @@ -42,6 +42,21 @@ jobs: - name: Download models if: steps.seg_model_cache.outputs.cache-hit != 'true' || steps.bin_model_cache.outputs.cache-hit != 'true' || steps.ocr_model_cache.outputs.cache-hit != true run: make models + - uses: actions/cache/save@v4 + if: steps.seg_model_cache.outputs.cache-hit != 'true' + with: + path: models_layout_v0_5_0 + key: ${{ runner.os }}-seg-models + - uses: actions/cache/save@v4 + if: steps.ocr_model_cache.outputs.cache-hit != 'true' + with: + path: models_ocr_v0_5_1 + key: ${{ runner.os }}-ocr-models + - uses: actions/cache/save@v4 + if: steps.bin_model_cache.outputs.cache-hit != 'true' + with: + path: default-2021-03-09 + key: ${{ runner.os }}-modelbin - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: From ad129ed46c70b03fea7b48060e40e2451b40b975 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 30 Sep 2025 22:05:53 +0200 Subject: [PATCH 289/492] CI: remove OS from model cache keys --- .github/workflows/test-eynollah.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/test-eynollah.yml b/.github/workflows/test-eynollah.yml index ca213cb..9d5b2c8 100644 --- a/.github/workflows/test-eynollah.yml +++ b/.github/workflows/test-eynollah.yml @@ -28,17 +28,17 @@ jobs: id: seg_model_cache with: path: models_layout_v0_5_0 - key: ${{ runner.os }}-seg-models + key: seg-models - uses: actions/cache/restore@v4 id: ocr_model_cache with: path: models_ocr_v0_5_1 - key: ${{ runner.os }}-ocr-models + key: ocr-models - uses: actions/cache/restore@v4 id: bin_model_cache with: path: default-2021-03-09 - key: ${{ runner.os }}-modelbin + key: bin-models - name: Download models if: steps.seg_model_cache.outputs.cache-hit != 'true' || steps.bin_model_cache.outputs.cache-hit != 'true' || steps.ocr_model_cache.outputs.cache-hit != true run: make models @@ -46,17 +46,17 @@ jobs: if: steps.seg_model_cache.outputs.cache-hit != 'true' with: path: models_layout_v0_5_0 - key: ${{ runner.os }}-seg-models + key: seg-models - uses: actions/cache/save@v4 if: steps.ocr_model_cache.outputs.cache-hit != 'true' with: path: models_ocr_v0_5_1 - key: ${{ runner.os }}-ocr-models + key: ocr-models - uses: actions/cache/save@v4 if: steps.bin_model_cache.outputs.cache-hit != 'true' with: path: default-2021-03-09 - key: ${{ runner.os }}-modelbin + key: bin-models - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: From 7daec392b9846931b932d48fde71680ab4bf33e9 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 30 Sep 2025 22:10:45 +0200 Subject: [PATCH 290/492] Dockerfile: fix up CUDA installation for mixed TF/Torch --- Dockerfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Dockerfile b/Dockerfile index 4ba498b..a15776e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -40,6 +40,8 @@ RUN ocrd ocrd-tool ocrd-tool.json dump-tools > $(dirname $(ocrd bashlib filename RUN ocrd ocrd-tool ocrd-tool.json dump-module-dirs > $(dirname $(ocrd bashlib filename))/ocrd-all-module-dir.json # install everything and reduce image size RUN make install EXTRAS=OCR && rm -rf /build/eynollah +# fixup for broken cuDNN installation (Torch pulls in 8.5.0, which is incompatible with Tensorflow) +RUN pip install nvidia-cudnn-cu11==8.6.0.163 # smoke test RUN eynollah --help From f0de1adabf45f3dd70df72ddc09795a4512d5316 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 30 Sep 2025 23:12:18 +0200 Subject: [PATCH 291/492] rm loky dependency --- .gitignore | 4 ++++ requirements.txt | 1 - 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 0d5d834..3cc0eac 100644 --- a/.gitignore +++ b/.gitignore @@ -2,7 +2,11 @@ __pycache__ sbb_newspapers_org_image/pylint.log models_eynollah* +models_ocr* +models_layout* +default-2021-03-09 output.html /build /dist *.tif +TAGS diff --git a/requirements.txt b/requirements.txt index 4bc0c6a..db1d7df 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,5 +5,4 @@ scikit-learn >= 0.23.2 tensorflow < 2.13 numba <= 0.58.1 scikit-image -loky biopython From 3aa7ad04fafd842fe31c36094a2b51fa43cc1bd3 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 30 Sep 2025 23:14:52 +0200 Subject: [PATCH 292/492] :memo: update changelog --- CHANGELOG.md | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0ad9a09..f6776d6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,33 @@ Versioned according to [Semantic Versioning](http://semver.org/). ## Unreleased +Fixed: + + * :fire: polygons: avoid invalid paths (use `Polygon.buffer()` instead of dilation etc.) + * `return_boxes_of_images_by_order_of_reading_new`: avoid Numpy.dtype mismatch, simplify + * `return_boxes_of_images_by_order_of_reading_new`: log any exceptions instead of ignoring + * `filter_contours_without_textline_inside`: avoid removing from duplicate lists twice + * `get_marginals`: exit early if no peaks found to avoid spurious overlap mask + * `get_smallest_skew`: after shifting search range of rotation angle, use overall best result + * Dockerfile: fix CUDA installation (cuDNN contested between Torch and TF due to extra OCR) + * OCR: re-instate missing methods and fix `utils_ocr` function calls + * :fire: writer: `SeparatorRegion` needs `SeparatorRegionType` (not `ImageRegionType`) +f458e3e + * tests: switch from `pytest-subtests` to `parametrize` so we can use `pytest-isolate` + (so CUDA memory gets freed between tests if running on GPU) + +Changed: + + * polygons: slightly widen for regions and lines, increase for separators + * various refactorings, some code style and identifier improvements + * deskewing/multiprocessing: switch back to ProcessPoolExecutor (faster), + but use shared memory if necessary, and switch back from `loky` to stdlib, + and shutdown in `del()` instead of `atexit` + * :fire: OCR: switch CNN-RNN model to `20250930` version compatible with TF 2.12 on CPU, too + * :fire: writer: use `@type='heading'` instead of `'header'` for headings + * CI: update+improve model caching + + ## [0.5.0] - 2025-09-26 Fixed: From 558867eb245d7db1e7a9780d21d226d5729a3c96 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 1 Oct 2025 00:04:07 +0200 Subject: [PATCH 293/492] fix typo --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0ad9a09..8c6c000 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,7 +22,7 @@ Added: Fixed: * allow empty imports for optional dependencies - * avoid Numpy warnings (empty slices etc) + * avoid Numpy warnings (empty slices etc.) * remove deprecated Numpy types * binarization CLI: make `dir_in` usable again From 9ce127eb51997f6779f6d9877e4eb506ed5fda21 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 1 Oct 2025 00:04:53 +0200 Subject: [PATCH 294/492] remove unnecessary backslash --- src/eynollah/utils/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 6eeabd0..58592bd 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1452,7 +1452,7 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, gray = cv2.bitwise_not(separators_closeup_n_binary) gray=gray.astype(np.uint8) - bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \ + bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -2) horizontal = np.copy(bw) vertical = np.copy(bw) From 1d0616eb6918d6017e258fb50356eef0fefd685a Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 1 Oct 2025 00:15:11 +0200 Subject: [PATCH 295/492] comparisons to None should not use the equality operators --- src/eynollah/utils/__init__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 58592bd..152ac6e 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1211,7 +1211,7 @@ def order_of_regions(textline_mask, contours_main, contours_header, y_ref): ##plt.plot(z) ##plt.show() - if contours_main != None: + if contours_main is not None: areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))]) M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] @@ -1222,7 +1222,7 @@ def order_of_regions(textline_mask, contours_main, contours_header, y_ref): y_min_main = np.array([np.min(contours_main[j][:, 0, 1]) for j in range(len(contours_main))]) y_max_main = np.array([np.max(contours_main[j][:, 0, 1]) for j in range(len(contours_main))]) - if len(contours_header) != None: + if len(contours_header) is not None: areas_header = np.array([cv2.contourArea(contours_header[j]) for j in range(len(contours_header))]) M_header = [cv2.moments(contours_header[j]) for j in range(len(contours_header))] cx_header = [(M_header[j]["m10"] / (M_header[j]["m00"] + 1e-32)) for j in range(len(M_header))] @@ -1243,9 +1243,9 @@ def order_of_regions(textline_mask, contours_main, contours_header, y_ref): if len(cy_main) > 0 and np.max(cy_main) > np.max(peaks_neg_new): cy_main = np.array(cy_main) * (np.max(peaks_neg_new) / np.max(cy_main)) - 10 - if contours_main != None: + if contours_main is not None: indexer_main = np.arange(len(contours_main)) - if contours_main != None: + if contours_main is not None: len_main = len(contours_main) else: len_main = 0 From 70af00182b2332f33e7872b6abc1af9bbba787bc Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 1 Oct 2025 00:20:18 +0200 Subject: [PATCH 296/492] mutable defaults are the source of all evil --- train/models.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/train/models.py b/train/models.py index 8841bd3..fdc5437 100644 --- a/train/models.py +++ b/train/models.py @@ -394,7 +394,9 @@ def resnet50_unet(n_classes, input_height=224, input_width=224, task="segmentati return model -def vit_resnet50_unet(n_classes, patch_size_x, patch_size_y, num_patches, mlp_head_units=[128, 64], transformer_layers=8, num_heads =4, projection_dim = 64, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): +def vit_resnet50_unet(n_classes, patch_size_x, patch_size_y, num_patches, mlp_head_units=None, transformer_layers=8, num_heads =4, projection_dim = 64, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): + if mlp_head_units is None: + mlp_head_units = [128, 64] inputs = layers.Input(shape=(input_height, input_width, 3)) #transformer_units = [ @@ -516,7 +518,9 @@ def vit_resnet50_unet(n_classes, patch_size_x, patch_size_y, num_patches, mlp_he return model -def vit_resnet50_unet_transformer_before_cnn(n_classes, patch_size_x, patch_size_y, num_patches, mlp_head_units=[128, 64], transformer_layers=8, num_heads =4, projection_dim = 64, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): +def vit_resnet50_unet_transformer_before_cnn(n_classes, patch_size_x, patch_size_y, num_patches, mlp_head_units=None, transformer_layers=8, num_heads =4, projection_dim = 64, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): + if mlp_head_units is None: + mlp_head_units = [128, 64] inputs = layers.Input(shape=(input_height, input_width, 3)) ##transformer_units = [ From f2f93e0251de3421b26d9b9f18c7d581846e82af Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 1 Oct 2025 00:26:27 +0200 Subject: [PATCH 297/492] list literal is faster than using list constructor to create a new list --- src/eynollah/utils/__init__.py | 21 +++++++-------------- src/eynollah/utils/contour.py | 3 +-- src/eynollah/utils/separate_lines.py | 6 ++---- train/inference.py | 3 +-- train/train.py | 3 +-- 5 files changed, 12 insertions(+), 24 deletions(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 152ac6e..7c06900 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -138,8 +138,7 @@ def return_x_start_end_mothers_childs_and_type_of_reading_order( min_ys=np.min(y_sep) max_ys=np.max(y_sep) - y_mains=[] - y_mains.append(min_ys) + y_mains= [min_ys] y_mains_sep_ohne_grenzen=[] for ii in range(len(new_main_sep_y)): @@ -493,8 +492,7 @@ def find_num_col(regions_without_separators, num_col_classifier, tables, multipl # print(forest[np.argmin(z[forest]) ] ) if not isNaN(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) - forest = [] - forest.append(peaks_neg_fin[i + 1]) + forest = [peaks_neg_fin[i + 1]] if i == (len(peaks_neg_fin) - 1): # print(print(forest[np.argmin(z[forest]) ] )) if not isNaN(forest[np.argmin(z[forest])]): @@ -662,8 +660,7 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8): # print(forest[np.argmin(z[forest]) ] ) if not isNaN(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) - forest = [] - forest.append(peaks_neg_fin[i + 1]) + forest = [peaks_neg_fin[i + 1]] if i == (len(peaks_neg_fin) - 1): # print(print(forest[np.argmin(z[forest]) ] )) if not isNaN(forest[np.argmin(z[forest])]): @@ -1235,8 +1232,7 @@ def order_of_regions(textline_mask, contours_main, contours_header, y_ref): y_max_header = np.array([np.max(contours_header[j][:, 0, 1]) for j in range(len(contours_header))]) # print(cy_main,'mainy') - peaks_neg_new = [] - peaks_neg_new.append(0 + y_ref) + peaks_neg_new = [0 + y_ref] for iii in range(len(peaks_neg)): peaks_neg_new.append(peaks_neg[iii] + y_ref) peaks_neg_new.append(textline_mask.shape[0] + y_ref) @@ -1404,8 +1400,7 @@ def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new( return img_p_in[:,:,0], special_separators def return_points_with_boundies(peaks_neg_fin, first_point, last_point): - peaks_neg_tot = [] - peaks_neg_tot.append(first_point) + peaks_neg_tot = [first_point] for ii in range(len(peaks_neg_fin)): peaks_neg_tot.append(peaks_neg_fin[ii]) peaks_neg_tot.append(last_point) @@ -1588,8 +1583,7 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, args_cy_splitter=np.argsort(cy_main_splitters) cy_main_splitters_sort=cy_main_splitters[args_cy_splitter] - splitter_y_new=[] - splitter_y_new.append(0) + splitter_y_new= [0] for i in range(len(cy_main_splitters_sort)): splitter_y_new.append( cy_main_splitters_sort[i] ) splitter_y_new.append(region_pre_p.shape[0]) @@ -1663,8 +1657,7 @@ def return_boxes_of_images_by_order_of_reading_new( num_col, peaks_neg_fin = find_num_col( regions_without_separators[int(splitter_y_new[i]):int(splitter_y_new[i+1]),:], num_col_classifier, tables, multiplier=3.) - peaks_neg_fin_early=[] - peaks_neg_fin_early.append(0) + peaks_neg_fin_early= [0] #print(peaks_neg_fin,'peaks_neg_fin') for p_n in peaks_neg_fin: peaks_neg_fin_early.append(p_n) diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index 0e84153..0be8879 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -239,8 +239,7 @@ def do_back_rotation_and_get_cnt_back(contour_par, index_r_con, img, slope_first cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if len(cont_int)==0: - cont_int = [] - cont_int.append(contour_par) + cont_int = [contour_par] confidence_contour = 0 else: cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1]) diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index ead5cfb..c87653c 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1174,8 +1174,7 @@ def separate_lines_new_inside_tiles(img_path, thetha): if diff_peaks[i] > cut_off: if not np.isnan(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) - forest = [] - forest.append(peaks_neg[i + 1]) + forest = [peaks_neg[i + 1]] if i == (len(peaks_neg) - 1): if not np.isnan(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) @@ -1195,8 +1194,7 @@ def separate_lines_new_inside_tiles(img_path, thetha): if diff_peaks_pos[i] > cut_off: if not np.isnan(forest[np.argmax(z[forest])]): peaks_pos_true.append(forest[np.argmax(z[forest])]) - forest = [] - forest.append(peaks[i + 1]) + forest = [peaks[i + 1]] if i == (len(peaks) - 1): if not np.isnan(forest[np.argmax(z[forest])]): peaks_pos_true.append(forest[np.argmax(z[forest])]) diff --git a/train/inference.py b/train/inference.py index 094c528..0e55aa8 100644 --- a/train/inference.py +++ b/train/inference.py @@ -305,8 +305,7 @@ class sbb_predict: input_1= np.zeros( (inference_bs, img_height, img_width,3)) - starting_list_of_regions = [] - starting_list_of_regions.append( list(range(labels_con.shape[2])) ) + starting_list_of_regions = [list(range(labels_con.shape[2]))] index_update = 0 index_selected = starting_list_of_regions[0] diff --git a/train/train.py b/train/train.py index e8e92af..795009a 100644 --- a/train/train.py +++ b/train/train.py @@ -365,8 +365,7 @@ def run(_config, n_classes, n_epochs, input_height, y_tot=np.zeros((testX.shape[0],n_classes)) - score_best=[] - score_best.append(0) + score_best= [0] num_rows = return_number_of_total_training_data(dir_train) weights=[] From 91d2a74ac950e55e75c0c03ece817ae96a4fc377 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 1 Oct 2025 00:38:01 +0200 Subject: [PATCH 298/492] remove redundant parentheses --- src/eynollah/eynollah.py | 4 ++-- src/eynollah/plot.py | 2 +- src/eynollah/utils/__init__.py | 8 ++++---- src/eynollah/utils/counter.py | 2 +- src/eynollah/utils/marginals.py | 2 +- src/eynollah/utils/separate_lines.py | 14 +++++++------- src/eynollah/writer.py | 2 +- train/inference.py | 8 ++++---- train/train.py | 10 +++++----- train/utils.py | 6 +++--- 10 files changed, 29 insertions(+), 29 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 20954a0..63f7005 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4886,9 +4886,9 @@ class Eynollah: textline_mask_tot_ea_org[img_revised_tab==drop_label_in_full_layout] = 0 - text_only = ((img_revised_tab[:, :] == 1)) * 1 + text_only = (img_revised_tab[:, :] == 1) * 1 if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - text_only_d = ((text_regions_p_1_n[:, :] == 1)) * 1 + text_only_d = (text_regions_p_1_n[:, :] == 1) * 1 #print("text region early 2 in %.1fs", time.time() - t0) ###min_con_area = 0.000005 diff --git a/src/eynollah/plot.py b/src/eynollah/plot.py index 412ae5a..c026e94 100644 --- a/src/eynollah/plot.py +++ b/src/eynollah/plot.py @@ -12,7 +12,7 @@ from .utils import crop_image_inside_box from .utils.rotate import rotate_image_different from .utils.resize import resize_image -class EynollahPlotter(): +class EynollahPlotter: """ Class collecting all the plotting and image writing methods """ diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 7c06900..de083f5 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1267,11 +1267,11 @@ def order_of_regions(textline_mask, contours_main, contours_header, y_ref): top = peaks_neg_new[i] down = peaks_neg_new[i + 1] indexes_in = matrix_of_orders[:, 0][(matrix_of_orders[:, 3] >= top) & - ((matrix_of_orders[:, 3] < down))] + (matrix_of_orders[:, 3] < down)] cxs_in = matrix_of_orders[:, 2][(matrix_of_orders[:, 3] >= top) & - ((matrix_of_orders[:, 3] < down))] + (matrix_of_orders[:, 3] < down)] cys_in = matrix_of_orders[:, 3][(matrix_of_orders[:, 3] >= top) & - ((matrix_of_orders[:, 3] < down))] + (matrix_of_orders[:, 3] < down)] types_of_text = matrix_of_orders[:, 1][(matrix_of_orders[:, 3] >= top) & (matrix_of_orders[:, 3] < down)] index_types_of_text = matrix_of_orders[:, 4][(matrix_of_orders[:, 3] >= top) & @@ -1408,7 +1408,7 @@ def return_points_with_boundies(peaks_neg_fin, first_point, last_point): def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, pixel_lines, contours_h=None): t_ins_c0 = time.time() - separators_closeup=( (region_pre_p[:,:,:]==pixel_lines))*1 + separators_closeup= (region_pre_p[:, :, :] == pixel_lines) * 1 separators_closeup[0:110,:,:]=0 separators_closeup[separators_closeup.shape[0]-150:,:,:]=0 diff --git a/src/eynollah/utils/counter.py b/src/eynollah/utils/counter.py index 9a3ed70..e6205c8 100644 --- a/src/eynollah/utils/counter.py +++ b/src/eynollah/utils/counter.py @@ -3,7 +3,7 @@ from collections import Counter REGION_ID_TEMPLATE = 'region_%04d' LINE_ID_TEMPLATE = 'region_%04d_line_%04d' -class EynollahIdCounter(): +class EynollahIdCounter: def __init__(self, region_idx=0, line_idx=0): self._counter = Counter() diff --git a/src/eynollah/utils/marginals.py b/src/eynollah/utils/marginals.py index ac8dc1d..9ec0737 100644 --- a/src/eynollah/utils/marginals.py +++ b/src/eynollah/utils/marginals.py @@ -76,7 +76,7 @@ def get_marginals(text_with_lines, text_regions, num_col, slope_deskew, light_ve peaks, _ = find_peaks(text_with_lines_y_rev, height=0) peaks=np.array(peaks) - peaks=peaks[(peaks>first_nonzero) & ((peaksfirst_nonzero) & (peaks < last_nonzero)] peaks=peaks[region_sum_0[peaks]=batchsize: ret_x = ret_x/255. - yield (ret_x, ret_y) + yield ret_x, ret_y ret_x= np.zeros((batchsize, height,width, 3)).astype(np.int16) ret_y= np.zeros((batchsize, n_classes)).astype(np.int16) batchcount = 0 @@ -446,7 +446,7 @@ def generate_arrays_from_folder_reading_order(classes_file_dir, modal_dir, batch ret_y[batchcount, :] = label_class batchcount+=1 if batchcount>=batchsize: - yield (ret_x, ret_y) + yield ret_x, ret_y ret_x= np.zeros((batchsize, height, width, 3))#.astype(np.int16) ret_y= np.zeros((batchsize, n_classes)).astype(np.int16) batchcount = 0 @@ -464,7 +464,7 @@ def generate_arrays_from_folder_reading_order(classes_file_dir, modal_dir, batch ret_y[batchcount, :] = label_class batchcount+=1 if batchcount>=batchsize: - yield (ret_x, ret_y) + yield ret_x, ret_y ret_x= np.zeros((batchsize, height, width, 3))#.astype(np.int16) ret_y= np.zeros((batchsize, n_classes)).astype(np.int16) batchcount = 0 From e027bc038e28736a5557d342f0adcbd153bacc57 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 1 Oct 2025 01:05:15 +0200 Subject: [PATCH 299/492] Update README.md --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 9dc4824..144ccd4 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,7 @@ make install EXTRAS=OCR Pretrained models can be downloaded from [zenodo](https://zenodo.org/records/17194824) or [huggingface](https://huggingface.co/SBB?search_models=eynollah). -For documentation on methods and models, have a look at [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). +For documentation on models, have a look at [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). Model cards are also provided for our trained models. ## Training @@ -74,7 +74,7 @@ image enhancement, text recognition (OCR), and reading order detection. ### Layout Analysis The layout analysis module is responsible for detecting layout elements, identifying text lines, and determining reading -order using either heuristic methods or a reading order detection model. +order using either heuristic methods or a [pretrained reading order detection model](https://github.com/qurator-spk/eynollah#machine-based-reading-order). Reading order detection can be performed either as part of layout analysis based on image input, or, currently under development, based on pre-existing layout analysis results in PAGE-XML format as input. @@ -174,6 +174,7 @@ If the input file group is PAGE-XML (from a previous OCR-D workflow step), Eynol (because some other preprocessing step was in effect like `denoised`), then the output PAGE-XML will be based on that as new top-level (`@imageFilename`) + ocrd-eynollah-segment -I OCR-D-XYZ -O OCR-D-SEG -P models eynollah_layout_v0_5_0 In general, it makes more sense to add other workflow steps **after** Eynollah. From 4514d417a77a61d6143622d3503ea475106cb25b Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 1 Oct 2025 01:16:25 +0200 Subject: [PATCH 300/492] force GH markdown code block in list --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 144ccd4..3ba5086 100644 --- a/README.md +++ b/README.md @@ -174,8 +174,7 @@ If the input file group is PAGE-XML (from a previous OCR-D workflow step), Eynol (because some other preprocessing step was in effect like `denoised`), then the output PAGE-XML will be based on that as new top-level (`@imageFilename`) - - ocrd-eynollah-segment -I OCR-D-XYZ -O OCR-D-SEG -P models eynollah_layout_v0_5_0 + ocrd-eynollah-segment -I OCR-D-XYZ -O OCR-D-SEG -P models eynollah_layout_v0_5_0 In general, it makes more sense to add other workflow steps **after** Eynollah. From 5725e4fd1f6bab4c1152c88cc28c44c0e8c2c584 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 1 Oct 2025 15:58:03 +0200 Subject: [PATCH 301/492] =?UTF-8?q?-Continue=20processing=20when=20num=5Fc?= =?UTF-8?q?ol=20is=20None=20but=20textregions=20exist.=20-Convert=20margin?= =?UTF-8?q?al-only=20=20to=20main=20body=20if=20no=20main=20body=20is=20pr?= =?UTF-8?q?esent.=20-Reset=20deskew=20angle=20to=200=20when=20text=20regio?= =?UTF-8?q?n=20density=20(textregion=20area=20to=20page=20area)=20<=200.3?= =?UTF-8?q?=20and=20angle=20>=2045=C2=B0.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/eynollah/eynollah.py | 41 +++++++++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 15 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 20954a0..5e8412e 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -1,4 +1,4 @@ -# pylint: disable=no-member,invalid-name,line-too-long,missing-function-docstring,missing-class-docstring,too-many-branches +#run_single# pylint: disable=no-member,invalid-name,line-too-long,missing-function-docstring,missing-class-docstring,too-many-branches # pylint: disable=too-many-locals,wrong-import-position,too-many-lines,too-many-statements,chained-comparison,fixme,broad-except,c-extension-no-member # pylint: disable=too-many-public-methods,too-many-arguments,too-many-instance-attributes,too-many-public-methods, # pylint: disable=consider-using-enumerate @@ -2245,6 +2245,7 @@ class Eynollah: ##mask_texts_only = cv2.dilate(mask_texts_only, KERNEL, iterations=1) mask_texts_only = cv2.dilate(mask_texts_only, kernel=np.ones((2,2), np.uint8), iterations=1) + mask_images_only=(prediction_regions_org[:,:] ==2)*1 polygons_lines_xml, hir_lines_xml = return_contours_of_image(mask_lines_only) @@ -2280,20 +2281,18 @@ class Eynollah: text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_texts, color=(1,1,1)) - #plt.imshow(textline_mask_tot_ea) #plt.show() textline_mask_tot_ea[(text_regions_p_true==0) | (text_regions_p_true==4) ] = 0 - #plt.imshow(textline_mask_tot_ea) #plt.show() #print("inside 4 ", time.time()-t_in) self.logger.debug("exit get_regions_light_v") - return text_regions_p_true, erosion_hurts, polygons_lines_xml, textline_mask_tot_ea, img_bin, confidence_matrix + return text_regions_p_true, erosion_hurts, polygons_lines_xml, textline_mask_tot_ea, img_bin, confidence_matrix, polygons_of_only_texts else: img_bin = resize_image(img_bin,img_height_h, img_width_h ) self.logger.debug("exit get_regions_light_v") - return None, erosion_hurts, None, textline_mask_tot_ea, img_bin, None + return None, erosion_hurts, None, textline_mask_tot_ea, img_bin, None, None def get_regions_from_xy_2models(self,img,is_image_enhanced, num_col_classifier): self.logger.debug("enter get_regions_from_xy_2models") @@ -2386,7 +2385,7 @@ class Eynollah: text_regions_p_true=cv2.fillPoly(text_regions_p_true,pts=polygons_of_only_texts, color=(1,1,1)) self.logger.debug("exit get_regions_from_xy_2models") - return text_regions_p_true, erosion_hurts, polygons_lines_xml + return text_regions_p_true, erosion_hurts, polygons_lines_xml, polygons_of_only_texts except: if self.input_binary: prediction_bin = np.copy(img_org) @@ -2436,7 +2435,7 @@ class Eynollah: erosion_hurts = True self.logger.debug("exit get_regions_from_xy_2models") - return text_regions_p_true, erosion_hurts, polygons_lines_xml + return text_regions_p_true, erosion_hurts, polygons_lines_xml, polygons_of_only_texts def do_order_of_regions_full_layout( self, contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot): @@ -4701,7 +4700,7 @@ class Eynollah: self.logger.info("Step 2/5: Basic Processing Mode") self.logger.info("Skipping layout analysis and reading order detection") - _ ,_, _, textline_mask_tot_ea, img_bin_light, _ = \ + _ ,_, _, textline_mask_tot_ea, img_bin_light, _,_= \ self.get_regions_light_v(img_res, is_image_enhanced, num_col_classifier, skip_layout_and_reading_order=self.skip_layout_and_reading_order) @@ -4768,10 +4767,10 @@ class Eynollah: if self.light_version: self.logger.info("Using light version processing") - text_regions_p_1 ,erosion_hurts, polygons_lines_xml, textline_mask_tot_ea, img_bin_light, confidence_matrix = \ + text_regions_p_1 ,erosion_hurts, polygons_lines_xml, textline_mask_tot_ea, img_bin_light, confidence_matrix, polygons_text_early = \ self.get_regions_light_v(img_res, is_image_enhanced, num_col_classifier) #print("text region early -2 in %.1fs", time.time() - t0) - + if num_col_classifier == 1 or num_col_classifier ==2: if num_col_classifier == 1: img_w_new = 1000 @@ -4793,9 +4792,9 @@ class Eynollah: #self.logger.info("run graphics %.1fs ", time.time() - t1t) #print("text region early -3 in %.1fs", time.time() - t0) textline_mask_tot_ea_org = np.copy(textline_mask_tot_ea) - #print("text region early -4 in %.1fs", time.time() - t0) + else: - text_regions_p_1 ,erosion_hurts, polygons_lines_xml = \ + text_regions_p_1 ,erosion_hurts, polygons_lines_xml, polygons_text_early = \ self.get_regions_from_xy_2models(img_res, is_image_enhanced, num_col_classifier) self.logger.info(f"Textregion detection took {time.time() - t1:.1f}s") @@ -4811,7 +4810,7 @@ class Eynollah: #plt.show() self.logger.info(f"Layout analysis complete ({time.time() - t1:.1f}s)") - if not num_col: + if not num_col and len(polygons_text_early) == 0: self.logger.info("No columns detected - generating empty PAGE-XML") pcgts = self.writer.build_pagexml_no_full_layout( @@ -4848,6 +4847,15 @@ class Eynollah: textline_mask_tot, text_regions_p, image_page_rotated = \ self.run_marginals(image_page, textline_mask_tot_ea, mask_images, mask_lines, num_col_classifier, slope_deskew, text_regions_p_1, table_prediction) + + + if image_page.shape[0]!=0 and image_page.shape[1]!=0: + # if ratio of text regions to page area is smaller that 0.3, deskew angle is not aloowed to exceed 45 + if ( ( text_regions_p[:,:]==1).sum() + (text_regions_p[:,:]==4).sum() ) / float(image_page.shape[0]*image_page.shape[1] ) <= 0.3 and abs(slope_deskew) > 45: + slope_deskew = 0 + + if (text_regions_p[:,:]==1).sum() == 0: + text_regions_p[:,:][text_regions_p[:,:]==4] = 1 self.logger.info("Step 3/5: Text Line Detection") @@ -4894,6 +4902,8 @@ class Eynollah: ###min_con_area = 0.000005 contours_only_text, hir_on_text = return_contours_of_image(text_only) contours_only_text_parent = return_parent_contours(contours_only_text, hir_on_text) + + if len(contours_only_text_parent) > 0: areas_cnt_text = np.array([cv2.contourArea(c) for c in contours_only_text_parent]) areas_cnt_text = areas_cnt_text / float(text_only.shape[0] * text_only.shape[1]) @@ -4995,7 +5005,9 @@ class Eynollah: contours_only_text_parent_d_ordered = [] contours_only_text_parent_d = [] #contours_only_text_parent = [] - + + boxes_marginals, _ = get_text_region_boxes_by_given_contours(polygons_of_marginals) + if not len(contours_only_text_parent): # stop early empty_marginals = [[]] * len(polygons_of_marginals) @@ -5031,7 +5043,6 @@ class Eynollah: contours_only_text_parent, self.image, slope_first, confidence_matrix, map=self.executor.map) #print("text region early 4 in %.1fs", time.time() - t0) boxes_text, _ = get_text_region_boxes_by_given_contours(contours_only_text_parent) - boxes_marginals, _ = get_text_region_boxes_by_given_contours(polygons_of_marginals) #print("text region early 5 in %.1fs", time.time() - t0) ## birdan sora chock chakir if not self.curved_line: From 733af1e9a71b31ab5902a9630ded787411255b76 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 1 Oct 2025 17:43:32 +0200 Subject: [PATCH 302/492] :memo: update train/README.md, align with docs/train.md --- docs/train.md | 135 +++++++++++++++++++++++++++++------------------- train/README.md | 107 ++++++++++++++------------------------ 2 files changed, 120 insertions(+), 122 deletions(-) diff --git a/docs/train.md b/docs/train.md index b920a07..839529f 100644 --- a/docs/train.md +++ b/docs/train.md @@ -1,18 +1,24 @@ # Training documentation -This aims to assist users in preparing training datasets, training models, and +This document aims to assist users in preparing training datasets, training models, and performing inference with trained models. We cover various use cases including pixel-wise segmentation, image classification, image enhancement, and machine-based reading order detection. For each use case, we provide guidance on how to generate the corresponding training dataset. The following three tasks can all be accomplished using the code in the -[`train`](https://github.com/qurator-spk/sbb_pixelwise_segmentation/tree/unifying-training-models) directory: +[`train`](https://github.com/qurator-spk/eynollah/tree/main/train) directory: * generate training dataset * train a model * inference with the trained model +## Training , evaluation and output + +The train and evaluation folders should contain subfolders of `images` and `labels`. + +The output folder should be an empty folder where the output model will be written to. + ## Generate training dataset The script `generate_gt_for_training.py` is used for generating training datasets. As the results of the following @@ -66,7 +72,7 @@ to the image area, with a default value of zero. To run the dataset generator, u python generate_gt_for_training.py machine-based-reading-order \ -dx "dir of GT xml files" \ -domi "dir where output images will be written" \ - -docl "dir where the labels will be written" \ +"" -docl "dir where the labels will be written" \ -ih "height" \ -iw "width" \ -min "min area ratio" @@ -312,60 +318,59 @@ The following parameter configuration can be applied to all segmentation use cas its sub-parameters, and continued training are defined only for segmentation use cases and enhancements, not for classification and machine-based reading order, as you can see in their example config files. -* backbone_type: For segmentation tasks (such as text line, binarization, and layout detection) and enhancement, we -* offer two backbone options: a "nontransformer" and a "transformer" backbone. For the "transformer" backbone, we first -* apply a CNN followed by a transformer. In contrast, the "nontransformer" backbone utilizes only a CNN ResNet-50. -* task : The task parameter can have values such as "segmentation", "enhancement", "classification", and "reading_order". -* patches: If you want to break input images into smaller patches (input size of the model) you need to set this -* parameter to ``true``. In the case that the model should see the image once, like page extraction, patches should be -* set to ``false``. -* n_batch: Number of batches at each iteration. -* n_classes: Number of classes. In the case of binary classification this should be 2. In the case of reading_order it -* should set to 1. And for the case of layout detection just the unique number of classes should be given. -* n_epochs: Number of epochs. -* input_height: This indicates the height of model's input. -* input_width: This indicates the width of model's input. -* weight_decay: Weight decay of l2 regularization of model layers. -* pretraining: Set to ``true`` to load pretrained weights of ResNet50 encoder. The downloaded weights should be saved -* in a folder named "pretrained_model" in the same directory of "train.py" script. -* augmentation: If you want to apply any kind of augmentation this parameter should first set to ``true``. -* flip_aug: If ``true``, different types of filp will be applied on image. Type of flips is given with "flip_index" parameter. -* blur_aug: If ``true``, different types of blurring will be applied on image. Type of blurrings is given with "blur_k" parameter. -* scaling: If ``true``, scaling will be applied on image. Scale of scaling is given with "scales" parameter. -* degrading: If ``true``, degrading will be applied to the image. The amount of degrading is defined with "degrade_scales" parameter. -* brightening: If ``true``, brightening will be applied to the image. The amount of brightening is defined with "brightness" parameter. -* rotation_not_90: If ``true``, rotation (not 90 degree) will be applied on image. Rotation angles are given with "thetha" parameter. -* rotation: If ``true``, 90 degree rotation will be applied on image. -* binarization: If ``true``,Otsu thresholding will be applied to augment the input data with binarized images. -* scaling_bluring: If ``true``, combination of scaling and blurring will be applied on image. -* scaling_binarization: If ``true``, combination of scaling and binarization will be applied on image. -* scaling_flip: If ``true``, combination of scaling and flip will be applied on image. -* flip_index: Type of flips. -* blur_k: Type of blurrings. -* scales: Scales of scaling. -* brightness: The amount of brightenings. -* thetha: Rotation angles. -* degrade_scales: The amount of degradings. -* continue_training: If ``true``, it means that you have already trained a model and you would like to continue the +* `backbone_type`: For segmentation tasks (such as text line, binarization, and layout detection) and enhancement, we + offer two backbone options: a "nontransformer" and a "transformer" backbone. For the "transformer" backbone, we first + apply a CNN followed by a transformer. In contrast, the "nontransformer" backbone utilizes only a CNN ResNet-50. +* `task`: The task parameter can have values such as "segmentation", "enhancement", "classification", and "reading_order". +* `patches`: If you want to break input images into smaller patches (input size of the model) you need to set this +* parameter to `true`. In the case that the model should see the image once, like page extraction, patches should be + set to ``false``. +* `n_batch`: Number of batches at each iteration. +* `n_classes`: Number of classes. In the case of binary classification this should be 2. In the case of reading_order it + should set to 1. And for the case of layout detection just the unique number of classes should be given. +* `n_epochs`: Number of epochs. +* `input_height`: This indicates the height of model's input. +* `input_width`: This indicates the width of model's input. +* `weight_decay`: Weight decay of l2 regularization of model layers. +* `pretraining`: Set to `true` to load pretrained weights of ResNet50 encoder. The downloaded weights should be saved + in a folder named "pretrained_model" in the same directory of "train.py" script. +* `augmentation`: If you want to apply any kind of augmentation this parameter should first set to `true`. +* `flip_aug`: If `true`, different types of filp will be applied on image. Type of flips is given with "flip_index" parameter. +* `blur_aug`: If `true`, different types of blurring will be applied on image. Type of blurrings is given with "blur_k" parameter. +* `scaling`: If `true`, scaling will be applied on image. Scale of scaling is given with "scales" parameter. +* `degrading`: If `true`, degrading will be applied to the image. The amount of degrading is defined with "degrade_scales" parameter. +* `brightening`: If `true`, brightening will be applied to the image. The amount of brightening is defined with "brightness" parameter. +* `rotation_not_90`: If `true`, rotation (not 90 degree) will be applied on image. Rotation angles are given with "thetha" parameter. +* `rotation`: If `true`, 90 degree rotation will be applied on image. +* `binarization`: If `true`,Otsu thresholding will be applied to augment the input data with binarized images. +* `scaling_bluring`: If `true`, combination of scaling and blurring will be applied on image. +* `scaling_binarization`: If `true`, combination of scaling and binarization will be applied on image. +* `scaling_flip`: If `true`, combination of scaling and flip will be applied on image. +* `flip_index`: Type of flips. +* `blur_k`: Type of blurrings. +* `scales`: Scales of scaling. +* `brightness`: The amount of brightenings. +* `thetha`: Rotation angles. +* `degrade_scales`: The amount of degradings. +* `continue_training`: If `true`, it means that you have already trained a model and you would like to continue the training. So it is needed to providethe dir of trained model with "dir_of_start_model" and index for naming themodels. For example if you have already trained for 3 epochs then your lastindex is 2 and if you want to continue - from model_1.h5, you can set -``index_start`` to 3 to start naming model with index 3. -* weighted_loss: If ``true``, this means that you want to apply weighted categorical_crossentropy as loss fucntion. Be carefull if you set to ``true``the parameter "is_loss_soft_dice" should be ``false`` -* data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train + from model_1.h5, you can set `index_start` to 3 to start naming model with index 3. +* `weighted_loss`: If `true`, this means that you want to apply weighted categorical_crossentropy as loss fucntion. Be carefull if you set to `true`the parameter "is_loss_soft_dice" should be ``false`` +* `data_is_provided`: If you have already provided the input data you can set this to `true`. Be sure that the train and eval data are in"dir_output".Since when once we provide training data we resize and augmentthem and then wewrite them in sub-directories train and eval in "dir_output". -* dir_train: This is the directory of "images" and "labels" (dir_train should include two subdirectories with names of images and labels ) for raw images and labels. Namely they are not prepared (not resized and not augmented) yet for training the model. When we run this tool these raw data will be transformed to suitable size needed for the model and they will be written in "dir_output" in train and eval directories. Each of train and eval include "images" and "labels" sub-directories. -* index_start: Starting index for saved models in the case that "continue_training" is ``true``. -* dir_of_start_model: Directory containing pretrained model to continue training the model in the case that "continue_training" is ``true``. -* transformer_num_patches_xy: Number of patches for vision transformer in x and y direction respectively. -* transformer_patchsize_x: Patch size of vision transformer patches in x direction. -* transformer_patchsize_y: Patch size of vision transformer patches in y direction. -* transformer_projection_dim: Transformer projection dimension. Default value is 64. -* transformer_mlp_head_units: Transformer Multilayer Perceptron (MLP) head units. Default value is [128, 64]. -* transformer_layers: transformer layers. Default value is 8. -* transformer_num_heads: Transformer number of heads. Default value is 4. -* transformer_cnn_first: We have two types of vision transformers. In one type, a CNN is applied first, followed by a transformer. In the other type, this order is reversed. If transformer_cnn_first is true, it means the CNN will be applied before the transformer. Default value is true. +* `dir_train`: This is the directory of "images" and "labels" (dir_train should include two subdirectories with names of images and labels ) for raw images and labels. Namely they are not prepared (not resized and not augmented) yet for training the model. When we run this tool these raw data will be transformed to suitable size needed for the model and they will be written in "dir_output" in train and eval directories. Each of train and eval include "images" and "labels" sub-directories. +* `index_start`: Starting index for saved models in the case that "continue_training" is `true`. +* `dir_of_start_model`: Directory containing pretrained model to continue training the model in the case that "continue_training" is `true`. +* `transformer_num_patches_xy`: Number of patches for vision transformer in x and y direction respectively. +* `transformer_patchsize_x`: Patch size of vision transformer patches in x direction. +* `transformer_patchsize_y`: Patch size of vision transformer patches in y direction. +* `transformer_projection_dim`: Transformer projection dimension. Default value is 64. +* `transformer_mlp_head_units`: Transformer Multilayer Perceptron (MLP) head units. Default value is [128, 64]. +* `transformer_layers`: transformer layers. Default value is 8. +* `transformer_num_heads`: Transformer number of heads. Default value is 4. +* `transformer_cnn_first`: We have two types of vision transformers. In one type, a CNN is applied first, followed by a transformer. In the other type, this order is reversed. If transformer_cnn_first is true, it means the CNN will be applied before the transformer. Default value is true. In the case of segmentation and enhancement the train and evaluation directory should be as following. @@ -394,6 +399,30 @@ command, similar to the process for classification and reading order: #### Binarization +### Ground truth format + +Lables for each pixel are identified by a number. So if you have a +binary case, ``n_classes`` should be set to ``2`` and labels should +be ``0`` and ``1`` for each class and pixel. + +In the case of multiclass, just set ``n_classes`` to the number of classes +you have and the try to produce the labels by pixels set from ``0 , 1 ,2 .., n_classes-1``. +The labels format should be png. +Our lables are 3 channel png images but only information of first channel is used. +If you have an image label with height and width of 10, for a binary case the first channel should look like this: + + Label: [ [1, 0, 0, 1, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + ..., + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ] + + This means that you have an image by `10*10*3` and `pixel[0,0]` belongs + to class `1` and `pixel[0,1]` belongs to class `0`. + + A small sample of training data for binarization experiment can be found here, [Training data sample](https://qurator-data.de/~vahid.rezanezhad/binarization_training_data_sample/), which contains images and lables folders. + + An example config json file for binarization can be like this: ```yaml diff --git a/train/README.md b/train/README.md index 7c69a10..5f6d326 100644 --- a/train/README.md +++ b/train/README.md @@ -1,17 +1,39 @@ -# Pixelwise Segmentation -> Pixelwise segmentation for document images +# Training eynollah + +This README explains the technical details of how to set up and run training, for detailed information on parameterization, see [`docs/train.md`](../docs/train.md) ## Introduction -This repository contains the source code for training an encoder model for document image segmentation. + +This folder contains the source code for training an encoder model for document image segmentation. ## Installation -Either clone the repository via `git clone https://github.com/qurator-spk/sbb_pixelwise_segmentation.git` or download and unpack the [ZIP](https://github.com/qurator-spk/sbb_pixelwise_segmentation/archive/master.zip). + +Clone the repository and install eynollah along with the dependencies necessary for training: + +```sh +git clone https://github.com/qurator-spk/eynollah +cd eynollah +pip install '.[training]' +``` ### Pretrained encoder -Download our pretrained weights and add them to a ``pretrained_model`` folder: -https://qurator-data.de/sbb_pixelwise_segmentation/pretrained_encoder/ + +Download our pretrained weights and add them to a `train/pretrained_model` folder: + +```sh +cd train +wget -O pretrained_model.tar.gz https://zenodo.org/records/17243320/files/pretrained_model_v0_5_1.tar.gz?download=1 +tar xf pretrained_model.tar.gz +``` + +### Binarization training data + +A small sample of training data for binarization experiment can be found [on +zenodo](https://zenodo.org/records/17243320/files/training_data_sample_binarization_v0_5_1.tar.gz?download=1), +which contains `images` and `labels` folders. ### Helpful tools + * [`pagexml2img`](https://github.com/qurator-spk/page2img) > Tool to extract 2-D or 3-D RGB images from PAGE-XML data. In the former case, the output will be 1 2-D image array which each class has filled with a pixel value. In the case of a 3-D RGB image, each class will be defined with a RGB value and beside images, a text file of classes will also be produced. @@ -20,71 +42,18 @@ each class will be defined with a RGB value and beside images, a text file of cl * [`ocrd-segment-extract-pages`](https://github.com/OCR-D/ocrd_segment/blob/master/ocrd_segment/extract_pages.py) > Extract region classes and their colours in mask (pseg) images. Allows the color map as free dict parameter, and comes with a default that mimics PageViewer's coloring for quick debugging; it also warns when regions do overlap. -## Usage - -### Train -To train a model, run: ``python train.py with config_params.json`` - ### Train using Docker -#### Build the Docker image +Build the Docker image: - ```bash - docker build -t model-training . - ``` -#### Run Docker image - ```bash - docker run --gpus all -v /host/path/to/entry_point_dir:/entry_point_dir model-training - ``` +```bash +cd train +docker build -t model-training . +``` -### Ground truth format -Lables for each pixel are identified by a number. So if you have a -binary case, ``n_classes`` should be set to ``2`` and labels should -be ``0`` and ``1`` for each class and pixel. +Run Docker image -In the case of multiclass, just set ``n_classes`` to the number of classes -you have and the try to produce the labels by pixels set from ``0 , 1 ,2 .., n_classes-1``. -The labels format should be png. -Our lables are 3 channel png images but only information of first channel is used. -If you have an image label with height and width of 10, for a binary case the first channel should look like this: - - Label: [ [1, 0, 0, 1, 1, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - ..., - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ] - - This means that you have an image by `10*10*3` and `pixel[0,0]` belongs - to class `1` and `pixel[0,1]` belongs to class `0`. - - A small sample of training data for binarization experiment can be found here, [Training data sample](https://qurator-data.de/~vahid.rezanezhad/binarization_training_data_sample/), which contains images and lables folders. - -### Training , evaluation and output -The train and evaluation folders should contain subfolders of images and labels. -The output folder should be an empty folder where the output model will be written to. - -### Parameter configuration -* patches: If you want to break input images into smaller patches (input size of the model) you need to set this parameter to ``true``. In the case that the model should see the image once, like page extraction, patches should be set to ``false``. -* n_batch: Number of batches at each iteration. -* n_classes: Number of classes. In the case of binary classification this should be 2. -* n_epochs: Number of epochs. -* input_height: This indicates the height of model's input. -* input_width: This indicates the width of model's input. -* weight_decay: Weight decay of l2 regularization of model layers. -* augmentation: If you want to apply any kind of augmentation this parameter should first set to ``true``. -* flip_aug: If ``true``, different types of filp will be applied on image. Type of flips is given with "flip_index" in train.py file. -* blur_aug: If ``true``, different types of blurring will be applied on image. Type of blurrings is given with "blur_k" in train.py file. -* scaling: If ``true``, scaling will be applied on image. Scale of scaling is given with "scales" in train.py file. -* rotation_not_90: If ``true``, rotation (not 90 degree) will be applied on image. Rotation angles are given with "thetha" in train.py file. -* rotation: If ``true``, 90 degree rotation will be applied on image. -* binarization: If ``true``,Otsu thresholding will be applied to augment the input data with binarized images. -* scaling_bluring: If ``true``, combination of scaling and blurring will be applied on image. -* scaling_binarization: If ``true``, combination of scaling and binarization will be applied on image. -* scaling_flip: If ``true``, combination of scaling and flip will be applied on image. -* continue_training: If ``true``, it means that you have already trained a model and you would like to continue the training. So it is needed to provide the dir of trained model with "dir_of_start_model" and index for naming the models. For example if you have already trained for 3 epochs then your last index is 2 and if you want to continue from model_1.h5, you can set "index_start" to 3 to start naming model with index 3. -* weighted_loss: If ``true``, this means that you want to apply weighted categorical_crossentropy as loss fucntion. Be carefull if you set to ``true``the parameter "is_loss_soft_dice" should be ``false`` -* data_is_provided: If you have already provided the input data you can set this to ``true``. Be sure that the train and eval data are in "dir_output". Since when once we provide training data we resize and augment them and then we write them in sub-directories train and eval in "dir_output". -* dir_train: This is the directory of "images" and "labels" (dir_train should include two subdirectories with names of images and labels ) for raw images and labels. Namely they are not prepared (not resized and not augmented) yet for training the model. When we run this tool these raw data will be transformed to suitable size needed for the model and they will be written in "dir_output" in train and eval directories. Each of train and eval include "images" and "labels" sub-directories. - -#### Additional documentation -Please check the [wiki](https://github.com/qurator-spk/sbb_pixelwise_segmentation/wiki). +```bash +cd train +docker run --gpus all -v $PWD:/entry_point_dir model-training +``` From 48266b1ee0cd5aa7dc971336257307d7f681ddc1 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 1 Oct 2025 18:01:21 +0200 Subject: [PATCH 303/492] make training dependencies optional-dependencies of eynollah i.e. `pip install "eynollah[training]"` will install the requirements for training --- pyproject.toml | 13 ++++++++----- requirements-ocr.txt | 2 ++ requirements-plotting.txt | 1 + requirements-training.txt | 1 + train/requirements.txt | 7 +------ 5 files changed, 13 insertions(+), 11 deletions(-) create mode 100644 requirements-ocr.txt create mode 100644 requirements-plotting.txt create mode 120000 requirements-training.txt diff --git a/pyproject.toml b/pyproject.toml index 8a63543..ec3e5f8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,7 +13,11 @@ license.file = "LICENSE" requires-python = ">=3.8" keywords = ["document layout analysis", "image segmentation"] -dynamic = ["dependencies", "version"] +dynamic = [ + "dependencies", + "optional-dependencies", + "version" +] classifiers = [ "Development Status :: 4 - Beta", @@ -25,10 +29,6 @@ classifiers = [ "Topic :: Scientific/Engineering :: Image Processing", ] -[project.optional-dependencies] -OCR = ["torch <= 2.0.1", "transformers <= 4.30.2"] -plotting = ["matplotlib"] - [project.scripts] eynollah = "eynollah.cli:main" ocrd-eynollah-segment = "eynollah.ocrd_cli:main" @@ -41,6 +41,9 @@ Repository = "https://github.com/qurator-spk/eynollah.git" [tool.setuptools.dynamic] dependencies = {file = ["requirements.txt"]} optional-dependencies.test = {file = ["requirements-test.txt"]} +optional-dependencies.OCR = {file = ["requirements-ocr.txt"]} +optional-dependencies.plotting = {file = ["requirements-plotting.txt"]} +optional-dependencies.training = {file = ["requirements-training.txt"]} [tool.setuptools.packages.find] where = ["src"] diff --git a/requirements-ocr.txt b/requirements-ocr.txt new file mode 100644 index 0000000..9f31ebb --- /dev/null +++ b/requirements-ocr.txt @@ -0,0 +1,2 @@ +torch <= 2.0.1 +transformers <= 4.30.2 diff --git a/requirements-plotting.txt b/requirements-plotting.txt new file mode 100644 index 0000000..6ccafc3 --- /dev/null +++ b/requirements-plotting.txt @@ -0,0 +1 @@ +matplotlib diff --git a/requirements-training.txt b/requirements-training.txt new file mode 120000 index 0000000..e1bc9c3 --- /dev/null +++ b/requirements-training.txt @@ -0,0 +1 @@ +train/requirements.txt \ No newline at end of file diff --git a/train/requirements.txt b/train/requirements.txt index d8f9003..4df9c2f 100644 --- a/train/requirements.txt +++ b/train/requirements.txt @@ -1,11 +1,6 @@ -tensorflow == 2.12.1 +# tensorflow == 2.12.1 # TODO why not tensorflow < 2.13 as in eynollah/requirements.txt sacred -opencv-python-headless seaborn tqdm imutils -numpy scipy -scikit-learn -shapely -click From f0ef2b5db27b8f6e8abcce2aef261fbcb8575793 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 1 Oct 2025 18:10:13 +0200 Subject: [PATCH 304/492] remove unused imports --- train/gt_gen_utils.py | 4 +--- train/inference.py | 6 ------ 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/train/gt_gen_utils.py b/train/gt_gen_utils.py index 38d48ca..2828d7b 100644 --- a/train/gt_gen_utils.py +++ b/train/gt_gen_utils.py @@ -1,5 +1,3 @@ -import click -import sys import os import numpy as np import warnings @@ -9,7 +7,7 @@ import cv2 from shapely import geometry from pathlib import Path import matplotlib.pyplot as plt -from PIL import Image, ImageDraw, ImageFont +from PIL import ImageFont KERNEL = np.ones((5, 5), np.uint8) diff --git a/train/inference.py b/train/inference.py index 595cfe7..0bff0ec 100644 --- a/train/inference.py +++ b/train/inference.py @@ -3,12 +3,9 @@ import os import numpy as np import warnings import cv2 -import seaborn as sns from tensorflow.keras.models import load_model import tensorflow as tf from tensorflow.keras import backend as K -from tensorflow.keras import layers -import tensorflow.keras.losses from tensorflow.keras.layers import * from models import * from gt_gen_utils import * @@ -16,7 +13,6 @@ import click import json from tensorflow.python.keras import backend as tensorflow_backend import xml.etree.ElementTree as ET -import matplotlib.pyplot as plt with warnings.catch_warnings(): @@ -55,11 +51,9 @@ class sbb_predict: seg=seg[:,:,0] seg_img=np.zeros((np.shape(seg)[0],np.shape(seg)[1],3)).astype(np.uint8) - colors=sns.color_palette("hls", self.n_classes) for c in ann_u: c=int(c) - segl=(seg==c) seg_img[:,:,0][seg==c]=c seg_img[:,:,1][seg==c]=c seg_img[:,:,2][seg==c]=c From 4f5cdf314004b6bb0a409aee7b3525391f8afcc7 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 1 Oct 2025 18:12:45 +0200 Subject: [PATCH 305/492] move training scripts to src/eynollah/training --- pyproject.toml | 1 + {train => src/eynollah/training}/__init__.py | 0 .../training}/build_model_load_pretrained_weights_and_save.py | 0 {train => src/eynollah/training}/generate_gt_for_training.py | 0 {train => src/eynollah/training}/gt_gen_utils.py | 0 {train => src/eynollah/training}/inference.py | 0 {train => src/eynollah/training}/metrics.py | 0 {train => src/eynollah/training}/models.py | 0 {train => src/eynollah/training}/train.py | 0 {train => src/eynollah/training}/utils.py | 0 10 files changed, 1 insertion(+) rename {train => src/eynollah/training}/__init__.py (100%) rename {train => src/eynollah/training}/build_model_load_pretrained_weights_and_save.py (100%) rename {train => src/eynollah/training}/generate_gt_for_training.py (100%) rename {train => src/eynollah/training}/gt_gen_utils.py (100%) rename {train => src/eynollah/training}/inference.py (100%) rename {train => src/eynollah/training}/metrics.py (100%) rename {train => src/eynollah/training}/models.py (100%) rename {train => src/eynollah/training}/train.py (100%) rename {train => src/eynollah/training}/utils.py (100%) diff --git a/pyproject.toml b/pyproject.toml index ec3e5f8..8ca6cff 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,6 +33,7 @@ classifiers = [ eynollah = "eynollah.cli:main" ocrd-eynollah-segment = "eynollah.ocrd_cli:main" ocrd-sbb-binarize = "eynollah.ocrd_cli_binarization:main" +eynollah-training = "eynollah.training.cli:main" [project.urls] Homepage = "https://github.com/qurator-spk/eynollah" diff --git a/train/__init__.py b/src/eynollah/training/__init__.py similarity index 100% rename from train/__init__.py rename to src/eynollah/training/__init__.py diff --git a/train/build_model_load_pretrained_weights_and_save.py b/src/eynollah/training/build_model_load_pretrained_weights_and_save.py similarity index 100% rename from train/build_model_load_pretrained_weights_and_save.py rename to src/eynollah/training/build_model_load_pretrained_weights_and_save.py diff --git a/train/generate_gt_for_training.py b/src/eynollah/training/generate_gt_for_training.py similarity index 100% rename from train/generate_gt_for_training.py rename to src/eynollah/training/generate_gt_for_training.py diff --git a/train/gt_gen_utils.py b/src/eynollah/training/gt_gen_utils.py similarity index 100% rename from train/gt_gen_utils.py rename to src/eynollah/training/gt_gen_utils.py diff --git a/train/inference.py b/src/eynollah/training/inference.py similarity index 100% rename from train/inference.py rename to src/eynollah/training/inference.py diff --git a/train/metrics.py b/src/eynollah/training/metrics.py similarity index 100% rename from train/metrics.py rename to src/eynollah/training/metrics.py diff --git a/train/models.py b/src/eynollah/training/models.py similarity index 100% rename from train/models.py rename to src/eynollah/training/models.py diff --git a/train/train.py b/src/eynollah/training/train.py similarity index 100% rename from train/train.py rename to src/eynollah/training/train.py diff --git a/train/utils.py b/src/eynollah/training/utils.py similarity index 100% rename from train/utils.py rename to src/eynollah/training/utils.py From 2baf42e878732330c0df54927c55a1ef9a9c8b03 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 1 Oct 2025 18:15:54 +0200 Subject: [PATCH 306/492] organize imports, use relative imports --- src/eynollah/training/generate_gt_for_training.py | 3 ++- src/eynollah/training/gt_gen_utils.py | 1 - src/eynollah/training/inference.py | 10 ++++++---- src/eynollah/training/train.py | 10 ++++++---- src/eynollah/training/utils.py | 5 +++-- 5 files changed, 17 insertions(+), 12 deletions(-) diff --git a/src/eynollah/training/generate_gt_for_training.py b/src/eynollah/training/generate_gt_for_training.py index 388fced..d378c3e 100644 --- a/src/eynollah/training/generate_gt_for_training.py +++ b/src/eynollah/training/generate_gt_for_training.py @@ -1,10 +1,11 @@ import click import json -from gt_gen_utils import * from tqdm import tqdm from pathlib import Path from PIL import Image, ImageDraw, ImageFont +from .gt_gen_utils import * + @click.group() def main(): pass diff --git a/src/eynollah/training/gt_gen_utils.py b/src/eynollah/training/gt_gen_utils.py index 2828d7b..2e3428b 100644 --- a/src/eynollah/training/gt_gen_utils.py +++ b/src/eynollah/training/gt_gen_utils.py @@ -6,7 +6,6 @@ from tqdm import tqdm import cv2 from shapely import geometry from pathlib import Path -import matplotlib.pyplot as plt from PIL import ImageFont diff --git a/src/eynollah/training/inference.py b/src/eynollah/training/inference.py index 0bff0ec..24837a1 100644 --- a/src/eynollah/training/inference.py +++ b/src/eynollah/training/inference.py @@ -1,19 +1,21 @@ import sys import os -import numpy as np import warnings +import json + +import numpy as np import cv2 from tensorflow.keras.models import load_model import tensorflow as tf from tensorflow.keras import backend as K from tensorflow.keras.layers import * -from models import * -from gt_gen_utils import * import click -import json from tensorflow.python.keras import backend as tensorflow_backend import xml.etree.ElementTree as ET +from .models import * +from .gt_gen_utils import * + with warnings.catch_warnings(): warnings.simplefilter("ignore") diff --git a/src/eynollah/training/train.py b/src/eynollah/training/train.py index add878a..3b99807 100644 --- a/src/eynollah/training/train.py +++ b/src/eynollah/training/train.py @@ -1,20 +1,22 @@ import os import sys +import json + os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import tensorflow as tf from tensorflow.compat.v1.keras.backend import set_session import warnings from tensorflow.keras.optimizers import * from sacred import Experiment -from models import * -from utils import * -from metrics import * from tensorflow.keras.models import load_model from tqdm import tqdm -import json from sklearn.metrics import f1_score from tensorflow.keras.callbacks import Callback +from .models import * +from .utils import * +from .metrics import * + class SaveWeightsAfterSteps(Callback): def __init__(self, save_interval, save_path, _config): super(SaveWeightsAfterSteps, self).__init__() diff --git a/src/eynollah/training/utils.py b/src/eynollah/training/utils.py index ead4887..1278be5 100644 --- a/src/eynollah/training/utils.py +++ b/src/eynollah/training/utils.py @@ -1,13 +1,14 @@ import os +import math +import random + import cv2 import numpy as np import seaborn as sns from scipy.ndimage.interpolation import map_coordinates from scipy.ndimage.filters import gaussian_filter -import random from tqdm import tqdm import imutils -import math from tensorflow.keras.utils import to_categorical from PIL import Image, ImageEnhance From 690d47444caab7a8f2ba5443c0cb1701383c46e3 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 1 Oct 2025 18:36:28 +0200 Subject: [PATCH 307/492] make relative wildcard imports explicit --- pyproject.toml | 1 - ..._model_load_pretrained_weights_and_save.py | 9 ++--- .../training/generate_gt_for_training.py | 20 ++++++++++- src/eynollah/training/inference.py | 11 +++++-- src/eynollah/training/train.py | 33 +++++++++++++++---- 5 files changed, 55 insertions(+), 19 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 8ca6cff..ec3e5f8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,6 @@ classifiers = [ eynollah = "eynollah.cli:main" ocrd-eynollah-segment = "eynollah.ocrd_cli:main" ocrd-sbb-binarize = "eynollah.ocrd_cli_binarization:main" -eynollah-training = "eynollah.training.cli:main" [project.urls] Homepage = "https://github.com/qurator-spk/eynollah" diff --git a/src/eynollah/training/build_model_load_pretrained_weights_and_save.py b/src/eynollah/training/build_model_load_pretrained_weights_and_save.py index 125611e..ce3d955 100644 --- a/src/eynollah/training/build_model_load_pretrained_weights_and_save.py +++ b/src/eynollah/training/build_model_load_pretrained_weights_and_save.py @@ -1,12 +1,7 @@ -import os -import sys import tensorflow as tf -import warnings from tensorflow.keras.optimizers import * -from sacred import Experiment -from models import * -from utils import * -from metrics import * + +from .models import resnet50_unet def configuration(): diff --git a/src/eynollah/training/generate_gt_for_training.py b/src/eynollah/training/generate_gt_for_training.py index d378c3e..3fd93ae 100644 --- a/src/eynollah/training/generate_gt_for_training.py +++ b/src/eynollah/training/generate_gt_for_training.py @@ -1,10 +1,28 @@ import click import json +import os from tqdm import tqdm from pathlib import Path from PIL import Image, ImageDraw, ImageFont +import cv2 +import numpy as np -from .gt_gen_utils import * +from eynollah.training.gt_gen_utils import ( + filter_contours_area_of_image, + find_format_of_given_filename_in_dir, + find_new_features_of_contours, + fit_text_single_line, + get_content_of_dir, + get_images_of_ground_truth, + get_layout_contours_for_visualization, + get_textline_contours_and_ocr_text, + get_textline_contours_for_visualization, + overlay_layout_on_image, + read_xml, + resize_image, + visualize_image_from_contours, + visualize_image_from_contours_layout +) @click.group() def main(): diff --git a/src/eynollah/training/inference.py b/src/eynollah/training/inference.py index 24837a1..998c8fc 100644 --- a/src/eynollah/training/inference.py +++ b/src/eynollah/training/inference.py @@ -13,9 +13,14 @@ import click from tensorflow.python.keras import backend as tensorflow_backend import xml.etree.ElementTree as ET -from .models import * -from .gt_gen_utils import * - +from .gt_gen_utils import ( + filter_contours_area_of_image, + find_new_features_of_contours, + read_xml, + resize_image, + update_list_and_return_first_with_length_bigger_than_one +) +from .models import PatchEncoder, Patches with warnings.catch_warnings(): warnings.simplefilter("ignore") diff --git a/src/eynollah/training/train.py b/src/eynollah/training/train.py index 3b99807..527bca6 100644 --- a/src/eynollah/training/train.py +++ b/src/eynollah/training/train.py @@ -2,20 +2,39 @@ import os import sys import json +from eynollah.training.metrics import soft_dice_loss, weighted_categorical_crossentropy + +from .models import ( + PatchEncoder, + Patches, + machine_based_reading_order_model, + resnet50_classifier, + resnet50_unet, + vit_resnet50_unet, + vit_resnet50_unet_transformer_before_cnn +) +from .utils import ( + data_gen, + generate_arrays_from_folder_reading_order, + generate_data_from_folder_evaluation, + generate_data_from_folder_training, + get_one_hot, + provide_patches, + return_number_of_total_training_data +) + os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import tensorflow as tf from tensorflow.compat.v1.keras.backend import set_session -import warnings -from tensorflow.keras.optimizers import * +from tensorflow.keras.optimizers import SGD, Adam from sacred import Experiment from tensorflow.keras.models import load_model from tqdm import tqdm from sklearn.metrics import f1_score from tensorflow.keras.callbacks import Callback -from .models import * -from .utils import * -from .metrics import * +import numpy as np +import cv2 class SaveWeightsAfterSteps(Callback): def __init__(self, save_interval, save_path, _config): @@ -47,8 +66,8 @@ def configuration(): def get_dirs_or_files(input_data): + image_input, labels_input = os.path.join(input_data, 'images/'), os.path.join(input_data, 'labels/') if os.path.isdir(input_data): - image_input, labels_input = os.path.join(input_data, 'images/'), os.path.join(input_data, 'labels/') # Check if training dir exists assert os.path.isdir(image_input), "{} is not a directory".format(image_input) assert os.path.isdir(labels_input), "{} is not a directory".format(labels_input) @@ -425,7 +444,7 @@ def run(_config, n_classes, n_epochs, input_height, #f1score_tot = [0] indexer_start = 0 - opt = SGD(learning_rate=0.01, momentum=0.9) + # opt = SGD(learning_rate=0.01, momentum=0.9) opt_adam = tf.keras.optimizers.Adam(learning_rate=0.0001) model.compile(loss="binary_crossentropy", optimizer = opt_adam,metrics=['accuracy']) From 1c043c586a972c4088d204b179b37d64eb44a39f Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 1 Oct 2025 18:52:11 +0200 Subject: [PATCH 308/492] eynollah-training: all training CLI into single click group --- pyproject.toml | 1 + ..._model_load_pretrained_weights_and_save.py | 6 ++--- src/eynollah/training/cli.py | 26 +++++++++++++++++++ .../training/generate_gt_for_training.py | 3 --- src/eynollah/training/inference.py | 11 +++----- src/eynollah/training/train.py | 11 +++++--- 6 files changed, 41 insertions(+), 17 deletions(-) create mode 100644 src/eynollah/training/cli.py diff --git a/pyproject.toml b/pyproject.toml index ec3e5f8..ec99c99 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -31,6 +31,7 @@ classifiers = [ [project.scripts] eynollah = "eynollah.cli:main" +eynollah-training = "eynollah.training.cli:main" ocrd-eynollah-segment = "eynollah.ocrd_cli:main" ocrd-sbb-binarize = "eynollah.ocrd_cli_binarization:main" diff --git a/src/eynollah/training/build_model_load_pretrained_weights_and_save.py b/src/eynollah/training/build_model_load_pretrained_weights_and_save.py index ce3d955..40fc1fe 100644 --- a/src/eynollah/training/build_model_load_pretrained_weights_and_save.py +++ b/src/eynollah/training/build_model_load_pretrained_weights_and_save.py @@ -1,5 +1,5 @@ +import click import tensorflow as tf -from tensorflow.keras.optimizers import * from .models import resnet50_unet @@ -8,8 +8,8 @@ def configuration(): gpu_options = tf.compat.v1.GPUOptions(allow_growth=True) session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options)) - -if __name__ == '__main__': +@click.command() +def build_model_load_pretrained_weights_and_save(): n_classes = 2 input_height = 224 input_width = 448 diff --git a/src/eynollah/training/cli.py b/src/eynollah/training/cli.py new file mode 100644 index 0000000..8ab754d --- /dev/null +++ b/src/eynollah/training/cli.py @@ -0,0 +1,26 @@ +import os +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' + +import click +import sys + +from .build_model_load_pretrained_weights_and_save import build_model_load_pretrained_weights_and_save +from .generate_gt_for_training import main as generate_gt_cli +from .inference import main as inference_cli +from .train import ex + +@click.command(context_settings=dict( + ignore_unknown_options=True, +)) +@click.argument('SACRED_ARGS', nargs=-1, type=click.UNPROCESSED) +def train_cli(sacred_args): + ex.run_commandline([sys.argv[0]] + list(sacred_args)) + +@click.group('training') +def main(): + pass + +main.add_command(build_model_load_pretrained_weights_and_save) +main.add_command(generate_gt_cli, 'generate-gt') +main.add_command(inference_cli, 'inference') +main.add_command(train_cli, 'train') diff --git a/src/eynollah/training/generate_gt_for_training.py b/src/eynollah/training/generate_gt_for_training.py index 3fd93ae..693cab8 100644 --- a/src/eynollah/training/generate_gt_for_training.py +++ b/src/eynollah/training/generate_gt_for_training.py @@ -581,6 +581,3 @@ def visualize_ocr_text(xml_file, dir_xml, dir_out): # Draw the text draw.text((text_x, text_y), ocr_texts[index], fill="black", font=font) image_text.save(os.path.join(dir_out, f_name+'.png')) - -if __name__ == "__main__": - main() diff --git a/src/eynollah/training/inference.py b/src/eynollah/training/inference.py index 998c8fc..3fa8fd6 100644 --- a/src/eynollah/training/inference.py +++ b/src/eynollah/training/inference.py @@ -20,7 +20,10 @@ from .gt_gen_utils import ( resize_image, update_list_and_return_first_with_length_bigger_than_one ) -from .models import PatchEncoder, Patches +from .models import ( + PatchEncoder, + Patches +) with warnings.catch_warnings(): warnings.simplefilter("ignore") @@ -675,9 +678,3 @@ def main(image, dir_in, model, patches, save, save_layout, ground_truth, xml_fil x=sbb_predict(image, dir_in, model, task, config_params_model, patches, save, save_layout, ground_truth, xml_file, out, min_area) x.run() -if __name__=="__main__": - main() - - - - diff --git a/src/eynollah/training/train.py b/src/eynollah/training/train.py index 527bca6..97736e0 100644 --- a/src/eynollah/training/train.py +++ b/src/eynollah/training/train.py @@ -2,9 +2,13 @@ import os import sys import json -from eynollah.training.metrics import soft_dice_loss, weighted_categorical_crossentropy +import click -from .models import ( +from eynollah.training.metrics import ( + soft_dice_loss, + weighted_categorical_crossentropy +) +from eynollah.training.models import ( PatchEncoder, Patches, machine_based_reading_order_model, @@ -13,7 +17,7 @@ from .models import ( vit_resnet50_unet, vit_resnet50_unet_transformer_before_cnn ) -from .utils import ( +from eynollah.training.utils import ( data_gen, generate_arrays_from_folder_reading_order, generate_data_from_folder_evaluation, @@ -142,7 +146,6 @@ def config_params(): dir_rgb_backgrounds = None dir_rgb_foregrounds = None - @ex.automain def run(_config, n_classes, n_epochs, input_height, input_width, weight_decay, weighted_loss, From f60e0543ab293212c2d0e5791c0efa8658cc0ac4 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 1 Oct 2025 19:16:58 +0200 Subject: [PATCH 309/492] training: update docs --- docs/train.md | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/docs/train.md b/docs/train.md index 839529f..252bead 100644 --- a/docs/train.md +++ b/docs/train.md @@ -13,7 +13,7 @@ The following three tasks can all be accomplished using the code in the * train a model * inference with the trained model -## Training , evaluation and output +## Training, evaluation and output The train and evaluation folders should contain subfolders of `images` and `labels`. @@ -22,11 +22,13 @@ The output folder should be an empty folder where the output model will be writt ## Generate training dataset The script `generate_gt_for_training.py` is used for generating training datasets. As the results of the following -command demonstrates, the dataset generator provides three different commands: +command demonstrates, the dataset generator provides several subcommands: -`python generate_gt_for_training.py --help` +```sh +eynollah-training generate-gt --help +``` -These three commands are: +The three most important subcommands are: * image-enhancement * machine-based-reading-order @@ -38,7 +40,7 @@ Generating a training dataset for image enhancement is quite straightforward. Al high-resolution images. The training dataset can then be generated using the following command: ```sh -python generate_gt_for_training.py image-enhancement \ +eynollah-training image-enhancement \ -dis "dir of high resolution images" \ -dois "dir where degraded images will be written" \ -dols "dir where the corresponding high resolution image will be written as label" \ @@ -69,7 +71,7 @@ to filter out regions smaller than this minimum size. This minimum size is defin to the image area, with a default value of zero. To run the dataset generator, use the following command: ```shell -python generate_gt_for_training.py machine-based-reading-order \ +eynollah-training generate-gt machine-based-reading-order \ -dx "dir of GT xml files" \ -domi "dir where output images will be written" \ "" -docl "dir where the labels will be written" \ @@ -144,7 +146,7 @@ region" are also present in the label. However, other regions like "noise region included in the label PNG file, even if they have information in the page XML files, as we chose not to include them. ```sh -python generate_gt_for_training.py pagexml2label \ +eynollah-training generate-gt pagexml2label \ -dx "dir of GT xml files" \ -do "dir where output label png files will be written" \ -cfg "custom config json file" \ @@ -198,7 +200,7 @@ provided to ensure that they are cropped in sync with the labels. This ensures t required for training are obtained. The command should resemble the following: ```sh -python generate_gt_for_training.py pagexml2label \ +eynollah-training generate-gt pagexml2label \ -dx "dir of GT xml files" \ -do "dir where output label png files will be written" \ -cfg "custom config json file" \ @@ -261,7 +263,7 @@ And the "dir_eval" the same structure as train directory: The classification model can be trained using the following command line: ```sh -python train.py with config_classification.json +eynollah-training train with config_classification.json ``` As evident in the example JSON file above, for classification, we utilize a "f1_threshold_classification" parameter. @@ -395,7 +397,9 @@ And the "dir_eval" the same structure as train directory: After configuring the JSON file for segmentation or enhancement, training can be initiated by running the following command, similar to the process for classification and reading order: -`python train.py with config_classification.json` +``` +eynollah-training train with config_classification.json` +``` #### Binarization @@ -679,7 +683,7 @@ For conducting inference with a trained model, you simply need to execute the fo directory of the model and the image on which to perform inference: ```sh -python inference.py -m "model dir" -i "image" +eynollah-training inference -m "model dir" -i "image" ``` This will straightforwardly return the class of the image. @@ -691,7 +695,7 @@ without the reading order. We simply need to provide the model directory, the XM new XML file with the added reading order will be written to the output directory with the same name. We need to run: ```sh -python inference.py \ +eynollah-training inference \ -m "model dir" \ -xml "page xml file" \ -o "output dir to write new xml with reading order" @@ -702,7 +706,7 @@ python inference.py \ For conducting inference with a trained model for segmentation and enhancement you need to run the following command line: ```sh -python inference.py \ +eynollah-training inference \ -m "model dir" \ -i "image" \ -p \ From 8a9b4f8f55de9a2e51fd72af1894e771fe44f348 Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 2 Oct 2025 12:16:26 +0200 Subject: [PATCH 310/492] remove commented-out requirement for tf == 2.12.1, rely on same version as in eynollah proper --- train/requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/train/requirements.txt b/train/requirements.txt index 4df9c2f..2fb9908 100644 --- a/train/requirements.txt +++ b/train/requirements.txt @@ -1,4 +1,3 @@ -# tensorflow == 2.12.1 # TODO why not tensorflow < 2.13 as in eynollah/requirements.txt sacred seaborn tqdm From 0b9d4901a61ea777fc0db6e90930a734fe33302d Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 2 Oct 2025 20:51:03 +0200 Subject: [PATCH 311/492] contour features: avoid unused calculations, simplify, add shortcuts - new function: `find_center_of_contours` - simplified: `find_(new_)features_of_contours` --- src/eynollah/utils/contour.py | 78 ++++++++++++----------------------- 1 file changed, 27 insertions(+), 51 deletions(-) diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index 0700ed4..041cbf6 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -79,61 +79,37 @@ def filter_contours_area_of_image_tables(image, contours, hierarchy, max_area=1. found_polygons_early.append(polygon2contour(polygon)) return found_polygons_early -def find_new_features_of_contours(contours_main): - areas_main = np.array([cv2.contourArea(contours_main[j]) - for j in range(len(contours_main))]) - M_main = [cv2.moments(contours_main[j]) - for j in range(len(contours_main))] - cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) - for j in range(len(M_main))] - cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) - for j in range(len(M_main))] - try: - x_min_main = np.array([np.min(contours_main[j][:, 0, 0]) - for j in range(len(contours_main))]) - argmin_x_main = np.array([np.argmin(contours_main[j][:, 0, 0]) - for j in range(len(contours_main))]) - x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0, 0] - for j in range(len(contours_main))]) - y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0, 1] - for j in range(len(contours_main))]) - x_max_main = np.array([np.max(contours_main[j][:, 0, 0]) - for j in range(len(contours_main))]) - y_min_main = np.array([np.min(contours_main[j][:, 0, 1]) - for j in range(len(contours_main))]) - y_max_main = np.array([np.max(contours_main[j][:, 0, 1]) - for j in range(len(contours_main))]) - except: - x_min_main = np.array([np.min(contours_main[j][:, 0]) - for j in range(len(contours_main))]) - argmin_x_main = np.array([np.argmin(contours_main[j][:, 0]) - for j in range(len(contours_main))]) - x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 0] - for j in range(len(contours_main))]) - y_corr_x_min_from_argmin = np.array([contours_main[j][argmin_x_main[j], 1] - for j in range(len(contours_main))]) - x_max_main = np.array([np.max(contours_main[j][:, 0]) - for j in range(len(contours_main))]) - y_min_main = np.array([np.min(contours_main[j][:, 1]) - for j in range(len(contours_main))]) - y_max_main = np.array([np.max(contours_main[j][:, 1]) - for j in range(len(contours_main))]) - # dis_x=np.abs(x_max_main-x_min_main) +def find_center_of_contours(contours): + moments = [cv2.moments(contour) for contour in contours] + cx = [feat["m10"] / (feat["m00"] + 1e-32) + for feat in moments] + cy = [feat["m01"] / (feat["m00"] + 1e-32) + for feat in moments] + return cx, cy - return cx_main, cy_main, x_min_main, x_max_main, y_min_main, y_max_main, y_corr_x_min_from_argmin +def find_new_features_of_contours(contours): + # areas = np.array([cv2.contourArea(contour) for contour in contours]) + cx, cy = find_center_of_contours(contours) + slice_x = np.index_exp[:, 0, 0] + slice_y = np.index_exp[:, 0, 1] + if any(contour.ndim < 3 for contour in contours): + slice_x = np.index_exp[:, 0] + slice_y = np.index_exp[:, 1] + x_min = np.array([np.min(contour[slice_x]) for contour in contours]) + x_max = np.array([np.max(contour[slice_x]) for contour in contours]) + y_min = np.array([np.min(contour[slice_y]) for contour in contours]) + y_max = np.array([np.max(contour[slice_y]) for contour in contours]) + # dis_x=np.abs(x_max-x_min) + y_corr_x_min = np.array([contour[np.argmin(contour[slice_x])][slice_y[1:]] + for contour in contours]) -def find_features_of_contours(contours_main): - areas_main=np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))]) - M_main=[cv2.moments(contours_main[j]) for j in range(len(contours_main))] - cx_main=[(M_main[j]['m10']/(M_main[j]['m00']+1e-32)) for j in range(len(M_main))] - cy_main=[(M_main[j]['m01']/(M_main[j]['m00']+1e-32)) for j in range(len(M_main))] - x_min_main=np.array([np.min(contours_main[j][:,0,0]) for j in range(len(contours_main))]) - x_max_main=np.array([np.max(contours_main[j][:,0,0]) for j in range(len(contours_main))]) + return cx, cy, x_min, x_max, y_min, y_max, y_corr_x_min - y_min_main=np.array([np.min(contours_main[j][:,0,1]) for j in range(len(contours_main))]) - y_max_main=np.array([np.max(contours_main[j][:,0,1]) for j in range(len(contours_main))]) +def find_features_of_contours(contours): + y_min = np.array([np.min(contour[:,0,1]) for contour in contours]) + y_max = np.array([np.max(contour[:,0,1]) for contour in contours]) - return y_min_main, y_max_main + return y_min, y_max def return_parent_contours(contours, hierarchy): contours_parent = [contours[i] From 81827c2942e0a6b7e4121b9de510108de4f026fa Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 2 Oct 2025 21:03:07 +0200 Subject: [PATCH 312/492] filter_contours_inside_a_bigger_one: simplify - use new `find_center_of_contours` - avoid loops in favour of array processing - use sets instead of `np.unique` and `np.delete` instead of list.pop --- src/eynollah/eynollah.py | 102 +++++++++++++++------------------------ 1 file changed, 39 insertions(+), 63 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 62ce002..b2d9016 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4208,7 +4208,7 @@ class Eynollah: return generated_text def return_list_of_contours_with_desired_order(self, ls_cons, sorted_indexes): - return [ls_cons[sorted_indexes[index]] for index in range(len(sorted_indexes))] + return list(np.array(ls_cons)[np.array(sorted_indexes)]) def return_it_in_two_groups(self, x_differential): split = [ind if x_differential[ind]!=x_differential[ind+1] else -1 @@ -4237,47 +4237,38 @@ class Eynollah: def filter_contours_inside_a_bigger_one(self, contours, contours_d_ordered, image, marginal_cnts=None, type_contour="textregion"): - if type_contour=="textregion": - areas = [cv2.contourArea(contours[j]) for j in range(len(contours))] + if type_contour == "textregion": + areas = np.array(list(map(cv2.contourArea, contours))) area_tot = image.shape[0]*image.shape[1] + areas_ratio = areas / area_tot + cx_main, cy_main = find_center_of_contours(contours) - M_main = [cv2.moments(contours[j]) - for j in range(len(contours))] - cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] - cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] + contours_index_small = np.flatnonzero(areas_ratio < 1e-3) + contours_index_large = np.flatnonzero(areas_ratio >= 1e-3) - areas_ratio = np.array(areas)/ area_tot - contours_index_small = [ind for ind in range(len(contours)) if areas_ratio[ind] < 1e-3] - contours_index_big = [ind for ind in range(len(contours)) if areas_ratio[ind] >= 1e-3] - - #contours_> = [contours[ind] for ind in contours_index_big] + #contours_> = [contours[ind] for ind in contours_index_large] indexes_to_be_removed = [] for ind_small in contours_index_small: - results = [cv2.pointPolygonTest(contours[ind], (cx_main[ind_small], cy_main[ind_small]), False) - for ind in contours_index_big] - if marginal_cnts: - results_marginal = [cv2.pointPolygonTest(marginal_cnts[ind], + results = [cv2.pointPolygonTest(contours[ind_large], (cx_main[ind_small], + cy_main[ind_small]), + False) + for ind_large in contours_index_large] + results = np.array(results) + if np.any(results==1): + indexes_to_be_removed.append(ind_small) + elif marginal_cnts: + results_marginal = [cv2.pointPolygonTest(marginal_cnt, (cx_main[ind_small], cy_main[ind_small]), False) - for ind in range(len(marginal_cnts))] + for marginal_cnt in marginal_cnts] results_marginal = np.array(results_marginal) - if np.any(results_marginal==1): indexes_to_be_removed.append(ind_small) - results = np.array(results) - - if np.any(results==1): - indexes_to_be_removed.append(ind_small) - - if len(indexes_to_be_removed)>0: - indexes_to_be_removed = np.unique(indexes_to_be_removed) - indexes_to_be_removed = np.sort(indexes_to_be_removed)[::-1] - for ind in indexes_to_be_removed: - contours.pop(ind) - if len(contours_d_ordered)>0: - contours_d_ordered.pop(ind) + contours = np.delete(contours, indexes_to_be_removed, axis=0) + if len(contours_d_ordered): + contours_d_ordered = np.delete(contours_d_ordered, indexes_to_be_removed, axis=0) return contours, contours_d_ordered @@ -4285,33 +4276,21 @@ class Eynollah: contours_txtline_of_all_textregions = [] indexes_of_textline_tot = [] index_textline_inside_textregion = [] + for ind_region, textlines in enumerate(contours): + contours_txtline_of_all_textregions.extend(textlines) + index_textline_inside_textregion.extend(list(range(len(textlines)))) + indexes_of_textline_tot.extend([ind_region] * len(textlines)) - for jj in range(len(contours)): - contours_txtline_of_all_textregions = contours_txtline_of_all_textregions + contours[jj] - - ind_textline_inside_tr = list(range(len(contours[jj]))) - index_textline_inside_textregion = index_textline_inside_textregion + ind_textline_inside_tr - ind_ins = [jj] * len(contours[jj]) - indexes_of_textline_tot = indexes_of_textline_tot + ind_ins - - M_main_tot = [cv2.moments(contours_txtline_of_all_textregions[j]) - for j in range(len(contours_txtline_of_all_textregions))] - cx_main_tot = [(M_main_tot[j]["m10"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] - cy_main_tot = [(M_main_tot[j]["m01"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] - - areas_tot = [cv2.contourArea(con_ind) for con_ind in contours_txtline_of_all_textregions] + areas_tot = np.array(list(map(cv2.contourArea, contours_txtline_of_all_textregions))) area_tot_tot = image.shape[0]*image.shape[1] + cx_main_tot, cy_main_tot = find_center_of_contours(contours_txtline_of_all_textregions) - textregion_index_to_del = [] - textline_in_textregion_index_to_del = [] + textline_in_textregion_index_to_del = {} for ij in range(len(contours_txtline_of_all_textregions)): - args_all = list(np.array(range(len(contours_txtline_of_all_textregions)))) - args_all.pop(ij) - - areas_without = np.array(areas_tot)[args_all] area_of_con_interest = areas_tot[ij] - - args_with_bigger_area = np.array(args_all)[areas_without > 1.5*area_of_con_interest] + args_without = np.delete(np.arange(len(contours_txtline_of_all_textregions)), ij) + areas_without = areas_tot[args_without] + args_with_bigger_area = args_without[areas_without > 1.5*area_of_con_interest] if len(args_with_bigger_area)>0: results = [cv2.pointPolygonTest(contours_txtline_of_all_textregions[ind], @@ -4322,18 +4301,15 @@ class Eynollah: results = np.array(results) if np.any(results==1): #print(indexes_of_textline_tot[ij], index_textline_inside_textregion[ij]) - textregion_index_to_del.append(int(indexes_of_textline_tot[ij])) - textline_in_textregion_index_to_del.append(int(index_textline_inside_textregion[ij])) - #contours[int(indexes_of_textline_tot[ij])].pop(int(index_textline_inside_textregion[ij])) + textline_in_textregion_index_to_del.setdefault( + indexes_of_textline_tot[ij], list()).append( + index_textline_inside_textregion[ij]) + #contours[indexes_of_textline_tot[ij]].pop(index_textline_inside_textregion[ij]) - textregion_index_to_del = np.array(textregion_index_to_del) - textline_in_textregion_index_to_del = np.array(textline_in_textregion_index_to_del) - for ind_u_a_trs in np.unique(textregion_index_to_del): - textline_in_textregion_index_to_del_ind = \ - textline_in_textregion_index_to_del[textregion_index_to_del==ind_u_a_trs] - textline_in_textregion_index_to_del_ind = np.sort(textline_in_textregion_index_to_del_ind)[::-1] - for ittrd in textline_in_textregion_index_to_del_ind: - contours[ind_u_a_trs].pop(ittrd) + for textregion_index_to_del in textline_in_textregion_index_to_del: + contours[textregion_index_to_del] = list(np.delete( + contours[textregion_index_to_del], + textline_in_textregion_index_to_del[textregion_index_to_del])) return contours From 8869c20c33c673e02e4f60081b96a8bd71d823d2 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 6 Oct 2025 14:53:47 +0200 Subject: [PATCH 313/492] updating CHANGELOG for v0.5.0 --- CHANGELOG.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bfdd1ce..70e8854 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,12 @@ Versioned according to [Semantic Versioning](http://semver.org/). Fixed: * restoring the contour in the original image caused an error due to an empty tuple, #154 + * removed NumPy warnings (fixed issue #158) + * fixed issue #124 + * Drop capitals are now handled separately from their corresponding textline + * Marginals are now divided into left and right. Their reading order is written first for left marginals, then for right marginals, and within each side from top to bottom + * Added a new page extraction model. Instead of bounding boxes, it outputs page contours in the XML file, improving results for skewed pages + * Improved reading order for cases where a textline is segmented into multiple smaller textlines Changed @@ -24,6 +30,20 @@ Added: * `eynollah machine-based-reading-order` CLI to run reading order detection, #175 * `eynollah enhancement` CLI to run image enhancement, #175 * Improved models for page extraction and reading order detection, #175 + * For the lightweight version (layout and textline detection), thresholds are now assigned to the artificial class. Users can apply these thresholds to improve detection of isolated textlines and regions. To counteract the drawback of thresholding, the skeleton of the artificial class is used to keep lines as thin as possible (resolved issues #163 and #161) + * Added and integrated trained CNN-RNN OCR models + * Added and integrated a trained TrOCR model + * Improved OCR detection to support vertical and curved textlines + * Introduced a new machine-based reading order model with rotation augmentation + * Optimized reading order speed by clustering text regions that belong to the same block, maintaining top-to-bottom order + * Implemented text merging across textlines based on hyphenation when a line ends with a hyphen + * Integrated image enhancement as a separate use case + * Added reading order functionality on the layout level as a separate use case + * CNN-RNN OCR models provide confidence scores for predictions + * Added OCR visualization: predicted OCR can be overlaid on an image of the same size as the input + * Introduced a threshold value for CNN-RNN OCR models, allowing users to filter out low-confidence textline predictions + * For OCR, users can specify a single model by name instead of always using the default model + * Under the OCR use case, if ground-truth XMLs and images are available, textline image and corresponding text extraction can now be performed Merged PRs: From 4ffe6190d2c6b885b27330027f4a0d8fd97a32f6 Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 9 Oct 2025 14:03:26 +0200 Subject: [PATCH 314/492] :memo: changelog --- CHANGELOG.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 70e8854..5ca95a7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,8 +10,8 @@ Versioned according to [Semantic Versioning](http://semver.org/). Fixed: * restoring the contour in the original image caused an error due to an empty tuple, #154 - * removed NumPy warnings (fixed issue #158) - * fixed issue #124 + * removed NumPy warnings calculating sigma, mean, (fixed issue #158) + * fixed bug in `separate_lines.py`, #124 * Drop capitals are now handled separately from their corresponding textline * Marginals are now divided into left and right. Their reading order is written first for left marginals, then for right marginals, and within each side from top to bottom * Added a new page extraction model. Instead of bounding boxes, it outputs page contours in the XML file, improving results for skewed pages @@ -31,7 +31,7 @@ Added: * `eynollah enhancement` CLI to run image enhancement, #175 * Improved models for page extraction and reading order detection, #175 * For the lightweight version (layout and textline detection), thresholds are now assigned to the artificial class. Users can apply these thresholds to improve detection of isolated textlines and regions. To counteract the drawback of thresholding, the skeleton of the artificial class is used to keep lines as thin as possible (resolved issues #163 and #161) - * Added and integrated trained CNN-RNN OCR models + * Added and integrated a trained CNN-RNN OCR models * Added and integrated a trained TrOCR model * Improved OCR detection to support vertical and curved textlines * Introduced a new machine-based reading order model with rotation augmentation @@ -43,7 +43,7 @@ Added: * Added OCR visualization: predicted OCR can be overlaid on an image of the same size as the input * Introduced a threshold value for CNN-RNN OCR models, allowing users to filter out low-confidence textline predictions * For OCR, users can specify a single model by name instead of always using the default model - * Under the OCR use case, if ground-truth XMLs and images are available, textline image and corresponding text extraction can now be performed + * Under the OCR use case, if Ground Truth XMLs and images are available, textline image and corresponding text extraction can now be performed Merged PRs: From 8c3d5eb0eb0eccd97542a86b0d3385e95f4f1da0 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 2 Oct 2025 21:07:35 +0200 Subject: [PATCH 315/492] separate_marginals_to_left_and_right_and_order_from_top_to_down: simplify - use new `find_center_of_contours` - avoid loops in favour of array processing - avoid repeated sorting --- src/eynollah/eynollah.py | 75 +++++++++++++++++----------------- src/eynollah/utils/__init__.py | 2 +- 2 files changed, 39 insertions(+), 38 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index b2d9016..9eba3d3 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4418,52 +4418,53 @@ class Eynollah: def separate_marginals_to_left_and_right_and_order_from_top_to_down( self, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes_marginals, mid_point_of_page_width): - cx_marg, cy_marg, _, _, _, _, _ = find_new_features_of_contours( - polygons_of_marginals) - + cx_marg, cy_marg = find_center_of_contours(polygons_of_marginals) cx_marg = np.array(cx_marg) cy_marg = np.array(cy_marg) + + def split(lis): + array = np.array(lis) + return (list(array[cx_marg < mid_point_of_page_width]), + list(array[cx_marg >= mid_point_of_page_width])) + + (poly_marg_left, + poly_marg_right) = \ + split(polygons_of_marginals) + + (all_found_textline_polygons_marginals_left, + all_found_textline_polygons_marginals_right) = \ + split(all_found_textline_polygons_marginals) - poly_marg_left = list( np.array(polygons_of_marginals)[cx_marg < mid_point_of_page_width] ) - poly_marg_right = list( np.array(polygons_of_marginals)[cx_marg >= mid_point_of_page_width] ) + (all_box_coord_marginals_left, + all_box_coord_marginals_right) = \ + split(all_box_coord_marginals) - all_found_textline_polygons_marginals_left = \ - list( np.array(all_found_textline_polygons_marginals)[cx_marg < mid_point_of_page_width] ) - all_found_textline_polygons_marginals_right = \ - list( np.array(all_found_textline_polygons_marginals)[cx_marg >= mid_point_of_page_width] ) + (slopes_marg_left, + slopes_marg_right) = \ + split(slopes_marginals) - all_box_coord_marginals_left = list( np.array(all_box_coord_marginals)[cx_marg < mid_point_of_page_width] ) - all_box_coord_marginals_right = list( np.array(all_box_coord_marginals)[cx_marg >= mid_point_of_page_width] ) + (cy_marg_left, + cy_marg_right) = \ + split(cy_marg) + + order_left = np.argsort(cy_marg_left) + order_right = np.argsort(cy_marg_right) + def sort_left(lis): + return list(np.array(lis)[order_left]) + def sort_right(lis): + return list(np.array(lis)[order_right]) - slopes_marg_left = list( np.array(slopes_marginals)[cx_marg < mid_point_of_page_width] ) - slopes_marg_right = list( np.array(slopes_marginals)[cx_marg >= mid_point_of_page_width] ) + ordered_left_marginals = sort_left(poly_marg_left) + ordered_right_marginals = sort_right(poly_marg_right) - cy_marg_left = cy_marg[cx_marg < mid_point_of_page_width] - cy_marg_right = cy_marg[cx_marg >= mid_point_of_page_width] + ordered_left_marginals_textline = sort_left(all_found_textline_polygons_marginals_left) + ordered_right_marginals_textline = sort_right(all_found_textline_polygons_marginals_right) - ordered_left_marginals = [poly for _, poly in sorted(zip(cy_marg_left, poly_marg_left), - key=lambda x: x[0])] - ordered_right_marginals = [poly for _, poly in sorted(zip(cy_marg_right, poly_marg_right), - key=lambda x: x[0])] + ordered_left_marginals_bbox = sort_left(all_box_coord_marginals_left) + ordered_right_marginals_bbox = sort_right(all_box_coord_marginals_right) - ordered_left_marginals_textline = [poly for _, poly in sorted(zip(cy_marg_left, - all_found_textline_polygons_marginals_left), - key=lambda x: x[0])] - ordered_right_marginals_textline = [poly for _, poly in sorted(zip(cy_marg_right, - all_found_textline_polygons_marginals_right), - key=lambda x: x[0])] - - ordered_left_marginals_bbox = [poly for _, poly in sorted(zip(cy_marg_left, - all_box_coord_marginals_left), - key=lambda x: x[0])] - ordered_right_marginals_bbox = [poly for _, poly in sorted(zip(cy_marg_right, - all_box_coord_marginals_right), - key=lambda x: x[0])] - - ordered_left_slopes_marginals = [poly for _, poly in sorted(zip(cy_marg_left, slopes_marg_left), - key=lambda x: x[0])] - ordered_right_slopes_marginals = [poly for _, poly in sorted(zip(cy_marg_right, slopes_marg_right), - key=lambda x: x[0])] + ordered_left_slopes_marginals = sort_left(slopes_marg_left) + ordered_right_slopes_marginals = sort_right(slopes_marg_right) return (ordered_left_marginals, ordered_right_marginals, diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 52bf3ef..4eee5a9 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1417,7 +1417,7 @@ def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new( imgray = cv2.cvtColor(sep_ver_hor_cross, cv2.COLOR_BGR2GRAY) ret, thresh = cv2.threshold(imgray, 0, 255, 0) contours_cross,_=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) - cx_cross,cy_cross ,_ , _, _ ,_,_=find_new_features_of_contours(contours_cross) + cx_cross, cy_cross = find_center_of_contours(contours_cross) for ii in range(len(cx_cross)): img_p_in[int(cy_cross[ii])-30:int(cy_cross[ii])+30,int(cx_cross[ii])+5:int(cx_cross[ii])+40,0]=0 img_p_in[int(cy_cross[ii])-30:int(cy_cross[ii])+30,int(cx_cross[ii])-40:int(cx_cross[ii])-4,0]=0 From 3f3353ec3a53384a100ef9ebe2fefb7e092e968c Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 2 Oct 2025 21:28:04 +0200 Subject: [PATCH 316/492] do_order_of_regions: simplify - avoid loops in favour of array processing --- src/eynollah/eynollah.py | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 9eba3d3..7f7f53f 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -2518,6 +2518,8 @@ class Eynollah: self, contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot): self.logger.debug("enter do_order_of_regions_full_layout") + contours_only_text_parent = np.array(contours_only_text_parent) + contours_only_text_parent_h = np.array(contours_only_text_parent_h) boxes = np.array(boxes, dtype=int) # to be on the safe side cx_text_only, cy_text_only, x_min_text_only, _, _, _, y_cor_x_min_main = find_new_features_of_contours( contours_only_text_parent) @@ -2573,14 +2575,9 @@ class Eynollah: xs = slice(*boxes[iij][0:2]) args_contours_box = args_contours[np.array(arg_text_con) == iij] args_contours_box_h = args_contours_h[np.array(arg_text_con_h) == iij] - con_inter_box = [] - con_inter_box_h = [] + con_inter_box = contours_only_text_parent[args_contours_box] + con_inter_box_h = contours_only_text_parent_h[args_contours_box_h] - for box in args_contours_box: - con_inter_box.append(contours_only_text_parent[box]) - - for box in args_contours_box_h: - con_inter_box_h.append(contours_only_text_parent_h[box]) indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, boxes[iij][2]) @@ -2675,14 +2672,8 @@ class Eynollah: xs = slice(*boxes[iij][0:2]) args_contours_box = args_contours[np.array(arg_text_con) == iij] args_contours_box_h = args_contours_h[np.array(arg_text_con_h) == iij] - con_inter_box = [] - con_inter_box_h = [] - - for box in args_contours_box: - con_inter_box.append(contours_only_text_parent[box]) - - for box in args_contours_box_h: - con_inter_box_h.append(contours_only_text_parent_h[box]) + con_inter_box = contours_only_text_parent[args_contours_box] + con_inter_box_h = contours_only_text_parent_h[args_contours_box_h] indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, boxes[iij][2]) @@ -2729,6 +2720,8 @@ class Eynollah: self, contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot): self.logger.debug("enter do_order_of_regions_no_full_layout") + contours_only_text_parent = np.array(contours_only_text_parent) + contours_only_text_parent_h = np.array(contours_only_text_parent_h) boxes = np.array(boxes, dtype=int) # to be on the safe side cx_text_only, cy_text_only, x_min_text_only, _, _, _, y_cor_x_min_main = find_new_features_of_contours( contours_only_text_parent) @@ -2761,10 +2754,8 @@ class Eynollah: ys = slice(*boxes[iij][2:4]) xs = slice(*boxes[iij][0:2]) args_contours_box = args_contours[np.array(arg_text_con) == iij] - con_inter_box = [] + con_inter_box = contours_only_text_parent[args_contours_box] con_inter_box_h = [] - for i in range(len(args_contours_box)): - con_inter_box.append(contours_only_text_parent[args_contours_box[i]]) indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, boxes[iij][2]) From 415b2cbad843d4fa083f94f459777af97bd81234 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 2 Oct 2025 21:36:22 +0200 Subject: [PATCH 317/492] eynollah, drop_capitals: simplify - use new `find_center_of_contours` --- src/eynollah/eynollah.py | 21 ++++++++------------- src/eynollah/utils/drop_capitals.py | 27 ++++++++++++++------------- 2 files changed, 22 insertions(+), 26 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 7f7f53f..357c0c2 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -70,6 +70,7 @@ from .utils.contour import ( filter_contours_area_of_image, filter_contours_area_of_image_tables, find_contours_mean_y_diff, + find_center_of_contours, find_new_features_of_contours, find_features_of_contours, get_text_region_boxes_by_given_contours, @@ -1859,14 +1860,10 @@ class Eynollah: def get_slopes_and_deskew_new_light2(self, contours, contours_par, textline_mask_tot, boxes, slope_deskew): polygons_of_textlines = return_contours_of_interested_region(textline_mask_tot,1,0.00001) - M_main_tot = [cv2.moments(polygons_of_textlines[j]) - for j in range(len(polygons_of_textlines))] + cx_main_tot, cy_main_tot = find_center_of_contours(polygons_of_textlines) + w_h_textlines = [cv2.boundingRect(polygon)[2:] for polygon in polygons_of_textlines] - w_h_textlines = [cv2.boundingRect(polygons_of_textlines[i])[2:] for i in range(len(polygons_of_textlines))] - cx_main_tot = [(M_main_tot[j]["m10"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] - cy_main_tot = [(M_main_tot[j]["m01"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] - - args_textlines = np.array(range(len(polygons_of_textlines))) + args_textlines = np.arange(len(polygons_of_textlines)) all_found_textline_polygons = [] slopes = [] all_box_coord =[] @@ -4809,8 +4806,8 @@ class Eynollah: areas_cnt_text_parent = self.return_list_of_contours_with_desired_order( areas_cnt_text_parent, index_con_parents) - cx_bigest_big, cy_biggest_big, _, _, _, _, _ = find_new_features_of_contours([contours_biggest]) - cx_bigest, cy_biggest, _, _, _, _, _ = find_new_features_of_contours(contours_only_text_parent) + cx_bigest_big, cy_biggest_big = find_center_of_contours([contours_biggest]) + cx_bigest, cy_biggest = find_center_of_contours(contours_only_text_parent) if np.abs(slope_deskew) >= SLOPE_THRESHOLD: contours_only_text_d, hir_on_text_d = return_contours_of_image(text_only_d) @@ -4834,10 +4831,8 @@ class Eynollah: areas_cnt_text_d = self.return_list_of_contours_with_desired_order( areas_cnt_text_d, index_con_parents_d) - cx_bigest_d_big, cy_biggest_d_big, _, _, _, _, _ = \ - find_new_features_of_contours([contours_biggest_d]) - cx_bigest_d, cy_biggest_d, _, _, _, _, _ = \ - find_new_features_of_contours(contours_only_text_parent_d) + cx_bigest_d_big, cy_biggest_d_big = find_center_of_contours([contours_biggest_d]) + cx_bigest_d, cy_biggest_d = find_center_of_contours(contours_only_text_parent_d) try: if len(cx_bigest_d) >= 5: cx_bigest_d_last5 = cx_bigest_d[-5:] diff --git a/src/eynollah/utils/drop_capitals.py b/src/eynollah/utils/drop_capitals.py index 67547d3..9f82fac 100644 --- a/src/eynollah/utils/drop_capitals.py +++ b/src/eynollah/utils/drop_capitals.py @@ -1,6 +1,7 @@ import numpy as np import cv2 from .contour import ( + find_center_of_contours, find_new_features_of_contours, return_contours_of_image, return_parent_contours, @@ -22,8 +23,8 @@ def adhere_drop_capital_region_into_corresponding_textline( ): # print(np.shape(all_found_textline_polygons),np.shape(all_found_textline_polygons[3]),'all_found_textline_polygonsshape') # print(all_found_textline_polygons[3]) - cx_m, cy_m, _, _, _, _, _ = find_new_features_of_contours(contours_only_text_parent) - cx_h, cy_h, _, _, _, _, _ = find_new_features_of_contours(contours_only_text_parent_h) + cx_m, cy_m = find_center_of_contours(contours_only_text_parent) + cx_h, cy_h = find_center_of_contours(contours_only_text_parent_h) cx_d, cy_d, _, _, y_min_d, y_max_d, _ = find_new_features_of_contours(polygons_of_drop_capitals) img_con_all = np.zeros((text_regions_p.shape[0], text_regions_p.shape[1], 3)) @@ -89,9 +90,9 @@ def adhere_drop_capital_region_into_corresponding_textline( region_final = region_with_intersected_drop[np.argmax(sum_pixels_of_intersection)] - 1 # print(region_final,'region_final') - # cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) + # cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) try: - cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) + cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) # print(all_box_coord[j_cont]) # print(cx_t) # print(cy_t) @@ -153,9 +154,9 @@ def adhere_drop_capital_region_into_corresponding_textline( # areas_main=np.array([cv2.contourArea(all_found_textline_polygons[int(region_final)][0][j] ) for j in range(len(all_found_textline_polygons[int(region_final)]))]) - # cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) + # cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) try: - cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) + cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) # print(all_box_coord[j_cont]) # print(cx_t) # print(cy_t) @@ -208,7 +209,7 @@ def adhere_drop_capital_region_into_corresponding_textline( try: # print(all_found_textline_polygons[j_cont][0]) - cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) + cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) # print(all_box_coord[j_cont]) # print(cx_t) # print(cy_t) @@ -261,7 +262,7 @@ def adhere_drop_capital_region_into_corresponding_textline( else: pass - ##cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) + ##cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) ###print(all_box_coord[j_cont]) ###print(cx_t) ###print(cy_t) @@ -315,9 +316,9 @@ def adhere_drop_capital_region_into_corresponding_textline( region_final = region_with_intersected_drop[np.argmax(sum_pixels_of_intersection)] - 1 # print(region_final,'region_final') - # cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) + # cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) try: - cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) + cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) # print(all_box_coord[j_cont]) # print(cx_t) # print(cy_t) @@ -375,12 +376,12 @@ def adhere_drop_capital_region_into_corresponding_textline( # areas_main=np.array([cv2.contourArea(all_found_textline_polygons[int(region_final)][0][j] ) for j in range(len(all_found_textline_polygons[int(region_final)]))]) - # cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) + # cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) # print(cx_t,'print') try: # print(all_found_textline_polygons[j_cont][0]) - cx_t, cy_t, _, _, _, _, _ = find_new_features_of_contours(all_found_textline_polygons[int(region_final)]) + cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[int(region_final)]) # print(all_box_coord[j_cont]) # print(cx_t) # print(cy_t) @@ -453,7 +454,7 @@ def adhere_drop_capital_region_into_corresponding_textline( #####try: #####if len(contours_new_parent)==1: ######print(all_found_textline_polygons[j_cont][0]) - #####cx_t,cy_t ,_, _, _ ,_,_= find_new_features_of_contours(all_found_textline_polygons[j_cont]) + #####cx_t, cy_t = find_center_of_contours(all_found_textline_polygons[j_cont]) ######print(all_box_coord[j_cont]) ######print(cx_t) ######print(cy_t) From a1c8fd44677fc894395652de070710a5fc6aae2e Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 2 Oct 2025 21:41:37 +0200 Subject: [PATCH 318/492] do_order_of_regions / order_of_regions: simplify - array-convert only once (before returning from `order_of_regions`) - avoid passing `matrix_of_orders` unnecessarily between `order_of_regions` and `order_and_id_of_texts` --- src/eynollah/eynollah.py | 73 +++++++++++++++++----------------- src/eynollah/utils/__init__.py | 2 +- src/eynollah/utils/xml.py | 6 +-- 3 files changed, 38 insertions(+), 43 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 357c0c2..8351ab6 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -2567,26 +2567,25 @@ class Eynollah: ref_point = 0 order_of_texts_tot = [] id_of_texts_tot = [] - for iij in range(len(boxes)): - ys = slice(*boxes[iij][2:4]) - xs = slice(*boxes[iij][0:2]) + for iij, box in enumerate(boxes): + ys = slice(*box[2:4]) + xs = slice(*box[0:2]) args_contours_box = args_contours[np.array(arg_text_con) == iij] args_contours_box_h = args_contours_h[np.array(arg_text_con_h) == iij] con_inter_box = contours_only_text_parent[args_contours_box] con_inter_box_h = contours_only_text_parent_h[args_contours_box_h] - - indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( - textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, boxes[iij][2]) + indexes_sorted, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( + textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, box[2]) order_of_texts, id_of_texts = order_and_id_of_texts( con_inter_box, con_inter_box_h, - matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) + indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) - indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1] - indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1] - indexes_sorted_head = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 2] - indexes_by_type_head = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 2] + indexes_sorted_main = indexes_sorted[kind_of_texts_sorted == 1] + indexes_by_type_main = index_by_kind_sorted[kind_of_texts_sorted == 1] + indexes_sorted_head = indexes_sorted[kind_of_texts_sorted == 2] + indexes_by_type_head = index_by_kind_sorted[kind_of_texts_sorted == 2] for zahler, _ in enumerate(args_contours_box): arg_order_v = indexes_sorted_main[zahler] @@ -2664,25 +2663,25 @@ class Eynollah: ref_point = 0 order_of_texts_tot = [] id_of_texts_tot = [] - for iij, _ in enumerate(boxes): - ys = slice(*boxes[iij][2:4]) - xs = slice(*boxes[iij][0:2]) + for iij, box in enumerate(boxes): + ys = slice(*box[2:4]) + xs = slice(*box[0:2]) args_contours_box = args_contours[np.array(arg_text_con) == iij] args_contours_box_h = args_contours_h[np.array(arg_text_con_h) == iij] con_inter_box = contours_only_text_parent[args_contours_box] con_inter_box_h = contours_only_text_parent_h[args_contours_box_h] - indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( - textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, boxes[iij][2]) + indexes_sorted, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( + textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, box[2]) order_of_texts, id_of_texts = order_and_id_of_texts( con_inter_box, con_inter_box_h, - matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) + indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) - indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1] - indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1] - indexes_sorted_head = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 2] - indexes_by_type_head = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 2] + indexes_sorted_main = indexes_sorted[kind_of_texts_sorted == 1] + indexes_by_type_main = index_by_kind_sorted[kind_of_texts_sorted == 1] + indexes_sorted_head = indexes_sorted[kind_of_texts_sorted == 2] + indexes_by_type_head = index_by_kind_sorted[kind_of_texts_sorted == 2] for zahler, _ in enumerate(args_contours_box): arg_order_v = indexes_sorted_main[zahler] @@ -2747,22 +2746,22 @@ class Eynollah: ref_point = 0 order_of_texts_tot = [] id_of_texts_tot = [] - for iij in range(len(boxes)): - ys = slice(*boxes[iij][2:4]) - xs = slice(*boxes[iij][0:2]) + for iij, box in enumerate(boxes): + ys = slice(*box[2:4]) + xs = slice(*box[0:2]) args_contours_box = args_contours[np.array(arg_text_con) == iij] con_inter_box = contours_only_text_parent[args_contours_box] con_inter_box_h = [] - indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( - textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, boxes[iij][2]) + indexes_sorted, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( + textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, box[2]) order_of_texts, id_of_texts = order_and_id_of_texts( con_inter_box, con_inter_box_h, - matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) + indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) - indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1] - indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1] + indexes_sorted_main = indexes_sorted[kind_of_texts_sorted == 1] + indexes_by_type_main = index_by_kind_sorted[kind_of_texts_sorted == 1] for zahler, _ in enumerate(args_contours_box): arg_order_v = indexes_sorted_main[zahler] @@ -2808,24 +2807,24 @@ class Eynollah: ref_point = 0 order_of_texts_tot = [] id_of_texts_tot = [] - for iij in range(len(boxes)): - ys = slice(*boxes[iij][2:4]) - xs = slice(*boxes[iij][0:2]) + for iij, box in enumerate(boxes): + ys = slice(*box[2:4]) + xs = slice(*box[0:2]) args_contours_box = args_contours[np.array(arg_text_con) == iij] con_inter_box = [] con_inter_box_h = [] for i in range(len(args_contours_box)): con_inter_box.append(contours_only_text_parent[args_contours_box[i]]) - indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( - textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, boxes[iij][2]) + indexes_sorted, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( + textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, box[2]) order_of_texts, id_of_texts = order_and_id_of_texts( con_inter_box, con_inter_box_h, - matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) + indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) - indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1] - indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1] + indexes_sorted_main = indexes_sorted[kind_of_texts_sorted == 1] + indexes_by_type_main = index_by_kind_sorted[kind_of_texts_sorted == 1] for zahler, _ in enumerate(args_contours_box): arg_order_v = indexes_sorted_main[zahler] diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 4eee5a9..27a85da 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1325,7 +1325,7 @@ def order_of_regions(textline_mask, contours_main, contours_header, y_ref): final_types.append(1) final_index_type.append(ind_missed) - return final_indexers_sorted, matrix_of_orders, final_types, final_index_type + return np.array(final_indexers_sorted), np.array(final_types), np.array(final_index_type) def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new( img_p_in_ver, img_in_hor,num_col_classifier): diff --git a/src/eynollah/utils/xml.py b/src/eynollah/utils/xml.py index 13420df..a61dadb 100644 --- a/src/eynollah/utils/xml.py +++ b/src/eynollah/utils/xml.py @@ -65,11 +65,7 @@ def xml_reading_order(page, order_of_texts, id_of_marginalia_left, id_of_margina og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=id_marginal)) region_counter.inc('region') -def order_and_id_of_texts(found_polygons_text_region, found_polygons_text_region_h, matrix_of_orders, indexes_sorted, index_of_types, kind_of_texts, ref_point): - indexes_sorted = np.array(indexes_sorted) - index_of_types = np.array(index_of_types) - kind_of_texts = np.array(kind_of_texts) - +def order_and_id_of_texts(found_polygons_text_region, found_polygons_text_region_h, indexes_sorted, index_of_types, kind_of_texts, ref_point): id_of_texts = [] order_of_texts = [] From 4950e6bd784e2078ca7b65b1fcbf20de29d0f613 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 2 Oct 2025 22:28:52 +0200 Subject: [PATCH 319/492] order_of_regions: simplify - use new `find_center_of_contours` - avoid unused calculations - avoid loops in favour of array processing --- src/eynollah/utils/__init__.py | 131 +++++++++------------------------ 1 file changed, 34 insertions(+), 97 deletions(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 27a85da..92da14a 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -15,10 +15,21 @@ from scipy.ndimage import gaussian_filter1d from .is_nan import isNaN from .contour import (contours_in_same_horizon, + find_center_of_contours, find_new_features_of_contours, return_contours_of_image, return_parent_contours) +def pairwise(iterable): + # pairwise('ABCDEFG') → AB BC CD DE EF FG + + iterator = iter(iterable) + a = next(iterator, None) + + for b in iterator: + yield a, b + a = b + def return_x_start_end_mothers_childs_and_type_of_reading_order( x_min_hor_some, x_max_hor_some, cy_hor_some, peak_points, cy_hor_diff): @@ -1183,106 +1194,45 @@ def small_textlines_to_parent_adherence2(textlines_con, textline_iamge, num_col) textlines_con_changed.append(textlines_big_org_form) return textlines_con_changed -def order_of_regions(textline_mask, contours_main, contours_header, y_ref): +def order_of_regions(textline_mask, contours_main, contours_head, y_ref): ##plt.imshow(textline_mask) ##plt.show() - """ - print(len(contours_main),'contours_main') - mada_n=textline_mask.sum(axis=1) - y=mada_n[:] - - y_help=np.zeros(len(y)+40) - y_help[20:len(y)+20]=y - x=np.arange(len(y)) - - peaks_real, _ = find_peaks(gaussian_filter1d(y, 3), height=0) - ##plt.imshow(textline_mask[:,:]) - ##plt.show() - - sigma_gaus=8 - z= gaussian_filter1d(y_help, sigma_gaus) - zneg_rev=-y_help+np.max(y_help) - zneg=np.zeros(len(zneg_rev)+40) - zneg[20:len(zneg_rev)+20]=zneg_rev - zneg= gaussian_filter1d(zneg, sigma_gaus) - - peaks, _ = find_peaks(z, height=0) - peaks_neg, _ = find_peaks(zneg, height=0) - peaks_neg=peaks_neg-20-20 - peaks=peaks-20 - """ - textline_sum_along_width = textline_mask.sum(axis=1) - - y = textline_sum_along_width[:] + y = textline_mask.sum(axis=1) # horizontal projection profile y_padded = np.zeros(len(y) + 40) y_padded[20 : len(y) + 20] = y - x = np.arange(len(y)) - - peaks_real, _ = find_peaks(gaussian_filter1d(y, 3), height=0) sigma_gaus = 8 - z = gaussian_filter1d(y_padded, sigma_gaus) - zneg_rev = -y_padded + np.max(y_padded) + #z = gaussian_filter1d(y_padded, sigma_gaus) + #peaks, _ = find_peaks(z, height=0) + #peaks = peaks - 20 + zneg_rev = np.max(y_padded) - y_padded zneg = np.zeros(len(zneg_rev) + 40) zneg[20 : len(zneg_rev) + 20] = zneg_rev zneg = gaussian_filter1d(zneg, sigma_gaus) - peaks, _ = find_peaks(z, height=0) peaks_neg, _ = find_peaks(zneg, height=0) peaks_neg = peaks_neg - 20 - 20 - peaks = peaks - 20 ##plt.plot(z) ##plt.show() - if contours_main != None: - areas_main = np.array([cv2.contourArea(contours_main[j]) for j in range(len(contours_main))]) - M_main = [cv2.moments(contours_main[j]) for j in range(len(contours_main))] - cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] - cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] - x_min_main = np.array([np.min(contours_main[j][:, 0, 0]) for j in range(len(contours_main))]) - x_max_main = np.array([np.max(contours_main[j][:, 0, 0]) for j in range(len(contours_main))]) + cx_main, cy_main = find_center_of_contours(contours_main) + cx_head, cy_head = find_center_of_contours(contours_head) - y_min_main = np.array([np.min(contours_main[j][:, 0, 1]) for j in range(len(contours_main))]) - y_max_main = np.array([np.max(contours_main[j][:, 0, 1]) for j in range(len(contours_main))]) + peaks_neg_new = np.append(np.insert(peaks_neg, 0, 0), textline_mask.shape[0]) + # offset from bbox of mask + peaks_neg_new += y_ref - if len(contours_header) != None: - areas_header = np.array([cv2.contourArea(contours_header[j]) for j in range(len(contours_header))]) - M_header = [cv2.moments(contours_header[j]) for j in range(len(contours_header))] - cx_header = [(M_header[j]["m10"] / (M_header[j]["m00"] + 1e-32)) for j in range(len(M_header))] - cy_header = [(M_header[j]["m01"] / (M_header[j]["m00"] + 1e-32)) for j in range(len(M_header))] - x_min_header = np.array([np.min(contours_header[j][:, 0, 0]) for j in range(len(contours_header))]) - x_max_header = np.array([np.max(contours_header[j][:, 0, 0]) for j in range(len(contours_header))]) - - y_min_header = np.array([np.min(contours_header[j][:, 0, 1]) for j in range(len(contours_header))]) - y_max_header = np.array([np.max(contours_header[j][:, 0, 1]) for j in range(len(contours_header))]) - # print(cy_main,'mainy') - - peaks_neg_new = [] - peaks_neg_new.append(0 + y_ref) - for iii in range(len(peaks_neg)): - peaks_neg_new.append(peaks_neg[iii] + y_ref) - peaks_neg_new.append(textline_mask.shape[0] + y_ref) - - if len(cy_main) > 0 and np.max(cy_main) > np.max(peaks_neg_new): - cy_main = np.array(cy_main) * (np.max(peaks_neg_new) / np.max(cy_main)) - 10 - if contours_main != None: - indexer_main = np.arange(len(contours_main)) - if contours_main != None: - len_main = len(contours_main) - else: - len_main = 0 - - matrix_of_orders = np.zeros((len(contours_main) + len(contours_header), 5)) - matrix_of_orders[:, 0] = np.arange(len(contours_main) + len(contours_header)) + matrix_of_orders = np.zeros((len(contours_main) + len(contours_head), 5), dtype=int) + matrix_of_orders[:, 0] = np.arange(len(contours_main) + len(contours_head)) matrix_of_orders[: len(contours_main), 1] = 1 matrix_of_orders[len(contours_main) :, 1] = 2 matrix_of_orders[: len(contours_main), 2] = cx_main - matrix_of_orders[len(contours_main) :, 2] = cx_header + matrix_of_orders[len(contours_main) :, 2] = cx_head matrix_of_orders[: len(contours_main), 3] = cy_main - matrix_of_orders[len(contours_main) :, 3] = cy_header + matrix_of_orders[len(contours_main) :, 3] = cy_head matrix_of_orders[: len(contours_main), 4] = np.arange(len(contours_main)) - matrix_of_orders[len(contours_main) :, 4] = np.arange(len(contours_header)) + matrix_of_orders[len(contours_main) :, 4] = np.arange(len(contours_head)) # print(peaks_neg_new,'peaks_neg_new') # print(matrix_of_orders,'matrix_of_orders') @@ -1290,27 +1240,14 @@ def order_of_regions(textline_mask, contours_main, contours_header, y_ref): final_indexers_sorted = [] final_types = [] final_index_type = [] - for i in range(len(peaks_neg_new) - 1): - top = peaks_neg_new[i] - down = peaks_neg_new[i + 1] - indexes_in = matrix_of_orders[:, 0][(matrix_of_orders[:, 3] >= top) & - ((matrix_of_orders[:, 3] < down))] - cxs_in = matrix_of_orders[:, 2][(matrix_of_orders[:, 3] >= top) & - ((matrix_of_orders[:, 3] < down))] - cys_in = matrix_of_orders[:, 3][(matrix_of_orders[:, 3] >= top) & - ((matrix_of_orders[:, 3] < down))] - types_of_text = matrix_of_orders[:, 1][(matrix_of_orders[:, 3] >= top) & - (matrix_of_orders[:, 3] < down)] - index_types_of_text = matrix_of_orders[:, 4][(matrix_of_orders[:, 3] >= top) & - (matrix_of_orders[:, 3] < down)] + for top, bot in pairwise(peaks_neg_new): + indexes_in, types_in, cxs_in, cys_in, typed_indexes_in = \ + matrix_of_orders[(matrix_of_orders[:, 3] >= top) & + (matrix_of_orders[:, 3] < bot)].T sorted_inside = np.argsort(cxs_in) - ind_in_int = indexes_in[sorted_inside] - ind_in_type = types_of_text[sorted_inside] - ind_ind_type = index_types_of_text[sorted_inside] - for j in range(len(ind_in_int)): - final_indexers_sorted.append(int(ind_in_int[j])) - final_types.append(int(ind_in_type[j])) - final_index_type.append(int(ind_ind_type[j])) + final_indexers_sorted.extend(indexes_in[sorted_inside]) + final_types.extend(types_in[sorted_inside]) + final_index_type.extend(typed_indexes_in[sorted_inside]) ##matrix_of_orders[:len_main,4]=final_indexers_sorted[:] From 7387f5a92994bc5c2678be643816e5883f32cfa1 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 2 Oct 2025 22:35:40 +0200 Subject: [PATCH 320/492] do_order_of_regions: improve box matching, simplify - when searching for boxes matching contour, be more precise: - avoid heuristic rules ("xmin + 80 within xrange") in favour of exact criteria (contour properly contained in box) - for fallback criterion (nearest centers), also require proper containment of center in box - `order_of_regions`: remove (now) unnecessary (and insufficient) workaround for missing indexes (if boxes are not covering contours exactly) --- src/eynollah/eynollah.py | 185 ++++++++++++++++++--------------- src/eynollah/utils/__init__.py | 14 +-- 2 files changed, 106 insertions(+), 93 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 8351ab6..3194b66 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -2518,51 +2518,59 @@ class Eynollah: contours_only_text_parent = np.array(contours_only_text_parent) contours_only_text_parent_h = np.array(contours_only_text_parent_h) boxes = np.array(boxes, dtype=int) # to be on the safe side - cx_text_only, cy_text_only, x_min_text_only, _, _, _, y_cor_x_min_main = find_new_features_of_contours( + c_boxes = np.stack((0.5 * boxes[:, 2:4].sum(axis=1), + 0.5 * boxes[:, 0:2].sum(axis=1))) + cx_main, cy_main, mx_main, Mx_main, my_main, My_main, mxy_main = find_new_features_of_contours( contours_only_text_parent) - cx_text_only_h, cy_text_only_h, x_min_text_only_h, _, _, _, y_cor_x_min_main_h = find_new_features_of_contours( + cx_head, cy_head, mx_head, Mx_head, my_head, My_head, mxy_head = find_new_features_of_contours( contours_only_text_parent_h) try: arg_text_con = [] - for ii in range(len(cx_text_only)): + for ii in range(len(contours_only_text_parent)): check_if_textregion_located_in_a_box = False - for jj in range(len(boxes)): - if (x_min_text_only[ii] + 80 >= boxes[jj][0] and - x_min_text_only[ii] + 80 < boxes[jj][1] and - y_cor_x_min_main[ii] >= boxes[jj][2] and - y_cor_x_min_main[ii] < boxes[jj][3]): + for jj, box in enumerate(boxes): + if (mx_main[ii] >= box[0] and + Mx_main[ii] < box[1] and + my_main[ii] >= box[2] and + My_main[ii] < box[3]): arg_text_con.append(jj) check_if_textregion_located_in_a_box = True break if not check_if_textregion_located_in_a_box: - dists_tr_from_box = [math.sqrt((cx_text_only[ii] - boxes[jj][1]) ** 2 + - (cy_text_only[ii] - boxes[jj][2]) ** 2) - for jj in range(len(boxes))] - ind_min = np.argmin(dists_tr_from_box) + # dists_tr_from_box = [math.sqrt((cx_main[ii] - 0.5 * box[1] - 0.5 * box[0]) ** 2 + + # (cy_main[ii] - 0.5 * box[3] - 0.5 * box[2]) ** 2) + # for box in boxes] + dists_tr_from_box = np.linalg.norm(c_boxes - np.array([[cy_main[ii]], [cx_main[ii]]]), axis=0) + pcontained_in_box = ((boxes[:, 2] <= cy_main[ii]) & (cy_main[ii] < boxes[:, 3]) & + (boxes[:, 0] <= cx_main[ii]) & (cx_main[ii] < boxes[:, 1])) + ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) arg_text_con.append(ind_min) - args_contours = np.array(range(len(arg_text_con))) + args_contours = np.arange(len(arg_text_con)) + order_by_con_main = np.zeros(len(arg_text_con)) + arg_text_con_h = [] - for ii in range(len(cx_text_only_h)): + for ii in range(len(contours_only_text_parent_h)): check_if_textregion_located_in_a_box = False - for jj in range(len(boxes)): - if (x_min_text_only_h[ii] + 80 >= boxes[jj][0] and - x_min_text_only_h[ii] + 80 < boxes[jj][1] and - y_cor_x_min_main_h[ii] >= boxes[jj][2] and - y_cor_x_min_main_h[ii] < boxes[jj][3]): + for jj, box in enumerate(boxes): + if (mx_head[ii] >= box[0] and + Mx_head[ii] < box[1] and + my_head[ii] >= box[2] and + My_head[ii] < box[3]): arg_text_con_h.append(jj) check_if_textregion_located_in_a_box = True break if not check_if_textregion_located_in_a_box: - dists_tr_from_box = [math.sqrt((cx_text_only_h[ii] - boxes[jj][1]) ** 2 + - (cy_text_only_h[ii] - boxes[jj][2]) ** 2) - for jj in range(len(boxes))] - ind_min = np.argmin(dists_tr_from_box) + # dists_tr_from_box = [math.sqrt((cx_head[ii] - 0.5 * box[1] - 0.5 * box[0]) ** 2 + + # (cy_head[ii] - 0.5 * box[3] - 0.5 * box[2]) ** 2) + # for box in boxes] + dists_tr_from_box = np.linalg.norm(c_boxes - np.array([[cy_head[ii]], [cx_head[ii]]]), axis=0) + pcontained_in_box = ((boxes[:, 2] <= cy_head[ii]) & (cy_head[ii] < boxes[:, 3]) & + (boxes[:, 0] <= cx_head[ii]) & (cx_head[ii] < boxes[:, 1])) + ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) arg_text_con_h.append(ind_min) - args_contours_h = np.array(range(len(arg_text_con_h))) - + args_contours_h = np.arange(len(arg_text_con_h)) order_by_con_head = np.zeros(len(arg_text_con_h)) - order_by_con_main = np.zeros(len(arg_text_con)) ref_point = 0 order_of_texts_tot = [] @@ -2590,12 +2598,12 @@ class Eynollah: for zahler, _ in enumerate(args_contours_box): arg_order_v = indexes_sorted_main[zahler] order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = \ - np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + np.flatnonzero(indexes_sorted == arg_order_v) + ref_point for zahler, _ in enumerate(args_contours_box_h): arg_order_v = indexes_sorted_head[zahler] order_by_con_head[args_contours_box_h[indexes_by_type_head[zahler]]] = \ - np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + np.flatnonzero(indexes_sorted == arg_order_v) + ref_point for jji in range(len(id_of_texts)): order_of_texts_tot.append(order_of_texts[jji] + ref_point) @@ -2611,53 +2619,59 @@ class Eynollah: order_text_new = [] for iii in range(len(order_of_texts_tot)): - order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0]) + order_text_new.append(np.flatnonzero(np.array(order_of_texts_tot) == iii)) except Exception as why: self.logger.error(why) arg_text_con = [] - for ii in range(len(cx_text_only)): + for ii in range(len(contours_only_text_parent)): check_if_textregion_located_in_a_box = False - for jj in range(len(boxes)): - if (cx_text_only[ii] >= boxes[jj][0] and - cx_text_only[ii] < boxes[jj][1] and - cy_text_only[ii] >= boxes[jj][2] and - cy_text_only[ii] < boxes[jj][3]): + for jj, box in enumerate(boxes): + if (cx_main[ii] >= box[0] and + cx_main[ii] < box[1] and + cy_main[ii] >= box[2] and + cy_main[ii] < box[3]): # this is valid if the center of region identify in which box it is located arg_text_con.append(jj) check_if_textregion_located_in_a_box = True break if not check_if_textregion_located_in_a_box: - dists_tr_from_box = [math.sqrt((cx_text_only[ii] - boxes[jj][1]) ** 2 + - (cy_text_only[ii] - boxes[jj][2]) ** 2) - for jj in range(len(boxes))] - ind_min = np.argmin(dists_tr_from_box) + # dists_tr_from_box = [math.sqrt((cx_main[ii] - 0.5 * box[1] - 0.5 * box[0]) ** 2 + + # (cy_main[ii] - 0.5 * box[3] - 0.5 * box[2]) ** 2) + # for box in boxes] + dists_tr_from_box = np.linalg.norm(c_boxes - np.array([[cy_main[ii]], [cx_main[ii]]]), axis=0) + pcontained_in_box = ((boxes[:, 2] <= cy_main[ii]) & (cy_main[ii] < boxes[:, 3]) & + (boxes[:, 0] <= cx_main[ii]) & (cx_main[ii] < boxes[:, 1])) + ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) arg_text_con.append(ind_min) - args_contours = np.array(range(len(arg_text_con))) + args_contours = np.arange(len(arg_text_con)) order_by_con_main = np.zeros(len(arg_text_con)) ############################# head arg_text_con_h = [] - for ii in range(len(cx_text_only_h)): + for ii in range(len(contours_only_text_parent_h)): check_if_textregion_located_in_a_box = False - for jj in range(len(boxes)): - if (cx_text_only_h[ii] >= boxes[jj][0] and - cx_text_only_h[ii] < boxes[jj][1] and - cy_text_only_h[ii] >= boxes[jj][2] and - cy_text_only_h[ii] < boxes[jj][3]): + for jj, box in enumerate(boxes): + if (cx_head[ii] >= box[0] and + cx_head[ii] < box[1] and + cy_head[ii] >= box[2] and + cy_head[ii] < box[3]): # this is valid if the center of region identify in which box it is located arg_text_con_h.append(jj) check_if_textregion_located_in_a_box = True break if not check_if_textregion_located_in_a_box: - dists_tr_from_box = [math.sqrt((cx_text_only_h[ii] - boxes[jj][1]) ** 2 + - (cy_text_only_h[ii] - boxes[jj][2]) ** 2) - for jj in range(len(boxes))] - ind_min = np.argmin(dists_tr_from_box) + # dists_tr_from_box = [math.sqrt((cx_head[ii] - 0.5 * box[1] - 0.5 * box[0]) ** 2 + + # (cy_head[ii] - 0.5 * box[3] - 0.5 * box[2]) ** 2) + # for box in boxes] + dists_tr_from_box = np.linalg.norm(c_boxes - np.array([[cy_head[ii]], [cx_head[ii]]]), axis=0) + pcontained_in_box = ((boxes[:, 2] <= cy_head[ii]) & (cy_head[ii] < boxes[:, 3]) & + (boxes[:, 0] <= cx_head[ii]) & (cx_head[ii] < boxes[:, 1])) + ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) arg_text_con_h.append(ind_min) - args_contours_h = np.array(range(len(arg_text_con_h))) + args_contours_h = np.arange(len(arg_text_con_h)) order_by_con_head = np.zeros(len(arg_text_con_h)) ref_point = 0 @@ -2686,14 +2700,14 @@ class Eynollah: for zahler, _ in enumerate(args_contours_box): arg_order_v = indexes_sorted_main[zahler] order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = \ - np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + np.flatnonzero(indexes_sorted == arg_order_v) + ref_point for zahler, _ in enumerate(args_contours_box_h): arg_order_v = indexes_sorted_head[zahler] order_by_con_head[args_contours_box_h[indexes_by_type_head[zahler]]] = \ - np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + np.flatnonzero(indexes_sorted == arg_order_v) + ref_point - for jji, _ in enumerate(id_of_texts): + for jji in range(len(id_of_texts)): order_of_texts_tot.append(order_of_texts[jji] + ref_point) id_of_texts_tot.append(id_of_texts[jji]) ref_point += len(id_of_texts) @@ -2707,7 +2721,7 @@ class Eynollah: order_text_new = [] for iii in range(len(order_of_texts_tot)): - order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0]) + order_text_new.append(np.flatnonzero(np.array(order_of_texts_tot) == iii)) self.logger.debug("exit do_order_of_regions_full_layout") return order_text_new, id_of_texts_tot @@ -2719,28 +2733,33 @@ class Eynollah: contours_only_text_parent = np.array(contours_only_text_parent) contours_only_text_parent_h = np.array(contours_only_text_parent_h) boxes = np.array(boxes, dtype=int) # to be on the safe side - cx_text_only, cy_text_only, x_min_text_only, _, _, _, y_cor_x_min_main = find_new_features_of_contours( + c_boxes = np.stack((0.5 * boxes[:, 2:4].sum(axis=1), + 0.5 * boxes[:, 0:2].sum(axis=1))) + cx_main, cy_main, mx_main, Mx_main, my_main, My_main, mxy_main = find_new_features_of_contours( contours_only_text_parent) try: arg_text_con = [] - for ii in range(len(cx_text_only)): + for ii in range(len(contours_only_text_parent)): check_if_textregion_located_in_a_box = False - for jj in range(len(boxes)): - if (x_min_text_only[ii] + 80 >= boxes[jj][0] and - x_min_text_only[ii] + 80 < boxes[jj][1] and - y_cor_x_min_main[ii] >= boxes[jj][2] and - y_cor_x_min_main[ii] < boxes[jj][3]): + for jj, box in enumerate(boxes): + if (mx_main[ii] >= box[0] and + Mx_main[ii] < box[1] and + my_main[ii] >= box[2] and + My_main[ii] < box[3]): arg_text_con.append(jj) check_if_textregion_located_in_a_box = True break if not check_if_textregion_located_in_a_box: - dists_tr_from_box = [math.sqrt((cx_text_only[ii] - boxes[jj][1]) ** 2 + - (cy_text_only[ii] - boxes[jj][2]) ** 2) - for jj in range(len(boxes))] - ind_min = np.argmin(dists_tr_from_box) + # dists_tr_from_box = [math.sqrt((cx_main[ii] - 0.5 * box[1] - 0.5 * box[0]) ** 2 + + # (cy_main[ii] - 0.5 * box[3] - 0.5 * box[2]) ** 2) + # for box in boxes] + dists_tr_from_box = np.linalg.norm(c_boxes - np.array([[cy_main[ii]], [cx_main[ii]]]), axis=0) + pcontained_in_box = ((boxes[:, 2] <= cy_main[ii]) & (cy_main[ii] < boxes[:, 3]) & + (boxes[:, 0] <= cx_main[ii]) & (cx_main[ii] < boxes[:, 1])) + ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) arg_text_con.append(ind_min) - args_contours = np.array(range(len(arg_text_con))) + args_contours = np.arange(len(arg_text_con)) order_by_con_main = np.zeros(len(arg_text_con)) ref_point = 0 @@ -2766,7 +2785,7 @@ class Eynollah: for zahler, _ in enumerate(args_contours_box): arg_order_v = indexes_sorted_main[zahler] order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = \ - np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + np.flatnonzero(indexes_sorted == arg_order_v) + ref_point for jji, _ in enumerate(id_of_texts): order_of_texts_tot.append(order_of_texts[jji] + ref_point) @@ -2779,29 +2798,29 @@ class Eynollah: order_text_new = [] for iii in range(len(order_of_texts_tot)): - order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0]) + order_text_new.append(np.flatnonzero(np.array(order_of_texts_tot) == iii)) except Exception as why: self.logger.error(why) arg_text_con = [] - for ii in range(len(cx_text_only)): + for ii in range(len(contours_only_text_parent)): check_if_textregion_located_in_a_box = False - for jj in range(len(boxes)): - if (cx_text_only[ii] >= boxes[jj][0] and - cx_text_only[ii] < boxes[jj][1] and - cy_text_only[ii] >= boxes[jj][2] and - cy_text_only[ii] < boxes[jj][3]): + for jj, box in enumerate(boxes): + if (cx_main[ii] >= box[0] and + cx_main[ii] < box[1] and + cy_main[ii] >= box[2] and + cy_main[ii] < box[3]): # this is valid if the center of region identify in which box it is located arg_text_con.append(jj) check_if_textregion_located_in_a_box = True break if not check_if_textregion_located_in_a_box: - dists_tr_from_box = [math.sqrt((cx_text_only[ii] - boxes[jj][1]) ** 2 + - (cy_text_only[ii] - boxes[jj][2]) ** 2) - for jj in range(len(boxes))] - ind_min = np.argmin(dists_tr_from_box) - arg_text_con.append(ind_min) - args_contours = np.array(range(len(arg_text_con))) + dists_tr_from_box = np.linalg.norm(c_boxes - np.array([[cy_main[ii]], [cx_main[ii]]]), axis=0) + pcontained_in_box = ((boxes[:, 2] <= cy_main[ii]) & (cy_main[ii] < boxes[:, 3]) & + (boxes[:, 0] <= cx_main[ii]) & (cx_main[ii] < boxes[:, 1])) + ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) + arg_text_con[ii] = ind_min + args_contours = np.arange(len(contours_only_text_parent)) order_by_con_main = np.zeros(len(arg_text_con)) ref_point = 0 @@ -2829,7 +2848,7 @@ class Eynollah: for zahler, _ in enumerate(args_contours_box): arg_order_v = indexes_sorted_main[zahler] order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = \ - np.where(indexes_sorted == arg_order_v)[0][0] + ref_point + np.flatnonzero(indexes_sorted == arg_order_v) + ref_point for jji, _ in enumerate(id_of_texts): order_of_texts_tot.append(order_of_texts[jji] + ref_point) @@ -2843,7 +2862,7 @@ class Eynollah: order_text_new = [] for iii in range(len(order_of_texts_tot)): - order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0]) + order_text_new.append(np.flatnonzero(np.array(order_of_texts_tot) == iii)) self.logger.debug("exit do_order_of_regions_no_full_layout") return order_text_new, id_of_texts_tot diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 92da14a..6e5afd4 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1222,6 +1222,8 @@ def order_of_regions(textline_mask, contours_main, contours_head, y_ref): # offset from bbox of mask peaks_neg_new += y_ref + # assert not len(cy_main) or np.min(peaks_neg_new) <= np.min(cy_main) and np.max(cy_main) <= np.max(peaks_neg_new) + # assert not len(cy_head) or np.min(peaks_neg_new) <= np.min(cy_head) and np.max(cy_head) <= np.max(peaks_neg_new) matrix_of_orders = np.zeros((len(contours_main) + len(contours_head), 5), dtype=int) matrix_of_orders[:, 0] = np.arange(len(contours_main) + len(contours_head)) @@ -1251,16 +1253,8 @@ def order_of_regions(textline_mask, contours_main, contours_head, y_ref): ##matrix_of_orders[:len_main,4]=final_indexers_sorted[:] - # This fix is applied if the sum of the lengths of contours and contours_h - # does not match final_indexers_sorted. However, this is not the optimal solution.. - if len(cy_main) + len(cy_header) == len(final_index_type): - pass - else: - indexes_missed = set(np.arange(len(cy_main) + len(cy_header))) - set(final_indexers_sorted) - for ind_missed in indexes_missed: - final_indexers_sorted.append(ind_missed) - final_types.append(1) - final_index_type.append(ind_missed) + # assert len(final_indexers_sorted) == len(contours_main) + len(contours_head) + # assert not len(final_indexers_sorted) or max(final_index_type) == max(len(contours_main) return np.array(final_indexers_sorted), np.array(final_types), np.array(final_index_type) From e9bb62bd86747dabd5cd6fb5f67a36547c5c626d Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 2 Oct 2025 23:44:00 +0200 Subject: [PATCH 321/492] do_order_of_regions: simplify - avoid loops in favour of array processing --- src/eynollah/eynollah.py | 158 ++++++++++++++++----------------------- 1 file changed, 64 insertions(+), 94 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 3194b66..6a3fd1e 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -2526,7 +2526,7 @@ class Eynollah: contours_only_text_parent_h) try: - arg_text_con = [] + arg_text_con_main = np.zeros(len(contours_only_text_parent), dtype=int) for ii in range(len(contours_only_text_parent)): check_if_textregion_located_in_a_box = False for jj, box in enumerate(boxes): @@ -2534,7 +2534,7 @@ class Eynollah: Mx_main[ii] < box[1] and my_main[ii] >= box[2] and My_main[ii] < box[3]): - arg_text_con.append(jj) + arg_text_con_main[ii] = jj check_if_textregion_located_in_a_box = True break if not check_if_textregion_located_in_a_box: @@ -2545,11 +2545,11 @@ class Eynollah: pcontained_in_box = ((boxes[:, 2] <= cy_main[ii]) & (cy_main[ii] < boxes[:, 3]) & (boxes[:, 0] <= cx_main[ii]) & (cx_main[ii] < boxes[:, 1])) ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) - arg_text_con.append(ind_min) - args_contours = np.arange(len(arg_text_con)) - order_by_con_main = np.zeros(len(arg_text_con)) + arg_text_con_main[ii] = ind_min + args_contours_main = np.arange(len(contours_only_text_parent)) + order_by_con_main = np.zeros_like(arg_text_con_main) - arg_text_con_h = [] + arg_text_con_head = np.zeros(len(contours_only_text_parent_h), dtype=int) for ii in range(len(contours_only_text_parent_h)): check_if_textregion_located_in_a_box = False for jj, box in enumerate(boxes): @@ -2557,7 +2557,7 @@ class Eynollah: Mx_head[ii] < box[1] and my_head[ii] >= box[2] and My_head[ii] < box[3]): - arg_text_con_h.append(jj) + arg_text_con_head[ii] = jj check_if_textregion_located_in_a_box = True break if not check_if_textregion_located_in_a_box: @@ -2568,9 +2568,9 @@ class Eynollah: pcontained_in_box = ((boxes[:, 2] <= cy_head[ii]) & (cy_head[ii] < boxes[:, 3]) & (boxes[:, 0] <= cx_head[ii]) & (cx_head[ii] < boxes[:, 1])) ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) - arg_text_con_h.append(ind_min) - args_contours_h = np.arange(len(arg_text_con_h)) - order_by_con_head = np.zeros(len(arg_text_con_h)) + arg_text_con_head[ii] = ind_min + args_contours_head = np.arange(len(contours_only_text_parent_h)) + order_by_con_head = np.zeros_like(arg_text_con_head) ref_point = 0 order_of_texts_tot = [] @@ -2578,10 +2578,10 @@ class Eynollah: for iij, box in enumerate(boxes): ys = slice(*box[2:4]) xs = slice(*box[0:2]) - args_contours_box = args_contours[np.array(arg_text_con) == iij] - args_contours_box_h = args_contours_h[np.array(arg_text_con_h) == iij] - con_inter_box = contours_only_text_parent[args_contours_box] - con_inter_box_h = contours_only_text_parent_h[args_contours_box_h] + args_contours_box_main = args_contours_main[arg_text_con_main == iij] + args_contours_box_head = args_contours_head[arg_text_con_head == iij] + con_inter_box = contours_only_text_parent[args_contours_box_main] + con_inter_box_h = contours_only_text_parent_h[args_contours_box_head] indexes_sorted, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, box[2]) @@ -2595,14 +2595,14 @@ class Eynollah: indexes_sorted_head = indexes_sorted[kind_of_texts_sorted == 2] indexes_by_type_head = index_by_kind_sorted[kind_of_texts_sorted == 2] - for zahler, _ in enumerate(args_contours_box): + for zahler, _ in enumerate(args_contours_box_main): arg_order_v = indexes_sorted_main[zahler] - order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = \ + order_by_con_main[args_contours_box_main[indexes_by_type_main[zahler]]] = \ np.flatnonzero(indexes_sorted == arg_order_v) + ref_point - for zahler, _ in enumerate(args_contours_box_h): + for zahler, _ in enumerate(args_contours_box_head): arg_order_v = indexes_sorted_head[zahler] - order_by_con_head[args_contours_box_h[indexes_by_type_head[zahler]]] = \ + order_by_con_head[args_contours_box_head[indexes_by_type_head[zahler]]] = \ np.flatnonzero(indexes_sorted == arg_order_v) + ref_point for jji in range(len(id_of_texts)): @@ -2610,20 +2610,13 @@ class Eynollah: id_of_texts_tot.append(id_of_texts[jji]) ref_point += len(id_of_texts) - order_of_texts_tot = [] - for tj1 in range(len(contours_only_text_parent)): - order_of_texts_tot.append(int(order_by_con_main[tj1])) - - for tj1 in range(len(contours_only_text_parent_h)): - order_of_texts_tot.append(int(order_by_con_head[tj1])) - - order_text_new = [] - for iii in range(len(order_of_texts_tot)): - order_text_new.append(np.flatnonzero(np.array(order_of_texts_tot) == iii)) + order_of_texts_tot = np.concatenate((order_by_con_main, + order_by_con_head)) + order_text_new = np.argsort(order_of_texts_tot) except Exception as why: self.logger.error(why) - arg_text_con = [] + arg_text_con_main = np.zeros(len(contours_only_text_parent), dtype=int) for ii in range(len(contours_only_text_parent)): check_if_textregion_located_in_a_box = False for jj, box in enumerate(boxes): @@ -2632,10 +2625,9 @@ class Eynollah: cy_main[ii] >= box[2] and cy_main[ii] < box[3]): # this is valid if the center of region identify in which box it is located - arg_text_con.append(jj) + arg_text_con_main[ii] = jj check_if_textregion_located_in_a_box = True break - if not check_if_textregion_located_in_a_box: # dists_tr_from_box = [math.sqrt((cx_main[ii] - 0.5 * box[1] - 0.5 * box[0]) ** 2 + # (cy_main[ii] - 0.5 * box[3] - 0.5 * box[2]) ** 2) @@ -2644,13 +2636,11 @@ class Eynollah: pcontained_in_box = ((boxes[:, 2] <= cy_main[ii]) & (cy_main[ii] < boxes[:, 3]) & (boxes[:, 0] <= cx_main[ii]) & (cx_main[ii] < boxes[:, 1])) ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) - arg_text_con.append(ind_min) - args_contours = np.arange(len(arg_text_con)) - order_by_con_main = np.zeros(len(arg_text_con)) + arg_text_con_main[ii] = ind_min + args_contours_main = np.arange(len(contours_only_text_parent)) + order_by_con_main = np.zeros_like(arg_text_con_main) - ############################# head - - arg_text_con_h = [] + arg_text_con_head = np.zeros(len(contours_only_text_parent_h), dtype=int) for ii in range(len(contours_only_text_parent_h)): check_if_textregion_located_in_a_box = False for jj, box in enumerate(boxes): @@ -2659,7 +2649,7 @@ class Eynollah: cy_head[ii] >= box[2] and cy_head[ii] < box[3]): # this is valid if the center of region identify in which box it is located - arg_text_con_h.append(jj) + arg_text_con_head[ii] = jj check_if_textregion_located_in_a_box = True break if not check_if_textregion_located_in_a_box: @@ -2670,9 +2660,9 @@ class Eynollah: pcontained_in_box = ((boxes[:, 2] <= cy_head[ii]) & (cy_head[ii] < boxes[:, 3]) & (boxes[:, 0] <= cx_head[ii]) & (cx_head[ii] < boxes[:, 1])) ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) - arg_text_con_h.append(ind_min) - args_contours_h = np.arange(len(arg_text_con_h)) - order_by_con_head = np.zeros(len(arg_text_con_h)) + arg_text_con_head[ii] = ind_min + args_contours_head = np.arange(len(contours_only_text_parent_h)) + order_by_con_head = np.zeros_like(arg_text_con_head) ref_point = 0 order_of_texts_tot = [] @@ -2680,10 +2670,10 @@ class Eynollah: for iij, box in enumerate(boxes): ys = slice(*box[2:4]) xs = slice(*box[0:2]) - args_contours_box = args_contours[np.array(arg_text_con) == iij] - args_contours_box_h = args_contours_h[np.array(arg_text_con_h) == iij] - con_inter_box = contours_only_text_parent[args_contours_box] - con_inter_box_h = contours_only_text_parent_h[args_contours_box_h] + args_contours_box_main = args_contours_main[arg_text_con_main == iij] + args_contours_box_head = args_contours_head[arg_text_con_head == iij] + con_inter_box = contours_only_text_parent[args_contours_box_main] + con_inter_box_h = contours_only_text_parent_h[args_contours_box_head] indexes_sorted, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, box[2]) @@ -2697,14 +2687,14 @@ class Eynollah: indexes_sorted_head = indexes_sorted[kind_of_texts_sorted == 2] indexes_by_type_head = index_by_kind_sorted[kind_of_texts_sorted == 2] - for zahler, _ in enumerate(args_contours_box): + for zahler, _ in enumerate(args_contours_box_main): arg_order_v = indexes_sorted_main[zahler] - order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = \ + order_by_con_main[args_contours_box_main[indexes_by_type_main[zahler]]] = \ np.flatnonzero(indexes_sorted == arg_order_v) + ref_point - for zahler, _ in enumerate(args_contours_box_h): + for zahler, _ in enumerate(args_contours_box_head): arg_order_v = indexes_sorted_head[zahler] - order_by_con_head[args_contours_box_h[indexes_by_type_head[zahler]]] = \ + order_by_con_head[args_contours_box_head[indexes_by_type_head[zahler]]] = \ np.flatnonzero(indexes_sorted == arg_order_v) + ref_point for jji in range(len(id_of_texts)): @@ -2712,16 +2702,9 @@ class Eynollah: id_of_texts_tot.append(id_of_texts[jji]) ref_point += len(id_of_texts) - order_of_texts_tot = [] - for tj1 in range(len(contours_only_text_parent)): - order_of_texts_tot.append(int(order_by_con_main[tj1])) - - for tj1 in range(len(contours_only_text_parent_h)): - order_of_texts_tot.append(int(order_by_con_head[tj1])) - - order_text_new = [] - for iii in range(len(order_of_texts_tot)): - order_text_new.append(np.flatnonzero(np.array(order_of_texts_tot) == iii)) + order_of_texts_tot = np.concatenate((order_by_con_main, + order_by_con_head)) + order_text_new = np.argsort(order_of_texts_tot) self.logger.debug("exit do_order_of_regions_full_layout") return order_text_new, id_of_texts_tot @@ -2739,7 +2722,7 @@ class Eynollah: contours_only_text_parent) try: - arg_text_con = [] + arg_text_con_main = np.zeros(len(contours_only_text_parent), dtype=int) for ii in range(len(contours_only_text_parent)): check_if_textregion_located_in_a_box = False for jj, box in enumerate(boxes): @@ -2747,7 +2730,7 @@ class Eynollah: Mx_main[ii] < box[1] and my_main[ii] >= box[2] and My_main[ii] < box[3]): - arg_text_con.append(jj) + arg_text_con_main[ii] = jj check_if_textregion_located_in_a_box = True break if not check_if_textregion_located_in_a_box: @@ -2758,9 +2741,9 @@ class Eynollah: pcontained_in_box = ((boxes[:, 2] <= cy_main[ii]) & (cy_main[ii] < boxes[:, 3]) & (boxes[:, 0] <= cx_main[ii]) & (cx_main[ii] < boxes[:, 1])) ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) - arg_text_con.append(ind_min) - args_contours = np.arange(len(arg_text_con)) - order_by_con_main = np.zeros(len(arg_text_con)) + arg_text_con_main[ii] = ind_min + args_contours_main = np.arange(len(contours_only_text_parent)) + order_by_con_main = np.zeros_like(arg_text_con_main) ref_point = 0 order_of_texts_tot = [] @@ -2768,8 +2751,8 @@ class Eynollah: for iij, box in enumerate(boxes): ys = slice(*box[2:4]) xs = slice(*box[0:2]) - args_contours_box = args_contours[np.array(arg_text_con) == iij] - con_inter_box = contours_only_text_parent[args_contours_box] + args_contours_box_main = args_contours_main[arg_text_con_main == iij] + con_inter_box = contours_only_text_parent[args_contours_box_main] con_inter_box_h = [] indexes_sorted, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( @@ -2782,9 +2765,9 @@ class Eynollah: indexes_sorted_main = indexes_sorted[kind_of_texts_sorted == 1] indexes_by_type_main = index_by_kind_sorted[kind_of_texts_sorted == 1] - for zahler, _ in enumerate(args_contours_box): + for zahler, _ in enumerate(args_contours_box_main): arg_order_v = indexes_sorted_main[zahler] - order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = \ + order_by_con_main[args_contours_box_main[indexes_by_type_main[zahler]]] = \ np.flatnonzero(indexes_sorted == arg_order_v) + ref_point for jji, _ in enumerate(id_of_texts): @@ -2792,17 +2775,12 @@ class Eynollah: id_of_texts_tot.append(id_of_texts[jji]) ref_point += len(id_of_texts) - order_of_texts_tot = [] - for tj1 in range(len(contours_only_text_parent)): - order_of_texts_tot.append(int(order_by_con_main[tj1])) - - order_text_new = [] - for iii in range(len(order_of_texts_tot)): - order_text_new.append(np.flatnonzero(np.array(order_of_texts_tot) == iii)) + order_of_texts_tot = order_by_con_main + order_text_new = np.argsort(order_of_texts_tot) except Exception as why: self.logger.error(why) - arg_text_con = [] + arg_text_con_main = np.zeros(len(contours_only_text_parent), dtype=int) for ii in range(len(contours_only_text_parent)): check_if_textregion_located_in_a_box = False for jj, box in enumerate(boxes): @@ -2811,7 +2789,7 @@ class Eynollah: cy_main[ii] >= box[2] and cy_main[ii] < box[3]): # this is valid if the center of region identify in which box it is located - arg_text_con.append(jj) + arg_text_con_main[ii] = jj check_if_textregion_located_in_a_box = True break if not check_if_textregion_located_in_a_box: @@ -2819,9 +2797,9 @@ class Eynollah: pcontained_in_box = ((boxes[:, 2] <= cy_main[ii]) & (cy_main[ii] < boxes[:, 3]) & (boxes[:, 0] <= cx_main[ii]) & (cx_main[ii] < boxes[:, 1])) ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) - arg_text_con[ii] = ind_min - args_contours = np.arange(len(contours_only_text_parent)) - order_by_con_main = np.zeros(len(arg_text_con)) + arg_text_con_main[ii] = ind_min + args_contours_main = np.arange(len(contours_only_text_parent)) + order_by_con_main = np.zeros_like(arg_text_con_main) ref_point = 0 order_of_texts_tot = [] @@ -2829,11 +2807,9 @@ class Eynollah: for iij, box in enumerate(boxes): ys = slice(*box[2:4]) xs = slice(*box[0:2]) - args_contours_box = args_contours[np.array(arg_text_con) == iij] - con_inter_box = [] + args_contours_box_main = args_contours_main[arg_text_con_main == iij] + con_inter_box = contours_only_text_parent[args_contours_box_main] con_inter_box_h = [] - for i in range(len(args_contours_box)): - con_inter_box.append(contours_only_text_parent[args_contours_box[i]]) indexes_sorted, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, box[2]) @@ -2845,9 +2821,9 @@ class Eynollah: indexes_sorted_main = indexes_sorted[kind_of_texts_sorted == 1] indexes_by_type_main = index_by_kind_sorted[kind_of_texts_sorted == 1] - for zahler, _ in enumerate(args_contours_box): + for zahler, _ in enumerate(args_contours_box_main): arg_order_v = indexes_sorted_main[zahler] - order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = \ + order_by_con_main[args_contours_box_main[indexes_by_type_main[zahler]]] = \ np.flatnonzero(indexes_sorted == arg_order_v) + ref_point for jji, _ in enumerate(id_of_texts): @@ -2855,14 +2831,8 @@ class Eynollah: id_of_texts_tot.append(id_of_texts[jji]) ref_point += len(id_of_texts) - order_of_texts_tot = [] - - for tj1 in range(len(contours_only_text_parent)): - order_of_texts_tot.append(int(order_by_con_main[tj1])) - - order_text_new = [] - for iii in range(len(order_of_texts_tot)): - order_text_new.append(np.flatnonzero(np.array(order_of_texts_tot) == iii)) + order_of_texts_tot = order_by_con_main + order_text_new = np.argsort(order_of_texts_tot) self.logger.debug("exit do_order_of_regions_no_full_layout") return order_text_new, id_of_texts_tot From e674ea08f383de0c87f950be153fc954c3b4308e Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Fri, 3 Oct 2025 00:59:25 +0200 Subject: [PATCH 322/492] do_order_of_regions: drop redundant no/full_layout (`_no_full_layout` is the same copied code as `_full_layout`; the latter runs just the same if passed an empty list for headings) --- src/eynollah/eynollah.py | 141 ++------------------------------------ src/eynollah/utils/xml.py | 4 +- 2 files changed, 6 insertions(+), 139 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 6a3fd1e..629b001 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -2511,10 +2511,10 @@ class Eynollah: self.logger.debug("exit get_regions_from_xy_2models") return text_regions_p_true, erosion_hurts, polygons_seplines - def do_order_of_regions_full_layout( + def do_order_of_regions( self, contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot): - self.logger.debug("enter do_order_of_regions_full_layout") + self.logger.debug("enter do_order_of_regions") contours_only_text_parent = np.array(contours_only_text_parent) contours_only_text_parent_h = np.array(contours_only_text_parent_h) boxes = np.array(boxes, dtype=int) # to be on the safe side @@ -2706,135 +2706,7 @@ class Eynollah: order_by_con_head)) order_text_new = np.argsort(order_of_texts_tot) - self.logger.debug("exit do_order_of_regions_full_layout") - return order_text_new, id_of_texts_tot - - def do_order_of_regions_no_full_layout( - self, contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot): - - self.logger.debug("enter do_order_of_regions_no_full_layout") - contours_only_text_parent = np.array(contours_only_text_parent) - contours_only_text_parent_h = np.array(contours_only_text_parent_h) - boxes = np.array(boxes, dtype=int) # to be on the safe side - c_boxes = np.stack((0.5 * boxes[:, 2:4].sum(axis=1), - 0.5 * boxes[:, 0:2].sum(axis=1))) - cx_main, cy_main, mx_main, Mx_main, my_main, My_main, mxy_main = find_new_features_of_contours( - contours_only_text_parent) - - try: - arg_text_con_main = np.zeros(len(contours_only_text_parent), dtype=int) - for ii in range(len(contours_only_text_parent)): - check_if_textregion_located_in_a_box = False - for jj, box in enumerate(boxes): - if (mx_main[ii] >= box[0] and - Mx_main[ii] < box[1] and - my_main[ii] >= box[2] and - My_main[ii] < box[3]): - arg_text_con_main[ii] = jj - check_if_textregion_located_in_a_box = True - break - if not check_if_textregion_located_in_a_box: - # dists_tr_from_box = [math.sqrt((cx_main[ii] - 0.5 * box[1] - 0.5 * box[0]) ** 2 + - # (cy_main[ii] - 0.5 * box[3] - 0.5 * box[2]) ** 2) - # for box in boxes] - dists_tr_from_box = np.linalg.norm(c_boxes - np.array([[cy_main[ii]], [cx_main[ii]]]), axis=0) - pcontained_in_box = ((boxes[:, 2] <= cy_main[ii]) & (cy_main[ii] < boxes[:, 3]) & - (boxes[:, 0] <= cx_main[ii]) & (cx_main[ii] < boxes[:, 1])) - ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) - arg_text_con_main[ii] = ind_min - args_contours_main = np.arange(len(contours_only_text_parent)) - order_by_con_main = np.zeros_like(arg_text_con_main) - - ref_point = 0 - order_of_texts_tot = [] - id_of_texts_tot = [] - for iij, box in enumerate(boxes): - ys = slice(*box[2:4]) - xs = slice(*box[0:2]) - args_contours_box_main = args_contours_main[arg_text_con_main == iij] - con_inter_box = contours_only_text_parent[args_contours_box_main] - con_inter_box_h = [] - - indexes_sorted, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( - textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, box[2]) - - order_of_texts, id_of_texts = order_and_id_of_texts( - con_inter_box, con_inter_box_h, - indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) - - indexes_sorted_main = indexes_sorted[kind_of_texts_sorted == 1] - indexes_by_type_main = index_by_kind_sorted[kind_of_texts_sorted == 1] - - for zahler, _ in enumerate(args_contours_box_main): - arg_order_v = indexes_sorted_main[zahler] - order_by_con_main[args_contours_box_main[indexes_by_type_main[zahler]]] = \ - np.flatnonzero(indexes_sorted == arg_order_v) + ref_point - - for jji, _ in enumerate(id_of_texts): - order_of_texts_tot.append(order_of_texts[jji] + ref_point) - id_of_texts_tot.append(id_of_texts[jji]) - ref_point += len(id_of_texts) - - order_of_texts_tot = order_by_con_main - order_text_new = np.argsort(order_of_texts_tot) - - except Exception as why: - self.logger.error(why) - arg_text_con_main = np.zeros(len(contours_only_text_parent), dtype=int) - for ii in range(len(contours_only_text_parent)): - check_if_textregion_located_in_a_box = False - for jj, box in enumerate(boxes): - if (cx_main[ii] >= box[0] and - cx_main[ii] < box[1] and - cy_main[ii] >= box[2] and - cy_main[ii] < box[3]): - # this is valid if the center of region identify in which box it is located - arg_text_con_main[ii] = jj - check_if_textregion_located_in_a_box = True - break - if not check_if_textregion_located_in_a_box: - dists_tr_from_box = np.linalg.norm(c_boxes - np.array([[cy_main[ii]], [cx_main[ii]]]), axis=0) - pcontained_in_box = ((boxes[:, 2] <= cy_main[ii]) & (cy_main[ii] < boxes[:, 3]) & - (boxes[:, 0] <= cx_main[ii]) & (cx_main[ii] < boxes[:, 1])) - ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) - arg_text_con_main[ii] = ind_min - args_contours_main = np.arange(len(contours_only_text_parent)) - order_by_con_main = np.zeros_like(arg_text_con_main) - - ref_point = 0 - order_of_texts_tot = [] - id_of_texts_tot = [] - for iij, box in enumerate(boxes): - ys = slice(*box[2:4]) - xs = slice(*box[0:2]) - args_contours_box_main = args_contours_main[arg_text_con_main == iij] - con_inter_box = contours_only_text_parent[args_contours_box_main] - con_inter_box_h = [] - - indexes_sorted, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( - textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, box[2]) - - order_of_texts, id_of_texts = order_and_id_of_texts( - con_inter_box, con_inter_box_h, - indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) - - indexes_sorted_main = indexes_sorted[kind_of_texts_sorted == 1] - indexes_by_type_main = index_by_kind_sorted[kind_of_texts_sorted == 1] - - for zahler, _ in enumerate(args_contours_box_main): - arg_order_v = indexes_sorted_main[zahler] - order_by_con_main[args_contours_box_main[indexes_by_type_main[zahler]]] = \ - np.flatnonzero(indexes_sorted == arg_order_v) + ref_point - - for jji, _ in enumerate(id_of_texts): - order_of_texts_tot.append(order_of_texts[jji] + ref_point) - id_of_texts_tot.append(id_of_texts[jji]) - ref_point += len(id_of_texts) - - order_of_texts_tot = order_by_con_main - order_text_new = np.argsort(order_of_texts_tot) - - self.logger.debug("exit do_order_of_regions_no_full_layout") + self.logger.debug("exit do_order_of_regions") return order_text_new, id_of_texts_tot def check_iou_of_bounding_box_and_contour_for_tables( @@ -3081,11 +2953,6 @@ class Eynollah: image_revised_last[int(y_min_tab_col1[ijv]):int(y_max_tab_col1[ijv]),:,:]=pixel_table return image_revised_last - def do_order_of_regions(self, *args, **kwargs): - if self.full_layout: - return self.do_order_of_regions_full_layout(*args, **kwargs) - return self.do_order_of_regions_no_full_layout(*args, **kwargs) - def get_tables_from_model(self, img, num_col_classifier): img_org = np.copy(img) img_height_h = img_org.shape[0] @@ -5170,7 +5037,7 @@ class Eynollah: return pcgts - contours_only_text_parent_h = None + contours_only_text_parent_h = [] self.logger.info("Step 4/5: Reading Order Detection") if self.reading_order_machine_based: diff --git a/src/eynollah/utils/xml.py b/src/eynollah/utils/xml.py index a61dadb..88d1df8 100644 --- a/src/eynollah/utils/xml.py +++ b/src/eynollah/utils/xml.py @@ -57,8 +57,8 @@ def xml_reading_order(page, order_of_texts, id_of_marginalia_left, id_of_margina og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=id_marginal)) region_counter.inc('region') - for idx_textregion, _ in enumerate(order_of_texts): - og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=region_counter.region_id(order_of_texts[idx_textregion] + 1))) + for idx_textregion in order_of_texts: + og.add_RegionRefIndexed(RegionRefIndexedType(index=str(region_counter.get('region')), regionRef=region_counter.region_id(idx_textregion + 1))) region_counter.inc('region') for id_marginal in id_of_marginalia_right: From 29b4527bdebf6583f32b8801aed26f6ae70d25c7 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Fri, 3 Oct 2025 02:06:08 +0200 Subject: [PATCH 323/492] do_order_of_regions: simplify - remove duplicate code via inline def for the try-catch --- src/eynollah/eynollah.py | 127 +++++++-------------------------------- 1 file changed, 22 insertions(+), 105 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 629b001..bb3d1bf 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -2525,22 +2525,23 @@ class Eynollah: cx_head, cy_head, mx_head, Mx_head, my_head, My_head, mxy_head = find_new_features_of_contours( contours_only_text_parent_h) - try: + def match_boxes(only_centers: bool): arg_text_con_main = np.zeros(len(contours_only_text_parent), dtype=int) for ii in range(len(contours_only_text_parent)): check_if_textregion_located_in_a_box = False for jj, box in enumerate(boxes): - if (mx_main[ii] >= box[0] and - Mx_main[ii] < box[1] and - my_main[ii] >= box[2] and - My_main[ii] < box[3]): + if ((cx_main[ii] >= box[0] and + cx_main[ii] < box[1] and + cy_main[ii] >= box[2] and + cy_main[ii] < box[3]) if only_centers else + (mx_main[ii] >= box[0] and + Mx_main[ii] < box[1] and + my_main[ii] >= box[2] and + My_main[ii] < box[3])): arg_text_con_main[ii] = jj check_if_textregion_located_in_a_box = True break if not check_if_textregion_located_in_a_box: - # dists_tr_from_box = [math.sqrt((cx_main[ii] - 0.5 * box[1] - 0.5 * box[0]) ** 2 + - # (cy_main[ii] - 0.5 * box[3] - 0.5 * box[2]) ** 2) - # for box in boxes] dists_tr_from_box = np.linalg.norm(c_boxes - np.array([[cy_main[ii]], [cx_main[ii]]]), axis=0) pcontained_in_box = ((boxes[:, 2] <= cy_main[ii]) & (cy_main[ii] < boxes[:, 3]) & (boxes[:, 0] <= cx_main[ii]) & (cx_main[ii] < boxes[:, 1])) @@ -2553,17 +2554,18 @@ class Eynollah: for ii in range(len(contours_only_text_parent_h)): check_if_textregion_located_in_a_box = False for jj, box in enumerate(boxes): - if (mx_head[ii] >= box[0] and - Mx_head[ii] < box[1] and - my_head[ii] >= box[2] and - My_head[ii] < box[3]): + if ((cx_head[ii] >= box[0] and + cx_head[ii] < box[1] and + cy_head[ii] >= box[2] and + cy_head[ii] < box[3]) if only_centers else + (mx_head[ii] >= box[0] and + Mx_head[ii] < box[1] and + my_head[ii] >= box[2] and + My_head[ii] < box[3])): arg_text_con_head[ii] = jj check_if_textregion_located_in_a_box = True break if not check_if_textregion_located_in_a_box: - # dists_tr_from_box = [math.sqrt((cx_head[ii] - 0.5 * box[1] - 0.5 * box[0]) ** 2 + - # (cy_head[ii] - 0.5 * box[3] - 0.5 * box[2]) ** 2) - # for box in boxes] dists_tr_from_box = np.linalg.norm(c_boxes - np.array([[cy_head[ii]], [cx_head[ii]]]), axis=0) pcontained_in_box = ((boxes[:, 2] <= cy_head[ii]) & (cy_head[ii] < boxes[:, 3]) & (boxes[:, 0] <= cx_head[ii]) & (cx_head[ii] < boxes[:, 1])) @@ -2613,101 +2615,16 @@ class Eynollah: order_of_texts_tot = np.concatenate((order_by_con_main, order_by_con_head)) order_text_new = np.argsort(order_of_texts_tot) + return order_text_new, id_of_texts_tot + try: + results = match_boxes(False) except Exception as why: self.logger.error(why) - arg_text_con_main = np.zeros(len(contours_only_text_parent), dtype=int) - for ii in range(len(contours_only_text_parent)): - check_if_textregion_located_in_a_box = False - for jj, box in enumerate(boxes): - if (cx_main[ii] >= box[0] and - cx_main[ii] < box[1] and - cy_main[ii] >= box[2] and - cy_main[ii] < box[3]): - # this is valid if the center of region identify in which box it is located - arg_text_con_main[ii] = jj - check_if_textregion_located_in_a_box = True - break - if not check_if_textregion_located_in_a_box: - # dists_tr_from_box = [math.sqrt((cx_main[ii] - 0.5 * box[1] - 0.5 * box[0]) ** 2 + - # (cy_main[ii] - 0.5 * box[3] - 0.5 * box[2]) ** 2) - # for box in boxes] - dists_tr_from_box = np.linalg.norm(c_boxes - np.array([[cy_main[ii]], [cx_main[ii]]]), axis=0) - pcontained_in_box = ((boxes[:, 2] <= cy_main[ii]) & (cy_main[ii] < boxes[:, 3]) & - (boxes[:, 0] <= cx_main[ii]) & (cx_main[ii] < boxes[:, 1])) - ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) - arg_text_con_main[ii] = ind_min - args_contours_main = np.arange(len(contours_only_text_parent)) - order_by_con_main = np.zeros_like(arg_text_con_main) - - arg_text_con_head = np.zeros(len(contours_only_text_parent_h), dtype=int) - for ii in range(len(contours_only_text_parent_h)): - check_if_textregion_located_in_a_box = False - for jj, box in enumerate(boxes): - if (cx_head[ii] >= box[0] and - cx_head[ii] < box[1] and - cy_head[ii] >= box[2] and - cy_head[ii] < box[3]): - # this is valid if the center of region identify in which box it is located - arg_text_con_head[ii] = jj - check_if_textregion_located_in_a_box = True - break - if not check_if_textregion_located_in_a_box: - # dists_tr_from_box = [math.sqrt((cx_head[ii] - 0.5 * box[1] - 0.5 * box[0]) ** 2 + - # (cy_head[ii] - 0.5 * box[3] - 0.5 * box[2]) ** 2) - # for box in boxes] - dists_tr_from_box = np.linalg.norm(c_boxes - np.array([[cy_head[ii]], [cx_head[ii]]]), axis=0) - pcontained_in_box = ((boxes[:, 2] <= cy_head[ii]) & (cy_head[ii] < boxes[:, 3]) & - (boxes[:, 0] <= cx_head[ii]) & (cx_head[ii] < boxes[:, 1])) - ind_min = np.argmin(np.ma.masked_array(dists_tr_from_box, ~pcontained_in_box)) - arg_text_con_head[ii] = ind_min - args_contours_head = np.arange(len(contours_only_text_parent_h)) - order_by_con_head = np.zeros_like(arg_text_con_head) - - ref_point = 0 - order_of_texts_tot = [] - id_of_texts_tot = [] - for iij, box in enumerate(boxes): - ys = slice(*box[2:4]) - xs = slice(*box[0:2]) - args_contours_box_main = args_contours_main[arg_text_con_main == iij] - args_contours_box_head = args_contours_head[arg_text_con_head == iij] - con_inter_box = contours_only_text_parent[args_contours_box_main] - con_inter_box_h = contours_only_text_parent_h[args_contours_box_head] - - indexes_sorted, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions( - textline_mask_tot[ys, xs], con_inter_box, con_inter_box_h, box[2]) - - order_of_texts, id_of_texts = order_and_id_of_texts( - con_inter_box, con_inter_box_h, - indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point) - - indexes_sorted_main = indexes_sorted[kind_of_texts_sorted == 1] - indexes_by_type_main = index_by_kind_sorted[kind_of_texts_sorted == 1] - indexes_sorted_head = indexes_sorted[kind_of_texts_sorted == 2] - indexes_by_type_head = index_by_kind_sorted[kind_of_texts_sorted == 2] - - for zahler, _ in enumerate(args_contours_box_main): - arg_order_v = indexes_sorted_main[zahler] - order_by_con_main[args_contours_box_main[indexes_by_type_main[zahler]]] = \ - np.flatnonzero(indexes_sorted == arg_order_v) + ref_point - - for zahler, _ in enumerate(args_contours_box_head): - arg_order_v = indexes_sorted_head[zahler] - order_by_con_head[args_contours_box_head[indexes_by_type_head[zahler]]] = \ - np.flatnonzero(indexes_sorted == arg_order_v) + ref_point - - for jji in range(len(id_of_texts)): - order_of_texts_tot.append(order_of_texts[jji] + ref_point) - id_of_texts_tot.append(id_of_texts[jji]) - ref_point += len(id_of_texts) - - order_of_texts_tot = np.concatenate((order_by_con_main, - order_by_con_head)) - order_text_new = np.argsort(order_of_texts_tot) + results = match_boxes(True) self.logger.debug("exit do_order_of_regions") - return order_text_new, id_of_texts_tot + return results def check_iou_of_bounding_box_and_contour_for_tables( self, layout, table_prediction_early, pixel_table, num_col_classifier): From d774a23daa80cad0baa16dc4b41e93b93bca39bf Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Sun, 5 Oct 2025 02:18:17 +0200 Subject: [PATCH 324/492] matching deskewed text region contours with predicted: simplify - avoid loops in favour of array processing - improve readability and identifiers --- src/eynollah/eynollah.py | 108 +++++++++++++++------------------------ 1 file changed, 40 insertions(+), 68 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index bb3d1bf..dd6172a 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4559,27 +4559,16 @@ class Eynollah: areas_cnt_text = np.array([cv2.contourArea(c) for c in contours_only_text_parent]) areas_cnt_text = areas_cnt_text / float(text_only.shape[0] * text_only.shape[1]) #self.logger.info('areas_cnt_text %s', areas_cnt_text) - contours_biggest = contours_only_text_parent[np.argmax(areas_cnt_text)] - contours_only_text_parent = [c for jz, c in enumerate(contours_only_text_parent) - if areas_cnt_text[jz] > MIN_AREA_REGION] - areas_cnt_text_parent = [area for area in areas_cnt_text if area > MIN_AREA_REGION] + contour0 = contours_only_text_parent[np.argmax(areas_cnt_text)] + contours_only_text_parent = np.array(contours_only_text_parent)[areas_cnt_text > MIN_AREA_REGION] + areas_cnt_text_parent = areas_cnt_text[areas_cnt_text > MIN_AREA_REGION] + index_con_parents = np.argsort(areas_cnt_text_parent) + contours_only_text_parent = contours_only_text_parent[index_con_parents] + areas_cnt_text_parent = areas_cnt_text_parent[index_con_parents] - contours_only_text_parent = self.return_list_of_contours_with_desired_order( - contours_only_text_parent, index_con_parents) - - ##try: - ##contours_only_text_parent = \ - ##list(np.array(contours_only_text_parent,dtype=object)[index_con_parents]) - ##except: - ##contours_only_text_parent = \ - ##list(np.array(contours_only_text_parent,dtype=np.int32)[index_con_parents]) - ##areas_cnt_text_parent = list(np.array(areas_cnt_text_parent)[index_con_parents]) - areas_cnt_text_parent = self.return_list_of_contours_with_desired_order( - areas_cnt_text_parent, index_con_parents) - - cx_bigest_big, cy_biggest_big = find_center_of_contours([contours_biggest]) - cx_bigest, cy_biggest = find_center_of_contours(contours_only_text_parent) + center0 = np.stack(find_center_of_contours([contour0])) # [2, 1] + centers = np.stack(find_center_of_contours(contours_only_text_parent)) # [2, N] if np.abs(slope_deskew) >= SLOPE_THRESHOLD: contours_only_text_d, hir_on_text_d = return_contours_of_image(text_only_d) @@ -4588,65 +4577,48 @@ class Eynollah: areas_cnt_text_d = np.array([cv2.contourArea(c) for c in contours_only_text_parent_d]) areas_cnt_text_d = areas_cnt_text_d / float(text_only_d.shape[0] * text_only_d.shape[1]) - if len(areas_cnt_text_d)>0: - contours_biggest_d = contours_only_text_parent_d[np.argmax(areas_cnt_text_d)] + if len(contours_only_text_parent_d): + contour0_d = contours_only_text_parent_d[np.argmax(areas_cnt_text_d)] index_con_parents_d = np.argsort(areas_cnt_text_d) - contours_only_text_parent_d = self.return_list_of_contours_with_desired_order( - contours_only_text_parent_d, index_con_parents_d) - #try: - #contours_only_text_parent_d = \ - #list(np.array(contours_only_text_parent_d,dtype=object)[index_con_parents_d]) - #except: - #contours_only_text_parent_d = \ - #list(np.array(contours_only_text_parent_d,dtype=np.int32)[index_con_parents_d]) - #areas_cnt_text_d = list(np.array(areas_cnt_text_d)[index_con_parents_d]) - areas_cnt_text_d = self.return_list_of_contours_with_desired_order( - areas_cnt_text_d, index_con_parents_d) + contours_only_text_parent_d = np.array(contours_only_text_parent_d)[index_con_parents_d] + # rs: should be the same, no? + assert np.all(contour0_d == contours_only_text_parent_d[-1]), (np.argmax(areas_cnt_text_d), index_con_parents_d[-1]) + areas_cnt_text_d = areas_cnt_text_d[index_con_parents_d] - cx_bigest_d_big, cy_biggest_d_big = find_center_of_contours([contours_biggest_d]) - cx_bigest_d, cy_biggest_d = find_center_of_contours(contours_only_text_parent_d) - try: - if len(cx_bigest_d) >= 5: - cx_bigest_d_last5 = cx_bigest_d[-5:] - cy_biggest_d_last5 = cy_biggest_d[-5:] - dists_d = [math.sqrt((cx_bigest_big[0] - cx_bigest_d_last5[j]) ** 2 + - (cy_biggest_big[0] - cy_biggest_d_last5[j]) ** 2) - for j in range(len(cy_biggest_d_last5))] - ind_largest = len(cx_bigest_d) -5 + np.argmin(dists_d) - else: - cx_bigest_d_last5 = cx_bigest_d[-len(cx_bigest_d):] - cy_biggest_d_last5 = cy_biggest_d[-len(cx_bigest_d):] - dists_d = [math.sqrt((cx_bigest_big[0]-cx_bigest_d_last5[j])**2 + - (cy_biggest_big[0]-cy_biggest_d_last5[j])**2) - for j in range(len(cy_biggest_d_last5))] - ind_largest = len(cx_bigest_d) - len(cx_bigest_d) + np.argmin(dists_d) - - cx_bigest_d_big[0] = cx_bigest_d[ind_largest] - cy_biggest_d_big[0] = cy_biggest_d[ind_largest] - except Exception as why: - self.logger.error(str(why)) + center0_d = np.stack(find_center_of_contours([contour0_d])) # [2, 1] + centers_d = np.stack(find_center_of_contours(contours_only_text_parent_d)) # [2, N] + # rs: should be the same, no? + assert center0_d[0,0] == centers_d[0,-1] and center0_d[1,0] == centers_d[1,-1] + last5_centers_d = centers_d[:, -5:] + dists_d = np.linalg.norm(center0 - last5_centers_d, axis=0) + ind_largest = len(contours_only_text_parent_d) - last5_centers_d.shape[1] + np.argmin(dists_d) + center0_d[:, 0] = centers_d[:, ind_largest] + # order new contours the same way as the undeskewed contours + # (by calculating the offset of the largest contours, respectively, + # of the new and undeskewed image; then for each contour, + # finding the closest new contour, with proximity calculated + # as distance of their centers modulo offset vector) (h, w) = text_only.shape[:2] center = (w // 2.0, h // 2.0) M = cv2.getRotationMatrix2D(center, slope_deskew, 1.0) M_22 = np.array(M)[:2, :2] - p_big = np.dot(M_22, [cx_bigest_big, cy_biggest_big]) - x_diff = p_big[0] - cx_bigest_d_big - y_diff = p_big[1] - cy_biggest_d_big + p0 = np.dot(M_22, center0) # [2, 1] + offset = p0 - center0_d # [2, 1] + # img2 = np.zeros(text_only_d.shape[:2], dtype=np.uint8) contours_only_text_parent_d_ordered = [] for i in range(len(contours_only_text_parent)): - p = np.dot(M_22, [cx_bigest[i], cy_biggest[i]]) - p[0] = p[0] - x_diff[0] - p[1] = p[1] - y_diff[0] - dists = [math.sqrt((p[0] - cx_bigest_d[j]) ** 2 + - (p[1] - cy_biggest_d[j]) ** 2) - for j in range(len(cx_bigest_d))] - contours_only_text_parent_d_ordered.append(contours_only_text_parent_d[np.argmin(dists)]) - # img2=np.zeros((text_only.shape[0],text_only.shape[1],3)) - # img2=cv2.fillPoly(img2,pts=[contours_only_text_parent_d[np.argmin(dists)]] ,color=(1,1,1)) - # plt.imshow(img2[:,:,0]) - # plt.show() + p = np.dot(M_22, centers[:, i:i+1]) # [2, 1] + p -= offset + dists = np.linalg.norm(p - centers_d, axis=0) + contours_only_text_parent_d_ordered.append( + contours_only_text_parent_d[np.argmin(dists)]) + # cv2.fillPoly(img2, pts=[contours_only_text_parent_d[np.argmin(dists)]], color=i + 1) + # plt.imshow(img2) + # plt.show() + # rs: what about the remaining contours_only_text_parent_d? + # rs: what about duplicates? else: contours_only_text_parent_d_ordered = [] contours_only_text_parent_d = [] From 73e5a1def8489f6bf022e696f010d4c852ff685b Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Sun, 5 Oct 2025 02:33:03 +0200 Subject: [PATCH 325/492] matching deskewed text region contours with predicted: simplify - (no need for argmax if already sorted) --- src/eynollah/eynollah.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index dd6172a..46437f0 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4559,7 +4559,6 @@ class Eynollah: areas_cnt_text = np.array([cv2.contourArea(c) for c in contours_only_text_parent]) areas_cnt_text = areas_cnt_text / float(text_only.shape[0] * text_only.shape[1]) #self.logger.info('areas_cnt_text %s', areas_cnt_text) - contour0 = contours_only_text_parent[np.argmax(areas_cnt_text)] contours_only_text_parent = np.array(contours_only_text_parent)[areas_cnt_text > MIN_AREA_REGION] areas_cnt_text_parent = areas_cnt_text[areas_cnt_text > MIN_AREA_REGION] @@ -4567,9 +4566,11 @@ class Eynollah: contours_only_text_parent = contours_only_text_parent[index_con_parents] areas_cnt_text_parent = areas_cnt_text_parent[index_con_parents] - center0 = np.stack(find_center_of_contours([contour0])) # [2, 1] centers = np.stack(find_center_of_contours(contours_only_text_parent)) # [2, N] + contour0 = contours_only_text_parent[-1] + center0 = centers[:, -1:] # [2, 1] + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: contours_only_text_d, hir_on_text_d = return_contours_of_image(text_only_d) contours_only_text_parent_d = return_parent_contours(contours_only_text_d, hir_on_text_d) @@ -4578,17 +4579,15 @@ class Eynollah: areas_cnt_text_d = areas_cnt_text_d / float(text_only_d.shape[0] * text_only_d.shape[1]) if len(contours_only_text_parent_d): - contour0_d = contours_only_text_parent_d[np.argmax(areas_cnt_text_d)] index_con_parents_d = np.argsort(areas_cnt_text_d) contours_only_text_parent_d = np.array(contours_only_text_parent_d)[index_con_parents_d] - # rs: should be the same, no? - assert np.all(contour0_d == contours_only_text_parent_d[-1]), (np.argmax(areas_cnt_text_d), index_con_parents_d[-1]) areas_cnt_text_d = areas_cnt_text_d[index_con_parents_d] - center0_d = np.stack(find_center_of_contours([contour0_d])) # [2, 1] centers_d = np.stack(find_center_of_contours(contours_only_text_parent_d)) # [2, N] - # rs: should be the same, no? - assert center0_d[0,0] == centers_d[0,-1] and center0_d[1,0] == centers_d[1,-1] + + contour0_d = contours_only_text_parent_d[-1] + center0_d = centers_d[:, -1:] # [2, 1] + last5_centers_d = centers_d[:, -5:] dists_d = np.linalg.norm(center0 - last5_centers_d, axis=0) ind_largest = len(contours_only_text_parent_d) - last5_centers_d.shape[1] + np.argmin(dists_d) From 0f33c21eb3a9cbe87f7221dd3481203de415794d Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Sun, 5 Oct 2025 02:45:01 +0200 Subject: [PATCH 326/492] matching deskewed text region contours with predicted: improve - when matching undeskewed and new contours, do not just pick the closest centers, respectively, but also of similar size (by making the contour area the 3rd dimension of the vector norm in the distance calculation) --- src/eynollah/eynollah.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 46437f0..e474916 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4610,7 +4610,11 @@ class Eynollah: for i in range(len(contours_only_text_parent)): p = np.dot(M_22, centers[:, i:i+1]) # [2, 1] p -= offset - dists = np.linalg.norm(p - centers_d, axis=0) + # add dimension for area + #dists = np.linalg.norm(p - centers_d, axis=0) + diffs = (np.append(p, [[areas_cnt_text_parent[i]]], axis=0) - + np.append(centers_d, areas_cnt_text_d[np.newaxis], axis=0)) + dists = np.linalg.norm(diffs, axis=0) contours_only_text_parent_d_ordered.append( contours_only_text_parent_d[np.argmin(dists)]) # cv2.fillPoly(img2, pts=[contours_only_text_parent_d[np.argmin(dists)]], color=i + 1) From 0e00d7868be55d3fb94b52fffc6ed96bf9387067 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Mon, 6 Oct 2025 12:55:10 +0200 Subject: [PATCH 327/492] matching deskewed text region contours with predicted: improve - apply same min-area filter to deskewed contours as to original ones --- src/eynollah/eynollah.py | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index e474916..e5ad5ae 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4568,7 +4568,6 @@ class Eynollah: centers = np.stack(find_center_of_contours(contours_only_text_parent)) # [2, N] - contour0 = contours_only_text_parent[-1] center0 = centers[:, -1:] # [2, 1] if np.abs(slope_deskew) >= SLOPE_THRESHOLD: @@ -4578,6 +4577,9 @@ class Eynollah: areas_cnt_text_d = np.array([cv2.contourArea(c) for c in contours_only_text_parent_d]) areas_cnt_text_d = areas_cnt_text_d / float(text_only_d.shape[0] * text_only_d.shape[1]) + contours_only_text_parent_d = np.array(contours_only_text_parent_d)[areas_cnt_text_d > MIN_AREA_REGION] + areas_cnt_text_d = areas_cnt_text_d[areas_cnt_text_d > MIN_AREA_REGION] + if len(contours_only_text_parent_d): index_con_parents_d = np.argsort(areas_cnt_text_d) contours_only_text_parent_d = np.array(contours_only_text_parent_d)[index_con_parents_d] @@ -4585,9 +4587,10 @@ class Eynollah: centers_d = np.stack(find_center_of_contours(contours_only_text_parent_d)) # [2, N] - contour0_d = contours_only_text_parent_d[-1] center0_d = centers_d[:, -1:] # [2, 1] + # find the largest among the largest 5 deskewed contours + # that is also closest to the largest original contour last5_centers_d = centers_d[:, -5:] dists_d = np.linalg.norm(center0 - last5_centers_d, axis=0) ind_largest = len(contours_only_text_parent_d) - last5_centers_d.shape[1] + np.argmin(dists_d) @@ -4762,14 +4765,7 @@ class Eynollah: if np.abs(slope_deskew) >= SLOPE_THRESHOLD: contours_only_text_parent_d_ordered = self.return_list_of_contours_with_desired_order( contours_only_text_parent_d_ordered, index_by_text_par_con) - #try: - #contours_only_text_parent_d_ordered = \ - #list(np.array(contours_only_text_parent_d_ordered, dtype=np.int32)[index_by_text_par_con]) - #except: - #contours_only_text_parent_d_ordered = \ - #list(np.array(contours_only_text_parent_d_ordered, dtype=object)[index_by_text_par_con]) else: - #takes long timee contours_only_text_parent_d_ordered = None if self.light_version: fun = check_any_text_region_in_model_one_is_main_or_header_light @@ -4949,12 +4945,6 @@ class Eynollah: else: contours_only_text_parent_d_ordered = self.return_list_of_contours_with_desired_order( contours_only_text_parent_d_ordered, index_by_text_par_con) - #try: - #contours_only_text_parent_d_ordered = \ - #list(np.array(contours_only_text_parent_d_ordered, dtype=object)[index_by_text_par_con]) - #except: - #contours_only_text_parent_d_ordered = \ - #list(np.array(contours_only_text_parent_d_ordered, dtype=np.int32)[index_by_text_par_con]) order_text_new, id_of_texts_tot = self.do_order_of_regions( contours_only_text_parent_d_ordered, contours_only_text_parent_h, boxes_d, textline_mask_tot_d) From 155b8f68b8a7754de11e002e0df2bfc7292899d8 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Mon, 6 Oct 2025 12:58:24 +0200 Subject: [PATCH 328/492] matching deskewed text region contours with predicted: improve - avoid duplicate and missing mappings by using a different approach: instead of just minimising the center distance for the N contours that we expect, 1. get all N:M distances 2. iterate over them from small to large 3. continue adding correspondences until both every original contour and every deskewed contour have at least one match 4. where one original matches multiple deskewed contours, join the latter polygons to map as single contour 5. where one deskewed contour matches multiple originals, split the former by intersecting with each of the latter (after bringing them into the same coordinate space), so ultimately only the respective match gets assigned --- src/eynollah/eynollah.py | 94 ++++++++++++++++++++++++++++------- src/eynollah/utils/contour.py | 15 ++++++ 2 files changed, 90 insertions(+), 19 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index e5ad5ae..5e32929 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -33,6 +33,7 @@ from concurrent.futures import ProcessPoolExecutor import xml.etree.ElementTree as ET import cv2 import numpy as np +import shapely.affinity from scipy.signal import find_peaks from scipy.ndimage import gaussian_filter1d from numba import cuda @@ -83,6 +84,10 @@ from .utils.contour import ( return_parent_contours, dilate_textregion_contours, dilate_textline_contours, + polygon2contour, + contour2polygon, + join_polygons, + make_intersection, ) from .utils.rotate import ( rotate_image, @@ -4556,8 +4561,9 @@ class Eynollah: contours_only_text, hir_on_text = return_contours_of_image(text_only) contours_only_text_parent = return_parent_contours(contours_only_text, hir_on_text) if len(contours_only_text_parent) > 0: + areas_tot_text = np.prod(text_only.shape) areas_cnt_text = np.array([cv2.contourArea(c) for c in contours_only_text_parent]) - areas_cnt_text = areas_cnt_text / float(text_only.shape[0] * text_only.shape[1]) + areas_cnt_text = areas_cnt_text / float(areas_tot_text) #self.logger.info('areas_cnt_text %s', areas_cnt_text) contours_only_text_parent = np.array(contours_only_text_parent)[areas_cnt_text > MIN_AREA_REGION] areas_cnt_text_parent = areas_cnt_text[areas_cnt_text > MIN_AREA_REGION] @@ -4574,8 +4580,9 @@ class Eynollah: contours_only_text_d, hir_on_text_d = return_contours_of_image(text_only_d) contours_only_text_parent_d = return_parent_contours(contours_only_text_d, hir_on_text_d) + areas_tot_text_d = np.prod(text_only_d.shape) areas_cnt_text_d = np.array([cv2.contourArea(c) for c in contours_only_text_parent_d]) - areas_cnt_text_d = areas_cnt_text_d / float(text_only_d.shape[0] * text_only_d.shape[1]) + areas_cnt_text_d = areas_cnt_text_d / float(areas_tot_text_d) contours_only_text_parent_d = np.array(contours_only_text_parent_d)[areas_cnt_text_d > MIN_AREA_REGION] areas_cnt_text_d = areas_cnt_text_d[areas_cnt_text_d > MIN_AREA_REGION] @@ -4587,7 +4594,7 @@ class Eynollah: centers_d = np.stack(find_center_of_contours(contours_only_text_parent_d)) # [2, N] - center0_d = centers_d[:, -1:] # [2, 1] + center0_d = centers_d[:, -1:].copy() # [2, 1] # find the largest among the largest 5 deskewed contours # that is also closest to the largest original contour @@ -4605,26 +4612,75 @@ class Eynollah: center = (w // 2.0, h // 2.0) M = cv2.getRotationMatrix2D(center, slope_deskew, 1.0) M_22 = np.array(M)[:2, :2] - p0 = np.dot(M_22, center0) # [2, 1] - offset = p0 - center0_d # [2, 1] + center0 = np.dot(M_22, center0) # [2, 1] + offset = center0 - center0_d # [2, 1] - # img2 = np.zeros(text_only_d.shape[:2], dtype=np.uint8) - contours_only_text_parent_d_ordered = [] + centers = np.dot(M_22, centers) - offset # [2,N] + # add dimension for area (so only contours of similar size will be considered close) + centers = np.append(centers, areas_cnt_text_parent[np.newaxis], axis=0) + centers_d = np.append(centers_d, areas_cnt_text_d[np.newaxis], axis=0) + + dists = np.zeros((len(contours_only_text_parent), len(contours_only_text_parent_d))) for i in range(len(contours_only_text_parent)): - p = np.dot(M_22, centers[:, i:i+1]) # [2, 1] - p -= offset - # add dimension for area - #dists = np.linalg.norm(p - centers_d, axis=0) - diffs = (np.append(p, [[areas_cnt_text_parent[i]]], axis=0) - - np.append(centers_d, areas_cnt_text_d[np.newaxis], axis=0)) - dists = np.linalg.norm(diffs, axis=0) - contours_only_text_parent_d_ordered.append( - contours_only_text_parent_d[np.argmin(dists)]) - # cv2.fillPoly(img2, pts=[contours_only_text_parent_d[np.argmin(dists)]], color=i + 1) + dists[i] = np.linalg.norm(centers[:, i:i + 1] - centers_d, axis=0) + corresp = np.zeros(dists.shape, dtype=bool) + # keep searching next-closest until at least one correspondence on each side + while not np.all(corresp.sum(axis=1)) and not np.all(corresp.sum(axis=0)): + idx = np.nanargmin(dists) + i, j = np.unravel_index(idx, dists.shape) + dists[i, j] = np.nan + corresp[i, j] = True + #print("original/deskewed adjacency", corresp.nonzero()) + contours_only_text_parent_d_ordered = np.zeros_like(contours_only_text_parent) + contours_only_text_parent_d_ordered = contours_only_text_parent_d[np.argmax(corresp, axis=1)] + # img1 = np.zeros(text_only_d.shape[:2], dtype=np.uint8) + # for i in range(len(contours_only_text_parent)): + # cv2.fillPoly(img1, pts=[contours_only_text_parent_d_ordered[i]], color=i + 1) + # plt.subplot(2, 2, 1, title="direct corresp contours") + # plt.imshow(img1) + # img2 = np.zeros(text_only_d.shape[:2], dtype=np.uint8) + # join deskewed regions mapping to single original ones + for i in range(len(contours_only_text_parent)): + if np.count_nonzero(corresp[i]) > 1: + indices = np.flatnonzero(corresp[i]) + #print("joining", indices) + polygons_d = [contour2polygon(contour) + for contour in contours_only_text_parent_d[indices]] + contour_d = polygon2contour(join_polygons(polygons_d)) + contours_only_text_parent_d_ordered[i] = contour_d + # cv2.fillPoly(img2, pts=[contour_d], color=i + 1) + # plt.subplot(2, 2, 3, title="joined contours") # plt.imshow(img2) + # img3 = np.zeros(text_only_d.shape[:2], dtype=np.uint8) + # split deskewed regions mapping to multiple original ones + def deskew(polygon): + polygon = shapely.affinity.rotate(polygon, -slope_deskew, origin=center) + polygon = shapely.affinity.translate(polygon, *offset.squeeze()) + return polygon + for j in range(len(contours_only_text_parent_d)): + if np.count_nonzero(corresp[:, j]) > 1: + indices = np.flatnonzero(corresp[:, j]) + #print("splitting along", indices) + polygons = [deskew(contour2polygon(contour)) + for contour in contours_only_text_parent[indices]] + polygon_d = contour2polygon(contours_only_text_parent_d[j]) + polygons_d = [make_intersection(polygon_d, polygon) + for polygon in polygons] + # ignore where there is no actual overlap + indices = indices[np.flatnonzero(polygons_d)] + contours_d = [polygon2contour(polygon_d) + for polygon_d in polygons_d + if polygon_d] + contours_only_text_parent_d_ordered[indices] = contours_d + # cv2.fillPoly(img3, pts=contours_d, color=j + 1) + # plt.subplot(2, 2, 4, title="split contours") + # plt.imshow(img3) + # img4 = np.zeros(text_only_d.shape[:2], dtype=np.uint8) + # for i in range(len(contours_only_text_parent)): + # cv2.fillPoly(img4, pts=[contours_only_text_parent_d_ordered[i]], color=i + 1) + # plt.subplot(2, 2, 2, title="result contours") + # plt.imshow(img4) # plt.show() - # rs: what about the remaining contours_only_text_parent_d? - # rs: what about duplicates? else: contours_only_text_parent_d_ordered = [] contours_only_text_parent_d = [] diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index 041cbf6..8431bbe 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -335,6 +335,21 @@ def polygon2contour(polygon: Polygon) -> np.ndarray: polygon = np.array(polygon.exterior.coords[:-1], dtype=int) return np.maximum(0, polygon).astype(np.uint)[:, np.newaxis] +def make_intersection(poly1, poly2): + interp = poly1.intersection(poly2) + # post-process + if interp.is_empty or interp.area == 0.0: + return None + if interp.geom_type == 'GeometryCollection': + # heterogeneous result: filter zero-area shapes (LineString, Point) + interp = unary_union([geom for geom in interp.geoms if geom.area > 0]) + if interp.geom_type == 'MultiPolygon': + # homogeneous result: construct convex hull to connect + interp = join_polygons(interp.geoms) + assert interp.geom_type == 'Polygon', interp.wkt + interp = make_valid(interp) + return interp + def make_valid(polygon: Polygon) -> Polygon: """Ensures shapely.geometry.Polygon object is valid by repeated rearrangement/simplification/enlargement.""" def isint(x): From fe603188f4f7f9d545b44085cdc45195f98f0546 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Mon, 6 Oct 2025 13:11:03 +0200 Subject: [PATCH 329/492] avoid unnecessary 3-channel conversions --- src/eynollah/eynollah.py | 52 ++++----- src/eynollah/utils/__init__.py | 156 +++++++++++---------------- src/eynollah/utils/contour.py | 74 +++++-------- src/eynollah/utils/separate_lines.py | 53 ++++----- 4 files changed, 132 insertions(+), 203 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 5e32929..834ecf3 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -712,7 +712,7 @@ class Eynollah: if self.input_binary: img = self.imread() prediction_bin = self.do_prediction(True, img, self.model_bin, n_batch_inference=5) - prediction_bin = 255 * (prediction_bin[:,:,0]==0) + prediction_bin = 255 * (prediction_bin[:,:,0] == 0) prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2).astype(np.uint8) img= np.copy(prediction_bin) img_bin = prediction_bin @@ -2064,9 +2064,7 @@ class Eynollah: boxes_sub_new = [] poly_sub = [] for mv in range(len(boxes_per_process)): - crop_img, _ = crop_image_inside_box(boxes_per_process[mv], - np.repeat(textline_mask_tot[:, :, np.newaxis], 3, axis=2)) - crop_img = crop_img[:, :, 0] + crop_img, _ = crop_image_inside_box(boxes_per_process[mv], textline_mask_tot) crop_img = cv2.erode(crop_img, KERNEL, iterations=2) try: textline_con, hierarchy = return_contours_of_image(crop_img) @@ -2638,10 +2636,8 @@ class Eynollah: layout_org[:,:,0][layout_org[:,:,0]==pixel_table] = 0 layout = (layout[:,:,0]==pixel_table)*1 - layout =np.repeat(layout[:, :, np.newaxis], 3, axis=2) layout = layout.astype(np.uint8) - imgray = cv2.cvtColor(layout, cv2.COLOR_BGR2GRAY ) - _, thresh = cv2.threshold(imgray, 0, 255, 0) + _, thresh = cv2.threshold(layout, 0, 255, 0) contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) cnt_size = np.array([cv2.contourArea(contours[j]) @@ -2652,8 +2648,8 @@ class Eynollah: x, y, w, h = cv2.boundingRect(contours[i]) iou = cnt_size[i] /float(w*h) *100 if iou<80: - layout_contour = np.zeros((layout_org.shape[0], layout_org.shape[1])) - layout_contour= cv2.fillPoly(layout_contour,pts=[contours[i]] ,color=(1,1,1)) + layout_contour = np.zeros(layout_org.shape[:2]) + layout_contour = cv2.fillPoly(layout_contour, pts=[contours[i]] ,color=1) layout_contour_sum = layout_contour.sum(axis=0) layout_contour_sum_diff = np.diff(layout_contour_sum) @@ -2669,20 +2665,17 @@ class Eynollah: layout_contour=cv2.erode(layout_contour[:,:], KERNEL, iterations=5) layout_contour=cv2.dilate(layout_contour[:,:], KERNEL, iterations=5) - layout_contour =np.repeat(layout_contour[:, :, np.newaxis], 3, axis=2) layout_contour = layout_contour.astype(np.uint8) - - imgray = cv2.cvtColor(layout_contour, cv2.COLOR_BGR2GRAY ) - _, thresh = cv2.threshold(imgray, 0, 255, 0) + _, thresh = cv2.threshold(layout_contour, 0, 255, 0) contours_sep, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) for ji in range(len(contours_sep) ): contours_new.append(contours_sep[ji]) if num_col_classifier>=2: - only_recent_contour_image = np.zeros((layout.shape[0],layout.shape[1])) - only_recent_contour_image= cv2.fillPoly(only_recent_contour_image, - pts=[contours_sep[ji]], color=(1,1,1)) + only_recent_contour_image = np.zeros(layout.shape[:2]) + only_recent_contour_image = cv2.fillPoly(only_recent_contour_image, + pts=[contours_sep[ji]], color=1) table_pixels_masked_from_early_pre = only_recent_contour_image * table_prediction_early iou_in = 100. * table_pixels_masked_from_early_pre.sum() / only_recent_contour_image.sum() #print(iou_in,'iou_in_in1') @@ -3210,13 +3203,11 @@ class Eynollah: pixel_lines = 3 if np.abs(slope_deskew) < SLOPE_THRESHOLD: _, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document( - np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), - num_col_classifier, self.tables, pixel_lines) + text_regions_p, num_col_classifier, self.tables, pixel_lines) if np.abs(slope_deskew) >= SLOPE_THRESHOLD: _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document( - np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), - num_col_classifier, self.tables, pixel_lines) + text_regions_p_1_n, num_col_classifier, self.tables, pixel_lines) #print(time.time()-t_0_box,'time box in 2') self.logger.info("num_col_classifier: %s", num_col_classifier) @@ -3392,13 +3383,11 @@ class Eynollah: pixel_lines=3 if np.abs(slope_deskew) < SLOPE_THRESHOLD: num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document( - np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), - num_col_classifier, self.tables, pixel_lines) + text_regions_p, num_col_classifier, self.tables, pixel_lines) if np.abs(slope_deskew) >= SLOPE_THRESHOLD: num_col_d, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document( - np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), - num_col_classifier, self.tables, pixel_lines) + text_regions_p_1_n, num_col_classifier, self.tables, pixel_lines) if num_col_classifier>=3: if np.abs(slope_deskew) < SLOPE_THRESHOLD: @@ -3498,7 +3487,7 @@ class Eynollah: #text_regions_p[:,:][regions_fully[:,:,0]==6]=6 ##regions_fully_only_drop = put_drop_out_from_only_drop_model(regions_fully_only_drop, text_regions_p) - ##regions_fully[:, :, 0][regions_fully_only_drop[:, :, 0] == 4] = 4 + ##regions_fully[:, :, 0][regions_fully_only_drop[:, :] == 4] = 4 drop_capital_label_in_full_layout_model = 3 drops = (regions_fully[:,:,0]==drop_capital_label_in_full_layout_model)*1 @@ -4715,7 +4704,6 @@ class Eynollah: return pcgts - #print("text region early 3 in %.1fs", time.time() - t0) if self.light_version: contours_only_text_parent = dilate_textregion_contours(contours_only_text_parent) @@ -4851,21 +4839,17 @@ class Eynollah: if not self.headers_off: if np.abs(slope_deskew) < SLOPE_THRESHOLD: num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document( - np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), - num_col_classifier, self.tables, label_seps, contours_only_text_parent_h) + text_regions_p, num_col_classifier, self.tables, label_seps, contours_only_text_parent_h) else: _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document( - np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), - num_col_classifier, self.tables, label_seps, contours_only_text_parent_h_d_ordered) + text_regions_p_1_n, num_col_classifier, self.tables, label_seps, contours_only_text_parent_h_d_ordered) elif self.headers_off: if np.abs(slope_deskew) < SLOPE_THRESHOLD: num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document( - np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), - num_col_classifier, self.tables, label_seps) + text_regions_p, num_col_classifier, self.tables, label_seps) else: _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document( - np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), - num_col_classifier, self.tables, label_seps) + text_regions_p_1_n, num_col_classifier, self.tables, label_seps) if num_col_classifier >= 3: if np.abs(slope_deskew) < SLOPE_THRESHOLD: diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 6e5afd4..ebf78fe 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -796,7 +796,7 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8): return len(peaks_fin_true), peaks_fin_true def find_num_col_by_vertical_lines(regions_without_separators, multiplier=3.8): - regions_without_separators_0 = regions_without_separators[:, :, 0].sum(axis=0) + regions_without_separators_0 = regions_without_separators.sum(axis=0) ##plt.plot(regions_without_separators_0) ##plt.show() @@ -823,7 +823,10 @@ def return_regions_without_separators(regions_pre): return regions_without_separators def put_drop_out_from_only_drop_model(layout_no_patch, layout1): - drop_only = (layout_no_patch[:, :, 0] == 4) * 1 + if layout_no_patch.ndim == 3: + layout_no_patch = layout_no_patch[:, :, 0] + + drop_only = (layout_no_patch[:, :] == 4) * 1 contours_drop, hir_on_drop = return_contours_of_image(drop_only) contours_drop_parent = return_parent_contours(contours_drop, hir_on_drop) @@ -849,9 +852,8 @@ def put_drop_out_from_only_drop_model(layout_no_patch, layout1): (map_of_drop_contour_bb == 5).sum()) >= 15: contours_drop_parent_final.append(contours_drop_parent[jj]) - layout_no_patch[:, :, 0][layout_no_patch[:, :, 0] == 4] = 0 - - layout_no_patch = cv2.fillPoly(layout_no_patch, pts=contours_drop_parent_final, color=(4, 4, 4)) + layout_no_patch[:, :][layout_no_patch[:, :] == 4] = 0 + layout_no_patch = cv2.fillPoly(layout_no_patch, pts=contours_drop_parent_final, color=4) return layout_no_patch @@ -925,17 +927,16 @@ def check_any_text_region_in_model_one_is_main_or_header( contours_only_text_parent_main_d=[] contours_only_text_parent_head_d=[] - for ii in range(len(contours_only_text_parent)): - con=contours_only_text_parent[ii] - img=np.zeros((regions_model_1.shape[0],regions_model_1.shape[1],3)) - img = cv2.fillPoly(img, pts=[con], color=(255, 255, 255)) + for ii, con in enumerate(contours_only_text_parent): + img = np.zeros(regions_model_1.shape[:2]) + img = cv2.fillPoly(img, pts=[con], color=255) - all_pixels=((img[:,:,0]==255)*1).sum() - pixels_header=( ( (img[:,:,0]==255) & (regions_model_full[:,:,0]==2) )*1 ).sum() + all_pixels=((img == 255)*1).sum() + pixels_header=( ( (img == 255) & (regions_model_full[:,:,0]==2) )*1 ).sum() pixels_main=all_pixels-pixels_header if (pixels_header>=pixels_main) and ( (length_con[ii]/float(height_con[ii]) )>=1.3 ): - regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=2 + regions_model_1[:,:][(regions_model_1[:,:]==1) & (img == 255) ]=2 contours_only_text_parent_head.append(con) if contours_only_text_parent_d_ordered is not None: contours_only_text_parent_head_d.append(contours_only_text_parent_d_ordered[ii]) @@ -944,7 +945,7 @@ def check_any_text_region_in_model_one_is_main_or_header( all_found_textline_polygons_head.append(all_found_textline_polygons[ii]) conf_contours_head.append(None) else: - regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ]=1 + regions_model_1[:,:][(regions_model_1[:,:]==1) & (img == 255) ]=1 contours_only_text_parent_main.append(con) conf_contours_main.append(conf_contours[ii]) if contours_only_text_parent_d_ordered is not None: @@ -1015,11 +1016,11 @@ def check_any_text_region_in_model_one_is_main_or_header_light( contours_only_text_parent_head_d=[] for ii, con in enumerate(contours_only_text_parent_z): - img=np.zeros((regions_model_1.shape[0], regions_model_1.shape[1], 3)) - img = cv2.fillPoly(img, pts=[con], color=(255, 255, 255)) + img = np.zeros(regions_model_1.shape[:2]) + img = cv2.fillPoly(img, pts=[con], color=255) - all_pixels = (img[:,:,0]==255).sum() - pixels_header=((img[:,:,0]==255) & + all_pixels = (img == 255).sum() + pixels_header=((img == 255) & (regions_model_full[:,:,0]==2)).sum() pixels_main = all_pixels - pixels_header @@ -1029,7 +1030,7 @@ def check_any_text_region_in_model_one_is_main_or_header_light( ( pixels_header / float(pixels_main) >= 0.3 and length_con[ii] / float(height_con[ii]) >=3 )): - regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ] = 2 + regions_model_1[:,:][(regions_model_1[:,:]==1) & (img == 255) ] = 2 contours_only_text_parent_head.append(contours_only_text_parent[ii]) conf_contours_head.append(None) # why not conf_contours[ii], too? if contours_only_text_parent_d_ordered is not None: @@ -1039,7 +1040,7 @@ def check_any_text_region_in_model_one_is_main_or_header_light( all_found_textline_polygons_head.append(all_found_textline_polygons[ii]) else: - regions_model_1[:,:][(regions_model_1[:,:]==1) & (img[:,:,0]==255) ] = 1 + regions_model_1[:,:][(regions_model_1[:,:]==1) & (img == 255) ] = 1 contours_only_text_parent_main.append(contours_only_text_parent[ii]) conf_contours_main.append(conf_contours[ii]) if contours_only_text_parent_d_ordered is not None: @@ -1119,11 +1120,11 @@ def small_textlines_to_parent_adherence2(textlines_con, textline_iamge, num_col) textlines_big.append(textlines_tot[i]) textlines_big_org_form.append(textlines_tot_org_form[i]) - img_textline_s = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1])) - img_textline_s = cv2.fillPoly(img_textline_s, pts=textlines_small, color=(1, 1, 1)) + img_textline_s = np.zeros(textline_iamge.shape[:2]) + img_textline_s = cv2.fillPoly(img_textline_s, pts=textlines_small, color=1) - img_textline_b = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1])) - img_textline_b = cv2.fillPoly(img_textline_b, pts=textlines_big, color=(1, 1, 1)) + img_textline_b = np.zeros(textline_iamge.shape[:2]) + img_textline_b = cv2.fillPoly(img_textline_b, pts=textlines_big, color=1) sum_small_big_all = img_textline_s + img_textline_b sum_small_big_all2 = (sum_small_big_all[:, :] == 2) * 1 @@ -1135,11 +1136,11 @@ def small_textlines_to_parent_adherence2(textlines_con, textline_iamge, num_col) # print(len(textlines_small),'small') intersections = [] for z2 in range(len(textlines_big)): - img_text = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1])) - img_text = cv2.fillPoly(img_text, pts=[textlines_small[z1]], color=(1, 1, 1)) + img_text = np.zeros(textline_iamge.shape[:2]) + img_text = cv2.fillPoly(img_text, pts=[textlines_small[z1]], color=1) - img_text2 = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1])) - img_text2 = cv2.fillPoly(img_text2, pts=[textlines_big[z2]], color=(1, 1, 1)) + img_text2 = np.zeros(textline_iamge.shape[:2]) + img_text2 = cv2.fillPoly(img_text2, pts=[textlines_big[z2]], color=1) sum_small_big = img_text2 + img_text sum_small_big_2 = (sum_small_big[:, :] == 2) * 1 @@ -1165,19 +1166,17 @@ def small_textlines_to_parent_adherence2(textlines_con, textline_iamge, num_col) index_small_textlines = list(np.where(np.array(dis_small_from_bigs_tot) == z)[0]) # print(z,index_small_textlines) - img_text2 = np.zeros((textline_iamge.shape[0], textline_iamge.shape[1], 3)) - img_text2 = cv2.fillPoly(img_text2, pts=[textlines_big[z]], color=(255, 255, 255)) + img_text2 = np.zeros(textline_iamge.shape[:2], dtype=np.uint8) + img_text2 = cv2.fillPoly(img_text2, pts=[textlines_big[z]], color=255) textlines_big_with_change.append(z) for k in index_small_textlines: - img_text2 = cv2.fillPoly(img_text2, pts=[textlines_small[k]], color=(255, 255, 255)) + img_text2 = cv2.fillPoly(img_text2, pts=[textlines_small[k]], color=255) textlines_small_with_change.append(k) - img_text2 = img_text2.astype(np.uint8) - imgray = cv2.cvtColor(img_text2, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) - cont, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + _, thresh = cv2.threshold(img_text2, 0, 255, 0) + cont, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # print(cont[0],type(cont)) textlines_big_with_change_con.append(cont) @@ -1189,9 +1188,8 @@ def small_textlines_to_parent_adherence2(textlines_con, textline_iamge, num_col) # print(textlines_big_with_change,'textlines_big_with_change') # print(textlines_small_with_change,'textlines_small_with_change') # print(textlines_big) - textlines_con_changed.append(textlines_big_org_form) - else: - textlines_con_changed.append(textlines_big_org_form) + + textlines_con_changed.append(textlines_big_org_form) return textlines_con_changed def order_of_regions(textline_mask, contours_main, contours_head, y_ref): @@ -1262,29 +1260,22 @@ def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new( img_p_in_ver, img_in_hor,num_col_classifier): #img_p_in_ver = cv2.erode(img_p_in_ver, self.kernel, iterations=2) - img_p_in_ver=img_p_in_ver.astype(np.uint8) - img_p_in_ver=np.repeat(img_p_in_ver[:, :, np.newaxis], 3, axis=2) - imgray = cv2.cvtColor(img_p_in_ver, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) - - contours_lines_ver,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + _, thresh = cv2.threshold(img_p_in_ver, 0, 255, 0) + contours_lines_ver, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) slope_lines_ver, _, x_min_main_ver, _, _, _, y_min_main_ver, y_max_main_ver, cx_main_ver = \ find_features_of_lines(contours_lines_ver) for i in range(len(x_min_main_ver)): img_p_in_ver[int(y_min_main_ver[i]): int(y_min_main_ver[i])+30, int(cx_main_ver[i])-25: - int(cx_main_ver[i])+25, 0] = 0 + int(cx_main_ver[i])+25] = 0 img_p_in_ver[int(y_max_main_ver[i])-30: int(y_max_main_ver[i]), int(cx_main_ver[i])-25: - int(cx_main_ver[i])+25, 0] = 0 + int(cx_main_ver[i])+25] = 0 - img_in_hor=img_in_hor.astype(np.uint8) - img_in_hor=np.repeat(img_in_hor[:, :, np.newaxis], 3, axis=2) - imgray = cv2.cvtColor(img_in_hor, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) - contours_lines_hor,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + _, thresh = cv2.threshold(img_in_hor, 0, 255, 0) + contours_lines_hor, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) slope_lines_hor, dist_x_hor, x_min_main_hor, x_max_main_hor, cy_main_hor, _, _, _, _ = \ find_features_of_lines(contours_lines_hor) @@ -1340,22 +1331,19 @@ def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new( img_p_in=img_in_hor special_separators=[] - img_p_in_ver[:,:,0][img_p_in_ver[:,:,0]==255]=1 - sep_ver_hor=img_p_in+img_p_in_ver - sep_ver_hor_cross=(sep_ver_hor[:,:,0]==2)*1 - sep_ver_hor_cross=np.repeat(sep_ver_hor_cross[:, :, np.newaxis], 3, axis=2) - sep_ver_hor_cross=sep_ver_hor_cross.astype(np.uint8) - imgray = cv2.cvtColor(sep_ver_hor_cross, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) - contours_cross,_=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) - cx_cross, cy_cross = find_center_of_contours(contours_cross) - for ii in range(len(cx_cross)): - img_p_in[int(cy_cross[ii])-30:int(cy_cross[ii])+30,int(cx_cross[ii])+5:int(cx_cross[ii])+40,0]=0 - img_p_in[int(cy_cross[ii])-30:int(cy_cross[ii])+30,int(cx_cross[ii])-40:int(cx_cross[ii])-4,0]=0 + img_p_in_ver[img_p_in_ver == 255] = 1 + sep_ver_hor = img_p_in + img_p_in_ver + sep_ver_hor_cross = (sep_ver_hor == 2) * 1 + _, thresh = cv2.threshold(sep_ver_hor_cross.astype(np.uint8), 0, 255, 0) + contours_cross, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + center_cross = np.array(find_center_of_contours(contours_cross), dtype=int) + for cx, cy in center_cross.T: + img_p_in[cy - 30: cy + 30, cx + 5: cx + 40] = 0 + img_p_in[cy - 30: cy + 30, cx - 40: cx - 4] = 0 else: img_p_in=np.copy(img_in_hor) special_separators=[] - return img_p_in[:,:,0], special_separators + return img_p_in, special_separators def return_points_with_boundies(peaks_neg_fin, first_point, last_point): peaks_neg_tot = [] @@ -1365,11 +1353,11 @@ def return_points_with_boundies(peaks_neg_fin, first_point, last_point): peaks_neg_tot.append(last_point) return peaks_neg_tot -def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, pixel_lines, contours_h=None): +def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, label_lines, contours_h=None): t_ins_c0 = time.time() - separators_closeup=( (region_pre_p[:,:,:]==pixel_lines))*1 - separators_closeup[0:110,:,:]=0 - separators_closeup[separators_closeup.shape[0]-150:,:,:]=0 + separators_closeup=( (region_pre_p[:,:]==label_lines))*1 + separators_closeup[0:110,:]=0 + separators_closeup[separators_closeup.shape[0]-150:,:]=0 kernel = np.ones((5,5),np.uint8) separators_closeup=separators_closeup.astype(np.uint8) @@ -1381,15 +1369,11 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, separators_closeup_n=separators_closeup_n.astype(np.uint8) separators_closeup_n_binary=np.zeros(( separators_closeup_n.shape[0],separators_closeup_n.shape[1]) ) - separators_closeup_n_binary[:,:]=separators_closeup_n[:,:,0] + separators_closeup_n_binary[:,:]=separators_closeup_n[:,:] separators_closeup_n_binary[:,:][separators_closeup_n_binary[:,:]!=0]=1 - gray_early=np.repeat(separators_closeup_n_binary[:, :, np.newaxis], 3, axis=2) - gray_early=gray_early.astype(np.uint8) - imgray_e = cv2.cvtColor(gray_early, cv2.COLOR_BGR2GRAY) - ret_e, thresh_e = cv2.threshold(imgray_e, 0, 255, 0) - - contours_line_e,hierarchy_e=cv2.findContours(thresh_e,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + _, thresh_e = cv2.threshold(separators_closeup_n_binary, 0, 255, 0) + contours_line_e, _ = cv2.findContours(thresh_e.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) _, dist_xe, _, _, _, _, y_min_main, y_max_main, _ = \ find_features_of_lines(contours_line_e) dist_ye = y_max_main - y_min_main @@ -1399,10 +1383,8 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, cnts_hor_e=[] for ce in args_hor_e: cnts_hor_e.append(contours_line_e[ce]) - figs_e=np.zeros(thresh_e.shape) - figs_e=cv2.fillPoly(figs_e,pts=cnts_hor_e,color=(1,1,1)) - separators_closeup_n_binary=cv2.fillPoly(separators_closeup_n_binary, pts=cnts_hor_e, color=(0,0,0)) + separators_closeup_n_binary=cv2.fillPoly(separators_closeup_n_binary, pts=cnts_hor_e, color=0) gray = cv2.bitwise_not(separators_closeup_n_binary) gray=gray.astype(np.uint8) @@ -1422,7 +1404,7 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, kernel = np.ones((5,5),np.uint8) horizontal = cv2.dilate(horizontal,kernel,iterations = 2) horizontal = cv2.erode(horizontal,kernel,iterations = 2) - horizontal = cv2.fillPoly(horizontal, pts=cnts_hor_e, color=(255,255,255)) + horizontal = cv2.fillPoly(horizontal, pts=cnts_hor_e, color=255) rows = vertical.shape[0] verticalsize = rows // 30 @@ -1440,13 +1422,8 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, separators_closeup_new[:,:][vertical[:,:]!=0]=1 separators_closeup_new[:,:][horizontal[:,:]!=0]=1 - vertical=np.repeat(vertical[:, :, np.newaxis], 3, axis=2) - vertical=vertical.astype(np.uint8) - - imgray = cv2.cvtColor(vertical, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) - - contours_line_vers,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + _, thresh = cv2.threshold(vertical, 0, 255, 0) + contours_line_vers, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) slope_lines, dist_x, x_min_main, x_max_main, cy_main, slope_lines_org, y_min_main, y_max_main, cx_main = \ find_features_of_lines(contours_line_vers) @@ -1461,11 +1438,8 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, dist_y_ver=y_max_main_ver-y_min_main_ver len_y=separators_closeup.shape[0]/3.0 - horizontal=np.repeat(horizontal[:, :, np.newaxis], 3, axis=2) - horizontal=horizontal.astype(np.uint8) - imgray = cv2.cvtColor(horizontal, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) - contours_line_hors,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + _, thresh = cv2.threshold(horizontal, 0, 255, 0) + contours_line_hors, _ = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) slope_lines, dist_x, x_min_main, x_max_main, cy_main, slope_lines_org, y_min_main, y_max_main, cx_main = \ find_features_of_lines(contours_line_hors) @@ -1558,7 +1532,7 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, peaks_neg_fin_fin=[] for itiles in args_big_parts: regions_without_separators_tile=regions_without_separators[int(splitter_y_new[itiles]): - int(splitter_y_new[itiles+1]),:,0] + int(splitter_y_new[itiles+1]),:] try: num_col, peaks_neg_fin = find_num_col(regions_without_separators_tile, num_col_classifier, tables, multiplier=7.0) diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index 8431bbe..22a6f50 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -119,14 +119,11 @@ def return_parent_contours(contours, hierarchy): def return_contours_of_interested_region(region_pre_p, label, min_area=0.0002): # pixels of images are identified by 5 - if len(region_pre_p.shape) == 3: + if region_pre_p.ndim == 3: cnts_images = (region_pre_p[:, :, 0] == label) * 1 else: cnts_images = (region_pre_p[:, :] == label) * 1 - cnts_images = cnts_images.astype(np.uint8) - cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) - imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) + _, thresh = cv2.threshold(cnts_images.astype(np.uint8), 0, 255, 0) contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contours_imgs = return_parent_contours(contours_imgs, hierarchy) @@ -135,13 +132,11 @@ def return_contours_of_interested_region(region_pre_p, label, min_area=0.0002): return contours_imgs def do_work_of_contours_in_image(contour, index_r_con, img, slope_first): - img_copy = np.zeros(img.shape) - img_copy = cv2.fillPoly(img_copy, pts=[contour], color=(1, 1, 1)) + img_copy = np.zeros(img.shape[:2], dtype=np.uint8) + img_copy = cv2.fillPoly(img_copy, pts=[contour], color=1) img_copy = rotation_image_new(img_copy, -slope_first) - img_copy = img_copy.astype(np.uint8) - imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) + _, thresh = cv2.threshold(img_copy, 0, 255, 0) cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) @@ -164,8 +159,8 @@ def get_textregion_contours_in_org_image(cnts, img, slope_first): cnts_org = [] # print(cnts,'cnts') for i in range(len(cnts)): - img_copy = np.zeros(img.shape) - img_copy = cv2.fillPoly(img_copy, pts=[cnts[i]], color=(1, 1, 1)) + img_copy = np.zeros(img.shape[:2], dtype=np.uint8) + img_copy = cv2.fillPoly(img_copy, pts=[cnts[i]], color=1) # plt.imshow(img_copy) # plt.show() @@ -176,9 +171,7 @@ def get_textregion_contours_in_org_image(cnts, img, slope_first): # plt.imshow(img_copy) # plt.show() - img_copy = img_copy.astype(np.uint8) - imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) + _, thresh = cv2.threshold(img_copy, 0, 255, 0) cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1]) @@ -195,12 +188,11 @@ def get_textregion_contours_in_org_image_light_old(cnts, img, slope_first): interpolation=cv2.INTER_NEAREST) cnts_org = [] for cnt in cnts: - img_copy = np.zeros(img.shape) - img_copy = cv2.fillPoly(img_copy, pts=[(cnt / zoom).astype(int)], color=(1, 1, 1)) + img_copy = np.zeros(img.shape[:2], dtype=np.uint8) + img_copy = cv2.fillPoly(img_copy, pts=[cnt // zoom], color=1) img_copy = rotation_image_new(img_copy, -slope_first).astype(np.uint8) - imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) + _, thresh = cv2.threshold(img_copy, 0, 255, 0) cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) cont_int[0][:, 0, 0] = cont_int[0][:, 0, 0] + np.abs(img_copy.shape[1] - img.shape[1]) @@ -210,14 +202,13 @@ def get_textregion_contours_in_org_image_light_old(cnts, img, slope_first): return cnts_org def do_back_rotation_and_get_cnt_back(contour_par, index_r_con, img, slope_first, confidence_matrix): - img_copy = np.zeros(img.shape) - img_copy = cv2.fillPoly(img_copy, pts=[contour_par], color=(1, 1, 1)) - confidence_matrix_mapped_with_contour = confidence_matrix * img_copy[:,:,0] - confidence_contour = np.sum(confidence_matrix_mapped_with_contour) / float(np.sum(img_copy[:,:,0])) + img_copy = np.zeros(img.shape[:2], dtype=np.uint8) + img_copy = cv2.fillPoly(img_copy, pts=[contour_par], color=1) + confidence_matrix_mapped_with_contour = confidence_matrix * img_copy + confidence_contour = np.sum(confidence_matrix_mapped_with_contour) / float(np.sum(img_copy)) img_copy = rotation_image_new(img_copy, -slope_first).astype(np.uint8) - imgray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) + _, thresh = cv2.threshold(img_copy, 0, 255, 0) cont_int, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if len(cont_int)==0: @@ -245,14 +236,11 @@ def get_textregion_contours_in_org_image_light(cnts, img, confidence_matrix): def return_contours_of_interested_textline(region_pre_p, label): # pixels of images are identified by 5 - if len(region_pre_p.shape) == 3: + if region_pre_p.ndim == 3: cnts_images = (region_pre_p[:, :, 0] == label) * 1 else: cnts_images = (region_pre_p[:, :] == label) * 1 - cnts_images = cnts_images.astype(np.uint8) - cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) - imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) + _, thresh = cv2.threshold(cnts_images.astype(np.uint8), 0, 255, 0) contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contours_imgs = return_parent_contours(contours_imgs, hierarchy) @@ -262,25 +250,22 @@ def return_contours_of_interested_textline(region_pre_p, label): def return_contours_of_image(image): if len(image.shape) == 2: - image = np.repeat(image[:, :, np.newaxis], 3, axis=2) image = image.astype(np.uint8) + imgray = image else: image = image.astype(np.uint8) - imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) + imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) + _, thresh = cv2.threshold(imgray, 0, 255, 0) contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) return contours, hierarchy def return_contours_of_interested_region_by_min_size(region_pre_p, label, min_size=0.00003): # pixels of images are identified by 5 - if len(region_pre_p.shape) == 3: + if region_pre_p.ndim == 3: cnts_images = (region_pre_p[:, :, 0] == label) * 1 else: cnts_images = (region_pre_p[:, :] == label) * 1 - cnts_images = cnts_images.astype(np.uint8) - cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) - imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) + _, thresh = cv2.threshold(cnts_images.astype(np.uint8), 0, 255, 0) contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contours_imgs = return_parent_contours(contours_imgs, hierarchy) @@ -291,24 +276,21 @@ def return_contours_of_interested_region_by_min_size(region_pre_p, label, min_si def return_contours_of_interested_region_by_size(region_pre_p, label, min_area, max_area): # pixels of images are identified by 5 - if len(region_pre_p.shape) == 3: + if region_pre_p.ndim == 3: cnts_images = (region_pre_p[:, :, 0] == label) * 1 else: cnts_images = (region_pre_p[:, :] == label) * 1 - cnts_images = cnts_images.astype(np.uint8) - cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) - imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) + _, thresh = cv2.threshold(cnts_images.astype(np.uint8), 0, 255, 0) contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contours_imgs = return_parent_contours(contours_imgs, hierarchy) contours_imgs = filter_contours_area_of_image_tables( thresh, contours_imgs, hierarchy, max_area=max_area, min_area=min_area) - img_ret = np.zeros((region_pre_p.shape[0], region_pre_p.shape[1], 3)) - img_ret = cv2.fillPoly(img_ret, pts=contours_imgs, color=(1, 1, 1)) + img_ret = np.zeros((region_pre_p.shape[0], region_pre_p.shape[1])) + img_ret = cv2.fillPoly(img_ret, pts=contours_imgs, color=1) - return img_ret[:, :, 0] + return img_ret def dilate_textline_contours(all_found_textline_polygons): return [[polygon2contour(contour2polygon(contour, dilate=6)) diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index d41dda1..b8c7f3d 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -142,13 +142,12 @@ def dedup_separate_lines(img_patch, contour_text_interest, thetha, axis): rotation_matrix) def separate_lines(img_patch, contour_text_interest, thetha, x_help, y_help): - (h, w) = img_patch.shape[:2] + h, w = img_patch.shape[:2] center = (w // 2, h // 2) M = cv2.getRotationMatrix2D(center, -thetha, 1.0) x_d = M[0, 2] y_d = M[1, 2] - thetha = thetha / 180. * np.pi - rotation_matrix = np.array([[np.cos(thetha), -np.sin(thetha)], [np.sin(thetha), np.cos(thetha)]]) + rotation_matrix = M[:2, :2] contour_text_interest_copy = contour_text_interest.copy() x_cont = contour_text_interest[:, 0, 0] @@ -1302,19 +1301,16 @@ def separate_lines_new_inside_tiles(img_path, thetha): def separate_lines_vertical_cont(img_patch, contour_text_interest, thetha, box_ind, add_boxes_coor_into_textlines): kernel = np.ones((5, 5), np.uint8) - pixel = 255 + label = 255 min_area = 0 max_area = 1 - if len(img_patch.shape) == 3: - cnts_images = (img_patch[:, :, 0] == pixel) * 1 + if img_patch.ndim == 3: + cnts_images = (img_patch[:, :, 0] == label) * 1 else: - cnts_images = (img_patch[:, :] == pixel) * 1 - cnts_images = cnts_images.astype(np.uint8) - cnts_images = np.repeat(cnts_images[:, :, np.newaxis], 3, axis=2) - imgray = cv2.cvtColor(cnts_images, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) - contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + cnts_images = (img_patch[:, :] == label) * 1 + _, thresh = cv2.threshold(cnts_images.astype(np.uint8), 0, 255, 0) + contours_imgs, hierarchy = cv2.findContours(thresh.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contours_imgs = return_parent_contours(contours_imgs, hierarchy) contours_imgs = filter_contours_area_of_image_tables(thresh, @@ -1322,14 +1318,12 @@ def separate_lines_vertical_cont(img_patch, contour_text_interest, thetha, box_i max_area=max_area, min_area=min_area) cont_final = [] for i in range(len(contours_imgs)): - img_contour = np.zeros((cnts_images.shape[0], cnts_images.shape[1], 3)) - img_contour = cv2.fillPoly(img_contour, pts=[contours_imgs[i]], color=(255, 255, 255)) - img_contour = img_contour.astype(np.uint8) + img_contour = np.zeros(cnts_images.shape[:2], dtype=np.uint8) + img_contour = cv2.fillPoly(img_contour, pts=[contours_imgs[i]], color=255) img_contour = cv2.dilate(img_contour, kernel, iterations=4) - imgrayrot = cv2.cvtColor(img_contour, cv2.COLOR_BGR2GRAY) - _, threshrot = cv2.threshold(imgrayrot, 0, 255, 0) - contours_text_rot, _ = cv2.findContours(threshrot.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + _, threshrot = cv2.threshold(img_contour, 0, 255, 0) + contours_text_rot, _ = cv2.findContours(threshrot.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) ##contour_text_copy[:, 0, 0] = contour_text_copy[:, 0, 0] - box_ind[ ##0] @@ -1344,8 +1338,7 @@ def separate_lines_vertical_cont(img_patch, contour_text_interest, thetha, box_i def textline_contours_postprocessing(textline_mask, slope, contour_text_interest, box_ind, add_boxes_coor_into_textlines=False): - textline_mask = np.repeat(textline_mask[:, :, np.newaxis], 3, axis=2) * 255 - textline_mask = textline_mask.astype(np.uint8) + textline_mask = textline_mask * 255 kernel = np.ones((5, 5), np.uint8) textline_mask = cv2.morphologyEx(textline_mask, cv2.MORPH_OPEN, kernel) textline_mask = cv2.morphologyEx(textline_mask, cv2.MORPH_CLOSE, kernel) @@ -1356,12 +1349,11 @@ def textline_contours_postprocessing(textline_mask, slope, y_help = 2 textline_mask_help = np.zeros((textline_mask.shape[0] + int(2 * y_help), - textline_mask.shape[1] + int(2 * x_help), 3)) + textline_mask.shape[1] + int(2 * x_help))) textline_mask_help[y_help : y_help + textline_mask.shape[0], - x_help : x_help + textline_mask.shape[1], :] = np.copy(textline_mask[:, :, :]) + x_help : x_help + textline_mask.shape[1]] = np.copy(textline_mask[:, :]) dst = rotate_image(textline_mask_help, slope) - dst = dst[:, :, 0] dst[dst != 0] = 1 # if np.abs(slope)>.5 and textline_mask.shape[0]/float(textline_mask.shape[1])>3: @@ -1372,21 +1364,18 @@ def textline_contours_postprocessing(textline_mask, slope, contour_text_copy[:, 0, 0] = contour_text_copy[:, 0, 0] - box_ind[0] contour_text_copy[:, 0, 1] = contour_text_copy[:, 0, 1] - box_ind[1] - img_contour = np.zeros((box_ind[3], box_ind[2], 3)) - img_contour = cv2.fillPoly(img_contour, pts=[contour_text_copy], color=(255, 255, 255)) + img_contour = np.zeros((box_ind[3], box_ind[2])) + img_contour = cv2.fillPoly(img_contour, pts=[contour_text_copy], color=255) img_contour_help = np.zeros((img_contour.shape[0] + int(2 * y_help), - img_contour.shape[1] + int(2 * x_help), 3)) + img_contour.shape[1] + int(2 * x_help))) img_contour_help[y_help : y_help + img_contour.shape[0], - x_help : x_help + img_contour.shape[1], :] = np.copy(img_contour[:, :, :]) + x_help : x_help + img_contour.shape[1]] = np.copy(img_contour[:, :]) img_contour_rot = rotate_image(img_contour_help, slope) - img_contour_rot = img_contour_rot.astype(np.uint8) - # dst_help = dst_help.astype(np.uint8) - imgrayrot = cv2.cvtColor(img_contour_rot, cv2.COLOR_BGR2GRAY) - _, threshrot = cv2.threshold(imgrayrot, 0, 255, 0) - contours_text_rot, _ = cv2.findContours(threshrot.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + _, threshrot = cv2.threshold(img_contour_rot, 0, 255, 0) + contours_text_rot, _ = cv2.findContours(threshrot.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) len_con_text_rot = [len(contours_text_rot[ib]) for ib in range(len(contours_text_rot))] ind_big_con = np.argmax(len_con_text_rot) From 6e57ab3741f5532a30dd2925b423cd40871ab010 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Mon, 6 Oct 2025 16:53:59 +0200 Subject: [PATCH 330/492] textline_contours_postprocessing: do not catch arbitrary exceptions --- src/eynollah/utils/separate_lines.py | 68 ++++++++++++++-------------- 1 file changed, 33 insertions(+), 35 deletions(-) diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index b8c7f3d..3bfc903 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1344,51 +1344,49 @@ def textline_contours_postprocessing(textline_mask, slope, textline_mask = cv2.morphologyEx(textline_mask, cv2.MORPH_CLOSE, kernel) textline_mask = cv2.erode(textline_mask, kernel, iterations=2) # textline_mask = cv2.erode(textline_mask, kernel, iterations=1) - try: - x_help = 30 - y_help = 2 - textline_mask_help = np.zeros((textline_mask.shape[0] + int(2 * y_help), - textline_mask.shape[1] + int(2 * x_help))) - textline_mask_help[y_help : y_help + textline_mask.shape[0], - x_help : x_help + textline_mask.shape[1]] = np.copy(textline_mask[:, :]) + x_help = 30 + y_help = 2 - dst = rotate_image(textline_mask_help, slope) - dst[dst != 0] = 1 + textline_mask_help = np.zeros((textline_mask.shape[0] + int(2 * y_help), + textline_mask.shape[1] + int(2 * x_help))) + textline_mask_help[y_help : y_help + textline_mask.shape[0], + x_help : x_help + textline_mask.shape[1]] = np.copy(textline_mask[:, :]) - # if np.abs(slope)>.5 and textline_mask.shape[0]/float(textline_mask.shape[1])>3: - # plt.imshow(dst) - # plt.show() + dst = rotate_image(textline_mask_help, slope) + dst[dst != 0] = 1 - contour_text_copy = contour_text_interest.copy() - contour_text_copy[:, 0, 0] = contour_text_copy[:, 0, 0] - box_ind[0] - contour_text_copy[:, 0, 1] = contour_text_copy[:, 0, 1] - box_ind[1] + # if np.abs(slope)>.5 and textline_mask.shape[0]/float(textline_mask.shape[1])>3: + # plt.imshow(dst) + # plt.show() - img_contour = np.zeros((box_ind[3], box_ind[2])) - img_contour = cv2.fillPoly(img_contour, pts=[contour_text_copy], color=255) + contour_text_copy = contour_text_interest.copy() + contour_text_copy[:, 0, 0] = contour_text_copy[:, 0, 0] - box_ind[0] + contour_text_copy[:, 0, 1] = contour_text_copy[:, 0, 1] - box_ind[1] - img_contour_help = np.zeros((img_contour.shape[0] + int(2 * y_help), - img_contour.shape[1] + int(2 * x_help))) - img_contour_help[y_help : y_help + img_contour.shape[0], - x_help : x_help + img_contour.shape[1]] = np.copy(img_contour[:, :]) + img_contour = np.zeros((box_ind[3], box_ind[2])) + img_contour = cv2.fillPoly(img_contour, pts=[contour_text_copy], color=255) - img_contour_rot = rotate_image(img_contour_help, slope) + img_contour_help = np.zeros((img_contour.shape[0] + int(2 * y_help), + img_contour.shape[1] + int(2 * x_help))) + img_contour_help[y_help : y_help + img_contour.shape[0], + x_help : x_help + img_contour.shape[1]] = np.copy(img_contour[:, :]) - _, threshrot = cv2.threshold(img_contour_rot, 0, 255, 0) - contours_text_rot, _ = cv2.findContours(threshrot.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + img_contour_rot = rotate_image(img_contour_help, slope) - len_con_text_rot = [len(contours_text_rot[ib]) for ib in range(len(contours_text_rot))] - ind_big_con = np.argmax(len_con_text_rot) + _, threshrot = cv2.threshold(img_contour_rot, 0, 255, 0) + contours_text_rot, _ = cv2.findContours(threshrot.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - if abs(slope) > 45: - _, contours_rotated_clean = separate_lines_vertical_cont( - textline_mask, contours_text_rot[ind_big_con], box_ind, slope, - add_boxes_coor_into_textlines=add_boxes_coor_into_textlines) - else: - _, contours_rotated_clean = separate_lines( - dst, contours_text_rot[ind_big_con], slope, x_help, y_help) - except: - contours_rotated_clean = [] + len_con_text_rot = [len(contours_text_rot[ib]) for ib in range(len(contours_text_rot))] + ind_big_con = np.argmax(len_con_text_rot) + + if abs(slope) > 45: + _, contours_rotated_clean = separate_lines_vertical_cont( + textline_mask, contours_text_rot[ind_big_con], box_ind, slope, + add_boxes_coor_into_textlines=add_boxes_coor_into_textlines) + else: + _, contours_rotated_clean = separate_lines( + dst, contours_text_rot[ind_big_con], slope, x_help, y_help) return contours_rotated_clean From 595ed02743afc3ab8359de5f6feb0ca680546599 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Mon, 6 Oct 2025 17:24:50 +0200 Subject: [PATCH 331/492] run_single: simplify; allow running TrOCR in non-fl mode, too - refactor final `self.full_layout` conditional, removing copied code - allow running `self.ocr` and `self.tr` branch in both cases (non/fl) - when running TrOCR, use model / processor / device initialised during init (instead of ad-hoc loading) --- src/eynollah/eynollah.py | 277 ++++++++++++++++----------------------- 1 file changed, 112 insertions(+), 165 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 834ecf3..079cf8c 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -379,9 +379,14 @@ class Eynollah: self.model_reading_order = self.our_load_model(self.model_reading_order_dir) if self.ocr and self.tr: self.model_ocr = VisionEncoderDecoderModel.from_pretrained(self.model_ocr_dir) - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - #("microsoft/trocr-base-printed")#("microsoft/trocr-base-handwritten") - self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten") + if torch.cuda.is_available(): + self.logger.info("Using GPU acceleration") + self.device = torch.device("cuda:0") + else: + self.logger.info("Using CPU processing") + self.device = torch.device("cpu") + #self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten") + self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed") elif self.ocr and not self.tr: model_ocr = load_model(self.model_ocr_dir , compile=False) @@ -4805,12 +4810,13 @@ class Eynollah: slopes_marginals, mid_point_of_page_width) #print(len(polygons_of_marginals), len(ordered_left_marginals), len(ordered_right_marginals), 'marginals ordred') + if np.abs(slope_deskew) >= SLOPE_THRESHOLD: + contours_only_text_parent_d_ordered = self.return_list_of_contours_with_desired_order( + contours_only_text_parent_d_ordered, index_by_text_par_con) + else: + contours_only_text_parent_d_ordered = None + if self.full_layout: - if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - contours_only_text_parent_d_ordered = self.return_list_of_contours_with_desired_order( - contours_only_text_parent_d_ordered, index_by_text_par_con) - else: - contours_only_text_parent_d_ordered = None if self.light_version: fun = check_any_text_region_in_model_one_is_main_or_header_light else: @@ -4869,44 +4875,43 @@ class Eynollah: splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, num_col_classifier, erosion_hurts, self.tables, self.right2left, logger=self.logger) + else: + contours_only_text_parent_h = [] + contours_only_text_parent_h_d_ordered = [] if self.plotter: self.plotter.write_images_into_directory(polygons_of_images, image_page) t_order = time.time() - if self.full_layout: - self.logger.info("Step 4/5: Reading Order Detection") - - if self.reading_order_machine_based: - self.logger.info("Using machine-based detection") - if self.right2left: - self.logger.info("Right-to-left mode enabled") - if self.headers_off: - self.logger.info("Headers ignored in reading order") + #if self.full_layout: + self.logger.info("Step 4/5: Reading Order Detection") - if self.reading_order_machine_based: - tror = time.time() - order_text_new, id_of_texts_tot = self.do_order_of_regions_with_model( - contours_only_text_parent, contours_only_text_parent_h, text_regions_p) + if self.reading_order_machine_based: + self.logger.info("Using machine-based detection") + if self.right2left: + self.logger.info("Right-to-left mode enabled") + if self.headers_off: + self.logger.info("Headers ignored in reading order") + + if self.reading_order_machine_based: + order_text_new, id_of_texts_tot = self.do_order_of_regions_with_model( + contours_only_text_parent, contours_only_text_parent_h, text_regions_p) + else: + if np.abs(slope_deskew) < SLOPE_THRESHOLD: + order_text_new, id_of_texts_tot = self.do_order_of_regions( + contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot) else: - if np.abs(slope_deskew) < SLOPE_THRESHOLD: - order_text_new, id_of_texts_tot = self.do_order_of_regions( - contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot) - else: - order_text_new, id_of_texts_tot = self.do_order_of_regions( - contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered, - boxes_d, textline_mask_tot_d) - self.logger.info(f"Detection of reading order took {time.time() - t_order:.1f}s") + order_text_new, id_of_texts_tot = self.do_order_of_regions( + contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered, + boxes_d, textline_mask_tot_d) + self.logger.info(f"Detection of reading order took {time.time() - t_order:.1f}s") - if self.ocr and not self.tr: - self.logger.info("Step 4.5/5: OCR Processing") - - if torch.cuda.is_available(): - self.logger.info("Using GPU acceleration") - else: - self.logger.info("Using CPU processing") - + if self.ocr: + self.logger.info("Step 4.5/5: OCR Processing") + + if not self.tr: gc.collect() + if len(all_found_textline_polygons)>0: ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons, self.prediction_model, @@ -4941,15 +4946,68 @@ class Eynollah: self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) else: ocr_all_textlines_drop = None + else: - ocr_all_textlines = None - ocr_all_textlines_marginals_left = None - ocr_all_textlines_marginals_right = None - ocr_all_textlines_h = None - ocr_all_textlines_drop = None + if self.light_version: + self.logger.info("Using light version OCR") + if self.textline_light: + self.logger.info("Using light text line detection for OCR") + self.logger.info("Processing text lines...") + + self.device.reset() + gc.collect() + + torch.cuda.empty_cache() + self.model_ocr.to(self.device) + + ind_tot = 0 + #cv2.imwrite('./img_out.png', image_page) + ocr_all_textlines = [] + for indexing, ind_poly_first in enumerate(all_found_textline_polygons): + ocr_textline_in_textregion = [] + for indexing2, ind_poly in enumerate(ind_poly_first): + if not (self.textline_light or self.curved_line): + ind_poly = copy.deepcopy(ind_poly) + box_ind = all_box_coord[indexing] + #print(ind_poly,np.shape(ind_poly), 'ind_poly') + #print(box_ind) + ind_poly = return_textline_contour_with_added_box_coordinate(ind_poly, box_ind) + #print(ind_poly_copy) + ind_poly[ind_poly<0] = 0 + x, y, w, h = cv2.boundingRect(ind_poly) + #print(ind_poly_copy, np.shape(ind_poly_copy)) + #print(x, y, w, h, h/float(w),'ratio') + h2w_ratio = h/float(w) + mask_poly = np.zeros(image_page.shape) + if not self.light_version: + img_poly_on_img = np.copy(image_page) + else: + img_poly_on_img = np.copy(img_bin_light) + mask_poly = cv2.fillPoly(mask_poly, pts=[ind_poly], color=(1, 1, 1)) + + if self.textline_light: + mask_poly = cv2.dilate(mask_poly, KERNEL, iterations=1) + img_poly_on_img[:,:,0][mask_poly[:,:,0] ==0] = 255 + img_poly_on_img[:,:,1][mask_poly[:,:,0] ==0] = 255 + img_poly_on_img[:,:,2][mask_poly[:,:,0] ==0] = 255 + + img_croped = img_poly_on_img[y:y+h, x:x+w, :] + #cv2.imwrite('./extracted_lines/'+str(ind_tot)+'.jpg', img_croped) + text_ocr = self.return_ocr_of_textline_without_common_section( + img_croped, self.model_ocr, self.processor, self.device, w, h2w_ratio, ind_tot) + ocr_textline_in_textregion.append(text_ocr) + ind_tot = ind_tot +1 + ocr_all_textlines.append(ocr_textline_in_textregion) + else: + ocr_all_textlines = None + ocr_all_textlines_marginals_left = None + ocr_all_textlines_marginals_right = None + ocr_all_textlines_h = None + ocr_all_textlines_drop = None - self.logger.info("Step 5/5: Output Generation") - + self.logger.info("Step 5/5: Output Generation") + + if self.full_layout: pcgts = self.writer.build_pagexml_full_layout( contours_only_text_parent, contours_only_text_parent_h, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, @@ -4962,129 +5020,18 @@ class Eynollah: ocr_all_textlines_marginals_left, ocr_all_textlines_marginals_right, ocr_all_textlines_drop, conf_contours_textregions, conf_contours_textregions_h) - - return pcgts - - contours_only_text_parent_h = [] - self.logger.info("Step 4/5: Reading Order Detection") - - if self.reading_order_machine_based: - self.logger.info("Using machine-based detection") - if self.right2left: - self.logger.info("Right-to-left mode enabled") - if self.headers_off: - self.logger.info("Headers ignored in reading order") - - if self.reading_order_machine_based: - order_text_new, id_of_texts_tot = self.do_order_of_regions_with_model( - contours_only_text_parent, contours_only_text_parent_h, text_regions_p) else: - if np.abs(slope_deskew) < SLOPE_THRESHOLD: - order_text_new, id_of_texts_tot = self.do_order_of_regions( - contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot) - else: - contours_only_text_parent_d_ordered = self.return_list_of_contours_with_desired_order( - contours_only_text_parent_d_ordered, index_by_text_par_con) - order_text_new, id_of_texts_tot = self.do_order_of_regions( - contours_only_text_parent_d_ordered, contours_only_text_parent_h, boxes_d, textline_mask_tot_d) - - if self.ocr and self.tr: - self.logger.info("Step 4.5/5: OCR Processing") - if torch.cuda.is_available(): - self.logger.info("Using GPU acceleration") - else: - self.logger.info("Using CPU processing") - if self.light_version: - self.logger.info("Using light version OCR") - if self.textline_light: - self.logger.info("Using light text line detection for OCR") - self.logger.info("Processing text lines...") + pcgts = self.writer.build_pagexml_no_full_layout( + txt_con_org, page_coord, order_text_new, id_of_texts_tot, + all_found_textline_polygons, all_box_coord, polygons_of_images, + polygons_of_marginals_left, polygons_of_marginals_right, + all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, + all_box_coord_marginals_left, all_box_coord_marginals_right, + slopes, slopes_marginals_left, slopes_marginals_right, + cont_page, polygons_seplines, contours_tables, ocr_all_textlines, + ocr_all_textlines_marginals_left, ocr_all_textlines_marginals_right, + conf_contours_textregions) - device = cuda.get_current_device() - device.reset() - gc.collect() - model_ocr = VisionEncoderDecoderModel.from_pretrained(self.model_ocr_dir) - device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed") - torch.cuda.empty_cache() - model_ocr.to(device) - - ind_tot = 0 - #cv2.imwrite('./img_out.png', image_page) - ocr_all_textlines = [] - for indexing, ind_poly_first in enumerate(all_found_textline_polygons): - ocr_textline_in_textregion = [] - for indexing2, ind_poly in enumerate(ind_poly_first): - if not (self.textline_light or self.curved_line): - ind_poly = copy.deepcopy(ind_poly) - box_ind = all_box_coord[indexing] - #print(ind_poly,np.shape(ind_poly), 'ind_poly') - #print(box_ind) - ind_poly = return_textline_contour_with_added_box_coordinate(ind_poly, box_ind) - #print(ind_poly_copy) - ind_poly[ind_poly<0] = 0 - x, y, w, h = cv2.boundingRect(ind_poly) - #print(ind_poly_copy, np.shape(ind_poly_copy)) - #print(x, y, w, h, h/float(w),'ratio') - h2w_ratio = h/float(w) - mask_poly = np.zeros(image_page.shape) - if not self.light_version: - img_poly_on_img = np.copy(image_page) - else: - img_poly_on_img = np.copy(img_bin_light) - mask_poly = cv2.fillPoly(mask_poly, pts=[ind_poly], color=(1, 1, 1)) - - if self.textline_light: - mask_poly = cv2.dilate(mask_poly, KERNEL, iterations=1) - img_poly_on_img[:,:,0][mask_poly[:,:,0] ==0] = 255 - img_poly_on_img[:,:,1][mask_poly[:,:,0] ==0] = 255 - img_poly_on_img[:,:,2][mask_poly[:,:,0] ==0] = 255 - - img_croped = img_poly_on_img[y:y+h, x:x+w, :] - #cv2.imwrite('./extracted_lines/'+str(ind_tot)+'.jpg', img_croped) - text_ocr = self.return_ocr_of_textline_without_common_section( - img_croped, model_ocr, processor, device, w, h2w_ratio, ind_tot) - ocr_textline_in_textregion.append(text_ocr) - ind_tot = ind_tot +1 - ocr_all_textlines.append(ocr_textline_in_textregion) - - elif self.ocr and not self.tr: - gc.collect() - if len(all_found_textline_polygons)>0: - ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines( - image_page, all_found_textline_polygons, self.prediction_model, - self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) - - if all_found_textline_polygons_marginals_left and len(all_found_textline_polygons_marginals_left)>0: - ocr_all_textlines_marginals_left = return_rnn_cnn_ocr_of_given_textlines( - image_page, all_found_textline_polygons_marginals_left, self.prediction_model, - self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) - - if all_found_textline_polygons_marginals_right and len(all_found_textline_polygons_marginals_right)>0: - ocr_all_textlines_marginals_right = return_rnn_cnn_ocr_of_given_textlines( - image_page, all_found_textline_polygons_marginals_right, self.prediction_model, - self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) - - else: - ocr_all_textlines = None - ocr_all_textlines_marginals_left = None - ocr_all_textlines_marginals_right = None - self.logger.info(f"Detection of reading order took {time.time() - t_order:.1f}s") - - self.logger.info("Step 5/5: Output Generation") - self.logger.info("Generating PAGE-XML output") - - pcgts = self.writer.build_pagexml_no_full_layout( - txt_con_org, page_coord, order_text_new, id_of_texts_tot, - all_found_textline_polygons, all_box_coord, polygons_of_images, - polygons_of_marginals_left, polygons_of_marginals_right, - all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, - all_box_coord_marginals_left, all_box_coord_marginals_right, - slopes, slopes_marginals_left, slopes_marginals_right, - cont_page, polygons_seplines, contours_tables, ocr_all_textlines, - ocr_all_textlines_marginals_left, ocr_all_textlines_marginals_right, - conf_contours_textregions) - return pcgts From a1904fa660e7cb79ba9b4d8fc7df5befc41072f1 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Mon, 6 Oct 2025 17:44:12 +0200 Subject: [PATCH 332/492] tests: cover layout with OCR in various modes --- tests/test_run.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/test_run.py b/tests/test_run.py index 59e5099..d69f021 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -24,14 +24,18 @@ MODELS_BIN = environ.get('MODELS_BIN', str(testdir.joinpath('..', 'default-2021- "options", [ [], # defaults - ["--allow_scaling", "--curved-line"], + #["--allow_scaling", "--curved-line"], ["--allow_scaling", "--curved-line", "--full-layout"], ["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based"], ["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based", "--textline_light", "--light_version"], # -ep ... # -eoi ... - # --do_ocr + ["--do_ocr"], + ["--do_ocr", "--light_version", "--textline_light"], + ["--do_ocr", "--transformer_ocr"], + #["--do_ocr", "--transformer_ocr", "--light_version", "--textline_light"], + ["--do_ocr", "--transformer_ocr", "--light_version", "--textline_light", "--full-layout"], # --skip_layout_and_reading_order ], ids=str) def test_run_eynollah_layout_filename(tmp_path, pytestconfig, caplog, options): From 23535998f7532942d481f3729682969e19c228b6 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Mon, 6 Oct 2025 21:27:21 +0200 Subject: [PATCH 333/492] tests: symlink OCR models into layout model directory (so layout with OCR options works with our split model packages) --- Makefile | 19 +++++++++++-------- tests/test_run.py | 3 ++- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index 357aa47..5d190b2 100644 --- a/Makefile +++ b/Makefile @@ -90,26 +90,29 @@ deps-test: $(OCR_MODELNAME) endif deps-test: $(BIN_MODELNAME) $(SEG_MODELNAME) $(PIP) install -r requirements-test.txt +ifeq (OCR,$(findstring OCR, $(EXTRAS))) + ln -s $(OCR_MODELNAME)/* $(SEG_MODELNAME)/ +endif smoke-test: TMPDIR != mktemp -d smoke-test: tests/resources/kant_aufklaerung_1784_0020.tif # layout analysis: - eynollah layout -i $< -o $(TMPDIR) -m $(CURDIR)/models_layout_v0_5_0 + eynollah layout -i $< -o $(TMPDIR) -m $(CURDIR)/$(SEG_MODELNAME) fgrep -q http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15 $(TMPDIR)/$(basename $( Date: Tue, 7 Oct 2025 00:54:25 +0200 Subject: [PATCH 334/492] CI: run deps-test with OCR extra so symlink rule fires --- .github/workflows/test-eynollah.yml | 2 +- Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test-eynollah.yml b/.github/workflows/test-eynollah.yml index 9d5b2c8..7c3f5ae 100644 --- a/.github/workflows/test-eynollah.yml +++ b/.github/workflows/test-eynollah.yml @@ -65,7 +65,7 @@ jobs: run: | python -m pip install --upgrade pip make install-dev EXTRAS=OCR,plotting - make deps-test + make deps-test EXTRAS=OCR,plotting - name: Test with pytest run: make coverage PYTEST_ARGS="-vv --junitxml=pytest.xml" - name: Get coverage results diff --git a/Makefile b/Makefile index 5d190b2..618b1f9 100644 --- a/Makefile +++ b/Makefile @@ -91,7 +91,7 @@ endif deps-test: $(BIN_MODELNAME) $(SEG_MODELNAME) $(PIP) install -r requirements-test.txt ifeq (OCR,$(findstring OCR, $(EXTRAS))) - ln -s $(OCR_MODELNAME)/* $(SEG_MODELNAME)/ + ln -rs $(OCR_MODELNAME)/* $(SEG_MODELNAME)/ endif smoke-test: TMPDIR != mktemp -d From d53f829dfd0b26e4738915b24ffe4256796c6eb4 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 7 Oct 2025 22:06:57 +0200 Subject: [PATCH 335/492] filter_contours_inside_a_bigger_one: fix edge case in 81827c29 --- src/eynollah/eynollah.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 079cf8c..271779f 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4068,7 +4068,9 @@ class Eynollah: for textregion_index_to_del in textline_in_textregion_index_to_del: contours[textregion_index_to_del] = list(np.delete( contours[textregion_index_to_del], - textline_in_textregion_index_to_del[textregion_index_to_del])) + textline_in_textregion_index_to_del[textregion_index_to_del], + # needed so numpy does not flatten the entire result when 0 left + axis=0)) return contours From 2e907875c12b4f22c650c109558917479e0ec3ae Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 7 Oct 2025 22:32:06 +0200 Subject: [PATCH 336/492] get_text_region_boxes_by_given_contours: simplify --- src/eynollah/eynollah.py | 4 ++-- src/eynollah/utils/contour.py | 10 ++-------- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 271779f..06be910 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4726,8 +4726,8 @@ class Eynollah: txt_con_org , conf_contours_textregions = get_textregion_contours_in_org_image_light( contours_only_text_parent, self.image, confidence_matrix) #print("text region early 4 in %.1fs", time.time() - t0) - boxes_text, _ = get_text_region_boxes_by_given_contours(contours_only_text_parent) - boxes_marginals, _ = get_text_region_boxes_by_given_contours(polygons_of_marginals) + boxes_text = get_text_region_boxes_by_given_contours(contours_only_text_parent) + boxes_marginals = get_text_region_boxes_by_given_contours(polygons_of_marginals) #print("text region early 5 in %.1fs", time.time() - t0) ## birdan sora chock chakir if not self.curved_line: diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index 22a6f50..fb4bbd0 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -36,14 +36,8 @@ def find_contours_mean_y_diff(contours_main): return np.mean(np.diff(np.sort(np.array(cy_main)))) def get_text_region_boxes_by_given_contours(contours): - boxes = [] - contours_new = [] - for jj in range(len(contours)): - box = cv2.boundingRect(contours[jj]) - boxes.append(box) - contours_new.append(contours[jj]) - - return boxes, contours_new + return [cv2.boundingRect(contour) + for contour in contours] def filter_contours_area_of_image(image, contours, hierarchy, max_area=1.0, min_area=0.0, dilate=0): found_polygons_early = [] From dfdc70537530b55f77b5232ae3cfa1fc8357eed0 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 7 Oct 2025 22:33:06 +0200 Subject: [PATCH 337/492] do_work_of_slopes: rm unused old variant --- src/eynollah/eynollah.py | 38 -------------------------------------- 1 file changed, 38 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 06be910..2431a3b 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -108,7 +108,6 @@ from .utils.utils_ocr import ( get_contours_and_bounding_boxes ) from .utils.separate_lines import ( - textline_contours_postprocessing, separate_lines_new2, return_deskew_slop, do_work_of_slopes_new, @@ -2062,43 +2061,6 @@ class Eynollah: (prediction_textline_longshot_true_size[:, :, 0]==1).astype(np.uint8)) - def do_work_of_slopes(self, q, poly, box_sub, boxes_per_process, textline_mask_tot, contours_per_process): - self.logger.debug('enter do_work_of_slopes') - slope_biggest = 0 - slopes_sub = [] - boxes_sub_new = [] - poly_sub = [] - for mv in range(len(boxes_per_process)): - crop_img, _ = crop_image_inside_box(boxes_per_process[mv], textline_mask_tot) - crop_img = cv2.erode(crop_img, KERNEL, iterations=2) - try: - textline_con, hierarchy = return_contours_of_image(crop_img) - textline_con_fil = filter_contours_area_of_image(crop_img, textline_con, hierarchy, - max_area=1, min_area=0.0008) - y_diff_mean = find_contours_mean_y_diff(textline_con_fil) - sigma_des = max(1, int(y_diff_mean * (4.0 / 40.0))) - crop_img[crop_img > 0] = 1 - slope_corresponding_textregion = return_deskew_slop(crop_img, sigma_des, - logger=self.logger, plotter=self.plotter) - except Exception as why: - self.logger.error(why) - slope_corresponding_textregion = MAX_SLOPE - - if slope_corresponding_textregion == MAX_SLOPE: - slope_corresponding_textregion = slope_biggest - slopes_sub.append(slope_corresponding_textregion) - - cnt_clean_rot = textline_contours_postprocessing( - crop_img, slope_corresponding_textregion, contours_per_process[mv], boxes_per_process[mv]) - - poly_sub.append(cnt_clean_rot) - boxes_sub_new.append(boxes_per_process[mv]) - - q.put(slopes_sub) - poly.put(poly_sub) - box_sub.put(boxes_sub_new) - self.logger.debug('exit do_work_of_slopes') - def get_regions_light_v_extract_only_images(self,img,is_image_enhanced, num_col_classifier): self.logger.debug("enter get_regions_extract_images_only") erosion_hurts = False From 0a80cd5dffc7e5c28f41330da8d2f1255ac66e88 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 7 Oct 2025 22:37:05 +0200 Subject: [PATCH 338/492] avoid unnecessary 3-channel conversions: for tables, too --- src/eynollah/eynollah.py | 155 ++++++++++++++++----------------------- 1 file changed, 65 insertions(+), 90 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 2431a3b..70a8a17 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -930,10 +930,8 @@ class Eynollah: img_w = img.shape[1] prediction_true = np.zeros((img_h, img_w, 3)) mask_true = np.zeros((img_h, img_w)) - nxf = img_w / float(width_mid) - nyf = img_h / float(height_mid) - nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf) - nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf) + nxf = math.ceil(img_w / float(width_mid)) + nyf = math.ceil(img_h / float(height_mid)) list_i_s = [] list_j_s = [] @@ -946,18 +944,10 @@ class Eynollah: img_patch = np.zeros((n_batch_inference, img_height_model, img_width_model, 3)) for i in range(nxf): for j in range(nyf): - if i == 0: - index_x_d = i * width_mid - index_x_u = index_x_d + img_width_model - else: - index_x_d = i * width_mid - index_x_u = index_x_d + img_width_model - if j == 0: - index_y_d = j * height_mid - index_y_u = index_y_d + img_height_model - else: - index_y_d = j * height_mid - index_y_u = index_y_d + img_height_model + index_x_d = i * width_mid + index_x_u = index_x_d + img_width_model + index_y_d = j * height_mid + index_y_u = index_y_d + img_height_model if index_x_u > img_w: index_x_u = img_w index_x_d = img_w - img_width_model @@ -2600,23 +2590,20 @@ class Eynollah: self, layout, table_prediction_early, pixel_table, num_col_classifier): layout_org = np.copy(layout) - layout_org[:,:,0][layout_org[:,:,0]==pixel_table] = 0 - layout = (layout[:,:,0]==pixel_table)*1 - - layout = layout.astype(np.uint8) + layout_org[layout_org == pixel_table] = 0 + layout = (layout == pixel_table).astype(np.uint8) * 1 _, thresh = cv2.threshold(layout, 0, 255, 0) contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - cnt_size = np.array([cv2.contourArea(contours[j]) - for j in range(len(contours))]) + cnt_size = np.array([cv2.contourArea(cnt) for cnt in contours]) contours_new = [] - for i in range(len(contours)): - x, y, w, h = cv2.boundingRect(contours[i]) + for i, contour in enumerate(contours): + x, y, w, h = cv2.boundingRect(contour) iou = cnt_size[i] /float(w*h) *100 if iou<80: layout_contour = np.zeros(layout_org.shape[:2]) - layout_contour = cv2.fillPoly(layout_contour, pts=[contours[i]] ,color=1) + layout_contour = cv2.fillPoly(layout_contour, pts=[contour] ,color=1) layout_contour_sum = layout_contour.sum(axis=0) layout_contour_sum_diff = np.diff(layout_contour_sum) @@ -2648,26 +2635,26 @@ class Eynollah: #print(iou_in,'iou_in_in1') if iou_in>30: - layout_org= cv2.fillPoly(layout_org, pts=[contours_sep[ji]], color=3 * (pixel_table,)) + layout_org = cv2.fillPoly(layout_org, pts=[contours_sep[ji]], color=pixel_table) else: pass else: - layout_org= cv2.fillPoly(layout_org, pts=[contours_sep[ji]], color=3 * (pixel_table,)) + layout_org= cv2.fillPoly(layout_org, pts=[contours_sep[ji]], color=pixel_table) else: - contours_new.append(contours[i]) + contours_new.append(contour) if num_col_classifier>=2: - only_recent_contour_image = np.zeros((layout.shape[0],layout.shape[1])) - only_recent_contour_image= cv2.fillPoly(only_recent_contour_image,pts=[contours[i]] ,color=(1,1,1)) + only_recent_contour_image = np.zeros(layout.shape[:2]) + only_recent_contour_image = cv2.fillPoly(only_recent_contour_image, pts=[contour],color=1) table_pixels_masked_from_early_pre = only_recent_contour_image * table_prediction_early iou_in = 100. * table_pixels_masked_from_early_pre.sum() / only_recent_contour_image.sum() #print(iou_in,'iou_in') if iou_in>30: - layout_org= cv2.fillPoly(layout_org, pts=[contours[i]], color=3 * (pixel_table,)) + layout_org = cv2.fillPoly(layout_org, pts=[contour], color=pixel_table) else: pass else: - layout_org= cv2.fillPoly(layout_org, pts=[contours[i]], color=3 * (pixel_table,)) + layout_org = cv2.fillPoly(layout_org, pts=[contour], color=pixel_table) return layout_org, contours_new @@ -2714,16 +2701,10 @@ class Eynollah: pass boxes = np.array(boxes, dtype=int) # to be on the safe side - img_comm_e = np.zeros(image_revised_1.shape) - img_comm = np.repeat(img_comm_e[:, :, np.newaxis], 3, axis=2) - + img_comm = np.zeros(image_revised_1.shape, dtype=np.uint8) for indiv in np.unique(image_revised_1): - image_col=(image_revised_1==indiv)*255 - img_comm_in=np.repeat(image_col[:, :, np.newaxis], 3, axis=2) - img_comm_in=img_comm_in.astype(np.uint8) - - imgray = cv2.cvtColor(img_comm_in, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) + image_col = (image_revised_1 == indiv).astype(np.uint8) * 255 + _, thresh = cv2.threshold(image_col, 0, 255, 0) contours,hirarchy=cv2.findContours(thresh.copy(), cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) if indiv==pixel_table: @@ -2733,35 +2714,27 @@ class Eynollah: main_contours = filter_contours_area_of_image_tables(thresh, contours, hirarchy, max_area=1, min_area=min_area) - img_comm = cv2.fillPoly(img_comm, pts = main_contours, color = (indiv, indiv, indiv)) - img_comm = img_comm.astype(np.uint8) + img_comm = cv2.fillPoly(img_comm, pts=main_contours, color=indiv) if not self.isNaN(slope_mean_hor): - image_revised_last = np.zeros((image_regions_eraly_p.shape[0], image_regions_eraly_p.shape[1],3)) + image_revised_last = np.zeros(image_regions_eraly_p.shape[:2]) for i in range(len(boxes)): box_ys = slice(*boxes[i][2:4]) box_xs = slice(*boxes[i][0:2]) image_box = img_comm[box_ys, box_xs] try: - image_box_tabels_1=(image_box[:,:,0]==pixel_table)*1 + image_box_tabels_1 = (image_box == pixel_table) * 1 contours_tab,_=return_contours_of_image(image_box_tabels_1) contours_tab=filter_contours_area_of_image_tables(image_box_tabels_1,contours_tab,_,1,0.003) - image_box_tabels_1=(image_box[:,:,0]==pixel_line)*1 + image_box_tabels_1 = (image_box == pixel_line).astype(np.uint8) * 1 + image_box_tabels_and_m_text = ( (image_box == pixel_table) | + (image_box == 1) ).astype(np.uint8) * 1 - image_box_tabels_and_m_text=( (image_box[:,:,0]==pixel_table) | (image_box[:,:,0]==1) )*1 - image_box_tabels_and_m_text=image_box_tabels_and_m_text.astype(np.uint8) + image_box_tabels_1 = cv2.dilate(image_box_tabels_1, KERNEL, iterations=5) - image_box_tabels_1=image_box_tabels_1.astype(np.uint8) - image_box_tabels_1 = cv2.dilate(image_box_tabels_1,KERNEL,iterations = 5) - - contours_table_m_text,_=return_contours_of_image(image_box_tabels_and_m_text) - image_box_tabels=np.repeat(image_box_tabels_1[:, :, np.newaxis], 3, axis=2) - - image_box_tabels=image_box_tabels.astype(np.uint8) - imgray = cv2.cvtColor(image_box_tabels, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(imgray, 0, 255, 0) - - contours_line,hierachy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + contours_table_m_text, _ = return_contours_of_image(image_box_tabels_and_m_text) + _, thresh = cv2.threshold(image_box_tabels_1, 0, 255, 0) + contours_line, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) y_min_main_line ,y_max_main_line=find_features_of_contours(contours_line) y_min_main_tab ,y_max_main_tab=find_features_of_contours(contours_tab) @@ -2793,18 +2766,20 @@ class Eynollah: y_max_main_tab[i_t] < y_min_main_line[i_l] and y_min_main_tab[i_t] < y_min_main_line[i_l]): pass - elif np.abs(y_max_main_line[i_l]-y_min_main_line[i_l])<100: + elif abs(y_max_main_line[i_l] - y_min_main_line[i_l]) < 100: pass else: - y_up_tab.append(np.min([y_min_main_line[i_l], y_min_main_tab[i_t] ]) ) - y_down_tab.append( np.max([ y_max_main_line[i_l],y_max_main_tab[i_t] ]) ) + y_up_tab.append(min([y_min_main_line[i_l], + y_min_main_tab[i_t]])) + y_down_tab.append(max([y_max_main_line[i_l], + y_max_main_tab[i_t]])) if len(y_up_tab)==0: y_up_tabs.append(y_min_main_tab[i_t]) y_down_tabs.append(y_max_main_tab[i_t]) else: - y_up_tabs.append(np.min(y_up_tab)) - y_down_tabs.append(np.max(y_down_tab)) + y_up_tabs.append(min(y_up_tab)) + y_down_tabs.append(max(y_down_tab)) else: y_down_tabs=[] y_up_tabs=[] @@ -2814,7 +2789,7 @@ class Eynollah: y_up_tabs=[] for ii in range(len(y_up_tabs)): - image_box[y_up_tabs[ii]:y_down_tabs[ii],:,0]=pixel_table + image_box[y_up_tabs[ii]:y_down_tabs[ii]] = pixel_table image_revised_last[box_ys, box_xs] = image_box else: @@ -2825,14 +2800,14 @@ class Eynollah: image_revised_last[box_ys, box_xs] = image_box if num_col_classifier==1: - img_tables_col_1 = (image_revised_last[:,:,0] == pixel_table).astype(np.uint8) + img_tables_col_1 = (image_revised_last == pixel_table).astype(np.uint8) contours_table_col1, _ = return_contours_of_image(img_tables_col_1) _,_ ,_ , _, y_min_tab_col1 ,y_max_tab_col1, _= find_new_features_of_contours(contours_table_col1) if len(y_min_tab_col1)>0: for ijv in range(len(y_min_tab_col1)): - image_revised_last[int(y_min_tab_col1[ijv]):int(y_max_tab_col1[ijv]),:,:]=pixel_table + image_revised_last[int(y_min_tab_col1[ijv]):int(y_max_tab_col1[ijv])] = pixel_table return image_revised_last def get_tables_from_model(self, img, num_col_classifier): @@ -3200,7 +3175,7 @@ class Eynollah: pass else: text_regions_p_tables = np.copy(text_regions_p) - text_regions_p_tables[:,:][(table_prediction[:,:] == 1)] = 10 + text_regions_p_tables[(table_prediction == 1)] = 10 pixel_line = 3 img_revised_tab2 = self.add_tables_heuristic_to_layout( text_regions_p_tables, boxes, 0, splitter_y_new, peaks_neg_tot_tables, text_regions_p_tables, @@ -3221,8 +3196,8 @@ class Eynollah: pass else: text_regions_p_tables = np.copy(text_regions_p_1_n) - text_regions_p_tables =np.round(text_regions_p_tables) - text_regions_p_tables[:,:][(text_regions_p_tables[:,:] != 3) & (table_prediction_n[:,:] == 1)] = 10 + text_regions_p_tables = np.round(text_regions_p_tables) + text_regions_p_tables[(text_regions_p_tables != 3) & (table_prediction_n == 1)] = 10 pixel_line = 3 img_revised_tab2 = self.add_tables_heuristic_to_layout( @@ -3242,21 +3217,21 @@ class Eynollah: if self.tables: if self.light_version: - text_regions_p[:,:][table_prediction[:,:]==1] = 10 - img_revised_tab=text_regions_p[:,:] + text_regions_p[table_prediction == 1] = 10 + img_revised_tab = text_regions_p[:,:] else: if np.abs(slope_deskew) < SLOPE_THRESHOLD: - img_revised_tab = np.copy(img_revised_tab2[:,:,0]) - img_revised_tab[:,:][(text_regions_p[:,:] == 1) & (img_revised_tab[:,:] != 10)] = 1 + img_revised_tab = np.copy(img_revised_tab2) + img_revised_tab[(text_regions_p == 1) & (img_revised_tab != 10)] = 1 else: - img_revised_tab = np.copy(text_regions_p[:,:]) - img_revised_tab[:,:][img_revised_tab[:,:] == 10] = 0 - img_revised_tab[:,:][img_revised_tab2_d_rotated[:,:,0] == 10] = 10 + img_revised_tab = np.copy(text_regions_p) + img_revised_tab[img_revised_tab == 10] = 0 + img_revised_tab[img_revised_tab2_d_rotated == 10] = 10 - text_regions_p[:,:][text_regions_p[:,:]==10] = 0 - text_regions_p[:,:][img_revised_tab[:,:]==10] = 10 + text_regions_p[text_regions_p == 10] = 0 + text_regions_p[img_revised_tab == 10] = 10 else: - img_revised_tab=text_regions_p[:,:] + img_revised_tab = text_regions_p[:,:] #img_revised_tab = text_regions_p[:, :] if self.light_version: polygons_of_images = return_contours_of_interested_region(text_regions_p, 2) @@ -3386,7 +3361,7 @@ class Eynollah: num_col_classifier, erosion_hurts, self.tables, self.right2left) text_regions_p_tables = np.copy(text_regions_p_1_n) text_regions_p_tables = np.round(text_regions_p_tables) - text_regions_p_tables[:,:][(text_regions_p_tables[:,:]!=3) & (table_prediction_n[:,:]==1)] = 10 + text_regions_p_tables[(text_regions_p_tables != 3) & (table_prediction_n == 1)] = 10 pixel_line = 3 img_revised_tab2 = self.add_tables_heuristic_to_layout( @@ -3405,17 +3380,17 @@ class Eynollah: text_regions_p.shape[1]) if np.abs(slope_deskew) < 0.13: - img_revised_tab = np.copy(img_revised_tab2[:,:,0]) + img_revised_tab = np.copy(img_revised_tab2) else: - img_revised_tab = np.copy(text_regions_p[:,:]) - img_revised_tab[:,:][img_revised_tab[:,:] == 10] = 0 - img_revised_tab[:,:][img_revised_tab2_d_rotated[:,:,0] == 10] = 10 + img_revised_tab = np.copy(text_regions_p) + img_revised_tab[img_revised_tab == 10] = 0 + img_revised_tab[img_revised_tab2_d_rotated == 10] = 10 - ##img_revised_tab=img_revised_tab2[:,:,0] - #img_revised_tab=text_regions_p[:,:] - text_regions_p[:,:][text_regions_p[:,:]==10] = 0 - text_regions_p[:,:][img_revised_tab[:,:]==10] = 10 - #img_revised_tab[img_revised_tab2[:,:,0]==10] =10 + ##img_revised_tab = img_revised_tab2[:,:] + #img_revised_tab = text_regions_p[:,:] + text_regions_p[text_regions_p == 10] = 0 + text_regions_p[img_revised_tab == 10] = 10 + #img_revised_tab[img_revised_tab2 == 10] = 10 pixel_img = 4 min_area_mar = 0.00001 From fd43e78442251c552faafeffe02256023ae1a806 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 7 Oct 2025 22:42:36 +0200 Subject: [PATCH 339/492] filter_contours_without_textline_inside: simplify - np.delete in index array instead of contour lists - yield actual resulting indices --- src/eynollah/eynollah.py | 77 ++++------------------------------------ 1 file changed, 6 insertions(+), 71 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 70a8a17..6cc8b1b 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4040,79 +4040,23 @@ class Eynollah: self, contours, text_con_org, contours_textline, contours_only_text_parent_d_ordered, conf_contours_textregions): - ###contours_txtline_of_all_textregions = [] - ###for jj in range(len(contours_textline)): - ###contours_txtline_of_all_textregions = contours_txtline_of_all_textregions + contours_textline[jj] - ###M_main_textline = [cv2.moments(contours_txtline_of_all_textregions[j]) - ### for j in range(len(contours_txtline_of_all_textregions))] - ###cx_main_textline = [(M_main_textline[j]["m10"] / (M_main_textline[j]["m00"] + 1e-32)) - ### for j in range(len(M_main_textline))] - ###cy_main_textline = [(M_main_textline[j]["m01"] / (M_main_textline[j]["m00"] + 1e-32)) - ### for j in range(len(M_main_textline))] - - ###M_main = [cv2.moments(contours[j]) for j in range(len(contours))] - ###cx_main = [(M_main[j]["m10"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] - ###cy_main = [(M_main[j]["m01"] / (M_main[j]["m00"] + 1e-32)) for j in range(len(M_main))] - - ###contours_with_textline = [] - ###for ind_tr, con_tr in enumerate(contours): - ###results = [cv2.pointPolygonTest(con_tr, - ### (cx_main_textline[index_textline_con], - ### cy_main_textline[index_textline_con]), - ### False) - ### for index_textline_con in range(len(contours_txtline_of_all_textregions)) ] - ###results = np.array(results) - ###if np.any(results==1): - ###contours_with_textline.append(con_tr) - - textregion_index_to_del = set() - for index_textregion, textlines_textregion in enumerate(contours_textline): - if len(textlines_textregion) == 0: - textregion_index_to_del.add(index_textregion) + assert len(contours_par) == len(contours_textline) + indices = np.arange(len(contours_textline)) + indices = np.delete(indices, np.flatnonzero([len(lines) == 0 for lines in contours_textline])) def filterfun(lis): if len(lis) == 0: return [] - if len(textregion_index_to_del) == 0: - return lis - return list(np.delete(lis, list(textregion_index_to_del))) + return list(np.array(lis)[indices]) return (filterfun(contours), filterfun(text_con_org), filterfun(conf_contours_textregions), filterfun(contours_textline), filterfun(contours_only_text_parent_d_ordered), - np.arange(len(contours) - len(textregion_index_to_del))) + indices + ) - def delete_regions_without_textlines( - self, slopes, all_found_textline_polygons, boxes_text, txt_con_org, - contours_only_text_parent, index_by_text_par_con): - - slopes_rem = [] - all_found_textline_polygons_rem = [] - boxes_text_rem = [] - txt_con_org_rem = [] - contours_only_text_parent_rem = [] - index_by_text_par_con_rem = [] - - for i, ind_con in enumerate(all_found_textline_polygons): - if len(ind_con): - all_found_textline_polygons_rem.append(ind_con) - slopes_rem.append(slopes[i]) - boxes_text_rem.append(boxes_text[i]) - txt_con_org_rem.append(txt_con_org[i]) - contours_only_text_parent_rem.append(contours_only_text_parent[i]) - index_by_text_par_con_rem.append(index_by_text_par_con[i]) - - index_sort = np.argsort(index_by_text_par_con_rem) - indexes_new = np.array(range(len(index_by_text_par_con_rem))) - - index_by_text_par_con_rem_sort = [indexes_new[index_sort==j][0] - for j in range(len(index_by_text_par_con_rem))] - - return (slopes_rem, all_found_textline_polygons_rem, boxes_text_rem, txt_con_org_rem, - contours_only_text_parent_rem, index_by_text_par_con_rem_sort) - def separate_marginals_to_left_and_right_and_order_from_top_to_down( self, polygons_of_marginals, all_found_textline_polygons_marginals, all_box_coord_marginals, slopes_marginals, mid_point_of_page_width): @@ -4679,15 +4623,6 @@ class Eynollah: polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea_org, boxes_marginals, slope_deskew) - #slopes, all_found_textline_polygons, boxes_text, txt_con_org, \ - # contours_only_text_parent, index_by_text_par_con = \ - # self.delete_regions_without_textlines(slopes, all_found_textline_polygons, - # boxes_text, txt_con_org, contours_only_text_parent, index_by_text_par_con) - #slopes_marginals, all_found_textline_polygons_marginals, boxes_marginals, \ - # polygons_of_marginals, polygons_of_marginals, _ = \ - # self.delete_regions_without_textlines(slopes_marginals, all_found_textline_polygons_marginals, - # boxes_marginals, polygons_of_marginals, polygons_of_marginals, - # np.array(range(len(polygons_of_marginals)))) all_found_textline_polygons = dilate_textline_contours( all_found_textline_polygons) all_found_textline_polygons = self.filter_contours_inside_a_bigger_one( From 02a347a48a972de49c4b098f454a9a16cc4ee4fc Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 7 Oct 2025 22:47:34 +0200 Subject: [PATCH 340/492] no more need to rm from `contours_only_text_parent_d_ordered` now --- src/eynollah/eynollah.py | 16 ++-------------- src/eynollah/utils/__init__.py | 8 ++++---- 2 files changed, 6 insertions(+), 18 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 6cc8b1b..c4a6600 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4437,6 +4437,8 @@ class Eynollah: ###min_con_area = 0.000005 contours_only_text, hir_on_text = return_contours_of_image(text_only) contours_only_text_parent = return_parent_contours(contours_only_text, hir_on_text) + contours_only_text_parent_d_ordered = [] + contours_only_text_parent_d = [] if len(contours_only_text_parent) > 0: areas_tot_text = np.prod(text_only.shape) areas_cnt_text = np.array([cv2.contourArea(c) for c in contours_only_text_parent]) @@ -4558,15 +4560,6 @@ class Eynollah: # plt.subplot(2, 2, 2, title="result contours") # plt.imshow(img4) # plt.show() - else: - contours_only_text_parent_d_ordered = [] - contours_only_text_parent_d = [] - contours_only_text_parent = [] - - else: - contours_only_text_parent_d_ordered = [] - contours_only_text_parent_d = [] - #contours_only_text_parent = [] if not len(contours_only_text_parent): # stop early @@ -4684,11 +4677,6 @@ class Eynollah: slopes_marginals, mid_point_of_page_width) #print(len(polygons_of_marginals), len(ordered_left_marginals), len(ordered_right_marginals), 'marginals ordred') - if np.abs(slope_deskew) >= SLOPE_THRESHOLD: - contours_only_text_parent_d_ordered = self.return_list_of_contours_with_desired_order( - contours_only_text_parent_d_ordered, index_by_text_par_con) - else: - contours_only_text_parent_d_ordered = None if self.full_layout: if self.light_version: diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index ebf78fe..5ccb2af 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -938,7 +938,7 @@ def check_any_text_region_in_model_one_is_main_or_header( if (pixels_header>=pixels_main) and ( (length_con[ii]/float(height_con[ii]) )>=1.3 ): regions_model_1[:,:][(regions_model_1[:,:]==1) & (img == 255) ]=2 contours_only_text_parent_head.append(con) - if contours_only_text_parent_d_ordered is not None: + if len(contours_only_text_parent_d_ordered): contours_only_text_parent_head_d.append(contours_only_text_parent_d_ordered[ii]) all_box_coord_head.append(all_box_coord[ii]) slopes_head.append(slopes[ii]) @@ -948,7 +948,7 @@ def check_any_text_region_in_model_one_is_main_or_header( regions_model_1[:,:][(regions_model_1[:,:]==1) & (img == 255) ]=1 contours_only_text_parent_main.append(con) conf_contours_main.append(conf_contours[ii]) - if contours_only_text_parent_d_ordered is not None: + if len(contours_only_text_parent_d_ordered): contours_only_text_parent_main_d.append(contours_only_text_parent_d_ordered[ii]) all_box_coord_main.append(all_box_coord[ii]) slopes_main.append(slopes[ii]) @@ -1033,7 +1033,7 @@ def check_any_text_region_in_model_one_is_main_or_header_light( regions_model_1[:,:][(regions_model_1[:,:]==1) & (img == 255) ] = 2 contours_only_text_parent_head.append(contours_only_text_parent[ii]) conf_contours_head.append(None) # why not conf_contours[ii], too? - if contours_only_text_parent_d_ordered is not None: + if len(contours_only_text_parent_d_ordered): contours_only_text_parent_head_d.append(contours_only_text_parent_d_ordered[ii]) all_box_coord_head.append(all_box_coord[ii]) slopes_head.append(slopes[ii]) @@ -1043,7 +1043,7 @@ def check_any_text_region_in_model_one_is_main_or_header_light( regions_model_1[:,:][(regions_model_1[:,:]==1) & (img == 255) ] = 1 contours_only_text_parent_main.append(contours_only_text_parent[ii]) conf_contours_main.append(conf_contours[ii]) - if contours_only_text_parent_d_ordered is not None: + if len(contours_only_text_parent_d_ordered): contours_only_text_parent_main_d.append(contours_only_text_parent_d_ordered[ii]) all_box_coord_main.append(all_box_coord[ii]) slopes_main.append(slopes[ii]) From d88ca18eec8f1a4def371848c218b817fdb728a1 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 7 Oct 2025 22:53:30 +0200 Subject: [PATCH 341/492] get/do_work_of_slopes etc.: reduce call/return signatures - `get_textregion_contours_in_org_image_light`: no more need to also return unchanged contours here (see 41cc38c5); therefore - `txt_con_org`: no more need for this (now mere alias to `contours_only_text_parent`); also - `index_by_text_par_con`: no more need for this (see prev. commit), so do not pass/return - `get_slopes_and_deskew_*`: do not pass `contours_only_text` (where not used) - `get_slopes_and_deskew_*`: do not return unchanged contours, boxes - `do_work_of_slopes_*`: adapt respectively --- src/eynollah/eynollah.py | 98 +++++++++++++--------------- src/eynollah/utils/contour.py | 4 +- src/eynollah/utils/separate_lines.py | 12 ++-- 3 files changed, 54 insertions(+), 60 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index c4a6600..ec68bcd 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -879,7 +879,7 @@ class Eynollah: thresholding_for_fl_light_version=False, threshold_art_class_textline=0.1): - self.logger.debug("enter do_prediction") + self.logger.debug("enter do_prediction (patches=%d)", patches) img_height_model = model.layers[-1].output_shape[1] img_width_model = model.layers[-1].output_shape[2] @@ -1856,7 +1856,7 @@ class Eynollah: return sorted_textlines - def get_slopes_and_deskew_new_light2(self, contours, contours_par, textline_mask_tot, boxes, slope_deskew): + def get_slopes_and_deskew_new_light2(self, contours_par, textline_mask_tot, boxes, slope_deskew): polygons_of_textlines = return_contours_of_interested_region(textline_mask_tot,1,0.00001) cx_main_tot, cy_main_tot = find_center_of_contours(polygons_of_textlines) @@ -1889,16 +1889,12 @@ class Eynollah: all_box_coord.append(crop_coor) return (all_found_textline_polygons, - boxes, - contours, - contours_par, all_box_coord, - np.array(range(len(contours_par))), slopes) def get_slopes_and_deskew_new_light(self, contours, contours_par, textline_mask_tot, boxes, slope_deskew): if not len(contours): - return [], [], [], [], [], [], [] + return [], [], [] self.logger.debug("enter get_slopes_and_deskew_new_light") with share_ndarray(textline_mask_tot) as textline_mask_tot_shared: results = self.executor.map(partial(do_work_of_slopes_new_light, @@ -1906,15 +1902,15 @@ class Eynollah: slope_deskew=slope_deskew, textline_light=self.textline_light, logger=self.logger,), - boxes, contours, contours_par, range(len(contours_par))) + boxes, contours, contours_par) results = list(results) # exhaust prior to release - #textline_polygons, boxes, text_regions, text_regions_par, box_coord, index_text_con, slopes = zip(*results) + #textline_polygons, box_coord, slopes = zip(*results) self.logger.debug("exit get_slopes_and_deskew_new_light") return tuple(zip(*results)) def get_slopes_and_deskew_new(self, contours, contours_par, textline_mask_tot, boxes, slope_deskew): if not len(contours): - return [], [], [], [], [], [], [] + return [], [], [] self.logger.debug("enter get_slopes_and_deskew_new") with share_ndarray(textline_mask_tot) as textline_mask_tot_shared: results = self.executor.map(partial(do_work_of_slopes_new, @@ -1924,16 +1920,16 @@ class Eynollah: KERNEL=KERNEL, logger=self.logger, plotter=self.plotter,), - boxes, contours, contours_par, range(len(contours_par))) + boxes, contours, contours_par) results = list(results) # exhaust prior to release - #textline_polygons, boxes, text_regions, text_regions_par, box_coord, index_text_con, slopes = zip(*results) + #textline_polygons, box_coord, slopes = zip(*results) self.logger.debug("exit get_slopes_and_deskew_new") return tuple(zip(*results)) - def get_slopes_and_deskew_new_curved(self, contours, contours_par, textline_mask_tot, boxes, + def get_slopes_and_deskew_new_curved(self, contours_par, textline_mask_tot, boxes, mask_texts_only, num_col, scale_par, slope_deskew): - if not len(contours): - return [], [], [], [], [], [], [] + if not len(contours_par): + return [], [], [] self.logger.debug("enter get_slopes_and_deskew_new_curved") with share_ndarray(textline_mask_tot) as textline_mask_tot_shared: with share_ndarray(mask_texts_only) as mask_texts_only_shared: @@ -1947,9 +1943,9 @@ class Eynollah: KERNEL=KERNEL, logger=self.logger, plotter=self.plotter,), - boxes, contours, contours_par, range(len(contours_par))) + boxes, contours_par) results = list(results) # exhaust prior to release - #textline_polygons, boxes, text_regions, text_regions_par, box_coord, index_text_con, slopes = zip(*results) + #textline_polygons, box_coord, slopes = zip(*results) self.logger.debug("exit get_slopes_and_deskew_new_curved") return tuple(zip(*results)) @@ -4037,7 +4033,7 @@ class Eynollah: def filter_contours_without_textline_inside( - self, contours, text_con_org, contours_textline, + self, contours_par, contours_textline, contours_only_text_parent_d_ordered, conf_contours_textregions): @@ -4049,12 +4045,11 @@ class Eynollah: return [] return list(np.array(lis)[indices]) - return (filterfun(contours), - filterfun(text_con_org), - filterfun(conf_contours_textregions), + return (filterfun(contours_par), filterfun(contours_textline), filterfun(contours_only_text_parent_d_ordered), - indices + filterfun(conf_contours_textregions), + # indices ) def separate_marginals_to_left_and_right_and_order_from_top_to_down( @@ -4592,12 +4587,11 @@ class Eynollah: contours_only_text_parent, contours_only_text_parent_d_ordered, text_only, marginal_cnts=polygons_of_marginals) #print("text region early 3.5 in %.1fs", time.time() - t0) - txt_con_org , conf_contours_textregions = get_textregion_contours_in_org_image_light( + conf_contours_textregions = get_textregion_contours_in_org_image_light( contours_only_text_parent, self.image, confidence_matrix) - #txt_con_org = dilate_textregion_contours(txt_con_org) #contours_only_text_parent = dilate_textregion_contours(contours_only_text_parent) else: - txt_con_org , conf_contours_textregions = get_textregion_contours_in_org_image_light( + conf_contours_textregions = get_textregion_contours_in_org_image_light( contours_only_text_parent, self.image, confidence_matrix) #print("text region early 4 in %.1fs", time.time() - t0) boxes_text = get_text_region_boxes_by_given_contours(contours_only_text_parent) @@ -4607,13 +4601,13 @@ class Eynollah: if not self.curved_line: if self.light_version: if self.textline_light: - all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, \ - all_box_coord, index_by_text_par_con, slopes = self.get_slopes_and_deskew_new_light2( - txt_con_org, contours_only_text_parent, textline_mask_tot_ea_org, + all_found_textline_polygons, \ + all_box_coord, slopes = self.get_slopes_and_deskew_new_light2( + contours_only_text_parent, textline_mask_tot_ea_org, boxes_text, slope_deskew) - all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, \ - all_box_coord_marginals, _, slopes_marginals = self.get_slopes_and_deskew_new_light2( - polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea_org, + all_found_textline_polygons_marginals, \ + all_box_coord_marginals, slopes_marginals = self.get_slopes_and_deskew_new_light2( + polygons_of_marginals, textline_mask_tot_ea_org, boxes_marginals, slope_deskew) all_found_textline_polygons = dilate_textline_contours( @@ -4622,46 +4616,46 @@ class Eynollah: all_found_textline_polygons, None, textline_mask_tot_ea_org, type_contour="textline") all_found_textline_polygons_marginals = dilate_textline_contours( all_found_textline_polygons_marginals) - contours_only_text_parent, txt_con_org, conf_contours_textregions, \ - all_found_textline_polygons, contours_only_text_parent_d_ordered, \ - index_by_text_par_con = self.filter_contours_without_textline_inside( - contours_only_text_parent, txt_con_org, all_found_textline_polygons, + contours_only_text_parent, all_found_textline_polygons, \ + contours_only_text_parent_d_ordered, conf_contours_textregions = \ + self.filter_contours_without_textline_inside( + contours_only_text_parent, all_found_textline_polygons, contours_only_text_parent_d_ordered, conf_contours_textregions) else: textline_mask_tot_ea = cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=1) - all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, all_box_coord, \ - index_by_text_par_con, slopes = self.get_slopes_and_deskew_new_light( - txt_con_org, contours_only_text_parent, textline_mask_tot_ea, + all_found_textline_polygons, \ + all_box_coord, slopes = self.get_slopes_and_deskew_new_light( + contours_only_text_parent, contours_only_text_parent, textline_mask_tot_ea, boxes_text, slope_deskew) - all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, \ - all_box_coord_marginals, _, slopes_marginals = self.get_slopes_and_deskew_new_light( + all_found_textline_polygons_marginals, \ + all_box_coord_marginals, slopes_marginals = self.get_slopes_and_deskew_new_light( polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea, boxes_marginals, slope_deskew) #all_found_textline_polygons = self.filter_contours_inside_a_bigger_one( # all_found_textline_polygons, textline_mask_tot_ea_org, type_contour="textline") else: textline_mask_tot_ea = cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=1) - all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, \ - all_box_coord, index_by_text_par_con, slopes = self.get_slopes_and_deskew_new( - txt_con_org, contours_only_text_parent, textline_mask_tot_ea, + all_found_textline_polygons, \ + all_box_coord, slopes = self.get_slopes_and_deskew_new( + contours_only_text_parent, contours_only_text_parent, textline_mask_tot_ea, boxes_text, slope_deskew) - all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, \ - all_box_coord_marginals, _, slopes_marginals = self.get_slopes_and_deskew_new( + all_found_textline_polygons_marginals, \ + all_box_coord_marginals, slopes_marginals = self.get_slopes_and_deskew_new( polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea, boxes_marginals, slope_deskew) else: scale_param = 1 textline_mask_tot_ea_erode = cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=2) - all_found_textline_polygons, boxes_text, txt_con_org, contours_only_text_parent, \ - all_box_coord, index_by_text_par_con, slopes = self.get_slopes_and_deskew_new_curved( - txt_con_org, contours_only_text_parent, textline_mask_tot_ea_erode, + all_found_textline_polygons, \ + all_box_coord, slopes = self.get_slopes_and_deskew_new_curved( + contours_only_text_parent, textline_mask_tot_ea_erode, boxes_text, text_only, num_col_classifier, scale_param, slope_deskew) all_found_textline_polygons = small_textlines_to_parent_adherence2( all_found_textline_polygons, textline_mask_tot_ea, num_col_classifier) - all_found_textline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, \ - all_box_coord_marginals, _, slopes_marginals = self.get_slopes_and_deskew_new_curved( - polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea_erode, + all_found_textline_polygons_marginals, \ + all_box_coord_marginals, slopes_marginals = self.get_slopes_and_deskew_new_curved( + polygons_of_marginals, textline_mask_tot_ea_erode, boxes_marginals, text_only, num_col_classifier, scale_param, slope_deskew) all_found_textline_polygons_marginals = small_textlines_to_parent_adherence2( @@ -4884,7 +4878,7 @@ class Eynollah: conf_contours_textregions, conf_contours_textregions_h) else: pcgts = self.writer.build_pagexml_no_full_layout( - txt_con_org, page_coord, order_text_new, id_of_texts_tot, + contours_only_text_parent, page_coord, order_text_new, id_of_texts_tot, all_found_textline_polygons, all_box_coord, polygons_of_images, polygons_of_marginals_left, polygons_of_marginals_right, all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index fb4bbd0..2560846 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -216,7 +216,7 @@ def do_back_rotation_and_get_cnt_back(contour_par, index_r_con, img, slope_first def get_textregion_contours_in_org_image_light(cnts, img, confidence_matrix): if not len(cnts): - return [], [] + return [] confidence_matrix = cv2.resize(confidence_matrix, (img.shape[1] // 6, img.shape[0] // 6), @@ -226,7 +226,7 @@ def get_textregion_contours_in_org_image_light(cnts, img, confidence_matrix): cnt_mask = np.zeros(confidence_matrix.shape) cnt_mask = cv2.fillPoly(cnt_mask, pts=[cnt // 6], color=1.0) confs.append(np.sum(confidence_matrix * cnt_mask) / np.sum(cnt_mask)) - return cnts, confs + return confs def return_contours_of_interested_textline(region_pre_p, label): # pixels of images are identified by 5 diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 3bfc903..22ef00d 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1592,7 +1592,7 @@ def get_smallest_skew(img, sigma_des, angles, logger=None, plotter=None, map=map @wrap_ndarray_shared(kw='textline_mask_tot_ea') def do_work_of_slopes_new( - box_text, contour, contour_par, index_r_con, + box_text, contour, contour_par, textline_mask_tot_ea=None, slope_deskew=0.0, logger=None, MAX_SLOPE=999, KERNEL=None, plotter=None ): @@ -1647,12 +1647,12 @@ def do_work_of_slopes_new( all_text_region_raw[mask_only_con_region == 0] = 0 cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contour_par, box_text) - return cnt_clean_rot, box_text, contour, contour_par, crop_coor, index_r_con, slope + return cnt_clean_rot, crop_coor, slope @wrap_ndarray_shared(kw='textline_mask_tot_ea') @wrap_ndarray_shared(kw='mask_texts_only') def do_work_of_slopes_new_curved( - box_text, contour, contour_par, index_r_con, + box_text, contour_par, textline_mask_tot_ea=None, mask_texts_only=None, num_col=1, scale_par=1.0, slope_deskew=0.0, logger=None, MAX_SLOPE=999, KERNEL=None, plotter=None @@ -1743,11 +1743,11 @@ def do_work_of_slopes_new_curved( slope_for_all, contour_par, box_text, True) - return textlines_cnt_per_region[::-1], box_text, contour, contour_par, crop_coor, index_r_con, slope + return textlines_cnt_per_region[::-1], crop_coor, slope @wrap_ndarray_shared(kw='textline_mask_tot_ea') def do_work_of_slopes_new_light( - box_text, contour, contour_par, index_r_con, + box_text, contour, contour_par, textline_mask_tot_ea=None, slope_deskew=0, textline_light=True, logger=None ): @@ -1777,4 +1777,4 @@ def do_work_of_slopes_new_light( all_text_region_raw[mask_only_con_region == 0] = 0 cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_deskew, contour_par, box_text) - return cnt_clean_rot, box_text, contour, contour_par, crop_coor, index_r_con, slope_deskew + return cnt_clean_rot, crop_coor, slope_deskew From e32479765cc52a29462b36f876d253478860f176 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 7 Oct 2025 23:03:27 +0200 Subject: [PATCH 342/492] writer: simplify - simplify serialization of coordinates - re-use `serialize_lines_in_region` (drop `*_in_dropcapital` and `*_in_marginal`) - re-use `calculate_polygon_coords` --- src/eynollah/writer.py | 343 ++++++++++++++++------------------------- 1 file changed, 131 insertions(+), 212 deletions(-) diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index 936c95f..67a2989 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -56,113 +56,30 @@ class EynollahXmlWriter(): points_page_print = points_page_print + ' ' return points_page_print[:-1] - def serialize_lines_in_marginal(self, marginal_region, all_found_textline_polygons_marginals, marginal_idx, page_coord, all_box_coord_marginals, slopes_marginals, counter, ocr_all_textlines_textregion): - for j in range(len(all_found_textline_polygons_marginals[marginal_idx])): - coords = CoordsType() - textline = TextLineType(id=counter.next_line_id, Coords=coords) - if ocr_all_textlines_textregion: - textline.set_TextEquiv( [ TextEquivType(Unicode=ocr_all_textlines_textregion[j]) ] ) - marginal_region.add_TextLine(textline) - marginal_region.set_orientation(-slopes_marginals[marginal_idx]) - points_co = '' - for l in range(len(all_found_textline_polygons_marginals[marginal_idx][j])): - if not (self.curved_line or self.textline_light): - if len(all_found_textline_polygons_marginals[marginal_idx][j][l]) == 2: - textline_x_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x) ) - textline_y_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y) ) - else: - textline_x_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x) ) - textline_y_coord = max(0, int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y) ) - points_co += str(textline_x_coord) - points_co += ',' - points_co += str(textline_y_coord) - if (self.curved_line or self.textline_light) and np.abs(slopes_marginals[marginal_idx]) <= 45: - if len(all_found_textline_polygons_marginals[marginal_idx][j][l]) == 2: - points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0] + page_coord[2]) / self.scale_x)) - points_co += ',' - points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][1] + page_coord[0]) / self.scale_y)) - else: - points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][0] + page_coord[2]) / self.scale_x)) - points_co += ',' - points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][1] + page_coord[0]) / self.scale_y)) - - elif (self.curved_line or self.textline_light) and np.abs(slopes_marginals[marginal_idx]) > 45: - if len(all_found_textline_polygons_marginals[marginal_idx][j][l]) == 2: - points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x)) - points_co += ',' - points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y)) - else: - points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][0] + all_box_coord_marginals[marginal_idx][2] + page_coord[2]) / self.scale_x)) - points_co += ',' - points_co += str(int((all_found_textline_polygons_marginals[marginal_idx][j][l][0][1] + all_box_coord_marginals[marginal_idx][0] + page_coord[0]) / self.scale_y)) - points_co += ' ' - coords.set_points(points_co[:-1]) - def serialize_lines_in_region(self, text_region, all_found_textline_polygons, region_idx, page_coord, all_box_coord, slopes, counter, ocr_all_textlines_textregion): self.logger.debug('enter serialize_lines_in_region') - for j in range(len(all_found_textline_polygons[region_idx])): + for j, polygon_textline in enumerate(all_found_textline_polygons[region_idx]): coords = CoordsType() textline = TextLineType(id=counter.next_line_id, Coords=coords) if ocr_all_textlines_textregion: - textline.set_TextEquiv( [ TextEquivType(Unicode=ocr_all_textlines_textregion[j]) ] ) + # FIXME: add OCR confidence + textline.set_TextEquiv([TextEquivType(Unicode=ocr_all_textlines_textregion[j])]) text_region.add_TextLine(textline) text_region.set_orientation(-slopes[region_idx]) region_bboxes = all_box_coord[region_idx] points_co = '' - for idx_contour_textline, contour_textline in enumerate(all_found_textline_polygons[region_idx][j]): - if not (self.curved_line or self.textline_light): - if len(contour_textline) == 2: - textline_x_coord = max(0, int((contour_textline[0] + region_bboxes[2] + page_coord[2]) / self.scale_x)) - textline_y_coord = max(0, int((contour_textline[1] + region_bboxes[0] + page_coord[0]) / self.scale_y)) - else: - textline_x_coord = max(0, int((contour_textline[0][0] + region_bboxes[2] + page_coord[2]) / self.scale_x)) - textline_y_coord = max(0, int((contour_textline[0][1] + region_bboxes[0] + page_coord[0]) / self.scale_y)) - points_co += str(textline_x_coord) - points_co += ',' - points_co += str(textline_y_coord) - - if self.textline_light or (self.curved_line and np.abs(slopes[region_idx]) <= 45): - if len(contour_textline) == 2: - points_co += str(int((contour_textline[0] + page_coord[2]) / self.scale_x)) - points_co += ',' - points_co += str(int((contour_textline[1] + page_coord[0]) / self.scale_y)) - else: - points_co += str(int((contour_textline[0][0] + page_coord[2]) / self.scale_x)) - points_co += ',' - points_co += str(int((contour_textline[0][1] + page_coord[0])/self.scale_y)) - elif self.curved_line and np.abs(slopes[region_idx]) > 45: - if len(contour_textline)==2: - points_co += str(int((contour_textline[0] + region_bboxes[2] + page_coord[2])/self.scale_x)) - points_co += ',' - points_co += str(int((contour_textline[1] + region_bboxes[0] + page_coord[0])/self.scale_y)) - else: - points_co += str(int((contour_textline[0][0] + region_bboxes[2]+page_coord[2])/self.scale_x)) - points_co += ',' - points_co += str(int((contour_textline[0][1] + region_bboxes[0]+page_coord[0])/self.scale_y)) - points_co += ' ' - coords.set_points(points_co[:-1]) - - def serialize_lines_in_dropcapital(self, text_region, all_found_textline_polygons, region_idx, page_coord, all_box_coord, slopes, counter, ocr_all_textlines_textregion): - self.logger.debug('enter serialize_lines_in_region') - for j in range(1): - coords = CoordsType() - textline = TextLineType(id=counter.next_line_id, Coords=coords) - if ocr_all_textlines_textregion: - textline.set_TextEquiv( [ TextEquivType(Unicode=ocr_all_textlines_textregion[j]) ] ) - text_region.add_TextLine(textline) - #region_bboxes = all_box_coord[region_idx] - points_co = '' - for idx_contour_textline, contour_textline in enumerate(all_found_textline_polygons[j]): - if len(contour_textline) == 2: - points_co += str(int((contour_textline[0] + page_coord[2]) / self.scale_x)) - points_co += ',' - points_co += str(int((contour_textline[1] + page_coord[0]) / self.scale_y)) - else: - points_co += str(int((contour_textline[0][0] + page_coord[2]) / self.scale_x)) - points_co += ',' - points_co += str(int((contour_textline[0][1] + page_coord[0])/self.scale_y)) - - points_co += ' ' + for point in polygon_textline: + if len(point) != 2: + point = point[0] + point_x = point[0] + page_coord[2] + point_y = point[1] + page_coord[0] + # FIXME: or actually... not self.textline_light and not self.curved_line or np.abs(slopes[region_idx]) > 45? + if not self.textline_light and not (self.curved_line and np.abs(slopes[region_idx]) <= 45): + point_x += region_bboxes[2] + point_y += region_bboxes[0] + point_x = max(0, int(point_x / self.scale_x)) + point_y = max(0, int(point_y / self.scale_y)) + points_co += str(point_x) + ',' + str(point_y) + ' ' coords.set_points(points_co[:-1]) def write_pagexml(self, pcgts): @@ -170,7 +87,7 @@ class EynollahXmlWriter(): with open(self.output_filename, 'w') as f: f.write(to_xml(pcgts)) - def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals_left, found_polygons_marginals_right, all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes, slopes_marginals_left, slopes_marginals_right, cont_page, polygons_lines_to_be_written_in_xml, found_polygons_tables, ocr_all_textlines=None, ocr_all_textlines_marginals_left=None, ocr_all_textlines_marginals_right=None, conf_contours_textregion=None, skip_layout_reading_order=False): + def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals_left, found_polygons_marginals_right, all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes, slopes_marginals_left, slopes_marginals_right, cont_page, polygons_seplines, found_polygons_tables, ocr_all_textlines=None, ocr_all_textlines_marginals_left=None, ocr_all_textlines_marginals_right=None, conf_contours_textregion=None, skip_layout_reading_order=False): self.logger.debug('enter build_pagexml_no_full_layout') # create the file structure @@ -179,90 +96,79 @@ class EynollahXmlWriter(): page.set_Border(BorderType(Coords=CoordsType(points=self.calculate_page_coords(cont_page)))) counter = EynollahIdCounter() - if len(found_polygons_text_region) > 0: + if len(order_of_texts): _counter_marginals = EynollahIdCounter(region_idx=len(order_of_texts)) - id_of_marginalia_left = [_counter_marginals.next_region_id for _ in found_polygons_marginals_left] - id_of_marginalia_right = [_counter_marginals.next_region_id for _ in found_polygons_marginals_right] + id_of_marginalia_left = [_counter_marginals.next_region_id + for _ in found_polygons_marginals_left] + id_of_marginalia_right = [_counter_marginals.next_region_id + for _ in found_polygons_marginals_right] xml_reading_order(page, order_of_texts, id_of_marginalia_left, id_of_marginalia_right) - for mm in range(len(found_polygons_text_region)): - textregion = TextRegionType(id=counter.next_region_id, type_='paragraph', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord, skip_layout_reading_order), conf=conf_contours_textregion[mm]), - ) - #textregion.set_conf(conf_contours_textregion[mm]) + for mm, region_contour in enumerate(found_polygons_text_region): + textregion = TextRegionType( + id=counter.next_region_id, type_='paragraph', + Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord, + skip_layout_reading_order), + conf=conf_contours_textregion[mm]), + ) page.add_TextRegion(textregion) if ocr_all_textlines: ocr_textlines = ocr_all_textlines[mm] else: ocr_textlines = None - self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, all_box_coord, slopes, counter, ocr_textlines) + self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, + all_box_coord, slopes, counter, ocr_textlines) - for mm in range(len(found_polygons_marginals_left)): - marginal = TextRegionType(id=counter.next_region_id, type_='marginalia', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals_left[mm], page_coord))) + for mm, region_contour in enumerate(found_polygons_marginals_left): + marginal = TextRegionType( + id=counter.next_region_id, type_='marginalia', + Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) + ) page.add_TextRegion(marginal) if ocr_all_textlines_marginals_left: ocr_textlines = ocr_all_textlines_marginals_left[mm] else: ocr_textlines = None - - #print(ocr_textlines, mm, len(all_found_textline_polygons_marginals_left[mm]) ) - self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals_left, mm, page_coord, all_box_coord_marginals_left, slopes_marginals_left, counter, ocr_textlines) + self.serialize_lines_in_region(marginal, all_found_textline_polygons_marginals_left, mm, page_coord, + all_box_coord_marginals_left, slopes_marginals_left, counter, ocr_textlines) - for mm in range(len(found_polygons_marginals_right)): - marginal = TextRegionType(id=counter.next_region_id, type_='marginalia', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals_right[mm], page_coord))) + for mm, region_contour in enumerate(found_polygons_marginals_right): + marginal = TextRegionType( + id=counter.next_region_id, type_='marginalia', + Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) + ) page.add_TextRegion(marginal) if ocr_all_textlines_marginals_right: ocr_textlines = ocr_all_textlines_marginals_right[mm] else: ocr_textlines = None - - self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals_right, mm, page_coord, all_box_coord_marginals_right, slopes_marginals_right, counter, ocr_textlines) + self.serialize_lines_in_region(marginal, all_found_textline_polygons_marginals_right, mm, page_coord, + all_box_coord_marginals_right, slopes_marginals_right, counter, ocr_textlines) - for mm in range(len(found_polygons_text_region_img)): - img_region = ImageRegionType(id=counter.next_region_id, Coords=CoordsType()) + for region_contour in found_polygons_text_region_img: + img_region = ImageRegionType( + id=counter.next_region_id, + Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) + ) page.add_ImageRegion(img_region) - points_co = '' - for lmm in range(len(found_polygons_text_region_img[mm])): - try: - points_co += str(int((found_polygons_text_region_img[mm][lmm,0,0] + page_coord[2]) / self.scale_x)) - points_co += ',' - points_co += str(int((found_polygons_text_region_img[mm][lmm,0,1] + page_coord[0]) / self.scale_y)) - points_co += ' ' - except: - points_co += str(int((found_polygons_text_region_img[mm][lmm][0] + page_coord[2])/ self.scale_x )) - points_co += ',' - points_co += str(int((found_polygons_text_region_img[mm][lmm][1] + page_coord[0])/ self.scale_y )) - points_co += ' ' + for region_contour in polygons_seplines: + sep = SeparatorRegionType( + id=counter.next_region_id, + Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, [0, 0, 0, 0])) + ) + page.add_SeparatorRegion(sep) - img_region.get_Coords().set_points(points_co[:-1]) - - for mm in range(len(polygons_lines_to_be_written_in_xml)): - sep_hor = SeparatorRegionType(id=counter.next_region_id, Coords=CoordsType()) - page.add_SeparatorRegion(sep_hor) - points_co = '' - for lmm in range(len(polygons_lines_to_be_written_in_xml[mm])): - points_co += str(int((polygons_lines_to_be_written_in_xml[mm][lmm,0,0] ) / self.scale_x)) - points_co += ',' - points_co += str(int((polygons_lines_to_be_written_in_xml[mm][lmm,0,1] ) / self.scale_y)) - points_co += ' ' - sep_hor.get_Coords().set_points(points_co[:-1]) - for mm in range(len(found_polygons_tables)): - tab_region = TableRegionType(id=counter.next_region_id, Coords=CoordsType()) - page.add_TableRegion(tab_region) - points_co = '' - for lmm in range(len(found_polygons_tables[mm])): - points_co += str(int((found_polygons_tables[mm][lmm,0,0] + page_coord[2]) / self.scale_x)) - points_co += ',' - points_co += str(int((found_polygons_tables[mm][lmm,0,1] + page_coord[0]) / self.scale_y)) - points_co += ' ' - tab_region.get_Coords().set_points(points_co[:-1]) + for region_contour in found_polygons_tables: + tab = TableRegionType( + id=counter.next_region_id, + Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) + ) + page.add_TableRegion(tab) return pcgts - def build_pagexml_full_layout(self, found_polygons_text_region, found_polygons_text_region_h, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, found_polygons_marginals_left,found_polygons_marginals_right, all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes, slopes_h, slopes_marginals_left, slopes_marginals_right, cont_page, polygons_lines_to_be_written_in_xml, ocr_all_textlines=None, ocr_all_textlines_h=None, ocr_all_textlines_marginals_left=None, ocr_all_textlines_marginals_right=None, ocr_all_textlines_drop=None, conf_contours_textregion=None, conf_contours_textregion_h=None): + def build_pagexml_full_layout(self, found_polygons_text_region, found_polygons_text_region_h, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, found_polygons_marginals_left,found_polygons_marginals_right, all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes, slopes_h, slopes_marginals_left, slopes_marginals_right, cont_page, polygons_seplines, ocr_all_textlines=None, ocr_all_textlines_h=None, ocr_all_textlines_marginals_left=None, ocr_all_textlines_marginals_right=None, ocr_all_textlines_drop=None, conf_contours_textregion=None, conf_contours_textregion_h=None): self.logger.debug('enter build_pagexml_full_layout') # create the file structure @@ -271,99 +177,112 @@ class EynollahXmlWriter(): page.set_Border(BorderType(Coords=CoordsType(points=self.calculate_page_coords(cont_page)))) counter = EynollahIdCounter() - _counter_marginals = EynollahIdCounter(region_idx=len(order_of_texts)) - id_of_marginalia_left = [_counter_marginals.next_region_id for _ in found_polygons_marginals_left] - id_of_marginalia_right = [_counter_marginals.next_region_id for _ in found_polygons_marginals_right] - xml_reading_order(page, order_of_texts, id_of_marginalia_left, id_of_marginalia_right) + if len(order_of_texts): + _counter_marginals = EynollahIdCounter(region_idx=len(order_of_texts)) + id_of_marginalia_left = [_counter_marginals.next_region_id + for _ in found_polygons_marginals_left] + id_of_marginalia_right = [_counter_marginals.next_region_id + for _ in found_polygons_marginals_right] + xml_reading_order(page, order_of_texts, id_of_marginalia_left, id_of_marginalia_right) - for mm in range(len(found_polygons_text_region)): - textregion = TextRegionType(id=counter.next_region_id, type_='paragraph', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region[mm], page_coord), conf=conf_contours_textregion[mm])) + for mm, region_contour in enumerate(found_polygons_text_region): + textregion = TextRegionType( + id=counter.next_region_id, type_='paragraph', + Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord), + conf=conf_contours_textregion[mm]) + ) page.add_TextRegion(textregion) - if ocr_all_textlines: ocr_textlines = ocr_all_textlines[mm] else: ocr_textlines = None - self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, all_box_coord, slopes, counter, ocr_textlines) + self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, + all_box_coord, slopes, counter, ocr_textlines) self.logger.debug('len(found_polygons_text_region_h) %s', len(found_polygons_text_region_h)) - for mm in range(len(found_polygons_text_region_h)): - textregion = TextRegionType(id=counter.next_region_id, type_='heading', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_h[mm], page_coord))) + for mm, region_contour in enumerate(found_polygons_text_region_h): + textregion = TextRegionType( + id=counter.next_region_id, type_='heading', + Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) + ) page.add_TextRegion(textregion) - if ocr_all_textlines_h: ocr_textlines = ocr_all_textlines_h[mm] else: ocr_textlines = None - self.serialize_lines_in_region(textregion, all_found_textline_polygons_h, mm, page_coord, all_box_coord_h, slopes_h, counter, ocr_textlines) + self.serialize_lines_in_region(textregion, all_found_textline_polygons_h, mm, page_coord, + all_box_coord_h, slopes_h, counter, ocr_textlines) - for mm in range(len(found_polygons_marginals_left)): - marginal = TextRegionType(id=counter.next_region_id, type_='marginalia', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals_left[mm], page_coord))) + for mm, region_contour in enumerate(found_polygons_marginals_left): + marginal = TextRegionType( + id=counter.next_region_id, type_='marginalia', + Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) + ) page.add_TextRegion(marginal) if ocr_all_textlines_marginals_left: ocr_textlines = ocr_all_textlines_marginals_left[mm] else: ocr_textlines = None - self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals_left, mm, page_coord, all_box_coord_marginals_left, slopes_marginals_left, counter, ocr_textlines) - - for mm in range(len(found_polygons_marginals_right)): - marginal = TextRegionType(id=counter.next_region_id, type_='marginalia', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_marginals_right[mm], page_coord))) + self.serialize_lines_in_region(marginal, all_found_textline_polygons_marginals_left, mm, page_coord, all_box_coord_marginals_left, slopes_marginals_left, counter, ocr_textlines) + + for mm, region_contour in enumerate(found_polygons_marginals_right): + marginal = TextRegionType( + id=counter.next_region_id, type_='marginalia', + Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) + ) page.add_TextRegion(marginal) if ocr_all_textlines_marginals_right: ocr_textlines = ocr_all_textlines_marginals_right[mm] else: ocr_textlines = None - self.serialize_lines_in_marginal(marginal, all_found_textline_polygons_marginals_right, mm, page_coord, all_box_coord_marginals_right, slopes_marginals_right, counter, ocr_textlines) - - for mm in range(len(found_polygons_drop_capitals)): - dropcapital = TextRegionType(id=counter.next_region_id, type_='drop-capital', - Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_drop_capitals[mm], page_coord))) + self.serialize_lines_in_region(marginal, all_found_textline_polygons_marginals_right, mm, page_coord, + all_box_coord_marginals_right, slopes_marginals_right, counter, ocr_textlines) + + for mm, region_contour in enumerate(found_polygons_drop_capitals): + dropcapital = TextRegionType( + id=counter.next_region_id, type_='drop-capital', + Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) + ) page.add_TextRegion(dropcapital) - all_box_coord_drop = None - slopes_drop = None + all_box_coord_drop = [[0, 0, 0, 0]] + slopes_drop = [0] if ocr_all_textlines_drop: ocr_textlines = ocr_all_textlines_drop[mm] else: ocr_textlines = None - self.serialize_lines_in_dropcapital(dropcapital, [found_polygons_drop_capitals[mm]], mm, page_coord, all_box_coord_drop, slopes_drop, counter, ocr_all_textlines_textregion=ocr_textlines) + self.serialize_lines_in_region(dropcapital, [[found_polygons_drop_capitals[mm]]], 0, page_coord, + all_box_coord_drop, slopes_drop, counter, ocr_textlines) - for mm in range(len(found_polygons_text_region_img)): - page.add_ImageRegion(ImageRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_text_region_img[mm], page_coord)))) + for region_contour in found_polygons_text_region_img: + page.add_ImageRegion( + ImageRegionType(id=counter.next_region_id, + Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)))) - for mm in range(len(polygons_lines_to_be_written_in_xml)): - page.add_SeparatorRegion(SeparatorRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(polygons_lines_to_be_written_in_xml[mm], [0 , 0, 0, 0])))) + for region_contour in polygons_seplines: + page.add_SeparatorRegion( + SeparatorRegionType(id=counter.next_region_id, + Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, [0, 0, 0, 0])))) - for mm in range(len(found_polygons_tables)): - page.add_TableRegion(TableRegionType(id=counter.next_region_id, Coords=CoordsType(points=self.calculate_polygon_coords(found_polygons_tables[mm], page_coord)))) + for region_contour in found_polygons_tables: + page.add_TableRegion( + TableRegionType(id=counter.next_region_id, + Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)))) return pcgts def calculate_polygon_coords(self, contour, page_coord, skip_layout_reading_order=False): self.logger.debug('enter calculate_polygon_coords') coords = '' - for value_bbox in contour: - if skip_layout_reading_order: - if len(value_bbox) == 2: - coords += str(int((value_bbox[0]) / self.scale_x)) - coords += ',' - coords += str(int((value_bbox[1]) / self.scale_y)) - else: - coords += str(int((value_bbox[0][0]) / self.scale_x)) - coords += ',' - coords += str(int((value_bbox[0][1]) / self.scale_y)) - else: - if len(value_bbox) == 2: - coords += str(int((value_bbox[0] + page_coord[2]) / self.scale_x)) - coords += ',' - coords += str(int((value_bbox[1] + page_coord[0]) / self.scale_y)) - else: - coords += str(int((value_bbox[0][0] + page_coord[2]) / self.scale_x)) - coords += ',' - coords += str(int((value_bbox[0][1] + page_coord[0]) / self.scale_y)) - coords=coords + ' ' + for point in contour: + if len(point) != 2: + point = point[0] + point_x = point[0] + point_y = point[1] + if not skip_layout_reading_order: + point_x += page_coord[2] + point_y += page_coord[0] + point_x = int(point_x / self.scale_x) + point_y = int(point_y / self.scale_y) + coords += str(point_x) + ',' + str(point_y) + ' ' return coords[:-1] From cbbb3248c72c1f3e50b98de1f7e2980bdd14da5d Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 8 Oct 2025 00:43:29 +0200 Subject: [PATCH 343/492] writer: simplify - `build_pagexml_no_full_layout`: delegate to `build_pagexml_full_layout` (removing redundant code) --- src/eynollah/writer.py | 133 +++++++++++++++-------------------------- 1 file changed, 49 insertions(+), 84 deletions(-) diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index 67a2989..eee7440 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -87,8 +87,50 @@ class EynollahXmlWriter(): with open(self.output_filename, 'w') as f: f.write(to_xml(pcgts)) - def build_pagexml_no_full_layout(self, found_polygons_text_region, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_box_coord, found_polygons_text_region_img, found_polygons_marginals_left, found_polygons_marginals_right, all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes, slopes_marginals_left, slopes_marginals_right, cont_page, polygons_seplines, found_polygons_tables, ocr_all_textlines=None, ocr_all_textlines_marginals_left=None, ocr_all_textlines_marginals_right=None, conf_contours_textregion=None, skip_layout_reading_order=False): - self.logger.debug('enter build_pagexml_no_full_layout') + def build_pagexml_no_full_layout( + self, found_polygons_text_region, + page_coord, order_of_texts, id_of_texts, + all_found_textline_polygons, + all_box_coord, + found_polygons_text_region_img, + found_polygons_marginals_left, found_polygons_marginals_right, + all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, + all_box_coord_marginals_left, all_box_coord_marginals_right, + slopes, slopes_marginals_left, slopes_marginals_right, + cont_page, polygons_seplines, + found_polygons_tables, + **kwargs): + return self.build_pagexml_full_layout( + found_polygons_text_region, [], + page_coord, order_of_texts, id_of_texts, + all_found_textline_polygons, [], + all_box_coord, [], + found_polygons_text_region_img, found_polygons_tables, [], + found_polygons_marginals_left, found_polygons_marginals_right, + all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, + all_box_coord_marginals_left, all_box_coord_marginals_right, + slopes, [], slopes_marginals_left, slopes_marginals_right, + cont_page, polygons_seplines, + **kwargs) + + def build_pagexml_full_layout( + self, + found_polygons_text_region, found_polygons_text_region_h, + page_coord, order_of_texts, id_of_texts, + all_found_textline_polygons, all_found_textline_polygons_h, + all_box_coord, all_box_coord_h, + found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, + found_polygons_marginals_left,found_polygons_marginals_right, + all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, + all_box_coord_marginals_left, all_box_coord_marginals_right, + slopes, slopes_h, slopes_marginals_left, slopes_marginals_right, + cont_page, polygons_seplines, + ocr_all_textlines=None, ocr_all_textlines_h=None, + ocr_all_textlines_marginals_left=None, ocr_all_textlines_marginals_right=None, + ocr_all_textlines_drop=None, + conf_contours_textregion=None, conf_contours_textregion_h=None, + skip_layout_reading_order=False): + self.logger.debug('enter build_pagexml') # create the file structure pcgts = self.pcgts if self.pcgts else create_page_xml(self.image_filename, self.height_org, self.width_org) @@ -108,89 +150,10 @@ class EynollahXmlWriter(): textregion = TextRegionType( id=counter.next_region_id, type_='paragraph', Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord, - skip_layout_reading_order), - conf=conf_contours_textregion[mm]), - ) - page.add_TextRegion(textregion) - if ocr_all_textlines: - ocr_textlines = ocr_all_textlines[mm] - else: - ocr_textlines = None - self.serialize_lines_in_region(textregion, all_found_textline_polygons, mm, page_coord, - all_box_coord, slopes, counter, ocr_textlines) - - for mm, region_contour in enumerate(found_polygons_marginals_left): - marginal = TextRegionType( - id=counter.next_region_id, type_='marginalia', - Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) - ) - page.add_TextRegion(marginal) - if ocr_all_textlines_marginals_left: - ocr_textlines = ocr_all_textlines_marginals_left[mm] - else: - ocr_textlines = None - self.serialize_lines_in_region(marginal, all_found_textline_polygons_marginals_left, mm, page_coord, - all_box_coord_marginals_left, slopes_marginals_left, counter, ocr_textlines) - - for mm, region_contour in enumerate(found_polygons_marginals_right): - marginal = TextRegionType( - id=counter.next_region_id, type_='marginalia', - Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) - ) - page.add_TextRegion(marginal) - if ocr_all_textlines_marginals_right: - ocr_textlines = ocr_all_textlines_marginals_right[mm] - else: - ocr_textlines = None - self.serialize_lines_in_region(marginal, all_found_textline_polygons_marginals_right, mm, page_coord, - all_box_coord_marginals_right, slopes_marginals_right, counter, ocr_textlines) - - for region_contour in found_polygons_text_region_img: - img_region = ImageRegionType( - id=counter.next_region_id, - Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) - ) - page.add_ImageRegion(img_region) - - for region_contour in polygons_seplines: - sep = SeparatorRegionType( - id=counter.next_region_id, - Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, [0, 0, 0, 0])) - ) - page.add_SeparatorRegion(sep) - - for region_contour in found_polygons_tables: - tab = TableRegionType( - id=counter.next_region_id, - Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) - ) - page.add_TableRegion(tab) - - return pcgts - - def build_pagexml_full_layout(self, found_polygons_text_region, found_polygons_text_region_h, page_coord, order_of_texts, id_of_texts, all_found_textline_polygons, all_found_textline_polygons_h, all_box_coord, all_box_coord_h, found_polygons_text_region_img, found_polygons_tables, found_polygons_drop_capitals, found_polygons_marginals_left,found_polygons_marginals_right, all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes, slopes_h, slopes_marginals_left, slopes_marginals_right, cont_page, polygons_seplines, ocr_all_textlines=None, ocr_all_textlines_h=None, ocr_all_textlines_marginals_left=None, ocr_all_textlines_marginals_right=None, ocr_all_textlines_drop=None, conf_contours_textregion=None, conf_contours_textregion_h=None): - self.logger.debug('enter build_pagexml_full_layout') - - # create the file structure - pcgts = self.pcgts if self.pcgts else create_page_xml(self.image_filename, self.height_org, self.width_org) - page = pcgts.get_Page() - page.set_Border(BorderType(Coords=CoordsType(points=self.calculate_page_coords(cont_page)))) - - counter = EynollahIdCounter() - if len(order_of_texts): - _counter_marginals = EynollahIdCounter(region_idx=len(order_of_texts)) - id_of_marginalia_left = [_counter_marginals.next_region_id - for _ in found_polygons_marginals_left] - id_of_marginalia_right = [_counter_marginals.next_region_id - for _ in found_polygons_marginals_right] - xml_reading_order(page, order_of_texts, id_of_marginalia_left, id_of_marginalia_right) - - for mm, region_contour in enumerate(found_polygons_text_region): - textregion = TextRegionType( - id=counter.next_region_id, type_='paragraph', - Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord), - conf=conf_contours_textregion[mm]) + skip_layout_reading_order)) ) + if conf_contours_textregion: + textregion.Coords.set_conf(conf_contours_textregion[mm]) page.add_TextRegion(textregion) if ocr_all_textlines: ocr_textlines = ocr_all_textlines[mm] @@ -205,6 +168,8 @@ class EynollahXmlWriter(): id=counter.next_region_id, type_='heading', Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) ) + if conf_contours_textregion_h: + textregion.Coords.set_conf(conf_contours_textregion_h[mm]) page.add_TextRegion(textregion) if ocr_all_textlines_h: ocr_textlines = ocr_all_textlines_h[mm] From 75823f9bed64153718acab6f664cdfc114ef34fb Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 8 Oct 2025 00:54:53 +0200 Subject: [PATCH 344/492] run_single: call `writer.build_pagexml_no_full_layout` w/ kwargs --- src/eynollah/eynollah.py | 32 ++++++++++---------------------- 1 file changed, 10 insertions(+), 22 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index ec68bcd..b109c90 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4260,18 +4260,6 @@ class Eynollah: order_text_new = [0] slopes =[0] id_of_texts_tot =['region_0001'] - - polygons_of_images = [] - slopes_marginals_left = [] - slopes_marginals_right = [] - polygons_of_marginals_left = [] - polygons_of_marginals_right = [] - all_found_textline_polygons_marginals_left = [] - all_found_textline_polygons_marginals_right = [] - all_box_coord_marginals_left = [] - all_box_coord_marginals_right = [] - polygons_seplines = [] - contours_tables = [] conf_contours_textregions =[0] if self.ocr and not self.tr: @@ -4284,15 +4272,13 @@ class Eynollah: pcgts = self.writer.build_pagexml_no_full_layout( cont_page, page_coord, order_text_new, id_of_texts_tot, - all_found_textline_polygons, page_coord, polygons_of_images, - polygons_of_marginals_left, polygons_of_marginals_right, - all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, - all_box_coord_marginals_left, all_box_coord_marginals_right, - slopes, slopes_marginals_left, slopes_marginals_right, - cont_page, polygons_seplines, contours_tables, + all_found_textline_polygons, page_coord, [], + [], [], [], [], [], [], + slopes, [], [], + cont_page, [], [], ocr_all_textlines=ocr_all_textlines, conf_contours_textregion=conf_contours_textregions, - skip_layout_reading_order=self.skip_layout_and_reading_order) + skip_layout_reading_order=True) self.logger.info("Basic processing complete") return pcgts @@ -4884,9 +4870,11 @@ class Eynollah: all_found_textline_polygons_marginals_left, all_found_textline_polygons_marginals_right, all_box_coord_marginals_left, all_box_coord_marginals_right, slopes, slopes_marginals_left, slopes_marginals_right, - cont_page, polygons_seplines, contours_tables, ocr_all_textlines, - ocr_all_textlines_marginals_left, ocr_all_textlines_marginals_right, - conf_contours_textregions) + cont_page, polygons_seplines, contours_tables, + ocr_all_textlines=ocr_all_textlines, + ocr_all_textlines_marginals_left=ocr_all_textlines_marginals_left, + ocr_all_textlines_marginals_right=ocr_all_textlines_marginals_right, + conf_contours_textregions=conf_contours_textregions) return pcgts From 5e11a68a3e18e926b25829e0fce3c279e529aca0 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 8 Oct 2025 01:03:48 +0200 Subject: [PATCH 345/492] writer/run_single: consistent kwarg naming `conf_contours_textregion(s)` --- src/eynollah/writer.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index eee7440..8859d95 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -128,7 +128,7 @@ class EynollahXmlWriter(): ocr_all_textlines=None, ocr_all_textlines_h=None, ocr_all_textlines_marginals_left=None, ocr_all_textlines_marginals_right=None, ocr_all_textlines_drop=None, - conf_contours_textregion=None, conf_contours_textregion_h=None, + conf_contours_textregions=None, conf_contours_textregions_h=None, skip_layout_reading_order=False): self.logger.debug('enter build_pagexml') @@ -152,8 +152,8 @@ class EynollahXmlWriter(): Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord, skip_layout_reading_order)) ) - if conf_contours_textregion: - textregion.Coords.set_conf(conf_contours_textregion[mm]) + if conf_contours_textregions: + textregion.Coords.set_conf(conf_contours_textregions[mm]) page.add_TextRegion(textregion) if ocr_all_textlines: ocr_textlines = ocr_all_textlines[mm] @@ -168,8 +168,8 @@ class EynollahXmlWriter(): id=counter.next_region_id, type_='heading', Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) ) - if conf_contours_textregion_h: - textregion.Coords.set_conf(conf_contours_textregion_h[mm]) + if conf_contours_textregions_h: + textregion.Coords.set_conf(conf_contours_textregions_h[mm]) page.add_TextRegion(textregion) if ocr_all_textlines_h: ocr_textlines = ocr_all_textlines_h[mm] From ca72a095cab373b6daa2f7353f456d9eacfd399b Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 8 Oct 2025 00:44:32 +0200 Subject: [PATCH 346/492] tests: cover table detection in various modes --- tests/test_run.py | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/tests/test_run.py b/tests/test_run.py index 98cee30..79c64c2 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -67,6 +67,44 @@ def test_run_eynollah_layout_filename(tmp_path, pytestconfig, caplog, options): lines = tree.xpath("//page:TextLine", namespaces=NS) assert len(lines) == 31, "result is inaccurate" # 29 paragraph lines, 1 page and 1 catch-word line +@pytest.mark.parametrize( + "options", + [ + ["--tables"], + ["--tables", "--full-layout"], + ["--tables", "--full-layout", "--textline_light", "--light_version"], + ], ids=str) +def test_run_eynollah_layout_filename2(tmp_path, pytestconfig, caplog, options): + infile = testdir.joinpath('resources/euler_rechenkunst01_1738_0025.tif') + outfile = tmp_path / 'euler_rechenkunst01_1738_0025.xml' + args = [ + '-m', MODELS_LAYOUT, + '-i', str(infile), + '-o', str(outfile.parent), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + def only_eynollah(logrec): + return logrec.name == 'eynollah' + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(layout_cli, args + options, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + assert str(infile) in logmsgs + assert outfile.exists() + tree = page_from_file(str(outfile)).etree + regions = tree.xpath("//page:TextRegion", namespaces=NS) + assert len(regions) >= 2, "result is inaccurate" + regions = tree.xpath("//page:TableRegion", namespaces=NS) + # model/decoding is not very precise, so (depending on mode) we can get fractures/splits/FP + assert len(regions) >= 1, "result is inaccurate" + regions = tree.xpath("//page:SeparatorRegion", namespaces=NS) + assert len(regions) >= 2, "result is inaccurate" + lines = tree.xpath("//page:TextLine", namespaces=NS) + assert len(lines) >= 2, "result is inaccurate" # mostly table (if detected correctly), but 1 page and 1 catch-word line + def test_run_eynollah_layout_directory(tmp_path, pytestconfig, caplog): indir = testdir.joinpath('resources') outdir = tmp_path From e5b52645685b669d5af7c5da2870a01660f81cdb Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 8 Oct 2025 12:17:53 +0200 Subject: [PATCH 347/492] CI: add diagnostic message for model symlink --- .github/workflows/test-eynollah.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test-eynollah.yml b/.github/workflows/test-eynollah.yml index 7c3f5ae..759b26c 100644 --- a/.github/workflows/test-eynollah.yml +++ b/.github/workflows/test-eynollah.yml @@ -66,6 +66,7 @@ jobs: python -m pip install --upgrade pip make install-dev EXTRAS=OCR,plotting make deps-test EXTRAS=OCR,plotting + ls -l models_* - name: Test with pytest run: make coverage PYTEST_ARGS="-vv --junitxml=pytest.xml" - name: Get coverage results From 839b7c4d846d6f73069529aa1f337caa362917c0 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 8 Oct 2025 12:33:14 +0200 Subject: [PATCH 348/492] make models: avoid re-download --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index 618b1f9..29dd877 100644 --- a/Makefile +++ b/Makefile @@ -58,6 +58,9 @@ help: # Download and extract models to $(PWD)/models_layout_v0_5_0 models: $(BIN_MODELNAME) $(SEG_MODELNAME) $(OCR_MODELNAME) +# do not download these files if we already have the directories +.INTERMEDIATE: $(BIN_MODELFILE) $(SEG_MODELFILE) $(OCR_MODELFILE) + $(BIN_MODELFILE): wget -O $@ $(BIN_MODEL) $(SEG_MODELFILE): From 1d4815b48f1f5b1bf006efe78141fd3161ee8073 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 8 Oct 2025 14:56:14 +0200 Subject: [PATCH 349/492] utils_ocr: forgot to pass coordinate offsets --- src/eynollah/eynollah.py | 24 ++++++++++++------------ src/eynollah/utils/utils_ocr.py | 10 ++++++++-- 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index b109c90..a6b65c4 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4265,8 +4265,8 @@ class Eynollah: if self.ocr and not self.tr: gc.collect() ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines( - image_page, all_found_textline_polygons, self.prediction_model, - self.b_s_ocr, self.num_to_char, textline_light=True) + image_page, all_found_textline_polygons, np.zeros((len(all_found_textline_polygons), 4)), + self.prediction_model, self.b_s_ocr, self.num_to_char, textline_light=True) else: ocr_all_textlines = None @@ -4756,36 +4756,36 @@ class Eynollah: if len(all_found_textline_polygons)>0: ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines( - image_page, all_found_textline_polygons, self.prediction_model, - self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + image_page, all_found_textline_polygons, all_box_coord, + self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) else: ocr_all_textlines = None if all_found_textline_polygons_marginals_left and len(all_found_textline_polygons_marginals_left)>0: ocr_all_textlines_marginals_left = return_rnn_cnn_ocr_of_given_textlines( - image_page, all_found_textline_polygons_marginals_left, self.prediction_model, - self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + image_page, all_found_textline_polygons_marginals_left, all_box_coord_marginals_left, + self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) else: ocr_all_textlines_marginals_left = None if all_found_textline_polygons_marginals_right and len(all_found_textline_polygons_marginals_right)>0: ocr_all_textlines_marginals_right = return_rnn_cnn_ocr_of_given_textlines( - image_page, all_found_textline_polygons_marginals_right, self.prediction_model, - self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + image_page, all_found_textline_polygons_marginals_right, all_box_coord_marginals_right, + self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) else: ocr_all_textlines_marginals_right = None if all_found_textline_polygons_h and len(all_found_textline_polygons)>0: ocr_all_textlines_h = return_rnn_cnn_ocr_of_given_textlines( - image_page, all_found_textline_polygons_h, self.prediction_model, - self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + image_page, all_found_textline_polygons_h, all_box_coord_h, + self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) else: ocr_all_textlines_h = None if polygons_of_drop_capitals and len(polygons_of_drop_capitals)>0: ocr_all_textlines_drop = return_rnn_cnn_ocr_of_given_textlines( - image_page, polygons_of_drop_capitals, self.prediction_model, - self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + image_page, polygons_of_drop_capitals, np.zeros((len(polygons_of_drop_capitals), 4)), + self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) else: ocr_all_textlines_drop = None diff --git a/src/eynollah/utils/utils_ocr.py b/src/eynollah/utils/utils_ocr.py index 602ad6e..6e71b0f 100644 --- a/src/eynollah/utils/utils_ocr.py +++ b/src/eynollah/utils/utils_ocr.py @@ -1,13 +1,17 @@ +import math +import copy + import numpy as np import cv2 import tensorflow as tf from scipy.signal import find_peaks from scipy.ndimage import gaussian_filter1d -import math from PIL import Image, ImageDraw, ImageFont from Bio import pairwise2 + from .resize import resize_image + def decode_batch_predictions(pred, num_to_char, max_len = 128): # input_len is the product of the batch size and the # number of time steps. @@ -370,7 +374,9 @@ def return_textline_contour_with_added_box_coordinate(textline_contour, box_ind return textline_contour -def return_rnn_cnn_ocr_of_given_textlines(image, all_found_textline_polygons, +def return_rnn_cnn_ocr_of_given_textlines(image, + all_found_textline_polygons, + all_box_coord, prediction_model, b_s_ocr, num_to_char, textline_light=False, From 027b87d32125afdc1bebbb968fc32b55b58bf153 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 8 Oct 2025 14:56:57 +0200 Subject: [PATCH 350/492] fixup c0137c2 (missing arguments for utils_ocr) --- src/eynollah/eynollah.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index a6b65c4..aeb01be 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -96,6 +96,7 @@ from .utils.rotate import ( rotation_image_new ) from .utils.utils_ocr import ( + return_start_and_end_of_common_text_of_textline_ocr_without_common_section, return_textline_contour_with_added_box_coordinate, preprocess_and_resize_image_for_ocrcnn_model, return_textlines_split_if_needed, @@ -4796,7 +4797,6 @@ class Eynollah: self.logger.info("Using light text line detection for OCR") self.logger.info("Processing text lines...") - self.device.reset() gc.collect() torch.cuda.empty_cache() From 096def1e9d0b95cf3690734730f675ae5a74c0fd Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 8 Oct 2025 15:13:13 +0200 Subject: [PATCH 351/492] mbreorder/enhancment: fix missing imports (not sure if these models really need that, though) --- src/eynollah/image_enhancer.py | 6 +++--- src/eynollah/mb_ro_on_layout.py | 7 +++---- tests/test_smoke.py | 1 - 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/src/eynollah/image_enhancer.py b/src/eynollah/image_enhancer.py index 89dde16..9247efe 100644 --- a/src/eynollah/image_enhancer.py +++ b/src/eynollah/image_enhancer.py @@ -6,23 +6,23 @@ from logging import Logger import os import time from typing import Optional -import atexit -from functools import partial from pathlib import Path -from multiprocessing import cpu_count import gc + import cv2 import numpy as np from ocrd_utils import getLogger, tf_disable_interactive_logs import tensorflow as tf from skimage.morphology import skeletonize from tensorflow.keras.models import load_model + from .utils.resize import resize_image from .utils.pil_cv2 import pil2cv from .utils import ( is_image_filename, crop_image_inside_box ) +from .eynollah import PatchEncoder, Patches DPI_THRESHOLD = 298 KERNEL = np.ones((5, 5), np.uint8) diff --git a/src/eynollah/mb_ro_on_layout.py b/src/eynollah/mb_ro_on_layout.py index 45db8e4..218f973 100644 --- a/src/eynollah/mb_ro_on_layout.py +++ b/src/eynollah/mb_ro_on_layout.py @@ -6,25 +6,24 @@ from logging import Logger import os import time from typing import Optional -import atexit -from functools import partial from pathlib import Path -from multiprocessing import cpu_count import xml.etree.ElementTree as ET + import cv2 import numpy as np from ocrd_utils import getLogger import statistics import tensorflow as tf from tensorflow.keras.models import load_model -from .utils.resize import resize_image +from .utils.resize import resize_image from .utils.contour import ( find_new_features_of_contours, return_contours_of_image, return_parent_contours, ) from .utils import is_xml_filename +from .eynollah import PatchEncoder, Patches DPI_THRESHOLD = 298 KERNEL = np.ones((5, 5), np.uint8) diff --git a/tests/test_smoke.py b/tests/test_smoke.py index 252213f..e2b323a 100644 --- a/tests/test_smoke.py +++ b/tests/test_smoke.py @@ -2,6 +2,5 @@ def test_utils_import(): import eynollah.utils import eynollah.utils.contour import eynollah.utils.drop_capitals - import eynollah.utils.drop_capitals import eynollah.utils.is_nan import eynollah.utils.rotate From 8a2d682e12d8e95414aa53f1e2a9cfea74c778a3 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 8 Oct 2025 16:52:22 +0200 Subject: [PATCH 352/492] fix identifier scope in layout OCR options (w/o full_layout) --- src/eynollah/eynollah.py | 33 +++++++++++---------------------- 1 file changed, 11 insertions(+), 22 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index aeb01be..7d6229a 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4726,7 +4726,6 @@ class Eynollah: self.plotter.write_images_into_directory(polygons_of_images, image_page) t_order = time.time() - #if self.full_layout: self.logger.info("Step 4/5: Reading Order Detection") if self.reading_order_machine_based: @@ -4749,46 +4748,41 @@ class Eynollah: boxes_d, textline_mask_tot_d) self.logger.info(f"Detection of reading order took {time.time() - t_order:.1f}s") + ocr_all_textlines = None + ocr_all_textlines_marginals_left = None + ocr_all_textlines_marginals_right = None + ocr_all_textlines_h = None + ocr_all_textlines_drop = None if self.ocr: self.logger.info("Step 4.5/5: OCR Processing") if not self.tr: gc.collect() - if len(all_found_textline_polygons)>0: + if len(all_found_textline_polygons): ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons, all_box_coord, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) - else: - ocr_all_textlines = None - if all_found_textline_polygons_marginals_left and len(all_found_textline_polygons_marginals_left)>0: + if len(all_found_textline_polygons_marginals_left): ocr_all_textlines_marginals_left = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons_marginals_left, all_box_coord_marginals_left, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) - else: - ocr_all_textlines_marginals_left = None - if all_found_textline_polygons_marginals_right and len(all_found_textline_polygons_marginals_right)>0: + if len(all_found_textline_polygons_marginals_right): ocr_all_textlines_marginals_right = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons_marginals_right, all_box_coord_marginals_right, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) - else: - ocr_all_textlines_marginals_right = None - if all_found_textline_polygons_h and len(all_found_textline_polygons)>0: + if self.full_layout and len(all_found_textline_polygons): ocr_all_textlines_h = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons_h, all_box_coord_h, self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) - else: - ocr_all_textlines_h = None - if polygons_of_drop_capitals and len(polygons_of_drop_capitals)>0: + if self.full_layout and len(polygons_of_drop_capitals): ocr_all_textlines_drop = return_rnn_cnn_ocr_of_given_textlines( image_page, polygons_of_drop_capitals, np.zeros((len(polygons_of_drop_capitals), 4)), self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) - else: - ocr_all_textlines_drop = None else: if self.light_version: @@ -4805,6 +4799,7 @@ class Eynollah: ind_tot = 0 #cv2.imwrite('./img_out.png', image_page) ocr_all_textlines = [] + # FIXME: what about lines in marginals / headings / drop-capitals here? for indexing, ind_poly_first in enumerate(all_found_textline_polygons): ocr_textline_in_textregion = [] for indexing2, ind_poly in enumerate(ind_poly_first): @@ -4840,12 +4835,6 @@ class Eynollah: ocr_textline_in_textregion.append(text_ocr) ind_tot = ind_tot +1 ocr_all_textlines.append(ocr_textline_in_textregion) - else: - ocr_all_textlines = None - ocr_all_textlines_marginals_left = None - ocr_all_textlines_marginals_right = None - ocr_all_textlines_h = None - ocr_all_textlines_drop = None self.logger.info("Step 5/5: Output Generation") From b3d29bef8961435f85cf0c95ec3dd6c239e74621 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 8 Oct 2025 19:21:07 +0200 Subject: [PATCH 353/492] return_contours_of_interested_region*: rm unused variants --- src/eynollah/eynollah.py | 17 +++++++---------- src/eynollah/utils/contour.py | 33 --------------------------------- 2 files changed, 7 insertions(+), 43 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 7d6229a..e15afd6 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -79,7 +79,6 @@ from .utils.contour import ( get_textregion_contours_in_org_image_light, return_contours_of_image, return_contours_of_interested_region, - return_contours_of_interested_region_by_min_size, return_contours_of_interested_textline, return_parent_contours, dilate_textregion_contours, @@ -4242,14 +4241,11 @@ class Eynollah: all_found_textline_polygons = filter_contours_area_of_image( textline_mask_tot_ea, cnt_clean_rot_raw, hir_on_cnt_clean_rot, max_area=1, min_area=0.00001) - M_main_tot = [cv2.moments(all_found_textline_polygons[j]) - for j in range(len(all_found_textline_polygons))] - w_h_textlines = [cv2.boundingRect(all_found_textline_polygons[j])[2:] - for j in range(len(all_found_textline_polygons))] - w_h_textlines = [w_h_textlines[j][0] / float(w_h_textlines[j][1]) for j in range(len(w_h_textlines))] - cx_main_tot = [(M_main_tot[j]["m10"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] - cy_main_tot = [(M_main_tot[j]["m01"] / (M_main_tot[j]["m00"] + 1e-32)) for j in range(len(M_main_tot))] - + cx_main_tot, cy_main_tot = find_center_of_contours(all_found_textline_polygons) + w_h_textlines = [cv2.boundingRect(polygon)[2:] + for polygon in all_found_textline_polygons] + w_h_textlines = [w / float(h) for w, h in w_h_textlines] + all_found_textline_polygons = self.get_textlines_of_a_textregion_sorted( #all_found_textline_polygons[::-1] all_found_textline_polygons, cx_main_tot, cy_main_tot, w_h_textlines) @@ -4677,7 +4673,8 @@ class Eynollah: self.plotter.save_plot_of_layout_all(text_regions_p, image_page) label_img = 4 - polygons_of_drop_capitals = return_contours_of_interested_region_by_min_size(text_regions_p, label_img) + polygons_of_drop_capitals = return_contours_of_interested_region(text_regions_p, label_img, + min_area=0.00003) ##all_found_textline_polygons = adhere_drop_capital_region_into_corresponding_textline( ##text_regions_p, polygons_of_drop_capitals, contours_only_text_parent, contours_only_text_parent_h, ##all_box_coord, all_box_coord_h, all_found_textline_polygons, all_found_textline_polygons_h, diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index 2560846..f998c4d 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -253,39 +253,6 @@ def return_contours_of_image(image): contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) return contours, hierarchy -def return_contours_of_interested_region_by_min_size(region_pre_p, label, min_size=0.00003): - # pixels of images are identified by 5 - if region_pre_p.ndim == 3: - cnts_images = (region_pre_p[:, :, 0] == label) * 1 - else: - cnts_images = (region_pre_p[:, :] == label) * 1 - _, thresh = cv2.threshold(cnts_images.astype(np.uint8), 0, 255, 0) - - contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - contours_imgs = return_parent_contours(contours_imgs, hierarchy) - contours_imgs = filter_contours_area_of_image_tables( - thresh, contours_imgs, hierarchy, max_area=1, min_area=min_size) - - return contours_imgs - -def return_contours_of_interested_region_by_size(region_pre_p, label, min_area, max_area): - # pixels of images are identified by 5 - if region_pre_p.ndim == 3: - cnts_images = (region_pre_p[:, :, 0] == label) * 1 - else: - cnts_images = (region_pre_p[:, :] == label) * 1 - _, thresh = cv2.threshold(cnts_images.astype(np.uint8), 0, 255, 0) - contours_imgs, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - - contours_imgs = return_parent_contours(contours_imgs, hierarchy) - contours_imgs = filter_contours_area_of_image_tables( - thresh, contours_imgs, hierarchy, max_area=max_area, min_area=min_area) - - img_ret = np.zeros((region_pre_p.shape[0], region_pre_p.shape[1])) - img_ret = cv2.fillPoly(img_ret, pts=contours_imgs, color=1) - - return img_ret - def dilate_textline_contours(all_found_textline_polygons): return [[polygon2contour(contour2polygon(contour, dilate=6)) for contour in region] From a144026b2789ae056c7bac619d2e3e2b582e62d6 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 8 Oct 2025 15:13:57 +0200 Subject: [PATCH 354/492] add rough ruff config --- pyproject.toml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 8a63543..2df39b9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,3 +51,18 @@ where = ["src"] [tool.coverage.run] branch = true source = ["eynollah"] + +[tool.ruff] +line-length = 120 + +[tool.ruff.lint] +ignore = [ +# disable unused imports +"F401", +# disable import order +"E402", +# disable unused variables +"F841", +# disable bare except +"E722", +] From e1b56d97dab9eed6110fabd85b5ae74b36f18c9f Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 8 Oct 2025 17:54:38 +0200 Subject: [PATCH 355/492] CI: lint with ruff --- .github/workflows/test-eynollah.yml | 4 ++++ pyproject.toml | 3 +++ 2 files changed, 7 insertions(+) diff --git a/.github/workflows/test-eynollah.yml b/.github/workflows/test-eynollah.yml index 759b26c..466e690 100644 --- a/.github/workflows/test-eynollah.yml +++ b/.github/workflows/test-eynollah.yml @@ -67,6 +67,10 @@ jobs: make install-dev EXTRAS=OCR,plotting make deps-test EXTRAS=OCR,plotting ls -l models_* + - name: Lint with ruff + uses: astral-sh/ruff-action@v3 + with: + src: "./src" - name: Test with pytest run: make coverage PYTEST_ARGS="-vv --junitxml=pytest.xml" - name: Get coverage results diff --git a/pyproject.toml b/pyproject.toml index 2df39b9..79f9164 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -66,3 +66,6 @@ ignore = [ # disable bare except "E722", ] + +[tool.ruff.format] +quote-style = "preserve" From cab392601e74e0360e659296f26e1719fb6f742f Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 9 Oct 2025 20:12:06 +0200 Subject: [PATCH 356/492] :memo: update changelog --- CHANGELOG.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f6776d6..ab3dd83 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,11 +15,17 @@ Fixed: * `get_smallest_skew`: after shifting search range of rotation angle, use overall best result * Dockerfile: fix CUDA installation (cuDNN contested between Torch and TF due to extra OCR) * OCR: re-instate missing methods and fix `utils_ocr` function calls + * mbreorder/enhancement CLIs: missing imports * :fire: writer: `SeparatorRegion` needs `SeparatorRegionType` (not `ImageRegionType`) f458e3e * tests: switch from `pytest-subtests` to `parametrize` so we can use `pytest-isolate` (so CUDA memory gets freed between tests if running on GPU) +Added: + * test coverage for OCR options in `layout` + * test coverage for table detection in `layout` + * CI linting with ruff + Changed: * polygons: slightly widen for regions and lines, increase for separators @@ -28,7 +34,19 @@ Changed: but use shared memory if necessary, and switch back from `loky` to stdlib, and shutdown in `del()` instead of `atexit` * :fire: OCR: switch CNN-RNN model to `20250930` version compatible with TF 2.12 on CPU, too + * OCR: allow running `-tr` without `-fl`, too * :fire: writer: use `@type='heading'` instead of `'header'` for headings + * :fire: performance gains via refactoring (simplification, less copy-code, vectorization, + avoiding unused calculations, avoiding unnecessary 3-channel image operations) + * :fire: heuristic reading order detection: many improvements + - contour vs splitter box matching: + * contour must be contained in box exactly instead of heuristics + * make fallback center matching, center must be contained in box + - original vs deskewed contour matching: + * same min-area filter on both sides + * similar area score in addition to center proximity + * avoid duplicate and missing mappings by allowing N:M + matches and splitting+joining where necessary * CI: update+improve model caching From c4cb16c2a8e92b0d14b2388ad7a7e8d06e6472fe Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 9 Oct 2025 23:05:50 +0200 Subject: [PATCH 357/492] simplify (`skip_layout_and_reading_order` is already an attr) --- src/eynollah/eynollah.py | 205 +++++++++++++++++++-------------------- 1 file changed, 102 insertions(+), 103 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 1b6cee0..3579078 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -2155,7 +2155,7 @@ class Eynollah: page_coord, cont_page) - def get_regions_light_v(self,img,is_image_enhanced, num_col_classifier, skip_layout_and_reading_order=False): + def get_regions_light_v(self,img,is_image_enhanced, num_col_classifier): self.logger.debug("enter get_regions_light_v") t_in = time.time() erosion_hurts = False @@ -2221,110 +2221,110 @@ class Eynollah: #plt.imshwo(self.image_page_org_size) #plt.show() - if not skip_layout_and_reading_order: - #print("inside 2 ", time.time()-t_in) - if num_col_classifier == 1 or num_col_classifier == 2: - if self.image_org.shape[0]/self.image_org.shape[1] > 2.5: - self.logger.debug("resized to %dx%d for %d cols", - img_resized.shape[1], img_resized.shape[0], num_col_classifier) - prediction_regions_org, confidence_matrix = self.do_prediction_new_concept( - True, img_resized, self.model_region_1_2, n_batch_inference=1, - thresholding_for_some_classes_in_light_version=True, - threshold_art_class_layout=self.threshold_art_class_layout) - else: - prediction_regions_org = np.zeros((self.image_org.shape[0], self.image_org.shape[1], 3)) - confidence_matrix = np.zeros((self.image_org.shape[0], self.image_org.shape[1])) - prediction_regions_page, confidence_matrix_page = self.do_prediction_new_concept( - False, self.image_page_org_size, self.model_region_1_2, n_batch_inference=1, - thresholding_for_artificial_class_in_light_version=True, - threshold_art_class_layout=self.threshold_art_class_layout) - ys = slice(*self.page_coord[0:2]) - xs = slice(*self.page_coord[2:4]) - prediction_regions_org[ys, xs] = prediction_regions_page - confidence_matrix[ys, xs] = confidence_matrix_page - - else: - new_h = (900+ (num_col_classifier-3)*100) - img_resized = resize_image(img_bin, int(new_h * img_bin.shape[0] /img_bin.shape[1]), new_h) - self.logger.debug("resized to %dx%d (new_h=%d) for %d cols", - img_resized.shape[1], img_resized.shape[0], new_h, num_col_classifier) - prediction_regions_org, confidence_matrix = self.do_prediction_new_concept( - True, img_resized, self.model_region_1_2, n_batch_inference=2, - thresholding_for_some_classes_in_light_version=True, - threshold_art_class_layout=self.threshold_art_class_layout) - ###prediction_regions_org = self.do_prediction(True, img_bin, self.model_region, - ###n_batch_inference=3, - ###thresholding_for_some_classes_in_light_version=True) - #print("inside 3 ", time.time()-t_in) - #plt.imshow(prediction_regions_org[:,:,0]) - #plt.show() - - prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) - confidence_matrix = resize_image(confidence_matrix, img_height_h, img_width_h ) - img_bin = resize_image(img_bin, img_height_h, img_width_h ) - prediction_regions_org=prediction_regions_org[:,:,0] - - mask_lines_only = (prediction_regions_org[:,:] ==3)*1 - mask_texts_only = (prediction_regions_org[:,:] ==1)*1 - mask_texts_only = mask_texts_only.astype('uint8') - - ##if num_col_classifier == 1 or num_col_classifier == 2: - ###mask_texts_only = cv2.erode(mask_texts_only, KERNEL, iterations=1) - ##mask_texts_only = cv2.dilate(mask_texts_only, KERNEL, iterations=1) - - mask_texts_only = cv2.dilate(mask_texts_only, kernel=np.ones((2,2), np.uint8), iterations=1) - mask_images_only=(prediction_regions_org[:,:] ==2)*1 - - polygons_seplines, hir_seplines = return_contours_of_image(mask_lines_only) - test_khat = np.zeros(prediction_regions_org.shape) - test_khat = cv2.fillPoly(test_khat, pts=polygons_seplines, color=(1,1,1)) - - #plt.imshow(test_khat[:,:]) - #plt.show() - #for jv in range(1): - #print(jv, hir_seplines[0][232][3]) - #test_khat = np.zeros(prediction_regions_org.shape) - #test_khat = cv2.fillPoly(test_khat, pts = [polygons_seplines[232]], color=(1,1,1)) - #plt.imshow(test_khat[:,:]) - #plt.show() - - polygons_seplines = filter_contours_area_of_image( - mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001, dilate=1) - - test_khat = np.zeros(prediction_regions_org.shape) - test_khat = cv2.fillPoly(test_khat, pts = polygons_seplines, color=(1,1,1)) - - #plt.imshow(test_khat[:,:]) - #plt.show() - #sys.exit() - - polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) - ##polygons_of_only_texts = dilate_textregion_contours(polygons_of_only_texts) - polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) - - text_regions_p_true = np.zeros(prediction_regions_org.shape) - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts=polygons_of_only_lines, color=(3,3,3)) - - text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 - text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_texts, color=(1,1,1)) - - textline_mask_tot_ea[(text_regions_p_true==0) | (text_regions_p_true==4) ] = 0 - #plt.imshow(textline_mask_tot_ea) - #plt.show() - #print("inside 4 ", time.time()-t_in) - self.logger.debug("exit get_regions_light_v") - return (text_regions_p_true, - erosion_hurts, - polygons_seplines, - polygons_of_only_texts, - textline_mask_tot_ea, - img_bin, - confidence_matrix) - else: + if self.skip_layout_and_reading_order: img_bin = resize_image(img_bin,img_height_h, img_width_h ) self.logger.debug("exit get_regions_light_v") return None, erosion_hurts, None, None, textline_mask_tot_ea, img_bin, None + #print("inside 2 ", time.time()-t_in) + if num_col_classifier == 1 or num_col_classifier == 2: + if self.image_org.shape[0]/self.image_org.shape[1] > 2.5: + self.logger.debug("resized to %dx%d for %d cols", + img_resized.shape[1], img_resized.shape[0], num_col_classifier) + prediction_regions_org, confidence_matrix = self.do_prediction_new_concept( + True, img_resized, self.model_region_1_2, n_batch_inference=1, + thresholding_for_some_classes_in_light_version=True, + threshold_art_class_layout=self.threshold_art_class_layout) + else: + prediction_regions_org = np.zeros((self.image_org.shape[0], self.image_org.shape[1], 3)) + confidence_matrix = np.zeros((self.image_org.shape[0], self.image_org.shape[1])) + prediction_regions_page, confidence_matrix_page = self.do_prediction_new_concept( + False, self.image_page_org_size, self.model_region_1_2, n_batch_inference=1, + thresholding_for_artificial_class_in_light_version=True, + threshold_art_class_layout=self.threshold_art_class_layout) + ys = slice(*self.page_coord[0:2]) + xs = slice(*self.page_coord[2:4]) + prediction_regions_org[ys, xs] = prediction_regions_page + confidence_matrix[ys, xs] = confidence_matrix_page + + else: + new_h = (900+ (num_col_classifier-3)*100) + img_resized = resize_image(img_bin, int(new_h * img_bin.shape[0] /img_bin.shape[1]), new_h) + self.logger.debug("resized to %dx%d (new_h=%d) for %d cols", + img_resized.shape[1], img_resized.shape[0], new_h, num_col_classifier) + prediction_regions_org, confidence_matrix = self.do_prediction_new_concept( + True, img_resized, self.model_region_1_2, n_batch_inference=2, + thresholding_for_some_classes_in_light_version=True, + threshold_art_class_layout=self.threshold_art_class_layout) + ###prediction_regions_org = self.do_prediction(True, img_bin, self.model_region, + ###n_batch_inference=3, + ###thresholding_for_some_classes_in_light_version=True) + #print("inside 3 ", time.time()-t_in) + #plt.imshow(prediction_regions_org[:,:,0]) + #plt.show() + + prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) + confidence_matrix = resize_image(confidence_matrix, img_height_h, img_width_h ) + img_bin = resize_image(img_bin, img_height_h, img_width_h ) + prediction_regions_org=prediction_regions_org[:,:,0] + + mask_lines_only = (prediction_regions_org[:,:] ==3)*1 + mask_texts_only = (prediction_regions_org[:,:] ==1)*1 + mask_texts_only = mask_texts_only.astype('uint8') + + ##if num_col_classifier == 1 or num_col_classifier == 2: + ###mask_texts_only = cv2.erode(mask_texts_only, KERNEL, iterations=1) + ##mask_texts_only = cv2.dilate(mask_texts_only, KERNEL, iterations=1) + + mask_texts_only = cv2.dilate(mask_texts_only, kernel=np.ones((2,2), np.uint8), iterations=1) + mask_images_only=(prediction_regions_org[:,:] ==2)*1 + + polygons_seplines, hir_seplines = return_contours_of_image(mask_lines_only) + test_khat = np.zeros(prediction_regions_org.shape) + test_khat = cv2.fillPoly(test_khat, pts=polygons_seplines, color=(1,1,1)) + + #plt.imshow(test_khat[:,:]) + #plt.show() + #for jv in range(1): + #print(jv, hir_seplines[0][232][3]) + #test_khat = np.zeros(prediction_regions_org.shape) + #test_khat = cv2.fillPoly(test_khat, pts = [polygons_seplines[232]], color=(1,1,1)) + #plt.imshow(test_khat[:,:]) + #plt.show() + + polygons_seplines = filter_contours_area_of_image( + mask_lines_only, polygons_seplines, hir_seplines, max_area=1, min_area=0.00001, dilate=1) + + test_khat = np.zeros(prediction_regions_org.shape) + test_khat = cv2.fillPoly(test_khat, pts = polygons_seplines, color=(1,1,1)) + + #plt.imshow(test_khat[:,:]) + #plt.show() + #sys.exit() + + polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001) + ##polygons_of_only_texts = dilate_textregion_contours(polygons_of_only_texts) + polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001) + + text_regions_p_true = np.zeros(prediction_regions_org.shape) + text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts=polygons_of_only_lines, color=(3,3,3)) + + text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2 + text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_texts, color=(1,1,1)) + + textline_mask_tot_ea[(text_regions_p_true==0) | (text_regions_p_true==4) ] = 0 + #plt.imshow(textline_mask_tot_ea) + #plt.show() + #print("inside 4 ", time.time()-t_in) + self.logger.debug("exit get_regions_light_v") + return (text_regions_p_true, + erosion_hurts, + polygons_seplines, + polygons_of_only_texts, + textline_mask_tot_ea, + img_bin, + confidence_matrix) + def get_regions_from_xy_2models(self,img,is_image_enhanced, num_col_classifier): self.logger.debug("enter get_regions_from_xy_2models") erosion_hurts = False @@ -4226,8 +4226,7 @@ class Eynollah: self.logger.info("Skipping layout analysis and reading order detection") _ ,_, _, _, textline_mask_tot_ea, img_bin_light, _ = \ - self.get_regions_light_v(img_res, is_image_enhanced, num_col_classifier, - skip_layout_and_reading_order=self.skip_layout_and_reading_order) + self.get_regions_light_v(img_res, is_image_enhanced, num_col_classifier,) page_coord, image_page, textline_mask_tot_ea, img_bin_light, cont_page = \ self.run_graphics_and_columns_without_layout(textline_mask_tot_ea, img_bin_light) From 374818de118dc0292dde789c6c3a233dbce4d83d Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Thu, 9 Oct 2025 23:11:05 +0200 Subject: [PATCH 358/492] :memo: update changelog for 5725e4f --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9a0f190..6fd3b2e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,9 @@ Versioned according to [Semantic Versioning](http://semver.org/). Fixed: + * continue processing when no columns detected but text regions exist + * convert marginalia to main text if no main text is present + * reset deskewing angle to 0° when text covers <30% image area and detected angle >45° * :fire: polygons: avoid invalid paths (use `Polygon.buffer()` instead of dilation etc.) * `return_boxes_of_images_by_order_of_reading_new`: avoid Numpy.dtype mismatch, simplify * `return_boxes_of_images_by_order_of_reading_new`: log any exceptions instead of ignoring From 4e9a1618c355a7aeed471c9f63018440adf441cf Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Fri, 10 Oct 2025 03:18:09 +0200 Subject: [PATCH 359/492] layout: refactor model setup, allow loading custom versions - simplify definition of (defaults for) model versions - unify loading of loadable models (depending on mode) - use `self.models` dict instead of `self.model_*` attributes - add `model_versions` kwarg / `--model_version` CLI option --- CHANGELOG.md | 1 + src/eynollah/cli.py | 10 +- src/eynollah/eynollah.py | 362 +++++++++++++++++++-------------------- 3 files changed, 191 insertions(+), 182 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6fd3b2e..df1e12e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ f458e3e (so CUDA memory gets freed between tests if running on GPU) Added: + * :fire: `layout` CLI: new option `--model_version` to override default choices * test coverage for OCR options in `layout` * test coverage for table detection in `layout` * CI linting with ruff diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 93bb676..c9bad52 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -202,6 +202,13 @@ def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_low type=click.Path(exists=True, file_okay=False), required=True, ) +@click.option( + "--model_version", + "-mv", + help="override default versions of model categories", + type=(str, str), + multiple=True, +) @click.option( "--save_images", "-si", @@ -373,7 +380,7 @@ def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_low help="Setup a basic console logger", ) -def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_deskewed, save_all, extract_only_images, save_page, enable_plotting, allow_enhancement, curved_line, textline_light, full_layout, tables, right2left, input_binary, allow_scaling, headers_off, light_version, reading_order_machine_based, do_ocr, transformer_ocr, batch_size_ocr, num_col_upper, num_col_lower, threshold_art_class_textline, threshold_art_class_layout, skip_layout_and_reading_order, ignore_page_extraction, log_level, setup_logging): +def layout(image, out, overwrite, dir_in, model, model_version, save_images, save_layout, save_deskewed, save_all, extract_only_images, save_page, enable_plotting, allow_enhancement, curved_line, textline_light, full_layout, tables, right2left, input_binary, allow_scaling, headers_off, light_version, reading_order_machine_based, do_ocr, transformer_ocr, batch_size_ocr, num_col_upper, num_col_lower, threshold_art_class_textline, threshold_art_class_layout, skip_layout_and_reading_order, ignore_page_extraction, log_level, setup_logging): if setup_logging: console_handler = logging.StreamHandler(sys.stdout) console_handler.setLevel(logging.INFO) @@ -404,6 +411,7 @@ def layout(image, out, overwrite, dir_in, model, save_images, save_layout, save_ assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." eynollah = Eynollah( model, + model_versions=model_version, extract_only_images=extract_only_images, enable_plotting=enable_plotting, allow_enhancement=allow_enhancement, diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 3579078..0992c8c 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -19,7 +19,7 @@ import math import os import sys import time -from typing import Optional +from typing import Dict, List, Optional, Tuple import atexit import warnings from functools import partial @@ -180,7 +180,6 @@ class Patches(layers.Layer): }) return config - class PatchEncoder(layers.Layer): def __init__(self, **kwargs): super(PatchEncoder, self).__init__() @@ -208,6 +207,7 @@ class Eynollah: def __init__( self, dir_models : str, + model_versions: List[Tuple[str, str]] = [], extract_only_images : bool =False, enable_plotting : bool = False, allow_enhancement : bool = False, @@ -254,6 +254,10 @@ class Eynollah: self.skip_layout_and_reading_order = skip_layout_and_reading_order self.ocr = do_ocr self.tr = transformer_ocr + if not batch_size_ocr: + self.b_s_ocr = 8 + else: + self.b_s_ocr = int(batch_size_ocr) if num_col_upper: self.num_col_upper = int(num_col_upper) else: @@ -275,69 +279,6 @@ class Eynollah: self.threshold_art_class_textline = float(threshold_art_class_textline) else: self.threshold_art_class_textline = 0.1 - - self.model_dir_of_enhancement = dir_models + "/eynollah-enhancement_20210425" - self.model_dir_of_binarization = dir_models + "/eynollah-binarization_20210425" - self.model_dir_of_col_classifier = dir_models + "/eynollah-column-classifier_20210425" - self.model_region_dir_p = dir_models + "/eynollah-main-regions-aug-scaling_20210425" - self.model_region_dir_p2 = dir_models + "/eynollah-main-regions-aug-rotation_20210425" - #"/modelens_full_lay_1_3_031124" - #"/modelens_full_lay_13__3_19_241024" - #"/model_full_lay_13_241024" - #"/modelens_full_lay_13_17_231024" - #"/modelens_full_lay_1_2_221024" - #"/eynollah-full-regions-1column_20210425" - self.model_region_dir_fully_np = dir_models + "/modelens_full_lay_1__4_3_091124" - #self.model_region_dir_fully = dir_models + "/eynollah-full-regions-3+column_20210425" - self.model_page_dir = dir_models + "/model_eynollah_page_extraction_20250915" - self.model_region_dir_p_ens = dir_models + "/eynollah-main-regions-ensembled_20210425" - self.model_region_dir_p_ens_light = dir_models + "/eynollah-main-regions_20220314" - self.model_region_dir_p_ens_light_only_images_extraction = (dir_models + - "/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18" - ) - self.model_reading_order_dir = (dir_models + - "/model_eynollah_reading_order_20250824" - #"/model_mb_ro_aug_ens_11" - #"/model_step_3200000_mb_ro" - #"/model_ens_reading_order_machine_based" - #"/model_mb_ro_aug_ens_8" - #"/model_ens_reading_order_machine_based" - ) - #"/modelens_12sp_elay_0_3_4__3_6_n" - #"/modelens_earlylayout_12spaltige_2_3_5_6_7_8" - #"/modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18" - #"/modelens_1_2_4_5_early_lay_1_2_spaltige" - #"/model_3_eraly_layout_no_patches_1_2_spaltige" - self.model_region_dir_p_1_2_sp_np = dir_models + "/modelens_e_l_all_sp_0_1_2_3_4_171024" - ##self.model_region_dir_fully_new = dir_models + "/model_2_full_layout_new_trans" - #"/modelens_full_lay_1_3_031124" - #"/modelens_full_lay_13__3_19_241024" - #"/model_full_lay_13_241024" - #"/modelens_full_lay_13_17_231024" - #"/modelens_full_lay_1_2_221024" - #"/modelens_full_layout_24_till_28" - #"/model_2_full_layout_new_trans" - self.model_region_dir_fully = dir_models + "/modelens_full_lay_1__4_3_091124" - if self.textline_light: - #"/modelens_textline_1_4_16092024" - #"/model_textline_ens_3_4_5_6_artificial" - #"/modelens_textline_1_3_4_20240915" - #"/model_textline_ens_3_4_5_6_artificial" - #"/modelens_textline_9_12_13_14_15" - #"/eynollah-textline_light_20210425" - self.model_textline_dir = dir_models + "/modelens_textline_0_1__2_4_16092024" - else: - #"/eynollah-textline_20210425" - self.model_textline_dir = dir_models + "/modelens_textline_0_1__2_4_16092024" - if self.ocr and self.tr: - self.model_ocr_dir = dir_models + "/model_eynollah_ocr_trocr_20250919" - elif self.ocr and not self.tr: - self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250930" - if self.tables: - if self.light_version: - self.model_table_dir = dir_models + "/modelens_table_0t4_201124" - else: - self.model_table_dir = dir_models + "/eynollah-tables_20210319" t_start = time.time() @@ -356,28 +297,124 @@ class Eynollah: self.logger.warning("no GPU device available") self.logger.info("Loading models...") - - self.model_page = self.our_load_model(self.model_page_dir) - self.model_classifier = self.our_load_model(self.model_dir_of_col_classifier) - self.model_bin = self.our_load_model(self.model_dir_of_binarization) - if self.extract_only_images: - self.model_region = self.our_load_model(self.model_region_dir_p_ens_light_only_images_extraction) - else: - self.model_textline = self.our_load_model(self.model_textline_dir) + self.setup_models(dir_models, model_versions) + self.logger.info(f"Model initialization complete ({time.time() - t_start:.1f}s)") + + @staticmethod + def our_load_model(model_file, basedir=""): + if basedir: + model_file = os.path.join(basedir, model_file) + if model_file.endswith('.h5') and Path(model_file[:-3]).exists(): + # prefer SavedModel over HDF5 format if it exists + model_file = model_file[:-3] + try: + model = load_model(model_file, compile=False) + except: + model = load_model(model_file, compile=False, custom_objects={ + "PatchEncoder": PatchEncoder, "Patches": Patches}) + return model + + def setup_models(self, basedir: Path, model_versions: List[Tuple[str, str]] = []): + self.model_versions = { + "enhancement": "eynollah-enhancement_20210425", + "binarization": "eynollah-binarization_20210425", + "col_classifier": "eynollah-column-classifier_20210425", + "page": "model_eynollah_page_extraction_20250915", + #?: "eynollah-main-regions-aug-scaling_20210425", + "region": ( # early layout + "eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18" if self.extract_only_images else + "eynollah-main-regions_20220314" if self.light_version else + "eynollah-main-regions-ensembled_20210425"), + "region_p2": ( # early layout, non-light, 2nd part + "eynollah-main-regions-aug-rotation_20210425"), + "region_1_2": ( # early layout, light, 1-or-2-column + #"modelens_12sp_elay_0_3_4__3_6_n" + #"modelens_earlylayout_12spaltige_2_3_5_6_7_8" + #"modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18" + #"modelens_1_2_4_5_early_lay_1_2_spaltige" + #"model_3_eraly_layout_no_patches_1_2_spaltige" + "modelens_e_l_all_sp_0_1_2_3_4_171024"), + "region_fl_np": ( # full layout / no patches + #"modelens_full_lay_1_3_031124" + #"modelens_full_lay_13__3_19_241024" + #"model_full_lay_13_241024" + #"modelens_full_lay_13_17_231024" + #"modelens_full_lay_1_2_221024" + #"eynollah-full-regions-1column_20210425" + "modelens_full_lay_1__4_3_091124"), + "region_fl": ( # full layout / with patches + #"eynollah-full-regions-3+column_20210425" + ##"model_2_full_layout_new_trans" + #"modelens_full_lay_1_3_031124" + #"modelens_full_lay_13__3_19_241024" + #"model_full_lay_13_241024" + #"modelens_full_lay_13_17_231024" + #"modelens_full_lay_1_2_221024" + #"modelens_full_layout_24_till_28" + #"model_2_full_layout_new_trans" + "modelens_full_lay_1__4_3_091124"), + "reading_order": ( + #"model_mb_ro_aug_ens_11" + #"model_step_3200000_mb_ro" + #"model_ens_reading_order_machine_based" + #"model_mb_ro_aug_ens_8" + #"model_ens_reading_order_machine_based" + "model_eynollah_reading_order_20250824"), + "textline": ( + #"modelens_textline_1_4_16092024" + #"model_textline_ens_3_4_5_6_artificial" + #"modelens_textline_1_3_4_20240915" + #"model_textline_ens_3_4_5_6_artificial" + #"modelens_textline_9_12_13_14_15" + #"eynollah-textline_light_20210425" + "modelens_textline_0_1__2_4_16092024" if self.textline_light else + #"eynollah-textline_20210425" + "modelens_textline_0_1__2_4_16092024"), + "table": ( + None if not self.tables else + "modelens_table_0t4_201124" if self.light_version else + "eynollah-tables_20210319"), + "ocr": ( + None if not self.ocr else + "model_eynollah_ocr_trocr_20250919" if self.tr else + "model_eynollah_ocr_cnnrnn_20250930") + } + # override defaults from CLI + for key, val in model_versions: + assert key in self.model_versions, "unknown model category '%s'" % key + self.logger.warning("overriding default model %s version %s to %s", key, self.model_versions[key], val) + self.model_versions[key] = val + # load models, depending on modes + loadable = [ + "col_classifier", + "binarization", + "page", + "region" + ] + if not self.extract_only_images: + loadable.append("textline") if self.light_version: - self.model_region = self.our_load_model(self.model_region_dir_p_ens_light) - self.model_region_1_2 = self.our_load_model(self.model_region_dir_p_1_2_sp_np) + loadable.append("region_1_2") else: - self.model_region = self.our_load_model(self.model_region_dir_p_ens) - self.model_region_p2 = self.our_load_model(self.model_region_dir_p2) - self.model_enhancement = self.our_load_model(self.model_dir_of_enhancement) - ###self.model_region_fl_new = self.our_load_model(self.model_region_dir_fully_new) - self.model_region_fl_np = self.our_load_model(self.model_region_dir_fully_np) - self.model_region_fl = self.our_load_model(self.model_region_dir_fully) + loadable.append("region_p2") + # if self.allow_enhancement:? + loadable.append("enhancement") + if self.full_layout: + loadable.extend(["region_fl_np", + "region_fl"]) if self.reading_order_machine_based: - self.model_reading_order = self.our_load_model(self.model_reading_order_dir) - if self.ocr and self.tr: - self.model_ocr = VisionEncoderDecoderModel.from_pretrained(self.model_ocr_dir) + loadable.append("reading_order") + if self.tables: + loadable.append("table") + + self.models = {name: self.our_load_model(self.model_versions[name], basedir) + for name in loadable + } + + if self.ocr: + ocr_model_dir = os.path.join(basedir, self.model_versions["ocr"]) + if self.tr: + self.models["ocr"] = VisionEncoderDecoderModel.from_pretrained(ocr_model_dir) if torch.cuda.is_available(): self.logger.info("Using GPU acceleration") self.device = torch.device("cuda:0") @@ -386,54 +423,29 @@ class Eynollah: self.device = torch.device("cpu") #self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten") self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed") - elif self.ocr and not self.tr: - model_ocr = load_model(self.model_ocr_dir , compile=False) - - self.prediction_model = tf.keras.models.Model( - model_ocr.get_layer(name = "image").input, - model_ocr.get_layer(name = "dense2").output) - if not batch_size_ocr: - self.b_s_ocr = 8 - else: - self.b_s_ocr = int(batch_size_ocr) + else: + ocr_model = load_model(ocr_model_dir, compile=False) + self.models["ocr"] = tf.keras.models.Model( + ocr_model.get_layer(name = "image").input, + ocr_model.get_layer(name = "dense2").output) - with open(os.path.join(self.model_ocr_dir, "characters_org.txt"),"r") as config_file: + with open(os.path.join(ocr_model_dir, "characters_org.txt"), "r") as config_file: characters = json.load(config_file) - - AUTOTUNE = tf.data.AUTOTUNE - # Mapping characters to integers. char_to_num = StringLookup(vocabulary=list(characters), mask_token=None) - # Mapping integers back to original characters. self.num_to_char = StringLookup( vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True ) - - if self.tables: - self.model_table = self.our_load_model(self.model_table_dir) - - self.logger.info(f"Model initialization complete ({time.time() - t_start:.1f}s)") def __del__(self): if hasattr(self, 'executor') and getattr(self, 'executor'): self.executor.shutdown() - for model_name in ['model_page', - 'model_classifier', - 'model_bin', - 'model_enhancement', - 'model_region', - 'model_region_1_2', - 'model_region_p2', - 'model_region_fl_np', - 'model_region_fl', - 'model_textline', - 'model_reading_order', - 'model_table', - 'model_ocr', - 'processor']: - if hasattr(self, model_name) and getattr(self, model_name): - delattr(self, model_name) + self.executor = None + if hasattr(self, 'models') and getattr(self, 'models'): + for model_name in list(self.models): + if self.models[model_name]: + del self.models[model_name] def cache_images(self, image_filename=None, image_pil=None, dpi=None): ret = {} @@ -480,8 +492,8 @@ class Eynollah: def predict_enhancement(self, img): self.logger.debug("enter predict_enhancement") - img_height_model = self.model_enhancement.layers[-1].output_shape[1] - img_width_model = self.model_enhancement.layers[-1].output_shape[2] + img_height_model = self.models["enhancement"].layers[-1].output_shape[1] + img_width_model = self.models["enhancement"].layers[-1].output_shape[2] if img.shape[0] < img_height_model: img = cv2.resize(img, (img.shape[1], img_width_model), interpolation=cv2.INTER_NEAREST) if img.shape[1] < img_width_model: @@ -522,7 +534,7 @@ class Eynollah: index_y_d = img_h - img_height_model img_patch = img[np.newaxis, index_y_d:index_y_u, index_x_d:index_x_u, :] - label_p_pred = self.model_enhancement.predict(img_patch, verbose=0) + label_p_pred = self.models["enhancement"].predict(img_patch, verbose=0) seg = label_p_pred[0, :, :, :] * 255 if i == 0 and j == 0: @@ -697,7 +709,7 @@ class Eynollah: img_in[0, :, :, 1] = img_1ch[:, :] img_in[0, :, :, 2] = img_1ch[:, :] - label_p_pred = self.model_classifier.predict(img_in, verbose=0) + label_p_pred = self.models["col_classifier"].predict(img_in, verbose=0) num_col = np.argmax(label_p_pred[0]) + 1 self.logger.info("Found %s columns (%s)", num_col, label_p_pred) @@ -715,7 +727,7 @@ class Eynollah: self.logger.info("Detected %s DPI", dpi) if self.input_binary: img = self.imread() - prediction_bin = self.do_prediction(True, img, self.model_bin, n_batch_inference=5) + prediction_bin = self.do_prediction(True, img, self.models["binarization"], n_batch_inference=5) prediction_bin = 255 * (prediction_bin[:,:,0] == 0) prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2).astype(np.uint8) img= np.copy(prediction_bin) @@ -755,7 +767,7 @@ class Eynollah: img_in[0, :, :, 1] = img_1ch[:, :] img_in[0, :, :, 2] = img_1ch[:, :] - label_p_pred = self.model_classifier.predict(img_in, verbose=0) + label_p_pred = self.models["col_classifier"].predict(img_in, verbose=0) num_col = np.argmax(label_p_pred[0]) + 1 elif (self.num_col_upper and self.num_col_lower) and (self.num_col_upper!=self.num_col_lower): @@ -776,7 +788,7 @@ class Eynollah: img_in[0, :, :, 1] = img_1ch[:, :] img_in[0, :, :, 2] = img_1ch[:, :] - label_p_pred = self.model_classifier.predict(img_in, verbose=0) + label_p_pred = self.models["col_classifier"].predict(img_in, verbose=0) num_col = np.argmax(label_p_pred[0]) + 1 if num_col > self.num_col_upper: @@ -1628,7 +1640,7 @@ class Eynollah: cont_page = [] if not self.ignore_page_extraction: img = np.copy(self.image)#cv2.GaussianBlur(self.image, (5, 5), 0) - img_page_prediction = self.do_prediction(False, img, self.model_page) + img_page_prediction = self.do_prediction(False, img, self.models["page"]) imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY) _, thresh = cv2.threshold(imgray, 0, 255, 0) ##thresh = cv2.dilate(thresh, KERNEL, iterations=3) @@ -1676,7 +1688,7 @@ class Eynollah: else: img = self.imread() img = cv2.GaussianBlur(img, (5, 5), 0) - img_page_prediction = self.do_prediction(False, img, self.model_page) + img_page_prediction = self.do_prediction(False, img, self.models["page"]) imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY) _, thresh = cv2.threshold(imgray, 0, 255, 0) @@ -1702,7 +1714,7 @@ class Eynollah: self.logger.debug("enter extract_text_regions") img_height_h = img.shape[0] img_width_h = img.shape[1] - model_region = self.model_region_fl if patches else self.model_region_fl_np + model_region = self.models["region_fl"] if patches else self.models["region_fl_np"] if self.light_version: thresholding_for_fl_light_version = True @@ -1737,7 +1749,7 @@ class Eynollah: self.logger.debug("enter extract_text_regions") img_height_h = img.shape[0] img_width_h = img.shape[1] - model_region = self.model_region_fl if patches else self.model_region_fl_np + model_region = self.models["region_fl"] if patches else self.models["region_fl_np"] if not patches: img = otsu_copy_binary(img) @@ -1958,14 +1970,14 @@ class Eynollah: img_w = img_org.shape[1] img = resize_image(img_org, int(img_org.shape[0] * scaler_h), int(img_org.shape[1] * scaler_w)) - prediction_textline = self.do_prediction(use_patches, img, self.model_textline, + prediction_textline = self.do_prediction(use_patches, img, self.models["textline"], marginal_of_patch_percent=0.15, n_batch_inference=3, thresholding_for_artificial_class_in_light_version=self.textline_light, threshold_art_class_textline=self.threshold_art_class_textline) #if not self.textline_light: #if num_col_classifier==1: - #prediction_textline_nopatch = self.do_prediction(False, img, self.model_textline) + #prediction_textline_nopatch = self.do_prediction(False, img, self.models["textline"]) #prediction_textline[:,:][prediction_textline_nopatch[:,:]==0] = 0 prediction_textline = resize_image(prediction_textline, img_h, img_w) @@ -2036,7 +2048,7 @@ class Eynollah: #cv2.imwrite('prediction_textline2.png', prediction_textline[:,:,0]) - prediction_textline_longshot = self.do_prediction(False, img, self.model_textline) + prediction_textline_longshot = self.do_prediction(False, img, self.models["textline"]) prediction_textline_longshot_true_size = resize_image(prediction_textline_longshot, img_h, img_w) @@ -2069,7 +2081,7 @@ class Eynollah: img_h_new = int(img.shape[0] / float(img.shape[1]) * img_w_new) img_resized = resize_image(img,img_h_new, img_w_new ) - prediction_regions_org, _ = self.do_prediction_new_concept(True, img_resized, self.model_region) + prediction_regions_org, _ = self.do_prediction_new_concept(True, img_resized, self.models["region"]) prediction_regions_org = resize_image(prediction_regions_org,img_height_h, img_width_h ) image_page, page_coord, cont_page = self.extract_page() @@ -2185,7 +2197,7 @@ class Eynollah: #if self.input_binary: #img_bin = np.copy(img_resized) ###if (not self.input_binary and self.full_layout) or (not self.input_binary and num_col_classifier >= 30): - ###prediction_bin = self.do_prediction(True, img_resized, self.model_bin, n_batch_inference=5) + ###prediction_bin = self.do_prediction(True, img_resized, self.models["binarization"], n_batch_inference=5) ####print("inside bin ", time.time()-t_bin) ###prediction_bin=prediction_bin[:,:,0] @@ -2200,7 +2212,7 @@ class Eynollah: ###else: ###img_bin = np.copy(img_resized) if (self.ocr and self.tr) and not self.input_binary: - prediction_bin = self.do_prediction(True, img_resized, self.model_bin, n_batch_inference=5) + prediction_bin = self.do_prediction(True, img_resized, self.models["binarization"], n_batch_inference=5) prediction_bin = 255 * (prediction_bin[:,:,0] == 0) prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) prediction_bin = prediction_bin.astype(np.uint16) @@ -2232,14 +2244,14 @@ class Eynollah: self.logger.debug("resized to %dx%d for %d cols", img_resized.shape[1], img_resized.shape[0], num_col_classifier) prediction_regions_org, confidence_matrix = self.do_prediction_new_concept( - True, img_resized, self.model_region_1_2, n_batch_inference=1, + True, img_resized, self.models["region_1_2"], n_batch_inference=1, thresholding_for_some_classes_in_light_version=True, threshold_art_class_layout=self.threshold_art_class_layout) else: prediction_regions_org = np.zeros((self.image_org.shape[0], self.image_org.shape[1], 3)) confidence_matrix = np.zeros((self.image_org.shape[0], self.image_org.shape[1])) prediction_regions_page, confidence_matrix_page = self.do_prediction_new_concept( - False, self.image_page_org_size, self.model_region_1_2, n_batch_inference=1, + False, self.image_page_org_size, self.models["region_1_2"], n_batch_inference=1, thresholding_for_artificial_class_in_light_version=True, threshold_art_class_layout=self.threshold_art_class_layout) ys = slice(*self.page_coord[0:2]) @@ -2253,10 +2265,10 @@ class Eynollah: self.logger.debug("resized to %dx%d (new_h=%d) for %d cols", img_resized.shape[1], img_resized.shape[0], new_h, num_col_classifier) prediction_regions_org, confidence_matrix = self.do_prediction_new_concept( - True, img_resized, self.model_region_1_2, n_batch_inference=2, + True, img_resized, self.models["region_1_2"], n_batch_inference=2, thresholding_for_some_classes_in_light_version=True, threshold_art_class_layout=self.threshold_art_class_layout) - ###prediction_regions_org = self.do_prediction(True, img_bin, self.model_region, + ###prediction_regions_org = self.do_prediction(True, img_bin, self.models["region"], ###n_batch_inference=3, ###thresholding_for_some_classes_in_light_version=True) #print("inside 3 ", time.time()-t_in) @@ -2336,7 +2348,7 @@ class Eynollah: ratio_x=1 img = resize_image(img_org, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) - prediction_regions_org_y = self.do_prediction(True, img, self.model_region) + prediction_regions_org_y = self.do_prediction(True, img, self.models["region"]) prediction_regions_org_y = resize_image(prediction_regions_org_y, img_height_h, img_width_h ) #plt.imshow(prediction_regions_org_y[:,:,0]) @@ -2351,7 +2363,7 @@ class Eynollah: _, _ = find_num_col(img_only_regions, num_col_classifier, self.tables, multiplier=6.0) img = resize_image(img_org, int(img_org.shape[0]), int(img_org.shape[1]*(1.2 if is_image_enhanced else 1))) - prediction_regions_org = self.do_prediction(True, img, self.model_region) + prediction_regions_org = self.do_prediction(True, img, self.models["region"]) prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) prediction_regions_org=prediction_regions_org[:,:,0] @@ -2359,7 +2371,7 @@ class Eynollah: img = resize_image(img_org, int(img_org.shape[0]), int(img_org.shape[1])) - prediction_regions_org2 = self.do_prediction(True, img, self.model_region_p2, marginal_of_patch_percent=0.2) + prediction_regions_org2 = self.do_prediction(True, img, self.models["region_p2"], marginal_of_patch_percent=0.2) prediction_regions_org2=resize_image(prediction_regions_org2, img_height_h, img_width_h ) mask_zeros2 = (prediction_regions_org2[:,:,0] == 0) @@ -2383,7 +2395,7 @@ class Eynollah: if self.input_binary: prediction_bin = np.copy(img_org) else: - prediction_bin = self.do_prediction(True, img_org, self.model_bin, n_batch_inference=5) + prediction_bin = self.do_prediction(True, img_org, self.models["binarization"], n_batch_inference=5) prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h ) prediction_bin = 255 * (prediction_bin[:,:,0]==0) prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) @@ -2393,7 +2405,7 @@ class Eynollah: img = resize_image(prediction_bin, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) - prediction_regions_org = self.do_prediction(True, img, self.model_region) + prediction_regions_org = self.do_prediction(True, img, self.models["region"]) prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) prediction_regions_org=prediction_regions_org[:,:,0] @@ -2420,7 +2432,7 @@ class Eynollah: except: if self.input_binary: prediction_bin = np.copy(img_org) - prediction_bin = self.do_prediction(True, img_org, self.model_bin, n_batch_inference=5) + prediction_bin = self.do_prediction(True, img_org, self.models["binarization"], n_batch_inference=5) prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h ) prediction_bin = 255 * (prediction_bin[:,:,0]==0) prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) @@ -2431,14 +2443,14 @@ class Eynollah: img = resize_image(prediction_bin, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) - prediction_regions_org = self.do_prediction(True, img, self.model_region) + prediction_regions_org = self.do_prediction(True, img, self.models["region"]) prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) prediction_regions_org=prediction_regions_org[:,:,0] #mask_lines_only=(prediction_regions_org[:,:]==3)*1 #img = resize_image(img_org, int(img_org.shape[0]*1), int(img_org.shape[1]*1)) - #prediction_regions_org = self.do_prediction(True, img, self.model_region) + #prediction_regions_org = self.do_prediction(True, img, self.models["region"]) #prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) #prediction_regions_org = prediction_regions_org[:,:,0] #prediction_regions_org[(prediction_regions_org[:,:] == 1) & (mask_zeros_y[:,:] == 1)]=0 @@ -2809,13 +2821,13 @@ class Eynollah: img_width_h = img_org.shape[1] patches = False if self.light_version: - prediction_table, _ = self.do_prediction_new_concept(patches, img, self.model_table) + prediction_table, _ = self.do_prediction_new_concept(patches, img, self.models["table"]) prediction_table = prediction_table.astype(np.int16) return prediction_table[:,:,0] else: if num_col_classifier < 4 and num_col_classifier > 2: - prediction_table = self.do_prediction(patches, img, self.model_table) - pre_updown = self.do_prediction(patches, cv2.flip(img[:,:,:], -1), self.model_table) + prediction_table = self.do_prediction(patches, img, self.models["table"]) + pre_updown = self.do_prediction(patches, cv2.flip(img[:,:,:], -1), self.models["table"]) pre_updown = cv2.flip(pre_updown, -1) prediction_table[:,:,0][pre_updown[:,:,0]==1]=1 @@ -2834,8 +2846,8 @@ class Eynollah: xs = slice(w_start, w_start + img.shape[1]) img_new[ys, xs] = img - prediction_ext = self.do_prediction(patches, img_new, self.model_table) - pre_updown = self.do_prediction(patches, cv2.flip(img_new[:,:,:], -1), self.model_table) + prediction_ext = self.do_prediction(patches, img_new, self.models["table"]) + pre_updown = self.do_prediction(patches, cv2.flip(img_new[:,:,:], -1), self.models["table"]) pre_updown = cv2.flip(pre_updown, -1) prediction_table = prediction_ext[ys, xs] @@ -2856,8 +2868,8 @@ class Eynollah: xs = slice(w_start, w_start + img.shape[1]) img_new[ys, xs] = img - prediction_ext = self.do_prediction(patches, img_new, self.model_table) - pre_updown = self.do_prediction(patches, cv2.flip(img_new[:,:,:], -1), self.model_table) + prediction_ext = self.do_prediction(patches, img_new, self.models["table"]) + pre_updown = self.do_prediction(patches, cv2.flip(img_new[:,:,:], -1), self.models["table"]) pre_updown = cv2.flip(pre_updown, -1) prediction_table = prediction_ext[ys, xs] @@ -2869,10 +2881,10 @@ class Eynollah: prediction_table = np.zeros(img.shape) img_w_half = img.shape[1] // 2 - pre1 = self.do_prediction(patches, img[:,0:img_w_half,:], self.model_table) - pre2 = self.do_prediction(patches, img[:,img_w_half:,:], self.model_table) - pre_full = self.do_prediction(patches, img[:,:,:], self.model_table) - pre_updown = self.do_prediction(patches, cv2.flip(img[:,:,:], -1), self.model_table) + pre1 = self.do_prediction(patches, img[:,0:img_w_half,:], self.models["table"]) + pre2 = self.do_prediction(patches, img[:,img_w_half:,:], self.models["table"]) + pre_full = self.do_prediction(patches, img[:,:,:], self.models["table"]) + pre_updown = self.do_prediction(patches, cv2.flip(img[:,:,:], -1), self.models["table"]) pre_updown = cv2.flip(pre_updown, -1) prediction_table_full_erode = cv2.erode(pre_full[:,:,0], KERNEL, iterations=4) @@ -3474,18 +3486,6 @@ class Eynollah: regions_without_separators_d, regions_fully, regions_without_separators, polygons_of_marginals, contours_tables) - @staticmethod - def our_load_model(model_file): - if model_file.endswith('.h5') and Path(model_file[:-3]).exists(): - # prefer SavedModel over HDF5 format if it exists - model_file = model_file[:-3] - try: - model = load_model(model_file, compile=False) - except: - model = load_model(model_file, compile=False, custom_objects={ - "PatchEncoder": PatchEncoder, "Patches": Patches}) - return model - def do_order_of_regions_with_model(self, contours_only_text_parent, contours_only_text_parent_h, text_regions_p): height1 =672#448 @@ -3676,7 +3676,7 @@ class Eynollah: tot_counter += 1 batch.append(j) if tot_counter % inference_bs == 0 or tot_counter == len(ij_list): - y_pr = self.model_reading_order.predict(input_1 , verbose=0) + y_pr = self.models["reading_order"].predict(input_1 , verbose=0) for jb, j in enumerate(batch): if y_pr[jb][0]>=0.5: post_list.append(j) @@ -4259,7 +4259,7 @@ class Eynollah: gc.collect() ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons, np.zeros((len(all_found_textline_polygons), 4)), - self.prediction_model, self.b_s_ocr, self.num_to_char, textline_light=True) + self.models["ocr"], self.b_s_ocr, self.num_to_char, textline_light=True) else: ocr_all_textlines = None @@ -4768,27 +4768,27 @@ class Eynollah: if len(all_found_textline_polygons): ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons, all_box_coord, - self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + self.models["ocr"], self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) if len(all_found_textline_polygons_marginals_left): ocr_all_textlines_marginals_left = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons_marginals_left, all_box_coord_marginals_left, - self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + self.models["ocr"], self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) if len(all_found_textline_polygons_marginals_right): ocr_all_textlines_marginals_right = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons_marginals_right, all_box_coord_marginals_right, - self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + self.models["ocr"], self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) if self.full_layout and len(all_found_textline_polygons): ocr_all_textlines_h = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons_h, all_box_coord_h, - self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + self.models["ocr"], self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) if self.full_layout and len(polygons_of_drop_capitals): ocr_all_textlines_drop = return_rnn_cnn_ocr_of_given_textlines( image_page, polygons_of_drop_capitals, np.zeros((len(polygons_of_drop_capitals), 4)), - self.prediction_model, self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + self.models["ocr"], self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) else: if self.light_version: @@ -4800,7 +4800,7 @@ class Eynollah: gc.collect() torch.cuda.empty_cache() - self.model_ocr.to(self.device) + self.models["ocr"].to(self.device) ind_tot = 0 #cv2.imwrite('./img_out.png', image_page) @@ -4837,7 +4837,7 @@ class Eynollah: img_croped = img_poly_on_img[y:y+h, x:x+w, :] #cv2.imwrite('./extracted_lines/'+str(ind_tot)+'.jpg', img_croped) text_ocr = self.return_ocr_of_textline_without_common_section( - img_croped, self.model_ocr, self.processor, self.device, w, h2w_ratio, ind_tot) + img_croped, self.models["ocr"], self.processor, self.device, w, h2w_ratio, ind_tot) ocr_textline_in_textregion.append(text_ocr) ind_tot = ind_tot +1 ocr_all_textlines.append(ocr_textline_in_textregion) From 2056a8bdb9aff8895235f36f2ddf11a42b0469a3 Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 10 Oct 2025 16:32:47 +0200 Subject: [PATCH 360/492] :package: v0.6.0rc1 --- CHANGELOG.md | 3 +++ src/eynollah/ocrd-tool.json | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index df1e12e..d0ad43c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,8 @@ Versioned according to [Semantic Versioning](http://semver.org/). ## Unreleased +## [0.6.0rc1] - 2025-10-10 + Fixed: * continue processing when no columns detected but text regions exist @@ -289,6 +291,7 @@ Fixed: Initial release +[0.6.0rc1]: ../../compare/v0.6.0rc1...v0.5.0 [0.5.0]: ../../compare/v0.5.0...v0.4.0 [0.4.0]: ../../compare/v0.4.0...v0.3.1 [0.3.1]: ../../compare/v0.3.1...v0.3.0 diff --git a/src/eynollah/ocrd-tool.json b/src/eynollah/ocrd-tool.json index 5d89c92..2ae4ead 100644 --- a/src/eynollah/ocrd-tool.json +++ b/src/eynollah/ocrd-tool.json @@ -1,5 +1,5 @@ { - "version": "0.5.0", + "version": "0.6.0rc1", "git_url": "https://github.com/qurator-spk/eynollah", "dockerhub": "ocrd/eynollah", "tools": { From 745cf3be48ad6d5fee9c6297e50ea2d52d7f8fd2 Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 10 Oct 2025 16:39:16 +0200 Subject: [PATCH 361/492] XML encoding should be utf-8 not utf8 ... and should use OCR-D's generateDS PAGE API consistently --- src/eynollah/eynollah.py | 4 ++-- src/eynollah/mb_ro_on_layout.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 0992c8c..94bd10c 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -5284,7 +5284,7 @@ class Eynollah_ocr: ##unicode_textpage.text = tot_page_text ET.register_namespace("",name_space) - tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf8",default_namespace=None) + tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf-8",default_namespace=None) else: ###max_len = 280#512#280#512 ###padding_token = 1500#299#1500#299 @@ -5833,5 +5833,5 @@ class Eynollah_ocr: ##unicode_textpage.text = tot_page_text ET.register_namespace("",name_space) - tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf8",default_namespace=None) + tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf-8",default_namespace=None) #print("Job done in %.1fs", time.time() - t0) diff --git a/src/eynollah/mb_ro_on_layout.py b/src/eynollah/mb_ro_on_layout.py index 218f973..1b991ae 100644 --- a/src/eynollah/mb_ro_on_layout.py +++ b/src/eynollah/mb_ro_on_layout.py @@ -805,7 +805,7 @@ class machine_based_reading_order_on_layout: tree_xml.write(os.path.join(dir_out, file_name+'.xml'), xml_declaration=True, method='xml', - encoding="utf8", + encoding="utf-8", default_namespace=None) #sys.exit() From e8b7212f36af40c536bdf3607d53d6c60460b129 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 14 Oct 2025 14:16:39 +0200 Subject: [PATCH 362/492] `polygon2contour`: avoid uint for coords (introduced in a433c736 to make consistent with `filter_contours_area_of_image`, but actually np.uint is prone to create overflows downstream) --- src/eynollah/utils/contour.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index f998c4d..21068b3 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -276,7 +276,7 @@ def contour2polygon(contour: Union[np.ndarray, Sequence[Sequence[Sequence[Number def polygon2contour(polygon: Polygon) -> np.ndarray: polygon = np.array(polygon.exterior.coords[:-1], dtype=int) - return np.maximum(0, polygon).astype(np.uint)[:, np.newaxis] + return np.maximum(0, polygon).astype(int)[:, np.newaxis] def make_intersection(poly1, poly2): interp = poly1.intersection(poly2) From 8299e7009a569c0c3c82e603df245c730f4f52b4 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Tue, 14 Oct 2025 14:23:29 +0200 Subject: [PATCH 363/492] `setup_models`: avoid unnecessarily loading `region_fl` --- src/eynollah/eynollah.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 0992c8c..6367c91 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -385,6 +385,8 @@ class Eynollah: self.logger.warning("overriding default model %s version %s to %s", key, self.model_versions[key], val) self.model_versions[key] = val # load models, depending on modes + # (note: loading too many models can cause OOM on GPU/CUDA, + # thus, we try set up the minimal configuration for the current mode) loadable = [ "col_classifier", "binarization", @@ -400,8 +402,8 @@ class Eynollah: # if self.allow_enhancement:? loadable.append("enhancement") if self.full_layout: - loadable.extend(["region_fl_np", - "region_fl"]) + loadable.append("region_fl_np") + #loadable.append("region_fl") if self.reading_order_machine_based: loadable.append("reading_order") if self.tables: From 2febf534797eaa5be35caf16d7965c3ac39bdd39 Mon Sep 17 00:00:00 2001 From: kba Date: Tue, 14 Oct 2025 14:52:31 +0200 Subject: [PATCH 364/492] :memo: changelog --- CHANGELOG.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d0ad43c..dfd6868 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,10 @@ Versioned according to [Semantic Versioning](http://semver.org/). ## Unreleased +Fixed: + + * Prevent OOM GPU error by avoiding loading the `region_fl` model, #199 + ## [0.6.0rc1] - 2025-10-10 Fixed: @@ -21,8 +25,7 @@ Fixed: * Dockerfile: fix CUDA installation (cuDNN contested between Torch and TF due to extra OCR) * OCR: re-instate missing methods and fix `utils_ocr` function calls * mbreorder/enhancement CLIs: missing imports - * :fire: writer: `SeparatorRegion` needs `SeparatorRegionType` (not `ImageRegionType`) -f458e3e + * :fire: writer: `SeparatorRegion` needs `SeparatorRegionType` (not `ImageRegionType`), f458e3e * tests: switch from `pytest-subtests` to `parametrize` so we can use `pytest-isolate` (so CUDA memory gets freed between tests if running on GPU) From c1f01588062714ba0c5146dc676c2dacade3e36f Mon Sep 17 00:00:00 2001 From: kba Date: Tue, 14 Oct 2025 14:53:15 +0200 Subject: [PATCH 365/492] :memo: changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dfd6868..636880f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ Versioned according to [Semantic Versioning](http://semver.org/). Fixed: * Prevent OOM GPU error by avoiding loading the `region_fl` model, #199 + * XML output: encoding should be `utf-8`, not `utf8`, #196, #197 ## [0.6.0rc1] - 2025-10-10 From f485dd41819018a39960e45d5fd61c68d835cf1a Mon Sep 17 00:00:00 2001 From: kba Date: Tue, 14 Oct 2025 16:10:50 +0200 Subject: [PATCH 366/492] :package: v0.6.0rc2 --- CHANGELOG.md | 3 +++ src/eynollah/ocrd-tool.json | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 636880f..f84c153 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,8 @@ Versioned according to [Semantic Versioning](http://semver.org/). ## Unreleased +## [0.6.0rc2] - 2025-10-14 + Fixed: * Prevent OOM GPU error by avoiding loading the `region_fl` model, #199 @@ -295,6 +297,7 @@ Fixed: Initial release +[0.6.0rc2]: ../../compare/v0.6.0rc2...v0.6.0rc1 [0.6.0rc1]: ../../compare/v0.6.0rc1...v0.5.0 [0.5.0]: ../../compare/v0.5.0...v0.4.0 [0.4.0]: ../../compare/v0.4.0...v0.3.1 diff --git a/src/eynollah/ocrd-tool.json b/src/eynollah/ocrd-tool.json index 2ae4ead..f9c6f4d 100644 --- a/src/eynollah/ocrd-tool.json +++ b/src/eynollah/ocrd-tool.json @@ -1,5 +1,5 @@ { - "version": "0.6.0rc1", + "version": "0.6.0rc2", "git_url": "https://github.com/qurator-spk/eynollah", "dockerhub": "ocrd/eynollah", "tools": { From 948c8c3441f6dfa1f371e01a73f79ba957acd5c7 Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 15 Oct 2025 16:58:17 +0200 Subject: [PATCH 367/492] join_polygons: try to catch rare case of MultiPolygon --- src/eynollah/utils/contour.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index 21068b3..f71bdc4 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -353,6 +353,8 @@ def join_polygons(polygons: Sequence[Polygon], scale=20) -> Polygon: bridgep = orient(LineString(nearest).buffer(max(1, scale/5), resolution=1), -1) polygons.append(bridgep) jointp = unary_union(polygons) + if jointp.geom_type == 'MultiPolygon': + jointp = unary_union(jointp.geoms) assert jointp.geom_type == 'Polygon', jointp.wkt # follow-up calculations will necessarily be integer; # so anticipate rounding here and then ensure validity From bd8c8bfeacbe6abb6e4217fe4008869af3ee97e9 Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 16 Oct 2025 16:15:31 +0200 Subject: [PATCH 368/492] training: pin numpy to <1.24 as well --- train/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/train/requirements.txt b/train/requirements.txt index 2fb9908..63f3813 100644 --- a/train/requirements.txt +++ b/train/requirements.txt @@ -1,5 +1,6 @@ sacred seaborn +numpy <1.24.0 tqdm imutils scipy From d2f0a43088e31a8948b903b5b1de10cd695ce3ae Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 16 Oct 2025 20:46:03 +0200 Subject: [PATCH 369/492] :memo: changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f84c153..249affa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,10 @@ Versioned according to [Semantic Versioning](http://semver.org/). ## Unreleased +Fixed: + + * `join_polygons` always returning Polygon, not MultiPolygon, #203 + ## [0.6.0rc2] - 2025-10-14 Fixed: From 2e0fb64dcb43894bdaf8df033471711fad2574f2 Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 16 Oct 2025 21:29:37 +0200 Subject: [PATCH 370/492] disable ruff check for training code for now --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 2945f6a..e7744a1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,6 +58,8 @@ source = ["eynollah"] [tool.ruff] line-length = 120 +# TODO: Reenable and fix after release v0.6.0 +exclude = ['src/eynollah/training'] [tool.ruff.lint] ignore = [ @@ -73,3 +75,4 @@ ignore = [ [tool.ruff.format] quote-style = "preserve" + From 2ac01ecaccbbddc36bc609fc9866c628e21b8ccc Mon Sep 17 00:00:00 2001 From: Robert Sachunsky Date: Wed, 15 Oct 2025 16:58:17 +0200 Subject: [PATCH 371/492] join_polygons: try to catch rare case of MultiPolygon --- src/eynollah/utils/contour.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index 21068b3..f71bdc4 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -353,6 +353,8 @@ def join_polygons(polygons: Sequence[Polygon], scale=20) -> Polygon: bridgep = orient(LineString(nearest).buffer(max(1, scale/5), resolution=1), -1) polygons.append(bridgep) jointp = unary_union(polygons) + if jointp.geom_type == 'MultiPolygon': + jointp = unary_union(jointp.geoms) assert jointp.geom_type == 'Polygon', jointp.wkt # follow-up calculations will necessarily be integer; # so anticipate rounding here and then ensure validity From 46d25647f7d0cc1ea0354a9bd90f8e9479f32ffa Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 16 Oct 2025 20:46:03 +0200 Subject: [PATCH 372/492] :memo: changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f84c153..249affa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,10 @@ Versioned according to [Semantic Versioning](http://semver.org/). ## Unreleased +Fixed: + + * `join_polygons` always returning Polygon, not MultiPolygon, #203 + ## [0.6.0rc2] - 2025-10-14 Fixed: From ca8edb35e3cdaa789390835052c1781aa3331e63 Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 17 Oct 2025 10:35:13 +0200 Subject: [PATCH 373/492] :memo: changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 10b3923..d8d7a6c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,10 @@ Versioned according to [Semantic Versioning](http://semver.org/). ## Unreleased +Added: + + * `eynollah-training` CLI and docs for training the models, #187, #193, https://github.com/qurator-spk/sbb_pixelwise_segmentation/tree/unifying-training-models + Fixed: * `join_polygons` always returning Polygon, not MultiPolygon, #203 From 38c028c6b500fcc7e2d5202f8930c38f74fc9bdc Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 17 Oct 2025 10:36:30 +0200 Subject: [PATCH 374/492] :package: v0.6.0 --- CHANGELOG.md | 3 +++ src/eynollah/ocrd-tool.json | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d8d7a6c..c2caaa6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,8 @@ Versioned according to [Semantic Versioning](http://semver.org/). ## Unreleased +## [0.6.0] - 2025-10-17 + Added: * `eynollah-training` CLI and docs for training the models, #187, #193, https://github.com/qurator-spk/sbb_pixelwise_segmentation/tree/unifying-training-models @@ -305,6 +307,7 @@ Fixed: Initial release +[0.6.0]: ../../compare/v0.6.0...v0.6.0rc2 [0.6.0rc2]: ../../compare/v0.6.0rc2...v0.6.0rc1 [0.6.0rc1]: ../../compare/v0.6.0rc1...v0.5.0 [0.5.0]: ../../compare/v0.5.0...v0.4.0 diff --git a/src/eynollah/ocrd-tool.json b/src/eynollah/ocrd-tool.json index f9c6f4d..dbbdc3b 100644 --- a/src/eynollah/ocrd-tool.json +++ b/src/eynollah/ocrd-tool.json @@ -1,5 +1,5 @@ { - "version": "0.6.0rc2", + "version": "0.6.0", "git_url": "https://github.com/qurator-spk/eynollah", "dockerhub": "ocrd/eynollah", "tools": { From 3a73ccca2e4fa09c8026f64446c0477d552de128 Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 17 Oct 2025 13:45:14 +0200 Subject: [PATCH 375/492] training/models.py: make imports explicit --- pyproject.toml | 2 - src/eynollah/training/models.py | 81 +++++++++++++++++++++------------ 2 files changed, 51 insertions(+), 32 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e7744a1..39992ab 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,8 +58,6 @@ source = ["eynollah"] [tool.ruff] line-length = 120 -# TODO: Reenable and fix after release v0.6.0 -exclude = ['src/eynollah/training'] [tool.ruff.lint] ignore = [ diff --git a/src/eynollah/training/models.py b/src/eynollah/training/models.py index fdc5437..7fc34b6 100644 --- a/src/eynollah/training/models.py +++ b/src/eynollah/training/models.py @@ -1,9 +1,29 @@ -import tensorflow as tf from tensorflow import keras -from tensorflow.keras.models import * -from tensorflow.keras.layers import * -from tensorflow.keras import layers -from tensorflow.keras.regularizers import l2 +from keras.layers import ( + Activation, + Add, + AveragePooling2D, + BatchNormalization, + Conv2D, + Dense, + Dropout, + Embedding, + Flatten, + Input, + Lambda, + Layer, + LayerNormalization, + MaxPooling2D, + MultiHeadAttention, + UpSampling2D, + ZeroPadding2D, + add, + concatenate +) +from keras.models import Model +import tensorflow as tf +# from keras import layers, models +from keras.regularizers import l2 ##mlp_head_units = [512, 256]#[2048, 1024] ###projection_dim = 64 @@ -15,13 +35,13 @@ MERGE_AXIS = -1 def mlp(x, hidden_units, dropout_rate): for units in hidden_units: - x = layers.Dense(units, activation=tf.nn.gelu)(x) - x = layers.Dropout(dropout_rate)(x) + x = Dense(units, activation=tf.nn.gelu)(x) + x = Dropout(dropout_rate)(x) return x -class Patches(layers.Layer): +class Patches(Layer): def __init__(self, patch_size_x, patch_size_y):#__init__(self, **kwargs):#:__init__(self, patch_size):#__init__(self, **kwargs): - super(Patches, self).__init__() + super().__init__() self.patch_size_x = patch_size_x self.patch_size_y = patch_size_y @@ -49,9 +69,9 @@ class Patches(layers.Layer): }) return config -class Patches_old(layers.Layer): +class Patches_old(Layer): def __init__(self, patch_size):#__init__(self, **kwargs):#:__init__(self, patch_size):#__init__(self, **kwargs): - super(Patches, self).__init__() + super().__init__() self.patch_size = patch_size def call(self, images): @@ -69,8 +89,8 @@ class Patches_old(layers.Layer): #print(patches.shape,patch_dims,'patch_dims') patches = tf.reshape(patches, [batch_size, -1, patch_dims]) return patches - def get_config(self): + def get_config(self): config = super().get_config().copy() config.update({ 'patch_size': self.patch_size, @@ -78,12 +98,12 @@ class Patches_old(layers.Layer): return config -class PatchEncoder(layers.Layer): +class PatchEncoder(Layer): def __init__(self, num_patches, projection_dim): super(PatchEncoder, self).__init__() self.num_patches = num_patches - self.projection = layers.Dense(units=projection_dim) - self.position_embedding = layers.Embedding( + self.projection = Dense(units=projection_dim) + self.position_embedding = Embedding( input_dim=num_patches, output_dim=projection_dim ) @@ -144,7 +164,7 @@ def identity_block(input_tensor, kernel_size, filters, stage, block): x = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING, name=conv_name_base + '2c')(x) x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x) - x = layers.add([x, input_tensor]) + x = add([x, input_tensor]) x = Activation('relu')(x) return x @@ -189,12 +209,12 @@ def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)) name=conv_name_base + '1')(input_tensor) shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut) - x = layers.add([x, shortcut]) + x = add([x, shortcut]) x = Activation('relu')(x) return x -def resnet50_unet_light(n_classes, input_height=224, input_width=224, taks="segmentation", weight_decay=1e-6, pretraining=False): +def resnet50_unet_light(n_classes, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): assert input_height % 32 == 0 assert input_width % 32 == 0 @@ -397,7 +417,7 @@ def resnet50_unet(n_classes, input_height=224, input_width=224, task="segmentati def vit_resnet50_unet(n_classes, patch_size_x, patch_size_y, num_patches, mlp_head_units=None, transformer_layers=8, num_heads =4, projection_dim = 64, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): if mlp_head_units is None: mlp_head_units = [128, 64] - inputs = layers.Input(shape=(input_height, input_width, 3)) + inputs = Input(shape=(input_height, input_width, 3)) #transformer_units = [ #projection_dim * 2, @@ -452,20 +472,21 @@ def vit_resnet50_unet(n_classes, patch_size_x, patch_size_y, num_patches, mlp_he for _ in range(transformer_layers): # Layer normalization 1. - x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches) + x1 = LayerNormalization(epsilon=1e-6)(encoded_patches) # Create a multi-head attention layer. - attention_output = layers.MultiHeadAttention( + attention_output = MultiHeadAttention( num_heads=num_heads, key_dim=projection_dim, dropout=0.1 )(x1, x1) # Skip connection 1. - x2 = layers.Add()([attention_output, encoded_patches]) + x2 = Add()([attention_output, encoded_patches]) # Layer normalization 2. - x3 = layers.LayerNormalization(epsilon=1e-6)(x2) + x3 = LayerNormalization(epsilon=1e-6)(x2) # MLP. x3 = mlp(x3, hidden_units=mlp_head_units, dropout_rate=0.1) # Skip connection 2. - encoded_patches = layers.Add()([x3, x2]) + encoded_patches = Add()([x3, x2]) + assert isinstance(x, Layer) encoded_patches = tf.reshape(encoded_patches, [-1, x.shape[1], x.shape[2] , int( projection_dim / (patch_size_x * patch_size_y) )]) v1024_2048 = Conv2D( 1024 , (1, 1), padding='same', data_format=IMAGE_ORDERING,kernel_regularizer=l2(weight_decay))(encoded_patches) @@ -521,7 +542,7 @@ def vit_resnet50_unet(n_classes, patch_size_x, patch_size_y, num_patches, mlp_he def vit_resnet50_unet_transformer_before_cnn(n_classes, patch_size_x, patch_size_y, num_patches, mlp_head_units=None, transformer_layers=8, num_heads =4, projection_dim = 64, input_height=224, input_width=224, task="segmentation", weight_decay=1e-6, pretraining=False): if mlp_head_units is None: mlp_head_units = [128, 64] - inputs = layers.Input(shape=(input_height, input_width, 3)) + inputs = Input(shape=(input_height, input_width, 3)) ##transformer_units = [ ##projection_dim * 2, @@ -536,19 +557,19 @@ def vit_resnet50_unet_transformer_before_cnn(n_classes, patch_size_x, patch_size for _ in range(transformer_layers): # Layer normalization 1. - x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches) + x1 = LayerNormalization(epsilon=1e-6)(encoded_patches) # Create a multi-head attention layer. - attention_output = layers.MultiHeadAttention( + attention_output = MultiHeadAttention( num_heads=num_heads, key_dim=projection_dim, dropout=0.1 )(x1, x1) # Skip connection 1. - x2 = layers.Add()([attention_output, encoded_patches]) + x2 = Add()([attention_output, encoded_patches]) # Layer normalization 2. - x3 = layers.LayerNormalization(epsilon=1e-6)(x2) + x3 = LayerNormalization(epsilon=1e-6)(x2) # MLP. x3 = mlp(x3, hidden_units=mlp_head_units, dropout_rate=0.1) # Skip connection 2. - encoded_patches = layers.Add()([x3, x2]) + encoded_patches = Add()([x3, x2]) encoded_patches = tf.reshape(encoded_patches, [-1, input_height, input_width , int( projection_dim / (patch_size_x * patch_size_y) )]) From af74890b2edafc7256d122ef078ce0e8dfed35ed Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 17 Oct 2025 14:07:43 +0200 Subject: [PATCH 376/492] training/inference.py: add typing info, organize imports --- src/eynollah/training/inference.py | 53 ++++++++++++++++++------------ 1 file changed, 32 insertions(+), 21 deletions(-) diff --git a/src/eynollah/training/inference.py b/src/eynollah/training/inference.py index 3fa8fd6..10fca6c 100644 --- a/src/eynollah/training/inference.py +++ b/src/eynollah/training/inference.py @@ -1,14 +1,15 @@ import sys import os +from typing import Tuple import warnings import json import numpy as np import cv2 -from tensorflow.keras.models import load_model +from numpy._typing import NDArray import tensorflow as tf -from tensorflow.keras import backend as K -from tensorflow.keras.layers import * +from keras.models import Model, load_model +from keras import backend as K import click from tensorflow.python.keras import backend as tensorflow_backend import xml.etree.ElementTree as ET @@ -34,6 +35,7 @@ Tool to load model and predict for given image. """ class sbb_predict: + def __init__(self,image, dir_in, model, task, config_params_model, patches, save, save_layout, ground_truth, xml_file, out, min_area): self.image=image self.dir_in=dir_in @@ -77,7 +79,7 @@ class sbb_predict: #print(img[:,:,0].min()) #blur = cv2.GaussianBlur(img,(5,5)) #ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) - retval1, threshold1 = cv2.threshold(img1, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) + _, threshold1 = cv2.threshold(img1, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) @@ -116,19 +118,19 @@ class sbb_predict: denominator = K.sum(K.square(y_pred) + K.square(y_true), axes) return 1.00 - K.mean(numerator / (denominator + epsilon)) # average over classes and batch - def weighted_categorical_crossentropy(self,weights=None): - - def loss(y_true, y_pred): - labels_floats = tf.cast(y_true, tf.float32) - per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats,logits=y_pred) - - if weights is not None: - weight_mask = tf.maximum(tf.reduce_max(tf.constant( - np.array(weights, dtype=np.float32)[None, None, None]) - * labels_floats, axis=-1), 1.0) - per_pixel_loss = per_pixel_loss * weight_mask[:, :, :, None] - return tf.reduce_mean(per_pixel_loss) - return self.loss + # def weighted_categorical_crossentropy(self,weights=None): + # + # def loss(y_true, y_pred): + # labels_floats = tf.cast(y_true, tf.float32) + # per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats,logits=y_pred) + # + # if weights is not None: + # weight_mask = tf.maximum(tf.reduce_max(tf.constant( + # np.array(weights, dtype=np.float32)[None, None, None]) + # * labels_floats, axis=-1), 1.0) + # per_pixel_loss = per_pixel_loss * weight_mask[:, :, :, None] + # return tf.reduce_mean(per_pixel_loss) + # return self.loss def IoU(self,Yi,y_predi): @@ -177,12 +179,13 @@ class sbb_predict: ##if self.weights_dir!=None: ##self.model.load_weights(self.weights_dir) + assert isinstance(self.model, Model) if self.task != 'classification' and self.task != 'reading_order': self.img_height=self.model.layers[len(self.model.layers)-1].output_shape[1] self.img_width=self.model.layers[len(self.model.layers)-1].output_shape[2] self.n_classes=self.model.layers[len(self.model.layers)-1].output_shape[3] - def visualize_model_output(self, prediction, img, task): + def visualize_model_output(self, prediction, img, task) -> Tuple[NDArray, NDArray]: if task == "binarization": prediction = prediction * -1 prediction = prediction + 1 @@ -226,9 +229,12 @@ class sbb_predict: added_image = cv2.addWeighted(img,0.5,layout_only,0.1,0) + assert isinstance(added_image, np.ndarray) + assert isinstance(layout_only, np.ndarray) return added_image, layout_only def predict(self, image_dir): + assert isinstance(self.model, Model) if self.task == 'classification': classes_names = self.config_params_model['classification_classes_name'] img_1ch = img=cv2.imread(image_dir, 0) @@ -240,7 +246,7 @@ class sbb_predict: img_in[0, :, :, 1] = img_1ch[:, :] img_in[0, :, :, 2] = img_1ch[:, :] - label_p_pred = self.model.predict(img_in, verbose=0) + label_p_pred = self.model.predict(img_in, verbose='0') index_class = np.argmax(label_p_pred[0]) print("Predicted Class: {}".format(classes_names[str(int(index_class))])) @@ -361,7 +367,7 @@ class sbb_predict: #input_1[:,:,1] = img3[:,:,0]/5. if batch_counter==inference_bs or ( (tot_counter//inference_bs)==full_bs_ite and tot_counter%inference_bs==last_bs): - y_pr = self.model.predict(input_1 , verbose=0) + y_pr = self.model.predict(input_1 , verbose='0') scalibility_num = scalibility_num+1 if batch_counter==inference_bs: @@ -395,6 +401,7 @@ class sbb_predict: name_space = name_space.split('{')[1] page_element = root_xml.find(link+'Page') + assert isinstance(page_element, ET.Element) """ ro_subelement = ET.SubElement(page_element, 'ReadingOrder') @@ -489,7 +496,7 @@ class sbb_predict: img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :] label_p_pred = self.model.predict(img_patch.reshape(1, img_patch.shape[0], img_patch.shape[1], img_patch.shape[2]), - verbose=0) + verbose='0') if self.task == 'enhancement': seg = label_p_pred[0, :, :, :] @@ -497,6 +504,8 @@ class sbb_predict: elif self.task == 'segmentation' or self.task == 'binarization': seg = np.argmax(label_p_pred, axis=3)[0] seg = np.repeat(seg[:, :, np.newaxis], 3, axis=2) + else: + raise ValueError(f"Unhandled task {self.task}") if i == 0 and j == 0: @@ -551,6 +560,8 @@ class sbb_predict: elif self.task == 'segmentation' or self.task == 'binarization': seg = np.argmax(label_p_pred, axis=3)[0] seg = np.repeat(seg[:, :, np.newaxis], 3, axis=2) + else: + raise ValueError(f"Unhandled task {self.task}") prediction_true = seg.astype(int) From 557fb227f3b0e51433ee20c610c197f394f6fd5d Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 17 Oct 2025 14:21:05 +0200 Subject: [PATCH 377/492] training/gt_gen_utils: fix type errors, comment out dead code --- src/eynollah/training/gt_gen_utils.py | 29 +++++++++++++-------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/src/eynollah/training/gt_gen_utils.py b/src/eynollah/training/gt_gen_utils.py index 2e3428b..28ab422 100644 --- a/src/eynollah/training/gt_gen_utils.py +++ b/src/eynollah/training/gt_gen_utils.py @@ -252,6 +252,7 @@ def get_textline_contours_for_visualization(xml_file): + x_len, y_len = 0, 0 for jj in root1.iter(link+'Page'): y_len=int(jj.attrib['imageHeight']) x_len=int(jj.attrib['imageWidth']) @@ -293,6 +294,7 @@ def get_textline_contours_and_ocr_text(xml_file): + x_len, y_len = 0, 0 for jj in root1.iter(link+'Page'): y_len=int(jj.attrib['imageHeight']) x_len=int(jj.attrib['imageWidth']) @@ -362,7 +364,7 @@ def get_layout_contours_for_visualization(xml_file): link=alltags[0].split('}')[0]+'}' - + x_len, y_len = 0, 0 for jj in root1.iter(link+'Page'): y_len=int(jj.attrib['imageHeight']) x_len=int(jj.attrib['imageWidth']) @@ -637,7 +639,7 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ link=alltags[0].split('}')[0]+'}' - + x_len, y_len = 0, 0 for jj in root1.iter(link+'Page'): y_len=int(jj.attrib['imageHeight']) x_len=int(jj.attrib['imageWidth']) @@ -645,15 +647,12 @@ def get_images_of_ground_truth(gt_list, dir_in, output_dir, output_type, config_ if 'columns_width' in list(config_params.keys()): columns_width_dict = config_params['columns_width'] metadata_element = root1.find(link+'Metadata') - comment_is_sub_element = False + num_col = None for child in metadata_element: tag2 = child.tag if tag2.endswith('}Comments') or tag2.endswith('}comments'): text_comments = child.text num_col = int(text_comments.split('num_col')[1]) - comment_is_sub_element = True - if not comment_is_sub_element: - num_col = None if num_col: x_new = columns_width_dict[str(num_col)] @@ -1739,15 +1738,15 @@ tot_region_ref,x_len, y_len,index_tot_regions, img_poly -def bounding_box(cnt,color, corr_order_index ): - x, y, w, h = cv2.boundingRect(cnt) - x = int(x*scale_w) - y = int(y*scale_h) - - w = int(w*scale_w) - h = int(h*scale_h) - - return [x,y,w,h,int(color), int(corr_order_index)+1] +# def bounding_box(cnt,color, corr_order_index ): +# x, y, w, h = cv2.boundingRect(cnt) +# x = int(x*scale_w) +# y = int(y*scale_h) +# +# w = int(w*scale_w) +# h = int(h*scale_h) +# +# return [x,y,w,h,int(color), int(corr_order_index)+1] def resize_image(seg_in,input_height,input_width): return cv2.resize(seg_in,(input_width,input_height),interpolation=cv2.INTER_NEAREST) From 6c89888166d92a56d58d3f8026ba8f1075d038b4 Mon Sep 17 00:00:00 2001 From: kba Date: Fri, 17 Oct 2025 17:47:59 +0200 Subject: [PATCH 378/492] Refactor CLI for consistent logging and late imports --- src/eynollah/cli.py | 132 +++++++++++--------------- src/eynollah/eynollah.py | 14 ++- src/eynollah/image_enhancer.py | 6 +- src/eynollah/mb_ro_on_layout.py | 9 +- src/eynollah/ocrd_cli_binarization.py | 1 + src/eynollah/processor.py | 2 +- src/eynollah/sbb_binarize.py | 31 +++--- src/eynollah/utils/__init__.py | 1 - src/eynollah/writer.py | 4 +- 9 files changed, 88 insertions(+), 112 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index c9bad52..4a0704f 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -1,15 +1,34 @@ -import sys -import click +from dataclasses import dataclass import logging -from ocrd_utils import initLogging, getLevelName, getLogger -from eynollah.eynollah import Eynollah, Eynollah_ocr -from eynollah.sbb_binarize import SbbBinarizer -from eynollah.image_enhancer import Enhancer -from eynollah.mb_ro_on_layout import machine_based_reading_order_on_layout +import sys +from typing import Union + +import click + + +@dataclass +class EynollahCliContext(): + log_level : Union[str, None] = 'INFO' @click.group() -def main(): - pass +@click.option( + "--log_level", + "-l", + type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), + help="Override log level globally to this", +) +@click.pass_context +def main(ctx, log_level): + """ + eynollah - Document Layout Analysis, Image Enhancement, OCR + """ + ctx.obj = EynollahCliContext(log_level=log_level) + console_handler = logging.StreamHandler(sys.stdout) + console_handler.setLevel(logging.NOTSET) + formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s %(name)s - %(message)s', datefmt='%H:%M:%S') + console_handler.setFormatter(formatter) + logging.getLogger('eynollah').addHandler(console_handler) + logging.getLogger('eynollah').setLevel(ctx.obj.log_level or logging.INFO) @main.command() @click.option( @@ -38,18 +57,13 @@ def main(): type=click.Path(exists=True, file_okay=False), required=True, ) -@click.option( - "--log_level", - "-l", - type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), - help="Override log level globally to this", -) - -def machine_based_reading_order(input, dir_in, out, model, log_level): +def machine_based_reading_order(input, dir_in, out, model): + """ + Generate ReadingOrder with a ML model + """ + from .mb_ro_on_layout import machine_based_reading_order_on_layout assert bool(input) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." orderer = machine_based_reading_order_on_layout(model) - if log_level: - orderer.logger.setLevel(getLevelName(log_level)) orderer.run(xml_filename=input, dir_in=dir_in, @@ -79,17 +93,13 @@ def machine_based_reading_order(input, dir_in, out, model, log_level): type=click.Path(file_okay=True, dir_okay=True), required=True, ) -@click.option( - "--log_level", - "-l", - type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), - help="Override log level globally to this", -) -def binarization(patches, model_dir, input_image, dir_in, output, log_level): +def binarization(patches, model_dir, input_image, dir_in, output): + """ + Binarize images with a ML model + """ assert bool(input_image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." + from .sbb_binarize import SbbBinarizer binarizer = SbbBinarizer(model_dir) - if log_level: - binarizer.log.setLevel(getLevelName(log_level)) binarizer.run(image_path=input_image, use_patches=patches, output=output, dir_in=dir_in) @@ -144,24 +154,18 @@ def binarization(patches, model_dir, input_image, dir_in, output, log_level): is_flag=True, help="if this parameter set to true, this tool will save the enhanced image in org scale.", ) -@click.option( - "--log_level", - "-l", - type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), - help="Override log level globally to this", -) - -def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_lower, save_org_scale, log_level): +def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_lower, save_org_scale): + """ + Enhance image + """ assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." - initLogging() + from .image_enhancer import Enhancer enhancer = Enhancer( model, num_col_upper=num_col_upper, num_col_lower=num_col_lower, save_org_scale=save_org_scale, ) - if log_level: - enhancer.logger.setLevel(getLevelName(log_level)) enhancer.run(overwrite=overwrite, dir_in=dir_in, image_filename=image, @@ -366,30 +370,10 @@ def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_low is_flag=True, help="if this parameter set to true, this tool will ignore layout detection and reading order. It means that textline detection will be done within printspace and contours of textline will be written in xml output file.", ) -# TODO move to top-level CLI context -@click.option( - "--log_level", - "-l", - type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), - help="Override 'eynollah' log level globally to this", -) -# -@click.option( - "--setup-logging", - is_flag=True, - help="Setup a basic console logger", -) - -def layout(image, out, overwrite, dir_in, model, model_version, save_images, save_layout, save_deskewed, save_all, extract_only_images, save_page, enable_plotting, allow_enhancement, curved_line, textline_light, full_layout, tables, right2left, input_binary, allow_scaling, headers_off, light_version, reading_order_machine_based, do_ocr, transformer_ocr, batch_size_ocr, num_col_upper, num_col_lower, threshold_art_class_textline, threshold_art_class_layout, skip_layout_and_reading_order, ignore_page_extraction, log_level, setup_logging): - if setup_logging: - console_handler = logging.StreamHandler(sys.stdout) - console_handler.setLevel(logging.INFO) - formatter = logging.Formatter('%(message)s') - console_handler.setFormatter(formatter) - getLogger('eynollah').addHandler(console_handler) - getLogger('eynollah').setLevel(logging.INFO) - else: - initLogging() +def layout(image, out, overwrite, dir_in, model, model_version, save_images, save_layout, save_deskewed, save_all, extract_only_images, save_page, enable_plotting, allow_enhancement, curved_line, textline_light, full_layout, tables, right2left, input_binary, allow_scaling, headers_off, light_version, reading_order_machine_based, do_ocr, transformer_ocr, batch_size_ocr, num_col_upper, num_col_lower, threshold_art_class_textline, threshold_art_class_layout, skip_layout_and_reading_order, ignore_page_extraction): + """ + Detect Layout (with optional image enhancement and reading order detection) + """ assert enable_plotting or not save_layout, "Plotting with -sl also requires -ep" assert enable_plotting or not save_deskewed, "Plotting with -sd also requires -ep" assert enable_plotting or not save_all, "Plotting with -sa also requires -ep" @@ -409,6 +393,7 @@ def layout(image, out, overwrite, dir_in, model, model_version, save_images, sav assert not extract_only_images or not right2left, "Image extraction -eoi can not be set alongside right2left -r2l" assert not extract_only_images or not headers_off, "Image extraction -eoi can not be set alongside headers_off -ho" assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." + from .eynollah import Eynollah eynollah = Eynollah( model, model_versions=model_version, @@ -435,8 +420,6 @@ def layout(image, out, overwrite, dir_in, model, model_version, save_images, sav threshold_art_class_textline=threshold_art_class_textline, threshold_art_class_layout=threshold_art_class_layout, ) - if log_level: - eynollah.logger.setLevel(getLevelName(log_level)) eynollah.run(overwrite=overwrite, image_filename=image, dir_in=dir_in, @@ -537,16 +520,11 @@ def layout(image, out, overwrite, dir_in, model, model_version, save_images, sav "-min_conf", help="minimum OCR confidence value. Text lines with a confidence value lower than this threshold will not be included in the output XML file.", ) -@click.option( - "--log_level", - "-l", - type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), - help="Override log level globally to this", -) -def ocr(image, dir_in, dir_in_bin, dir_xmls, out, dir_out_image_text, overwrite, model, model_name, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, batch_size, dataset_abbrevation, min_conf_value_of_textline_text, log_level): - initLogging() - +def ocr(image, dir_in, dir_in_bin, dir_xmls, out, dir_out_image_text, overwrite, model, model_name, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, batch_size, dataset_abbrevation, min_conf_value_of_textline_text): + """ + Recognize text with a CNN/RNN or transformer ML model. + """ assert bool(model) != bool(model_name), "Either -m (model directory) or --model_name (specific model name) must be provided." assert not export_textline_images_and_text or not tr_ocr, "Exporting textline and text -etit can not be set alongside transformer ocr -tr_ocr" assert not export_textline_images_and_text or not model, "Exporting textline and text -etit can not be set alongside model -m" @@ -554,6 +532,7 @@ def ocr(image, dir_in, dir_in_bin, dir_xmls, out, dir_out_image_text, overwrite, assert not export_textline_images_and_text or not dir_in_bin, "Exporting textline and text -etit can not be set alongside directory of bin images -dib" assert not export_textline_images_and_text or not dir_out_image_text, "Exporting textline and text -etit can not be set alongside directory of images with predicted text -doit" assert bool(image) != bool(dir_in), "Either -i (single image) or -di (directory) must be provided, but not both." + from .eynollah import Eynollah_ocr eynollah_ocr = Eynollah_ocr( dir_models=model, model_name=model_name, @@ -562,10 +541,7 @@ def ocr(image, dir_in, dir_in_bin, dir_xmls, out, dir_out_image_text, overwrite, do_not_mask_with_textline_contour=do_not_mask_with_textline_contour, batch_size=batch_size, pref_of_dataset=dataset_abbrevation, - min_conf_value_of_textline_text=min_conf_value_of_textline_text, - ) - if log_level: - eynollah_ocr.logger.setLevel(getLevelName(log_level)) + min_conf_value_of_textline_text=min_conf_value_of_textline_text) eynollah_ocr.run(overwrite=overwrite, dir_in=dir_in, dir_in_bin=dir_in_bin, diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 13acba6..03ee7ce 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -7,6 +7,7 @@ document layout analysis (segmentation) with output in PAGE-XML """ # cannot use importlib.resources until we move to 3.9+ forimportlib.resources.files +import logging import sys if sys.version_info < (3, 10): import importlib_resources @@ -19,8 +20,7 @@ import math import os import sys import time -from typing import Dict, List, Optional, Tuple -import atexit +from typing import List, Optional, Tuple import warnings from functools import partial from pathlib import Path @@ -39,7 +39,7 @@ from scipy.ndimage import gaussian_filter1d from numba import cuda from skimage.morphology import skeletonize from ocrd import OcrdPage -from ocrd_utils import getLogger, tf_disable_interactive_logs +from ocrd_utils import tf_disable_interactive_logs import statistics try: @@ -60,8 +60,6 @@ tf_disable_interactive_logs() import tensorflow as tf from tensorflow.python.keras import backend as K from tensorflow.keras.models import load_model -tf.get_logger().setLevel("ERROR") -warnings.filterwarnings("ignore") # use tf1 compatibility for keras backend from tensorflow.compat.v1.keras.backend import set_session from tensorflow.keras import layers @@ -230,8 +228,9 @@ class Eynollah: threshold_art_class_layout: Optional[float] = None, threshold_art_class_textline: Optional[float] = None, skip_layout_and_reading_order : bool = False, + logger : Optional[logging.Logger] = None, ): - self.logger = getLogger('eynollah') + self.logger = logger or logging.getLogger('eynollah') self.plotter = None if skip_layout_and_reading_order: @@ -4888,14 +4887,13 @@ class Eynollah_ocr: do_not_mask_with_textline_contour=False, pref_of_dataset=None, min_conf_value_of_textline_text : Optional[float]=None, - logger=None, ): self.model_name = model_name self.tr_ocr = tr_ocr self.export_textline_images_and_text = export_textline_images_and_text self.do_not_mask_with_textline_contour = do_not_mask_with_textline_contour self.pref_of_dataset = pref_of_dataset - self.logger = logger if logger else getLogger('eynollah') + self.logger = logging.getLogger('eynollah') if not export_textline_images_and_text: if min_conf_value_of_textline_text: diff --git a/src/eynollah/image_enhancer.py b/src/eynollah/image_enhancer.py index 9247efe..5e82cbd 100644 --- a/src/eynollah/image_enhancer.py +++ b/src/eynollah/image_enhancer.py @@ -2,7 +2,7 @@ Image enhancer. The output can be written as same scale of input or in new predicted scale. """ -from logging import Logger +import logging import os import time from typing import Optional @@ -11,7 +11,6 @@ import gc import cv2 import numpy as np -from ocrd_utils import getLogger, tf_disable_interactive_logs import tensorflow as tf from skimage.morphology import skeletonize from tensorflow.keras.models import load_model @@ -35,7 +34,6 @@ class Enhancer: num_col_upper : Optional[int] = None, num_col_lower : Optional[int] = None, save_org_scale : bool = False, - logger : Optional[Logger] = None, ): self.input_binary = False self.light_version = False @@ -49,7 +47,7 @@ class Enhancer: else: self.num_col_lower = num_col_lower - self.logger = logger if logger else getLogger('enhancement') + self.logger = logging.getLogger('eynollah.enhancement') self.dir_models = dir_models self.model_dir_of_binarization = dir_models + "/eynollah-binarization_20210425" self.model_dir_of_enhancement = dir_models + "/eynollah-enhancement_20210425" diff --git a/src/eynollah/mb_ro_on_layout.py b/src/eynollah/mb_ro_on_layout.py index 1b991ae..7dc3f00 100644 --- a/src/eynollah/mb_ro_on_layout.py +++ b/src/eynollah/mb_ro_on_layout.py @@ -1,8 +1,8 @@ """ -Image enhancer. The output can be written as same scale of input or in new predicted scale. +Machine learning based reading order detection """ -from logging import Logger +import logging import os import time from typing import Optional @@ -11,7 +11,6 @@ import xml.etree.ElementTree as ET import cv2 import numpy as np -from ocrd_utils import getLogger import statistics import tensorflow as tf from tensorflow.keras.models import load_model @@ -33,9 +32,9 @@ class machine_based_reading_order_on_layout: def __init__( self, dir_models : str, - logger : Optional[Logger] = None, + logger : Optional[logging.Logger] = None, ): - self.logger = logger if logger else getLogger('mbreorder') + self.logger = logger or logging.getLogger('eynollah.mbreorder') self.dir_models = dir_models self.model_reading_order_dir = dir_models + "/model_eynollah_reading_order_20250824" diff --git a/src/eynollah/ocrd_cli_binarization.py b/src/eynollah/ocrd_cli_binarization.py index 848bbac..e5f85b1 100644 --- a/src/eynollah/ocrd_cli_binarization.py +++ b/src/eynollah/ocrd_cli_binarization.py @@ -34,6 +34,7 @@ class SbbBinarizeProcessor(Processor): Set up the model prior to processing. """ # resolve relative path via OCR-D ResourceManager + assert isinstance(self.parameter, dict) model_path = self.resolve_resource(self.parameter['model']) self.binarizer = SbbBinarizer(model_dir=model_path, logger=self.logger) diff --git a/src/eynollah/processor.py b/src/eynollah/processor.py index 12c7356..60c136c 100644 --- a/src/eynollah/processor.py +++ b/src/eynollah/processor.py @@ -32,8 +32,8 @@ class EynollahProcessor(Processor): allow_scaling=self.parameter['allow_scaling'], headers_off=self.parameter['headers_off'], tables=self.parameter['tables'], + logger=self.logger ) - self.eynollah.logger = self.logger self.eynollah.plotter = None def shutdown(self): diff --git a/src/eynollah/sbb_binarize.py b/src/eynollah/sbb_binarize.py index 3716987..1b46a01 100644 --- a/src/eynollah/sbb_binarize.py +++ b/src/eynollah/sbb_binarize.py @@ -2,19 +2,16 @@ Tool to load model and binarize a given image. """ -import sys from glob import glob import os import logging +from typing import Optional import numpy as np -from PIL import Image import cv2 -from ocrd_utils import tf_disable_interactive_logs -tf_disable_interactive_logs() import tensorflow as tf -from tensorflow.keras.models import load_model -from tensorflow.python.keras import backend as tensorflow_backend +from keras.models import load_model +from keras import backend as tensorflow_backend from .utils import is_image_filename @@ -23,9 +20,13 @@ def resize_image(img_in, input_height, input_width): class SbbBinarizer: - def __init__(self, model_dir, logger=None): + def __init__( + self, + model_dir, + logger: Optional[logging.Logger] = None, + ): self.model_dir = model_dir - self.log = logger if logger else logging.getLogger('SbbBinarizer') + self.logger = logger or logging.getLogger('eynollah.binarize') self.start_new_session() @@ -325,7 +326,7 @@ class SbbBinarizer: image = cv2.imread(image_path) img_last = 0 for n, (model, model_file) in enumerate(zip(self.models, self.model_files)): - self.log.info('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.model_files))) + self.logger.debug('Binarizing with model %s [%s/%s]' % (model_file, n + 1, len(self.model_files))) res = self.predict(model, image, use_patches) @@ -345,17 +346,19 @@ class SbbBinarizer: img_last[:, :][img_last[:, :] > 0] = 255 img_last = (img_last[:, :] == 0) * 255 if output: + self.logger.info('Writing binarized image to %s', output) cv2.imwrite(output, img_last) return img_last else: ls_imgs = list(filter(is_image_filename, os.listdir(dir_in))) - for image_name in ls_imgs: + self.logger.info("Found %d image files to binarize in %s", len(ls_imgs), dir_in) + for i, image_name in enumerate(ls_imgs): image_stem = image_name.split('.')[0] - print(image_name,'image_name') + self.logger.info('Binarizing [%3d/%d] %s', i + 1, len(ls_imgs), image_name) image = cv2.imread(os.path.join(dir_in,image_name) ) img_last = 0 for n, (model, model_file) in enumerate(zip(self.models, self.model_files)): - self.log.info('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.model_files))) + self.logger.debug('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.model_files))) res = self.predict(model, image, use_patches) @@ -375,4 +378,6 @@ class SbbBinarizer: img_last[:, :][img_last[:, :] > 0] = 255 img_last = (img_last[:, :] == 0) * 255 - cv2.imwrite(os.path.join(output, image_stem + '.png'), img_last) + output_filename = os.path.join(output, image_stem + '.png') + self.logger.info('Writing binarized image to %s', output_filename) + cv2.imwrite(output_filename, img_last) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 5ccb2af..9734f93 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -19,7 +19,6 @@ from .contour import (contours_in_same_horizon, find_new_features_of_contours, return_contours_of_image, return_parent_contours) - def pairwise(iterable): # pairwise('ABCDEFG') → AB BC CD DE EF FG diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index 9c3456a..52402f8 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -2,11 +2,11 @@ # pylint: disable=import-error from pathlib import Path import os.path +import logging import xml.etree.ElementTree as ET from .utils.xml import create_page_xml, xml_reading_order from .utils.counter import EynollahIdCounter -from ocrd_utils import getLogger from ocrd_models.ocrd_page import ( BorderType, CoordsType, @@ -24,7 +24,7 @@ import numpy as np class EynollahXmlWriter: def __init__(self, *, dir_out, image_filename, curved_line,textline_light, pcgts=None): - self.logger = getLogger('eynollah.writer') + self.logger = logging.getLogger('eynollah.writer') self.counter = EynollahIdCounter() self.dir_out = dir_out self.image_filename = image_filename From 2a1f892d72938dd8ac5c8719a161effc92f68f28 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Fri, 17 Oct 2025 18:17:41 +0200 Subject: [PATCH 379/492] expand keywords and supported Python versions --- pyproject.toml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index e7744a1..fde7967 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,7 +11,12 @@ description = "Document Layout Analysis" readme = "README.md" license.file = "LICENSE" requires-python = ">=3.8" -keywords = ["document layout analysis", "image segmentation"] +keywords = [ + "document layout analysis", + "image segmentation", + "binarization", + "optical character recognition" +] dynamic = [ "dependencies", @@ -25,6 +30,10 @@ classifiers = [ "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3 :: Only", "Topic :: Scientific/Engineering :: Image Processing", ] From 20a95365c283e4b90638063173fed3b8fb65cee1 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Fri, 17 Oct 2025 18:19:00 +0200 Subject: [PATCH 380/492] remove redundant parentheses --- src/eynollah/utils/__init__.py | 2 +- src/eynollah/utils/separate_lines.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 5ccb2af..fc01520 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1355,7 +1355,7 @@ def return_points_with_boundies(peaks_neg_fin, first_point, last_point): def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, label_lines, contours_h=None): t_ins_c0 = time.time() - separators_closeup=( (region_pre_p[:,:]==label_lines))*1 + separators_closeup= (region_pre_p[:, :] == label_lines) * 1 separators_closeup[0:110,:]=0 separators_closeup[separators_closeup.shape[0]-150:,:]=0 diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 22ef00d..d745ec7 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1475,9 +1475,9 @@ def separate_lines_new2(img_crop, thetha, num_col, slope_region, logger=None, pl img_int = np.zeros((img_xline.shape[0], img_xline.shape[1])) img_int[:, :] = img_xline[:, :] # img_patch_org[:,:,0] - img_resized = np.zeros((int(img_int.shape[0] * (1.2)), int(img_int.shape[1] * (3)))) - img_resized[int(img_int.shape[0] * (0.1)) : int(img_int.shape[0] * (0.1)) + img_int.shape[0], - int(img_int.shape[1] * (1.0)) : int(img_int.shape[1] * (1.0)) + img_int.shape[1]] = img_int[:, :] + img_resized = np.zeros((int(img_int.shape[0] * 1.2), int(img_int.shape[1] * 3))) + img_resized[int(img_int.shape[0] * 0.1): int(img_int.shape[0] * 0.1) + img_int.shape[0], + int(img_int.shape[1] * 1.0): int(img_int.shape[1] * 1.0) + img_int.shape[1]] = img_int[:, :] # plt.imshow(img_xline) # plt.show() img_line_rotated = rotate_image(img_resized, slopes_tile_wise[i]) @@ -1489,8 +1489,8 @@ def separate_lines_new2(img_crop, thetha, num_col, slope_region, logger=None, pl img_patch_separated_returned[:, :][img_patch_separated_returned[:, :] != 0] = 1 img_patch_separated_returned_true_size = img_patch_separated_returned[ - int(img_int.shape[0] * (0.1)) : int(img_int.shape[0] * (0.1)) + img_int.shape[0], - int(img_int.shape[1] * (1.0)) : int(img_int.shape[1] * (1.0)) + img_int.shape[1]] + int(img_int.shape[0] * 0.1): int(img_int.shape[0] * 0.1) + img_int.shape[0], + int(img_int.shape[1] * 1.0): int(img_int.shape[1] * 1.0) + img_int.shape[1]] img_patch_separated_returned_true_size = img_patch_separated_returned_true_size[:, margin : length_x - margin] img_patch_interest_revised[:, index_x_d + margin : index_x_u - margin] = img_patch_separated_returned_true_size @@ -1519,7 +1519,7 @@ def return_deskew_slop(img_patch_org, sigma_des,n_tot_angles=100, img_int[:,:]=img_patch_org[:,:]#img_patch_org[:,:,0] max_shape=np.max(img_int.shape) - img_resized=np.zeros((int( max_shape*(1.1) ) , int( max_shape*(1.1) ) )) + img_resized=np.zeros((int(max_shape * 1.1) , int(max_shape * 1.1))) onset_x=int((img_resized.shape[1]-img_int.shape[1])/2.) onset_y=int((img_resized.shape[0]-img_int.shape[0])/2.) From 9733d575bfd2caa19df0465a0fac9e5f352303b8 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Fri, 17 Oct 2025 18:21:49 +0200 Subject: [PATCH 381/492] replace list declaration with list literal (faster) --- src/eynollah/utils/__init__.py | 18 ++++++------------ src/eynollah/utils/separate_lines.py | 6 ++---- 2 files changed, 8 insertions(+), 16 deletions(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index fc01520..c906dd0 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -151,8 +151,7 @@ def return_x_start_end_mothers_childs_and_type_of_reading_order( min_ys=np.min(y_sep) max_ys=np.max(y_sep) - y_mains=[] - y_mains.append(min_ys) + y_mains= [min_ys] y_mains_sep_ohne_grenzen=[] for ii in range(len(new_main_sep_y)): @@ -525,8 +524,7 @@ def find_num_col(regions_without_separators, num_col_classifier, tables, multipl # print(forest[np.argmin(z[forest]) ] ) if not isNaN(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) - forest = [] - forest.append(peaks_neg_fin[i + 1]) + forest = [peaks_neg_fin[i + 1]] if i == (len(peaks_neg_fin) - 1): # print(print(forest[np.argmin(z[forest]) ] )) if not isNaN(forest[np.argmin(z[forest])]): @@ -694,8 +692,7 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8): # print(forest[np.argmin(z[forest]) ] ) if not isNaN(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) - forest = [] - forest.append(peaks_neg_fin[i + 1]) + forest = [peaks_neg_fin[i + 1]] if i == (len(peaks_neg_fin) - 1): # print(print(forest[np.argmin(z[forest]) ] )) if not isNaN(forest[np.argmin(z[forest])]): @@ -1346,8 +1343,7 @@ def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new( return img_p_in, special_separators def return_points_with_boundies(peaks_neg_fin, first_point, last_point): - peaks_neg_tot = [] - peaks_neg_tot.append(first_point) + peaks_neg_tot = [first_point] for ii in range(len(peaks_neg_fin)): peaks_neg_tot.append(peaks_neg_fin[ii]) peaks_neg_tot.append(last_point) @@ -1516,8 +1512,7 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, args_cy_splitter=np.argsort(cy_main_splitters) cy_main_splitters_sort=cy_main_splitters[args_cy_splitter] - splitter_y_new=[] - splitter_y_new.append(0) + splitter_y_new= [0] for i in range(len(cy_main_splitters_sort)): splitter_y_new.append( cy_main_splitters_sort[i] ) splitter_y_new.append(region_pre_p.shape[0]) @@ -1593,8 +1588,7 @@ def return_boxes_of_images_by_order_of_reading_new( num_col, peaks_neg_fin = find_num_col( regions_without_separators[splitter_y_new[i]:splitter_y_new[i+1], :], num_col_classifier, tables, multiplier=3.) - peaks_neg_fin_early=[] - peaks_neg_fin_early.append(0) + peaks_neg_fin_early= [0] #print(peaks_neg_fin,'peaks_neg_fin') for p_n in peaks_neg_fin: peaks_neg_fin_early.append(p_n) diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index d745ec7..84ca6d7 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1227,8 +1227,7 @@ def separate_lines_new_inside_tiles(img_path, thetha): if diff_peaks[i] > cut_off: if not np.isnan(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) - forest = [] - forest.append(peaks_neg[i + 1]) + forest = [peaks_neg[i + 1]] if i == (len(peaks_neg) - 1): if not np.isnan(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) @@ -1248,8 +1247,7 @@ def separate_lines_new_inside_tiles(img_path, thetha): if diff_peaks_pos[i] > cut_off: if not np.isnan(forest[np.argmax(z[forest])]): peaks_pos_true.append(forest[np.argmax(z[forest])]) - forest = [] - forest.append(peaks[i + 1]) + forest = [peaks[i + 1]] if i == (len(peaks) - 1): if not np.isnan(forest[np.argmax(z[forest])]): peaks_pos_true.append(forest[np.argmax(z[forest])]) From f212ffa22ddfcdf953ec133d21dce900136cd7c1 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Fri, 17 Oct 2025 18:27:18 +0200 Subject: [PATCH 382/492] remove unnecessary backslash --- src/eynollah/utils/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index c906dd0..aa89bd1 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1384,8 +1384,7 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, gray = cv2.bitwise_not(separators_closeup_n_binary) gray=gray.astype(np.uint8) - bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \ - cv2.THRESH_BINARY, 15, -2) + bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -2) horizontal = np.copy(bw) vertical = np.copy(bw) From 496a0e2ca43631b092b5537b11d0ba7336a4375c Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Fri, 17 Oct 2025 19:19:26 +0200 Subject: [PATCH 383/492] readme and documentation updates --- README.md | 80 ++++++++++++++++++++++---------------------------- docs/docker.md | 24 +++++++++++++++ docs/ocrd.md | 21 +++++++++++++ 3 files changed, 80 insertions(+), 45 deletions(-) create mode 100644 docs/docker.md create mode 100644 docs/ocrd.md diff --git a/README.md b/README.md index 3ba5086..fabb594 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,7 @@ > Document Layout Analysis, Binarization and OCR with Deep Learning and Heuristics +[![Python Versions](https://img.shields.io/pypi/pyversions/eynollah.svg)](https://pypi.python.org/pypi/eynollah) [![PyPI Version](https://img.shields.io/pypi/v/eynollah)](https://pypi.org/project/eynollah/) [![GH Actions Test](https://github.com/qurator-spk/eynollah/actions/workflows/test-eynollah.yml/badge.svg)](https://github.com/qurator-spk/eynollah/actions/workflows/test-eynollah.yml) [![GH Actions Deploy](https://github.com/qurator-spk/eynollah/actions/workflows/build-docker.yml/badge.svg)](https://github.com/qurator-spk/eynollah/actions/workflows/build-docker.yml) @@ -11,24 +12,22 @@ ![](https://user-images.githubusercontent.com/952378/102350683-8a74db80-3fa5-11eb-8c7e-f743f7d6eae2.jpg) ## Features -* Support for 10 distinct segmentation classes: +* Document layout analysis using pixelwise segmentation models with support for 10 distinct segmentation classes: * background, [page border](https://ocr-d.de/en/gt-guidelines/trans/lyRand.html), [text region](https://ocr-d.de/en/gt-guidelines/trans/lytextregion.html#textregionen__textregion_), [text line](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_TextLineType.html), [header](https://ocr-d.de/en/gt-guidelines/trans/lyUeberschrift.html), [image](https://ocr-d.de/en/gt-guidelines/trans/lyBildbereiche.html), [separator](https://ocr-d.de/en/gt-guidelines/trans/lySeparatoren.html), [marginalia](https://ocr-d.de/en/gt-guidelines/trans/lyMarginalie.html), [initial](https://ocr-d.de/en/gt-guidelines/trans/lyInitiale.html), [table](https://ocr-d.de/en/gt-guidelines/trans/lyTabellen.html) -* Support for various image optimization operations: - * cropping (border detection), binarization, deskewing, dewarping, scaling, enhancing, resizing * Textline segmentation to bounding boxes or polygons (contours) including for curved lines and vertical text -* Text recognition (OCR) using either CNN-RNN or Transformer models -* Detection of reading order (left-to-right or right-to-left) using either heuristics or trainable models +* Document image binarization with pixelwise segmentation or hybrid CNN-Transformer models +* Text recognition (OCR) with CNN-RNN or TrOCR models +* Detection of reading order (left-to-right or right-to-left) using heuristics or trainable models * Output in [PAGE-XML](https://github.com/PRImA-Research-Lab/PAGE-XML) * [OCR-D](https://github.com/qurator-spk/eynollah#use-as-ocr-d-processor) interface :warning: Development is focused on achieving the best quality of results for a wide variety of historical -documents and therefore processing can be very slow. We aim to improve this, but contributions are welcome. +documents using a combination of multiple deep learning models and heuristics; therefore processing can be slow. ## Installation - Python `3.8-3.11` with Tensorflow `<2.13` on Linux are currently supported. - -For (limited) GPU support the CUDA toolkit needs to be installed. A known working config is CUDA `11` with cuDNN `8.6`. +For (limited) GPU support the CUDA toolkit needs to be installed. +A working config is CUDA `11.8` with cuDNN `8.6`. You can either install from PyPI @@ -53,23 +52,33 @@ pip install "eynollah[OCR]" make install EXTRAS=OCR ``` +With Docker, use + +``` +docker pull ghcr.io/qurator-spk/eynollah:latest +``` + +For additional documentation on using Eynollah and Docker, see [`docker.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/docker.md). + ## Models -Pretrained models can be downloaded from [zenodo](https://zenodo.org/records/17194824) or [huggingface](https://huggingface.co/SBB?search_models=eynollah). +Pretrained models can be downloaded from [Zenodo](https://zenodo.org/records/17194824) or [Hugging Face](https://huggingface.co/SBB?search_models=eynollah). -For documentation on models, have a look at [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). -Model cards are also provided for our trained models. +For documentation on models, have a look at [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). ## Training -In case you want to train your own model with Eynollah, see the -documentation in [`train.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/train.md) and use the -tools in the [`train` folder](https://github.com/qurator-spk/eynollah/tree/main/train). +To train your own model with Eynollah, see the documentation in [`train.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/train.md) and use the +tools in the [`train`](https://github.com/qurator-spk/eynollah/tree/main/train) folder. ## Usage -Eynollah supports five use cases: layout analysis (segmentation), binarization, -image enhancement, text recognition (OCR), and reading order detection. +Eynollah supports five use cases: +1. [layout analysis (segmentation)](#layout-analysis), +2. [binarization](#binarization), +3. [image enhancement](#image-enhancement), +4. [text recognition (OCR)](#ocr), and +5. [reading order detection](#reading-order-detection). ### Layout Analysis @@ -114,6 +123,8 @@ If no further option is set, the tool performs layout detection of main regions and marginals). The best output quality is achieved when RGB images are used as input rather than greyscale or binarized images. +Additional documentation can be found in [`usage.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). + ### Binarization The binarization module performs document image binarization using pretrained pixelwise segmentation models. @@ -127,9 +138,12 @@ eynollah binarization \ -m \ ``` +### Image Enhancement +TODO + ### OCR -The OCR module performs text recognition using either a CNN-RNN model or a Transformer model. +The OCR module performs text recognition using either CNN-RNN or TrOCR models. The command-line interface for OCR can be called like this: @@ -141,7 +155,7 @@ eynollah ocr \ -m | --model_name \ ``` -### Machine-based-reading-order +### Reading Order Detection The machine-based reading-order module employs a pretrained model to identify the reading order from layouts represented in PAGE-XML files. @@ -160,36 +174,12 @@ eynollah machine-based-reading-order \ Eynollah ships with a CLI interface to be used as [OCR-D](https://ocr-d.de) [processor](https://ocr-d.de/en/spec/cli), formally described in [`ocrd-tool.json`](https://github.com/qurator-spk/eynollah/tree/main/src/eynollah/ocrd-tool.json). -In this case, the source image file group with (preferably) RGB images should be used as input like this: - - ocrd-eynollah-segment -I OCR-D-IMG -O OCR-D-SEG -P models eynollah_layout_v0_5_0 - -If the input file group is PAGE-XML (from a previous OCR-D workflow step), Eynollah behaves as follows: -- existing regions are kept and ignored (i.e. in effect they might overlap segments from Eynollah results) -- existing annotation (and respective `AlternativeImage`s) are partially _ignored_: - - previous page frame detection (`cropped` images) - - previous derotation (`deskewed` images) - - previous thresholding (`binarized` images) -- if the page-level image nevertheless deviates from the original (`@imageFilename`) - (because some other preprocessing step was in effect like `denoised`), then - the output PAGE-XML will be based on that as new top-level (`@imageFilename`) - - ocrd-eynollah-segment -I OCR-D-XYZ -O OCR-D-SEG -P models eynollah_layout_v0_5_0 - -In general, it makes more sense to add other workflow steps **after** Eynollah. - -There is also an OCR-D processor for binarization: - - ocrd-sbb-binarize -I OCR-D-IMG -O OCR-D-BIN -P models default-2021-03-09 - -#### Additional documentation - -Additional documentation is available in the [docs](https://github.com/qurator-spk/eynollah/tree/main/docs) directory. +Further documentation on using Eynollah with OCR-D can be found in [`ocrd.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/ocrd.md). ## How to cite ```bibtex -@inproceedings{hip23rezanezhad, +@inproceedings{hip23eynollah, title = {Document Layout Analysis with Deep Learning and Heuristics}, author = {Rezanezhad, Vahid and Baierer, Konstantin and Gerber, Mike and Labusch, Kai and Neudecker, Clemens}, booktitle = {Proceedings of the 7th International Workshop on Historical Document Imaging and Processing {HIP} 2023, diff --git a/docs/docker.md b/docs/docker.md new file mode 100644 index 0000000..466adf6 --- /dev/null +++ b/docs/docker.md @@ -0,0 +1,24 @@ +# 1. ocrd resource manager +(just once, to get the models and install them into a named volume for later re-use) + + vol_models=ocrd-resources:/usr/local/share/ocrd-resources + docker run --rm -v $vol_models ocrd/eynollah ocrd resmgr download ocrd-eynollah-segment default + +Now, each time you want to use Eynollah, pass the same resources volume again. +Also, bind-mount some data directory, e.g. current working directory $PWD (/data is default working directory in the container). +Either use standalone CLI (2) or OCR-D CLI (3): + +# 2. standalone CLI (follow self-help, cf. readme) + + docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah eynollah binarization --help + docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah eynollah layout --help + docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah eynollah ocr --help + +# 3. OCR-D CLI (follow self-help, cf. readme and https://ocr-d.de/en/spec/cli) + + docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah ocrd-eynollah-segment -h + docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah ocrd-sbb-binarize -h + +Alternatively, just "log in" to the container once and use the commands there: + + docker run --rm -v $vol_models -v $PWD:/data -it ocrd/eynollah bash \ No newline at end of file diff --git a/docs/ocrd.md b/docs/ocrd.md new file mode 100644 index 0000000..a391024 --- /dev/null +++ b/docs/ocrd.md @@ -0,0 +1,21 @@ +When using Eynollah in OCR-D, the source image file group with (preferably) RGB images should be used as input like this: + + ocrd-eynollah-segment -I OCR-D-IMG -O OCR-D-SEG -P models eynollah_layout_v0_5_0 + +If the input file group is PAGE-XML (from a previous OCR-D workflow step), Eynollah behaves as follows: +- existing regions are kept and ignored (i.e. in effect they might overlap segments from Eynollah results) +- existing annotation (and respective `AlternativeImage`s) are partially _ignored_: + - previous page frame detection (`cropped` images) + - previous derotation (`deskewed` images) + - previous thresholding (`binarized` images) +- if the page-level image nevertheless deviates from the original (`@imageFilename`) + (because some other preprocessing step was in effect like `denoised`), then + the output PAGE-XML will be based on that as new top-level (`@imageFilename`) + + ocrd-eynollah-segment -I OCR-D-XYZ -O OCR-D-SEG -P models eynollah_layout_v0_5_0 + +In general, it makes more sense to add other workflow steps **after** Eynollah. + +There is also an OCR-D processor for binarization: + + ocrd-sbb-binarize -I OCR-D-IMG -O OCR-D-BIN -P models default-2021-03-09 From 9d2dbb838845cdf15663fb611f5d8f477b469774 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 20 Oct 2025 14:47:55 +0200 Subject: [PATCH 384/492] updating model based reading orde detection --- docs/models.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/models.md b/docs/models.md index 3d296d5..40b23ae 100644 --- a/docs/models.md +++ b/docs/models.md @@ -151,7 +151,7 @@ This model is used for the task of illustration detection only. Model card: [Reading Order Detection]() -TODO +The model extracts the reading order of text regions from the layout by classifying pairwise relationships between them. A sorting algorithm then determines the overall reading sequence. ## Heuristic methods From 3ec5ceb22e317fbe5234f625412898232277ab68 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 20 Oct 2025 14:55:14 +0200 Subject: [PATCH 385/492] Update flowchart --- docs/models.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/models.md b/docs/models.md index 40b23ae..50ef726 100644 --- a/docs/models.md +++ b/docs/models.md @@ -18,7 +18,8 @@ Two Arabic/Persian terms form the name of the model suite: عين الله, whic See the flowchart below for the different stages and how they interact: -![](https://user-images.githubusercontent.com/952378/100619946-1936f680-331e-11eb-9297-6e8b4cab3c16.png) +eynollah_flowchart + ## Models From c8455370a9dfde698ee91125d3400d8a313ede5a Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Mon, 20 Oct 2025 15:13:45 +0200 Subject: [PATCH 386/492] updating heuristics and ocr documentation --- docs/models.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/docs/models.md b/docs/models.md index 50ef726..7f83b33 100644 --- a/docs/models.md +++ b/docs/models.md @@ -154,13 +154,17 @@ Model card: [Reading Order Detection]() The model extracts the reading order of text regions from the layout by classifying pairwise relationships between them. A sorting algorithm then determines the overall reading sequence. +### OCR + +We have trained three OCR models: two CNN-RNN–based models and one transformer-based TrOCR model. The CNN-RNN models are generally faster and provide better results in most cases, though their performance decreases with heavily degraded images. The TrOCR model, on the other hand, is computationally expensive and slower during inference, but it can possibly produce better results on strongly degraded images. ## Heuristic methods Additionally, some heuristic methods are employed to further improve the model predictions: * After border detection, the largest contour is determined by a bounding box, and the image cropped to these coordinates. -* For text region detection, the image is scaled up to make it easier for the model to detect background space between text regions. +* Unlike the non-light version, where the image is scaled up to help the model better detect the background spaces between text regions, the light version uses down-scaled images. In this case, introducing an artificial class along the boundaries of text regions and text lines has helped to isolate and separate the text regions more effectively. * A minimum area is defined for text regions in relation to the overall image dimensions, so that very small regions that are noise can be filtered out. -* Deskewing is applied on the text region level (due to regions having different degrees of skew) in order to improve the textline segmentation result. -* After deskewing, a calculation of the pixel distribution on the X-axis allows the separation of textlines (foreground) and background pixels. -* Finally, using the derived coordinates, bounding boxes are determined for each textline. +* In the non-light version, deskewing is applied at the text-region level (since regions may have different degrees of skew) to improve text-line segmentation results. In contrast, the light version performs deskewing only at the page level to enhance margin detection and heuristic reading-order estimation. +* After deskewing, a calculation of the pixel distribution on the X-axis allows the separation of textlines (foreground) and background pixels (only in non-light version). +* Finally, using the derived coordinates, bounding boxes are determined for each textline (only in non-light version). +* As mentioned above, the reading order can be determined using a model; however, this approach is computationally expensive, time-consuming, and less accurate due to the limited amount of ground-truth data available for training. Therefore, our tool uses a heuristic reading-order detection method as the default. The heuristic approach relies on headers and separators to determine the reading order of text regions. From a850ef39ea826087b41ea9f89d96d3cbb0dcadda Mon Sep 17 00:00:00 2001 From: kba Date: Mon, 20 Oct 2025 18:34:44 +0200 Subject: [PATCH 387/492] factor model loading in Eynollah to EynollahModelZoo --- src/eynollah/cli.py | 49 +++++- src/eynollah/eynollah.py | 195 ++++-------------------- src/eynollah/image_enhancer.py | 2 +- src/eynollah/mb_ro_on_layout.py | 2 +- src/eynollah/model_zoo.py | 260 ++++++++++++++++++++++++++++++++ src/eynollah/patch_encoder.py | 52 +++++++ 6 files changed, 389 insertions(+), 171 deletions(-) create mode 100644 src/eynollah/model_zoo.py create mode 100644 src/eynollah/patch_encoder.py diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index c9bad52..a56e710 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -1,16 +1,57 @@ +from dataclasses import dataclass import sys +import os import click import logging +from typing import Tuple, List from ocrd_utils import initLogging, getLevelName, getLogger from eynollah.eynollah import Eynollah, Eynollah_ocr from eynollah.sbb_binarize import SbbBinarizer from eynollah.image_enhancer import Enhancer from eynollah.mb_ro_on_layout import machine_based_reading_order_on_layout +from eynollah.model_zoo import EynollahModelZoo + +@dataclass +class EynollahCliCtx(): + model_basedir: str + model_overrides: List[Tuple[str, str, str]] @click.group() def main(): pass +@main.command('list-models') +@click.option( + "--model", + "-m", + 'model_basedir', + help="directory of models", + type=click.Path(exists=True, file_okay=False), + # default=f"{os.environ['HOME']}/.local/share/ocrd-resources/ocrd-eynollah-segment", + required=True, +) +@click.option( + "--model-overrides", + "-mv", + help="override default versions of model categories, syntax is 'CATEGORY VARIANT PATH', e.g 'region light /path/to/model'. See eynollah list-models for the full list", + type=(str, str, str), + multiple=True, +) +@click.pass_context +def list_models( + ctx, + model_basedir: str, + model_overrides: List[Tuple[str, str, str]], +): + """ + List all the models in the zoo + """ + ctx.obj = EynollahCliCtx( + model_basedir=model_basedir, + model_overrides=model_overrides + ) + print(EynollahModelZoo(basedir=ctx.obj.model_basedir, model_overrides=ctx.obj.model_overrides)) + @main.command() @click.option( "--input", @@ -198,15 +239,17 @@ def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_low @click.option( "--model", "-m", + 'model_basedir', help="directory of models", type=click.Path(exists=True, file_okay=False), + # default=f"{os.environ['HOME']}/.local/share/ocrd-resources/ocrd-eynollah-segment", required=True, ) @click.option( "--model_version", "-mv", - help="override default versions of model categories", - type=(str, str), + help="override default versions of model categories, syntax is 'CATEGORY VARIANT PATH', e.g 'region light /path/to/model'. See eynollah list-models for the full list", + type=(str, str, str), multiple=True, ) @click.option( @@ -411,7 +454,7 @@ def layout(image, out, overwrite, dir_in, model, model_version, save_images, sav assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." eynollah = Eynollah( model, - model_versions=model_version, + model_overrides=model_version, extract_only_images=extract_only_images, enable_plotting=enable_plotting, allow_enhancement=allow_enhancement, diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 13acba6..dadb1e0 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -2,12 +2,15 @@ # pylint: disable=too-many-locals,wrong-import-position,too-many-lines,too-many-statements,chained-comparison,fixme,broad-except,c-extension-no-member # pylint: disable=too-many-public-methods,too-many-arguments,too-many-instance-attributes,too-many-public-methods, # pylint: disable=consider-using-enumerate +# pyright: reportUnnecessaryTypeIgnoreComment=true +# pyright: reportPossiblyUnboundVariable=false """ document layout analysis (segmentation) with output in PAGE-XML """ # cannot use importlib.resources until we move to 3.9+ forimportlib.resources.files import sys + if sys.version_info < (3, 10): import importlib_resources else: @@ -19,7 +22,7 @@ import math import os import sys import time -from typing import Dict, List, Optional, Tuple +from typing import Dict, Union,List, Optional, Tuple import atexit import warnings from functools import partial @@ -58,8 +61,7 @@ except ImportError: #os.environ['CUDA_VISIBLE_DEVICES'] = '-1' tf_disable_interactive_logs() import tensorflow as tf -from tensorflow.python.keras import backend as K -from tensorflow.keras.models import load_model +from keras.models import load_model tf.get_logger().setLevel("ERROR") warnings.filterwarnings("ignore") # use tf1 compatibility for keras backend @@ -67,6 +69,7 @@ from tensorflow.compat.v1.keras.backend import set_session from tensorflow.keras import layers from tensorflow.keras.layers import StringLookup +from .model_zoo import EynollahModelZoo from .utils.contour import ( filter_contours_area_of_image, filter_contours_area_of_image_tables, @@ -155,59 +158,12 @@ patch_size = 1 num_patches =21*21#14*14#28*28#14*14#28*28 -class Patches(layers.Layer): - def __init__(self, **kwargs): - super(Patches, self).__init__() - self.patch_size = patch_size - - def call(self, images): - batch_size = tf.shape(images)[0] - patches = tf.image.extract_patches( - images=images, - sizes=[1, self.patch_size, self.patch_size, 1], - strides=[1, self.patch_size, self.patch_size, 1], - rates=[1, 1, 1, 1], - padding="VALID", - ) - patch_dims = patches.shape[-1] - patches = tf.reshape(patches, [batch_size, -1, patch_dims]) - return patches - def get_config(self): - - config = super().get_config().copy() - config.update({ - 'patch_size': self.patch_size, - }) - return config - -class PatchEncoder(layers.Layer): - def __init__(self, **kwargs): - super(PatchEncoder, self).__init__() - self.num_patches = num_patches - self.projection = layers.Dense(units=projection_dim) - self.position_embedding = layers.Embedding( - input_dim=num_patches, output_dim=projection_dim - ) - - def call(self, patch): - positions = tf.range(start=0, limit=self.num_patches, delta=1) - encoded = self.projection(patch) + self.position_embedding(positions) - return encoded - def get_config(self): - - config = super().get_config().copy() - config.update({ - 'num_patches': self.num_patches, - 'projection': self.projection, - 'position_embedding': self.position_embedding, - }) - return config class Eynollah: def __init__( self, dir_models : str, - model_versions: List[Tuple[str, str]] = [], + model_overrides: List[Tuple[str, str, str]] = [], extract_only_images : bool =False, enable_plotting : bool = False, allow_enhancement : bool = False, @@ -232,6 +188,7 @@ class Eynollah: skip_layout_and_reading_order : bool = False, ): self.logger = getLogger('eynollah') + self.model_zoo = EynollahModelZoo(basedir=dir_models) self.plotter = None if skip_layout_and_reading_order: @@ -297,93 +254,13 @@ class Eynollah: self.logger.warning("no GPU device available") self.logger.info("Loading models...") - self.setup_models(dir_models, model_versions) + self.setup_models(*model_overrides) self.logger.info(f"Model initialization complete ({time.time() - t_start:.1f}s)") - @staticmethod - def our_load_model(model_file, basedir=""): - if basedir: - model_file = os.path.join(basedir, model_file) - if model_file.endswith('.h5') and Path(model_file[:-3]).exists(): - # prefer SavedModel over HDF5 format if it exists - model_file = model_file[:-3] - try: - model = load_model(model_file, compile=False) - except: - model = load_model(model_file, compile=False, custom_objects={ - "PatchEncoder": PatchEncoder, "Patches": Patches}) - return model - - def setup_models(self, basedir: Path, model_versions: List[Tuple[str, str]] = []): - self.model_versions = { - "enhancement": "eynollah-enhancement_20210425", - "binarization": "eynollah-binarization_20210425", - "col_classifier": "eynollah-column-classifier_20210425", - "page": "model_eynollah_page_extraction_20250915", - #?: "eynollah-main-regions-aug-scaling_20210425", - "region": ( # early layout - "eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18" if self.extract_only_images else - "eynollah-main-regions_20220314" if self.light_version else - "eynollah-main-regions-ensembled_20210425"), - "region_p2": ( # early layout, non-light, 2nd part - "eynollah-main-regions-aug-rotation_20210425"), - "region_1_2": ( # early layout, light, 1-or-2-column - #"modelens_12sp_elay_0_3_4__3_6_n" - #"modelens_earlylayout_12spaltige_2_3_5_6_7_8" - #"modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18" - #"modelens_1_2_4_5_early_lay_1_2_spaltige" - #"model_3_eraly_layout_no_patches_1_2_spaltige" - "modelens_e_l_all_sp_0_1_2_3_4_171024"), - "region_fl_np": ( # full layout / no patches - #"modelens_full_lay_1_3_031124" - #"modelens_full_lay_13__3_19_241024" - #"model_full_lay_13_241024" - #"modelens_full_lay_13_17_231024" - #"modelens_full_lay_1_2_221024" - #"eynollah-full-regions-1column_20210425" - "modelens_full_lay_1__4_3_091124"), - "region_fl": ( # full layout / with patches - #"eynollah-full-regions-3+column_20210425" - ##"model_2_full_layout_new_trans" - #"modelens_full_lay_1_3_031124" - #"modelens_full_lay_13__3_19_241024" - #"model_full_lay_13_241024" - #"modelens_full_lay_13_17_231024" - #"modelens_full_lay_1_2_221024" - #"modelens_full_layout_24_till_28" - #"model_2_full_layout_new_trans" - "modelens_full_lay_1__4_3_091124"), - "reading_order": ( - #"model_mb_ro_aug_ens_11" - #"model_step_3200000_mb_ro" - #"model_ens_reading_order_machine_based" - #"model_mb_ro_aug_ens_8" - #"model_ens_reading_order_machine_based" - "model_eynollah_reading_order_20250824"), - "textline": ( - #"modelens_textline_1_4_16092024" - #"model_textline_ens_3_4_5_6_artificial" - #"modelens_textline_1_3_4_20240915" - #"model_textline_ens_3_4_5_6_artificial" - #"modelens_textline_9_12_13_14_15" - #"eynollah-textline_light_20210425" - "modelens_textline_0_1__2_4_16092024" if self.textline_light else - #"eynollah-textline_20210425" - "modelens_textline_0_1__2_4_16092024"), - "table": ( - None if not self.tables else - "modelens_table_0t4_201124" if self.light_version else - "eynollah-tables_20210319"), - "ocr": ( - None if not self.ocr else - "model_eynollah_ocr_trocr_20250919" if self.tr else - "model_eynollah_ocr_cnnrnn_20250930") - } + def setup_models(self, *model_overrides: Tuple[str, str, str]): # override defaults from CLI - for key, val in model_versions: - assert key in self.model_versions, "unknown model category '%s'" % key - self.logger.warning("overriding default model %s version %s to %s", key, self.model_versions[key], val) - self.model_versions[key] = val + self.model_zoo.override_models(*model_overrides) + # load models, depending on modes # (note: loading too many models can cause OOM on GPU/CUDA, # thus, we try set up the minimal configuration for the current mode) @@ -391,10 +268,10 @@ class Eynollah: "col_classifier", "binarization", "page", - "region" + ("region", 'extract_only_images' if self.extract_only_images else 'light' if self.light_version else '') ] if not self.extract_only_images: - loadable.append("textline") + loadable.append(("textline", 'light' if self.light_version else '')) if self.light_version: loadable.append("region_1_2") else: @@ -407,38 +284,24 @@ class Eynollah: if self.reading_order_machine_based: loadable.append("reading_order") if self.tables: - loadable.append("table") - - self.models = {name: self.our_load_model(self.model_versions[name], basedir) - for name in loadable - } + loadable.append(("table", 'light' if self.light_version else '')) if self.ocr: - ocr_model_dir = os.path.join(basedir, self.model_versions["ocr"]) if self.tr: - self.models["ocr"] = VisionEncoderDecoderModel.from_pretrained(ocr_model_dir) + loadable.append(('ocr', 'tr')) + loadable.append(('ocr_tr_processor', 'tr')) + # TODO why here and why only for tr? if torch.cuda.is_available(): self.logger.info("Using GPU acceleration") self.device = torch.device("cuda:0") else: self.logger.info("Using CPU processing") self.device = torch.device("cpu") - #self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten") - self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed") else: - ocr_model = load_model(ocr_model_dir, compile=False) - self.models["ocr"] = tf.keras.models.Model( - ocr_model.get_layer(name = "image").input, - ocr_model.get_layer(name = "dense2").output) - - with open(os.path.join(ocr_model_dir, "characters_org.txt"), "r") as config_file: - characters = json.load(config_file) - # Mapping characters to integers. - char_to_num = StringLookup(vocabulary=list(characters), mask_token=None) - # Mapping integers back to original characters. - self.num_to_char = StringLookup( - vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True - ) + loadable.append('ocr') + loadable.append('num_to_char') + + self.models = self.model_zoo.load_models(*loadable) def __del__(self): if hasattr(self, 'executor') and getattr(self, 'executor'): @@ -4261,7 +4124,7 @@ class Eynollah: gc.collect() ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons, np.zeros((len(all_found_textline_polygons), 4)), - self.models["ocr"], self.b_s_ocr, self.num_to_char, textline_light=True) + self.models["ocr"], self.b_s_ocr, self.models["num_to_char"], textline_light=True) else: ocr_all_textlines = None @@ -4770,27 +4633,27 @@ class Eynollah: if len(all_found_textline_polygons): ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons, all_box_coord, - self.models["ocr"], self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + self.models["ocr"], self.b_s_ocr, self.models["num_to_char"], self.textline_light, self.curved_line) if len(all_found_textline_polygons_marginals_left): ocr_all_textlines_marginals_left = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons_marginals_left, all_box_coord_marginals_left, - self.models["ocr"], self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + self.models["ocr"], self.b_s_ocr, self.models["num_to_char"], self.textline_light, self.curved_line) if len(all_found_textline_polygons_marginals_right): ocr_all_textlines_marginals_right = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons_marginals_right, all_box_coord_marginals_right, - self.models["ocr"], self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + self.models["ocr"], self.b_s_ocr, self.models["num_to_char"], self.textline_light, self.curved_line) if self.full_layout and len(all_found_textline_polygons): ocr_all_textlines_h = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons_h, all_box_coord_h, - self.models["ocr"], self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + self.models["ocr"], self.b_s_ocr, self.models["num_to_char"], self.textline_light, self.curved_line) if self.full_layout and len(polygons_of_drop_capitals): ocr_all_textlines_drop = return_rnn_cnn_ocr_of_given_textlines( image_page, polygons_of_drop_capitals, np.zeros((len(polygons_of_drop_capitals), 4)), - self.models["ocr"], self.b_s_ocr, self.num_to_char, self.textline_light, self.curved_line) + self.models["ocr"], self.b_s_ocr, self.models["num_to_char"], self.textline_light, self.curved_line) else: if self.light_version: @@ -4839,7 +4702,7 @@ class Eynollah: img_croped = img_poly_on_img[y:y+h, x:x+w, :] #cv2.imwrite('./extracted_lines/'+str(ind_tot)+'.jpg', img_croped) text_ocr = self.return_ocr_of_textline_without_common_section( - img_croped, self.models["ocr"], self.processor, self.device, w, h2w_ratio, ind_tot) + img_croped, self.models["ocr"], self.models['ocr_tr_processor'], self.device, w, h2w_ratio, ind_tot) ocr_textline_in_textregion.append(text_ocr) ind_tot = ind_tot +1 ocr_all_textlines.append(ocr_textline_in_textregion) diff --git a/src/eynollah/image_enhancer.py b/src/eynollah/image_enhancer.py index 9247efe..93b5daa 100644 --- a/src/eynollah/image_enhancer.py +++ b/src/eynollah/image_enhancer.py @@ -22,7 +22,7 @@ from .utils import ( is_image_filename, crop_image_inside_box ) -from .eynollah import PatchEncoder, Patches +from .patch_encoder import PatchEncoder, Patches DPI_THRESHOLD = 298 KERNEL = np.ones((5, 5), np.uint8) diff --git a/src/eynollah/mb_ro_on_layout.py b/src/eynollah/mb_ro_on_layout.py index 1b991ae..0a8a7ae 100644 --- a/src/eynollah/mb_ro_on_layout.py +++ b/src/eynollah/mb_ro_on_layout.py @@ -23,7 +23,7 @@ from .utils.contour import ( return_parent_contours, ) from .utils import is_xml_filename -from .eynollah import PatchEncoder, Patches +from .patch_encoder import PatchEncoder, Patches DPI_THRESHOLD = 298 KERNEL = np.ones((5, 5), np.uint8) diff --git a/src/eynollah/model_zoo.py b/src/eynollah/model_zoo.py new file mode 100644 index 0000000..b332b4a --- /dev/null +++ b/src/eynollah/model_zoo.py @@ -0,0 +1,260 @@ +from dataclasses import dataclass +import json +import logging +from pathlib import Path +from types import MappingProxyType +from typing import Dict, Literal, Optional, Tuple, List, Union +from copy import deepcopy + +from keras.layers import StringLookup +from keras.models import Model, load_model +from transformers import TrOCRProcessor, VisionEncoderDecoderModel + +from eynollah.patch_encoder import PatchEncoder, Patches + + +# Dict mapping model_category to dict mapping variant (default is '') to Path +DEFAULT_MODEL_VERSIONS: Dict[str, Dict[str, str]] = { + + "enhancement": { + '': "eynollah-enhancement_20210425" + }, + + "binarization": { + '': "eynollah-binarization_20210425" + }, + + "col_classifier": { + '': "eynollah-column-classifier_20210425", + }, + + "page": { + '': "model_eynollah_page_extraction_20250915", + }, + + # TODO: What is this commented out model? + #?: "eynollah-main-regions-aug-scaling_20210425", + + # early layout + "region": { + '': "eynollah-main-regions-ensembled_20210425", + 'extract_only_images': "eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18", + 'light': "eynollah-main-regions_20220314", + }, + + # early layout, non-light, 2nd part + "region_p2": { + '': "eynollah-main-regions-aug-rotation_20210425", + }, + + # early layout, light, 1-or-2-column + "region_1_2": { + #'': "modelens_12sp_elay_0_3_4__3_6_n" + #'': "modelens_earlylayout_12spaltige_2_3_5_6_7_8" + #'': "modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18" + #'': "modelens_1_2_4_5_early_lay_1_2_spaltige" + #'': "model_3_eraly_layout_no_patches_1_2_spaltige" + '': "modelens_e_l_all_sp_0_1_2_3_4_171024" + }, + + # full layout / no patches + "region_fl_np": { + #'': "modelens_full_lay_1_3_031124" + #'': "modelens_full_lay_13__3_19_241024" + #'': "model_full_lay_13_241024" + #'': "modelens_full_lay_13_17_231024" + #'': "modelens_full_lay_1_2_221024" + #'': "eynollah-full-regions-1column_20210425" + '': "modelens_full_lay_1__4_3_091124" + }, + + # full layout / with patches + "region_fl": { + #'': "eynollah-full-regions-3+column_20210425" + #'': #"model_2_full_layout_new_trans" + #'': "modelens_full_lay_1_3_031124" + #'': "modelens_full_lay_13__3_19_241024" + #'': "model_full_lay_13_241024" + #'': "modelens_full_lay_13_17_231024" + #'': "modelens_full_lay_1_2_221024" + #'': "modelens_full_layout_24_till_28" + #'': "model_2_full_layout_new_trans" + '': "modelens_full_lay_1__4_3_091124", + }, + + "reading_order": { + #'': "model_mb_ro_aug_ens_11" + #'': "model_step_3200000_mb_ro" + #'': "model_ens_reading_order_machine_based" + #'': "model_mb_ro_aug_ens_8" + #'': "model_ens_reading_order_machine_based" + '': "model_eynollah_reading_order_20250824" + }, + + "textline": { + #'light': "eynollah-textline_light_20210425" + 'light': "modelens_textline_0_1__2_4_16092024", + #'': "modelens_textline_1_4_16092024" + #'': "model_textline_ens_3_4_5_6_artificial" + #'': "modelens_textline_1_3_4_20240915" + #'': "model_textline_ens_3_4_5_6_artificial" + #'': "modelens_textline_9_12_13_14_15" + #'': "eynollah-textline_20210425" + '': "modelens_textline_0_1__2_4_16092024" + }, + + "table": { + 'light': "modelens_table_0t4_201124", + '': "eynollah-tables_20210319", + }, + + "ocr": { + 'tr': "model_eynollah_ocr_trocr_20250919", + '': "model_eynollah_ocr_cnnrnn_20250930", + }, + + 'ocr_tr_processor': { + '': 'microsoft/trocr-base-printed', + 'htr': "microsoft/trocr-base-handwritten", + }, + + 'num_to_char': { + '': 'model_eynollah_ocr_cnnrnn_20250930/characters_org.txt' + }, +} + + +class EynollahModelZoo(): + """ + Wrapper class that handles storage and loading of models for all eynollah runners. + """ + model_basedir: Path + model_versions: dict + + def __init__( + self, + basedir: str, + model_overrides: List[Tuple[str, str, str]], + ) -> None: + self.model_basedir = Path(basedir) + self.logger = logging.getLogger('eynollah.model_zoo') + self.model_versions = deepcopy(DEFAULT_MODEL_VERSIONS) + if model_overrides: + self.override_models(*model_overrides) + + def override_models(self, *model_overrides: Tuple[str, str, str]): + """ + Override the default model versions + """ + for model_category, model_variant, model_filename in model_overrides: + if model_category not in DEFAULT_MODEL_VERSIONS: + raise ValueError(f"Unknown model_category '{model_category}', must be one of {DEFAULT_MODEL_VERSIONS.keys()}") + if model_variant not in DEFAULT_MODEL_VERSIONS[model_category]: + raise ValueError(f"Unknown variant {model_variant} for {model_category}. Known variants: {DEFAULT_MODEL_VERSIONS[model_category].keys()}") + self.logger.warning( + "Overriding default model %s ('%s' variant) from %s to %s", + model_category, + model_variant, + DEFAULT_MODEL_VERSIONS[model_category][model_variant], + model_filename + ) + self.model_versions[model_category][model_variant] = model_filename + + def model_path( + self, + model_category: str, + model_variant: str = '', + model_filename: str = '', + absolute: bool = True, + ) -> Path: + """ + Translate model_{type,variant,filename} tuple into an absolute (or relative) Path + """ + if model_category not in DEFAULT_MODEL_VERSIONS: + raise ValueError(f"Unknown model_category '{model_category}', must be one of {DEFAULT_MODEL_VERSIONS.keys()}") + if model_variant not in DEFAULT_MODEL_VERSIONS[model_category]: + raise ValueError(f"Unknown variant {model_variant} for {model_category}. Known variants: {DEFAULT_MODEL_VERSIONS[model_category].keys()}") + if not model_filename: + model_filename = DEFAULT_MODEL_VERSIONS[model_category][model_variant] + if not Path(model_filename).is_absolute() and absolute: + model_path = Path(self.model_basedir).joinpath(model_filename) + else: + model_path = Path(model_filename) + return model_path + + def load_models( + self, + *all_load_args: Union[str, Tuple[str], Tuple[str, str], Tuple[str, str, str]], + ) -> Dict: + """ + Load all models by calling load_model and return a dictionary mapping model_category to loaded model + """ + ret = {} + for load_args in all_load_args: + if isinstance(load_args, str): + ret[load_args] = self.load_model(load_args) + else: + ret[load_args[0]] = self.load_model(*load_args) + return ret + + def load_model( + self, + model_category: str, + model_variant: str = '', + model_filename: str = '', + ) -> Union[VisionEncoderDecoderModel, TrOCRProcessor, Model]: + """ + Load any model + """ + model_path = self.model_path(model_category, model_variant, model_filename) + if model_path.suffix == '.h5' and Path(model_path.stem).exists(): + # prefer SavedModel over HDF5 format if it exists + model_path = Path(model_path.stem) + if model_category == 'ocr': + model = self._load_ocr_model(variant=model_variant) + elif model_category == 'num_to_char': + model = self._load_num_to_char() + elif model_category == 'tr_processor': + return TrOCRProcessor.from_pretrained(self.model_path(...)) + else: + try: + model = load_model(model_path, compile=False) + except Exception as e: + self.logger.exception(e) + model = load_model(model_path, compile=False, custom_objects={ + "PatchEncoder": PatchEncoder, "Patches": Patches}) + return model # type: ignore + + def _load_ocr_model(self, variant: str) -> Union[VisionEncoderDecoderModel, TrOCRProcessor, Model]: + """ + Load OCR model + """ + ocr_model_dir = Path(self.model_basedir, self.model_versions["ocr"][variant]) + if variant == 'tr': + return VisionEncoderDecoderModel.from_pretrained(ocr_model_dir) + else: + ocr_model = load_model(ocr_model_dir, compile=False) + assert isinstance(ocr_model, Model) + return Model( + ocr_model.get_layer(name = "image").input, # type: ignore + ocr_model.get_layer(name = "dense2").output) # type: ignore + + def _load_num_to_char(self): + """ + Load decoder for OCR + """ + with open(self.model_path('ocr') / self.model_path('ocr', 'num_to_char', absolute=False), "r") as config_file: + characters = json.load(config_file) + # Mapping characters to integers. + char_to_num = StringLookup(vocabulary=list(characters), mask_token=None) + # Mapping integers back to original characters. + return StringLookup( + vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True + ) + + def __str__(self): + return str(json.dumps({ + 'basedir': str(self.model_basedir), + 'versions': self.model_versions, + }, indent=2)) + diff --git a/src/eynollah/patch_encoder.py b/src/eynollah/patch_encoder.py new file mode 100644 index 0000000..939ad7b --- /dev/null +++ b/src/eynollah/patch_encoder.py @@ -0,0 +1,52 @@ +from keras import layers +import tensorflow as tf + +projection_dim = 64 +patch_size = 1 +num_patches =21*21#14*14#28*28#14*14#28*28 + +class PatchEncoder(layers.Layer): + + def __init__(self): + super().__init__() + self.projection = layers.Dense(units=projection_dim) + self.position_embedding = layers.Embedding(input_dim=num_patches, output_dim=projection_dim) + + def call(self, patch): + positions = tf.range(start=0, limit=num_patches, delta=1) + encoded = self.projection(patch) + self.position_embedding(positions) + return encoded + + def get_config(self): + config = super().get_config().copy() + config.update({ + 'num_patches': num_patches, + 'projection': self.projection, + 'position_embedding': self.position_embedding, + }) + return config + +class Patches(layers.Layer): + def __init__(self, **kwargs): + super(Patches, self).__init__() + self.patch_size = patch_size + + def call(self, images): + batch_size = tf.shape(images)[0] + patches = tf.image.extract_patches( + images=images, + sizes=[1, self.patch_size, self.patch_size, 1], + strides=[1, self.patch_size, self.patch_size, 1], + rates=[1, 1, 1, 1], + padding="VALID", + ) + patch_dims = patches.shape[-1] + patches = tf.reshape(patches, [batch_size, -1, patch_dims]) + return patches + def get_config(self): + + config = super().get_config().copy() + config.update({ + 'patch_size': self.patch_size, + }) + return config From b90cfdfcc402c7a9bb91cc6d327a2cf8f543145e Mon Sep 17 00:00:00 2001 From: kba Date: Mon, 20 Oct 2025 18:56:24 +0200 Subject: [PATCH 388/492] adapt tests to -l being top-level option now --- tests/test_run.py | 61 ++++++++++++++++++++++++++--------------------- 1 file changed, 34 insertions(+), 27 deletions(-) diff --git a/tests/test_run.py b/tests/test_run.py index 79c64c2..9606706 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -4,11 +4,7 @@ import pytest import logging from PIL import Image from eynollah.cli import ( - layout as layout_cli, - binarization as binarization_cli, - enhancement as enhancement_cli, - machine_based_reading_order as mbreorder_cli, - ocr as ocr_cli, + main as main_cli, ) from click.testing import CliRunner from ocrd_modelfactory import page_from_file @@ -43,18 +39,19 @@ def test_run_eynollah_layout_filename(tmp_path, pytestconfig, caplog, options): infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') outfile = tmp_path / 'kant_aufklaerung_1784_0020.xml' args = [ + 'layout', '-m', MODELS_LAYOUT, '-i', str(infile), '-o', str(outfile.parent), ] if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) + args = ['-l', 'DEBUG'] + args caplog.set_level(logging.INFO) def only_eynollah(logrec): return logrec.name == 'eynollah' runner = CliRunner() with caplog.filtering(only_eynollah): - result = runner.invoke(layout_cli, args + options, catch_exceptions=False) + result = runner.invoke(main_cli, args + options, catch_exceptions=False) assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] assert str(infile) in logmsgs @@ -78,18 +75,19 @@ def test_run_eynollah_layout_filename2(tmp_path, pytestconfig, caplog, options): infile = testdir.joinpath('resources/euler_rechenkunst01_1738_0025.tif') outfile = tmp_path / 'euler_rechenkunst01_1738_0025.xml' args = [ + 'layout', '-m', MODELS_LAYOUT, '-i', str(infile), '-o', str(outfile.parent), ] if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) + args = ['-l', 'DEBUG'] + args caplog.set_level(logging.INFO) def only_eynollah(logrec): return logrec.name == 'eynollah' runner = CliRunner() with caplog.filtering(only_eynollah): - result = runner.invoke(layout_cli, args + options, catch_exceptions=False) + result = runner.invoke(main_cli, args + options, catch_exceptions=False) assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] assert str(infile) in logmsgs @@ -109,18 +107,19 @@ def test_run_eynollah_layout_directory(tmp_path, pytestconfig, caplog): indir = testdir.joinpath('resources') outdir = tmp_path args = [ + 'layout', '-m', MODELS_LAYOUT, '-di', str(indir), '-o', str(outdir), ] if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) + args = ['-l', 'DEBUG'] + args caplog.set_level(logging.INFO) def only_eynollah(logrec): return logrec.name == 'eynollah' runner = CliRunner() with caplog.filtering(only_eynollah): - result = runner.invoke(layout_cli, args, catch_exceptions=False) + result = runner.invoke(main_cli, args, catch_exceptions=False) assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Job done in')]) == 2 @@ -137,18 +136,19 @@ def test_run_eynollah_binarization_filename(tmp_path, pytestconfig, caplog, opti infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') args = [ + 'binarization', '-m', MODELS_BIN, '-i', str(infile), '-o', str(outfile), ] if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) + args = ['-l', 'DEBUG'] + args caplog.set_level(logging.INFO) def only_eynollah(logrec): return logrec.name == 'SbbBinarizer' runner = CliRunner() with caplog.filtering(only_eynollah): - result = runner.invoke(binarization_cli, args + options, catch_exceptions=False) + result = runner.invoke(main_cli, args + options, catch_exceptions=False) assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] assert any(True for logmsg in logmsgs if logmsg.startswith('Predicting')) @@ -163,18 +163,19 @@ def test_run_eynollah_binarization_directory(tmp_path, pytestconfig, caplog): indir = testdir.joinpath('resources') outdir = tmp_path args = [ + 'binarization', '-m', MODELS_BIN, '-di', str(indir), '-o', str(outdir), ] if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) + args = ['-l', 'DEBUG'] + args caplog.set_level(logging.INFO) def only_eynollah(logrec): return logrec.name == 'SbbBinarizer' runner = CliRunner() with caplog.filtering(only_eynollah): - result = runner.invoke(binarization_cli, args, catch_exceptions=False) + result = runner.invoke(main_cli, args, catch_exceptions=False) assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Predicting')]) == 2 @@ -190,18 +191,19 @@ def test_run_eynollah_enhancement_filename(tmp_path, pytestconfig, caplog, optio infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') args = [ + 'enhancement', '-m', MODELS_LAYOUT, '-i', str(infile), '-o', str(outfile.parent), ] if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) + args = ['-l', 'DEBUG'] + args caplog.set_level(logging.INFO) def only_eynollah(logrec): return logrec.name == 'enhancement' runner = CliRunner() with caplog.filtering(only_eynollah): - result = runner.invoke(enhancement_cli, args + options, catch_exceptions=False) + result = runner.invoke(main_cli, args + options, catch_exceptions=False) assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] assert any(True for logmsg in logmsgs if logmsg.startswith('Image was enhanced')), logmsgs @@ -216,18 +218,19 @@ def test_run_eynollah_enhancement_directory(tmp_path, pytestconfig, caplog): indir = testdir.joinpath('resources') outdir = tmp_path args = [ + 'enhancement', '-m', MODELS_LAYOUT, '-di', str(indir), '-o', str(outdir), ] if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) + args = ['-l', 'DEBUG'] + args caplog.set_level(logging.INFO) def only_eynollah(logrec): return logrec.name == 'enhancement' runner = CliRunner() with caplog.filtering(only_eynollah): - result = runner.invoke(enhancement_cli, args, catch_exceptions=False) + result = runner.invoke(main_cli, args, catch_exceptions=False) assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Image was enhanced')]) == 2 @@ -237,18 +240,19 @@ def test_run_eynollah_mbreorder_filename(tmp_path, pytestconfig, caplog): infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.xml') outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') args = [ + 'machine-based-reading-order', '-m', MODELS_LAYOUT, '-i', str(infile), '-o', str(outfile.parent), ] if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) + args = ['-l', 'DEBUG'] + args caplog.set_level(logging.INFO) def only_eynollah(logrec): return logrec.name == 'mbreorder' runner = CliRunner() with caplog.filtering(only_eynollah): - result = runner.invoke(mbreorder_cli, args, catch_exceptions=False) + result = runner.invoke(main_cli, args, catch_exceptions=False) assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] # FIXME: mbreorder has no logging! @@ -266,18 +270,19 @@ def test_run_eynollah_mbreorder_directory(tmp_path, pytestconfig, caplog): indir = testdir.joinpath('resources') outdir = tmp_path args = [ + 'machine-based-reading-order', '-m', MODELS_LAYOUT, '-di', str(indir), '-o', str(outdir), ] if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) + args = ['-l', 'DEBUG'] + args caplog.set_level(logging.INFO) def only_eynollah(logrec): return logrec.name == 'mbreorder' runner = CliRunner() with caplog.filtering(only_eynollah): - result = runner.invoke(mbreorder_cli, args, catch_exceptions=False) + result = runner.invoke(main_cli, args, catch_exceptions=False) assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] # FIXME: mbreorder has no logging! @@ -298,13 +303,14 @@ def test_run_eynollah_ocr_filename(tmp_path, pytestconfig, caplog, options): outrenderfile = tmp_path.joinpath('render').joinpath('kant_aufklaerung_1784_0020.png') outrenderfile.parent.mkdir() args = [ + 'ocr', '-m', MODELS_OCR, '-i', str(infile), '-dx', str(infile.parent), '-o', str(outfile.parent), ] if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) + args = ['-l', 'DEBUG'] + args caplog.set_level(logging.DEBUG) def only_eynollah(logrec): return logrec.name == 'eynollah' @@ -312,7 +318,7 @@ def test_run_eynollah_ocr_filename(tmp_path, pytestconfig, caplog, options): if "-doit" in options: options.insert(options.index("-doit") + 1, str(outrenderfile.parent)) with caplog.filtering(only_eynollah): - result = runner.invoke(ocr_cli, args + options, catch_exceptions=False) + result = runner.invoke(main_cli, args + options, catch_exceptions=False) assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] # FIXME: ocr has no logging! @@ -331,19 +337,20 @@ def test_run_eynollah_ocr_directory(tmp_path, pytestconfig, caplog): indir = testdir.joinpath('resources') outdir = tmp_path args = [ + 'ocr', '-m', MODELS_OCR, '-di', str(indir), '-dx', str(indir), '-o', str(outdir), ] if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) + args = ['-l', 'DEBUG'] + args caplog.set_level(logging.INFO) def only_eynollah(logrec): return logrec.name == 'eynollah' runner = CliRunner() with caplog.filtering(only_eynollah): - result = runner.invoke(ocr_cli, args, catch_exceptions=False) + result = runner.invoke(main_cli, args, catch_exceptions=False) assert result.exit_code == 0, result.stdout logmsgs = [logrec.message for logrec in caplog.records] # FIXME: ocr has no logging! From 48d1198d2476c553c5941d663115ebcc235444c6 Mon Sep 17 00:00:00 2001 From: kba Date: Mon, 20 Oct 2025 19:15:31 +0200 Subject: [PATCH 389/492] move Eynollah_ocr to separate module --- src/eynollah/cli.py | 4 +- src/eynollah/eynollah.py | 961 ----------------------------------- src/eynollah/eynollah_ocr.py | 961 +++++++++++++++++++++++++++++++++++ 3 files changed, 963 insertions(+), 963 deletions(-) create mode 100644 src/eynollah/eynollah_ocr.py diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index a56e710..4eced59 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -1,11 +1,11 @@ from dataclasses import dataclass import sys -import os import click import logging from typing import Tuple, List from ocrd_utils import initLogging, getLevelName, getLogger -from eynollah.eynollah import Eynollah, Eynollah_ocr +from eynollah.eynollah import Eynollah +from eynollah.eynollah_ocr import Eynollah_ocr from eynollah.sbb_binarize import SbbBinarizer from eynollah.image_enhancer import Enhancer from eynollah.mb_ro_on_layout import machine_based_reading_order_on_layout diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index dadb1e0..0a7b660 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -4739,964 +4739,3 @@ class Eynollah: return pcgts -class Eynollah_ocr: - def __init__( - self, - dir_models, - model_name=None, - dir_xmls=None, - tr_ocr=False, - batch_size=None, - export_textline_images_and_text=False, - do_not_mask_with_textline_contour=False, - pref_of_dataset=None, - min_conf_value_of_textline_text : Optional[float]=None, - logger=None, - ): - self.model_name = model_name - self.tr_ocr = tr_ocr - self.export_textline_images_and_text = export_textline_images_and_text - self.do_not_mask_with_textline_contour = do_not_mask_with_textline_contour - self.pref_of_dataset = pref_of_dataset - self.logger = logger if logger else getLogger('eynollah') - - if not export_textline_images_and_text: - if min_conf_value_of_textline_text: - self.min_conf_value_of_textline_text = float(min_conf_value_of_textline_text) - else: - self.min_conf_value_of_textline_text = 0.3 - if tr_ocr: - self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed") - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - if self.model_name: - self.model_ocr_dir = self.model_name - else: - self.model_ocr_dir = dir_models + "/model_eynollah_ocr_trocr_20250919" - self.model_ocr = VisionEncoderDecoderModel.from_pretrained(self.model_ocr_dir) - self.model_ocr.to(self.device) - if not batch_size: - self.b_s = 2 - else: - self.b_s = int(batch_size) - - else: - if self.model_name: - self.model_ocr_dir = self.model_name - else: - self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250930" - model_ocr = load_model(self.model_ocr_dir , compile=False) - - self.prediction_model = tf.keras.models.Model( - model_ocr.get_layer(name = "image").input, - model_ocr.get_layer(name = "dense2").output) - if not batch_size: - self.b_s = 8 - else: - self.b_s = int(batch_size) - - with open(os.path.join(self.model_ocr_dir, "characters_org.txt"),"r") as config_file: - characters = json.load(config_file) - - AUTOTUNE = tf.data.AUTOTUNE - - # Mapping characters to integers. - char_to_num = StringLookup(vocabulary=list(characters), mask_token=None) - - # Mapping integers back to original characters. - self.num_to_char = StringLookup( - vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True - ) - self.end_character = len(characters) + 2 - - def run(self, overwrite: bool = False, - dir_in: Optional[str] = None, - dir_in_bin: Optional[str] = None, - image_filename: Optional[str] = None, - dir_xmls: Optional[str] = None, - dir_out_image_text: Optional[str] = None, - dir_out: Optional[str] = None, - ): - if dir_in: - ls_imgs = [os.path.join(dir_in, image_filename) - for image_filename in filter(is_image_filename, - os.listdir(dir_in))] - else: - ls_imgs = [image_filename] - - if self.tr_ocr: - tr_ocr_input_height_and_width = 384 - for dir_img in ls_imgs: - file_name = Path(dir_img).stem - dir_xml = os.path.join(dir_xmls, file_name+'.xml') - out_file_ocr = os.path.join(dir_out, file_name+'.xml') - - if os.path.exists(out_file_ocr): - if overwrite: - self.logger.warning("will overwrite existing output file '%s'", out_file_ocr) - else: - self.logger.warning("will skip input for existing output file '%s'", out_file_ocr) - continue - - img = cv2.imread(dir_img) - - if dir_out_image_text: - out_image_with_text = os.path.join(dir_out_image_text, file_name+'.png') - image_text = Image.new("RGB", (img.shape[1], img.shape[0]), "white") - draw = ImageDraw.Draw(image_text) - total_bb_coordinates = [] - - ##file_name = Path(dir_xmls).stem - tree1 = ET.parse(dir_xml, parser = ET.XMLParser(encoding="utf-8")) - root1=tree1.getroot() - alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' - - name_space = alltags[0].split('}')[0] - name_space = name_space.split('{')[1] - - region_tags=np.unique([x for x in alltags if x.endswith('TextRegion')]) - - - - cropped_lines = [] - cropped_lines_region_indexer = [] - cropped_lines_meging_indexing = [] - - extracted_texts = [] - - indexer_text_region = 0 - indexer_b_s = 0 - - for nn in root1.iter(region_tags): - for child_textregion in nn: - if child_textregion.tag.endswith("TextLine"): - - for child_textlines in child_textregion: - if child_textlines.tag.endswith("Coords"): - cropped_lines_region_indexer.append(indexer_text_region) - p_h=child_textlines.attrib['points'].split(' ') - textline_coords = np.array( [ [int(x.split(',')[0]), - int(x.split(',')[1]) ] - for x in p_h] ) - x,y,w,h = cv2.boundingRect(textline_coords) - - if dir_out_image_text: - total_bb_coordinates.append([x,y,w,h]) - - h2w_ratio = h/float(w) - - img_poly_on_img = np.copy(img) - mask_poly = np.zeros(img.shape) - mask_poly = cv2.fillPoly(mask_poly, pts=[textline_coords], color=(1, 1, 1)) - - mask_poly = mask_poly[y:y+h, x:x+w, :] - img_crop = img_poly_on_img[y:y+h, x:x+w, :] - img_crop[mask_poly==0] = 255 - - self.logger.debug("processing %d lines for '%s'", - len(cropped_lines), nn.attrib['id']) - if h2w_ratio > 0.1: - cropped_lines.append(resize_image(img_crop, - tr_ocr_input_height_and_width, - tr_ocr_input_height_and_width) ) - cropped_lines_meging_indexing.append(0) - indexer_b_s+=1 - if indexer_b_s==self.b_s: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate( - pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode( - generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - else: - splited_images, _ = return_textlines_split_if_needed(img_crop, None) - #print(splited_images) - if splited_images: - cropped_lines.append(resize_image(splited_images[0], - tr_ocr_input_height_and_width, - tr_ocr_input_height_and_width)) - cropped_lines_meging_indexing.append(1) - indexer_b_s+=1 - - if indexer_b_s==self.b_s: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate( - pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode( - generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - - cropped_lines.append(resize_image(splited_images[1], - tr_ocr_input_height_and_width, - tr_ocr_input_height_and_width)) - cropped_lines_meging_indexing.append(-1) - indexer_b_s+=1 - - if indexer_b_s==self.b_s: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate( - pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode( - generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - else: - cropped_lines.append(img_crop) - cropped_lines_meging_indexing.append(0) - indexer_b_s+=1 - - if indexer_b_s==self.b_s: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate( - pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode( - generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - - - indexer_text_region = indexer_text_region +1 - - if indexer_b_s!=0: - imgs = cropped_lines[:] - cropped_lines = [] - indexer_b_s = 0 - - pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate(pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode(generated_ids_merged, skip_special_tokens=True) - - extracted_texts = extracted_texts + generated_text_merged - - ####extracted_texts = [] - ####n_iterations = math.ceil(len(cropped_lines) / self.b_s) - - ####for i in range(n_iterations): - ####if i==(n_iterations-1): - ####n_start = i*self.b_s - ####imgs = cropped_lines[n_start:] - ####else: - ####n_start = i*self.b_s - ####n_end = (i+1)*self.b_s - ####imgs = cropped_lines[n_start:n_end] - ####pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values - ####generated_ids_merged = self.model_ocr.generate( - #### pixel_values_merged.to(self.device)) - ####generated_text_merged = self.processor.batch_decode( - #### generated_ids_merged, skip_special_tokens=True) - - ####extracted_texts = extracted_texts + generated_text_merged - - del cropped_lines - gc.collect() - - extracted_texts_merged = [extracted_texts[ind] - if cropped_lines_meging_indexing[ind]==0 - else extracted_texts[ind]+" "+extracted_texts[ind+1] - if cropped_lines_meging_indexing[ind]==1 - else None - for ind in range(len(cropped_lines_meging_indexing))] - - extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] - #print(extracted_texts_merged, len(extracted_texts_merged)) - - unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) - - if dir_out_image_text: - - #font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! - font = importlib_resources.files(__package__) / "Charis-Regular.ttf" - with importlib_resources.as_file(font) as font: - font = ImageFont.truetype(font=font, size=40) - - for indexer_text, bb_ind in enumerate(total_bb_coordinates): - - - x_bb = bb_ind[0] - y_bb = bb_ind[1] - w_bb = bb_ind[2] - h_bb = bb_ind[3] - - font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], - font.path, w_bb, int(h_bb*0.4) ) - - ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) - - text_bbox = draw.textbbox((0, 0), extracted_texts_merged[indexer_text], font=font) - text_width = text_bbox[2] - text_bbox[0] - text_height = text_bbox[3] - text_bbox[1] - - text_x = x_bb + (w_bb - text_width) // 2 # Center horizontally - text_y = y_bb + (h_bb - text_height) // 2 # Center vertically - - # Draw the text - draw.text((text_x, text_y), extracted_texts_merged[indexer_text], fill="black", font=font) - image_text.save(out_image_with_text) - - #print(len(unique_cropped_lines_region_indexer), 'unique_cropped_lines_region_indexer') - #######text_by_textregion = [] - #######for ind in unique_cropped_lines_region_indexer: - #######ind = np.array(cropped_lines_region_indexer)==ind - #######extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] - #######text_by_textregion.append(" ".join(extracted_texts_merged_un)) - - text_by_textregion = [] - for ind in unique_cropped_lines_region_indexer: - ind = np.array(cropped_lines_region_indexer) == ind - extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] - if len(extracted_texts_merged_un)>1: - text_by_textregion_ind = "" - next_glue = "" - for indt in range(len(extracted_texts_merged_un)): - if (extracted_texts_merged_un[indt].endswith('⸗') or - extracted_texts_merged_un[indt].endswith('-') or - extracted_texts_merged_un[indt].endswith('¬')): - text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt][:-1] - next_glue = "" - else: - text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt] - next_glue = " " - text_by_textregion.append(text_by_textregion_ind) - else: - text_by_textregion.append(" ".join(extracted_texts_merged_un)) - - - indexer = 0 - indexer_textregion = 0 - for nn in root1.iter(region_tags): - #id_textregion = nn.attrib['id'] - #id_textregions.append(id_textregion) - #textregions_by_existing_ids.append(text_by_textregion[indexer_textregion]) - - is_textregion_text = False - for childtest in nn: - if childtest.tag.endswith("TextEquiv"): - is_textregion_text = True - - if not is_textregion_text: - text_subelement_textregion = ET.SubElement(nn, 'TextEquiv') - unicode_textregion = ET.SubElement(text_subelement_textregion, 'Unicode') - - - has_textline = False - for child_textregion in nn: - if child_textregion.tag.endswith("TextLine"): - - is_textline_text = False - for childtest2 in child_textregion: - if childtest2.tag.endswith("TextEquiv"): - is_textline_text = True - - - if not is_textline_text: - text_subelement = ET.SubElement(child_textregion, 'TextEquiv') - ##text_subelement.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") - unicode_textline = ET.SubElement(text_subelement, 'Unicode') - unicode_textline.text = extracted_texts_merged[indexer] - else: - for childtest3 in child_textregion: - if childtest3.tag.endswith("TextEquiv"): - for child_uc in childtest3: - if child_uc.tag.endswith("Unicode"): - ##childtest3.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") - child_uc.text = extracted_texts_merged[indexer] - - indexer = indexer + 1 - has_textline = True - if has_textline: - if is_textregion_text: - for child4 in nn: - if child4.tag.endswith("TextEquiv"): - for childtr_uc in child4: - if childtr_uc.tag.endswith("Unicode"): - childtr_uc.text = text_by_textregion[indexer_textregion] - else: - unicode_textregion.text = text_by_textregion[indexer_textregion] - indexer_textregion = indexer_textregion + 1 - - ###sample_order = [(id_to_order[tid], text) - ### for tid, text in zip(id_textregions, textregions_by_existing_ids) - ### if tid in id_to_order] - - ##ordered_texts_sample = [text for _, text in sorted(sample_order)] - ##tot_page_text = ' '.join(ordered_texts_sample) - - ##for page_element in root1.iter(link+'Page'): - ##text_page = ET.SubElement(page_element, 'TextEquiv') - ##unicode_textpage = ET.SubElement(text_page, 'Unicode') - ##unicode_textpage.text = tot_page_text - - ET.register_namespace("",name_space) - tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf-8",default_namespace=None) - else: - ###max_len = 280#512#280#512 - ###padding_token = 1500#299#1500#299 - image_width = 512#max_len * 4 - image_height = 32 - - - img_size=(image_width, image_height) - - for dir_img in ls_imgs: - file_name = Path(dir_img).stem - dir_xml = os.path.join(dir_xmls, file_name+'.xml') - out_file_ocr = os.path.join(dir_out, file_name+'.xml') - - if os.path.exists(out_file_ocr): - if overwrite: - self.logger.warning("will overwrite existing output file '%s'", out_file_ocr) - else: - self.logger.warning("will skip input for existing output file '%s'", out_file_ocr) - continue - - img = cv2.imread(dir_img) - if dir_in_bin is not None: - cropped_lines_bin = [] - dir_img_bin = os.path.join(dir_in_bin, file_name+'.png') - img_bin = cv2.imread(dir_img_bin) - - if dir_out_image_text: - out_image_with_text = os.path.join(dir_out_image_text, file_name+'.png') - image_text = Image.new("RGB", (img.shape[1], img.shape[0]), "white") - draw = ImageDraw.Draw(image_text) - total_bb_coordinates = [] - - tree1 = ET.parse(dir_xml, parser = ET.XMLParser(encoding="utf-8")) - root1=tree1.getroot() - alltags=[elem.tag for elem in root1.iter()] - link=alltags[0].split('}')[0]+'}' - - name_space = alltags[0].split('}')[0] - name_space = name_space.split('{')[1] - - region_tags=np.unique([x for x in alltags if x.endswith('TextRegion')]) - - cropped_lines = [] - cropped_lines_ver_index = [] - cropped_lines_region_indexer = [] - cropped_lines_meging_indexing = [] - - tinl = time.time() - indexer_text_region = 0 - indexer_textlines = 0 - for nn in root1.iter(region_tags): - try: - type_textregion = nn.attrib['type'] - except: - type_textregion = 'paragraph' - for child_textregion in nn: - if child_textregion.tag.endswith("TextLine"): - for child_textlines in child_textregion: - if child_textlines.tag.endswith("Coords"): - cropped_lines_region_indexer.append(indexer_text_region) - p_h=child_textlines.attrib['points'].split(' ') - textline_coords = np.array( [ [int(x.split(',')[0]), - int(x.split(',')[1]) ] - for x in p_h] ) - - x,y,w,h = cv2.boundingRect(textline_coords) - - angle_radians = math.atan2(h, w) - # Convert to degrees - angle_degrees = math.degrees(angle_radians) - if type_textregion=='drop-capital': - angle_degrees = 0 - - if dir_out_image_text: - total_bb_coordinates.append([x,y,w,h]) - - w_scaled = w * image_height/float(h) - - img_poly_on_img = np.copy(img) - if dir_in_bin is not None: - img_poly_on_img_bin = np.copy(img_bin) - img_crop_bin = img_poly_on_img_bin[y:y+h, x:x+w, :] - - mask_poly = np.zeros(img.shape) - mask_poly = cv2.fillPoly(mask_poly, pts=[textline_coords], color=(1, 1, 1)) - - - mask_poly = mask_poly[y:y+h, x:x+w, :] - img_crop = img_poly_on_img[y:y+h, x:x+w, :] - - if self.export_textline_images_and_text: - if not self.do_not_mask_with_textline_contour: - img_crop[mask_poly==0] = 255 - - else: - # print(file_name, angle_degrees, w*h, - # mask_poly[:,:,0].sum(), - # mask_poly[:,:,0].sum() /float(w*h) , - # 'didi') - - if angle_degrees > 3: - better_des_slope = get_orientation_moments(textline_coords) - - img_crop = rotate_image_with_padding(img_crop, better_des_slope) - if dir_in_bin is not None: - img_crop_bin = rotate_image_with_padding(img_crop_bin, better_des_slope) - - mask_poly = rotate_image_with_padding(mask_poly, better_des_slope) - mask_poly = mask_poly.astype('uint8') - - #new bounding box - x_n, y_n, w_n, h_n = get_contours_and_bounding_boxes(mask_poly[:,:,0]) - - mask_poly = mask_poly[y_n:y_n+h_n, x_n:x_n+w_n, :] - img_crop = img_crop[y_n:y_n+h_n, x_n:x_n+w_n, :] - - if not self.do_not_mask_with_textline_contour: - img_crop[mask_poly==0] = 255 - if dir_in_bin is not None: - img_crop_bin = img_crop_bin[y_n:y_n+h_n, x_n:x_n+w_n, :] - if not self.do_not_mask_with_textline_contour: - img_crop_bin[mask_poly==0] = 255 - - if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 90: - if dir_in_bin is not None: - img_crop, img_crop_bin = \ - break_curved_line_into_small_pieces_and_then_merge( - img_crop, mask_poly, img_crop_bin) - else: - img_crop, _ = \ - break_curved_line_into_small_pieces_and_then_merge( - img_crop, mask_poly) - - else: - better_des_slope = 0 - if not self.do_not_mask_with_textline_contour: - img_crop[mask_poly==0] = 255 - if dir_in_bin is not None: - if not self.do_not_mask_with_textline_contour: - img_crop_bin[mask_poly==0] = 255 - if type_textregion=='drop-capital': - pass - else: - if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 90: - if dir_in_bin is not None: - img_crop, img_crop_bin = \ - break_curved_line_into_small_pieces_and_then_merge( - img_crop, mask_poly, img_crop_bin) - else: - img_crop, _ = \ - break_curved_line_into_small_pieces_and_then_merge( - img_crop, mask_poly) - - if not self.export_textline_images_and_text: - if w_scaled < 750:#1.5*image_width: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop, image_height, image_width) - cropped_lines.append(img_fin) - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - cropped_lines_meging_indexing.append(0) - if dir_in_bin is not None: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop_bin, image_height, image_width) - cropped_lines_bin.append(img_fin) - else: - splited_images, splited_images_bin = return_textlines_split_if_needed( - img_crop, img_crop_bin if dir_in_bin is not None else None) - if splited_images: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - splited_images[0], image_height, image_width) - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(1) - - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - splited_images[1], image_height, image_width) - - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(-1) - - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - if dir_in_bin is not None: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - splited_images_bin[0], image_height, image_width) - cropped_lines_bin.append(img_fin) - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - splited_images_bin[1], image_height, image_width) - cropped_lines_bin.append(img_fin) - - else: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop, image_height, image_width) - cropped_lines.append(img_fin) - cropped_lines_meging_indexing.append(0) - - if abs(better_des_slope) > 45: - cropped_lines_ver_index.append(1) - else: - cropped_lines_ver_index.append(0) - - if dir_in_bin is not None: - img_fin = preprocess_and_resize_image_for_ocrcnn_model( - img_crop_bin, image_height, image_width) - cropped_lines_bin.append(img_fin) - - if self.export_textline_images_and_text: - if img_crop.shape[0]==0 or img_crop.shape[1]==0: - pass - else: - if child_textlines.tag.endswith("TextEquiv"): - for cheild_text in child_textlines: - if cheild_text.tag.endswith("Unicode"): - textline_text = cheild_text.text - if textline_text: - base_name = os.path.join( - dir_out, file_name + '_line_' + str(indexer_textlines)) - if self.pref_of_dataset: - base_name += '_' + self.pref_of_dataset - if not self.do_not_mask_with_textline_contour: - base_name += '_masked' - - with open(base_name + '.txt', 'w') as text_file: - text_file.write(textline_text) - cv2.imwrite(base_name + '.png', img_crop) - indexer_textlines+=1 - - if not self.export_textline_images_and_text: - indexer_text_region = indexer_text_region +1 - - if not self.export_textline_images_and_text: - extracted_texts = [] - extracted_conf_value = [] - - n_iterations = math.ceil(len(cropped_lines) / self.b_s) - - for i in range(n_iterations): - if i==(n_iterations-1): - n_start = i*self.b_s - imgs = cropped_lines[n_start:] - imgs = np.array(imgs) - imgs = imgs.reshape(imgs.shape[0], image_height, image_width, 3) - - ver_imgs = np.array( cropped_lines_ver_index[n_start:] ) - indices_ver = np.where(ver_imgs == 1)[0] - - #print(indices_ver, 'indices_ver') - if len(indices_ver)>0: - imgs_ver_flipped = imgs[indices_ver, : ,: ,:] - imgs_ver_flipped = imgs_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - - else: - imgs_ver_flipped = None - - if dir_in_bin is not None: - imgs_bin = cropped_lines_bin[n_start:] - imgs_bin = np.array(imgs_bin) - imgs_bin = imgs_bin.reshape(imgs_bin.shape[0], image_height, image_width, 3) - - if len(indices_ver)>0: - imgs_bin_ver_flipped = imgs_bin[indices_ver, : ,: ,:] - imgs_bin_ver_flipped = imgs_bin_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - - else: - imgs_bin_ver_flipped = None - else: - n_start = i*self.b_s - n_end = (i+1)*self.b_s - imgs = cropped_lines[n_start:n_end] - imgs = np.array(imgs).reshape(self.b_s, image_height, image_width, 3) - - ver_imgs = np.array( cropped_lines_ver_index[n_start:n_end] ) - indices_ver = np.where(ver_imgs == 1)[0] - #print(indices_ver, 'indices_ver') - - if len(indices_ver)>0: - imgs_ver_flipped = imgs[indices_ver, : ,: ,:] - imgs_ver_flipped = imgs_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - else: - imgs_ver_flipped = None - - - if dir_in_bin is not None: - imgs_bin = cropped_lines_bin[n_start:n_end] - imgs_bin = np.array(imgs_bin).reshape(self.b_s, image_height, image_width, 3) - - - if len(indices_ver)>0: - imgs_bin_ver_flipped = imgs_bin[indices_ver, : ,: ,:] - imgs_bin_ver_flipped = imgs_bin_ver_flipped[:,::-1,::-1,:] - #print(imgs_ver_flipped, 'imgs_ver_flipped') - else: - imgs_bin_ver_flipped = None - - - self.logger.debug("processing next %d lines", len(imgs)) - preds = self.prediction_model.predict(imgs, verbose=0) - - if len(indices_ver)>0: - preds_flipped = self.prediction_model.predict(imgs_ver_flipped, verbose=0) - preds_max_fliped = np.max(preds_flipped, axis=2 ) - preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) - pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character - masked_means_flipped = \ - np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) - masked_means_flipped[np.isnan(masked_means_flipped)] = 0 - - preds_max = np.max(preds, axis=2 ) - preds_max_args = np.argmax(preds, axis=2 ) - pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character - - masked_means = \ - np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool, axis=1) - masked_means[np.isnan(masked_means)] = 0 - - masked_means_ver = masked_means[indices_ver] - #print(masked_means_ver, 'pred_max_not_unk') - - indices_where_flipped_conf_value_is_higher = \ - np.where(masked_means_flipped > masked_means_ver)[0] - - #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') - if len(indices_where_flipped_conf_value_is_higher)>0: - indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] - preds[indices_to_be_replaced,:,:] = \ - preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] - if dir_in_bin is not None: - preds_bin = self.prediction_model.predict(imgs_bin, verbose=0) - - if len(indices_ver)>0: - preds_flipped = self.prediction_model.predict(imgs_bin_ver_flipped, verbose=0) - preds_max_fliped = np.max(preds_flipped, axis=2 ) - preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) - pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character - masked_means_flipped = \ - np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) - masked_means_flipped[np.isnan(masked_means_flipped)] = 0 - - preds_max = np.max(preds, axis=2 ) - preds_max_args = np.argmax(preds, axis=2 ) - pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character - - masked_means = \ - np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool, axis=1) - masked_means[np.isnan(masked_means)] = 0 - - masked_means_ver = masked_means[indices_ver] - #print(masked_means_ver, 'pred_max_not_unk') - - indices_where_flipped_conf_value_is_higher = \ - np.where(masked_means_flipped > masked_means_ver)[0] - - #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') - if len(indices_where_flipped_conf_value_is_higher)>0: - indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] - preds_bin[indices_to_be_replaced,:,:] = \ - preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] - - preds = (preds + preds_bin) / 2. - - pred_texts = decode_batch_predictions(preds, self.num_to_char) - - preds_max = np.max(preds, axis=2 ) - preds_max_args = np.argmax(preds, axis=2 ) - pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character - masked_means = \ - np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ - np.sum(pred_max_not_unk_mask_bool, axis=1) - - for ib in range(imgs.shape[0]): - pred_texts_ib = pred_texts[ib].replace("[UNK]", "") - if masked_means[ib] >= self.min_conf_value_of_textline_text: - extracted_texts.append(pred_texts_ib) - extracted_conf_value.append(masked_means[ib]) - else: - extracted_texts.append("") - extracted_conf_value.append(0) - del cropped_lines - if dir_in_bin is not None: - del cropped_lines_bin - gc.collect() - - extracted_texts_merged = [extracted_texts[ind] - if cropped_lines_meging_indexing[ind]==0 - else extracted_texts[ind]+" "+extracted_texts[ind+1] - if cropped_lines_meging_indexing[ind]==1 - else None - for ind in range(len(cropped_lines_meging_indexing))] - - extracted_conf_value_merged = [extracted_conf_value[ind] - if cropped_lines_meging_indexing[ind]==0 - else (extracted_conf_value[ind]+extracted_conf_value[ind+1])/2. - if cropped_lines_meging_indexing[ind]==1 - else None - for ind in range(len(cropped_lines_meging_indexing))] - - extracted_conf_value_merged = [extracted_conf_value_merged[ind_cfm] - for ind_cfm in range(len(extracted_texts_merged)) - if extracted_texts_merged[ind_cfm] is not None] - extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] - unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) - - if dir_out_image_text: - #font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! - font = importlib_resources.files(__package__) / "Charis-Regular.ttf" - with importlib_resources.as_file(font) as font: - font = ImageFont.truetype(font=font, size=40) - - for indexer_text, bb_ind in enumerate(total_bb_coordinates): - x_bb = bb_ind[0] - y_bb = bb_ind[1] - w_bb = bb_ind[2] - h_bb = bb_ind[3] - - font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], - font.path, w_bb, int(h_bb*0.4) ) - - ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) - - text_bbox = draw.textbbox((0, 0), extracted_texts_merged[indexer_text], font=font) - text_width = text_bbox[2] - text_bbox[0] - text_height = text_bbox[3] - text_bbox[1] - - text_x = x_bb + (w_bb - text_width) // 2 # Center horizontally - text_y = y_bb + (h_bb - text_height) // 2 # Center vertically - - # Draw the text - draw.text((text_x, text_y), extracted_texts_merged[indexer_text], fill="black", font=font) - image_text.save(out_image_with_text) - - text_by_textregion = [] - for ind in unique_cropped_lines_region_indexer: - ind = np.array(cropped_lines_region_indexer)==ind - extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] - if len(extracted_texts_merged_un)>1: - text_by_textregion_ind = "" - next_glue = "" - for indt in range(len(extracted_texts_merged_un)): - if (extracted_texts_merged_un[indt].endswith('⸗') or - extracted_texts_merged_un[indt].endswith('-') or - extracted_texts_merged_un[indt].endswith('¬')): - text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt][:-1] - next_glue = "" - else: - text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt] - next_glue = " " - text_by_textregion.append(text_by_textregion_ind) - else: - text_by_textregion.append(" ".join(extracted_texts_merged_un)) - #print(text_by_textregion, 'text_by_textregiontext_by_textregiontext_by_textregiontext_by_textregiontext_by_textregion') - - ###index_tot_regions = [] - ###tot_region_ref = [] - - ###for jj in root1.iter(link+'RegionRefIndexed'): - ###index_tot_regions.append(jj.attrib['index']) - ###tot_region_ref.append(jj.attrib['regionRef']) - - ###id_to_order = {tid: ro for tid, ro in zip(tot_region_ref, index_tot_regions)} - - #id_textregions = [] - #textregions_by_existing_ids = [] - indexer = 0 - indexer_textregion = 0 - for nn in root1.iter(region_tags): - #id_textregion = nn.attrib['id'] - #id_textregions.append(id_textregion) - #textregions_by_existing_ids.append(text_by_textregion[indexer_textregion]) - - is_textregion_text = False - for childtest in nn: - if childtest.tag.endswith("TextEquiv"): - is_textregion_text = True - - if not is_textregion_text: - text_subelement_textregion = ET.SubElement(nn, 'TextEquiv') - unicode_textregion = ET.SubElement(text_subelement_textregion, 'Unicode') - - - has_textline = False - for child_textregion in nn: - if child_textregion.tag.endswith("TextLine"): - - is_textline_text = False - for childtest2 in child_textregion: - if childtest2.tag.endswith("TextEquiv"): - is_textline_text = True - - - if not is_textline_text: - text_subelement = ET.SubElement(child_textregion, 'TextEquiv') - text_subelement.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") - unicode_textline = ET.SubElement(text_subelement, 'Unicode') - unicode_textline.text = extracted_texts_merged[indexer] - else: - for childtest3 in child_textregion: - if childtest3.tag.endswith("TextEquiv"): - for child_uc in childtest3: - if child_uc.tag.endswith("Unicode"): - childtest3.set('conf', - f"{extracted_conf_value_merged[indexer]:.2f}") - child_uc.text = extracted_texts_merged[indexer] - - indexer = indexer + 1 - has_textline = True - if has_textline: - if is_textregion_text: - for child4 in nn: - if child4.tag.endswith("TextEquiv"): - for childtr_uc in child4: - if childtr_uc.tag.endswith("Unicode"): - childtr_uc.text = text_by_textregion[indexer_textregion] - else: - unicode_textregion.text = text_by_textregion[indexer_textregion] - indexer_textregion = indexer_textregion + 1 - - ###sample_order = [(id_to_order[tid], text) - ### for tid, text in zip(id_textregions, textregions_by_existing_ids) - ### if tid in id_to_order] - - ##ordered_texts_sample = [text for _, text in sorted(sample_order)] - ##tot_page_text = ' '.join(ordered_texts_sample) - - ##for page_element in root1.iter(link+'Page'): - ##text_page = ET.SubElement(page_element, 'TextEquiv') - ##unicode_textpage = ET.SubElement(text_page, 'Unicode') - ##unicode_textpage.text = tot_page_text - - ET.register_namespace("",name_space) - tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf-8",default_namespace=None) - #print("Job done in %.1fs", time.time() - t0) diff --git a/src/eynollah/eynollah_ocr.py b/src/eynollah/eynollah_ocr.py new file mode 100644 index 0000000..19825c5 --- /dev/null +++ b/src/eynollah/eynollah_ocr.py @@ -0,0 +1,961 @@ +class Eynollah_ocr: + def __init__( + self, + dir_models, + model_name=None, + dir_xmls=None, + tr_ocr=False, + batch_size=None, + export_textline_images_and_text=False, + do_not_mask_with_textline_contour=False, + pref_of_dataset=None, + min_conf_value_of_textline_text : Optional[float]=None, + logger=None, + ): + self.model_name = model_name + self.tr_ocr = tr_ocr + self.export_textline_images_and_text = export_textline_images_and_text + self.do_not_mask_with_textline_contour = do_not_mask_with_textline_contour + self.pref_of_dataset = pref_of_dataset + self.logger = logger if logger else getLogger('eynollah') + + if not export_textline_images_and_text: + if min_conf_value_of_textline_text: + self.min_conf_value_of_textline_text = float(min_conf_value_of_textline_text) + else: + self.min_conf_value_of_textline_text = 0.3 + if tr_ocr: + self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed") + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + if self.model_name: + self.model_ocr_dir = self.model_name + else: + self.model_ocr_dir = dir_models + "/model_eynollah_ocr_trocr_20250919" + self.model_ocr = VisionEncoderDecoderModel.from_pretrained(self.model_ocr_dir) + self.model_ocr.to(self.device) + if not batch_size: + self.b_s = 2 + else: + self.b_s = int(batch_size) + + else: + if self.model_name: + self.model_ocr_dir = self.model_name + else: + self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250930" + model_ocr = load_model(self.model_ocr_dir , compile=False) + + self.prediction_model = tf.keras.models.Model( + model_ocr.get_layer(name = "image").input, + model_ocr.get_layer(name = "dense2").output) + if not batch_size: + self.b_s = 8 + else: + self.b_s = int(batch_size) + + with open(os.path.join(self.model_ocr_dir, "characters_org.txt"),"r") as config_file: + characters = json.load(config_file) + + AUTOTUNE = tf.data.AUTOTUNE + + # Mapping characters to integers. + char_to_num = StringLookup(vocabulary=list(characters), mask_token=None) + + # Mapping integers back to original characters. + self.num_to_char = StringLookup( + vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True + ) + self.end_character = len(characters) + 2 + + def run(self, overwrite: bool = False, + dir_in: Optional[str] = None, + dir_in_bin: Optional[str] = None, + image_filename: Optional[str] = None, + dir_xmls: Optional[str] = None, + dir_out_image_text: Optional[str] = None, + dir_out: Optional[str] = None, + ): + if dir_in: + ls_imgs = [os.path.join(dir_in, image_filename) + for image_filename in filter(is_image_filename, + os.listdir(dir_in))] + else: + ls_imgs = [image_filename] + + if self.tr_ocr: + tr_ocr_input_height_and_width = 384 + for dir_img in ls_imgs: + file_name = Path(dir_img).stem + dir_xml = os.path.join(dir_xmls, file_name+'.xml') + out_file_ocr = os.path.join(dir_out, file_name+'.xml') + + if os.path.exists(out_file_ocr): + if overwrite: + self.logger.warning("will overwrite existing output file '%s'", out_file_ocr) + else: + self.logger.warning("will skip input for existing output file '%s'", out_file_ocr) + continue + + img = cv2.imread(dir_img) + + if dir_out_image_text: + out_image_with_text = os.path.join(dir_out_image_text, file_name+'.png') + image_text = Image.new("RGB", (img.shape[1], img.shape[0]), "white") + draw = ImageDraw.Draw(image_text) + total_bb_coordinates = [] + + ##file_name = Path(dir_xmls).stem + tree1 = ET.parse(dir_xml, parser = ET.XMLParser(encoding="utf-8")) + root1=tree1.getroot() + alltags=[elem.tag for elem in root1.iter()] + link=alltags[0].split('}')[0]+'}' + + name_space = alltags[0].split('}')[0] + name_space = name_space.split('{')[1] + + region_tags=np.unique([x for x in alltags if x.endswith('TextRegion')]) + + + + cropped_lines = [] + cropped_lines_region_indexer = [] + cropped_lines_meging_indexing = [] + + extracted_texts = [] + + indexer_text_region = 0 + indexer_b_s = 0 + + for nn in root1.iter(region_tags): + for child_textregion in nn: + if child_textregion.tag.endswith("TextLine"): + + for child_textlines in child_textregion: + if child_textlines.tag.endswith("Coords"): + cropped_lines_region_indexer.append(indexer_text_region) + p_h=child_textlines.attrib['points'].split(' ') + textline_coords = np.array( [ [int(x.split(',')[0]), + int(x.split(',')[1]) ] + for x in p_h] ) + x,y,w,h = cv2.boundingRect(textline_coords) + + if dir_out_image_text: + total_bb_coordinates.append([x,y,w,h]) + + h2w_ratio = h/float(w) + + img_poly_on_img = np.copy(img) + mask_poly = np.zeros(img.shape) + mask_poly = cv2.fillPoly(mask_poly, pts=[textline_coords], color=(1, 1, 1)) + + mask_poly = mask_poly[y:y+h, x:x+w, :] + img_crop = img_poly_on_img[y:y+h, x:x+w, :] + img_crop[mask_poly==0] = 255 + + self.logger.debug("processing %d lines for '%s'", + len(cropped_lines), nn.attrib['id']) + if h2w_ratio > 0.1: + cropped_lines.append(resize_image(img_crop, + tr_ocr_input_height_and_width, + tr_ocr_input_height_and_width) ) + cropped_lines_meging_indexing.append(0) + indexer_b_s+=1 + if indexer_b_s==self.b_s: + imgs = cropped_lines[:] + cropped_lines = [] + indexer_b_s = 0 + + pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values + generated_ids_merged = self.model_ocr.generate( + pixel_values_merged.to(self.device)) + generated_text_merged = self.processor.batch_decode( + generated_ids_merged, skip_special_tokens=True) + + extracted_texts = extracted_texts + generated_text_merged + + else: + splited_images, _ = return_textlines_split_if_needed(img_crop, None) + #print(splited_images) + if splited_images: + cropped_lines.append(resize_image(splited_images[0], + tr_ocr_input_height_and_width, + tr_ocr_input_height_and_width)) + cropped_lines_meging_indexing.append(1) + indexer_b_s+=1 + + if indexer_b_s==self.b_s: + imgs = cropped_lines[:] + cropped_lines = [] + indexer_b_s = 0 + + pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values + generated_ids_merged = self.model_ocr.generate( + pixel_values_merged.to(self.device)) + generated_text_merged = self.processor.batch_decode( + generated_ids_merged, skip_special_tokens=True) + + extracted_texts = extracted_texts + generated_text_merged + + + cropped_lines.append(resize_image(splited_images[1], + tr_ocr_input_height_and_width, + tr_ocr_input_height_and_width)) + cropped_lines_meging_indexing.append(-1) + indexer_b_s+=1 + + if indexer_b_s==self.b_s: + imgs = cropped_lines[:] + cropped_lines = [] + indexer_b_s = 0 + + pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values + generated_ids_merged = self.model_ocr.generate( + pixel_values_merged.to(self.device)) + generated_text_merged = self.processor.batch_decode( + generated_ids_merged, skip_special_tokens=True) + + extracted_texts = extracted_texts + generated_text_merged + + else: + cropped_lines.append(img_crop) + cropped_lines_meging_indexing.append(0) + indexer_b_s+=1 + + if indexer_b_s==self.b_s: + imgs = cropped_lines[:] + cropped_lines = [] + indexer_b_s = 0 + + pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values + generated_ids_merged = self.model_ocr.generate( + pixel_values_merged.to(self.device)) + generated_text_merged = self.processor.batch_decode( + generated_ids_merged, skip_special_tokens=True) + + extracted_texts = extracted_texts + generated_text_merged + + + + indexer_text_region = indexer_text_region +1 + + if indexer_b_s!=0: + imgs = cropped_lines[:] + cropped_lines = [] + indexer_b_s = 0 + + pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values + generated_ids_merged = self.model_ocr.generate(pixel_values_merged.to(self.device)) + generated_text_merged = self.processor.batch_decode(generated_ids_merged, skip_special_tokens=True) + + extracted_texts = extracted_texts + generated_text_merged + + ####extracted_texts = [] + ####n_iterations = math.ceil(len(cropped_lines) / self.b_s) + + ####for i in range(n_iterations): + ####if i==(n_iterations-1): + ####n_start = i*self.b_s + ####imgs = cropped_lines[n_start:] + ####else: + ####n_start = i*self.b_s + ####n_end = (i+1)*self.b_s + ####imgs = cropped_lines[n_start:n_end] + ####pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values + ####generated_ids_merged = self.model_ocr.generate( + #### pixel_values_merged.to(self.device)) + ####generated_text_merged = self.processor.batch_decode( + #### generated_ids_merged, skip_special_tokens=True) + + ####extracted_texts = extracted_texts + generated_text_merged + + del cropped_lines + gc.collect() + + extracted_texts_merged = [extracted_texts[ind] + if cropped_lines_meging_indexing[ind]==0 + else extracted_texts[ind]+" "+extracted_texts[ind+1] + if cropped_lines_meging_indexing[ind]==1 + else None + for ind in range(len(cropped_lines_meging_indexing))] + + extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] + #print(extracted_texts_merged, len(extracted_texts_merged)) + + unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) + + if dir_out_image_text: + + #font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! + font = importlib_resources.files(__package__) / "Charis-Regular.ttf" + with importlib_resources.as_file(font) as font: + font = ImageFont.truetype(font=font, size=40) + + for indexer_text, bb_ind in enumerate(total_bb_coordinates): + + + x_bb = bb_ind[0] + y_bb = bb_ind[1] + w_bb = bb_ind[2] + h_bb = bb_ind[3] + + font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], + font.path, w_bb, int(h_bb*0.4) ) + + ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) + + text_bbox = draw.textbbox((0, 0), extracted_texts_merged[indexer_text], font=font) + text_width = text_bbox[2] - text_bbox[0] + text_height = text_bbox[3] - text_bbox[1] + + text_x = x_bb + (w_bb - text_width) // 2 # Center horizontally + text_y = y_bb + (h_bb - text_height) // 2 # Center vertically + + # Draw the text + draw.text((text_x, text_y), extracted_texts_merged[indexer_text], fill="black", font=font) + image_text.save(out_image_with_text) + + #print(len(unique_cropped_lines_region_indexer), 'unique_cropped_lines_region_indexer') + #######text_by_textregion = [] + #######for ind in unique_cropped_lines_region_indexer: + #######ind = np.array(cropped_lines_region_indexer)==ind + #######extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] + #######text_by_textregion.append(" ".join(extracted_texts_merged_un)) + + text_by_textregion = [] + for ind in unique_cropped_lines_region_indexer: + ind = np.array(cropped_lines_region_indexer) == ind + extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] + if len(extracted_texts_merged_un)>1: + text_by_textregion_ind = "" + next_glue = "" + for indt in range(len(extracted_texts_merged_un)): + if (extracted_texts_merged_un[indt].endswith('⸗') or + extracted_texts_merged_un[indt].endswith('-') or + extracted_texts_merged_un[indt].endswith('¬')): + text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt][:-1] + next_glue = "" + else: + text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt] + next_glue = " " + text_by_textregion.append(text_by_textregion_ind) + else: + text_by_textregion.append(" ".join(extracted_texts_merged_un)) + + + indexer = 0 + indexer_textregion = 0 + for nn in root1.iter(region_tags): + #id_textregion = nn.attrib['id'] + #id_textregions.append(id_textregion) + #textregions_by_existing_ids.append(text_by_textregion[indexer_textregion]) + + is_textregion_text = False + for childtest in nn: + if childtest.tag.endswith("TextEquiv"): + is_textregion_text = True + + if not is_textregion_text: + text_subelement_textregion = ET.SubElement(nn, 'TextEquiv') + unicode_textregion = ET.SubElement(text_subelement_textregion, 'Unicode') + + + has_textline = False + for child_textregion in nn: + if child_textregion.tag.endswith("TextLine"): + + is_textline_text = False + for childtest2 in child_textregion: + if childtest2.tag.endswith("TextEquiv"): + is_textline_text = True + + + if not is_textline_text: + text_subelement = ET.SubElement(child_textregion, 'TextEquiv') + ##text_subelement.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") + unicode_textline = ET.SubElement(text_subelement, 'Unicode') + unicode_textline.text = extracted_texts_merged[indexer] + else: + for childtest3 in child_textregion: + if childtest3.tag.endswith("TextEquiv"): + for child_uc in childtest3: + if child_uc.tag.endswith("Unicode"): + ##childtest3.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") + child_uc.text = extracted_texts_merged[indexer] + + indexer = indexer + 1 + has_textline = True + if has_textline: + if is_textregion_text: + for child4 in nn: + if child4.tag.endswith("TextEquiv"): + for childtr_uc in child4: + if childtr_uc.tag.endswith("Unicode"): + childtr_uc.text = text_by_textregion[indexer_textregion] + else: + unicode_textregion.text = text_by_textregion[indexer_textregion] + indexer_textregion = indexer_textregion + 1 + + ###sample_order = [(id_to_order[tid], text) + ### for tid, text in zip(id_textregions, textregions_by_existing_ids) + ### if tid in id_to_order] + + ##ordered_texts_sample = [text for _, text in sorted(sample_order)] + ##tot_page_text = ' '.join(ordered_texts_sample) + + ##for page_element in root1.iter(link+'Page'): + ##text_page = ET.SubElement(page_element, 'TextEquiv') + ##unicode_textpage = ET.SubElement(text_page, 'Unicode') + ##unicode_textpage.text = tot_page_text + + ET.register_namespace("",name_space) + tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf-8",default_namespace=None) + else: + ###max_len = 280#512#280#512 + ###padding_token = 1500#299#1500#299 + image_width = 512#max_len * 4 + image_height = 32 + + + img_size=(image_width, image_height) + + for dir_img in ls_imgs: + file_name = Path(dir_img).stem + dir_xml = os.path.join(dir_xmls, file_name+'.xml') + out_file_ocr = os.path.join(dir_out, file_name+'.xml') + + if os.path.exists(out_file_ocr): + if overwrite: + self.logger.warning("will overwrite existing output file '%s'", out_file_ocr) + else: + self.logger.warning("will skip input for existing output file '%s'", out_file_ocr) + continue + + img = cv2.imread(dir_img) + if dir_in_bin is not None: + cropped_lines_bin = [] + dir_img_bin = os.path.join(dir_in_bin, file_name+'.png') + img_bin = cv2.imread(dir_img_bin) + + if dir_out_image_text: + out_image_with_text = os.path.join(dir_out_image_text, file_name+'.png') + image_text = Image.new("RGB", (img.shape[1], img.shape[0]), "white") + draw = ImageDraw.Draw(image_text) + total_bb_coordinates = [] + + tree1 = ET.parse(dir_xml, parser = ET.XMLParser(encoding="utf-8")) + root1=tree1.getroot() + alltags=[elem.tag for elem in root1.iter()] + link=alltags[0].split('}')[0]+'}' + + name_space = alltags[0].split('}')[0] + name_space = name_space.split('{')[1] + + region_tags=np.unique([x for x in alltags if x.endswith('TextRegion')]) + + cropped_lines = [] + cropped_lines_ver_index = [] + cropped_lines_region_indexer = [] + cropped_lines_meging_indexing = [] + + tinl = time.time() + indexer_text_region = 0 + indexer_textlines = 0 + for nn in root1.iter(region_tags): + try: + type_textregion = nn.attrib['type'] + except: + type_textregion = 'paragraph' + for child_textregion in nn: + if child_textregion.tag.endswith("TextLine"): + for child_textlines in child_textregion: + if child_textlines.tag.endswith("Coords"): + cropped_lines_region_indexer.append(indexer_text_region) + p_h=child_textlines.attrib['points'].split(' ') + textline_coords = np.array( [ [int(x.split(',')[0]), + int(x.split(',')[1]) ] + for x in p_h] ) + + x,y,w,h = cv2.boundingRect(textline_coords) + + angle_radians = math.atan2(h, w) + # Convert to degrees + angle_degrees = math.degrees(angle_radians) + if type_textregion=='drop-capital': + angle_degrees = 0 + + if dir_out_image_text: + total_bb_coordinates.append([x,y,w,h]) + + w_scaled = w * image_height/float(h) + + img_poly_on_img = np.copy(img) + if dir_in_bin is not None: + img_poly_on_img_bin = np.copy(img_bin) + img_crop_bin = img_poly_on_img_bin[y:y+h, x:x+w, :] + + mask_poly = np.zeros(img.shape) + mask_poly = cv2.fillPoly(mask_poly, pts=[textline_coords], color=(1, 1, 1)) + + + mask_poly = mask_poly[y:y+h, x:x+w, :] + img_crop = img_poly_on_img[y:y+h, x:x+w, :] + + if self.export_textline_images_and_text: + if not self.do_not_mask_with_textline_contour: + img_crop[mask_poly==0] = 255 + + else: + # print(file_name, angle_degrees, w*h, + # mask_poly[:,:,0].sum(), + # mask_poly[:,:,0].sum() /float(w*h) , + # 'didi') + + if angle_degrees > 3: + better_des_slope = get_orientation_moments(textline_coords) + + img_crop = rotate_image_with_padding(img_crop, better_des_slope) + if dir_in_bin is not None: + img_crop_bin = rotate_image_with_padding(img_crop_bin, better_des_slope) + + mask_poly = rotate_image_with_padding(mask_poly, better_des_slope) + mask_poly = mask_poly.astype('uint8') + + #new bounding box + x_n, y_n, w_n, h_n = get_contours_and_bounding_boxes(mask_poly[:,:,0]) + + mask_poly = mask_poly[y_n:y_n+h_n, x_n:x_n+w_n, :] + img_crop = img_crop[y_n:y_n+h_n, x_n:x_n+w_n, :] + + if not self.do_not_mask_with_textline_contour: + img_crop[mask_poly==0] = 255 + if dir_in_bin is not None: + img_crop_bin = img_crop_bin[y_n:y_n+h_n, x_n:x_n+w_n, :] + if not self.do_not_mask_with_textline_contour: + img_crop_bin[mask_poly==0] = 255 + + if mask_poly[:,:,0].sum() /float(w_n*h_n) < 0.50 and w_scaled > 90: + if dir_in_bin is not None: + img_crop, img_crop_bin = \ + break_curved_line_into_small_pieces_and_then_merge( + img_crop, mask_poly, img_crop_bin) + else: + img_crop, _ = \ + break_curved_line_into_small_pieces_and_then_merge( + img_crop, mask_poly) + + else: + better_des_slope = 0 + if not self.do_not_mask_with_textline_contour: + img_crop[mask_poly==0] = 255 + if dir_in_bin is not None: + if not self.do_not_mask_with_textline_contour: + img_crop_bin[mask_poly==0] = 255 + if type_textregion=='drop-capital': + pass + else: + if mask_poly[:,:,0].sum() /float(w*h) < 0.50 and w_scaled > 90: + if dir_in_bin is not None: + img_crop, img_crop_bin = \ + break_curved_line_into_small_pieces_and_then_merge( + img_crop, mask_poly, img_crop_bin) + else: + img_crop, _ = \ + break_curved_line_into_small_pieces_and_then_merge( + img_crop, mask_poly) + + if not self.export_textline_images_and_text: + if w_scaled < 750:#1.5*image_width: + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + img_crop, image_height, image_width) + cropped_lines.append(img_fin) + if abs(better_des_slope) > 45: + cropped_lines_ver_index.append(1) + else: + cropped_lines_ver_index.append(0) + + cropped_lines_meging_indexing.append(0) + if dir_in_bin is not None: + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + img_crop_bin, image_height, image_width) + cropped_lines_bin.append(img_fin) + else: + splited_images, splited_images_bin = return_textlines_split_if_needed( + img_crop, img_crop_bin if dir_in_bin is not None else None) + if splited_images: + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + splited_images[0], image_height, image_width) + cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(1) + + if abs(better_des_slope) > 45: + cropped_lines_ver_index.append(1) + else: + cropped_lines_ver_index.append(0) + + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + splited_images[1], image_height, image_width) + + cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(-1) + + if abs(better_des_slope) > 45: + cropped_lines_ver_index.append(1) + else: + cropped_lines_ver_index.append(0) + + if dir_in_bin is not None: + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + splited_images_bin[0], image_height, image_width) + cropped_lines_bin.append(img_fin) + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + splited_images_bin[1], image_height, image_width) + cropped_lines_bin.append(img_fin) + + else: + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + img_crop, image_height, image_width) + cropped_lines.append(img_fin) + cropped_lines_meging_indexing.append(0) + + if abs(better_des_slope) > 45: + cropped_lines_ver_index.append(1) + else: + cropped_lines_ver_index.append(0) + + if dir_in_bin is not None: + img_fin = preprocess_and_resize_image_for_ocrcnn_model( + img_crop_bin, image_height, image_width) + cropped_lines_bin.append(img_fin) + + if self.export_textline_images_and_text: + if img_crop.shape[0]==0 or img_crop.shape[1]==0: + pass + else: + if child_textlines.tag.endswith("TextEquiv"): + for cheild_text in child_textlines: + if cheild_text.tag.endswith("Unicode"): + textline_text = cheild_text.text + if textline_text: + base_name = os.path.join( + dir_out, file_name + '_line_' + str(indexer_textlines)) + if self.pref_of_dataset: + base_name += '_' + self.pref_of_dataset + if not self.do_not_mask_with_textline_contour: + base_name += '_masked' + + with open(base_name + '.txt', 'w') as text_file: + text_file.write(textline_text) + cv2.imwrite(base_name + '.png', img_crop) + indexer_textlines+=1 + + if not self.export_textline_images_and_text: + indexer_text_region = indexer_text_region +1 + + if not self.export_textline_images_and_text: + extracted_texts = [] + extracted_conf_value = [] + + n_iterations = math.ceil(len(cropped_lines) / self.b_s) + + for i in range(n_iterations): + if i==(n_iterations-1): + n_start = i*self.b_s + imgs = cropped_lines[n_start:] + imgs = np.array(imgs) + imgs = imgs.reshape(imgs.shape[0], image_height, image_width, 3) + + ver_imgs = np.array( cropped_lines_ver_index[n_start:] ) + indices_ver = np.where(ver_imgs == 1)[0] + + #print(indices_ver, 'indices_ver') + if len(indices_ver)>0: + imgs_ver_flipped = imgs[indices_ver, : ,: ,:] + imgs_ver_flipped = imgs_ver_flipped[:,::-1,::-1,:] + #print(imgs_ver_flipped, 'imgs_ver_flipped') + + else: + imgs_ver_flipped = None + + if dir_in_bin is not None: + imgs_bin = cropped_lines_bin[n_start:] + imgs_bin = np.array(imgs_bin) + imgs_bin = imgs_bin.reshape(imgs_bin.shape[0], image_height, image_width, 3) + + if len(indices_ver)>0: + imgs_bin_ver_flipped = imgs_bin[indices_ver, : ,: ,:] + imgs_bin_ver_flipped = imgs_bin_ver_flipped[:,::-1,::-1,:] + #print(imgs_ver_flipped, 'imgs_ver_flipped') + + else: + imgs_bin_ver_flipped = None + else: + n_start = i*self.b_s + n_end = (i+1)*self.b_s + imgs = cropped_lines[n_start:n_end] + imgs = np.array(imgs).reshape(self.b_s, image_height, image_width, 3) + + ver_imgs = np.array( cropped_lines_ver_index[n_start:n_end] ) + indices_ver = np.where(ver_imgs == 1)[0] + #print(indices_ver, 'indices_ver') + + if len(indices_ver)>0: + imgs_ver_flipped = imgs[indices_ver, : ,: ,:] + imgs_ver_flipped = imgs_ver_flipped[:,::-1,::-1,:] + #print(imgs_ver_flipped, 'imgs_ver_flipped') + else: + imgs_ver_flipped = None + + + if dir_in_bin is not None: + imgs_bin = cropped_lines_bin[n_start:n_end] + imgs_bin = np.array(imgs_bin).reshape(self.b_s, image_height, image_width, 3) + + + if len(indices_ver)>0: + imgs_bin_ver_flipped = imgs_bin[indices_ver, : ,: ,:] + imgs_bin_ver_flipped = imgs_bin_ver_flipped[:,::-1,::-1,:] + #print(imgs_ver_flipped, 'imgs_ver_flipped') + else: + imgs_bin_ver_flipped = None + + + self.logger.debug("processing next %d lines", len(imgs)) + preds = self.prediction_model.predict(imgs, verbose=0) + + if len(indices_ver)>0: + preds_flipped = self.prediction_model.predict(imgs_ver_flipped, verbose=0) + preds_max_fliped = np.max(preds_flipped, axis=2 ) + preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) + pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character + masked_means_flipped = \ + np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / \ + np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) + masked_means_flipped[np.isnan(masked_means_flipped)] = 0 + + preds_max = np.max(preds, axis=2 ) + preds_max_args = np.argmax(preds, axis=2 ) + pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character + + masked_means = \ + np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ + np.sum(pred_max_not_unk_mask_bool, axis=1) + masked_means[np.isnan(masked_means)] = 0 + + masked_means_ver = masked_means[indices_ver] + #print(masked_means_ver, 'pred_max_not_unk') + + indices_where_flipped_conf_value_is_higher = \ + np.where(masked_means_flipped > masked_means_ver)[0] + + #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') + if len(indices_where_flipped_conf_value_is_higher)>0: + indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] + preds[indices_to_be_replaced,:,:] = \ + preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] + if dir_in_bin is not None: + preds_bin = self.prediction_model.predict(imgs_bin, verbose=0) + + if len(indices_ver)>0: + preds_flipped = self.prediction_model.predict(imgs_bin_ver_flipped, verbose=0) + preds_max_fliped = np.max(preds_flipped, axis=2 ) + preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) + pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character + masked_means_flipped = \ + np.sum(preds_max_fliped * pred_max_not_unk_mask_bool_flipped, axis=1) / \ + np.sum(pred_max_not_unk_mask_bool_flipped, axis=1) + masked_means_flipped[np.isnan(masked_means_flipped)] = 0 + + preds_max = np.max(preds, axis=2 ) + preds_max_args = np.argmax(preds, axis=2 ) + pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character + + masked_means = \ + np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ + np.sum(pred_max_not_unk_mask_bool, axis=1) + masked_means[np.isnan(masked_means)] = 0 + + masked_means_ver = masked_means[indices_ver] + #print(masked_means_ver, 'pred_max_not_unk') + + indices_where_flipped_conf_value_is_higher = \ + np.where(masked_means_flipped > masked_means_ver)[0] + + #print(indices_where_flipped_conf_value_is_higher, 'indices_where_flipped_conf_value_is_higher') + if len(indices_where_flipped_conf_value_is_higher)>0: + indices_to_be_replaced = indices_ver[indices_where_flipped_conf_value_is_higher] + preds_bin[indices_to_be_replaced,:,:] = \ + preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] + + preds = (preds + preds_bin) / 2. + + pred_texts = decode_batch_predictions(preds, self.num_to_char) + + preds_max = np.max(preds, axis=2 ) + preds_max_args = np.argmax(preds, axis=2 ) + pred_max_not_unk_mask_bool = preds_max_args[:,:]!=self.end_character + masked_means = \ + np.sum(preds_max * pred_max_not_unk_mask_bool, axis=1) / \ + np.sum(pred_max_not_unk_mask_bool, axis=1) + + for ib in range(imgs.shape[0]): + pred_texts_ib = pred_texts[ib].replace("[UNK]", "") + if masked_means[ib] >= self.min_conf_value_of_textline_text: + extracted_texts.append(pred_texts_ib) + extracted_conf_value.append(masked_means[ib]) + else: + extracted_texts.append("") + extracted_conf_value.append(0) + del cropped_lines + if dir_in_bin is not None: + del cropped_lines_bin + gc.collect() + + extracted_texts_merged = [extracted_texts[ind] + if cropped_lines_meging_indexing[ind]==0 + else extracted_texts[ind]+" "+extracted_texts[ind+1] + if cropped_lines_meging_indexing[ind]==1 + else None + for ind in range(len(cropped_lines_meging_indexing))] + + extracted_conf_value_merged = [extracted_conf_value[ind] + if cropped_lines_meging_indexing[ind]==0 + else (extracted_conf_value[ind]+extracted_conf_value[ind+1])/2. + if cropped_lines_meging_indexing[ind]==1 + else None + for ind in range(len(cropped_lines_meging_indexing))] + + extracted_conf_value_merged = [extracted_conf_value_merged[ind_cfm] + for ind_cfm in range(len(extracted_texts_merged)) + if extracted_texts_merged[ind_cfm] is not None] + extracted_texts_merged = [ind for ind in extracted_texts_merged if ind is not None] + unique_cropped_lines_region_indexer = np.unique(cropped_lines_region_indexer) + + if dir_out_image_text: + #font_path = "Charis-7.000/Charis-Regular.ttf" # Make sure this file exists! + font = importlib_resources.files(__package__) / "Charis-Regular.ttf" + with importlib_resources.as_file(font) as font: + font = ImageFont.truetype(font=font, size=40) + + for indexer_text, bb_ind in enumerate(total_bb_coordinates): + x_bb = bb_ind[0] + y_bb = bb_ind[1] + w_bb = bb_ind[2] + h_bb = bb_ind[3] + + font = fit_text_single_line(draw, extracted_texts_merged[indexer_text], + font.path, w_bb, int(h_bb*0.4) ) + + ##draw.rectangle([x_bb, y_bb, x_bb + w_bb, y_bb + h_bb], outline="red", width=2) + + text_bbox = draw.textbbox((0, 0), extracted_texts_merged[indexer_text], font=font) + text_width = text_bbox[2] - text_bbox[0] + text_height = text_bbox[3] - text_bbox[1] + + text_x = x_bb + (w_bb - text_width) // 2 # Center horizontally + text_y = y_bb + (h_bb - text_height) // 2 # Center vertically + + # Draw the text + draw.text((text_x, text_y), extracted_texts_merged[indexer_text], fill="black", font=font) + image_text.save(out_image_with_text) + + text_by_textregion = [] + for ind in unique_cropped_lines_region_indexer: + ind = np.array(cropped_lines_region_indexer)==ind + extracted_texts_merged_un = np.array(extracted_texts_merged)[ind] + if len(extracted_texts_merged_un)>1: + text_by_textregion_ind = "" + next_glue = "" + for indt in range(len(extracted_texts_merged_un)): + if (extracted_texts_merged_un[indt].endswith('⸗') or + extracted_texts_merged_un[indt].endswith('-') or + extracted_texts_merged_un[indt].endswith('¬')): + text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt][:-1] + next_glue = "" + else: + text_by_textregion_ind += next_glue + extracted_texts_merged_un[indt] + next_glue = " " + text_by_textregion.append(text_by_textregion_ind) + else: + text_by_textregion.append(" ".join(extracted_texts_merged_un)) + #print(text_by_textregion, 'text_by_textregiontext_by_textregiontext_by_textregiontext_by_textregiontext_by_textregion') + + ###index_tot_regions = [] + ###tot_region_ref = [] + + ###for jj in root1.iter(link+'RegionRefIndexed'): + ###index_tot_regions.append(jj.attrib['index']) + ###tot_region_ref.append(jj.attrib['regionRef']) + + ###id_to_order = {tid: ro for tid, ro in zip(tot_region_ref, index_tot_regions)} + + #id_textregions = [] + #textregions_by_existing_ids = [] + indexer = 0 + indexer_textregion = 0 + for nn in root1.iter(region_tags): + #id_textregion = nn.attrib['id'] + #id_textregions.append(id_textregion) + #textregions_by_existing_ids.append(text_by_textregion[indexer_textregion]) + + is_textregion_text = False + for childtest in nn: + if childtest.tag.endswith("TextEquiv"): + is_textregion_text = True + + if not is_textregion_text: + text_subelement_textregion = ET.SubElement(nn, 'TextEquiv') + unicode_textregion = ET.SubElement(text_subelement_textregion, 'Unicode') + + + has_textline = False + for child_textregion in nn: + if child_textregion.tag.endswith("TextLine"): + + is_textline_text = False + for childtest2 in child_textregion: + if childtest2.tag.endswith("TextEquiv"): + is_textline_text = True + + + if not is_textline_text: + text_subelement = ET.SubElement(child_textregion, 'TextEquiv') + text_subelement.set('conf', f"{extracted_conf_value_merged[indexer]:.2f}") + unicode_textline = ET.SubElement(text_subelement, 'Unicode') + unicode_textline.text = extracted_texts_merged[indexer] + else: + for childtest3 in child_textregion: + if childtest3.tag.endswith("TextEquiv"): + for child_uc in childtest3: + if child_uc.tag.endswith("Unicode"): + childtest3.set('conf', + f"{extracted_conf_value_merged[indexer]:.2f}") + child_uc.text = extracted_texts_merged[indexer] + + indexer = indexer + 1 + has_textline = True + if has_textline: + if is_textregion_text: + for child4 in nn: + if child4.tag.endswith("TextEquiv"): + for childtr_uc in child4: + if childtr_uc.tag.endswith("Unicode"): + childtr_uc.text = text_by_textregion[indexer_textregion] + else: + unicode_textregion.text = text_by_textregion[indexer_textregion] + indexer_textregion = indexer_textregion + 1 + + ###sample_order = [(id_to_order[tid], text) + ### for tid, text in zip(id_textregions, textregions_by_existing_ids) + ### if tid in id_to_order] + + ##ordered_texts_sample = [text for _, text in sorted(sample_order)] + ##tot_page_text = ' '.join(ordered_texts_sample) + + ##for page_element in root1.iter(link+'Page'): + ##text_page = ET.SubElement(page_element, 'TextEquiv') + ##unicode_textpage = ET.SubElement(text_page, 'Unicode') + ##unicode_textpage.text = tot_page_text + + ET.register_namespace("",name_space) + tree1.write(out_file_ocr,xml_declaration=True,method='xml',encoding="utf-8",default_namespace=None) + #print("Job done in %.1fs", time.time() - t0) From d609a532bf1714b9b41c9b5a7454bce0c44c434f Mon Sep 17 00:00:00 2001 From: kba Date: Mon, 20 Oct 2025 19:46:07 +0200 Subject: [PATCH 390/492] organize imports mostly --- src/eynollah/eynollah.py | 39 +++++++++--------------------------- src/eynollah/eynollah_ocr.py | 39 ++++++++++++++++++++++++++++++++++++ src/eynollah/model_zoo.py | 26 ++++++++++++++++++++---- 3 files changed, 71 insertions(+), 33 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 0a7b660..f281ac6 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -8,40 +8,26 @@ document layout analysis (segmentation) with output in PAGE-XML """ -# cannot use importlib.resources until we move to 3.9+ forimportlib.resources.files -import sys - -if sys.version_info < (3, 10): - import importlib_resources -else: - import importlib.resources as importlib_resources - from difflib import SequenceMatcher as sq from PIL import Image, ImageDraw, ImageFont import math import os -import sys import time from typing import Dict, Union,List, Optional, Tuple -import atexit import warnings from functools import partial from pathlib import Path from multiprocessing import cpu_count import gc import copy -import json from concurrent.futures import ProcessPoolExecutor -import xml.etree.ElementTree as ET import cv2 import numpy as np import shapely.affinity from scipy.signal import find_peaks from scipy.ndimage import gaussian_filter1d -from numba import cuda from skimage.morphology import skeletonize -from ocrd import OcrdPage from ocrd_utils import getLogger, tf_disable_interactive_logs import statistics @@ -53,10 +39,6 @@ try: import matplotlib.pyplot as plt except ImportError: plt = None -try: - from transformers import TrOCRProcessor, VisionEncoderDecoderModel -except ImportError: - TrOCRProcessor = VisionEncoderDecoderModel = None #os.environ['CUDA_VISIBLE_DEVICES'] = '-1' tf_disable_interactive_logs() @@ -290,13 +272,6 @@ class Eynollah: if self.tr: loadable.append(('ocr', 'tr')) loadable.append(('ocr_tr_processor', 'tr')) - # TODO why here and why only for tr? - if torch.cuda.is_available(): - self.logger.info("Using GPU acceleration") - self.device = torch.device("cuda:0") - else: - self.logger.info("Using CPU processing") - self.device = torch.device("cpu") else: loadable.append('ocr') loadable.append('num_to_char') @@ -307,10 +282,16 @@ class Eynollah: if hasattr(self, 'executor') and getattr(self, 'executor'): self.executor.shutdown() self.executor = None - if hasattr(self, 'models') and getattr(self, 'models'): - for model_name in list(self.models): - if self.models[model_name]: - del self.models[model_name] + self.model_zoo.shutdown() + + @property + def device(self): + # TODO why here and why only for tr? + if torch.cuda.is_available(): + self.logger.info("Using GPU acceleration") + return torch.device("cuda:0") + self.logger.info("Using CPU processing") + return torch.device("cpu") def cache_images(self, image_filename=None, image_pil=None, dpi=None): ret = {} diff --git a/src/eynollah/eynollah_ocr.py b/src/eynollah/eynollah_ocr.py index 19825c5..6adea55 100644 --- a/src/eynollah/eynollah_ocr.py +++ b/src/eynollah/eynollah_ocr.py @@ -1,3 +1,41 @@ +# pyright: reportPossiblyUnboundVariable=false + +from logging import getLogger +from typing import Optional +from pathlib import Path +import os +import json +import gc +import sys +import math +import cv2 +import time + +from keras.layers import StringLookup + +from eynollah.utils.resize import resize_image +from eynollah.utils.utils_ocr import break_curved_line_into_small_pieces_and_then_merge, decode_batch_predictions, fit_text_single_line, get_contours_and_bounding_boxes, get_orientation_moments, preprocess_and_resize_image_for_ocrcnn_model, return_textlines_split_if_needed, rotate_image_with_padding + +from .utils import is_image_filename + +import xml.etree.ElementTree as ET +import tensorflow as tf +from keras.models import load_model +from PIL import Image, ImageDraw, ImageFont +import numpy as np +import torch + +# cannot use importlib.resources until we move to 3.9+ forimportlib.resources.files +if sys.version_info < (3, 10): + import importlib_resources +else: + import importlib.resources as importlib_resources + +try: + from transformers import TrOCRProcessor, VisionEncoderDecoderModel +except ImportError: + TrOCRProcessor = VisionEncoderDecoderModel = None + class Eynollah_ocr: def __init__( self, @@ -25,6 +63,7 @@ class Eynollah_ocr: else: self.min_conf_value_of_textline_text = 0.3 if tr_ocr: + assert TrOCRProcessor self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed") self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") if self.model_name: diff --git a/src/eynollah/model_zoo.py b/src/eynollah/model_zoo.py index b332b4a..ee8b6b0 100644 --- a/src/eynollah/model_zoo.py +++ b/src/eynollah/model_zoo.py @@ -2,7 +2,6 @@ from dataclasses import dataclass import json import logging from pathlib import Path -from types import MappingProxyType from typing import Dict, Literal, Optional, Tuple, List, Union from copy import deepcopy @@ -12,6 +11,8 @@ from transformers import TrOCRProcessor, VisionEncoderDecoderModel from eynollah.patch_encoder import PatchEncoder, Patches +SomeEynollahModel = Union[VisionEncoderDecoderModel, TrOCRProcessor, Model] + # Dict mapping model_category to dict mapping variant (default is '') to Path DEFAULT_MODEL_VERSIONS: Dict[str, Dict[str, str]] = { @@ -134,13 +135,14 @@ class EynollahModelZoo(): def __init__( self, basedir: str, - model_overrides: List[Tuple[str, str, str]], + model_overrides: Optional[List[Tuple[str, str, str]]]=None, ) -> None: self.model_basedir = Path(basedir) self.logger = logging.getLogger('eynollah.model_zoo') self.model_versions = deepcopy(DEFAULT_MODEL_VERSIONS) if model_overrides: self.override_models(*model_overrides) + self._loaded: Dict[Tuple[str, str], SomeEynollahModel] = {} def override_models(self, *model_overrides: Tuple[str, str, str]): """ @@ -202,7 +204,7 @@ class EynollahModelZoo(): model_category: str, model_variant: str = '', model_filename: str = '', - ) -> Union[VisionEncoderDecoderModel, TrOCRProcessor, Model]: + ) -> SomeEynollahModel: """ Load any model """ @@ -223,9 +225,16 @@ class EynollahModelZoo(): self.logger.exception(e) model = load_model(model_path, compile=False, custom_objects={ "PatchEncoder": PatchEncoder, "Patches": Patches}) + self._loaded[(model_category, model_variant)] = model return model # type: ignore - def _load_ocr_model(self, variant: str) -> Union[VisionEncoderDecoderModel, TrOCRProcessor, Model]: + def get_model(self, model_categeory, model_variant) -> SomeEynollahModel: + needle = (model_categeory, model_variant) + if needle not in self._loaded: + raise ValueError('Model/variant "{needle} not previously loaded with "load_model(..)"') + return self._loaded[needle] + + def _load_ocr_model(self, variant: str) -> SomeEynollahModel: """ Load OCR model """ @@ -258,3 +267,12 @@ class EynollahModelZoo(): 'versions': self.model_versions, }, indent=2)) + def shutdown(self): + """ + Ensure that a loaded models is not referenced by ``self._loaded`` anymore + """ + if hasattr(self, '_loaded') and getattr(self, '_loaded'): + for needle in self._loaded: + if self._loaded[needle]: + del self._loaded[needle] + From 062f317d2e9525cbff30812f68374ea017e4b41f Mon Sep 17 00:00:00 2001 From: kba Date: Mon, 20 Oct 2025 21:14:52 +0200 Subject: [PATCH 391/492] Introduce model_zoo to Eynollah_ocr --- src/eynollah/eynollah.py | 102 ++++++++++++------------- src/eynollah/eynollah_ocr.py | 136 ++++++++++++++++----------------- src/eynollah/model_zoo.py | 42 ++++++---- src/eynollah/utils/__init__.py | 7 +- 4 files changed, 149 insertions(+), 138 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index f281ac6..3582c67 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -271,12 +271,12 @@ class Eynollah: if self.ocr: if self.tr: loadable.append(('ocr', 'tr')) - loadable.append(('ocr_tr_processor', 'tr')) + loadable.append(('trocr_processor', 'tr')) else: loadable.append('ocr') loadable.append('num_to_char') - self.models = self.model_zoo.load_models(*loadable) + self.model_zoo.load_models(*loadable) def __del__(self): if hasattr(self, 'executor') and getattr(self, 'executor'): @@ -338,8 +338,8 @@ class Eynollah: def predict_enhancement(self, img): self.logger.debug("enter predict_enhancement") - img_height_model = self.models["enhancement"].layers[-1].output_shape[1] - img_width_model = self.models["enhancement"].layers[-1].output_shape[2] + img_height_model = self.model_zoo.get("enhancement").layers[-1].output_shape[1] + img_width_model = self.model_zoo.get("enhancement").layers[-1].output_shape[2] if img.shape[0] < img_height_model: img = cv2.resize(img, (img.shape[1], img_width_model), interpolation=cv2.INTER_NEAREST) if img.shape[1] < img_width_model: @@ -380,7 +380,7 @@ class Eynollah: index_y_d = img_h - img_height_model img_patch = img[np.newaxis, index_y_d:index_y_u, index_x_d:index_x_u, :] - label_p_pred = self.models["enhancement"].predict(img_patch, verbose=0) + label_p_pred = self.model_zoo.get("enhancement").predict(img_patch, verbose=0) seg = label_p_pred[0, :, :, :] * 255 if i == 0 and j == 0: @@ -555,7 +555,7 @@ class Eynollah: img_in[0, :, :, 1] = img_1ch[:, :] img_in[0, :, :, 2] = img_1ch[:, :] - label_p_pred = self.models["col_classifier"].predict(img_in, verbose=0) + label_p_pred = self.model_zoo.get("col_classifier").predict(img_in, verbose=0) num_col = np.argmax(label_p_pred[0]) + 1 self.logger.info("Found %s columns (%s)", num_col, label_p_pred) @@ -573,7 +573,7 @@ class Eynollah: self.logger.info("Detected %s DPI", dpi) if self.input_binary: img = self.imread() - prediction_bin = self.do_prediction(True, img, self.models["binarization"], n_batch_inference=5) + prediction_bin = self.do_prediction(True, img, self.model_zoo.get("binarization"), n_batch_inference=5) prediction_bin = 255 * (prediction_bin[:,:,0] == 0) prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2).astype(np.uint8) img= np.copy(prediction_bin) @@ -613,7 +613,7 @@ class Eynollah: img_in[0, :, :, 1] = img_1ch[:, :] img_in[0, :, :, 2] = img_1ch[:, :] - label_p_pred = self.models["col_classifier"].predict(img_in, verbose=0) + label_p_pred = self.model_zoo.get("col_classifier").predict(img_in, verbose=0) num_col = np.argmax(label_p_pred[0]) + 1 elif (self.num_col_upper and self.num_col_lower) and (self.num_col_upper!=self.num_col_lower): @@ -634,7 +634,7 @@ class Eynollah: img_in[0, :, :, 1] = img_1ch[:, :] img_in[0, :, :, 2] = img_1ch[:, :] - label_p_pred = self.models["col_classifier"].predict(img_in, verbose=0) + label_p_pred = self.model_zoo.get("col_classifier").predict(img_in, verbose=0) num_col = np.argmax(label_p_pred[0]) + 1 if num_col > self.num_col_upper: @@ -1486,7 +1486,7 @@ class Eynollah: cont_page = [] if not self.ignore_page_extraction: img = np.copy(self.image)#cv2.GaussianBlur(self.image, (5, 5), 0) - img_page_prediction = self.do_prediction(False, img, self.models["page"]) + img_page_prediction = self.do_prediction(False, img, self.model_zoo.get("page")) imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY) _, thresh = cv2.threshold(imgray, 0, 255, 0) ##thresh = cv2.dilate(thresh, KERNEL, iterations=3) @@ -1534,7 +1534,7 @@ class Eynollah: else: img = self.imread() img = cv2.GaussianBlur(img, (5, 5), 0) - img_page_prediction = self.do_prediction(False, img, self.models["page"]) + img_page_prediction = self.do_prediction(False, img, self.model_zoo.get("page")) imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY) _, thresh = cv2.threshold(imgray, 0, 255, 0) @@ -1560,7 +1560,7 @@ class Eynollah: self.logger.debug("enter extract_text_regions") img_height_h = img.shape[0] img_width_h = img.shape[1] - model_region = self.models["region_fl"] if patches else self.models["region_fl_np"] + model_region = self.model_zoo.get("region_fl") if patches else self.model_zoo.get("region_fl_np") if self.light_version: thresholding_for_fl_light_version = True @@ -1595,7 +1595,7 @@ class Eynollah: self.logger.debug("enter extract_text_regions") img_height_h = img.shape[0] img_width_h = img.shape[1] - model_region = self.models["region_fl"] if patches else self.models["region_fl_np"] + model_region = self.model_zoo.get("region_fl") if patches else self.model_zoo.get("region_fl_np") if not patches: img = otsu_copy_binary(img) @@ -1816,14 +1816,14 @@ class Eynollah: img_w = img_org.shape[1] img = resize_image(img_org, int(img_org.shape[0] * scaler_h), int(img_org.shape[1] * scaler_w)) - prediction_textline = self.do_prediction(use_patches, img, self.models["textline"], + prediction_textline = self.do_prediction(use_patches, img, self.model_zoo.get("textline"), marginal_of_patch_percent=0.15, n_batch_inference=3, thresholding_for_artificial_class_in_light_version=self.textline_light, threshold_art_class_textline=self.threshold_art_class_textline) #if not self.textline_light: #if num_col_classifier==1: - #prediction_textline_nopatch = self.do_prediction(False, img, self.models["textline"]) + #prediction_textline_nopatch = self.do_prediction(False, img, self.model_zoo.get_model("textline")) #prediction_textline[:,:][prediction_textline_nopatch[:,:]==0] = 0 prediction_textline = resize_image(prediction_textline, img_h, img_w) @@ -1894,7 +1894,7 @@ class Eynollah: #cv2.imwrite('prediction_textline2.png', prediction_textline[:,:,0]) - prediction_textline_longshot = self.do_prediction(False, img, self.models["textline"]) + prediction_textline_longshot = self.do_prediction(False, img, self.model_zoo.get("textline")) prediction_textline_longshot_true_size = resize_image(prediction_textline_longshot, img_h, img_w) @@ -1927,7 +1927,7 @@ class Eynollah: img_h_new = int(img.shape[0] / float(img.shape[1]) * img_w_new) img_resized = resize_image(img,img_h_new, img_w_new ) - prediction_regions_org, _ = self.do_prediction_new_concept(True, img_resized, self.models["region"]) + prediction_regions_org, _ = self.do_prediction_new_concept(True, img_resized, self.model_zoo.get("region")) prediction_regions_org = resize_image(prediction_regions_org,img_height_h, img_width_h ) image_page, page_coord, cont_page = self.extract_page() @@ -2043,7 +2043,7 @@ class Eynollah: #if self.input_binary: #img_bin = np.copy(img_resized) ###if (not self.input_binary and self.full_layout) or (not self.input_binary and num_col_classifier >= 30): - ###prediction_bin = self.do_prediction(True, img_resized, self.models["binarization"], n_batch_inference=5) + ###prediction_bin = self.do_prediction(True, img_resized, self.model_zoo.get_model("binarization"), n_batch_inference=5) ####print("inside bin ", time.time()-t_bin) ###prediction_bin=prediction_bin[:,:,0] @@ -2058,7 +2058,7 @@ class Eynollah: ###else: ###img_bin = np.copy(img_resized) if (self.ocr and self.tr) and not self.input_binary: - prediction_bin = self.do_prediction(True, img_resized, self.models["binarization"], n_batch_inference=5) + prediction_bin = self.do_prediction(True, img_resized, self.model_zoo.get("binarization"), n_batch_inference=5) prediction_bin = 255 * (prediction_bin[:,:,0] == 0) prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) prediction_bin = prediction_bin.astype(np.uint16) @@ -2090,14 +2090,14 @@ class Eynollah: self.logger.debug("resized to %dx%d for %d cols", img_resized.shape[1], img_resized.shape[0], num_col_classifier) prediction_regions_org, confidence_matrix = self.do_prediction_new_concept( - True, img_resized, self.models["region_1_2"], n_batch_inference=1, + True, img_resized, self.model_zoo.get("region_1_2"), n_batch_inference=1, thresholding_for_some_classes_in_light_version=True, threshold_art_class_layout=self.threshold_art_class_layout) else: prediction_regions_org = np.zeros((self.image_org.shape[0], self.image_org.shape[1], 3)) confidence_matrix = np.zeros((self.image_org.shape[0], self.image_org.shape[1])) prediction_regions_page, confidence_matrix_page = self.do_prediction_new_concept( - False, self.image_page_org_size, self.models["region_1_2"], n_batch_inference=1, + False, self.image_page_org_size, self.model_zoo.get("region_1_2"), n_batch_inference=1, thresholding_for_artificial_class_in_light_version=True, threshold_art_class_layout=self.threshold_art_class_layout) ys = slice(*self.page_coord[0:2]) @@ -2111,10 +2111,10 @@ class Eynollah: self.logger.debug("resized to %dx%d (new_h=%d) for %d cols", img_resized.shape[1], img_resized.shape[0], new_h, num_col_classifier) prediction_regions_org, confidence_matrix = self.do_prediction_new_concept( - True, img_resized, self.models["region_1_2"], n_batch_inference=2, + True, img_resized, self.model_zoo.get("region_1_2"), n_batch_inference=2, thresholding_for_some_classes_in_light_version=True, threshold_art_class_layout=self.threshold_art_class_layout) - ###prediction_regions_org = self.do_prediction(True, img_bin, self.models["region"], + ###prediction_regions_org = self.do_prediction(True, img_bin, self.model_zoo.get_model("region"), ###n_batch_inference=3, ###thresholding_for_some_classes_in_light_version=True) #print("inside 3 ", time.time()-t_in) @@ -2194,7 +2194,7 @@ class Eynollah: ratio_x=1 img = resize_image(img_org, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) - prediction_regions_org_y = self.do_prediction(True, img, self.models["region"]) + prediction_regions_org_y = self.do_prediction(True, img, self.model_zoo.get("region")) prediction_regions_org_y = resize_image(prediction_regions_org_y, img_height_h, img_width_h ) #plt.imshow(prediction_regions_org_y[:,:,0]) @@ -2209,7 +2209,7 @@ class Eynollah: _, _ = find_num_col(img_only_regions, num_col_classifier, self.tables, multiplier=6.0) img = resize_image(img_org, int(img_org.shape[0]), int(img_org.shape[1]*(1.2 if is_image_enhanced else 1))) - prediction_regions_org = self.do_prediction(True, img, self.models["region"]) + prediction_regions_org = self.do_prediction(True, img, self.model_zoo.get("region")) prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) prediction_regions_org=prediction_regions_org[:,:,0] @@ -2217,7 +2217,7 @@ class Eynollah: img = resize_image(img_org, int(img_org.shape[0]), int(img_org.shape[1])) - prediction_regions_org2 = self.do_prediction(True, img, self.models["region_p2"], marginal_of_patch_percent=0.2) + prediction_regions_org2 = self.do_prediction(True, img, self.model_zoo.get("region_p2"), marginal_of_patch_percent=0.2) prediction_regions_org2=resize_image(prediction_regions_org2, img_height_h, img_width_h ) mask_zeros2 = (prediction_regions_org2[:,:,0] == 0) @@ -2241,7 +2241,7 @@ class Eynollah: if self.input_binary: prediction_bin = np.copy(img_org) else: - prediction_bin = self.do_prediction(True, img_org, self.models["binarization"], n_batch_inference=5) + prediction_bin = self.do_prediction(True, img_org, self.model_zoo.get("binarization"), n_batch_inference=5) prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h ) prediction_bin = 255 * (prediction_bin[:,:,0]==0) prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) @@ -2251,7 +2251,7 @@ class Eynollah: img = resize_image(prediction_bin, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) - prediction_regions_org = self.do_prediction(True, img, self.models["region"]) + prediction_regions_org = self.do_prediction(True, img, self.model_zoo.get("region")) prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) prediction_regions_org=prediction_regions_org[:,:,0] @@ -2278,7 +2278,7 @@ class Eynollah: except: if self.input_binary: prediction_bin = np.copy(img_org) - prediction_bin = self.do_prediction(True, img_org, self.models["binarization"], n_batch_inference=5) + prediction_bin = self.do_prediction(True, img_org, self.model_zoo.get("binarization"), n_batch_inference=5) prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h ) prediction_bin = 255 * (prediction_bin[:,:,0]==0) prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2) @@ -2289,14 +2289,14 @@ class Eynollah: img = resize_image(prediction_bin, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x)) - prediction_regions_org = self.do_prediction(True, img, self.models["region"]) + prediction_regions_org = self.do_prediction(True, img, self.model_zoo.get("region")) prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) prediction_regions_org=prediction_regions_org[:,:,0] #mask_lines_only=(prediction_regions_org[:,:]==3)*1 #img = resize_image(img_org, int(img_org.shape[0]*1), int(img_org.shape[1]*1)) - #prediction_regions_org = self.do_prediction(True, img, self.models["region"]) + #prediction_regions_org = self.do_prediction(True, img, self.model_zoo.get_model("region")) #prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h ) #prediction_regions_org = prediction_regions_org[:,:,0] #prediction_regions_org[(prediction_regions_org[:,:] == 1) & (mask_zeros_y[:,:] == 1)]=0 @@ -2667,13 +2667,13 @@ class Eynollah: img_width_h = img_org.shape[1] patches = False if self.light_version: - prediction_table, _ = self.do_prediction_new_concept(patches, img, self.models["table"]) + prediction_table, _ = self.do_prediction_new_concept(patches, img, self.model_zoo.get("table")) prediction_table = prediction_table.astype(np.int16) return prediction_table[:,:,0] else: if num_col_classifier < 4 and num_col_classifier > 2: - prediction_table = self.do_prediction(patches, img, self.models["table"]) - pre_updown = self.do_prediction(patches, cv2.flip(img[:,:,:], -1), self.models["table"]) + prediction_table = self.do_prediction(patches, img, self.model_zoo.get("table")) + pre_updown = self.do_prediction(patches, cv2.flip(img[:,:,:], -1), self.model_zoo.get("table")) pre_updown = cv2.flip(pre_updown, -1) prediction_table[:,:,0][pre_updown[:,:,0]==1]=1 @@ -2692,8 +2692,8 @@ class Eynollah: xs = slice(w_start, w_start + img.shape[1]) img_new[ys, xs] = img - prediction_ext = self.do_prediction(patches, img_new, self.models["table"]) - pre_updown = self.do_prediction(patches, cv2.flip(img_new[:,:,:], -1), self.models["table"]) + prediction_ext = self.do_prediction(patches, img_new, self.model_zoo.get("table")) + pre_updown = self.do_prediction(patches, cv2.flip(img_new[:,:,:], -1), self.model_zoo.get("table")) pre_updown = cv2.flip(pre_updown, -1) prediction_table = prediction_ext[ys, xs] @@ -2714,8 +2714,8 @@ class Eynollah: xs = slice(w_start, w_start + img.shape[1]) img_new[ys, xs] = img - prediction_ext = self.do_prediction(patches, img_new, self.models["table"]) - pre_updown = self.do_prediction(patches, cv2.flip(img_new[:,:,:], -1), self.models["table"]) + prediction_ext = self.do_prediction(patches, img_new, self.model_zoo.get("table")) + pre_updown = self.do_prediction(patches, cv2.flip(img_new[:,:,:], -1), self.model_zoo.get("table")) pre_updown = cv2.flip(pre_updown, -1) prediction_table = prediction_ext[ys, xs] @@ -2727,10 +2727,10 @@ class Eynollah: prediction_table = np.zeros(img.shape) img_w_half = img.shape[1] // 2 - pre1 = self.do_prediction(patches, img[:,0:img_w_half,:], self.models["table"]) - pre2 = self.do_prediction(patches, img[:,img_w_half:,:], self.models["table"]) - pre_full = self.do_prediction(patches, img[:,:,:], self.models["table"]) - pre_updown = self.do_prediction(patches, cv2.flip(img[:,:,:], -1), self.models["table"]) + pre1 = self.do_prediction(patches, img[:,0:img_w_half,:], self.model_zoo.get("table")) + pre2 = self.do_prediction(patches, img[:,img_w_half:,:], self.model_zoo.get("table")) + pre_full = self.do_prediction(patches, img[:,:,:], self.model_zoo.get("table")) + pre_updown = self.do_prediction(patches, cv2.flip(img[:,:,:], -1), self.model_zoo.get("table")) pre_updown = cv2.flip(pre_updown, -1) prediction_table_full_erode = cv2.erode(pre_full[:,:,0], KERNEL, iterations=4) @@ -3522,7 +3522,7 @@ class Eynollah: tot_counter += 1 batch.append(j) if tot_counter % inference_bs == 0 or tot_counter == len(ij_list): - y_pr = self.models["reading_order"].predict(input_1 , verbose=0) + y_pr = self.model_zoo.get("reading_order").predict(input_1 , verbose=0) for jb, j in enumerate(batch): if y_pr[jb][0]>=0.5: post_list.append(j) @@ -4105,7 +4105,7 @@ class Eynollah: gc.collect() ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons, np.zeros((len(all_found_textline_polygons), 4)), - self.models["ocr"], self.b_s_ocr, self.models["num_to_char"], textline_light=True) + self.model_zoo.get("ocr"), self.b_s_ocr, self.model_zoo.get("num_to_char"), textline_light=True) else: ocr_all_textlines = None @@ -4614,27 +4614,27 @@ class Eynollah: if len(all_found_textline_polygons): ocr_all_textlines = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons, all_box_coord, - self.models["ocr"], self.b_s_ocr, self.models["num_to_char"], self.textline_light, self.curved_line) + self.model_zoo.get("ocr"), self.b_s_ocr, self.model_zoo.get("num_to_char"), self.textline_light, self.curved_line) if len(all_found_textline_polygons_marginals_left): ocr_all_textlines_marginals_left = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons_marginals_left, all_box_coord_marginals_left, - self.models["ocr"], self.b_s_ocr, self.models["num_to_char"], self.textline_light, self.curved_line) + self.model_zoo.get("ocr"), self.b_s_ocr, self.model_zoo.get("num_to_char"), self.textline_light, self.curved_line) if len(all_found_textline_polygons_marginals_right): ocr_all_textlines_marginals_right = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons_marginals_right, all_box_coord_marginals_right, - self.models["ocr"], self.b_s_ocr, self.models["num_to_char"], self.textline_light, self.curved_line) + self.model_zoo.get("ocr"), self.b_s_ocr, self.model_zoo.get("num_to_char"), self.textline_light, self.curved_line) if self.full_layout and len(all_found_textline_polygons): ocr_all_textlines_h = return_rnn_cnn_ocr_of_given_textlines( image_page, all_found_textline_polygons_h, all_box_coord_h, - self.models["ocr"], self.b_s_ocr, self.models["num_to_char"], self.textline_light, self.curved_line) + self.model_zoo.get("ocr"), self.b_s_ocr, self.model_zoo.get("num_to_char"), self.textline_light, self.curved_line) if self.full_layout and len(polygons_of_drop_capitals): ocr_all_textlines_drop = return_rnn_cnn_ocr_of_given_textlines( image_page, polygons_of_drop_capitals, np.zeros((len(polygons_of_drop_capitals), 4)), - self.models["ocr"], self.b_s_ocr, self.models["num_to_char"], self.textline_light, self.curved_line) + self.model_zoo.get("ocr"), self.b_s_ocr, self.model_zoo.get("num_to_char"), self.textline_light, self.curved_line) else: if self.light_version: @@ -4646,7 +4646,7 @@ class Eynollah: gc.collect() torch.cuda.empty_cache() - self.models["ocr"].to(self.device) + self.model_zoo.get("ocr").to(self.device) ind_tot = 0 #cv2.imwrite('./img_out.png', image_page) @@ -4683,7 +4683,7 @@ class Eynollah: img_croped = img_poly_on_img[y:y+h, x:x+w, :] #cv2.imwrite('./extracted_lines/'+str(ind_tot)+'.jpg', img_croped) text_ocr = self.return_ocr_of_textline_without_common_section( - img_croped, self.models["ocr"], self.models['ocr_tr_processor'], self.device, w, h2w_ratio, ind_tot) + img_croped, self.model_zoo.get("ocr"), self.model_zoo.get("trocr_processor"), self.device, w, h2w_ratio, ind_tot) ocr_textline_in_textregion.append(text_ocr) ind_tot = ind_tot +1 ocr_all_textlines.append(ocr_textline_in_textregion) diff --git a/src/eynollah/eynollah_ocr.py b/src/eynollah/eynollah_ocr.py index 6adea55..69dd6b7 100644 --- a/src/eynollah/eynollah_ocr.py +++ b/src/eynollah/eynollah_ocr.py @@ -1,6 +1,6 @@ # pyright: reportPossiblyUnboundVariable=false -from logging import getLogger +from logging import Logger, getLogger from typing import Optional from pathlib import Path import os @@ -8,23 +8,31 @@ import json import gc import sys import math -import cv2 import time from keras.layers import StringLookup - -from eynollah.utils.resize import resize_image -from eynollah.utils.utils_ocr import break_curved_line_into_small_pieces_and_then_merge, decode_batch_predictions, fit_text_single_line, get_contours_and_bounding_boxes, get_orientation_moments, preprocess_and_resize_image_for_ocrcnn_model, return_textlines_split_if_needed, rotate_image_with_padding - -from .utils import is_image_filename - +import cv2 import xml.etree.ElementTree as ET import tensorflow as tf from keras.models import load_model from PIL import Image, ImageDraw, ImageFont import numpy as np +from eynollah.model_zoo import EynollahModelZoo import torch +from .utils import is_image_filename +from .utils.resize import resize_image +from .utils.utils_ocr import ( + break_curved_line_into_small_pieces_and_then_merge, + decode_batch_predictions, + fit_text_single_line, + get_contours_and_bounding_boxes, + get_orientation_moments, + preprocess_and_resize_image_for_ocrcnn_model, + return_textlines_split_if_needed, + rotate_image_with_padding, +) + # cannot use importlib.resources until we move to 3.9+ forimportlib.resources.files if sys.version_info < (3, 10): import importlib_resources @@ -43,68 +51,51 @@ class Eynollah_ocr: model_name=None, dir_xmls=None, tr_ocr=False, - batch_size=None, - export_textline_images_and_text=False, - do_not_mask_with_textline_contour=False, + batch_size: Optional[int]=None, + export_textline_images_and_text: bool=False, + do_not_mask_with_textline_contour: bool=False, pref_of_dataset=None, - min_conf_value_of_textline_text : Optional[float]=None, - logger=None, + min_conf_value_of_textline_text : float=0.3, + logger: Optional[Logger]=None, ): - self.model_name = model_name self.tr_ocr = tr_ocr self.export_textline_images_and_text = export_textline_images_and_text self.do_not_mask_with_textline_contour = do_not_mask_with_textline_contour self.pref_of_dataset = pref_of_dataset self.logger = logger if logger else getLogger('eynollah') + self.model_zoo = EynollahModelZoo(basedir=dir_models) - if not export_textline_images_and_text: - if min_conf_value_of_textline_text: - self.min_conf_value_of_textline_text = float(min_conf_value_of_textline_text) + # TODO: Properly document what 'export_textline_images_and_text' is about + if export_textline_images_and_text: + self.logger.info("export_textline_images_and_text was set, so no actual models are loaded") + return + + self.min_conf_value_of_textline_text = min_conf_value_of_textline_text + self.b_s = 2 if batch_size is None and tr_ocr else 8 if batch_size is None else batch_size + + if tr_ocr: + self.model_zoo.load_model('trocr_processor', '') + if model_name: + self.model_zoo.load_model('ocr', 'tr', model_name) else: - self.min_conf_value_of_textline_text = 0.3 - if tr_ocr: - assert TrOCRProcessor - self.processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-printed") - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - if self.model_name: - self.model_ocr_dir = self.model_name - else: - self.model_ocr_dir = dir_models + "/model_eynollah_ocr_trocr_20250919" - self.model_ocr = VisionEncoderDecoderModel.from_pretrained(self.model_ocr_dir) - self.model_ocr.to(self.device) - if not batch_size: - self.b_s = 2 - else: - self.b_s = int(batch_size) - + self.model_zoo.load_model('ocr', 'tr') + self.model_zoo.get('ocr').to(self.device) + else: + if model_name: + self.model_zoo.load_model('ocr', '', model_name) else: - if self.model_name: - self.model_ocr_dir = self.model_name - else: - self.model_ocr_dir = dir_models + "/model_eynollah_ocr_cnnrnn_20250930" - model_ocr = load_model(self.model_ocr_dir , compile=False) - - self.prediction_model = tf.keras.models.Model( - model_ocr.get_layer(name = "image").input, - model_ocr.get_layer(name = "dense2").output) - if not batch_size: - self.b_s = 8 - else: - self.b_s = int(batch_size) - - with open(os.path.join(self.model_ocr_dir, "characters_org.txt"),"r") as config_file: - characters = json.load(config_file) - - AUTOTUNE = tf.data.AUTOTUNE + self.model_zoo.load_model('ocr', '') + self.model_zoo.load_model('num_to_char') + self.end_character = len(self.model_zoo.load_model('characters')) + 2 - # Mapping characters to integers. - char_to_num = StringLookup(vocabulary=list(characters), mask_token=None) - - # Mapping integers back to original characters. - self.num_to_char = StringLookup( - vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True - ) - self.end_character = len(characters) + 2 + @property + def device(self): + if torch.cuda.is_available(): + self.logger.info("Using GPU acceleration") + return torch.device("cuda:0") + else: + self.logger.info("Using CPU processing") + return torch.device("cpu") def run(self, overwrite: bool = False, dir_in: Optional[str] = None, @@ -119,13 +110,16 @@ class Eynollah_ocr: for image_filename in filter(is_image_filename, os.listdir(dir_in))] else: + assert image_filename ls_imgs = [image_filename] if self.tr_ocr: tr_ocr_input_height_and_width = 384 for dir_img in ls_imgs: file_name = Path(dir_img).stem + assert dir_xmls # FIXME: check the logic dir_xml = os.path.join(dir_xmls, file_name+'.xml') + assert dir_out # FIXME: check the logic out_file_ocr = os.path.join(dir_out, file_name+'.xml') if os.path.exists(out_file_ocr): @@ -204,10 +198,10 @@ class Eynollah_ocr: cropped_lines = [] indexer_b_s = 0 - pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values + pixel_values_merged = self.model_zoo.get('processor')(imgs, return_tensors="pt").pixel_values generated_ids_merged = self.model_ocr.generate( pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode( + generated_text_merged = self.model_zoo.get('processor').batch_decode( generated_ids_merged, skip_special_tokens=True) extracted_texts = extracted_texts + generated_text_merged @@ -227,10 +221,10 @@ class Eynollah_ocr: cropped_lines = [] indexer_b_s = 0 - pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values + pixel_values_merged = self.model_zoo.get('processor')(imgs, return_tensors="pt").pixel_values generated_ids_merged = self.model_ocr.generate( pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode( + generated_text_merged = self.model_zoo.get('processor').batch_decode( generated_ids_merged, skip_special_tokens=True) extracted_texts = extracted_texts + generated_text_merged @@ -247,10 +241,10 @@ class Eynollah_ocr: cropped_lines = [] indexer_b_s = 0 - pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values + pixel_values_merged = self.model_zoo.get('processor')(imgs, return_tensors="pt").pixel_values generated_ids_merged = self.model_ocr.generate( pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode( + generated_text_merged = self.model_zoo.get('processor').batch_decode( generated_ids_merged, skip_special_tokens=True) extracted_texts = extracted_texts + generated_text_merged @@ -265,10 +259,10 @@ class Eynollah_ocr: cropped_lines = [] indexer_b_s = 0 - pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values + pixel_values_merged = self.model_zoo.get('processor')(imgs, return_tensors="pt").pixel_values generated_ids_merged = self.model_ocr.generate( pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode( + generated_text_merged = self.model_zoo.get('processor').batch_decode( generated_ids_merged, skip_special_tokens=True) extracted_texts = extracted_texts + generated_text_merged @@ -282,9 +276,9 @@ class Eynollah_ocr: cropped_lines = [] indexer_b_s = 0 - pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values + pixel_values_merged = self.model_zoo.get('processor')(imgs, return_tensors="pt").pixel_values generated_ids_merged = self.model_ocr.generate(pixel_values_merged.to(self.device)) - generated_text_merged = self.processor.batch_decode(generated_ids_merged, skip_special_tokens=True) + generated_text_merged = self.model_zoo.get('processor').batch_decode(generated_ids_merged, skip_special_tokens=True) extracted_texts = extracted_texts + generated_text_merged @@ -299,10 +293,10 @@ class Eynollah_ocr: ####n_start = i*self.b_s ####n_end = (i+1)*self.b_s ####imgs = cropped_lines[n_start:n_end] - ####pixel_values_merged = self.processor(imgs, return_tensors="pt").pixel_values + ####pixel_values_merged = self.model_zoo.get('processor')(imgs, return_tensors="pt").pixel_values ####generated_ids_merged = self.model_ocr.generate( #### pixel_values_merged.to(self.device)) - ####generated_text_merged = self.processor.batch_decode( + ####generated_text_merged = self.model_zoo.get('processor').batch_decode( #### generated_ids_merged, skip_special_tokens=True) ####extracted_texts = extracted_texts + generated_text_merged diff --git a/src/eynollah/model_zoo.py b/src/eynollah/model_zoo.py index ee8b6b0..b92d4f1 100644 --- a/src/eynollah/model_zoo.py +++ b/src/eynollah/model_zoo.py @@ -11,7 +11,7 @@ from transformers import TrOCRProcessor, VisionEncoderDecoderModel from eynollah.patch_encoder import PatchEncoder, Patches -SomeEynollahModel = Union[VisionEncoderDecoderModel, TrOCRProcessor, Model] +SomeEynollahModel = Union[VisionEncoderDecoderModel, TrOCRProcessor, Model, List] # Dict mapping model_category to dict mapping variant (default is '') to Path @@ -114,14 +114,19 @@ DEFAULT_MODEL_VERSIONS: Dict[str, Dict[str, str]] = { '': "model_eynollah_ocr_cnnrnn_20250930", }, - 'ocr_tr_processor': { + 'trocr_processor': { '': 'microsoft/trocr-base-printed', 'htr': "microsoft/trocr-base-handwritten", }, 'num_to_char': { - '': 'model_eynollah_ocr_cnnrnn_20250930/characters_org.txt' + '': 'characters_org.txt' }, + + 'characters': { + '': 'characters_org.txt' + }, + } @@ -142,7 +147,7 @@ class EynollahModelZoo(): self.model_versions = deepcopy(DEFAULT_MODEL_VERSIONS) if model_overrides: self.override_models(*model_overrides) - self._loaded: Dict[Tuple[str, str], SomeEynollahModel] = {} + self._loaded: Dict[str, SomeEynollahModel] = {} def override_models(self, *model_overrides: Tuple[str, str, str]): """ @@ -216,7 +221,9 @@ class EynollahModelZoo(): model = self._load_ocr_model(variant=model_variant) elif model_category == 'num_to_char': model = self._load_num_to_char() - elif model_category == 'tr_processor': + elif model_category == 'characters': + model = self._load_characters() + elif model_category == 'trocr_processor': return TrOCRProcessor.from_pretrained(self.model_path(...)) else: try: @@ -225,14 +232,13 @@ class EynollahModelZoo(): self.logger.exception(e) model = load_model(model_path, compile=False, custom_objects={ "PatchEncoder": PatchEncoder, "Patches": Patches}) - self._loaded[(model_category, model_variant)] = model + self._loaded[model_category] = model return model # type: ignore - def get_model(self, model_categeory, model_variant) -> SomeEynollahModel: - needle = (model_categeory, model_variant) - if needle not in self._loaded: - raise ValueError('Model/variant "{needle} not previously loaded with "load_model(..)"') - return self._loaded[needle] + def get(self, model_category) -> SomeEynollahModel: + if model_category not in self._loaded: + raise ValueError(f'Model "{model_category} not previously loaded with "load_model(..)"') + return self._loaded[model_category] def _load_ocr_model(self, variant: str) -> SomeEynollahModel: """ @@ -247,15 +253,21 @@ class EynollahModelZoo(): return Model( ocr_model.get_layer(name = "image").input, # type: ignore ocr_model.get_layer(name = "dense2").output) # type: ignore + + def _load_characters(self) -> List[str]: + """ + Load encoding for OCR + """ + with open(self.model_path('ocr') / self.model_path('num_to_char', absolute=False), "r") as config_file: + return json.load(config_file) - def _load_num_to_char(self): + def _load_num_to_char(self) -> StringLookup: """ Load decoder for OCR """ - with open(self.model_path('ocr') / self.model_path('ocr', 'num_to_char', absolute=False), "r") as config_file: - characters = json.load(config_file) + characters = self._load_characters() # Mapping characters to integers. - char_to_num = StringLookup(vocabulary=list(characters), mask_token=None) + char_to_num = StringLookup(vocabulary=characters, mask_token=None) # Mapping integers back to original characters. return StringLookup( vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 5ccb2af..94f6983 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -393,7 +393,12 @@ def find_num_col_deskew(regions_without_separators, sigma_, multiplier=3.8): z = gaussian_filter1d(regions_without_separators_0, sigma_) return np.std(z) -def find_num_col(regions_without_separators, num_col_classifier, tables, multiplier=3.8): +def find_num_col( + regions_without_separators, + num_col_classifier, + tables, + multiplier=3.8, +): if not regions_without_separators.any(): return 0, [] #plt.imshow(regions_without_separators) From 6e3399fe7afecbd38df9a5d017388d1d5f83ae05 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Mon, 20 Oct 2025 22:16:56 +0200 Subject: [PATCH 392/492] combine Docker docs --- docs/docker.md | 27 +++++++++++++++++++++++---- train/README.md | 16 ---------------- 2 files changed, 23 insertions(+), 20 deletions(-) diff --git a/docs/docker.md b/docs/docker.md index 466adf6..e47f2d5 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -1,4 +1,8 @@ -# 1. ocrd resource manager +## Inference with Docker + + docker pull ghcr.io/qurator-spk/eynollah:latest + +### 1. ocrd resource manager (just once, to get the models and install them into a named volume for later re-use) vol_models=ocrd-resources:/usr/local/share/ocrd-resources @@ -6,19 +10,34 @@ Now, each time you want to use Eynollah, pass the same resources volume again. Also, bind-mount some data directory, e.g. current working directory $PWD (/data is default working directory in the container). + Either use standalone CLI (2) or OCR-D CLI (3): -# 2. standalone CLI (follow self-help, cf. readme) +### 2. standalone CLI +(follow self-help, cf. readme) docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah eynollah binarization --help docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah eynollah layout --help docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah eynollah ocr --help -# 3. OCR-D CLI (follow self-help, cf. readme and https://ocr-d.de/en/spec/cli) +### 3. OCR-D CLI +(follow self-help, cf. readme and https://ocr-d.de/en/spec/cli) docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah ocrd-eynollah-segment -h docker run --rm -v $vol_models -v $PWD:/data ocrd/eynollah ocrd-sbb-binarize -h Alternatively, just "log in" to the container once and use the commands there: - docker run --rm -v $vol_models -v $PWD:/data -it ocrd/eynollah bash \ No newline at end of file + docker run --rm -v $vol_models -v $PWD:/data -it ocrd/eynollah bash + +## Training with Docker + +Build the Docker image + + cd train + docker build -t model-training . + +Run the Docker image + + cd train + docker run --gpus all -v $PWD:/entry_point_dir model-training diff --git a/train/README.md b/train/README.md index 5f6d326..d270542 100644 --- a/train/README.md +++ b/train/README.md @@ -41,19 +41,3 @@ each class will be defined with a RGB value and beside images, a text file of cl > Convert COCO GT or results for a single image to a segmentation map and write it to disk. * [`ocrd-segment-extract-pages`](https://github.com/OCR-D/ocrd_segment/blob/master/ocrd_segment/extract_pages.py) > Extract region classes and their colours in mask (pseg) images. Allows the color map as free dict parameter, and comes with a default that mimics PageViewer's coloring for quick debugging; it also warns when regions do overlap. - -### Train using Docker - -Build the Docker image: - -```bash -cd train -docker build -t model-training . -``` - -Run Docker image - -```bash -cd train -docker run --gpus all -v $PWD:/entry_point_dir model-training -``` From e5254dc6c5bfcf2ee6d7b2b8636c14e32674f12f Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Mon, 20 Oct 2025 22:39:54 +0200 Subject: [PATCH 393/492] integrate training docs --- docs/train.md | 38 ++++++++++++++++++++++++++++++++++++++ train/README.md | 43 ------------------------------------------- 2 files changed, 38 insertions(+), 43 deletions(-) delete mode 100644 train/README.md diff --git a/docs/train.md b/docs/train.md index 252bead..ffa39a9 100644 --- a/docs/train.md +++ b/docs/train.md @@ -1,3 +1,41 @@ +# Prerequisistes + +## 1. Install Eynollah with training dependencies + +Clone the repository and install eynollah along with the dependencies necessary for training: + +```sh +git clone https://github.com/qurator-spk/eynollah +cd eynollah +pip install '.[training]' +``` + +## 2. Pretrained encoder + +Download our pretrained weights and add them to a `train/pretrained_model` folder: + +```sh +cd train +wget -O pretrained_model.tar.gz https://zenodo.org/records/17243320/files/pretrained_model_v0_5_1.tar.gz?download=1 +tar xf pretrained_model.tar.gz +``` + +## 3. Example data + +### Binarization +A small sample of training data for binarization experiment can be found on [Zenodo](https://zenodo.org/records/17243320/files/training_data_sample_binarization_v0_5_1.tar.gz?download=1), +which contains `images` and `labels` folders. + +## 4. Helpful tools + +* [`pagexml2img`](https://github.com/qurator-spk/page2img) +> Tool to extract 2-D or 3-D RGB images from PAGE-XML data. In the former case, the output will be 1 2-D image array which each class has filled with a pixel value. In the case of a 3-D RGB image, +each class will be defined with a RGB value and beside images, a text file of classes will also be produced. +* [`cocoSegmentationToPng`](https://github.com/nightrome/cocostuffapi/blob/17acf33aef3c6cc2d6aca46dcf084266c2778cf0/PythonAPI/pycocotools/cocostuffhelper.py#L130) +> Convert COCO GT or results for a single image to a segmentation map and write it to disk. +* [`ocrd-segment-extract-pages`](https://github.com/OCR-D/ocrd_segment/blob/master/ocrd_segment/extract_pages.py) +> Extract region classes and their colours in mask (pseg) images. Allows the color map as free dict parameter, and comes with a default that mimics PageViewer's coloring for quick debugging; it also warns when regions do overlap. + # Training documentation This document aims to assist users in preparing training datasets, training models, and diff --git a/train/README.md b/train/README.md deleted file mode 100644 index d270542..0000000 --- a/train/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# Training eynollah - -This README explains the technical details of how to set up and run training, for detailed information on parameterization, see [`docs/train.md`](../docs/train.md) - -## Introduction - -This folder contains the source code for training an encoder model for document image segmentation. - -## Installation - -Clone the repository and install eynollah along with the dependencies necessary for training: - -```sh -git clone https://github.com/qurator-spk/eynollah -cd eynollah -pip install '.[training]' -``` - -### Pretrained encoder - -Download our pretrained weights and add them to a `train/pretrained_model` folder: - -```sh -cd train -wget -O pretrained_model.tar.gz https://zenodo.org/records/17243320/files/pretrained_model_v0_5_1.tar.gz?download=1 -tar xf pretrained_model.tar.gz -``` - -### Binarization training data - -A small sample of training data for binarization experiment can be found [on -zenodo](https://zenodo.org/records/17243320/files/training_data_sample_binarization_v0_5_1.tar.gz?download=1), -which contains `images` and `labels` folders. - -### Helpful tools - -* [`pagexml2img`](https://github.com/qurator-spk/page2img) -> Tool to extract 2-D or 3-D RGB images from PAGE-XML data. In the former case, the output will be 1 2-D image array which each class has filled with a pixel value. In the case of a 3-D RGB image, -each class will be defined with a RGB value and beside images, a text file of classes will also be produced. -* [`cocoSegmentationToPng`](https://github.com/nightrome/cocostuffapi/blob/17acf33aef3c6cc2d6aca46dcf084266c2778cf0/PythonAPI/pycocotools/cocostuffhelper.py#L130) -> Convert COCO GT or results for a single image to a segmentation map and write it to disk. -* [`ocrd-segment-extract-pages`](https://github.com/OCR-D/ocrd_segment/blob/master/ocrd_segment/extract_pages.py) -> Extract region classes and their colours in mask (pseg) images. Allows the color map as free dict parameter, and comes with a default that mimics PageViewer's coloring for quick debugging; it also warns when regions do overlap. From 230e7cc705eef7800924917c23b9b4242d69f926 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Mon, 20 Oct 2025 22:52:54 +0200 Subject: [PATCH 394/492] integrate ocrd docs --- README.md | 11 ++--------- docs/ocrd.md | 5 +++++ 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index fabb594..d6930f7 100644 --- a/README.md +++ b/README.md @@ -157,7 +157,7 @@ eynollah ocr \ ### Reading Order Detection -The machine-based reading-order module employs a pretrained model to identify the reading order from layouts represented in PAGE-XML files. +The reading order detection module employs a pretrained model to identify the reading order from layouts represented in PAGE-XML files. The command-line interface for machine based reading order can be called like this: @@ -169,17 +169,10 @@ eynollah machine-based-reading-order \ -o ``` -#### Use as OCR-D processor - -Eynollah ships with a CLI interface to be used as [OCR-D](https://ocr-d.de) [processor](https://ocr-d.de/en/spec/cli), -formally described in [`ocrd-tool.json`](https://github.com/qurator-spk/eynollah/tree/main/src/eynollah/ocrd-tool.json). - -Further documentation on using Eynollah with OCR-D can be found in [`ocrd.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/ocrd.md). - ## How to cite ```bibtex -@inproceedings{hip23eynollah, +@inproceedings{hip23rezanezhad, title = {Document Layout Analysis with Deep Learning and Heuristics}, author = {Rezanezhad, Vahid and Baierer, Konstantin and Gerber, Mike and Labusch, Kai and Neudecker, Clemens}, booktitle = {Proceedings of the 7th International Workshop on Historical Document Imaging and Processing {HIP} 2023, diff --git a/docs/ocrd.md b/docs/ocrd.md index a391024..9e7e268 100644 --- a/docs/ocrd.md +++ b/docs/ocrd.md @@ -1,3 +1,8 @@ +## Use as OCR-D processor + +Eynollah ships with a CLI interface to be used as [OCR-D](https://ocr-d.de) [processor](https://ocr-d.de/en/spec/cli), +formally described in [`ocrd-tool.json`](https://github.com/qurator-spk/eynollah/tree/main/src/eynollah/ocrd-tool.json). + When using Eynollah in OCR-D, the source image file group with (preferably) RGB images should be used as input like this: ocrd-eynollah-segment -I OCR-D-IMG -O OCR-D-SEG -P models eynollah_layout_v0_5_0 From 7d70835d2251161b9c4ce4c41ad1ca98d2ca6953 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Mon, 20 Oct 2025 23:19:10 +0200 Subject: [PATCH 395/492] small fixes to main readme --- README.md | 22 +++++++++++++--------- docs/docker.md | 4 ++-- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index d6930f7..6dc5bf9 100644 --- a/README.md +++ b/README.md @@ -52,24 +52,25 @@ pip install "eynollah[OCR]" make install EXTRAS=OCR ``` -With Docker, use +### Docker + +Use ``` docker pull ghcr.io/qurator-spk/eynollah:latest ``` -For additional documentation on using Eynollah and Docker, see [`docker.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/docker.md). +When using Eynollah with Docker, see [`docker.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/docker.md). ## Models Pretrained models can be downloaded from [Zenodo](https://zenodo.org/records/17194824) or [Hugging Face](https://huggingface.co/SBB?search_models=eynollah). -For documentation on models, have a look at [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). +For model documentation and model cards, see [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). ## Training -To train your own model with Eynollah, see the documentation in [`train.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/train.md) and use the -tools in the [`train`](https://github.com/qurator-spk/eynollah/tree/main/train) folder. +To train your own model with Eynollah, see [`train.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/train.md) and use the tools in the [`train`](https://github.com/qurator-spk/eynollah/tree/main/train) folder. ## Usage @@ -83,10 +84,7 @@ Eynollah supports five use cases: ### Layout Analysis The layout analysis module is responsible for detecting layout elements, identifying text lines, and determining reading -order using either heuristic methods or a [pretrained reading order detection model](https://github.com/qurator-spk/eynollah#machine-based-reading-order). - -Reading order detection can be performed either as part of layout analysis based on image input, or, currently under -development, based on pre-existing layout analysis results in PAGE-XML format as input. +order using heuristic methods or a [pretrained model](https://github.com/qurator-spk/eynollah#machine-based-reading-order). The command-line interface for layout analysis can be called like this: @@ -156,6 +154,8 @@ eynollah ocr \ ``` ### Reading Order Detection +Reading order detection can be performed either as part of layout analysis based on image input, or, currently under +development, based on pre-existing layout analysis data in PAGE-XML format as input. The reading order detection module employs a pretrained model to identify the reading order from layouts represented in PAGE-XML files. @@ -169,6 +169,10 @@ eynollah machine-based-reading-order \ -o ``` +## Use as OCR-D processor + +See [`ocrd.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). + ## How to cite ```bibtex diff --git a/docs/docker.md b/docs/docker.md index e47f2d5..7965622 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -32,12 +32,12 @@ Alternatively, just "log in" to the container once and use the commands there: ## Training with Docker -Build the Docker image +Build the Docker training image cd train docker build -t model-training . -Run the Docker image +Run the Docker training image cd train docker run --gpus all -v $PWD:/entry_point_dir model-training From 44b75eb36f3adc1b27a61b3b13c5477cd67bddd1 Mon Sep 17 00:00:00 2001 From: kba Date: Tue, 21 Oct 2025 10:48:48 +0200 Subject: [PATCH 396/492] cli: model -> model_basedir --- src/eynollah/cli.py | 40 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 4eced59..14ae77d 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -423,7 +423,43 @@ def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_low help="Setup a basic console logger", ) -def layout(image, out, overwrite, dir_in, model, model_version, save_images, save_layout, save_deskewed, save_all, extract_only_images, save_page, enable_plotting, allow_enhancement, curved_line, textline_light, full_layout, tables, right2left, input_binary, allow_scaling, headers_off, light_version, reading_order_machine_based, do_ocr, transformer_ocr, batch_size_ocr, num_col_upper, num_col_lower, threshold_art_class_textline, threshold_art_class_layout, skip_layout_and_reading_order, ignore_page_extraction, log_level, setup_logging): +def layout( + image, + out, + overwrite, + dir_in, + model_basedir, + model_version, + save_images, + save_layout, + save_deskewed, + save_all, + extract_only_images, + save_page, + enable_plotting, + allow_enhancement, + curved_line, + textline_light, + full_layout, + tables, + right2left, + input_binary, + allow_scaling, + headers_off, + light_version, + reading_order_machine_based, + do_ocr, + transformer_ocr, + batch_size_ocr, + num_col_upper, + num_col_lower, + threshold_art_class_textline, + threshold_art_class_layout, + skip_layout_and_reading_order, + ignore_page_extraction, + log_level, + setup_logging, +): if setup_logging: console_handler = logging.StreamHandler(sys.stdout) console_handler.setLevel(logging.INFO) @@ -453,7 +489,7 @@ def layout(image, out, overwrite, dir_in, model, model_version, save_images, sav assert not extract_only_images or not headers_off, "Image extraction -eoi can not be set alongside headers_off -ho" assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." eynollah = Eynollah( - model, + model_basedir, model_overrides=model_version, extract_only_images=extract_only_images, enable_plotting=enable_plotting, From c6b863b13f31eaa2b0dc68460e75c80230b2a0fe Mon Sep 17 00:00:00 2001 From: kba Date: Tue, 21 Oct 2025 12:05:27 +0200 Subject: [PATCH 397/492] typing and asserts --- src/eynollah/eynollah.py | 19 +++++++++---------- src/eynollah/model_zoo.py | 16 +++++++++++----- src/eynollah/plot.py | 4 ++-- src/eynollah/writer.py | 14 ++++++++------ 4 files changed, 30 insertions(+), 23 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 3582c67..6356198 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -9,11 +9,10 @@ document layout analysis (segmentation) with output in PAGE-XML """ from difflib import SequenceMatcher as sq -from PIL import Image, ImageDraw, ImageFont import math import os import time -from typing import Dict, Union,List, Optional, Tuple +from typing import Dict, Type, Union,List, Optional, Tuple import warnings from functools import partial from pathlib import Path @@ -32,7 +31,7 @@ from ocrd_utils import getLogger, tf_disable_interactive_logs import statistics try: - import torch + import torch # type: ignore except ImportError: torch = None try: @@ -43,13 +42,8 @@ except ImportError: #os.environ['CUDA_VISIBLE_DEVICES'] = '-1' tf_disable_interactive_logs() import tensorflow as tf -from keras.models import load_model tf.get_logger().setLevel("ERROR") warnings.filterwarnings("ignore") -# use tf1 compatibility for keras backend -from tensorflow.compat.v1.keras.backend import set_session -from tensorflow.keras import layers -from tensorflow.keras.layers import StringLookup from .model_zoo import EynollahModelZoo from .utils.contour import ( @@ -280,6 +274,7 @@ class Eynollah: def __del__(self): if hasattr(self, 'executor') and getattr(self, 'executor'): + assert self.executor self.executor.shutdown() self.executor = None self.model_zoo.shutdown() @@ -287,6 +282,7 @@ class Eynollah: @property def device(self): # TODO why here and why only for tr? + assert torch if torch.cuda.is_available(): self.logger.info("Using GPU acceleration") return torch.device("cuda:0") @@ -689,8 +685,8 @@ class Eynollah: self.img_hight_int = int(self.image.shape[0] * scale) self.img_width_int = int(self.image.shape[1] * scale) - self.scale_y = self.img_hight_int / float(self.image.shape[0]) - self.scale_x = self.img_width_int / float(self.image.shape[1]) + self.scale_y: float = self.img_hight_int / float(self.image.shape[0]) + self.scale_x: float = self.img_width_int / float(self.image.shape[1]) self.image = resize_image(self.image, self.img_hight_int, self.img_width_int) @@ -1755,6 +1751,7 @@ class Eynollah: return [], [], [] self.logger.debug("enter get_slopes_and_deskew_new_light") with share_ndarray(textline_mask_tot) as textline_mask_tot_shared: + assert self.executor results = self.executor.map(partial(do_work_of_slopes_new_light, textline_mask_tot_ea=textline_mask_tot_shared, slope_deskew=slope_deskew, @@ -1771,6 +1768,7 @@ class Eynollah: return [], [], [] self.logger.debug("enter get_slopes_and_deskew_new") with share_ndarray(textline_mask_tot) as textline_mask_tot_shared: + assert self.executor results = self.executor.map(partial(do_work_of_slopes_new, textline_mask_tot_ea=textline_mask_tot_shared, slope_deskew=slope_deskew, @@ -1791,6 +1789,7 @@ class Eynollah: self.logger.debug("enter get_slopes_and_deskew_new_curved") with share_ndarray(textline_mask_tot) as textline_mask_tot_shared: with share_ndarray(mask_texts_only) as mask_texts_only_shared: + assert self.executor results = self.executor.map(partial(do_work_of_slopes_new_curved, textline_mask_tot_ea=textline_mask_tot_shared, mask_texts_only=mask_texts_only_shared, diff --git a/src/eynollah/model_zoo.py b/src/eynollah/model_zoo.py index b92d4f1..7f90bc0 100644 --- a/src/eynollah/model_zoo.py +++ b/src/eynollah/model_zoo.py @@ -2,7 +2,7 @@ from dataclasses import dataclass import json import logging from pathlib import Path -from typing import Dict, Literal, Optional, Tuple, List, Union +from typing import Dict, Literal, Optional, Tuple, List, Type, TypeVar, Union from copy import deepcopy from keras.layers import StringLookup @@ -12,7 +12,7 @@ from transformers import TrOCRProcessor, VisionEncoderDecoderModel from eynollah.patch_encoder import PatchEncoder, Patches SomeEynollahModel = Union[VisionEncoderDecoderModel, TrOCRProcessor, Model, List] - +T = TypeVar('T') # Dict mapping model_category to dict mapping variant (default is '') to Path DEFAULT_MODEL_VERSIONS: Dict[str, Dict[str, str]] = { @@ -149,7 +149,10 @@ class EynollahModelZoo(): self.override_models(*model_overrides) self._loaded: Dict[str, SomeEynollahModel] = {} - def override_models(self, *model_overrides: Tuple[str, str, str]): + def override_models( + self, + *model_overrides: Tuple[str, str, str], + ): """ Override the default model versions """ @@ -235,10 +238,13 @@ class EynollahModelZoo(): self._loaded[model_category] = model return model # type: ignore - def get(self, model_category) -> SomeEynollahModel: + def get(self, model_category: str, model_type: Optional[Type[T]]=None) -> T: if model_category not in self._loaded: raise ValueError(f'Model "{model_category} not previously loaded with "load_model(..)"') - return self._loaded[model_category] + ret = self._loaded[model_category] + if model_type: + assert isinstance(ret, model_type) + return ret # type: ignore # FIXME: convince typing that we're returning generic type def _load_ocr_model(self, variant: str) -> SomeEynollahModel: """ diff --git a/src/eynollah/plot.py b/src/eynollah/plot.py index c026e94..b1b2359 100644 --- a/src/eynollah/plot.py +++ b/src/eynollah/plot.py @@ -40,8 +40,8 @@ class EynollahPlotter: self.image_filename_stem = image_filename_stem # XXX TODO hacky these cannot be set at init time self.image_org = image_org - self.scale_x = scale_x - self.scale_y = scale_y + self.scale_x : float = scale_x + self.scale_y : float = scale_y def save_plot_of_layout_main(self, text_regions_p, image_page): if self.dir_of_layout is not None: diff --git a/src/eynollah/writer.py b/src/eynollah/writer.py index 9c3456a..a0ec077 100644 --- a/src/eynollah/writer.py +++ b/src/eynollah/writer.py @@ -2,7 +2,7 @@ # pylint: disable=import-error from pathlib import Path import os.path -import xml.etree.ElementTree as ET +from typing import Optional from .utils.xml import create_page_xml, xml_reading_order from .utils.counter import EynollahIdCounter @@ -10,7 +10,6 @@ from ocrd_utils import getLogger from ocrd_models.ocrd_page import ( BorderType, CoordsType, - PcGtsType, TextLineType, TextEquivType, TextRegionType, @@ -32,10 +31,10 @@ class EynollahXmlWriter: self.curved_line = curved_line self.textline_light = textline_light self.pcgts = pcgts - self.scale_x = None # XXX set outside __init__ - self.scale_y = None # XXX set outside __init__ - self.height_org = None # XXX set outside __init__ - self.width_org = None # XXX set outside __init__ + self.scale_x: Optional[float] = None # XXX set outside __init__ + self.scale_y: Optional[float] = None # XXX set outside __init__ + self.height_org: Optional[int] = None # XXX set outside __init__ + self.width_org: Optional[int] = None # XXX set outside __init__ @property def image_filename_stem(self): @@ -135,6 +134,7 @@ class EynollahXmlWriter: # create the file structure pcgts = self.pcgts if self.pcgts else create_page_xml(self.image_filename, self.height_org, self.width_org) page = pcgts.get_Page() + assert page page.set_Border(BorderType(Coords=CoordsType(points=self.calculate_page_coords(cont_page)))) counter = EynollahIdCounter() @@ -152,6 +152,7 @@ class EynollahXmlWriter: Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord, skip_layout_reading_order)) ) + assert textregion.Coords if conf_contours_textregions: textregion.Coords.set_conf(conf_contours_textregions[mm]) page.add_TextRegion(textregion) @@ -168,6 +169,7 @@ class EynollahXmlWriter: id=counter.next_region_id, type_='heading', Coords=CoordsType(points=self.calculate_polygon_coords(region_contour, page_coord)) ) + assert textregion.Coords if conf_contours_textregions_h: textregion.Coords.set_conf(conf_contours_textregions_h[mm]) page.add_TextRegion(textregion) From a53d5fc4523bc46aaef18f22f956aa7c91a0b958 Mon Sep 17 00:00:00 2001 From: kba Date: Tue, 21 Oct 2025 13:15:57 +0200 Subject: [PATCH 398/492] update docs/makefile to point to v0.6.0 models --- Makefile | 16 +++++++++------- README.md | 6 +++--- src/eynollah/ocrd-tool.json | 6 +++--- train/README.md | 4 ++-- 4 files changed, 17 insertions(+), 15 deletions(-) diff --git a/Makefile b/Makefile index 29dd877..4a28a23 100644 --- a/Makefile +++ b/Makefile @@ -6,21 +6,23 @@ EXTRAS ?= DOCKER_BASE_IMAGE ?= docker.io/ocrd/core-cuda-tf2:latest DOCKER_TAG ?= ocrd/eynollah DOCKER ?= docker +WGET = wget -O #SEG_MODEL := https://qurator-data.de/eynollah/2021-04-25/models_eynollah.tar.gz #SEG_MODEL := https://qurator-data.de/eynollah/2022-04-05/models_eynollah_renamed.tar.gz # SEG_MODEL := https://qurator-data.de/eynollah/2022-04-05/models_eynollah.tar.gz #SEG_MODEL := https://github.com/qurator-spk/eynollah/releases/download/v0.3.0/models_eynollah.tar.gz #SEG_MODEL := https://github.com/qurator-spk/eynollah/releases/download/v0.3.1/models_eynollah.tar.gz -SEG_MODEL := https://zenodo.org/records/17194824/files/models_layout_v0_5_0.tar.gz?download=1 +#SEG_MODEL := https://zenodo.org/records/17194824/files/models_layout_v0_5_0.tar.gz?download=1 +SEG_MODEL := https://zenodo.org/records/17295988/files/models_layout_v0_6_0.tar.gz?download=1 SEG_MODELFILE = $(notdir $(patsubst %?download=1,%,$(SEG_MODEL))) SEG_MODELNAME = $(SEG_MODELFILE:%.tar.gz=%) -BIN_MODEL := https://github.com/qurator-spk/sbb_binarization/releases/download/v0.0.11/saved_model_2021_03_09.zip +BIN_MODEL := https://zenodo.org/records/17295988/files/models_binarization_v0_6_0.tar.gz?download=1 BIN_MODELFILE = $(notdir $(BIN_MODEL)) BIN_MODELNAME := default-2021-03-09 -OCR_MODEL := https://zenodo.org/records/17236998/files/models_ocr_v0_5_1.tar.gz?download=1 +OCR_MODEL := https://zenodo.org/records/17295988/files/models_ocr_v0_6_0.tar.gz?download=1 OCR_MODELFILE = $(notdir $(patsubst %?download=1,%,$(OCR_MODEL))) OCR_MODELNAME = $(OCR_MODELFILE:%.tar.gz=%) @@ -55,18 +57,18 @@ help: # END-EVAL -# Download and extract models to $(PWD)/models_layout_v0_5_0 +# Download and extract models to $(PWD)/models_layout_v0_6_0 models: $(BIN_MODELNAME) $(SEG_MODELNAME) $(OCR_MODELNAME) # do not download these files if we already have the directories .INTERMEDIATE: $(BIN_MODELFILE) $(SEG_MODELFILE) $(OCR_MODELFILE) $(BIN_MODELFILE): - wget -O $@ $(BIN_MODEL) + $(WGET) $@ $(BIN_MODEL) $(SEG_MODELFILE): - wget -O $@ $(SEG_MODEL) + $(WGET) $@ $(SEG_MODEL) $(OCR_MODELFILE): - wget -O $@ $(OCR_MODEL) + $(WGET) $@ $(OCR_MODEL) $(BIN_MODELNAME): $(BIN_MODELFILE) mkdir $@ diff --git a/README.md b/README.md index 3ba5086..3ecb3d7 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,7 @@ make install EXTRAS=OCR ## Models -Pretrained models can be downloaded from [zenodo](https://zenodo.org/records/17194824) or [huggingface](https://huggingface.co/SBB?search_models=eynollah). +Pretrained models can be downloaded from [zenodo](https://doi.org/10.5281/zenodo.17194823) or [huggingface](https://huggingface.co/SBB?search_models=eynollah). For documentation on models, have a look at [`models.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). Model cards are also provided for our trained models. @@ -162,7 +162,7 @@ formally described in [`ocrd-tool.json`](https://github.com/qurator-spk/eynollah In this case, the source image file group with (preferably) RGB images should be used as input like this: - ocrd-eynollah-segment -I OCR-D-IMG -O OCR-D-SEG -P models eynollah_layout_v0_5_0 + ocrd-eynollah-segment -I OCR-D-IMG -O OCR-D-SEG -P models eynollah_layout_v0_6_0 If the input file group is PAGE-XML (from a previous OCR-D workflow step), Eynollah behaves as follows: - existing regions are kept and ignored (i.e. in effect they might overlap segments from Eynollah results) @@ -174,7 +174,7 @@ If the input file group is PAGE-XML (from a previous OCR-D workflow step), Eynol (because some other preprocessing step was in effect like `denoised`), then the output PAGE-XML will be based on that as new top-level (`@imageFilename`) - ocrd-eynollah-segment -I OCR-D-XYZ -O OCR-D-SEG -P models eynollah_layout_v0_5_0 + ocrd-eynollah-segment -I OCR-D-XYZ -O OCR-D-SEG -P models eynollah_layout_v0_6_0 In general, it makes more sense to add other workflow steps **after** Eynollah. diff --git a/src/eynollah/ocrd-tool.json b/src/eynollah/ocrd-tool.json index dbbdc3b..3d1193d 100644 --- a/src/eynollah/ocrd-tool.json +++ b/src/eynollah/ocrd-tool.json @@ -83,10 +83,10 @@ }, "resources": [ { - "url": "https://zenodo.org/records/17194824/files/models_layout_v0_5_0.tar.gz?download=1", - "name": "models_layout_v0_5_0", + "url": "https://zenodo.org/records/17295988/files/models_layout_v0_6_0.tar.gz?download=1", + "name": "models_layout_v0_6_0", "type": "archive", - "path_in_archive": "models_layout_v0_5_0", + "path_in_archive": "models_layout_v0_6_0", "size": 3525684179, "description": "Models for layout detection, reading order detection, textline detection, page extraction, column classification, table detection, binarization, image enhancement", "version_range": ">= v0.5.0" diff --git a/train/README.md b/train/README.md index 5f6d326..6aeea5d 100644 --- a/train/README.md +++ b/train/README.md @@ -22,14 +22,14 @@ Download our pretrained weights and add them to a `train/pretrained_model` folde ```sh cd train -wget -O pretrained_model.tar.gz https://zenodo.org/records/17243320/files/pretrained_model_v0_5_1.tar.gz?download=1 +wget -O pretrained_model.tar.gz "https://zenodo.org/records/17295988/files/pretrained_model_v0_6_0.tar.gz?download=1" tar xf pretrained_model.tar.gz ``` ### Binarization training data A small sample of training data for binarization experiment can be found [on -zenodo](https://zenodo.org/records/17243320/files/training_data_sample_binarization_v0_5_1.tar.gz?download=1), +zenodo](https://zenodo.org/records/17295988/files/training_data_sample_binarization_v0_6_0.tar.gz?download=1), which contains `images` and `labels` folders. ### Helpful tools From 9d2b18d2af47664325affd98c36cbe00b3e68b13 Mon Sep 17 00:00:00 2001 From: kba Date: Tue, 21 Oct 2025 13:29:55 +0200 Subject: [PATCH 399/492] test_run: check log messages starting with eynollah --- tests/test_run.py | 29 +++++------------------------ 1 file changed, 5 insertions(+), 24 deletions(-) diff --git a/tests/test_run.py b/tests/test_run.py index 79c64c2..a410d34 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -16,10 +16,13 @@ from ocrd_models.constants import NAMESPACES as NS testdir = Path(__file__).parent.resolve() -MODELS_LAYOUT = environ.get('MODELS_LAYOUT', str(testdir.joinpath('..', 'models_layout_v0_5_0').resolve())) -MODELS_OCR = environ.get('MODELS_OCR', str(testdir.joinpath('..', 'models_ocr_v0_5_1').resolve())) +MODELS_LAYOUT = environ.get('MODELS_LAYOUT', str(testdir.joinpath('..', 'models_layout_v0_6_0').resolve())) +MODELS_OCR = environ.get('MODELS_OCR', str(testdir.joinpath('..', 'models_ocr_v0_6_0').resolve())) MODELS_BIN = environ.get('MODELS_BIN', str(testdir.joinpath('..', 'default-2021-03-09').resolve())) +def only_eynollah(logrec): + return logrec.name.startswith('eynollah') + @pytest.mark.parametrize( "options", [ @@ -50,8 +53,6 @@ def test_run_eynollah_layout_filename(tmp_path, pytestconfig, caplog, options): if pytestconfig.getoption('verbose') > 0: args.extend(['-l', 'DEBUG']) caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'eynollah' runner = CliRunner() with caplog.filtering(only_eynollah): result = runner.invoke(layout_cli, args + options, catch_exceptions=False) @@ -85,8 +86,6 @@ def test_run_eynollah_layout_filename2(tmp_path, pytestconfig, caplog, options): if pytestconfig.getoption('verbose') > 0: args.extend(['-l', 'DEBUG']) caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'eynollah' runner = CliRunner() with caplog.filtering(only_eynollah): result = runner.invoke(layout_cli, args + options, catch_exceptions=False) @@ -116,8 +115,6 @@ def test_run_eynollah_layout_directory(tmp_path, pytestconfig, caplog): if pytestconfig.getoption('verbose') > 0: args.extend(['-l', 'DEBUG']) caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'eynollah' runner = CliRunner() with caplog.filtering(only_eynollah): result = runner.invoke(layout_cli, args, catch_exceptions=False) @@ -144,8 +141,6 @@ def test_run_eynollah_binarization_filename(tmp_path, pytestconfig, caplog, opti if pytestconfig.getoption('verbose') > 0: args.extend(['-l', 'DEBUG']) caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'SbbBinarizer' runner = CliRunner() with caplog.filtering(only_eynollah): result = runner.invoke(binarization_cli, args + options, catch_exceptions=False) @@ -170,8 +165,6 @@ def test_run_eynollah_binarization_directory(tmp_path, pytestconfig, caplog): if pytestconfig.getoption('verbose') > 0: args.extend(['-l', 'DEBUG']) caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'SbbBinarizer' runner = CliRunner() with caplog.filtering(only_eynollah): result = runner.invoke(binarization_cli, args, catch_exceptions=False) @@ -197,8 +190,6 @@ def test_run_eynollah_enhancement_filename(tmp_path, pytestconfig, caplog, optio if pytestconfig.getoption('verbose') > 0: args.extend(['-l', 'DEBUG']) caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'enhancement' runner = CliRunner() with caplog.filtering(only_eynollah): result = runner.invoke(enhancement_cli, args + options, catch_exceptions=False) @@ -223,8 +214,6 @@ def test_run_eynollah_enhancement_directory(tmp_path, pytestconfig, caplog): if pytestconfig.getoption('verbose') > 0: args.extend(['-l', 'DEBUG']) caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'enhancement' runner = CliRunner() with caplog.filtering(only_eynollah): result = runner.invoke(enhancement_cli, args, catch_exceptions=False) @@ -244,8 +233,6 @@ def test_run_eynollah_mbreorder_filename(tmp_path, pytestconfig, caplog): if pytestconfig.getoption('verbose') > 0: args.extend(['-l', 'DEBUG']) caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'mbreorder' runner = CliRunner() with caplog.filtering(only_eynollah): result = runner.invoke(mbreorder_cli, args, catch_exceptions=False) @@ -273,8 +260,6 @@ def test_run_eynollah_mbreorder_directory(tmp_path, pytestconfig, caplog): if pytestconfig.getoption('verbose') > 0: args.extend(['-l', 'DEBUG']) caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'mbreorder' runner = CliRunner() with caplog.filtering(only_eynollah): result = runner.invoke(mbreorder_cli, args, catch_exceptions=False) @@ -306,8 +291,6 @@ def test_run_eynollah_ocr_filename(tmp_path, pytestconfig, caplog, options): if pytestconfig.getoption('verbose') > 0: args.extend(['-l', 'DEBUG']) caplog.set_level(logging.DEBUG) - def only_eynollah(logrec): - return logrec.name == 'eynollah' runner = CliRunner() if "-doit" in options: options.insert(options.index("-doit") + 1, str(outrenderfile.parent)) @@ -339,8 +322,6 @@ def test_run_eynollah_ocr_directory(tmp_path, pytestconfig, caplog): if pytestconfig.getoption('verbose') > 0: args.extend(['-l', 'DEBUG']) caplog.set_level(logging.INFO) - def only_eynollah(logrec): - return logrec.name == 'eynollah' runner = CliRunner() with caplog.filtering(only_eynollah): result = runner.invoke(ocr_cli, args, catch_exceptions=False) From de34a1580905c21292281457dafd3c8ef8ad5c52 Mon Sep 17 00:00:00 2001 From: kba Date: Tue, 21 Oct 2025 17:27:16 +0200 Subject: [PATCH 400/492] Makefile: fix make models for OCR --- Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 4a28a23..b1cbcc4 100644 --- a/Makefile +++ b/Makefile @@ -71,8 +71,7 @@ $(OCR_MODELFILE): $(WGET) $@ $(OCR_MODEL) $(BIN_MODELNAME): $(BIN_MODELFILE) - mkdir $@ - unzip -d $@ $< + tar zxf $< $(SEG_MODELNAME): $(SEG_MODELFILE) tar zxf $< $(OCR_MODELNAME): $(OCR_MODELFILE) From bcffa2e5035356c638a138f9c09f67f23ec02b06 Mon Sep 17 00:00:00 2001 From: kba Date: Tue, 21 Oct 2025 17:53:24 +0200 Subject: [PATCH 401/492] adopt binarizer to the zoo --- src/eynollah/cli.py | 25 +++++++++++++++-- src/eynollah/model_zoo.py | 13 +++++++++ src/eynollah/sbb_binarize.py | 54 +++++++++++++++++------------------- 3 files changed, 62 insertions(+), 30 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 14ae77d..c7d4bd9 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -120,18 +120,39 @@ def machine_based_reading_order(input, dir_in, out, model, log_level): type=click.Path(file_okay=True, dir_okay=True), required=True, ) +@click.option( + '-M', + '--mode', + type=click.Choice(['single', 'multi']), + default='single', + help="Whether to use the (faster) single-model binarization or the (slightly better) multi-model binarization" +) @click.option( "--log_level", "-l", type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), help="Override log level globally to this", ) -def binarization(patches, model_dir, input_image, dir_in, output, log_level): +def binarization( + patches, + model_dir, + input_image, + mode, + dir_in, + output, + log_level, +): assert bool(input_image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." binarizer = SbbBinarizer(model_dir) if log_level: binarizer.log.setLevel(getLevelName(log_level)) - binarizer.run(image_path=input_image, use_patches=patches, output=output, dir_in=dir_in) + binarizer.run( + image_path=input_image, + use_patches=patches, + mode=mode, + output=output, + dir_in=dir_in + ) @main.command() diff --git a/src/eynollah/model_zoo.py b/src/eynollah/model_zoo.py index 7f90bc0..100d974 100644 --- a/src/eynollah/model_zoo.py +++ b/src/eynollah/model_zoo.py @@ -25,6 +25,19 @@ DEFAULT_MODEL_VERSIONS: Dict[str, Dict[str, str]] = { '': "eynollah-binarization_20210425" }, + "binarization_multi_1": { + '': "saved_model_2020_01_16/model_bin1", + }, + "binarization_multi_2": { + '': "saved_model_2020_01_16/model_bin2", + }, + "binarization_multi_3": { + '': "saved_model_2020_01_16/model_bin3", + }, + "binarization_multi_4": { + '': "saved_model_2020_01_16/model_bin4", + }, + "col_classifier": { '': "eynollah-column-classifier_20210425", }, diff --git a/src/eynollah/sbb_binarize.py b/src/eynollah/sbb_binarize.py index 3716987..f8898a1 100644 --- a/src/eynollah/sbb_binarize.py +++ b/src/eynollah/sbb_binarize.py @@ -2,18 +2,19 @@ Tool to load model and binarize a given image. """ -import sys -from glob import glob import os import logging +from pathlib import Path +from typing import Dict, List +from keras.models import Model import numpy as np -from PIL import Image import cv2 from ocrd_utils import tf_disable_interactive_logs + +from eynollah.model_zoo import EynollahModelZoo tf_disable_interactive_logs() import tensorflow as tf -from tensorflow.keras.models import load_model from tensorflow.python.keras import backend as tensorflow_backend from .utils import is_image_filename @@ -23,40 +24,37 @@ def resize_image(img_in, input_height, input_width): class SbbBinarizer: - def __init__(self, model_dir, logger=None): - self.model_dir = model_dir + def __init__(self, model_dir, mode='single', logger=None): + if mode not in ('single', 'multi'): + raise ValueError(f"'mode' must be either 'multi' or 'single', not {mode}") self.log = logger if logger else logging.getLogger('SbbBinarizer') - - self.start_new_session() - - self.model_files = glob(self.model_dir+"/*/", recursive = True) - - self.models = [] - for model_file in self.model_files: - self.models.append(self.load_model(model_file)) + self.model_zoo = EynollahModelZoo(basedir=model_dir) + self.models = self.setup_models(mode) + self.session = self.start_new_session() def start_new_session(self): config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True - self.session = tf.compat.v1.Session(config=config) # tf.InteractiveSession() - tensorflow_backend.set_session(self.session) + session = tf.compat.v1.Session(config=config) # tf.InteractiveSession() + tensorflow_backend.set_session(session) + return session + + def setup_models(self, mode: str) -> Dict[Path, Model]: + return { + self.model_zoo.model_path(v): self.model_zoo.load_model(v) + for v in (['binarization'] if mode == 'single' else [f'binarization_multi_{i}' for i in range(1, 5)]) + } def end_session(self): tensorflow_backend.clear_session() self.session.close() del self.session - def load_model(self, model_name): - model = load_model(os.path.join(self.model_dir, model_name), compile=False) + def predict(self, img, use_patches, n_batch_inference=5): + model = self.model_zoo.get('binarization', Model) model_height = model.layers[len(model.layers)-1].output_shape[1] model_width = model.layers[len(model.layers)-1].output_shape[2] - n_classes = model.layers[len(model.layers)-1].output_shape[3] - return model, model_height, model_width, n_classes - - def predict(self, model_in, img, use_patches, n_batch_inference=5): - tensorflow_backend.set_session(self.session) - model, model_height, model_width, n_classes = model_in img_org_h = img.shape[0] img_org_w = img.shape[1] @@ -324,8 +322,8 @@ class SbbBinarizer: if image_path is not None: image = cv2.imread(image_path) img_last = 0 - for n, (model, model_file) in enumerate(zip(self.models, self.model_files)): - self.log.info('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.model_files))) + for n, (model_file, model) in enumerate(self.models.items()): + self.log.info('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.models.keys()))) res = self.predict(model, image, use_patches) @@ -354,8 +352,8 @@ class SbbBinarizer: print(image_name,'image_name') image = cv2.imread(os.path.join(dir_in,image_name) ) img_last = 0 - for n, (model, model_file) in enumerate(zip(self.models, self.model_files)): - self.log.info('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.model_files))) + for n, (model_file, model) in enumerate(self.models.items()): + self.log.info('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.models.keys()))) res = self.predict(model, image, use_patches) From f0c86672f8797d71074f6d2199b3b99c120e8ec2 Mon Sep 17 00:00:00 2001 From: kba Date: Tue, 21 Oct 2025 17:55:08 +0200 Subject: [PATCH 402/492] adopt mb_ro_on_layout to the zoo --- src/eynollah/eynollah.py | 2 +- src/eynollah/mb_ro_on_layout.py | 28 ++++++++++------------------ 2 files changed, 11 insertions(+), 19 deletions(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 6356198..4d1644d 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -12,7 +12,7 @@ from difflib import SequenceMatcher as sq import math import os import time -from typing import Dict, Type, Union,List, Optional, Tuple +from typing import List, Optional, Tuple import warnings from functools import partial from pathlib import Path diff --git a/src/eynollah/mb_ro_on_layout.py b/src/eynollah/mb_ro_on_layout.py index 0a8a7ae..8338d35 100644 --- a/src/eynollah/mb_ro_on_layout.py +++ b/src/eynollah/mb_ro_on_layout.py @@ -10,12 +10,13 @@ from pathlib import Path import xml.etree.ElementTree as ET import cv2 +from keras.models import Model import numpy as np from ocrd_utils import getLogger import statistics import tensorflow as tf -from tensorflow.keras.models import load_model +from .model_zoo import EynollahModelZoo from .utils.resize import resize_image from .utils.contour import ( find_new_features_of_contours, @@ -23,7 +24,6 @@ from .utils.contour import ( return_parent_contours, ) from .utils import is_xml_filename -from .patch_encoder import PatchEncoder, Patches DPI_THRESHOLD = 298 KERNEL = np.ones((5, 5), np.uint8) @@ -45,21 +45,11 @@ class machine_based_reading_order_on_layout: except: self.logger.warning("no GPU device available") - self.model_reading_order = self.our_load_model(self.model_reading_order_dir) + self.model_zoo = EynollahModelZoo(basedir=dir_models) + self.model_zoo.load_model('reading_order') + # FIXME: light_version is always true, no need for checks in the code self.light_version = True - @staticmethod - def our_load_model(model_file): - if model_file.endswith('.h5') and Path(model_file[:-3]).exists(): - # prefer SavedModel over HDF5 format if it exists - model_file = model_file[:-3] - try: - model = load_model(model_file, compile=False) - except: - model = load_model(model_file, compile=False, custom_objects={ - "PatchEncoder": PatchEncoder, "Patches": Patches}) - return model - def read_xml(self, xml_file): tree1 = ET.parse(xml_file, parser = ET.XMLParser(encoding='utf-8')) root1=tree1.getroot() @@ -69,6 +59,7 @@ class machine_based_reading_order_on_layout: index_tot_regions = [] tot_region_ref = [] + y_len, x_len = 0, 0 for jj in root1.iter(link+'Page'): y_len=int(jj.attrib['imageHeight']) x_len=int(jj.attrib['imageWidth']) @@ -81,13 +72,13 @@ class machine_based_reading_order_on_layout: co_printspace = [] if link+'PrintSpace' in alltags: region_tags_printspace = np.unique([x for x in alltags if x.endswith('PrintSpace')]) - elif link+'Border' in alltags: + else: region_tags_printspace = np.unique([x for x in alltags if x.endswith('Border')]) for tag in region_tags_printspace: if link+'PrintSpace' in alltags: tag_endings_printspace = ['}PrintSpace','}printspace'] - elif link+'Border' in alltags: + else: tag_endings_printspace = ['}Border','}border'] if tag.endswith(tag_endings_printspace[0]) or tag.endswith(tag_endings_printspace[1]): @@ -683,7 +674,7 @@ class machine_based_reading_order_on_layout: tot_counter += 1 batch.append(j) if tot_counter % inference_bs == 0 or tot_counter == len(ij_list): - y_pr = self.model_reading_order.predict(input_1 , verbose=0) + y_pr = self.model_zoo.get('reading_order', Model).predict(input_1 , verbose='0') for jb, j in enumerate(batch): if y_pr[jb][0]>=0.5: post_list.append(j) @@ -802,6 +793,7 @@ class machine_based_reading_order_on_layout: alltags=[elem.tag for elem in root_xml.iter()] ET.register_namespace("",name_space) + assert dir_out tree_xml.write(os.path.join(dir_out, file_name+'.xml'), xml_declaration=True, method='xml', From 1337461d478c349ec0909c9772419f991bbd4cce Mon Sep 17 00:00:00 2001 From: kba Date: Tue, 21 Oct 2025 19:24:55 +0200 Subject: [PATCH 403/492] adopt image_enhancer to the zoo --- src/eynollah/image_enhancer.py | 44 ++++++++++------------------------ 1 file changed, 13 insertions(+), 31 deletions(-) diff --git a/src/eynollah/image_enhancer.py b/src/eynollah/image_enhancer.py index 93b5daa..cec8877 100644 --- a/src/eynollah/image_enhancer.py +++ b/src/eynollah/image_enhancer.py @@ -5,17 +5,18 @@ Image enhancer. The output can be written as same scale of input or in new predi from logging import Logger import os import time -from typing import Optional +from typing import Dict, Optional from pathlib import Path import gc import cv2 +from keras.models import Model import numpy as np from ocrd_utils import getLogger, tf_disable_interactive_logs import tensorflow as tf from skimage.morphology import skeletonize -from tensorflow.keras.models import load_model +from .model_zoo import EynollahModelZoo from .utils.resize import resize_image from .utils.pil_cv2 import pil2cv from .utils import ( @@ -50,11 +51,9 @@ class Enhancer: self.num_col_lower = num_col_lower self.logger = logger if logger else getLogger('enhancement') - self.dir_models = dir_models - self.model_dir_of_binarization = dir_models + "/eynollah-binarization_20210425" - self.model_dir_of_enhancement = dir_models + "/eynollah-enhancement_20210425" - self.model_dir_of_col_classifier = dir_models + "/eynollah-column-classifier_20210425" - self.model_page_dir = dir_models + "/model_eynollah_page_extraction_20250915" + self.model_zoo = EynollahModelZoo(basedir=dir_models) + for v in ['binarization', 'enhancement', 'col_classifier', 'page']: + self.model_zoo.load_model(v) try: for device in tf.config.list_physical_devices('GPU'): @@ -62,11 +61,6 @@ class Enhancer: except: self.logger.warning("no GPU device available") - self.model_page = self.our_load_model(self.model_page_dir) - self.model_classifier = self.our_load_model(self.model_dir_of_col_classifier) - self.model_enhancement = self.our_load_model(self.model_dir_of_enhancement) - self.model_bin = self.our_load_model(self.model_dir_of_binarization) - def cache_images(self, image_filename=None, image_pil=None, dpi=None): ret = {} if image_filename: @@ -102,24 +96,12 @@ class Enhancer: def isNaN(self, num): return num != num - - @staticmethod - def our_load_model(model_file): - if model_file.endswith('.h5') and Path(model_file[:-3]).exists(): - # prefer SavedModel over HDF5 format if it exists - model_file = model_file[:-3] - try: - model = load_model(model_file, compile=False) - except: - model = load_model(model_file, compile=False, custom_objects={ - "PatchEncoder": PatchEncoder, "Patches": Patches}) - return model def predict_enhancement(self, img): self.logger.debug("enter predict_enhancement") - img_height_model = self.model_enhancement.layers[-1].output_shape[1] - img_width_model = self.model_enhancement.layers[-1].output_shape[2] + img_height_model = self.model_zoo.get('enhancement', Model).layers[-1].output_shape[1] + img_width_model = self.model_zoo.get('enhancement', Model).layers[-1].output_shape[2] if img.shape[0] < img_height_model: img = cv2.resize(img, (img.shape[1], img_width_model), interpolation=cv2.INTER_NEAREST) if img.shape[1] < img_width_model: @@ -160,7 +142,7 @@ class Enhancer: index_y_d = img_h - img_height_model img_patch = img[np.newaxis, index_y_d:index_y_u, index_x_d:index_x_u, :] - label_p_pred = self.model_enhancement.predict(img_patch, verbose=0) + label_p_pred = self.model_zoo.get('enhancement', Model).predict(img_patch, verbose=0) seg = label_p_pred[0, :, :, :] * 255 if i == 0 and j == 0: @@ -246,7 +228,7 @@ class Enhancer: else: img = self.imread() img = cv2.GaussianBlur(img, (5, 5), 0) - img_page_prediction = self.do_prediction(False, img, self.model_page) + img_page_prediction = self.do_prediction(False, img, self.model_zoo.get('page')) imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY) _, thresh = cv2.threshold(imgray, 0, 255, 0) @@ -291,7 +273,7 @@ class Enhancer: self.logger.info("Detected %s DPI", dpi) if self.input_binary: img = self.imread() - prediction_bin = self.do_prediction(True, img, self.model_bin, n_batch_inference=5) + prediction_bin = self.do_prediction(True, img, self.model_zoo.get('binarization'), n_batch_inference=5) prediction_bin = 255 * (prediction_bin[:,:,0]==0) prediction_bin = np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2).astype(np.uint8) img= np.copy(prediction_bin) @@ -332,7 +314,7 @@ class Enhancer: img_in[0, :, :, 1] = img_1ch[:, :] img_in[0, :, :, 2] = img_1ch[:, :] - label_p_pred = self.model_classifier.predict(img_in, verbose=0) + label_p_pred = self.model_zoo.get('col_classifier').predict(img_in, verbose=0) num_col = np.argmax(label_p_pred[0]) + 1 elif (self.num_col_upper and self.num_col_lower) and (self.num_col_upper!=self.num_col_lower): if self.input_binary: @@ -352,7 +334,7 @@ class Enhancer: img_in[0, :, :, 1] = img_1ch[:, :] img_in[0, :, :, 2] = img_1ch[:, :] - label_p_pred = self.model_classifier.predict(img_in, verbose=0) + label_p_pred = self.model_zoo.get('col_classifier').predict(img_in, verbose=0) num_col = np.argmax(label_p_pred[0]) + 1 if num_col > self.num_col_upper: From 4c8abfe19cfba385bc358ee4f2c31456d0ed7bb3 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 22 Oct 2025 10:40:49 +0200 Subject: [PATCH 404/492] eynollah_ocr: actually replace the model calls --- src/eynollah/eynollah_ocr.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/eynollah/eynollah_ocr.py b/src/eynollah/eynollah_ocr.py index 69dd6b7..b021e92 100644 --- a/src/eynollah/eynollah_ocr.py +++ b/src/eynollah/eynollah_ocr.py @@ -199,7 +199,7 @@ class Eynollah_ocr: indexer_b_s = 0 pixel_values_merged = self.model_zoo.get('processor')(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate( + generated_ids_merged = self.model_zoo.get('ocr').generate( pixel_values_merged.to(self.device)) generated_text_merged = self.model_zoo.get('processor').batch_decode( generated_ids_merged, skip_special_tokens=True) @@ -222,7 +222,7 @@ class Eynollah_ocr: indexer_b_s = 0 pixel_values_merged = self.model_zoo.get('processor')(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate( + generated_ids_merged = self.model_zoo.get('ocr').generate( pixel_values_merged.to(self.device)) generated_text_merged = self.model_zoo.get('processor').batch_decode( generated_ids_merged, skip_special_tokens=True) @@ -242,7 +242,7 @@ class Eynollah_ocr: indexer_b_s = 0 pixel_values_merged = self.model_zoo.get('processor')(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate( + generated_ids_merged = self.model_zoo.get('ocr').generate( pixel_values_merged.to(self.device)) generated_text_merged = self.model_zoo.get('processor').batch_decode( generated_ids_merged, skip_special_tokens=True) @@ -260,7 +260,7 @@ class Eynollah_ocr: indexer_b_s = 0 pixel_values_merged = self.model_zoo.get('processor')(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate( + generated_ids_merged = self.model_zoo.get('ocr').generate( pixel_values_merged.to(self.device)) generated_text_merged = self.model_zoo.get('processor').batch_decode( generated_ids_merged, skip_special_tokens=True) @@ -277,7 +277,7 @@ class Eynollah_ocr: indexer_b_s = 0 pixel_values_merged = self.model_zoo.get('processor')(imgs, return_tensors="pt").pixel_values - generated_ids_merged = self.model_ocr.generate(pixel_values_merged.to(self.device)) + generated_ids_merged = self.model_zoo.get('ocr').generate(pixel_values_merged.to(self.device)) generated_text_merged = self.model_zoo.get('processor').batch_decode(generated_ids_merged, skip_special_tokens=True) extracted_texts = extracted_texts + generated_text_merged @@ -753,10 +753,10 @@ class Eynollah_ocr: self.logger.debug("processing next %d lines", len(imgs)) - preds = self.prediction_model.predict(imgs, verbose=0) + preds = self.model_zoo.get('ocr').predict(imgs, verbose=0) if len(indices_ver)>0: - preds_flipped = self.prediction_model.predict(imgs_ver_flipped, verbose=0) + preds_flipped = self.model_zoo.get('ocr').predict(imgs_ver_flipped, verbose=0) preds_max_fliped = np.max(preds_flipped, axis=2 ) preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character @@ -786,10 +786,10 @@ class Eynollah_ocr: preds[indices_to_be_replaced,:,:] = \ preds_flipped[indices_where_flipped_conf_value_is_higher, :, :] if dir_in_bin is not None: - preds_bin = self.prediction_model.predict(imgs_bin, verbose=0) + preds_bin = self.model_zoo.get('ocr').predict(imgs_bin, verbose=0) if len(indices_ver)>0: - preds_flipped = self.prediction_model.predict(imgs_bin_ver_flipped, verbose=0) + preds_flipped = self.model_zoo.get('ocr').predict(imgs_bin_ver_flipped, verbose=0) preds_max_fliped = np.max(preds_flipped, axis=2 ) preds_max_args_flipped = np.argmax(preds_flipped, axis=2 ) pred_max_not_unk_mask_bool_flipped = preds_max_args_flipped[:,:]!=self.end_character @@ -821,7 +821,7 @@ class Eynollah_ocr: preds = (preds + preds_bin) / 2. - pred_texts = decode_batch_predictions(preds, self.num_to_char) + pred_texts = decode_batch_predictions(preds, self.model_zoo.get('num_to_char')) preds_max = np.max(preds, axis=2 ) preds_max_args = np.argmax(preds, axis=2 ) From 146658f026e6df2d2cc380b342935643324824fe Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 22 Oct 2025 10:47:09 +0200 Subject: [PATCH 405/492] eynollah layout: fix trocr_processor model_zoo call --- src/eynollah/eynollah.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 4d1644d..232631a 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -265,7 +265,7 @@ class Eynollah: if self.ocr: if self.tr: loadable.append(('ocr', 'tr')) - loadable.append(('trocr_processor', 'tr')) + loadable.append(('trocr_processor', '')) else: loadable.append('ocr') loadable.append('num_to_char') From d94285b3ea2972fa2a402bc3b88222a2d6d4a164 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 22 Oct 2025 13:07:35 +0200 Subject: [PATCH 406/492] rewrite model spec data structure --- src/eynollah/model_zoo.py | 453 +++++++++++++++++++++++++------------- 1 file changed, 306 insertions(+), 147 deletions(-) diff --git a/src/eynollah/model_zoo.py b/src/eynollah/model_zoo.py index 100d974..6bb06d3 100644 --- a/src/eynollah/model_zoo.py +++ b/src/eynollah/model_zoo.py @@ -1,154 +1,329 @@ +from copy import deepcopy from dataclasses import dataclass import json import logging from pathlib import Path -from typing import Dict, Literal, Optional, Tuple, List, Type, TypeVar, Union -from copy import deepcopy +from typing import Dict, Optional, Set, Tuple, List, Type, TypeVar, Union from keras.layers import StringLookup -from keras.models import Model, load_model +from keras.models import Model as KerasModel, load_model from transformers import TrOCRProcessor, VisionEncoderDecoderModel from eynollah.patch_encoder import PatchEncoder, Patches -SomeEynollahModel = Union[VisionEncoderDecoderModel, TrOCRProcessor, Model, List] +AnyModel = Union[VisionEncoderDecoderModel, TrOCRProcessor, KerasModel, List] T = TypeVar('T') -# Dict mapping model_category to dict mapping variant (default is '') to Path -DEFAULT_MODEL_VERSIONS: Dict[str, Dict[str, str]] = { +# NOTE: This needs to change whenever models change +ZENODO = "https://zenodo.org/records/17295988/files" +MODELS_VERSION = "v0_7_0" - "enhancement": { - '': "eynollah-enhancement_20210425" - }, +def dist_url(dist_name: str) -> str: + return f'{ZENODO}/models_{dist_name}_${MODELS_VERSION}.zip' - "binarization": { - '': "eynollah-binarization_20210425" - }, +@dataclass +class EynollahModelSpec(): + """ + Describing a single model abstractly. + """ + category: str + # Relative filename to the models_eynollah directory in the dists + filename: str + # The smallest model distribution containing this model (link to Zenodo) + dist: str + type: Type[AnyModel] + variant: str = '' + help: str = '' - "binarization_multi_1": { - '': "saved_model_2020_01_16/model_bin1", - }, - "binarization_multi_2": { - '': "saved_model_2020_01_16/model_bin2", - }, - "binarization_multi_3": { - '': "saved_model_2020_01_16/model_bin3", - }, - "binarization_multi_4": { - '': "saved_model_2020_01_16/model_bin4", - }, +class EynollahModelSpecSet(): + """ + List of all used models for eynollah. + """ + specs: List[EynollahModelSpec] - "col_classifier": { - '': "eynollah-column-classifier_20210425", - }, + def __init__(self, specs: List[EynollahModelSpec]) -> None: + self.specs = specs + self.categories: Set[str] = set([spec.category for spec in self.specs]) + self.variants: Dict[str, Set[str]] = { + spec.category: set([x.variant for x in self.specs if x.category == spec.category]) + for spec in self.specs + } + self._index_category_variant: Dict[Tuple[str, str], EynollahModelSpec] = { + (spec.category, spec.variant): spec + for spec in self.specs + } - "page": { - '': "model_eynollah_page_extraction_20250915", - }, + def asdict(self) -> Dict[str, Dict[str, str]]: + return { + spec.category: { + spec.variant: spec.filename + } + for spec in self.specs + } - # TODO: What is this commented out model? - #?: "eynollah-main-regions-aug-scaling_20210425", + def get(self, category: str, variant: str) -> EynollahModelSpec: + if category not in self.categories: + raise ValueError(f"Unknown category '{category}', must be one of {self.categories}") + if variant not in self.variants[category]: + raise ValueError(f"Unknown variant {variant} for {category}. Known variants: {self.variants[category]}") + return self._index_category_variant[(category, variant)] - # early layout - "region": { - '': "eynollah-main-regions-ensembled_20210425", - 'extract_only_images': "eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18", - 'light': "eynollah-main-regions_20220314", - }, +DEFAULT_MODEL_SPECS = EynollahModelSpecSet([# {{{ - # early layout, non-light, 2nd part - "region_p2": { - '': "eynollah-main-regions-aug-rotation_20210425", - }, + EynollahModelSpec( + category="enhancement", + variant='', + filename="models_eynollah/eynollah-enhancement_20210425", + dist=dist_url("enhancement"), + type=KerasModel, + ), + + EynollahModelSpec( + category="binarization", + variant='', + filename="models_eynollah/eynollah-binarization_20210425", + dist=dist_url("binarization"), + type=KerasModel, + ), + + EynollahModelSpec( + category="binarization_multi_1", + variant='', + filename="models_eynollah/saved_model_2020_01_16/model_bin1", + dist=dist_url("binarization"), + type=KerasModel, + ), - # early layout, light, 1-or-2-column - "region_1_2": { - #'': "modelens_12sp_elay_0_3_4__3_6_n" - #'': "modelens_earlylayout_12spaltige_2_3_5_6_7_8" - #'': "modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18" - #'': "modelens_1_2_4_5_early_lay_1_2_spaltige" - #'': "model_3_eraly_layout_no_patches_1_2_spaltige" - '': "modelens_e_l_all_sp_0_1_2_3_4_171024" - }, + EynollahModelSpec( + category="binarization_multi_2", + variant='', + filename="models_eynollah/saved_model_2020_01_16/model_bin2", + dist=dist_url("binarization"), + type=KerasModel, + ), - # full layout / no patches - "region_fl_np": { - #'': "modelens_full_lay_1_3_031124" - #'': "modelens_full_lay_13__3_19_241024" - #'': "model_full_lay_13_241024" - #'': "modelens_full_lay_13_17_231024" - #'': "modelens_full_lay_1_2_221024" - #'': "eynollah-full-regions-1column_20210425" - '': "modelens_full_lay_1__4_3_091124" - }, + EynollahModelSpec( + category="binarization_multi_3", + variant='', + filename="models_eynollah/saved_model_2020_01_16/model_bin3", + dist=dist_url("binarization"), + type=KerasModel, + ), - # full layout / with patches - "region_fl": { - #'': "eynollah-full-regions-3+column_20210425" - #'': #"model_2_full_layout_new_trans" - #'': "modelens_full_lay_1_3_031124" - #'': "modelens_full_lay_13__3_19_241024" - #'': "model_full_lay_13_241024" - #'': "modelens_full_lay_13_17_231024" - #'': "modelens_full_lay_1_2_221024" - #'': "modelens_full_layout_24_till_28" - #'': "model_2_full_layout_new_trans" - '': "modelens_full_lay_1__4_3_091124", - }, + EynollahModelSpec( + category="binarization_multi_4", + variant='', + filename="models_eynollah/saved_model_2020_01_16/model_bin4", + dist=dist_url("binarization"), + type=KerasModel, + ), - "reading_order": { - #'': "model_mb_ro_aug_ens_11" - #'': "model_step_3200000_mb_ro" - #'': "model_ens_reading_order_machine_based" - #'': "model_mb_ro_aug_ens_8" - #'': "model_ens_reading_order_machine_based" - '': "model_eynollah_reading_order_20250824" - }, + EynollahModelSpec( + category="col_classifier", + variant='', + filename="models_eynollah/eynollah-column-classifier_20210425", + dist=dist_url("layout"), + type=KerasModel, + ), - "textline": { - #'light': "eynollah-textline_light_20210425" - 'light': "modelens_textline_0_1__2_4_16092024", - #'': "modelens_textline_1_4_16092024" - #'': "model_textline_ens_3_4_5_6_artificial" - #'': "modelens_textline_1_3_4_20240915" - #'': "model_textline_ens_3_4_5_6_artificial" - #'': "modelens_textline_9_12_13_14_15" - #'': "eynollah-textline_20210425" - '': "modelens_textline_0_1__2_4_16092024" - }, + EynollahModelSpec( + category="page", + variant='', + filename="models_eynollah/model_eynollah_page_extraction_20250915", + dist=dist_url("layout"), + type=KerasModel, + ), - "table": { - 'light': "modelens_table_0t4_201124", - '': "eynollah-tables_20210319", - }, + EynollahModelSpec( + category="region", + variant='', + filename="models_eynollah/eynollah-main-regions-ensembled_20210425", + dist=dist_url("layout"), + type=KerasModel, + ), - "ocr": { - 'tr': "model_eynollah_ocr_trocr_20250919", - '': "model_eynollah_ocr_cnnrnn_20250930", - }, + EynollahModelSpec( + category="region", + variant='extract_only_images', + filename="models_eynollah/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18", + dist=dist_url("layout"), + type=KerasModel, + ), - 'trocr_processor': { - '': 'microsoft/trocr-base-printed', - 'htr': "microsoft/trocr-base-handwritten", - }, + EynollahModelSpec( + category="region", + variant='light', + filename="models_eynollah/eynollah-main-regions_20220314", + dist=dist_url("layout"), + help="early layout", + type=KerasModel, + ), - 'num_to_char': { - '': 'characters_org.txt' - }, + EynollahModelSpec( + category="region_p2", + variant='', + filename="models_eynollah/eynollah-main-regions-aug-rotation_20210425", + dist=dist_url("layout"), + help="early layout, non-light, 2nd part", + type=KerasModel, + ), - 'characters': { - '': 'characters_org.txt' - }, + EynollahModelSpec( + category="region_1_2", + variant='', + #filename="models_eynollah/modelens_12sp_elay_0_3_4__3_6_n", + #filename="models_eynollah/modelens_earlylayout_12spaltige_2_3_5_6_7_8", + #filename="models_eynollah/modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18", + #filename="models_eynollah/modelens_1_2_4_5_early_lay_1_2_spaltige", + #filename="models_eynollah/model_3_eraly_layout_no_patches_1_2_spaltige", + filename="models_eynollah/modelens_e_l_all_sp_0_1_2_3_4_171024", + dist=dist_url("layout"), + help="early layout, light, 1-or-2-column", + type=KerasModel, + ), -} + EynollahModelSpec( + category="region_fl_np", + variant='', + #'filename="models_eynollah/modelens_full_lay_1_3_031124", + #'filename="models_eynollah/modelens_full_lay_13__3_19_241024", + #'filename="models_eynollah/model_full_lay_13_241024", + #'filename="models_eynollah/modelens_full_lay_13_17_231024", + #'filename="models_eynollah/modelens_full_lay_1_2_221024", + #'filename="models_eynollah/eynollah-full-regions-1column_20210425", + filename="models_eynollah/modelens_full_lay_1__4_3_091124", + dist=dist_url("layout"), + help="full layout / no patches", + type=KerasModel, + ), + # FIXME: Why is region_fl and region_fl_np the same model? + EynollahModelSpec( + category="region_fl", + variant='', + # filename="models_eynollah/eynollah-full-regions-3+column_20210425", + # filename="models_eynollah/model_2_full_layout_new_trans", + # filename="models_eynollah/modelens_full_lay_1_3_031124", + # filename="models_eynollah/modelens_full_lay_13__3_19_241024", + # filename="models_eynollah/model_full_lay_13_241024", + # filename="models_eynollah/modelens_full_lay_13_17_231024", + # filename="models_eynollah/modelens_full_lay_1_2_221024", + # filename="models_eynollah/modelens_full_layout_24_till_28", + # filename="models_eynollah/model_2_full_layout_new_trans", + filename="models_eynollah/modelens_full_lay_1__4_3_091124", + dist=dist_url("layout"), + help="full layout / with patches", + type=KerasModel, + ), + + EynollahModelSpec( + category="reading_order", + variant='', + #filename="models_eynollah/model_mb_ro_aug_ens_11", + #filename="models_eynollah/model_step_3200000_mb_ro", + #filename="models_eynollah/model_ens_reading_order_machine_based", + #filename="models_eynollah/model_mb_ro_aug_ens_8", + #filename="models_eynollah/model_ens_reading_order_machine_based", + filename="models_eynollah/model_eynollah_reading_order_20250824", + dist=dist_url("layout"), + type=KerasModel, + ), + + EynollahModelSpec( + category="textline", + variant='', + #filename="models_eynollah/modelens_textline_1_4_16092024", + #filename="models_eynollah/model_textline_ens_3_4_5_6_artificial", + #filename="models_eynollah/modelens_textline_1_3_4_20240915", + #filename="models_eynollah/model_textline_ens_3_4_5_6_artificial", + #filename="models_eynollah/modelens_textline_9_12_13_14_15", + #filename="models_eynollah/eynollah-textline_20210425", + filename="models_eynollah/modelens_textline_0_1__2_4_16092024", + dist=dist_url("layout"), + type=KerasModel, + ), + + EynollahModelSpec( + category="textline", + variant='light', + #filename="models_eynollah/eynollah-textline_light_20210425", + filename="models_eynollah/modelens_textline_0_1__2_4_16092024", + dist=dist_url("layout"), + type=KerasModel, + ), + + EynollahModelSpec( + category="table", + variant='', + filename="models_eynollah/eynollah-tables_20210319", + dist=dist_url("layout"), + type=KerasModel, + ), + + EynollahModelSpec( + category="table", + variant='light', + filename="models_eynollah/modelens_table_0t4_201124", + dist=dist_url("layout"), + type=KerasModel, + ), + + EynollahModelSpec( + category="ocr", + variant='', + filename="models_eynollah/model_eynollah_ocr_cnnrnn_20250930", + dist=dist_url("ocr"), + type=KerasModel, + ), + + EynollahModelSpec( + category="num_to_char", + variant='', + filename="models_eynollah/characters_org.txt", + dist=dist_url("ocr"), + type=KerasModel, + ), + + EynollahModelSpec( + category="characters", + variant='', + filename="models_eynollah/characters_org.txt", + dist=dist_url("ocr"), + type=List, + ), + + EynollahModelSpec( + category="ocr", + variant='tr', + filename="models_eynollah/model_eynollah_ocr_trocr_20250919", + dist=dist_url("trocr"), + type=KerasModel, + ), + + EynollahModelSpec( + category="trocr_processor", + variant='', + filename="models_eynollah/microsoft/trocr-base-printed", + dist=dist_url("trocr"), + type=KerasModel, + ), + + EynollahModelSpec( + category="trocr_processor", + variant='htr', + filename="models_eynollah/microsoft/trocr-base-handwritten", + dist=dist_url("trocr"), + type=TrOCRProcessor, + ), + +])# }}} class EynollahModelZoo(): """ Wrapper class that handles storage and loading of models for all eynollah runners. """ model_basedir: Path - model_versions: dict + specs: EynollahModelSpecSet def __init__( self, @@ -157,10 +332,10 @@ class EynollahModelZoo(): ) -> None: self.model_basedir = Path(basedir) self.logger = logging.getLogger('eynollah.model_zoo') - self.model_versions = deepcopy(DEFAULT_MODEL_VERSIONS) + self.specs = deepcopy(DEFAULT_MODEL_SPECS) if model_overrides: self.override_models(*model_overrides) - self._loaded: Dict[str, SomeEynollahModel] = {} + self._loaded: Dict[str, AnyModel] = {} def override_models( self, @@ -170,39 +345,24 @@ class EynollahModelZoo(): Override the default model versions """ for model_category, model_variant, model_filename in model_overrides: - if model_category not in DEFAULT_MODEL_VERSIONS: - raise ValueError(f"Unknown model_category '{model_category}', must be one of {DEFAULT_MODEL_VERSIONS.keys()}") - if model_variant not in DEFAULT_MODEL_VERSIONS[model_category]: - raise ValueError(f"Unknown variant {model_variant} for {model_category}. Known variants: {DEFAULT_MODEL_VERSIONS[model_category].keys()}") - self.logger.warning( - "Overriding default model %s ('%s' variant) from %s to %s", - model_category, - model_variant, - DEFAULT_MODEL_VERSIONS[model_category][model_variant], - model_filename - ) - self.model_versions[model_category][model_variant] = model_filename + spec = self.specs.get(model_category, model_variant) + self.logger.warning("Overriding filename for model spec %s to %s", spec, model_filename) + self.specs.get(model_category, model_variant).filename = model_filename def model_path( self, model_category: str, model_variant: str = '', - model_filename: str = '', absolute: bool = True, ) -> Path: """ - Translate model_{type,variant,filename} tuple into an absolute (or relative) Path + Translate model_{type,variant} tuple into an absolute (or relative) Path """ - if model_category not in DEFAULT_MODEL_VERSIONS: - raise ValueError(f"Unknown model_category '{model_category}', must be one of {DEFAULT_MODEL_VERSIONS.keys()}") - if model_variant not in DEFAULT_MODEL_VERSIONS[model_category]: - raise ValueError(f"Unknown variant {model_variant} for {model_category}. Known variants: {DEFAULT_MODEL_VERSIONS[model_category].keys()}") - if not model_filename: - model_filename = DEFAULT_MODEL_VERSIONS[model_category][model_variant] - if not Path(model_filename).is_absolute() and absolute: - model_path = Path(self.model_basedir).joinpath(model_filename) + spec = self.specs.get(model_category, model_variant) + if not Path(spec.filename).is_absolute() and absolute: + model_path = Path(self.model_basedir).joinpath(spec.filename) else: - model_path = Path(model_filename) + model_path = Path(spec.filename) return model_path def load_models( @@ -224,12 +384,11 @@ class EynollahModelZoo(): self, model_category: str, model_variant: str = '', - model_filename: str = '', - ) -> SomeEynollahModel: + ) -> AnyModel: """ Load any model """ - model_path = self.model_path(model_category, model_variant, model_filename) + model_path = self.model_path(model_category, model_variant) if model_path.suffix == '.h5' and Path(model_path.stem).exists(): # prefer SavedModel over HDF5 format if it exists model_path = Path(model_path.stem) @@ -259,17 +418,17 @@ class EynollahModelZoo(): assert isinstance(ret, model_type) return ret # type: ignore # FIXME: convince typing that we're returning generic type - def _load_ocr_model(self, variant: str) -> SomeEynollahModel: + def _load_ocr_model(self, variant: str) -> AnyModel: """ Load OCR model """ - ocr_model_dir = Path(self.model_basedir, self.model_versions["ocr"][variant]) + ocr_model_dir = self.model_path('ocr', variant) if variant == 'tr': return VisionEncoderDecoderModel.from_pretrained(ocr_model_dir) else: ocr_model = load_model(ocr_model_dir, compile=False) - assert isinstance(ocr_model, Model) - return Model( + assert isinstance(ocr_model, KerasModel) + return KerasModel( ocr_model.get_layer(name = "image").input, # type: ignore ocr_model.get_layer(name = "dense2").output) # type: ignore @@ -279,7 +438,7 @@ class EynollahModelZoo(): """ with open(self.model_path('ocr') / self.model_path('num_to_char', absolute=False), "r") as config_file: return json.load(config_file) - + def _load_num_to_char(self) -> StringLookup: """ Load decoder for OCR @@ -295,7 +454,7 @@ class EynollahModelZoo(): def __str__(self): return str(json.dumps({ 'basedir': str(self.model_basedir), - 'versions': self.model_versions, + 'versions': self.specs, }, indent=2)) def shutdown(self): From 04bc4a63d0365bd93b5c302abaec431219b66776 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 22 Oct 2025 16:04:48 +0200 Subject: [PATCH 407/492] reorganize model_zoo --- requirements.txt | 1 + src/eynollah/cli.py | 40 +- src/eynollah/cli_models.py | 49 +++ src/eynollah/eynollah_ocr.py | 6 +- src/eynollah/model_zoo.py | 468 ------------------------ src/eynollah/model_zoo/__init__.py | 4 + src/eynollah/model_zoo/default_specs.py | 314 ++++++++++++++++ src/eynollah/model_zoo/model_zoo.py | 189 ++++++++++ src/eynollah/model_zoo/specs.py | 55 +++ src/eynollah/model_zoo/types.py | 6 + src/eynollah/sbb_binarize.py | 2 +- 11 files changed, 627 insertions(+), 507 deletions(-) create mode 100644 src/eynollah/cli_models.py delete mode 100644 src/eynollah/model_zoo.py create mode 100644 src/eynollah/model_zoo/__init__.py create mode 100644 src/eynollah/model_zoo/default_specs.py create mode 100644 src/eynollah/model_zoo/model_zoo.py create mode 100644 src/eynollah/model_zoo/specs.py create mode 100644 src/eynollah/model_zoo/types.py diff --git a/requirements.txt b/requirements.txt index db1d7df..bbacd48 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,3 +6,4 @@ tensorflow < 2.13 numba <= 0.58.1 scikit-image biopython +tabulate diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index c7d4bd9..595f0ee 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -11,46 +11,13 @@ from eynollah.image_enhancer import Enhancer from eynollah.mb_ro_on_layout import machine_based_reading_order_on_layout from eynollah.model_zoo import EynollahModelZoo -@dataclass -class EynollahCliCtx(): - model_basedir: str - model_overrides: List[Tuple[str, str, str]] +from .cli_models import models_cli @click.group() def main(): pass -@main.command('list-models') -@click.option( - "--model", - "-m", - 'model_basedir', - help="directory of models", - type=click.Path(exists=True, file_okay=False), - # default=f"{os.environ['HOME']}/.local/share/ocrd-resources/ocrd-eynollah-segment", - required=True, -) -@click.option( - "--model-overrides", - "-mv", - help="override default versions of model categories, syntax is 'CATEGORY VARIANT PATH', e.g 'region light /path/to/model'. See eynollah list-models for the full list", - type=(str, str, str), - multiple=True, -) -@click.pass_context -def list_models( - ctx, - model_basedir: str, - model_overrides: List[Tuple[str, str, str]], -): - """ - List all the models in the zoo - """ - ctx.obj = EynollahCliCtx( - model_basedir=model_basedir, - model_overrides=model_overrides - ) - print(EynollahModelZoo(basedir=ctx.obj.model_basedir, model_overrides=ctx.obj.model_overrides)) +main.add_command(models_cli, 'models') @main.command() @click.option( @@ -143,13 +110,12 @@ def binarization( log_level, ): assert bool(input_image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." - binarizer = SbbBinarizer(model_dir) + binarizer = SbbBinarizer(model_dir, mode=mode) if log_level: binarizer.log.setLevel(getLevelName(log_level)) binarizer.run( image_path=input_image, use_patches=patches, - mode=mode, output=output, dir_in=dir_in ) diff --git a/src/eynollah/cli_models.py b/src/eynollah/cli_models.py new file mode 100644 index 0000000..b67a3ef --- /dev/null +++ b/src/eynollah/cli_models.py @@ -0,0 +1,49 @@ +from dataclasses import dataclass +from typing import List, Tuple +import click +from .model_zoo import EynollahModelZoo + +@dataclass() +class EynollahCliCtx(): + model_basedir: str + model_overrides: List[Tuple[str, str, str]] + + +@click.group() +def models_cli(): + """ + Organize models for the various runners in eynollah. + """ + +@models_cli.command('list') +@click.option( + "--model", + "-m", + 'model_basedir', + help="directory of models", + type=click.Path(exists=True, file_okay=False), + # default=f"{os.environ['HOME']}/.local/share/ocrd-resources/ocrd-eynollah-segment", + required=True, +) +@click.option( + "--model-overrides", + "-mv", + help="override default versions of model categories, syntax is 'CATEGORY VARIANT PATH', e.g 'region light /path/to/model'. See eynollah list-models for the full list", + type=(str, str, str), + multiple=True, +) +@click.pass_context +def list_models( + ctx, + model_basedir: str, + model_overrides: List[Tuple[str, str, str]], +): + """ + List all the models in the zoo + """ + ctx.obj = EynollahCliCtx( + model_basedir=model_basedir, + model_overrides=model_overrides + ) + print(EynollahModelZoo(basedir=ctx.obj.model_basedir, model_overrides=ctx.obj.model_overrides)) + diff --git a/src/eynollah/eynollah_ocr.py b/src/eynollah/eynollah_ocr.py index b021e92..cfd410c 100644 --- a/src/eynollah/eynollah_ocr.py +++ b/src/eynollah/eynollah_ocr.py @@ -18,7 +18,11 @@ from keras.models import load_model from PIL import Image, ImageDraw, ImageFont import numpy as np from eynollah.model_zoo import EynollahModelZoo -import torch +try: + import torch +except ImportError: + torch = None + from .utils import is_image_filename from .utils.resize import resize_image diff --git a/src/eynollah/model_zoo.py b/src/eynollah/model_zoo.py deleted file mode 100644 index 6bb06d3..0000000 --- a/src/eynollah/model_zoo.py +++ /dev/null @@ -1,468 +0,0 @@ -from copy import deepcopy -from dataclasses import dataclass -import json -import logging -from pathlib import Path -from typing import Dict, Optional, Set, Tuple, List, Type, TypeVar, Union - -from keras.layers import StringLookup -from keras.models import Model as KerasModel, load_model -from transformers import TrOCRProcessor, VisionEncoderDecoderModel - -from eynollah.patch_encoder import PatchEncoder, Patches - -AnyModel = Union[VisionEncoderDecoderModel, TrOCRProcessor, KerasModel, List] -T = TypeVar('T') - -# NOTE: This needs to change whenever models change -ZENODO = "https://zenodo.org/records/17295988/files" -MODELS_VERSION = "v0_7_0" - -def dist_url(dist_name: str) -> str: - return f'{ZENODO}/models_{dist_name}_${MODELS_VERSION}.zip' - -@dataclass -class EynollahModelSpec(): - """ - Describing a single model abstractly. - """ - category: str - # Relative filename to the models_eynollah directory in the dists - filename: str - # The smallest model distribution containing this model (link to Zenodo) - dist: str - type: Type[AnyModel] - variant: str = '' - help: str = '' - -class EynollahModelSpecSet(): - """ - List of all used models for eynollah. - """ - specs: List[EynollahModelSpec] - - def __init__(self, specs: List[EynollahModelSpec]) -> None: - self.specs = specs - self.categories: Set[str] = set([spec.category for spec in self.specs]) - self.variants: Dict[str, Set[str]] = { - spec.category: set([x.variant for x in self.specs if x.category == spec.category]) - for spec in self.specs - } - self._index_category_variant: Dict[Tuple[str, str], EynollahModelSpec] = { - (spec.category, spec.variant): spec - for spec in self.specs - } - - def asdict(self) -> Dict[str, Dict[str, str]]: - return { - spec.category: { - spec.variant: spec.filename - } - for spec in self.specs - } - - def get(self, category: str, variant: str) -> EynollahModelSpec: - if category not in self.categories: - raise ValueError(f"Unknown category '{category}', must be one of {self.categories}") - if variant not in self.variants[category]: - raise ValueError(f"Unknown variant {variant} for {category}. Known variants: {self.variants[category]}") - return self._index_category_variant[(category, variant)] - -DEFAULT_MODEL_SPECS = EynollahModelSpecSet([# {{{ - - EynollahModelSpec( - category="enhancement", - variant='', - filename="models_eynollah/eynollah-enhancement_20210425", - dist=dist_url("enhancement"), - type=KerasModel, - ), - - EynollahModelSpec( - category="binarization", - variant='', - filename="models_eynollah/eynollah-binarization_20210425", - dist=dist_url("binarization"), - type=KerasModel, - ), - - EynollahModelSpec( - category="binarization_multi_1", - variant='', - filename="models_eynollah/saved_model_2020_01_16/model_bin1", - dist=dist_url("binarization"), - type=KerasModel, - ), - - EynollahModelSpec( - category="binarization_multi_2", - variant='', - filename="models_eynollah/saved_model_2020_01_16/model_bin2", - dist=dist_url("binarization"), - type=KerasModel, - ), - - EynollahModelSpec( - category="binarization_multi_3", - variant='', - filename="models_eynollah/saved_model_2020_01_16/model_bin3", - dist=dist_url("binarization"), - type=KerasModel, - ), - - EynollahModelSpec( - category="binarization_multi_4", - variant='', - filename="models_eynollah/saved_model_2020_01_16/model_bin4", - dist=dist_url("binarization"), - type=KerasModel, - ), - - EynollahModelSpec( - category="col_classifier", - variant='', - filename="models_eynollah/eynollah-column-classifier_20210425", - dist=dist_url("layout"), - type=KerasModel, - ), - - EynollahModelSpec( - category="page", - variant='', - filename="models_eynollah/model_eynollah_page_extraction_20250915", - dist=dist_url("layout"), - type=KerasModel, - ), - - EynollahModelSpec( - category="region", - variant='', - filename="models_eynollah/eynollah-main-regions-ensembled_20210425", - dist=dist_url("layout"), - type=KerasModel, - ), - - EynollahModelSpec( - category="region", - variant='extract_only_images', - filename="models_eynollah/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18", - dist=dist_url("layout"), - type=KerasModel, - ), - - EynollahModelSpec( - category="region", - variant='light', - filename="models_eynollah/eynollah-main-regions_20220314", - dist=dist_url("layout"), - help="early layout", - type=KerasModel, - ), - - EynollahModelSpec( - category="region_p2", - variant='', - filename="models_eynollah/eynollah-main-regions-aug-rotation_20210425", - dist=dist_url("layout"), - help="early layout, non-light, 2nd part", - type=KerasModel, - ), - - EynollahModelSpec( - category="region_1_2", - variant='', - #filename="models_eynollah/modelens_12sp_elay_0_3_4__3_6_n", - #filename="models_eynollah/modelens_earlylayout_12spaltige_2_3_5_6_7_8", - #filename="models_eynollah/modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18", - #filename="models_eynollah/modelens_1_2_4_5_early_lay_1_2_spaltige", - #filename="models_eynollah/model_3_eraly_layout_no_patches_1_2_spaltige", - filename="models_eynollah/modelens_e_l_all_sp_0_1_2_3_4_171024", - dist=dist_url("layout"), - help="early layout, light, 1-or-2-column", - type=KerasModel, - ), - - EynollahModelSpec( - category="region_fl_np", - variant='', - #'filename="models_eynollah/modelens_full_lay_1_3_031124", - #'filename="models_eynollah/modelens_full_lay_13__3_19_241024", - #'filename="models_eynollah/model_full_lay_13_241024", - #'filename="models_eynollah/modelens_full_lay_13_17_231024", - #'filename="models_eynollah/modelens_full_lay_1_2_221024", - #'filename="models_eynollah/eynollah-full-regions-1column_20210425", - filename="models_eynollah/modelens_full_lay_1__4_3_091124", - dist=dist_url("layout"), - help="full layout / no patches", - type=KerasModel, - ), - - # FIXME: Why is region_fl and region_fl_np the same model? - EynollahModelSpec( - category="region_fl", - variant='', - # filename="models_eynollah/eynollah-full-regions-3+column_20210425", - # filename="models_eynollah/model_2_full_layout_new_trans", - # filename="models_eynollah/modelens_full_lay_1_3_031124", - # filename="models_eynollah/modelens_full_lay_13__3_19_241024", - # filename="models_eynollah/model_full_lay_13_241024", - # filename="models_eynollah/modelens_full_lay_13_17_231024", - # filename="models_eynollah/modelens_full_lay_1_2_221024", - # filename="models_eynollah/modelens_full_layout_24_till_28", - # filename="models_eynollah/model_2_full_layout_new_trans", - filename="models_eynollah/modelens_full_lay_1__4_3_091124", - dist=dist_url("layout"), - help="full layout / with patches", - type=KerasModel, - ), - - EynollahModelSpec( - category="reading_order", - variant='', - #filename="models_eynollah/model_mb_ro_aug_ens_11", - #filename="models_eynollah/model_step_3200000_mb_ro", - #filename="models_eynollah/model_ens_reading_order_machine_based", - #filename="models_eynollah/model_mb_ro_aug_ens_8", - #filename="models_eynollah/model_ens_reading_order_machine_based", - filename="models_eynollah/model_eynollah_reading_order_20250824", - dist=dist_url("layout"), - type=KerasModel, - ), - - EynollahModelSpec( - category="textline", - variant='', - #filename="models_eynollah/modelens_textline_1_4_16092024", - #filename="models_eynollah/model_textline_ens_3_4_5_6_artificial", - #filename="models_eynollah/modelens_textline_1_3_4_20240915", - #filename="models_eynollah/model_textline_ens_3_4_5_6_artificial", - #filename="models_eynollah/modelens_textline_9_12_13_14_15", - #filename="models_eynollah/eynollah-textline_20210425", - filename="models_eynollah/modelens_textline_0_1__2_4_16092024", - dist=dist_url("layout"), - type=KerasModel, - ), - - EynollahModelSpec( - category="textline", - variant='light', - #filename="models_eynollah/eynollah-textline_light_20210425", - filename="models_eynollah/modelens_textline_0_1__2_4_16092024", - dist=dist_url("layout"), - type=KerasModel, - ), - - EynollahModelSpec( - category="table", - variant='', - filename="models_eynollah/eynollah-tables_20210319", - dist=dist_url("layout"), - type=KerasModel, - ), - - EynollahModelSpec( - category="table", - variant='light', - filename="models_eynollah/modelens_table_0t4_201124", - dist=dist_url("layout"), - type=KerasModel, - ), - - EynollahModelSpec( - category="ocr", - variant='', - filename="models_eynollah/model_eynollah_ocr_cnnrnn_20250930", - dist=dist_url("ocr"), - type=KerasModel, - ), - - EynollahModelSpec( - category="num_to_char", - variant='', - filename="models_eynollah/characters_org.txt", - dist=dist_url("ocr"), - type=KerasModel, - ), - - EynollahModelSpec( - category="characters", - variant='', - filename="models_eynollah/characters_org.txt", - dist=dist_url("ocr"), - type=List, - ), - - EynollahModelSpec( - category="ocr", - variant='tr', - filename="models_eynollah/model_eynollah_ocr_trocr_20250919", - dist=dist_url("trocr"), - type=KerasModel, - ), - - EynollahModelSpec( - category="trocr_processor", - variant='', - filename="models_eynollah/microsoft/trocr-base-printed", - dist=dist_url("trocr"), - type=KerasModel, - ), - - EynollahModelSpec( - category="trocr_processor", - variant='htr', - filename="models_eynollah/microsoft/trocr-base-handwritten", - dist=dist_url("trocr"), - type=TrOCRProcessor, - ), - -])# }}} - -class EynollahModelZoo(): - """ - Wrapper class that handles storage and loading of models for all eynollah runners. - """ - model_basedir: Path - specs: EynollahModelSpecSet - - def __init__( - self, - basedir: str, - model_overrides: Optional[List[Tuple[str, str, str]]]=None, - ) -> None: - self.model_basedir = Path(basedir) - self.logger = logging.getLogger('eynollah.model_zoo') - self.specs = deepcopy(DEFAULT_MODEL_SPECS) - if model_overrides: - self.override_models(*model_overrides) - self._loaded: Dict[str, AnyModel] = {} - - def override_models( - self, - *model_overrides: Tuple[str, str, str], - ): - """ - Override the default model versions - """ - for model_category, model_variant, model_filename in model_overrides: - spec = self.specs.get(model_category, model_variant) - self.logger.warning("Overriding filename for model spec %s to %s", spec, model_filename) - self.specs.get(model_category, model_variant).filename = model_filename - - def model_path( - self, - model_category: str, - model_variant: str = '', - absolute: bool = True, - ) -> Path: - """ - Translate model_{type,variant} tuple into an absolute (or relative) Path - """ - spec = self.specs.get(model_category, model_variant) - if not Path(spec.filename).is_absolute() and absolute: - model_path = Path(self.model_basedir).joinpath(spec.filename) - else: - model_path = Path(spec.filename) - return model_path - - def load_models( - self, - *all_load_args: Union[str, Tuple[str], Tuple[str, str], Tuple[str, str, str]], - ) -> Dict: - """ - Load all models by calling load_model and return a dictionary mapping model_category to loaded model - """ - ret = {} - for load_args in all_load_args: - if isinstance(load_args, str): - ret[load_args] = self.load_model(load_args) - else: - ret[load_args[0]] = self.load_model(*load_args) - return ret - - def load_model( - self, - model_category: str, - model_variant: str = '', - ) -> AnyModel: - """ - Load any model - """ - model_path = self.model_path(model_category, model_variant) - if model_path.suffix == '.h5' and Path(model_path.stem).exists(): - # prefer SavedModel over HDF5 format if it exists - model_path = Path(model_path.stem) - if model_category == 'ocr': - model = self._load_ocr_model(variant=model_variant) - elif model_category == 'num_to_char': - model = self._load_num_to_char() - elif model_category == 'characters': - model = self._load_characters() - elif model_category == 'trocr_processor': - return TrOCRProcessor.from_pretrained(self.model_path(...)) - else: - try: - model = load_model(model_path, compile=False) - except Exception as e: - self.logger.exception(e) - model = load_model(model_path, compile=False, custom_objects={ - "PatchEncoder": PatchEncoder, "Patches": Patches}) - self._loaded[model_category] = model - return model # type: ignore - - def get(self, model_category: str, model_type: Optional[Type[T]]=None) -> T: - if model_category not in self._loaded: - raise ValueError(f'Model "{model_category} not previously loaded with "load_model(..)"') - ret = self._loaded[model_category] - if model_type: - assert isinstance(ret, model_type) - return ret # type: ignore # FIXME: convince typing that we're returning generic type - - def _load_ocr_model(self, variant: str) -> AnyModel: - """ - Load OCR model - """ - ocr_model_dir = self.model_path('ocr', variant) - if variant == 'tr': - return VisionEncoderDecoderModel.from_pretrained(ocr_model_dir) - else: - ocr_model = load_model(ocr_model_dir, compile=False) - assert isinstance(ocr_model, KerasModel) - return KerasModel( - ocr_model.get_layer(name = "image").input, # type: ignore - ocr_model.get_layer(name = "dense2").output) # type: ignore - - def _load_characters(self) -> List[str]: - """ - Load encoding for OCR - """ - with open(self.model_path('ocr') / self.model_path('num_to_char', absolute=False), "r") as config_file: - return json.load(config_file) - - def _load_num_to_char(self) -> StringLookup: - """ - Load decoder for OCR - """ - characters = self._load_characters() - # Mapping characters to integers. - char_to_num = StringLookup(vocabulary=characters, mask_token=None) - # Mapping integers back to original characters. - return StringLookup( - vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True - ) - - def __str__(self): - return str(json.dumps({ - 'basedir': str(self.model_basedir), - 'versions': self.specs, - }, indent=2)) - - def shutdown(self): - """ - Ensure that a loaded models is not referenced by ``self._loaded`` anymore - """ - if hasattr(self, '_loaded') and getattr(self, '_loaded'): - for needle in self._loaded: - if self._loaded[needle]: - del self._loaded[needle] - diff --git a/src/eynollah/model_zoo/__init__.py b/src/eynollah/model_zoo/__init__.py new file mode 100644 index 0000000..e1dc985 --- /dev/null +++ b/src/eynollah/model_zoo/__init__.py @@ -0,0 +1,4 @@ +__all__ = [ + 'EynollahModelZoo', +] +from .model_zoo import EynollahModelZoo diff --git a/src/eynollah/model_zoo/default_specs.py b/src/eynollah/model_zoo/default_specs.py new file mode 100644 index 0000000..e06c829 --- /dev/null +++ b/src/eynollah/model_zoo/default_specs.py @@ -0,0 +1,314 @@ +from .specs import EynollahModelSpec, EynollahModelSpecSet +from .types import KerasModel, TrOCRProcessor, List + +# NOTE: This needs to change whenever models/versions change +ZENODO = "https://zenodo.org/records/17295988/files" +MODELS_VERSION = "v0_7_0" + +def dist_url(dist_name: str) -> str: + return f'{ZENODO}/models_{dist_name}_{MODELS_VERSION}.zip' + +DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ + + EynollahModelSpec( + category="enhancement", + variant='', + filename="models_eynollah/eynollah-enhancement_20210425", + dists=['enhancement', 'layout'], + dist_url=dist_url("enhancement"), + type=KerasModel, + ), + + EynollahModelSpec( + category="binarization", + variant='', + filename="models_eynollah/eynollah-binarization-hybrid_20230504", + dists=['layout', 'binarization'], + dist_url=dist_url("binarization"), + type=KerasModel, + ), + + EynollahModelSpec( + category="binarization", + variant='20210309', + filename="models_eynollah/eynollah-binarization_20210309", + dists=['binarization'], + dist_url=dist_url("binarization"), + type=KerasModel, + ), + + EynollahModelSpec( + category="binarization", + variant='augment', + filename="models_eynollah/eynollah-binarization_20210425", + dists=['binarization'], + dist_url=dist_url("binarization"), + type=KerasModel, + ), + + EynollahModelSpec( + category="binarization_multi_1", + variant='', + filename="models_eynollah/eynollah-binarization-multi_2020_01_16/model_bin1", + dist_url=dist_url("binarization"), + dists=['binarization'], + type=KerasModel, + ), + + EynollahModelSpec( + category="binarization_multi_2", + variant='', + filename="models_eynollah/eynollah-binarization-multi_2020_01_16/model_bin2", + dist_url=dist_url("binarization"), + dists=['binarization'], + type=KerasModel, + ), + + EynollahModelSpec( + category="binarization_multi_3", + variant='', + filename="models_eynollah/eynollah-binarization-multi_2020_01_16/model_bin3", + dist_url=dist_url("binarization"), + dists=['binarization'], + type=KerasModel, + ), + + EynollahModelSpec( + category="binarization_multi_4", + variant='', + filename="models_eynollah/eynollah-binarization-multi_2020_01_16/model_bin4", + dist_url=dist_url("binarization"), + dists=['binarization'], + type=KerasModel, + ), + + EynollahModelSpec( + category="col_classifier", + variant='', + filename="models_eynollah/eynollah-column-classifier_20210425", + dist_url=dist_url("layout"), + dists=['layout'], + type=KerasModel, + ), + + EynollahModelSpec( + category="page", + variant='', + filename="models_eynollah/model_eynollah_page_extraction_20250915", + dist_url=dist_url("layout"), + dists=['layout'], + type=KerasModel, + ), + + EynollahModelSpec( + category="region", + variant='', + filename="models_eynollah/eynollah-main-regions-ensembled_20210425", + dist_url=dist_url("layout"), + dists=['layout'], + type=KerasModel, + ), + + EynollahModelSpec( + category="region", + variant='extract_only_images', + filename="models_eynollah/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18", + dist_url=dist_url("layout"), + dists=['layout'], + type=KerasModel, + ), + + EynollahModelSpec( + category="region", + variant='light', + filename="models_eynollah/eynollah-main-regions_20220314", + dist_url=dist_url("layout"), + help="early layout", + dists=['layout'], + type=KerasModel, + ), + + EynollahModelSpec( + category="region_p2", + variant='', + filename="models_eynollah/eynollah-main-regions-aug-rotation_20210425", + dist_url=dist_url("layout"), + help="early layout, non-light, 2nd part", + dists=['layout'], + type=KerasModel, + ), + + EynollahModelSpec( + category="region_1_2", + variant='', + #filename="models_eynollah/modelens_12sp_elay_0_3_4__3_6_n", + #filename="models_eynollah/modelens_earlylayout_12spaltige_2_3_5_6_7_8", + #filename="models_eynollah/modelens_early12_sp_2_3_5_6_7_8_9_10_12_14_15_16_18", + #filename="models_eynollah/modelens_1_2_4_5_early_lay_1_2_spaltige", + #filename="models_eynollah/model_3_eraly_layout_no_patches_1_2_spaltige", + filename="models_eynollah/modelens_e_l_all_sp_0_1_2_3_4_171024", + dist_url=dist_url("layout"), + dists=['layout'], + help="early layout, light, 1-or-2-column", + type=KerasModel, + ), + + EynollahModelSpec( + category="region_fl_np", + variant='', + #'filename="models_eynollah/modelens_full_lay_1_3_031124", + #'filename="models_eynollah/modelens_full_lay_13__3_19_241024", + #'filename="models_eynollah/model_full_lay_13_241024", + #'filename="models_eynollah/modelens_full_lay_13_17_231024", + #'filename="models_eynollah/modelens_full_lay_1_2_221024", + #'filename="models_eynollah/eynollah-full-regions-1column_20210425", + filename="models_eynollah/modelens_full_lay_1__4_3_091124", + dist_url=dist_url("layout"), + help="full layout / no patches", + dists=['layout'], + type=KerasModel, + ), + + # FIXME: Why is region_fl and region_fl_np the same model? + EynollahModelSpec( + category="region_fl", + variant='', + # filename="models_eynollah/eynollah-full-regions-3+column_20210425", + # filename="models_eynollah/model_2_full_layout_new_trans", + # filename="models_eynollah/modelens_full_lay_1_3_031124", + # filename="models_eynollah/modelens_full_lay_13__3_19_241024", + # filename="models_eynollah/model_full_lay_13_241024", + # filename="models_eynollah/modelens_full_lay_13_17_231024", + # filename="models_eynollah/modelens_full_lay_1_2_221024", + # filename="models_eynollah/modelens_full_layout_24_till_28", + # filename="models_eynollah/model_2_full_layout_new_trans", + filename="models_eynollah/modelens_full_lay_1__4_3_091124", + dist_url=dist_url("layout"), + help="full layout / with patches", + dists=['layout'], + type=KerasModel, + ), + + EynollahModelSpec( + category="reading_order", + variant='', + #filename="models_eynollah/model_mb_ro_aug_ens_11", + #filename="models_eynollah/model_step_3200000_mb_ro", + #filename="models_eynollah/model_ens_reading_order_machine_based", + #filename="models_eynollah/model_mb_ro_aug_ens_8", + #filename="models_eynollah/model_ens_reading_order_machine_based", + filename="models_eynollah/model_eynollah_reading_order_20250824", + dist_url=dist_url("reading_order"), + dists=['layout', 'reading_order'], + type=KerasModel, + ), + + EynollahModelSpec( + category="textline", + variant='', + #filename="models_eynollah/modelens_textline_1_4_16092024", + #filename="models_eynollah/model_textline_ens_3_4_5_6_artificial", + #filename="models_eynollah/modelens_textline_1_3_4_20240915", + #filename="models_eynollah/model_textline_ens_3_4_5_6_artificial", + #filename="models_eynollah/modelens_textline_9_12_13_14_15", + #filename="models_eynollah/eynollah-textline_20210425", + filename="models_eynollah/modelens_textline_0_1__2_4_16092024", + dist_url=dist_url("layout"), + dists=['layout'], + type=KerasModel, + ), + + EynollahModelSpec( + category="textline", + variant='light', + #filename="models_eynollah/eynollah-textline_light_20210425", + filename="models_eynollah/modelens_textline_0_1__2_4_16092024", + dist_url=dist_url("layout"), + dists=['layout'], + type=KerasModel, + ), + + EynollahModelSpec( + category="table", + variant='', + filename="models_eynollah/eynollah-tables_20210319", + dist_url=dist_url("layout"), + dists=['layout'], + type=KerasModel, + ), + + EynollahModelSpec( + category="table", + variant='light', + filename="models_eynollah/modelens_table_0t4_201124", + dist_url=dist_url("layout"), + dists=['layout'], + type=KerasModel, + ), + + EynollahModelSpec( + category="ocr", + variant='', + filename="models_eynollah/model_eynollah_ocr_cnnrnn_20250930", + dist_url=dist_url("ocr"), + dists=['layout', 'ocr'], + type=KerasModel, + ), + + EynollahModelSpec( + category="ocr", + variant='degraded', + filename="models_eynollah/model_eynollah_ocr_cnnrnn__degraded_20250805/", + help="slightly better at degraded Fraktur", + dist_url=dist_url("ocr"), + dists=['ocr'], + type=KerasModel, + ), + + EynollahModelSpec( + category="num_to_char", + variant='', + filename="characters_org.txt", + dist_url=dist_url("ocr"), + dists=['ocr'], + type=KerasModel, + ), + + EynollahModelSpec( + category="characters", + variant='', + filename="characters_org.txt", + dist_url=dist_url("ocr"), + dists=['ocr'], + type=list, + ), + + EynollahModelSpec( + category="ocr", + variant='tr', + filename="models_eynollah/model_eynollah_ocr_trocr_20250919", + dist_url=dist_url("trocr"), + help='much slower transformer-based', + dists=['trocr'], + type=KerasModel, + ), + + EynollahModelSpec( + category="trocr_processor", + variant='', + filename="models_eynollah/microsoft/trocr-base-printed", + dist_url=dist_url("trocr"), + dists=['trocr'], + type=KerasModel, + ), + + EynollahModelSpec( + category="trocr_processor", + variant='htr', + filename="models_eynollah/microsoft/trocr-base-handwritten", + dist_url=dist_url("trocr"), + dists=['trocr'], + type=TrOCRProcessor, + ), + +]) diff --git a/src/eynollah/model_zoo/model_zoo.py b/src/eynollah/model_zoo/model_zoo.py new file mode 100644 index 0000000..7cfaa3a --- /dev/null +++ b/src/eynollah/model_zoo/model_zoo.py @@ -0,0 +1,189 @@ +import json +import logging +from copy import deepcopy +from pathlib import Path +from typing import Dict, List, Optional, Tuple, Type, Union + +from keras.layers import StringLookup +from keras.models import Model as KerasModel +from keras.models import load_model +from tabulate import tabulate +from transformers import TrOCRProcessor, VisionEncoderDecoderModel + +from ..patch_encoder import PatchEncoder, Patches +from .specs import EynollahModelSpecSet +from .default_specs import DEFAULT_MODEL_SPECS +from .types import AnyModel, T + + +class EynollahModelZoo: + """ + Wrapper class that handles storage and loading of models for all eynollah runners. + """ + + model_basedir: Path + specs: EynollahModelSpecSet + + def __init__( + self, + basedir: str, + model_overrides: Optional[List[Tuple[str, str, str]]] = None, + ) -> None: + self.model_basedir = Path(basedir) + self.logger = logging.getLogger('eynollah.model_zoo') + self.specs = deepcopy(DEFAULT_MODEL_SPECS) + if model_overrides: + self.override_models(*model_overrides) + self._loaded: Dict[str, AnyModel] = {} + + def override_models( + self, + *model_overrides: Tuple[str, str, str], + ): + """ + Override the default model versions + """ + for model_category, model_variant, model_filename in model_overrides: + spec = self.specs.get(model_category, model_variant) + self.logger.warning("Overriding filename for model spec %s to %s", spec, model_filename) + self.specs.get(model_category, model_variant).filename = model_filename + + def model_path( + self, + model_category: str, + model_variant: str = '', + absolute: bool = True, + ) -> Path: + """ + Translate model_{type,variant} tuple into an absolute (or relative) Path + """ + spec = self.specs.get(model_category, model_variant) + if spec.category in ('characters', 'num_to_char'): + return self.model_path('ocr') / spec.filename + if not Path(spec.filename).is_absolute() and absolute: + model_path = Path(self.model_basedir).joinpath(spec.filename) + else: + model_path = Path(spec.filename) + return model_path + + def load_models( + self, + *all_load_args: Union[str, Tuple[str], Tuple[str, str], Tuple[str, str, str]], + ) -> Dict: + """ + Load all models by calling load_model and return a dictionary mapping model_category to loaded model + """ + ret = {} + for load_args in all_load_args: + if isinstance(load_args, str): + ret[load_args] = self.load_model(load_args) + else: + ret[load_args[0]] = self.load_model(*load_args) + return ret + + def load_model( + self, + model_category: str, + model_variant: str = '', + ) -> AnyModel: + """ + Load any model + """ + model_path = self.model_path(model_category, model_variant) + if model_path.suffix == '.h5' and Path(model_path.stem).exists(): + # prefer SavedModel over HDF5 format if it exists + model_path = Path(model_path.stem) + if model_category == 'ocr': + model = self._load_ocr_model(variant=model_variant) + elif model_category == 'num_to_char': + model = self._load_num_to_char() + elif model_category == 'characters': + model = self._load_characters() + elif model_category == 'trocr_processor': + return TrOCRProcessor.from_pretrained(self.model_path(...)) + else: + try: + model = load_model(model_path, compile=False) + except Exception as e: + self.logger.exception(e) + model = load_model( + model_path, compile=False, custom_objects={"PatchEncoder": PatchEncoder, "Patches": Patches} + ) + self._loaded[model_category] = model + return model # type: ignore + + def get(self, model_category: str, model_type: Optional[Type[T]] = None) -> T: + if model_category not in self._loaded: + raise ValueError(f'Model "{model_category} not previously loaded with "load_model(..)"') + ret = self._loaded[model_category] + if model_type: + assert isinstance(ret, model_type) + return ret # type: ignore # FIXME: convince typing that we're returning generic type + + def _load_ocr_model(self, variant: str) -> AnyModel: + """ + Load OCR model + """ + ocr_model_dir = self.model_path('ocr', variant) + if variant == 'tr': + return VisionEncoderDecoderModel.from_pretrained(ocr_model_dir) + else: + ocr_model = load_model(ocr_model_dir, compile=False) + assert isinstance(ocr_model, KerasModel) + return KerasModel( + ocr_model.get_layer(name="image").input, # type: ignore + ocr_model.get_layer(name="dense2").output, # type: ignore + ) + + def _load_characters(self) -> List[str]: + """ + Load encoding for OCR + """ + with open(self.model_path('num_to_char'), "r") as config_file: + return json.load(config_file) + + def _load_num_to_char(self) -> StringLookup: + """ + Load decoder for OCR + """ + characters = self._load_characters() + # Mapping characters to integers. + char_to_num = StringLookup(vocabulary=characters, mask_token=None) + # Mapping integers back to original characters. + return StringLookup(vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True) + + def __str__(self): + return tabulate( + [ + [ + spec.type.__name__, + spec.category, + spec.variant, + spec.help, + ', '.join(spec.dists), + f'Yes, at {self.model_path(spec.category, spec.variant)}' + if self.model_path(spec.category, spec.variant).exists() + else f'No, download {spec.dist_url}', + # self.model_path(spec.category, spec.variant), + ] + for spec in sorted(self.specs.specs, key=lambda x: x.category + '0' + x.variant) + ], + headers=[ + 'Type', + 'Category', + 'Variant', + 'Help', + 'Used in', + 'Installed', + ], + tablefmt='github', + ) + + def shutdown(self): + """ + Ensure that a loaded models is not referenced by ``self._loaded`` anymore + """ + if hasattr(self, '_loaded') and getattr(self, '_loaded'): + for needle in self._loaded: + if self._loaded[needle]: + del self._loaded[needle] diff --git a/src/eynollah/model_zoo/specs.py b/src/eynollah/model_zoo/specs.py new file mode 100644 index 0000000..4f8cffa --- /dev/null +++ b/src/eynollah/model_zoo/specs.py @@ -0,0 +1,55 @@ +from dataclasses import dataclass +from typing import Dict, List, Set, Tuple, Type +from .types import AnyModel + + +@dataclass +class EynollahModelSpec(): + """ + Describing a single model abstractly. + """ + category: str + # Relative filename to the models_eynollah directory in the dists + filename: str + # basename of the ZIP files that should contain this model + dists: List[str] + # URL to the smallest model distribution containing this model (link to Zenodo) + dist_url: str + type: Type[AnyModel] + variant: str = '' + help: str = '' + +class EynollahModelSpecSet(): + """ + List of all used models for eynollah. + """ + specs: List[EynollahModelSpec] + + def __init__(self, specs: List[EynollahModelSpec]) -> None: + self.specs = specs + self.categories: Set[str] = set([spec.category for spec in self.specs]) + self.variants: Dict[str, Set[str]] = { + spec.category: set([x.variant for x in self.specs if x.category == spec.category]) + for spec in self.specs + } + self._index_category_variant: Dict[Tuple[str, str], EynollahModelSpec] = { + (spec.category, spec.variant): spec + for spec in self.specs + } + + def asdict(self) -> Dict[str, Dict[str, str]]: + return { + spec.category: { + spec.variant: spec.filename + } + for spec in self.specs + } + + def get(self, category: str, variant: str) -> EynollahModelSpec: + if category not in self.categories: + raise ValueError(f"Unknown category '{category}', must be one of {self.categories}") + if variant not in self.variants[category]: + raise ValueError(f"Unknown variant {variant} for {category}. Known variants: {self.variants[category]}") + return self._index_category_variant[(category, variant)] + + diff --git a/src/eynollah/model_zoo/types.py b/src/eynollah/model_zoo/types.py new file mode 100644 index 0000000..5c3685e --- /dev/null +++ b/src/eynollah/model_zoo/types.py @@ -0,0 +1,6 @@ +from typing import List, TypeVar, Union +from keras.models import Model as KerasModel +from transformers import TrOCRProcessor, VisionEncoderDecoderModel + +AnyModel = Union[VisionEncoderDecoderModel, TrOCRProcessor, KerasModel, List] +T = TypeVar('T') diff --git a/src/eynollah/sbb_binarize.py b/src/eynollah/sbb_binarize.py index f8898a1..48dc7b1 100644 --- a/src/eynollah/sbb_binarize.py +++ b/src/eynollah/sbb_binarize.py @@ -24,7 +24,7 @@ def resize_image(img_in, input_height, input_width): class SbbBinarizer: - def __init__(self, model_dir, mode='single', logger=None): + def __init__(self, model_dir: str, mode: str, logger=None): if mode not in ('single', 'multi'): raise ValueError(f"'mode' must be either 'multi' or 'single', not {mode}") self.log = logger if logger else logging.getLogger('SbbBinarizer') From 883546a6b8bf9dcdde080fa452f61433f057e701 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 22 Oct 2025 16:38:05 +0200 Subject: [PATCH 408/492] eynollah models package --- src/eynollah/cli_models.py | 80 ++++++++++++++++++++++------- src/eynollah/model_zoo/model_zoo.py | 2 +- src/eynollah/model_zoo/specs.py | 2 +- 3 files changed, 64 insertions(+), 20 deletions(-) diff --git a/src/eynollah/cli_models.py b/src/eynollah/cli_models.py index b67a3ef..595c499 100644 --- a/src/eynollah/cli_models.py +++ b/src/eynollah/cli_models.py @@ -1,21 +1,19 @@ from dataclasses import dataclass -from typing import List, Tuple +from pathlib import Path +from typing import List, Set, Tuple import click + +from eynollah.model_zoo.default_specs import MODELS_VERSION from .model_zoo import EynollahModelZoo + @dataclass() -class EynollahCliCtx(): - model_basedir: str - model_overrides: List[Tuple[str, str, str]] +class EynollahCliCtx: + model_zoo: EynollahModelZoo @click.group() -def models_cli(): - """ - Organize models for the various runners in eynollah. - """ - -@models_cli.command('list') +@click.pass_context @click.option( "--model", "-m", @@ -32,18 +30,64 @@ def models_cli(): type=(str, str, str), multiple=True, ) -@click.pass_context -def list_models( +def models_cli( ctx, model_basedir: str, model_overrides: List[Tuple[str, str, str]], ): """ - List all the models in the zoo + Organize models for the various runners in eynollah. """ - ctx.obj = EynollahCliCtx( - model_basedir=model_basedir, - model_overrides=model_overrides - ) - print(EynollahModelZoo(basedir=ctx.obj.model_basedir, model_overrides=ctx.obj.model_overrides)) + ctx.obj = EynollahCliCtx(model_zoo=EynollahModelZoo(basedir=model_basedir, model_overrides=model_overrides)) + +@models_cli.command('list') +@click.pass_context +def list_models( + ctx, +): + """ + List all the models in the zoo + """ + print(ctx.obj.model_zoo) + + +@models_cli.command('package') +@click.option( + '--set-version', '-V', 'version', help="Version to use for packaging", default=MODELS_VERSION, show_default=True +) +@click.argument('output_dir') +@click.pass_context +def package( + ctx, + version, + output_dir, +): + """ + Generate shell code to copy all the models in the zoo into properly named folders in OUTPUT_DIR for distribution. + + eynollah models -m SRC package OUTPUT_DIR + + SRC should contain a directory "models_eynollah" containing all the models. + """ + mkdirs: Set[Path] = set([]) + copies: Set[Tuple[Path, Path]] = set([]) + for spec in ctx.obj.model_zoo.specs.specs: + # skip these as they are dependent on the ocr model + if spec.category in ('num_to_char', 'characters'): + continue + src: Path = ctx.obj.model_zoo.model_path(spec.category, spec.variant) + # Only copy the top-most directory relative to models_eynollah + while src.parent.name != 'models_eynollah': + src = src.parent + for dist in spec.dists: + dist_dir = Path(f"{output_dir}/models_{dist}_{version}/models_eynollah") + copies.add((src, dist_dir)) + mkdirs.add(dist_dir) + for dir in mkdirs: + print(f"mkdir -p {dir}") + for (src, dst) in copies: + print(f"cp -r {src} {dst}") + for dir in mkdirs: + zip_path = Path(f'../{dir.parent.name}.zip') + print(f"(cd {dir}/..; zip -r {zip_path} models_eynollah)") diff --git a/src/eynollah/model_zoo/model_zoo.py b/src/eynollah/model_zoo/model_zoo.py index 7cfaa3a..8948a1f 100644 --- a/src/eynollah/model_zoo/model_zoo.py +++ b/src/eynollah/model_zoo/model_zoo.py @@ -166,7 +166,7 @@ class EynollahModelZoo: else f'No, download {spec.dist_url}', # self.model_path(spec.category, spec.variant), ] - for spec in sorted(self.specs.specs, key=lambda x: x.category + '0' + x.variant) + for spec in self.specs.specs ], headers=[ 'Type', diff --git a/src/eynollah/model_zoo/specs.py b/src/eynollah/model_zoo/specs.py index 4f8cffa..322afa4 100644 --- a/src/eynollah/model_zoo/specs.py +++ b/src/eynollah/model_zoo/specs.py @@ -26,7 +26,7 @@ class EynollahModelSpecSet(): specs: List[EynollahModelSpec] def __init__(self, specs: List[EynollahModelSpec]) -> None: - self.specs = specs + self.specs = sorted(specs, key=lambda x: x.category + '0' + x.variant) self.categories: Set[str] = set([spec.category for spec in self.specs]) self.variants: Dict[str, Set[str]] = { spec.category: set([x.variant for x in self.specs if x.category == spec.category]) From 874cfc247fe451510d444db46553cb700cf89138 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 22 Oct 2025 17:56:18 +0200 Subject: [PATCH 409/492] . --- .github/workflows/test-eynollah.yml | 60 +++++++++++++---------------- 1 file changed, 27 insertions(+), 33 deletions(-) diff --git a/.github/workflows/test-eynollah.yml b/.github/workflows/test-eynollah.yml index 466e690..5b22fd1 100644 --- a/.github/workflows/test-eynollah.yml +++ b/.github/workflows/test-eynollah.yml @@ -24,61 +24,52 @@ jobs: sudo rm -rf "$AGENT_TOOLSDIRECTORY" df -h - uses: actions/checkout@v4 - - uses: actions/cache/restore@v4 - id: seg_model_cache + + - name: Lint with ruff + uses: astral-sh/ruff-action@v3 with: - path: models_layout_v0_5_0 - key: seg-models - - uses: actions/cache/restore@v4 - id: ocr_model_cache + src: "./src" + + - name: Try to restore models_eynollah + uses: actions/cache/restore@v4 + id: all_model_cache with: - path: models_ocr_v0_5_1 - key: ocr-models - - uses: actions/cache/restore@v4 - id: bin_model_cache - with: - path: default-2021-03-09 - key: bin-models + path: models_eynollah + key: models_eynollah + - name: Download models - if: steps.seg_model_cache.outputs.cache-hit != 'true' || steps.bin_model_cache.outputs.cache-hit != 'true' || steps.ocr_model_cache.outputs.cache-hit != true - run: make models + if: steps.all_model_cache.outputs.cache-hit != 'true' + run: | + make models + ls -la models_eynollah + - uses: actions/cache/save@v4 - if: steps.seg_model_cache.outputs.cache-hit != 'true' + if: steps.all_model_cache.outputs.cache-hit != 'true' with: - path: models_layout_v0_5_0 - key: seg-models - - uses: actions/cache/save@v4 - if: steps.ocr_model_cache.outputs.cache-hit != 'true' - with: - path: models_ocr_v0_5_1 - key: ocr-models - - uses: actions/cache/save@v4 - if: steps.bin_model_cache.outputs.cache-hit != 'true' - with: - path: default-2021-03-09 - key: bin-models + path: models_eynollah + key: models_eynollah + - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} + - name: Install dependencies run: | python -m pip install --upgrade pip make install-dev EXTRAS=OCR,plotting make deps-test EXTRAS=OCR,plotting - ls -l models_* - - name: Lint with ruff - uses: astral-sh/ruff-action@v3 - with: - src: "./src" + - name: Test with pytest run: make coverage PYTEST_ARGS="-vv --junitxml=pytest.xml" + - name: Get coverage results run: | coverage report --format=markdown >> $GITHUB_STEP_SUMMARY coverage html coverage json coverage xml + - name: Store coverage results uses: actions/upload-artifact@v4 with: @@ -88,12 +79,15 @@ jobs: pytest.xml coverage.xml coverage.json + - name: Upload coverage results uses: codecov/codecov-action@v4 with: files: coverage.xml fail_ci_if_error: false + - name: Test standalone CLI run: make smoke-test + - name: Test OCR-D CLI run: make ocrd-test From 2fc723d292093cdfb263e2d6681e478d7018b953 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 22 Oct 2025 18:29:14 +0200 Subject: [PATCH 410/492] extend README --- README.md | 66 ++++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 46 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index 3ba5086..c6ba0e4 100644 --- a/README.md +++ b/README.md @@ -91,24 +91,35 @@ eynollah layout \ The following options can be used to further configure the processing: -| option | description | -|-------------------|:-------------------------------------------------------------------------------| -| `-fl` | full layout analysis including all steps and segmentation classes | -| `-light` | lighter and faster but simpler method for main region detection and deskewing | -| `-tll` | this indicates the light textline and should be passed with light version | -| `-tab` | apply table detection | -| `-ae` | apply enhancement (the resulting image is saved to the output directory) | -| `-as` | apply scaling | -| `-cl` | apply contour detection for curved text lines instead of bounding boxes | -| `-ib` | apply binarization (the resulting image is saved to the output directory) | -| `-ep` | enable plotting (MUST always be used with `-sl`, `-sd`, `-sa`, `-si` or `-ae`) | -| `-eoi` | extract only images to output directory (other processing will not be done) | -| `-ho` | ignore headers for reading order dectection | -| `-si ` | save image regions detected to this directory | -| `-sd ` | save deskewed image to this directory | -| `-sl ` | save layout prediction as plot to this directory | -| `-sp ` | save cropped page image to this directory | -| `-sa ` | save all (plot, enhanced/binary image, layout) to this directory | +| option | description | +|-------------------|:------------------------------------------------------------------------------- | +| `-fl` | full layout analysis including all steps and segmentation classes (recommended) | +| `-light` | lighter and faster but simpler method for main region detection and deskewing (recommended) | +| `-tll` | this indicates the light textline and should be passed with light version (recommended) | +| `-tab` | apply table detection | +| `-ae` | apply enhancement (the resulting image is saved to the output directory) | +| `-as` | apply scaling | +| `-cl` | apply contour detection for curved text lines instead of bounding boxes | +| `-ib` | apply binarization (the resulting image is saved to the output directory) | +| `-ep` | enable plotting (MUST always be used with `-sl`, `-sd`, `-sa`, `-si` or `-ae`) | +| `-eoi` | extract only images to output directory (other processing will not be done) | +| `-ho` | ignore headers for reading order dectection | +| `-si ` | save image regions detected to this directory | +| `-sd ` | save deskewed image to this directory | +| `-sl ` | save layout prediction as plot to this directory | +| `-sp ` | save cropped page image to this directory | +| `-sa ` | save all (plot, enhanced/binary image, layout) to this directory | +| `-thart` | threshold of artifical class in the case of textline detection. The default value is 0.1 | +| `-tharl` | threshold of artifical class in the case of layout detection. The default value is 0.1 | +| `-ocr` | do ocr | +| `-tr` | apply transformer ocr. Default model is a CNN-RNN model | +| `-bs_ocr` | ocr inference batch size. Default bs for trocr and cnn_rnn models are 2 and 8 respectively | +| `-ncu` | upper limit of columns in document image | +| `-ncl` | lower limit of columns in document image | +| `-slro` | skip layout detection and reading order | +| `-romb` | apply machine based reading order detection | +| `-ipe` | ignore page extraction | + If no further option is set, the tool performs layout detection of main regions (background, text, images, separators and marginals). @@ -124,7 +135,7 @@ The command-line interface for binarization can be called like this: eynollah binarization \ -i | -di \ -o \ - -m \ + -m ``` ### OCR @@ -138,9 +149,24 @@ eynollah ocr \ -i | -di \ -dx \ -o \ - -m | --model_name \ + -m | --model_name ``` +The following options can be used to further configure the ocr processing: + +| option | description | +|-------------------|:------------------------------------------------------------------------------- | +| `-dib` | directory of bins(files type must be '.png'). Prediction with both RGB and bins. | +| `-doit` | Directory containing output images rendered with the predicted text | +| `--model_name` | Specific model file path to use for OCR | +| `-trocr` | transformer ocr will be applied, otherwise cnn_rnn model | +| `-etit` | textlines images and text in xml will be exported into output dir (OCR training data) | +| `-nmtc` | cropped textline images will not be masked with textline contour | +| `-bs` | ocr inference batch size. Default bs for trocr and cnn_rnn models are 2 and 8 respectively | +| `-ds_pref` | add an abbrevation of dataset name to generated training data | +| `-min_conf` | minimum OCR confidence value. OCRs with textline conf lower than this will be ignored | + + ### Machine-based-reading-order The machine-based reading-order module employs a pretrained model to identify the reading order from layouts represented in PAGE-XML files. From ab9ddd5214f4161038a48193df94b4cd363729f8 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 22 Oct 2025 18:41:15 +0200 Subject: [PATCH 411/492] OCR examples are added to README --- README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/README.md b/README.md index c6ba0e4..405cab4 100644 --- a/README.md +++ b/README.md @@ -140,6 +140,16 @@ eynollah binarization \ ### OCR +

+ Input Image + Output Image +

+ +

+ Input Image + Output Image +

+ The OCR module performs text recognition using either a CNN-RNN model or a Transformer model. The command-line interface for OCR can be called like this: From 59eb4fd3bee8199155998cffc75b47931dc8bb33 Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 22 Oct 2025 19:04:01 +0200 Subject: [PATCH 412/492] images with ro are added to readme --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 405cab4..e8a2721 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,11 @@ ![](https://user-images.githubusercontent.com/952378/102350683-8a74db80-3fa5-11eb-8c7e-f743f7d6eae2.jpg) +

+ Input Image + Output Image +

+ ## Features * Support for 10 distinct segmentation classes: * background, [page border](https://ocr-d.de/en/gt-guidelines/trans/lyRand.html), [text region](https://ocr-d.de/en/gt-guidelines/trans/lytextregion.html#textregionen__textregion_), [text line](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_TextLineType.html), [header](https://ocr-d.de/en/gt-guidelines/trans/lyUeberschrift.html), [image](https://ocr-d.de/en/gt-guidelines/trans/lyBildbereiche.html), [separator](https://ocr-d.de/en/gt-guidelines/trans/lySeparatoren.html), [marginalia](https://ocr-d.de/en/gt-guidelines/trans/lyMarginalie.html), [initial](https://ocr-d.de/en/gt-guidelines/trans/lyInitiale.html), [table](https://ocr-d.de/en/gt-guidelines/trans/lyTabellen.html) From b56bb4428444aa67d43d759f319704393214921e Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 22 Oct 2025 21:30:06 +0200 Subject: [PATCH 413/492] providing ocr model evaluation metrics --- docs/models.md | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/docs/models.md b/docs/models.md index 7f83b33..a42cdb2 100644 --- a/docs/models.md +++ b/docs/models.md @@ -157,6 +157,38 @@ The model extracts the reading order of text regions from the layout by classify ### OCR We have trained three OCR models: two CNN-RNN–based models and one transformer-based TrOCR model. The CNN-RNN models are generally faster and provide better results in most cases, though their performance decreases with heavily degraded images. The TrOCR model, on the other hand, is computationally expensive and slower during inference, but it can possibly produce better results on strongly degraded images. + +#### CNN-RNN model: model_eynollah_ocr_cnnrnn_20250805 + +This model is trained on data where most of the samples are in Fraktur german script. + +| Dataset | Input | CER | WER | +|-----------------------|:-------|:-----------|:----------| +| OCR-D-GT-Archiveform | BIN | 0.02147 | 0.05685 | +| OCR-D-GT-Archiveform | RGB | | | + +#### CNN-RNN model: model_eynollah_ocr_cnnrnn_20250904 (Default) + +Compared to the model_eynollah_ocr_cnnrnn_20250805 model, this model is trained on a larger proportion of Antiqua data and achieves superior performance. + +| Dataset | Input | CER | WER | +|-----------------------|:------------|:-----------|:----------| +| OCR-D-GT-Archiveform | BIN | 0.01635 | 0.05410 | +| OCR-D-GT-Archiveform | RGB | 0.01471 | 0.05813 | +| BLN600 | RGB | 0.04409 | 0.08879 | +| BLN600 | Enhanced | 0.03599 | 0.06244 | + + +#### Transformer OCR model: model_eynollah_ocr_trocr_20250919 + +This transformer OCR model is trained on the same data as model_eynollah_ocr_trocr_20250919. + +| Dataset | Input | CER | WER | +|-----------------------|:------------|:-----------|:----------| +| OCR-D-GT-Archiveform | BIN | 0.01841 | 0.05589 | +| OCR-D-GT-Archiveform | RGB | | | +| BLN600 | RGB | 0.06347 | 0.13853 | + ## Heuristic methods Additionally, some heuristic methods are employed to further improve the model predictions: From 7b7714af2e3a40d18448a5dda6e7f624016c9eac Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 22 Oct 2025 22:42:37 +0200 Subject: [PATCH 414/492] completing ocr evaluations metric --- docs/models.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/models.md b/docs/models.md index a42cdb2..7482043 100644 --- a/docs/models.md +++ b/docs/models.md @@ -165,7 +165,7 @@ This model is trained on data where most of the samples are in Fraktur german sc | Dataset | Input | CER | WER | |-----------------------|:-------|:-----------|:----------| | OCR-D-GT-Archiveform | BIN | 0.02147 | 0.05685 | -| OCR-D-GT-Archiveform | RGB | | | +| OCR-D-GT-Archiveform | RGB | 0.01636 | 0.06285 | #### CNN-RNN model: model_eynollah_ocr_cnnrnn_20250904 (Default) @@ -186,7 +186,7 @@ This transformer OCR model is trained on the same data as model_eynollah_ocr_tro | Dataset | Input | CER | WER | |-----------------------|:------------|:-----------|:----------| | OCR-D-GT-Archiveform | BIN | 0.01841 | 0.05589 | -| OCR-D-GT-Archiveform | RGB | | | +| OCR-D-GT-Archiveform | RGB | 0.01552 | 0.06177 | | BLN600 | RGB | 0.06347 | 0.13853 | ## Heuristic methods From d0ad7a98b723ba494eee107e8fef388c444768bf Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Wed, 22 Oct 2025 22:45:22 +0200 Subject: [PATCH 415/492] starting qualitative ocr evaluation --- docs/models.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/models.md b/docs/models.md index 7482043..741fc67 100644 --- a/docs/models.md +++ b/docs/models.md @@ -189,6 +189,16 @@ This transformer OCR model is trained on the same data as model_eynollah_ocr_tro | OCR-D-GT-Archiveform | RGB | 0.01552 | 0.06177 | | BLN600 | RGB | 0.06347 | 0.13853 | +##### Qualitative evaluation of the models + +###### CNN-RNN model: model_eynollah_ocr_cnnrnn_20250805 + + +###### CNN-RNN model: model_eynollah_ocr_cnnrnn_20250904 (Default) + + +###### Transformer OCR model: model_eynollah_ocr_trocr_20250919 + ## Heuristic methods Additionally, some heuristic methods are employed to further improve the model predictions: From ec1fd93dad864e0267b68d7528cd5ba5978da957 Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 23 Oct 2025 11:58:23 +0200 Subject: [PATCH 416/492] wip --- .github/workflows/test-eynollah.yml | 6 +- Makefile | 45 +--- src/eynollah/cli_models.py | 6 +- src/eynollah/model_zoo/default_specs.py | 4 +- tests/test_run.py | 102 -------- tests/test_run_layout.py | 330 ++++++++++++++++++++++++ 6 files changed, 349 insertions(+), 144 deletions(-) create mode 100644 tests/test_run_layout.py diff --git a/.github/workflows/test-eynollah.yml b/.github/workflows/test-eynollah.yml index 5b22fd1..dae190a 100644 --- a/.github/workflows/test-eynollah.yml +++ b/.github/workflows/test-eynollah.yml @@ -31,7 +31,7 @@ jobs: src: "./src" - name: Try to restore models_eynollah - uses: actions/cache/restore@v4 + uses: actions/cache/restore@v4 id: all_model_cache with: path: models_eynollah @@ -40,8 +40,8 @@ jobs: - name: Download models if: steps.all_model_cache.outputs.cache-hit != 'true' run: | - make models - ls -la models_eynollah + make models + ls -la models_eynollah - uses: actions/cache/save@v4 if: steps.all_model_cache.outputs.cache-hit != 'true' diff --git a/Makefile b/Makefile index b1cbcc4..1e7f2dd 100644 --- a/Makefile +++ b/Makefile @@ -14,17 +14,9 @@ WGET = wget -O #SEG_MODEL := https://github.com/qurator-spk/eynollah/releases/download/v0.3.0/models_eynollah.tar.gz #SEG_MODEL := https://github.com/qurator-spk/eynollah/releases/download/v0.3.1/models_eynollah.tar.gz #SEG_MODEL := https://zenodo.org/records/17194824/files/models_layout_v0_5_0.tar.gz?download=1 -SEG_MODEL := https://zenodo.org/records/17295988/files/models_layout_v0_6_0.tar.gz?download=1 -SEG_MODELFILE = $(notdir $(patsubst %?download=1,%,$(SEG_MODEL))) -SEG_MODELNAME = $(SEG_MODELFILE:%.tar.gz=%) - -BIN_MODEL := https://zenodo.org/records/17295988/files/models_binarization_v0_6_0.tar.gz?download=1 -BIN_MODELFILE = $(notdir $(BIN_MODEL)) -BIN_MODELNAME := default-2021-03-09 - -OCR_MODEL := https://zenodo.org/records/17295988/files/models_ocr_v0_6_0.tar.gz?download=1 -OCR_MODELFILE = $(notdir $(patsubst %?download=1,%,$(OCR_MODEL))) -OCR_MODELNAME = $(OCR_MODELFILE:%.tar.gz=%) +EYNOLLAH_MODELS_URL := https://zenodo.org/records/17295988/files/models_all_v0_7_0.zip +EYNOLLAH_MODELS_ZIP = $(notdir $(SEG_MODEL)) +EYNOLLAH_MODELS_DIR = $(SEG_MODELFILE:%.zip=%) PYTEST_ARGS ?= -vv --isolate @@ -49,33 +41,23 @@ help: @echo " EXTRAS comma-separated list of features (like 'OCR,plotting') for 'install' [$(EXTRAS)]" @echo " DOCKER_TAG Docker image tag for 'docker' [$(DOCKER_TAG)]" @echo " PYTEST_ARGS pytest args for 'test' (Set to '-s' to see log output during test execution, '-vv' to see individual tests. [$(PYTEST_ARGS)]" - @echo " SEG_MODEL URL of 'models' archive to download for segmentation 'test' [$(SEG_MODEL)]" - @echo " BIN_MODEL URL of 'models' archive to download for binarization 'test' [$(BIN_MODEL)]" - @echo " OCR_MODEL URL of 'models' archive to download for binarization 'test' [$(OCR_MODEL)]" + @echo " ALL_MODELS URL of archive of all models [$(ALL_MODELS)]" @echo "" # END-EVAL # Download and extract models to $(PWD)/models_layout_v0_6_0 -models: $(BIN_MODELNAME) $(SEG_MODELNAME) $(OCR_MODELNAME) +models: $(EYNOLLAH_MODELS_DIR) # do not download these files if we already have the directories -.INTERMEDIATE: $(BIN_MODELFILE) $(SEG_MODELFILE) $(OCR_MODELFILE) +.INTERMEDIATE: $(EYNOLLAH_MODELS_ZIP) -$(BIN_MODELFILE): - $(WGET) $@ $(BIN_MODEL) -$(SEG_MODELFILE): - $(WGET) $@ $(SEG_MODEL) -$(OCR_MODELFILE): - $(WGET) $@ $(OCR_MODEL) +$(EYNOLLAH_MODELS_ZIP): + $(WGET) $@ $(EYNOLLAH_MODELS_URL) -$(BIN_MODELNAME): $(BIN_MODELFILE) - tar zxf $< -$(SEG_MODELNAME): $(SEG_MODELFILE) - tar zxf $< -$(OCR_MODELNAME): $(OCR_MODELFILE) - tar zxf $< +$(EYNOLLAH_MODELS_DIR): $(EYNOLLAH_MODELS_ZIP) + unzip $< build: $(PIP) install build @@ -89,13 +71,8 @@ install: install-dev: $(PIP) install -e .$(and $(EXTRAS),[$(EXTRAS)]) -ifeq (OCR,$(findstring OCR, $(EXTRAS))) -deps-test: $(OCR_MODELNAME) -endif -deps-test: $(BIN_MODELNAME) $(SEG_MODELNAME) +deps-test: $(EYNOLLAH_MODELS_ZIP) $(PIP) install -r requirements-test.txt -ifeq (OCR,$(findstring OCR, $(EXTRAS))) - ln -rs $(OCR_MODELNAME)/* $(SEG_MODELNAME)/ endif smoke-test: TMPDIR != mktemp -d diff --git a/src/eynollah/cli_models.py b/src/eynollah/cli_models.py index 595c499..a299d19 100644 --- a/src/eynollah/cli_models.py +++ b/src/eynollah/cli_models.py @@ -85,9 +85,9 @@ def package( copies.add((src, dist_dir)) mkdirs.add(dist_dir) for dir in mkdirs: - print(f"mkdir -p {dir}") + print(f"mkdir -vp {dir}") for (src, dst) in copies: - print(f"cp -r {src} {dst}") + print(f"cp -vr {src} {dst}") for dir in mkdirs: zip_path = Path(f'../{dir.parent.name}.zip') - print(f"(cd {dir}/..; zip -r {zip_path} models_eynollah)") + print(f"(cd {dir}/..; zip -vr {zip_path} models_eynollah)") diff --git a/src/eynollah/model_zoo/default_specs.py b/src/eynollah/model_zoo/default_specs.py index e06c829..a57f7f1 100644 --- a/src/eynollah/model_zoo/default_specs.py +++ b/src/eynollah/model_zoo/default_specs.py @@ -14,7 +14,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ category="enhancement", variant='', filename="models_eynollah/eynollah-enhancement_20210425", - dists=['enhancement', 'layout'], + dists=['enhancement', 'layout', 'ci'], dist_url=dist_url("enhancement"), type=KerasModel, ), @@ -23,7 +23,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ category="binarization", variant='', filename="models_eynollah/eynollah-binarization-hybrid_20230504", - dists=['layout', 'binarization'], + dists=['layout', 'binarization', ], dist_url=dist_url("binarization"), type=KerasModel, ), diff --git a/tests/test_run.py b/tests/test_run.py index a410d34..359b0f0 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -16,114 +16,12 @@ from ocrd_models.constants import NAMESPACES as NS testdir = Path(__file__).parent.resolve() -MODELS_LAYOUT = environ.get('MODELS_LAYOUT', str(testdir.joinpath('..', 'models_layout_v0_6_0').resolve())) MODELS_OCR = environ.get('MODELS_OCR', str(testdir.joinpath('..', 'models_ocr_v0_6_0').resolve())) MODELS_BIN = environ.get('MODELS_BIN', str(testdir.joinpath('..', 'default-2021-03-09').resolve())) def only_eynollah(logrec): return logrec.name.startswith('eynollah') -@pytest.mark.parametrize( - "options", - [ - [], # defaults - #["--allow_scaling", "--curved-line"], - ["--allow_scaling", "--curved-line", "--full-layout"], - ["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based"], - ["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based", - "--textline_light", "--light_version"], - # -ep ... - # -eoi ... - # FIXME: find out whether OCR extra was installed, otherwise skip these - ["--do_ocr"], - ["--do_ocr", "--light_version", "--textline_light"], - ["--do_ocr", "--transformer_ocr"], - #["--do_ocr", "--transformer_ocr", "--light_version", "--textline_light"], - ["--do_ocr", "--transformer_ocr", "--light_version", "--textline_light", "--full-layout"], - # --skip_layout_and_reading_order - ], ids=str) -def test_run_eynollah_layout_filename(tmp_path, pytestconfig, caplog, options): - infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') - outfile = tmp_path / 'kant_aufklaerung_1784_0020.xml' - args = [ - '-m', MODELS_LAYOUT, - '-i', str(infile), - '-o', str(outfile.parent), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(layout_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert str(infile) in logmsgs - assert outfile.exists() - tree = page_from_file(str(outfile)).etree - regions = tree.xpath("//page:TextRegion", namespaces=NS) - assert len(regions) >= 2, "result is inaccurate" - regions = tree.xpath("//page:SeparatorRegion", namespaces=NS) - assert len(regions) >= 2, "result is inaccurate" - lines = tree.xpath("//page:TextLine", namespaces=NS) - assert len(lines) == 31, "result is inaccurate" # 29 paragraph lines, 1 page and 1 catch-word line - -@pytest.mark.parametrize( - "options", - [ - ["--tables"], - ["--tables", "--full-layout"], - ["--tables", "--full-layout", "--textline_light", "--light_version"], - ], ids=str) -def test_run_eynollah_layout_filename2(tmp_path, pytestconfig, caplog, options): - infile = testdir.joinpath('resources/euler_rechenkunst01_1738_0025.tif') - outfile = tmp_path / 'euler_rechenkunst01_1738_0025.xml' - args = [ - '-m', MODELS_LAYOUT, - '-i', str(infile), - '-o', str(outfile.parent), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(layout_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert str(infile) in logmsgs - assert outfile.exists() - tree = page_from_file(str(outfile)).etree - regions = tree.xpath("//page:TextRegion", namespaces=NS) - assert len(regions) >= 2, "result is inaccurate" - regions = tree.xpath("//page:TableRegion", namespaces=NS) - # model/decoding is not very precise, so (depending on mode) we can get fractures/splits/FP - assert len(regions) >= 1, "result is inaccurate" - regions = tree.xpath("//page:SeparatorRegion", namespaces=NS) - assert len(regions) >= 2, "result is inaccurate" - lines = tree.xpath("//page:TextLine", namespaces=NS) - assert len(lines) >= 2, "result is inaccurate" # mostly table (if detected correctly), but 1 page and 1 catch-word line - -def test_run_eynollah_layout_directory(tmp_path, pytestconfig, caplog): - indir = testdir.joinpath('resources') - outdir = tmp_path - args = [ - '-m', MODELS_LAYOUT, - '-di', str(indir), - '-o', str(outdir), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(layout_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Job done in')]) == 2 - assert any(logmsg for logmsg in logmsgs if logmsg.startswith('All jobs done in')) - assert len(list(outdir.iterdir())) == 2 - @pytest.mark.parametrize( "options", [ diff --git a/tests/test_run_layout.py b/tests/test_run_layout.py new file mode 100644 index 0000000..29cebc4 --- /dev/null +++ b/tests/test_run_layout.py @@ -0,0 +1,330 @@ +from os import environ +from pathlib import Path +import pytest +import logging +from PIL import Image +from eynollah.cli import ( + layout as layout_cli, + binarization as binarization_cli, + enhancement as enhancement_cli, + machine_based_reading_order as mbreorder_cli, + ocr as ocr_cli, +) +from click.testing import CliRunner +from ocrd_modelfactory import page_from_file +from ocrd_models.constants import NAMESPACES as NS + +testdir = Path(__file__).parent.resolve() + +MODELS_LAYOUT = environ.get('MODELS_LAYOUT', str(testdir.joinpath('..', 'models_layout_v0_6_0').resolve())) + +def only_eynollah(logrec): + return logrec.name.startswith('eynollah') + +@pytest.mark.parametrize( + "options", + [ + [], # defaults + #["--allow_scaling", "--curved-line"], + ["--allow_scaling", "--curved-line", "--full-layout"], + ["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based"], + ["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based", + "--textline_light", "--light_version"], + # -ep ... + # -eoi ... + # FIXME: find out whether OCR extra was installed, otherwise skip these + ["--do_ocr"], + ["--do_ocr", "--light_version", "--textline_light"], + ["--do_ocr", "--transformer_ocr"], + #["--do_ocr", "--transformer_ocr", "--light_version", "--textline_light"], + ["--do_ocr", "--transformer_ocr", "--light_version", "--textline_light", "--full-layout"], + # --skip_layout_and_reading_order + ], ids=str) +def test_run_eynollah_layout_filename(tmp_path, pytestconfig, caplog, options): + infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') + outfile = tmp_path / 'kant_aufklaerung_1784_0020.xml' + args = [ + '-m', MODELS_LAYOUT, + '-i', str(infile), + '-o', str(outfile.parent), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(layout_cli, args + options, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + assert str(infile) in logmsgs + assert outfile.exists() + tree = page_from_file(str(outfile)).etree + regions = tree.xpath("//page:TextRegion", namespaces=NS) + assert len(regions) >= 2, "result is inaccurate" + regions = tree.xpath("//page:SeparatorRegion", namespaces=NS) + assert len(regions) >= 2, "result is inaccurate" + lines = tree.xpath("//page:TextLine", namespaces=NS) + assert len(lines) == 31, "result is inaccurate" # 29 paragraph lines, 1 page and 1 catch-word line + +@pytest.mark.parametrize( + "options", + [ + ["--tables"], + ["--tables", "--full-layout"], + ["--tables", "--full-layout", "--textline_light", "--light_version"], + ], ids=str) +def test_run_eynollah_layout_filename2(tmp_path, pytestconfig, caplog, options): + infile = testdir.joinpath('resources/euler_rechenkunst01_1738_0025.tif') + outfile = tmp_path / 'euler_rechenkunst01_1738_0025.xml' + args = [ + '-m', MODELS_LAYOUT, + '-i', str(infile), + '-o', str(outfile.parent), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(layout_cli, args + options, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + assert str(infile) in logmsgs + assert outfile.exists() + tree = page_from_file(str(outfile)).etree + regions = tree.xpath("//page:TextRegion", namespaces=NS) + assert len(regions) >= 2, "result is inaccurate" + regions = tree.xpath("//page:TableRegion", namespaces=NS) + # model/decoding is not very precise, so (depending on mode) we can get fractures/splits/FP + assert len(regions) >= 1, "result is inaccurate" + regions = tree.xpath("//page:SeparatorRegion", namespaces=NS) + assert len(regions) >= 2, "result is inaccurate" + lines = tree.xpath("//page:TextLine", namespaces=NS) + assert len(lines) >= 2, "result is inaccurate" # mostly table (if detected correctly), but 1 page and 1 catch-word line + +def test_run_eynollah_layout_directory(tmp_path, pytestconfig, caplog): + indir = testdir.joinpath('resources') + outdir = tmp_path + args = [ + '-m', MODELS_LAYOUT, + '-di', str(indir), + '-o', str(outdir), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(layout_cli, args, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Job done in')]) == 2 + assert any(logmsg for logmsg in logmsgs if logmsg.startswith('All jobs done in')) + assert len(list(outdir.iterdir())) == 2 + +@pytest.mark.parametrize( + "options", + [ + [], # defaults + ["--no-patches"], + ], ids=str) +def test_run_eynollah_binarization_filename(tmp_path, pytestconfig, caplog, options): + infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') + outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') + args = [ + '-m', MODELS_BIN, + '-i', str(infile), + '-o', str(outfile), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(binarization_cli, args + options, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + assert any(True for logmsg in logmsgs if logmsg.startswith('Predicting')) + assert outfile.exists() + with Image.open(infile) as original_img: + original_size = original_img.size + with Image.open(outfile) as binarized_img: + binarized_size = binarized_img.size + assert original_size == binarized_size + +def test_run_eynollah_binarization_directory(tmp_path, pytestconfig, caplog): + indir = testdir.joinpath('resources') + outdir = tmp_path + args = [ + '-m', MODELS_BIN, + '-di', str(indir), + '-o', str(outdir), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(binarization_cli, args, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Predicting')]) == 2 + assert len(list(outdir.iterdir())) == 2 + +@pytest.mark.parametrize( + "options", + [ + [], # defaults + ["-sos"], + ], ids=str) +def test_run_eynollah_enhancement_filename(tmp_path, pytestconfig, caplog, options): + infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') + outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') + args = [ + '-m', MODELS_LAYOUT, + '-i', str(infile), + '-o', str(outfile.parent), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(enhancement_cli, args + options, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + assert any(True for logmsg in logmsgs if logmsg.startswith('Image was enhanced')), logmsgs + assert outfile.exists() + with Image.open(infile) as original_img: + original_size = original_img.size + with Image.open(outfile) as enhanced_img: + enhanced_size = enhanced_img.size + assert (original_size == enhanced_size) == ("-sos" in options) + +def test_run_eynollah_enhancement_directory(tmp_path, pytestconfig, caplog): + indir = testdir.joinpath('resources') + outdir = tmp_path + args = [ + '-m', MODELS_LAYOUT, + '-di', str(indir), + '-o', str(outdir), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(enhancement_cli, args, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Image was enhanced')]) == 2 + assert len(list(outdir.iterdir())) == 2 + +def test_run_eynollah_mbreorder_filename(tmp_path, pytestconfig, caplog): + infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.xml') + outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') + args = [ + '-m', MODELS_LAYOUT, + '-i', str(infile), + '-o', str(outfile.parent), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(mbreorder_cli, args, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + # FIXME: mbreorder has no logging! + #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs + assert outfile.exists() + #in_tree = page_from_file(str(infile)).etree + #in_order = in_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) + out_tree = page_from_file(str(outfile)).etree + out_order = out_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) + #assert len(out_order) >= 2, "result is inaccurate" + #assert in_order != out_order + assert out_order == ['r_1_1', 'r_2_1', 'r_2_2', 'r_2_3'] + +def test_run_eynollah_mbreorder_directory(tmp_path, pytestconfig, caplog): + indir = testdir.joinpath('resources') + outdir = tmp_path + args = [ + '-m', MODELS_LAYOUT, + '-di', str(indir), + '-o', str(outdir), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(mbreorder_cli, args, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + # FIXME: mbreorder has no logging! + #assert len([logmsg for logmsg in logmsgs if logmsg.startswith('???')]) == 2 + assert len(list(outdir.iterdir())) == 2 + +@pytest.mark.parametrize( + "options", + [ + [], # defaults + ["-doit", #str(outrenderfile.parent)], + ], + ["-trocr"], + ], ids=str) +def test_run_eynollah_ocr_filename(tmp_path, pytestconfig, caplog, options): + infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') + outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') + outrenderfile = tmp_path.joinpath('render').joinpath('kant_aufklaerung_1784_0020.png') + outrenderfile.parent.mkdir() + args = [ + '-m', MODELS_OCR, + '-i', str(infile), + '-dx', str(infile.parent), + '-o', str(outfile.parent), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.DEBUG) + runner = CliRunner() + if "-doit" in options: + options.insert(options.index("-doit") + 1, str(outrenderfile.parent)) + with caplog.filtering(only_eynollah): + result = runner.invoke(ocr_cli, args + options, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + # FIXME: ocr has no logging! + #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs + assert outfile.exists() + if "-doit" in options: + assert outrenderfile.exists() + #in_tree = page_from_file(str(infile)).etree + #in_order = in_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) + out_tree = page_from_file(str(outfile)).etree + out_texts = out_tree.xpath("//page:TextLine/page:TextEquiv[last()]/page:Unicode/text()", namespaces=NS) + assert len(out_texts) >= 2, ("result is inaccurate", out_texts) + assert sum(map(len, out_texts)) > 100, ("result is inaccurate", out_texts) + +def test_run_eynollah_ocr_directory(tmp_path, pytestconfig, caplog): + indir = testdir.joinpath('resources') + outdir = tmp_path + args = [ + '-m', MODELS_OCR, + '-di', str(indir), + '-dx', str(indir), + '-o', str(outdir), + ] + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + runner = CliRunner() + with caplog.filtering(only_eynollah): + result = runner.invoke(ocr_cli, args, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + logmsgs = [logrec.message for logrec in caplog.records] + # FIXME: ocr has no logging! + #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs + assert len(list(outdir.iterdir())) == 2 From 6192e5ba5c95f3b8b3ad21f2e23aed0fbdededad Mon Sep 17 00:00:00 2001 From: vahidrezanezhad Date: Thu, 23 Oct 2025 16:37:24 +0200 Subject: [PATCH 417/492] qualitative evaluation of ocr models are added to docs --- docs/models.md | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/docs/models.md b/docs/models.md index 741fc67..b858630 100644 --- a/docs/models.md +++ b/docs/models.md @@ -191,13 +191,27 @@ This transformer OCR model is trained on the same data as model_eynollah_ocr_tro ##### Qualitative evaluation of the models -###### CNN-RNN model: model_eynollah_ocr_cnnrnn_20250805 +| | | | | +|:---:|:---:|:---:|:---:| +| Image | cnnrnn_20250805 | cnnrnn_20250904 | trocr_20250919 | -###### CNN-RNN model: model_eynollah_ocr_cnnrnn_20250904 (Default) + +| | | | | +|:---:|:---:|:---:|:---:| +| Image | cnnrnn_20250805 | cnnrnn_20250904 | trocr_20250919 | + + +| | | | | +|:---:|:---:|:---:|:---:| +| Image | cnnrnn_20250805 | cnnrnn_20250904 | trocr_20250919 | + + +| | | | | +|:---:|:---:|:---:|:---:| +| Image | cnnrnn_20250805 | cnnrnn_20250904 | trocr_20250919 | -###### Transformer OCR model: model_eynollah_ocr_trocr_20250919 ## Heuristic methods From 51d2680d9c8ef1f61ed653707bbb01c66872c666 Mon Sep 17 00:00:00 2001 From: kba Date: Mon, 27 Oct 2025 11:44:59 +0100 Subject: [PATCH 418/492] wip --- src/eynollah/model_zoo/default_specs.py | 6 +++--- src/eynollah/sbb_binarize.py | 5 ++--- tests/test_run.py | 19 +++++++++---------- 3 files changed, 14 insertions(+), 16 deletions(-) diff --git a/src/eynollah/model_zoo/default_specs.py b/src/eynollah/model_zoo/default_specs.py index a57f7f1..fa67393 100644 --- a/src/eynollah/model_zoo/default_specs.py +++ b/src/eynollah/model_zoo/default_specs.py @@ -21,8 +21,8 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ EynollahModelSpec( category="binarization", - variant='', - filename="models_eynollah/eynollah-binarization-hybrid_20230504", + variant='hybrid', + filename="models_eynollah/eynollah-binarization-hybrid_20230504/model_bin_hybrid_trans_cnn_sbb_ens", dists=['layout', 'binarization', ], dist_url=dist_url("binarization"), type=KerasModel, @@ -39,7 +39,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ EynollahModelSpec( category="binarization", - variant='augment', + variant='', filename="models_eynollah/eynollah-binarization_20210425", dists=['binarization'], dist_url=dist_url("binarization"), diff --git a/src/eynollah/sbb_binarize.py b/src/eynollah/sbb_binarize.py index 48dc7b1..da165ea 100644 --- a/src/eynollah/sbb_binarize.py +++ b/src/eynollah/sbb_binarize.py @@ -27,7 +27,7 @@ class SbbBinarizer: def __init__(self, model_dir: str, mode: str, logger=None): if mode not in ('single', 'multi'): raise ValueError(f"'mode' must be either 'multi' or 'single', not {mode}") - self.log = logger if logger else logging.getLogger('SbbBinarizer') + self.log = logger if logger else logging.getLogger('eynollah.binarization') self.model_zoo = EynollahModelZoo(basedir=model_dir) self.models = self.setup_models(mode) self.session = self.start_new_session() @@ -51,8 +51,7 @@ class SbbBinarizer: self.session.close() del self.session - def predict(self, img, use_patches, n_batch_inference=5): - model = self.model_zoo.get('binarization', Model) + def predict(self, model, img, use_patches, n_batch_inference=5): model_height = model.layers[len(model.layers)-1].output_shape[1] model_width = model.layers[len(model.layers)-1].output_shape[2] diff --git a/tests/test_run.py b/tests/test_run.py index 359b0f0..6d97fbb 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -16,8 +16,7 @@ from ocrd_models.constants import NAMESPACES as NS testdir = Path(__file__).parent.resolve() -MODELS_OCR = environ.get('MODELS_OCR', str(testdir.joinpath('..', 'models_ocr_v0_6_0').resolve())) -MODELS_BIN = environ.get('MODELS_BIN', str(testdir.joinpath('..', 'default-2021-03-09').resolve())) +MODELS_DIR = environ.get('EYNOLLAH_MODELS_DIR', str(testdir.joinpath('..').resolve())) def only_eynollah(logrec): return logrec.name.startswith('eynollah') @@ -32,7 +31,7 @@ def test_run_eynollah_binarization_filename(tmp_path, pytestconfig, caplog, opti infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') args = [ - '-m', MODELS_BIN, + '-m', MODELS_DIR, '-i', str(infile), '-o', str(outfile), ] @@ -56,7 +55,7 @@ def test_run_eynollah_binarization_directory(tmp_path, pytestconfig, caplog): indir = testdir.joinpath('resources') outdir = tmp_path args = [ - '-m', MODELS_BIN, + '-m', MODELS_DIR, '-di', str(indir), '-o', str(outdir), ] @@ -81,7 +80,7 @@ def test_run_eynollah_enhancement_filename(tmp_path, pytestconfig, caplog, optio infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') args = [ - '-m', MODELS_LAYOUT, + '-m', MODELS_DIR, '-i', str(infile), '-o', str(outfile.parent), ] @@ -105,7 +104,7 @@ def test_run_eynollah_enhancement_directory(tmp_path, pytestconfig, caplog): indir = testdir.joinpath('resources') outdir = tmp_path args = [ - '-m', MODELS_LAYOUT, + '-m', MODELS_DIR, '-di', str(indir), '-o', str(outdir), ] @@ -124,7 +123,7 @@ def test_run_eynollah_mbreorder_filename(tmp_path, pytestconfig, caplog): infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.xml') outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') args = [ - '-m', MODELS_LAYOUT, + '-m', MODELS_DIR, '-i', str(infile), '-o', str(outfile.parent), ] @@ -151,7 +150,7 @@ def test_run_eynollah_mbreorder_directory(tmp_path, pytestconfig, caplog): indir = testdir.joinpath('resources') outdir = tmp_path args = [ - '-m', MODELS_LAYOUT, + '-m', MODELS_DIR, '-di', str(indir), '-o', str(outdir), ] @@ -181,7 +180,7 @@ def test_run_eynollah_ocr_filename(tmp_path, pytestconfig, caplog, options): outrenderfile = tmp_path.joinpath('render').joinpath('kant_aufklaerung_1784_0020.png') outrenderfile.parent.mkdir() args = [ - '-m', MODELS_OCR, + '-m', MODELS_DIR, '-i', str(infile), '-dx', str(infile.parent), '-o', str(outfile.parent), @@ -212,7 +211,7 @@ def test_run_eynollah_ocr_directory(tmp_path, pytestconfig, caplog): indir = testdir.joinpath('resources') outdir = tmp_path args = [ - '-m', MODELS_OCR, + '-m', MODELS_DIR, '-di', str(indir), '-dx', str(indir), '-o', str(outdir), From 294b6356d3b233fa80322e5b259c31ad7038cd6d Mon Sep 17 00:00:00 2001 From: kba Date: Mon, 27 Oct 2025 11:45:16 +0100 Subject: [PATCH 419/492] wip --- Makefile | 1 - src/eynollah/eynollah.py | 13 +++++++++++-- src/eynollah/eynollah_ocr.py | 4 ++++ src/eynollah/model_zoo/__init__.py | 5 ++++- src/eynollah/model_zoo/model_zoo.py | 8 +++----- tests/test_model_zoo.py | 19 +++++++++++++++++++ 6 files changed, 41 insertions(+), 9 deletions(-) create mode 100644 tests/test_model_zoo.py diff --git a/Makefile b/Makefile index 1e7f2dd..4fcd9fb 100644 --- a/Makefile +++ b/Makefile @@ -73,7 +73,6 @@ install-dev: deps-test: $(EYNOLLAH_MODELS_ZIP) $(PIP) install -r requirements-test.txt -endif smoke-test: TMPDIR != mktemp -d smoke-test: tests/resources/kant_aufklaerung_1784_0020.tif diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 232631a..98e894c 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -45,7 +45,7 @@ import tensorflow as tf tf.get_logger().setLevel("ERROR") warnings.filterwarnings("ignore") -from .model_zoo import EynollahModelZoo +from .model_zoo import (EynollahModelZoo, KerasModel, TrOCRProcessor) from .utils.contour import ( filter_contours_area_of_image, filter_contours_area_of_image_tables, @@ -178,6 +178,7 @@ class Eynollah: self.full_layout = full_layout self.tables = tables self.right2left = right2left + # --input-binary sensible if image is very dark, if layout is not working. self.input_binary = input_binary self.allow_scaling = allow_scaling self.headers_off = headers_off @@ -3651,7 +3652,15 @@ class Eynollah: pass def return_ocr_of_textline_without_common_section( - self, textline_image, model_ocr, processor, device, width_textline, h2w_ratio,ind_tot): + self, + textline_image, + model_ocr: KerasModel, + processor: TrOCRProcessor, + device, + width_textline, + h2w_ratio, + ind_tot, + ): if h2w_ratio > 0.05: pixel_values = processor(textline_image, return_tensors="pt").pixel_values diff --git a/src/eynollah/eynollah_ocr.py b/src/eynollah/eynollah_ocr.py index cfd410c..41643de 100644 --- a/src/eynollah/eynollah_ocr.py +++ b/src/eynollah/eynollah_ocr.py @@ -63,8 +63,11 @@ class Eynollah_ocr: logger: Optional[Logger]=None, ): self.tr_ocr = tr_ocr + # For generating textline-image pairs for traning, move to generate_gt_for_training self.export_textline_images_and_text = export_textline_images_and_text + # masking for OCR and GT generation, relevant for skewed lines and bounding boxes self.do_not_mask_with_textline_contour = do_not_mask_with_textline_contour + # prefix or dataset self.pref_of_dataset = pref_of_dataset self.logger = logger if logger else getLogger('eynollah') self.model_zoo = EynollahModelZoo(basedir=dir_models) @@ -103,6 +106,7 @@ class Eynollah_ocr: def run(self, overwrite: bool = False, dir_in: Optional[str] = None, + # Prediction with RGB and binarized images for selected pages, should not be the default dir_in_bin: Optional[str] = None, image_filename: Optional[str] = None, dir_xmls: Optional[str] = None, diff --git a/src/eynollah/model_zoo/__init__.py b/src/eynollah/model_zoo/__init__.py index e1dc985..dda52c2 100644 --- a/src/eynollah/model_zoo/__init__.py +++ b/src/eynollah/model_zoo/__init__.py @@ -1,4 +1,7 @@ __all__ = [ 'EynollahModelZoo', + 'KerasModel', + 'TrOCRProcessor', + 'VisionEncoderDecoderModel', ] -from .model_zoo import EynollahModelZoo +from .model_zoo import EynollahModelZoo, KerasModel, TrOCRProcessor, VisionEncoderDecoderModel diff --git a/src/eynollah/model_zoo/model_zoo.py b/src/eynollah/model_zoo/model_zoo.py index 8948a1f..dada98f 100644 --- a/src/eynollah/model_zoo/model_zoo.py +++ b/src/eynollah/model_zoo/model_zoo.py @@ -9,7 +9,6 @@ from keras.models import Model as KerasModel from keras.models import load_model from tabulate import tabulate from transformers import TrOCRProcessor, VisionEncoderDecoderModel - from ..patch_encoder import PatchEncoder, Patches from .specs import EynollahModelSpecSet from .default_specs import DEFAULT_MODEL_SPECS @@ -100,7 +99,7 @@ class EynollahModelZoo: elif model_category == 'characters': model = self._load_characters() elif model_category == 'trocr_processor': - return TrOCRProcessor.from_pretrained(self.model_path(...)) + model = TrOCRProcessor.from_pretrained(model_path) else: try: model = load_model(model_path, compile=False) @@ -184,6 +183,5 @@ class EynollahModelZoo: Ensure that a loaded models is not referenced by ``self._loaded`` anymore """ if hasattr(self, '_loaded') and getattr(self, '_loaded'): - for needle in self._loaded: - if self._loaded[needle]: - del self._loaded[needle] + for needle in self._loaded.keys(): + del self._loaded[needle] diff --git a/tests/test_model_zoo.py b/tests/test_model_zoo.py new file mode 100644 index 0000000..81e84f6 --- /dev/null +++ b/tests/test_model_zoo.py @@ -0,0 +1,19 @@ +from pathlib import Path + +from eynollah.model_zoo import EynollahModelZoo, TrOCRProcessor, VisionEncoderDecoderModel + +testdir = Path(__file__).parent.resolve() +MODELS_DIR = testdir.parent + +def test_trocr1(): + model_zoo = EynollahModelZoo(str(MODELS_DIR)) + model_zoo.load_model('trocr_processor') + proc = model_zoo.get('trocr_processor', TrOCRProcessor) + assert isinstance(proc, TrOCRProcessor) + + model_zoo.load_model('ocr', 'tr') + model = model_zoo.get('ocr') + assert isinstance(model, VisionEncoderDecoderModel) + print(proc) + +test_trocr1() From 22d61e8d9405a18b936537e0499fb4bd5205c9e9 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Tue, 28 Oct 2025 19:56:23 +0100 Subject: [PATCH 420/492] remove newspaper images from main readme --- README.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/README.md b/README.md index 9a6d9bc..5d5d5a8 100644 --- a/README.md +++ b/README.md @@ -11,11 +11,6 @@ ![](https://user-images.githubusercontent.com/952378/102350683-8a74db80-3fa5-11eb-8c7e-f743f7d6eae2.jpg) -

- Input Image - Output Image -

- ## Features * Document layout analysis using pixelwise segmentation models with support for 10 distinct segmentation classes: * background, [page border](https://ocr-d.de/en/gt-guidelines/trans/lyRand.html), [text region](https://ocr-d.de/en/gt-guidelines/trans/lytextregion.html#textregionen__textregion_), [text line](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_TextLineType.html), [header](https://ocr-d.de/en/gt-guidelines/trans/lyUeberschrift.html), [image](https://ocr-d.de/en/gt-guidelines/trans/lyBildbereiche.html), [separator](https://ocr-d.de/en/gt-guidelines/trans/lySeparatoren.html), [marginalia](https://ocr-d.de/en/gt-guidelines/trans/lyMarginalie.html), [initial](https://ocr-d.de/en/gt-guidelines/trans/lyInitiale.html), [table](https://ocr-d.de/en/gt-guidelines/trans/lyTabellen.html) From b6f82c72b9025d2663baa76c6ddf70d225c4da3b Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 29 Oct 2025 16:20:30 +0100 Subject: [PATCH 421/492] refactor cli tests --- src/eynollah/eynollah_ocr.py | 42 ++-- src/eynollah/image_enhancer.py | 17 +- src/eynollah/model_zoo/model_zoo.py | 5 +- src/eynollah/sbb_binarize.py | 7 +- tests/__init__.py | 0 tests/cli_tests/conftest.py | 36 +++ tests/cli_tests/test_binarization.py | 58 +++++ tests/cli_tests/test_enhance.py | 57 +++++ tests/cli_tests/test_layout.py | 109 +++++++++ tests/cli_tests/test_mbreorder.py | 53 +++++ tests/cli_tests/test_ocr.py | 67 ++++++ tests/cli_tests/test_run.py | 10 + tests/conftest.py | 25 ++ tests/test_run.py | 229 ------------------- tests/test_run_layout.py | 330 --------------------------- 15 files changed, 453 insertions(+), 592 deletions(-) delete mode 100644 tests/__init__.py create mode 100644 tests/cli_tests/conftest.py create mode 100644 tests/cli_tests/test_binarization.py create mode 100644 tests/cli_tests/test_enhance.py create mode 100644 tests/cli_tests/test_layout.py create mode 100644 tests/cli_tests/test_mbreorder.py create mode 100644 tests/cli_tests/test_ocr.py create mode 100644 tests/cli_tests/test_run.py create mode 100644 tests/conftest.py delete mode 100644 tests/test_run.py delete mode 100644 tests/test_run_layout.py diff --git a/src/eynollah/eynollah_ocr.py b/src/eynollah/eynollah_ocr.py index 41643de..3aafd8e 100644 --- a/src/eynollah/eynollah_ocr.py +++ b/src/eynollah/eynollah_ocr.py @@ -59,7 +59,7 @@ class Eynollah_ocr: export_textline_images_and_text: bool=False, do_not_mask_with_textline_contour: bool=False, pref_of_dataset=None, - min_conf_value_of_textline_text : float=0.3, + min_conf_value_of_textline_text : Optional[float]=None, logger: Optional[Logger]=None, ): self.tr_ocr = tr_ocr @@ -69,7 +69,7 @@ class Eynollah_ocr: self.do_not_mask_with_textline_contour = do_not_mask_with_textline_contour # prefix or dataset self.pref_of_dataset = pref_of_dataset - self.logger = logger if logger else getLogger('eynollah') + self.logger = logger if logger else getLogger('eynollah.ocr') self.model_zoo = EynollahModelZoo(basedir=dir_models) # TODO: Properly document what 'export_textline_images_and_text' is about @@ -77,21 +77,15 @@ class Eynollah_ocr: self.logger.info("export_textline_images_and_text was set, so no actual models are loaded") return - self.min_conf_value_of_textline_text = min_conf_value_of_textline_text + self.min_conf_value_of_textline_text = min_conf_value_of_textline_text if min_conf_value_of_textline_text else 0.3 self.b_s = 2 if batch_size is None and tr_ocr else 8 if batch_size is None else batch_size if tr_ocr: - self.model_zoo.load_model('trocr_processor', '') - if model_name: - self.model_zoo.load_model('ocr', 'tr', model_name) - else: - self.model_zoo.load_model('ocr', 'tr') + self.model_zoo.load_model('trocr_processor') + self.model_zoo.load_model('ocr', 'tr', model_path_override=model_name) self.model_zoo.get('ocr').to(self.device) else: - if model_name: - self.model_zoo.load_model('ocr', '', model_name) - else: - self.model_zoo.load_model('ocr', '') + self.model_zoo.load_model('ocr', '', model_path_override=model_name) self.model_zoo.load_model('num_to_char') self.end_character = len(self.model_zoo.load_model('characters')) + 2 @@ -206,10 +200,10 @@ class Eynollah_ocr: cropped_lines = [] indexer_b_s = 0 - pixel_values_merged = self.model_zoo.get('processor')(imgs, return_tensors="pt").pixel_values + pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values generated_ids_merged = self.model_zoo.get('ocr').generate( pixel_values_merged.to(self.device)) - generated_text_merged = self.model_zoo.get('processor').batch_decode( + generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( generated_ids_merged, skip_special_tokens=True) extracted_texts = extracted_texts + generated_text_merged @@ -229,10 +223,10 @@ class Eynollah_ocr: cropped_lines = [] indexer_b_s = 0 - pixel_values_merged = self.model_zoo.get('processor')(imgs, return_tensors="pt").pixel_values + pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values generated_ids_merged = self.model_zoo.get('ocr').generate( pixel_values_merged.to(self.device)) - generated_text_merged = self.model_zoo.get('processor').batch_decode( + generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( generated_ids_merged, skip_special_tokens=True) extracted_texts = extracted_texts + generated_text_merged @@ -249,10 +243,10 @@ class Eynollah_ocr: cropped_lines = [] indexer_b_s = 0 - pixel_values_merged = self.model_zoo.get('processor')(imgs, return_tensors="pt").pixel_values + pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values generated_ids_merged = self.model_zoo.get('ocr').generate( pixel_values_merged.to(self.device)) - generated_text_merged = self.model_zoo.get('processor').batch_decode( + generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( generated_ids_merged, skip_special_tokens=True) extracted_texts = extracted_texts + generated_text_merged @@ -267,10 +261,10 @@ class Eynollah_ocr: cropped_lines = [] indexer_b_s = 0 - pixel_values_merged = self.model_zoo.get('processor')(imgs, return_tensors="pt").pixel_values + pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values generated_ids_merged = self.model_zoo.get('ocr').generate( pixel_values_merged.to(self.device)) - generated_text_merged = self.model_zoo.get('processor').batch_decode( + generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( generated_ids_merged, skip_special_tokens=True) extracted_texts = extracted_texts + generated_text_merged @@ -284,9 +278,9 @@ class Eynollah_ocr: cropped_lines = [] indexer_b_s = 0 - pixel_values_merged = self.model_zoo.get('processor')(imgs, return_tensors="pt").pixel_values + pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values generated_ids_merged = self.model_zoo.get('ocr').generate(pixel_values_merged.to(self.device)) - generated_text_merged = self.model_zoo.get('processor').batch_decode(generated_ids_merged, skip_special_tokens=True) + generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode(generated_ids_merged, skip_special_tokens=True) extracted_texts = extracted_texts + generated_text_merged @@ -301,10 +295,10 @@ class Eynollah_ocr: ####n_start = i*self.b_s ####n_end = (i+1)*self.b_s ####imgs = cropped_lines[n_start:n_end] - ####pixel_values_merged = self.model_zoo.get('processor')(imgs, return_tensors="pt").pixel_values + ####pixel_values_merged = self.model_zoo.get('trocr_processor')(imgs, return_tensors="pt").pixel_values ####generated_ids_merged = self.model_ocr.generate( #### pixel_values_merged.to(self.device)) - ####generated_text_merged = self.model_zoo.get('processor').batch_decode( + ####generated_text_merged = self.model_zoo.get('trocr_processor').batch_decode( #### generated_ids_merged, skip_special_tokens=True) ####extracted_texts = extracted_texts + generated_text_merged diff --git a/src/eynollah/image_enhancer.py b/src/eynollah/image_enhancer.py index cec8877..74b4865 100644 --- a/src/eynollah/image_enhancer.py +++ b/src/eynollah/image_enhancer.py @@ -50,7 +50,7 @@ class Enhancer: else: self.num_col_lower = num_col_lower - self.logger = logger if logger else getLogger('enhancement') + self.logger = logger if logger else getLogger('eynollah.enhance') self.model_zoo = EynollahModelZoo(basedir=dir_models) for v in ['binarization', 'enhancement', 'col_classifier', 'page']: self.model_zoo.load_model(v) @@ -142,7 +142,7 @@ class Enhancer: index_y_d = img_h - img_height_model img_patch = img[np.newaxis, index_y_d:index_y_u, index_x_d:index_x_u, :] - label_p_pred = self.model_zoo.get('enhancement', Model).predict(img_patch, verbose=0) + label_p_pred = self.model_zoo.get('enhancement', Model).predict(img_patch, verbose='0') seg = label_p_pred[0, :, :, :] * 255 if i == 0 and j == 0: @@ -667,7 +667,7 @@ class Enhancer: t0 = time.time() img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement(light_version=False) - return img_res + return img_res, is_image_enhanced def run(self, @@ -705,9 +705,18 @@ class Enhancer: self.logger.warning("will skip input for existing output file '%s'", self.output_filename) continue - image_enhanced = self.run_single() + did_resize = False + image_enhanced, did_enhance = self.run_single() if self.save_org_scale: image_enhanced = resize_image(image_enhanced, self.h_org, self.w_org) + did_resize = True + + self.logger.info( + "Image %s was %senhanced%s.", + img_filename, + '' if did_enhance else 'not ', + 'and resized' if did_resize else '' + ) cv2.imwrite(self.output_filename, image_enhanced) diff --git a/src/eynollah/model_zoo/model_zoo.py b/src/eynollah/model_zoo/model_zoo.py index dada98f..32fdd0e 100644 --- a/src/eynollah/model_zoo/model_zoo.py +++ b/src/eynollah/model_zoo/model_zoo.py @@ -84,10 +84,13 @@ class EynollahModelZoo: self, model_category: str, model_variant: str = '', + model_path_override: Optional[str] = None, ) -> AnyModel: """ Load any model """ + if model_path_override: + self.override_models((model_category, model_variant, model_path_override)) model_path = self.model_path(model_category, model_variant) if model_path.suffix == '.h5' and Path(model_path.stem).exists(): # prefer SavedModel over HDF5 format if it exists @@ -183,5 +186,5 @@ class EynollahModelZoo: Ensure that a loaded models is not referenced by ``self._loaded`` anymore """ if hasattr(self, '_loaded') and getattr(self, '_loaded'): - for needle in self._loaded.keys(): + for needle in list(self._loaded.keys()): del self._loaded[needle] diff --git a/src/eynollah/sbb_binarize.py b/src/eynollah/sbb_binarize.py index da165ea..1bcf9d9 100644 --- a/src/eynollah/sbb_binarize.py +++ b/src/eynollah/sbb_binarize.py @@ -322,8 +322,7 @@ class SbbBinarizer: image = cv2.imread(image_path) img_last = 0 for n, (model_file, model) in enumerate(self.models.items()): - self.log.info('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.models.keys()))) - + self.log.info('Predicting %s with model %s [%s/%s]', image_path if image_path else '[image]', model_file, n + 1, len(self.models.keys())) res = self.predict(model, image, use_patches) img_fin = np.zeros((res.shape[0], res.shape[1], 3)) @@ -348,11 +347,11 @@ class SbbBinarizer: ls_imgs = list(filter(is_image_filename, os.listdir(dir_in))) for image_name in ls_imgs: image_stem = image_name.split('.')[0] - print(image_name,'image_name') + # print(image_name,'image_name') image = cv2.imread(os.path.join(dir_in,image_name) ) img_last = 0 for n, (model_file, model) in enumerate(self.models.items()): - self.log.info('Predicting with model %s [%s/%s]' % (model_file, n + 1, len(self.models.keys()))) + self.log.info('Predicting %s with model %s [%s/%s]', image_name, model_file, n + 1, len(self.models.keys())) res = self.predict(model, image, use_patches) diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/tests/cli_tests/conftest.py b/tests/cli_tests/conftest.py new file mode 100644 index 0000000..c54f47b --- /dev/null +++ b/tests/cli_tests/conftest.py @@ -0,0 +1,36 @@ +from typing import List +from click import Command +import pytest +import logging + +from click.testing import CliRunner, Result + +@pytest.fixture +def run_eynollah_ok_and_check_logs( + pytestconfig, + caplog, + model_dir, + eynollah_log_filter, +): + """ + Generates a Click Runner for `cli`, injects model_path and logging level + to `args`, runs the command and checks whether the logs generated contain + every fragment in `expected_logs` + """ + + def _run_click_ok_logs(cli: Command, args: List[str], expected_logs: List[str]) -> Result: + args = ['-m', model_dir] + args + if pytestconfig.getoption('verbose') > 0: + args.extend(['-l', 'DEBUG']) + caplog.set_level(logging.INFO) + runner = CliRunner() + with caplog.filtering(eynollah_log_filter): + result = runner.invoke(cli, args, catch_exceptions=False) + assert result.exit_code == 0, result.stdout + if expected_logs: + logmsgs = [logrec.message for logrec in caplog.records] + assert any(logmsg.startswith(needle) for needle in expected_logs for logmsg in logmsgs), f'{expected_logs} not in {logmsgs}' + return result + + return _run_click_ok_logs + diff --git a/tests/cli_tests/test_binarization.py b/tests/cli_tests/test_binarization.py new file mode 100644 index 0000000..4672a4f --- /dev/null +++ b/tests/cli_tests/test_binarization.py @@ -0,0 +1,58 @@ +import pytest +from PIL import Image +from eynollah.cli import ( + binarization as binarization_cli, +) +from ocrd_modelfactory import page_from_file +from ocrd_models.constants import NAMESPACES as NS + +@pytest.mark.parametrize( + "options", + [ + [], # defaults + ["--no-patches"], + ], ids=str) +def test_run_eynollah_binarization_filename( + tmp_path, + run_eynollah_ok_and_check_logs, + tests_dir, + options, +): + infile = tests_dir.joinpath('resources/kant_aufklaerung_1784_0020.tif') + outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') + run_eynollah_ok_and_check_logs( + binarization_cli, + [ + '-i', str(infile), + '-o', str(outfile), + ] + options, + [ + 'Predicting' + ] + ) + assert outfile.exists() + with Image.open(infile) as original_img: + original_size = original_img.size + with Image.open(outfile) as binarized_img: + binarized_size = binarized_img.size + assert original_size == binarized_size + +def test_run_eynollah_binarization_directory( + tmp_path, + run_eynollah_ok_and_check_logs, + resources_dir, + image_resources, +): + outdir = tmp_path + run_eynollah_ok_and_check_logs( + binarization_cli, + [ + '-di', str(resources_dir), + '-o', str(outdir), + ], + [ + f'Predicting {image_resources[0].name}', + f'Predicting {image_resources[1].name}', + ] + ) + assert len(list(outdir.iterdir())) == 2 diff --git a/tests/cli_tests/test_enhance.py b/tests/cli_tests/test_enhance.py new file mode 100644 index 0000000..590c07f --- /dev/null +++ b/tests/cli_tests/test_enhance.py @@ -0,0 +1,57 @@ +import pytest +from PIL import Image +from eynollah.cli import ( + enhancement as enhancement_cli, +) +from ocrd_modelfactory import page_from_file +from ocrd_models.constants import NAMESPACES as NS + +@pytest.mark.parametrize( + "options", + [ + [], # defaults + ["-sos"], + ], ids=str) +def test_run_eynollah_enhancement_filename( + tmp_path, + resources_dir, + run_eynollah_ok_and_check_logs, + options, +): + infile = resources_dir / 'kant_aufklaerung_1784_0020.tif' + outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') + run_eynollah_ok_and_check_logs( + enhancement_cli, + [ + '-i', str(infile), + '-o', str(outfile.parent), + ] + options, + [ + 'Image was enhanced', + ] + ) + with Image.open(infile) as original_img: + original_size = original_img.size + with Image.open(outfile) as enhanced_img: + enhanced_size = enhanced_img.size + assert (original_size == enhanced_size) == ("-sos" in options) + +def test_run_eynollah_enhancement_directory( + tmp_path, + resources_dir, + image_resources, + run_eynollah_ok_and_check_logs, +): + outdir = tmp_path + run_eynollah_ok_and_check_logs( + enhancement_cli, + [ + '-di', str(resources_dir), + '-o', str(outdir), + ], + [ + f'Image {image_resources[0]} was enhanced', + f'Image {image_resources[1]} was enhanced', + ] + ) + assert len(list(outdir.iterdir())) == 2 diff --git a/tests/cli_tests/test_layout.py b/tests/cli_tests/test_layout.py new file mode 100644 index 0000000..db7b88c --- /dev/null +++ b/tests/cli_tests/test_layout.py @@ -0,0 +1,109 @@ +import pytest +from eynollah.cli import ( + layout as layout_cli, +) +from ocrd_modelfactory import page_from_file +from ocrd_models.constants import NAMESPACES as NS + +@pytest.mark.parametrize( + "options", + [ + [], # defaults + #["--allow_scaling", "--curved-line"], + ["--allow_scaling", "--curved-line", "--full-layout"], + ["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based"], + ["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based", + "--textline_light", "--light_version"], + # -ep ... + # -eoi ... + # FIXME: find out whether OCR extra was installed, otherwise skip these + ["--do_ocr"], + ["--do_ocr", "--light_version", "--textline_light"], + ["--do_ocr", "--transformer_ocr"], + #["--do_ocr", "--transformer_ocr", "--light_version", "--textline_light"], + ["--do_ocr", "--transformer_ocr", "--light_version", "--textline_light", "--full-layout"], + # --skip_layout_and_reading_order + ], ids=str) +def test_run_eynollah_layout_filename( + tmp_path, + run_eynollah_ok_and_check_logs, + resources_dir, + options, +): + outdir = tmp_path + infile = resources_dir / 'kant_aufklaerung_1784_0020.tif' + outfile = tmp_path / 'kant_aufklaerung_1784_0020.xml' + run_eynollah_ok_and_check_logs( + layout_cli, + [ + '-i', str(infile), + '-o', str(outfile.parent), + ] + options, + [ + str(infile) + ] + ) + assert outfile.exists() + tree = page_from_file(str(outfile)).etree + regions = tree.xpath("//page:TextRegion", namespaces=NS) + assert len(regions) >= 2, "result is inaccurate" + regions = tree.xpath("//page:SeparatorRegion", namespaces=NS) + assert len(regions) >= 2, "result is inaccurate" + lines = tree.xpath("//page:TextLine", namespaces=NS) + assert len(lines) == 31, "result is inaccurate" # 29 paragraph lines, 1 page and 1 catch-word line + +@pytest.mark.parametrize( + "options", + [ + ["--tables"], + ["--tables", "--full-layout"], + ["--tables", "--full-layout", "--textline_light", "--light_version"], + ], ids=str) +def test_run_eynollah_layout_filename2( + tmp_path, + resources_dir, + run_eynollah_ok_and_check_logs, + options, +): + infile = resources_dir / 'euler_rechenkunst01_1738_0025.tif' + outfile = tmp_path / 'euler_rechenkunst01_1738_0025.xml' + run_eynollah_ok_and_check_logs( + layout_cli, + [ + '-i', str(infile), + '-o', str(outfile.parent), + ] + options, + [ + infile + ] + ) + assert outfile.exists() + tree = page_from_file(str(outfile)).etree + regions = tree.xpath("//page:TextRegion", namespaces=NS) + assert len(regions) >= 2, "result is inaccurate" + regions = tree.xpath("//page:TableRegion", namespaces=NS) + # model/decoding is not very precise, so (depending on mode) we can get fractures/splits/FP + assert len(regions) >= 1, "result is inaccurate" + regions = tree.xpath("//page:SeparatorRegion", namespaces=NS) + assert len(regions) >= 2, "result is inaccurate" + lines = tree.xpath("//page:TextLine", namespaces=NS) + assert len(lines) >= 2, "result is inaccurate" # mostly table (if detected correctly), but 1 page and 1 catch-word line + +def test_run_eynollah_layout_directory( + tmp_path, + resources_dir, + run_eynollah_ok_and_check_logs, +): + outdir = tmp_path + run_eynollah_ok_and_check_logs( + layout_cli, + [ + '-di', str(resources_dir), + '-o', str(outdir), + ], + [ + 'Job done in', + 'All jobs done in', + ] + ) + assert len(list(outdir.iterdir())) == 2 diff --git a/tests/cli_tests/test_mbreorder.py b/tests/cli_tests/test_mbreorder.py new file mode 100644 index 0000000..7fb246d --- /dev/null +++ b/tests/cli_tests/test_mbreorder.py @@ -0,0 +1,53 @@ +from ocrd_modelfactory import page_from_file +from ocrd_models.constants import NAMESPACES as NS + +from eynollah.cli import ( + machine_based_reading_order as mbreorder_cli, +) + + +def test_run_eynollah_mbreorder_filename( + tmp_path, + resources_dir, + run_eynollah_ok_and_check_logs, +): + infile = resources_dir / 'kant_aufklaerung_1784_0020.xml' + outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') + run_eynollah_ok_and_check_logs( + mbreorder_cli, + [ + '-i', str(infile), + '-o', str(outfile.parent), + ], + [ + # FIXME: mbreorder has no logging! + ] + ) + assert outfile.exists() + #in_tree = page_from_file(str(infile)).etree + #in_order = in_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) + out_tree = page_from_file(str(outfile)).etree + out_order = out_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) + #assert len(out_order) >= 2, "result is inaccurate" + #assert in_order != out_order + assert out_order == ['r_1_1', 'r_2_1', 'r_2_2', 'r_2_3'] + +def test_run_eynollah_mbreorder_directory( + tmp_path, + resources_dir, + run_eynollah_ok_and_check_logs, +): + outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') + outdir = tmp_path + run_eynollah_ok_and_check_logs( + mbreorder_cli, + [ + '-di', str(resources_dir), + '-o', str(outdir), + ], + [ + # FIXME: mbreorder has no logging! + ] + ) + assert len(list(outdir.iterdir())) == 2 + diff --git a/tests/cli_tests/test_ocr.py b/tests/cli_tests/test_ocr.py new file mode 100644 index 0000000..747d978 --- /dev/null +++ b/tests/cli_tests/test_ocr.py @@ -0,0 +1,67 @@ +import pytest +from eynollah.cli import ( + ocr as ocr_cli, +) +from ocrd_modelfactory import page_from_file +from ocrd_models.constants import NAMESPACES as NS + +@pytest.mark.parametrize( + "options", + [ + [], # defaults + ["-doit", #str(outrenderfile.parent)], + ], + ["-trocr"], + ], ids=str) +def test_run_eynollah_ocr_filename( + tmp_path, + run_eynollah_ok_and_check_logs, + resources_dir, + options, +): + infile = resources_dir / 'kant_aufklaerung_1784_0020.tif' + outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') + outrenderfile = tmp_path / 'render' / 'kant_aufklaerung_1784_0020.png' + outrenderfile.parent.mkdir() + if "-doit" in options: + options.insert(options.index("-doit") + 1, str(outrenderfile.parent)) + run_eynollah_ok_and_check_logs( + ocr_cli, + [ + '-i', str(infile), + '-dx', str(infile.parent), + '-o', str(outfile.parent), + ] + options, + [ + # FIXME: ocr has no logging! + ] + ) + assert outfile.exists() + if "-doit" in options: + assert outrenderfile.exists() + #in_tree = page_from_file(str(infile)).etree + #in_order = in_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) + out_tree = page_from_file(str(outfile)).etree + out_texts = out_tree.xpath("//page:TextLine/page:TextEquiv[last()]/page:Unicode/text()", namespaces=NS) + assert len(out_texts) >= 2, ("result is inaccurate", out_texts) + assert sum(map(len, out_texts)) > 100, ("result is inaccurate", out_texts) + +def test_run_eynollah_ocr_directory( + tmp_path, + run_eynollah_ok_and_check_logs, + resources_dir, +): + outdir = tmp_path + run_eynollah_ok_and_check_logs( + ocr_cli, + [ + '-di', str(resources_dir), + '-dx', str(resources_dir), + '-o', str(outdir), + ], + [ + # FIXME: ocr has no logging! + ] + ) + assert len(list(outdir.iterdir())) == 2 + diff --git a/tests/cli_tests/test_run.py b/tests/cli_tests/test_run.py new file mode 100644 index 0000000..122bab5 --- /dev/null +++ b/tests/cli_tests/test_run.py @@ -0,0 +1,10 @@ +import pytest +from PIL import Image +from eynollah.cli import ( + layout as layout_cli, + binarization as binarization_cli, + enhancement as enhancement_cli, +) +from ocrd_modelfactory import page_from_file +from ocrd_models.constants import NAMESPACES as NS + diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..e73d0e3 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,25 @@ +from glob import glob +import os +import pytest +from pathlib import Path + + +@pytest.fixture() +def tests_dir(): + return Path(__file__).parent.resolve() + +@pytest.fixture() +def model_dir(tests_dir): + return os.environ.get('EYNOLLAH_MODELS_DIR', str(tests_dir.joinpath('..').resolve())) + +@pytest.fixture() +def resources_dir(tests_dir): + return tests_dir / 'resources' + +@pytest.fixture() +def image_resources(resources_dir): + return [Path(x) for x in glob(str(resources_dir / '*.tif'))] + +@pytest.fixture() +def eynollah_log_filter(): + return lambda logrec: logrec.name.startswith('eynollah') diff --git a/tests/test_run.py b/tests/test_run.py deleted file mode 100644 index 6d97fbb..0000000 --- a/tests/test_run.py +++ /dev/null @@ -1,229 +0,0 @@ -from os import environ -from pathlib import Path -import pytest -import logging -from PIL import Image -from eynollah.cli import ( - layout as layout_cli, - binarization as binarization_cli, - enhancement as enhancement_cli, - machine_based_reading_order as mbreorder_cli, - ocr as ocr_cli, -) -from click.testing import CliRunner -from ocrd_modelfactory import page_from_file -from ocrd_models.constants import NAMESPACES as NS - -testdir = Path(__file__).parent.resolve() - -MODELS_DIR = environ.get('EYNOLLAH_MODELS_DIR', str(testdir.joinpath('..').resolve())) - -def only_eynollah(logrec): - return logrec.name.startswith('eynollah') - -@pytest.mark.parametrize( - "options", - [ - [], # defaults - ["--no-patches"], - ], ids=str) -def test_run_eynollah_binarization_filename(tmp_path, pytestconfig, caplog, options): - infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') - outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') - args = [ - '-m', MODELS_DIR, - '-i', str(infile), - '-o', str(outfile), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(binarization_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert any(True for logmsg in logmsgs if logmsg.startswith('Predicting')) - assert outfile.exists() - with Image.open(infile) as original_img: - original_size = original_img.size - with Image.open(outfile) as binarized_img: - binarized_size = binarized_img.size - assert original_size == binarized_size - -def test_run_eynollah_binarization_directory(tmp_path, pytestconfig, caplog): - indir = testdir.joinpath('resources') - outdir = tmp_path - args = [ - '-m', MODELS_DIR, - '-di', str(indir), - '-o', str(outdir), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(binarization_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Predicting')]) == 2 - assert len(list(outdir.iterdir())) == 2 - -@pytest.mark.parametrize( - "options", - [ - [], # defaults - ["-sos"], - ], ids=str) -def test_run_eynollah_enhancement_filename(tmp_path, pytestconfig, caplog, options): - infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') - outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') - args = [ - '-m', MODELS_DIR, - '-i', str(infile), - '-o', str(outfile.parent), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(enhancement_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert any(True for logmsg in logmsgs if logmsg.startswith('Image was enhanced')), logmsgs - assert outfile.exists() - with Image.open(infile) as original_img: - original_size = original_img.size - with Image.open(outfile) as enhanced_img: - enhanced_size = enhanced_img.size - assert (original_size == enhanced_size) == ("-sos" in options) - -def test_run_eynollah_enhancement_directory(tmp_path, pytestconfig, caplog): - indir = testdir.joinpath('resources') - outdir = tmp_path - args = [ - '-m', MODELS_DIR, - '-di', str(indir), - '-o', str(outdir), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(enhancement_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Image was enhanced')]) == 2 - assert len(list(outdir.iterdir())) == 2 - -def test_run_eynollah_mbreorder_filename(tmp_path, pytestconfig, caplog): - infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.xml') - outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') - args = [ - '-m', MODELS_DIR, - '-i', str(infile), - '-o', str(outfile.parent), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(mbreorder_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - # FIXME: mbreorder has no logging! - #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs - assert outfile.exists() - #in_tree = page_from_file(str(infile)).etree - #in_order = in_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) - out_tree = page_from_file(str(outfile)).etree - out_order = out_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) - #assert len(out_order) >= 2, "result is inaccurate" - #assert in_order != out_order - assert out_order == ['r_1_1', 'r_2_1', 'r_2_2', 'r_2_3'] - -def test_run_eynollah_mbreorder_directory(tmp_path, pytestconfig, caplog): - indir = testdir.joinpath('resources') - outdir = tmp_path - args = [ - '-m', MODELS_DIR, - '-di', str(indir), - '-o', str(outdir), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(mbreorder_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - # FIXME: mbreorder has no logging! - #assert len([logmsg for logmsg in logmsgs if logmsg.startswith('???')]) == 2 - assert len(list(outdir.iterdir())) == 2 - -@pytest.mark.parametrize( - "options", - [ - [], # defaults - ["-doit", #str(outrenderfile.parent)], - ], - ["-trocr"], - ], ids=str) -def test_run_eynollah_ocr_filename(tmp_path, pytestconfig, caplog, options): - infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') - outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') - outrenderfile = tmp_path.joinpath('render').joinpath('kant_aufklaerung_1784_0020.png') - outrenderfile.parent.mkdir() - args = [ - '-m', MODELS_DIR, - '-i', str(infile), - '-dx', str(infile.parent), - '-o', str(outfile.parent), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.DEBUG) - runner = CliRunner() - if "-doit" in options: - options.insert(options.index("-doit") + 1, str(outrenderfile.parent)) - with caplog.filtering(only_eynollah): - result = runner.invoke(ocr_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - # FIXME: ocr has no logging! - #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs - assert outfile.exists() - if "-doit" in options: - assert outrenderfile.exists() - #in_tree = page_from_file(str(infile)).etree - #in_order = in_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) - out_tree = page_from_file(str(outfile)).etree - out_texts = out_tree.xpath("//page:TextLine/page:TextEquiv[last()]/page:Unicode/text()", namespaces=NS) - assert len(out_texts) >= 2, ("result is inaccurate", out_texts) - assert sum(map(len, out_texts)) > 100, ("result is inaccurate", out_texts) - -def test_run_eynollah_ocr_directory(tmp_path, pytestconfig, caplog): - indir = testdir.joinpath('resources') - outdir = tmp_path - args = [ - '-m', MODELS_DIR, - '-di', str(indir), - '-dx', str(indir), - '-o', str(outdir), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(ocr_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - # FIXME: ocr has no logging! - #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs - assert len(list(outdir.iterdir())) == 2 diff --git a/tests/test_run_layout.py b/tests/test_run_layout.py deleted file mode 100644 index 29cebc4..0000000 --- a/tests/test_run_layout.py +++ /dev/null @@ -1,330 +0,0 @@ -from os import environ -from pathlib import Path -import pytest -import logging -from PIL import Image -from eynollah.cli import ( - layout as layout_cli, - binarization as binarization_cli, - enhancement as enhancement_cli, - machine_based_reading_order as mbreorder_cli, - ocr as ocr_cli, -) -from click.testing import CliRunner -from ocrd_modelfactory import page_from_file -from ocrd_models.constants import NAMESPACES as NS - -testdir = Path(__file__).parent.resolve() - -MODELS_LAYOUT = environ.get('MODELS_LAYOUT', str(testdir.joinpath('..', 'models_layout_v0_6_0').resolve())) - -def only_eynollah(logrec): - return logrec.name.startswith('eynollah') - -@pytest.mark.parametrize( - "options", - [ - [], # defaults - #["--allow_scaling", "--curved-line"], - ["--allow_scaling", "--curved-line", "--full-layout"], - ["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based"], - ["--allow_scaling", "--curved-line", "--full-layout", "--reading_order_machine_based", - "--textline_light", "--light_version"], - # -ep ... - # -eoi ... - # FIXME: find out whether OCR extra was installed, otherwise skip these - ["--do_ocr"], - ["--do_ocr", "--light_version", "--textline_light"], - ["--do_ocr", "--transformer_ocr"], - #["--do_ocr", "--transformer_ocr", "--light_version", "--textline_light"], - ["--do_ocr", "--transformer_ocr", "--light_version", "--textline_light", "--full-layout"], - # --skip_layout_and_reading_order - ], ids=str) -def test_run_eynollah_layout_filename(tmp_path, pytestconfig, caplog, options): - infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') - outfile = tmp_path / 'kant_aufklaerung_1784_0020.xml' - args = [ - '-m', MODELS_LAYOUT, - '-i', str(infile), - '-o', str(outfile.parent), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(layout_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert str(infile) in logmsgs - assert outfile.exists() - tree = page_from_file(str(outfile)).etree - regions = tree.xpath("//page:TextRegion", namespaces=NS) - assert len(regions) >= 2, "result is inaccurate" - regions = tree.xpath("//page:SeparatorRegion", namespaces=NS) - assert len(regions) >= 2, "result is inaccurate" - lines = tree.xpath("//page:TextLine", namespaces=NS) - assert len(lines) == 31, "result is inaccurate" # 29 paragraph lines, 1 page and 1 catch-word line - -@pytest.mark.parametrize( - "options", - [ - ["--tables"], - ["--tables", "--full-layout"], - ["--tables", "--full-layout", "--textline_light", "--light_version"], - ], ids=str) -def test_run_eynollah_layout_filename2(tmp_path, pytestconfig, caplog, options): - infile = testdir.joinpath('resources/euler_rechenkunst01_1738_0025.tif') - outfile = tmp_path / 'euler_rechenkunst01_1738_0025.xml' - args = [ - '-m', MODELS_LAYOUT, - '-i', str(infile), - '-o', str(outfile.parent), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(layout_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert str(infile) in logmsgs - assert outfile.exists() - tree = page_from_file(str(outfile)).etree - regions = tree.xpath("//page:TextRegion", namespaces=NS) - assert len(regions) >= 2, "result is inaccurate" - regions = tree.xpath("//page:TableRegion", namespaces=NS) - # model/decoding is not very precise, so (depending on mode) we can get fractures/splits/FP - assert len(regions) >= 1, "result is inaccurate" - regions = tree.xpath("//page:SeparatorRegion", namespaces=NS) - assert len(regions) >= 2, "result is inaccurate" - lines = tree.xpath("//page:TextLine", namespaces=NS) - assert len(lines) >= 2, "result is inaccurate" # mostly table (if detected correctly), but 1 page and 1 catch-word line - -def test_run_eynollah_layout_directory(tmp_path, pytestconfig, caplog): - indir = testdir.joinpath('resources') - outdir = tmp_path - args = [ - '-m', MODELS_LAYOUT, - '-di', str(indir), - '-o', str(outdir), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(layout_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Job done in')]) == 2 - assert any(logmsg for logmsg in logmsgs if logmsg.startswith('All jobs done in')) - assert len(list(outdir.iterdir())) == 2 - -@pytest.mark.parametrize( - "options", - [ - [], # defaults - ["--no-patches"], - ], ids=str) -def test_run_eynollah_binarization_filename(tmp_path, pytestconfig, caplog, options): - infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') - outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') - args = [ - '-m', MODELS_BIN, - '-i', str(infile), - '-o', str(outfile), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(binarization_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert any(True for logmsg in logmsgs if logmsg.startswith('Predicting')) - assert outfile.exists() - with Image.open(infile) as original_img: - original_size = original_img.size - with Image.open(outfile) as binarized_img: - binarized_size = binarized_img.size - assert original_size == binarized_size - -def test_run_eynollah_binarization_directory(tmp_path, pytestconfig, caplog): - indir = testdir.joinpath('resources') - outdir = tmp_path - args = [ - '-m', MODELS_BIN, - '-di', str(indir), - '-o', str(outdir), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(binarization_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Predicting')]) == 2 - assert len(list(outdir.iterdir())) == 2 - -@pytest.mark.parametrize( - "options", - [ - [], # defaults - ["-sos"], - ], ids=str) -def test_run_eynollah_enhancement_filename(tmp_path, pytestconfig, caplog, options): - infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') - outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') - args = [ - '-m', MODELS_LAYOUT, - '-i', str(infile), - '-o', str(outfile.parent), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(enhancement_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert any(True for logmsg in logmsgs if logmsg.startswith('Image was enhanced')), logmsgs - assert outfile.exists() - with Image.open(infile) as original_img: - original_size = original_img.size - with Image.open(outfile) as enhanced_img: - enhanced_size = enhanced_img.size - assert (original_size == enhanced_size) == ("-sos" in options) - -def test_run_eynollah_enhancement_directory(tmp_path, pytestconfig, caplog): - indir = testdir.joinpath('resources') - outdir = tmp_path - args = [ - '-m', MODELS_LAYOUT, - '-di', str(indir), - '-o', str(outdir), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(enhancement_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - assert len([logmsg for logmsg in logmsgs if logmsg.startswith('Image was enhanced')]) == 2 - assert len(list(outdir.iterdir())) == 2 - -def test_run_eynollah_mbreorder_filename(tmp_path, pytestconfig, caplog): - infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.xml') - outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') - args = [ - '-m', MODELS_LAYOUT, - '-i', str(infile), - '-o', str(outfile.parent), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(mbreorder_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - # FIXME: mbreorder has no logging! - #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs - assert outfile.exists() - #in_tree = page_from_file(str(infile)).etree - #in_order = in_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) - out_tree = page_from_file(str(outfile)).etree - out_order = out_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) - #assert len(out_order) >= 2, "result is inaccurate" - #assert in_order != out_order - assert out_order == ['r_1_1', 'r_2_1', 'r_2_2', 'r_2_3'] - -def test_run_eynollah_mbreorder_directory(tmp_path, pytestconfig, caplog): - indir = testdir.joinpath('resources') - outdir = tmp_path - args = [ - '-m', MODELS_LAYOUT, - '-di', str(indir), - '-o', str(outdir), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(mbreorder_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - # FIXME: mbreorder has no logging! - #assert len([logmsg for logmsg in logmsgs if logmsg.startswith('???')]) == 2 - assert len(list(outdir.iterdir())) == 2 - -@pytest.mark.parametrize( - "options", - [ - [], # defaults - ["-doit", #str(outrenderfile.parent)], - ], - ["-trocr"], - ], ids=str) -def test_run_eynollah_ocr_filename(tmp_path, pytestconfig, caplog, options): - infile = testdir.joinpath('resources/kant_aufklaerung_1784_0020.tif') - outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') - outrenderfile = tmp_path.joinpath('render').joinpath('kant_aufklaerung_1784_0020.png') - outrenderfile.parent.mkdir() - args = [ - '-m', MODELS_OCR, - '-i', str(infile), - '-dx', str(infile.parent), - '-o', str(outfile.parent), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.DEBUG) - runner = CliRunner() - if "-doit" in options: - options.insert(options.index("-doit") + 1, str(outrenderfile.parent)) - with caplog.filtering(only_eynollah): - result = runner.invoke(ocr_cli, args + options, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - # FIXME: ocr has no logging! - #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs - assert outfile.exists() - if "-doit" in options: - assert outrenderfile.exists() - #in_tree = page_from_file(str(infile)).etree - #in_order = in_tree.xpath("//page:OrderedGroup//@regionRef", namespaces=NS) - out_tree = page_from_file(str(outfile)).etree - out_texts = out_tree.xpath("//page:TextLine/page:TextEquiv[last()]/page:Unicode/text()", namespaces=NS) - assert len(out_texts) >= 2, ("result is inaccurate", out_texts) - assert sum(map(len, out_texts)) > 100, ("result is inaccurate", out_texts) - -def test_run_eynollah_ocr_directory(tmp_path, pytestconfig, caplog): - indir = testdir.joinpath('resources') - outdir = tmp_path - args = [ - '-m', MODELS_OCR, - '-di', str(indir), - '-dx', str(indir), - '-o', str(outdir), - ] - if pytestconfig.getoption('verbose') > 0: - args.extend(['-l', 'DEBUG']) - caplog.set_level(logging.INFO) - runner = CliRunner() - with caplog.filtering(only_eynollah): - result = runner.invoke(ocr_cli, args, catch_exceptions=False) - assert result.exit_code == 0, result.stdout - logmsgs = [logrec.message for logrec in caplog.records] - # FIXME: ocr has no logging! - #assert any(True for logmsg in logmsgs if logmsg.startswith('???')), logmsgs - assert len(list(outdir.iterdir())) == 2 From a913bdf7dce741aec7fe13d7af89b2984792d1d2 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 29 Oct 2025 18:24:17 +0100 Subject: [PATCH 422/492] make --model-basedir and --model-overrides top-level CLI options --- src/eynollah/cli.py | 136 +++++++++++++-------------- src/eynollah/cli_models.py | 25 +---- src/eynollah/eynollah.py | 12 +-- src/eynollah/eynollah_ocr.py | 6 +- src/eynollah/image_enhancer.py | 5 +- src/eynollah/mb_ro_on_layout.py | 7 +- src/eynollah/model_zoo/__init__.py | 5 +- src/eynollah/model_zoo/model_zoo.py | 10 +- src/eynollah/model_zoo/types.py | 6 +- src/eynollah/sbb_binarize.py | 4 +- tests/cli_tests/conftest.py | 19 +++- tests/cli_tests/test_binarization.py | 9 +- tests/cli_tests/test_enhance.py | 9 +- tests/cli_tests/test_layout.py | 10 +- tests/cli_tests/test_mbreorder.py | 10 +- tests/cli_tests/test_ocr.py | 7 +- tests/cli_tests/test_run.py | 10 -- tests/conftest.py | 12 +++ 18 files changed, 132 insertions(+), 170 deletions(-) delete mode 100644 tests/cli_tests/test_run.py diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 595f0ee..9ae909f 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -2,20 +2,36 @@ from dataclasses import dataclass import sys import click import logging -from typing import Tuple, List from ocrd_utils import initLogging, getLevelName, getLogger -from eynollah.eynollah import Eynollah -from eynollah.eynollah_ocr import Eynollah_ocr -from eynollah.sbb_binarize import SbbBinarizer -from eynollah.image_enhancer import Enhancer -from eynollah.mb_ro_on_layout import machine_based_reading_order_on_layout from eynollah.model_zoo import EynollahModelZoo from .cli_models import models_cli +@dataclass() +class EynollahCliCtx: + model_zoo: EynollahModelZoo + + @click.group() -def main(): - pass +@click.option( + "--model-basedir", + "-m", + help="directory of models", + type=click.Path(exists=True, file_okay=False), + # default=f"{os.environ['HOME']}/.local/share/ocrd-resources/ocrd-eynollah-segment", + required=True, +) +@click.option( + "--model-overrides", + "-mv", + help="override default versions of model categories, syntax is 'CATEGORY VARIANT PATH', e.g 'region light /path/to/model'. See eynollah list-models for the full list", + type=(str, str, str), + multiple=True, +) +@click.pass_context +def main(ctx, model_basedir, model_overrides): + # Initialize model zoo + ctx.obj = EynollahCliCtx(model_zoo=EynollahModelZoo(basedir=model_basedir, model_overrides=model_overrides)) main.add_command(models_cli, 'models') @@ -39,23 +55,17 @@ main.add_command(models_cli, 'models') type=click.Path(exists=True, file_okay=False), required=True, ) -@click.option( - "--model", - "-m", - help="directory of models", - type=click.Path(exists=True, file_okay=False), - required=True, -) @click.option( "--log_level", "-l", type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), help="Override log level globally to this", ) - -def machine_based_reading_order(input, dir_in, out, model, log_level): +@click.pass_context +def machine_based_reading_order(ctx, input, dir_in, out, log_level): + from eynollah.mb_ro_on_layout import machine_based_reading_order_on_layout assert bool(input) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." - orderer = machine_based_reading_order_on_layout(model) + orderer = machine_based_reading_order_on_layout(model_zoo=ctx.obj.model_zoo) if log_level: orderer.logger.setLevel(getLevelName(log_level)) @@ -67,7 +77,6 @@ def machine_based_reading_order(input, dir_in, out, model, log_level): @main.command() @click.option('--patches/--no-patches', default=True, help='by enabling this parameter you let the model to see the image in patches.') -@click.option('--model_dir', '-m', type=click.Path(exists=True, file_okay=False), required=True, help='directory containing models for prediction') @click.option( "--input-image", "--image", "-i", @@ -92,7 +101,7 @@ def machine_based_reading_order(input, dir_in, out, model, log_level): '--mode', type=click.Choice(['single', 'multi']), default='single', - help="Whether to use the (faster) single-model binarization or the (slightly better) multi-model binarization" + help="Whether to use the (newer and faster) single-model binarization or the (slightly better) multi-model binarization" ) @click.option( "--log_level", @@ -100,17 +109,19 @@ def machine_based_reading_order(input, dir_in, out, model, log_level): type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), help="Override log level globally to this", ) +@click.pass_context def binarization( + ctx, patches, - model_dir, input_image, mode, dir_in, output, log_level, ): + from eynollah.sbb_binarize import SbbBinarizer assert bool(input_image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." - binarizer = SbbBinarizer(model_dir, mode=mode) + binarizer = SbbBinarizer(model_zoo=ctx.obj.model_zoo, mode=mode) if log_level: binarizer.log.setLevel(getLevelName(log_level)) binarizer.run( @@ -148,14 +159,6 @@ def binarization( help="directory of input images (instead of --image)", type=click.Path(exists=True, file_okay=False), ) -@click.option( - "--model", - "-m", - help="directory of models", - type=click.Path(exists=True, file_okay=False), - required=True, -) - @click.option( "--num_col_upper", "-ncu", @@ -178,12 +181,13 @@ def binarization( type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), help="Override log level globally to this", ) - -def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_lower, save_org_scale, log_level): +@click.pass_context +def enhancement(ctx, image, out, overwrite, dir_in, num_col_upper, num_col_lower, save_org_scale, log_level): + from eynollah.image_enhancer import Enhancer assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." initLogging() enhancer = Enhancer( - model, + model_zoo=ctx.obj.model_zoo, num_col_upper=num_col_upper, num_col_lower=num_col_lower, save_org_scale=save_org_scale, @@ -223,22 +227,6 @@ def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_low help="directory of input images (instead of --image)", type=click.Path(exists=True, file_okay=False), ) -@click.option( - "--model", - "-m", - 'model_basedir', - help="directory of models", - type=click.Path(exists=True, file_okay=False), - # default=f"{os.environ['HOME']}/.local/share/ocrd-resources/ocrd-eynollah-segment", - required=True, -) -@click.option( - "--model_version", - "-mv", - help="override default versions of model categories, syntax is 'CATEGORY VARIANT PATH', e.g 'region light /path/to/model'. See eynollah list-models for the full list", - type=(str, str, str), - multiple=True, -) @click.option( "--save_images", "-si", @@ -409,14 +397,13 @@ def enhancement(image, out, overwrite, dir_in, model, num_col_upper, num_col_low is_flag=True, help="Setup a basic console logger", ) - +@click.pass_context def layout( + ctx, image, out, overwrite, dir_in, - model_basedir, - model_version, save_images, save_layout, save_deskewed, @@ -447,6 +434,7 @@ def layout( log_level, setup_logging, ): + from eynollah.eynollah import Eynollah if setup_logging: console_handler = logging.StreamHandler(sys.stdout) console_handler.setLevel(logging.INFO) @@ -476,8 +464,7 @@ def layout( assert not extract_only_images or not headers_off, "Image extraction -eoi can not be set alongside headers_off -ho" assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." eynollah = Eynollah( - model_basedir, - model_overrides=model_version, + model_zoo=ctx.obj.model_zoo, extract_only_images=extract_only_images, enable_plotting=enable_plotting, allow_enhancement=allow_enhancement, @@ -559,17 +546,6 @@ def layout( help="overwrite (instead of skipping) if output xml exists", is_flag=True, ) -@click.option( - "--model", - "-m", - help="directory of models", - type=click.Path(exists=True, file_okay=False), -) -@click.option( - "--model_name", - help="Specific model file path to use for OCR", - type=click.Path(exists=True, file_okay=False), -) @click.option( "--tr_ocr", "-trocr/-notrocr", @@ -609,20 +585,36 @@ def layout( type=click.Choice(['OFF', 'DEBUG', 'INFO', 'WARN', 'ERROR']), help="Override log level globally to this", ) - -def ocr(image, dir_in, dir_in_bin, dir_xmls, out, dir_out_image_text, overwrite, model, model_name, tr_ocr, export_textline_images_and_text, do_not_mask_with_textline_contour, batch_size, dataset_abbrevation, min_conf_value_of_textline_text, log_level): +@click.pass_context +def ocr( + ctx, + image, + dir_in, + dir_in_bin, + dir_xmls, + out, + dir_out_image_text, + overwrite, + tr_ocr, + export_textline_images_and_text, + do_not_mask_with_textline_contour, + batch_size, + dataset_abbrevation, + min_conf_value_of_textline_text, + log_level, +): + from eynollah.eynollah_ocr import Eynollah_ocr initLogging() - - assert bool(model) != bool(model_name), "Either -m (model directory) or --model_name (specific model name) must be provided." + assert not export_textline_images_and_text or not tr_ocr, "Exporting textline and text -etit can not be set alongside transformer ocr -tr_ocr" - assert not export_textline_images_and_text or not model, "Exporting textline and text -etit can not be set alongside model -m" + # FIXME: refactor: move export_textline_images_and_text out of eynollah.py + # assert not export_textline_images_and_text or not model, "Exporting textline and text -etit can not be set alongside model -m" assert not export_textline_images_and_text or not batch_size, "Exporting textline and text -etit can not be set alongside batch size -bs" assert not export_textline_images_and_text or not dir_in_bin, "Exporting textline and text -etit can not be set alongside directory of bin images -dib" assert not export_textline_images_and_text or not dir_out_image_text, "Exporting textline and text -etit can not be set alongside directory of images with predicted text -doit" assert bool(image) != bool(dir_in), "Either -i (single image) or -di (directory) must be provided, but not both." eynollah_ocr = Eynollah_ocr( - dir_models=model, - model_name=model_name, + model_zoo=ctx.obj.model_zoo, tr_ocr=tr_ocr, export_textline_images_and_text=export_textline_images_and_text, do_not_mask_with_textline_contour=do_not_mask_with_textline_contour, diff --git a/src/eynollah/cli_models.py b/src/eynollah/cli_models.py index a299d19..2f6eded 100644 --- a/src/eynollah/cli_models.py +++ b/src/eynollah/cli_models.py @@ -6,30 +6,7 @@ import click from eynollah.model_zoo.default_specs import MODELS_VERSION from .model_zoo import EynollahModelZoo - -@dataclass() -class EynollahCliCtx: - model_zoo: EynollahModelZoo - - @click.group() -@click.pass_context -@click.option( - "--model", - "-m", - 'model_basedir', - help="directory of models", - type=click.Path(exists=True, file_okay=False), - # default=f"{os.environ['HOME']}/.local/share/ocrd-resources/ocrd-eynollah-segment", - required=True, -) -@click.option( - "--model-overrides", - "-mv", - help="override default versions of model categories, syntax is 'CATEGORY VARIANT PATH', e.g 'region light /path/to/model'. See eynollah list-models for the full list", - type=(str, str, str), - multiple=True, -) def models_cli( ctx, model_basedir: str, @@ -38,7 +15,7 @@ def models_cli( """ Organize models for the various runners in eynollah. """ - ctx.obj = EynollahCliCtx(model_zoo=EynollahModelZoo(basedir=model_basedir, model_overrides=model_overrides)) + assert ctx.obj.model_zoo @models_cli.command('list') diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index 98e894c..867d86b 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -138,8 +138,8 @@ num_patches =21*21#14*14#28*28#14*14#28*28 class Eynollah: def __init__( self, - dir_models : str, - model_overrides: List[Tuple[str, str, str]] = [], + *, + model_zoo: EynollahModelZoo, extract_only_images : bool =False, enable_plotting : bool = False, allow_enhancement : bool = False, @@ -164,7 +164,7 @@ class Eynollah: skip_layout_and_reading_order : bool = False, ): self.logger = getLogger('eynollah') - self.model_zoo = EynollahModelZoo(basedir=dir_models) + self.model_zoo = model_zoo self.plotter = None if skip_layout_and_reading_order: @@ -231,12 +231,10 @@ class Eynollah: self.logger.warning("no GPU device available") self.logger.info("Loading models...") - self.setup_models(*model_overrides) + self.setup_models() self.logger.info(f"Model initialization complete ({time.time() - t_start:.1f}s)") - def setup_models(self, *model_overrides: Tuple[str, str, str]): - # override defaults from CLI - self.model_zoo.override_models(*model_overrides) + def setup_models(self): # load models, depending on modes # (note: loading too many models can cause OOM on GPU/CUDA, diff --git a/src/eynollah/eynollah_ocr.py b/src/eynollah/eynollah_ocr.py index 3aafd8e..d32777a 100644 --- a/src/eynollah/eynollah_ocr.py +++ b/src/eynollah/eynollah_ocr.py @@ -51,8 +51,8 @@ except ImportError: class Eynollah_ocr: def __init__( self, - dir_models, - model_name=None, + *, + model_zoo: EynollahModelZoo, dir_xmls=None, tr_ocr=False, batch_size: Optional[int]=None, @@ -70,7 +70,7 @@ class Eynollah_ocr: # prefix or dataset self.pref_of_dataset = pref_of_dataset self.logger = logger if logger else getLogger('eynollah.ocr') - self.model_zoo = EynollahModelZoo(basedir=dir_models) + self.model_zoo = model_zoo # TODO: Properly document what 'export_textline_images_and_text' is about if export_textline_images_and_text: diff --git a/src/eynollah/image_enhancer.py b/src/eynollah/image_enhancer.py index 74b4865..08d3d90 100644 --- a/src/eynollah/image_enhancer.py +++ b/src/eynollah/image_enhancer.py @@ -32,7 +32,8 @@ KERNEL = np.ones((5, 5), np.uint8) class Enhancer: def __init__( self, - dir_models : str, + *, + model_zoo: EynollahModelZoo, num_col_upper : Optional[int] = None, num_col_lower : Optional[int] = None, save_org_scale : bool = False, @@ -51,7 +52,7 @@ class Enhancer: self.num_col_lower = num_col_lower self.logger = logger if logger else getLogger('eynollah.enhance') - self.model_zoo = EynollahModelZoo(basedir=dir_models) + self.model_zoo = model_zoo for v in ['binarization', 'enhancement', 'col_classifier', 'page']: self.model_zoo.load_model(v) diff --git a/src/eynollah/mb_ro_on_layout.py b/src/eynollah/mb_ro_on_layout.py index 8338d35..620d6c0 100644 --- a/src/eynollah/mb_ro_on_layout.py +++ b/src/eynollah/mb_ro_on_layout.py @@ -32,12 +32,12 @@ KERNEL = np.ones((5, 5), np.uint8) class machine_based_reading_order_on_layout: def __init__( self, - dir_models : str, + *, + model_zoo: EynollahModelZoo, logger : Optional[Logger] = None, ): self.logger = logger if logger else getLogger('mbreorder') - self.dir_models = dir_models - self.model_reading_order_dir = dir_models + "/model_eynollah_reading_order_20250824" + self.model_zoo = model_zoo try: for device in tf.config.list_physical_devices('GPU'): @@ -45,7 +45,6 @@ class machine_based_reading_order_on_layout: except: self.logger.warning("no GPU device available") - self.model_zoo = EynollahModelZoo(basedir=dir_models) self.model_zoo.load_model('reading_order') # FIXME: light_version is always true, no need for checks in the code self.light_version = True diff --git a/src/eynollah/model_zoo/__init__.py b/src/eynollah/model_zoo/__init__.py index dda52c2..e1dc985 100644 --- a/src/eynollah/model_zoo/__init__.py +++ b/src/eynollah/model_zoo/__init__.py @@ -1,7 +1,4 @@ __all__ = [ 'EynollahModelZoo', - 'KerasModel', - 'TrOCRProcessor', - 'VisionEncoderDecoderModel', ] -from .model_zoo import EynollahModelZoo, KerasModel, TrOCRProcessor, VisionEncoderDecoderModel +from .model_zoo import EynollahModelZoo diff --git a/src/eynollah/model_zoo/model_zoo.py b/src/eynollah/model_zoo/model_zoo.py index 32fdd0e..40e979f 100644 --- a/src/eynollah/model_zoo/model_zoo.py +++ b/src/eynollah/model_zoo/model_zoo.py @@ -4,11 +4,13 @@ from copy import deepcopy from pathlib import Path from typing import Dict, List, Optional, Tuple, Type, Union +from ocrd_utils import tf_disable_interactive_logs +tf_disable_interactive_logs() + from keras.layers import StringLookup from keras.models import Model as KerasModel from keras.models import load_model from tabulate import tabulate -from transformers import TrOCRProcessor, VisionEncoderDecoderModel from ..patch_encoder import PatchEncoder, Patches from .specs import EynollahModelSpecSet from .default_specs import DEFAULT_MODEL_SPECS @@ -102,6 +104,7 @@ class EynollahModelZoo: elif model_category == 'characters': model = self._load_characters() elif model_category == 'trocr_processor': + from transformers import TrOCRProcessor model = TrOCRProcessor.from_pretrained(model_path) else: try: @@ -128,7 +131,10 @@ class EynollahModelZoo: """ ocr_model_dir = self.model_path('ocr', variant) if variant == 'tr': - return VisionEncoderDecoderModel.from_pretrained(ocr_model_dir) + from transformers import VisionEncoderDecoderModel + ret = VisionEncoderDecoderModel.from_pretrained(ocr_model_dir) + assert isinstance(ret, VisionEncoderDecoderModel) + return ret else: ocr_model = load_model(ocr_model_dir, compile=False) assert isinstance(ocr_model, KerasModel) diff --git a/src/eynollah/model_zoo/types.py b/src/eynollah/model_zoo/types.py index 5c3685e..7141d39 100644 --- a/src/eynollah/model_zoo/types.py +++ b/src/eynollah/model_zoo/types.py @@ -1,6 +1,8 @@ from typing import List, TypeVar, Union from keras.models import Model as KerasModel -from transformers import TrOCRProcessor, VisionEncoderDecoderModel -AnyModel = Union[VisionEncoderDecoderModel, TrOCRProcessor, KerasModel, List] +# NOTE: Creating an actual union type requires loading transformers which is expensive and error-prone +# from transformers import TrOCRProcessor, VisionEncoderDecoderModel +# AnyModel = Union[VisionEncoderDecoderModel, TrOCRProcessor, KerasModel, List] +AnyModel = object T = TypeVar('T') diff --git a/src/eynollah/sbb_binarize.py b/src/eynollah/sbb_binarize.py index 1bcf9d9..a8a05fa 100644 --- a/src/eynollah/sbb_binarize.py +++ b/src/eynollah/sbb_binarize.py @@ -24,11 +24,11 @@ def resize_image(img_in, input_height, input_width): class SbbBinarizer: - def __init__(self, model_dir: str, mode: str, logger=None): + def __init__(self, *, model_zoo: EynollahModelZoo, mode: str, logger=None): if mode not in ('single', 'multi'): raise ValueError(f"'mode' must be either 'multi' or 'single', not {mode}") self.log = logger if logger else logging.getLogger('eynollah.binarization') - self.model_zoo = EynollahModelZoo(basedir=model_dir) + self.model_zoo = model_zoo self.models = self.setup_models(mode) self.session = self.start_new_session() diff --git a/tests/cli_tests/conftest.py b/tests/cli_tests/conftest.py index c54f47b..223cc85 100644 --- a/tests/cli_tests/conftest.py +++ b/tests/cli_tests/conftest.py @@ -1,15 +1,17 @@ from typing import List -from click import Command import pytest import logging from click.testing import CliRunner, Result +from eynollah.cli import main as eynollah_cli + @pytest.fixture def run_eynollah_ok_and_check_logs( pytestconfig, caplog, model_dir, + eynollah_subcommands, eynollah_log_filter, ): """ @@ -18,14 +20,23 @@ def run_eynollah_ok_and_check_logs( every fragment in `expected_logs` """ - def _run_click_ok_logs(cli: Command, args: List[str], expected_logs: List[str]) -> Result: - args = ['-m', model_dir] + args + def _run_click_ok_logs( + subcommand: 'str', + args: List[str], + expected_logs: List[str], + ) -> Result: + assert subcommand in eynollah_subcommands, f'subcommand {subcommand} must be one of {eynollah_subcommands}' + args = [ + '-m', model_dir, + subcommand, + *args + ] if pytestconfig.getoption('verbose') > 0: args.extend(['-l', 'DEBUG']) caplog.set_level(logging.INFO) runner = CliRunner() with caplog.filtering(eynollah_log_filter): - result = runner.invoke(cli, args, catch_exceptions=False) + result = runner.invoke(eynollah_cli, args, catch_exceptions=False) assert result.exit_code == 0, result.stdout if expected_logs: logmsgs = [logrec.message for logrec in caplog.records] diff --git a/tests/cli_tests/test_binarization.py b/tests/cli_tests/test_binarization.py index 4672a4f..0490805 100644 --- a/tests/cli_tests/test_binarization.py +++ b/tests/cli_tests/test_binarization.py @@ -1,10 +1,5 @@ import pytest from PIL import Image -from eynollah.cli import ( - binarization as binarization_cli, -) -from ocrd_modelfactory import page_from_file -from ocrd_models.constants import NAMESPACES as NS @pytest.mark.parametrize( "options", @@ -21,7 +16,7 @@ def test_run_eynollah_binarization_filename( infile = tests_dir.joinpath('resources/kant_aufklaerung_1784_0020.tif') outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') run_eynollah_ok_and_check_logs( - binarization_cli, + 'binarization', [ '-i', str(infile), '-o', str(outfile), @@ -45,7 +40,7 @@ def test_run_eynollah_binarization_directory( ): outdir = tmp_path run_eynollah_ok_and_check_logs( - binarization_cli, + 'binarization', [ '-di', str(resources_dir), '-o', str(outdir), diff --git a/tests/cli_tests/test_enhance.py b/tests/cli_tests/test_enhance.py index 590c07f..91e7c4b 100644 --- a/tests/cli_tests/test_enhance.py +++ b/tests/cli_tests/test_enhance.py @@ -1,10 +1,5 @@ import pytest from PIL import Image -from eynollah.cli import ( - enhancement as enhancement_cli, -) -from ocrd_modelfactory import page_from_file -from ocrd_models.constants import NAMESPACES as NS @pytest.mark.parametrize( "options", @@ -21,7 +16,7 @@ def test_run_eynollah_enhancement_filename( infile = resources_dir / 'kant_aufklaerung_1784_0020.tif' outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.png') run_eynollah_ok_and_check_logs( - enhancement_cli, + 'enhancement', [ '-i', str(infile), '-o', str(outfile.parent), @@ -44,7 +39,7 @@ def test_run_eynollah_enhancement_directory( ): outdir = tmp_path run_eynollah_ok_and_check_logs( - enhancement_cli, + 'enhancement', [ '-di', str(resources_dir), '-o', str(outdir), diff --git a/tests/cli_tests/test_layout.py b/tests/cli_tests/test_layout.py index db7b88c..776372c 100644 --- a/tests/cli_tests/test_layout.py +++ b/tests/cli_tests/test_layout.py @@ -1,7 +1,4 @@ import pytest -from eynollah.cli import ( - layout as layout_cli, -) from ocrd_modelfactory import page_from_file from ocrd_models.constants import NAMESPACES as NS @@ -30,11 +27,10 @@ def test_run_eynollah_layout_filename( resources_dir, options, ): - outdir = tmp_path infile = resources_dir / 'kant_aufklaerung_1784_0020.tif' outfile = tmp_path / 'kant_aufklaerung_1784_0020.xml' run_eynollah_ok_and_check_logs( - layout_cli, + 'layout', [ '-i', str(infile), '-o', str(outfile.parent), @@ -68,7 +64,7 @@ def test_run_eynollah_layout_filename2( infile = resources_dir / 'euler_rechenkunst01_1738_0025.tif' outfile = tmp_path / 'euler_rechenkunst01_1738_0025.xml' run_eynollah_ok_and_check_logs( - layout_cli, + 'layout', [ '-i', str(infile), '-o', str(outfile.parent), @@ -96,7 +92,7 @@ def test_run_eynollah_layout_directory( ): outdir = tmp_path run_eynollah_ok_and_check_logs( - layout_cli, + 'layout', [ '-di', str(resources_dir), '-o', str(outdir), diff --git a/tests/cli_tests/test_mbreorder.py b/tests/cli_tests/test_mbreorder.py index 7fb246d..25b44d8 100644 --- a/tests/cli_tests/test_mbreorder.py +++ b/tests/cli_tests/test_mbreorder.py @@ -1,11 +1,6 @@ from ocrd_modelfactory import page_from_file from ocrd_models.constants import NAMESPACES as NS -from eynollah.cli import ( - machine_based_reading_order as mbreorder_cli, -) - - def test_run_eynollah_mbreorder_filename( tmp_path, resources_dir, @@ -14,7 +9,7 @@ def test_run_eynollah_mbreorder_filename( infile = resources_dir / 'kant_aufklaerung_1784_0020.xml' outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') run_eynollah_ok_and_check_logs( - mbreorder_cli, + 'machine-based-reading-order', [ '-i', str(infile), '-o', str(outfile.parent), @@ -37,10 +32,9 @@ def test_run_eynollah_mbreorder_directory( resources_dir, run_eynollah_ok_and_check_logs, ): - outfile = tmp_path.joinpath('kant_aufklaerung_1784_0020.xml') outdir = tmp_path run_eynollah_ok_and_check_logs( - mbreorder_cli, + 'machine-based-reading-order', [ '-di', str(resources_dir), '-o', str(outdir), diff --git a/tests/cli_tests/test_ocr.py b/tests/cli_tests/test_ocr.py index 747d978..f6b33a7 100644 --- a/tests/cli_tests/test_ocr.py +++ b/tests/cli_tests/test_ocr.py @@ -1,7 +1,4 @@ import pytest -from eynollah.cli import ( - ocr as ocr_cli, -) from ocrd_modelfactory import page_from_file from ocrd_models.constants import NAMESPACES as NS @@ -26,7 +23,7 @@ def test_run_eynollah_ocr_filename( if "-doit" in options: options.insert(options.index("-doit") + 1, str(outrenderfile.parent)) run_eynollah_ok_and_check_logs( - ocr_cli, + 'ocr', [ '-i', str(infile), '-dx', str(infile.parent), @@ -53,7 +50,7 @@ def test_run_eynollah_ocr_directory( ): outdir = tmp_path run_eynollah_ok_and_check_logs( - ocr_cli, + 'ocr', [ '-di', str(resources_dir), '-dx', str(resources_dir), diff --git a/tests/cli_tests/test_run.py b/tests/cli_tests/test_run.py deleted file mode 100644 index 122bab5..0000000 --- a/tests/cli_tests/test_run.py +++ /dev/null @@ -1,10 +0,0 @@ -import pytest -from PIL import Image -from eynollah.cli import ( - layout as layout_cli, - binarization as binarization_cli, - enhancement as enhancement_cli, -) -from ocrd_modelfactory import page_from_file -from ocrd_models.constants import NAMESPACES as NS - diff --git a/tests/conftest.py b/tests/conftest.py index e73d0e3..9b70ae6 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -23,3 +23,15 @@ def image_resources(resources_dir): @pytest.fixture() def eynollah_log_filter(): return lambda logrec: logrec.name.startswith('eynollah') + +@pytest.fixture +def eynollah_subcommands(): + return [ + 'binarization', + 'layout', + 'ocr', + 'enhancement', + 'machine-based-reading-order' + 'models' + ] + From 5e22e9db644197968ebd91899f17d20fac4c853b Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 29 Oct 2025 19:08:32 +0100 Subject: [PATCH 423/492] model_zoo: make type str to reduce importing overhead --- src/eynollah/cli.py | 8 ++-- src/eynollah/cli_models.py | 9 ++-- src/eynollah/model_zoo/default_specs.py | 60 ++++++++++++------------- src/eynollah/model_zoo/model_zoo.py | 10 ++++- src/eynollah/model_zoo/specs.py | 2 +- tests/test_model_zoo.py | 33 +++++++------- 6 files changed, 63 insertions(+), 59 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 9ae909f..bd2d807 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -1,9 +1,11 @@ from dataclasses import dataclass +import os import sys import click import logging from ocrd_utils import initLogging, getLevelName, getLogger -from eynollah.model_zoo import EynollahModelZoo + +from .model_zoo import EynollahModelZoo from .cli_models import models_cli @@ -11,15 +13,13 @@ from .cli_models import models_cli class EynollahCliCtx: model_zoo: EynollahModelZoo - @click.group() @click.option( "--model-basedir", "-m", help="directory of models", type=click.Path(exists=True, file_okay=False), - # default=f"{os.environ['HOME']}/.local/share/ocrd-resources/ocrd-eynollah-segment", - required=True, + default=f'{os.getcwd()}/models_eynollah', ) @click.option( "--model-overrides", diff --git a/src/eynollah/cli_models.py b/src/eynollah/cli_models.py index 2f6eded..f3de596 100644 --- a/src/eynollah/cli_models.py +++ b/src/eynollah/cli_models.py @@ -1,16 +1,13 @@ -from dataclasses import dataclass from pathlib import Path -from typing import List, Set, Tuple +from typing import Set, Tuple import click from eynollah.model_zoo.default_specs import MODELS_VERSION -from .model_zoo import EynollahModelZoo @click.group() +@click.pass_context def models_cli( ctx, - model_basedir: str, - model_overrides: List[Tuple[str, str, str]], ): """ Organize models for the various runners in eynollah. @@ -26,6 +23,8 @@ def list_models( """ List all the models in the zoo """ + print(f"Model basedir: {ctx.obj.model_zoo.model_basedir}") + print(f"Model overrides: {ctx.obj.model_zoo.model_overrides}") print(ctx.obj.model_zoo) diff --git a/src/eynollah/model_zoo/default_specs.py b/src/eynollah/model_zoo/default_specs.py index fa67393..8daa270 100644 --- a/src/eynollah/model_zoo/default_specs.py +++ b/src/eynollah/model_zoo/default_specs.py @@ -1,5 +1,5 @@ from .specs import EynollahModelSpec, EynollahModelSpecSet -from .types import KerasModel, TrOCRProcessor, List +from .types import KerasModel # NOTE: This needs to change whenever models/versions change ZENODO = "https://zenodo.org/records/17295988/files" @@ -16,7 +16,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/eynollah-enhancement_20210425", dists=['enhancement', 'layout', 'ci'], dist_url=dist_url("enhancement"), - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -25,7 +25,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/eynollah-binarization-hybrid_20230504/model_bin_hybrid_trans_cnn_sbb_ens", dists=['layout', 'binarization', ], dist_url=dist_url("binarization"), - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -34,7 +34,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/eynollah-binarization_20210309", dists=['binarization'], dist_url=dist_url("binarization"), - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -43,7 +43,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/eynollah-binarization_20210425", dists=['binarization'], dist_url=dist_url("binarization"), - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -52,7 +52,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/eynollah-binarization-multi_2020_01_16/model_bin1", dist_url=dist_url("binarization"), dists=['binarization'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -61,7 +61,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/eynollah-binarization-multi_2020_01_16/model_bin2", dist_url=dist_url("binarization"), dists=['binarization'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -70,7 +70,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/eynollah-binarization-multi_2020_01_16/model_bin3", dist_url=dist_url("binarization"), dists=['binarization'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -79,7 +79,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/eynollah-binarization-multi_2020_01_16/model_bin4", dist_url=dist_url("binarization"), dists=['binarization'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -88,7 +88,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/eynollah-column-classifier_20210425", dist_url=dist_url("layout"), dists=['layout'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -97,7 +97,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/model_eynollah_page_extraction_20250915", dist_url=dist_url("layout"), dists=['layout'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -106,7 +106,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/eynollah-main-regions-ensembled_20210425", dist_url=dist_url("layout"), dists=['layout'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -115,7 +115,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/eynollah-main-regions_20231127_672_org_ens_11_13_16_17_18", dist_url=dist_url("layout"), dists=['layout'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -125,7 +125,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ dist_url=dist_url("layout"), help="early layout", dists=['layout'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -135,7 +135,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ dist_url=dist_url("layout"), help="early layout, non-light, 2nd part", dists=['layout'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -150,7 +150,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ dist_url=dist_url("layout"), dists=['layout'], help="early layout, light, 1-or-2-column", - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -166,7 +166,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ dist_url=dist_url("layout"), help="full layout / no patches", dists=['layout'], - type=KerasModel, + type='Keras', ), # FIXME: Why is region_fl and region_fl_np the same model? @@ -186,7 +186,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ dist_url=dist_url("layout"), help="full layout / with patches", dists=['layout'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -200,7 +200,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/model_eynollah_reading_order_20250824", dist_url=dist_url("reading_order"), dists=['layout', 'reading_order'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -215,7 +215,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/modelens_textline_0_1__2_4_16092024", dist_url=dist_url("layout"), dists=['layout'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -225,7 +225,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/modelens_textline_0_1__2_4_16092024", dist_url=dist_url("layout"), dists=['layout'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -234,7 +234,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/eynollah-tables_20210319", dist_url=dist_url("layout"), dists=['layout'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -243,7 +243,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/modelens_table_0t4_201124", dist_url=dist_url("layout"), dists=['layout'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -252,7 +252,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/model_eynollah_ocr_cnnrnn_20250930", dist_url=dist_url("ocr"), dists=['layout', 'ocr'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -262,7 +262,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ help="slightly better at degraded Fraktur", dist_url=dist_url("ocr"), dists=['ocr'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -271,7 +271,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="characters_org.txt", dist_url=dist_url("ocr"), dists=['ocr'], - type=KerasModel, + type='decoder', ), EynollahModelSpec( @@ -280,7 +280,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="characters_org.txt", dist_url=dist_url("ocr"), dists=['ocr'], - type=list, + type='List[str]', ), EynollahModelSpec( @@ -290,7 +290,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ dist_url=dist_url("trocr"), help='much slower transformer-based', dists=['trocr'], - type=KerasModel, + type='Keras', ), EynollahModelSpec( @@ -299,7 +299,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/microsoft/trocr-base-printed", dist_url=dist_url("trocr"), dists=['trocr'], - type=KerasModel, + type='TrOCRProcessor', ), EynollahModelSpec( @@ -308,7 +308,7 @@ DEFAULT_MODEL_SPECS = EynollahModelSpecSet([ filename="models_eynollah/microsoft/trocr-base-handwritten", dist_url=dist_url("trocr"), dists=['trocr'], - type=TrOCRProcessor, + type='TrOCRProcessor', ), ]) diff --git a/src/eynollah/model_zoo/model_zoo.py b/src/eynollah/model_zoo/model_zoo.py index 40e979f..512bf1a 100644 --- a/src/eynollah/model_zoo/model_zoo.py +++ b/src/eynollah/model_zoo/model_zoo.py @@ -32,11 +32,18 @@ class EynollahModelZoo: ) -> None: self.model_basedir = Path(basedir) self.logger = logging.getLogger('eynollah.model_zoo') + if not self.model_basedir.exists(): + self.logger.warning(f"Model basedir does not exist: {basedir}. Set eynollah --model-basedir to the correct directory.") self.specs = deepcopy(DEFAULT_MODEL_SPECS) + self._overrides = [] if model_overrides: self.override_models(*model_overrides) self._loaded: Dict[str, AnyModel] = {} + @property + def model_overrides(self): + return self._overrides + def override_models( self, *model_overrides: Tuple[str, str, str], @@ -48,6 +55,7 @@ class EynollahModelZoo: spec = self.specs.get(model_category, model_variant) self.logger.warning("Overriding filename for model spec %s to %s", spec, model_filename) self.specs.get(model_category, model_variant).filename = model_filename + self._overrides += model_overrides def model_path( self, @@ -164,7 +172,7 @@ class EynollahModelZoo: return tabulate( [ [ - spec.type.__name__, + spec.type, spec.category, spec.variant, spec.help, diff --git a/src/eynollah/model_zoo/specs.py b/src/eynollah/model_zoo/specs.py index 322afa4..415e55d 100644 --- a/src/eynollah/model_zoo/specs.py +++ b/src/eynollah/model_zoo/specs.py @@ -15,7 +15,7 @@ class EynollahModelSpec(): dists: List[str] # URL to the smallest model distribution containing this model (link to Zenodo) dist_url: str - type: Type[AnyModel] + type: str variant: str = '' help: str = '' diff --git a/tests/test_model_zoo.py b/tests/test_model_zoo.py index 81e84f6..2042b28 100644 --- a/tests/test_model_zoo.py +++ b/tests/test_model_zoo.py @@ -1,19 +1,16 @@ -from pathlib import Path +from eynollah.model_zoo import EynollahModelZoo -from eynollah.model_zoo import EynollahModelZoo, TrOCRProcessor, VisionEncoderDecoderModel - -testdir = Path(__file__).parent.resolve() -MODELS_DIR = testdir.parent - -def test_trocr1(): - model_zoo = EynollahModelZoo(str(MODELS_DIR)) - model_zoo.load_model('trocr_processor') - proc = model_zoo.get('trocr_processor', TrOCRProcessor) - assert isinstance(proc, TrOCRProcessor) - - model_zoo.load_model('ocr', 'tr') - model = model_zoo.get('ocr') - assert isinstance(model, VisionEncoderDecoderModel) - print(proc) - -test_trocr1() +def test_trocr1( + model_dir, +): + model_zoo = EynollahModelZoo(model_dir) + try: + from transformers import TrOCRProcessor, VisionEncoderDecoderModel + model_zoo.load_model('trocr_processor') + proc = model_zoo.get('trocr_processor', TrOCRProcessor) + assert isinstance(proc, TrOCRProcessor) + model_zoo.load_model('ocr', 'tr') + model = model_zoo.get('ocr', VisionEncoderDecoderModel) + assert isinstance(model, VisionEncoderDecoderModel) + except ImportError: + pass From 29c273685f69a437c71b0d6cc46f24d45f88ad6f Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 29 Oct 2025 19:52:28 +0100 Subject: [PATCH 424/492] fix merge issues --- src/eynollah/cli.py | 5 +-- src/eynollah/eynollah.py | 42 +++++++++++++++---------- src/eynollah/image_enhancer.py | 5 +++ src/eynollah/mb_ro_on_layout.py | 4 +++ src/eynollah/model_zoo/default_specs.py | 1 - src/eynollah/model_zoo/specs.py | 3 +- src/eynollah/model_zoo/types.py | 3 +- src/eynollah/sbb_binarize.py | 9 ++++-- tests/conftest.py | 4 +-- 9 files changed, 46 insertions(+), 30 deletions(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index af1b805..d194565 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -426,13 +426,11 @@ def layout( threshold_art_class_layout, skip_layout_and_reading_order, ignore_page_extraction, - log_level, - setup_logging, ): """ Detect Layout (with optional image enhancement and reading order detection) """ - from eynollah.eynollah import Eynollah + from .eynollah import Eynollah assert enable_plotting or not save_layout, "Plotting with -sl also requires -ep" assert enable_plotting or not save_deskewed, "Plotting with -sd also requires -ep" assert enable_plotting or not save_all, "Plotting with -sa also requires -ep" @@ -452,7 +450,6 @@ def layout( assert not extract_only_images or not right2left, "Image extraction -eoi can not be set alongside right2left -r2l" assert not extract_only_images or not headers_off, "Image extraction -eoi can not be set alongside headers_off -ho" assert bool(image) != bool(dir_in), "Either -i (single input) or -di (directory) must be provided, but not both." - from .eynollah import Eynollah eynollah = Eynollah( model_zoo=ctx.obj.model_zoo, extract_only_images=extract_only_images, diff --git a/src/eynollah/eynollah.py b/src/eynollah/eynollah.py index ab04a67..dc90f1d 100644 --- a/src/eynollah/eynollah.py +++ b/src/eynollah/eynollah.py @@ -1,12 +1,22 @@ +""" +document layout analysis (segmentation) with output in PAGE-XML +""" # pylint: disable=no-member,invalid-name,line-too-long,missing-function-docstring,missing-class-docstring,too-many-branches # pylint: disable=too-many-locals,wrong-import-position,too-many-lines,too-many-statements,chained-comparison,fixme,broad-except,c-extension-no-member # pylint: disable=too-many-public-methods,too-many-arguments,too-many-instance-attributes,too-many-public-methods, # pylint: disable=consider-using-enumerate +# FIXME: fix all of those... # pyright: reportUnnecessaryTypeIgnoreComment=true # pyright: reportPossiblyUnboundVariable=false -""" -document layout analysis (segmentation) with output in PAGE-XML -""" +# pyright: reportMissingImports=false +# pyright: reportCallIssue=false +# pyright: reportOperatorIssue=false +# pyright: reportUnboundVariable=false +# pyright: reportArgumentType=false +# pyright: reportAttributeAccessIssue=false +# pyright: reportOptionalMemberAccess=false +# pyright: reportGeneralTypeIssues=false +# pyright: reportOptionalSubscript=false import logging import sys @@ -21,8 +31,7 @@ from difflib import SequenceMatcher as sq import math import os import time -from typing import List, Optional, Tuple -import warnings +from typing import Optional from functools import partial from pathlib import Path from multiprocessing import cpu_count @@ -39,17 +48,8 @@ from skimage.morphology import skeletonize from ocrd_utils import tf_disable_interactive_logs import statistics -try: - import torch # type: ignore -except ImportError: - torch = None -try: - import matplotlib.pyplot as plt -except ImportError: - plt = None - -#os.environ['CUDA_VISIBLE_DEVICES'] = '-1' tf_disable_interactive_logs() + import tensorflow as tf # warnings.filterwarnings("ignore") from tensorflow.python.keras import backend as K @@ -58,6 +58,14 @@ from tensorflow.keras.models import load_model from tensorflow.compat.v1.keras.backend import set_session from tensorflow.keras import layers from tensorflow.keras.layers import StringLookup +try: + import torch +except ImportError: + torch = None +try: + import matplotlib.pyplot as plt +except ImportError: + plt = None from .model_zoo import EynollahModelZoo from .utils.contour import ( @@ -3667,8 +3675,8 @@ class Eynollah: def return_ocr_of_textline_without_common_section( self, textline_image, - model_ocr: KerasModel, - processor: TrOCRProcessor, + model_ocr, + processor, device, width_textline, h2w_ratio, diff --git a/src/eynollah/image_enhancer.py b/src/eynollah/image_enhancer.py index 00aedec..575a583 100644 --- a/src/eynollah/image_enhancer.py +++ b/src/eynollah/image_enhancer.py @@ -2,6 +2,11 @@ Image enhancer. The output can be written as same scale of input or in new predicted scale. """ +# FIXME: fix all of those... +# pyright: reportUnboundVariable=false +# pyright: reportCallIssue=false +# pyright: reportArgumentType=false + import logging import os import time diff --git a/src/eynollah/mb_ro_on_layout.py b/src/eynollah/mb_ro_on_layout.py index 3527103..7f065f1 100644 --- a/src/eynollah/mb_ro_on_layout.py +++ b/src/eynollah/mb_ro_on_layout.py @@ -2,6 +2,10 @@ Machine learning based reading order detection """ +# pyright: reportCallIssue=false +# pyright: reportUnboundVariable=false +# pyright: reportArgumentType=false + import logging import os import time diff --git a/src/eynollah/model_zoo/default_specs.py b/src/eynollah/model_zoo/default_specs.py index 8daa270..8f5f8b5 100644 --- a/src/eynollah/model_zoo/default_specs.py +++ b/src/eynollah/model_zoo/default_specs.py @@ -1,5 +1,4 @@ from .specs import EynollahModelSpec, EynollahModelSpecSet -from .types import KerasModel # NOTE: This needs to change whenever models/versions change ZENODO = "https://zenodo.org/records/17295988/files" diff --git a/src/eynollah/model_zoo/specs.py b/src/eynollah/model_zoo/specs.py index 415e55d..54a55f2 100644 --- a/src/eynollah/model_zoo/specs.py +++ b/src/eynollah/model_zoo/specs.py @@ -1,6 +1,5 @@ from dataclasses import dataclass -from typing import Dict, List, Set, Tuple, Type -from .types import AnyModel +from typing import Dict, List, Set, Tuple @dataclass diff --git a/src/eynollah/model_zoo/types.py b/src/eynollah/model_zoo/types.py index 7141d39..43f6859 100644 --- a/src/eynollah/model_zoo/types.py +++ b/src/eynollah/model_zoo/types.py @@ -1,5 +1,4 @@ -from typing import List, TypeVar, Union -from keras.models import Model as KerasModel +from typing import TypeVar # NOTE: Creating an actual union type requires loading transformers which is expensive and error-prone # from transformers import TrOCRProcessor, VisionEncoderDecoderModel diff --git a/src/eynollah/sbb_binarize.py b/src/eynollah/sbb_binarize.py index 753a626..77741e9 100644 --- a/src/eynollah/sbb_binarize.py +++ b/src/eynollah/sbb_binarize.py @@ -2,10 +2,15 @@ Tool to load model and binarize a given image. """ +# pyright: reportIndexIssue=false +# pyright: reportCallIssue=false +# pyright: reportArgumentType=false +# pyright: reportPossiblyUnboundVariable=false + import os import logging from pathlib import Path -from typing import Dict, List, Optional +from typing import Dict, Optional import numpy as np import cv2 @@ -326,7 +331,7 @@ class SbbBinarizer: image = cv2.imread(image_path) img_last = 0 for n, (model_file, model) in enumerate(self.models.items()): - self.log.info('Predicting %s with model %s [%s/%s]', image_path if image_path else '[image]', model_file, n + 1, len(self.models.keys())) + self.logger.info('Predicting %s with model %s [%s/%s]', image_path if image_path else '[image]', model_file, n + 1, len(self.models.keys())) res = self.predict(model, image, use_patches) img_fin = np.zeros((res.shape[0], res.shape[1], 3)) diff --git a/tests/conftest.py b/tests/conftest.py index 9b70ae6..703095e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -31,7 +31,7 @@ def eynollah_subcommands(): 'layout', 'ocr', 'enhancement', - 'machine-based-reading-order' - 'models' + 'machine-based-reading-order', + 'models', ] From 4772fd17e2fd55e5b8b83490a2210ac32f9a73ab Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 29 Oct 2025 20:47:13 +0100 Subject: [PATCH 425/492] missed changing override mechanism in eynollah_ocr --- src/eynollah/eynollah_ocr.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/eynollah/eynollah_ocr.py b/src/eynollah/eynollah_ocr.py index d32777a..0f3eda6 100644 --- a/src/eynollah/eynollah_ocr.py +++ b/src/eynollah/eynollah_ocr.py @@ -1,4 +1,9 @@ +# FIXME: fix all of those... # pyright: reportPossiblyUnboundVariable=false +# pyright: reportOptionalMemberAccess=false +# pyright: reportArgumentType=false +# pyright: reportCallIssue=false +# pyright: reportOptionalSubscript=false from logging import Logger, getLogger from typing import Optional @@ -53,7 +58,6 @@ class Eynollah_ocr: self, *, model_zoo: EynollahModelZoo, - dir_xmls=None, tr_ocr=False, batch_size: Optional[int]=None, export_textline_images_and_text: bool=False, @@ -82,12 +86,13 @@ class Eynollah_ocr: if tr_ocr: self.model_zoo.load_model('trocr_processor') - self.model_zoo.load_model('ocr', 'tr', model_path_override=model_name) + self.model_zoo.load_model('ocr', 'tr') self.model_zoo.get('ocr').to(self.device) else: - self.model_zoo.load_model('ocr', '', model_path_override=model_name) + self.model_zoo.load_model('ocr', '') self.model_zoo.load_model('num_to_char') - self.end_character = len(self.model_zoo.load_model('characters')) + 2 + self.model_zoo.load_model('characters') + self.end_character = len(self.model_zoo.get('characters', list)) + 2 @property def device(self): From 9ab565fa023f7844ea4bb4649bbbf38726f0e285 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 29 Oct 2025 21:02:42 +0100 Subject: [PATCH 426/492] model basedir might be a symlink --- src/eynollah/cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index d194565..5ab3c9f 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -23,7 +23,7 @@ class EynollahCliCtx: "--model-basedir", "-m", help="directory of models", - type=click.Path(exists=True, file_okay=False), + type=click.Path(exists=True), default=f'{os.getcwd()}/models_eynollah', ) @click.option( From 600ebfeb50cb003416b84b6afd21040734c1bb30 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 29 Oct 2025 21:07:49 +0100 Subject: [PATCH 427/492] make: fix to use single-archive ZIP --- Makefile | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index 4fcd9fb..fe321b8 100644 --- a/Makefile +++ b/Makefile @@ -15,8 +15,8 @@ WGET = wget -O #SEG_MODEL := https://github.com/qurator-spk/eynollah/releases/download/v0.3.1/models_eynollah.tar.gz #SEG_MODEL := https://zenodo.org/records/17194824/files/models_layout_v0_5_0.tar.gz?download=1 EYNOLLAH_MODELS_URL := https://zenodo.org/records/17295988/files/models_all_v0_7_0.zip -EYNOLLAH_MODELS_ZIP = $(notdir $(SEG_MODEL)) -EYNOLLAH_MODELS_DIR = $(SEG_MODELFILE:%.zip=%) +EYNOLLAH_MODELS_ZIP = $(notdir $(EYNOLLAH_MODELS_URL)) +EYNOLLAH_MODELS_DIR = $(EYNOLLAH_MODELS_ZIP:%.zip=%) PYTEST_ARGS ?= -vv --isolate @@ -32,7 +32,7 @@ help: @echo " install-dev Install editable with pip" @echo " deps-test Install test dependencies with pip" @echo " models Download and extract models to $(CURDIR):" - @echo " $(BIN_MODELNAME) $(SEG_MODELNAME) $(OCR_MODELNAME)" + @echo " $(EYNOLLAH_MODELS_DIR)" @echo " smoke-test Run simple CLI check" @echo " ocrd-test Run OCR-D CLI check" @echo " test Run unit tests" @@ -112,9 +112,7 @@ ocrd-test: tests/resources/kant_aufklaerung_1784_0020.tif $(RM) -r $(TMPDIR) # Run unit tests -test: export MODELS_LAYOUT=$(CURDIR)/$(SEG_MODELNAME) -test: export MODELS_OCR=$(CURDIR)/$(OCR_MODELNAME) -test: export MODELS_BIN=$(CURDIR)/$(BIN_MODELNAME) +test: export EYNOLLAH_MODELS_DIR := $(CURDIR)/$(EYNOLLAH_MODELS_DIR) test: $(PYTHON) -m pytest tests --durations=0 --continue-on-collection-errors $(PYTEST_ARGS) From 15e6ecb95d4bec0381f6fc67084c19a5d97df6f4 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 29 Oct 2025 21:27:10 +0100 Subject: [PATCH 428/492] make models: update URL --- Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Makefile b/Makefile index fe321b8..8744d0a 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ WGET = wget -O #SEG_MODEL := https://github.com/qurator-spk/eynollah/releases/download/v0.3.0/models_eynollah.tar.gz #SEG_MODEL := https://github.com/qurator-spk/eynollah/releases/download/v0.3.1/models_eynollah.tar.gz #SEG_MODEL := https://zenodo.org/records/17194824/files/models_layout_v0_5_0.tar.gz?download=1 -EYNOLLAH_MODELS_URL := https://zenodo.org/records/17295988/files/models_all_v0_7_0.zip +EYNOLLAH_MODELS_URL := https://zenodo.org/records/17417471/files/models_all_v0_7_0.zip EYNOLLAH_MODELS_ZIP = $(notdir $(EYNOLLAH_MODELS_URL)) EYNOLLAH_MODELS_DIR = $(EYNOLLAH_MODELS_ZIP:%.zip=%) @@ -46,7 +46,6 @@ help: # END-EVAL - # Download and extract models to $(PWD)/models_layout_v0_6_0 models: $(EYNOLLAH_MODELS_DIR) From 46a45f6b0eee17cfd979c2ed9a35b82b92343272 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 29 Oct 2025 22:23:48 +0100 Subject: [PATCH 429/492] Create examples.md --- docs/examples.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 docs/examples.md diff --git a/docs/examples.md b/docs/examples.md new file mode 100644 index 0000000..8da0baf --- /dev/null +++ b/docs/examples.md @@ -0,0 +1,21 @@ +# Examples + +Example outputs of various Eynollah models + +# Binarisation + + + + +# Reading Order Detection + +Input Image +Output Image + +# OCR + +Input Image +Output Image + +Input Image +Output Image From f6c0f56348e578b8dae8f13efcb0e12ca1bd92bf Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 29 Oct 2025 22:23:56 +0100 Subject: [PATCH 430/492] Update README.md --- README.md | 36 ++++++++++++++---------------------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/README.md b/README.md index 5d5d5a8..8353005 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ ![](https://user-images.githubusercontent.com/952378/102350683-8a74db80-3fa5-11eb-8c7e-f743f7d6eae2.jpg) ## Features -* Document layout analysis using pixelwise segmentation models with support for 10 distinct segmentation classes: +* Document layout analysis using pixelwise segmentation models with support for 10 segmentation classes: * background, [page border](https://ocr-d.de/en/gt-guidelines/trans/lyRand.html), [text region](https://ocr-d.de/en/gt-guidelines/trans/lytextregion.html#textregionen__textregion_), [text line](https://ocr-d.de/en/gt-guidelines/pagexml/pagecontent_xsd_Complex_Type_pc_TextLineType.html), [header](https://ocr-d.de/en/gt-guidelines/trans/lyUeberschrift.html), [image](https://ocr-d.de/en/gt-guidelines/trans/lyBildbereiche.html), [separator](https://ocr-d.de/en/gt-guidelines/trans/lySeparatoren.html), [marginalia](https://ocr-d.de/en/gt-guidelines/trans/lyMarginalie.html), [initial](https://ocr-d.de/en/gt-guidelines/trans/lyInitiale.html), [table](https://ocr-d.de/en/gt-guidelines/trans/lyTabellen.html) * Textline segmentation to bounding boxes or polygons (contours) including for curved lines and vertical text * Document image binarization with pixelwise segmentation or hybrid CNN-Transformer models @@ -81,6 +81,8 @@ Eynollah supports five use cases: 4. [text recognition (OCR)](#ocr), and 5. [reading order detection](#reading-order-detection). +Some example outputs can be found in [`examples.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/examples.md). + ### Layout Analysis The layout analysis module is responsible for detecting layout elements, identifying text lines, and determining reading @@ -152,16 +154,6 @@ TODO ### OCR -

- Input Image - Output Image -

- -

- Input Image - Output Image -

- The OCR module performs text recognition using either a CNN-RNN model or a Transformer model. The command-line interface for OCR can be called like this: @@ -176,17 +168,17 @@ eynollah ocr \ The following options can be used to further configure the ocr processing: -| option | description | -|-------------------|:------------------------------------------------------------------------------- | -| `-dib` | directory of bins(files type must be '.png'). Prediction with both RGB and bins. | -| `-doit` | Directory containing output images rendered with the predicted text | -| `--model_name` | Specific model file path to use for OCR | -| `-trocr` | transformer ocr will be applied, otherwise cnn_rnn model | -| `-etit` | textlines images and text in xml will be exported into output dir (OCR training data) | -| `-nmtc` | cropped textline images will not be masked with textline contour | -| `-bs` | ocr inference batch size. Default bs for trocr and cnn_rnn models are 2 and 8 respectively | -| `-ds_pref` | add an abbrevation of dataset name to generated training data | -| `-min_conf` | minimum OCR confidence value. OCRs with textline conf lower than this will be ignored | +| option | description | +|-------------------|:-------------------------------------------------------------------------------------------| +| `-dib` | directory of binarized images (file type must be '.png'), prediction with both RGB and bin | +| `-doit` | directory for output images rendered with the predicted text | +| `--model_name` | file path to use specific model for OCR | +| `-trocr` | use transformer ocr model (otherwise cnn_rnn model is used) | +| `-etit` | export textline images and text in xml to output dir (OCR training data) | +| `-nmtc` | cropped textline images will not be masked with textline contour | +| `-bs` | ocr inference batch size. Default batch size is 2 for trocr and 8 for cnn_rnn models | +| `-ds_pref` | add an abbrevation of dataset name to generated training data | +| `-min_conf` | minimum OCR confidence value. OCR with textline conf lower than this will be ignored | ### Reading Order Detection From b1e191b2ea9511821cc15cfd6452184d76b87dad Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Wed, 29 Oct 2025 22:30:58 +0100 Subject: [PATCH 431/492] reformat cli options table --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8353005..a663215 100644 --- a/README.md +++ b/README.md @@ -101,7 +101,7 @@ eynollah layout \ The following options can be used to further configure the processing: | option | description | -|-------------------|:------------------------------------------------------------------------------- | +|-------------------|:--------------------------------------------------------------------------------------------| | `-fl` | full layout analysis including all steps and segmentation classes (recommended) | | `-light` | lighter and faster but simpler method for main region detection and deskewing (recommended) | | `-tll` | this indicates the light textline and should be passed with light version (recommended) | From 62d05917c51072631fc1e10246b2d98584a00694 Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 30 Oct 2025 12:17:38 +0100 Subject: [PATCH 432/492] test_layout: str(Path) --- tests/cli_tests/test_layout.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/cli_tests/test_layout.py b/tests/cli_tests/test_layout.py index 776372c..cd60e36 100644 --- a/tests/cli_tests/test_layout.py +++ b/tests/cli_tests/test_layout.py @@ -70,7 +70,7 @@ def test_run_eynollah_layout_filename2( '-o', str(outfile.parent), ] + options, [ - infile + str(infile) ] ) assert outfile.exists() From 8782ef17b26c1ab122f4613d31a1c558ebe800bd Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 30 Oct 2025 12:19:35 +0100 Subject: [PATCH 433/492] CI: :fire: upgrade torch for debugging --- .github/workflows/test-eynollah.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/test-eynollah.yml b/.github/workflows/test-eynollah.yml index dae190a..e639d41 100644 --- a/.github/workflows/test-eynollah.yml +++ b/.github/workflows/test-eynollah.yml @@ -60,6 +60,10 @@ jobs: make install-dev EXTRAS=OCR,plotting make deps-test EXTRAS=OCR,plotting + - name: Hard-upgrade torch for debugging + run: | + python -m pip install --upgrade torch + - name: Test with pytest run: make coverage PYTEST_ARGS="-vv --junitxml=pytest.xml" From c9efbe187159a72a9095ebee850a246553f6d986 Mon Sep 17 00:00:00 2001 From: Clemens Neudecker <952378+cneud@users.noreply.github.com> Date: Thu, 30 Oct 2025 16:52:59 +0100 Subject: [PATCH 434/492] refactor image layout in examples.md --- docs/examples.md | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/docs/examples.md b/docs/examples.md index 8da0baf..24336b3 100644 --- a/docs/examples.md +++ b/docs/examples.md @@ -4,8 +4,8 @@ Example outputs of various Eynollah models # Binarisation - - + + # Reading Order Detection @@ -14,8 +14,5 @@ Example outputs of various Eynollah models # OCR -Input Image -Output Image - -Input Image -Output Image +Input ImageOutput Image +Input ImageOutput Image From 70d8577a15d6a41433bbf3d48ce46fe4916ed19f Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Thu, 30 Oct 2025 22:16:41 +0100 Subject: [PATCH 435/492] Revert "remove redundant parentheses" This reverts commit 20a95365c283e4b90638063173fed3b8fb65cee1. --- src/eynollah/utils/__init__.py | 2 +- src/eynollah/utils/separate_lines.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index aa89bd1..9cf30b0 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1351,7 +1351,7 @@ def return_points_with_boundies(peaks_neg_fin, first_point, last_point): def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, label_lines, contours_h=None): t_ins_c0 = time.time() - separators_closeup= (region_pre_p[:, :] == label_lines) * 1 + separators_closeup=( (region_pre_p[:,:]==label_lines))*1 separators_closeup[0:110,:]=0 separators_closeup[separators_closeup.shape[0]-150:,:]=0 diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 84ca6d7..275bfac 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1473,9 +1473,9 @@ def separate_lines_new2(img_crop, thetha, num_col, slope_region, logger=None, pl img_int = np.zeros((img_xline.shape[0], img_xline.shape[1])) img_int[:, :] = img_xline[:, :] # img_patch_org[:,:,0] - img_resized = np.zeros((int(img_int.shape[0] * 1.2), int(img_int.shape[1] * 3))) - img_resized[int(img_int.shape[0] * 0.1): int(img_int.shape[0] * 0.1) + img_int.shape[0], - int(img_int.shape[1] * 1.0): int(img_int.shape[1] * 1.0) + img_int.shape[1]] = img_int[:, :] + img_resized = np.zeros((int(img_int.shape[0] * (1.2)), int(img_int.shape[1] * (3)))) + img_resized[int(img_int.shape[0] * (0.1)) : int(img_int.shape[0] * (0.1)) + img_int.shape[0], + int(img_int.shape[1] * (1.0)) : int(img_int.shape[1] * (1.0)) + img_int.shape[1]] = img_int[:, :] # plt.imshow(img_xline) # plt.show() img_line_rotated = rotate_image(img_resized, slopes_tile_wise[i]) @@ -1487,8 +1487,8 @@ def separate_lines_new2(img_crop, thetha, num_col, slope_region, logger=None, pl img_patch_separated_returned[:, :][img_patch_separated_returned[:, :] != 0] = 1 img_patch_separated_returned_true_size = img_patch_separated_returned[ - int(img_int.shape[0] * 0.1): int(img_int.shape[0] * 0.1) + img_int.shape[0], - int(img_int.shape[1] * 1.0): int(img_int.shape[1] * 1.0) + img_int.shape[1]] + int(img_int.shape[0] * (0.1)) : int(img_int.shape[0] * (0.1)) + img_int.shape[0], + int(img_int.shape[1] * (1.0)) : int(img_int.shape[1] * (1.0)) + img_int.shape[1]] img_patch_separated_returned_true_size = img_patch_separated_returned_true_size[:, margin : length_x - margin] img_patch_interest_revised[:, index_x_d + margin : index_x_u - margin] = img_patch_separated_returned_true_size @@ -1517,7 +1517,7 @@ def return_deskew_slop(img_patch_org, sigma_des,n_tot_angles=100, img_int[:,:]=img_patch_org[:,:]#img_patch_org[:,:,0] max_shape=np.max(img_int.shape) - img_resized=np.zeros((int(max_shape * 1.1) , int(max_shape * 1.1))) + img_resized=np.zeros((int( max_shape*(1.1) ) , int( max_shape*(1.1) ) )) onset_x=int((img_resized.shape[1]-img_int.shape[1])/2.) onset_y=int((img_resized.shape[0]-img_int.shape[0])/2.) From 2d35a0598d6164d9ccad9ef77d715db4250161c6 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Thu, 30 Oct 2025 22:16:48 +0100 Subject: [PATCH 436/492] Revert "replace list declaration with list literal (faster)" This reverts commit 9733d575bfd2caa19df0465a0fac9e5f352303b8. --- src/eynollah/utils/__init__.py | 18 ++++++++++++------ src/eynollah/utils/separate_lines.py | 6 ++++-- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index 9cf30b0..d6c927b 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -151,7 +151,8 @@ def return_x_start_end_mothers_childs_and_type_of_reading_order( min_ys=np.min(y_sep) max_ys=np.max(y_sep) - y_mains= [min_ys] + y_mains=[] + y_mains.append(min_ys) y_mains_sep_ohne_grenzen=[] for ii in range(len(new_main_sep_y)): @@ -524,7 +525,8 @@ def find_num_col(regions_without_separators, num_col_classifier, tables, multipl # print(forest[np.argmin(z[forest]) ] ) if not isNaN(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) - forest = [peaks_neg_fin[i + 1]] + forest = [] + forest.append(peaks_neg_fin[i + 1]) if i == (len(peaks_neg_fin) - 1): # print(print(forest[np.argmin(z[forest]) ] )) if not isNaN(forest[np.argmin(z[forest])]): @@ -692,7 +694,8 @@ def find_num_col_only_image(regions_without_separators, multiplier=3.8): # print(forest[np.argmin(z[forest]) ] ) if not isNaN(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) - forest = [peaks_neg_fin[i + 1]] + forest = [] + forest.append(peaks_neg_fin[i + 1]) if i == (len(peaks_neg_fin) - 1): # print(print(forest[np.argmin(z[forest]) ] )) if not isNaN(forest[np.argmin(z[forest])]): @@ -1343,7 +1346,8 @@ def combine_hor_lines_and_delete_cross_points_and_get_lines_features_back_new( return img_p_in, special_separators def return_points_with_boundies(peaks_neg_fin, first_point, last_point): - peaks_neg_tot = [first_point] + peaks_neg_tot = [] + peaks_neg_tot.append(first_point) for ii in range(len(peaks_neg_fin)): peaks_neg_tot.append(peaks_neg_fin[ii]) peaks_neg_tot.append(last_point) @@ -1511,7 +1515,8 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, args_cy_splitter=np.argsort(cy_main_splitters) cy_main_splitters_sort=cy_main_splitters[args_cy_splitter] - splitter_y_new= [0] + splitter_y_new=[] + splitter_y_new.append(0) for i in range(len(cy_main_splitters_sort)): splitter_y_new.append( cy_main_splitters_sort[i] ) splitter_y_new.append(region_pre_p.shape[0]) @@ -1587,7 +1592,8 @@ def return_boxes_of_images_by_order_of_reading_new( num_col, peaks_neg_fin = find_num_col( regions_without_separators[splitter_y_new[i]:splitter_y_new[i+1], :], num_col_classifier, tables, multiplier=3.) - peaks_neg_fin_early= [0] + peaks_neg_fin_early=[] + peaks_neg_fin_early.append(0) #print(peaks_neg_fin,'peaks_neg_fin') for p_n in peaks_neg_fin: peaks_neg_fin_early.append(p_n) diff --git a/src/eynollah/utils/separate_lines.py b/src/eynollah/utils/separate_lines.py index 275bfac..22ef00d 100644 --- a/src/eynollah/utils/separate_lines.py +++ b/src/eynollah/utils/separate_lines.py @@ -1227,7 +1227,8 @@ def separate_lines_new_inside_tiles(img_path, thetha): if diff_peaks[i] > cut_off: if not np.isnan(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) - forest = [peaks_neg[i + 1]] + forest = [] + forest.append(peaks_neg[i + 1]) if i == (len(peaks_neg) - 1): if not np.isnan(forest[np.argmin(z[forest])]): peaks_neg_true.append(forest[np.argmin(z[forest])]) @@ -1247,7 +1248,8 @@ def separate_lines_new_inside_tiles(img_path, thetha): if diff_peaks_pos[i] > cut_off: if not np.isnan(forest[np.argmax(z[forest])]): peaks_pos_true.append(forest[np.argmax(z[forest])]) - forest = [peaks[i + 1]] + forest = [] + forest.append(peaks[i + 1]) if i == (len(peaks) - 1): if not np.isnan(forest[np.argmax(z[forest])]): peaks_pos_true.append(forest[np.argmax(z[forest])]) From 9dbac280cc4fc7914bd022f8b07665d1f4d70051 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Thu, 30 Oct 2025 22:16:53 +0100 Subject: [PATCH 437/492] Revert "remove unnecessary backslash" This reverts commit f212ffa22ddfcdf953ec133d21dce900136cd7c1. --- src/eynollah/utils/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/eynollah/utils/__init__.py b/src/eynollah/utils/__init__.py index d6c927b..5ccb2af 100644 --- a/src/eynollah/utils/__init__.py +++ b/src/eynollah/utils/__init__.py @@ -1388,7 +1388,8 @@ def find_number_of_columns_in_document(region_pre_p, num_col_classifier, tables, gray = cv2.bitwise_not(separators_closeup_n_binary) gray=gray.astype(np.uint8) - bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -2) + bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \ + cv2.THRESH_BINARY, 15, -2) horizontal = np.copy(bw) vertical = np.copy(bw) From f90259d6e2f9360cf31b4e3b83a83bbd2b9cf544 Mon Sep 17 00:00:00 2001 From: cneud <952378+cneud@users.noreply.github.com> Date: Thu, 30 Oct 2025 22:24:54 +0100 Subject: [PATCH 438/492] fix docs links --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index a663215..0283fe9 100644 --- a/README.md +++ b/README.md @@ -134,7 +134,7 @@ If no further option is set, the tool performs layout detection of main regions and marginals). The best output quality is achieved when RGB images are used as input rather than greyscale or binarized images. -Additional documentation can be found in [`usage.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). +Additional documentation can be found in [`usage.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/usage.md). ### Binarization @@ -199,7 +199,7 @@ eynollah machine-based-reading-order \ ## Use as OCR-D processor -See [`ocrd.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/models.md). +See [`ocrd.md`](https://github.com/qurator-spk/eynollah/tree/main/docs/ocrd.md). ## How to cite From b6c7283b4dd7a93f46db4d8989ec962d84b60d8d Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 5 Nov 2025 14:39:30 +0100 Subject: [PATCH 439/492] further debugging --- .github/workflows/test-eynollah.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test-eynollah.yml b/.github/workflows/test-eynollah.yml index e639d41..47be056 100644 --- a/.github/workflows/test-eynollah.yml +++ b/.github/workflows/test-eynollah.yml @@ -63,6 +63,7 @@ jobs: - name: Hard-upgrade torch for debugging run: | python -m pip install --upgrade torch + find models_eynollah - name: Test with pytest run: make coverage PYTEST_ARGS="-vv --junitxml=pytest.xml" From 2c211095d7034d0914c9d602451f83cf7e9abd41 Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 5 Nov 2025 15:02:55 +0100 Subject: [PATCH 440/492] make deps-test should not depend on the models --- .github/workflows/test-eynollah.yml | 7 +++++++ Makefile | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test-eynollah.yml b/.github/workflows/test-eynollah.yml index 47be056..2fb9f4b 100644 --- a/.github/workflows/test-eynollah.yml +++ b/.github/workflows/test-eynollah.yml @@ -54,6 +54,13 @@ jobs: with: python-version: ${{ matrix.python-version }} + # - uses: actions/cache@v4 + # with: + # path: | + # path/to/dependencies + # some/other/dependencies + # key: ${{ runner.os }}-${{ hashFiles('**/lockfiles') }} + - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/Makefile b/Makefile index 8744d0a..2c79304 100644 --- a/Makefile +++ b/Makefile @@ -70,7 +70,7 @@ install: install-dev: $(PIP) install -e .$(and $(EXTRAS),[$(EXTRAS)]) -deps-test: $(EYNOLLAH_MODELS_ZIP) +deps-test: $(PIP) install -r requirements-test.txt smoke-test: TMPDIR != mktemp -d From 0bef6e297b535d41e1bab327b398c379f78f79ea Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 5 Nov 2025 15:19:16 +0100 Subject: [PATCH 441/492] make models: unzip to the versioned directory --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 2c79304..f9e2d79 100644 --- a/Makefile +++ b/Makefile @@ -56,7 +56,7 @@ $(EYNOLLAH_MODELS_ZIP): $(WGET) $@ $(EYNOLLAH_MODELS_URL) $(EYNOLLAH_MODELS_DIR): $(EYNOLLAH_MODELS_ZIP) - unzip $< + mkdir -p $@; cd $@; unzip ../$< build: $(PIP) install build From e449dbab6d9f6e7474d9ee925d9e263a3d5c389f Mon Sep 17 00:00:00 2001 From: kba Date: Wed, 5 Nov 2025 15:28:41 +0100 Subject: [PATCH 442/492] make *test: fix paths --- .github/workflows/test-eynollah.yml | 1 - Makefile | 18 +++++++++--------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/.github/workflows/test-eynollah.yml b/.github/workflows/test-eynollah.yml index 2fb9f4b..4e5cf6c 100644 --- a/.github/workflows/test-eynollah.yml +++ b/.github/workflows/test-eynollah.yml @@ -70,7 +70,6 @@ jobs: - name: Hard-upgrade torch for debugging run: | python -m pip install --upgrade torch - find models_eynollah - name: Test with pytest run: make coverage PYTEST_ARGS="-vv --junitxml=pytest.xml" diff --git a/Makefile b/Makefile index f9e2d79..fb12cb9 100644 --- a/Makefile +++ b/Makefile @@ -56,7 +56,7 @@ $(EYNOLLAH_MODELS_ZIP): $(WGET) $@ $(EYNOLLAH_MODELS_URL) $(EYNOLLAH_MODELS_DIR): $(EYNOLLAH_MODELS_ZIP) - mkdir -p $@; cd $@; unzip ../$< + unzip $< build: $(PIP) install build @@ -76,22 +76,22 @@ deps-test: smoke-test: TMPDIR != mktemp -d smoke-test: tests/resources/kant_aufklaerung_1784_0020.tif # layout analysis: - eynollah layout -i $< -o $(TMPDIR) -m $(CURDIR)/$(SEG_MODELNAME) + eynollah layout -i $< -o $(TMPDIR) -m $(CURDIR)/models_eynollah fgrep -q http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15 $(TMPDIR)/$(basename $( Date: Wed, 5 Nov 2025 16:19:55 +0100 Subject: [PATCH 443/492] make *test: another typo; --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index fb12cb9..e2e12ff 100644 --- a/Makefile +++ b/Makefile @@ -111,7 +111,7 @@ ocrd-test: tests/resources/kant_aufklaerung_1784_0020.tif $(RM) -r $(TMPDIR) # Run unit tests -test: export EYNOLLAH_MODELS_DIR := $(CURDIR)/models_eynollah +test: export EYNOLLAH_MODELS_DIR := $(CURDIR) test: $(PYTHON) -m pytest tests --durations=0 --continue-on-collection-errors $(PYTEST_ARGS) From d224b0f7e838990e6d66d90970dc7b24f3c672cb Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 6 Nov 2025 11:55:40 +0100 Subject: [PATCH 444/492] try with shapely.set_precision(...mode="keep_collpased") --- src/eynollah/utils/contour.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eynollah/utils/contour.py b/src/eynollah/utils/contour.py index f304db2..6550171 100644 --- a/src/eynollah/utils/contour.py +++ b/src/eynollah/utils/contour.py @@ -357,7 +357,7 @@ def join_polygons(polygons: Sequence[Polygon], scale=20) -> Polygon: assert jointp.geom_type == 'Polygon', jointp.wkt # follow-up calculations will necessarily be integer; # so anticipate rounding here and then ensure validity - jointp2 = set_precision(jointp, 1.0) + jointp2 = set_precision(jointp, 1.0, mode="keep_collapsed") if jointp2.geom_type != 'Polygon' or not jointp2.is_valid: jointp2 = Polygon(np.round(jointp.exterior.coords)) jointp2 = make_valid(jointp2) From 44037bc05dc6dfe70b7d52b6734775d07cf2e7b4 Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 6 Nov 2025 12:41:03 +0100 Subject: [PATCH 445/492] add layout marginalia test --- .gitignore | 1 + tests/cli_tests/test_layout.py | 23 ++ ..._rechtsgelehrsamkeit02_1758_0880_800px.jpg | Bin 0 -> 225989 bytes ..._rechtsgelehrsamkeit02_1758_0880_800px.xml | 235 ++++++++++++++++++ 4 files changed, 259 insertions(+) create mode 100644 tests/resources/estor_rechtsgelehrsamkeit02_1758_0880_800px.jpg create mode 100644 tests/resources/estor_rechtsgelehrsamkeit02_1758_0880_800px.xml diff --git a/.gitignore b/.gitignore index fd64f0b..49835a7 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ output.html *.tif *.sw? TAGS +uv.lock diff --git a/tests/cli_tests/test_layout.py b/tests/cli_tests/test_layout.py index cd60e36..c3076fd 100644 --- a/tests/cli_tests/test_layout.py +++ b/tests/cli_tests/test_layout.py @@ -103,3 +103,26 @@ def test_run_eynollah_layout_directory( ] ) assert len(list(outdir.iterdir())) == 2 + +def test_run_eynollah_layout_marginalia( + tmp_path, + resources_dir, + run_eynollah_ok_and_check_logs, +): + outdir = tmp_path + outfile = outdir / 'estor_rechtsgelehrsamkeit02_1758_0880_800px.xml' + run_eynollah_ok_and_check_logs( + 'layout', + [ + '-i', str(resources_dir / 'estor_rechtsgelehrsamkeit02_1758_0880_800px.jpg'), + '-o', str(outdir), + ], + [ + 'Job done in', + 'All jobs done in', + ] + ) + assert outfile.exists() + tree = page_from_file(str(outfile)).etree + regions = tree.xpath('//page:TextRegion[type="marginalia"]', namespaces=NS) + assert len(regions) == 5, "expected 5 marginalia regions" diff --git a/tests/resources/estor_rechtsgelehrsamkeit02_1758_0880_800px.jpg b/tests/resources/estor_rechtsgelehrsamkeit02_1758_0880_800px.jpg new file mode 100644 index 0000000000000000000000000000000000000000..92705082e498598b42a32a737b559bfd0df07ee5 GIT binary patch literal 225989 zcmb5VcT^M47dM;`Ab@m1DIv5VO_3%f61w!>@k=k#TR`a@LBSAuk>0Bas5B7-5kirU zf^PdO8|9y1Vxv_wO>WFw)U6aWb*6v9YtW z-@VTTK%QDz8U;mqE;iO955bIhu4c4WI-8 zZ$|;51IPk!qQKK&PdzNsJ2VZ#2O#diAt%tPLv1%B3;xn^MCM^x4Ph7!7$aJ~se$2Ux25M}vqSL{Wa+t6?~pd+_0yJSWG% zmz1F(AVKwCc!oW@1S%oW#dH_=N*AyOz8EQN;_$GR<7u%fed@vRkw_rie>rlTXPOl4 zW|!u9SoD{omDEF29hU{86bdmxO?iX=WGBt$mNpN0DQOrP4=K`Al>MYwTm7+iRP-L! z%sMmzhS7-s>ii3Epc-Zruj1^~L;EuPto$lzWDwmqXjrbV`GX067wupEB3+S5?Y?v? z4I?$r90h_kl^WajR4wDRVgh2Y=7-<1;zb zHe^KFqfZJeTm~CuuuXBC8mw+vs@N(vDF-4du$O(2roX?;D-oo2cug#;? zOX1upd55Da3Jg8M{yXYG-7e>y@L)Y{@t@)Moz@_ejQ}`@e`gspoBX42VX;9097W44 z7&=v;bc+?!j^YEUEhFYV z4G!gCZ;-ixMM}GhvSCtA7sqFo4hwgau{b*Kc|~ z5y`#^s7bkY$|qf6!IMey zuzjzfF%pMfgbZFEfNR$dveqq5l&pL`Yl2wI;@av8DgY7UlyzZ1pA`If0742Qzo$qo z!m4%$O?gtJY(VA)4Q6rBUr{yRyB6#>6_Uw`YlbP`b)qZ>HB$5I^%7@x3gS;H`dbF5 z=2|awi`jlj`W%XL8K@-+3rk(C+h>aad)Ii^^@<#f_^demySZswj9~bE)uQtAD(=ho z!}*9(v1ipyHh3SjSMUV?9R_yUoa=^vCk(baOl2(DM1>??H0=B*r9cEA?YKfCC!?2Y z2Hsn0@<8qm@i6g)uKN34qZigc^SlBI5+9IC(xUPMotr!;nI$fw)Q41imZE0B4RY&7 zJ-chX7Y6#G<+2DFwOX-8J;VCpP{EFsMvuh<^vF{hO1&w%+0(Kt1+sa0tjm6j39l0X z9l~EQ_FlBWnteQuTr|y=q3%57-wOfVhsnSsf@ZJ1O>+S*PjE<*&uA#D0eBwAB~mTP zaqO|2P99e_yDL3NQD#V(NkkNq3Cn>J5Y-hydqHXd%A_Q0)}c?hnGe6Lp58J#1CW#m zFLLVdf1a8cIw5`o_;AOXt+>9U$91f)tpm)ij_z0MG>0xmX>cc;21s8T8oo}e!wMd- zPw0EhO4L(?{dup@4DrK3or&Yf&8=78reb`W0?`+vq(-*Y7(ge|kYgYv8qRxM`ocUf z0Nhdq(`Jhybb0u%e%tDliWw61$#Uxd%T{lhLRlEj41qv@Q$5{@2v?AMD($3j#ir28=rI_bw^hfx>Z)8;H*O=eQD~wZ6|ezJ*X9#GoC20R!6w1 z`bU#`4@RVt5Xb;qq)W;2BV$SwMfC{&Btb>nYeJ zYY3!41W_49&PjorcgsVOtV7Qq7_x)acXUZHOjtJ>agBP~B`I9WHGg5_-*g;C*mVqt zg4hRT`I=5prXsjIcU0F|ed3F=oZJU&6dQvv$k7*3o>Ja$M6C`jebg*_jZkl|RqFPR zT%0^{IG7nZS!CPq4Pe^lYlG?fJ)SsFWC7g(XxXCW|79i(!QrbIgEO;mq)#^=nv^&K zJ%-Q|`qPigiBKf-Q_VK{-k@sPDXqC^9q0Tva$l6mHql++XJsGuPk7Xex74IG8&2#& zYRzhCCm_auzOTKjDADL*LOl?S&D3k;_ENtcx6$u1*>i(~S14uf1wMp%m?)&`;E%!GXL$7ztOK@hcY zc~0Ej#BcVaiRx&|xP4N-&_i|Bm(QWXs*s%VQq|i}k+=MTw_0B}lGGd;_R~NpdN7Wf*a%5cIsxbkPjJ**K|}^w=2B>>Z<*``^69zzcO=txEK22LhEI zSCt$Nsy(K5@B+0^Yx14|5JOpfWMEf-`|EugFvIebI*Zm`O9i~4a|yI4B4*l&F_u#S zpbdr|iXZ)vE%+q7wk`N)`9XqD{a(%upp>WUYf%~L%GUgHcKypEi6K_<8K?kx{Wlxa zz|s_t03-$30=$RX>4*~bK`Jg6b+E|0U zHHKfW?+GO2$q$KWb&3%gB}&ln&gbaf$>Gq;6O$A2;Y!q24mQcP_#2j49hIo#f+%+H z%E_OOo&W13zX!# z4(#csP74GVnwW z2Ed?hs6U#{y8;2J(xyR#$cFIhU{tExy#6p4*gtss>% zdV}aV-cdKeOW9LJrKD(ZiZqgS^mt2GW5J%pQL}bgK+W(ORLVluvxoNPCo(=7(4g0g z_90q)x>w<|Ux?{6(0npS1ayms*aDUzcgm5QcRu#L?;10fBa2ji)}KOHWRT;ztRSt} zkQUrd%HL{m0pt3Ehnp>X!=((F#P&gv577fGFaiMz=8lUZF5AP;MPoAdjsRD zF~+0Au~~-C@kdJ+DKlF)Yx!_80&7Q5U6Qo7PMNZe8f4}(1tvgH>Q%oesuCLRM!rST z2kG0N%*vlYvI>MdatZ87;z%zX1CS*=)ce)|qlxOay;ULO zg{mo>5wIYN`;U^h|6at=aBp+FRNK{Yw*~5cm}!iLtwo^qsmTE4((8Yr#bhhGT|2?crVQQ@Q;s^Pi-dZ(gkpGa(#I8b3 ztAI$dmrpmH+z1MyD-afNX8}1P6Ldh!5Iy2nvT^!{!fspdr zKo4h#pObOq3G?Ah=$hR2r6$wvKC>9HD8MmC#~jXR3em)LIiuZc7r263pPt4z)5i)JToy5NrQdMOEB_)P>BR_ zie@9&cHg3j%Q9at!s`yDbtH03^ugBFqv*9Aoe%mo%~N|pNoxc>Lp4~$JD)8Y7hA|D%{Lx!o2movS2)#EPN5&&3*FmdxJ%N+kDvUgqK2YBMOknK_;GT|WEFMx5lpBj# zI4FL6YpcsCXg?h!K!5XSu)jC;8Rl&O4;5w$Ce7N$z0mv3Jn1Wej`LSG6}uZPbIpGM z+9LbOD45jLQhYcqQXjK(ZFB9!RhR^r#@nHx&d-vq#bi;-`RNFeHam;l&+|!kT+%a% zl)GOj&AtkLf^37ir7L@6#TfgBXn5m}zTOoC_}=Y7WgWIxX@V(L7@)SMm2ki=xB>E- z(WnCg32~h%5h@XYgt{PN{X?y*{JV)AU{r-eJNx|IFGP;PdBOH$1cCPpXv|?etNQ>g z5TD-$2+kivGn|MoaC)hLqlX*RAWNvUbKR$nUGJWLkE z1-yPb%BT$JU^j@j;FdHE50=Lqt(o?9;()X$@&XCKgoK!vjdkGB0bQH9#`3&Ry^d81w zj84<~GxJ@DLaf20bBUfX~dy?7 z3+i|=G=UGT34M#ZS|CyW`B2^BW@6i%X;p+?vpwBmN!m$YM32;Jbi`aIGq!=qT`I`> z%D*U{dc4C8a^*Vnd3KKw=i|ArAs~D#BO=#BO&ye{jMV@6df-f6x~06BLxQ5!t_NOd zG{Y&~2SFbfkP}tgInab2&i`s5l9_AX(eX)&NZL6(u9YAYxMLp_C)htkDuGc!9;h{> z+NZs^0sL^58(xtL*isSaVL}LWT(|Q@Q#3y$%A3M9+=+w{Iw7kVVdhlHJ6d_?1Hs$? z3d$`EJVgC*>l!T8j%VM3{2xTr(^;EXLvB~aV`25Lcsq>qh+*Z~aa1SUx*8xa2!r=6 z%1Q)bOAJrBQbwr0(2891WlD_67(WbYDY6<;j0B^DMkMO3>-n(ISqhHu3R^Pnp4%P^ zgIW6?+#G8j0q3Js)%?gubmT+a)s*a?Xd zqfh=j-Sma?qFoJlKZLsv4SAD@i+EMh(r0_gxIqt-x!$VuDu*>l3#=%TOGaQr51Wr< zzHc`snZhw>CDl^$Lm9P zaiJe*Z~s}Cxrapk5$}ovG3ZJ>{#(6Zj|$R2Q&e9#Q!4YUAkl_m8=T;wr0InB)Bi4c zuK%>&AhnD7wCp2iRoFweDRxhB)uarBTf+COS6nGJ!~J4243YP#)%u{P-CD0xXO!(y zDRIb1M2Y?4N(=9NuD!P+F)Tzy-2HCanVnloM8x3JqQiw^*<1$iW+vV?pdnx>wnrGH z|DDRKQ$D^Iip<)pZ=O49IiM0}USWd*0)@l8>&7C^O{ZsWc#aE- zP?_96FwWxfGB>|7HSmSuChquqZN}L9RA$myO3dNxyShcTpQkks*_bEBlRm3Ay7!~^T?1I4h#=+rG$Oa zI7;*M!R(V2yxk3eqFVB@T5@vn^XSU{GOg0X;ZIDK%IB27R};o0Z&8MgUUHD_zRu=y z>vh6zv06iCy4niIF1TN|!R5obl~x_aqZ(bhUYPx)kmYy5rSXK)u7td8k#Q>;p%Rfo zwAoBl#;~vKrkT5)LgQGV*>g#Uwb&2rt4$Pl%&(xHdK+?`Sb2(^;`I8 z88N0COR%!KW6;WD=n%107wSgqas%MbmaUG$zdZH(jiZ=GzTZYL)b6fTGu^ikFlgmMn*aZHqyP%Rz2kJV)NzWnX*n`bUryikYTESHiSVH%FTSH z$=)h%`tM%4L4bKuw-YdVw0js>Pgr*-chmhU6-Yu}9HcP?BN7k-Gp?cZlPuQ;fX)0%Yut z5lyuGOvP2q~J{}6DH6D3NIB7{X1}6yq?|kyAHkS{O=Q1 z{~OEKV7`sr)-ZBrs7|HY4Zyz5qKRuB8F20Ba{gf^VGz@W;iu$)%25$8hga8y=cgeu zrdCl`=BRi6I;gHu_SYe{q?1kBQo%UJBe5Tbm-Ue%eWfDJ%$%mD zAhT$0?eTn?4D~CN!P_P94dAPnQi#^S`ZcHFMm}QEPiqeIx9*X_$@!bjrNc9~V2+LM z&cAfSOmmj(WP!D;adKVoPMFEWC#AP%fPCfxGjANKL$=LoVdJq1d-<oWX%z6Dwnj$@Gr= zHu!gvSF?D_X?))0m}WULfGlpLLQ#8oa>akQh@fA zshRY(Vs{8x<3KpIlu2~mwA!{#@c8k;#Z&()nP97klbOaF0HaQ`qH-v?1!Y848LsnJ z$mT!3rRjxK#=Rtk5oOiHd?dTe_on>mm#FjL%TVyFJNWdYDC(!xQ7JoU_6Be@^H3`3 z{*~)LhIqC9we=LWe?Q-)BlBHChr%2+&%M|$Xfu!-;aA1C=$b$<-MjM!eks&t`wd{e zb$O=nM>uuR*!4Z0kmk-EgWCLQyqEYX*ZaQT%xZ+o0-vk(pQr2|_kCQFJ$P>beR12H z#6h1E#BIr@>}f`RU*%j+a=#9na{ml{_1sJL!n7RR(MoS{18~*WMqHaR&h~e+uQClY z?WuQ4yfD1Ch0<;hb5NnZ0o2WKDHF7D5efZ#w{t9|(P_18^30BRg{T!ZRiOa@2P(u3 zge#9Am3$uXI?{&n9-gWB9RqQ_%iW8I9K_2Y;ANxlO0ECNir-J$ymPm(k6WPz7 z6K?=h`8o-!%sVtU08P|YrAnt|-mF~EH@{9)!k$Gq;5)yzY#a56sEXTob;jSneYjJx zal!s~AC7MTLAO~ikzLQS)Rp2Ej-WmZdjZP)z+I~hFNj0-z}D7^UNGBwe8Cq`v|C+jw(e}wwR|0+()^EBn?~%D&3vgVVFZ-KZ;Q~9=M)(b&%PhVb$t&HeSF|h7FkoQX z@Gmr5K5djBlH2fq7IV!>Tp?QzZQ zMl;5#!>hespKPKXbptrU2&yNApY_nxAE=Lll4(L`kIP(MiS0S1TicucY?j?UuDnzJ ztgXm0B-*Ub8`Z=Ob?vBxSzG&c3(pvpYZ-G0ROO(aiT{M3>AhV`GS)+fFEo%JNgI5z z1};dibJV(WX~Ut5)D!tJ@yftym(DsB>8k_HugN}(LB@Nu$3#e9dP4Cn>vsSziY zpI*Zxl)+gpO7kJ%;z}rDNe?akSAnGK1G_<)@lK9`uC$_q6ar{`fzZef%~ReGkh#2-gUd ze*@c_&NOk4kUvxOt)wg-W3st!II^UzN(Ok*?qT?f4TsD)LXcTXK-|pKczWkAdq(EV zu0cLCugDG2>WF5a^T{z5r(cUiOsH@ z?FCcv#aq2a@<&eCYiKFu=9QN$M27W#X+l1?J7dv{M6)6M>WM$2p4kCn*xx5Su?In( zLxdqAmGuWL0?H=sF7mn`ss`7l(^B$o|G^Lw+CO7=5$rYaq=K^nO7_6LXsNr&0T*K> zs7OTwH--vE`755Cw(MBud0kN)Qxo>R9HolP#P;x=i!1(eo)@=J{6WH4>FYqau9p-w zKSkB?+KhPci5(WL%SgGHvR{5u`L2ZS`rJEWqCYn&4?Z z?j@+)n*@1~oeV+)D5+HvU1tzy+Q8M14~j|$zhzJnseah(dd$ooh)Njz0stQ1Sx%N} z7Sq-=UeT)!NG3baMz)5wtVZO%`3>d?FHUL_8X+%I8pedIkJs68qmcCcoMjRfNZ`!; z+uRDTVp6mKT^f^&vz=6^f~d-t%QCBZ^jws}iXr_OV0;MI%S}*B+!AiVx-_%Kmbj%) z-|b)xgA?eP``lcc9nRUxd^XVYeLT6xRQ$7A$ z5@znY?Bl&?#mA^_iP5?g|Kly^*?}aqwcnusdvZd@o~D*a#*xIiBUI~Yc8s?wd!X@i zK!3`4&}14@ZOe8*8Q%+YP6uvKkJPwKN+o~mxO9i*a*3M^8PjU<&?oU$4OCKG=2uBI zHn~2m+lAi4(43&>pOATnOQMA7pX?0tlh*z--R4hL-Z8^4v(}T1kYqEiSZI< zDv;&O@M)D*S>b(t7O^^X9`a(m)C@I*nzc={aWp3d&1W*3%7OAkR$~j9z^K}XoRIj7 z2*iMY(P59MP8cZ5+C|Du%sXJMjlHbOH}9I?x2ZzTE?f$SzR)LTJ!tzppXnq=3=Oq8 ztR@v^SIACiMGJiyFlLK#tR>*^3Noreu0tNx2>^x4u1FhScjWeMG} zrVdTD17q+;;?vuRA8Xru_OrBmeS6nmHc83DNnN|mF$`W(dQX_XlCa%EK+BC2B!=$N z74JzY@~~0QK8V_x+fxX=PWEo@z0l_f4XK}D*fq~T8);sPIC*kc?q|_<2Yid<2{0|= zObqwGMIL{x#^W94)RWmVje7_t=Afzf0W<}vTYPGyi!U+>?CrX zx7hd=>lJn1D2+!bxR2Y60M1?l<+ z8vsG$T7YmDuxK3eWNl^l(1zHY{|Ja@QR%bo^ucmrbaH!p=j6Wt%KdzXq^mc+aX<6< zlGy>q9B!HYj=?j@`1F;TFHwUC7rH_xR;h_UFZ5&oxM)sI2lR+osk}V3W6;8=b0STC z9lJlBEpSF+Xcp$%^T*Vg3Ly{ZWdGZ}cI=uv4HJqg;&}R!BTs_*w_JcX^IDa zIp|wO+r)Gxs{DzQ{uVVzTmWIu2KD$?y`GkYhOH|r;7PB@YprQA>6#c(yE7(hW(c=6rQL|wi=C5$l1%!CW`OME%yYs zh$~WL8ve+`F(wL7lTwZoMjdwDSgikBHnI^9m=@=cmbm-p*tKB)F*vse@`H2 zPq`)~@mqj+yy$yrVKL059{tZ@L5|^G>%vN12cwvC^$}nUIbrxKJF2g-UW$TtuXyPg7X5MD?7!V z5hn!umMHOZdm)OaMY;&wQxnj?Ig)G@P_eX5!d1m?fAd48tBVlV4!ae*`3P>x-T-!# zQVfMWSA?z*xAAS7dR>{6m1z*L2IGhKo|N#OW#2hwE|-W5ibx2&_xAq763VjzCB4=I zoOJv>k=!x6EKN=v*PHR!xO}|>!s3XK8ZXRFy!C#w=Z?nX5@nNSar7bw?zq584V6sJ zzM5g1F?35DbDx7mQ+>&2kVA(X$o}W_fDQt>st^X~lFHp8-O)7LMX8Jn;i3iJ0Xn{B%oQ}QeZeI`Oa#F5UHdApq!V|k8bsyK>5PVlSM@d00*v(TX?1+bsGsX6l?`1 zD||$b^{i2KSi@l~<}0L&+I+h1U?0c{K~5fXi%N!0yifr*24^|zu!l~psx1Og_>spP z)FRhkgiI4fRiJ`1f5j3-EZ-iaoZCkAGaEqIYkd?7ID0|)GR}W`tscByi!UZBqYwYw z#Lbbx20s z3f|-25+UoDDnlnczu6P8W8Yfe0suPaBcJOzy!miL61AAsD!N~BQ-FHzy27(5KkNvF zi4ZGhUZpKyw1n8vA#6!pm&DsEtc_m{%(O$$pbvb;-@$v{_OH*wTV{faDYi!?%tvjV zk(a$Gj$f>EG2EX4{xE=jKKywwkyek+n`B& zTu2QZhdQ$+M-+r2cI2tO{5xS;vB6@Ve6qXjdj@5I79@T`BVu1h{nxk6e z)6n4?KnvgXZ~r@0k^^5Wzgj0O`p&w`WDz7ByCBB`SB}yz`XiyO-yeHBUjwrWfwR#R1KVxqg+9PSP{D^)12+nLVM1!4hb zSeF+$jJOq9`5VAT=ijep5_2S7*1Lb%IU(fF{Y!b2Hq|1kv8Vo zix4087S1asg|h;Z1r*r^TG2@}^I6s*S{KS9n8OZYv6aY(r1{u;7RAIq_|XiQJpg=K zLh{ZUA|w`}%X@H3FDQ$BjfVecJU5jncdkd{9TXza!5eWqJx41rd%B!6(liWk2(YgO z=2%R0A#(FI8Ja>hiDr>}$b?ZEy# zbpoVM%-&T8fzzjde_S;uj#VXTInPR>bxb`VWoT&0Yf^I-9ey$HRz!!Q_s*oCh#jRCFMqc0idtZ8QX0~4D`5qc7k8ft0Racc#|1w~fvqP+Qs#@1z zkbgxaDl3H5=wo|#bAMiVp+dVq9LumO#~VKDrE2X` zO8BS}xI2x#bq6l=>jCza(9^0fH7zarq*NR{pTv5(lGri#Wdrput!>|WAb+a+v3~ky zQ20DQDLnLxU=PqdVn^BD%^Cpj5s2n_L!!1elSMV0--pdo*jC*;tR;s*@&ld3ax*z1 z@&gUhJGl36Nr4V-*49~agOLyrVNj8p@|JHyaXB1gs}o1HRnB%+L@!25ssjC6UB>3# znE(LE{q01U2eBBWTZ$c>6!6qX1{qR+zGq3sOHlXY@70^mCRKvcvM!`-G71HsY? z1)gA8^+WXOng1=DhrvGOI<-7t*e7n}c-be^)<{=@Jb%>0qK z(3;Nz1)MbAd=YXvxCTGI9l(b8nSEzUEvEu|ZaBxaWJQSZj)K<768$r?rv4NoUa^1Sc3z>h?6tRYX= z+DMnN?ZFe&JwyF!B%y~TfvrAQF!n!5KrH&I!gu8$QRh;|iH7Q%$dZ>+YW4r z1pQk_lV1Ea^cIRfi#Kg`PvgdW5=SEG;)d_M8y;BJRvr_16b2iSx}__&Ph!VyHz)1n&FyOyF+-TK=8}841j?r9{fqtJ5+M3er~6=Wlzm=DY{SS-Z>2)GcI2a(`{eMJ{dU= zm@rkQNFwC7nZ3~^y(MHB{-ex~e1|#fyK7}S%)TP2@r;AclcrBQYli<#|LK1!^mCtg|kfXs`Q@fSQtgqm}z6LaC!4dA%}+>{p`VT zoN3Wigj_GgZu(n40EWwV$s?-Jh07k)s%}&dg9v*4k@r1-7n5SAT_3Elugd;OnCMSq z!#JOpvBlwqX0L{{)Ly>q^&q@RE5bMp@UJKmDXFE^@7E$je|Mahm%LavqgkGQQ1j}v z+G@3b<;RK&cxR!({pj&q-*e+^YoUkR+dFHGYL<6it^h&F!H(Tdo_Hr*0_@;Wy0RFq zM8i$eflumjWh;2qlT!8aYP*&%Nbf}w(wp`<88hgpib{5QH6B&jtMmdf%n^r*BLe8d6Y)0{3g z@8(HWwgcXZF2{fS&n=hdjh~mMebPaIWu~44Xg(Ry z?9}0vSx)V5{lwbjS>rYIaZ{$j3_JPqLuXN744ho9GaNoZea741be{0>6`Sl*O5zT32DiZ3mRp6>ZHoN+vD6LbWZo1G z81!y0+{CmuJ*xQcUlO*AJqtncrk}lEn`XMA!Q;cmK*F1hPaD3Y{OL=&BD&eO(yi>F z0!@Y+a~jvbUS5ba>K`zbrUn>x8eN54r{d`50xb{pvlScijh~sd+7G!i9NoX{VXpH_lJX&q3C6lE{51+I4o47~7Ri4~_lzO#6-~k?NNJw!rra&yOEo zYHYSzl=&CqsFuH=t_NK|HrwL zxNF-;RdH}toRoviF{r9Rtf$Km$}wRrdLI){SGrVg&S_ zVea!kSL)~Sn1b(d;RT78SgMYXK@~@h#Z_AF zB~J8;R4cz;pvQgDbV%KsF>HF3W=GP0qCTnWWxz?kdgN!O zT~gQ5q|xe(9_bbR#NS&f;(E_v{ZaHU1vOc&bk@uZ+h)(M5aXTl?V~|KJv%P5XQq78 z?@jR)GM7gGnOmx!hZ&QXiE0I{zdfP9lzf3b#BtSXJDn?RpHD9kIqR8$L0)}ORfd() zADZ{aU&-2;gO_D0E2oZTO+Gh$h>QEP=SU3dG!NOpF8#_?ahyC*82h&Sw$1g^aH6D; zA#f+>Hb=UCrsZei_Trr#s6lT0RlLj%VB(%+-7MKu@s=BbyYqwOPiS$o|D!Kb#{M3) zQ-wBHA_>1#CcCX2>O%82b0xJ$j|59aiw59!QRp=%?1-`V)W@@m4dbsN*%nqqiE_;( zGvQ*>?=AGlTwgS+Ehm#&EE{=dK+irtnSA;iYxqjz@G^OTDmSJsLPw|%cq2hF$g*nWEu9>fUd8%w;kDNgZ&oDCYi%Kh^xi#wMD zWYNN^#^G+hA8>BMJnH%QiD!%nojmO0SHsSX{FC=cz6*P!rk{Q1$-||Ap-Z(ZjV=20 zPPoPUJ<0)lXcpVeI_KoM2f2W60^=X-e%s9WBD#HO&}=5+>~EfGf9NCL6Slh(Tr(WC z(|JD5CjUDyJ}=&;WZ=Q$$Jft!K&LMo=W59kbnBtW4ob-vn+eU>|Z+E9xD<#n-kg) z7?gbc3fi_wF0Mt&@1BluXkjIeTl}DvFnGHeFLvZS?eMQZygS}t~Zi+UnLDZ(UNQn2a zX4)CT#WDNKpF*HYo7#7+v zbeId3mR!CKos`Oq2C79mslMc(4nLfkSdX8-7kbhsj!hQfV4h}8F;4$_6+Ld=+yH>#v0~)xBsX;6ZlYHl3NO& zrTOQw`fRH5&(m_vKTq>^U3a|Q38rpwq{y%(_6^tS|26|F%O*O;7~9RrO{U1AxTM00O#&{y@EEZUgO zO=%TF`{r+XLGLORJ_s~p=2Sq;_~f8TRl5Zg4q_sF%3f;Pq@SRm5 z5U_yTcu_%3e1=*|uU`(%9f{I9>-%Uvv$SG1?jrjJKrI5xKN~cIP~Pk0ZZLegGho{R z9hbfKF~-8Y#fHQP2ID*T^D};GCj2e6Mfzwzeb#Q#ymM5Bo29sS2kR0rfan+Qz*Cw` z_{&j-=F>wWmR!3}Fx0r2q$&L#CO(T^n)+Isw%c??Xi+MRM_T5WDrC`QEJR4>LefTX zw`fc(Q6{n-70&1Yx2Wkjxh3F)Fphq}mWnrU*wl>VkA1VsAS8G$2YgH!H+7egAc<>K z#WuR^i^etIX$~0FJDhnK=v`HDCuPcawPs0(;XNby-j_*gs=z!QN0>WBz;&&QE*DMn z0}a0?|K{Wf-Ts>Q^t0uehNU-)(i!AJfOEf>YRnI3+e~N z@M+8U>eD0(B)mvH>BQP7HN5laWja48fSr2q>wPNyL>=-iWFsQ;az+kzp)aKCt@0bR9yTmD*P{_;8- z@^x+^?%py?q+M)7Xr2gjhK(@q**LJ=aD8%17>Sx^76QbVynKNXY4-N%?2(!@XiU(m z8zUTEykB}SL_(4q&H;>HP+z!3@-0Nit6+aTA6d8)j+pP;3g8KvcK#k)hwl-f6iacoO@YtTz-%h<4q`)z5jzu zbSp6ZYWA|9BEBF`TREWc@7PoQ>jAw{_w9$z+9UKUEL3Ojqj8&YpS>9|veoS>E+5uw z;7(tq^D-D>o<#~r_pY=aoM$yt3IoqfPi0)`zhHRg3+;P3_EDV|2Je-B+Gc)Dl3!7! z5LN3|{xV^k_WYDp>R$E04;>-Ny=y>LPtH9gDYb_CVh%>I$XT6dy^}$x<}F2?4E1!x zq%^nd#H65)U*+7-$}@dYiG&K$obkmJo%I&+&7c;K0p)+ZVZTI$r40Wj`Lr_bTK zvPLCSC*~w}C+%9DH=yq1_-fizvWPCFxHkk0kO&wEYFz&S+2sbt=0*4`I-RN-7TTov z4W1}EX<`2Wv5jU{jH$!q@$Nf&ccAH?huuMa)05lk7c*O&6K!big$?fCL+$$2w@P$= zuVvDDzKN^4KyR_JRksAkl7B%`;=ovF_WIw!w2tU)drNng243n@KiJ}^`rZe((fS}v zs-#4ri5Vagqzbp_4wLF;zLQMUE@h8HiT-$r;^1e69zvXRQ+_pczOAUuZKzm=o-0)m zM{OE@T%HKW{)ZHv?m@w&?DPlS=8! zt3;H+sa!%KjS5YfVj_+f8IJnAG6Rf8tZ%W6zqMviB7~4HZXB z^(c!zucaVRt%3kkf`7eV{2;luxakIhctZ$YOtUTq2S4D_d@Si5TS(OP7QBgtyn&%q z?y?UIfA!?h^v^+Racg&}b8#>5!j8U-QWf407$A1VURHM^7r|Xh%Fp=q&FjStyKG=& z>;^~&{8wAFG5!QxjjkAEH;f-O$$Bs0q}Lr=a@v7!;kL46BM+ZcgOI1_Jl9&d^&l&3 z6oLN$!vnV!Ip+2p=i`)i@^suXt*VQkS#a&a3)udgif^l$)^CLxZL&)n+(?kD80UK& z3<}ouze~Z?dTdFktmyG>^vrFHWO?_j_eA_Zz0#*iHEWi-g4R}R8+_#uk@@lO^zljB zK}$3vM);A^@T;Yu()KiA#VBouCq!&TDHI!%5$*;Oa>=1G9OWSn@+}@B#k-hul;i zsQ7sIpAezfWt2%~#a>8{a>_dop*XEg7z}OJI&9Lh zjJm1ao;HT$eJiVU)#j5tvsv1`yWJSo+@~a<;1WmntnXI*Jx-^M?+on~@{OqZ`AT~g z^I9&3>C1fsMHcqZgm3_oOnlBj?rPk!L~|dEdaSx0L-{XScCs5ytZ;=L0HR>N0Cr@f!1|H9n$`K-ME=nl@JaP{ItK=_L5a8SPb{ z7p`=Rj+xUmy)fF%I!m?~t%z?-0gQ3)N$~_evwLkb;zhO2potxo@~M(vc*rWtk>K%~ z)2_Xw-*mpe1(L+p7BDnQ;Czkpa0m9OdY?o?)SV-14yMly%n&G!KmlbtMnU>ij+p4E zH2#g7LuW|@+r3xYw7z}nTQH0-!A)yQ(zK0LH5<5NQ!ha2Dw^rOG11pfgk@xRXzAE)yj;Yfz+FMH;()!6Ns&`cYm*LfMyPTjR+s(o`^ zx4GyTwE3)_b2M-&viz&PhJIfpjDLEK@e<}eJ5%ZC2_-b-h{#xSqa*hmQ(Xn4ORaSU zrKDzN(sW|XBz4EACs z7~vaGGGyZzCnw1sYBxvpm8OVGM!uUnu^!+b6`h!4p7pk*y)W?#LYn0Z!)InA1qMzC zAe?;ZKRSdg8$=L74V2Urm`95uAN<`&VA-E{f81?PAMNzuzdf1bQd(N#~!| znAW-nOS{&f)rvT1j&^SO4h9BC)5UA!xg68tCH1U75p>(v-m7ZJW@h}MbDV+ajMqx( z{a)W&vAMgm60NWVEQK%}4&-N?RDDOGbp2*hbwv`Q$++#|hy(KH+O#%0YFh(s3Iu^r zA}HViQul|;ko+U+TWb!XyVKqY<(Bort0Z~B+3Y!&(53?g?eexr z{Xwpk)4e04=y#J{TEfO@+a=qQ2e3W`O4NF0*H^Zh-%^P#A(Q%8a;MxM6?ZvmFt^&( z{{T?*)~$PE3dd*Zg|~M6&*i}$2a02*Z?EL|OM7fArV>RXMjR3R?6zBv09N}z=^Gt0 zO>I6k{{XsVovw47_p6H?I>Ose8hjS19k2y`FfcHC)SZ>eJ`8G_{LVCpE!I0|=JjTs zmo2pa0B(5Wy>_+6(a6u%plLczouxuz)9qomMs3WfG0jh0=OIUGx48BTao(6Dv#oJk-)e3_wKuPS6i`f905ja+kxT)!>{3`0+|!bt>}j$I9lKCRx($P|K{?1C zDY*&A$A4;l$RB>k>rFl~Pt%I!&AtlV04TSSJO2PWLHh1BXbw3i8RH_WbS=J=+5Z6Y z@_(AMz$?KdR(N<`eqRuhint*2$s@%YlwrUm<2)`Ymta{-23|4knsXy^?Ja}G2Nm^R z5v;f%EN3|9BZ?)N$-}~Mr<``7V8p9MkUJ7j??(*8l2TEhss8{B)KO)dcIHT1y4P&IIlMby-y-mZ!7Z=lS<7Qk$yl} zb|g_RAdHb-Cmp~z=Zfm)@-f2<43kPSS0wT6K#W)p4l|t5M!~A2BjvE5eNS;pcNODtBfe^e^OV3+2elCx%HxlvC($a1?+ z12pdE+r>CW-bhycX%yi}QULFddH`IfJQLp)?Zvpj&x{)JP`PcmB=_$`i?s8N$Ifbj zD*b~Fed$8&;f4=-P>cyI++#SQ%N%96J~2=uCf3~j>&iDL3{4&vJhy*(Bb~gQ^WK4( z6#oFk2pk`AL<3-8fQ^Q~&I_)^|#%L+Dy)7)FNbTWB<4?+nfkKaFCFuqdo>GW;5eUmNg+HJoqCu?ITV66|jOt^&fi@ z*y9A}6`*hdIZ7DfADD$XsnZK<6QXn;m#6g{+SH9b?bMFX8Jq&KWap2s*0up03~U(B zwM+p3;ALoAW(Uf1p7m`^R$W-fB#P{tamO_4!ZsMV0Q*$-2*eVEk>H*w0{r0ZIL~a< z?L}y|Xrtg99t-(DYGw4*nEc7LROEBXKkHX{QJ<5~nn1uW%N}#|tAenB8)0I}#yfv{ z^nsl1Wf&uxutSf{7$${*W&|PN_Z4y_W_9kLgHqFGuy&Cnl}IHH2&la^r`>3KbhzPn zwS^U8c0A)Af9YF1J1}od1N+c{{VbS1MNf=eXJHzY*QCn=LjXQIx3y0Yu~xzno^m|Z zm#4`gPd)kKyixEDKTJ~vY;d^$0CN#s`EL26jtL6}Dgg(QJc^&SxFvggnk|`Ag&78f zi0f*glL!h=B!S%3eT|L9v&rkq^KOu`7VZJ@R^Ui7JL8c~1>eiypM3VD>{y6lnX|bR zA8!={P`A4w8ZD<|WQ_L}ZdO2e9C+rEF_t}#I3w85#Z-Q^#Hk`7!t!?yH92FI;{f1y z?0eLoP4V(}9>SPakj&#esqB5Jm|$XxOdp$Q`tzDup!HYg0P~u4!{dw}wR>q9Ey(uG z111*;5P|wowWc3>5Ty4QODC2|0 zP4tHBk3U)_h~Rd{dsM*0Ff3{E{m3=xq9q9c51^-vYze^}_8Fm>3Bei2@lGUR81$L`hQE3E9*TT-7X^-S6q@l*{xH?#0|s)<0FAiEJ_fDZZTCc z1#R9~m~sX<9x5QRzKz>#xl(uzIUn<@r2#m|Z(*AAMyzre4S8o02!iW3mD$pPIx(|!oR2d#UY984rwBY$uFweO``}1=N|NicQ%&i zBX%+0p7dtlj5j#upH%^lc~M?FOJk5dg;5$ye%nc7^`dwk6m2Aec@-&8jP2wO;L;UP zepWp9su)Xta&wX?u?feH{Bctb(VYC)9Q`Q(Q^0ON;WY$lAOvkB{(_PSjxgEXQm_HQ z1L@5+ccjIzS#Sq`KJ{W?Jsbm^o_OZGAyzpp`ctO>f^u=^!LK6332YtS*`V^q6O)EG zp}I0O4z}f1TzsUCDZ>1A?dQb`Q?lWBrrZ(% z^IZ8g#aW<#yPaDn6TkVZVTIj}K;o)&W3{fi&Mq8(inmY-kiCUthlaWNd@~|~NmGt< zOB+JKypPH+!66y&T41SmQ1M>X|c60~cNl(9_lo(Z4|VJonXc*beP3f>qK z?r5Aw@&a-|@F)^DoJp9#IPN@G(H{-wGIU0n>JgvAa(j6-^X4}40mp;wUH9RVm+0*` zAno*+0sjEfHRSxiYMI=M z1a=@%#z+{#oO^bqQp1i9+JJZ%%F-Mc$s0%*r+6m>cILgDbAdxmKHvs0K^)UkGrYk@ zL@F|PYL_OA}(13&3SHR?DVAz}}oH6Tc3#$zkTHRcM#0Bt_>tj8k*iXhw# zxEVCrC;l7{ps#Lok(>ZH#Ui&E+CyTd)p!;oYL4{OEiyu=>y{bMJ?NR3@J>J{}y0r6QIijiVif60S0Kf;;!CA`rZUkCzAh(yK5ZHy>*7Qe2Qh z_5^mI0gI3hFmph8$;SYDc{GHa@TUiwR#qp0$TWx40AK?^pq-o(?0KfyvUvo4wBX!S z?igxqROcX(^u-7yE~%akD;MDAl>sZaIH3Xag~uJ}7Y;rC^q7%*f%Kw2szASfNP=CL}rT$8QKq3IDtHOx!8Y36)IAL=SIpS-yqpIW^m9jn%ldxwn+ZBB>=C>ZJD-rXk5K zocm^mke4U(;{%*mxGlV3uo&i=kVtq_`Ljl<+#Wl3t4J)i4+W3XkTA&(SM5w(Od&^> zS(X^U%d;E-O~G*u%n5gIJaa_jDClyeeGN?#q{6P?-S`8&c{d_={`HrBwdu&OjjX!7 z22^dVdDEO07XEzFW*IOpq5A&{>%n!oDZF5M6% z<-E?sC{>wr_wQ1VQE9gJe!b<~cI&}Dqk-SGB^ORfaq|&G9gYAz`&CYjujx9*;ili) z+?4$LA0&OnS}KCX-~sMxhX9cK5lG5k4ZkDlO+0MMM&d#7;+>Cjjky4Lr^-AO_u%Kq z@kp`pl?UeWMt2T-{=8R|0x~kE?LaFNXBq89htC3q_Yuov4tS=6i5b;oC6wUf#ZdDj zH_QMS@rw4wIo!ZzfNlo_<1~9qjxmu?31b^$9mje&z>#+b!1ks|bvbMtns9_w-JjBs zDFffPdL6`#q?$zexn9S$BSt|C8=PZttnxgdL=kiemu;Z<^F_8YGw1J2%r>wkl#lOA8?Og{9t9#DC<)=|L~4AT z`_cvAHsO+Ki<#I<6e@6AEX+p$5Bk*)kc9F9{cFL~IKcfVPa_9|;)lt|;Cl)KZcayz z*r0bKu<=Ss-Unbs42O~k$oo-1%-r$LDY8j#?@CEv066FNpmo|9fIm~(h;C*ZaybK& zQPF1fcS*N6IR^H&SKUw}tE)h9_it()WmcVJ`1U+!XnDz zCxP^!P~?%cjPN<_S_;ANGsyP_oCIwfc8+||1Y{d|Z1a;y+k)kOW8iMZP=+RHy2Rt4}aqmXuy)Xg(^w|f?;~vJQ0s*u0FanAlr^!72^ciK_oHlqAsU9RaVgUa1 z4mkPp8|Cv%OA&L07|%Pi-irj1N#Gjurav(_^UY9E1!d)qDI|qm2v#E}9qAWl0-(+Z z=|bCAXe>!=_VG|GigpdH*wW!PeR1A}N+}sQJ_UQTZY{wW$9kc87}^LP#)$3C7?Gc9 z@d=J(13vZQsjwZ{K57p%Q5*(2Be4{PVgqR>>6%p8wBo&lJT*`oVO9BZJ?Q|6d=t-~ zG@z#9xa0o-UMt_rE0ra_xS;bg(B>9A(*y&laoid77C*v`KFbma3kXb_cUX0 zBn_nas);*E8=1ywq9o2n2iG*yB#gNy9^@KHT2DwSkWP8#m>(Xg?g8_~BFOLZoB})= zY9@BdlFN@{NhaXH1D5;b(Go}IJa+!{ubsWlDln35zcw@XrrBqa83X*sni`S((ZOy$ zzSN*F1cSHgdzyy7i2RJ$K2M5s`G9_W{b`1ze8F-&&~wg7rigvpOODwT%I{ej8S*{x zO1h8KGu&HgGtU$)akWYGz^+Bp)+10e&!t>LW3EFh{wf)hB2;6Ly!iTx>{kxtb+3X(qF^gSmY(tS&if4BPaTWNpRKYO>3t5>Rl%8&+vMcv|*%0 zIYwUFl=&I0ZN22FYRu=A7{znHg{yU`O)kH9RiDvISmh%H)lN$v)QZ~4Z!06ZN-)p6 z^ry}TmF~9s(yF5QMLbm7iHh18k-@^`Wbsk13cNc5{%`$eqiRsJxO zO>q4errlcT-9jXaY^|-H#gWoRJcZ|h-n5?%^r<>?qtYw(G0wrKxH4?s|>va>K{pw#%5L+Q3P85B~$u$1}P;`{qrlI)zJNYawRRVg5ftA#FWj*R|z^Cdv zNIGk+bUE~xmMLzo79fZ)9y7x!_wZ}2Mi`;TpVqM22Szj++Ut7Wt1hd17{mq8B1QF3 zJpF5Eu2wu`Q{_%B{9&87wGycW5_8WS)KvtqV<6|n8!pym&)AO@aT0jX=f-?yyl@XB z5-JNb7s8**%^D2yPDj(4B1vLcZDG&mpoSz*%6VoapL6d~P?vFrBc4SYtj8iI0FSK= zO9cVRB(U-+445s39CyVs6uM(LBe|eN7-ifu=9N!x5)c9Gd7}Vt2N)F+G9zuO3zPk+ z*?~JSLa;eGpmKCr)UYgQ74o3&&(el=I}TWoX_VlrbKikaDXG9m!(+uCsT(d8vJVF| z$^{4JKQFN~+?yL|2W$Ln5ZD%k8WDW+v? zuaWQKfe@xYEeq!aau2-{oDNh1eW(t1XE>;^4Y5GylSnzwy&Ld6PK5bl zrIB)1B~R9nf;NzM6Yb)@v%*wjpaYZd^rwR(DJ&mK1SnS zU(5E#v++Ik);d_X;TvW#xcTo_@%oIQ5!<<{9+qO3<#2PJ;8prAIW3m@8uDMD=8blFy#3ZD5yB>ev~;JfOGT}3_9r^-;{sWy%<$Jx%&2| z)W&cNZONtFnBBM@{7@Dm3`o!EO_ICziC5k;`Z7 zC}YAegMmxDgUXy9#)PmxoF9LsMKL77-LMsvPbP$%w{Cn2AixX-zz30vM}5O+@O;(O zz%#jjolXGkD2c;v1bvT+NgsiXa(Sl}h6e6)?M+N5FWloEuQMFvxIbe?z`FM1&S^aya0l0lT40r1cjwM~ zV~SZ(y)vPXZUra=E;2{nyjXx20Q=^%)R>QEtQ3NI9kbq?MF4-|100_8MiwkexWT1Z zUk8z%*{d$51T4Zc^8!0iuy!kx`Dx{e)o@u`k9tkH0B1j60^n#-Pqu1Gwj#MJjl6<==t`D5k%F9i)QMxt6Oaum!~%d0N%NX+7-L~J ztcF(v@K_#x^btlQwon^|80Mc56jGtE4<|IJs_T+SBa9kKWiofGC=J|axBAfRR*;?e zZ)%mXc8+=f02D0(kC>6#mZl)jYLZ*BJ_NCRW>L2E+NXn8A0f0C) zt!S`*en*OI#Hn+MA7Wv`Bd!&2C=F zQEW8pI}2vDvb9Ti@>n7a44(W`(TNaYbGNbMwK^U)1r}YT{`CI<$eexjS)Gm;1o`F@l^$~NtsA34n<$G^G#Gfn`x=YT5S zSYRb#oT{+T_oPNfVg^*vNh+ciT)sO2P1{^sOyDL}jzJjmDs?6tk7DzJ55K(#lVg$r zY@a8!IXuu?N4{6%j^mnkF%kKOIrgh+;xwH2x!JAg9>kquoms z3%#;=#{-(3Nrw=i<#1cKYF&kxIRhEV$8${^m=(~R<(p_Z&ov}6kCk`_+nmx~<{Ol& zDH}-Vv7`kK(qiM-Qrt-rLgWQtGI5%3PPmTZ6_5?8MsR#p>h593lq{fa%AchRNwneP z|L&*_?Vl0rj_Hb(-7ly8}^F_3ZR z?O$2p5MUc184r<;)JXA+{D&>{{{R){8OGT$-vnctL~X|bhT+a=6$s~IwsXd7ynHCg z=*uDF`I#X0`HgUtX7yJr3HLsItGs+F5;sU#YyplOxBmcqYs>jw*!F%Oy1L&@IP?w) zWzAm1kI7a$e$`9U0R9!TfJ+nhtLT&dB0t4>ICyW=@+i^5<9OqmSn|r)AoIloh6>E3 zLB|xa#OV-FPCe_aj%OJ{(2jm?{{XKf8iT{~ZbEn% z#(U7rL}*$^1mm?1*W?EMywW^tG-YJniU#(DW}A3KY!V6QwK=!v3|lzk=|bp;Fi0Q| zKGo>VTXG~m{2ppm7#qgZ^O0VQ8v!S4b}LBD7pIWJp4k+DfHS!F_M{5u=57cBwrGF> za!x_;JJJ)#EAtOOO7>ThF`sGybV9i&k7@wQNl}~u`Mgo)Ci!x522Bsjq=YI|_^CpI zn?cAp;*}adQzZ7JjZs+q!=Drn6mCvV4~%%GRY;Om+sEiVsfU{lw*>L;O0KHAF_Jr) z31?O05rN-|sBYqAjdz7PJRhZ2M)wf0WRaiMImzu$T}&<&T~)?OQJy%Y1jeeNNg(;D zYE-o42ye~w?kP;$SRo-v?V4#aBn3`#aKQUg?N*<0?mKZ(Q2CLR<-ad{(Fs>@J3{+Z zIi+@p@xVUR)Z2?Hm*cqlQq@1HYF5@(&vkb^P{y3IIluz5Gxf>1ww`SxNWat~i@PF) zQ;zu0ddYlJS#^H2(KHDpkziv=T)}OA`lZzNH;HQvqX>N#k1F5{o!;H} z$F*8&UrT&P(zUzuVCyA~&v?>eF#iCrztXj9tXhl6Y$cX4b2w&-GI?cOWd8tq;=Nlx zN9r2tD3avg2PNdR91m>cx%*a*D72j)r|x=-NSbY0?$!qaSte(6@T%t{7Sne%)XnnCo<3i;I24^F*BY(fzo^9} zqkl5ht}NjLEaY+r+>CKkFLBo{*~D$>xCb4`^H5@W=Cox$B&&?D?Ogjy>z!tQhj*I8 zTO`+zO!3H7aIt~T;xpqoz@}`v_U$@4&Sr%q)-?t(TfmLO9R6eMDR|ReViqDGSe)a% z1aeHWH>mks{J8e6EWKOQ5_Cqav0OUeY4;=7lNs8C`0>F9^s8=?>ivIK>YE#Pi~j&- z+}W8`c5(-8%zg4P`qQSG>e1Z#o11q4<2|T-MN+YPmU0n~th;_pX1m_pmeaa@y4 z>Ro#0;SBoZOtN1<&ZbAkN(TJc?T`=lrtfwAdP^>pdxrTn1U|HJkD6BgpSK*<>883i zxvIlwqECHsap^2emci%jD!-$;%TLyzaAt-ZnB0R0s0(Ak$JVgARo=8Z-tR!y8rtp) zi)NeCU&|IebKbMI-6wCa==c3YpsmK8b!#j7aV)u3*~uf={X12hW_L$-y^-dLOl6Aa zji0831foAJOi*%UHbAwYy8?I&@fCGb!<2X611(X!Xb;#qtHNtdj-mac5qtfQRifgMPHcE5Dp5cC&6wSTw zRO%lK9{M}>wOc7%%K0Wqu-}*H27g*~)Yo=<=rYlnKsoK2&-G@jtXo}a_PSCg*<~KH zvTgx?(~m!DT z-#PyPPimODx9cvX)b1}IL+Q&%?k<)s(@8UxSo7Eqa%&}N)V~tvg}(`HYEs4h8Gzb% zH=N|>_NCKqXVbnZ-{`M&^Cj94vJuLByO;GDG=B^(Z&YSdy7lno9h1n!`ofWC9bn%eJ7R6GRmu*Zg1c9r%i{$s6lyTA!$8OF`iF~ zKVB!YXLm9I$SYi9r*zqMPLtMF;`-%ntvy4CNG*(wG zR^nuRtQY?PhtIeCsnWXX+E-WGUh3b&jjiRoFg}cvN3~|heW&SH@m-Pu!94Z{``07* zV{xWj>t^0f3!5;w?VJJ?^OFV%Y|J_S{Emojvjz z-3yQV!CY+}^PVdm*ZoPUS>D^}dJ!^TNtSQ;RdMz6R+JjdLT2qooCkks3sC25QMCux#l35MKyi$$C@Z^Hbef#lJ^(`Q2I!8|R zKAj_(E%rw^b~q^589Dt&>sHB_I?TGIy}CU2v!oX3-QBP$Z}km7F~V+RGD#U^UBfH~ zB!OI$sjj5z9cxz79(hXJM8AQyi{>*(+=4*vF`iFqZu-Xl*R6DWYq<}tb!X|yL*&L4 zNPWKCcC5}TuBWAH`c3?D!D)FF!^FeX3UW}M+5M{1*Hi=3eZ=?AVO)Qt?5#Ba0EzmA zjkCNkECGaSNd%mC`g5A=Q6wTXCq22TW+sDB)NL$dd#kz9C_u}Sa0M~b+U16?VAmRS zit2N>!Tm*Gx|Z@SU$3IjwM3CL^abNU*bG2a)i+l9#-(z$x>Ati*4T{MCoPP4s?M3P z)OF2oOV+J9oo*g`$=Lj=rLni_apI=w+D4nJ^{smr{O*vx4Sv zBsMyq;pU9D4U@)w@$FhRvX?~oi*Gz1E}N;uVcHku3>PG0$87%e>Bwt+A5rK@6q9~B zACg~1oSqB&eQEu3s4R4=C%C%GAS1BJZ2Jo2{Slzs>eFkMFL22mcWUqvpOgk9jCdq* zQ#!@pUg|wVPPT^O=CfF$wT)ZlL;Xz0?Z!K!B2x3c_1V&>`-END-4$A6^{ zOZ8rnt6op0S=_9W+YGlD3^RtukUS8%A^uhH z!}8Zu_4db-zB~@q&AQ3swfLdaw}w#?%91NHFxq?j{p(OQ@atIB>`tqsbu8ti)8i8E zcHG-B&&}j}8m(<#s4sP`LQu}Ow|YZOYEb;Bx#K>~-mbcy)M_8?9p#*YLb_0ZlsN^t zZy!ul9*Wj9yS|FKhS6r4@;fH{N);uzTmheNVeLd${5;UFHA|g4Uc0$fiqM$WF~Q1` z0RH(k)zCW(cF#4-{t`a1aFAPwWVF)kRq#)HiCFp1fIYsI)60;A-~-yFXc!+eoN@K1 zROIKs90~xy00s{nQ<0uXY;#<>x5ZVai(b8ucENJ z)Z$y2pJK+Owhz}GzgoQ1Lp7YcNd&0?gY>VPbiGSXHrmCGp(@94@4D5QNp_iJeslW| z){<>j^R7CEfA-?u#`CBjPSJl0_?_@DR$@P=J<0Kk+eNF|jXiZ}FA26W8BZDc0M2n- zqTww60NTU+Y2ht%q=hC@08@_3;2*VXejRDoBcUzruSp_7D#T9(VmS(FW+MmG-9f9( zXa3Yh)U#Wq$$w7P7h$t&U?3j&d!1uL{59phzL#-rEDbuSD!In}z5yU(HEbK}ZnM#D zCetmFRx)Za8KNDq2fwlJ`%r1Ud7xiGsZNu;ap~lVnbQLU$oJ$Qt#kcaEeA>Jj*o** zxsDrnB9Y9}w=Eg%$@e*|%$I3(p>?tdhoxOclFMszrWRLols)+XjzRY>KZ1e zZ?0&z`h~W3INSy>K^?_T+!zh96OU@4_+_R~q;!>?z3P>emXOAO_+gGhpQsqEL@^>t z?wl4mE8e--Bc|~n%8`mgy-T*z3E+%zQ&p6<&+0Yj*amT)eAEzIccNQi@7pvIqlpj& zCqBlcwCbc1M}H%VBXWccdG@9bOYx6R)ok@`JF4xJO9hnanALt{8OsuUf<`}Dv%S)7 zy4u0-thF1fDDHc;w3t~j#vOh98t7NH_V?DZTU6N%PkG`h~jB&rAAqr zLzb6!<}BUGH8pX`u7SGvDvt|^X0^6m=!6}rU%3O$D@X9o&q+2yoTVpNP1^MCVa{x7XwxRx>3 zw>>>)@VfnNE+d{Nh%Lh-hGuv2N$%A%;byC&&8OOFHWNMM@LKM91Vuz+JO(}wJ*!u3 z(%LPrPYnti2%5`j4I7Qw`k!<4uXM(TrF8U7ZKT>q0=^7{WR}MRB=@ZB?zq5p>?c?2 zL2V7S&ZT!0L1MOLKZk^$>wgE|6}Gyuxm`5DFpYLvm3LLb50?i6$I`TH7C8$Dfi2$y z8Ks&pRwj;B41{x^HFvP#eLa0?2f>|A$!?M7Z&xvlvjAJj_WIFJt=alL5pg4?Ah`7R z06e9*T}X+f-#I~aXCBbZZYArH9v)2b#p z^}KAq8Ks8c0VLXa^oRt8&p+T*Uq$sDpHJyprh#(>+;^{aFp41wCQR@EuF7=VR=-R0 zYail5dSY$AHr(eK_NOPawQ17gYgk%H@>WJbO8r5pyD?)k@Z&|h(t2Ln+GO;m3%Q1K zf=*3J>MKjFLc-$OSi_7A1wF{CyDOn^9LEc)D`6O)&-EWlmQ})I89&~qebK}I7wZ?1 zqhm3z*5|?C-UQ(kD+UJ8Zp=OYul8DON+_#9I-L=6y#uIxjrknCB3|N z4$83!n0({`xR0-zYHcRY>SdaHOKD||wkg<-!A-EsBTG7rs%->;k0gq(&~DOCgL>7!r7x_q zI3FR}#fTv1^ya&|TSxO4<3-Qr+MsSe=iZVl2x5oPNMcBsACMpb=f54Q^{#7&Yq8wi zd@j%~ZPHmISz=E}q5Q*+KT%gbR=4qdL+NYLrcXZTaLB2WgyaxUVT|Uw#FomaBlMF4 z8$kY?cEuz}WZc1n5y2y$(w$nEpO`SqKKqZaLMZR2qHJ@PU?)}{3RgEp71YnSd(lKyp(0-oS54u4$K(aE&^ z7hAgcO7@WGvo+Lrn~)U%x&x9xJoh-QWtGg`Q*~t()`NX$bhT+_S>yc7Q);50Imthz zbX%Eipiwo&%yC9eGRy$U?fcV;k?#yJay%M!X=l@qbT*x=TJ-l**~z2aLQkq;&t@bM zu}6|P=e0qS9d$k(>0$I4C63NzYlv7MP%0h>72HfJ2*dN-jM92|KP-xU#ZIa@=S%4S z0LFTf9;5sO`h0L&`cV4H7DQy0gRu$OXR3iNNC+VcW5oIF62zc8!a8YcqP~#*~ULw>6%>2cW9HO#3Odfl{otz zMMbsJA-2}8b#@!>ZV_WGob5&I2e-XvM>*E?-D{`nW3WAv$R z(aioJ^y^=0aop%vQH^HF)z%V!p@BfJ0fv4yb>Aw>MX?6IMePHrmh}UYV>@Yk7 z^{vjW)7DoylHSKH&7I1w(>=Vq3n@9l?hiFHM$vTbG6@;PBlL1H_&(pPTucQdlNpQj|bQ_E3z!^EH zJw2@0>yVlB{{T_#)yfGOalal%(yreqqv|aWS-tAr3si>U(O|Mh7Yaf7az0`Qoc12o zD@y5oLO+Pty+Qb~9fYu|tZRZ~I0FaR0yA1kOcA;)XiHSM=88A~QcgaU+IAij2aY>& zSsu0N9WSX0%X*V7#3)udBYbVooOU&$mi-byhzoI+tj}BZO!_XBJ=72h2xV0m{G&XR z_Ny%xeKn!k=(G~XZHu_94+r7j3uBbDTeg2Mhhcc_g?S;e|W&~E<#b;%`|dzxO}Mv?`NIF)vd*cr#Y zJKcMHr<%fa?^|2yT~l?U#HVV;KMx@XG7AAsXGTBtwHQA?lDHE1mt+)n^2*$ zIp&arkf({R3Mj4+D6KU*7JQ`|4$lc$+ zG~`kRL6syN`J$ab#Ec?+ITWAG12`a8n6?9{CnMgvxlyooINkGi8KhnNzdi@BrpR|A z18J{A9>|12-L{Nnj!FBPq18eO=d}@6QbPInG`m|Ln34b{LD{}L&~0u`N&1sjAX!K` zB#4dOk(j@=|=QrT>O!aP+`=bE|^?UZ9^pUn1K4PP|slC;P(0Vy$>HR}S*K8G_I)swO3lHWYZNTJnS_!&aM3UOp z(oHtqt?h7Oh_A|odsW`#RXAafZxk--=Lo}aBRHz6%kG`gukK))^1|L}AUTA(1GgQq zPA-9?$2HXA#sMYJ9=X^tCm7^ai4pGwc=~6}LA%y0Ep=;ME=NC$x>jhAAv`Z~Pl||U zJsV4o%vi^13fbdtpb%^yr9W$>*jd0KwY7#cUIHQoQTkSMUhAtJI`&9LnW^5SZRxzw zLAo`3<8FMM)BQWutEgNHYuzgEOqN&M3fuviPBL+i0C%J@dp@6MaRipL!4$DdHo7p# zBkC*3VP_wwi&#TMyJH~-$iBnd^r=6~AC*fJ-j~!-!TFE(6?_9@K3y+6h%3=}|Q8TTj$xm|4z}%WmGBsxlcH>MC|zNS|bBfS|K{ zpnKFgrZAY)lf18BDpkkK>f3jF6I}b^?yY}kZ*2v%uM87NB9=rrcHNu+e0}QUEtBdo zOCI={S=5!tKa|xjpRL~M8cg>cO*;Pozal&UTWR4}1GzPqO$P6z^lfJT+g6$2k{$4* zgLkIb`NSoI~{<-~AXN2xwt&J{rV{^){s^OX@fJL{DpP5K#@oaPs7g zl>~Od?^8SC*Fcu}B;KJi2W)q&mq2Uxx{Uh0_N8({Lw45DTc`?=h4b|$s5En_Xj=Z6 zcQy3a7MjcgFG@x%Qc$@B6Wcu1PK(oLR?)ha7;mL(gw$ndu*F8@*2Jd;4$lo7=9you?@kj$(IQaj}RD~;*hS*Tp-IzE|kcJ`87-J5HxqIR~_{MbGZ zjMIywYIgqs54xV(X(o=x)-%~jA;UU@$vNi)WA9U?T~hm0c_V18&yEc;NlB zT$4-dPNTVuy~Wm`?G55XBU!?NEN2HKf=T;TuSaMaoz3QxuS=*&B1@(;a8C zfIqmZUxs=e<@7hQTWdF#*6?a4bRz`Ik0kfW_p2nr*I(3iYkP8uEQ<+Z2IpY8C$OsR zSMirxo}U$!__Xxy1h)V-8c4wWpn2!Lb8U7BI=`qjn$^qc_vq zxyN1WH`DdTpQBuA62YY21$p8Q6B~1o8;k>vYk#CC;q=RNC71|MZex<%cQwv>qTb&3 z;;w}a>?et;)0Ccu<5W{2x{&DPT7NM%mrD!_# zwfs$PcRLsYw&aH&C{gv#dd&32qUl{(u4yn9OG}$mGOF-E9>?_}tNL3?hV$U|tGqjI zc_vTh90p8nKk8|Qtxr!{hP3M|KBKu(6V{ogoB48*Jo^myA4=C*PBdPdf$w!jxJb}D z+&g}3^ZRpwT=8JEli;Zr+oTI2Wjpx{Um5m2%}v*&wu4mZjTY$F(UqdSlHzNI;iM&w6;F}J>CH}+bl>)` zOO89HZ9>>3sVh5g{EP=`lcZ~lt6M{PVCyszau|Dkyc)rD#*t}v)?G^rLX$xh+l+Dy zjIigA-}kA%1%S5D<~srjFD3a;U;)7X)Vxhtd|TFaEgPr*02^g+A={8oa>VhBdGDX5 zD$k)x>D?Enyshak;g(t5&n1Yw;F_#2zRZwA1(6JPr1+IzLdQP>e&tYRUkzGVc^yf{b@!Wy!L2=hwLaqBH+emC* z({)J6XKWLN9nWLW(x~+~t!@4-S<0|Q8@`_G%%cq>6OM88q3GKf7pwZUur^RnJSlkR^PaV9EA9{bI`md<$HRv=BszQe4ac^`8 zA=}3zxIevTV~yM3{NCZG7*Z4O01^F@CN3_Y>WKWa?*DtA(K zrG|lUk=+??9i2lgLx_Pm1fL&oH01qnV;54%qKKxFNbUV%>bcx_!l?32Ya7zmU+pha zbvBQwEK$byVdWVHLaKNG_#Wr$SNes#5d2=!*J7%TG^|G*isv2us$>%SN2-^oHG9i9 zwYsvpwkI%5ISMc`f8|se1DpX`_ zy7ta_H3(vY<)pR1MOevsv!BYn{CTCd>m4rlq;54?+2BY?ifE4#D|XxWIpV2$64Ey7 z{dyr0QjwtryQw|umPd`c2dHC6L$un2i@2Qp+lMu~#%jviKD(xL&q!I_>7}K!^uU4? zId><=-#N*wUa71{t@XZxs=;k=p_^$!Ibx?gbB4z?Wh`Nzq1-T7@Dvh$TLV)M>C$@2qq$NBc&i_UA}8r3NH&*> z>O}`Q&prPDy=NwBw&DknqKQrb`M&fno`XvxHq{Huds4u_xmB52)AJ}jzVtJ}Gsh!1 z7$frg)^QyA$F03?r_^oFsp&xoYjsp5=4|6XLCC3kSHvjZ%IUQTWsdUM*gdpLS;~Q& zWLtI4AWurOX&@4_UeC?wi&p z)9fBm7Nk~tj18!{><5$mhH7`kOPFus&~I(k%3Kv_tFLvdEl*C>Y#Q3~&L{OYugJp$WAFZJJ<|PEf75*{rAl<&`g_jFH2bng z%yWP{ezj$xUPpP>9ZX`7803v!GRi|o!U+Oq*Eud{Z7V`4$6D_n`uo*zkN&f)WYJ*klo7j3cNY`PuMib5>yBPeWcL05A zo@-I3^*)8B>UR=b#I^zxJA9)&?p*lsS`Uix>Gs_R6fhW4FT}(nE8770{RU|uk<&V+ zxpCCD_WGToM`(c!yP{(`8NvGC{{U(~i29AjnbWs-b}&NHC8t9{#}2Eby@FUur!o_| zS5?CBd<^9MX}vq8Z&#^0i}6k)vecX835=B^ll1ng6XD!ia!GA@Jn%`WUR$rGh`gj| zxW~BqlU)SkZUzUnW%?VVtvX}z4-~I)EaYZM0LWkXMQMp7;CH16fDE5=PpBB=@l1>p zjt8|kJDm2#bLQU_J&?w|sSe=!NB;n-YR7N@XdWO|Ut{{Rjw{r+mPW3*(h z?3`k;!M{=QA{@)L-v>PBu&+qLc92x?F@s2Ulan4#oOz*?4^S0MspsuqS@Xv7E_Y^$RGMO+`bUGJrimI{#hi+2mb&SaJV zT(GJnU>6|cwtUiUf9u9aV@h(WN`u??pv#obGEakAi1cjcR#1LX-M+rmY@`q~%_<-( z^5A`|#N(2~9k}mQ(7NHfx7dnSj5aaemnC^V^xVyrIAXN`%61_oh6lBConNZ8?OtoB zIy&AtO^w1V{;1|aNe6Cw0fV14(AqLL?qJGraa`pzUZ>K!-tF{Mw;FDs{;kaObLsy8 z@{fK_KT4ieUDcgW)c(Guai+3E4u^3OySGIQ@}cF4Zv*Hl?Ng}wre3SJhfR1*h4!2! zwN_pbkO_A3NIYZfNpsU3Tso&xYf1F<({*$I0DOIfvM3nfAL=0evrqp33D)52TS+Zp zh*{d&8*#n4RPaFe;)5RtR`sFj`<+PLTZFK^Fv`V#Y0m6m0oh0Q{VPW{zt#!XR@z(% zZXmp|-EPc2Rd4}jZ#)y*>rBDXJw|#rRnbCPda~V_VUW4`^7&$YzLhrbLF&7HuF)@G zjtj^wZ5kP*X9__*&Ik6ZkW)p~ooISnP?=}6^)$QhN?CE1oCW}>9f11Qg7aC{B>0P_ zY4Mvyjr8W0N77Bgxu4^V`~;jbv@FI)VBdU$u_pHO`r+`h}waZX34|`}10FzIsc?=-C2ae<#tKe(i`>CJ8RuAJ5E%m&&E z5pN)W%&0&)9^}%|1}>)8+UDP=CA&t`mi_GIwN=^`cWqYpCYp->0I4qei>qu-v@PO+ z1Bq2~(XI&!qsJT*S9(7}YPR={V|jlJ`a!ujWfnX;zU{$Jum-2W(Kjcmb$v=p?bf=4 zI~pLEVa7ZCDr_0pwVh()qB?ypr@gp~P-cpGE#GX!^RNT@b5=hOwObn-J#K%)YO?A! zY>9R*aPp8iRT$%rYE_Rw-`n)HzK)u_vD{nC(?M_nKAF$TG3}2ZdbS-A1>Ti4uBCT8 z*Ecexw|5bd8%)YD2OI_ZRqT-ONp%BtPMLLa6`Zz_q~?YZ?S{ zX>l2DEoE7KF^sWa{bv=b>HeOv>1{MxX>c@g7T(3!voYu`oUaiw~+egx7u#ZW&R+TP0 z??u9ZKV1Hm7eUi?TMvsm)6HWnme(-{Nx;IO7$9frb6rce=0E{FoO!5GYMOkPjcB*F z?{cSW#IlXx{Y5wFG0QqK*Hv9NsI6P4&D4*6StGc>Ld5Z(aJU53uUK^6wJ$@qTG)l7~{*F70~rRe%zsiu>4bEePui;=hUlgqw7 z{r;76tZF*PQT#L1CH@CJyw?#Fh64F_9{&LH9&4gJWT_6I5*$-Hxv)mvor|B0n)Caxwkt%UkKKYDxs0E!yKmi94=8_knR zbtoik1M;aJbDs6SyE4yXAdWSWw*E#q2kA%Z?i_sC@#3WiH~@g76+6+%`ZK3>`&|R6 zblGiE6uD^@)nw1iPDlR$d)psslc;pgzc<3$eIM}AHS8@I5eLu850mfCc&>}7>UNra zl(%=2M-8+?F6`I>lG^$y?c<&TuM4PUZ=t2ML((npBhoD3zr$R|5Lp=)3P>2>Rtu{- zTU@y73r$)rM8JXew2Hi2#sT@Vc;}ptb6rc-c>?Sz5O?>eeL-PisM#AGMlVv{QUfmX z0q4#sErT`Fx?#P!z1ESpM?J8$wI_mO8OxukJpF}Lbf-`1+9yG4R{EnN+qz28N5Lw) zPEJR^d{;-e5JM70AEz3s;Za5aA4+FZwTk}AYfF&+jHpq_IN*#^s`wnz!=`TU{uo7~ zK*N0Y>ntq&iO%dFzxva?MbdKW)_P{2KA?-PSwOO`FfcL!{WII`TliKTD@#;X+ih(K z8=IUekMF>&t>x9_Khe+S$GGoXjXzJa(&7==+s6vI z+}!-keaSqUn+q{K$6WDd`8(jp8HfS-S_dek(8I3S#Z;)LCTxXR?7X{yY8OX7Izm_cf4}5v8SK=a)Qv*0843V10bstmf&35V2dOUiBHnJF}ca$#X9o0SY?^kU! z_NUNVZTC^8wGG_%(#X4R76;fRU-T! zUR)R5acK(4aRN3)eQTj`$E;ajG-9{(^*v95NP+RAaj^z7u+NRMbM(I62OzOMc zemPmEyilP}WcT~$`&N|1xF;Z-d)9YJmr}jaQsY&LZEj1k^wj=E&l&pSvzpgd-dpu$ zuTEIuJz|-i*C9qpU_J6Nig%+&YtlM(uToug1QO1-7fZH9^$yS4pj-3>oLWupoVPO= zZtfa*!h}oD{bmxbe8;{?%WnbgrAw`gW(O30hWiWDLXdCU`;m`K^Rgqp^r&dGEod zZUwdVq_AEjR)It12vr(hpsj6X~JyJ?h)2xVY%;BI~Gawb`P7#oMR| z!N><5Nb}yJYh3}NYI^*7*=g;s0R~5cLOnn@!T$iIPSCoOO4h6e)}MN_pd>g41y9s? zrp~9SF1lhC!8Fu|{o(x2CV0ovn?#LUrSyGMOOH;vwv53M1~pT+VI8rGkJDWprgZ4I z{3FjhPsZt_-?e|?2f(W~y8t4S{K9z$iisYxXC|XHiaBS2LrA^`dt}po4Pz(ixFOaq z^gRkDgkLz4`r~dA-XH$}CWqa>=UQD~qjVi&3tMZ5__b9;2-x{Y^!=*R@>o_xMP&uE zj5aIQEfPrha9i)%w3?Y6ycV|W3@f%M)BvNf1GP}~2T16BM|*93ejVht{HhKRjOPS- z=C595XbU4F{Ja%82AGO;&qsM$viQ>N8+}@|x8D0LnV$g}tSvx?9@A95BYaQZ?WK^ruKrLw7aK z`X^Dly}P-(gHfJ)OMm45061l*arvD44`3>Or|}0wwb!E3Ugt6u{$q|+4}ed<_^X2d z0AJTMy$;*fzPL!)j@3cSx%LCiGx*5zllpENPzh7reSxeG#(AyGIyKMrm$J|p5bx2sqnZN+hox*IM00x<|lW+>%@sLMq ze{c<+^np-hZ*B!S5F{UJZ@XadDd_nGgTbzR{5Qo%YsXXSANZk&{{ZHz5JJ&m+q<*LJudE&YHZTg=Ni5#;K2LnF!=5yto4+9jl7>&zp!l`0F9f$8g#O}i! zf!KD%eRt0b06=}mK55hekas7sK51kM$_oxW;Q6Eg>>Ek;BfSH#`gkMe$Q*rZxcnPe z9U%iT$PzdU`sTQNS=oX!?U7x_;MgnCF?Yd|PxD@1%Jw~fiGSwneJ$&>avX(LBZ1?A z&1j+A%s~fgDfd_A+T2-^@Y~DhFw#rWa-YH z*6nO>U<_nuhkoJ*Q(8gxT+*zjx3jvsiaD;N^x{T8nX%e}V+^Ut zFxW_*HSDfvOVnVnI01L(iVS2->bv~Txi)OgAk+e@u%%yP7PdIOI zJ64K*rqZD4ul9h?n>Ik|z-601(g(kcP+hJ_=R6AZF2tWr!2sZe=M~R7=c*yqz8zj_ z`Ua}ehzgksaM(Bn8_4fg9TBbQcEWuo!V_;TwX}B=7eYNv$_Z|HG#j|tfXfmGnj+45 z1U5&Gb6odL>wPZ6r_%lAyp=Alc`tY?{wVGh4zlcre>}+82O6oQiJV&IS0hd2rC}Q>U!Q+ji^*$G;R8vGsnRbJ9AV zuWN9?LXWv0hK;d>1a}!YtmcxvEuwVApI6Ur7UV!5dXipAafaacJ@QRO&@JNBG(NfX z)D~%^R%47X3P|FjbWBdo(NJ9t=`0sp;**spBy*nTlZx+jSJxucJ|XEEoG{MzlSlZ5 z1xaRP+%gx*@mpJ7sa<=iOkXsTc1A~tTzZF@l#c6BtPp0iG;8-J$(p6NA zcu~#;M%3*g(|#msO$;SszSCryRU<2qKQJFtR?S71!YQVkrlr(pnWdiQ<(gTaxgZYz z0BVqaBTjof8kJ!oOI(y^Q^wX|yZdqa)z`#^ zu%Axp#lLM3V1Ky!@mt0%w z9Y3h*uWxY)u~KG)sU#d{1K%}IsNKGs@o8S-LHK*EKV&)JBV^d3> zY3#18OCgpAm2ti?-H)|Y>i+-|rh(*bF2SzsV2~uY1Yu9U-}&~V7fgODlt{@VNW!i? z+c~U$nl$NpGhBvPL|ToHsEfI<#24LwxxxPcHD?%_?EN>W{{RuuEp-%%NY-`S7D0yn zs_l8lKfPl<9bYZ?Na^cpONdY-=lWQ1MQn2Hsu??Xtaz%;{6&V+PL{iENu4d?UF4^6 zgWn!&mFcUj^tH9Fy{GC8b<_6_JxKCK0~}*%?Tns9bw7#zqjW~8fH;A(pKpBD7fraf zzxZu!q}x0fHx9AHV&8_{yF9P{xUF%$ioWX(ti97gO%@pLu2SFEbGzk8{{Z46{VFd} zb&b`$dJWt$w6@piT2}AR18DX+J?fiOcp>hO_ORVq#)AYZx{v73jPv*HQobi0 zE(>m+(}dRWTBN~>#>3={4j6k3`Kz7cG+hg>Y8uVfqYk5PV3x{FgaC2RkE#CvoofF8 zpCS*P!VU-?YL};P;nZ}occ!9A8?gnz{OmZ+eEsU|EbD5!(=6Tg_^eemsggA24F3Sj zU_r^i#c_=W`&-gFKJx0y7*YtYnP!n&ED#Lv2X6y+*1Hb6{)V3;+&D{k(6o+D00Wxl z-79rqdQVoiV;sZzlFt|%fJYhQ9RC2NUN^FQw7QQ}Lb^7ab!v?z=A@(Wkuzf_AJlvS z=CYSPZ`4Dh8GvDx^2rY?%R)(-;bqN^v_spy1cp^ ziDX_0WoRyB+4-COC`bu}rrKX=((rp!o?Y+dfTF{3q1beJaCCi!#r- z-O4URL*2c;l*yG5x>H#n?Ke~=r4UAn;LLqNoGS5;Zq+jLQ|di`PSlm{A&*D6)80s5 zzRv(|{{V>j9x4oaHPpRl)cSg=NbhNulNe*TWN<#Xs~sm;(^uh4_U7(5=YrZxd7>u{ zB*E}~>iEj2I$iF&qjX13>UPox(xLwV-eLph^MYI3J}X<)dbF3(*jQ;YVq54;hEFj^ zfriJEk~plkm8e?kUk6&s37ScxgD<56a8m zAKIC<2huu+Rzv)2Nz`22+}S{{8FDw4CxB0G)m`wjR@P;OleK0_~aHAZR13PKjXr=gR zW2|a0>N=Ih_p5C*lKR%~4hy(nm4Wfkiesa1H4Q7_cB63=zMID@?UQcps#^z-aZ>&m zYIlcB)~!E>+%SSkB;11-C!M_GpQS?67F(}{nytE6T@OxIY`ELI2X+V4RnPtrX?d+* z-t?}4qqWl|&aMn^I*qKT6yW{I#(!F_y6Ne(&Y;+D|TbcSJ zr+S(@2}si%Jh7Ge+B|J$TzIB>vs<3~Rq5?VO40A-x7^B(W7u4<#?hYNf7+z;*r|Or zJ=TTRm-;pJ%#+((#%`_Vl;yMJXY3E{RXTRD)LOq#>G$x;?{6i`qgqAzQV!eC?0Ge* zpGW>H)vD@_A+WJA%;URC{O!-sAMPrb;f2iFZS?&!E{k$!Nmv_@fmBDI>OZd)J9!Z8 zPf}dpbq<=B@`zx#+|q{2kaLAh@S_;5N5Z?ELieWqTib?}Qg%pLzcE4o02jHYulY{>4 z0_2h7HJfcmSGVZ>Kk>nC8cVTm?>cVvqbz=GAAkDQr^L$(?K4-g>MLj@zqXD*w-9XJ zRSr83ul%Xbu(!O|`cfTFOVaNJ!rUW^Sw_d&q~zm1Yo{NC9-*S!biYz*7g|hyjji;i z-gppvq>+$ExBV&>iK%Lf)V)tGp?cFpXrp8}z{t)pJYWocYTKsoZ#t~Udv{@TVIG}1 zCK(s~7au;bi01xW@J5xOmXMZNS)EZsRkvt0bPcusVCiFX#EWNp>1oza;5(h_+j zcpb6t%~(HBUAJCG(^eKyy}HHfT}sSynfCGQD;9TFlm+z|D*^zexiLH?G0r%r+94PP zR+rKYp@IT?)D7FHiUm7CAa<-{B^Y=;=4tFA-#(<$@OM@s0=Fa%cy`Eh##n>I)>PWXCOL56D|R zzxjTZ(%$IA^>p%EbYDxwf9a%xb#NpfGR6-adt$V@s@ruP_MVJ_Pf9~{jp>yw8t^;z z$;EB;-k#C5c+oWrB$;7g$;JpE=fAhop;%jb4Rd*QYF6fV`A`scBJiun)1TI+$}=4X z`u?ZYx{jfA^IKTPq9Fq>%EV*rIz|U&>&z>Y0K;EAp zdTe144!ep&3ROqfh~cbZ!`}k#$MqVS8(_|LiA)uwkMDPPu93=+(|26UaI9+PuG&?0WwI6W;6{DueKjN4{`=!n6=};fEeGR2?k2v}3sU z{rDAo3a@@h?Osj}zNeGPxf!ws4oF<0RdmHHN|yE{{RjD0Pg}AMmU4iK%*Opi2mm} zuGUdA&TwlLt91L%@k>wER@vGMds3!cjBfdcJ^a#n9I8vbCi6?t^(&o&S^hP?`eHF6 zCh~CJ!x#hYTP-&F&HC%AZMARU!q-tI$mA=MquQpci)im{d(eG9KAK`pO3PPB-)I4ecO--V z0MX{O+I_vQr`1}WuCF8zy|%6{Vu_FQB|AWmZ?fQc?^_7^R>Ip}wz##og5J*9yvqtM z3(o!A{f2S(s~NN%8f|ekwy;B}B6@2v9EJY?*Piqo%R{uZhWD?;w$VPx3y97zS79du z$KJL701KqGlcDSu#mmBxmED3cLG90SGmO>Uw@zqq-e29rrp70~UAIsF02uNc)nK-WZAj2?1zL_<)tarImGNo`HHku4O{SO$kurxYc>QVxoL5>lpP}4K z4D#qv>XXcK89tn6A|gk9ypvsa`%bacc8lv9dw`@B4uP;oK59!v6R1{+epABZfr^^0 z7j-Jb(!FVB(Uh2KNhysY|T61>E&(&^c957xR&_Ll7w z!EItDfA^U|^YM?H-l}?ML7u`}jY9q6mj2@l{Gdc}Mn5Vb@ITgrjB1+Ro<17s*7Lq^ zKZ&t;{{Y}slXGW3LMu@QsHfuwpLo&DaRinkJ5v&Tzdmq(`Ej0WrH-A^e+tmHlWOra zttnQFFy2l{=j%>e=-PA_#`-G=7IH9T3D0bQiU(w3>;C`_cp39lopIBLsQPx}O){iX zGX{=Lxhg@(tE<`4G+d|yI2>lEy02fGOR4wz)9%wN^aBX!PEKP54RQ#lqkbL0gx@31IRT)kg zW}0CLVi$l1AXM5p-%|8peP>VVOWjsxYh}g86o!xvLZYv1o(T4{kl4gk-Bc_;5p^`BNAoLhL#n{jhJ!+LN&aUSA)p8j*%wN@80n;5?mh_e(M z#AhlG(>&F*WOR>*)_QkGUi9-`%X1yp_73BS$?QP=kJhW@_<1bdAr_DPQJ(%cO_vM~ z)I~p)M~*Z7tDsGJ>EbfEC!Es2*~@{(e`=jnVmguy3jY95k3{MjTG^}wQbRnCu-qtK zKtFNKJ=1y}oySsNYW5b!+fJ6vY}Eo36Mw|ucg8pe&2273QemZuBp`gdkQ`KYm;Ue$ zak~{ava<4gHqs7-b)*UIj1!aStdF=`pL8Dd_x+#f^J&^WoReHS>1QZbOc@XUFK&6_ zwZ6KO&Pc}<9JCP;>G0X@p7flrfla7%%n|GIbv?4Qx`J6WF+#`WVh0EB;+{BAmenoUh?5-m7#Z!m+PVweMzX6gAd!v_ig4XS>X`QY-SbvlMIXc665~Ya z?rApDU2g6rxP{`D6?ohgDA*u_?^GHmM)dZVs5YZvrpFD#jk!{AzW5!5 zWGs5yU+OIyIJ$P$G`#e~&pgKwMn~pRp65TcSM(oW>bice`X5wpX=I;jN_Z+ygYG?# z)~uA4IxkUlrirgyK{lyl9MV8?j&cv%j^pWG*Lp)k*KFgrzp#v~L~LaF8FAPGJbf!A z)_47Vb#B-CWt-UP@a4S~U~Xgsvk}-2{3D!G{{Ren{{Z;%HJ$8cJwg@B?crQ!zth^8 zb5*s|?JabeA+fcEAhw7EjTeBU+N?Myx%&#J{Cc|>=UC%8%H_FidsCMhqM?p4BIUdT z^JCxbSwq^Wh3#a5<|TM!Qmm{qxF34OXqru&*Y|o(nPogyey^!}aUuovx#A_`k@fbi zP1W2|TZ@4(^i$?yq!4K>uAz!`gh+wq$xsh}G=tjylce9-EvBVu46u$>Sa##MJ`dWN zi%iolG^3)fp9@$O$D~(}{{Roxu$^_UYJ;rpbeZlWL2`s0_mMQq}XWx0K#o85iLL=lNsHf z@fT1^x48P#nfY>GBfVh$AZx}QF8T{SOYrwGC~(Oz4yPlGdE&IKjm4d$H)iwDiEdw2T1AiX)UQoaTXfx`Q?uuP=SJKZkwt`-7jT-eQ*Z&BY7l~3JD`QKGiPT z`p)u2p5Az;Kry)-FweIYHpHjW-Y8Z_w}v>`m^7PI4d>kZn)SLw7t?r=RgQ1~=if9> zCKRx5&{l7$#m&~CVFiw~x^$}hnHfPdE_(oJ<|eg^6}O|aU?Ug`0LSf7uPx(}*^&a0 z$`pv$u0l3AIH+x}X>nhF43h2vd9!ebcg>FZ#Xz*`O)BTtxq|)&iV?a;R|Ql50GryU z_^G`=rCaG*V%d7nJj9tMnY+s8yBeSA!hn%k+b*Qiu5Mb|JJ=+iRo;lLmMiWDwGE$E zXcOKosMKL;OKccC79Y|NZ2c*e)$23WL>_DQ971TIV@L>{wmHZu9>NIH%#ft8{FzJ zN9qmBhTI77cdqU+Gj0e<#>}k1gRnPh zwA32bX>=dOYySX%Ff3*(WIW7x9Q%(H>FjHNB&^Gb_Ta7;@9$JS9n)tYg>zbQsQXdDD>%CIWx{ejO0{3H>@o64N^@h?e8rv)dMVdF>Efr;;C&TxwN$E znXQ#BB#!0XQd5;|D9Ash5wWqpiu%SYsDbpQUFuI62BFI~M2-PcJ7u$hStZwQZ#q{) zj@ngAolZd!WE(v-$=!kDfq_-MQ>H`HdPb>nX@5TbLP+BRM_hUq;n-X5kGKg62$D7!TZ)YHR?ViSlf2iiTJ+ob0 z##JyfdwU-AK&i)YDHun|+(0yrqtfM1)~XkP*#P@z#V^VCp;Eg+2M62Qymn_G;QcCu zk+>|pWN-~PW?dj2sQJG1@hac*G?~T+jMND3gb)WNn+N930L3`%8jZl>uzg9@dWFsX z-kH+hgK6v`Gu+Q`enXGwTzT)3dsQ2p#;1{i;;Qwoq0=;(poZ#3^{!6XVMW~}pJg2W zwU2|Y?=Jd#OqndAv%A#o=am{(%yIf_!N;A)7!^-E`kt@UofCg8q%i7FYc!6tsZcPZ zoDOl$arLR4$5l6xhy<((pk2ps#yHP4S)p|lTCC0?vz;OI)>!3X^2n@6sbgo z+WtO{y}G)!w{23=`|vWM;R6=HZ?+F=s?oKbZ$#5NpG8EB>Uw(qD%g-rev@;B1n>tq z6(n7CuT^q?Ff=bYzyNce_0Oz&mtKoSy6TNf@b9IaHaVb$4hWaXC+cdV@2L8&O*g47 zA-%SEZLN}6u3=0hWRIFP_hIkNT8^%g<19OxkA0|H+1kD3;|O7ma_;AWT&7J|t6{w9 z8;91$?MrG&Y?0+Q;1b6?u_M2+?Oi9Rbeny1Ojs{HWmIt}Fq{*&pVzfN^0+-0sC3O_ zNV;{>$nl&=gcWqpx}HT^qsfB2XWV(NXYl_3Q2zjEI`>r8q)4pV7{3V>p*We;9Pe-O zT{TRFxIv0k5R;5;JH4om?D5-1Dq>rSauPK?$nwXY!`xPWQyA{5)5g2k z_OR(T`@aem?<5Vg3>zv7d8;0>v+EDVTk2^grPTJSUMpDPI8Uxm=08f0(_L@VIu*2< zdL_(-WjpRRWuzyP=bZlap}FgA4d4D=HIDq;^}=t zeK*&+hCn1(bFK>lgn@f^-ZMbBM=02kySN6^+J_g64;@0BP zw)GXvQ{A(Zx-o&bJb0_SG8=Ij#(ep#FU1a}*7S`dOPfrxmJ@X%@7|eZJbx^KxA8N&)DyjH`4zYKl$TYo1J20wC=Yt?0(i_HWo!F;aYdtLm z)sCdmHLJQtH-9o+zwsZYIQR2iE8-+NjNLahdR$1;Hr^SwfHTiOYUh0mVb-wR&7#F< zAhdTbE@V5FFx|?ZC%+Z0IfB9twA82Q?I1jn!>H<$J-@0)%9~CLM4`dkxGSKxHuboa7S@g8upK<-TX_?TGGZ*Wn~Z&OScH4zkho6 zwbK1VckxLz8x@WlNUf0#%dS8i?g~7bY+Q60-a6$0IvCB&X80X(S+EWS;`L2SHnv>VBi@ zTX+>LRb>1-W95EwNcwSFEf95gRq5~zS_RVV88I?~M3*1`0Lq%uU3B|Hqse7rh_JMN zlEs__TwsA&ou)6BEwr`Ox}o&#a?U*Z5+ut#pWbZ=mEvyt$cDtLw|#BWdY-4z!$+`|Id1oT&bQMwT^>egZ!Q!{ zuH}ek@t+`!n!M}Yjn#DxZ%x#!ZqS$jMiVF@Msi>3cVR>f(P&8)Y`|KnN`>4m1A%7C&xkv#+#`Lersin>*>A zT~677Urdsrat7bH_xe@txzin1pAcVO(j~K4<%`pO+~5ueA8vd2s}8s6oqJQ!E$wZu zO~k1C-PuAjFzt+R0Prck3>Jmbx>DcU^#pG!U6_Zcg}5w7B>w=)wf5+VKIR-_-@RwL zM?%!Bq5MX-eD^n!APpFA<;FdL@_o%|aFNUcvW8)dR&6m&@e5OvOX)kyt6ZesnN)H8 zB=Mipx!$tcOkWA!RgE6%NW{LjGTyF#KI6`7u3lT)-PuWNaET;trCD3CQaKgPr1*1h zso2e?SYF=RY0V}^_uRet+Nbu(t1S5$djJqY@4il{PO7=F(g}tuOK>59=J}RZ3xdp} z*p3I*wf_JQ^g*NbE%VTR$HmMLrK?l2=6rpn3zW0i78b$IUIkjVq7tr)jb$~GU>9( z60fPJ$+gHFw`yOem~{VQEi#uKK}DE=I-qaK{Ee+`s|-avr3+z2>5 z>W4(O(luU}v0I*@mKg124A7z`eNu72#!n}`b}pak-6uk`dtE*kNS(8Wc5HpK+Mro< zPfFNopTw>7h=tB`yyW2Lz6r)@)M9;^jcrsIn*+9aHJK!rxD6@i_44dw9urIDBz3mau`CmVo-oN zZ!Po{H{t7szv=@jNAg{=h8>OvdfZs__JMCLx8d~IA$w554Xerq?~05%KS8!jn`kT% zA-Vucps+8_dFHimVTS2fSiTr)&QT1H_>u_0&nKE2rIgxqZ0bJ|Z5(6yE4L-~esDaI z=DG|WBh$9la$3iv+lU|YMilKo`pt9Ci3y=;+Mb=G2!Dp!A~T7RNC-*n2<@MLwkmGX zYt!0XlXYgZs7+?68|*fMR2#&KKi&^);9g#{!GCvjAh+(yaC7SNYmZf^+#8xnF9L1hgMkQ!k$MYeS6nIy6Fy^ zxz$%wxYE(?muXkYACzEl4k}H)gQHq%GwM-k(Jjp2i6j7$;P?M_>Rn*%4(7htrSbI36+DpSmrL%9-Q6J8G*O2&oa;tA!rG zQ;x)d0&r^z<(+XowYOJj_g4iLCAiwfkxG5-K6=o&?(g|)Jq8)zbpfH{c(4fW%_beWMH zZ$Q>9G@h91i(9M2wbUNsT*CY_XMurI;<|#<;kQp+ekEQgFXCy~0>N-eBOglY*BSwn(45`=|||r6mg*J6kvbSm4c|{ zvHH}Ma}7VCWxwbP4L1DEr(av0>mcQj^Xz*ORvKSKBI$Qln!cqrrq?L3`j5uN9fE_~ zpEa|)V9GGyC_M97?y~ALq1i^3w!ezKm>)(g;1UNU{VMGedRM13-kq{G+I&R=ozCpr zna|YwRlsb1QIk|%8>nj9j*gd_#fsWoI}jsW{$B_6tHsZzcVK7R#Q~;97~>hpr(ik! zp!?Ky8Rxj|Qlc2Z9@x!u=HC@1!5{5tAO2*2^NO2+xaZoS+yFYk03ZH_-2VXd#ZPQ< z4tT6^^4~Ya#Hz`X*&O!oP3If1&Ck6ekQ^P&hVM?Z7m-y$9CjpkudMmxXZ)a?jQ*5N ziU7kE1oAOMGcZ--8&By*tmv!Cux|MRfP$tw;191g+&&H}o{fYdCBG3M0G`~6;p)F8 za=dZ%uG{cvH~So?aAGbWzuZ@s^1Y8=;``m3rBxcplE7iH-?tTa7Tw!HA78anbiO|f z+rR^NkF{R~K+Vq;<@DV>Pc-MqK?jd+Xtu`AFqj~Cq*98w+zk?>ISfb5_pYu~r(%o` znEO|OOmo}Y-i3r`p5Aea5sVCA9Os_(LyBVq0|5IPNlJpJoaY$tnno(eE^;&AQUx8r z!8~W$fk<8ZlwgsJQZY@yHgE?yrB9S$+i@IZ4*vjJRw_;i&mL$2_AuaNeQT3k^p>w@ z)mp0PcQ(UMytI@%ImwCNx{wZYo=4KU(==ohJP-3&eMjN-oyGOV)uy+p*e%KaY;edh zLGOZj8)Q((~|Psi&7eL)VhpzF|^E!MX3 zCF{yaju1dB-uS@4`qt+{(scbNOoGD36C4n(6nQ@6pC+SqUWH}UT4B?!rJg%->dqr< z7XasJ`WkCd%|F@QSEzbZRKL<9jbByO1@sF$70hY*NcyoJYU51MI)hQubxl`Rwzsy{ zuWWw<967=&4E)XYIqmcn(Ct<`WRB7sbx0#Xskg8LfCWiR;F3tDrE>inqk5V?g4J{# zO77i*#j?#J7{C}fWA!5xS~iQ-x8DwLbgRg0?Id^v7SL?kLXPE#@@uBBVouU9DO1~M z8TO{qIZginMD-oi9VXw3jv4gE+bM|P#;3?WNCO|ObbV28{{V){x__tmzfAf@RXcZM zjN+%A$AuZk){`WA?)ucatcO5Y>d9c&-9>8%)c3GS3NNY?$RvGxeJfh7T>~iK_OAwF zoPbZ0M1%q|d}FmfKrZ|o4i7W~)e>$Qz|MZO`Emv}kbF>#x6O=Zfvmath(Atvs?AqP z)35HJxxKNDY2ba)GcIfkm>DeM{E5e2a$jt zsxHX@{{ZG3()9lTN%a2!PXjKOr$&?G238xK{-&;);fsaZGv}HyDl;%a^DhKc@1!>a zq=c77=ikriPb?_w>9k9?z$1ZCgy2e5%By%8sm<~PbSRR>7aWY1L zIXqH1Au{W`X!1Y3M77jX==zrPMKX@<8-@e+tY^fopwjR9Pg9QHRswr(lo)0ynLpv& zI6io+&nn%rLv*9nhBpX9rsW3=Jddx|yjjqYv3EK8RX0!G$7j+yb=~dMXrG3Rw6ZV* z@{cB@>KfIJzK<@f_ej>UA!6#lgOByBp9=+9WQ@lk3=a%3;)2Q)wTID)EFumEd}g8P zy1k|Ln;xMR$}CKQ$t(uUjN!eFO0~R#WX-k2Qw-&dY#X1_t|CAZ`gkeLibsU=<2$K{l(N+DkPivY>k!9 zJn{`Xb**&>rGs+BuNiL&7!&(coVXm2eUF;vKMeFOK2E6BwYVXFO4{Y^?K0%5;QYjY z>)y6E{a2ybT{IWElrhMx74w`SJ%J!#eT_@lU!frLf%HA83o&n;;8jkqtXWxUzr=2? z5p6&WBnKh0+ZgUmRIgoVCi)FJNv4kOcL=db6D#sE2?ww~^hTh?#sK`?>A%26j7rD# z&)>ac^#1^edPT=m%b~OEOsYXSJWTn)P(FvvNWJ)hp<3z^TD{b+v9OLwqlKC%+qN0U z0M*JhBlB$p06YC@)@Jmwpk#Ke#q4@Sy;Wm(CZ!;~NQjQzCixv(u_`J*Q+2kLq1~;e z$Ks`IsFmT5D-L@WJ%`$p$kj&8xE4Iq%EKh?Bz>zft#xLdquIfADW2h{;s~K#&nM~a z$mW~r?yRuZwF`|u)3~v{w>gsD<#w_9k8w=2YduYzvn~kak&0kV6rVCkq#nc^gZk7B zdg9vKO1OPO^I0u7RnT+}nYNNGLTA&XB@$N4Gd6p6sfM>s5suO_ zosN0q+N0ar+gl>XZyG@ol2ue-bIGio?^)>CnN}!Snjzfo_NG+U?Sg_>YXCrLbuk$@=0wV1~36Gd8a>d(;op z_E22MVfdxa7>B`5!$01w;OZSd-q%li$&x3%Dv+t}#EjrqH0!Nu`&j9ibsH-d{uUhn z06C?@GUoxdV}#eMLzeA0<5vk5Gc^VtaY>lm7=B*E>q{{;Cf89(a!5G` z7{xmz?XZMor-eN-K_J#FYH*xlSHR$al>QYqx5a>T1C_U0K_jZt*5m7Jd#D~sWPjK zpPSrpc>7kY9X|T{MSn&p!F*IpTYGz*--(H3a&pS+k)PJGJvFFxRqdUwu`IR{+sSOt zBvGN>#|H!+z~_Q0pGWHsqPFU-HrrDc>lUJL*7(E1d{mO?>k%w=tF|Z+6VAl~o1ai< zXHX-^BZH6cNgS@^sT)tVV}2oZ4bGJI_BwfnO-AZPjTTlIKu_j1Jkwg+o9PvmhjY&Z zy*nlI1zg9wcWhTV(>1QD(^pfQU$ur?3s{-qxJ3$Fj9_Cvb5s&_mr+Y5ov1^0Cs`Rx zmQmU8_$0roQ*b9*>xsMs-^G~hMsR`bg;B=B!VdIO5~Q@JHPqjrs(}csGU`G=}_M!mhU&LmhBG0zte`|rVUNz zo1|)YDRY0O+gwb5g)&AMf%??glQkkmiM-nl3Pf^ z7W6j^0g^l5RNGFW)-?`|)Ab+3Lj<=Ew(3#<3Y?A0pFCBP{G+0J-lF3mAFV~YEUbh! zd9GK}eMPO@bk|Zv73jZ~(aOPazycSXo;|Ueq?%1$4v?_c$5CpAd1YdX5AxJ*^Yt}y zj^xumLWcYCOibXhB#;Gi?NM)Zj<@OSsP%if&HeLl_o{$2^k-yN$j)KXke@hZt=Qnv8fG>AwTY=e_jeM5Bu^ySPJ zt|ftW6%$;{z{@O!vOs zAgPsbr?;Q;Mp8A}6Z3fb(4cMEU89U=-id=J%jy8$YmD-VCKY9a}bmISZ&sRdXlQGw?<=Aak?6%X?q`1;pt_%u~|MF<6n zxPCL=wQ)rc8fS8xeQUG)8rvsGYyhjR%aFwQz|DDoFWB||C%xMGSc7XJBP2gz+ZYvl z7+fAU^I86wIJGN-`MEgyRsR6W7mv`_lY^(JPwu z1Xq_aFRQiHA7~u@ZaneDb-6I09l+ryR;^vT+23R7#wvf*X$i-c` z!62WdX*!Fd;@58MXSmh12wKku6@;h($MyF7#-!>UBL&}5SnHQJcSdU@3ks_RT=^%A zocmPjS2pU|Y_7E1``GTD>sPbASpNVVo1;kb_y{?{z&(XoYdUq6-^KkbUq|bSbkf@- z{H)87oPE4jn(yJ(g>tuO=JjE^o7ZT?!Pv5AJAn76?>Z|?y6QW9ajtJt^4(PwtmG~i z#yyXb=QP-iE{tW;^}f5a(qZ&sw}*TX$JjYj$Q~+QnFf@83%PcV<~Fx!9h(8YW<36& zT7^&^A^gtOp-z z=(pVmpxwo%rNlC~;3M>+VKXZf_F_3Ap3*u$M$>f5?J@|Ydt2r$I!Pg5axurXO@lA5 z*>wJo(>gI!TYF=39ItB>Tg~h2;Rq++2PVDHt`p)$vuy^K9+;nojI=SIl(E6d1C8AH ztwjADq1iFfIGEXG1ZDB~m^=j%?c z%ZrJS%7r9FAcLMb$7+U+H>X`_y#Th6WAUQZ|00~iCqs>`05(JmsDCDWp4<&)Hv6cDc7y81DEj+Wk~nVqldQCTZapGxKHkn36Ft&}X-f~t2Yh{L?~GCDkmz<+ zuVlewonW__epGtfrr%Bdj-CCcmn_uLaPE!E+`-!*js?k~z;JwY{{h4^BgB zoqg2Sq9`JnLV6==sVqS`U#D)>lIY>n&7o=XtIk)V6|adnZE(l>!qj)-lgK06vw~^$iC_)Gik0{{T$4 znp6X8$rxq;axqUyrfE`1B+qAT?(4P7LJ1?-`_fc$KCgDgLEZy5fV&?x@Yyp@^wnSBx_ zqCd#P)`C@aAYlUY{{XM8PU?+j(oV7Iv`G{)zVJsmN8oeyu9$4BWPuV(NEss-P|>ac z_5&5~!dRC_{46<-{n^+uziPckInPR7MHf?bCA`u=CsH#pATi+h`UBe^FD$NA?ju#qp*IqE z^W%!^>J=HJ;AgqV_o4wn{JGC+PDPG&)z{jc%sOl%OTPRC(Z$cEht@;E0D;&V%xOI< zsB0ZHtlPt5bEnA-{{X9Mw+pv^J1NhfuhzS3rU76SH-6RV%$WXSgY8yw$XMpR33Jt# zmoUMoXz*J@ZZ=CShvsYy4XggOy1DgYx07;bjQy&@IK4J91yP@+Bjtt}aJ{~@n{6@6 zdXuNMh&qlQq|p64$;*cH9k!G5;DCE$6)x(}s5;9@)NQp*HMFlsV7EqW?v+U-=gRgK zy^7WZky$buKPWywN|wx3IoscgyJ+Nko}Jbj^_7=T+-S{WfrsNR8PvwWb}No~?~1gz z&@~%Ax3<(RY~Aglw)CWwN-#J*!R~SGT_QcXKQGdW{UufyP=}V!`qOCTx<-SoK`y^> zcV!iowxFS620+XYBgb!i(DdGu)z8zHI!)%Dw(D;LEHViJGchFN9kI`ITem!(4^!HZl`dq81h&0k63l+Lc zy;@B2%twrPs9FWL#0@#4(fx$gx@)6 zkE{q`+aGc~*HbZ~HaPve(F6pJ2ORw=)l73n>5Y3?=&M^T(W}7G*h{~Q07)WusjOi5SaiBj8YXsagN^9w8u2*&Y$XA2zA(`v$piF zB$7*f+~)-Gj@%3!{*_ZFM|C&wO&?DSXk^pw45Tv_ibXu-xbf{>d?K8T9QYJ_W6vYM zwKkaMy*JVtp|a{IFQtMLs<+53$IBSUd>TvMg4N{c`yB~y?9kiGxX4kyWf{gkyw^vb zduvO1CY91SqFvHCI0^@nYAm|7gnD7r+UhZB>H40jZ77j4)}q#$BvBZx zii@_Hh+*7d0pw%NJcn(%bEoaD9aBxSS!VgjWdxia_&&tdvd0>+7#SV0OCq3@Ovt72 zv^F^d?@%uFjXDDXYoz}Gyf(;)Io{lp{wQDZEFQr{c?MYX&)T!SFV+?vMSpu~X0E@7 zJTV+H`~5lbS``4kNyi*ip~X^U9Px^jpC}&H6;24_$fvpDWqEDr@z}Ra(-#>@MYQXkT!!_r^u^?mVO00K) zxaYy6lRl^<4tt9F@16z$3X;d4dUNFpx$WDUY1NAc+ydv#IJ??B=QIY2LIwaFXSH^Z zg7{vUV!t-)hwtMXYl_6~BP1M39SxvK? zWNy#)6?+?pd}Lr%FH0_L+arR&gYCep)Iz!WMg@5I_;vMt!$}T!Q;2M4}KGzGZr;ABwp z_+T;q4KC>b$ROj$6eU@JZNndWi-D0c0A6W93PO-ZeW*_gS2_0Pia5jn09PV|$AicA z@kTSi2emUQxCfRv=Z^F!Sbk%GdvjCdJ-ZKp!?!u4@rvH;)q~y59w4o z9IFJ8mvQ{RS{GBCt8qx7atS1mJ7$4^-Lw<;J*WbFWD~b=>`g$_^-FCpPMZ4mb&B30 zmvjD~T+{c9xE1v=&v0v&{B*p%hR;pY?PHVh^G>o$DPNS8BWmaG&*@Cw#NZU1WR6MG0Dv>jI}BF1$a9x-xQy0MU)K{zg3>jGuQeN?w%*ko266`{Cx8dN zRonIGeKS}5W|u9!{*@S#cs)SHl%C-B#Z3Ag02W|*^4p_ z%{*h>FnsU_9QHo-g4BAaTGDj;ZCg>&V_i0KzNe)zGQh3aup=LQR?Y(y*0J10Hso~# zA8{Qd|lG*{T)B7v{q3x6Cj#s8}p$! z2P3`@8T!$DaJHOT=sL%wC9}1iT6D*9*dsqq2(3;s;%!?+@gwkBeYNhZrCznOkh0!G z30PF005>ELe$}qjb;vFHa!qEz4f1*>SkVvX9xEr+{YRm`l?PAH9`mc)feXks!~=FE zzK0cAe){)9{5H0LPi;KuaInVexDlb`fAbvYpRGv;MYq+FG^=~PK&7OTsuqo#IA;Ez zuQeiFO3DX`XPskdpJZU3S-pq7a=x~1q`m6CoQliPhF6Tp3w-gbQ<57#iyA!yl2x4P8@;HC;U=zHPvsSvc0uvE}@QC+q%f1fg|rwY;^59%6GZ9 zu(yUsAT)^>4A}QIhwHs_R@;A~>9?z8abaZ~^G}Q!oRNf2Z1esq9Gb6HKT&k(5}0Jv zEEqMrj4V%%S%>v_IPc=GPLIqbRP*MWcaJ->jQjCb-9M-uLq)N@k`RwH3hor- z0!JWzxvEczGhav3a$MNo&1Ew27>@uu9>9`3(q8yRtQhxhaHl>;nlzbPB#ir4F|v=S zXZU|?EN|}R)U^_!rJhgsC!FAcz`&~ALet`Yk7XU)-AQXFsNyw@N4T>w0aSdn!Ps7P z3UaveaYEOTo>P!}{VSYx1*W;K^&036tj}j}ZDkzS9*z~IX2>}DWK=ygbuFUn{V{E* zHTR<&V%q9eJC&s1qaJtzoYd)8RQyt7a;8E50EU7qsAQDIum?aJ1bF7S&ZD69wvA;Y z-1VDVLmK5&ZaDyU2ON6}-uyd?`VO9gZAS82tJz}RBTCz`lbyW#W|(VJHNYx%r_J`I zhUkC;A*N-xL_xp}jPcr#OS&D)w3?dDJYRD$3xYgUd&~5(vzGGUY=$)@+v!gh+Cd-= zJesZQeLwh?i#@m}5)OGhjCieID;T-1y8i%Qi};?Id26IuvE2j`E@X7@%yH-1gHG3^ z>b*+KMb(;1NiEUd@({&85W@j*M~>u*)byR@%UiCQePKPc_VI--Q~6E@8CL%QO3Lec zMYWg3exIJm6D;A3sr@R7FbEzy)tGD0!t-^3=&O6#Bh%r3#>%EvW_39JPe0R>nw8d< zy+_izoO;AMfwJgVrP>0{u*GTJH|lI>__cP^_BD6 z^}f!iwP`wfby)4rPQ`rpAa^hg+I1&ZMQ&!b(={D2udRmpVYruX$_FDO`o5KSq+PZ4nQy2v>}law z+D|Kyn&Ba@*GIOxew??ymrA>n@wXBI{Ll}QIX(G5YVFzSOg*hx|5M=hbb55lUh*D`amS!1q6D z=Y1lnKgEqzZekv?+rHz!cO-#~{ir%aM$@I~uBMWBl2L1MaF7WGL|`v*_3h8nu0+>e z({57Y(mRNxNh6SMR#T8cs$Qg5)U|7UB688`%^OWQR>!IaFrN6y$;DNC9gk;1SddDW z4Cf^G+B!zIdocUs>sXo7DQM9TqD~TUEEW z^`v+}48u58$M+_zx6=^Qsr4LJT6M538WER`e!hQ7&iqNTxYIQaKdbb6nIr)g zDcMLnfgCrtBahmx^bH3`{tu|@7IWO*9ZJ#N%+hWo;GA~g=loT@;Tl*vvrC$58)Us6 zwfugh2tmsXXY{7^o~_fgL7VHJVYL@?08TPTwPpSsXtsCVH>2uzu(7$aX&M&E@^R0; zF-6gH+iP7ru3q246xS~-!a&QC6bz#MfUMafwVJO}={8z4I<3v(wXDPl-angp`T^pC z=c_b(ZC2Asn&hnaE0j3Gjyn_XYnsVv1iu7exB)lYxhi|@%$Pse)xTR?+FEG+CJ_*- z-9~cDxrx9eXTK-?DI$ESHQNTf_>HJS_tx%_#M0*f01_x-I10G~J+Ys9=shE;E{V{# z*BZ2z$>~H(ej6JPJ^=Id6_C8pwCzvhGsAfV6I;or`m#pDA)N7_q59O`q!Czko{OQ{ z+sF6?v6w>vC)3fH7D5y&`wXe_8F$MzP9Q6?xd4Q zwY~KwBO&pGVn3x%lUP2e(JeH+7p5(vk)|V20L_x45BZ1Nin`V9H2D51=prZ~QkJqE zpkRVCkM*qd#g)<8RYlyOVDe6XddkDs8fD*8{{RGlI}(Jf=Y?WBf#m(`O>r{EY+aLt zT=B^@$D3ERy7;1!&87YxVGx4WGa{z^j!XE?4+4^DZZ|d^ZKvFwQg$jW^zs*Q<>c-% z2h@xYdS*X~8W;OKHF;g4+{FwS%j9xL?dGZadde+3;%v5oNrrnrP%D52Wx0)-F-GtSGUTK$pCo&0Ht1-n$vYZQfLj)#`jQjE}wj=iW7q2yl3s+sd}eh z(gw4lXm1M1r#8=0Gx;4^aDb0E?^$hAPoB%+b;XpfWRBlfD+YP_L*xE+Yw;=w5xR3w zgXNk_sF8rjBLkfIt0XmVt7_JLC3O~+(>emmWtk1oi#U&W3!djE_7$AbqSm96tzyx3 z+uW_r?2vaA`Hs~-4l7TfUHTsqI)d8fNY$oDUKJz)$`>3DHDA%%mY)}1^-Z!)Gc@8w z##mr4%yZacfLp4DrPDeiWZNA^Q*Set!Hg&&$Uj_vdZ7LrduaVHZK*1h{8OmFAnYe2 zIqXRP05tBkrs@`*C!}i{Rj02dz3i7O30&@1V~#xWioeu!&ZO!XbS*%`Z>P@<$E`F@ zcNo_kAF29%Yh95Wpdhlf>u#iq*;XCWE+ZeWZ}VLVaCq}nIv-9$r|a6Lu+Ai!`GFZ+ z7}O8TpQ#mn7~_upvst%P^j_y5@~2`Ug)+I_^r;cxeY0GzhWM&ACEL_?IP(Of{{SsX zZzOl7budP~Vo!4vzy8fU%Z6i~^^P80Irw}+z=<=oWSVitSbU%n&pu5Agn1-$-xQ>8 zAbNv1?O$EJ?VWm2PMcp)CCUW19|5ZK{CGXTL7F_nFeyCZ^d}4x68cy z(`_3W2IpX)RR$HiZy##&#^V4sGu#>!j4SOAm&SSar%WXCyNU+DJBA4ZwRoZ1`Hy;2 z9?^os8SzLX=K--%bfq(m#1E!v8yVUM-i)fT#&h(q92}0{N*Zafrx?yD8)^dCDx;n# zh(`Id`%;k@2XP?KGOUbu64=KCj`XH9bssh<(2y zsUImEHv{WTzSYZZJ@HF4D<>Sbax>zDMkHRvdHv|)RSH2JtH5OAcOJr~2zL;s;5T;X z9`(uoJWFvjx=xj6Z*@LoGsq!EU~#}44)xUal|GJR7$D}Hvyo(h9oGy;8647E61hiT zLbm$mgVfrEmDR?Owx$`QSkCy_yt4T3{i<~HN1r1g0C^xB zS5Yw|4XmSS@1E2z>EH|l&lnWyTw=6X*HZXoEK)gy=D^#sNQGN|%z3Q#o29o>{3?&9 zpo)84O2#k>OpK${LU0K^#dSpVS+Eze!LK_kV{#!UKl(K^a~`XgSJt{ZKA(q1zFkJ( z#}&goc#aSV+}pbm#~7_vqpE3rAED{c>NBy(OJvxEncEoc`c{)_FdLM2&TA2@Xg6Bd z#4B5y*qG^>d|Tql!n~y940*+0BTeJZtq*F;@W9uQBR)ofd zRQ#x*k0%`VtG|o4T14F|9+7c<4ZB@Pf<4Xo6-XtB9svIUO3-y*NN9aUXAQQaZWq%3 z0CyyONWW&Lt@;b4(@Sd?(}lv214}cOk+3j6Y~+5Gsfw@Gv_74`=zC}u>L-TA0Bx?l zKz5M5^YzDS#dR<73$KG3RnDU;nIpSKX&WSj+`&)Rj8>~m_({{&j(6POT|koW7VkDA zA3_Iu-Op`nC76O+c-|!d2+D%IcH^}-2Po@(W(z*6=^aq5!L?{f+9NxTaex=x=e0}f zeMu}nEoiqMox_`(A4OwT^3CV%=DJJi_E#R8-CD&djzJ`4kRMI4!4$GsSjloOB({m9 zjQ;>F*aFB%<0gDLzkl!={Cw#3JbcW*U2;suq?qtdq)mUeG-G^>d4Mq?-X zxg!;*Hqo<*84$YmIOG~NLd1c9J;14>GtgGImK`l?r{7pTq?Yl7=0M@Tpo#@(^}eMW zerru3uoyQ6;E!)RLH*5X%C^$HWcjEaJ)}XRSerX}Bz5&(UC+rIj!63Vscx$i*Lt?5 zn#8u+4uN$gz=TIN{84@xcAqpS4{T2c9}?26o6{Ja;=Q8Vx4)o;WOq`lCsqW&MJODvKw z`Tabo8OAvBYTNi3(^Bf;Z!RuO@`+!0A1U%hTVCDR!KaC2nj|a&fb2OW@Hz8IP{y5c zCB~C)buGI?B&Zm|-r-sjQ)5`~~W`Y?#V}%Ltk0ZZoZ%*lrTF2rfnxaT1wz;(1bu@>Q zC(sVwIQFiGdvk4RXD!s(SfWDiet>4Gv@K54SkvOWgVc&UjI#QJ2O0LKT^Q{vr+SGl z@3b3>n`W@NEL!mo^LDo+W42EtJ~Lek*G;vt)8VwXjaDd7Dk$&;4Y{|9a~Aa{fD{&=y(0LYQO~BQjN{vyy3qQ!r?rhrT{_25j$4@Xxj-Wzc^=}ET-Wrg zTbQ)cjd^o$!Z_{I1i3ln=Zp*-=BBKO?WO7I(QRQQxwcC!g_!a{Ui|sZXMP@OSMvNX z^x&J(yPQhUJCDj3@sa-kuJx&hrgXU=i6)L!iAZn4yv3ta-y3s-{wne~;fWSBKov#+ z905&66XEYoYZf-re-^)Rh#n{{V|=Ox?%z^;XSFw>bRF%MvwtJ%+0$;5=79lMcE^+8 zWccq}d-?5bE#CTR0wim>jx(S6ny2Z$s*6?Bt}N~}okGE(UE%Ix!m|DRAEjQaR~d_M zhuVaF1$)!7YLTpyP3lBc5~#TcY4UUCtTm2^n^MyBOIbCSS@ib>=TyONF~_xTNsdF6 zD%tF4hopnI0CLposg8Z9^cCgLSbrL}*Cw?|BN5B#E+ilfU>?~209v2b-4wde)1KE* zn%4Hin63}ZNb(!@%?_*9*Dz{GntxB->Kb~<>C1H(;5axtN1Rn=nW#-a#Cg*0;_^9S zm69uUjrKHTe5JpnDCeH}t1m0uTy;lB-0Ip+pK6+;B-XPmtt+l~k=*0Q(w9@|7rW}c zFI@a&+pQvHWd|GE!Q;m#BDE_qR|A$`e`8Lh*(HF_+qE{48&EGEn+t1=-ffZ&r{ zkE!&irO`T5P_YN`uXwp*`JI^VZQkS&U54)F3u}2UrX^#H65LfMN!sa`wz2A?OcspH z?2)_f36?)Hj>=DdDoKJB*GfIlSz7fE@d;yFc=m`u;Qcw(20?{CrB13Sb&i?SH9nd;0a+ln8+Np3=3E^2KK%am zP1d?KoxY2tX)wgi9lgXm9oUxvSON2UoYvwqmn0Sg*w>{f!AmMI2R!lts|I&j=^Y?_Kk zvW45mNawvG0I%|p!#+iM0v5^L`gX6b`NuHKrEmf6IR4e>jEYCh6!`|cb`??><31^O zMmWg@m!2pdh4L}M_CEF4z6Wmq0B0_a(iG0vKF0#Myh@n^cL%ZduGR27%6e|;6at3i z{`*CFzbn}F{wKZKdSEezOfme!A6^A$;O9HY03S7)>1P5NRoMEQlwc;D9r{aqmh_qsyG81(_;-gSet4jDS0{j192yg_0)+G2^FScIi4{LW9``8_U zVf6%e6`;PsoT86HRs?xK$YS0U6I9 zKx&()EH1jgQPpFLS!BGo)e@`abN)vNqa~;e9eQl>uhd4Ln(>2gS2tSYJ;Wp?Unxk#7?8IX(d}kMUcdQE3<%k_WtAWl~fKGe!#WUe`z1DTT zH%5Ua)bAp<4!|zs%O9ZbX`Z0h^sO4nZuJY;;Em1@lbzmsdsjBrbl2*Br|La8#M0aW z6c*D+-z~CtW*$_YMhAL-sV%RxuBp)NKL)zmPp6wSj&%V+PV7nT<3CE06}0KDpr2FJ zE^Q>9((FYV%Cg$$U<^(a=lZzKMfjV2soCfT<4(9RI%U{~0f-(2W_mBEZ)`0!O%4v4 zj^9$Wky#*!;aIspGcoW0&V9R6H~zHU3Dp;RhP(Vu?5Gk5{{Y-T*&ix<{*=z=`kByL z_JyXwqv`3gU0+a$Np*6^oz2b*9_PkBmA2|WsM5N6-CtC=SlPeb#=N_Ig>&Aq((PN} z_JyuOQY*_ffCo{H%EWQ|^ZU}?!FR89)~lw=6caR>dPK5Hav%o!Y-!V*Z5t z3(5Up5OK%rR=*N7ttQ8zHDXz}UUmYQ z>T5%#-dk#xFzPm!B1mRX$VGEbjDpq=h5Gc%g=cGx!yxh4R>R@6YC0E5SiV&-20jNl zH02~zjeD##y(e3?(k>oGo@P<<;|sXpAE>79y01=)tDx!Tj%2x&3kM*D&w=&NxUBcZ zyIXO8pvW!L)tga`48RTesXF>a{{U*5D*Ugq&Q{LuMsreenWd@2u5<;ryp>*gmetH= zC5)Gmhkc{lzb2@9u`RlvL9o^Iy-Eu}l7k45v9?p#9{i3tsl6R#ZoNm<_c6r?b-Dgt zalk2(KAAP0f;gx6Tc%wCsk2h`UjSs`$o>0%)p8oQ=}xKEt+mMdYpU%R)|r3q_bZG@ zM>yky_xGLwYT4@^vhZ8G7Ii}w%lZ&w-v9uirQN_ zgu8@HgfQe9oW9Y%D6EF-HXdS;pRCo6G8g;)#mzEed%Pa1mKALVn zDIj+^BCVToSBuy2%b>Qbx>K12$Dtx_;Xb6}+xykuO!b|frPQmW!(}(4Ex4W=f_RX9 zybt+RPhII&dN;#+%X>&xUsc4H5|ROSED2-!`0-jUh&obOY4)o2?|mJVdx~RtLI-?! z%}&{<(cCjKe64^Ec;>NPPu6dw^&N(m)(p`|Gc4FSB=7={JmdGRxDl%zfsJzi0E=Z$ zQ*`h^Gn{$sbRxP3s$tH&^K0RrI+8 zqA+AGr0pdzPSSj5+LBJnin_j?4v(g4niiFB{BKdVG21S{yej3geTF|?DvkglLEgU9 z>o@&np=vi4kXhYJ8Ijj_<|&YVqagOA(qx9tK8VyVWmqlDawg^+25ga&<285I8lIhN z@dKxf_b@M~xXZZyUBK<<#%kWwi++vjd#n9(RMGWxh6~$Q4I0IrtsXmqd;b8|p!J@; z)c*j(zm0VA+qRiz#L2(&+yHRcAMpPGbKaWQG|4Wy!uID(kR`+D>o)g)1@7z`P4||tv@D8p!)tdw!yam6!&W~Mx`R;v0AxCT{KF(>>gFk>jeG_9g6Hc; z@dv4O9Ydz9qS9?-fa(`w-O}YsJN7sq_)lVLFIL@0sC+5V=Z(;;a>68$Pbq~U@_g}9 zektAB*=Zdv^2_oq!Z)f;a-l{~?N&&QuSd46{3Tmjw70gq1g6*yS7_mAy3O?(WFMib{vHirSp<}3fG!X4Ysq?okMA< z^tu_XoiE`DEMa&uk>5Vtjw?a%-s7m=&rtsWXiGG)-MU1~yK;lu>Fr$iUe+y^UXH!K zEesJisGyrl# zQV0&mg!<#&vRcfJ%~i}N}QNvxdCF~E>;q@Q!g_o&@js@m#Z2-ooXvBs=D2XVL6#~A~_J-)P`40ZRt z(W0=1-d7gzktF1NoA-WDe4n*Xja(NT zL*$S?b}QWd$)UFCH`V$dPqVm|OIwH>$!{W$nE_+85%$LwsfR$ZiYqpf0W{W2{D~zc zm=oOO_a?5$GXDS#Ow#LJL3OYjS#G{r2*&0a7z4Kh&(^odcY}@xielb4t>IYXjZhEi z1B}zrC;*aAC$?%^q3FjPe)T3l$_Jl%iw-`(RJgEP7&XtEe0-}lPBq;;{{Y~ZKd`4| z_olT%KaOet0Q|E50D(@+&Q$UZbK~W{ULO%)?-&EWIHcW)4l+i2cBBV%paneVj`*Wd z9{e%n@x^_2&n$~4%wxePf(3a&E6y|Z?kHd7U;#MKa%sj3v|ya$z@SxKxyVo}vV0H> zy)||6_PB5St6X71`fHFf2a#Qu;FAybX6xVaEHnQA(G}+WuVdHvp7&|#hefq_afU1a zA5sNq*XBfEH!&CoJ*zv?=&xkT>ywd>*jA1)8b+uF2p)aw$HCXt_YGqCPs}rnA8I5} zI6R6dSLGySg&6~Y7PpROq!>`=w{9r|fTzv|Y2nLo21mG{Dl&dernG?@0*rx0ESB&p zw5}0Jz$eKy;lRrQ`qGdjUAqBZ+>G-?MmIjBxVD9)9gFq?r7qAp1e{RB!0sI9w%*jJ zgoCwMW8RTy#z@?)z^0vKaxwCNbN8b?xbfXZ65tQYN79G_t)6~iUMP+8ZNd6c_TUqr zBgGP_8OQ*A4NwCL3XXCpUC!i*eX>ur87#k*Mm%DfQs(8Ikno@bQDF80af~0Wc%}#g zJ^WXTv0=xWBCL7c!1~ZNA!W%YKIa{&u$!Uu!!Q`&_oo=Sc3*Eb;2Rmo{b(b{7plT% z9x$pqQ>0^+Qa+TYFhjY>!1Gb=FBTa6RyiKnsbbWH2RS1Kyy^hqagp!EGpF4x((YW0 z_-^8#WPQ6w%Z}AkQRf3_E&7w~SU-xrHF?xpHK*{IVOgx^GcM-FPCE~;-nM$Qa~RIh zK=%fsLZvN21PTfHhrLen=*o0mpHk`~*88jBjT*_zTPVioaxh)7#{;%K>bBED>fI-; z?%PaAA4k8KO!5={?Dshy{E_=tNnp-If0;n`sN^w@RYCzA4EUtKD9FjAYqp;gj<)6* zH21X-4scL=W8XaIwHHL_nzoVDT|s|!=|yFGbG{`QEdI@(1D@5bNTfD{)tA(8GEOik znmx-9W0Ci#P^jx|BJW<%TGqmKwx2td+mJ>%AFXCCIy+pr=nH*1TZn&%+DIjVQ0)i- z{JXvRH}1d_wGkI{x%LCzn(19; z&t9{Z^32E)A7d)OKpnD=zSlk-1Lu7=~weDvfte^LdA1|xE;sna5$;m5!1a& zk|aG-sYP*XWg?q2QV#Y50g3h=0rOic>zivGK1;i~qlyF>k(l5CRX^=VPCc|&f5mOq z4dnxpa9DQ$`KH=4okP;P^}k&mTc&UA?H2t)UHX6>xE!_*a(~FH4yDlgk#)^KSJJgO zZXN;Xn`@QAZ9kcT9!@!}Y+@^tv!tP;E0tmA>M2$wkpOd$1_3oEA)S7SOV}qjSF@$f z^osYd2g&N77!eO4e&1R?tm(@W(NlEwoF>-T$Al4JV1tkjea&dBoD8X#I3x~yQ0n%^ z>MuyJC`Jwr4nOBj9M4M7-s;lJrIPWkWU*-Obty}3RAu~-sqM%5eQN8edOmBNN-b+m z)QyeU0UlksAx9ZpdB+^_R*p2TkkFYRjN^79jecn*koNkLea&Yqy4zFyO~sz4)AtZG zBXZ%v50=1CdlE?;{L^#kdzXQ|v-&1gXNOXQX8TmJx>taWaKj_adWL61r9J9W1S-$@OO3=pc3=GO*-k`D`>&I`>qjhgkS@iCYCH>hhpoLWjl1as9R;xO1 zriG4|b*JCYE$o*Og_cPekVY|u2e1O6^fyRdXi{7>+T_<(Yq&vf0mOT|llAtk28F6= z`luHccM(ns<&1BfeJXkwRUZxC#%kM9iqg6oJ6@XAw9~4&TlpB|E&{3XkAHLYtq(?N zHd7Y%5#PP7 z&78ip4x>9*3}nlf=OcZuz|KebvW#5_mNe)4ufZFscLuWdvP+~a?w6Er9ipq4x5Xm z=h8EBEaaQjRrL<#Z1Im_pA_}KQ(D~XlIa$aw2}?&D|Yn4oM4Y6pY2lmw^D;g(<8fR zePvP$ARX8GkL^>wh#fQ2*4iD(xVY4AFWOVK11aJaJ-{DY*hvgh0RF-m$uSs+`AHbT`_E98WA1(Vsa*rpwjD2HC8P|~NYA=S?0_-vNVNh@7Q*AF#)|eawU4GE^p5~7F{$*ni>TXLY1eiU8(VC8 zNI2LB+sD7XSqtbTv4VIfjnYsGFmIKIj2}6tJz%l|WL`&VZ*i9D?H{EYXz%s7VU8

U zETid*jXEoLTXnZ+)+o+6#{hn~tMri=MEYLvh$6h6*KzseiyMmhA4=r~%=Fp{p;BfsfbEmkDd`o4WTS#s+lBUmz@_jqi!J&3^Ou6`o* z5b3Qp7g>Mr60k_}bFg`AdBCZZIq>&OxzY47Wq0zots@ogKrP4i#ae#{u!8DDiq-^M zkD6sw07;(t9%~;Jp091xku*J6CxZEdG& z!1Wsd(cfztWrViSU0*{Cmjo;-?}Fs^z^Ro|bqjdzRL43jkuV^u071<~xYBIpwQE?=h6Nm z<#`)aeLj_D;f-OihBy!5(G;=EkOx11dUD3v8+g?sK!ey0Dh-ygr`+8=oxSWbO&R&4 z4hw$OWEi#x9gSH#g}9E~!12g_SIUu|;Pc@!!Q%t^o>TElBSQK}}3tg=H5t?iWS5~+Vvz7BtCg{oW4rrM;MuB$!h)b1i# zcW{WFKpEk3N9Zcx+vz&anX0VEPP&eNNwE8TAJ(jcEYxIR`cM-#8Kn#@-t=k}28=$T`9GrzK@jpvFdd2Ru@);Dqc9C_I5o zAgDPXO6*?)+Z{1o#}uRoxzGKmOW0ouHLJUe>6;Vg^+ z0YN`X_UBe3-?btc6|(FC_#LUoFOTnCBa^}84tNYHRh*JQ81OStUMu-0EG{FBGs1+a ziM))SF~@49E**h5^X*SP1o>s0^}K)eOX=kH1Lw+y^_=9C$8 zo>=y;6>yx8J?I-|0P%*;xv0&k6`Vo(gO1e2@;c6k4Y&R!>_18_JyzT~?taw`6qi#= zYa>EJNyr3>TN#=gbV*KGK;4t(m_*%fSQCKM<+&0$bvarpe)nzx8UFwj*Q`qw=OFRL z1-4ADjzJXEvAa)zSaXy0pfr7s!HayyIRch1SU;#A86ByJ;%0fU#|nRXD|sZ6;G+fu zk)QOTPfIk&NmG?PoYQS_F$&U>#zcd!E7G*rY1s=pc{rpIHr$Z%JM*~jrqk_ z%N5{-5jLDYc|5I7D9Tp^fS;T7qIj5~BQ2K5&l#x)tcyi3h~bMJ#;2E%m|d+nP*(~8RH)H z)MrB@V4g<;xzER27+<2~xsemqx1KW`oruZ7$LanlEUEG>KduXPbow>bw9p9=Ud@Oj z*2vHI{{TZ#bq>08i&D9?vXV<{JEga{n79E>c{uKIN2yrBtLxn*KAUrIaXq;*O)56x zBMLM7w;yv+IwsP>>r3kU4MJ#2NvKciat9diin#@+QT3PbZ9`4dp4LfXy^DXUJeZE`j*VH1B3&e-v0ol zN9!)TxYKX0AhNVKG1*BNyqk6wJ>2aW=RaQ5Giw(5hgRslM+-A)@KfFsX)3w}qRpsY#$|#Q8$Mqy{=>K2@l>5nrEmD1pk5@XV+U@|{{SlX{{Zk) zx`wKnFU5%)Q<6J-d#eDDI~*<)l1J_hFMJiYTF$A|eKo7;I>nHR?!q%9jko1&1I7nq z{i``N0$+u?3alifUbAG3> zUrN!=Sx6;^7*XTN{U|!B;?f_8T7Av^!=|5mWMEm5usf1Qf8jstRvm3Ujhx*oZ9Nac zt?uLr!fXqGMm@7w&j9pob6>UVNVHv2(%i*-zN9{j<2&nC?`d-!9+VK=DFG--0vasl`gT=w>@ipp~R6ZtO)k)BK(NHT;spn-l5v{1>*FNQA5;L zv1-Rmo6&CAkdx<(3~*{pclMTl7w+SESzc{95M@scIsG`QYcnpP(*12~6|9q=#k;P^ z^ShDzo-N2ENFz{?-vpfYX+TbyUe=O49oyHDo9>VUgj1Ox1AnAvKY zXH@j3lA2S>U08^0kC>0!>mS+6` zsJi#6wr(wzi?G7&10bF=%|Yotq_ye~TJp#^jT_NB#y((k{s^mGV9p|GB2@$_$KJ6D zb}a_NM`H3(4F!j-L!BCQs3)Vb~>(+)Ll8F^yD&2J^Hi-)lh>OOcMD%1~K~AT!GG+ zh4jyey+ioJy;96g^clfF;;V0@>aAa=?)A&7$j*~!HRR_S&d~;PgMg67E z!WnHA3E1QCidrhMGFH6)X>FcR%l2y2woD8D)Qa-unrgYAm9=Fwy^;NZ_T3W{ei3v@uIX-dWK;R!W zcd*@<*`-3w@~I>;_ucg)Q8Y`;^qyV=DUAZZb3;zH*m)6>q{;lxZN#fBi;#-mPLQ`|93?0WGmaV!+ zRn+wkla}=9{{V%JAqi-X-^`P0;IZ}@997=K4>hgsx$x1od371!lS@Le!x8yak-ss= zbBuklN31TXXxvFdZ&m)Iv!l}(3?1@^!>&vI(VqiuE5(E6^GHjLs|ozv%SqD6nr z-@ZNSCraCE8m^yh(^r~g_I6ei11-bnY^bFCfO#j}cC9N?$z|1Az5bPSulOriW88}5 zdXD2cIPeBI_pF7rjoe)c)HZPHlL)OZ8WmYNEQ|dk*q;9YrCd!0siS;Kk@TxZxPwxJ z5(pt2j1961VsxW?b_p7lSjt|!!WTdtfp)4;a&0##UZzIoVo;{UK?p z-AxVD@dcaGl%1?N$ol)5>v}GYZ>Q<7+AYM>`bo4r&$*k=Cb{3l=r#CtzMs{sY&~NU z&!yZXhZs^vIQrL4(afg82Gk5+iYN?$KPYdeYdpKiukp# zsGj;sixggcHO4m|VfXf}?zdt5O4Pw`dp)ZY^E5S-k z>|}*_{$lV)?_RgE*7aVSzD<3tApAsSRv7+a8vB#v{i`YPdc#|{gHW@-v<;=pBPo(b z!QMVn0P+vLJkIDXTcoXemqLrFtaS;_v|$pfdYDbv$xuJ1$f^#vytvhV8EIOMqiu`v zF@V0_Sw?yNaat=*gVt;{+oiVE=eN;i8<}I940GK;@P4%C#$7LONZDH1fRV#2VRoMw zBDKfhstrVF@ag?Is@!Rp!Wf-BYrw=X1aLXzjQRRtRqeamTzqRT7jmtDCA@x7f=BC5 zS!t4W95y-xa7eZmVm7^=L4rzvL`(0_b5n1+3iJNZVArhm28!K)GblJ=fB-@6qzau_ zGLzUz)!N>_V>-0ANpz`gD}GC8!94q5`*}3?#N94EDSApq6lEhW5-FK?w= zxkRiae3X7hLH8h$&2CuAw?ok^rqZ;l*4Aueiq>SBIpL3NnIn{D&p9>G)uY^U0X{1k zq4WlaqiX{8BXKEK-myHNBDOo=cRy2GHkC2FE>B=R>AO{t_@^zH>BffQNQ9f!;}4EV zAe?@cL)I7e`hQn+_NO3`;4reY#~ESB`EiQc>bgFgs?L&Xb`e~(KmTlLmu)Me;7UdRn_i?cdhjGs%q^WrTjU{z(Xip zV;uM5wXR+%v%dk4Dn@Zl$P}YFeaz z2%!;FcogJ7jGTA!YEb-mR_BxLPsDN$f6}8y4(y)wDhzJtzqNC|8{(_g0}WSCC;tGp z06+RP>wp;GQfnXnHPgPWCO`YyS+Tnrta0+|&%@$HJKu2(*y4om4l$FF=h}`ix!kVV zq1m~PWaH~!UGrX;lBAXVrxa>Hi3+zp$e{q3_Jf{$589S3oP6Y*#C2k%k8D?=EXO>Kc%)T#B#;I? zp7gsw7*cbO-iRmPxaF{E6-hgpxW^QQ9Dp|veW`Yz7~=%h2qTeLg=2$Cib~@-%{KAN zBb-cQPj&8S(lty3b|)G6j(Da4P=uUvdvi?59%NDm(Vv+8J*q+Ao_IaQE0hX15Pw>L z7ARD3I6l;g5~Bd2LX6}A-jC(T!NAYI6hkCn@rnTS#Ar$k=Dg9cz}UGwQDlYu+p*$= zFwQf#=|wjjY^-v>F+aTmLozR^Al)M{`Emx)PWF_ysKda`52(MCk`8gr0eWGN3!i>y z0;?7Uexi+t9Pyu}1Z93;Snf3_8)$)fB#;tZ@I91pDc}(HzyNunm4G{e>%6%4#6HkuX_4Q|(g=Ha8KTKY9jbk~oH4mlaLa+8&dy zSbiT=g>GRdAZ{ZE++wXS7AYZ6SDyIuOl;W+1+p`o8lEYtj*#h%7p81p9X2-KD;9Ti zm--C*RU`a8>37xt0EjF%5y;bel45q-A}yYbeFcS__tT6dNLh&Ifn4KO>JFxx zrmi}g#_rPI);BL5vqTx1FmbrHd!G0d@9;L=W766$QAKRG$tL@D$}~^4fd2r8?OxQB z=^AyVrk$pw)^YroFskMKGN+XuPJ7l`Ei0<8ZFIe6%U))@xJ7B;lL8f1=jC4ne|qRD z*^n%X7=!`+vr{O@Et=||!)z_|*Rj&leOEzt^nA4Qi_mhJ2! zH*>@3O8DiM*whO?i0P>G>2&`9;ef#NiQ6RSa6EJ0>58SE`rhj(kQJ}lv>EIxXY1~k>ePre<}@sO4D_FDDAIoBc2GH6$~-6 z9kElS(X`v>Z2tfiXB4Su>ntFQ2*?~@f2Ba{V>@)k%yCO34CpZ=TrlUs@mYR{{yjH9 z&HO6)R{icy$!-Z{PzcE9KUz-%dvT^(Nq-)s_K~&JDpomh^D3U;Q_;^N{H8yX!KfEH zpwjgFMAR;0GtA2pow)TJj&Lcf-EPJU`zsf_Q9amZi5H#N89!=*yW%dJb*^c#*jmI} zGsMr&U>p*EL02=_+TB8;=F&M#Br*m-+eqXQ?M};o3=GXI(V*?QhysBxH#54jGLe8) zv!D6Z?js#OGjeqa?V~f5*v=Q`+wGdO)2yxSp(4`SD1?o@wv29L}&=GBac4C%4#lHLr_OhGdZ2+}p~^K7EcGAGJ3KE$TZsIr`FnqO+#-bkRC$ z(#8Z(5M|kT+~>Lb=B=k|sU=wADijR%s>|J8!ptST#pSw7yXA?b0k|2dnr@?KcQZ?A zcMPFG-Lb=M%`MSiqiFXQTGp|Da3CvvGB8!|!r<XPH5g;Kg@8MlC?xpLztWxP(?Mls*DrV>i`QYa z$+T`^?V2H_Sfr0KTEed<=U{^;$A2{n=Tp<{Oka%DA%Wcds_qoG*i+h;uVZCuv(0%L zMBr^Z$IbTlrB!UbI|0^h?*oOkvyA>8*+2-gl5+U+aaJ)y>NeY*j4}=g1Y~wUdT!5B zvc7$m*Re|Keq}}sN8j43be(rk)irCING>rYw7!!mAQvN&N4TlnTOZUP%X~#o}TfX`%^IWK96auy_LoKLoo<8w~|i+r*(~N ziVE;YJbU7+glJ^MtgJxij^o;=nUI1pk&1vsb6jo4)>^1o(a0>sgwJE`ODr+^s0(0t zs*a=Aw7m>N{7UFTk~YxHc+crp3h6oZL#aT!m4WUt-l&mt9A%fj&_T$ha}1FH4{Ak{ zH;W_^Sa;1EMu`fj033r*H99HUazRnpabB5Yk_;b|=aZiF>#~dz7y;P(eds1szy~;P zPAD3MV~v#v4Y>9^fNFH=o|qtKZ?`@wGRBI^%%wvA0D6&}qDua{A6)<^rNoLBV;2(Z?uR#KONH78p*z;a!Sin*h zu&Sv9>dO;b~ zp8nP6iZc;zbB{C<6{q~9x6;Q z{KbY?`Qn*)6_xM;IT*<9D_Drl{Ko_W2=AI^NL{7ar{x}TQ$X9CoSbp=rqV)Pmx>zj zP@Dsk+ce-LfEUF91RNajX-^%6N~fbdk0;Gbw;61a&MGWi4*4Rb#5nxe_pW!td{vs+ z#<8aTbD z{{V9NJ{SB7(Url-;1V&T=TWjAwT^;)iGhzYXU!E(k!XcAWP-)rc@uk@FQR z$7)twoMbWguV5D+GktJr9H`DSkzWed~gIbKf=Vh-H!hfq@|N#XxH9!voGd_B2JM zb_F;Ck_{)JB7^}(0QtG2nLb_t$o4cIw`T}RK77=6b|sr|I6PFSP>6Q1?mUbf(S}vSz>@0<=$1J4ET!u~n7#xGeRdvnv#iv%pCDQI$ z8rVkCY3xL|3<^$UiE=IKt>Wkbq?<2(%2zeaSIQs`0Y_VcxgTgcd~-m5V(V1bZF zZfd?Bg4Sf{<i>GaLUy9mjww6_n;?7st&RFgP z1myGE`--&aade$`)Y??Hw-dn-iQ!ovJ2!L3??Y+Q9Y}hUO4qOTm~LgWh{qxt2^pvXa{O<(e@WRtN{m2aJv?cEw!vgo$aROxGZ?Y{1~0pKdv> zb#3^oUxIh->d73*KK}rcToLmx8U1@#OzV3MLsQVDx6`f`8HpH>u1EmqkF92H`VQhw zm%h=zi#Uqu)ya5)2tmtZp2q^D#8h=%t7_V}MZteP!z|FBQR9XaL2pB#+&%JB)E{VOn(X@+ueQev_X@u;#So67icF)jK^%qCp zUTT_tt!=N#YpB~mA$bHq`lHVt3FfZJ;aQt&SJpbaUDK{0wYrYNDB!n-?ndD;jl%%) zd*{7Z>N-84#|V# zobqa$pyHILRajffY~*-UBzX~qIU~U!6WH_Sw*5oVGxZjgWo>(_&Ms{jIGt2{=gHgP z_Nu)L;iNWNmZub&obp@W+q6e$3d@$xPr1mXvk=!pO=SIA_VekoX*!g4o}0j{{HcIH zoPBGf8(88}xZ%5xrBihGMx8fR{wsT_PpRFk0^T_$P`gkZvf)F#JZ+qn%=S?%cGy`?C+q57Up=~}^E@JegxSmq(%Zy_+uF^rv zVTRVzTj?pRAegkbME*7KvRo-W%8ZaX^H!Z7p=dYVd(_FOK$44jmPo{e|-9obWj2r|VieSoEfoX)W|xf-U-lxQyfhpUOU!anxT8 z^i6A7H_1KCt-(LdF%gFM?^k1?^mfrKw5Jyyg`P}BWh%szoa9yNnlhyt&#yG-k|6+= z8KZ1`yTAt=ee=yl=^F&L(|U(fx3-1#D|?wlP7dM#Uc=jr*81b5bg4C{wTBaaE)@YE z)@}wdj8si4q;$$YNtxl*p)|IE*YI+Tv(rYPaZj7@`#Zz|u z-H-Ku&aAcclGc4v=v&)rnnc%$_?1@KG!D4nAE)}(lH1|mOhc^=L7|nT5_*dXe8-&R z9DO~iZSPFz(Q49a#@=A>AeGe2eI{|)igqTO6{LhlA`RG(JDzE_k~l-nCcuCaPJ03G zT-Qp!yy~Bbtm$_m>H2H@yEsO14o}z7 z-0QkX>MOXTjV7E!V+%3)VwA1R$?}V3Bkz7 zBep6`kt-^w0|VUEKS*22r)co$_cw2MBqMI}xZ}VTcj}F!PIZiX+f2Qs3r?IYRCvXot?ZVo;3O&M9E46Dv*(8}@d7*e^;d(aG$q7ZmKJ5z`Oe2_8kii}ej z5(31W@&0PUHC>&69@RUlG+-%aApPj2W3h5U&po?Q%E}dop5O|FY|SAB@)-V6oYM9Z z69|N|f@n7@AR&JF6o9_wZaD8ktW0Im@N?i*l@elfhfgf(v$~&U4 z+RVcz=}d2wTW(@`J}R1;jvNz=DWP1llmMvC1u({77lDF3sby)*fd>QJcBCo7uNx3s zu&C0I6_~RAlnS!D$-TCmaC6$5L_UcbGLp%Rr#9G1=~kP;6VqED0zbMKl?bBtp&Q?U$o1HYP?2k((lqro6lm=_6L z>wHw*K_ZP`OJ42jzwNNpiAHnJ)}!?RAH;OC{{Y*YzyAOssSDo%JK z6j7CB00ZxX;(_SoWJ1MMXB-ibYqNY6+TAsD!wSENJbg;7abzQ791Q1-*Jt=58Tw}F za0mV5pZ=(?H{|;spT+mPUr6n1b|9Yv8TJ*SP!yKI9`&B+-~j?WS0T0l1F;>e*;e59 z@kpninRu@-QHEs$>yyn`gA9n{2cI-Dwou>$lis|nrFRu1cc#SgnZ!RSC3j%^RIn@r zG6L$#anG6|>4CHWL9Z87I75-`?Oq{EmKezxHA5FOp2sif~%`3FzJ2b*D0Uh|DCBkwy9Gst}BQl;g0(@~zHQIm~MoHs}8SZd_ww(Pu zR9JCalDGrSJDeQgbM3`O^_X#u*c@?QYld<8IRhesL%N>B1bt|7yYfKDs`&3+x9(hR z@69;{>XWl+$@)+{h^VZ9Gh~uC9mgl{PP>$d$VUs@4oy^C%`;ucfK%teAor_u-H!R7 zjds$AU6VP+MMREC+Sy3Xe39O!-^n7O+A@4p7);)QV5iS|B|N4@4WF5@!KkyOh9Y-m zvyx3ox8E*#BgSey$E?{{)RUhzn-41sc4mkYF@fCIFFiTc`gWW5E7s!@vyqJZcQujivM%`jL5}0;NMe-6n`GlWoYH_w$jGFed7_0%z%Wm0 z2L64LDUb|#rE(2`1^N0?yd{%)&ppBOOi2UmP(}x88araJGOSEUJho32??ctNX-uCo z{#>5aHu18v2Ot6DcBfK3vauOpM~>7DEC>~(^>BI4Jkzm61+d)5b~8$;h@9=lX|kcM z^88UxEMbs1J0JYfL2RormEUubf<1)>A&MpByo%2ch7oq zPxpxa{(Y&LWRdO|xE-($y+nE=5?m78mi(fnOwPeJ3}c=JL${oIW+EQg*j zaYo4;m?I1Jt122_{oIqt?~HLy#X6gE?T|U~LzHAG8$%j?~|Q+}%KkQ1JGFL24mZm@@U!pI9qDq77@j}-QlB5e3Re?FpEFKbWNn2?Ejxj+L;8}oB zocmOqff>lgM{4qgSjN==9@V5pyqWPEM5G=LJ?Y0UxpKJTq!s5SF^{Ku84It?k^uJ= zL{wL2cI-UUK7@ryV}qVJsZtpT8;BV>!14E{rE(t#bB`j3iRcw9&ijrz#R>(+NY5No zj@yY;9A}eE%n5CT?ErJl1NkRtUcX9R}F*1IRbq-N>6vAea|&yVR9#!*N^ zjARbv*Jk(-Py0Q1G6rtH{gGaO$^N&e@xAWV(nt~}EZ*Jw_N^KuOUja1d92S#lp(SS z9Db)2r&2S3xUUxne^cBdc*_t}_s@#(DmHf9&AN8jmRU9Y{#&Bz_ z?&NAk^&|j0E(q*9)PfO{X|8(L+SvmzczT{nv%*HCXRU%`H`QL)C`cTC||BfG;oob zG7o&y#yrcP={=|llQ=-6WE}BY325wDj|2`sN-L{~fKO9^e=pXFA=pOFSGlE;2muOY z9>*EYN+6k0AS8_Z_!#d_ysRKr05B(leAI~IA$IK;&m$COA?b`D!0|y0PniM{o;jjP zu{jF70pwHb1{@HeH`a*8*vJE({8SKjnLSlrH*!D})Fs5~xL^S9k2PwLj2sMmiXFRK zaoVToC@F3|MrcrONfu0#S1`ZI2p#^EyE}ZjKIV)MI3Rs0VG-_C*x(`O+JYkB72DW; zVky~43%Q%}`4r!*UBV5gBe|+jMkQ0mE5J&v{{RpNBQ#4gv#|McIPd0}xspKmhrgPZ zgOtb#^v@%{4K)dpMEQmR=bw5zP~Mw6zT$iHOAP6=8(3uUDXW13Muf)MQac(^6wT?g zFeG^t{{ZJ-7m&w)VFTa)sg+XYEPB~&Q`-j*wk=nF;{=}r3R3k+k7f;`ZJlx|E6 z_r*2nIbKH`eQ5;eYda>~dsETOtTMjewF})EOJKwTNExRU0?D;;+w$g;d6jX_2|h&x z20u}lSB2Sp}k>?ffT>>*Q?dOIxvP>fhh{-MOS62oh0gEXle~M2_ zh6IXfZZ=J|l;Hkdzw1&Zb=Y4R0DjeXOhlYYw-BmEtsg$2&m)d8USOOtOmW{FP~6<2 zdQQqpvjry|vrgQERUl-kAGKsP9js)0t@0Mh!Shc^Zmk&Ok;tg=zo!;CBXws!AoXC; z^z?$&@qasQjeC_|?D+Z$L8pclNnTvGUl|1P{{TuhKxJ>H)( z6ZmR>0Mf0h^9ml`gnvzG}6dVD`6f#^vEU5sMOoI)#BR})3hs>%RMxgS13VC*tYNu1x zwCyTXH}_FMTk@A|RJ}7*)3rE^))$amAUI^V?RtYwxVkV}>K4;DVlgr^%pbAFJXMrR2&&wb1lNOz)Uo5}1q7pUS$pkY9`w-NBzafu&MF=f!z_q# z$nlDef*H4xGI-5;%^g!auru1df9fregLsjF0E}@=gD8Et zQgM~yiUV`E{`0kD2l_`Cs2oYaa-HPIwsuG}K~Lk>dv^$;}B; z_8e|bK9qt|eq4i&;+TnFcGAs^cJ`!HWM&w_tNx^?+1ZcEps^#(dPiV(GY&@s-k34W zv8$FDCXHj2oR;z8mg>C&GJ?*VfbGpVvTo!9xDOrZ01!d%zHPzea5?+YY<4_+hubv} zNX*3G9AmkqknI~+mhVj}NRte(Z?!C0ky(fUgWuY;u%5uOF?9|=XM1M19X$F=U z*-#wtd;Mw4umnUGAfI8HqAZRKK;t~qQj)k(fNrN)sEtWH(=$sN$e=b&1X!eVBMyHz zpA^iBa?Ir5jMRl~)19l&e4iBD(X@;dpOBtv;v`y@F{mKq)5GLm**2yExntD)ik^0nr zrz6zUi0m$x{2GxVJC8l<9DKTRpYe}~BS`mP=ls{Q1ep1;&M6}#e5c1^D86q{jE%V9 zkzZZ&VN@WEUvqBr^`Zpim2P+hcc5j%V>}x10Z@G3y%jDdfq+SaZ9Y_XuD|d#WBw6z z5Vq5Bc_X*VD~*3J#-}Ha*cI7+24LtHZqhD2bLam62(Lfn{{UOl_`dgS={<(kyJd0D zzZIm7j(J|g99Czf%7iLEGX*&MVzfcuc*q?80BZ4YclCY4GQ!T$jjZeWhI>(rZpy}Z z9l`HHoCX3g3FMq$pS>RAw15tNmDR?*9LUo&N(sksM-=iC*Z>ExsOol;v}2!oG$c4A zG~ zN~l+cJXO^yy16+!K=z>}5zjxls@H;bkvC32-I52IbSsA^)W|#>)I_U`q=RTD893sV z94ZWwa(ES0c&M?Nn3f=pJNc_M^u0CQ$0Gz#4cDjX6>!}BY7)FcrgG8yHza8nff;ZTi z;eKp;nwIL}`=c92lwuAC>s}0fAVg<(y#~Z586Mr}_ zL~Y8PV{d{D5t$ZN-TISA>kqH%dSrY1P+WY{GXO&41Y?|03K?EK%s9`zayzDLZ;H&C zvGtPeWLO?Szyx3c{wuD>C-OD~Bp;_Wo9k|pve$Ik20sscgE2~n4Ux`1xvg=yV{{Yn9zJI5K7eAZdhbK(jbmN4 zyt10nZEe&vQOZdS+~+Ij?^)}6KC7Fr&8%9zoLa0_11_0Ko##9PJH%N{p{ncH_Bw z^HZ0T29migBL*4Z4*8~%a$+m7mvuOcF>UX__5zf)B`0Xe9qT291(w?zN%s_xw1N^E zGANPBQ;<%33i0(EW81)>^|LcX+mmU@$J)Je>_AYtIpf7Ps^Ic?&wqM0Y?aGob``3S z5_-O>FpPtYQ9I5~!oMi)d(z7*5;6z3G}~OV$`?8Jz^tu0f?fGu*zzf~GKX=6-R>*P zIWllg59>=KoDzHc_@ZI7c}&@3$N8^jV$K2iea(7M8bO95>FrDNN6QoQcF3!y6f}&( z1aVHfyD2FqJmgTTZ5iA_Jk!y*IV{|JQmz_UoGwG|=h~cxH&97(#P+70nl&yCM`|UI z1kUW|o+=LokU42GaqMVJPIA%aHGcm9Q$sw`dITtK$DHGVPg`Tq0i2#WJ&CI-VbooS zqjr9|p}3672xiCYiV5RKU^fwpDFkgE)o#_PL}LPoz|L{+O){XAHq4R6YD*RXf})rt zfC=tT6+kg2Lxlq)+qE%qI!1);%Xl93?^ryH03cHb3L01Wuy844-^a)w544WZ(nEMMz929`zzJ4~mT$48uLWs!Ul6+rRd#S&hX{Qm89q{{ZtzpYKjd z`Tq1BO~P8A{{T8la$Ir8isi@4r!Q;b9Io!i6pTjoc=9;pQpR^25s*BSN^t)GDZ-I~ zz^}0R#JC&=$Bc|umG}oIcWeq2zNcgu^wuN<5$=iuO7tLU-%)l^b4Q@ zkNeMW{)nza7pP(?z~>$7u6znl{g0?FIa`sAKk(O^@_()A{A>RJH*o0^{+DjRc=}eD zhADyuPjOkEj}nMqYXZ0*;3}X900d zn7&5b3+LLQhM`GWSxE;SywJ%D{ENRV-1o&h7>yKiS3F{zbxd+|j>F!Hj@Hg%A(?sO z?@wAp(Z;CE4n3&fg#Zo*9MU59V#l8trj|W2ZeNt`&$l$qzTpuJNo5#0rP{0G;ia4UlIc%Qw5;`hwCOPgu| z^8igVDjmaXj()V=%Cxr#t0`Oz@kkSNuQ8O6yZ}cP6(S~OW4-z9oKuPce1Zt`^{DEu z(C%Dix$oMF7AjZ{e)JD*UHmUS`%?O(va3c0MsrO-vS7O$A0Bf=$n4R&k~!v!0V4)+ zgyd78I}N9R1vPSvqyWHfJfCV~G7amzmB$>^4>?{G;E;Ky?j04fK;UE_dJAOlDPqL^ zX=I1ixZR(Zz9>Cx%`BU?;SXw}>h6_~S-pnnYFCzWq7@Ab@H6BO2kTe%p@qXBBWUqU zdN6k*V*>(^fq-VAi&h8Dw_fEV7VZM)x&ma#%iB05QSln~@ICcMt&1ezbUL z?Gq&P?0Kcyn;y&h%8Wr?{{UKYO{~Fk573$wjIGkRY<#%IDnauzZUm0p)B!Y7s_q0} z4&Ex7-=*yT0NSI~3oNU2(0VMJzEU{n@5Nm>XNx7U4}2c<_MaMv_J+wc?ySzK&{{UD zbtTTDE$sW8vm;$FL-g7>#a(Hdex0P*C6$e|cF?a1s4Zu^e zd;wRw8JkbBmhvmft>Ss%{{VL@C@i`5H8wmExlTbm3XK*TQkXlv{0ed+5RO1O=M_7v z*9el}HVH$O`Wk6sWL5rVIq-YbfU(IFGco%fDYaBsR1uIl&02?>20fec_oiAQ5mpSx zKbT^?@kf%zPDt&Z)Xcy9J*1z~kx~SR$B~5|X%?qD8u~jG5SHamG)K(6VoG zBVGXJmnawi09?~i-9)8>7Rfl|(DFoINx0-^Cmw0bhmT^F+}Yy?wO8ue&Yz@MwcXsv zshlo5vGyM3qIAztS#<@2UE8CmSqyBhFjS9vxkb6+qZa^LzNDSu&fOf+z`4q^3w6 z{Lx1;{vSRG?}`OxEWq{$wF^JWvJl z>u{%Xk(zQtmkdSDJE8NXG63!)*5s*%DGtLLs+NVXYX|w8ZSV1t7S)ns2 zURP-P{b_>cA%z+5kJ_CenKF9{sMB>@Tir_L+6Q?*3Rl!&*fIW^v5VCK+Cnm0 zpqU;C`FrCvB0S><%|?$ZIOoMlQag_VvC6l_Rn+U0w`}qFh{^rw*)g{xztWiMwjYIr zazCFR{%Ud0%09KrkC#qf*Tims9ia)y;AWOpbB~mc+)xJv@=I>b1NB5`?b{FQ|mKZo6TI-(!H;pFz zeq7wIjOXR9LA`+(K0j*f{{RAc(X}n^2@|e3{{X^k&-p*r^!_)!+&Vwh5xFctIL&Fo z5Hr5A-60&fB(^?KFb!#tWCSm?o=?`iTpj&iCw5~5ILG&;DoF=_>qw}X;qhMDpp(0t zam{p&L)t@UaQ$f*0B~4+v)+Y9=NND^+?w%;H+RX!X$}NM!vllk6gw1<#fZtl2D~Fi zU(MUzlhX^Gq@2|TV^ZDt?ac`rTPwJaIiZnPZ|5T%bMHj!j1KthXb6QT>FrI+3C4Gj zziJmTjyHwR9G^ba+?OFh48&*L)@|+*X(Enc8ITtCr1hiQobjA+Dzf1reNarq50jjl ztLk2$(=;70-(4({ZGEwX9F{&ks+zx<13#(i5TteoiU>Yg14!o?!3|WFJwzwdw8>_) zb$vvTix5l%d?h>1A|-k@SNSoZ)u`++`5&dw>R2!nszu5y9UlY zj8(9nGY!AX^0DKd^vG&)x{YRNf)YT^M{0Ttl$Ibs{V`Rt&#~sjUzK~~H2#fo>e?)# zHeNez!Kx(POs=I-PDg)YYPK01ObbLa&p%3?bjQ;y5b}BAm}CC{bqCWY+Lb;~P1t+n zj44)M%n0pI-Q9%5Q2=CP+aKPYmL(3dh=RT`yA<0&{OpC?=lZejS-skNNelo;`Cvv5 z9%%zCgvbU#=LV!OV;Ecw$C4=uxWr;G0T{rjh9;d_WK`gNu~qu7ROs4EGoF9 ztx5JZT7SeYn3Oa>#NrICj>HeAioUSaAk;U#v#gPOh6ElfoOHiVbSG75(9LII5OhXX9xE~tI{5IymcSEhJ1()HbH+e@_Fb0CQT z?nZG|Hk1A)*(D5iI)(&!2C*G$taR_;w5QcX>~7g!BNjmlFi9kM=aJ^7b>CLQ)A~R* zC?>p?J@7|^wG?-D_EAzxs?*)fZPYf*raoo|wRu36)JRxu9PK>jt2$FxU4N&obp_hO zM8k#b9B{+;qUwEe)2Fm{yO~R|V3EXw=8=zO0<(Oo!K6lX9Bs`vd2U#4!`bSkPrjR?+8dy2Hv`pWA60IPMaG4$=H~R%=)YIwjFX4ZZBaS?kaBxn1 z52>pySE}voI%3k}NU)LTyp5!S*vEnta_&IqALlhJ617gUj^6O;_I7h!-kFRm3Xn3} zcV{PzbKbR@T(>i6&^6S4jHLl+_XgN~v*x&HT}JP(bbH$yXM$_ncAX-bv9t37{s-u5 zX`ppJr~b@qO?MbN)qs(j*+&WoY#$>a)x2zpJ%aG(8e~tC*f;D1VfToQ^ZkVfxhZMsK0D z-B(qY*0`24Ybj*d6e}cv_8jH7^H*AjQHx%ogG94}OS`sQ&9m~tzU2G;D>)9a(^?Nq z$JSbIfb4Ii20*;*Vn`#O$}`6u>2Bt_i{d%Iv9a}Ly3+T@6F5Lv^U3;S1pTX7v~)W+ zC0UC%ARb03twEY7S+lqv=M|9X9$Eibg@X1P!Z0eQj4k7|tvS>IiC5VK7@ zh%W5Rwh+G!BMw1k@sJM{oTG5kG&Zq@1%q*s%@K{FfEMgX&OBEzgRM3Bbde%k?~)7U zhJWR9+m8qb?lWCVG!Ji?h6l0w)UyVD$Es{^x{iG!DG-LDIGUx|>MTy?s9J)(=_cz}Up&91eWM{k8=XO7+Vy-M0Y$FE%1H0eoD-mne7bD^Q zj-xG~{?NT52x3wcGm=K`I}YZP;a;Pu$*SvGMzJ$W){>2{kCbu92m9meQ!2NYZdEd* zXC1RlI`ANopxIfs6cFz9* zQquL!66*f)Ys(#41%>@n#BN4-P%t^;>}vFl1YJ)qt**zW>B@?ctI2d(zGVu;3}fHF z+NN~<*0p^l3Z1+oQiNm?9?08b#}+L-CyrPj|w zYWl3#KD_BUjuPYY59WR-;H;Pf^9<8hH+Io9QHe(U>2@nwF#0I$uylbtUrMMCSX{{E!Ttsyt`66p{_T znw?8=)e}v6RxL0g5vB(uo-ljo-|t#O63BKjVS$S0zYMPRiTbNwiu(F#V}@J=lLTic z1bc8hS3*XRO2@$SQp`kax$QLTW}fay;e~s%oKor1*xo{yR+Fsow;`0|S3i`+)m?V_ zZlyGmS;cPBLkw(C1CY7M`+`Do}9^U8ns_vZW{Xbsmjct_yoo?hR0t<+Rsi$ zZFP9I-kJj(uGj$aoc;TKjZ5j7G`k%M<^CsSHNCL@V?d!%Fg)WV{mo#6vfr;lT+O~j zc?=A$xmL#H@d0~nMWu|6bAiCm zw>hZxx<$uV+-oor!r^s59_7fyQLaE$_s_7a*laa!u(!18i(L}c;g18_u&mi8EbhwuPt$dMTTNKB@le+Ojzp|BFbB+eoSMxw|b-0n%hXwbUR6y!|C1&fxI3R zu^@ifuAd_FQ7c_J3;~`&KU0JH8lvgDFTwPP>I{{=c-jS--{eIt zjo&~3t6M{<-1WY(G*(h1`D6L&Ih$^G6V8^_^8@?q;STN#;A$_P@{l7s#XG= zvB@U0Lzyii4Do|fQ2d;B@0yI6BpeJ>gc&#($JVi1ijJvZ)-kXj{CNKW^9p&#$Ud3H zH`O2wHBVvdN5}sFNK>pbJJ&8gT{(MS5&*&3Spv369O97WTr#TT%{eel`$xZWDYLaf z3;`$WUuE--PLCK+>J(!m2gM|X_GAD?GmmQY(q%&6jCs!#ED1y;fTxc21B&R;k_HE{ zJl9_M4s)dHfRX<7PI2dH^IVQ6ML>-hCm1!>egu~O8L9x-Us--%f0XlHf64y0r}4e+ z$lCA;6DIY^#bpQgS z9y!RM4gAkIqYv|N2>mOhaw>!>FuacS;1IdoP7ko9C7UAvVuhdOA1Tk*&1w{QU&_6{ zqnh?tY2*R*uPlUM?8nqlK1d*WJ+V~_>7m)ZN!oMYfN5Gz4$=2GuRAsfzySRyBvUGc zZ^{TY0~aNaQ=RzON!S_ zjR;4UP-K;X;DsHQvVRewlFP0-e^7>Ejd6Uj$h*E!Fgg83YipwPeTJ8*O{l9}++Rpe z>Fy;i*gTLug=an_+Q)I!eLQ_7E&M_-{r+|Nxc2c$?ioI~n)g<=)iv!iRBKo?d!cP@ z6bI#yoNi)$@^S5fT0Wra$$Do*(xI13xzwdFB!=YSQ+5aS4>aZP!)Ya#T`PJ8wxZsp zy|4sF9KZ1pJ+a5$rrdlvwZDf>8lIzjH{q?87FCnwkc|A_OlSL5mJKe6c_r?TX?JmK zu}yOlyT`jYBxf1(R`4MTFrhQp3W;Z?t)8E6YX=Deg-&?@3{`;03UV>npK5`Lh>g`r z0iUw_*M|OAZvg&+lIkqZlAcJ;4I*!0xtNkXiehXbR$@M0f3c4ysn4Z`1mh#(A>9kIz zSP|S2+NgC4XV7|fD;+-0OXwln8^nHgF%$EB@H|$4453sbI6iA3XVY57rE`3Gi&D5~ z+1YK@*5RX1O!mka=k+yw!YKN8r!=JNt8G`OEuIT_a`*r5aDB z+^x`46GFlB?Sq=;x{pBhPJ@2eogJ>Rjld?_;zCY*@_0Y5HF=}-=T+*8zwLImh6Dhc zTQ%4v`Z@AGf}E!J)r^N#?Xu81Bi52%OV5KA8TW z`5H?F=Su0iZjq$Nr|BygwTzH9Hte4#KK0H1E!;kz*BTb5cLRP733e+FCm@~=99K}$ zG`kHSO^;8pK@2gB0o;@1pQ);@y6MYasL|U@xsb*42aaVKF|hN`j}<3sy2$lbqX+$| zbi$<3kP+5QH)8#>`csw<8vJS1WRcc2T{%8tMlr^7R}*|c(5JT2;b_~_(~jg|-!zTJ zcON~0sXE6)=(>)%9<6e+dc%~{KRbX zbMASmy~cN5p4tzLdN%u*(Gtn`Zg0t*-TkTkQrFV!ty=2RE%xuDlXL$736SR*`+@zc zR=dy|6^w9dpM<=l31uW8EGol~yc+$+IxXs8PO%I-O0J;z~JKMWU5pQo+0 zCcON0@q^RL;50#aD4=#3^X9fzwiR8w~b3SGx@RLeSVeI*YcHh8-9kVw4Rpf z-8)yg)ok?ij_P6aFys$>4;lJZ(==n{QgVCL<=*xjvs2e}&Yt+SaW_U8ni?eAAH==yAWSkq&(g>3e0^i_Ubdt#4Ivar%@QqscKI3ZrjqX2z= zw6{cw7x{S#GCOfxTT$v;6!@QQs$KphTU$o!xnUspAPjj??_G|2+jv5()5RELfLXxF z_U5wMuT4pF*V=Zs8p77FLn2D2-Mev)eAMG&*v@Mf@#`9AQFR@qrOLr~X0bq}Fw+I@ z;CJ={j>7){P3n6MBSW%|=uc-PGR|a#ucop0@9ud0Yobf(Tibw(8Y(Fq37*# z!&tw%j>_Imu_CaXfL1_B<0G--v8``pXS!+Ti{dv^`qp<*$1)*CbooN4Jm>4;vz&_)|AgSy@S-Y8ph4#by}(A%@)f z3IP1a&TFM1+5ivx(W@0t%numFHC*rE){m{}eOs*D+*<^(xqZc?{LI-TZTbLtu7@VU zmIJ*o@(7ilY5g?==G<^9KAou9>b9bLDa5dnKyN$}4QB1`Ml)T}j;qyC>U}e+rM>lp zixtxC$W=W4ydK?+4uzpC`rf^#!+kZxGDGRjJ_iy%Bn)woa&h&m?xoYZdd9|GR`w~O ziZxLow~>^{Gk^$EGwoNJ1)YVSpA>q05!>5idZ=7?=bx=vZVY!pX+2-3th#md%5?iR zbhl?vT0(y*&UgZ-bbUKq>AOq2Ii{LxtGV6_3v@h4&%;bd{3AYlS7ZVKc?&B&dv*z=F^pImco2?@u9)fauLD)p~N^#+&L zv+5d7r4%|<^q#HKGJ;g+BmwpJt^8@Zht4uMG;$PG-IIae>Gi2?Bc5wLGt}+z4&PYQ zE+C5DMAcY8oF^SbRd3`$&!xWvG$(rXXPe?~(ow z)}!^OPW3GPO{ePG2!nJW#0vqKv)zt+is*1X{{ZnBgwf&v2F^hSoDs?vLUx8Hx4k#G zuF!PpH2oGkS+1UWNG4evcWnf!uaUfNJa+q3 zewImK=@r6W$toi>e~mbI@;&yCZ#4&@HA(fzZ#4;zrw6UFC^b^J*k6x5LJjoiU1r_ipp330W{64JAuCgJ_m|%Mi}Cy55__7;-6gZ zivo0_z zuKWfHG_638d6|Fmn)Cio^}Roh?{?0SC2h){_#BV5X;MM~%A9?xJ9_O}cEmDV{IbM7E7>t1&t z<7Xqy6Z)nhfrI{N0yj9oW8$<6)MZjY&OVf-n{NxwJL0^%`)vdY96nANd-n58LT@r) zlk}lt`1yVFUdA#r-o1%CFC!i36s4 zGlSonrs|HT(=0FcH zZ&kr2>%DtkX)m=4$ATMnlPCb}LiqdE`>1-mN6_br-%!30$s?+TRV|I(i(|0+)XY`u z2@Vv2jP{~C)DS`Ew;WbpZnn^p?^3oGz{{q`>fB*+Hu=Z79&ztex@)LyHT^z4Lc&oj z4sp15Q|w2BL2jZ}b}n$EuodT72e2PQ%{UnCAmE<#13k+F`cgojHUkmdV-*ta$_uxN z?jedtf^{G3U$OmJN=9PnPP#AOOrJ)s^0;dWW?cRu~IQft5-kK;!=I1B9 zIDwgph6DS4)RkT#_!$fJCp6~P37y#Yrb@E%PDj+!fkU8O{Cg2q3{J$L%z%$46v02s zAh5@Sns(+4rUbi5J*y$sTKj1Zntht2+Ks}Ng&5%!amydy_NU4<5sllo6(b$7o+)63 z5dx?n1IA5r4Knwu^j^5L(zT2Gd#JT7%)I2NUicUo{{S3QpAmYCTu;VZ=u#|E>4qn} zmv_#^j|!vP-k3U(B&o}I_pFaqb%mc#$iifmkEX>~j4QAM2tKDj*w$ZF>JFrjue9rU zWpBgkwhQ=Zis55&Ic6S6KiaCgsdZWU8%2{_jIN+HRYqrAGYqiF`ch2tS4F$l?(Ho! zr~Vq!T;E8D=#TQm2+29eIIB%Y;jQ%9CyX?&9Bj&i`iLE?AJlrzt7q3*7LN>2{to4L zF)fCF1bqM;ilL5ft?DkWxsyfIVz$#X`{8WyGh-lrcO%cR6dFE&>dU^X)h?Ya*3!b+ zRa?}_c_bs89N++JZn37q5wnxeJ&kaVn$q=~tsAS(i>k?OG&hbtBvFY%huN9oNan}P6 z&AUE1q+pGYn0>wJBaduK8}kv#`qwPgE_H2N;sa=ns$JSaBvSfuz~uo8oH1d~e)T7Q z#x~tseRZXD-l2VE2%6#)B~Xq#@_$bBeG_qUKTBBLY8R&F=6HmR&Y&Ega53%Tx%78> zkNuo=CW&_P8_g{YW%p&73CM5TKczK&rfPjVr}W=a*=jd;aH2&W)Kb3z8yMO<0nR^a zXZkw~zL^-4$GPubltK^#fyZO!xsP3HSNgr)qoY~s5?IA$Xe573jq}XJZ9(T4$8R+Y zsio>WE1r+Ky1Q#@xfEMV5jjBT198W5wEp6fD|7J&Te{GCc5OpUw}w}UsAZL~p-IR3 z_N@0qbv@pv)E3s4+L}u&*2Z^)S`<|rV{4xH^Hkk$qifb54t0pHwO?L8#(Icf=VK1x zh0lYNpVFpi7W&qcqW=K2*Y{R)#~zpkym7;|$P17d{{YLqUfm<7OCoJ#-SYvEGtMfK z!S!d>bq2m@2A+z7)G6mU`H8FtNv@|iQ&{UmQ7LH)_Cq8R_P>&}}tyt6k16AS3Y1f0ZB2+z-;a8IR`41L!kZvnnCUJWy_$X@qE` zWtiX$@C8i;2gu|WZb0*j!2Djc)P{uGoxR@3Y*}V7k-@>nRWwJt2HFMo`1tL6SlX& zy7UaO`iwFYcO-c@r(B^qSjKCNbS=KIebsgyMCMD+Ufqr4o6aI9ASm(e!0n3bABsgr zX*Vv=N!{QImSPiC)GajV&Bf)+cF+#(BkdWlHmPY1i)s?xM7D^VjD@~XKK0M7t+ejD z*ELhChLQ_Xv0KNuj3LkS03Xy(8TJ)Lqv^JC{4dw-*`<;zJDH4Iq{$mGwAQe7tDSF7 zx3Ek3@1c&__Ux6PgPz#;sQp2%Y5HBFPj!0)3n2>|Iqpwx-Yb=L1UB>h7}4(~Q1dB> zD`$`iAdjfUS{**eT)?xtlTZlg`90I zzo-kM62Xa4=f4%y`gdHl*K~;W3nJnf3C88a3>;P$L$QJ%iu$#mr4hF_D4}tX6a^IT z!@GGjUb@$`D5Ni9p=OCW!fxE(9RMTpg9*w&Vt?t?I2b#_4mt8y3 z8g;V`PUcvzSPMw9T~%1*00)n$r~WX;jnVfmVUko#G1_ob9h$IxL8%BdE|Re^0?6?{ zsOOB4{yD2Ar^4>jQ~nF_2Vc68E^c<&qX39l_#}NRW7GXxrfXe0{9o5dqk}()4Bzg^ z@J(d2EkezHCu(wDO9Tq`v7+u^Jx1gAs?C1VI}V)cyNx+Z*vowojZ}Q23=88P0pgY; zRUco%*IKl1rrs;qMkPc)nnXXAPYw2=J)kaA0IFjfao`WHHCgb7RnbO;XJK(NJ?z(V7-8GF*XzgS<2=-pw}q|t zu$Gco&G^s_utVx@SX%E1~D+^Njmbr^X|F@cUYBq3JtaLTm}x6qjhg z-aF&zP4(`ft7$qE(`eSBX_X5zw`jl}!18M2r@w{CqroPWv3?J4^8F!&$GEJ=#Z=U9 zEMe0$i|Km;&r?_YIn~Wz18_%#m=BDc% zMl^_YMbdOgnKd?3@{pL$ILE)u=CZchqiP=sb!5Mh)@xv}Gyzo=2qOewdGqxZC#v-e z7_WLQKNks#21b;J$U%j{KT6hjJ{k*Ntk)Z>HBA;BA!U{b)Z69uf)93c;8YD?t8eu$ zhuYkiD7H7YvVImujOB804nKTU{SN9O@iSc!h@7>YC4O(rKpXE$(_Js$c{$ zd-)nqBm;wQ-rC(cbfuUF1kU%}~ee0KWE!0b{^;NvmN4RB+ z02DUez?!^glruM4Yr2<2Nq=dlME6iR7gB#hC_TVE&-kY<`j=g`yXlwoVoPm9XqB^_ z`SFh98O>Gn?aIHxJz&Pl(g$^6oWKAQaE-zx0Ab_NWy(5#f;N#!5YbK3p zy43DIBwy+6e18VE0Hk057k8hj`qo~?^|arHdZei5GhdjF!L;D81bxT`ra^V7&+#Jv z08ocbxJakd{{WY~;gtaSSdMT>;}svIdUsIO`U_F9x`2Z%=c#VaN|HetKiG`Y30+U1 z+1T`Mr+&7g886H@iF+chFg>%3l76*isLutzR%vb`XY}-`TPQLBz#|^rvHH}lC#7{8 zzL*v|?d+w?=U{N*5OAxXxB07GYeW)htqz=!!9Suc(nbdXGBQ84QB&}s-`r_envLs9 z7VhbuB$0{09C4n*IsL0;B$%EQ0Q|(BKGh#ZuuBat-pxK`g#zwAapt7FA+DpBI0z3O z`K;B6abp94j8n_=;L|Y^%a4~%Uf0EiK34g#$2@idn^a=iE%gVzC@Z)EIidNtZ8Cp92|S*htu5HX5+yW zegAts9Wkcc2P-l zZhw~S4u4;N)}-u53Dcg!@1XTq;4#Ih+v@3Y0SSg7oCVrPzdTm8g{|*hYkFT$-$!q6 zEVi$yGLv_+GKUQ1C1%x(w9lXDdm5_jLZaido0M@2npVFt+VAbv{ zqkGGAD;&N+rmLT6kXXm@SE?R$fR(VsTo3pL@%Q(qJ{t87pVR*U3r+UPAMmrVk}xs7 znX~ef-vb}DbO1gIPa;DAPX9`yMLSGniw+JZ0w1p#*Wz~YJ6w}rv(D>twf zP{4rP{{V_W0-!E=135IMHmTzv{{U(r5rzmGaamIgKqt8v_RSg)SIEcrrP!e53Bj*+ z=imEO2bkFH48z|w<%Mvkb7Xr`mK*{yMD| z`ExX2v}Y8xsaOw*clH{0#9o-ay=IOZbay9dJ^OGzv>z8WCT$BrylZ)FWNYM`Y=Dr8 zGt1+TZaec`Cg#gb)O{H)ts%KFKKRLxJ4o^~UeqGCn`&G_k|1EpvH(VWAH7MPlDWrM zT_E_E(zl^rNS+d@@}74e1NJmMF{o+XDb)J@nX8tzlJ@WMHsk_Cp}a0U*G-x@UTECG z7pR%oO60HD)ONG9mh~?twTbRxQW94{SwZdrrCoCGgu1Py8qZY4CAGvTZsfR>$hcTk zXNKn>)D3q-xv}_pq-tquT_V(Ey_!C9sUamv1L=?6x+hR{;_57kZez5ynsGLuBaaPp zj>=EAM>V2Qj|#>jQlxF$jxpQzrMYz1G3xDh&!|2WUdyCucM-q>Mgv8K01>#1cTw+A z`rb*ex__f3g4*EjxsA4z!vYil54LNmLv3#o2&1(@3Jt3mTqqxI)bV>+94PdT(f}C; z2mLB_DAVRdWWE+#N+kHdg%Iv^`4&?lJepx zSHn5Lr{S6@ph*Z)Cgs;&WVY@Fli ziuXs!t#t;I9*wQ&FQ?ktn9^At@MPJJ0LFWP&uZumG%?7w=wjYiD;{~k`g>Mx$Eh#x z`j%TOeK0nk6wd^=CQKA{9^QH4wYLTjRXVp#{89cbE{SodL3}+Z-6I(-*kiujWAz5D zJ}cc`TitZ+^qM`)k=xt}rBwNVZZLq4ubS!5t1#TaaoZ!BDV5kA+gV09Begoa>N&5( zh1YMs8%Xl$qkQB^JW;pJ1Ym+Q?nj#Tl?JWUKMy6fmrjz#CAC$7i|=X5ulzXSP6yVy zD@G-W(bFp4F_378Fv0>a9lLg^s^vcqZ?)_FGUul+bVCTS4keV0yyq&Qd2Vx>i_;o* zu?Bm(v4=X`*EVnOkf#w$nAT|s-)nq|fG_EFo!Co2OXIFU!5e`-#;&~+_)O8yt8 z+`$AAwkO`b(;)HtRnCWD29u@3r`vq7LV*ASpTFzgmknqBD@SuLN?hC9*&D~5Oj`zc z<2mE)S#5I5Sk*og={AvQ*DrRSn-mz{fjbkQe`@MZH#f}SgYQU$Zrh*G`J}j-=Uq1X z&r~I@t9z$5ogSL3B8pB-l97VLJ%AbgO+x9d4_i9FPzhs=EUv>SlNc_?INS8(=DLh{ zSs8GCW6egEAq@VSpXI+Gj==jO%6yYuKTx$v zA^F%G_a5}*@Txk#bB_SfOSWH>XK$WyQr)ANL7;1i*Z0xsjV-m6BL&GRMF+b^AO;e?{p?{@oIxXyJe2}fR+sdP3pd7It z;MUIw)2Rdl&H$(31!QgP0ynpMomCv_<3YXAI+}i{)-}!c!XOGpHZ-gU3I71gn$z?> zs|Y03Z&Pjc@%5ZHbWi9{&{g&2wWY$wvqF;vV8A+q$n%O-MN`PyzG_>ltLp6v(_FCp zJ;c`$D&&tzvACZH=~&Behn5m)P-*v5`ps`Kcbuu*-2Oq6!s5C^xK>!xYnJR7_8v_| zby%m%<(0YZ*wyfPWOZ(W(e=ytXO&tNgesPZ;IJM)>st9Vojvq{q@)2Y3Ir&i4n3*p zB@0+C-AU-j z+eqY+X-*ZTIWe#NBmRBsRPr!h-`bKX)h1@cG?7RurAWa9a7WgtFSTYH{AT98!g+BTP}q-}d`y-|bhJ)3VM zBh5$Gx?e)qmKM~kmPUwzj=w7mc+a(7ZQY*(`x*u$MUlbJinfq&LktikaYnJmfGVIE z01shU&ZN~ph4pp4{oSG(AZ zJ#u|Q!xlQ7<4FutM}o*ZHav6MukHFfr{l1)uohvWvXn}n_}|^V{qgjz0+lJ9ha!Vm{+aYVm32a-p6m}@K0ohxP1+FM2InXX@%Wo+Xd z;0}0ES*aRsulR=&!qS~9PAey<2?KJTG2@f{g><pAY;El(vWiIU_WCO8ow)3OwOTTj z$pa_t*n3lLD;37Vpm+B*zl1UAByP{KK|RP^6GHeakhu30T)JfBW82z$s&!6V zXs6I^UJoSlYCJo;9{uV>e<@W1C%LY6>wJ8w`n5;!m*MBZu>#hhKeqPn*{GVgf_}=$z>3`+g?L5fDV3G70HKmP{ zAqzKbWQ=CB{U;al8JOj`A8rL`{WT|r>@pAQUM?>Fr?}_HqdBi1N&aFm2VqD;gZgut zP*XoC829mSK~&lGZQ zp4=~}yy0>(DsS?#1~JF)O|U|6R1Ehx#b_Y`hbKPYN<`X#C=_VwK?vMu!KVFK+D)W@ zRQ%uRQrrkzCm=DSR*Xl7&O9FUk}`+ej1n`P@%E_Zbo@%dx%D<~#yA3{Tsov=?!h0^ zfkIdp7{+<=N`E23;E_nJw;bc%fWr->WD))=%e9{cLBQbRyjgNdKYl1#)O%!mP!>QW zcjw=MK$56qmGk6Rqbs$r2Rz_Xv#9=&j@%9?49pmi!*ANW@}dBCf;(|dqbMiJ`+HY} zk!>hPZqyG66Calg_N6i}ZMhxuijF2h{Jz;7QpYe@$z?zCpmTV^Z*JM5S0|C$fm?tG z`u3$$fN}bPLYushFylOU6;}GKv==t^GPWkTVk1B@P6kidRgJ*)B0iqvZhv~?b7_|s z{{R&Yn)LrCLdSeJ-!ry!rWHIB9s&Hn)I`oeg9Mvg*aEuWRWPUG|@uJsFhYi(blpt{2)>8*0ZUAJ59GU(3-j>Pl&QuGtgq;w_g=+MP+cQxDW8TnIj;J4U+%B73htoV(tHjUF) zHw!p|M`ev&d=@y!`hk=BR%=Sox_6|N`nFwO=36&F>FUT>7Gd0ZVsdz@Epty>{{V)1 z&E58|Ca-&IIFd<$J7aYtD5JIjW1LZG&n@1A@cKL%EZXIkRc=8zBa!>n&dP6I>Wdv) zuCxt8!|^vZYygoAXXao?01r6tT?0YtcsiCHM%uv*t;MsLLd(N)KQ`XuzwcQ-w9_m! zp0eomuz*^^w-Nxct`u$~KfO!4)7r=5C7s2g5v}c;yvi58I3)e}{itUj?k=x& zd#mZ*>gpMP43f#qtZGjg_Trgtu;{%zsuuEiCtI1Bx7=a96mW1(I}`3es2VNR=9SXj zV|NbNr-tFyJbyWE~P#hqRp31X2j$)4HqS-nE$%0Gy@H1|^f04hrY zmV^2gkLEu^j0$U}t`RzutKhUT`s}{EVMoe9QU)>lRFG&}uQZAJjvWdKS*4NQGbmgj zb^e_BKWfw?NMZN;8s&W<)39rLevdS=LL<}Jr9PW~Gu17)?jx}6$9m~P<^y>iNFeqwuh73|9VWa5af;lzQ76s$I#p@-BOfwdAx{d7Of2+iF0z^Mrs zF_WB+KU$4(B1MDP6O2&6u#!a!w6EMyV5Fa$pBbq~1%nRQ_o#PfF*1$cF~>Q@3`w@! z4>(iLG|r_5t+Nx9Dsp!Dr(s}6&Hd?RSeuQ+k2vu_!$!EnIO3Z)j#ZEIrSev z!2}!+d{G1nPE#2j$)>6$vNA8Iz~#9hkJ^l)vgD}7dnouq(I9s5!tDt6~~+FFuh z(W5E(0)@r~JJGT`aseM&ejTI+1Rte(RzL!bA-*N3qvEV2G05MI?FQOML z7e4g)Es>laGs&SL)&w%1G4!O7SU5fpie?$sOM?=~g_nj4?@zJXnSKUoD&eNex#v0U zUIB^>u0814mn)7-cCSSvFl6$l2NXLz#lQrDDSl?d799DdP?%3O$+UJ8@69@R8D zKtKZwAM-&XCU;|T6nh5Fc|U&inc4F@eXCI_GslKF1Y^LUDs$hz98<3+^D%*vIHu%a z9P_{xQ$&G6;Ac4RM4*Ss$v)JG!#ExVELq{P&w)ia?nywZal0H-#qi3C2`q41+L%No zm6+|uMg>V;zzXMEioUE|=(3MyQ~v->1*NcO{tK{mG18Zf8<06R18n!_nQ9`W=1PpUWzCHoq{i~#MqQY^=&x-R9JFjXL zbAp$KFd?0&Ud5LhN=MGQ&K8^(R9;B#GXOV=&FDJH3*E5{w| zdv2s4GNBwEJ@5uTwI`+YEoVp9x|$$NT5aGa7k`+vj6X2@Wahdpz+{X$jCbIUNT%*& z2ht%&{{Tv-P~_IVH`Yz?`%%#5Skfr7=)|BGobL>I@H^tHy0@eHi&N>19qn}xHGMJ- zxcs6_et z&^22gt<|rkxmL5fJL4Nh;&JEOAKw)Zr+PzDi>9U1Z?zdMl3T`^)(%U@{{Rkur#;Rq zbqcrQ2rcsC$)aX(NI31-R&Ap-)wY&;e??qr_Ll0F?*k!J8~{|DpVW7%O(#ixXW`bJ z6`n0_Y=YccJZFI*&j;^YYpZ)(`&YKR!$!(OjN^{fI!%SGg_>Dfjj+PIPzLc$?5r15 z^uJK*mbzxAd8`k_*`4$oC&26MswpieeP_D&XEpzUtCb@$1dc_+)K~@~I zj@z^QRc@Q`>iN1Mw43cgWxWr&*tap;;j_j){F>>u%sn5^$32Zik=7Z83o4V3dUa9F z^@|-YZ^CFd*K@7SjA7R8&eG$Jzg{Wd@QZhfPPHDUOIu1*<;x3vT_ zvqixnN$yV~n6{oN%&dcOQbEUxD5MQ;%&VVodV*A@O{$>a=aMKh{O|@r9#5JkW>^u3 zCqBb8o1{tARY}hvk;M!ZypQX)7#s|dkw%j^jz%gE2gLw|y;pG_)@)?s86&lNAiz10 zfuD2#0In#opD=_u1mtG~;-)OD5lLK+A4+M9JXlZ9}?(Nn$uAl`W<&%Muf?Lftq zoL1__2tu;=KU$RKWZD~yjN+8XC`>K}?)=gf!CYXl8RSp`A!##!2pAdfM`{~;GLpHZ zhj*yvLygCS;)>NIT$kDb?}MDxs9D640lW;3X{H6CWNZ~br9Cq;kCO}sY?=hiTd^bN zJpHOEiDCi;#xwM#c*+wiMtfs4>nj-4CEN#K$`$28@_N?dL1PayL|=->^We0x>OhO*w4&^}}IrkmD3G4b4DoslIC zxIbao8fBj1M>`W>C%?UG5h2<}OsfD#?@TNwdrapojA!XbBv0w!u`Et94Iba+fW(v9 zm>OgxZ^=*vdRI?NjFM>aj^n>>X^EU5W_$%4=dct2+^{199nCfY1TzvxdLfB0z>GJH zR9pW5P_PKa02w$Hbul(J%4BvK%{dqY1KOi$;YGOl_jk#uQILCQj!i{63G94RppCo= zj8}4yPagTH2nI4w*0EdSuIiq2OP+tH{b?g1kw`f!-j}Jz{>8X&{59lIA_Yb>-nnt| z>C4*qkjPeDn{qpkr2&&N0Kkqs(5r5hc?8k0V#6c=IQrMweB$m?mQ*+xqWMk$I5_=2 z)Gn9^>1*C$mzo_EGT81G$Y;8K&P zY9lLwG7tW#uRrAb9-qedyRSz)UWYgX01!Uaq)`hfnxVMn+5lmk zeL4MUcPorA`A=c*MDh})f{&(6C#o_(mV3}Oa}kqiE6O%U(!5x+mB$$Nr-^1v6elM* zpkl#DSo!_&K-q9fRZua(HB}p43&N8_kUyv%{?%*`r?=0JX-OXlP!AZ*D@Sl6RYb|hKGgbd2e~|Rns+R#+yRdi(GEziBaKQR-UpN3g2gW5-1h-k zw_vp_`q}PqdBYlS`IygR{&@C2fag@-bY?|$;hdWvM?Kn9_GA5V`w=7m%)e^ zFkY}v!^^h^3=cUJw@^DYEX}klWc_Mj+@PQwXPWT=h6(NC6`2GWp_B1NgQ<*+07oFv z+*(UAfRb%F_VGmgONM>dBo7kU;ImC|zP>w;nv22zq?7J+T`KE^snAKYD%EGlJ)l-xT5%Q@0sE+MGs@ zaSWge2p@wY;LW(;j@9U`mMNWpU|fTmocL~3oOl!oE@mm|ouCYw223%uyY>Kc+|oQr zBW2g-Q-DWmn979wqmO^BCehywpKNxbR4MINRt3~H)0L?zp-=*>4m%Mb0OTB^p;7}Tyf6#Eu0jkrGIohnEHK+ZeV#X?8|##Amr;~vy@ z_Q?|MSjIc>G0jWEC;^inT+k}YSoYaK#G$qj0330X!LE6(^)#~eg`ZF9*AYPS{Igw! z4}*+=x#GG+yrBdxN#ePuQE3)>W~Zo6ty;S2cTE(LT|fzIc@SWJUPpfYz3Wnuq3O#$ zpt9HQ;JTDuX^es5xNxFP{AEZUJJys$6O4hM6@t;VtKB{}aS^ZK4e zQ1s32o1t~?rKbsji7et&XJ9vhli$t($LUs0hlaCW>N#N54wa9SM4{^=DCAUNBpUCiUVhe>7*dKG~}M zPHTJLijZ5`ji%>MXzt4Q1GvxEwOJ>FPFs9mu}DYa^!t-G<_FNhTm#2*oZ_tD_=Oz0 z2-Xu#yD-UcV1^<9XKwlXd*ZBmh3qt2Sd!kx+3xO`TidF_pl@#9U^}0+MCy$(bd4WL zylco~O)g<9zN+ylBL@I^B%D(qcR}m5n^U&aHC;A+I^yRB65o}x*mliswy6wEG(W7p_z&wm`Q`Z1905)ZpyZ4Z#Wbz4`Qs-5iYZ&?+6Nft z0)g0!VVDj{IRN$_N>@Y(c)&hIMqlNIZQ0N7LXP;|mgB*oeQnn~Fu~@J)8ME$J&DFD z6n7FUe8B8HW~QetxE}uJwE#RytHHTSu#j2`s2A=rfk9p2OeB@f*1QQm?| z#q(|qaCq<9htXpwB$JP=M@gncZTLRIj7=eA<8DT1B$dGfCkDKVNjN;MMw&g;=uj8Q`qMHIyoEix{VBHvlmjl-9@IABgv7IM zBffi3KIahz;5IqttK^W$1IH?N;Ex8T+&YlORP)~zTOj~B1mJtn7IvTmudva~@IXO3#3WumG=s-O+W#Y#^63=Y*E-8RJP3au=%J~2LmIvE3o_o!PE5?8DHn8%6yHc zxdck)J~m(uYp?tX0rdO-0L${sr~d$|YtQ*U$D{GR?(5QQKMwgJhX8)np-E#LVB~h= ziq7U)ldP>1EtPkQl@4sbgj#*#9=dmi8lEx<3z zfR6^c$0w3iKQ`YTsMcNFY@t?Qc?wj~#Qm^}b`dVs ze&_OUhQA0Aw%0sq&h~k1Il~*OV3{l*l>Lj1Hy+=aK#YrR&Z#7w`7|5e1Je*Jy zHUleVM~>7ntCHy^2o3U;&u_2Rl?Kn077BciG%(xl&Kq#gk=~9sR3(2f9kbq*)lLZ{ zpJU>HZD(d~JQ8THZzfH_m<)beUsJgR@GHQ?K15vNk$p1FoQBSEQ3xbfi-*C@0b(6U zZVpD;Sk+2oDJ!39b)x`f6Q4ChQO&i;$I5vbqmi({?7;W`0G&8ObBMUbc|vfkr=EEn z)^SQ*7%9i>C|$>30R1V$u~ZBZ&$TxpQrm*$`NbqqBd-1l;CbvQgBD&F&vQ!>u$8_{ zf;-Zus~!<}9&##QAq&bA8+TyQZY1!TIqlkz?%Fu#J*nZmFE|8!b4>a?6VzoqD94)f z%PGMM59xtO8Oi*)@yBX%RFSQnQB)G716By(~{nw^Sdi5b-Ay;ADBz_iqV38-4Mc5M-F#4!F|e5tJg zffVUZHJRG)P!|eIZeo*_AM~tGPV~P{UwlE;^r>yyOIuixpif!2>M2_$apMvX z;Uo-8CxK2IbZnEH{Y_5e6{Gw)>02oxx)xGAu*vEnd5Zvk-fLyIAx3-BegL#Zcd=9C zQT_<3DD@6Unq9s?#5R&iC!Q%nlB`3V{VB-dELlEnx#yZl?L9DdxnNFts2h9<6*nCB zG-KqMkEk($z^2;J8TVzwcJIY4+)fI*C?Izp;(;Y95S$Wo*qRk~5)g1W{MG1VS>hxW zV0ppuUWx{Z3bSM|@0uYdQ|Pu>WcH^mR$Ys^u-&pvF(jinF_X6#&%Hlp9irUhZ#lv7 zRu4{HBqX-bM{(ktM{Vv4X9M$8n}|270(c{XL2qs80yhjE=iZszM+K*ZvdQyj+r1JQ z5tK6#wEOs}2@VIQ4nd@Aeb1F++qGN`i6wck7-JPA7E~E-{zWqtzsy~Nk=rEE7|zZJ z!k*cr@I50F9^oc&+aC1HexO3EbCcLmoq}zONZtwW`_XMHftZm}`-*lbcOsb-Zu<8W@l^|k7|&r^5SCSvP8mr9niz0rkax$uNoW)nSkMos6uy?w zSbz_jsAb7zD!5)e4%GzPxwmgw@wvPHwPq8LFmaydn4TQQRnAE?5sXQdPFN471sa6} zh3 z30)TC`Sj2DsKD*Fi1^qu`&S-5U3q(74gyt3P(j8IG!G*?9ORsL_N8J6{KXCiLEL`T z_FpurRPe(DcFjDTDviE!dGAdU4%Hao@(nn-3-l*}nknJ8(H}26xvsnL0HLzfLHSRt zo^#F(aq!=nLFD^aTKEE^@YhpeOv`xcl*%@r!L<)+nlZs z??k?%@$!+_flxCWIb$0gr#WA>5NF&`fGPm_zO-frEs>0m27#B;W^=(fr+LzG{FR8{y(xjw+ zE_3_R1~NaA$Bgev5Wc%m_FVT^48o+2=S zs}(t+426Iya5(p%VC^_ka=z37BVOmXU`;IHeP$?0#s)vVc$5lBT$9ZaM7e#z zeo}GIdSY4f;x2ZBk}=7pqDjCbx7vqz-G@=T#R!xDcBTpPMR{D7M{w2m+=Ce2Ubd%b)37XMsbuQ~v-n;)X|%5TQ@h{b-g~J;C5} znwF5Lr+`4*Jkfz58T0Q&7paZ=d(sfuAOVV!;^0wAlFq>A#TFza6~d3Hp><$#6-e<- z$m{?f2=}I#bLI?2e%Yc$EOIvfqN6eXPUda6IVZ(N)AfrB=6J0xR%eTn06)0*A6kj> zwhJL{Fhv8R`>ubz1-A0X=HUH%Q<5WLixwdN0BX9Xf-k9%Pi$j7>PbW>etdYvLyRzD zS3Fb1KGV)mkx&xEL;S}hzZ6cc$^tT;c|O$7&O!O`LB}+xWKWft5PZ-&o6Bv%+wN&L zZEuu+e$=u8FiVPQ?ixH5JMvBd9qP&?BJCVtlir9hC=YD=iiR&EX-o!4_pr%KjE}7r zNQ@v{FJa9zO7xMr2lb}Z0q38sdf#uSwM-Z)URD{#z4j2~4(wZ-h z+T`p+by@X&<4X4h6@it$-IN_o}H63Kc&#NF38?2uvs>06bTNE_;AI{p$lkJgS01=da38x7=K->zStqBpt3jP&Of_zhg0JsF_?0eIN zW8fYLrrqNiUHN|;(Q)OE=3)sSN=X3+g**;DS8*pG z`x-_q$=kY#$Z_0`IW*fj-SUikRq|0YfHBxqa_Y`OJXKFtYYC!hGF|Kv%g?7C;y1Up zYNK7h)$U@p)3lhxH`g!;=8^vZED8RnJX9JBi`Zh})yFmkJ7rH zxi{iEhg~wRop53)ju}^z^M3WN(7LA1`r}hr-B#AeZpSA(lOLP+2CgGmn8xBVe9^XY zHuyL-MbbL0#<8c%c{FAud6-13FnvBSPv5;>#6bMry!~lZYPkn?4tX^E810&m8y9Sj z-Rev$v$sE5=Wew;nCb=Hq+BjL#(%{|1RGfVjf4DDy<`!kT#?B!u>RFvzx%8wpUAoW ztCaHVd_EmKvBpC9`_V+svJ7oz!NH^;j48_;`J-4#$qV^@wf0{$GL7gM0;4`?#4J!} zX&w8~DqtPT7#@4lfqf!r#sioRC1J9OvdE+cZBY4amvuP{+Iw4h4G!z$EwmXj!)4gO$f1`_r+Q@GwS5 zdPN$~|#({WIfxGELAd%3Zj|U^a-kuR|k1U@s zto)d6R**}SjN`p5E*-E?bK0C;#A9hA*!C1igg@jMA6i-?UtPszX)Vps5yKjmc0H7P z)eW|>so&1C*<0HUK0riLPQ_i%f(N(KqkLDKFHJmTgO)UJODoIUi%du41~Lr+e_ZHOX$IwU*VgyG~1;`*O$i{?%{P zI;67M>3S5}qgzjRX4B7c5de@A{ZF?v!Mu&~gJObl!Os;>)i)a4T2zgs!m`iL9AtgG z6Zfb;3t`q{=~yAwE#4VXh={vonm*v4Z?#c;X};9#V6rxM(?SgBuM&w12h4b`8jE!JiHL)-G|KAazy-}I+?mr}d5 zk4Do@pcYHOe-Sp!IgMAA1F*@(Qe0{}-kb2t*0^h!XA_80M!5Vr- z_0E~pnYVz+1>9@QQg9CoJ?b3-9XqJ{ds@;h z{{RT{U95LY9}6I5$Ru{p1XVV>4bN3;8ko11dGD<5k~7Lam$y-^(I*x}Yqja8y#AQUNcmRSj4N>%F zk9DI!seKioD-ej>nF~oa56j8nMm$zh-q!C<=?Q)SrWb?oykN)&i`h*c^F!bL>&ht+V^z3Jm zT&~~_cKx{@wMD=HY9H+dwx2BWMPqd;G))FW6dmxx!K!I`YTs7) zqpNAB)=O)=G&AS>%R;IjEOWP@LDX1nVP8KS&s zp|`Ys*5&YBnBWpUu~GWlQ4?tQnpM5(t<+I^&*{D-j~_CedE{dsTFPj$vwTz5uH-U$ zw)Tc!Pt?db0~!ASPimLa`cL0|%aKTg%sF7E9uqxEH-BSi%6cEAzg9PaVlR&#C*k5%e3OValjx*hxk#-)s*gA5Mf9>D#4R1b$!SX=bgnxBe6b$W(< zl1Z14o>+|Y#bzxb{{Uy#s(MvewTyPA?%Q{CR_-(HQM!*q7M&TQ^=^)2j`G47orXU; zao_Ep!``(Z>e_|Py|t{DS62xX5slJ2oB#z!xYg_-g58?s8LkFx)F1bQ8RPV>dDS<~ z)LPB%iz3Y;$EK^>!4m#?-v>Wo_26XHe_Cl;o|~d{r>}JX0FH`PwU9*o5#t|GoK|{! zjb~T&1<=qY)HI9eRc+t-%(!Bte@;JI&ipsjI*MMW2U?p_Xl^5EA!cCExW;)H`Fvug z>b);*rgW~jv0NRpIk;3t3xWnfKl=8m9W8GrpV8^B%d6a6LM%(H4~?kU?mnMN)RIGW zwf-Ss*Z%;6HwpNUPk*Jmkh%QW_Nu!$Zz0yN?{Bp@-pylGNLTX4Mu zCtZZkhqYm-OCdOsk6^j)`fxE-y&oL+S_fDQhnC@^xR4={xNW&0@%E--R`;g5f-P<< ziDS2D;Eq!qGCAgD9oP>8HFYM&jP1&{I5mR!MW9&2rd#V^+#9Q?O1OCmLGv8pe@fo8 zDTWF;JY;yO%gG#m>r(1A>bZ1#jZaY9mYtH)HQee4WMkNWir#c4q4>U=g5u&^x5K#2 z&;mdM9uL3Xuv2KWf7->>g|@SK0eB0U*J*HBbGI1$wW8FQ@YlLOMjOecTbP;WnUBvR zuk->tXSdi^ulT-OJu_9)bq`QkT|;oO$+5z=aB0bI>|Qw}OPg~PM685JHmM!K_O5%= zS{0^`@lx*E$WI(CWF^>GmCiuN)}m+{3^!K2Ot-Sm)|Qu&y2OouTW)fF{?(jEPle*S zv_o$y7^F~DlaR~bgHJ|)%Krc{+yKD(RyW}C%l^j3(@i9bWp#}o{{V)d;{*KG)JCE} z>da0tS2Bfc1}D@3K<6j5R!MmVrKU>SRs1e8Q`KHTBfVsL{{T>sYc^IdcG1~9!6!xF z$W9J8G>Nqnr$c=UU9R9^R?7@yIUlI5Rn?Z$9)an3jmxyYyjTu*WVgTD z6=!*)>GppR^cgLXi>sHCTbYoN>YQ>&`_^n78&%dc{VifQ_bC^xBE=Gu%P}|@uQsoy zemYwrcL=$PF(iqeRejI3axGrQ=d1OdRM|A`eQl{mkTbC{!y1lr;0%o8xvL9JC1v=p zduu$v#MX%|(hTBKKm||!9P^5Ft#oZCS<^LWrjt*(j(Fpcp>_8i>SXdHQ5PiOa!qqT zg*tpTom13)gMTVH{W#-C%7rAH{l+V$-NLiL(7JC{Qmi{<5${&^XXToQt+ZLSJG)4v z4;`!qX(k6N-vvjx?NpHbOSG3myVT>fm`x0g8e5!}Yz&4z!yWyqE{CRRc7GPHETXjt zBeV}1=PC~wIHvk;7*|c}86)()K`hEPujLzBnH4Wr>v=9ZI>T1B(|u`f?iLXnfwUa+ z-`^ZmJMBYNv-qKJYdp%f_t2^cz~vVN_x}Kh8l*Q?$Kf`KA$8pq>Vg0#YX&D6_r+;- zi#V=+C-nWvW&`k&N|^_^&mT_pse>P7H1;|tSf{TmC*qPXQJVyU+?@N7SK6k9Wu|Fj zSaKfz3s1&>bImY>laCbi1RmYu=&SsM0glH`5E(GY_w_{_ss>$sZ{`x$G7c4*;E{y9%={G zF@ccZdg{LcN^R9XW5|zgKl)Y57t;uGhk>5;)xH6h2UGO?Y{));HE{foWq164d%bj# zZ$z^E!GO<-((Y{k0M@eo9#}%`4;WtF^{0e5Tr-6|$J)GJo73w1juw*`RmeT)qYmeB z0C=SU;DX;XcEGPP9AH1~T^yc;94Lx5ATOG6QOEV<=glI8?%6r*UMhERPCt66Ckz2Q z)Q@kiAj=ZF@W65auP9PU@_)5>#=H3>eJX-bakY+5)|qhSvK#x;k|f5XfqSi(M8wHi|78eN#8vj^H!c6aN4R zKjNvUdpn&Qt%jQR;}>zbSjN=!!BSi54>_g{hyA4JKf-TyASY<8ZUhcC5J^3b503rn zA69FNtn}^m-jS)x8rfh-sfFHK82P*72gOqLG&)NBJlC}t@1Vc9yb(3a!~&`ujj90c zf$zzy4yKn$x%gSA-|0^qEyPhIvqUgimtg0Q6p&p_HI|06SJ{8krgzb$k=VzQ0|D3$Dmt~p9I9|Loc{odmnnU@1CQ-Y z2^<122?mrdtMoIj=#p7TL~h7c zTmBM%>x$+*C#$Wx4w%+8d;Jekv_6P!Wzb5)?_%@ww4w0tZ+qMZH2}4Ndxns^rK22k~JY3s#4_I3r zF7I2ao0@`WD4htu8`L~Xzvj2;bMqYUU*ch{P7U0&F|lea#Luq>DodA4bFQ#RsTxzN-b#zj}?Q^%jq*-Ww}DMj58sNmV)E zdsLSN9=D_F)AW9#kfaT(UdOrBcwF&>2hJ2@n#bK}-C?Qp)`Z7ShTdxnq(rrBl5xiO zJ_j9#-nzu-%@Gi`Bm;w<4GoIU>dP@Df$nQ_b6M60t8BWhdgD&i^xZbzSwyEN$unS$Xk_p`Kj=^Qa<@9abo{ zXhBsGNH-NfnF#Dh9jZ&?6_tuDUN49mM3<8ZZ=`^&;D#Wm5DbqwK7U$Hleg-f3thR; z;L?&y8D}$#lGsH?JpIpZD|e^q7Phzd8i8k)?p`F8M#fRvUqEM;*u_maCTQ zI#*c7@do1aNs1_~uHz8?CBA<}cqHTgFKRxy(Y5i_x+VRDNp)>*q)8)Zf`2nN-^MGZ zvng|&o+u_LZOD-b9@)(&By#;9MAhZ^jdQFEWrEUpw$2@Ui~)uBJb!vE65p%zD@{wL zY_z1gf;V=yy^*${N6E%Ko_kkIf!EWGnRd1+QZ@!#xgIIiM@ z*6ekS4$do+X2rtq0G>GneQTs93N&Sy9^I*83l+8*v*YVha?7Kh>6SX*QS|n!s$AK_ zrs_A=pM-)bGxDmFlHXEE`(mQ>m4{Z|**8yXmas#m0QW!$zuP!TCBY9Wt_V@!fFqq+B&wG4tDGqSagq-U*yqM7N5q{o zsBd(wD)QFaIqqy_6UdN~cS-C4?x6Rsj`9KW?IR<#S4FC6lHJK^4eh&1higV81h3N> zrN1K$sc8D;*TdVvJ1xemb^O_2186~AAY6DRs_*)fMzga^r@BXuS-lATF}S;D7#Q!(P3?m}@ZK(}u!?OjS+s?1 zZ2=HW8!skL0DF(ELg~#HT(=!fZ+UMD+g`ke4TNrT!1MOzx)#{>hRNri-KrdVmY({H zX^AraF(VQ*S^0wJ8LPiTFYubvUA55t9bOcI7;_vE75-?!Ic7QXPZhRgU6~5GJ+gSF z;|kJcP=-DEsUbk_N8Yn;tCC-69b<3tGEG-cx3-*ILKD_={J7L$9FKqVQ~n`nI@H>a zPwQGtLgP}gkSxVc(BSza!uQX$bb{&}fH@uMqRYQ&$33%J>PJ30KT^T*H&4|{M>Vvz zVmrAPghD%o{c(?eY2KXEHTZ41svFyA;{05awX-W^yA>lKdv>kz<7v(x2bv#RIG}kX zzzPp1umjCEu*&>1u+;Sakhh0bj72zNb|1_E{{V~YTIhCd+)g}Zpy|4uuA^+Xc2g|Y z@13rPgNm2xp>91L zzwsGk43I=UxxvZgoL1qAdZXvgJFO9vBV!%@nX9_Ya~)Tw`l0$GLDOQ6*xp-AjWX`r zwMQUl{MCi-kFLqq`UayCD_g^E%W}m0tIs3fJd9TQNuy))F5o_u=;e=gQQI`3PNU|R_-Rv_g3`~koo`5o(H zadmHJY}Z$EEHTCjXJdc|*0MJpb))JQI)mu5E#2L$49tVC$H9Do-^O#IRl57mhW-C*~({J~%zAdc3a5lrJGTFNfyBh52i4FoC8PGy%S@m!>Y#jHxsU)&Fb%b z9j82f`8?K~JBDq;9RAeH$iOgBmgfV;Y1D|CevPGA>9A?>+Qay3Vhn2B;EuEpiabDGX{S5V#SO{-sO4z|$RpFl(p%NYQcVrbt|w&|{pNo}T+ zRGvj=hsl|aSf8+>;o+&p7QJ_*=Rj(f-p$`07|rV0At>%x<5*fMbRO!jzZ;f&gaV$lUGBI;CoaK zMx+99$C{A`5d-Hu(@Gd%k}3GR5A#^%b*OJqANx9C{{Y(C{8ep|{{Zakah^m={wm(> zcUek5@DOUI*Z%-`5OkL_INmtJ1i#Q+eXtb-#p?_yiIImqIH71@d4Fz{=~B15|) zKK-lfzHql^BYp@N#wipT94R^TlSPuOPnPyHi=D$bIO7I^$k3drVUT-+;<}f>P_f)< z-!?`gM{N3l83lxRB=#9K!}2|q-|_wN-}*$W2T-7JF^|@? zsF*x3APzHGev%myF*`;EG3UK$;8ZE|8~|~F=Da*SJwC6v_Qc!@H;!{&X!eYs?b?M^ zdxM{*X>yykmFW8%RH>u8N$Q(JC-Qga^KJ9`*3WW)eqMVTMsh|%W7v5$ z24-%I(j@9Rb<+d>B3SmwzMYmiMfj0J8V+zv=;C=^G}jC1}7)$Lb1I~r`y`;8g`WrPQIt5q^s*k83p4D%CS@V zvU}4VIiNI}*0rl%*b8fIMkIkPSO9%6FiQczC)&3p3*k++>`jw`eXlJ~YBU0N& ze4kIh`S;I%)}$t%V-}n=)8Q>;{{Xt> zKizSUm)fln5Bxa#`_o~|Fjin+Zb%23H8S!JHsgW~3SvOYga9$(vRcPg3q4lXO}9v9 zlHy&CT0o%yKQa9UY-J6wf_OeFiR*nmr{4TrveTxC{{R6!yU6N}RI`BG_sHx%^<>m< zI@320b@6 zH7f|3JDZC^>dMFFY@8B3yw%rA^w+I?nw_vP=})GMb+>$vP>?r1JOlfl^>fO!w!KrM zT^Jf2MkDx)h?JZbImd6kOum}t8#yO|cS;A!hs|=&hJ7VztLc-=aV(L`XR90##Qdm# zDEc1xuAi)ixU-fEa(A*2ubgc)o>K=u=pLogv|C+PT~_#;iFFoD&z60YOT>d-u`bn)b1xnbK3dw|sz-g~v_E|sv< zx6Y?haLE^ENXgo#I0R?>QqJn=R{cYyU+6cQoEM|;u&bn!0gMnkkAD1n)BPQ3XJ^wE zP-(Ng_P(Cvk((^pc_$r&Gd z!t`ae-oIiE#QT;#UQ3ClM)}z1g&xD)pS4}sL8t!!+ST+@+eS*;Ea|y;4sZ((t~}Dr zH8DOVXbq!XYOb?JV3X1i5DU5P2>N4^d{&F4wX05{(^F8g+z{}{L({AZSLW~^TK6v9Cb5`+@G|$GbLAqzsRz z0Lr3ug{8KU@YZLuvWL}@%@SLMU95jEC&?t#&dRjV`o;}Is1r#14EBa=e_j`#0JuKD zbK5k`{dcF^>YBBUy|mNZSPYdM2>|@WdjU|jJvKQszLVDBjfM5JmhtCy35~`GKJ;3B zv`_JaRAUavn!$IRe5_6fH6)P_buUr=KcEj$>9;1`y+6n>aQ(1PjAo?h5p>+PmUrlu zhT_^4n7%-GvbiO^`8=A)rjm62o9i7s9olR+8||_ECjgK>*%Zg{xU7B*wf(#vh3(`q zi0w|^GvheC4dJ);|4`P)CYWa@7}I8T}IlU&Xf=d|^t+KPXyeg*h8!Q1)W1V3 zFJEcnMYh%Bu)LSm{PCa-=`K$!M{dTgH4dWFZ1i~bCT6)80KK$GGSVDpf&Nd5!sxKX zzXxt4v% zXRb$LOWaTBw0pRWgY#tlg*`9CEiyey&NqT_XLQ>V>BiM&_gwcMTBP`gquF%#!)a~c zW}Zg6jzc2>g6!S#*bG#Tw~pDDq&LGDQFR_yZa~N!RFYL*>3wZysdXvR7S?!659G-p z$EZNb!0rY)&zfCpt!ykc+kG%YCBr{XAy zApW%9L+P>g6^+2vM0kr)d7V+P+rLlBKg}^3iTa|_-unLA@YfSHs)80Fxurh9c>QVa zi|Si$qq3G{f$i=fa+BN1`J+BF_Z3&5x{rx^^dxY$@i1JB0sjCxkMOP0?mC817SP&@ z9OpZ@@0yv|Zk3@b0}OoydR2>HXz+j1gvkhH+q4XJJC2g+ zVzV+7hn`EG20q-M(zCrerSyze_I+L=2{E@BXJSEq#F6Fm$JEug#A`IV>D%q;9Ey-M zh+T8Ej(GO08Mte1i|J_HBWPv547h!x0Ft>suX?g}HGkDQ_woLR)$TObp4xi}FjF{i zK^Sg3a7{zjB3o?(qHz;BTZq0`#&ULpQ@Vpfwb42wsBYtselGnt&g;U1o&frD-_13v z05nd9o<>lMbpU@*;Bz0J+N_dQU-bU~SjDMz1=fvqXDx-yBYMRza=uCW{{W>2PitBR zr_~y}+1oT%P^!l)&IsP4J+s&YpTBx%U$c+>xzaUDG{nAxi+~6K#y=@1+a%QoMH4Q$ z))2(ZM3Nnfxb24i)g+jUU031+);H1I+i3IMX#vVfcE8kfjIV+-`_`AJy0cEuwEJtD zh^KeD2hoKSbFs#8j~`!Z=CWUt@SPGV!pmuK7zv8Y;5(RD{!Q_+n!KnkSko85TpFM@l7I#Vo zz06G)Ip+^Y9u>;++UO?ywi=$@XuTb-|CaRHJs`G(#OK<)nkTA!}mB;OKZhUW1t zt**4~k+C0`6Oy2wIjv`W1Jzx1WvFO+EYYlO6q2O(us1kBaCq`Je~Q!T_x(Qht*L2I zTFmn|pbSV9$o>;{;<4@N9vB9p9yPX zvbxE!aJ$$6*l-R!{ppiCDt}IO4y?UNK82|0v0J*vX16dAvuOVSKYFRR>rSB5^n3KR zw}#tMf@uSnA1p|{fcI|nx`wX{d{ee~*g0zkjt2+je4hv3JaJJvE?65%>Zs;*l6dEt z*a8VFwBY@_($3paF?!Qfo2PWbZKH!x)R;#T6P5bDKHTD?bYRqU?z6wsZgqRBxY{8y zq`%b6d;Zl?BzL->!|fYV)9vJYjV40ssynDSIQQ}W>e-;xC+kfgQkns7_i&hAAaYbF z-MIe!s^EvB#M5+FSAdBaH&cFJBoUgKqepLTsC0V;NaHqAjBoD6L8~5~={;Hv6JOJ= z?jVhCrFMv?INjv`0E|?j(KeFw)}5r^OC!lHI8)U-jLQL=;LW5U>3B(ey_ zN1ptj01j%eR?ziX^xHe#30-HF`b)Jr17Lo{_{IRORa^!TlwR-R%vO)K(Q&vW*xMLo0jrj_|c6ye)| z7a7fFsN=~jN%K;o1pL_mRCvdck2HWu0K>5At(y7KV&oDgLscgMeaS06Y8^4;)zQkCB>0uhgD@o*%; zUPq4NzQg86hBsqy0CUftDbf(d{__kEwIjN_DR{#BZSzMWggyxKnyNtD=XXB!(|!al z>#am*KPnUS-uAT4{V&hRD^R_(ucQxnyk7alCJ@MZi}Boqh1EAkAFeobj&Dgg?-wUSf;ili*XNCPsM&&-_$~ zpyAjIW}Ie4QZ|x3$gZ)*yo;Ve`B$)|-i$kO^`%|7;CSurLYQ2YUj8b;SzI5K(tW`Zmw?RI|#K+<`UhDeSVdzn16Ish>c_ z8E{JwSM~3oYLm$K!=9tF(Bf9rBE6dW^HO_RZlwooXFrvRub9W zPR939hUqQlw~i6LXJlM_-%Rt##%imsTU;de!gRyOB8CJG;EaPP-gsPPmc{bro%;NVg2_ zz+?3P02rt&I{Q$T9=+4`OM4g95rUV1HUGhHmB1% zwYHM-J*B~Et%H2Py7A9!4=3KQf#%k)^sNHhN70MgUL}*=Kk+#?yhAutKID_dJ7KPC zH=SA2$#E1m(p_ooB=AcQ^Az?x_XB~&HP$xTq+M5~>y|nsdW`P};pK^s5S`f#pQk+1 zYxbJb{7TaGC~bXZYaG3 zvHYVRE3XjDgm6!Rn&#aHp=-KVQR^R$wzn3SQ#A3;8|HQ-4yzqqO5 zf|Uu8L1KM1$-(k#ll4ZQ)RzAM5^eQOD^H4aC?XQbaw7*jiSjr#)mk}HIShnub|(YC z>?(s<*EF3q8}WCMB&380IVF!_fyt`NBhv|+e7U}^V!@nU2z{wwMW9?9* z>8)~0j;Ye3iDS07CR=%BZ}+a_>Epe0ds#mbYXpFmE;|$}{f}yv;UaC@NF)lHu6ffM z4yUPfW!{IaM-`c6DH0-btByBwf^s}npFwGzTc_#sS?bqT?y#Yb)p5A6@XX%c0Pk8k zu3@!V?(SvuqD3t3dwA@=eY=xcjz*ATBW*1J$f^&Ewv}M()IMW(x%*HAvqa5jlTZIuX?wm&G$yzX_{QZ3yUl@yD{=R zwA8Erx|q^=hXH~`iD?~?$G{b}C8 zl=z3!@6;M}FvE2%ywOeSqo-hTj((L3qPlMTPq&tBYf_g`yupnbiD4Xm%bt1TKVe$= zq*#e0r~|(=+;SmDa6UoFJ?d|49QQ@&T}Mmm8TAb}Sd!Ah;$K!s5PZ$g**jO>79jCVK{cj62uNA#|xrQgEZhP82j zWfFsJZ6V?(85utK916UTQYBy+d_0@ICQXmhss%^4+XaH0zkz_ThlecK-D0vh7A|UC^Pi_^rhsz)Lj|UpTJ;KHl;n&rsRQl5gI8!@aMnQuQeMrytI_oj@}!4Nac9UWR{TQj{S!3-W_b zs$(mvbevj;N-m2op>prMNNybNV!-lFe0@AqH{B6wc@CRyH5=&0oS-4~KR6tKN%N37 zt%Neh(FSY+dr~BcB#ayKpO&W5GgEZ6wB11#xpQ$VOB7z3NBtw+zqzH;xCg==s)EsSKJZq(mN^ytv$)Z}^0t$A_04!|#f2fw-V zSN3ze+ei3_!blVrP}u>!v*xJ!r(B;;XzZ;me-(Lg#0j3{cPH2iZSaiuRYRz2eRpZ4 z+FeT>t)xpe!HJ6OImpTX0Epust#1?Ng;kDnaBBmjbv~cM`Z|P{DFT!j^@$zN z(NuTMZsGvPk-Km?Jk;fJP&%sC8=WTVBFq%q$^QV9)<;3PKP^kF{S!$Fl?Zxb~>l+Kq+f z#PLOQ9MVGDWJPS1KU!$5(0X%E(KL~4B%110P^~o0xtI>a=m7CmwtYEi(^p!qx2M59 z<<#h0Tkgh3d=IW_+p4vA^lc^YUNG$u{#pZq#1l7I9nR)JQN295SB-eQI>CJVBdj?Tk_hBxr!_PjOIW^)8CjZuAQs zHO;_^$-~A(m~uyapRHL<(%N0-t7WQOLS>%HFp()gFSsOmBCamM5rClQ9m%K~z18)u zog|h~8SVsRq5+(+6?V~N(I8Kh4a{&yy=S_{_f^y3R?@Wi?#|^hMU0j~`LI3lR@xl5 z-@>AkQdC>B7-=?+17n&?yR^4~uBI!TZCsBhu=-@;gCG1aOT5vpw0#RoXull5+)nWo zSiw0bp5%A}uKKH_^o?HD$}8wyn%fH$h{P}h#(j-5(j7%6y(R796^_mVAz1eO+rY~b zf3T}v0$b~iDPL4oNm@o=qm8MPv>&c9TGeOzuSE3+PieNXdQeArr)n<1^RM+MTdD9H90T<+hEa*d@@lvW7pzv3ZDJ%ujs( z0LrW;m17=}b@{(2JojypeP0pin_>0E`C(E^CQ`{*304KSrx=r=ArEMMEtcxrzxkgq55l}kM zS6-jdXVpc!+%#YSqmJj#(zQ8T;TK!>E)OTi6;q??qfoV)f5XXck$Wzw?(om4Mk9%PNZ8%9pKRmwtDD$#4vN;N*JHW8mN6Z; zm?R7RMRDBy>rb?8&JTLc^c92Y`XZ#x9(f$eF&(!b1L!+`m89NJ9jaI#kBk6w#Xlb6 zgrENa0I2bDa1UYawBQy&b-pWwmgZtN1_!vQxQu%;8Q<3E4*DXZEKQ8b`<_EoW zuYmx`d#R1s{#y9+lU#oz`s=^r`{U>?k#*W~ypTTLYf%{<*eA7Rx;@Zt0eRXF>yuh_ zk%HhU&ONKa!RhsV$9V&2CyxM85H@nB6w)~)u^q=4@m>?=V;0f~0OQc_-SO+)p3^GySTdLXtL~4stO>2?uW(?MSbe`A6MPit%lZ z1Dq4vilIwBNCJ?8M$o%`iJ~$T0QViK33emT+JU{T+lC{>SL(4w%9EHQX`#zRT{2YT zoEn`x?t_3g<2d%Ll-*w)#=Aa=WoI?@${fj-*pY#ZjGpx{R_dAzTE3}osEvsPB)8$! zaIGsIWF&Vutt>WzH~w5ny<^%BkUo_<($XknD6s&o$r}2tTbCmbt~rdp-xL}&p-31 z8Au}aT>|bXdGmwZQl+Xgst^b*labHg??=Lv}ZU!vv2RsrwMkyLJfi zK!j|KHkRRlBNYAA1;{{Y3Uv)sTZJ?V1CfU6Ag^4|?MIls4<}hTi2Z z+4fsA`Z33oy+P;dO?Ih0OagKTwR0Dq1M#H0I)VH*0f`;KIS1R`lJ^%~3)C=m9j(sb zBDlAZi6Q4Pj~Vy+pL(w9p0_$|>niD&7g}OJr!DG!Opg0~@!RX-sz`edr0WBxFp(cl z^}>(oW*vq^7-a_5@r!{XKCnstln4%%^bNbmzV<0PKlv0a!dv}=M-9D5q(aLom;#yg4S zxQ!xf02RxC9QzONQ;k&HKDB9j4(nYuBwpG^-mR$MhWGc!{EaiGb<1iOnl-xVu3^8o zW@!L95-8^b{v7d>^r)A1(Z{HDUG113f}YsMS$Q6=!e1Y5KU$senoT`=S4+2+>fT$W zw=NeWcwNUQ+*3Ph>2&_7j((rkb&EYDiLG3Mr{&$WU=}{$cOJr;yz4DdbWJ~7gHMgD z3aPo1e=NrzDi6OIHA~Wt@@RcyYN~{K)3PXT{Cj&EnXX)=x54caWpN5Awk{XI&Pe)p ztBAbn-Cp)v+kF>Cgh_ooKD55C<%%GCpQ-Ex7es5ih2^fH4zFh%4X4ZLyD9EK$;kQ) zbK0qOolxlC5^S|gkUxdr*?K}?GOF3g$sRj?^$VsiBsbk#rm9IHxJfSULkxO~RPr<9 zky95|(e-WI{YzsK+rt)h#6PHT&_p8C)jQW-Xb1Ni}a=N@Xiqi^H4v+Bs*7B)Ax7$ckj zrxdViJ{@ZpT|=Njsa;u!bCfElxgE&*8qx^DtGeeI&)8No;SJ@zm&5yO%d3=*7?I4b z$ArTZ{wr&1duOTGrPYk7aRkeiW$ePGo$4`KUs39JU37HIizF7o9iwnYLL3lJpQb7| zT=iZ3k@YmhPr_N>MQ?V@H!~I)Dx>L~ea%tJzT4uws*{+NBiR?t%jHG~u?HB%Na*XG zCM|bSwCWeza~<E^VmK=WkR=Px;cc-+zpz1kn?4Da2*lpHg>VKBRkC)}} zexjJsdZSp0|UO`+6pVG*|DwCx~n2k)QSoyE?9)H8I}DIN zT26r1G|MiFxwXE&g(iYY(nVvm@=iGa09u*!Dm1%|cS`tiX{lP6{RnOhmnfV_<99jF zAAh}c{T}MuR?+SC3s&6?#9~DE0M|O`eP>FS;jQkMCF*`HA)VP9^DgY;?^|z%x@E?h z(Ry`_?nLPpTh1~`1NNsO?ytPQvC-w1PmNwxQpC4z2l>WN=~#r;dYWE@OY3{M<6Cge zEyRZ}A>MGKj{_M#dkWrl!dZ@-x|a5KhA7dWa5(UIu34vQ7Bl=Jzk%*dZye1V20W=_ zk=yD1D#_bLroGg?Sqf;nv=GAIiqUs1X#S;ZXyN3mUbtCl{ z_pY0vPxzf4)^irdcu~p$fC*8>bMK4RMtxhS&#%woeKMtU{Yr8^wVpoVWvlBt)O~fP z=+H%MY_3t5vwmWuJC1x~upVlw;s;ggsTJh96x%KIyQGHcfCS7)&Ihm<$G7{XWshHKnjX}P_{P_0>mi_SavSHMpU2F8j=h7I*(Po(zH!3E49=0T}sf(CzUcXykHM= z$*W$37uT1UH=3pPpK);ln{WH3GT1rX$KUDqs^5rLT{kWN08PoMJh5AueN>P{LhT~~ zN%518J*fIGOR?P4tx>K2UlEJ=syr`ZZ%kBlTDh7Q$Kp7#mi;ab?Wrjk;y8%xapVjW?rOu~ z_4c-U*ILo_E7N$E;4or5ljGa%RhpIL3Gr`DozhoRY|-aw2P{d?+qFvRd)Zgw_PYvO zNb1q3c?ab}G2h=b&dT2P3|Eq9cYlMJL2>CZS8jITS19#0$+{xXs3X+wWs^#{aL9*- zPk%<0K8#yHQ z0esZqI~A?h`i6Z{%SwjA;cQ~mt(s@JBm)YC>@)TAS*f)Sqq6v01kz0{tf_P&5;y0X zGT$p?^N;c>Ush@JXalFUcCecI-oZ>W#up$jzCQl|=~I3sPd>Bo?km_d+0-=AOX)J= zL|mK_2i$)3WSx-TTS>1))O|1@yi)fd#A5`A%VXwpJog-ZDa{{ExYNEO-B|ws6uP>G zOCdGDnfGpTcp!MiM(R7QP4D_L>GX@e>o#PKS934QIM4U1TwJfko0(0sgc?x8e@mS9 z%{~POMn^PX54xsHN!mHZ#N;p-DnJz^>KcT771LT#zL3kS6^co-wbea`*PLdc=vJDr z=^mxiCX&Td2g{580Snp7H$>)q3t?La}TGS6u zYF4@x=E&|nMG_1W=NMDu53OoVyX##~Z*s*qY1YO$8BgVvz{-z&5A#;;cScSdZF8wL zoj+MisiL_TQW;TSF+&Qr6n)Myk2Pu1x)rRQS=4ac%-1&JWoM2G-7|ckANYOA{?%)t>3WZ|!x zbDaIpnz+{VeLGvU{63%^)P7K`SNu}>8SbCGhE9hTFcg;JEjW6cIotA_bMN%5rG>4< z$HToM_Qu@YT`kZlEtp>^9(@fW>4b04DkYn<*cpqWK zSzJUVF{7fgE)EYo_@{$MJcc!Jc5uLdPtvB3X=qZ}bDXw&)bKa++n*HJ!U4CO3OKW{ z4cz&lkb|B(9`zz0l!o@G>V9E@dsDz*4tv%sQ1fIYjogqxs(WvnPlr8<^ZS~)IWd!t zFlv_DojM$M+y17x{JMNTBR<&&9l;b^S(6*Ed;KfK{J%mu`q1!7sZKoL*VlaV<%T%*T%1(2P&=&S)XK!^?JTft3Eq(MUBo_E*K1QDTH?fkU8>wsq}dP$!rSi z9BFn)SY!Y_vtDfP`IjV))!@<}n2p06b4iC}EOG~oc&devI9#YX0-J(YlOdCztvFUd zxFnpK7|{BhoE&2w)kBKX5O9OPit;n>JdESDI;zSt2;1#I+0P6?`%w{Z1Ml2GVg1c= z6psG?0Id3zG+UEvI$OwtI6OrJ0EvVNiL`9mk4AS=x0a=CjiJPM;LdZ>Z{f?cU-)%@3wNcE`VO-m{CY>(l%( z>R9jfA#pyPaBW1WFl0bLAfK6=w_!y~{{Q-Y( zrFAxxx9YaKte&O2INU>#mM!}6Q?yvO_*19dY27nzCDx@ia}^1>2pY9sASY z6z=1;=)38f+6%bSJiK!PRk`y)(Kh|VjHCDFADW)BOT2q#qEz(3QEB9}q9)wMlX>+7mr z%V%?^I$pYxsS}*-5q-GNx4l?&S59k?D#z3p5IhzOBvz>wNp0f=iTW=9SGp&px`xKy z^GnpV6#f?)NS2XCGx@)$`QzK)H7Lv8HlUvk^zBu3tH|ykz4YMpQEXJAC=ay>woJycgOZtsbp5tPIW? z<>T4UkO#2wRT_6kX|U>gwdKqaPIVVa9u`rxNaywEn$qZSi&)LPYO$a{C{ch1+MSVM zICC3d;~w?P`iG{pdv2!wEqQp*#mh|_NuA@u0pA4IOu6DSweSa;pu5xTE;X2MZ()V* zqRK-f<0C%9$*3#(V@Z_SUteCpQtMJb&Ws<(m@2A2`aS-psr0{xX%9_mIxM<`k_)Z= zk!8q$6PyA14ExsF-bq7Ak%ABJPQr60_9To{w+A)qjT~vZXG`gU<(X{tC^yR>{M)x< z2mLiX)*E>#5Z$0|nRNcRlwVc7i*NQ|_6 zvtR+()s!ubx>;DoXKn&p!kZCu*a7vdpHlQLkFQ9w-6WAs8UFxz6KyW1`cKlL>OE}I#KUClEc(!VZO!spmk)U3;B-HGf0tahc<+T<^HX`^)H zx9?&=xGykX2*}zyl0`dtU%ig6qDeNN5sF(G zAh$9FAaWZWh&{Yhx)z;td#~K}1?8hz2`$#z-WTFQ7$lZ#b|>4wtNW|ldst<-x}745 zB`nNN00W%U7TTnDT5!F9JdwHwiy(sF_Tc-|S34`TK8WevA6A!Bvb9vXlk%2qgwK7x z)OP(Zqu9@PHKn2}UOP1W_XmPL)nzl?If>#$z&|%Vjd`Y0%LpKm+-Hz#>N)3KX|pHb zdOt^m+gaQIs;iTb4qHD#$8%fl7frX)w74v7$y9*?DIC#E@%|mjtF;t*7fNXQ0iNSew@9R%0O!9R!`g{@JEvyXA6J&e9_l~|+;XFgWDeZ< zt9g}~a2;`qL30do#^Ol!fr3j6XYWm;7X1xwHM>nWPt#(9OM+j-+)Vf^=Q$CtoDv3p zg17I8h(LKil}{Kn5M9L--kg&nyqv1!WRHAPv$9IdjN?7UE!8)E7UI?6mLzo}ki#JU zl}R5?=&|S$X+uweY~{}Qw(!R}Ca&%zP%y*Ce%PwmwF@hVWiZ{u!WJtdZs7>-54BAI zbX_3ZTU)-LZ|K{Q@JFz1Mjk--r{}S-yL5Y-IHY(Vd`!b7Tij5-p=8>8cLM4!MnX2o zjyAC;wtn<*-dhW%w){kYi7U88xNgV3JJUcdm6F^`FX5BblM3z>WP#%~kJGvk`l2mQ zQh6-@020%SNYDf)aUe17M)GZ~GNtQH7<&0nziedC! zI_CB*LQ5DL>MVqnkZu)G(LGaT)K+r6y1Z8r98U}WWHoaxr)dSmRx;i~mvV!$E#q^2 zaZ|h&6J$vm0g;~+&Y_}dx`W2|_Ez)B8;3@C!61&+T)Os_{{V=T>K5LWqwSTqj3M?X z-mNsPPTNp{F0AHBVR4joIRtytx#XI*kzuP@i(Nw2E0{74#~B$NsNl7N+$1)zq*1Q{ z6?TF46!S7B2w7K6?yOQV9wbS3zk==#~_W*N`A6hMG=R&LwagP-mNNv{R z*I1(y5r&g)7>~U-ZFz5XX%+RnzN8ZUIpmqH>O3>$*Q@$UGxxaKQ$LD@P1pNp0t0f%*8#p5dRfP+wAy}`>GI_x@F(HXi zy2gS=A2Hf8Pt^IUy+d8oY%XmrF76d1yCg;S1G(@A)|}9_{VQ3ux|2`2SrXPll9Te_ zk39JL)b1UHgGI8NNzfxkih1o7!DQG!EhKW#QPbew}ftK2p{9?L|U73_(mLq*b&7+(zM8QyHvKc^%-|FySFUt zc_g2$QS`4~>H5}(e}2(TZ6%6sxrnYx_wDc6nuo4Zeu6fd0I4$7c9i^2fb%w>hzaG)9$S-t7;G7OgKGhpS z{6+r&LiWa?OgB&KHck5VlS*USg&BclF?+5Etsb4e6GM`h4=Q& zHFv38u7zp*gtgRfwxn^$cL>~!^N#o&)+0mJFEp=&mp9guW`D#;6`PHje17%sh`0K( z!P44_7?w@S#DaN|g8u+dwQDqNv=}e+tupQUhf|92Rm_4?yLP5I$ouA^YTa3-LNwT8 zlhytpH$u^lFgt$d0~JTBUf)TnSoHR+t9thkNi6<|+Kfg*FVGJB(;B9cZKc7|c02ud zhW`K_qXFmUKcN2rN>B7Tb=BI;mbzWVwVAaN-#ijV!zIG-qyB$ULC~<@T6Na3Z+(9> z(lR8hr+`a&$IuK>^{wr_)z?nTWAnh;mQ=>1?!X|PKDnmgvee}2o}{_53LM*_K_hXN z+riI(IjA~h4I9Laha)}5ns_)o9tM4@G4S%v?#HI$hVtp|WL)w_HjiQStt4403!YDE z2?$RGPBH!JM0|t&N5w>puK;A!sIU*pFe?`8Q73^DV;*XX(0}asao-30nz}qf#2+5j zO=>^x5&pmCxlb;i4~QecImyq`lr|g?K<+uEi5Xm;0Uq9Hw>B9G7|%8KA35jF0X+R^ zfh2?K5Hp%YWzQ(29y|7;2%$zFIq*DDN|`xPBLlaet#t2!Dx*-p4nh7}j(l@m!XmqX zBM0hhrTh#EHEly<40RJH_#m}WK!4}0ds(UrnJ#aUQM_s0DgkA z-5!Nxm>dj%f6ZzcNXIMBn(%OXeP1ANSF(;hsH83sD(ArBh@nAxp4n-P+jvx1>!zci-!9FO45;+-3z}kMb2Jc6( zy40c6uC3Brm@=_Q**i1s;2*77GL;1GS+_sjU-4<4y)YNMyu2o_t#oIy`HfINpmAf zw60CWU>}rwbMMa;Jd10Iqqc=*nVCb8zy$ZFw*z(~Qb8-8++w-RdW^kGuQVEWS4m>l zw+0yP0*uJ3kDDjHdHscT7_IXfG>i)a$sNrEPng5%0l8JNM;G%;n8*VaVqsMTZOZq+ zsEKIe=0_3`jJVnd1M5Im5x1sDZNu+g)P%+_kl+*Zam^Klw3g8NW=+8479Gd@=@kYs{i%VA#r{}Czz4;0J$~z{F1{m0XJD37HLPts8{6dM zV{!ig!~w;1`ISgK_O3Dfa>q~jzb(wyGBhs;00Ke%zP7+lRsDz_KEGPj7rkTD8m+un zmYP9{ZnXPUy1bE>Dgi%{ey5Y4YOmC~lc+@9CwXk2hl^1gCxwOwsCeh-2O^_tm)dTP z@lx(yp(Si??c*@BWXHI9W%JMNPy9{SY%TTPn7NvFhSDj`#D~*3*|cE&{*@%~t%pHe z^##m2g6p6Lis_<)O}O5p&ON(SzOaVm^!@C%Tb|}PS>pk*^8u0f`%v`mu^z9d3;5!i z+TDrt#G`9C@YwJTTFd_cc(9YwBXLqWtluM)bjG=xlpCD4nJSg ztgP;}JqzLjTIv_KS5~@&3K69_0rAc}clvp(zL7IW(KA_q{*=0=*(6>Mb6_M_Yz#3xRQSGexi>C?gp@=K~e1JFBI#1hzYIxQv0H?Od0qFLk!~ovi8b ztTM$c+vX1CL}<7GjyX6ZitCOdirqNeKK0FX^jjZ`n#Ag?ci2Yz22M{MziQ06Dk^WE zS=6m{twnVkuUkvhNQT``*!?B%@{DBT-lp|lv#L)oN$UDl>5dD>^x7=oGqyYZxu_a9 zQ%RvusoQE=P3^;K7t%Z+Md0uO&u(d+s)FyQbPkzjvXbIGKJZT|iEanV{dx(<>fpH#TNE<-EL-39;(K9!jG zm8oi4pTnU!(r%)fV;tU)UB)mQKQSG+{{S_n=*ye!OGJX>Q?q+L>+&NUwg+N;vst&W zPPyt9(JWw!BA<%9nQh@M*;$X~KS5X>Z$Y`~zlfR@<+itVDb#K@Tr7@Ga5?=Cw~Enx zXtkDZoLjp%^EHjcb7U@CXguTlR)4H-wL8iBTUwJ!nj~AaQ3)HIk^#x~K#d9my@lPy}^YMpOx-Q=ilq} zpwl7Nt-dACfBYs>V=bCTytn~zwckYxh@_%agRWd+s1xe9~d0fM!RU**G6hxUoN9KyAm@&953?1fq)7> z$u)V?mY30dMCv&&Z01`Dz+|1s3O%v!=k1EG(KJnd*3(_-#u9GFL_ehTg;_v7WjBmv-f#dV})-Lx>^at(8SsnT`b zRz8!O*40vLS=LLGXW}oEWRgDBGUuso8?LQ-e@%g;BL?bRWkO@0wMuVxzo;XM=FQ-e zc_IujSq?u+<=q8fljyxo6~jp?iDUp1jO|l`e_>r}@6l)onlQ_^<`}L|qUl{tX?d;b zaN5HSz2iKIa?D#f&Hx`!eDt<_?XvrXwbC!bdpUPkCH;9 zeAP{%Xj649&XulP`l58Xd9N`8KY1oOU_q9xB(WIy~xq zD%vzN;(UpgSr08JIV{-sG+jHU^&sh+n;mBQ55>GDBxD5a;Ea>ztyKrZ4F(B%TS?Yr zW^1c)DxnSefjGhU0~KG>8XS7h!~I6uYk17hcQK8M@+1wxrcU)hDd9+XVPwd3R@PZyn9eybUWL+Fu2G)rOVQI*N46nnt~>Nq-&8 zex3l@BVVEJO;nza>WAs8y(aHfh{S{~paXW~Q^0fIx3_9f{F-y(4xS&PG<{_Xi@P5| z<7My20~ztnRlSs&{{X~2UrM`?VTOAsk%>Px!G<9`XFhyY-jk=zcQ&8W*EcVD6@Xif zMn=xk{o{##gT6h5P3rE5x7RhxZARNrp2Ap2Sz01Nk$r*id}fHD>YWdpk#>hrR zq@I}EGDutzljgb~PwCoilroJeV~fInEYtZs*7xpt)>eE_DXf|Y$ zaGv1*0LAsKW`m^J=~_f~*1?q{?~{_ud$B%AH91{`S>CjQH(rM>IP z{{V@x^+kgEJBT|*L0~@l&*@zth}s?)lG@p<}D)x2ql9wo%o)6eqve&T8XY>1cJ0F3!^W?r9z= z72Z_B?myM`&uY88zP5)@w$t3V;x2A|jU7)b; z6nV$JPU8C7*47Bt1hq{gNm?S1a674L+ae-NlDlKbD{7$#j`ocU5 z8IVICWzXr)wKcEx70t{sE`>FymP>!d7PvTHN89dy(v)j-i|ZXy!G@VS%NjSPN{nFf znz%uM!5GFXE%4?jj)b>sJabKS=4swBg)V!5J%nE^#C4wQL!q(h6JCj6ileWeqwW7H{eKB)vwnef8D|4eS23h5)}XtG4EX~;ED{5bR9=B^)+H-&fnf9#Ks1ddcpFO}wJAU|$-x{N2*FXyb4p2Hl;+1s(H_^+Rrl411nOHORF)>q$DNs4Y5;y;pb&Sw$+1 ztF&ioXZnso`qx;OZgaTf2hDQ5dTX`U?lnu^k-NUXi8k0Gfbp*+ZpakYIMR})a zaa(je^X*tJOGh(FDZm6|jOUI?6?Lpz=vq%!^mUz~jyo{YJWrg4W7{XqTIhcdZSQ&( z&q~+9GA!of5uRc!+qMFHV}n{PbEI^wQcX5nh;rIv@3#YhJ%RTe1Nv3?883(GiPoK4 zHQZ4MEoTmyOtNPrk;k=ddQ)D~`jR`R(hLD4rJQ8!bQX|qv#xa62dm2Y78ykk>z#jhe+^D-i&u+$p zA3osQRCpqW7DiF|xERKBS^Yym(I(bl)#bK`-NlfSk%P$o^>pzDC5v)Lw`%77b8{MX zJ(iVss>xwza*iZ$<#%`hAL;wl8yo8lF8byl#76|}8*W0%7%3iphKm0HNz*k8b8UTX z1;kr}D$1dNZ+>f<>pDAhRrQ9eqhHT0wD(CA(9Few-RC|p z2MG*khWW9%ze>LvIt(ccaWqVWXJOz|xCv^)5)_}xd;Yam9M*sOM23}4 zK*xX9tNNE#g>-v6Z93#4>QbyxTp`ZTc_e*s59(_dy5d((=$5){S24e*zFVngJ3X1Q z1x~ZFv9>c$6cEQV{&rOWwmrzI9UG%;G)-N#2z5|VGRR#rPv+15yVD;KFC&lm zc9jmH_(Z>-(vnmoa8Ab`UO@e6JNQg%T3AdOCprAM^G%CgI9>iJfc2aKxQ<2#YQ<=J zW{=TdiEI9#xx95xQDJs1=%DkoXZNpsMAfcarJ~lf%g490F=ZrC%QNs4j~+hM&iJ_9 zEOxU$^h@L({EA~!ywYr~i(kzIaOVek9JjC@^_tPOJu{{HGQw-}P&845Pt5W=44iwM z_pGIz{fqohhg6bTX0npe?vpB6cVli*h(;*wfpti$+ep#))(fFt#zeY^x6kG)oO zuTg2eN2j&*qV0)Yqr)GVVD}@&d*Ib|pIKV0*i+1OZKi076o*4w!d;vhSg z_WuA{vA^mI8!HiWDKC?Vql|88TNxWioYNIsYo%T7fuo&SHIV!1)voScW zAk=K6xBOgFN9Zzd^^1JC_s_LuuDYj1)cQG92%;Nr*%2>(_?$D zJi2zko}6~UWMmv|EuPr+8K$+~rO>)Q&MOsw1q20#CE)=)vGyJD?^=yM@h)v4k}Mdc zK+4`QdsWs@rk_U9E#OC%8(86wY9lZM6R6%>Mgzz}?ZyU12Q_oH ztZVvLQohtIWS>y6{3{155gEjbjvEJ&`qVy?(OXT{^({i`Z^T_)K7CTS!gwEgh0y&) zp)Q%JLvLeqXmsQM0C-v0v*Zr_&U<2@{{Y%$m-{>XLJd9cCRs{L3}oUp-NQ3uv8j}F zMQ@np2~$lw#5s~womX{l~eVB{@hwBLv@%RZxTcck4~%WR>egpJZj5B?tq z&)TBD#0z`gkkqwnYbiZTH}sw+!B9#0Nj~26$~N-O{{SeBHyz)#PTbMQy@xpD)*IoD zwRfpmn651ER!EdPTwD*9a>Sgk)9LM7OM)e}mMQ-L-Lzyl2b^P?Uet=qexTE@ZtiU@ zr%7bCU?c-41p6O9rCJF~?QOsHu0gEy>^jF)T(3^*Hx{?{h_d=ccAy#Tq;vc6&1$+I zsjjcIo6q9-ihU`}cM^6713AaPyGshc0f0b}M zpMTPriHKT9a+|T7bJ)=4LbxXk50PB^LhG$>QGF1umHz;Dyr0mxlqLyq2_;8>KEB4R zdV{QPEwAllwYH6{68`}3mom2`o(IeA$@A@3DA~<0i|2VZ3<1gRO;L33Q`zgAL;w?S>0o*^}F3_OFa)uj_M0L(Q9$MZr_odpZGxc`c_{+o2e{f>Us1l zJ7jq7u*&RmfkDYp*!%NVar+&TRZ#+{IVAV@qM2D%FsfLC$C~E7Q`OhgbS|HFro_&5 z0xjirak@9`SC4!iezmjcnYCGTwf-k$wl5ocYYM*p02n!`W+LNN)UC9u2f4hFf+*of za&S%u*NUm>{-e|ShfG_WIa7OQbX&&s=g1W^sqSp-^qFpM?PR)0!EqXlmLL;ceL8+$ z;F9&HOuV{%%1ZJzxP8hH9?E<6@OZ1nY+CKJLS>C|-Mmst8)-m(oYo3nvA(|3bn9I^ zYHAnuZ8?#L%NPrjf#i}Yog1n3YmSoA-dUClyXmBWTusj0XAO@$;8etJBA-uNm$tT^ z19cxV#;!Mv`6Im_;pLKD%EX^)!}MLAv3Jx9KBs@9%{7@}G?v&iOi0Eb9tCuW?X&X- z#VpH)B13M?8<|cHef&`(X5R|=&1JgRT+}UfICPCHL+jlzj7uwUr1=BR1!wv_dXGr! zsdPL2T`#R(+}sjZ9OoDr?fxl)r8GE*0>m-nHJ|D&R`Su4OwnPE<5HCxRY@OeqA&Fm z_uvY}bw^Y62B8_f(cfeiT9~&M@f^LzG5h;t^`>?0UtYENtr}_8X*BmXfnbVP+BTKR z$@&m#&MV=0sn~T!x1rlUr(l-z!*%}vc(jgD{P^*Vf_NSCTHOOpHhML+n93w8v?r)s zudv*E)kX7NbtE?$H{#z01bNV}Qb-~n92TDHBp=&ch|eNyfVz|kyt zljXr6Wc!}SHFHS&E)|QruFQ(0fk6Z>6Q=hzo{(1RS{gyd2h&6{GZ~z zsY&Sc=Sjq&0yWeO-sJ7cZ29rtx1I28u64NO>lw7zwD8vuS??DFep27l_U%{*buC8g z;qJE-()v$fbG5dw`_(5qPwIQs=7}ud_OGgklO^pGAp;r4GsS1@?4^^YdcNOrESD@d zw*ZAX{po_!P`RH}Z4XPB89x!HGcvjPS;~RAzQE>~*7U6w$5YbmWw;;rdl?kO@3c&y z@DJSQkEK{@T`hI1L!sVTUVaLDTXaU1d?^DYd!GLQy=i39G)XP(p7IE;Cbc_bmO+-t z9{y_h416rp(#xh%Q+6I!alg0(@DJEl!9W{`#&LmF`Z|$p3VITI<9EZ~?Hw7oL%H)#0giL#w1PxCc{%%17`L*G*bB`eh$tPv?0KQewC4okF@k=km>emP6^>MQAoiyU zV|UG*eQB^_WAbD+KYH*W0iNgURC%IS1Du~U1_dM%*!HKe0`5DBJPv3;LR%;GplHM~ zfY|3J6hQzRL+-`D!hKd!i)8vg&hvIG|A)_bQ0h90EwX9hI z5)s?SipF)8hp6k920LwT;=%@qqah>p36#Qmip*^Qpi43 z&y${b?NNRj>Be5J>aleyyG+cC19O7S`&52_vg-{t;(Z0ZIPIWjU&OHdgFVoHR(`#z zZ^Q1M)wG_q)^jJLVI-V8e9bXDj1%piO)rZDev2x>@ULCA5y$+CiIbHBZZXdvUeyn& z?PrVOuTI~|c`$h2=3fLp@0k5+!%yfQqPNra%^CGb#kI>b+p;e*vHt*%$Q`mOUs!0D zmcI^ctgmMhUBNsn@^&2K1K-;n76JO~~LLF`nrPMXz&|AWP7ykgl;10=-!2R=6aa8)E$SrpnYTDHG(koVojx$M06XIi?*-*HG2<2pT!{ zgb1-5e=YzBRr`_2`_nxk&;vleN!L`mo+yhnP!qKmKHzsWx3JqvVK&aJszC&u!;Dux z_!j}0$t!e`umifCr$5@X z8vg)9SZW`e>!I!10>J>s=9NsP$57 zsdsl|_yTbQ?e%fTe&deyuCYN4l3TzeR6(?JoDbK%FZhmi)iK)Kd|uHZ^kP`dU(Jo9 zA3rVMj%vf=FtgM3PM4AtwSow5$l4Sr0FF+33X#>l2?n#&R(g_OTtgz@OjU?xQP?T| zpFGv(uhLp3tE0!I-B}lc07OJF-h}rc`~B+d>_%a*N{?^b7^G`VybbS9SL@X$%>ZZDQ6$UeaRDeJzA(=T;PEk=0e-(tHWReUZ7 zY>%Z~f`>=CwqJ}I@GX~XXu!{d#z6M&D>0@uwA~NZd!KVjcIRS&$0Hnnt#n%+i_+oh zdG+|#QFS+PTH0;?}(Sh2(bYEJ+$Oj3{A(MtpY_SJa3tdRM7+tukho9Y;|~5tQ)JDH$W^ zNj1|ou8z=k;w03x=%kboOpEfd$AU+3PHMU(r228OXd@PeGZ&@@0eyvQtGK{*?@?J@ zX#F{Ij3kWRLp+S5h=u{3ahn2)*=42V}eYoPQei}`ug}t!Vu0z~g zUr8vnmmi$B4hMEQAEj^lW2dxz1Jz9`(QW-k-6Llt{W5%gDn0K^Xg|f*_#{U4Nw7o}Fwbd@+4RZQV12xz5eL~Mru!`owWt!cN1dE(DJK~wtdVfp3 z(@vXvW|LZtp;Y%=cER!YsrL4Ew$Vogv}qJkjjD^lA8MBD3~T=Y&a<8++)Bp9ah^^O zx8Av5MD<;a9Vs_VPZZ}=c9tZE(?2^tarW=#y5-uaY;Ir=a1BM5Nz+E59-u94&DxAC z!3OLe{MD9bRC3;>jxLke^eJ!RmGrf<{4AkPS3lMN06clC{=e09TfUjo!o=~}J=A`j zo{?RMKQUi!D{*;aXQ;bHc?@?Du*pdmaLA_a>})Qy=CHW8iby*Yb;zaVE(|7+?J6`OTq6Y)n(oo$mR5=O%2u8P|N&g&L*+BgLE;+ESuq#P7Y4q58QS&FHh5MyqQtT!*v zaw~NT+Qsux?{3jPz&37&JiIi~F~ltX``{87{v8+m1iQ-Nke zaz^Fjli&lIiJ@9+5oldqquf}_a9QM$8a~n&9zV@=c4$rNg3G^YI47J__R_-Tk`_iN zaN$p5^&TmwL41#G`Eu^Y5{IfCm!DY>YBq$gHn>~QHm@3F?4e19L4HjV2;w34Yvn;MLM}eU-+MrC4e=Qq0)uhtw zW3;zep^ait098?dDfr+4A2{4kik(89+e@&v)aTTqjlUGp2#c{KgcU)jXt(Lp`ptx) zQe8soR#`K<3f+nS0F_tSbpp?>YPWjbz=Fp~ltz%o#4NIIAQR;9J9e$j&Zlp0rwiXq zFklKQ5K9*92ZL9@Wh~kYNUdFnNaI!9Jm6Ml;$2GMomxA{VbJWE*`x0B?cnp{CyMrk z)Oy~dcdOZIx>w=nwvC+tJ^SE)*0Gvonzh%%Tk~@?NnngpBwx;4ND0aIg2419#dz@tkI_&fJ0L-l_U-J9~XD2<@YaG}x?F00SfrYTI(K9AnogDQR3l`dVVZtKKy=_&b1N;F_Z7&sqMe_FhS>Yf0I`Ven!tYs!K!v z0CE}kZ}Mv&_I3XNtKt-8M&S42jx=J#pgi{(rlKT_ky2JUt+#&6K8oD zA-%uF6o7^ScYUaNl3;MB&oqnqfG2=)#YHP$3)ubZoT*Y* zcO%HIjqo@B0O}jA4nGUu&*l}!@;|P-dLH=tBcxJ2#BvV6NhglxwFnh<;MQBDLYbTc zw1mO^>res=Wr_NV@Ns&*Um|6UwpobwrC}Sl$`0Y@#V^Rp06F%}2n)Oc$Q(8eb&f|- zKtG$mY*B=a;aRf7oKWI3!ng0@yy~HM4o}{+h1i>c$vO5tsRDoxo_NhL&DbCtyywL} zyGopsk=z;uXx9aS=fKT-Vie}Rl>;Lndl5*Ax#Ny#Ja&a68K4XEV2iBCmF>Uj|vV$9C`Cf$OB;TK8BI-yF_>=K21P~pr7>iq*0Oc zk}>9-I}7CUao(CZE&#|B3+fHuDfvekqAMB3?Ebzff0T=oSL;Y01fB@-O%2JoNuEN1 zjEeKN(S^Zd??yH;EuIfyUSf^G#(n5OKEl5KXMyJ#toFUHX<9YJD}OJfWnhu{ z$v^2@DRvRJ_7%^5FGHX{NoHLRUE9ytc@=%|>r%he`gJdLTXP-HsOZOT+~KjDe{a&5V)7XPR(+^99Agz% zuiDsL>9;MZTl&_~r>Mz;l2_*A_NaYt)Dr3Ux_*_XK@(ozxm0OE^y44#9Cz+Gs)_oC zsWeW8)UUPcC=K1a7~*g;vSi>fQR5_jl*MU!cT|T|(XFq<62Tm(3Z$Qy4;5uQ0f`eV zK*kT6=Y3PJ-`gIOyf#H`E-=!>A87Rojl7OH?g8SbbpHTR>#+5O&WqGhTgCW*t!L;Q zfG~bsp8bzL;+-^T8(m0mwGne`EEd)=M(=P(%6E4I>@lCcH=*j*{ZnUiDX>|z&r{Ql zq#=acPc66GHJ8&mitnhjzNNRng5EW>l4zsD6Oa|SBlPc6J`h|*VesbC;^E_l26ty? zAS#UcKczFWHts$_>43X?R9o#tS=!ApMrDj=<>a5GVY>6JC2bzc=cjIcHMzQ2y;B~3 zQb^nZ@sZ!n4vRJZpVimhJ*?hMdllR!OIMikzz%Q+9{&KfPM?)s^d7r=uUcH{HxS1J zNimKjURwZXljgMho?-xx3&$ka4bn9IVlIG_Rnzqgh~d-kUSGq+HsmXO%zU?wJ+oWA zL#wB}(KHmWWVO52eo-Ok(ExnR2V#EptHvlq)3)Ica~znWaJD1&l&Gs57lE~6YLw#Gm+Z4FT)ERTKh?k z7;NM1US2sEou2RoaND|&L^G}b=Pq@z*?N-UT2FFy? z?=7`$KGN39@iE-H3E!MV0PppyD{Jdky*5G9@oJu-h#q$588wQy)HV73Ca#i_MHPeC z?=X?bxKrGF5I;-;f#b*Q|HacAnPrf`EJX#EHIrv4VmeIC2i5H;MF5J7TR<~iKi zIc%QP$~rI&_?8NGp8o)P7ZR*&W5I7@=Al()S5NY>RL?h8b}!b0K4ZMyW{O$5*tX~)cUBc>Ws*Qa56atuFsL~pVV!2eKs50 zNw>o)iPy`E(S^hGmm=0>CY!fuT(&)8qU>4)wdvp-oKY0aZtKjN4>r3 zdo6Ma<(3Pp0yPO;xGuvWFZSSYDs-)MXs3nYSt6P_0d2d34sqa7T-@8+C#`jJ98r6@ z3(wxUCq(E|>NYpFCh62^TE6&$6M(rTzWL&;^%yOB%TU(swHu3sgG^x@5klC@70F}n zaxqE88qTuU4uhphsKah9Q3xzI?KmeH&+Sh1h1K=;g=>4L%^8+;^$114PMzACd*+cyR0sTV?^q2_P1ZDzhdn^jNMW~Y zk5bh^+Wwhf4)0)iyonyPszL3m95^Jd0~4iy`{u+2%N;_8#rOyX1S!@Id7%>KaUbFtORPlv#17{k z)gI?o($+ajMaHwP=z9IBpL|H|e{(KJs!2FO?}5j)FGy&* zZOzA2Tixb+YkR2{IN0&K1nxfP#ZHr5G70T3?IO8>(nwuHBZHiZkfz;HK#*`d9xDOx zWmt52zUd}OCwye*Uf3D(Y# zx`nvB$1(2y+4uTX>+8Kf%GOKU$(9HZWs9%_70fk#8%1pkrZsyjXnk3)A!+9ynX}7e z`R6&Qy+|h2y4v$bme}1}t%hx(1a02O^Pg;g_$u_G{{XCYCYswxw$m@x81)N~hFI`U z!ZJucIR5}N=9|)SSlQ}vUh6lPHg|9oG0wwmUhEIOV|wdDpG@g~p0YQt)7#CliVQ1z zkbGjT?kAT^)HGh4d&`|h3x|1TdCub|2NC}O^7*BfPRp$iKt7jmVA^%QzS^OH1tLwW zwLs%)j~`l|-A&W5*v+Uup%3BVa*X-N+&h8pYdLMD>2Ul^h{0(Ux*G-|5Mv~q5*mgmxq+AE3HBw>||W3~^TKeaP^n%$jCOl=%$KZ%djD#&cWSY_Rvk@84B)o;@pwu1Uzp{;4fy(y+?CM0~t zhaJ27WA9a68qWPk)r)Y^$P9?PSGOv7HCd`zMWg&aXf4XaD85ITLua2*91=WZ6>$|k zXVhb<^rn{D%6ospU&kDYAjUyHKBSJ-x6`_oR_cb*FCbey+pEB?;Jl~&cJWX?C`W3& zF>AD>B1_nrwy49L@=xi`X}+geF1ouwo*4I-CHZAW$Yt7h4Ee@Cyz3aLBP+Obj`b`Y%FCy;-k{XFx(kI-DYy#@^YS(c82xGnkEmSP+4P;g=li9) znl@&Cmnuq++a|0c({;TwUDqa&!pCzkHlj2B`eztXlm7q+IXTgB5h_rl`oNy>mp8OsoTuqnStvFHsZ zO)o@Cdx@G^OBRvaoRjVJtv~~Udwr@MqpsbmK2UHzwJI`C1pK46YbtSZ#{^=eDFJ&N zc&1cewL>`yi2WM#k{aokt4 z5tA+LjAD(9M1lw3iqa#HUg1@j2%k3})M@exu>fE(=8Gwkc8+{knUHwJNdhTQFh)%v zkxvAU{%BpJX616A@sZlSC`Sc*WOu3;a+d^*f!h?Nkynp=awvGm$_@`>?MTi8;ei7k z{pb~$Mlw%5@kzfUb}|9>qAdX#Bext-?vJr26bP;eEXl{$#Tj(Q7y+KtBLy&{zZlIX z5Ha1~6#__AhTZ_rdLYrNsbDa898v(e18sTzJe~$V)d0mVRH#wGIQ{7-KOx#M-rws) zp$YuI-iB5SB7Z2ACm9q{qo|Aky!)CU>c@`b-@P>IjdL7}l~IsK=}tD%NL-QPgU97L z2o5`QLRe!ZK+iZ)L=JLsgX50Wo7Z-BwrC=`%P|0F-_9!o@wPZ$qn<}_j}-C9y!Slu zT>|B{{KY%_RSu^OrOc)~s8VEcosuYJKfe_|MlSbU(YlvH(X8~#k6Ps}V0J~wX!df% zj^Ja>Q>^+d9}v2N$EYq0aNSEB=@f@-QgAW5-v_YmT?aww%@akumhVo}(iv_K&f}bY z2eA54>YYEL>RP-vm->CY*EYj3yDvYlzw1O{y2kTOgW@kuPjhm%&{{tq)EP<10E1nKMz3Q)4>Fpz1^q*0&NzJq7HX{UmIPvtWWGa9pXcQ{r zXz(gqv16V5RJl5LQ|MaDBRmjYlEic1XZ(L^lh@rxX|Mb=)Gsvlm(m72I7|X}432%X z&26UQsk5-cb(Q9waqzQExHCIN z6t=)RrtC72yY|86rbl@ zVlBB{zSyXGR*R?UFg?DVYL?Iz+)_2bC&{a$nDlMkv@O<&aq4bKd7pf)6aww;Y1=y+dl;79 z*4-`Q1LX*#k2LBWlj047>DHDKw1sYxNTrfEVVnj+DkO^dcLPk;wGrhXymXQp)in36kJy|U8V6Yy}^IXV0DTYLVh zv4^QG^jNLlNj-l?M?f%-jogm-`u43o+)>X9dWe!KTrxW39Mh{2&idDGcpOo5zgD&9N1wuJS0dKh-C#mUEhFv0z#a#^Zv7=TixiQjHti$E zJ*jQ%t>KP1;*L0@!NP?Crp&?3ehzC-ZEZG=EUq4Onc7F$gg^kmC){?&eA68*tben8 zS*dAPT1CW?+}%9Z?2+W1$2@2Hap&K?bvWTzWROQ3XdC7@P%=-NX`)7pdny?KAUGU> z><_gzXuuoUr1;BeDYTj5n#W=lUKDeZewTiZS> zqS`|mm63vn#y1hpJ+tjft7?)-bPN&i#^>N}`l#tA+TCm(9>HxaC+MP^os#tuD=bA3anE*jrP>Fdd?lGfVuJH*3u zk%lD{Dd%y|G~wEmx0xv(8N9f%lG%fh z2Gg~Mcp#qqR*49cVcm{-2Aj0ENZ>KWg;hT&90B$e}G)h*V$#W4=kNsiThty<#)p6v4K(GsOOy7fiCg21a|(a{JwTTj`so zYyO>f(@^gbLvURI&V8}po_MV`oAEoQbqE66o7pbhacqz?J0GSv{cBF}qg*q#8A@P} z`O|P)=++M$al>bCG^DBpK&>i#w-)}gBb>Kx=k}*pY#isJ ztooike^s`g)9`k$cGnL(atfZxhsi$vDBDBS?E0QN?H2wStZtXnyLnFRNT=v~*GSA! zqbX7W{?(h4rlGYnHI}P+Wd+2BM}|G525w1GMpT-;Un3u5)7ps9I-5eWWxN7LTYdik zE=ypr$m2CDsAt-L85F%6**U<$f0 zy#=K8PJt}iZQPK_q{8eXkess);4iS}%@;`Mn$5nauch2J@j-bj%M0%$mLPzB-u1nC z;yYt8o^SyARBcN3%T8r$nPY`X0ZfMgpKR3XrZayG^vzE}>9*6gHiSy9?uecSdz0jv zve%~4E%XbI;|+^Bm_$efW7|jz^T^jWvS^~FD;f!d1KsT zk$52U&S`gabDQpsy3%?w>dIYR?g^JVd@0y5KPdybC;M|&TBl8G@2TmUzKMMr-@|T@ z+=-6_0fJAznzM&f(Bs!X1)ynbeSL1YknhK!0md@jkI+`S!JcN3j0JEoxg46iMhmU< z>7>&%dwZ+4n&4^?p_6XW=O+WUYR9iSZszc5T851(!F^>mRKH>lGg=FsG967*@v%5I z^Q_Tfv6f!nrARMw^6d1EGr{(xsL4;DYh$hS-FDhYpZNwm*L?Clm3)0Z+|!ybOKSQ} zp0@<|@!Te%Gdx3P4p?_%{+~+TMmJ>PbK-(c#x1zr&vQ#sIh49pq+Kmx(b~&;cNYmG ztlVsdKym8HhM0eb&V#%!sctqwC56B&%C(W2LX?6_`va7EBk0~SJAt)hG^91 z5+GqtT3Mx_G22^5ZwOrCB>+{tl}Xc`Fxpn5t6qLOYlXd#Fh@U^x#aopkF`0dbl#LS zJB=|d0E+F)N(Omj{a<>t)pZ!Ots-l?hboKM4oe=u54km-hU=>DZ>F)i(-PJREz84j zkGpGQg>SBXs!gCj*)1nj>EzLGHEWCZwPF0A>yBzcl`jrYzSY_FGeF1-@`nk5 zTDzTq+*oOedbiCCD|NSElrPRlwrTcUvc{km_o(t-*-K*{sFx6%bxr17**`0tp#HC>eT`?hwtXmE>OaI9SCqz^Ac){BU-2;dpL#uX-P>vK zt%NBo=5!6avZ+5Y{RL!X>n%q6LDXf?ZILc5q>wR~@QS?W9_OAu^$)1E{aR~olbvMr zB)pV7@uwj1+wIM3z6(yKp5s!yzqCzNH0wJUgIv60GPXu=Kj9d~S9IHH7Cj#xt7C4h zsY#e4l27-IDbED|0DrYp*(1mC6I);AGS6nmYc@li1CP+vSe#pQR*5~z%#b#q?XQ#MempRj6L-9Wx!qprp*Qx^F+$Gv2k}mie)PS&o9fg8N8GZ0*cez?F|983V{3 z)u>)c_5!k}P#!{xJ8b2c_s%J}+mZpv9qH8}GTF)RS>)6{NZQ939tBWonHG$H`)r^2 zs{Hc3vT@p|bm5CR03D~0fAx)VzFmLn_?9uS@H6k8^rK_0?Zk3x&db7+h6lj!LPHnI z;BjAN^G;v^%N8B@@kXvylt%d+9(&U$9Ax+4P)W2hu}*Q0-Ng}?a6ik#0oaO9D@0m7yG}m8T5mCLAmre4`q#IkFmj`C z7^PKIQp!Iu-S;(6dzD5&;8PAnkCh>OjEZ+Wf>dUMa^ni8+JKWCw*IDw>|R3W>52tl zGD6_}X>!N7h`P$6;MhCYIYomhYrk0krikfjJcv5sg|g+qlW9%u$h8))>=cVrLJ zoFN%OI93$Xk^up5am6fnDhq?0d8vx#a zfuk-^geozd5f#v`1fJG?Uqv*n zG;zl%^-6$^j>F$P=B^HvXDd8eM^^`sR|Fr{iR4R2Cb_u*6%?~E>;Udf7fxu_S`L*K zon^ei8DgSG+(u4F&+AFlnp$f5Ma7J)%N+5te5VJIismXm+A9y?w%Ux72qL>+AX!ce z=e9oeHeEv2&foDDw@||ybNtr|2irBt^gRaB@8R~U%(6*+rq3$dySc*cU?w4!SSsGm(-NteVAokB=^{r$b1+~pQX|1aZxQ&oPePDiEcBmJ9O`^uu%`T-H zE!NQxEz550$BM*DWe%Ov-BZ%nE4eL4uV=XaZNReQ>&N-1n&yRJWAMXD)NP=X@v_Yk zGM6mbY=XT108bsMB%`RwtZC`0N2MFsMdPORfVa$6tX>J^Ac3?F)yg`fNwCv;>!idG zvdeQYYln9%(jCK`{mpiG+@46R*sf9WBJx`;dqB50QV2gBKNO1iUj%Ob^UZ3sPN>m! zr@Vttyb^vJiF=G^$j^_hWj-Vb+pTm+q(lhXntZ!(SnXb0+pKA^2B>~{c71TBszAEnw`&8S@i&2E=f1sC?Nj;F@96?9&k^J(B5@cjMn;d z$EZiL{!tyVUe0{y+ZE6HViO0#zOPwg5=560tU#`K00H}PL)7+G@9@K<=8h=CdeI;u za7O&+`}11q-zezTTK=1=o7ilx2K_zu+_MY?bbAuCyziVjBg!BXI&Jb;7$I z!|9(H?^Qn#ZDG-Ld*9(P7^lD5*Rqew6>tvkY>q!#h1D`yE$>C@*3n!39p+Un8Vq2C z#!s;3vpC8&TK7=tS}c#52w7}ng(Q^zNcb#Oe3}lm(PGo@`cq4`W5uiRIM0ypTkFRA zr*1DaOS{`>?B(<(Qy$XAxFF}BsQcB%rE8)uOX%7y{nz0HuM30?`N+ZfzJ#7n)}nk% z({1#wi`CxBH+Q?bTZvyk)Wi?%?NB=AC?8emc9#rVR1LUiBMN!_>g2PqzSjDWM(Mlc zx4pQKJmhTvWFS6I(y|>TePN^PT}`R#5Zzp@oWIBnrbZu|_WSWy`W~NcsdeX5L8-)- z3mlNNVnL8FILF&R-ks@9H~#>$9<73Szmq#4$s>?K6qyWYY%VnIQ%$zlBD((oyE1Ri z>?j^V`yMI3Hq*Zl%`T$M}s#$deKukrXaR+~=Isyne*{zQ55l`yD=ZTb7FL6}Kv6L>vAy z;QcE}X*IRV#Ook+m~M_h30D~Ru4k%1ko*s^5Qxzw^o*DsoVPX9w5TDAO0`k7RbgF# zb^{eG#XB{+5;^31Q+LY+qz`o?g^gD(eTSNqtr_4t`(rd3p@dk-8FDem#zkbzVKr{P z((Qzwg15W4jtK|iWJvy01mlu^wAaLaKk<@i`lg$#t)`#}OI6Pk78$@kKJ^!%H2XXF z`nyY=6eRa7fD%pzAb);psx_Y9Md+z-CX5?hLE&i`zE(g;2R~t5GcRFl(OSl*CcJuo zsh~0HQ7M)=LE>foI6n1psdY}3Vbs=Ic!}Ix%3y1SAIgpY0O8O4UB20`YgW+Rb>4}2 zV9@T03Eo*3<~uL|9D9>luZFs08r{A%ONgB;btxxXbzQ8|6Ohh*y@gLb#l_deT|O(_ zTGh1shm%fWwlTv2}v~M#~MvVnipvJ}UufZE61iYaK)Q{{TsB zp2T{FTm@jkbNX>zD^L6)>GtB&)BGrnL_0q&+;NZ6rzs%Cd|ioCq$Wsldcs%*`6$?-6 zmAmMxy1z#I$4m|`c9%yuG2~>fJ^TLEGpP01EW*KM zAFF#kqg;9|*%d}hAMuYSvi(mqKlW>&qF$=XtqYy6*khd3?PhyeekEFsLiHlGyVJx$ zumEhI%e%&L-mgqF<>y@7TXg=1tSzD&d$WtKGT8?kSM}zZ*RJmRkED8v+f=oJ+3h3( zGrNEXBa`R;bpHUv(WS?uX!g)a40fq^9Dqhf(2K!8Zfe7<^?fG$;r_KYnKVoS`fVdF z71(33^G@BYRV~J_bYF)ZKNNP!IJyzUcc|Ju#y$P2MzgGI7Mrx41~tCAw2dXYR$Q=F zJSZN1r^Qh9J;kiw4?03;odht?9`;k55IOz4)`LyDv$XiDeDhq#96F7ZZEnT3ha(4r z`c=eL=uzrX^~SgO3u}0_MYImdE_UUxcp|iYE!EMtN!(xRjsF07)AE;KcQ4}u>)NUG zjbBWU<0h}0$j{dT}Xr*8Uj z@1VZ*?kBWiG_tS`pe_g_`at#-nTF!kehJ-20NjbXCSBRVGI5{Ymj_7c+u?ns+ut-2 zLbFCB*#7{TbK<0Gcd^=7y~NHVjR9g!LFl`f?z&@7ni3sa6dA_^frI`j-lx+JsiNFk z5|9%ym%lC{{V0APSzpIeLnj__>FCBHT#+3y$v(NAawCUYq8z+)0n9NBLOg;~GKHMDrg+HO#>S@)T zNj&y#1-#9OWN+(iU}M}>8T&~Fnb0~;=G|*1rSB0Nu`VxjFCu06LSU={7QJI!%t2-ni1s zNpW98;)ZW3k=w167eC z0l$Z!-PmNF{{Yxju8Y#F^*)q_+f=qmqehX4Zoz{daaWca8$qjG-?Yys;y@Tb(g(4t zZ8J%{em=kJA7s1cmKNxgeC} zQU*D}QZescW0e!S5rv3@p7cuLfF(f1H@S;#$j5VDc9U|F1`mN&A;y#$2LJ(HE(yx2 z;C(3&hRDV!(Fo2LBkSUbY-E$Q2l~?P^w$A6qb%ouxbaNjf&&xtbDF9@QCS0ecqhdU z;rVu)A2s7}6~Zw2{jpB0nHVvH_B8;AFh5c%TWHSwMd+#Z=lpe!rzN%O0MU-? zybcezKT%d)Bh$KtrKB@!SM7VLEx)8?CxjjFJ^rW8DDC>kMweQLN|NK#xRxO(-U6>b z)1Uahm8FhEW$&JG-j>|TvCA~hf!3|8HJuvkQ&Xqh?rU;@D?E&MZf`j3F-xiR4y7+c zTIf(}?Z~?*qJD)YzWwExq zfWdH)%#OHqb;x1*(^_VYrRj>pPq4SQ$;mOs1x@-jjq2`&)OBrNPt^3iS?sQ&K@G%! zkapu6mmX{15xOr>)@&?4hw5@A<$+&FyJL4?Sg_|jX1Y3Hrb%4)AX8J6%8jEG(y;d( zA2huc{2FWNWV-(Vc+;0Z%$LD$IQo4lt#hFz)GV|sU*gl5Y#cPRGiBHiG8Fja40{US z6+*ZiFZiU80LXa#X|!NEtD?0F-lB(9(sfqT{Yb=Dj0xPzI}z@GBX?jqzoUWRqHx7o2gB2CFQ!jkT7UijO;lFkJwd}HA4?d zt>;C_rRp;O0N(DiJg?tutaoSYjs-QKbe5_9%4!-;v8>HJw{e)1t7TwOz}uhiTaZE}!A#T{7CQt#Y$0o}&yZ0C@xG2luT8yLBFg(leNzX&TPg z3jNub3=I9vU$BqTF2jTSiidq`Z>T`f&V(Q;aI9C5ayb<(89#%1b=(>hnx3gFjdQ5U z>X7FoFM>ym_x-DPa$uDi4oG2wed&Z))NL3WMg=>;J9hEkk3VXjdz$Au$HS&K7Zd3E z?d6cUlQYNNSZ6)QIpo#<08(^bi>~zTu9Ygy0$Qfn3~z#`7|9-MPCX!`6(CTfBc|4E zzfQ)j?7GT)I_Ud8l(Lb1ce%QgiDqsF2VkSXrY!y+>6V>j+WdDjtYy7HZVB2S-@hZj z2i~@mC?$4`0sZL)9DvJ%pFC92hUyQ8T3)Ty%dE|Law1|HTVQe>hu6S37UQ~}sFdbq;aa8OquyW&Tt3coYuJRbzBS*GmkVlEI@AH^IDsUSi|t| zMNXT28aA0Dl3_8B5MP7aKG@=?bvH(7Jwa(_Z*;F1wP0ds&JF+m0BWL!=cTlJof6wgYbeFFtEY+ztN# zcR5g}85qXjx6-t(8`2I3&D%VFwddXw<>Y;Pigy@}q3O7EyP37EA4)eGvqqNi0r_Gd zmv%c04(I7wT@Ocs$ETyx=0h-KKCym8jNGw5a!puC>W0{HwD$L}A&~s8-)x$fQBBkR zC8FqdH&#}b+t%olDU$?{f2ZszZ8}dzO#*!tEdaBWpH)funf_R}KeJUL=TO-@Rp-~+{ z9C^X3IX4hU=L5%zVkKb3v&{x;ue593Q$;qmQ7dg^Ws8BFF#iBd)zaF*JOVqY(ZT>W zunaR(lpVQdBxj0dL_)>6&vS~GvLRl}Zw%2!VD$;f4UfzE*RGfOk*Y^L z!j)B20(m3Hy%{9;z&_Ln*hVCb{{V_1LBPWP^x;xYayE|NS_no|40Gm>Zq>;rp43ss zqw{SbQ_+P7XgJTBY9(fGnDL5u4EC(@@Yf$AI8`|FRF-G?cQ`+m;p*{$*@~dB;ciAU z1=H^PR~zNldtVdA=K)w}K5@kuVi``+j@(e9<$wVDlU^8B!10>(3apHdyr|zLBoO%7|HKVQ)QcQ8RYxYOT92L!Qg!;nKB2HUdKF;20T!C z4l<{}$8OZ0GVcIULZdILK0@dyzx)9BwqbN2qdf?M+-YB1*)u7{TKn)j`#I#F{0K z&pMNcQ`Pb{-b*EeUOBLtS{!*r_!%CCmy#~C>FKC$EG7%H=D?ASz($3l`I!9O5G;WT%z0<61r?!Vsb30`3jdC|&eLeG9 zzO?G~x6w39yDKHSx76lEKA_!!kaq3=0E_%$s(u$-SWVJBL2WZeV;2_>D)#vZ@Aj%q zZszMx_dP%2OL-vFUK1d>LI%Vc;IE#4 z%@@P%Pf_XJS5@h~dgZ5!O@!RTaK|Ik{{TIzzpV93eG}qli>=;AE89Jz5My4-bNl!0 zQ$0vJn)9f&%{Dn61Eq^Niz#1G`DXpN6+STq7guXKk5Xu@14j*(s|>!3A$Ju6Azl3U z{V`Fr9Y?ISOFeSX>M_A}ZE(vZ<}gG-LHSS*e0Mp;JMeQ_>G^Ff>}}wi_`1eX*@1Eq zPaijBU#Gn%!)wT{?>f>r1iR##M_*K9zTw-&H_EU64{I8>vD0mLt89>@h9s1Ca66Or zt+CokDgC*wS@8Q_{{Uyc790DEDP$4cIz|A%a!-G@c&+zLYf$SNTz9J+krDur84BR} z#(R%rS>@V`rIiT$ng0MZ#NR$bfl2BiViy_52ilxjMnG>PJ*pGAqyQjL0iwtx22qaU zjhp7qIPh^o(ymAu{iq9Smj3{yGa~^n+fcojl989sB~pO57Qij3od-l9t~GJ0(mTRniJwVBdyNPn&M>!^&xVbDDBaD(elf?zS%S{e;Z7cbQnlEGFi=Ei<;)VN`2b^(C+uj*&9vzt5 zl`O8~_|By(ONg5^A%8Z>qXXc_y87Isnjc_)r)U=zyXye2V_42ljr z9`w=QyB&{=dsH@!36?f*QS;})ptSV}`G`A!@j~ogLQsH5_o((Bs~3(}1fjq_NU32! za5425tF$&uq>O+CI|{QlS-g98uLb$|R9y^SjRGq9M z_6OdZWpFYZvF5dI7+^ro2rSqfgF?HrjoBX5`lE&}4n{rcL`{){k8IXRkLkNjmL~vl zlSlY=}b^;RAtUh8$`g82;1%`L^6}O zHb!V<5fUolh++1oz;dNQ1D^QipD{M&ZO80sJuJ+)$j5pZ2!kwRA8eDFG(ty=d7{`< zoJqE$unc)}KAEeBfjGw@!5+q&a0nn|A8Is@oaBA|=~w`#u&AaAC#S&snobo?F@xfs zQiNa*-sYPwqmZEEj@5BBP|a7Q0OO+*761A+CY2>G{BM;GNJ1MNl1vh6$`^oZD8(I;+kSmf6ez;FTg z?^ISto31z)4ae<{iM53mIsWOq2N$H9hm>f^OC-h&?h08w1Mf9tN^hrT|M z=?%f#=H~~v`qr>3mkpeQ$7;&-j^fRdpyQmMsjWaq!6AXcub;!i)$03?`h9?ZKJ;cE zkmQW!nFzr{7C!hC;6^+GK*v1SSsbXAGDgt&$*&k(H|NbZ(H{)kK&1%#K{+k&RSW63 zU_)a*E6$Qe31%3j+S@=4lkHvrZrWSL0R#kmr!*2E^op4VMR=j-=JP_3?s6Q04m)!} z=wk*x4u8_U94fq{l1S$iQK3_XY-7OUy|u?Gc^K_Nyi^P+W5;1$^aA4#npO_afsgr7 zEN#YIZL_~%CQX4t;A7^;YVyW; z&uS*fADghC`S{qyezd@?4+jnXsj|3i44nD)qYCZZ_whtWA2tu3^u};_|KX%^25XM=SYJ|zpzPWklilPB$zD_ z@5k%SOaB01G#i-ogq|k<0NH=;@P9b=Z`YdE$qMcSoB})N%{dfmk^X((YL<#N>6bjL?&S^>IiNXcubCEfWUPxSq(O(uJe-o}}_e^S`2XAF4% z0Ig+yS*-`7bS)24xW2KsRhHoKk;G&E74{SsJrSbWbxiv75Rp5Nm3yKw{LG`=)}CaG z<>#DWapJs)54Z$l2CdX$b(;eJ02H(vgX%g9S-?NizpYv7eHEc<9Y8g!%V90RK)7S_ zXTxL5R+vQ&3mv3^!SW~<(hY*xByei?!Od@adJ)tg?2eIPac8GpH{$ghtP&+&;x%sr z#c%a37fjTwptZZTNg%g%k|@suE8Fy`!Z^V!ayk9z@S{8vpK6=ksIS&Eoi|yrYg_AR zrib$FW&Z#k-*HQ&X|@`6o-C}QjyR(j2tME5rlhf=<7Fd-Ru9-x)QWoz14`0v?r$t? zVuEYJ49ugG^d7_MQtYg(;klYNa-bY}rfyEsFbe0~Q_9qI8AU9h zF*y5CNEdEO9l&uyWsy_>nfe-aC6VJK#yy2v&%vgVy*;=)zMd&#f0HB*Xc^sqG+%06 ztjY?j4h{jNgVMIh^R(l+z@e5#mL*Y)5y{7TCH3S&hB7h7dSc}zK&4clm=jmAry?#6 z3IIN)k>x1l1UVzfrHszR@4&}6#d=4R)GSx!JPg#L46`5N1Jp$rUgwHAE+xX1j6QtR z*ATpmxF9YE8K%}(3AfY13*b~y#p*o7x+oa0F5`3pfw(F0;+vXN3@t6g1&u~o&m4Q2 zhoI|PcA<47RyWr7636+{Hpv6tiK{qH&`aaFJ?Wt%5bme|C%C6BVwcoe6*r;oE6(kO zK$!3W6k2K*#HT&`gY8mbk(miX81tOdO0M%^lZZIXPnUuyWJxj^ zz++E*!GiJnid>R#-{z4K<@HNteqK1@q1&UU34-0pHD)&#!5zGsjuC^l-bOoP6srYm z3nDRh%Qr2>Jb}pIU`PB^I9~pBOH+U8SRR4 zXCMGFPq(!SfEbOeXoBvMf{YJwSovRqpnpw>JpKIBB}%44oc$=oY#v4pfPdvct+7ZA zl511KUnY;Kk%uZM&`Ypxai6VvLNK9;%J>!O+%gCGfgJJXoq-{KRi8Ne(wN+zQ+FO{ z5rYC3<>$Y>EG>n`a!z?Pu!4{YFhRf`^xDL}r!Rr+UQ*kSVa_NU3np6}RN`_xgFKGe z&or|rMdu?p`_M$?2q0u1KJ;^zDS{6^!mh>m1ejF$IQQqhGcMI!;4>cF(d})y-JW^P z1I92xIVb6nO+d)QI0v5}O7a2#+{Ma_F%&GJaru~ofq_V6%N0~48oC^gUCcmIaYV(* z2N)RdLYtRr?8ZCu-o14pjZBgclny|k3~%{~$Ltf+m0OA|{D(SLVdsQ`{21~)m{_uamt~bl9@c5Ll{M(BHIUia; ze<%-}`1YhsZ97yFed)-tk{w5W>MQKNYti5j%yZkd400b+m*?J#8w+RV1mqmkML{YM zo_>`S`uQb?gNodI9b^92DFky1dHRaN0KkpIJov5G!QYpwBKbCh3wZCyu3oqG*Kb4n zBk4|+3}kFK1ZM}wHLFzj-?e($5fH-j-?`0ojw9InnOkrJ7&-4twUp#Pf3GyEpEm8^Y?>8C zEu4&s(j4^44corIT0s)A{NwaJsC^|3%kiFhrDcoqH0dT0<4UTiqkw&FhlfN8t1priZ1hy%~w?<);#xa@% zP(j16@5X5kck)ILY*LWBj!p+|Yse$V%0VORQfO5e&R-b*_2t-G1+m5`WD0&<<2?7T zMv*D}#QwDJpd<Q z+;Baxc@)}gEI}l3PLkdJ2o&ls;H!E&CfE6C*Nff81 zz^-%8iV37fIN*WX0+}ry{B1q52&Rn1V%|sG;)!KO1M{EDwRZUWkF5!I-JB3R3RI9c%mz8dBIC}1 zV^Bj92089&qmWl{$nj6g!~u>@c%=E5bBy`snwV^0#!9L0#S$~19CJc_MPLw**U6!h zSKYM!wR9fimL0R(6x%ZuediuI98`OWZsV3eMMJ9`F+K%JLnhoX1CJ+~Y86i`*EHz zO&sguX0>yD6xueCZTOj>UtZ(pJ9e)-G34W%RmVX?pkKicQ0X>sLp#JGkf_02V=717 zRTomUy(`zzYwrntF8M?k44jzT7|H#I_n^}e^dE}LZKsA^J64F!Q3uGaf^s}}{{S_w zfz!%13D4IQFB3AW9N;!b)}2{U_E7t!X#f zr0^}QcCwXV&yuPJa(=ZAKDgGUfwencnYf!tLP(fO12PWm7O@>Ss9H_ay-lOqNg5Jaz5PFtjkT6GpM@G&cmjy^^0>9 z>2O!mT=L)X0rdm>)@~1q_Hv`TXxHUIz#t56?Tpg=AcDtJ(ylH&5b(i$H{m@_LZc9S zRn^RJYCTN`m2Qfb_AImAZC(q1N*}ME(xyq}4Rv3{JADRdpnW#tb}_5YQHCPVV1Biw z>CIl#ThgJ_Y-KGJsg=maK8CTM5ju*}7;p4@Jt?o8$NvC*l^2Lqk0;Awuf85!XsOZm zsiU+06=NW7;kRez$oI*qXKh&11~~)eVMKTZxFhz(M75nwxGVExfGSKfEQ)c$CgMHY z1F2!$6H3jK-{(GSD+xS{9C^t4{i&!5HVJH>e-u}53~}aMZ9E*+Y)cwkD#S>P{M;Oh zZAF}b1ClbswF*xYmt2pO?HqX&(-fTwtMlZ8+L<=7`X)wpB~EjnK22Wh**j{4&j%jD zivfu%gU6bUVQ?gev21rZsTUV8#)0$3DYWcRR6bY&Xb3{tBex^zQBCYl1K^CF4{T8j zh29ZHN4_YDB$ZwUdFP5uf}6Pq$fiFLAxBkF$74j2G?8s=h609}o^AgCBxrI6d{lDt zut=QxWsl59ky2EU#deIEViZZPnXpM;>L-dOn3V}WgmN9(!6msgH*lLc)>L9}z&Ouw zQ(;6BNZ3+GW9GfHemzVcbAja5$_+kMLc7j;SBp2|JvPZunLc?Yog1dYeB^X4u3+DQ%AR!5@=ggB3} z$sbIb)x#rhQJ!n(aPanezE7eCUzi>!ilJUk)7%WuF;gPCk^mf!b4&6m8BkBXb&ew3 zoG$K9IHYo!U4zaJ4>XdjOhYi?d8Nch`f@nNDxuW_IRFvwiWV{Fc7L^d)XQxE{=O(( zkP(nG@8YQPs39^zVcgNXDFH?^j^5PSaJkA1SGXs3NC(XVhC;*5!Kl}FksGM2{ZukZmfA?p(e$j^vTax-fU2B+ZuHgEODZ~( z%M51}fq_gDk6;ftqROs$^Vph7q-8*vKPs&jJ86P>F{aiQY_wxa~^j22Ng74Mien)*k2{(l3XG%-hs9^ar89ROSr;=mB{12l^oNf?n= z=FfUyc6_MEJXR{6x49cJu&SRllR60sg$mCL5Y)lqJ_SMG6L}209yq5xQ50kaz{eg1Fol*@R8o2FO-YqlL&}qm;;2IV;tW^k z`%^G8#;m2e?gb|AoGfE;$2m3Sk(z!G4mjhz73R24=mC>ih1H4{(SO!CI1kOs#%rtfBS*{~evoDOMhS=j-T<-zTW4Mp}%4(#&W5k({V zzPb=fkSTY{B$2zmIl!ZqMO&25wg42&6111HC=3dWN41YWd{K=49EA+6oKwV@ z?Uh+`#R1r?k`J7K%f|+!6~eNR8+U_IbkubO6VKM9%()=DkU`@few7-Y6|h3ytb{V} z9f6{?SNuGLzIi)H@${mING6f-$JfmWBwI<{PmiTkW;(w2N4B(WGg_TYH=wE9zm)ht zenF~_gtk|=uW_pCQp<9-u5EPrpCkES)Ij?2&2Db(EpKgNhUO^UvN62@1NW;4p-Xf~ zlW0{PgqAiJsprC*>|d%~%W;{VF8vj{ASBJ*z82bT`PK%qp$^Y7ORz zrCh9rOB-2c1BZB&pYc{ZcS(`Y8O0^49J{^&JYuyG&pI=wZ#s6{T)Vu3bW+?IArJu0 zQ|LT>t2eFKT>62q)7ppW;Gvhy=OmolF(R3xbO$=V%Tg?sZ@-&wVBJEz$M>p$TUs=7?Zgedlr=?)vX61&_+m_F7wrf$*T{j+$qD804F_HfO zJcORidx7+=0f{3}>IuiSExytZ$jW}cY1P{TLj*`%&w6nqjiix*+It#qRe#2YRq>4b)3ESlc1Fn@RYP|6 zs;KeL;=C?^q`HjvhQL$ z@rsPHh@_LaRvFv}YMVX0Y-!acP63FeQgX28p?=elVp7UW8VU^n#Wh^ky`3S zW)ewbg(JZvasB@Q#aXo)%PWOn(%+ZC;+;|y;P>xMt7n10%{qgh1lD;q54J+&_o_Q`Ot*Z0@z3}bb~h1{ zbDE@t;(N9BoBaiGzFmLn_>~xNOOK$WGN;tA?M-AAVpj)>^MxKw>+S#We#zp%|7h8yw@_iZVA5gUu5bT(d@goX`tMioAk6 z=DGdf#7H6<)R9^}(pWG^V}#}9;O=AW2fc3OlWMj=2oE`~T+qUuanuc9k+sCOOp6eV zjl>SvOParmZ@OC|#! zp*~JCk7{(CP3KH?M7pNEb!B;B_-nWkT|hgA#_i8+_N+HvO{>N64Ye&QU}dtl^<-X8 z%ba5$UQh2^4~Mh7wn;J3?kwbZ*L<^#Jgx=_K5-sUukL{aX+ z$6=4@A76TzG}X1oS?Y7xSjVNCX)f*~c`|-vP>YxT{8f4D9;K39B4vZ*)NP!sYJeY9 z4%t39&+AZIK-9FYPfEB=2k|RvTuTH`!+M!+{y;o)?N+^8Z`5DLG;LPfLl*|uadcTe zR4y_54|=%`U0Z6p_0LJQ*c!<$K7z){9IUWfA09w&S z)7si~o%W!CVz#}oSi>B!R||}L@OjNp+;r}@Oh8l|1zr7fJcBID#x3hf*d@5dD9NOdNuCs5sL_Y<_bH{nCd6;8+>xKYO^9sdAY zmw%y~kBBz$JZgygREsDBCH`%rzm92oAEh<>PMy^>YrEJZT$t?)YxzcU9liSwKJ_k9 ztLgr$u-5M8wX;dtqfpFQFrz*R@M_w4bDndaIPqC-i0HXAOV_`<)$Xq@rw7#s<&P(m zfm+#vMh?{^jzG_P&nl)793fD^E_?p~r5uX8Rd(c&p7hdO7htU(21Wt*sSA|^vi-7a zA}q{=sZ`oQqR;v-K=c#hd3 zW0I#HKA&1;gH3J2oHheC6dHY`Y?$O_Esp}DxEKn|G7j!4f?Zcs9cu=W1**wvVKYxT z$O*{+1KT~gspQ6vIHHR#K1BEX*OuI)GbRf7=9G5BV+VuH0gweMuDl#ka6%wkb0@!RY+j${LW0iI|%C>uog&wV^Nb@vz(G|DcC1Ke})tpj>Iq9HI>mW<Y%02VGV5H<0ZtTLXCD-lY>Nxbsw#o__CUYPHH5>o|?n&`h#A|+3Vxh7RKT$!H zC`NnoYL`~j^y@T7y1Pi;RsuCAmSflfPFZT&bFqu@aeB&|6a`6|5{|wpYeW6M|Rm{wiep?w*$KDqTX#t?2?> z49I&g-iWx47!|XO1LHJmV+vFU~kK^zTt!-0D^m-RaV7+ak$@MEVZd=k9B7 zqU)A=)|(!qX5_@&ZEgr7xFf|}Mby$ppeX=~50E}e@rqLEf;*u2g;y#=gAnXVzl9ri0}{6fV45b7=fPElpTN5 z?Inmw&M!$3Zd`NQ2iu=|znXYtw-c`*bH!f&uIjw7XOmL^Q7JfXnWu%{pnV!pMq%vYHjTiPW@i zgboQ)ky&`UpGwyBrns}aXrYfSBM+634{_jDeqBRO(Ryd_&}v3VuP%OTkZ?k`<|7@r z7^YP0)XlGH@ZPYJ>ezaD+TL_8Q(M!l zqLxVRE@UE5lIJKu!8z^CX>B3UJ{oCr-Ny6FadbvKOc@9rh6j(WUY^Ffddw7O9ln)M z)tdY0+IhH+6_LxTF8qZByJsAq(z0Di*RN`td={cKYrwMl?lP$#894XxR@#oKqTY1; za%y<4rC8$jnHy(Gg{-8rf1H=1daQPTea1u!S&4V~QkcOPR@4cL^TpgV#5 zqqpl>E}ZJE3g=6?Z8AsMZ5ff{e3>viljNGK_ps?QW%gDQ5xE%#!5FT(lW6vIGiLR#;1dJJE?m3}`>fPsPl;xCi zK9$OU+TBQWh1?#YgHqGySS4cHi-O(3K5?3#q+3s=bXL@^*BQV73?CS#Lb&bTpVc}=&CTA4sl}*T+c(b|xg|z&!*61J>szX7 z_ZPYyg}#{}x46_{XZ1Gl!WSjFkX4g#f`eqo%{z59Y;yE}LSq((3}&QE$~FmM6j3UyoN`FItMO>y!yAdm-s=BZ)k^7t=5 z%>(}csw(XU#&50(HA8L1-R=oFn?L>48m0P8q&yO`gP+PQkZ%IoO+zn zZXYgjn#**VRk(7CzbbMF?agXieg+pKziRoMJU!m8laR@~Dq<^x!6&^)^)N=<{*(#C zVS@MfG-gs4X&Z%ijuwZ0`34sU6x^Yt1V03J0-Qoi7QoIZ!)_RC9G}{%1YpD*la&?d z%Mtl7!@VeGQINUMU@O7O@tlfa823cZ!zXdhGn`VEalzns?cS6-ouNkDo^wYgc8)PY z0$H#KIl=E$_cnHOO%?2^vUC_pG4o*fsn;d<;goG2X&l7T1!AS~y9Dz}o=F170u8Ys zB;>GO?0lLCp$N_sEToq#PJ5s6S^Zb2r5#&uqT9l#6;erBKt|wBG7oM)y=a=|+8LsS znFP~kV&|5}f6|KzyO`xyw`xX5F06_(&gymLK_CRE&m4KC_0I4-rstf zJs-^;LWX_082Ipps-h*}qK;#2iUx+&HqocN$7M*VqH7Qq>DzzZGpOCaXgmY-sX<*v3iF;YDyvo3A#E0GohMST zGZX1WB(nbiEIxSz{XMF{UZi$nd)Z?|R1GVWh3(%y)Dpq@d0u|~>J`O|cL-8wk>e~I zY5E+G??E=PVGQs>_m=4Q19S>p0^{lQs;jQC(F8YAjY=g;LIlU3mp#7p7j388zM{H_ zfm-EbF$p~FBoaThNrnK9NK=w>NvxK&)He4TcAGYzuiDv*tC;aAAo_`(ReiJDCbdz^ zBRL_mX;ka(+I~Ti+MwLrI=Y7&mmTVPc^!9T0tX_i?_-tST{et=PB=9z3|~94Mk5My zMB--JjBrR92gM7C4$xFDIHI`{ejx@s4t}+e;CU37&&&w?#8TL90!6?hcLtGnvyuQg z)RdaL+*eW~jGI?Ji(lPE&*XdbJTFIsOt8V&Tsd;0xf5tNK z2{{T|>VML$vg|PuKBd=n9e^_H zO|7z(e;5M*5%oE&-@+|MEjLt~M!eJJ8n3R=Ay|uTO!Lq5k2Pb_w-Z_XVCoBVYcm^$ zBpta3I6IHBnprl*bq!DPQfbaDWD!Wi0Fj-hxsOH7sp>bKQKa78IWWjo<%x*e#2l3# zE2m8Iizyfa%sB$N4@_z|ns-hcB5iRd-tv(doT**As zs^0}FP6{r2clYP@soIU5#5$Kv>sHq?$9aErA~A!NNY5lLd~$PC)=Q~hd?>nxJuczQ zS9_eQesHh#o?SMV7YL3H4zI%S7>1pp|V+^K9;vX<#NyBH@f;ji9UYoboY(6mRlF6pr8+(g& zEihBD08!`9IT#eW{obpk^{wUIhKF-<5Xm3ajHORjWn#OF#Dw-Q2Qj^0uSsCMIV&tc!aUHTbsg_xqzez@~j9}TT~m2_Bb zFKm-fwzO7NR#F~j&Up48Uo_u9X)h00bzR6}%x(mHj>L`K{P(GEk)E};MEFatN$W_( zF5Q4~N%>e{0pq_lX=SHc^zTGi-RaWH9Qu%gH)$o#%;RxiW8R}#bk9*o(OUkQbz|zc ztp5OqML#owa?Cx(F^Vg_E2(uKh3Iarg}nAil0gas>J?r&9vJuis@d`WgzEmD(rtRj zr>2G{gh_A_wiAa?Gy9X?nY8KcBU$*7tHG#8Fpecz;R`O@5=L|Mr+SA<>igRcpVnT< zKN|(glWwiTDI7DOemVBdO|#JYk{^iIbN&h`EU#fzmf_A-R16S4x$*kcHBS|v`%$QC z+Z~ef-ryys80|z%@O)sO-lo`T2`9wCX!k5&Yau1d0mB>&hWZjS`cXcW)txuh7azmz zW4+V06TRG=EJSxF*n#`im9CSlTYO87`a50r5?Kh0rTYLstLvJ+G}JzWvWh;i>Lj(6 zL2C`d#IcUUj{gAZ_N!ludRt!Tau{9C*!P4`KQi;2{jw@QNaR!V8%%KN1ov{P3k;vB&F=&{VA-Le?Vqado5JX^^ev#z-TQ zPFZu1N$`K=Q+giHT-9_)^sQ@5iqg)~KT9she9pMqN3aBU^H$WG@~zMlcg3Egk>#CY zSn#89Am;-=rhiJN&^l&)Yxw1oYHXt4QAS35lAvTa>x^^nTAedO*R8%I-(5>+hfTSM z4LY#-&mD(+=lfI+m(lvN+o|;%BWSjoe78nvE=k;7d<6Ud0BUd0wXl$0b;hx#U6C!d zy`o38#&;>jC&P5wt8O_m+UKhr2J;~sX zDa92JYWhZ*)v?&zeje1x8HDcqoBM&D#|EytXQiPUvDN)1j>1b352Ys!G1>ugJOlp# zTE5k~F|{6ymrj=EG{3pqG_xZ7rcv9EVcMc}zJt|P`aIfhuYYZ}D4H8qMJ`w$>SO)| zU5d>7Kk0oo`qaJCgiB3Holtziw)R~6S5rwNds#PQ2OtAj4~N|$ujtc=H9ORcPliU3 zBZHGMK3L-*eJiC$=t!*}rIiDNeR zJk`Fxqe684kd8&iqjL&v+rjxc2llHDtLV$EN2qMItshZ)3y9Owl#cs674e^;sjy{l zH2XU~v*~RrA?w>hvO+||IRlfN_vF>m-&-Gv<4st?MQ>oMZ6EZ5oVEws1N5s7qv&b% zo9#B`bs6m*$eq%t8{Z&ggW&P~==JW3T|-&ZbvsMhY~{359Il+SiI0DOtxZ`JNj;6G zztu4%yiD&Boss_l@|=&MC%re(^SJm)c{DMYSjRrnqmj-yts-<~xaw=IUMsm{ON(Ix zK^ed&7~D^jkMB%$XF|c#>31BrGG48~XvXqFq<+}#ljPH@u%PH$TOC70(=`cWhFhye z^zGMwD!`IS@=rgdG0}H=U8cL#H#&ieG@9iVB5}EyvT=dun$>ilKxujgw>|taOF3-% zDIv&=N(_EX}UMW zcqYGhh7o4T>I6O5_Tsu=nj~Vo@Nu4MHN-k*r4Wwl-W!$9<=&yW`ew7n;f`z68Y?eI zYC6PLS1)s@UPm%StMiD#`AHv9&+AvcF?sxdRkfBqKJwT2ep*v>n{v#!P`}*PuIflI zE&Wik9?B0tx3xWLn@|k%j!j+cW1MuZiPdm*9;s_{7`4+Ry8Khft(KUR!m0N6u9FVo z9@fDb_N5X5l0e(1<2&oytNsKdK*E9UtMM^XIqgWknVC&vFA87p}vaIAy>K) zl>~C&U>_an>7)ZBe~Q9&<<$QG?GA%%_a2q0k)@V4JTV=<^{KRMU}k9WNXR5{=bAEO zX7u*wjzwo}y01rjJISr?H;WOva7Ho5kMUXkFR67n_3cvQQc)d*D;%gh_Z%qtoQ^%| z({yibIJ%H2!sC)hnw&VnB;<37#(X}xy1#qt>#LQPNu(G>IS~LjUu^s0w#c9X{akZE zYD*8j6dbu;apcep@_#RWDcG0gI62K{lThhD-l%PV_q*8R1vh~`_LNXV}MWit)Idz{{ZcTTTTz8`560FF^I?{oO!K} z!X2Zmp!tX`3yk|$FIU-JeII;%Gty|4vgzP6L$8OoIpGM?LR5&0h9Q))} zv3G99?tQD~@Oyn*Q-nz`T;N-KnBaBuHP3awG!>?x4ZKt%4Pq+lm#Sf_`ISP5kNBmyiDX}r1*7ZF? z>{@j_mF>D)CHbjCWFvM+Z*qD1Ri>xXI+UGtW7IZ!sf2nThr3;)ETfUPz{&6Z zsj*~cX1CS)ldL5+_m?qArRrN$sl;(asxqhCw?6dNh0_C6>P>dST|(|S?6owxj2G_2 zWS_r)YjGDwTSwKCYti4_#Ix;bEm5%>$Umt2{i*AIkhHhzskOm#aLqT(=x_NwPx!vP zcs;qM%o&f0_Q@sZO~H8@wX5nNNtVVwUOBAhs~yiybmq0DEy0sawAA4sv+`q;49HK@ zis}7h(smt7WVW~A+$(O4)v(bY_>}w8mR&KVL81Qu343(7wkiaoQM|}Umd|s=O&rIo z?5woTg4A#9+6PPJL&sXO0Nmq+cpYgOBBsMlt)G}jioeY`W=AXyz#1%Mc*V;p)ZtfbL8BH`8{16}%3h8PM*IOpnW z*Q&bLTDI#3u+p!i^n~dUhC6c3J8n56p67vG7yXgZrPD1m>1<v^u?jGgLtgd^YftgB1`MREY$OB`*qHZXgEisOscJwUxLCaSuU%Pf~h5DaUz00*$~ zlaDpgZS~aBx^~jzTf9_nMNG1=##oMfQt_*+>&tkqBlPD*bYX^EWFNg(Yx*^Yt)@1c zs9QxPu~ML54V+*R>_>`?2DxQ%rVU*#Q}8aEgoNN@-PU~yLwbxx4i3v8G0Ni&kDirL&pam7oks|3>QqJX3O6=8b4PDDojVmJe406%(o#twamlE!dMFfw z^NjWT!CW%|-DljWKsr9CnV}EU@&Sewa4j7>dpzsMN z+|_2Q*SdC}Jk}Rab8j&DT4alQ#6RLR%{w+tGQz|^1IN~lBZ-i?<(OoWJk}=1seLY! zeOFSrH!&-*ieVPlnKxluXV*BP=^uzTI>xU0Q$szp%DFrVSRTqp+r4C;Pu8rugHyQa z4Qb~Vvk^VCg8;i^?H&zkaZPsuVvUGa+(7X_lSFq9DmnKRoajw-?0Hw@h z`Xn92Tsm9L3&pvR2KksAWP2Z_X9|AO;?D9d*0ZGRxT$4qjl;OA-m%nU(X;CL8DYQL zmnb<^&j&xPYoyiZ!_87_%Nak2%LMW)q%4tRl{~IM;2e8az?k;!Q<3(rdH&MbX&p;r z(%OEH=-bNykVu$N0Q!46_!TnI(RKZ5!$^r_ zxDG>pRgDW0M>NMxYnpdbL@(^5oW~I;V`bU{eSPbl>m6Zhru;^>8kw1GqO#iG)T-}> z>^$R))`M2+Xtez!c8vsb-Cf4l5JIcBDh5lD$Ng#2n%UgWsNoVkll0=2DdFF-WyIp3}OqM{iObmdq!r^_%^Pd%NX4t-(f#a60UGWm%LcY~) z^kHjo*Vj9YbELdPNjzZvL95bhs5Rs-0c9g3WFPTVmUlL~^`-rc_N>uF{*1+j71(ld z_BGFXR{sEA>8ty$p43^dqx?CR$=mv{Gqaz)TXa`c+&@U_TE?kvsp!h={+Y>draWVT znw=)Lx|DX8No_3L>HSNktQRS71;pN( zpzg;wB>l1}I#XHHG~Tql>D_krTc>=>Z#14)8OPK9lyD{?62d!(p%OIAV^Yn+zm7bP z^@!0;?yb@q?01**gKadexm=CRyn&zic@)N{1md()8m zvMh0{Hw}T0p!(ofv@JT*sPFFY^(dL=vDDZ=LBPG_yOd+} z$RPbVs*6c(Y; z`;k*-zSiRFjXo)!7PGup3kcw#9OV6KpRHihtU4Ok(vm3@-=oPQFeXVmvvK>>?ORaW z*WEmA<1-}C608UGx1XT%Ne4&DV3UmFwrijDtg3qBr(0*JKbi`kYkS>CPN3B70a&HBh{~!vHsDzO@C8}eTtTmN zv)O16ywfvB9I{Hk?u&PP`5+3{=-M1Qc8MOBBYNr+YneGR6WpBp)Qu-j)AXBv!s)ij z3?Yj!QQe8;(|ZQGS46eYwEqCX+Ks7^mnW7!=C0L%J4R{}NUr{m%rJ63wK65#eq8P! z-lnPUapM%?;~ZpA70UeGvqYttf#U#HE!LozU=fZDXY5!T%|rhHy8-_I)vM{7f#7>q zcTYd}>;5zR(f%ui^6e-5Ul|a<0(*Od#RzxZw<8$$qEDL`;QI>kOgUf}5P0Ig+vWgn zQZU0H_OB>>mOr&DfRah?oY5}EDty%n!y9 zJ``j9uYe9giNXD=m#gfqzK^~>m+2_W+?}n48S(5zYKH#+REIlHk7HRLlR|G4hGiRD zkFgc7WmAE@c>sV-e9j&o-&e@h6u3KxKSNIXV2lHS{pqxbs2t-b??lQOc9+2Ld)HXw zOB`+UVUIuBm(u_PybyeQSC9}2?;{>*NcnNQx9?hrQLqm@P-Bu1jCiGuFb*4ned)J& zmUN7g8bUz8G@?n&p(Trtz8mK%85S9~G@PtC>vs+||8Y;~8J zY@?qFqCn3XQ0RLwKE|?LJ=GIxT`6&?YZ`skq)7;7G5{8R!?F(ePZ&H1h~^7mNw~QsA4H&X+M~+kvFu3_;u?n!#TK+*3^{eKx)pSUU%V2#(Da#hf#s`owNqdaHe^{z0^?3tp5Ng=Yfx5inNl+dVk^t#kINxvDU05a5nI=s5$=t*VNPfQPZ7OuJnbJ z^QEl1s!o>%C2fls9s%-dPK#sJTK(O{_OYkOrW;sZJ7|d{Nh7Wc6Yb)Yz`DtBB ztbIUKybl&ahEtuR>Ef$(Jp%p>bEf3E)Y{`%GJ0|>xTvPY&G5pKN%^~daaVmU(pvmoL#^D*_VC!> zESpuZS&7cm$6!3uI)6;{lo3OqPMUA47TXx z9tg^VO!cMZ-KL3kHHw#!BXf_L6$QY<&W3enCc5FX}%TcvOFohW`;DA9L5g>XWQPc^zVk( zFzNcXn?9lH+e*yVkdu(CMgyKc+)-Ne9;W>P27`U6l)i>hGP>lTEBRO70=Kdi3$5A_ z*4-@;RgkU6J5F!_^ZNe)ii9K8E%lD4((Tb5B*f_?;5vNVvXk%a_N$A(huW<7x;6CL z%e~F4g|1wW%%{SE+uQZ2snL49r&8Q%8cwjWSze6YLn`lOKln4p@7kJ}#p`39xmW_Suvv-iH93URUkJ7ERH}rli*{DTW&9bOq-!Se!y;&zfTtU`<#&oS_ zTPs=Lebu^-Sm!t%FnrYQL!uy?tf1Hb03UD+ONNU%8@mi~z~u2xsF$wOy1gV- zkv|i(#z%F*$f|#Y`hK4JHn*x9Xxj1ym2Kq5&lc1@y{mVjbbLBbQ);@(-peQBxd_LF z&wTrw=O0Ro@N-SIw9vH$C|RV`Cl9}paB))Hf{WqKmt(5fXZjIT?pH_ojk64{?|l1t#VzkupSwtt;^l}!$#48WYaC%lI}HAn(-^`6F69H>vfA%N z+3MQ#8kE;J3$Q4b9Q?8Y`A5>Gb=;#&>4`33kVP^8u`xZHvX8GdE!;)z)b`R_G-6dH zcsMkUk;!ZdjldCrMP~jP^yBFLGZNps%Xd4O<}7oDZy%*;6G;J z4ylvV%Sm-BF7w>&!2LW{bVcj@AJd*e9_7>($`Qr`oYz9J=uIx({`PS_+rf0j&9gpZ zMj(Pm??C?m5488`bm{iFoFYx@db#G!d+B9PHC z95iHk6|Si+t}wSZdV73E(j-TWm9mjolZ+NQ3Vx@+r#fa!DSjYzCAHkJGE3>kkh2Uh zBPDU?o-1z`NNA!<8J5N~EtF+qC&$$Lo;&?%n?8%tW7iv77Aqa=a?B6QyPumL^=*vf z5!5<+NAX`rz*3h9y}?|MkP)|^7{wo1$qkpp%OiO&;cV_Kj3L(mcjOb}?^?Z90)8Oq z()udEK{(t&z{OZ~FHLEENeaWNS&23s$iU%;&>v$?uNty2KM^F;A-3tS!}hm>N&|6_X8D@ z>2w!g54wWR-brGT7H4$;{SGtw8tv9PRi2-xM`K|GaKfA_DBx6UJtE6bf=KOc3nq4Z| zD|0Irkf7w8_WOM*)#As@ugBV_E2!lk6*YTR>Mb=KJc80?mC8g(QIK(;emi|?UY#wk zSoIVdy^W;M+uB6&ej(EWMk5%<&>GSFK+`YvD{Ba>Vh#o5a!x}9I|1+Ytqc}6upnjke60Qss6gPUguwTkTz@B#UVRHTj2sdMAe6 zA^}xUFC&VxmQl@mi$K-p>K>fBw1qPQF}Q4GCv5ZQIUrO|;2l?|bxqtFOo*Co`}*%I zE`2lDf%UGZF!>J8l;b_Az4HG6Tx0>lljf&g#gV~pdV8!oe_M-Bw!U?W_T;2yIWg*F z;IRYg$*!lY^o6Fc({X6hh5{8HoZZS1=j)ocdpDm_hVn&FVm~H)XV2cP!mtN-W9?S+ z+-DhN=&c$}4%0@sgVnaQV8{viw-4&i)~%$nwHFc2Ez{PGc1GlKs(XP`Bb{DCHXXaL z4I_WNC%V>csu&qrod-CikqU=l*m*uFAjy=nE;01)MR}940mmaCc{QovGb1VS*k=^d ztd3WA#aC_#C-tL3Ll7Ia3b0&&OBy5U51#;oUv2WlNj=*qJb9+k zNf{&q+wDn}!u=@XbDfLN-k}7nLB;_+vHrEQ_)Y=p`(j;B$ozhljpPMwxgg_%TJMD? z9c5{|e+c;ieJh`wFdPOm^vIogt{{TwZ@KQ+0Wl{C>PBeLJXFuRmNCNO% z`_rH%><0b(&=RKABtIFUWL{r@K*2eqRz?I7+xyZ<61;fvql%b*||;invv= zX-QN4L6aZn6>S>DDtd~cay*V|XQweYpafIQEO7^&Q{Coo}Y@ zbf>eDGbiDRafw*I-~jgVpEWuw9e+^xU8q>DgZw_~Yky8Sy+OKG;4>d=`>rj5`p5_5+&Jg8 zb8djwx{}jYpQbvC`PT^t;vkrET}O|v(w)(I(l_bNe_gcEgmc3CpFDb`U;@Y74&t+T zwXU4)1S5EUJ%^fUp59JI2+nw}U3b^|nrRw-u(~I!r^&EgOwEp4x}fv-?Vc&lu<8wO z{ixHdbj=!Dt8|E62$o%~DInuMz(xtgy=|l^z_J-X|#NPmo0pnV{*q zWQ}qa=DCw9)6V7Gk8Ga9?OC3x>wAqR$U$ddWz>?_@iRuvgn>BDt zH+}Ga)w_MtLh_9Ezy_u!rQ4X~Ur~V==N0SQsNl9~+m($=Hap-9*DupL!>IK;&0kBk zf(R~ct~Nv+30Ttymm|M#?@_F}=URuLF11;$qMleKF)YN4KR72JO!lpG*lVU}8iYE& zkq)LUq_-2PWl29U6;rGAWtFDC9)yt@FXk!{SnX1z6T3dc2Q@>kbq2jZO=;S8jMmU= zSJuW{56X%LeqX;~?O7Yh`iASR`hE>c<~TJcu*<)uH%Id~f37{MPCsL`*h=kc9_>;o zNjXJ3htrxXdz*PAk|>!OhYS>gSMDp5>0L$C78knam#JL(+KrM)JZ%2}kgnba2aI6; zwJ+ev>IsYLS5IvAH$~-;{{ZotO!@xw>9QK=TIHpc-LQ)81eP+{*ytDo@6AAfv!z^``GHZS98nw#I<+Yn$~huhkaaL84qO)KSH#x25oKirx~`{te-mp@rvCs8m(wsdM*je2By-6JkF{CHOI?1(_R~=hac6dt z#u%;_`bh2%wL4)Iy_nf=3L+OS;dsj)NvzkyJ1FdQA5>i}{MU|VmUKTUbKf7@q5MbI z?{xdEGfT98!nUO;iKkV|C>`l!b_3ymXFy`K8lP`CAMUS3FGK4tdz{?zGJ zde$#qf7r zSrL?eK1IRg{&9-d>it7CwvnLd+H|RJCGD`x=NX9>F_JUJY0>nW=q)B=1$Z8F?M+8K zFT4i?4&2u<()F!g-=JZN^5LVsySel&u4Tz$3w~ptZgEj`eKPw)>v$}eTf4f3>KC4L zl;n~RAM4FJX|9be?ecY1y^>j@Y0YfXOEEl$2LVM|ejzeg4WsK^%T3nx5BBF;w7R-2 zZ)XNj0l@bAcc^_Oaj9JBy+?08uQQ0|XBQDM+`lezGxZ&*(rc%gadt|#$lfW)r-Vu( zVmA}Q`8C5Yr`DnPXKQ$xq{SBCf;aPx#0;L~WM>s)uAOGvTj}jlwH7zGGK4Xy#7n^c z03h?9YS&4w*E=FboB%u26DxYF>9*uO>zL{CYu3LNA+n!YlHxnKT1jIkg%~&(><72r zx@E*tE0GI?IKa;%IQFdGIBNwrUFuDBsl_0Mz-jWbaSKdN4|9S59lX~1()ZNUt?ngs zc63(A7zB}?)yeus*H_gwo~6_5uk8{402B@6l3VUs4oJd}9D;eNx~83buiteZmbcTZ zSj3-)NZ$**4#W=m#V;7Ly1aLAEG5j!6mi^eXePdeWf2)rGYpPLHO2MYi`_%vu9c`H zuLRcDXtKtHfW#fYHa?<{UDNJtdgDg8hW=K#ykaEv2`ae2b|S+OXx{? zqHFf|SJy@>c_eHh+F8KPtLpKzIxd5BW}4dW+_c4cc@92MK>G7egmjaw z>5Zqwe--QvZz05TZWv>nV0~%JZBEMOT>J z`~505$fF@QDQoMy6UEo1qLhXb7U=8E_*Od&ZZjCiK)3^Fr=A#a%D zCyH`+ia0XNr`mzHDj^$=Tg4r@B}c1(4`G^Nqz2Rya9X_bNAgF3!ROyJ56TgwDh@I^ zG{Qp~#@Joi|RFDo02dtQw@q)l`E7y`pPy)Ca=M;7`WgB21 z1HrFc$tDO9gU`J%1QWc8AlvzS6YWSt;dh(^#wj8*VX}Z@_oY!ORaRk>R{@-f0NsW5 zry~ptZf@hSsNyLWL6^@Rv*MhNXxD;BZ1$uL%jlrQM;P!s*OpZEc9m@TG_wU6kbn=q zDROuUzBx5=27|jap>ml#A2d%BIupAM;*vXdGqXDoPmiT7&M*Ndlisg@$%!!An{guq z`KKZnP^g##+PxWs5(W+j7^P`|Oo92hpmGduImtfM@{^pAP~n-Pl*cAF5$#h4{6>JY zxEy(-9@)-mv)c>ajAM@{&MO?TxbyM5cjhO6DuUSm0PQ>RpXy)nSMm>-eN9k8qxh}y zQ=gfC!LA=J)7tpBcFO~}cP5nxC$Zzeq8qqU{f2Aap5@48%cD9{}4 zKKT{pQZtgh`_k^+CeQ%)`cw|8w%m?K*0w(hNB;n37R{gNanGJB9lzznaqWuM{4Qo* zvbNqBuE)pMiskEnU3K(*@$~0RqMMwNv~4E?+ak4=<~3}B#C>Zi(wj`HwQvfMf3<3( zKIzBM*UjMe`o2y{wT9p`kEJyfQT{4fqluKVjqU#c7CTdgxW?1=qC(xa5Ix0rk~ryi z59(YI*wU#PJRE(y(K}>;w+GE3BOfkLdZ-d(Cuvd*Ir0DpazA=S#^B*|_Y_f_G5JnC z^Filmm3>BF1$kZx6Zvkf{{YhMUS$k~Y$u|pmNaRL7T$^fa{Eb0j0oP7;78wE_9 zl0E2WcUI+3e|%Rd>h7%SrntV;dU`uX(X_`YD?cqWc07If7^;r4lCJEWccW=yNWlyU z&N6&gHoMm~lO_YnYIFxh1Xu_B%hi-Gdzxf!lY(wd6tH(Kq@{m!X( zso2`PqFzK0m_f)O`N^hjuQfP&*F(9FTe_F{y}zw@ZznO37dS!V+v!ZYI^_3JWOf!_`4jGw+UPMh+&=-o@L>aBZiqi9!pY+9j& zPcgUzsTs!6_0E4vy|L693tKpKOPgVRAOV6LE513}4>is;{VLAu;zvy*L%JDkj`bM@ zwhxr#bCFT>*ls$bt8XR}dJ<`Rv1OT?ERm{!B31T0@|c7uMwENyanV zCnxPo&{|hdf3;fTTw6dc?R66*8`})ICje)^H0d?hqLMQhDo;G(jLEkogS*97K`RJ| z^qG0?d(_yO{UUbcd*YaBo3nwO0#9n={{R;5Z0-8brf;U50>dE^f`2H_b6tYz9#p_m z4`G8@E~n^iCtGQ5FKwJ$mBLDmf~0nNaU;40r5I-ry6E2hez|^-l+PJnAc(bMUvPEF&R^mL2$R6aY!9eM?xhv-r8C%X4s$ zTMa*8XFL+w!25GriT)mGZ=l-g5iF6zZz8y0a>pN-AHGjC{-x1cUG}?atWRcrdsW&O zKPv(GLk~EsvoK)w3;RX*sjXUSx`Be$cp@8kQ0f3anPb~O-lug%_L_RrrEOq@rL<02 z001W50^=Stir94*LFk=Dc@^%VZo*_EcE;Kq4-M~6>RlV9UF$l2qbjQ;kJElfa5xoPFG^{%by{@N#W6n@;>YN@FK$jyduvoqu1`=lHLsY7olL3AYg= z`b7GLiNFKDp7qo9Ee6L@vP&D76|L8A^2Y#Rm4R?A->XdIBqpNap~L2c8^3J zcMSK(`qp})<1f@OJl$2VYY`2x+v%p>LK7f4Q=Wc<6))g+sTI#oNox_2=C@*HVwf3X zaf9ZzdIpFz{TxFyVdR;2T<-a|yAPP3dX=N;R$62R-ony2BP6jzTnziumnoQIFY#yA zZ-+Xo6(zQe5&rWcf9Ddbbcc|Fg z^@gK9wWmzCGLsr3!I3B!8DDaEuD^4l+FagSUe9ilPi>z?h~+XcaY1pW+DUgE)y1q& zEFq4>_%2sE8i-qELzPq{Kl zxB5pP@~)UR2$_k%&omb{u`~@F@y5}(+RDR*&x2W?rfnr2eQO3{)2hyNh zX%?0e&o#BIaXrKDkgDTzcFC*NBIAW7sUvg-rDZLR!!qCcw*(>BxjnPsk7~c`E4H?e zrMi>&aeGMM*zroE01!Do+3arEM~*BPJHPMK|U(= znC6$Z_n-E)VJ)qLHrrrkl<*W_jQxJq&}R-|GKFT4DPX=&)9+Jfi2{7M%N%y1HtHXY zfsRINHpe^5U)7SJMehLIeVz;y)D(c?cDke-Oatz zuF2p`sUz~|_pY*iC|JnFRgZtJJ5U9VU`jvAPo8Pjx5Bv>QrK%Y-waSCi_dd;dM+kJ z?+Ionx8IMYA5%fp3Dli6aGEsN1y^w>*~1^q3G6UAuA_2-84-9rh6PAymBtG&9^lpL zOmktT>k@oV{9-^-$t?hqS6&Dsu*aN=s_9)LsO_wJqjiI;%3YxkCOt-M_V@Wudg*ss z712p2vDG9O48WgJcOd|t2>Op|vbVQW6lpfy;eKGJ<^4XC>M_Y;(E5&FgyO-p=>_PK zLPQVDQ+9GkgU@Pvs(N3jEwoJ=RnwvqYL+pCnIjyL^!LSehk*wwa(%@Sg<3tksn%na z^`4v6cbyxg0TfSr>xa0GB>b7}`VTbcQ0X`Fb*E2XMG{FWsP)_eRWaY6*NW)q!|@)D zc^$sg_>KbXK2;;P6*`P_yJhP~>x+q=+5ka8zP=XIV__cFl&TawhF^B~{i}U4Ln4)M zGvL&MA;t~~&3Tmw+*DHCOh&bi6gbEkG_$NN306`<`*^3xL6L>;z@%`Q0P#~ysM{de zxCdzO%^D<3`?d^@+*GK{WBPcd$__%FYWBks*qw*dToFt~AW}h#<|CdeS-Fd8*?@e5 z*ibaer7}M^znY2$b&b(NoHrR!#SwU_JW^YA1CiNN=h3tF;dP9BnPX2BS1_w55pypMFh9EzaCy1DXJ# z!Dj9`6#}-w8Qp>8Q8bE?jB}pY6vkBG6P%uC%VKT2d~ur8&@Ar6mOE66jkl1__!<15 z(aju|Qa36sOc@CvFCN{V7raTq|ea6#}Rd zT`PMEn51nAIQKN!+aKi|)PT7_NdlAk+MkU(Z~`+y`d z?d?w%lW*nz)GExSh*8P$PBWdj13XuGTqu#`Vfg_1;=G~qSn*6Lm|%UGPaA z>W6o?Y<|AX!H0Umkl?Og(6iFyi@^SUA7jvJJ@5KzJMghZp z*TJMOCegt-^FbFy)RRr1-Ym8Wbz^K}n7R|Rk%Clt;+@gDOH_lWwOulO zN9Ve^X`;A98<(*JjAO-aK6CQ2@CO*LG2q9_@lEV7V@8un({FXYuDg!%;cWi^4J44n zLi%5CBo=L8+sP2x<*rAkTl_R}W z^rn*ykd|mH;kNZo<_Ljz9{kkmT!=b1M#rV=SDK1nT+0>A#&ovMBaP2?!0-Ch0r2xp zT`N`6(rbl_Q@Hh04mnWmR#dF_Mr|5#z7v&lBNbI zm~uHDKD6$_G64J14cH0}0PV$iw1`ODIX&v?fy=1*jxp^`Tr>Ju#&QlT*M6s_20%Hg z>*(i>0Lw5y8{dQUtnx#{DF8AMG2G^yj&49#1d)&`0*Njl*yMwe^A6Mz#L-=yr-fXD znwVUb}W-lq7S4 zYB7pcEMq%9mFF%#Qb_ZGRVrjs=j9*_3{uCmZb8mYd8PZ-xL{7{rn zA@xG1$~SXTAl)Fy?mN^yj;c#woSJ^fGTZJX<24svzG(w4)3=^UremH#Ce|hdMlXan%M7PFjVKr$I_KS z1@R#?#F5E|+W6;+VQOCIJ97Q`7!>48Wnw#H_NaF7$$)odvB%P+S%w>sw4025pAuSxG$N_K(6>!s*&;dS6{P^=zWQmHl*TWIp+N*$J za#%_*MM@~@`B!Wo6vIPvA%kFM$tMe+CY+6q+{oBqGmiBF`4!y}n6XyfoYTBCxzCIm zS%#dBMrn(%8&qfSM$XN&?!YICWdbhZK_H&lp_c&$!NG47B{)KvLbezmtsH8RJ{R_= z+?hiVPta1P!#j5Yd}kcg!%qCA+DQOAdt^}-+@Po!CWGYkolndO{Y5G~W(7Xo#(Pr@ zI{_v$&u-?^sWXbSre1Dd9b(?q)6l31=-)QFu?%Qi?ohAO@rgp%wuQ5FCYaoVRV z7mSjP$M056O$EyXI;s+@-y~5qtYTCHDqH6F?Mz%uS7|XG-YChGKnRTEIr`RdOK?y~ zgdPWKMqtIu9EBLhE6mcCPvsGwew32rD{O?e;(75^4%tkm46ryHQCv7Mor5P7&ALh= z!=Me~ibQ`yY7MS@<1{poF6&+DqdrKaDoIvg4hQ8l+;CioeK%o~?M?_J#|(4Zni^Ik z(!j{^MBUI}@#NQ)7_J+FDIDMugt7KLsA-mvi>3hQA4+vdX(BgqsB4o(5jwkia)L9pO`e$=TP zySGS3u;P^(ML)|JB-fan9nM8-LPSzB+lcSjpUq7w?)ibq?}~J+N~%k4ILOTlzA!>$ z_#SB}1G%5mRmMKF=#5m8&zw^VS~*XsaNkbU8?%WCCntbvfGlLW2%DG!Tj^F&0wWvF zNvH&>ToAt~10;cpuv~2X#PQ@%Dv1f51w9`;e|qO`wFCpJ zWC64<>P=B1=TN@SearqUMVu;y`N=);gW8oJB=MgW_Mb4+V{-xy;tzb$IXi&ljQqUPN3=H3py%F|pC<%! zjw+DjImp5JardoP!*|D7SUABSHv^yPHIv!71_8%oiq(8KD<4>Cg$H>E$NvCC*FRU; zU40+ykEgmpkKzIRpsDUY)vi+Iv!A77dQWrS6<;WFbM~!-alZh_4m=JsUo(VzeP1UO zFl?T~-krpm-I32~@}!$vEHmSZ5$l2hq4W+_F(Rx_)c)$zqlb&mz^xQIf9U~^24Z3P8D#s&6 zz{t^&nRkx>RNWqHCbjs1YkU*cStNH}<&Xjtf%nBQY<2#ywA8wPSc^c`A%^nO1%Fw= z*r1Wv`S#DfAH(jexzyxsCswy>O;!linkPB+Q~otRYc39rseB>48e~Z=)K)OXbLmDy z23!yc$Bt@uPF`7cFIDI_+Aj7weYA;rD>T5IK>UxCs-9K$& zNHES(y<&Uda!xUdlkm4w)wM2|x4YF0rd`ajyqq+#gYvN+{{Zr)J}ot@T`tL~*<9Yo zB&q_!+el&LpC9X1eGOG>mKK36ygocmEtUPk+HrM_y3J+zZqvN6hoj8yQh z1~-rof$dCOdU3D@O`|>YS-u1att3P$9EAh7wR$L}H$|iXC>&JT6;dUUrW;R^d*+ly zl|MKnb{(oy2srB`&B1DYtTzwaXCu_puoN%Kxi3G@I5X)VqUa5GM@Tq@+` z{{V^-7?RzC;7Bpzg(5LPs9(AAc{JtQ8201l&uSQUOb+vhUk0YA79wr_eq5aQ?Li&C z{o!SKNyiGl2fyo5V@wUX29@GMM^$7$IEJ*KCEvJptfWVdG6{J@~ zY!))ven%PZD8C8~sYYfX`Oh_Eu?9Zg*`O%9aLfn%RW)%5ZB+b#kQal^KW%D}M#I!B zTasAji2SFFVD>a}I)HL8J;%KaGwB5RcLSdl99pNXyVi1`{$FaAiOI)_EOQL#9} zk~5suxR{AFak-51ob&djyJ+pS`DL7xiZw3H-~r7@mL$qx<7gz-Gp(=Un^n>D55$O} zzGn4E{{Z3vk+=8f^r46JJ$mN<08oh=MYx92;O5@okV7srk_Y9jSI@Xer;Kn2_O4;j zTE*^-sL#>2^5){z&*53SJbm%|S5t#=g+rXSKGk`7F&0wH1MO1VazzAZW;UlMikK8T zI=0mVu>-v~uqu_v$o~KpEi%2e6do6I9Gdr~9GOy29@KXe!*6WUOMsE7EXVpltUpEd z2DPd6HTAvvLYAz8Yk1Ur$b{f$>PI9WdW+KAv||O76N+XG`i;4H_cfN)Z}lBdt6xX6 zyO!GC(lGZe76VA^I9&Jxy+YEp%}+ztFLVuJ`}m6sR=Ty6E+k+xlwt41SrW$Oql|`J z_wQZ_8bVYrIK^_UKHK7q%Z{dS9M)Hxw3~^&Cqy_ddF1D~{{WS2 zWl|l_MpgM;G$rNvf1##buQGx;-t^n?7xvrPe41{od(?E!QFUY?!~fK*$UNZoH1Za82X`aC(t)$Q zDLKJB0g6TSaq|}TqP~(;jK1YNex0hlWVn)bb(?DAED!osF+8NK7Ys-GQjzJ6@;JwD ztuqA72`<~zo(&9%>)c_^6-G}qFyl({3?Sp`X|xivFyMjBJvP|j+@N509`w`>+qV?} z9>d;-9k%{qV9SCBznXk0LISTI^vr5R?BIO8hti}8nb>^14#t6r_Y4rV2 zb3iv1Q?%f7oYU&6Asc?x;U4^)m0VSxL`ICsJ25x`bH#c%&Hz)K9u6u}BCibOl0B(p z2^kJQFc~0LgCZu9Z&8$EJZH^HU=?|8dvQ^gUDAMf&lLUxU<~kcLF;%zJdNfa^(6UgGZ*5T8_bKKT;KbrplQ2zk#i~j(WR)FX7`7(ANNd^kV-ndyf+kElog094jX{_C&M+woux+68pL!MER2FP!`&YKIl>zd6A8MgpCC2F6 z=IzZE3l3d#O&?QvEW;S?JLZj`a-#(Os3P18iAe}Y>*lk%PeEyxQeI0SGf8yLa(gIZ zNx=8#ztXjvTW&Wf&yO|lOON-2k(g7M(9) zZ+Cxt6ie&h;7-lX;5=fjqf!^;lnkE#09tTv83FnJs*_b+qv%!|J%dR zjtxAKg}@vE+XISh2Ek)19Dhv{oG8P5+3lJ@=|T@OeYvKjft(%EapclMQIKW+^h9AB zYZLzfhJj3b0;;ehwsA`E^SCw#(9%S^fGpr2y?M~=+*|F6oxm8Fa$kxykB%icp^&2S zo=q4E!z(U%!J(#SM%r0r=NJ`RdVwI!ti<>unzWfC^v-!F-m3Kj1a^f_{??bn3nud{g9~q^*NW?^j+Dh_$>pHJ`k~{%h0JnM;ZHQH6 zB%U#vb|j42u2^m#wKW`3%B(>g9E0ykP=e88VB7P!jPv51xq!(csgU4vjQ63CA=(QZ zj(I#&j~siLme04nMK>f-m`aB{{{U(jVlkB4ChTOBnm7>}Gr7qGa0#QrEx8#d-|a;J z0i%!!807KIEDQY2jOCT(vQA4LIH9FeEOH6_(G=3|l1y)>pT!-Z(`k3J~NK-(iK z!!5>p3YZ-Z#qzv?#}(+A8Q*qD@+eCutPy(=&lE5e6SC(A+|`iM-I7~uc~u?hv>Tzw zkdA+9dgQ7hI4$mJyGbHw2`bC&Lr-K$8Hl&d;MOPNFHcsw20uOmLj&NxyrRNX;&tJ~N-+BTNAcLG2jHhsS*oO_zr8xOG^YyO9E3|5ll5%le^P&1y`8uCd>I?ljY`+yJ@@@AAE0gmvHPMKb zogoq64(A+F*c6!r%B$FpX^WU#5=&sackVdtQlwW)i1<6dwI$!GSVkl6dydtbIj_Zy zept0T4Gh`ePi(txE|%R7^Hd*|bN2+Fdd*w)WU1Eox|ODjepW~oe-9iaI+2f=hl=j8 zL^keWxv|MLiPQRjS+)3&tLx7kUx2xVQZlcWBms^*cCAY>R8HEalhuQ%THfI4nxqJq z3mTR%$$^4Le0_ej#zptWTQsv*WwC|75bQC@9R2%&T3)KM>r2&=%T4|pXKOFWE*Y4} z0q@2z4M^xtlN~>AA-K7FtI0?*+(>yma0Yt{Z*5E`T3t;f@n}|)7;bO!fxzVueslEW zkJhU?;V(2!lp1c6XK^u=L<7)LSyvpMK7${vT@424^}A~=aIFlM;xA2?B|rI3f$vv> zDH>twCfs?=XTac>n%2I0heS!VJBUu7^IF-eoXQ7&{=&N7L~6I%+!N2J>9T3^tff6C z&OjK!=kHDHJt*3TSlDZ3>R6t{e2R)Tw>jrEsE#=djkR!maay)j#CaM0Cm%O_3Q1r% ziE+U_s!SI0w(%!&bBxdkE{x>l0H=dlL`Js)@j|HD2R!zt$e+|Ms=qIscc<3gQ*<4i z1q;ZhE+KLWiPa7<_n}a|7Z9s4B!ikG9E~Eja0vIO+S*O3Jz|~-6x{ZXE>VP=qbC^8 zZ>=wI*dOscW{MkF<5mUF%z4T0Kx#Kdy}{&U zsi-o?3A|R^Fny}X#U4m3MmzIN7$EgGFLB2Lh8cL1DyaT(ick_K+?jf^+@(M#Y2;#^ z4BJZ>$3FDjGrKWegKm4{y?cAPBexvn(N2KO6n^wF1>c4|p48liMTmM>3skKmFrFWlPAV#V{;+F0YDt)nz@g-W=6kNM6B_w7M7 z%xjJ^SZ1Nz&28x+ns9)~6C~p&IK_I;P}A;Zyo%mbmP=;aARO&D?OO7Q4${c!ux$tY zRg6R;giuNMs(UR$&hG8)?dMl$NDddlJ}TJ`PCz8%6)-pmE;n=Hom}uKl=)l=cyibS zip^>aV5Z=}k9y7{<5<*}BfMw#2CwEjqF^}3ip*O!{-D*Ao)^;p00y~VF4Nlh!Zju$ zI~rZYU<~6wtt$paC`Ma@noYa1-HG~F+I+*NRSXYt+>d$&DrEUjoSGc51ck}>9w?Bo zmA(9$2MZCj?P1S)(|kQFI=@RG?qNPh)L_9)t`!!$~oz!%N2=aBH8b z?XI4ex;~@nHAcGy8ykaQdv>jiV1>r-vF%u{kXIMOBX0+c{i|lE#k0HR9xLW?d;MQ0 z)+2xmkAJlzMt1ziG-{b#?&lQRDoD%&9It%#uF=4rm@WCoJW}mu9I!qs%L?O`&N=%~ z`ego7=eVkZ2mr$$+J|93)=$?oy9$yRljk|_K>QF@jt^l}7>SzzzB!`U>+%LC^`TTG zWnAQUpMH2_NJmJGrWza1$x- z-l_~NU@6<@igrLfJmd5g832w_rH(VeH1*8U!x@%GMMW8G@BsSLaicOryA6-m&3Y(y zk_z#j^%f0#OuR`R+V%;OFV0cH3HSD((>1*}QpB~T3Wj2^~>m5k?b`ixX1)h#Bxk_fI5?h$u1VC8`A&$T-YvN$g2c;H}BPo~nO zs*lo~ZNY&6+i!Qcgi6=eIPn5V=ysc+D{Kn4xk&0CSp& z9q5uHApx*)jCY`)6x&atl4OC8(wr|RD#JYcRcf+{gzBI(e7POSpod(vlU1Khj^!HW z6$VrXJ2UT8TDqA~sr@Q9p*I1XkPkV>H6}3RXFPY$iY%NS#)biLv5j zsMBSMJ2ZRHjW1k@0yRR-`VNfbKTy`64@|IAFpA7PUy9k zNY+2jGw<(ECN7F+XCvq-borpYvP+26lyC~rx1K5L6;omsDRcw0?dNt)s&n3-nDC-iRLrxn(%wVgiWM@>~F zL2Bu|IX#72#}zSeqAutLcH=x#b5A22uL=!hwH~t5EVK#rh?ja9j7X<~Tm3lZq3ZpA z1+Ctn0$a1&+!PORKOilSm+mvoGL5WHbLtcWj^O#GZsndENu_ohR2C;4fUKRrU732y zEgV_OmiGXx6A(uHe^=XsTN$unlY3=IImeMxF*Tg&4y0{nJIkezehv?ja;JL`GoSiX zeNkz1spt*-JA2FPM>nJ}3#d8U&-(jT3#YC1>#mj58(EQd--m|OtzYt zF$QioF94J0y-b?sPa$@cRb5od9X@q^wcnxyNkNH~mdBbj6&u>R`Rq2upmO z!7?+G`c)0Cs@Jd8nkCe#m%gN?Fh?w;5EOg%AH7m)dboa$(6vb|Op9SKigd;?fsAol z&x}IN2D3LpK{fMClG|!Ih5X71#^QYPPAhM%>5^!=P37jBaUHW>NUwD~r}8c^#CwzD z>S`xeU0!t`KtnEvq|5&KBuJb9%^LpzP#=FD)nlaSv+7!9q3Q`ap-;)IT*A`{{ZhDPxU|4_s5PaUZ(&a)m4FP6v*W6Y<<0|))Hk^ z-NJ#Kd(`p?ixk=k4o3uYih3~0Fk3Cy{b)Ax#|Js$ofsHmc?Pj#P?8MB+lO;mEj0fC z_Dideq+j6Hwq8&YYaykR{?zLK0RFH18smJsPiy2Tn6d&Capc#daNi+N4}wQ}2w4n7 zwsY<~*RVr=bB_lV_Mb3HK47~r^ay}E2P9|fUfuI#vm753;b78axj8ro-k?&ON|H$W zjx$<5fMVTYr8j(B0iR)mS-Cr?E=^zbFlFl9FO<*X2m021Pi=Jc{n7OgNvw@_tLzES zw`$s?d;-#B=fz^WMYr6BljJ`(D`yX;INUw^*UaGedcIDeUEH@mGeiQb515RKZ5lpK z+~dtDDnJ{CNUp9l<`idf<2;{gR`iQ=CVr>I0^swwV-(l&%MrNa6{I-v#!f?$2R!jn z?He(VQP(*mj@a=_b0lu57&r`emj3`s_ws=X2mmes6b=oa(#w)@-jY>C;65@v=@~}c zul~NY+c5OTNZdH!c%U;Bd}YT2f(~;=ix}+x07{1P1@DY@GOh-6T`Wlwd)k6Q1Q)8K}CE9vL`ow5^S!<^$fNej+2r1~XKhYt)UP zX{mD#pC$bAxJaOc5?uHF0IQm7N}EmqU`7sU2n3Cz1aX?gFIeBrX<(_Kyc(VBHrrX| z3$+)!6OZj$n_IV%+B0;5Ifxs=wm?5a=Cd_+EEynS^J64=p&}3$0D)N^y6U@~8LTaI z{X6kjchY)LvpyqM&!07-v6kXpIx~ETmjyv}M%4!sl7|8d-tGN@4o&T>Hu`_ja#C^$GHnkiB!N|;3Z6z!J-z*_1AWmt{-fwzbq7+Vu)Hf8!SgZP-_i$a={mPhCZThuUP~iI_&JQb z2O>g7IjWzC`VGHQHLj0qsy&XOXrGFRfJn#X$@Aaan$%W4^PzgLK_XofTWg@dGQ_M< z{eU?Jul^i$WyYbZO`>%Q7&SN-BXaC#$GG<&PW5H*#PtBOxwGmEgq`g!3=vKaeLx31 z`Szge{TUZj(lz^Nm+^Mj(M6DXc-Y{B?~LY_?`kw!?z5%nT11+Lqd0~oIL~s#_yftH z({=4LQqqOypDIriU=qF$vG%N(w$nPZtTlZ1Gmp9CX0x5Uji#~IL)K|Ddy5O0tq5C)%aw9T$I5#gR?^m7 zt1(;#$L8)cTq;?{oeqW6Qgn1e)T*rVBVb7-8W4cKRUO8k8|R(dQHu~x$#q7 zO={gI{1SRL@w(+zGlR$1{c4w`Ei}pcYpr5fl1pz@+8{eF&`N&QdV40vO}^`W4&Owx z)~C2+x4R?L0dmCo%bm=9!l~p0tWVm@j z{%nW9`VV~ft1gw(E%iR3>YYCM0|uMYkw5U9kbm{&vnFn%X=ATxb7)6ZX~J3&yt8lz z)9+Q5eRHPEp=x@(aZC7XCkeNJ&zxX^`qw4!NRBc|1RaduJ&)bHd}#+OxeYY{TMz zQ7rO;HUW}2CxiRd4^!E_hKt_pnF^I4~EGIa_AAU#gRsB8F@LWNCuU|<7(@A6_ zUq?Qg$Lalhu>-v^@S{r6E_znM^+dMymikF81f&LL;A9Uyt2q~4Lr^itVKk4Ds)VpU z*v)cYjq4+SscKWpCE&MEOeyLA04^xdWRid4HPtnZ&!*fVIR+*i{i~968AkY3tAO9m ziKQeV=Yq!FGLuY)g)TCnAJ0NENgLd zY8Zw*U|^mP+N0lEET0OrTlJa06>BO>5mAy@zBc~=dbG9lhsCS&dvhFk)8Yej0rL<( zU#YE0hSb{*zPet3xW0JY>Nd_T?aWoGB) z_x2T*>Oipc_gG)qv0Gc4xRy6wer3k)N7p>m{=K>1@Z!dB`_(-kThn0JT($HZ-DXs~M5dTNx7lwG@ng*oiL{CWD-w@&FA zC-58nS__zNuBWqd(i{znQ?@*R(zDkh5Bm`p8C}B>zSDwE^Zsch`W<^g)c*jA={DMq z$BAc*NUJC20Q=K-(#Bz!dW_zfB&wXZY~ri>7V6&PrL@VSV93^}2R(>T5A9YGS=y^h zF=8^lf6ZgU*UuIaL3yofNpWMS6_xOj#Du&=u{kO|zO~bDb!)bnI%`R@f_beh*5h^Q zF2?|IkNCM9)Hh$X4}GXhEHKGp zs9$NuNErVB71NiTN#X0BxYTd{96i;udK#Cl=*rQIdSD(%_T&83e!s4DTIt#ztt|fl zhPNz;BHfpmf%%w^2ilibx3X@M>V0m-CR-!2C(+X>HKDG)hJWa;#4@dIq8w;?}PsY4>cB##H|RDEy^yf_{e_eZ^UV zpGlivpQQDRNOc(ZTDwT|7j4@{a)aAA%}LiU^$Wj+`ZdJ!My}T=xg#O)NFTR)ccHHg zy*JgWY-Cv&$tLp|#~|f?85$XtaC1N@+qQ=j4X zzh|j8@ZEZ7?JuP-a;kP?C!fC+q3eE(8n;bb*x5>LX0SQ7J@dvePkw8A25+wVW-ULV zb$R7=XOcy@^wfNa*&HwEPc;v#V7DC$(-MVG!}{=-+5i|JXFuMxQS^<~{{Tr#qiR~! z&Be@wNYj?=!x$L$r!RUBPM@bNbOxS51+1$s;BsHS;q=Z;Nqdawb7E#8YBoq2O}HOrV)6Z2<)2t1FyTy+MWHj$+C@sCfMb7ynw&e0r&VtZ%m zO>|VUvq`@YEu!^^+9cRFW7{;hGAb27gy+Dmb$1wuw7p#XNnK^pVz-jWESX&3=kjOC z`qLdf()!Mw)EAc*mw@T(Gu*)%o*CGFVm_YU^|e{#mN^bPHUS zWvg`L`pMT*OqzXY<`D`;OXP95j==u_;x(b^Z3@oQM|(X&6`%1g@=F_xu2|=9*Bn-g zD`=y3WsMcicM!e)^y(n=w_p?B?N@$-F(O6$xDX;BzyM$Y>?so2oEK-nQgA9*P!8_- z=M;o9MnZrx-SI?2+K((@`)5C`Ss?SCG1{R72{Nfp#|P_EQ!F{tnz9KQNag;ipOb3{{XUWU&%h1{>HX97}=ktV|0)gsx@v& z;y>_fh4Sq@SI5x?`CMQObHzIuk;%ymjzFeYJdKAQXn|b!Bad#?_Mb8z#?J~yefgoA z9vZ2 zJ5n%&u^7kNlhllIK>&&;c+L-uX0(B5;c>Tu2Rx3{g(1M;v7}7gOiIKGobN1JfV?7>{b@_MI8jOM2R4O{f0= z2bND(S(Y>Euml~yOyljvSj<1y`qsSxxx3KkhAT}eBYDvB(ld;V{RbRjZzm2`mN8!ALz{Rf^=GUVc&Lt@YlL*KV~Q16d zn=YYCq5L}j^jb$|%gQe71NS&4wt6p5#iaDj)}HC=wZ{2ojW}J+-B;=2vQu<6wJwDn zv^VP$Ti>n4v|0I3XC#x4&GhUsSyXFV&1P=0>fJ7FO5ms9nn+3lJrX}I&(nL=0R$ZJbl1@I@%~dYBYk70mHWm_H$d0;9nYu&@#fc-2ZfXXJ zu3dk!`dy6ova$So+FeS7nF@@Mx$n4~RL~AwOKTG&!l(;No@If{Be^R^xU>eoP9W{qMB_tSliM%cBed%PA}ymR6Cs$X)Cy#jl!Cn zS=BUtn!D2^cDuNPSzBogS>DmV&5WNd{3x`ounGNrBw`J98# zANs{ax#(*ddcM=BXL%+TaH>ZWWaX4$Sf4oNrp0X(s9XgBMKtn5^D8OI{^pHjRvUKh zs{LD3veNBZ;`&97Ai;Mr@^}Kd_*AwDw;prb#d~rn0abYMoYe&VQKlvIRuW30>Ofv7 z{IC@6snj-lb%NerkdcBn9DYzv0Y9xy;@;pfLdbGLlB33aQCorO4^V<}f_=?rEIPt1 zQvU$(x^0!*umZ|%#8`(b;2%GwUYn*2o8!SJk&3zVL=LRiVbOFuo8fCM(_4Xvk$GZH z1!4N9Nb3%z(yjGNKjC(>`eCDt2K5zBI1+t$;}y`%(Y3=Vj1N4~K{n?D0~zBLcRVAJ z^#1@~YkDlBNY=Ve=P>-B+C+bRb~*RWYjjvOj-b`8^(|V;T|ZB?kk4yxD`rU?j0Rvk z06PzQ+s1(MP;h(Vmh1URk)AtoOJy9-Tj_3~Ux`Cl(5Fj#Yq$bk$Ur#91Oe_q7&OmK z^hUF%T=gZ2>ITzKzsa|ierb0f>d(+*AH8pNoo?Gjv;0-?aT#=t0`&U*+p7(`#0%C<&v;{vyer6E3?e%Q}pSuGQ-Ej7(! z@ETpDvk1{%c}dzL=NKUJTj^SNnpDE52j4WiYOtEeLET*S=kfbX{{X{7aT!av#&B10+GiM&cJ;0&U|9JL@u&MZ~%YJ!KQ@mw4arjoDZ!! zs&;b|@cUo7>4>zu?PcaR_W;8R{IRTg=bU7cIUdy~thAT6*19UqrjjVfdQExQJFD`8_=Y!Rn#hOSkdodXQ02OB?X1*zO&-+7bV>)ULr_Es~ zaVc=Bppm(=?f~M4r1UkPSBF&vn@_Za!C~q)T2jX{Onvts?ON?qPqox-e-Efyr13FU zWqv^*Qxu(N@){nQh~p4AK5{ z3<*$sWPNGXXJM1ox)<^NE2s4Q+O5g_9KpoI4aWnp`g?m;#>6$U#b^UEM;JiFwppdWgdxX5)k!<{c#)TFh{X&NA(@zkl~IwO7G-L{PwU+;;+!;aE*8;jM%OjD`Zzi2oQC)PNmvhpe?CFbTvLFk$Ach^A>(4YA zM@(sU9XfO`Ok7(!ktlJ@g!e!C?OFj8f(GTEk&{kE2#f+-A&0+eZCK6puSRK_9+h)p z5qWZLu42K1> zIj3~gT}snTmrnj0EwVkks8bf-mv4%XK98d{tWis06C^k>BPhb)cLJ>haZ7dv6ySNK zy;uTo+D_4q-YK+4hSJ7)AiJ8}Hl&%SKdyRE!btK?2Fuqkl<8JMmA!hjPRo zJkyb)#TkthDX%ctFkE2OiKdh;=TY2tB+?{Fv>czP72u#t--F3HrDsJAv=fgM`3oX% zAjE!Q$I^=7GYO0yN1qhSTnOVVaEpwRDnH`GgdnjE{{XIQLCeX#J)s8X$f+rvyO`sQ zimwFvC5g^Yda#V{P{+Tu1M!{113kOZkOv<00k)1j)2Yq}=~%5qYsYm(3I-VS^c9iP zj_#$^is$o2Kl-(=yYKIc$mpj!{<25^0F^=i0M!-7`F5Vy$Ge$93(XKd2+0GzBB=^n z>53$BK|bRZ_Mb3u?$0={GhviQnByN@P{O%l4s+w0CBQi(?$3&ZIqKm-1B2h`io58Z zdb>->82m!;DuBKQ(l{JquevY;)tY00jLy7g>x$1mZFKbg@%0}{E@h2d8TpUiwvqZ^ z0fi&YV>HZWPZ+;i<)C|*6hf(?Am4}Y!hNG2v53aKAj^pZvfNCakqnMnX{02)V^ zV*KL+u;RMB^r50B=h?OGlZ0m#M=IK?Da3nF}g zoE@YQ*wKTq1_{X+#wcUTvpPqD*e#DV3SWb|BazsFLFz}rW#o9pILt}O1ba|Q#~X@d zk0OW(nGSwlN$raA$rb_HM~}5Js7O}cLZ6%=W!>92tanBAO!G|IUbM=W$1fCssY`{= z^kdt#Tt?Z7Hd_Pq9MTkwsO01v;Bi^Yp0Cm^b;z`trz>$Q`f5TQoAI5yyKeeaYk!Cx zJ#i!mvq~OYmbMu~!w-LK_^7e0COi#nPdrgX7# zV)f^c?S?E64?WI258k4+1)~m1gWG^>+oEl|l#X+Q+OYC`MbYEat!|P#ndhCEBZ4>> zJ2HX00kMEH+|(|v>McW1>Fq03>Dgk6SuT{AWPCKg{jZOFQ@H4|C9|@V61KyUk(?iK zPG>R>`aab>wHIFLg(;mOI-ai;GqqqxSFZpEWy5>lU9tOUdo-7Iz66r=n0zBQ8Rw zd->*|*(*hly&|+g4C`}JB(Af+lN!ZjD;tR9`w0T8|4EB%`lSzS;}(V_8IRHd zl=%GBWbQ6u)Ou>?t=b}v`n9#A0NMEmIU^q69Ou~8XI1rHnXf^2)9GaXl$P!2+>XrZ z2_%2v1#zExtkbQnV)&WV7Ll+iCZ0mb2+nqc#aVO&HdlWOb9E$e%?i&LosYk~#)9vjZ);7JlxKC>O4g97nosX4+2L$Ks;-){u7GANm(k?WKpt8H-)_(0N&NpMg ztoL7N8a|QIdO~Si1;6nq%dvgg_a>%wOW0ccTj|!B<4A-odF{E0+fJEQI>C{gD1hww zfhXL1S2NW%mX~BOG||+NE^8)}5vNPU@I- z31fn3bSDorgQy@B6nC zLF^eV6@$+aodE$*a&YwPeQSsATe8GC9N7*f(UH&qQK(h1_wH{_Qd@B91w~&p8&o z#E640_vHc7JfIF)wVIOzL#>(HvtFsPwhyl6+I>}M$_b+T>&Dl*-Hl$Nm9keHdoO@o zqxZzXbU*;qwP`&$=j&O7Y}j9%QOC^f|D43aVj2Zh`;H0;J$gFB0`EROskuxu{7sKq z)hrtD5qsHh6q0m=eLSVK{VCk@juh2!n8ky0lljoVhRP*(^VT4WcV0{n6|~~$`<9CY zK9P5553g)qnTO~Tb+=BtwZ4A;@npUmNjD13XuL+fD|7x3cB4Zc*{ynE+-(4B7p+XtQ(Pg% zj72|S-E2$2h6}x_J<(9Dz#X8c4%Et6K8lc5nQN=5b~m)i54pQ!HkyPJiKq?FfD5oT z5-mme+udQ|4+>Z(Zh=(GTWS9KhKCvENS0V0jI6WTHLtYR?FzBn_bdGBsWw^mRbwUS zjyxGiKwo>`j#G=AW$g*Xu0SQFQOPcBen`sp1>d-LT;|D0~+eHX78+lX<8xvf1Bi&_hq2!U#YPKvZsmNPuXUY5i!3C4c%+UgWrQA99q&)5Ke~CfHjAFbZUCWnUeAkVRuND^1C2oQ8yyr zXxaWQb~`)i;Df)vyv*XU)e)vBG^vA)5-7Nm_rx;6C^~lCP%mW00_(SK;^6NfPZ6-3 zj0qaKX=-bwfCy;Xor~^I*zq$lwjW{6WbUnTjg)37gw5TjV}tA+ks#}T(j!Zy$!;s@ zKHc(pqwS4s(Ei;#oUDwOr;K_uKAV$Na1TtD86NfK{^yFLIAnjB6X2yM-OLCgm^C$O z{b&0|LGogRZ~y4n>1i$Rs_s?p66B)$V92WPwIS8!_e_3vUoaQx(V9pWG%CYTdi7T! z*(6Rhb|ZuO%C!YHI&wo1@Lnwl#GJMLfeRf_w=38qu6&bi-m|6uxvHH*py@_&vg{qV zoWzA*-dnjKHM9vellTdB3T)+ma7@gC(}v+B+ka779VN3 z|LGIylpnicSkc;aOCwQXg~pJ}05w`yM5TeZ`;^(M4>1O@>lXUr5Ti{;0AVO3Ni<&H zEh1uY-RV09Lva5i`jo3t4v#t*;#2TiGEJ7iaV%M0O}|Q~D~*Zlnx@W38^!R1Qo16p zdqV}Kcy7D-Q$GJS`l3_iCY_2^p`s14b#;Bwm0t1qXas??DOHd2YtC*W$-c=s1h)=KVvv z$Eth+Qat3k+I!0Q!ud5V#h11gweGLvq;vwG3lx$~yW00(j2_^M-aluHCQpSWsb$2R z6Nt*+@|~sMNhYxaCIMd@`(>u5SIeIUAKC2VJ?tVWhwyJO4(q5y3mBk07B?SKpIvH( z$m!>*<)3^@zqK^^oBDX%8zY77n4~$~H}TJy=;5`|_YQTezAN*+y)8DNtDLFCN)vd= z)6nADLf$hM>OXwCI=rd;TrC6&^gUQU80C4foJ{;~j&4lhpW#D^NYD%{Ue z8i?N8C+fK`6Hn2Q*cIl5v~H_M0d+gDuOGQ9zl*(hdH8VmzhuRO^^x6+eD1Zw+Q{PH zkXJwd2Qc_8;$0R$Omuh-el}v}t6S6>Qa^cWui7|FCXx{aVLlyF?U?cOzVp?&F{!jQ z0P(ER%UFe^tF93jWo)onY*71k-~5yM^uYN@g?8ZH(@B5a%D>I8?{1xm9wIVVXjl`@2O*ymp^Dm>=>Br!)Y3EmzFX&Rmn#q#wXna`@);gO4VS@H_HD{( zd3(5y?Y?=gq&Hf=+VxhlvKQZya7;?*e(`)6KBR^DHij?%Sht#;pxn{I=mYhr+be%z zh`u2W3%hO|<^u9U;2D}aM%b0Jie)y9fVoemc_P0)SPyoa1 zv9guFh+S+P)KuYl5^z%2o$+PlNzHZ_e2=Xe>t_V%zF%tPCg(Dz*QV_-c?jlD>k#ay zgCnpnot$0_5smM#alK{?WmHc0T`aB5(w^J+%0KV;G0xisfGY7FWPFcwuUN2t{E(vi z6T(Egxfm+xGj6c@&U(#vfO|gmSrk&q2cn_omPd;S1kVyQ@d@?S8mc`@SMTJ zV3{u#*Y~G9Aua8S&IWfX|h`xaxqcRt1F{W=0XJ?51C|%+ZQ2A!DnnDuhVP#1)n*lRREvU zj6rVM8L-`(OKiIES4gW6hBEI|Pw+Yjzg0wGm8^M;r6dN@P(d|*!?hhkj51Bb(O6d0 zp`@vr!yBLMw3bj7YWj}_z9P>&t>hDSmNKZ2BCmNoCM=UJQ7so6&dZ0Fikg0CfGOd8a0(Aqf12ytlhyxrssl9A`^bsmxFl~C*<>t>HZX@auuBZtQ zKE*==;jt}~qU?~$ru$Xg(uaRTrr7wt?LAJF^`=9Fci1cZJ1(B$s;bS{(i!dBoCDPA zNv$`NRe8HPVtq`9SFo##HFD2jN8P1D8)8`=b+{J_RO ze5U&?VV0j@cEzF4Va^(Goo_x*It9f16@9=b7obHeV`wyEwRmfIsZ=>(1ier$_pd~_ z7r<@Mf^KcteRJngr2+{#a#q2(v2c6p%ZNa78jF~59D_Eu&c6QO%qz#tfC(kGkIZie z?jE03ZqjAu9CxduVtO~?nUjS8^th#$<4R2Bn*PDV$f|b^Z`LQqJuo_b`_UVfQngWj zS^MVW8XjLnb;RfGwUPsep18XMNjT^1_I`F7Znr*8U*r%h6{UgaWf+F{hI+jl;JeI} z#QBOmmYAf%p@r;Bm6bQY#R@+I*9t7%ukJ(p8N``_y{8}dfb$L;5Bf@crDOEN4TXUg zjHAQu#r+9L@+%{zp2vbi!Co4pQ%kh~$;rM>WupEd7?tO_MOq|ikuS<#UneDkCTeBj z3Od`_QHCE1-89+qNy$l}0cW(0{aH;|3-eySP`?3N#!eUKd&BDM?>+@r2i9NSiOCx+ zCy)L&{OD6O*Z%+<0!pDj!z|p1;Zfsq0FoEtBXjG@UoEInJZIBK#~s~-c9_3n9TEMM z&*wGW!CKz)3QGG0P!VKB>%TJJOFxb14wd~v>JkR&k8!>w*?xH)J7l+O{_E>=JP=M> zttsW`10f1F-e2lvKS$?j!&1uEZ+4+KEpu~cxcM6qiGxht>lGo?>H98YGO057u4Q2k zuDZi6h-*z>)X{s~0(_Wv5l&LaSO@8t`@LGCI-Owh2a1vQ;&rJ87$ClGN%j&d@JZ2z z+l5g|N0I6xYYTG~EA~YEXo;EEg&?j)p~zTbSzq!o8IM06>xOnF60z>KPD>IAkaquK ziK`=J@-ZA{leA&rEF9%)^FM$#bguC-1zrPkk*Y#{f5Y!U>5saaFNK~x^dWyGHM)ai z7ds!tbsM=U+EO+>hng?5@!#9o&-U@E%4%ME{~!jr_UMy#(Ni}|elZ)%z2^Idub(pC&A!*_{PajFXT&%1+MyoW1Y)6c zUG1=~m~ZsBbI;Uj#g6pDZe?59B(2<1nUny7D7$CwC=w@20w8%U+y6y8UA{;yGGce- zv6&xxQ1m`~Kmv7jQEo+6&Dp5c?PukAGP3y_l4APFd}e=WL%oW3{?I>i5#RFgcT&s0 zn(yDWXIDwumr2?TX8(LEtd4_{r`h21<()v>#V`X9*Hp#!y7ao8MnW`w#mzeP%+}+R zP2R)`-|u1J6gZS*nY{Uh#P8bP`S#z3#}N-6u32wt;c70{c;@{~B))cz^apeFZ@(NH zLH6OJgx`(yu@)LYhH&+M7w++u`^yBRV(Pu>t)@mV_=Jj%IC;LD6L1vAaavXBZlyIQ ziM^@zFbq`rD6pzv+~hle2r%Ncn$&?`&-FdbQ>P{$ci^As-O!ZC(_0!xDILM zIEOR+(LPw1bF24-P&_I;(KC2-MnBaOl0p}w?`K%_T{2hoJ_b*j_Io1$Z(z(2xd&(i}%Oq<-BFsS6+$htRY^7d;kEjo-tBg zZ@Y+ok!efWlY1z?8f+0{?ImODhRx+k{v1Ud5PAMdR)g2+j5gP$9f^+sgBhKqopFdf z^k`(G;`OKJmdvAlQ<3vX-uW_s&L*Kp(x&QGbT8yrg0SM8C2CMS> zS=&*~fgtGi8|CqpCdzKUCc{wy7bf_!-v%L6!=+<*zkwl936CSsdLj ze8%fMEcH0+q#SES3E2d0=6RUWXlp`AXN;d5aaao-pl0VTLFPSrq@GD(Y)Rt786niI z(yCUj3XgZ$Jj;T)aLK(J?ps2;ZwWzbEuq3}9SZ*P>qkaq3n?oBYS*P4-O4&Bzoo3v z8mw8A{eWBL;eAlM7cLJwQQ$)(*@r&v7$vYDnD_mukMf4Gos~btG+xLbx_6AdK5hJb zjvRt`v15b}MlR?2a(2}|vKdpKL8Qh?^oPhZmLUTWGrFpMZCe8cZSYd&uRzfpn&cU> z^Mj2H@!tHFczHax8?Rd*4d9exJ5y^Zm{vzURxB)v8gn7@p^e?nU^ZXNI2o|rjfi{9 zt8*igrp0S#$@+)>!7Grrn%SByZSpu>Xg=Deyx7iXG20#0UyVY!t_Wq(49yEdy66Ih&S)#F8q{MW(4o5}f2 zqm5RzGC1sbW0ZtBCPE5#=a#|(Hv?2R>^qYl6fg_#l$O%98)p_C?T1v^L4#4d{=w==tx&G?=_cwc?^#`ZeQgi zOAz$RemY0Bi-ef6w)~0le{y%N+nSzS$y-8kCpU)CbR0aXNquS3RN909C|LuPUXN*p z*`L4Lj-xBK_eYm@VGSlQ_m-kx^W_|EBz7DtgZP65Dx%F^H<9~cg;>lpbs)lHP@bi_ z=Y{2S$6hJ_ZEgDeN)2LCoNBD&qQ8PS&|Z-anDU2j z;*H^7n5_=Q&Ov^hA%{Zx_x~=Q$?DACnflq__3?{P=Cj@OM{Wi*Wt~l@YzEKfAJo5n zU;m~7=NS>aNZe`z)3>$5E{1H*?=0StSsBW6fFX)m59)wvmF- zXYcM^-GA?q`|3e!N5ho+Q_+^GpAJen1~8V$*Bi-)OGlNVPjoulTCoowx=62<*LA!p zYUY8uvN^?i&4#WWEbH8JYz_8m%Xv8>F$oSx1IY%acCFio$b!^5O<-;CNuKn}lRk}H-M|E}jo%Sdu^lo2m7a~a8ozrt_MT%Du!u~YX zIMH;!?Vh=9ddc4m0oaQ_?mtS3#HD&S@>1NN;tE%f9XBHxLN@cjl*BL1@>5W=2K{!s z;yL{dzvtY4u>S+dTCmwx8!bp|Vm4XLxs2|!ACK-Em4O4C!e*OuQAI1J z3MJR=!-o|WMo}o2iXIje0TwXvU(TRD$M*t9?vpL@>QSIFrymX?+%=-P7nZDHZX?>{ zLVjee*6!Oq%A@Okqv5SDy(c_ySjIJ2(@OeU2eHz?f5nzg&n5>eAe7Z8R{&WwCFFUwgY%NZ5`16q3~4Tnq>1d_5Y&)@&&)ml;i z`1ju*HcK|@F>=RGypO@KJKruHd8F_2#YM5y`Uh{sa#jDQk+W~A((v%P{|6kqW|Gaz zbcVhgr3pPfzMr283C4vEV{FEdVF2L8c;2*(x`Oj0`q+hiWc{dcUJ^~=uZXYUhaA=CPnl>U#j zP!vKQR3rS9FWzhUO%&IT^F_h!B`r^elg3qG@vGyDrM)p+i#B(8;K^pNaD zq6&g!NbtPZ^yLK9_e#rL0e3D_p;6?49{pfd(d#{&eC3#e6N?2pV(02VtnvrV{+xIf za5viP=*W@W5%X1&9#YRhTnVX?gGMoi25Vq4Az|BbF?ia;^QlM=_jsCY67eVedos6b zdY|hA+ZZX3evU?&hJ`Rq#G9)cK+mc<^FWBx@p58P3X4GjNjb|fiX@NSG%Zi0<|tUd zHFgC|R%~Kp50A_q>K=dGJMl4Rs-xn^5pjxyvOO-D0uUxda7>CF#YsZ;n+#g*GMmbj zh*p23EPO6u8`;2?<}*>ml_0}h?^}0>A*>BS9B!qxucdK&sAvZkoZ=ijP=0)04)KM4 z$^&Am;kj@O^G3Inh*QnzWZ2wl%x&aU_}q|#%VowwvVBZGgUXYISz;1=&_jAna6;cl zAF1@Cp6WBhbX2n7IGxoI9A5;w6jLZ>I?+j5X@^y!h$z`Ls&ksxowVW&q zE}c7$)MybI@fZS~=|LZSNdLc)&&BBZTTTDCrIKz_q}X3&P3Sr(u-NK6TK|{5;|%9# zkX$O7T<3&h@!ak;<$>(VwEr45T;;jf=bPH^kFOL}y4R2wllI$^Q}39)!BUeG zpvkv&`b3;?I|g1Z2tw0=u7iJ2;k=qAQg^+FF?RKJ=HjBSGf7Orrf8b|Bb}WQvZyQw zTqegNOA;sCd6g9%=F{Ayvv}qAr}8q|#5%HQdaZpm3*U&X-28g_dYaDZPzZzQ7=>a? zhK1&D=wY;3N3OCG1FAx*_IrY(e<*lO(eh_C^zq22Y47D;it96=%z!y6pEHh2MpnQR zX(R{>Em`j?M8Wi@guS=D9>OYj(+z=mS+n`2Y-`Q5fa-}0KD&VqGVmTW{;33te1)?PvyhH!6f@GCVKGI9dV+#J!mg6i)Vw9SH|7q2s$XwMuC1w

1?`aj(ZL4{@^H4_*O!oxHr6;}Wf?8FudYP=B`K7yl6O7*5*H6s6 zt?do%Q7^Awb<2~hZ@0g^Zsj5`W54j&osT_m{ox1hq+Z_rOYhtLi~q3er{4LkFK%`8 zL7)5ise51c)9+vU(}T}hu-}ohzu>2{clg)MKe63rAK2=3?|Snq-m%#xTfFskZ+r7= zUh~TKm;Fq(uQ313^|C@$sjE_MoV`=6URJ;=MdK-!q7|N_t|5eUM&a7X7O`^J3(`hm6!m^UEGi-JmkF{_v1r{`}ahFFWJLtIu6_)1~(>U*vsk|G?;|ZWMjhu+Djx3F%Ks zE{*EJia~Z4LSB$vDFCh=Lkh|2VhW3Eu!@OGbrVN~sEA9f(b(v`y6JVhaL4G@p_>?W z|6%mJ>SmJehSQBDu+~k+Ej`KTZj(Twa(i+2z%~L=rg?xHlKCHhvtGUIx4D)tz0PaZ z@|3WNm$ueKDi}@88XB?)?9DO75D8hEyL$QImA5`}|I&(O-N)D5^_!s@Jn4`4S7c7RlubMLHZmBgF-3=B5iKXamQbQbJd=|%Jlw)3l zc~G$@hD_GHoB$?kMjcQ_hqCj5C4iQF;xdzrw29Cxtu`fHAo(ORAzVxrz5@qO2P5$e zy-$jf9(&Utu~-n|NearS+vf;fRz6Koi^!b>V?q<1te;U}tQnjRa6l*ruwp zx?;(s(G+RRr}Ub;QQbyW2hI&Vmcl{dZnT+d<1twgbz42)CeAX>L&z4#5GqLB;Z@fZ zgli%c?x?h+^yInt+RN^|Wudy5(LDzY4{}tPofrg|v890VRSUo4=b9}CNJmYZ#4mxQ zS<~4?H(wXElasG*c8Rx+Ez)Mu#l41I&Hd*i#rNl zj)kU>auBY0;lTi4#R!0d)hr-oGQ0iP#Cl1Xm)sH;((IQCDZBBD7u`B zjvU@8sv8osQHhDzzL=+6Pirb-^|GRv&Q3h{ws$t3?L7>uktYx8Gf(*J9_l>)VHnaw z;9uIAUy@fQ_siQ#W54~Sh#vdPEQ#7#)%=HN{O0-c2bW*-(28q)e|g=?8|*Kibi}tl z{h>|2`q{12%R7Di?Yn&9tq1P*p;Hdu!@t z|LGsz{JyQ;yzMrdZ}XnFzWEKW+2|FMe^ToI?|p2QtkSi5*$yvV&FibmRm5!{S4t~p zmCSaNRk*5P)ovC>UA1sO_NBP?N;WEXREE^xq%CZtN@Mjk3o0;R^|Dbq@R=tauUJ+o z_v>DM^^5%Kg7a)H8)95|RZZ%_nj4~7_HWGQY6-rh(_HX+=AT4eAsDO0N?@K4cs3nx;Wi5FaU?7xBY3rJ1cZ^yXYhohuUv-b~Bj3%UwRDVzcqIUYF}ooJ zI2S*c-SX;kbjvc-nxKkJcM@XeSwecv#VjbMz%F5#Yb^qSu?V_`nye)<$~aGyR}-T0OKjs=SxNQY9Y zDJ&%~vF`uPLP}hSE*|;#9BFFwkvV+`BriiPf|i~;W}ioO>0o(Lh#ai5Po*4v^o)nG zNJ`b36Z3*ctF_3wFi2W(Ixr*tLbDXT^dO}%on#+^EL`{wcU?wln>gw7lm%Y5S;QIJ zGy{8awc%yX1*R!$@s|t?`xwd*m_lH%johRcxgm;fxpCKxoRZGVQZP!MFj}QJ;f3Jt z+<omP7HRV@&`-giIzyLryX;Fu@t(EvH;Lw2%QB< z>yYoDWfW8kAA=SfmnLG=EO@bFE6K>ER4k(yTJv&lB{4KFqX`QDl6ST(p>l z6R6}Nn}{Ln=7!&mx)59lX7~g+^6aL7$B^TWmQg;QEm^>{beO=yw>)(Lrj2}gkuiOt z`I08mWC4oh22Qt)(GwJik>L_cs%MyUYom7#j2;++O z4GR}Ii%}ZSUAZkXaK=`No-Y?tPz*-tEb&|LZVS3xB$?zjT1Kf( z*HfrMjzBB~4kG%Zpz!qHBs2oHbg1n=dOS8j+ z90o0cX_RH;gJ0JnbA^(UDMM-Jg-uXKLdjPPKuXV$aPWa~Vjk{{EW?+bdr{0qP&x}2 zEo4IHNZ30B7#t}xr3fcFWTEls4i^d;(MvQ${A{-r8!dEjG-}P6CC{!b^Z9 zDrrDky{-rVQYKBR8POpHbwyyHk5nlpqCoB`-9x6w&QUMhYu5FwYqSTs9_suQwjm|6 zYPu&q52YS%jbfNQpuYGPWzYGl<0@s!Gc*6xl-71Lqmo&TTm@}3`Tf)WiLqsV?CYsV zmOixN>U(az^!6LhnKyfvkG}WSwwU+Ybz47?z28^2J>t9DFF0)HQ;*p7nD2k?#JRg1 zIeUjacHZ`rA9~}5wtn>ow|U)m+ibec7O#K*)^FYV-Ea04=9%v=PYUM0)Xfw1ul-Lg zTs^B?RWHiicR&YYE-eQyrnBAy-=-QX0aQ~QSr-IIZR=p z+r3r2Dsr#7^n4Yo@);Xi1uwgQUJAcLReh?wMGHd#Q@Yx-mV;5{svTkbypj~%kRSVk zF^UACJ!=u=TsQ!^$;T*ugj&q_g(h^gI90^gU2&1tFMZj7rc~XV6c_XI^IuA4L+AYP z1Y6AW=Inj&{$D-hps&wA@;g5{V~$_OzWDs(uDE_i!-E+r{l&W2` zwhmUc))IB+(FJJwW^L!%o8B#WKfW!BA6QdI>sl`NB6zLalnmW7&8P(#*~*_TXNy45 znuS@@&BDYLq~^;VhC32-q%_l9tOG^MNH7o>_u&>6t<@U>Y!)q%*6d_XoJB2> zVT!J$;|E5rZ3>XOraAfSn|Z7a>0`9d%Dl{I@RWvdePXtT4zbKzE?^SBwj81zc3y;0 z3&7q)B!+qVI%3xaKrRHZuEQ;J$d&>R*?_zR@JrYlYJ0MRY{km02^vih0*r}>SO|-Q zN?gn3zLDKgmJEib&rt}=f?s<54F`aW(UKbsMgBVT7>YO0Kjr6A|+qQQexrKD#4qtTcvsmbGf1N zkCM*$cN zk7Od0KLy5&T0Oelw55tc7pE>TSUNjL%tW4sz?ucSZi~s15uwm91U`F1CY8WgG&;^K zDU3LIVIYHwB`Uc^9$`x@!)9k*&PL}LQ3WfL?JrM%h1omUzP9{`f06vbyXgIEQ@+c5 z*PS;j9%bSX3xo$hTKV)|s5$BiXH-^3B`g^(T6oK4zS3|?m;yT(JO)O-CESc+;DUsm zBNy|EDn~9z98Cf#t{g}VhB)FcJq%U)Mh!(+s@d_Eyis=3l$a3omKvbsOG&pWqmmUP zJh1n%tu1%a-6BM9S&(x9fR9kC%Rb^-CHJvb0}KNh*%nAFUL}^|?!jnO4lRW_vT$rE z3_&(Qv83VkiImKYk|-oUz3ZGm;P7D0fH7S>!A=HF4br zO$tvl1*1nY+E#wBkymk17@siG)QrM1a`|3;bAox|K1?w^_jGTJv%E{JZBK z)IG&{!t9CN^IH#cqgdAUM8|Fen$c5e!k94jjN0?wc-o&l39s`px@7gtzZvakzI=b# zKiupeeO^EFdtWw`)ysCDhdlo9ZI9f$$d{M@`;Tj%U3=Rdi!Qw4{Nwig@;`s>Bb)jD z^1*w4eC|HmA9dhoPdWU{r_KBF{DVI~{|8@~cfe;4*!`cqt^Ji9w%+MeTYhZ2w`{%n zt2TSXEI*OG`CDG|x>x<>ee8`l-guKuHu-Dod)!IJxrDGe%)y2wN zWv|WU-n(Wus+8u)PL#dA(aZv`{%r@jinxD;IdHFM+n%mMw+Bst#Z@xfIQDNv*<$w6 zwSWEC_n#Hc#Qri_boG_DEm~;QH<#xh;b*t@s*-IhSzW9`RAwqIZ@B7`HhC+46_0+K z+INSQoa$Ks#c4Gxg*$NXW-DXWv#2U{=`1i@)UE1e-xscKWl`on}#=pDg*Kq>i zH>z;94J}VXIpf45PMv?~4-VMf_m}6+{)YFlk2-WW-(Noa%pY8F@yWN{bm4t>Ui0vM zi`C2C$G-25MXmw`-6-Fj@ah_Ms~S+frigR$tJ<_Mx~{BhA6<3s-M8PM+@lkh3@R5F zy$eubsH^JQNkHHS{6d$5Z^x z$O;f~O+PjYHjCra#}H0y)q)#pR3AgV?S!KfQDSaZo(tdgg@*e$+UYf~8U0OKKi<&1}g4&&30QcoJS{;}P17T%rH%7}veWd+89doHDzj&O3YGF=x%Yz5?L2uW+D zJ1o5188DO{rEJ~*G01_VYhSnYzz8pEXljl^tF1+DE*&FnR50*JuoYKJ4~bJZq_}i; z`CAa#mH>Mvq!+D;O=Lgtxs(R*Ul!F$MJ3V*IW+YqB=Y}XOT6w}i2Bs6731jnWnvNa!IN9aUlYFh9 zE(B3v%C;m+8-|@;K8Emc5y+BiBVm-1-ZzAjh(Q%r%^a8g&Ms*qSu)sJU zfMa8!NJ9>Tjgii!xVmByFezc6i|8aVN2d@5oY^A*%N@gTEN#g&>q0|PMAypDg@Bn6 zEPT_aW9CR8wYr!sRJxWUFTQliXpMIA7pqmGgLQ^CcNShm7bZgdMRaMDv#wEKwEk|X zZECz)UZ*;+1pzao*udy!(FMZ2a7GczlSI`a(4r-#EXlm2R4hq~)dcYr5tXyCaEv2! zu)=ALmZTQIvhX!8w5}A>VS&eQlwt~8T9&agz%6W4Xn7fJ(RArJITleD(nPfEtse4e zflP`^2OCI6Q-eoBjI!XjGM>tMwxoD$^SG-hrr1aJ>h-7&JmY!vRTmEnFGk{d&x4;w zRu&jt&$m78<5Vd>c+cVo?z&OctU6XN+g~2{c6{n*#)`xZa>@U80Q zr&r(VmF$(bU3}gdbH4Dgx7k5H@SFc~*nXe!``AA@{@Xt~_8Y#!eD+D-KK9@p4%>hG zIeUNdTf1z#>u0w4@wUHHFTeiPlU?P>``9z@WWQJk|F!>D&)P~> z1uKvhuxezbF5Op;6|i*0tTNV6FIOv;`)8C{Y%43G+n&Z?2iXQOrSchbwJ^5c?#5Gb zjTsL+r37>-lh?nGKbH7qY_A*JU-mCSDVFUx`^Tj0DKEY8`Wt?Ix!=%MEUTK2IC!?& zQ>m+5Rdx1NU*)9I(nf8Csv=i4s;c!P(#lC?rbFQzTy!hIkziAn`sQ*! zon39~ooDYkSKFespQ|)hqDq6PSk<{m*o$k2nj<#FFPK*73gimsis-6czM?k^p&8Y@ zW(O&AghuK3wO}Helz)6#S7kk2DOWFRL6ppM5Bl~2d+l`OA$y!~^xppQ<+D!v{)Oip zb>$@sZoKB~o31b&WEoBF+^t`7_b_l@&n>I;D^c zq>F`;UJ>nLagD^*l?YF|tT+PD0Wz|PHC-OMoZW(az`193i*S>qYrE&CbPr^A)%T}G zcM|v5ZeJ8II-$7X5DI%YvF=pe?deA8K4pAzb*C!ycROlXP{j$BV0I6buz8vL)?wWemF z+=@H0Wrb6=bjL+RtxOs+1xPwY6tnIau~2*(avXwZ8#>Cm_EPa}c$u{3$ z*mx!Ml98V1x{$nbgmFgs1RTla8$cVl&oAV`-i3&+b`Ix)s566Ys73#wMQ(7M1FD1?GU2$*u<0@<-(H%hnK&as{YTX{oHIaBB{ zl93m15zVWqF?PEXhu|DVz)kiG*`OQdV4ER@$e-;3!*j7$m%~mAr@+p>Rf9B3;T$bpT$XH^dQ@ zOw8Idtu?1h7%V#^VbPkt91IQerl1P|pXd@pQE?1rlwB*RDbQQnjvx$v6YT5B+J5rO z3)sr%wwqPW?JW1Iw%4=$+fqW~u?Q9f6`hz)%hDmvVwBM#>r|nevIl#kRzpevO~D+? zN*BLnfsr9cgl5E-GA&#Zj;Od81yh-dXs8@wW>0{foOBpo__c%ul2Y9PhR~>d3OaZU zVYZmVGEL41AE3q8ik`jIz(8pfc+9uSvH<4=DWA@wjFM#0 zwuS(B43XhJ2$28)8PicnmhhJg|AN>=9S%VO6p0QO&ccXEsl6J;Qs-^|;&1 z*o;IMH~_~U`%(4Iqdny=lz@x2}I^8R<1_rLhfVh4T>Is%BqaUiZL~)%PrX zcJ1xn$9~|BD<5CA@X8D3f9c;gw}b5Wv5(mQb7!Bh2RY-Yuby-Ap68yr*McK@wWdF^-_odoZ8Y^{` zw7yVm*VrzwN|q>Z6})Zxs-xQxW>FE_N>&tCL_?}}?HUvN%cu~7;ZbhGplxSa2t~1o z+AA;nB@!-tDTHxStuHvyd)Ky-mC1@_FJ=>8Th>)0-?H#po6i1?X20}h>zPHRJc?!Y zrZQ7q>F2B~S9KMYTJ@%iQjH0$-c<0aJ#8oxfNE`TSu30sutrt51lX3Y`kIaejF*~M zrOXSPh!we9II38csPJuV>#Eb~B38`8AXGtXi6L9ShNP!DR%H`}S{SjgQIx|*(N>20 zYk+)E*<8lYy?WUM6Kq7D~q?Q2cCY?0YCfE+{=G?(!#5L z&#dCP*gq2AG(@O;pWoN8dbY7622}USB*l+ zHR5kdS|uSnFiv=)>$(HWQ4OW*vLXRu<4D{m-3odH9UlR%YobeB@>*SYU`W@#M`B(s ze~&9}5pF&nqQ-run+&@f1+Q*CBMJt3w4O$M(7*s=gXC)+%%%KgE>)8yQtcMOsB4Mb zb|(4H{MoHKu{qL_XiXNTt#)f`)Do>-<$c}TAHL6>#J0FwnY*!%Jg=+SxAMsC)6g4YORzS|BEpv+Hl6?4n$LEvW38mja?vZ(bJKP(7CWsnsv0M;K11ub%>4ntQRpJUAmSC zjZ-Yrj4;1c0&B-mQ7kw|_K_g~orPUWT3z9YSVlQAj>V_wTD?TJ%_85B1Kc6jmNR>a zMZ#ECx_Vyh=f3fXLsBq=Zo2pF88u! z!9zDMLTOUkLIjIbIElgQax|2?Rv_-|R5m!X2B_e9>)C6u(MIuK1*|PeaS~eFezzes3oW{ z7?OjQ&dAsL@RbiEOtYqpWHwM7VrLXD*MilH1SbUpiNU0ZC}Vk>i-?f$hA=nxjtHH7 zI6|V*#Sjmr+2tth{(jZTFg*p1(`n0UFgrAmx(S$ta`$kO6sUD$mz4=BlNHOPP2|3_ ztX{UEJf!b68=8EndFH-nj!uB4(8@R8G7*|01S6P;Fs&rrD3XjN8CYvI{H5F`UY$L6pS6CoSqiCmuj1p_C?<_^ZOOz0V7h8aV2A`SdKoKTEH zww_HBASJ?<4u-Ks)9Q|?1;)&hF>n}cr36;=EOY?!Qpf3Jj9Q0MVvnRKR)aLVQUxQy ziat^*oY9Uo1pr%w%~4xEjOG|yE%%Jhe771`B|A*lohUK^c-iTIahD+&-O<;T#>y3w zpD?QI^|bG~)I+KFBt5hfPj^Ff4Ym91d5SaUK%g;S}m+vpDm)GCFc;&Lo{Y3V2YnNR9^J91Z3#DK z*x~1=?t9M3d-?LRe^2VX)4p^1(O*7!-Y!QR_}OoK=^u95{yks%%vRff^j+_I+eTZz z>vivb#~U_%y%(`(ZM@N}S8g(EFC!Bc z?_e7OR!6gI{mj=Dzq$wsFGaMjdRgTQA5X7yE0#q85QP_)g4(U77)5Ky``k)qLQyFr z(khL;AMMw@uDkqF#j@>YKlTM+CtAs=qU?QUm8Qy3rR+<`S_Nsfr`lHK3#R6U~!N~VHd87JBHX0`n|6O#~w0! zpRav)&o9lNx9^z?zUN<(I`1ckUvj~G_44gYF1mZ!uOGN;q4%-ZKe$}IT&-HM<8mN$ z7moAawNnkK3I?ktGUSR->ltNqy>PKT>Z0o!(HiBhJ_3M8Sn0&hqHTzsp3n_V3agoK zHucoxM(T?0{_M`+V%KH#ETQdF$<4tdU-v5yYyck8Si)zojfWRsw;InLgc2LPZg(Mf z5_c^(7GQ~)i$4nxcY|i+hTu_T_FRb0>^0yBn=JHOjgDj}qL``qvX=lOcWm5;fEmpZA$#B!pD+H~Z@x~E z!gnNQ(b8K4ZTZKBA$PO{r*2WQG!dCdk63>e7_=o&4m}FSscop~07TM)L>n7g#^RRe zKpGlBff2SK*hr3Ls~vMGwDh=`wIP+SXHMkAQLbL*4%0ed!`ySJ=#nw&a1iUs3uhT3 zlQEfGoP~j$O&K7m>ozytMRaMa)<-wJ=Fk*1;paxWN9ZKw6yU==w4pBor8N zjdB#7E)!jICzR5bj|pN621yyPz|$?TSdP96jUHZ5{NfT8%m~dZP9G3nnAi5cvHZc{ z(Q-s(k?D+i(Q}jr3n?FjB&1dvlFvmuyfBkd_^=yA-hmLY)fTj4ja<$#7Z(!VC~#%6 zcd*AMvg&xenBD8yEX2O^LE(uUB*R|j^zzS9@&H!e zy0z5uOu;~ml0lw@ZzP6VI$eT1enuExm?>b5(hV^hlAC$OZ$^T~X-E{^#1Le0n*tbx zBO`DMa3mq4ZD`s8&)pm`u=5)EG$gmKDXAhH#~rv#v}ntX5v-{-@tBQv$N~!iHd3Xq zKwA8AGdc}m1R$83Y`$nm<|V^2*f4M`rO5G*pas$$0$&u$SST)}xil|S1g1>zm|B^* zW}VVj48Uj!l-S6mQ$kQKgfDSG>4J_N0ESM{Eu|J=OsDiZsY(x6L~M|qS7R=eLCX=I zg%n6jZ+6r3r8AD@0!vCs@&dV7NQ5J<9Ll7)7<73}i=kF#ml77x9Q|6o$oDaSoQ^EFE(>gG6MssPZgxfZG?(wuWDDus*V+;ic}+k8?ByMk(P`I3tmB%R=!o zwts@zzbAFjKD(TF^!`6S{Re*U%gfmQ@#SkSKmFE4=P$qY@|DZ4TXX+yk3O_w-Kxpg zW+zqZi$$Ch-j$-XamgqET_BzLt^#$C)&-?av8f6Grdo83@bZ?jKP$OR*c-ynu4<;Z zps=wEr^`{SE;L3ui|0Uh(Vks>?zpSCiMctrOSstGU3;=+cVBYJYdz4q^8ovmOWO@@ z2)T4`IJPddJ$RtVaG^00_lubpHncD^*^WF~Ty}>L%eKF?4nz88AUf_&Dh) z>}Y*JQ;OT_p_Q9titad?S}qQO<5>A17CTHs7%0fcM08PU4YkO?EW%mdvMMogotCC) z6ga8}?po@o(IFEs2_s>YUJFh~E-`^x3PXg8E@P5cYaW1$Zuw{fXl25;?(i*tZNAzp zU|qJ@nmCI|VJNZTBHIKf1sH%(bQ#B)-()S3{28r(%hCzl$slO=lXyfNcvN!GvRJOJ zcWKaB8bwIV!u2J(b-p)C`w+z4e%kYiqBMgW2$Az-2uP8w>=>=;rC z0oDmgkr<AYC5)< z6{JR`#|y@Tq9Odc*i2byN^vs+$PF4}^GJZrg?qFe z{YkhdSNAV0R~q+&*nj%{(``B{qKS>>6fF``wwNQ28Mz#WlxnkU%DjxSG)-*`9W0~G z2*#wWAqI}q7f2QiToPW*D3U?WaT+!fuqNz;A^~=eLQq8Sq!Oh$N{=xx7~P*lw7@Xz z0C)&RmyyU{Rx!|7;1if3SpYD{K<5SEs5Oe#CF%IK2u5K5*o#Ov3mAzBKV=J#a0P%FT1ZZB6T#O3cj8Z!11HPqAqN*<~)e7Im^=OQ9ww7R()Dz^YI+}+`wB!e+tsoHnzQ<*AkK zL6@*I_I%qztYX=NF^fmL@i4E;Xb9fpu1fitNBy?{G8@h79=y%|a&<8yvC-T&nn%^_ zW$Y(c-~ROK+kX4_U5`I_lb`%jFQ0MTKA->STb0cI@nv6O_T^<;%-+X7{m8GJd+MGi zAHL&phwL!#pwI60^$+^VukAnf_HEv|@w?tOYwImuv&B1J`^MM#S?rmg{PHgL-@Ls% zOChY7RVFKJRj(>mrK>_%MXVN98rQY0tcX_adbwJ)3$N?tZTrY=Piw1}Rl!x$>T61z zhA7_F#-BTjQI)m&nl2(LUG=g(Wi_&wvAu>(v5)M_%eIo+biVtxTfV>F-oCj!Xa9Zu zGMK_}h(fjcRL!e&_1?1j(F@PIOBP<~o5*Td)iFGaQPr_lnMwShGX1P`BqQce^v{ zquFRT8A^9T55|5Aw3699p}V3M0NxEMT9$@*VIXcAq#G$P7yt&h8qp;$Ny{Q_>6LJ| z5lh5qh+V7O%Cvx$#si?1v`QJrB3lPlmC){fZes4e^3)YwXvvh7REex~t%JK?w?Qu5 zOIh4_0W5;aS}24jg-vUY`D%5$dGf{C>LDRURe6f#QTl}(qX2{#5+RCi3jHymv&d3V z?73fZ!H+Jx=qI->y26>@W@~ZGn$kozpc2U3Wp37{X8K^0#h>Xmh2cmsBU$mHv_!mE ze4b8c2$0CuqiJUaJTDe56h;_&QM5$Y?AXDyVc1KXlF=xEwA%8gQ-JB#(BfNOR4j7S z(j{O1^dyo9^A%-;U?~l04}N+lv-}&4WV($(2sv0rL0O94(zU$Yb=fJ&ASpetRxJ0i zyh78>)M&BFM3<3>+i3IEO@8uQXPGxD4P8d7J=TP~pt_7sT>#7Cg0lFL z>&9saCn|Pe7fYoIfU}N05^U2)s+5eHECyZK%0ZWcu!!s|EeJh4rEG<3n((9tGovl6 zArTo%xLH@@8|tBY92b}bO#T4Pi-N()@AUIL2@e3Nl_!P0tRU%-f=XBpc=$>ZtXbq6 zp0O3?$PPdu?1W<7P~k-KzyAD!sxWsOxW2^<-#TnS0>ed6_Gzltst_Br0==w=D*-%& zyKWY3Mu7l|5`Wp^A2W5S>YDO6C@^Jc0h=QRIv855HDotIMm%)nByefaAvtQTvr((z z9(sHDfK-1UqGAS5H)3Jup3M3r12*dNWO2kZ!QjB8Y zq6<^Fz%1-WfyYqMS>%>Z(Jg3GK-+a8XUB!n6{IcP$Ok8{GNg~7XcKsV>{&`OuYr$& zvC-@n$_hA_7=<1|hi|8uQ7d-O?Kj=F_!|EPmcR4M8AfOfk_U^2&O!_c0YfMfW5J`_ z>d_g8g|X;bx@4?arz`i7d{kmJ>)`_5`U@>LhgeEQERB#N@&(Yg<{g)g6q3^5-a(Nb z3!z{Jqa$g`<-p>=;b#{MlARJKJDpI{2Mm%}5ccxdmIHjtNWl+NP%Uuzm|&c`6qrJD zq;mv@X(9klHuI9Xp%E@-Z7zmLAz)0zHRvXw1;9Yq1q!2uZx-bV#u!`qh&a}*d|G^Y z0!UJnCQH#}bTe^W5Jt|DczE+{=CRF_DqYpArH_YLI-|`pFb{hkag)GR*}#T8755}O z{e1iTXPwFJcuUpGeQjCyi8cOtbWeKzVla76o7i!#aDMc`Ti36;#mm^wt-HfNzWl^P zH$T7bmP>wi0|HeSYf`ez2o@`Ro(ErDRqwpLE!l=Fk27F$aI)z&$^@ z$CtO;;gegucZ*H7c>62V%kO^Y>qfmiQ#1dyKl3$ks#I@Ht9FS>R(0HVu!>lh7*)(3 zJ#VgmKuWP3o}JR(w6d4*(ki+Yzg&n~8-7&E_yM#EYQ}13_zrQ)=5TSaV3mr?E6YtQZQ3ZaZe$Wg1d<<-76UsbS{7iPZzX2|BU-DHKf z!d5*FUrUh^j8QeXl35@+7rsce0s-VA96Y;vS>_7uUL#i{$E<3G!~jE)LtE7dG0#6@ z_9@5BJ?`)Wjy`m+GftXw)@gJ6GWMkx9)H8tXDwa)^E+?7_|97{yL;JH>g6X^-}BVk z$@>+~c14Xn-)ci$XFlC;I4kk!T9<=sK_TofQ2^Dyh7?DHrHhV^ig2fQ1!=hxFjt?; zt-n4YESA2VY2zUnSP=x#gi0G>cRFuOUZmlt1q8B2Ew_k=J$@v-6T znW4M4n+y^y=|KX3Vw5|fRNyy-X4I@DOw1a@vYZ&K#P*f+b)lvs+E8bCNX zl&$>7yheudA7a#<6PQJ+rs)RgW=q@*xtBb}5R6&?jIfX^a5wYrXVvt~~pA2A5&Fi3!njt?N~){@4bFG~yv^O6sMQ5j-jj{z*C zu4QZ`jSf3S=g|D z2u6A&BpAZZRYTDZFxsfXIFT@j$i3;n56%edvO8}(Kg$hX*ZB_s2{63~h6?f0Huq)` zIUPBw@+?q8O~epmjxo2e;x_T-mAg!MffvBhu+E4 z$~5b;jSy0}+*tt1vy^mwCE2fd*;ZB&+dft=+h|rSk6*^lKI-M+E_5eD8A{s_!j`l^ zjDpICP;|LC&s`<}4Z*V$7I|h5!>d2nlz$`uEtI%~(Mp;=J6#T_Mh^5~T!h0zF$yMs zF3KqN1tSah0D-|^vIs#+60#`+;2ffhP|F>XZ(hKxD?}RQ zED8){3^~&4rWlP1l8m5?qQKxsDD?Ch3u%-^AW17IYShO%?d%TQo#ITVy$I9;pSWTVnb10eE}kzNAXD0e!cz%csjWq&)O zH}n^hnGLL-z^ChFPpuy3Jj(V&XAhdi1F+VEt`^w#hbKC)o@OC?3^{`tle5@!vlDmM!1;%J**h z`YpG3!<*hP`IWE97W4nt{&Fv4t6tTx_L|#ARxGz2Z1dOq(u!a9`Ntfga6akyqm{6Ww7%HVYwn}4P zSoUu&`yFh%%zi7|d)L0Xto1(jvc)&_?=pKs+dtOq&1BW3B3C_Y<5eZAE@n}RdVyI* zY}ZvG*=x`KNhykgT6wG9q$pRFv5HOm(duHArm7iFUj1$u5{0u-Er9A(tBe&wJ9M>b z8@}GnR`V)s#g&X$)xwaN1y#}_5tM=ffKb|K2CK?8BLHHr8NhU~f%yxh3EC@1D|wZ* zS*V>Qz*kmyz&HXN{{6jAJnCTe@-c_)eaigVe(&qd1qb=}mzB&nUh`x1vhOe7d&e~o ztXS+HlUi}>bxv}pw|8jO!uCL`nw2(;ELxX3#GAyPt^ey=yCALCo4*%z{v)+Py-z{LP5P zg0T3D0T&Y-r8IXlWw8|fNr7%1+$sevrEZ2E(e07-G+wrn0U)|Q98HaduI1Z})FMz! zA(&C?gJdiqFH)?Q&XDd{&*370BzJ@Y5qNeBrPUSD=ZVNSrf#JF`NyXi z{cF_zP|%8L*@Y9dJY(*DZeA@o;}{J&5^GJgrDql`Lg~?E$QO^L5NJ#dptQNf-^49Q z&oI3BHpEqRLZ&Z4H!itVu#n&5sn0X;*cR92mUf5o-^hOs*nV5x7ZOn2e6y4T@JAQ&;EMQ$0 zqhJy?H7>0idtmr5%@Hzi33Fl3D657Fr%k2-Febvzh?!7yhY;%B!gG3^hwe7gWtXzJJgUeVT6L^pw-hx2b_S0c3dJol7tv$Jt3zbX^&C6j%__12k)1G9eUT z3;??Lo7x!SRoVa&2*%=Z9W6&l7&^%pr=W5!N+CKfv2hp3&BPO(=;hp^nLdmm?aPWB zh@e`Io#gc9D?Q8{1#Q7)1xzT6V38Zug>POhI}A?gVUWMs70W1v02?wFJUH3m*##v= zqczl0lnlZomw2?(;GyFmUhsC7Z87_LvM(|#igk&?**_-Lza&NPee6nRJkls1RI!<| zbv9J!F=a+mW)z4}bWNO?V?~*XWVc++0;hvT96s+AKx&0(S%d~F1V;kt>fNy z?4(K6X7Tcml35PAWlP~Tc?5la-Q>Y+X49v~au2aKjy<6E$Vyi)k3zr`EHT=nzNcA* z10-KmczUXbw{PMflk%I`YGf~FE0cXyxvwqvL)qTV?)7Y4Z)@9z_T^=-Wc&A*|MP{1 zY%cqK?6vn^?f0=i@XlFZ`OKE;<$wLhSM9y)`+R@-;$u%cIdaEi=X~1!@=@R0 zVefBj=O?nivC9X)^qFn8-r`m7-{y^5zvqpcz44WsY~m-sHrn`=8~KUsv2pxYD&DEK z?c0}*6|LUER@kbS+Y9c;xLB0Q{dBfg2@DCKE>;37qSe|;T{SY{%NPSi-TkYJFJMivjovWIzx8e)%%c0IJRG`SXSY-+pBc7FRXesL{~)Xs_T`*ENWW{ zq+(oM%+8|bg;#VdhZVxQ*;Vn%cEz%~+YmtrCKi}tnHM&I3U;(d&iU?wW9ItRuek^9 zdFIK7`U>-TKRN1GzgTeX6=&VL=t7&zcPzQ$uBF#La__SBt5&SO?{;@E7o$h+o*^l& zP#2o63(y7S*5vl+N@Vo1u2FvuRhF{2@QreDfp<&q3PtFW?5YGHJ|F!B)k9sCV3%rF zy?YV7mM^<|qpm=ZeX4@V1e;MpLD(J4M~}a*_roSyPZ9q1e*eA8?zrv7yO%GaduH&b z`1Na6dZ;i;FI*&eN|JHhZKP5DW<;J^`-qo4pM7-gbC0bf6kSF_fBy0Hzj@+OcE8e{ zaq^R->sJv$A6~U$)yg}_gZJMl0nv#x5loqQ-f`^59lJpSxck3Ri`hrQL0t$TR=ng`ddUiI7OpJF60 zJohBblaH-=e7&EnSt}V?^{SyXtcIm9HTfhg31ni9EkbLk#b7#MEz$Hk;ItGR4W)zi zEQ+6(I2nY19AxPrv;ifB4M<_pOi>dw?)S zaTExCMx!tUxTBMcQQf>K0-5-lhwmp8-9`~M3rGju=<0`7I^0He8MEs${^7UJwp@aa z=;8)zzL2e0iw0SSU={}n26#CDWC>%4g?r4?oE->uQ;UHG0H@>~5j;jEi~$D8;3<0R zW3pU+|H3oyW@^Y0XL%{OlnjYdK755HB9eyCHia^j0Dyeh2}PF?13V0pn0Xm(1+X<) zip7D1frOt<0Ac56)r$8qN@wLi0jv`j*=HmX6sdT~_da`u+1xbzOp1basw}g6gH2GlBtdClnYK zZL2|tu^>i~6e|on45L}N`m85dQM5&j3O>=rV1m+4muIOK#gxmxxbi8q=;NS_=td=A z$i$82k}f4(NNg=KFcG@kq(@+6(yB@WzO+k{g<@WvDtLfPDH5TZbx{EDn}X{RCk4Zu z%a+`A=Tfw~@U2)=5WOUgj%ng&yz9aH?{?<2IE<)<@FiSc zR=B*ZcXQ69UVh^vncgf=G?^&_zkq7VVj5Xxs{PQ1Qz-*PQ zkF*&r3yb+8*VV?)(kP1%!a3W=oz^H}H%dBimRAUDk%$tBVSZ-DnBNLxsrFoy4E_82StAf3T?PYBH$Q1I+bANiH-xT}#Id+*B zU47+vH`^975=kcVX)fwz1?=J*u0d#LTqf6Ebs3|HNJ9c@b4GZM{WU;W0!nX$NNkt; z69KS41t8JN9GK1vO!vC0FE=lf;)_HeYtkmQ!d3z6b!eh=RRXJR6}fcV%IyNP=rS55 zw#aQ6tEPL8TIFk~vbs?l(+X|0019D96fs2`kh!n_}z zeB}4`-R-1fW*>d%cg{ZZu#3(+o?Lh3k8fLiv3l7*zP$GSJD*y!lBg3^g{t5x8dVRS zMZL`8GO7Gg?9eCmQkcpsa{qARWz}<15|)ZgOnbl$u{32#;;Wr zS0gKvlPuNIDrF_}sH0WQqN}jfU#c)w=R*%HS2eGG=x(8hRIiP;+)RN*MFwUfNH8Q3 z8UkQ39hlsCnWOrS(o)op1)->U3C|px) ziNkGStzJjUvGdmqe@p2vFdZ_h-G`K0cb^D84*H7GoKxk>fY{&Rszx% zTB_P1(jLMHW;#u|b)cIOjLgT<1w)mQ3osal@#lrS*ryh7Qq&OX^~vQkNajt4UDt=# zn5HSP5FcSap87Q8=;Kn`I%v6!HHiVzhuVMv;|vyldZ9$$Ks}2-=s)dYm#M*!}Hmx+qhqx}iiPHt@fLaqmcZiL80@QLOCfIQ|Dx%gX5<!VDR?A*Xjsu9>owIfb#&95EXr z!v%(L^cd_}-4NY^>%#Pq-$WvBLFh#>_grL4qO)_%%P88hT+?>3 zjGC0=4r7R8z6eQRJfyh|Mee#)FmxiEm$F%+6FC#|F^Un!b#6kwAu(+iYr{0 z;>J(6bnXWJO5hevC!O}qBKKBaJC4X$0Ty;HkT|&q8>_@lGV&sZh|toji=?9vNte=C z97uL99W^byE`W$gC;$;QFUGu_V2+5T3)Zs0j5T2&E=JkwQsgX3p)+;}=#XVT5^F|= zV6@x;j0T_!WL`$|Whr6ZU@zP98X2Oc!*gr}xafvyzU)Z}6hctxky2vJ3!$SNVQC{Y z+KLq}TD}M|cbIfT@=dxG;KxHs%;IyAUKXP`;gML1I1Pbm*|i>h6Ay1asCz8;-0KnD z&UKc_WAMzgx3+C%Rr3%V$|`83aqpU{mwUTXSIMkWeqzn?-oB=&j{C{4Uck23Y)HM_ z*0a54_3{I^E&cN|>mRx6=0{ds_sCsWty*@`8OMI}Q`>Di``aHrWUr5XW&3yR@Xvq$ zwNGz(*mpjC&hh&%Jpa_oess)`?q@A zn_m0M>g7#ly=vo0-Tb%xr?yoLst{FtZSpE7Z5b;q6_tcSj4CG;j7H(BH`^)3V0%~L zOd*C;lhm1KFg|ojQ z9CPHMcAQn&Z3HWXRkdnc?_FyZylPv!$u@rtseDQM&T3~v>Rg-1N@?GEZkw1FDzAbo zz#$2jHjW{6mD{RvHFB|hHJh>Mq9DPb=ss(~F+Vx=IQ8-Y-}&nCM;>tGA$$Dz^tl(F zbKIpDp1kPVb5<<9;-1^Dy=U3=4=i6|i+RmT{~Frlt!6cj;<0*1SEa+Q$ObJ{o$290ek!$5?LK?hIqJa(EQWlxk$#%1Ho~@{?;Ge*BTY)U#ET zin~#!$%IHldZkpk2U)_yURi3iI!@P+mT>gaotaWpe=wAWM$3|Lk74k`(7CHUxl4w? z;E7x@uKK1IJpkP{E7;({uL}myI|knVb<`Zt&LxMYlSkXBVm%rC#nMf?`}X)a@F__= zlG%k(F7?r1l(?mrt#9Y`H-X|Z%IakWKvrf!T5v7BH8d*u>4_Mv3&XLA)Bg`=_u=kY zRo{F3%nUGe0cU6e(u~(A8e@rilOUK_z^Fk*rAY6+qoSh8wVPm2gVKwDbOxrt%+QM{ z_7+Y1SMKYaH>_vpoWp%C%=3NL^IhNd`Ifcz-oL#+v-a$$LvfeJ#z z7lDiTXtH(osMg&%D(5Dye$kXiCloOmE2%iuP+I|`U@1CqmLk%}Qnk%aJ{F}q2Xa1_bCVtS-( zly8Sp*AgYw&k`O*TDSThk(~n~cQ_y`pqgcABHHSNS1mmN@Z2RZd)2Ts1zx%^qR~}C zD7uUxb>XXomt#$5Bsmt}tXZURxV6J&7_;XHVB*DxWUL-`LZ84;@=|nFI94*G zAQe-!zy)MC5nW!+F=?zGEQT%a49=*n95*m z;wBrWGb;DdR-0#R1&C%k&Qd}##6=wL@TNvl30Mool3i=4&qMgCQbr#Ox{RW=?7VcP zla$vOt)%LaMKnUm7c8CeKmPC=A9QuI4_{!yDDwY9EYzZ+Dk8E`ag=HuTu1bxiCnJo zRzDq{7o`Pk;~TOi0s}BLnBvnRJ0gax45XH&PdTdx)5nE4BA2U}eV{d27LG;WPK;{B zXEZ8K#TP-By?IG0QOM>f5e0_Ohh$Ma3m1h=Kh0bMGFGSQNF+#wm6}U6i_@xV4YlB6 z>K38A)u6Rv@x+-LB=J1cad!GwV`kV8P zf7gc&*wYsCTlU@j-EZ0D_(R@t%0~`3=KXI+mXA2-tp~khp99{w>)T$p;x)T%vHj*V zd+)O4zI*KO@~xla%gFLe-}&a(yov2svj5>T7k~bu^NAg2+}W3xKYi|5 zgdJ=41!qP>QV61K>5$@L%8mjf&gxVTfZ~-GCfZAfkyN!eUU#MKYwmd&;)`o@S5oX- z1i9Fo9lJ$ik;|*vXP$OqU)DxuaaGh6SH)-T@M6LEGWv`Z!>0Kt?V4Y!q2$6cAq~{;JebAAA3& z&pYmdGY|jTKc4w7-?;F5mw)!w8@{pT?yDZW??(H}tM9n+fqQP*c>nFVCqZ^x6wfix zIl8KAXV6R3NRZ2ij`+BOAa%Le|3#}^L8uVA#0W`TZjntEbeLr9G7O`QCYOEM#WDkpks6u+O;GKGa6uyf6dZ2 z3a{|m>?!cg1&qZ`t5Fl77lb_#fdq@v#{THQVfc!sjQDbi0CJ3j*TPpY%;1dld3E+Z zBP-U?agta!AGWQe)waT8kzw4b78Q47cUT|nKHf?9bTG5A^*H>@VxV-Gu{I7RDL2rnR`brXDuZph-#YlU?~tU^ffTV64Fc_9!}cHjK3 zfBzkrWrvZdl2}y2QrUDOAOPPg(N?WCd&|}8!M@Dj#{J92bjtz@3a;9 zfi?G{10LP^s*tZZVpeKJR+X6SLUoJ`u@lDdBIh-_QnxaSA!eb=m}9=BXdR@e{*hNG zUd^}jBrk3CSRw__TZc+23*Tw*bY7zI(v>Ttu23x_K$Q%97GNttPe!%ou1J&1{y&6V^j3YkXB0zG4q)Wu; z!tZ0MEJla4uxfDC6pBt!hBmy6qP6B)Mi}qCkS<1}gk2?u7`3JVtQZ$g(G&xaBSjY= zkxN$)USp9dFG^YmEiZA3-x{hdW2G8ZAq#s$ycmHklr3fjvp1=b>QuSo#jD6#y2)0& zE_d^7{dIE}me56TM63oyaE_wM+fbtbQj4QYL?%VhRuT(9OPW%^CJ6IFB9|fH4vI=j zXUd%_04Cc~s7fL_;R2wWkuEP?NhNJTDkFDbV$`BhT8#nY3s&)3_T~sFxHyZ7XzDLW zF)KhcvgoYY;eiu6OS!aM@?|G1h3(MP4WL~Ze+)GWuR`Xxs;C{Ov_TD!if5>0I@9lg4_3L-~vwgST zdS+(3keI@&)@nr-PD~vDO^Tio)Ko%Smy~Sjsz;t{Qg+w;7 z-S99Jo2n=zB1yL)i#$@|urX8k_LPN%#7QM(B)nim7DVvb_o(My!?t&fAYXd%1-`1> zRKc^l#yS*JZ4)N+eNW)of15AyUAj?}&A z%oDtl?I*wNFZ=$oKPhE%`SNdGgfd@q#TW0o<=YRex?#ipw`{oYHeX);kuSr}kzD)~ z&B1G8i@GiiGfa}fRA=j^jU#sg&|7CK$>KC{G&^u^!QHIg!p04&8;!e|p>7%7=GvnV6?6|axqQ!EFuUO{}r!g*VVQ}bTHk9S`$NwZpdMoDjd6Smekq` zLOiE&l#b;;|M*)^Q@pftnVNvpEObVqctg`s7A}HI6hNyhY+mg1M&U`>v2}agM0R0| zASOk3IuUr5(iO9?^a66p9gGY-BfvC%7A;tH<^tmwIb?R;xmY(36|xjfBJ0zNh38A? zhO*R3J%nZTsylsy5{A(!!Qyahb(zJ6%ed&8 zi{lxo6ipDUbzp41T0`ToDh#G4tB^+3UphKrCtL_{MsavqKtl@FvKwVDg~+Bb5;2Zq zOraDoX5^q~o4&lo2PV43$-5AI+T#5V+W{)N_Cj}FF5$ZmckL~1O`^5VgM@G5HCsLMQ#dU9nI{O zU6Gyq++kRR;sVC5LZbMp6;=>Mf|V3cXOX3v*_%jfLSHB_i%Jm+9)M0pVT`H*W0YN6 zSKAPzQcd3w3uIt+pAsyF#vLSg#d8l&frQC8){W6pn0pIlvO+nriP=d355u06SF~g- zjw~ew2szTR3ngrrMO#Zfq+^tef<@zN%{wQvGnxV;Mdeyyt5(5;MOTr0xv*4*wi!_b z%m^@VedHBZD~Je$QZXVgl<9hc5!06_Us>h}JBy^kR%+FXLz?3N7-|Y)8g*#6S3^gk z;N{ZJbB6(7EOJ*zn92|zK%0_L3*Wj?^3rky9{bB4;d+W=@kL9N*&|#}aa!HyM(2L` zt6%hrFWW@Mn!S&WEcZ*c6kE@P((hx(i|P0>`b?t9Hkt{htgFSEf8i^K58nIKhE zfkkFsgEB0{Gf8vWz3!4mPSqoZBx3h$;K#vh8Nnn6#}d zpdq`-Fh;ou=#M_r{qZbA{-U&K=bmyjIs4=z4?E;N`0}NnIuT#~*4HoadtWzP^(Eh5 zw!i$)njhlJ53jk?Pku!kBd|zKT|ChRf|F9Bw~<*e?6M&{&Wv=HXf;}d$!cTAeC-8C zmlz8r8l`B`u>&ORx)F0jvIAFxu4^_H2BT|87A|o*EjokOAW(zp;8xKqj485dh*x1n zP71}Iv;rm-kM@=|f@~kzZgLySx{NF~nknAIMxt@&-#q&a`Ycxrl!A`oV!e{ButDwu zD42k05i~8kE{>*K9F{~k!btIUv@aCr)t9wdz*x}0Dake8yrkxsi}>soq(rq|^CsM< zq70)P;fMRYI<1}+^I1B69YVL&t8G17c3^jkZa_Z3$+%&;(Z?CPBly5~ckro>#m8+; z!Ag2#d@SjdFSkmnHEPP!MN_$M6EQ?ghX`m9{2Ch+mDj5Z^n@)FRdWpJOcUz|uy08F6(^?9T*1orB`&G+Qg_NmY zB4POk5UPx=+lXI^+!e3#7L!E~Eda1Mq>|W`(F1nnaq*0b#T$||#})y=qebghE_S|P z?1k0p=E#?@nBYU}U6{{p?n|xmkg8HD;Uc~vPcqXYo1=7ciBRNb$5+=wsnLcS)n%N! zWM-cUC1Pp5x(+c*s}V+*0GLG&VDK!}GlqJsGE_A9M7J!E3JAuc@}85`X3<;c3?sgM-Vf0fO6-;uEJ`dO!XpE3`as2Gw{^jXe%&+Fw*@$!*Fcni@>NG zn4+r^F%?OHX^1l1DY{iP@VX8SaP??&v2KQRMWYv-1+s2o0dfTBh)TTojzwj+4%w@@s#wHP8+=QYI}5u?#AFwr2okKAbnYc>eTIDu8GRw)yQp+rlvnWK zJ_=wLe*oz6DpH$W1Yk?iLbVQB3+3a2yHOX2PZ;{Z@RiHN8AXE^rgTq=0zx{zT^Eia=BrNY7#hcA)ZP!r(=Ky(up)h3OOIR*eOmpH;Y z7AzLWXtS6XklkDoLn2#;am1UibEDG(pxQ((AY>5?0lVIv8!X)*fSZnzJivRj_K@ZY zP1|EO%It}*Ar?aS_&qCgzng70nP9}@Ys>vCcHdmKt&Bu#Z86(2Mug+VUb-H9*_+sg z5MoB*4_Q&`c+S^zxT-vcRsTE#%sU*xefPRap7qn-0zi3-~QTd(d1(edCP?- z9dyE>Z~xH#Z}{+m2kx=OvOTugVxR42{$$0>Yj#=wSNpAa=bLxkZ?A24-fpu!R&Mgj zUAEcvYOuuXW1`vMVTpmPx*o~PVGJIOU^$B zea5N5{J|;NIf{w@3X8+y?Eb8ku%#B+Xf?B#4i`$rD4Ulu#t=(K-|_p{__F=wN7mhrFF$zit=`8*YFrC079182jor{uQM%DcWfv5#iWb2~IgQ3) z5nd6D#+P$s#Fru23(hjjn4W(bTMEIuPPN4ZW)#^4JH2KZyK*Ta01!%i3Z043M0Vmt zTHQ7laU$esmHiH{qknDk0Ei@j#s$~Ky9E3@mymnFrp zVq0{i3^l{N=tJUbwT2Xlh^Z}t##h(8FyHI|Lwu7LjIUAc18eSFd;jfzPpm}w@`4W* z6EsSZf>2_bJ!=*g!Hk-P3K>}_)!Y!B(nPcXj)j(& zoq9u7Ov`ALQ4q^ZX;n?xn!p%Up5om=J5sfgq!NF(bMRLiQ@4Pn7TQPyAX_&5}wE|7Y1U+3_I<3b-H2_FwJqbmZ(IOCC!CACZVJBgLNnJ`= zMg*9i912oxMq8iaiwwr1NEV7p)CtDRTtuLZWo(tQr#D$~xF_&-s}%#r-a|{#fXfKU zB6y3|N*V&-Rb-garK@uU5tHbWbjY5dGL8y$r7*9EYNwNEa>>&AXe$huaY*Im0$*fE z5=Iu{0ZhvXPuH4zdybOQn-_Oe7+n!qI%>_d8ekX+r8+%ZmX0ER1yIblbd^yHseqva z%c1}xAUhW;Ru{N{VWcagZjSK40#;jgUe=^y=moE;frlietH=SUJdPp*fGLcK1H;nX z*Q1+K;)7J6B(uzaLyNb8l==}lb1_N2NsfrmsctL z(zPqvg7{!ji58$s$>FAR>|oI=mD0LVM*WHrQvucA?<5PGu>xB5+9s$r3QI=0Mx7#8 zfGn+KEj%gF!gh+B>Ky`>e6@{QRYT&)J1vz3z>w@^Y}Ucp)y9|6npjD`#iUq6LNN*{ zPSGqH7h-CL8s(lql5rx@!5qF;*!l=4RA9c<3|6S7qYEI2Hg|djR30x%sq@uk%qvIw z2;1QhAr~#33qVJW(sT+NAWV_9WE@t2>|;&Jr7b((mQJ@TD+^=A7jR)~G%>_QKojZI zEtk|Idl=oaS1?#vMx3%3bwJhA^8gGxM-p-#-YARUO`%zox=`bVBFh5olu##tf;oyW za!46jTuoM+GETp!v8T210PH!n&HUZ|uR8bG_nm$8drm#n8BI`ZH*9reMte*C>} zc*|>Ew%c|yuiR;7ubnsBbJwk3v0}@YZMlix#a=ciFYedAW@ZpQbPpdyGqG9>5h*5~ znrzg1d_oV|?Vr-w{i;!!HgVlzv(I#n0_Km96CFs;NG&fk8Fop zR|KOt^#y012t%>^tVN&EaBq5F{PR*eiQrStwn|bVMd*gw!Bb z*fCy%3dM9-{_wlmJxWvy37Q2w|Nmx!m%5LF@%d0CBFC%|VOzPNaFaWOJ5f}kN~?138^?y)=*i5LP$`r`gvAnh+R z{@1_%Dhf&GC8?nZp!ggG(XAd`VA*vAVb6<;L=~ykB`_Avm!@B+POE1bEtjL_xU_nm z28?4F9d1X^f17l7(E3OxZUn@QlEv+SP`aV?f9(E$U{F`HP<%_L)Rsk8{Yq`(07^A4 z(k!&;P6N^U{}Xsd$Ho7mWCs(9T{Np|*5ViyPyvuyL+&+hOe#dF;>#X*+>%Uz;`xx$ z{V15}uoHJNc3prb4(tJ`q3&PeAXyfH0_h$H0PH5qA_8OUM$s*=fD5HnWv7%*Sh>V> zOBL4gO1k#`yZvPow^0E_bBATDPB&j*_L0ksh*8okDv2(*M7lj!2qkG%Re(^kgC&N5 zrEA3u;Z}rVh>@ZI;DS(e8Jo+P-3VAEjdCfeE`Vw!Vj*->5Nd=i!f-KJSu9Q~--2kx zhin$Y>WZdL%DmJ~wg!=<4ak+DdA9B{IaK=x}`IedNaas_SuoEs)SZ%G0ER`L&)N~OzsuRA#1^`A46%W?wT~f#j0OnYB zin*j~b+a_R#d*k=od^X+;mfX-v}tMu=V-aWD1_3yAn6XA z&k%oV(_i!SpND1&BMuiZqmI70Kw2xs5HZcSXeBC+DTvA5VXbVnk?BmEFAF=cHVcL0 zxI*g+VpJ9>2(KZ-3v!_h*gT~;;pu{m6{AaM;>w_CJ3W8}0ah)$ZY8P8nP#o52BaS- z<6_Yu1tE|f3rHA=#L+sA<18&vN#TuJMvG9!QVT*@6d;jy6aWt+ucQ<$F)E!bLRCPg zr3hh$0x1Ha6v2?Lg8A~ImsE<8mmNT4Uc?Z|t2o*UHai8kj3P63J_G1Bs-;_yii9aL zdvvnMGV)?9orx?e6#&*DsBB0z&?$1k1enf(Nf$MPQJJimED9uMk0 zEgMR7{oN>A$@Y$s<^HM^1${=Dv17U+ThBI{y^L-58TqwwOt52n%J!G-H9zve53yrI zx;XNN)&4;3b(k_8clOI*$nryLe}FIFciR>J@yqp>UU1Yd+ittfl9_F0X8wHNEsy@t zUmSbL+fn1=KKzbzj(88ge9Q;`;{ET~n;m8T%Rk%k^?UEI?{1s!v+L&g@@^})+HR{& ztUry6M`I4tJtrEBcf&N6O`$r6e# zVaI*0sNE2bl|sZKL`8GfuFx%wBw~96iUW z{n*zv-}%O8&O7;pBMaWC-sCs zb+qPA6b8GA6eFke1OtloP#TI?x_sD?Aj0nYDgvWuxOQ)1gSo^IPy5d35+sXIFbQ{~ zybMKP)#IA&3lxx1+8l!!^5@`{0VWy_6+gmvP#XjXjY51VT1J8d!Dz8RrHWQ|0R@pl zB4w1$C`GIC+5a4v`-@SwmCN z?TUF-xoYuWLs%P*#t|}>s*MTe!VV0>9!Ug_34)m~0BHpbG4^+?C?UleERv!t8va;J zFeNF27?maRC|3(>6o#;n*0Z(hFgYBpr>?_ok>?#>qusp5C%=1#o4W@sANAdX02qCe z!`QGuRMn&YL{h0c#Oi6X%Bw;=w?zP>D3u);Y?RGzY9`q1C`52}k-_LzW<+2iMp@=+ zc#Z~kOLEhpxKpc^^n}pujPAC`*bUT-%&U87c$Sde4eP>pj{^=?SDUmrR!Lr>8xlCl z(sN_Jz-8C1NEp8En`D$^bcF3FumEuJG?I%r)hU%! zu{gy9HY6;tjKhT&SVmp|jTVg;F*=sc)Wk_(DsO}pibZ!(5sHgwS`%@6DLwPUi_F-= zhINwyzN64Yl*hu0v~F4W0zjq&RtB(W9>#{FuyjewMW+y8Ud@rwlM+YK{0EEEOeHdk zMj*jj5DVoZ!jekitZ*!hioiwK5x?as!kC)LlDfjGs+9+>040%9S0oH107uW9M(YYP z;xN{gt}>dVc3@16Onrw7uXL??%4H(LYMTOIIzR_YWTU1pAUmNbwPR!fjnYYuVvfQn z;*53R%TX()q0-SA1<{hSFb+&Zy5S2BHYPGpk*=5$(anOR$dy-+W+4cq=VXUi5M7wk z(L)xT4q5$vyFg6#*2j=6jwu(lO()BRB_psQI#|=RV*V4xi^LrWal|JuBQ-_0YO_0G zgc8Kr$Wa*zR<@YA2L^zwn1X1>T!bRZ8x#qLN07aZ4Hf@rQ;~3$O1ZJtuQcq@wLFgT;oy)uBuT0*rKwq zk}T3m)IltiEY7HLP-Gdt^l3>QH6<~+#c~tWN?AcgX>%rJ#c6(d8{qE;BP-gd+5$C?MY;)Ntj!eOzQRc@Vx^2VS8=rdgPRdW8 zy8q#IH$VN*ou4`Pn3rubv;Af>J8m(v&yLFu`14mB^`W<)cEsPFdidWQ{r)$f{E@#r z^T>A{^P#sMa^U|u?4Un8_?`RiyUWa8yUy&n^RnG{TE6Rw&3D*#)2){;+jPn77W0e! zhZSOiXrN7Ge@Yl7Y`@h$GIojWo_p4*SR_h_@M4HsTvC@~8_H2;tsyQXj%gp(+tG}O zq!hd;BH*Xo2@6c_=qhpwU}w26EaS^nf>ww8UtnUSVLSCJ2sf3q}w+rjIY1Bcm=I z|BcwTm5kxyyojw=rU_z;;WC1Sv}+tM?$@*XZZd#ZvfHP&gN#8FM($v!F(OQam8+fT z**~>B_uFD+pM4X1?ypUK5ztVkhzC zXje>@3p(s7hKz{CUW?Bnoj61>xD5J(QpItiHh4|s4#$ZWu~RT3?lKO(EV3*jD}X{M zFlvxxc3zJ^LVQ7spuS5x+%N zXD-1kV8_UcC5{V=WgG%Cq)+>vDAL^lJ@b1o=(g))pZLu8dCu5hUY4}tB~_9YP_RWn zkbQT%z4b~}^AJ;*3IJgd$`pj^5H$TJ3m|e+(eMx(T3vSav#8%K zjD^Hs44HM4h3RfsOs%0JQ@9k9o!%Xjo!1auql=0-IRaNtF#{K^!WIUH1dv_LDiq2w zVi77A0f%oTu@GSKgK^==2+;o<2q;K+7>Xc_)tNiHZW(27l%-m(WFkp*YIYSrgAE%}Z5aLJ3Id;bC;s zVVbYVttS|ZQ5K_#uCSidbPHva^z39b7p*ep{@-c#KX#<;G~Z8a2-v>3)FNa3Zn zN|H`J#Z<$XI6GY#MmzmR+119_(y2r!VYx^la_Ke!D3yKsj1pog- z8S5}zid?d&c9aTF7srsZOCm@SMlnszM5JyMo(>;O%Z1V5<7J56;t1H%g)~a&FxB8m zyT|FC;^;lau@ev2Jq9!O$c-;!$M%l#WgE@jyGEJqBYQDhSBp*iacqB2N*iB(e8U~M zFs5ub88612=@gWi{m}<+du+Ww(=+==rtCXE^Ta(5J@5l$dHtFj9$Wvz)puR>$l9AP zJmUy_d57goR&GAC$M!RCef4&Syz6zxfAp_U{=|Xje)8R?9{Shk9R2PKPdNCLPwaox z2j6ng{`Jw99qhjR ziyk7CI3ObuN-$FxcE=;{&t_p>X8pVhaOkMm`5F9z`DiKZ!%$+61s*Rj-A$EzSU#3K9wb(AsOAusa z76AG6J~jpCrQ2e*Yiu*xb~C=*PBAY;7#`84Gx{N~wxsPKbHuxCIwRwJcX>*_@}R&d*ydNPy9Wp`|tezx_htRxcXLn`RRvNqs&vj zjNsrk(VPeleXz(ZQ6XFyS?-6dkxiFUOc^OA(P1qe-cWoQQ58fhXpv&0G`$aF^3m5Y{d zGiA@k9EHLkDRzxbmc1HSNCmV+A~edK7-eZ6HkWyA#5^x}LFm$1B1`8O^)XA)?RfPW ztDA*!_5_=|!}>h;`QMGehrCDDws_ckWc7L8$3C8JS!7g?BGqg)%u!dhe%*TGgMMdb zb&IL0Flrm6T1Lgg^99q2X6c&sP}qkE(wehy9NFh$KKI}KIkr)GCOinjZjY0|q# zgEdDMlL9bVEljs9b^z{>wXF=jn7S}VDZE<4yf{)?%)GK^DaCjO&=rKh5W%?<^|#>K z5xgffz5ph|t09pqdlZvzSt_O)xG)+Op^1z-D^-^rOt;h&-N2NAr|1gXXaVVj?)(`v zP=tb1b}Jyb2+fFH9QH+JB!VHjM8Hy~7qj_RiGoMXN&>TD?W%jKY4?G#A+2b+3@@id zXOYohYW84bUUs(;-l7Rh_e$;k_ue6O@7Gblgp#7CtQJ9?V=0;}0GLEv45>}qkaVO; zvG?4i&gr5ltoV=|1?f!oW}w0QU>1B9gEQ%DY}gD6fxC9 zD7+ZeAR~<6jC6KwaVRpDw2Ym}bO3WP)ROYz-Ykq7Qh?wT0-RJWz?e?)`F2d3V;MzI zKw>FiSojh~I&j^EEkMR#0JEdZ31tfwO zfx_1i_huT*`a8Gj)6=O+M*ZS*A7xlfk=4(Uy=BoASuU39(dNi#C<(w_5CLCEMi9u6 zo&G{nZBkJ9lA*{rq#;2DzS=B^>6a)B9n7Sx5AdSWk(!sZT-K8dUm}jye_lz^W!uWL z%LPAHKFf5WDrR20XfcdxR+4C9j=5N80jmmzqN@N?9tG$ULlQB9l~Irpr}$aswFO_; zT0O|@6p^(evrq(7Eun|3DwcT{0vBLLpQ1h_b2ke}dY5*=h3e#-=4!r8uwq0b%~5M9 zbh)HU#HDUXEl0-05DTw9SG6h;pJQ_o2Odl@WvK=&M;0x7IvC-|Ma+VP31FyDR!KnJ z;x`wq2VGmqwv{R4$t|Wl$nv1fp6I-T-EVxM#x|6D0h{GNe)nS*Z)Hc5XTQ1p)PuTq zljFkJvA433V%_meU#RrPH8(%J?$+$O&pdtC#`QNm_RtU3-FNNAbvNC6`<0JAaK~rP zJ8|o!GutnpS+V)d$}ML0-D#6|zG=_HKJeC)4u97-EUx4-N)Zy76tu6V>YnlGbSc<(w!Y!98iO28CAZy#KC^Rg6# zVn`5*!nAaD^A)gYCMY9;l+-#1i0b0MwuT9gj2(MJ8`bT*#u%{;WqcO5HG~w~K91+Y zpv)MtuHE4)zV%gq8H!-O{^k^;7qi8LKlzwX*iB|{W7-fSM_sxCC~6RYmSTV5;(z|q zXHPli(36h(h`+z=_p!Z+?Jr4P_1#PT-q+fDuJ-%b@#SY8S?l}Daa$x2d5p2fk+GWy zPNZ^{>C9QMOW-)>*hkJKf*i?p>A?UPVu?qyh^B>PpGsM@vba1W;KAsS?1J=~cZEse zibk5S7z78YiPxmyK~W);CxWSqe2w`QQ%2>*mnjNXp0K?TPPc13e($SSvVD1(AkK!o zmCXnct){4CQL*8R_o)GQL+g}nZ&a(isF@+mjxS5RPs_*xKqw-NO=K`)vhhLG6?0s3 z-)(*Y3>cEc^*CCh7J)7-7m>?IXMqe;(uNA6U}6+4jhqSsAiF7a5IZ+IE}b5OBafly zl;`gqKJ%0AaXrV#X=$MZeCitQTBvh z*FCXNFmwTR4bizYMCV1fP;n?QM)yaOmkKFFoa|aU5v{Ny3}I;2v*0D$CUbJJwo1 zM10rnH{W#qRqNN?Cy3f)A&~UqSFJWN)RaY*R6vVEQNN)uhDd8Ai1nm1w5i9o`za0k;30LkJ(+Vdp4WziKx*N|=%)>Z~zQo7oXxJvk@Pe;=m5;ONEXlNRL zs>8!5gHRwlz9Li}JEdsNR96J&3@Vz?`F8X>QN`f`Mv-C^0HgB20bnRBL}V!l9n4`O z6aZ(4mV4Qi$1W@+JE=%r81_ols?DfHgO|%F-*RzD0Jsck_4sylfJc_}041FUVR<2G ziIhPE5uBqqS}qiYRoe)g-B6>-YYKdGFLm7tTi9|bX)7QF%!n+4H8IUvN3&?71%T(( zL9Dji*(+etc3~x1%ta**tG4+zLG5s9zEv)G>43vmgVg{ts&YcnWgL~yQ%3N4N>eVQ zq2?lmP+Fpl!pZ`m23^J>x{UrSPbm0t(poW}qNGbqJ%WoM*EA75V-u_m!!BSY(G}2F zi5;(I*ZEoTQ+Iml3{9)0yFA6@C?J^Nko3CE1%Odo8KVYDFmum1V30xy>pX9v$}0sI zL;n6U@wkg4x51(H1g=eqENeX#&&tmq$B_|ayUBEv8BNyx<8OYX^;S0Ni_Es3?JxiO z*$uXpv1WWZq5FGM2yz?BvwURD%{H3tID0YssV8p7m!Ev}R%F>O^Mm)_^z0LBzx?@g zw_5TXUw+xLnVq+qdGo7R9D2~7pLW!HuKC(IpF8#M&p6_pD6^l={@8n8i!bl9)6BlR zFWGz7Wv|$A*^b*S#h16-bm^u`m%fDWFC)2FuuWt%mW~pljhG+>`@>|7qKe3%QC$k8 zA*~@B(>O96ql~d)j+iMJdo+_B$BYzH@Kan{7Z1fWg^G3Ra`Z=|`cunxmc5DX8@;G8 z!H4ZH`xY}JPObIkw(VwFBFh4*TyQaEkwSTD7S_AgFi5UlU`Dkm2~u9898D0?e-yyb zjY8V920)Nm1eD0Pxy@I>{)QBAeAy6-|02+(+tnqQGH#2pqOpXoMVWD4q}V7%tVM^} zdk33>O+#Y4wv6eJSTkPi1#Eab(ReYged19cv%d^4krW8BQ0(z#lsUf4!t1l=pZ1+^ z{KNUDA7g*{Q|BK0%`csI{gwacJOBKttN-;2ciep0x_hs2etI9<_m_YC=(?ZyOFIAa z2cpp*6cXi$-PC1?T{6ZfDF_iGMw#e>(Y0M@FcDrx3ZaM`pDKQ>I76^w*Ji$4C?azK zXe(Usex1M&aI^>&!gP?Dcn}>AiV6*uWN4N*&Ala_Z&Roq@ui#c**QCRU49TSf>Yx+4<+Y5C z6lHp9bsT5^>*M+VXzg>mrv)T~BJN@+C)Ot;<2W7S?ioPKMOZgVLeXWU025wAbS)_X$kC9b9C;L*I02oTa#TglevMhwV zypa)J6_87Ln<)#W__;__7RIWA5!UFKPA{PFqTsAWgOkI4}j4{rMX*~ z&T5s5067ZCOKLKrl_g_#RkcKhSa_+~qy}c5A{2mS?6a%vB8(K2L@1-eb`;8DsDOq5 zru7(t$+EDWSH|e_8a0oA6f!V$D{NAdjsm6_MnfG1HB^Q!qvBtbIPy@G?@s7y*RpC}*_hKF3~&FY_JlMQ9CQ$nFF-1XD>;IINwnxs29OquTIV zqaySXs#;T)l(E@C))g=>0Erl@4J^?G;8)(1`}r?q*?ZcUGMY>w_LuQxUt9jwPkcFg_D@G4$S5=2``9nU zmmhod#*G`U`O(w2KlX@sv+v!w_J_|tx&F(SUby+p%=VklynKt9t!y-JI`g`{wmIZI zZ#wOm_g;3%F<(6M!xx=+5Walk$KH0@5r6gf@BWiLSI+FS+YG+E$1a=hx?+c6hcvFv^THFb$maV3N4GZf)^0jve0EzznL0B zx)ELs7g4ptJ7{cs!7M1V{bf`a$z{ZUS%CZLY}>*3Z(w{`SIdG>qrkW}Fgkq7v4?r@ z8XYD8!nT_{7&@ga!k&NHvA)0T?@1l?vG<>M`VsiDzr6g7FP&|F8DD;2)wTHYgR6cp z`_1J?*8UV_n|y=VWfJRQL~uwJMoN|#rHjjDHA{b)anix9c#VL$ zXt|SobitQGBY?eVCJs0A!91%X0c2j@WZ(j`rYSL&2X|iBU(&)U=Unj)Yw_RdpLuYDKzL zb(_<5Cu%y308Q2`JQQhD+#Dg@CCzkhg8zfFjSp*bW@uU=t6r<&$s3Dlb!1yYliI12WD@D4Az;4IY%%t%+m@S;?|(M1JbaFZew_&F2E{Z*owgD z0Qrx;{&!LQ;uO4eS{R?GFrqcjT~roUlErbfs^W+stRvh1gp`Z1 z;JRw)kS!X@tN|)XG)ha+;w)@C(-yW0Xcn>v!jU4oPe{eIMB;NaYKd9_dR67ZC_CTq zT65%%EUU9PBIFwko;2$b1OPZ0@r{z^YbZO%EO>DpGHT$~tW2@*|^qDMtA3MJM%#ZGP^x<2d zc=(RhcVGYbhE<=t=!`9vEZJd;B`daEf-l=*e$5_RzW;#zjz8>xQ$G5RGY{MUykp;e z_EGOT`qBJ@T z)^Bmk8^s2bmr+1ph4#mnzy8HfoqY61KY8fE=bm=>7e90Izkcg8-}?FmzP}t__Wk9x zci%Ak%h(UE!IuZ$K_26e5g(Ti$udYKOY|pjzKpTsK|^5BsJ)I_1bS=W5-x?q}X;d)*Ok(l>H9a(+_X(m!#NjI%8goitNZHiLAk> z9Op%c=?c>&M(Jiz!GU$-fP#448c8&-{xp)=K}PGebQWH5Jps8ClYNMct&ChoVK^o% zqyB*ho+43Bx$tGvv0T=QP+GD6+riQ$)1wULrtjn0J;goFy)RbfquVRBEba@9vbaG2 zORb(}QDF+MR0Wu()~KOT?NG)O{ZzP8&W(2w?pUVB}j{4zV2eWff=Xnx{a1`tPdlL zIFvo0{-5x`U^k@Dyz(k(b7BcX&hFQF=fV~ebYBz)lJ%PW|cP84t5j4moIMM=Y3DAN&t zgc?aD5>_!Sf|hPng`}g?I+x38I0stxqUD~>9e_~Mfzts*ZX!Za#?jH0$TqYmB`|iW zn?;My$TH$o-T;i4EcpUke>9DY(^dT_!`!8spkUrV9nrBBaobYIW6C3T@}4P>cjdYii-yNseNU zm1Lx|C{K{^fjR0j0*s8+pqr7hP`H3)rwBqQoe%8N5sC^K;pM74NJd~?GfFo~7L}H2^aes&iYex)hjdZYfy62t_VN^`t8!yiuQN zTBEr`8mc?oS(14*XAGttquiTF(WY@&j0R|ZvS_WXd(F5!An5AgBj@q~{8+dD>zwODg}xu3{J zld)+`Sr^Qgmx&E!l-WDj_%cD3+iOOd4K4h=uSXuee*K24o_ylg2iINy;M$w+yzNTA zl7s|py=-R1R-3HcYN;?MAG8Oy_tabZjrPsEFHMix8H*kTN~A^WGm zgDIns_E-r80Nce(qm5{_P`RU-gqIy#?7J>6F0ooEgvB~BYhK>C_J@^w2OC*NpY0~& z#WtF8GFKG>ixkTu>j35)|iQxWj6s~>YStn7h{Ps5h5M=CFShO0C1_Qv^Pe0+v zFMalWa`eal&QE^%dr}vl^GRgc%h=!l_UC+m+56b|@|wG@-+2G+gD=NuqQG>FvW?vs z5X+!ET~?4VZ6hK_$fk?V5QPBSSR^b+u1ha_(fG2s`jDv+7J$Wt>GBK!3_v#|*P{=v z@;9XD8AYfn79=PF6=}kHkQy|K(GdC**Nb7nh?cK8+6Eol<$k`*T*@duC0Orc*J8(0 zL7DA7b;>!aJ-p6(j##7Di)e4qAn`Yctq4kjB(M_`dz z1)G`!-4-*9s?^qes}?3v`QXI>t+vS~x|Yi-wI&F-9ef!HJG|h{9lkr4f)&|B=ERp{k2kY4>Sc!lMaM^o& zRaKQR0_(zvqY`mOs18BZ(utZO^Kzu3vASg$F}bibM=l&?0Ui$|4Z&9la4<+ix`GSR z>X{Trc1NEGC8JSNCE4lRIaXu^6q&ITOjk^g7GVm)NQCVH7+_IRKtalGmHfVz!Wc(J z1+<1D!?#2%>?~4Ha<9&&)5?g$r8Q|rmCB2qh@ei6QX2|mG<%Lh!IYvIf$b}t6z3Qq{EtsDaoqR z9TX3HMKC&6MlQzblWiCP#;RozoLA&s7P`EGTLg6)g?CXQjrQL~`+>vlt>20ZGfkB8!;0K*F;aCFL^Zma$O)?uBhNAX5ZURYQ#7 z8R=C^A+0}4-9}YnS;VO}Mv5*YT@Ya{Z>v3xIo4q)&Jd~VQmn8z+^6SEQ#Qod6(n6? zi&hypJNft3x z&c%gbnxg3{sWXsW@Zn1zT`t4KMa85{t@TM4M`7A(t1!CJ3K+RY z4RTpbLg7mcarB=Lz*Qn33!#W3WA)Gr$j)mR^BO~?hAH5vWW;CT+eB2N+Da;sXoc#d zl2NFRHAgx?1yDG4VK$?DxsTKm`F}7$L{M{hahhFm*a`i)3ZMU5FrL% z^d4nq=~)iI*OTMRZ8XQ1@nSD#vm1Tlp;g)_GX+8R*Q0!4xeaAXe@qHlMxwFid4Esp zp@*+s_rUiadHBZF_g%I2{_F3)<7$uPXwl0yTejmio9(#uvTc?v*>>s7o-3EVZNFXK z^H=+R>V$)i{lJ@!JLJvhANRNRmrwfm+YdeXwFmyiD_-}im%aYg+wJ$iR{ZI{EB4xb zhn3rHx%sA>zJ%Y$_LeoVRf-Javly!_W)}RH9_5VW!VrWM5}m|wqnwNpOcatB!XF9U z#w!c~=yhdwTfLD@bQM3<6_e=VxxmW6VYw&(5`aj`B3Im&q5xj;SzVENRgyRb{=8%ZVpg+>|D@octu)- zrT{6sbuOlsMKmylDWeq*Z`6UX;}4Gu6Ypcko!hy#r+nS_zJ)dG+M6Z-mt1g$x4ysp zxeNT>mmmA`KDNInWq;XUl0ueI<{#ep%~iKw;qNc^KDHmn_AO?Y0Ae_JXje^^s1S}r zB1BOkE?|r(Gn$EV;-3BCwxSuLXlL2BTN&l$2&3$R z026v;vkNjLBgc^!*jy5N(+?&~?v+{)mW=YkGiIsfXhF=e2?`6w(mGH^DEN^DSR6wF z5-z$eDSc4^C^%nXD~Tc|VYG&wHX(IkxL8;L1!o~{^fFcnB)z35h&U95apB&wQ*XyQ&XnfV+`t{DR6`*S&o2eE?s4g@Ld|gj3L@1bY2|i84QJnU7ima>JA>oxV zu3~X2NtQ&wsgx@iU3QD3W-&Pm{6H!_p-3Fumg{oHpDB(QO~gC#gQ_Mx`N1_&<&Lcj4|ZG z*3c+bE-ne=NK`1xYmuAy)C;VsQH)N5(0}ZYBlxL`pZw@&KYp6>teAF= zt8$Aetc-@H(Ofv4-d&h4lzx0CmkBFR!m+=o0KB^ z_*FAwLT}<-{vla_6%3!a%VW*m(KrH|h)+}#gO&Fj0o@&(JP=VrDHKvZRJ%DU!yI;I2^hF@G)zOE@R+IVlfvVhMsl+ zDwUB0$k<%cCE_AWyIMB3#_n@ex~F|az+Oqu|44+c-DJENYbF$3KmUa_#$TY`mMSyYtG`w_Wk*n(MB-?9+R^eCAcVZnE<>Gh5HhynOl0 zuG=pC)7@A6?c3gP>JbN=eAxcl<39S1j~?{8BmVwPhyLB`|M#E2{0*<#5`Om;OZVJq z^F3B>y<)4)wp=o^{KbBId1eM{L+H>td<{v&=AL^mdhW$(qz`3_>Vg5|ZfG4sr^^L1 zjUdNBaY+^cN>niybWafcm!m8wp*VCf43TeL+!LN1ph`wJjF|9R0Rbp>dn<-c5r?3z z9JK-}k|LnIYU@uy3Bu0B5W6YUVGw-Wwl2F-cGHng0mKvp$x(1z+Z1F}Oc_~tSsX4+ zN>v;MXSAN|<)Yxf0?J4SQx%ui8NMMU!E51A`X0i2bOPbh{Mw9X71e>N5yx<={J^%>HrF5~bO)@WXokUl>> zazlzRj9eC#T%;2UK=3TpR(z8xnoph)1PqwS-joHIe(pgwYe)cX0LHvV3f<-v9ypqy z+lM6I@j=InK31$DkwqX}=mN?fvJX#fwHYl70N-FNeb~Y@%9qdq28Jas@fSuL$N?Mi z3#D6;P2U_HsVrKvX3wQ8QV>IsET+(Em_jn0i2BRYoy8$@4!Dm{#0(!m5I3w6vFln) zZ8H*c#HWn8RGaQ}6u3A=HYA8pBv=(rN7ciYB#Xr$EvfD?49ZLg!v6$RX^a z6^BvGrlwVoWyz~h9n*Z9aJF)R zVS}+(M!^|%EsnypWE4Cvrb+cg#6=APHWBs;W2pd&I3B~Cy)e_&U>BrSDlbe(M#v&i zAOYkOjf^Y;8Y0Rw8elpacxuU|BH0^alu>r)lB7mk0SoR-v#A|^VH=FQA)zWyTP2KR zh(%;W<>Hu&R{d2n+8o)@pD3ZnbxL~A{1JgDmA2>k06pF}1Qz|tD;6+qr;LDO3?e0c zN;L2$C5g;9WfSp>N(#?{eawqvD2cz&g+l$}wz^y}m!eGz3&|^9qbo8PcM2q(P;{HP z7M?}Skoa<|1S5hW(o$Hr^t|9DDss6NI%XfCasgu*0+U3<#?+?kKh-O#O0*-z$YSxc zr{Kj_Z`ED5KlI>Qsby*Icr@u`5MN$G5pV>jq_ZnNnO9+x5>^(`xMvyBoT3Ju)m?y2 zU>4U==f5Fcr#hvpgR-au8%P*IP~iUK-DqJMDMF1;36^w5*O&6fdN#q)R(XriL@W{G zn9+3ZL+1`mo;#*dCPu7+{p>$-Tmew)RZR38gg##*^k@NJc9x%8QkJ0n0C zmX=q9Mq5L=DI3Z@7M7h}t2Upp+Q7o9FeD{i-l0J1T4eYHSeG$-kw+Yu=A|ucN6JCz zxahKr4@s{Jta7tc+kiDHiyFiyUFFyn?vekiR5Fe2dx7e}Pe+df=YNR^PGVo*SNg;P#EHu6ugJZI7(J_N?PS zu+Pq$?zi{Wd#>Dk+oemlSu(TTCNq1l-1>kwzxKqB|J5mnzw`VP4?gdNgHHM6JI_4s zZ%_K<{s$ef_v`jvjxX=E^Je?(y4CJ0w%Ts_Qe^q1@nu{uj)t{Gjj=ZD4@Wb^h`4EC zqLY{%1*ydEutj=cUNJ#jk5Z6=iy*Qf5mk(m9SmtD0>W!iRG644MG8TH4bip05l=~j z^=;({X}q#hbA%U@9Uynn_$niAZc@bGlLBKjWE+=o2f&n#hBV6PO?5HN8eVF<+vv0D zo1jrcL<$xYB%_QLM0}R&5jjTO$|$ees=tLTtRY^a8Nz~bVXtaq%D$xRCS&j|CNM9yGF)7G7O6$K~QfnuVnk)vFE@ZhFxx? z$HMU;(`Cz!q4)&z#Oo4vZ3gf`MA2o0)Ll5_qVB>s)MuI&p0SUV91G|}gQ5nBBn7KB zT`;XOs<3LsOn4=NCq8`o;L_znDB|}yqvbg4z}fQ^YD888qwE0Li<3)uqebSWLPno# z@btb9OBaU#2$C0_rLwg!f(XTKrdk%riliF~R=PfWo1uct%gazoi@T|8LL#NQfmQ3n9gMkHT`BpFKl-)Iv8W_A`p7P-N@YCOvG`w0_vob@L_

2{d3993w!OaZ3307%+>U)a`J z92>ZP^VttRo0qQmEnU&dVu?8N8d6Lxm??-xfa#1Bc)Es}d_l+RyD-Jt{=^%->@?J!32{ zvNm8au>+PtBIYSbP`=O6f#>(p2Fv&#WZ~Io@Fpcm#2zLpfqI}AEQTsWB0LLtn&SU* zb|3!MR@J%pAK;;ifQobk0TBfO5mYP@do-4)iN+QiHWW}S5V6F9y~M;=5yXH>@AaYA zNYG^>(==I- zOsNucgdEGW0*B6}>1&N z@Xg-P%xjHr$qa_Vi;zN=cd`(=HrXadXLm5&zQ{IctcHR1Z= zL-!Fcca7W4ttzDv875_GB3-5CXc^5&%i>idGb2j`u%_Fx7?OajY-`>od4mL8TCmoQJEV5G?UY;0 z+DOL8eVP0cP{bwDsP4>nGR#=oYg#V-lSm~qWP+qjBB2Xs9b8mCr>$gm!tbG2r+wwK zLcsZ7fuIoRftt2@SuLzs_ATZXxiy6k6n zT>HywF8sl7@4E8-+b(>|YmYy4-)*0H_~VZ}c-OslUU|g4)22=P?Yi9w&v@!zyz=OG zzW&*Y<JcFbtxn}au}SR=VHK#nWfw!fq`cisJSR+!%#Suvr&tn`1NIE zctOoVT*WdY9ZxO1C&CEhL;@gmv%{lyHUw|ExL8ja@`7P$f+iaSqi|A%G-?*52QNv# zGX|NC5Vr zy^7^;edVJbF8jT&>n=b0#;bm=UiM*Z-(OZWyQKZ=kYd>_pmz2Lj{6#~tDU_H+AToQ zWd!R+aCfk{Qw(*dpdv=AsjHW59k!Ga-L@uMN8%ndYNt);gUOeDS){<*D%FV&QBK!L zSeBf+guN}%ogSur57HqJgFx*7Fmm0I$$^rQ(p<(eU};zrB=Yx0slsv%P|U?TB=lB;yLZxb$?%%>9DYWe z_<_@>o23=V9e~iA^%8)&#EgzaD;7(c_XeZ|W@%;Uy+g4>@)BiyCzfzCWMTb70Wpe) zBbQ-A2y8WSDLNPoOR%UWF)pP9wqC4uj;(DeSzC9XOhhMW!O)ruV^Meo78<5WF^XG6 z7GN$ckaQMEZ`R(vWy0QSXmvYy05mj<d1t0bgi==-Rx3Mb*3v&ACEQ=Ptgi9OBs6#-$v`^j;-i3Sr`DLKHI6a zETX#-{`ddG+0Trl1qbMu@Ir5?#MXt&y1egb%x|B&K z46ovrQc(&-Cxwd~l6x-UAx$R<)7+)qBk90P+ENAMTjnK@y`a;=B;pn;a5WTn6C|+( z7|R&h8M(Ch0qU~&gfjsifp6mYTYN*|`5Kagp=e{vIE$Y<7xglj*B4JYz*wA?MM+2* z9tv8%6uyK@vtW!f7rLQp$uNaxU(RBM`JvAbyIkjjcLBFSF}7XUB`B`*uI=@$jmv1a zK)Xu!JARJvM%ky^w$uG~|C~kW{%tO`|9_IJ@0*M)V78jxFOabGM&P{wz^n;lQa4MSGU47j!-#I> z?U`1l?Zr??UPi-k0bmy^i&4u6++s4;?e$G>lounN3muY<8K$8uy)=e&;f*dy@?|$o zZ(9^F#v~U;f|F#&;BV6?7M%QhSmvm&B4DAY0(xp@s1jhccD_M@?6ERYFDsmt$v%VK zmzU?!vyW%1mv6oPm-pXt&6dk9xa;~0$$8)Z(!E6eoKl+JJJ!ntA6}IEnTa!QZ6WQwDs!OGr!i^|7)oZ$hz7!UvrMi!zu2deXF_m?d znUKo51V#a<=!2YEyZTr42k9BE%9pPqQT+&0MJWN%S>jPtic&49g(+J``o}-|VP!8- z+d^{WgXMg+!CnfW%01sbc_O{GDPsuWs8R>flZCkItXM)OK0aL zLrX!Bm>sWv`V7q6ODS3y5v`Bu7=o9ESwt?7?^)||Okn&A1MzWd#d1IYrKJ-_^>UA! zJz~~XI}<~`zwGz1-}jeqAb#)b6Q{iO8)u#B?=OGv+h_dz>@Q#btG~bQ@^f#$;o=AG zy6N}#-$7jD)xxfAkAwXfojRAr1+Nx%U$B!(s+072Cw6fW&T6iHUHg>MJb2GC+lfCVijgav{m0f6mS zlX)wEzb!o&l6+@<5OR;F(JYf{guF@>cydSyAFWoGpen2N`QRawh6O3Zyrw%0cqZ-0 zm#rbf5@946YfY_X6m*IgS4DMP@1FkkfC!DG8mA0dTDRfHX+{Vk=`+U`ux<%YCiynB zOl;|nGhqy0+OCS;k3?i?F)8f5<*~>_o(*x=wVo|$x;eV$`$Pfq(!)S$7IebbtU1D9 z6OmBjEdN@@a^OYZQ1BE8C!vE0S4mDs>y99F@4VfT2z!%lvPI$BjJOnwrI*RnII@>5 zd~f1-hQUzp<63(5c<6k)DDM_FytBx)2ErfKkt_SS+8D<5q@`bdL>=ZL)&xJA*AETj+ zQyo)CzPY0Uo0mXnLq!Cpn}QbzV3@NH2|E$Omv9YP2dyrnXUbk~>%$#Ba-%V{ zP+*fhOeGbZiRQd;93Vb78x53B|qR8tz7bS;Cf=`sm%@T-+>-99{pm zW%jySdb>?tkZvC?4Hd#Hq`J(uxm#yLtM#W)zDf3*BhzAGs*(#Vuz53S^rzoi8vLHlq_rR~m$p5hfT5C0|q|67C}tZd`aJ zk5Dfw0n`O*0cC;;q9@KidY%0Kft!6E+oNO;lyAB27e0DT@7v9)X2#oZyigr|_YId^ zfAP6@UiXW8ZoKgPADwmYmS6tv-b>&7+Gjp$t4)VJVcTP#{Fo>2x0Bl0SD5#I%(l-x z>fqPC=bO(i_43nBeC7M!cEZW;e%S}!@uJth>?zNB+GC${)ZWiO?!f0Cd+?Em z?!D(O+xq_Uj(=#iylIoaB&B4l1g;|MIi|9%s@5N3_FLIXLWP@p&;wJ|oMMnyMW33K z9frFh^)CxT_^LmOT2dj*E4%8?uZm$Xq*T<31)~HO9VP~f5Y@}dTPblN6vZaIpY;}6 zrKyWYL5cwzWr|ZNWk;nPh6KQ%i@|8VlpZKo>Pl5g@{|d%S({hWmz5|0Xp05O0-*jy zf)EnST&zjOyR<1Gn;@h~si_T>1_mi%Mti}}e~Je!3ojTi!OG%)`LmuKD~hX{r&yj( zXajh%+@ojDoxk|0kACg1Pxt-h554CvPXEX|Klb4__s5rgh57t*zIw%l-&Zf+dfkP6 zfBCofxQ16A2fMa=xa{}4C@hu6F8iJuQ&h){s$(rLE{vqE*3IGz%PZemzU#q1Q?>HA zd2wEHKkC{OY!~ijo5^ywabLSnjkak7gK=iF|2RRh*eGCh?c_)ts@sv=%SPjDFJey_ zd%K4&LV6oi$6!bz8D4D>6r6UmvMOy}Frq6K94vLsNIsD4EV6=UWZ^;|cZ39tNT5lf zDj8WxAV7KQW`q|GvXok)*Z>F}pxmO>GO`dU>0*;oAs8umwDh9D2n55zw>%>OU~f`% zfJU_dx#y*u%Q8^z!LkpG7Yi|(&>6F5iCL_Sd`;F_Ix6;F4sDC+1%)vRM$bq$g$Y5? z^#Yj`+LY4pdiDMVPzZo=6pl+5gHc0eMUT)>!X*Iva%gEUt`{OgX?qoPTk7F2iis$e zZ@lhG57r?i*(=C*La-2BzNk1F?IOe}G)wvCQes?S#wl6AO>J|!eu6syP*+7WWyH)Mu?@$ zfEgzT3S=avW2k9T7`45W6LG1;57w(mVUaT)mb%={uN4GDAfz&TXCUV0aX+_VN9w%Ad~7TdZQHKR2FN< z8^zAB$4Q{9z*y)+AXF1w*K{I}zf5wJ#(bkT`d!d^}$pq6HWj8NE(Pdo5VqCfy8WEyQN|IyAr&t!h0Hzh4 z3m)L6Mrn4G($-BdB-V|~z;FVSQPOo27jrTuJaYb-O~$l$~WR7>njQ(RGb^0BtdS?Onax zvIx<=+`c3?78}>hNJ=e8S+%#6xf{GZ8JiI>1p_u--m~yfzznq-TKdd)e1o-ruJLL& zwt9qav}EuUvE(t7663r-wnwouf(3xgx1fw=m0e0pLPp(LCEf6hGiz+@xl1pGrdA*c z&nQ56#)tbnwnCr+xq_m)V6}F>sprdDpTX{LNcGWcWwLsiMX|i)n)5weX1{OC<=0(w z&h;1nM7?~|yM8= z=nXG<+6UkI!js?e(i7kO{8QfZ@{hd#m2Z9hamOFE`?H_6=kZV9?>SFD@M%xlYp-3m zQ!M`%K9Ai`G^+}gX�Wk4lM>jYW;7uw_*1DI}G63OD7R;#Z+ZQ5Y&Gm9$}aksgLZ zVxU(8_T(Cg+E4-LE5pbss!lNcTIC_7a#D**Vin+uVql4B1!b&TRxEhv-4LXcerN<(iY!$gIGAF21)^h99+I<`yGb6{6mbZUrP#qV@EF^#4Iezf^!4HSCz*sI>sf9y7}} z1RSc{uXHif=QPeSwq-;*V-dMK9Quo*S6+6ZXLbdOO0UGVV={5f6KtDjiAgl%-x8OWG1{7E71BN|HSu?kuK8%1WLlWt81a z)i}f`7iqARFfe3}0Hw{V%pnu@-XH>4VHbv>rrZkX!jy@2=F(_{YiMUK>+J$@Oz6dJ zvfAFKL@!Y5qcsIDt_zg{*0P5wJsIXz^438JIwa1?fBAahDZNJN>qTg^cuF-k7QXmJ zA-$lM+Jdti6_J!rQKl<|aFYfFIgHlIJF)a{`!FN%wCp8Z8t_dGfI9%5LQ^CRj8JRQ zl@hP03v`0931xvtCwIWME|U}abikN_m|jQE8k<;Ga%6by~R_lge&o@J;+;T0Yx zl9?S#)5k+0khN}PjuyWwKVyq9=>eN*6IqjL=`q6K;etU{6!;PdV-yJ(DLs}jVA3#3 z=*=azz)Sql255+pFB0w)r&BKhX?79--cZUjUoDKeSS316qccH@Wa8Hvf@Gu%6vG7N zYY30SyaWH#JLw7~$NrA`4)%%jJehj7l1EYF@b6Q*Ra=!>;|pI+5mgmaj_F$6EY%>@ zcVhvlvItpiB1+{Imol%NB8wuQFovr8fKg#srpF?pNr9CKM|yTa0~?~3L%Eq+(M4&r z?yLin5@08MS&Zf`CF|B|FJUu5F3X_0V3Xtwl~ped;}F8PZiY&mrP0Z-xQr9jP@j7* z)^ZAksBOiX5lghdlxCNbE^eV234NLT;JHt@ka8e%GrD4vZj-KdmM(aT3w~}!`WkWl zcgw)o0SJbuSYkuf9m?)25^fgvX<`V*zOh`irZ#P9n^EgyW$5q#bT0?7Xkcb)NVn(9 z{mTp(d$(=rHD7UUr0Fx?ZfDufM3xhDkZC!R4Q`UybHzV?iwZyQG zECIc^A!A^%J`eYMUn+vBmz5IpSWS(bD2;oX+|OnA8(%)2tzPa|vbCN#`&n$?T)y$r zpQ@MdxbElo-E#gl7k%e1Uh|Ayx83xF-8b$3xJ`TSvPr$X-)>v)v&*K#_S@wJM;-9$ zKYGf0-}r(Lz3rv%`t#?0=p8Tp@OxhNrq?|EkB-~x_@nlC=8=0o>&X3%eDa?A?Y`sA zoBu2Ivf9m4PKBiUPDMskj_R@sMul5dWRF=DkMOEIb`>0BPi(m>rP%?vS7NIWS*Gx0 zXW>E=u%1UNeyb&mNN1;4!)irASk0?=6tN;5t*olGxm2GjDm{MXQV6l8UXD;b`?H_^ z@E8AZPN~YABPuQCe9=lnV$w#1K@{*tn?ccs)_{Xuvi(k9bY3j@1r+!TEK^fe=w9xE%TJ&88j z6J7Us;^qwx#>K6wYyTmXc8S>9{TM^y5i3q4>5K!T z3xUC-y7SLF+b7|#x$+XmHYIuHp#xwO zl$aDFM;3eOY^ueYNl`e0C3Ie*a~~dEM)?qcgwB{f3ubJ&XV>B(CSD1t?TV6>QLWYB z4kHSIq#Ke?tF0|b5ryDVK6JidRtW0ssh7gXhHy6G}6Z*t;ja9_S zJf7CsssiIIJv@n52eN#M6`n7ooOw|M)zVG5p%N&0?vSNXhFWZ>v>OT+gajMC$?8(X zDszOyvbgq886udCGJ-J@URt?18<-~D5F}bl;YHL&C=-t9MaKrVB*Zgwfj8QV5}w|R z?(@BTky}6!g%)wLDswK)9e^WdyNy;CU=%P>_`=9U^1QHt5u;k&5{q!s=Hlg(5--N= zSqz~Sh;Nx_kWovS8@YIItA_|G&E0m^Mp(y2?-VHh4ch1*VE6sr)pdl4O-*Qz5j7ueh za-e#FR~9WE-C^TGuuZu^=T%B^0xM~?y6lQg?S#WN3>@Vjh7P0KGidtcFlpo{BA4{Y zahe@FVJ9pF3Z{)29;WqS%pIH25{QIviPg2j1Yj_HW0of)oy&5SvQHX#vY;i9Tv&=* zN)|1AEhAN4u~nOm~kLA#;2DS9{OqrqUKfCi z6>D}TI9)~nbK!`=5DQ;OmWhnwOvi?Crh9+ui{XvSWxa9fKhx^0fH6!1gXH z3BYb%bV{7%rpt&>x0eiU{PAc>Ij4hh0n;@{LeXUuG+A;27!{}twYf{f+1~dwNriJq zeP{ma<(`x+>t)|r_A%@#Wz{iJFB3)dl+SdSEdO}(Wxu=ox_fWBNWFZ+r9ZmvlE2$> z<@dk!#Sa{`*W(U*!cOYtJ$K&p#6312xchcW=0o?{`6b65@`jfj_2IX_?6mj1>b-A% z!AXDl5}(I@-5($R(&ruUykquOFZ&AfF^BJa&_269cBk!j+U5`b3(P*!tmvyIRjeuf zdYY*KRL!xo5JjlcQkMXz_>^xIdfe53o-c#3tMypoRP-q`Rf1|m7kWv0DsDwk3Zm43Gqq<(V8Xuk@ z(PeD1N?L-U+E#r^OilOPvwr|=c4(!+%hP17%9lmvS};|)=wOmp>b4XVHMoC9Q144J z5_2Dgd&#rNRx4X<>{clf{9Xn1vcD4L7rj)mye%}@F;wm-pq!8zaXdtbi4eAAV`@`(B0 z9$2Y)6}C!ampY;7G7?XgeLP!5TnTKH?l$q8U~UtiyZ-6-zkSuk|M0*6{kH(QD61(* zSlm;72cSc~ZGqTlIspFi2Rq$IyNipiK#&t)?MfHjp-! zNVwyF1hGvS<<7VL0RtI3215k}&>hJ|xOOdV0KUymdK|%s5JhPVDvI5y4GK&etpG0t zh#w7Byql&W`<0<`pj2N-1EDT*IS>FwE%M3`nUrL!Q4TO;WQQk)HndAd!V=Jq7cdLo z2)V~^3L+BrrfGJ;ns2QkF2cbVeSIrdGsOmPb~yWo0uNgiK`mGRP*Bx?+}i;vLxS=W zB6oVLYUw6J7%sDJFs*a>OJJFmKS>GDya0TV*e^xvw(^Du!g1kl?%sgBXSE2B_}OK} zainF($+vu1%$>BJkcPP9A(OLkjCIjsunt_f!^@{p08(5-g=>8P%AAq}t~G=wBn7iI zlnjP(r(kCEqIfO5K;6hJ?7XyvX2~~8MpTSVQ~su08fAr?AR&`EN8J(tTW`;+g=z{# zj0eA0!J%T-1fB)b&~o~^jCfMCtv}=BkV|&HAxl8GNNO<|f`;#znV4!6Fn9 zId_-0nr?aQ5u(*AhxKj8r&&;Q^k zG%t9?rnUq)5($*;1X|idYB4l%N=BJ*6a@p_DoL1bbnl}>C70$kt?ww`p9kzE98v=Xxj++sA_sduqiODt{tt3l(R;J5< z(I{PO)*-zL%Qaulo!)$5Bri%Elv%j_C18VE3B7uoPh#y1lH+L>9%ghJ1+)n_) z7-i=oiddGSSZ#pT!P@3X(UpVUxGfZth|6xrUM?d&Dqew0Pn2N!ZrE}W1)_>saVgHJ9c-X`88`m8@33iRpM>Q9R|ddbuKk(xWtw&sGb8o7BR}9gPP%BT7M@}K?5 zkxxBv`$P8M<~hggf7D@*-~G{>9<_PXPFruS$;vSW zn(|GFN3TF*sR&i;sa}*i}9mD1<@;5%1bc%C6(tvdxK1}&Y%>DOKCUT&AOjp(`= z*^?{)-KvZ>Ltqx-4h3+2W~UfxzXAx8ef?0Ij6h&Wc<~KJc{mwzq4~B&aF4-g!hLC^ zmrR>XX%`VP7bS+zeqVO+FBGuA(yl0Inj9&m%0-@qhEdsh!(wEy6XGeHq3pbbsDz0m z21Z@(4V7WwvRa=uJg@A7r?3boNgJsy_eOzPctuEQB8Ao^(zffeo0McIED1ew?m}!R zkQ{s0j<#eh;7pkXGr`lUdiwyT8xlo3>qddZS+;l}VM>Nkw$_9E*IkxiHGJ#qnC3PCBYdDM$F;@Az_oj1z1Um^AW* zY?R}?KZaShy>`H)smZ|_;wO>d8P4oQUycqy!ZTUMy0zm=5vy=(90#-HDNrsYFYV@) z!Z>UZ>ITql#msAXmMbuh@oT|EFOyv03-oZ|z!Huk!dvsC+~S$Z4hCTW7E%r@W8{ca z@@7GnX)JX^bMYX_ox%t<7bHcOaT0)Rse#>A4p#tltuCQkipaBb;ocJE+gj0^CKv%j zlIU_VI?J*~2|q3r!Waz6Snv>=t$(*ET(lh{OSRRYLvlo7Ugk19jPw=qhpJ0T#Y^Rn zqdQraFzm#^p1pFcGk-?N4F!Hf;Z-;%@5(M+M#aTQ{I%ty5?Lb!oe;VJz*;(c>FKIg z6m%gOvk%@V3}cm@9Oy=KF=WavYa6BhO$gR2wq6q<<_@CJe=T>LrJy{^ehykv9SoTd?Kb;_C(AFQ~d~-x>=F2yimCSt%TdP>^6WWUCl@DOwc@>3`tSEO}d6zFT-}uWLFFX6r>(9I8>L1>B z>v?CK`uhEM-}-5XZ2#o_x87&hZT5T2=Dl{_%1?gzNcJ-he%uL1ANYosJpJ8oIN`*< zIN>i}|D4yq;)p+a(UV?r!of!zwBzB2?DVXo_Br%`oge$CO}p*5?QT14SFx-_o&uS^ zET6}oQU#}?R26#qsa2pU*{c4OY-+SIR{ep&tD1e87Ytt8H;b!76@Y4C0?&?338@HG zU8*RJsuo$$0_alIi3-^G6_Q-49|c0r5p#+{)hHfCw3e_SSE#Be;Z?8j6n2k*wK5cf zyDp`w9Hyce9*KHcN*KTt&K?|#*gD8f*8^@a*}}_Bn^ho64s@(UHq=lW*7=gU5ityo_9)vwjZ z%IaU`EvaN?R04ax+@F_XcWY=B*IJb_m^%8xpa1xpOD~{9_VEmNO$=JPd#7`hoxu)3 zpJSUNJ-uC`o7DYCSlTNXdFfUeO}*frZJ(hmCF8W+{<<&-@reEi0z;OB=@ZLn`OAlt zUd+f#d)dvxz0JqauP*qRi^>By8y5!8;0lq~2%LUSz28YE+ul8J424Dm(Mlw+<%DEe?xSdcIRaGb;p1)DsL znuQrja$uz~EjzDbG1BGG3n5Ta0!D#*XGdF6jK0SeUg*H{=yUNJY9 zUOsw?#L8+?$^~<5~ZHWOv=XiK9E=`7_ifhvpg%_uI0HT80Fxf`XI zVQB-;yePWtv_iL+U{m->(J_U&375o-CW%WF6r*R^(_En_d-|x)Lia*mbt~cXZvuAd&-bN zi6PUGn-$ZVpheKi-wHEYC3a%CyRS{ZGNAfhNHB_e&9K#9qxY+D(| zEMsgn>LOXLRmQ=`zib;S(1fF`=do!0^8&?aYbzEoLTjtdD+_?p_}K{^jQfT%nMBex zfMi23WB3i*^2abD6X~%}!ZC<7&({yrx(-`s-USx%KMvd~?}{vG2a=yj!pR;axZV#j5@`(AsJvJS@ z*Va#X+*Xg-ZqsA8+w{c8Y(8SYU0-n2fv*zhy%LnYe&Et07Gr(QC@&>Gp(I8K_3{Z}9S&R0L-?=Sm2_TPQ$ z^o!2__Vri%^xDhL@%?3AUiQbNRLu%o;^KF~6H1mYcvZ8{S6BSH_}v0-5JOtXE!SLf z+YMJ3W!C~2A{12JN^arAhP>ff#Vtu1RS~pr=i)r$VB=isbm|G?ls|0|FaWvFoj&^p zdqyyv1o#o;;qa4u45rX5mO0D;jIxYHmjk4o$8$*^tM)}{bM#)r*a^0{&D0PEx}e#c zBMT}zHcJrz7z1OO6jyf?42JTeFA2-jP?$E%cm@{Fv}PTNxX#9P<-=Ie0wKX>TW*yE zVrOK5#I~HvG8X%Wf{mA#UeHN9o|cPF@hnN*+JcBE`qHIWp?4@4YnX3WRO{JXN)@uM z3|p4wScZHl5`)1&;tx5K;zCeO{<4B3xeuO8fp9L9S=y0c<2y#~1n1HPpi~PKZGp1q z#l8fHJNEb+)#~P`%}cn}SxUXiC4iw%VLVwH1#5y~z$h%l(2#UFHI$Ks zctzsTMm20=m6#VodS*Ja7=|tezJ<$o%`AZNbYZxDTyNcp18xCp3(;g*1SO)A(x9*d z6jA8(@;5;YBf~&3WWLtlsBRK}A;!3&0>jQ1Pt(trMJ9xjrEImiXz6om${l{@D02WG zc=oYpU;u%f_rlOkyt$WvHqHVSMfAGsTfQ)5j=Ve>@oXsWN_e}jQ>(N)pz>PEs zA99Y)_DX=NfGOG5oZ&mnLw4YI=4Vk0IP+7qKABvtw7#u;*szVQS`4Qy*qT_4N^BSc zl(V)pDC|p&Dn^%kr5|#ZM|i$m0!X5QA776MTN!^x>6RO>^KjW{g;EjoqPH^T#yBJx z!pOmLEo&wA9A_PL86mlgi?htp(kDH70!D^}3o4dKN(MuYjD)@>2bOX(F8~b4hXJ`| z)RLAWS{OWZ;kY!!s2f0A#AzpIRE)tUJx0D{M!NxBv2#av7k5*z10-GTU}KqU-Vi;P z?n-xri+|<%U)?c=+@{gp=M2-G1vz)?iq~p&EJMkgyDO>{=+ZJfnQL1v-|M%YU82pg z2?B5rIqTL7C#524SHeBi?fWUyH5Yl>l!E1AF6M51%wjBSX5IA7D02G-G1GYP7ougs z*3wagBa~JWjF(lSz_16VBab9Id~8A#&M4t)QpH{0#=vo2#EF~xnpUh0rO8dAmJ! z+Wfd}Htn^8s(I_9pSa6&kJ|Hv$L+6De*QD|BF}yL9xr(I6V%HG?z4IC$8PudUAFT3 z*sCAD{y!crE0%podkW+=ilR(msKir6s#TSBsxzgTZqJB&AX}?iRM08(6p5a7`tCEZ zu2Qiol7g-3k5ZS5>JPbR&3KR~2?d(kFc)2E^gJ3vgjys-o_;dgr?P=128NJgNUf?^ zR`9BG;j5s*_LJQZFmjjgoj|C?zds32+fe zZEL2y7-PsqGCgP(4jZqKmQE&k_-fNDz%3LoBc!#G28{W7i0pU0JUCVkS1kLsvJYwh z)oCaB^tBIW_jjh$%PMQN^I4z%n0oolkA2|e_rB#*AA67QFMs_@r}$%1U;E<89x-2e z(GM;@|9e+oe2yo}zqxzM?blz{1;4&tu5flEByI#nvF|MRPf2QFPn4y8M_I^?hfX6a<8P;xXSPKn&x7PMe6+;bVnqO?KHCNP*r z*SGBq%WRRCLkgEj;K3(V$Yw+RnmH-j{(_Drx@7CPAEKbbQUtHDAM zuCk>auQXVQA>ACC*D|Y+B_@iYHqtP)d;!9gjPx49FKr}Ev#h3Nl#j{sx1) zVW}2Z%RNW-kfPIDz^H~HuY#6;D}V)r6e)-UW=4`IZ}9m!D#bsBCc~+J86Os*J2pv+-*dRrCqDrS!*s)mXa@XFln?b zWysFR%NdUzGcV!jJI%ekS@stF7|shg!Me0eX>86g1( zu)AaBE_ldd)SXqaObO2z)yl2(F!i$6rTn#)qASP0Bl#}5tIlYS%L$sP1+-$;mb7{z z&TUJdE=9IhfHcRp$}rmIH5=DhnyCpkg_N0&ITDp`me`^a1zT;KQf)@GV69#&iJ7~- zk&7Wh;S!z>5I%moxQrW$tOCy{uhQGF#lN9&FTFKWz+Z+X7ALUte6*u+S9|K98OJ%Rg=L+h0C(UE#c@URE`$qi?(6SO4wqtM0u10?(KI($}3gp6&Va zeYc+b$&+7m{Lzm&;aPhf{p1~G7L4shW4+ylHPg z1@?GBX&LZ@Li5SW{=~Zw#SnW*#6-CcHHN2E5DEJ`SS7+b05Zjgy+j@ zw`wtU8BwFLXjNybxv6TEZ;Ca=pfXc!sCe{k+>f060VN)cvJQ!si)t|vMIxcYFed6p zJU#@j;5DQxs1lN|iju`BJT_?%#iNK?Q9R*Q)k=)+GvWPdCn@O`5s%z@+zcineu{_1 z67H!k21?|$x?I$}kl46Ln^6uJ7{L_Om}MxIT3EK(VRV6ombSTYfngUBXKSd7Ha2tN zs|zL&s?uon4DmA409+!}R$P0O?8&lX*(b48#GWa8i0rX)50pK3R!#e{uL@_Qe(y`Y z{FzUjbjthQ`iWEi@^fdr_bZ?Okb3zC-#zo<3%+yRH48CB+Mt|{Iv}|#?HG=QeIwN|ud|mDYx9U zs=2n%>b1gk>dsf&Qb=IzgkIJq#kVia9-r)XO-v{ur>Vw0g+ z>uT9s2PAc6?lCkZcSv?`1ZGDkg+PKf$M}IqdYPJ3$#XPWt3+a88T>GdB38>{3c6%m zTI4Kz*UTD8vDlL}%Q)e{Mi}KYiA^S@ham~SgmIj36FN48hAc~LA!DEuYffg}z{-J9 z=8!p3bOVRyLS}+s5k_9BMsr^?8b+H-n0YM%H%f1*5eipUkY1XO9|IeAoQ%DEU>JD0 zJTRcru6HQ`Z6w;3SG$}P$1vuMWiNmxo)0))})s|{vI&V-UNp608KaAoMBXL*k94(-nQPWw*vS{8@@DYEFmPX307 zF8pmQ1)uWWfwSqRS}H87i@ibW|9c;Cy$x9C8`FF50?U-AMACw#>EiS;G^4#rRMG z>tc3WxtmEiFk(FJpd5_MThcLV@OZ1$EMr$rmS-I5)7{CrpBVLp#- zQf8_JaOL;{xE1J!*Q{HkEM3Oh<$PPK2Ulse2rb*9H;dlc`w38sP*(2ZnR{TCnU^6m z?Z%%qS>hj@(dxV+1RIxeq_kX^DY`Jd5P5~9TQvKyRRxfbwigJX_kt{R?%Jpr*9)qp zOsZ%%l=Tc*S{8blOg;!DECDTDtP);6UvuX7)Qot1rUdY$Y)!rF0rbEA=9cAGn0u70 zI-bvC_mOP&9zFl|t}E}k=~wq|`Q?q5fB*LDf4t?YzyI~^=Y9HPfA+j*?EaGH?swb~ zk3MLxtq$2|tAqC3M!kIC?wj}DdE0$=+HTM7H}A3C*84nqtHTf2$@68O$5t;Nb?A=A zKJ77nBKz;G)#~LBfAB=c9vO2~(CI=}1F9c&>AbXRLuDfva`m#$e&dhU6Jfea8OA6b zk{1TmsA?BLeO+0Kfl*S78bBs^xcT2qThbo_KM0Qmr$vN`63 zq{RO2n_p5OS1k8M=Ki+ScfbAxT?MiS&0rRg+713Iw_K=Qq&QWwXpq6gmARU!$4h^|CyJ2;07YTjY zZ8PZ<6muk5+Gm#d-2bob#0@#o(c&B#JA9dR>Xt)N=n`v=waQouyVkLm7qQ9TaP1Xa zZn)axQ3ps2INKyCz>#aSTOWFWLI~Gh$F2NdZJ;sjH4u%Rkp~RLP68HcsJoongSR2gdVLnDwy7S z!jDmxlojUkk4h$#32JoG8xo^nOJU(9T+Fqy&1Gg(o?|Z%9w7uW8Yd+xilJtzEyHDQ zlU@mw10x=KX;5?-VMrmgn3MRyNEwFAqMZaHGOfr-iDj#hU zGH{r@N=jjMk>-1iP#;qI@wdAonR>0H%a-piYX1%}tup6-ab3n04@#sx`tN4N75 zJU7(Jp0M~w2^pG;DKK`uWwB^Jkl-F^#bDUYtEHax91AB{;;_KV>3|Y93Z`mR#lN6&}A@C;Rgillf&ivKOie*on z6$K9b4*%J83EgAm`F(7kxb6qBwHX!89yDu-dYN6NeD{qPJaEUwejNMG8-9A@72m!0 zmb1@1?R76Yey`WQ>`5-Kb5=B4MK0`N#r*N`8+)^Y@vPk91JhM4a!h^gFk%;17B+SvGS#4fk6psDImx|cK<*Aod&>ky$rrh_M*?kgQ zWxetM`txUazWm8k-gDBs-*o23PyFiVPx;yxPxck&AO7v9fA-@qUHJ3wc)on&mFM5R zJnI?C(rU$il@&VJoXbY){bI z3fz5ePos2Vi&(iCS08h@PrGxq-MMamiqY^klaY?Wv%N$TGcaKp!7%!NQsI&pDoILJ zEHj!eeWWx3m@<)G8A{1vn5AusqO60w_*ehnr)gBp3TiyE0x(|yM@nx2>bstniY^mv z43yHq%u9<6=hWSdlVmBuQ(EwHtBZ;>wV5C=9jzVJ;am{c0k-&%|F^UORoEjCK zB@%jjFZb4SmK2rs!4qJ^mX{DAOUWM2f;@h$xLPYCv=#G@2Jqq{w;&RulO+;3HdGWX zOYa?%FpHEVz|xc%$;Ia}|cXB6r)8SuadORQY9P{$YdBKT+VnH z@svq%SqPV6&FFHWX01ez&7u{z&=WEKR^AmNJy~J42n2aZp>cw7X9U1JqRS{mco<%} z@S=;tm!23Rz|%E4^AcCihFF$MB#N#i-3#1Bhhd>}r!>JrBX8nhr8lus*0)W$wu>AI za)jNpc!;Y`mk|a@vx5{!D-#xq3*IxScw~aMJX;C?5_uV-($xyV3qIzIWgA2DTHhMQ ziH&=43z09q`~mWX2a~5*QuWDjZ3zI(5S=4;N^|5`CMnM9PWx2>%3k8+@AOV_?01G& z-#PYI33jUQ$CIlf(|u2K%AYj~mkp&tN!&&$)eF@QjAN<1QhijClyKlG5jt>q7TLbMFdaY=T<4)v&PLZrZNx z7FI-=bYC|qadnAIdWVH+i;(RyJxIa}pw?)&`M{d#z?z^@(%h}EC8c91$`Z_26ibac zce?GaWGtO-Q)m`iTT|Oq3a1+oujv-k|Q?=1T|GbR1jtIxgj z`hWP{J(u5e)A@JbbnbPRe*4ZFe|W~JuX)7@2fXEVN5B3RPyM4~_df3M#~!x-j{84$ z>x1_6aCz$kcisM=$8Nj-qgOtUeb|1RUwr%%-uk*{e)v5vd)phH`Ia|4>(5{P%x537 zk9zs}&wAp^UhuRXcid6Q>$RLTr!5n4$s2gG|~2eSO_bCDqo{L@@~8|MtIl-OE3A@;ko!jn91RtDpG7XHNRb_rG|_FTZ!?#Xq_J$_wtj-CslV zx0P@B=lkzg7rWHm1&Uv-E8o8zk?xD-9Z|h(yKwC6VC>$nI%|t?|JoK91K7LTSloBr zYb-h14Q4+vI*yKNZb_pogbvm|6I)wQI|Fh?FvgI9b1!0>TNb16kY!6*>ZL6KLPuV# z=>iwX9MR&J2AD$99~DxdW#lVLag7o>3m!@tVqn+mD(>jW1LJJ(s`6zl5MNRPW!|KW z!rM#Xu@QkRvSl$%$kvCwa81h4at);?P(u{Rz`Q0_DlpKxXFKv1Z0Hr$Q z*-MPky3t{XA$n6-=D*Ahz9cP+mw-^lg}W|HVuHXdR>t}e78vPqWHefaqEjSb3cV1z z;oSl?(9f}}5chC~YB>H`K-A;$>7`AeDdguz24_yMLFS?enn2S}_ z>bfbwTm#mV(yL?C;sB7DzIjC`6Q?z&IGNKstV%%W4uOmvSt-Q9&wRB;9sl?`6L!GU znygt6N=8x0n$V&@oIrXx%DnZ5WYl_bwTy~Eld=>JHqF8-(#4<~zUvJY>8fL2dEr*S zdD>h|p(N|pHrdu;<;1?ab?|K&(TZymv&dpuSUE7#k*{1Hi#l$(ZXNhn2p@Wo*D)KW zA<9_17IY$|$%ilTy0DjCql_*pRZo{zx);=|!xz;E%LpS!3>Kjmx-`6?37)0Ez*pcv z$^khx7HyUi24m5dWcjzY06B&!VKA8Rf$Jg(Q2s+0UXnPml_W)mT*`AQL9hU8A|e5n zyn?Y3W{~i7K>@UqVW~il993)pc)TKRTSM~EB~6ynB8QKVOZiaZHwxLrIWp3*5%$4H zvc3&(`oPjQ@sbyALjfar%BN(wtY^;BB1=XHGF`K#!^pN9zl1{?5~UExiEeY$mOxW0 z%8bS@5dH+YE-y$fOHRq_8k)q48;p)&g04}FW)XTBePxSSeJ9nnySDkL?8v;!w58Y_ zkd$*1*OijQcs+MEEZ?DN*pG~?77Y7^GdkpMJDql*{LK@q@Vz!Av5g*t;d{8%3uuGoM7 zJ3qD~3I>_qHQ!&ZKQYkGK8XZ|?lp2k!XIJvaU1 z?bqIW>y`K2c9rMLtB;tK%olCB<~$#Kz3J+o-Fp4`w_SVTwHKdz?S((_Czx-#{6|;* z@|#y&@Rid~e&x%bv*(F#I`&n6wD0qdeAIKEy5o^g*hCK4Wz!RO*>vD;TOGQ`_D|k( z2alNj_Lsu>sr&5ozBj$(j1Ru?lkb21+h241tN-XJuX_GrFF1PNC-1e*;~%$qhwV0P zv(+ZQh`o86OT{LwMjNV+aFuPxkDqF@2e{miL#VMGSxC=zr!XQy2 z)6K$^{VRc#_`1AEIafhrW>GRTa>UaFxgcyhYGE)lk`Fr!Fp;5flw6FOpvTR+ zU;DDSKFjSnwI3VzRNddO_ORQ(9;oF%PUtLZ=g)uk6EI)>{F&s`5B%jj-~8G$PCM~$ zzW(X&edp6Z_}kC@=zE{P@ci#y@{8|Zb;-}}*y2a(uDNr|wZFY@<(tUrWd*Wp+eNNb zGP~?6oGA$aOV5klrw&4HJzGt?sC(88?tYxz)mb{27R)^xIK2C}o43sh8wnp6c`kM7 zGis>NxeQY1_B~)m{Cx9bA@CIi>|~7|Puo#m@J&a~$>b1QJccU4DD2vCFGH4M>E;3u z2END20>LUY_ zjD*V+p7LBX$MB3L07+l2XA@_^A9J*|Wr)@gKy#!UU4NYviwi;9qG@N%b2lUhc2c(C z>Cy|2ADCmY=(8;GmoOMMQa;)Z1y2AZ#X^f=1b-m_dh^Pn9ZU+xrh_L8dnpMKh5{xl zR4mztS+}>*dD}IVWI+p)#EeQ%Qcz%uE^5@xqnFkKb8q&1-o zl_*X)&ToP`DT&)rM<(XQ0^pcLD8vNm&82KRUo{Z`-MpGb5ht$Pq*QvPw5A0B?gV9M zrqAdlZXGI<-F+sADrrr)Fj|l%7@mb-Fggn>Npo?)Yevmc`(WCuFQuU}*G7v$IIp|? zYUS1cKYt(yW*GQdNkSoWwr?4Og=y)Qj_kT)0SFnP*Q*0LJB*fn>Q*{{c^Pu0>27!q zM_63*ZOY3KYg`Z-Wh^dzyq5g*p-rzlfy$O~yeQ)ZW@tGoVt|1K(5{pQtC>>>W+G27 zD;^e?v~J{iVaozQbYTeaGElGqqeY^$DasJ$&WPM=Dj5nRnQ%tSTpOVT7zrhCgr%fq z=PoEhL&1h*Lj|$~5XOvQL*y6>Jt>JYLu(X6I9sKX(dvpQTi~*_>?Pa_K{slsKwRiE z$I^?Lo;1oa(WS)JYUEX0=2~4a$)rGXl$9Y$NyfaU<&}jaNoQQSg;)P!&3RTqs&pwij_e7urzcN7BXFXd&u6zv%_0RWTf5-I~-*mo1m*i;zy>{t~EtV&hlRoLP&%5jQ5ztg1}C6(UlM71GDMI|UkQ=R0BnnTDgMWVRkCBK7CUZj)Gd1<8E|CdGqhLr_xWLOtP_f`;E(b$k zrqdFkYSvPMmqU3{EL!WsMH*ZH=uM5XTIa6R%ULJ{t>uMmtuRO|!PzWyxzky!kHpl) z$yd)((fg{nuUGrn{HMPC4WFMs<1?T7s7m<@pY!Pdlb$jws1?g+efEsgKJ=~+o%oi| zo^jGQ&N^Mae8JDY_2a+)f_nMl3;zC!i_W?2#!K(qaCVpC-RGWl|I+KSvp8mF2X@j<2TNJKNmuS%H!=%An090BhQfVAK?2`4 z0zlDaB&8|@C0!txHjUUuU)d@*JU_1%CLEmxiK*e|WxuMLXd>7IYJ!E+GQ#9bZ(d`h zND@y9q?3F)Uw?dx^we0l5>KqBgbWo%`d zTJ8gg$EMAZUOoklhgWfDWulF_xHux>ml|%S+a4 z*ZMy=k%uZ}2_TV7`S^@Yo0U}O!lU!$Xzs`{_Y&)>XXyf2Ix8lSMFy~*@QfCj1x5>2 z4zU3+Ca8ERShOs~qF2yyZ`tGYBGz5s3MU8rNM@A2T!i8_8gt-uzDQyyrLK+aynqQ3 z!br?(cp4hQrpyaf5<~7$)lNQf1`bBIlI)@o#-^jCb7Vv!7NNlBSZ-Qf_K=Mt@*rKLksFi?z^?GVP3OE5zS<3!6i{J=z4w!^bV;bK&V%_w(y#<{+YQwy1_ zAzagibZPlvXHVFhok{ig!XRUiLlJdbLzXoH+`QN+`67o;!cW3w2)5)|GK4hR^o6Fk z4wS+@Trx|}HM-}UZfE<3o4q}xyPn=QYqTB4CIc+)#F}t&lT}?hfa#E4_weQ4Xvwe1 zr0)8~;ZDqyR~MMw%syJOYO5^|dSKqQ(sh72M7G?Fy)92L>pjIHL3n)EGWA9m;l7Xd9tFGwx zt%zdz{yVQ%EZ=j>RqEw`y6ZY$RrYn}dv3kt?prRt`<9E8%>Q)vmB0J-b$8xy$t_p^ zQoZaCG2e34**9MPy=yP}=EqNZ`7@ug-5XzV=!=eh%rS>-_4FrhI_99Q{`7PkhZnp9Vn z%2!>9P-#rjvLmS^WLFPjV<7;14XHg#4;w*JGQx@OVKIONq$edGtM+^utQ=SX{Ap&i zRh_zp2qUO+RlO`k;gHX7W#UM=>05Bjf^!)oMjecfNh`U?5(xBZ)gr_!JHOj*h7v)Mgp z_AvVIzWL>^ec{s|`|!Iz^1*j}{>+ol`rJpqe%5K{{`f0D{oz?YkA2y%e%SNnyKlMH z=dpkPz{*$kUGsfg*#-aK|M}Pcv1m=P4AAZ1UyYo{R!+sM|G?;ut7{8&54sas=$*tU zZtV_KETb^Gg|%(GPI?4@g}XLzmVrf!Bs>M65Hs{}#zJ#+j0A4Gpe%J00T9kCmKz#O zK>@f=s_I-8qisQEM4y~XTNfUzx}(`)V^QoFid;S-axnzc;&?_(I7SQ4%cxBcxNz`N zN(>={hvb42rsy0=Fcw|~nzhyCqKkx*UblP-$Fg1wwE~9l;5X~xp%-piLvhZ!Wyx{) z<-k`9CIpP3Wp0cBy&{In5XnrlnMT5~+>Evqx{Mr$c{0oetdw*OX@#)(li_*=K(^fo z^yby;wKQ@R+9=Gpbjbl1sF#>da5{%jntpB5kt8-lO|}_nb&Fnzk||-Mh1SyJ(ZWO( zz-Y9(DV7dw>Ar?XGA_R=oz7{U+#EI-of#Ot%~u>bOBgkzJ5@&K3Kn$@Jq*~AXJ%UT zrbf3)ax+ROO`KiY{iBLm5W2cjWoKx~(2D}#lvOyNDOCc?ydxaD#3~j{q{ENQOOGAc zFFW0J+s$;RDF>%H0gl2rLFYvg)WW(#HWWj03k=x=DR^8YW=b5Tz-$+glmsS6w2&?# zFbc=k#13XP9QmxX6Q(t%$ec*n)S6}!H1wdXKE59wFS-PTXtNT!2@2i3xN~d`=SZnb zUs;XSGhED;d|rhSKF^2&^bve4-c+g*cOUEQgT{SV-U{p@?0FMy){U z!OZL4Yb{S+>;zjb@a1VD>=?MWl5{S`&8wh{*{it>WigujBm;oF7bT1~k_d~a&6vA3 zMM~_m+Gt6}Fh+GJoKy)5p%@)PIq0?^!j+QTkh2p?)1gR`<19k>R;jo!jdCFwr-e_z zG6F}A*)2B6H43ki(bA1BEi*KVlu7`9u_mtWdf^D)1%_NQFuHU~B$VmKRyg36R|`NU zH%hSrK<2_8vbo2crEu|2JM;SgIJ@(A%c|=B_eH1JE7%7Sus2|X3L>pYBPwEVNHl6< z-$%g)u{TDe(Igs;Eh&8zFx za_*wbnFXg*MX5VUT4Y0vLemLFmvNanOOEWbjDkwTin%f;x>i+UgrdvdC(~u#1-v%` zUasL?c2HTC}?lLQ%byuFdUagneSqRJRxA++5)jO+~ zmBtNqb4?MUYWAs5-;zY`ym^ay*~8ekY`whavDM2yuzce+=Wp9`uE(%fp2WWDM>kx3 zuKUX#$yP7>RjDg3`0C{seEFgueCE$jJ>=MD?e(WGeaiEn^RVX}crW*tJ&b+q!4G=D zVUIfY-~*oh#D_oi(GNUupZh)GVfWncfxGVefDH%iz2W#n9`nXm9{b_9zf#Hkr>8vU zlwxg-SLxno^G9{<0cY9jC@*QN97xWr1f@Ir3E=s+2u2 z%*)+UcJ*CUF(?UPs%ni2T1l&fEF3%?n;|8g^Qd!MjK9~_Imce2c z1zv&4qD;m^Jg^-Jj3_ElR50SoQzmjo$k(V4z;f^mxfW7ZT*Lx1iiFN6c{%_`(?uaB-(0Zp%X5Q*a+V;w5qz07JvJ_em~LW!{#e@kQK-)pHTj?1Lx@Y)VYX0 z_5A)1o&L@_Q2`-%5`_LJ{+i}~AM|ICli`AYTj zjn`hPUcULdD}8=>XZ5lgIqBa=N@++RNpPML*A}Oti;U`w=sFEMmpk8U0lF4BwPBWG z zEN(}KB2|{9V2FfolBKpOl(2|ori^VVf{J34aV9mBjX}3ql4QYVt(J>CGXg8LR#p;j zRRJOyfFYJfLu$chvSKag!rp3Vml!-HKSn%dTP$tsMrrXy*_q7L=EdO1dT%mW7G5z6 zQItZk=Lplhbj_}~6eOmTOW+bP#902?FucsA=>rVD(2fy6?pd@&k4-C$5@yHGLLt0# zxr;Rx#(3_suYP>5>cgJF9>XS~`q%D7byI=UCd4kit&y%#+hC3()u-+7+vKlMZGSvK z(4|Pjlsm*SY2u7@F4~SrNzUlZf~8rs%M#9xtDD8`cH~wLQXP?xB(qCf$=rR70Z?YM zl(Az}0!43OI~{gx;eo@n_@+RStr>N6?5WJu+`UekrfxGTkgoj_={f^hv20~zWfrE; zvQs2%bJ+IP5GnJ!CbC*D%M&wqLP3krOUnyGVr5HNne6uaw%pr)QV`xsq0*T*HFU4|(LEeh>Y7wH_+30JD zu%g#m3JbzZSK5vjC1x$*j*!rsc*e5Iq7|JZksC3Z3?Kz8C8QSr-PFrX3TzyURc<@B zJFAy5tSh2p?uaxm{PGVx(U;xwH@on?-a=+%rv&Q&iBhU#H%hO|S41nXPLC&F?Ih5W zc4V>TsEu~VHh#!S;4YWN^VoASacX;0>wRtBDLU)dOuUF_*(X)T$)vcgiEJsw4ZLGJ zGhHfN%vThz%()<7^IDY^aaANzm9vW#>B`xV*3?|AT)vQyawxaf3cqC7127sjDGH;Z znYb<^JZaX;R4rZ0D4&wK`+B+Ov1J~t>9~^1)>S=*uKCTy1-YXW-b}+_XqsA<-V0V2 zWaTkzqF$~p{@Lv|58HnK8&$mjFx+Ll<5u4RvSr(Kmn#H3Xj*ApY0z`0z8t&T%{{6L zK=BmoE!SUh+qNs!%l~xCwd9VQuKLM!m)@}DLf`jw$IX}Ddc!5Fx0rq9*LjNNZClQ{ z{_^wOV!q*ub8or!f^Aovb=%e--gxCV-uKqyk9g{RU-|sU9e2py&wJKhCmiySL!WSu zBM!XJVNbm8bN=w5hducrYGLg```&B62W@!t12;VWVY?o2(8E26{n2-ydfJ;_;^y-4 zhwKk?$P@PZpFWSRh*O#==#+%2KLwpaRduF*^x(95O}SSA2&QmT-04!(hG0h3%SutT zo+?l6+2hR>eUWns14b7`!Ki}fi=SYw@HFa!$$U$rQd%J?P~V{~C@&)6VCBOCre5}2 zSE5wH%acw#N?lO(FQd@9706Y!sDz+5BWwa;7E~ssga<&Euhx9Qs%k5Tbs6KA3?rcs zc1RIToUdiUFDsnQT^FoH0MM1H7E($$Q8^sC|M2(zWudHgJN|Uy8Em@euDjjrjSeXFI{Hh0^-0fTzw=Fh@|M@X>f;}N#}_~Qp)Y*){eS;AXZZB?Ip6vVPs zx5rDdn1BcHA4+a2U zbeTwuz_8eGG8&DR(NI?{UWO7nFpQlf3uZjPEVh?!a%oXzhpd*#tBJP)e3hka zB@HgoGSXQnhA^junXF)TCSkAiAT?%j)XH!U6@L*`PwVF{~03src zwyq3Kq0!uzxtSNFD?gr)THxh^ls_1;dW2UCQu5ldbId5o$rB-8T|@Y1HOg?=f=LgC zlD+v_3K=$w!9=(W$HJGSL`5gOxMz=imX0D;fhIi|dg~A#rUhhochw58G_|}Ugwe%j zsI!ut1-SQIc3o*BpBxwyFk**bM^Oo5!(dZq|6t!@_hK{SKmUXRFdJ`sB-?deGPRG} zyX|~JYBAV{h(1ds)mbQtp^hd%M@nnkAbFBvK^W~8%62kmG=xNY0+H@7OR2q_3>oQi zNM_^I2Fa*|RoSX$?cjqANykMX30T9{xfjVS=~5XaV@K8@Na=9MT)j-k+%Bk-0zcSD zJYX!n@Bmi!UE9CMOv3VaXryZUSK3;WH9^v@Z&s`vdU?p-f9hP&tiM1~ZJ$QLj~QoF z*eJRJSuvbmB&v@hLh8cgC1`t8_;S!L7r!JMqVq!AtB8WFQWT;MDcfFXs`MG~yZ z(hbe%au(K#^zx9TvJd?K2b5I;CE6=Own1^SLQt|-3mM8`4iqR$4rKZcKbH5Y3OAQQr zmr#LF$tsCftQLf^Ojv~IRgtcn=n6dy>ndnTQ*M3y>j0YzFQFL4y) zCErBmYF6^DU3{B+6PXas>rSFV*UBv=SZ$W@W2{{BoEJW{F9)JYAv#P?oxc zmzP;yU^pEqIyUcj@RW3;&5<&rLj3OEe7U;c6cf(-k)6iM-DJvt{O*_R9>%U@Rz%-& zv)^jj-1mA_HCHKD8uVj2N@j{`LMcsAFaPwq%WvJf**AUp2=lM**rsH5e|hVb7x=;D z)!&f1=%-sR^5JFTsccpAwHwd5`Ra>r+i` zzVGY(Z#?CXUwFt#&v~4Z`T2)E_J3M0tKd{*N;!2aMO~&!rKp+}X!8ME#h_|VeW$Eb zEGtMWkd=G73OYPzeDHl*FkXB;iS3TFs*^9FkSPyUi@KtSt1#pxv`W^`RHAyM6v>PkhrtQZx`mDTbQ5fYV@Dv)IbCN4}#Vh}W{%2gz$Mz{-ySwzu| zx=F1S(a;pmx)?}P5Kd46X$>Xctc9aXz}oUPwT9?myfB~@J&Oc#k-Q-!Tr4jyH<1;^ z-AeXgwht*+GSlb&@<%@O9>$QLJmWO?mrsB98{ho8SAO{YZ&EM+?H4}e{_@{_@xwnn z>x=5;O&5LN{bjG@>o#9{%eJdMN6`}$P64lctuk2!?OdRE*(dbQ4yT)|iVLQ*l465o zpEgFPQjYVuNuJpxDwo@c~=`5DL*g*--ox07l*M<<73lXp|B& z7(#9CIHLs{y~J2xMxtU&CJ=I%Xz50CXPi{o2@Fd$n$cj4_Ljbw%O{kT%+kgTqxLkU ztA&Jb3M|#jx7j3g?G z=#1gJjI)q&F@#^jU}K-r`0if{vI5|kFO05H-8ktiVzD&DuB+AU z#H0+Kj$gejE3i(q=EbEmVrGh*lv|_lS+pz!b3%_0ru~L3N86tELX5UycF{Z5%PJ0H zkKNX8+-J+pV%rl2TQik|&|2MAk76#Z8@qg33bmx|fs=x8fidcz_Dx}6!zM!!1uEO( z(kap?$@XyWhz)wXZ6>ubLkgKz}7ZWd>amzLxSU^!&?^KFjWy2F3h z5m^Ptj5dK`K|bB$W;Y}dccZbTl#CfsCRWPQGA>dod2L>X%9cXLw^drRR**9V>nQ{P zqhxH?)l&s!j6XJAJO8Sx3M=@$;0IX(<-iWWB~JPvu}ze!18kCk;oE3bU;x^rtb}7E zYr+{FfV9`7HzYeMz&I;ya4yr>5j>W{O~lEn+?K5s05eB{EV44KB;v8Wu=h_J&x@w1>I}hN@8{70U`_k6{zlvD!Ifm2wrd=ds;lRx%6tw%WT-0}v zMh($9qEZ#&;i${5V&$b2#m|Ki5{CV~?|Lg*{95%gY@LIagn~kzE4pjIkj{MRHH+PIT@8-~&+d z)ioqr;i|-46&cN_5QZ?&ajwZz=#qzzaMlwzmn_8%&n13Clwrt)mwX6?m(eE6A`Quu zPXKF3!Na#9x+EK=7k6^QSwutd+$k+GhC+x!(KXZ&p_H38yB4O16#PgWonR~_&rT5~ zfkYQihoDhiUhujwbhAJ*MniNk z|EQsK8d9t<9)v6~Eh$GvLr4q_ZBeA{{TFhCePG>MZU3NV(bcw>vkkMKA??RjHZ!XQ zj@>mtJ0FU!J&)0LDK;BmNP?N*EIwUsLYs(I8l=NC*BO{5Zpgg)_tf3g%R-mT=Ns<&dqhTnrwQ*`{v+ZQWsS6eAV752o-ZV{(n5^UF z|8I17GBCc57r-ow(H4sR-+#OEyTB4NHLY!41(7M67iqiLUa_4jLR1(mI32Tzh)Wl= zmjPy?>lI?+7Q_lf)dmtXyHz3sC|qEA%P1?#c(uGN<4(`f5H@&i0BPH`O4|@Sm!&(4 z!(pc8P6`3T9tm*F+7ZTtqbNccg4e~?Tv*s!E_$4l9C?)@f1Ba?v{nXXm#0Jw35+sDLr-yLF?twmt4UQGEV$PMVxYy zozWtcExovuBnHM7N{Fb+c1BB2bYf`m!?Q-g!xyKPiGSM3yRZY~UjAUFmZi4$3p%M@ z7J@NLRJ<^&l&ghr`>8M7-l{tGRoHGWdn8-eXO<~Hzr)7TGTIef*FClFrp-;XdHPhz ztXNPfP!tP_3*V3JC#5`w?H03QncR7^4>E7^Ft#6J_G41yC)clh<(JPdZ`pX3FZ;Ul z#?5zZz0B`P-E`Fte)IE9Kfmcm?|IX)zVGYhCp_i_M?B(`=kEKWBmTfgn2&qr-Y+@& zF()7WxMQFB$Y($KfvV=G?SF4iVtXWe|GhS-mydb+J|4;T71{56?F(M_k|R$!<{%$x z{-4&%m5NF;6`ATxomh#=@}@s~t&i?1a;q8LEUp+-4=N9VRdDJ)3`9lAS8=CQBy=?( zUl!%IighYtMX?Y{KRgOX-x6jBlBi_eU&g6sEDb}t5@2WKOGlWyA$h{6G`X{5fT5I_ z!c%DJDNAMOS#d}fc3#!ICCrYWQRYoe{)SMU{=RpKuGB>$I`{X#_Z@sy-@5Xw3g*t2 z(JDy;42GB{5-TPIp^Pq^+%T|+B|}{pE?Ud0YBow2x|-T|dHHQ+A71vXwW`_uWkvBf zzUCLT+>T#)LYv}AY=tw7o6djz>5u;H=RfUv?6?2%wJgXVczAV8_-;4WJB1=)4BFV&g>~B)Is;=Uj8uCWMS# zA1L;{6P>cuA?4GaU$zuoM&hEuS63Q_1Tre9Aqq)s zh~pUPvpzAHLbEWmL~;xudW1y}!x-~qXcUj>05F=4NmX1MLIv;YO5%YGY%Wp)Hg`G_ z$d-#8n2`l1jKjfFs0{`Yi4KlnwM9RQJGjS znRl$0D_|7H>zVM92AHg1=pj+j38gf!!F1?Z0+>i|sTcr^He^%?qZEWjCjoZG($lgB zSjGxmh^8ZFY+=|%(FH3d-6jQ0N85=5-|Nb#ve90}ro}$N=D}9M`P}ZM9mSe*&k zZtav{%BQ-%{Z%`+nJ+w^HnAKjx{R%A$3hNs$Rv{hn3q&d$Lunz&c8 zHhlA)nT8o!j!l-5?v-h4EjKbtEl>Nprf<}g&DYM*EMU0EN(khbO(c<9i5UWLAbQzz zC(C7_<7qKx(WYAkH~UZEGND9eh((4g$!!0YYH+9dZq$<4PT-qKmUven9MDG1kA?NwE}~T_E$)ZDA?8 zj1(|N4BG6)opozo#bT_R<1AnkX-MWXy2P{VT0j%w2;5>)BG_2JB!Orp1mC`Gnt4f&2-1MYp(=G zx+crP<+L|4LzdKfdatJ>W!~W!-yN0a1vY_Zf-$f=lw$SnqY!pmS()s4Y#&%A6kRp4 zXRwvbYUE03UB;ffRw<~F6$?FmN~u8Bt%9h0p#Sr2+ekkq-A{@G7A-G0-h>gCHX`lcU~^7-YT-LUEAE$7~`?E=5Qyz$)6zxU10d*%~%KWVjC zc8mGNM?LDOr`+#pkKX0b$M5;_6CQucu?HM~=pzq((tV$Hz&#J#ceeu`yy2mHdQSVE zM;!D}-;CY;Wj`bOR0#C=o4nrZL_z1H?RUlv%FkN;T@)DgQ zAHIdwr67R>sCqRKf&tQWjH=2_4N`$zy)Oi2bID5qPPmJu<%p0=3n&vwBIJcxi&p6J zG*gN$M+z5#bOp8MyntcYDXL&4vqIR{V*92qA6r%(|KmTHk!QL6%=F6lX1meMNZe)q z$Z7BX)JIPD60Er+;+Tm(M@@>svORyY(us&yBa; zc{S1;SY**sSxbV|C&=(dfgcU|q2)a@!dJ3|%>!0ZGpX;Y_l1ps#4 zD1_Fn(#tQqaLeY)-1P(QiV?mjFeJ<9kq<2em{D{gTsmZ8BJ5^SGF^PY*i}`P((q*R zj|YZve4A7-#u0XsuNgI|VC+!^4oR1Zd}LCE!gokGmM}}YmM$4CS%i?b7LqSQZSyLP zF3!9Jf~m5Ofw2tP=_SU?lH$x=lq3L0b~+`Y1I*z-!rt=sYDr(V*(C!h+3EMk=;%N?1YB&B`evABrwvw=Xj!<- zF@>Sqb+=iSJf%-Y_f3mGOa z#)c~N3{8T43xaM z$f|B$wc#h+8m0Qn>Fi3<+ea5Dr$1v?gXIOGWx?E(DP=3PZ0Qmh(e1TS%VqG(^%*at zvNbe#U2Fh`lBQF1u;sA z)y>yl;^wmFvA14zj<3kR^@fYp%Wg4y9(&6rXZwomJGX6AFaOic7yCUapI`p?=`Zv7 zTPJE_EvR`=kfd}mOpa`$NhfiHaK zV}5+u_hb9MuTOvUJ&NVCzxxHhD&_Obz90LtAD?x-FQB`6_qRZ0qahW*b6#V7p=%UQYJaGkv9G04{o7 zqf;;xE@oXuBtjs`h{`p@1*A(5#SpLDVN#+RHg=YHS^?cK@L-16NwZ@yYG^7Z7DKZF z!L*Y`aVO$1U?vhMfA*wO7e86%-0E>Dy;e`RX;wApii<~g2n%5-X@Zy&FaSgN=`!yM z%!s7tnR9GX_(g2K^c}?|9g=i0Z+7q*wb)9j426@4qgixmcpg=y4923{f=q5z(BcZz zQ7(s)+_5c6v!)9z2QGY(WAn-sT@EbjY%cbq zFzx?YN}zCZrU+q1<=@dkp51gT3oq^!EOg-ryJaav3ofE#yo zaBM3J5Kp7E;cMgHk&Qbr_W%^#I63O(qAi3qF&e&u+iQ4WDY0qWU^nZGc`b8W>JwT+_)cdRoTPKV%Sh)t3$ZdtB9}Hrm$B*eT4Ir5um9dDq`@Mq z!0=r^$G)5$+Ky4nl9xb9(>HV%Y<06yhAv7J*Q+j`E>^CJrlTu!*Txmue%)KcgRMXm zmq!;#nM-35Tca#9L?yQXa#&X;0m;aUP+Y6adD3EUWneNWA&v^P;q+4&;a@U=;)e7QQx8LyV zpWXZ~ci!NezT9Qjy1BgVDi2^^v~A1z*Ise9lG*dv>gC&Ryu`0C`~0%|%bv&feP2Jj z@xrSw{RiKV{n;~5^?On;Jn9epo)mfI@sBtFhu_q_h(KY8KduR7t7H@))scl^l z23%Q95sL)|KX(gdnr2iWvzD{2=@d>EiJ(>+vf^4S-42cFxO>WOD65x!NLgL%$Cv4D zI{T(CA6`~dySePaY~ud1lKG9VeYwvspZS5eDw%z9`5RyTq}KD;S6=pgHN&h%yrFi!Ljq~%0pk-BW*9TD7sGeHfe!*Ie!_u2t+cM zVi__3!bLa$dTeFIE{dy4 z?$e5%AfyB~s@n?K!&BsJ3Qdy^07;p(;tOD2j6}B(6kXA0NwafA%Lp)1ToKKSP{yl# zbXx}sK@xc__wWTZYE4RpP5^XeZX!j8Z{2i_a@36$Y&l0$NV3nBeC(Ku)pP(5lJMmZ z5Pk^`#;cS>kD+|LL13UujKCO0qP(Sx;+R@?N`ZzAtSGhFr)?CEHyuW7{wJU`Q>0sA z_BA}EEd)iZcGxH6AVL5U93NG6TyB*u1#LjS~PF(#=*^aG!1weNAx-n?k?N8c+fw{jd zt~}+_>#(joXDND#0<`AKS280c7emXrvy9l)*xrs67R+dX*PNhyanNS#UHsmyElnVw&&|bZ@ZTHx6 z86qv3Q5d6jwT1*`lt4&rc}^?IV2~8<@q`~31t3gAYXDOj6&(+-v8YN^UY<))*0>W?rPZ;!wK#JGUt1vE;uYs zd6^)=UxpOj<&2t?+`KsFokF+OXmNBqj@((2axTL$4)e^t1O_0hyAoTy!P-TM#pO%R zu4e5Jx-c?w6sTCzXcm%@Z7Eq&=d#O}Q9=JJ(DppDfVH+5W)>?evv|9=l3lA?J<$oH zC6!fg1iDE`hoB=e_BD!8mZt0?j8H_D$hB60-N(BUPhb0jFOOks=L^16(CXzxnOvoO zS9kS$z5KQ;>9fp=Wxq44I`HjYKIW;iP%^l)>`wH`!`nZ-&i&%=`Hq_}Q!o3LY{l|Vuf6p4>o4_vUq8L(M}B|#x{Y7`)Q4XA z+-E-MMMv-B^ULaG-;e#m!yozFr|os(ArJSt=I0%<_lu5x)C-RMgX0c;$l(V)@SsQU zy5C+K_PPItr|x&3KRW5)cf97L*SzS6Q%^eVov(S(2j2Xu!w-A%e*5lq&pmc`e_5%# z+pZgSS^3*zd&#`^{<5M>5vLX--Pu%~DfB!St*ana!70`hn2NaSKE+@)pdmG&au0*b z4orFIPOIWk^=DKuTa`zt`h!>7YE_6@QG7ZW200Q`)vdas3kqC0*+-Ar<;KpJC{YV4 zE)qG&Q1l;u|J#O8C5)1xfA2g0C=-tGx>`e8*)obnH+^6s2!-WGKlqN;$F~icIIz3e zS^&xLvPz0{5s^cZzLyLq1}=uw=8(;cox9KyRvpXS9I;6W9*M5$TM)A=ACAPRFUPJB zRw;WV+ozb-%Wf#Mdl>tpXZ(dvE~}~4%M|yTKk?!B|J5hXP%QWR%b)q!dwp`*E#|-c z#Jj%tt+NHFw>q6QVuG5x9m!q!JpF$XI9U)M<0blZ$=#s zZR?N`4@^?T+>5)MpoEFh4o6w#A}%SSwuNGWY_%K3nG_;GFNWCQb!QZLiLtMd@EC|M z3E@S7$KStkWZr0YL&Jujd#kaGTejs>3&U6rG1SKFZO$k*LD^eaj^W1(xEWb+_|hmk z-Ow^hUS;ClbKYKaESCQXc>S25R%5Zc_Iot#G^S-=SGu9uZ!lGB zYt_Ye9q{T>>sGfrzdR;-0$Pk=|p*y7LYQ(PM%5$%Yo zdBtV1wG+MR7^Sx(@`KB=m1JOcN*k)d*f`~4sDJudTbT$gjgCu`vVdOCt|47UOsd%d zcVxBJyjd8Mc6K~;_KxYaA+=~B!2})H%INm(T${S~pZ0aFfQd*xqeak_QatUuv_r^F za58#0oHz=t9$!4Op7>i#K|8XgF?-$Ywd?Lk2aqGe4xB)Ad-4I?4aqRWENc^-(E`{K zBSdRKI^qpgeE{$^I-)zY6ut>xjx6RV7Kx!)>$c4WzGcw`Q-V!s%(1Dn)=V?7HK!N~ z$uVDE>$=IlHMNH8m&%G#I zkZ9p~)otS1dBF^(nZ`*kiWdMRUkr@eEQ~qEUkIZ+iqpA^hRe%;T_hLJXw6%U%aM1G zqM$9q2`!u`Mw0**XYv=ioEcpxT96jU(BR7uGQqY)Mjq8&nAOs_i`7bc+n-7HeP8t6 zzqBr5X1CJrq+aegYF)U9WrTF3?uxQKpXma$)FwD{Tp3+mMuDs!@Wo^EJn8+4dkniT`0D<0c0)<^vbvy$v6ab{ z43yve@-{|ymfc@gP*gJXl}ElD+vk`6^=CIuz3h2xKPjbN_W5NWUcU1un|yv*z3k?) z-;?rVQtIW~uDj%#OTT;bma{jV|0U03f98x=Kj)x(pM1=td^xs9vVGsz^PaKSF;9Ka zOOD?E;Ky$`?4Ui4dFFlHWq$E<_w`xkW1g}1Q})|q-v@5+NcIblc-)&`ecbC`dem!A zKI(0!p7gHQzU0}5JmGPVdDz|$+T%WZ?y|@3cYS_&_ucl~S-tGGsawgaHljcy6m?nm zmsNsFLiL@ZuVPS@r|x6ag;dQEird7BJ9hVQd8rV2De-`nf$T;r04eH3qe^B)G=Whq zB7vufg_B-Qs(2K1YRT1_apl{v30lRbOv<5;U~|9Z!gKS*Mj)|O#ug8`U;fG$Kl>MNfBgsE^QJHS z^~2y2X#3U2*euSN{B#8+KMNS36fVS1&s+ z{7%P5>HNmH9CM4QXZ0-*#@HL3Dx;_W8^#)V#JL$evC z@M704?bg#!GAkLTDL0WKf+=1s4xzNojsZA#ZKH+DE+s)pMx3n%JE0d5pgEcg%NVnd z3#AG2wOlU6w#rtf6=(siZOhw=Fz=QA2d(hGY`HS=VN1O_ug__mr)$SC^LVuDa;hCVB3BF~hHf+rCd|OV(Cl zwoh%M*0i=CHb}E~;)2A^2TYYZH)tYkYNEIDg3`^=vZU)ei&bh4qUD<7uG>0<=OSBc zLRQjVT_qTyWitCBO4}aM?RJ)%y{1-r3d#0SCL-rbB!=v5N0)Y^y2GOjsqP3f zZ6w;+Q5Pb|7=~OXt%gOY05QaueJNo&?sKdonRXHYZjKmG6;Te8*pA9zN>vwTc)}0A zh+rdu%NEk8iC2BpF5Q>DgM^Pni?)`LUK-Krsv6jfrAs$d6;tVFNZO1>gK2e_VrX5# zMkaGQm?Y&>+KmP?H0u*!d?}K_BeXNrKe!;HWv@`xvOwa=v8kbws&GcNNf@w@XalTa z7^-4L4#}>?yj&1n{}vFGBE7C2bl}#Cz7z~Ar{rq+2Cy6|VUCPZv4~D@@!6NT(bp9h z(nM|LWOr)2Zp;;iPFz*kyC5_{LRdOm`)) z`Zcz6@3y|T2R@nn4K;;z+YW~z_`8nC>c0rG&tzi*P#U|f?01$G#p-1rRc3M1xqHft zJ*Q1kFRvJHzfozRI@VSlR4*$H+;>wldtkdE^)kS1+phfOty|U0?kxNK^6FFBTijy4 z>Dmi@g!zt}H~##VD}H{9um8H_rfbi?{>qgv`})Z>m#CMoz3i-8u07|fi@)-dtH1rl zPrc4_*QXr&=vO`eaeY7bF;9DddimwYKK8h0@AbSxAMo5~-S4aVQ;91xtBcLW(4`lhr>ofHLP;-~sE|;M3W{0u z3TlcWc2&QUU+KzM8Uk4sKPs*3PPcBp)wWf&5ds)efLDWK>vp_At)#jbpp+~f2350_ zlm?-+lEO);DRAUPC~9F;CA)fBu@6edv8}{K)BV`Ql%l_NC8%K-GNtCExZu_T?Ayq?@&Wp=joq&aoUyHCAF~aK> zBJx^8<5F~O$Z1jGVK3rjJ2@jKIWoc*uEp0*foyb)bS{|Tt3xT0VJ8$uIiL!|(lxe( z8H-yRrc~(!%p5zE%PG?hk#VUT20lPkO{A1qQaG}k2r&gNd5t4v!UEvfL9+0omtG_# zUke-+WQz&I(kOjqK#WVgoJS;|09ZzjhP)bM2!=|xV-dfWB}@z0wP=}PmnI2QRxVJh#XdmS zD{PbMl4niCU_wyVl~!_K+^zr0A=rL{TXdl%U_Gtm{5xW04OedpTVd~Cx&*kixg@e( zSC#-SrippwIiKzOpB5y-)mi(x)Z6@qlvbroDF6Q3U-X&fxy4K|#8@Gq7U-dCMX~DG z^Vq6p`aGE0$2q}%<|mt1pT|}y-+tqjz9QTG|YH5&1Bw5N`>iZwS9(EK%gSW0`96ct z7k@cH6wx+XZN1v3*{a!acFMOCTkb!dq%H_t+RiyuSI^M93evW1U3DQlgzU zV8nuv6?-`kGg?F17K9zMEe)9pVm5MZlXh%@ZRgtc*;KSeX=`EMV%Nc9R9UKJ7t8AP z-*`!uzdRi*Q}D`n2b{vrxMosPWt8Mfo~y3`Y|Yx#TE;cEv&%SEX>9R3A|lQ@cbJTtT}M_*Ei47zP{+;`bm1MEHbNb;IN8b2T8Ea% zXkO;q3!B|mN|J4uP0)h7t<$dp(bsEuss&&VFx%#p=-cngMN-a?ghhrdV9JCgm~jq)G=^~4WF-L@3ZqsU z4y%Xl%Us=5RVWZPhqdWUT$bc+;$jw}Q-i{{=~H%;cuYz-LgCKXKaONrVvL4_)-n=Q zTHU6s<<7`Am#R5{r3;>fkpihj>(E;YMkJBbS+XP7ZDq_$Zn|P|Uc0HHuT%b;Ikf(_Bk;YP&SKE>T#V_;Xo^(Ckcv zP)b-Vido23ZXHddLI-9fTuNr<8$j0(uhL^ogu~oin$FDVF2MiyKGrP`$`>)YA7xE zfP2@-zHS?`qvp7navvfy!4Xjs4`3^V-9&cpm`;?&zx}oKv8(rVp9h~-p2w!>GFAmB z1gege35sI%vRi3}h_0c2Pf986VeH@j;+Eh1;wQfB>z1vT-@5GzHn@n*v9G@5J2!0p-bH7BZu5`-=8X5A?Dp|1PJFEU%da@$F{hsN z#8Zym_qb;~exf>OOAZ{J)iZY-41#3?ngcS0mnXTZ{L;u_=oRuz}~y4m;dBN zhxsw7mmT{I_3~T)XyyC99`UgI%=dlmwyO^;-`)3p`S`FxSS{&ykkxMQdFNY{%9gpl}U2t(LC8qJ~r2hhUf$BaQN zp{1&;M($oPJ3_OAR2_52lW%o>`Lh7 zkDtI>vF62*ZsKTH^YlgEs&zk}CDjtbFRXa&aUv0$L)0ne=>k6X_iDxcB6lZI+>z>h!o%Olc?~yi#Ea_U9&V5E( z-{tPNYaM%+7f6Ar5vLPWTEbKBI$=zsEUdAl}7A|?2wW|=MS=hHrKw=h0Wdw#F zEW&cug_*dz5)jL%wRKgb`>cw!Wfu-}KPw!YE@sCi;iX%pT*4gtLSS};0PGWv5)63< zMFK3m5F!U46kSGB1IvY74y_V$M(LR~3$`4asUclPA_3`DZ!!W~F)N@=N&pg7@3UwP zX`6CXV7i#s72tJ6I9_~7N#*`c+t0VJ7kOHn?+ zFm%i~MX_BZm|1Lx@=6C|CsJZ(WWfn9I!lgC873H77)H7Ag<Yo0F0n{}!*HL_CD}HzWdt)j zE-+||Ldk`aI~P(u+5n|0B~rrjTvq^Mpzy)~)>=^tjnfdL6Rml%pk*{WpJ6w_To^Iv znxL(|FIDT<8U4Th^Y>n6jBXJ+a2U)8gXlujc@?odg=Wb(o7koUAP}AOYSEqR08Zph zO12|e_)@&8;d$xG4JNi7TWKJK3|ye9Dm=}a7eyCmmcWxIBOMGtEG@l*m7VVzJ4=ht z7amNP%fL!$IHx8M9D)1HTt1Oc@%Ld0==)v*Vi*6#z#V5!u~OH)V&WV^nCE;V~n|nO6XT zu$ed>p@HcZT-VC5bjmRLFGLs&y4-=wlY&I(*0xmTHsZveYcdkT5qR>6fg>I|F=Sq1 z(SfvUzE#!y zZ~t`j&wskb=a<#XTdz7#y?pykm;LI_EyN?)eu8=H73cek?CUrF@b;}2xxf7Vub+AI zmT&#_CtmCOvDM2yzkK3h5B2$F-}m+WLm%P(vM>8G^!&pg?%Tk8-`Al}-t*aic%SDT z^3bR3x7+@EZ#d}DyT9~#PkiI6j(FWm4<+yX)8meL)?*&M_Z~j3?2hui`uwurmfHOu z8+OrIm6h+x-gUPXS^3SWx4-4j-u;d@zxQ2lecPMgpmtL)|HZrCRspLzBWgD#o?=-` zSMjL?)o!Y}iaQ0YiqEJag)@xOPnWJRq*q;PtCW?fjH*RtuRx?~x)M_D3BVnvpaKE2 zC>uqjNQoU7A-z~EDoWv$?cy4$hNUQh>55{U7-S2~kp&4a0_G`erM;BE+^SY1Q^2^S zz?45W#kg1$qvp#|Vtg^D8-*|VGEsdCEhrbXG4ry9ybOuhim_M_Iy*5Vg*Gpo>`LHj zWQympwMyeY%k0~|`V=#O3R+h|?Z>2iKlU5`_$9vb>!Tlh+vh%Y+E>4P=9wRSqpI2G zm#?_wEcNoWoBd;Flal%7YqvN7J5!t;PCD0z+4eh$SO8QUilAyM#_XM{09k5*F=z)v zzk8{~D6Y`aY74z)H1gqxffOtE>a0c!nmbxUBTX>aB+qD?=BPDP^pFizcF`N9@GZ}x zHzZ0)>RL3S3#OX@6i6(iU6U(yV=$`NL@p38m?{{cdU?8hn~_n~y3t73F&J7CQ5r2* zw83E5358erw4`9=z>ZnVVu-*)GLkZs6@`6CJ{CMeC-h!19I29;r5p|-!&m{3P)nT3$XngLNE*A z4RHdE#oHngj))=wm@K8y5WF;u_O9CdI}0#a@3t_F;wfhqRE#XD4`50aVpJ?JI}^JR z06VTtmt1(oWf!ZObrB+=NQ~Z!(Y@&xZdR|Zw|)tbCItz=Nch5_n$hNxWzv?ph`RO? z@N_V_O>~nj7DpQ}#TUto1&nWj1hq%fl}0%;x@{w!@YE8nawt^Azf7KN=Y_UT<&eQ zmz)5_8Y5;hX_zV9s3Zw|i;x{>c7W`<6stiIE2+q>?*k60q70!|W_50ss#FI1& z36NHdVn~Q)L1|ei|9uf9uO&%F($}~DT*Pl`2c08EnzL57azXd8?F>MVD@x0Qr`uA?HyMgUN)Huwx-Nr>&vz z%?MRI>?Fd5*o{i8E-!j)+IRqZL9&1qoh~kbZZM9J1c@QbZa>!BH^t_p%>$(p((Z!A zJG3fV-*MAb zN@gEnCU@R^?RVmH9(Me*A9B(WkMQB;7aq0G zbDz1_(FgD4Bg{uVZSN=V{P`;nf77dveA}Nq_w=`%eCUCXeAI(>yVowe z+-tYpre5CT9((SrUhW3*yWajL&s3|F)yr@A(^Hkt?CM0do+7b|SnWnv&8ZMcgfy%Ur8tgDzzszL&{ILXcfg% zZvunCpdxsxohy4;ba@G9NGl)i(Q=d+0HFw~nl3{y4Asl-5$ggseVoE!Lqg|Op2}fe zB0~`o5@mAy)z@4SFbu-6i=qV9qD6&}E)8=8wk(#;O@IG?u=17&*iw+yJJcWk*hk*? z$&a1x>1&T$fAj0UXUt>aAEhXe-D@_aRVjNU+mP@3^7-Z0pL)vM{_KxFamG7+<(GQ- z!>7IB>wo|8bARw<_3~94&rvdOy~@vpZ@OX2=58{!$#*HJ#+g&n$>M}^0qKOI&k4(t z7ck?Rp(z=5R`LK0CI%#=x~wxgi__opBol7D8jZ(j)k>p=vTGY^YLcN0GO*|b%xHCA zc5UFMPzA>_Ui2z2bCev9 z-Cu?&XAw(D7n@iPC}#GZiPbijE|Sg2E2M8ljsy>-lnPhdXe&<_r74u8mSet6uzZA| zuy8Cf`pnV1P^~LNyGB7sdTo?LX^g%`@kZ&j<;~9i#MXjPyl;Bf>rK$x8B0Q!Ee8BW z5nXx`ZaqsTOo4ijZvTS>gZD&6mfm5tSr|zeM!Lk>5>O^dfLwSXA(UWa0Wgtn$*ttP zT5+^-qGPZ{sm|iUf&^?`XTDDqo?OR$LoUM=6ns5;UrkoK{ za%^$RiV=yGu{mtCR2yBo#}G;OvSpDfX^9Fd8JS~7tA#NtRb4x)_F61WJV04tW@I7e zg+wOYi%SQaaM|Nwbfe8giiIa%Jf##&cidUll^C=fh0{7Z-HqbTrMP%nH(8l4X<2lM z(TU3oas;PJ=fd-TboN=_{?^yO`n_*|!~Ny|{qMi!&ILIn7ce{&7Oh3gzGfm#XW4RW z8R2u_k8H+W?)9rGxBssJkiEf11wb`RZ z)zU>v2ZqT$DJ>OInn>WP4_-sNM)6p}xo3aR70jhfw*LLX5lQ(3Zh~kNU84mp;VikV zX$>I(E)$Aw#z>kzdsK}Iv}QeZ0h|}EMMg2Sw&8WjWJnppF&5OZU~x5YCG^@ZiWkY? zqe5GHOM6rSI8uxfU_Ni62@Ao! zKvL~>ScW)BL%<|UDVZQ?QY8QhGizlhCaBxK-Q#xqyHdSPNc)g5mC2>OV`~%)es(V` z-7z;u6EQDa8W`=6!>@?t(&P4j#fgtQ z`IvoAe9j|JIP4MX<)`ky`?H>K-&ddXl=r;hM0b{z%x^jM=!2i|5cTqXcinKG-T&+U z^6q=w^KR;8b(wluU8YP{HJ|#bm#UgQ)vS)|-ma=mDW@1z^wB9=UMf2+>4{<(q9pWl zPLOo&N@JF4MulXBXHO|}Cq@;gia|Vz$8M@>Ar+*C064nes)ScpqN?;}q*T>Hszd2& zUU*%KQDv+;6iEOsnCXOKBH_gn9bUp*Fqk{(VQltbT3+U*Zp~tvr20BSR5CHD^-I~jRh%QMJ|{ z_KEL)%Nl;n?Pjez&MIYvGrJEj|K-O%^yb&S((g&Bm;d@RANa?we8ThCKELe#@}`Tv zbLC|}@)OMJ<(sa({HANJ@(6Bs535t!`8#)Xokvcbt`CX=UFVK7YhCFq8!&vZb&DNF z%yU+Do_3DAwJNkB7`ds`v@FB3ZgfWJi5_Rk&{>Efl4JU`<3bUlKt}m$Sz-t_>KVdOr6WXPzCRxG2s039Zyj71!mp>2u& z`z?kwV&X(Zt)b>c$=F+GZ&kgK(R=?|4tbxwOSXSlxOX44=5C3~PTU^K@ zM$!$(GKq?Af~F$}S(#30)D8w47@_3uGpCB3AD{cfi!V6WzixCfxJ$@IL@luBjD?n} z?vT(~Aa|UXA;Jrw^xB*=qD|1m2mD%xImTzs>yR_`2Xks{@-8u@)ytf!C5VoG74;dMw+)cfy{*NjmKPpo_yF%wJOdX9bAoY(X& zo*0TnX;PB|1^F0s=||C(u-V64=+}jhVbT3QX_# zq<4I|jjs>|Wu!~Bh=NjPi3HMJ4i%b94BTav2wE~q=01E~K^$5wUREoPrhRxFdg@5@h0^(p4+=x#R~_4~`WZr!}?s*8xvFZ&ep zFK_cJ%-6WNeA|s1|LNAv|N8T5fAh=h|M`xq++4o)iXU8a*^h49y6MKv=WROgYgb(G z)l1L$!mn<<;LP`(a>P^byHXLK@R*a2-sk1dJHY3cr(X6;QvI0Jt4?}?lKF*4?dy5$ z=RND;hd$}Phaa@pD^EO7nf&HII?7YoZ~fzA4tvTz`#+|df;SG@E^3S`ByPci@5>;AanRDo8ls!UV65v3x%8c?h5Q{pKeE2!tD zvuaU=2%ysRY_keiEve$GYF0>6V0f{soju;HFsxn%VrchZe1Q&UN(b;8NG78(~qRTN(LHq9kW_Vz^(T&2>OB=ax zz-U=a%8J3Dk_ksiGPZQ?Bdd;CS_fTVnGloZjzoq8XJz{bPkVh#Av-EC=rY2ZCWKF-T)1i+5z33U6M$d-mSvt&Or2qu$hr+*(GdmS_3L zR>TgUQNo3<_dzoki?jpj9dr4<$xbSW*tz6eTwPfOkT5pgm3$Vp=KyR%AYldu$-+)S z&aT_LbLp+&nQjkYd+&~+#I*6qmY{-W*tV60E}nWO&(F#2ym5Wc#M z^4D^=p_N1Vv}la64PnS)t!cTpB?ca5mV5)#F}EA6ZC>=*Bo`EHWCB?Vhe-|vY6Wmo zU{KM`3jiAsDNC43sh1(qNe*@i*Tv`6A2+KsU8wNsrEg z2fjRuuIlO70ptr4KTEpSPyjyC69uPNS$C-wv@ZBPs%be@uB~+h}beBnIpisb3_cv*$b9Sy~YM!^omb8fsFI z2s`KL*yb!+i7}3patCHH>V=C1fG{>AE8{2w#8zmdg$TwNxt2ci492%fNe^d^*f6(% z0M1+2y96+Fr0{g$j20#L*ub)bjVR{cnrP*$8$%d)lH=pPv)j>jBepAT%xv&HFK}B| zvAX-W*K)^qyB|U0FN7%gS;QJr8bxF@J5z&aPo^oAl_j#3vfZ)~605C?xb2|Yhe%bP zwrv&E5?)XMAltE^qJU+@qpOWa8xA7OuNAA1zelL6WqL*%Z)S_+Pc=T@XIDQ{bnLqKt6A#^Ir=7O^v%bIVi^~3j zdGHk#^HMyj6BU&zMBqwSMizA_3%nwfuy7Gkg-*w&6#|S=*~kl+ zP#6s{>e9uPC%w36QDw(gs+4M2*#a=;#Zhzxd$BlDuKK|xmGI_;2cAgYBVhRQ*X2m( zLa#cmyu~mq7R_9^n}r2Y9k)JaSJwhcTObjwKb^D!W`qYDUqUg9bIy9dC&(&gqEc2J z_b<$<&3b0j6A37DMVCSz633{Qk~zL1@|y= zGg*OH#`?@ryo!nokDqTYmYs_zd>IR^Wi$lCK5}N1Bv_Ic45Voqh3q&|U|6)$2rv#b z7XZxvDTxiCxP^Xl_sS(3X}2&pS@$frF!wTg_bfNWaZfZVNvV$9iZa?_wG=QjGE0jJ zt%|G-Kv7tTdutooZlLa=6!vbfr3BwCl^sCgP{I-$4DAHE5d-KZV_v$vVvq@pKt^L2 zckMX}7yywup_q%TA}{liGq$ECP)U|;;TUx}A{n!rPH-dxV`?x(X$s;J#@y{C_#z~N zYVi{;x`sm9eMQA9k+a;C#tywr=`D+|Uv|@teg(+qdzH^H!j)djURLSs4ar0THh5(v z1fd5*A`M;9?EobcEk$%gNazH9@HYiRs2ZZcv!KdIPk7Z271CWJF zA*5xBnX{X z7-FVyZ#B$?7yAg!(NO245bVG)gOx)TJWYfWC)m`e3+d30(UR9jxL|9Y^BRug7E#+2 zYBM4)&>W-86FHam0B}r30!WNHNeE-7D+_%(A;+2-pkr#4{*^CVFEse-^V~(&P#xY7s_?8UXLc3rk zP#{B;sNjpD3#n_eys}45=Tc%Ud^_kd@M1}f4j0wpC>U);RFv-Ko)p-Xx%F2s+a1|p zxVhU26tTp_lC9OSIAbER8qySMLzaxhKIAxly+adg5jJK_usp-50ur*s%DRm{jOJk9X0*M?4CP&#H?yoEc-mRFE9V(fjiX8 zzQ26$?KiDndey~WKmVeyop;gKKK0#ioOjtpe}C=eU%BbpZ>pDldHIpMm;3(mZP$Ot z_m@{L`T8#&y7BDOUfusmjpxhiWj~JX|CjyJS6^YCeZ=hlmv=nw;1?b8is$_0)=%-1 zUkC2~+&3M&yANZZ_KqV?dCMWh?|tpO)g~KnutEPI^&i&DTReBu=RfD!yS`*6kC;`p zo-h0PFMrDH)53~D<)QLTjaeP1xKqTb5EXQWRC6jpb)S~Knh}y?kAq=Uc}5vkd{vQ< z0EC5$g0wQTE)0G(CcB5W+{06J8x@_dc*Lw8SH>z`b-5@!jZ#>AOxma%h-%a)!y&Pi zAsvaJ+Uu^q9G+LzwBP>nSiB6uU@DyPFq*H~0XL*;RL*GSP_|qkWnzdhnh`JjD(Ea+ zEI2O;5@~q8993QzL@6Ri7(70VtsGV<_t#MrkC>IrK7H-cvr^i3nE^a*?lH74FRPb* zdD;I-opRENXMW(FU%TM%zWT+pFaG9v*Isd9-(Q~A%dUMT1{w7i-L~JWK^K%OMLAFb z;-y2^D6kh0LeXV3M5-6G^j=S9N`{4GT=L^e3^onqa_$oL-3^y8VabtK_-U-tDBA%G zS$f(qqEuLwe6J65c0x%vI|7MwFvPFRZUsCP?%}c`k&GNL$Pmm51CXl7(#{>ZQ5N%p zhf#Wi6^o@E&4FbnZfH zaW*=@xsN0<Yg*}lRp zPn*(R%&oAyty>t2+{O}(=B70n1V*&DYR;iBkw$P00gDMvI|53nwC@d%b-N z7hPbOa^POMU0kCq4aHw8nXxj~9APj<-Ht*4IgBO6qbm&xysWU5q;4>yBs&Hedl*9w zjVOi~S?r~C=^>F&im091$54xar;QUliwT-9d^@Gm!(2Fa7CM0!oeK*u>l2Kfn6kFe zz!EO)?2sgw=m3zEGJ&DwQq2xf+FV8gU`TRIm(O&Ru{>cQ4J|0Zq;$5+>6wI#1EQ17smbDVVHEV8(CzAaSzB_Lvp zhBjGWK9;nITpSc#_(4+g(^eE+n7q)IKo&!~xtD-8BS$iZ>T)-QslX4Hs77nn#Hck9 zt~46OhHA`_F`hv+!cB9Sj+%QUeDMh67!^gJ`D~0+J7hA}vt+amr4yqB=C|S^7qlVD zly<}&iYUG+%PU9Wr0RMvA1#K#COW_fG04ZfhSz_nNN>1Hra;r2oBps<)X1);70YgR zqWYS!Ov-JvBWSa$JE9R$zsoN2Obo z+L&`xR1&vu8z*;j`%l{syRvlUq#=6@tv$@l#%cDnsl_IQ$&#W=z?%1RL}!H2iq*o( zgwV~RoDD@HRkZyOx0KqXP4~m}sg!wD^ZeEpdirthwnJvoe#RW_VT349doV-uMO~~? z?pw?~Vy$Y{vuJZlmyH{L(>-ArK~zclFW6?&r}zuz{dfI~cO3u9b54ET z87Ccc!jUiAf7i`lw%xOLc>czlZ~T;JJazVmuzeVN<{zcj`+WJ&|7;__lC5Okf8V`S zWd2^tr?P#2S;?&2QvfRHJQb}f?6I#Wq{_D{O{FelrJU-|kdjs*sj~K=ZiT0kmH>O6 zYY3;JFGUGV)P?{GL)EBOIfU36K0Sdt%PT_mtW3RH*Wm3Gqd2S(SjyML}gjxWMDr%1u{R;<5k}Jn+B( ziBWb&Leb^Ot5Vv>+Req9bHru^*tu(kGwOW|)ik@h*w0_9i$8nbN7coCBHM#z&yf|# z9x)qY_c3fmw2x;q`h9Hmvfsyk>xsv{`|Yng{nWR9`EzG}=9BNg^y1GgzV@3dZn?bQ z$M)rAzXt!HZzDcYSx~AqTr94duAoW`b%Elj`iR9ViO0Unmn|U_UB<)(Na!v!*K#}y z);jm;`unH&e+Q)kO#|fh10L zc%loB!KiN2;hS{4YGDW+k{2XLEuCZYijc9mMKL7X0`Zj(dkm1e!>iGz#%O396viwP z6PIqX&W)Jk5lDK2(-3J9L|jntEGR;k=QIKQ-3S;1P^7`wec9fr-Mc%NyOo<-l~>*F zvu@mAsQ8+%gw4omNY!*GWEvf8208m3G`B)GM7n*29Xl$_B+jCXNam#Bj@doEtYiyd zi=xV)qvczj#U2K2RO|$xSd5}$(`6JIz8gJ-WkG?5;eubpZtjYm>K%@TBqJq?oj_aA zVzC!pEbfdfa^UN$&>rUc*=-DH=Zm;};Y|Va$P==zeFcnBv_>s&$p8cf%XcIr4K83U zI|WkP#Oby)yf83A1}*4=MS0!k0`uWvJB^U4{;&(3BG3 z0&l4F*yU{TWeAzOAwstlj7BAF9az|#BgaG_%o)*A3S=(_pUx<#(0B@gZ4l<(vdnM& z5S#^R9ZIZZSf=4h1C^sMF+-gs&K#MDrC1BdLbjwhevC9xL21dy^UY6HIDgg>3@`flbF$`Yl{8GJFLWfKa z0oYpsN<5T|*^v~eH8C~(!jWDs3f+Rxk;4SeSI!YK(#1tcA!8AooV>uefVB=<-4?TU zq{O!h)UCXonA^R3x=#;MsyE%!S!^D96e~j{81N^}RERQ>OrpyOU#d+lqY3f_bGTUy zS%ju6Xq!X3qxKsTXg_23LP~hF{Vdr0)=%ap2zj-QF%g@nHg#G%8L`@WQ5F>CuOrTm$clKHWR*Zk~}+aG&) zm9H@W@Xp0|uDW{lvddQ7e2EWZd&InY$z^M9{=WZ{T6W!69$b6b>F+)4;Jr2-zmNS_ z2W_KX_T$)pedLQ7-*}9#E$`s}q}0oAJZ6`_e)UUUci0XG@44M>FWUIUTRr7fFMrOP zU$dvrV;{9>+udHY(H75oivN>(;pQ7}_RNhpe5!9R&(z9KU;qC}^{-OuKW4WoOkt>i@xT2#{ zeJhz!>GG0>7C<>H^J}iW6i=|C2#rC)N@>}e4!uk;v=nB|i!QEpkcO$TNJf&_07NlL zml9^92vNby&2&6;rsI@`=^G-JB?XTmT}XJd@YL9MlYQ3O=dM-CS|zg@`D5pNP^+tU zRxiU)0Q^4oP9vMaj<6quyTSz=YrxIR;&E<)+nz57TsTveUtgCfv!e)2DVw z400f%6bla{DSuHa66MLFTk>HzmaU=U8ifbL7L|y@kyl;d(st}Tl17565*V1o6BWHA zx$|;FB8O*bO%Mv&nIP5dknq4->!u};gt4X2mTfPs0NDZ9=`7PB%!bNeXrkK$;R!%f zpkyg@7%eG#dAzB*``#{njAbuf6&~Con!QFYg1-s8qG3_YwEmf9`U@`c)7*8lv zF_mdrK@C-f(QV08cH9|xQIe=?+fst{L1njcNJDK8Oe&XxHb*(@;(d0C z5~g4Xc_GHEbvZV*k&-TvPXYkOC3~@Q=@gZJz9l9KFkeV|r)BesA)~C~9C5i=b1OD7 z&r1vJz`>hc$DLR6l3U5NCRzoKprTul-jgW=Jg_M6s9zU+(tU`3_pq zV>TK<@&ruRQ0d9PDYPcTt5OS+i*;5Lbq2=C*yg{WElB%(7NhVO3K0o+Ql2G{m$-5s zDNWM@Xx6PTScKDgahwYGgd_Jcoq=g|gloA9WJu10GL}*0GF{t2CSRjoCMpB--PLhrkujayN)Wp3aDe4u=(i55m!|)vuF-HzSN``{R*$&@y zU30JVDOifRu=|a%TjkB3)iIVF08j(dAlZ?DED`J z3^#OQH`0E_T^@2qW-n*01cpu#onW3OR>W3bwuVg2QaIUwv$RSao1ipAxAU<*lT2|l zrc2ML#1z7{ie;&qg?$cV6?bXRZ+q_C)DR|Mf?{Z|rJH?bz7O7|F7_yS{7|-TM(rPe z{Zk*tRw=8O{X~+lHs85o$-3o>?^$!RlKB^p-c264cg5YS7tj9r^7Z$vz43v&ZhGjR zn;*RUCROwO>u&tv9k<-E?3%~!U%7VK<%@st<(@D9&0oL5?_>MlD1W|u$X=Tsvd5-J z?>qBnQslUUU+DSr?4L<(b;Q2UBYy8oz5LpPckq*6FW+vX?Vt0M7jN~{eP6Qa!Fz6T z;O@`cam#1;y)Vy~`#g5P_4Oprm$%zyn`dme;ik{})7^J{v8vFAv3-&G zlPcL%f^=mdqw24U(vYfFSqEU$1Ldk+6{xCE4d~IWl3A5%RIR6Yq*Od2LaR*KWk?JmQS-_Wp_YyTKo_&ZmaaAAE8CK+uvOKj zz{}9+wO3sRsa)lSK?0JIiMXm)0O2H$)}$2RDseDUvCPp@fJsS34AW#ItS-hW+t#G$ zTv)VC2e?HRx1zYuTKjb`pUT$y3^t3$%{^%D=`*9zZ+-O(7k~YNcfIBKH^1SSPn>;< z-~0Of`KJ@V`gP3@zN%gxzxU-U%y+F?K58@k91 ziZrZ@A_$M%$|MGC1>yxcM0W&&6j3ZB?Tf(hz_8dLwfz8N^xV@rOHo-9PrNNG89HS4D5Va0?VzSQE82qbJ?=YmytloL84nSqL>Kg zr0DCb7smQ?CnGTo93vMIXW=@Br)lTae48CdFc!+79n3h%G#|*)fNv?LnHVL-1*5ce zkDm*PlTu|G2h`>hNU!89h?Ik zo_&Z6O@Upk4#yzJlQCMk@qz@P=n}eFI~$#xE;MNahlDAUTnf!%l%Ar+AO|g-BBJZP zPitg%R+Mcnt%)c^=8V%brzqvw)L=?DcV4Z3`O{_If(t~!4;i?Vyl!#n*hKL{p{RG4 z;UYwt8)|f#aPwuSv`R=YAY{}^i3JZ%07Jy2tS1*#FePTG1&SQTD2&@jt1&V(B&F(5 z%!SYv7XwG4O9tBv;S=AtveO`L=oWPLB_dHLOy(WT38uBrV__d`}dfGBE z7gBemkp<6KlNrKCepoTvc12L3OH4Al?Pp4xeH702Cx-ZX4nDFi=V=@HuyD^McOix^ zMOURf#0RrIZdN9%k?E8_{PM>tWtBxO`@L(H-o5JPdvCwRSC~Cw_I&w)JD06pe%-p2 z*Urk-{!+@r<-1m2fyv=kC7!Gj`c>!##G` z_{e>?dBYJeJ@Ker_j%b?d+xl&cF%v-CL3+^_-}pnjCno3kGwD`0gY`=M;5D;P}Wp}Jd5TH&cq1XKF)B4Je7 zDn#WfOVulP^{HxzKGrVbF$F`q zxl=?jHOY_`+4@-0(v$ftn|A{|*7xsMpZ8?hqh$56r^yt>GNW%aU+_f?Gyf_@`NjpG zyXb3Q_Hg-aZ+y*pAOC>=lk(+d;{TWZpVYF&m-zDXovWAlacrN*zSr;DJn`XS*MdvZ zp1*B>m*@OeEx_2d(*^5_R`0NMc%M+2~)y!ql2g1uvNff z7|A!JHQEbZ7P`1v-BJy}QCGP50mPC9qi~K+UhHN~Vt^^h0-<8D&e|-|BB8|DjEaI8 zjD;giv*t@D?CVrHpCLQ49&4m|FA2BNIsc zR@Huk#b}n2LA-(j%U^Zm3R;I^BBz%?B#?o{l>nhc%Z1Rnj5LhuT2EdLIgYy6#4236(Ws_b6%-5!fFZsZ z*jchSBs6z9m$sphCCtTsI~Tr&+TwQB+0TNXCDAoRyk_f1jS8s*(GBg2!$BZa&Pq-1yjBD2w23K?ZoV}8=DRLE4tCL zr8C-c%hnX^Oxo&Dbd9#-n%{CC0%@ZqTw9kWK3szNp0bZ#_gBjm%gW?Ljcmwc<<-k> zP+a&hw%_~mXHqj?VP3oJp}SZ7=>9dHGW)%++i$(*zO_s4UUS3R6<4jf^#}e&>W<}C zsG60`{(pJ-jhEfG>V`XSyTa$O?^^M_m5aans=YS#BiOHg)mHvY$|L3@_I}Q>`_KG1 zwy!WNlYJ!n#G`lek?hwVvh7j(ZE@HupXdLi)XRtL^*sN-yz90bE_&%^#~=2W?>b>W ze=K#_zB|8aj~)JU+s!xKc;@@dGqv)Q*2~8nc}QPj-et#a)n$rAe=Oxw*?y~89jacd zG_FWg-@&LW6>jV*I2M(iQC@u{TU7|7GNqjK&bJ_}qEr>~QfVqARfTFpMJ4xYUZcM3 z?D4K4)nypW>RH_iTRL|x!f|J>_EhG3GLAvD%OWcoqV>t=N?NVYb>pn;MGFZaNn)n( z;(AC8BbFf{%$<%9SVhkZOiP-+h;*%jS_m#O)Wyjw5(x{ca@Um@5s1zRW0XtwlCfOY zO(ure70ZN?-m)u%6~*dhbupp%s&XH|Rvmkw-0y&~)b;z=-~7tw{W$hXZ+-pQXPk7- zhu;0AzyGj@%Sz_&U-b8julcs;%Xh4}LA~tse(3I*hlwsUt!v69rXC>~*@+j5UMm<$ zmm#}KZ4_Buk1i+*ab4#N@A+~iPxjc5OkaQCqh*}xQ%updUB&PcP>EB-lC#$eUF9S% zm?;l{mzQJ&DnmxIK$Q!A?hR3*Dx!8uJH_)tDTyQOkRry(MYozAi6bJiN!PdIrOhJX2P%=`I>{@n#1`oZs7!rWNV5nG;7>YJ8t@M%zM?1A+82A?1 z<{zLy+Pq*yY4u7#KGk{{z>KoQHUg!L5Fv9%IA?TBaIDGt<~0%?3g!_dBetd>=a|`H z1a11J*-RM$#Da$b$hS$AL!(+l#flTyA~@fiF!1psDRh~H&xpbFhg~L}Iqa?=LxH;r zguoA+aq#4>mA}>m`8rpdDHxt19WtXZ&B##9qH`BoloAMo)>$nvcmSg~n^6{EGDb^M zca8+pdR1>~EgCk8ISvw0_`+z*O2SqZ0FoV83#L1w7&SXc36x~C?l_88kH6hn#$68s zaMB5&9geMS&s%K_+OD)o&~<-L+V*JMfsFi{5ppkmCM#CqnvtwzK9XmI9Qo*$He)bD zoea{jsj!id$;g3X%ONY-mgme)W_Ev=oeNC66gpq+f`WN!N2p}>Av$c$-6+g}{pM#6 zE1A{HK8CGc?z7pI&#Goa>sH>pYU%Zgi~HBD@Of-sUY;qL*DmvyQXVm@n(tY&#Pj8c z@4fZGyO-X-Zt*?0U+>{^qF%mh#SJT${J{T7`Tp_`Z@;>F`PB!%@Q_z*<{zWf%SZ3` z{9_hv;s2NYIQH=eZ?9NZDW7oEj@rLE^hL)W_=2PN-}=Z!Tlzfq(fhs7|1a;k5Dv}TP^DP1)US=kF9x&^Ta@TSnD z=q;dShk-Asbt8NkJvCM)f9BJEsN6!SsXwNC_I#NrrFH$xmo~+Rx6eH7jjQ73m{Bz&u&zJp5_9fr>>@}DF!wpwmwDiWy)~>k8w+ws_xf-M^%Qd2^ za>cm@s|)Cch^wy`3PZZ8mK!slXiU}Tcx zh+YVB*~@_?Jd3!owa)aqFqV{Vp~}sSc;Oinp(ik^Go#HHzJgj;hU|DC6~%o>z9-b) z#Nb$5qq$H9PszkdZ~84=nC7T$eYAm%ipxcp-so^|Muu2Aa4jQ!tYinbr)me+03ZQ3bk3P>;znOH*>xn-B<&5N&H zwfq(sLothz;7`VW9w!VVOEsg=Bl`4~iUEveWW`dpe3Rj5Xi8hfWamI3_7cifqEarL z4F)k(0fjC5O*dTY?r$?75Jf9Gq_%|N8Jis=3`di)0g$cHXbrJz*@4T4Bau}FJDq)O zZ^E}+xr;cuEJU}N)^f2yl7qS1cmR|nqrH=VjPmJ0v4jZUHbx6y2=)b~%^1#BRjQcH zPDDAFZ+juWEM}pFDZ`R1bI1i{PSC*G=^Uqyq6=Y^qm&2*-VP9qoiL86ISR7P2mm~$ zfQrty910Cj3CZ5XA)N`jsLYWU9Y!E)-fGASEKZ~4R$Es}NMyo&fFl$%Hla%|Dy>7t zf`=oRa1vmbSW>D+laWA&xT6yWISeXBnJ}_cH;pZKX-K$503$7?p{AgvBjKyE;2RZ} zW*V{t=psfq1x$_fTuN-ZlOOp5B*5VfL5}WVV2`KdW1G_&8q6`skg;0H5Tn*fWsZDX zfe|-kc5z1z>=uMfSCx@q5W@Pmu*gdwCB226GtD{knyxU~G8As`M8&H$AGvinjFwM$ zx>+EhEn@(=;Ps(-ES2xuA?=L6DQt9qZ%h6V;+EN9CdI zL1G`$2Btf{?b+CdjZ6fUp|+IhB`kq*E@F7emzZwJG)Kl@6g-UWG}^0(F0?I&eCQ=f zA#(0}$~JQLMF3#H^maZh3kt8MJRQZ%u^k${1u2}0*f*DbBwMBIGuSF+&y@Z4*KdFR zh@pS_)lYmF+mmI+ei=-$JR|F7ej;13tZKf0?JXWLtCweG=|lJ2;%Bk%yx=IclHhsh1VYuRCbF zK8fwezK%Iy%VQ4MiunFAIsUMnk2`4lLtn9_zhL&wWj~I6(uw=~KPmseeBhotzI40i zZuU%{z@B-wyus6-`t%JqT3@}aDBE}M-8b3z886vodzGdFP!+6RQ=L(iqLpdvDn!Mj z*5gt2quOl%kW|otDgAmRsu-mBoHiE~sM1e`sIK%h7T9RztsyQdP!%g$U8Syal2JG= z$i+e>6Rq6DRf;Ng!Ac2+yRwuW7zycf)OaK;mO{8ky?6xTQVzmNTdB^%4otA=GU7*7 zEX;=3rNIJR{f>d1E;_JQVuA4tU%FANfx)Q6tO*Hhrqa-5_nrd`up##9WuvpvwVyrr zWB#exs?Pk?vQK4eFaEYl-yc$AnE7lrQ8J(R$&Y;QQ-Al>&!6YVvETIiqt89()X#qM zj4yoV4BubA;`?8^@tSY={<31(BWCXcRWG;CcV)RcTq9lwT%oQVWfz@vg?3c}7&R0I z%vDKe#{&sZY1DPf;wc-9B4RxGWu(KfqvcB|Q4Ix*449yV1-MICbUKU6*^j~W8Inz8$#2RWEpZHbnaZ5MG@z;V=paq$;;E!ngaJobWKNy;>xfj*(F1Y zIKRa}3R))NS(P*_`rhvgZXvcz!!rts0bV^_?PJ}GB zm%8TI;bX+ij)Z$rYApz`&`nmil(N&M(d;PcID=s@q>DD8b1_=-CJtZ-xhV@|D6baW zC}b`zNXsjEu!iz&al#;(!luZP7$aJ+a@I8@2h7%i2rY*J!%Vmk#vxfUmNPvt!W6Xg z52(=PDPk^8&e#k!n!6S;Roq=QA!~uH zz?g5jF-|e`k@ouLRUnR#rB^+eWw04-c3MNpARkH$3jC%cy}@HHNpsiMMd+M#nRvA! zsW>1qp+aTH0x!V*-0*(=(lsrYwtw6aIi%uj@C>cf9te4f z6G^fMQ1KtFZ_V^A+Pw0 zzQ64GvSOJWwaBNkw>Wyg7x{^7#qtNe?Uj4&_N-_AiRa4-N#&tpO_8hmQQ6yulqmG*O1$v8 zN<2lM4?HV}Rgua)QhloH1K0{!0wY`xVYTW?7#1PI;~bA-J;<({G$fn^3<0QKt9C`# z#i`oWN}C0zN4-Q5EiqjQaMU#y1!-(_aYgsFWqPG95`-#TJajD#Dm6GAn^B5nILeu$ zOz1+G9cFmz;~6)?>SZvFM$t-!T~|I7j^3sKNUUMYTQ%+BvVV#C=toZT>^S|q-|_7G zlb2oM-?Cl#4}6st8mE7XP7t4G%+ z-DtI8S05x7;AjCt!gQJIVkQ)$8MSzXCX|fjiH8MeFK903I9Yl%Km{qT?dT%J z(*)U3Wl5JnNsexgT)dDfmKC0S8HFy-G8B~2QPUmE0>elc1!{?Ok+USa4KX%NmX3u? z3|k-$&mvW#0N#U08G%Wcbzm`rbZ z-QC>ayzOA(Ca5)Eg!Hmy(Y1PtW#^T9&`i+vRAJpT+Xl49E@HP`?bIj}380eR*p28i zjKgSnTbTC|SZIu@5waTp(;Q7$uMuTt*F*8-NUf zO%O&`xRMcw3nK-s=$Oqymr%H%g5g3SQLz(>E+f4_I1@uG?Difk{REAb(dB{=0H;rY zd%i4CD`~VOl-q zES;`cFr_UVFke%%;}Rub0Lh1mkQWl}j!tX{OIx=<*2*Yj)5L}W%$736kXY%G;l(nN zVHZ>h!;!*ER*YO&X7;7CDZyunzx+#5EHctFI~Y<3%;pH+WaEcvBCR~6_2dFW5vXul z0LidR!%%jBAuLJrVu#N{k5*SWRPnF_kI=b3 zp~O(;lu{Cci+0S&kfqZWT??rlX=DdLJ{GXmK-FBr&;f*Yy>tthH~yWo8D5{N>I)$HgjEj4$_0EpWf!Vt_e0onqxY}%4P{?d?%SQJV}E?>K1BXZt+%YY%?mv9H?7mzVvYl;_KSAKQ~<|GcbTCW>Wp$Sa<6 z;2xetzra_R)ys$N?YFm#SX%l}E8 z_lZ+|dHKRGo#QWvEg~OR$+e-(d|F`lV zc{49xld2ycCreTYMv9`toh`ruHKdJ7R@O6SM#&X$8>`mH@TTkQ5XNbtCu z3yQp5fqSJ*LR*4v>%|@SQ(bl`4GG3Q={5}}in(yKAE4W7Rq%4Ouj{!XT|^r^K^g5~ zU`V?@Mx5im&&YyJ+Ui`726WpfV&kP-R$SO6CIO+@k+QC>UXZ z84E(uWi+p`6lItrT~I?_3%Jus^W`OHU&LN@+fpQEAvcRpf#)7iODBf~1)I>2CPiSl z+t1KjD;5keWhlhZDY_U0r7S4p3@p7!BE&3&l`#=*5nCem5fn^%t%I{mIFq#kCMZvK zU@|9R?t_5>9LBSFAp&E@CT&C2MA#skh$xoVX+U8ICaAcGYhGIUbT*n7i!KG2ixA;; z%}%>up5|U+Fv#2I^DRnO6CM@}07QBgb1W5y8Sx{MS&*nlIU1i%B(OevBn6Qgqg9Akqq zG*XJ6#VCwG6eoXd&5mWz3SD#wyVyf=EWLPSMR@6wd@-w${h@h(0oJi|i3x;|yI3x2 zms>`5^CGB_NVv(O1xBUC+{HT%cL_ss6wze@(?!}W;0p)C3tk|zh|22&3nXJlgfFUK zMKM{)!(+&v3p-{(rP?T-R;FwlYKd~hY|GIOVeAuZFl-fSDWnaC&BM$t<%x{}1&Qfc zG3!Q`4`Ck|v7{si`2YxK3awaPS}tI@%Ump53(@6ccj1wTckJab-FuWXq&-`so-)9* zaJ1QLmk73?xQQiPAnjYYb7agi>Sd!U<%(t1v5NTKJ8tt)RS%RI70W)A?VFw~zx&0b zEDznY=9fRZ?-!5UKZFB#uo?)S0P%RZ9*;2k&l zJoc}DwDN(qSKPbmyPx~SoA-IiGnCASzjAZGkA2{7&+-%52fuu?177+peSRC9`_jN3xID=Y_97{3UOF?JN8X^BazSKXAm46FiGQ4(dZ+K@#ls?5{^aH&XFOIEjnDKM)awJK9}u>#gpT6P5{OOKTW z;-Vm}mRE#ccFA{C&B)c!z+mvY0RA)zn+g|REEYUOJ|k8NfEi(yGrTD5>4r=}S4#9I zVo0nw)#Kg}=p*io=3;_X-!kb@zLk+GW2?aqQyQq)1&tHGOmu}aeG!R3_)C?SC||$e z(_i}RN6-4;NvECsmQS4hzVknE#@~PH%x_)znTx*uxvMYx#?9A%-{-NH-+bk|Rg3Ri zx9t8qZ=GKLuE8PfE-qJ!wrf^PcLf<5B_2F`;Bj$vc{0w_BeRbPS=2;|H8n*c*f}z) zELebxf|*gvx2u@FDvYI3E)-rYfBgNge7SY`vL#+gVgr_T)f*@VJ56 zEl_mbRcG#)vp>11+xDP4siCI!u&+Y+b4K5x7TVeF9W8HLJ@)8>&NR#J4F_L@fAj&P zci*%2&bw~E$C35^jIV_IzQ3=u`#&SAr>9&*AFtx=U7JFwNW{-SH?LIi8Y ze&W*h@qpQ-lcZ@{0rN5oT|#Fxk@-jV$t_$+>uKEx-I`!-F^!5#WI{JnXRPU%f)iue zO*gJvvr^Y-;C#0G@}FE*z$!HGPRQHJa)L5t~eyiq))MZjZ7 z6kQQ5rWKQeaJ;0{3DAkrLGM7DLd#-(#=PKv_VXWiLOOpaKmWx~%+9PI``M4UXmy=$ zt+4ZuFN+pAW^7!XZ@SYHxU-R$v+I|?`WaYhw4O6dH+#l{u~*+#g$|d`S2QcGc_G{3QQD-ta}9Tj`A3aZOER=6pYCr+oU^O-m& zwYpv_oUTOo7ytNEU8iC%4xQ?Zq!TijtH9alwCqZu^NI(dQ92`+Azcw%4CT&(-xMT2 ze?7J0it(IN_g=k(}I?5a|&{<+@E=XpM%j|IvIWjaPdIx&2l|Vrm%S4_- z7)uqKQTTD9X3@%l(Ye}7L>Tk+g2^tfrE6-2NV8)^UXrzjPzeV>7jBR@NBbOP+W-6i z`foP>9z@#lwvT1Edzsl|%zhlSeU}Z1t;mqJE0AqVD7H6T+UK+zWNbfUH^pwx_M2Zm z*6ynf55T{7AGp(Rd8vhc;@b0N)v;gq@`Yu^vQKFH11X~(Iui=O5AV9|7eBt|mp{Gl zk^5Iac+ZNTKD>@RaQAX83;FSbcRYOGs=LuDtfK`-^dQ`l8)syrXbKJ$$CD}j}GN>rlcRBSrpYFHJb8c@;44xkcL zEmr5Us0WpO zMpc?Za|dQ*VV6%8s8|K9o;9Qkti=N(7L44Q4m%^TcMSU4HFBXzUPiqkAU$?dqgWQ9 zMGV6tJ+!C@1;8jHK$8-VU8{?hi@D335$0=O{*-$8%n!WleeZeG$Id$CypNw=z5MNe z`0Q1ee*KmkE?sfURem4a8S6wP6$P#r7eyD2OUz~B`qE}m2)G!#;uv)+62o*!uCI zK^YxCuG%DrBW#$2l17<N@eo11`HGLw=?|<_PE!{!^aKGusYi?V* z*xMuQ9fGE56kCbKIe2spVb(^aWgH8hmll9T6*BT-xgWXFOmAdvL*4$|$lSu*)^)o> z>2|vjw@INw-ujF@r9nmlbs;mtm$tT}z+%X1xHq{QE-126zbNA5Lzi16kM2;+g-Ei} z&!oy9*c=ljfK69*N_ByKl)YB_G21o*0LE^wrKGJSY5%77QL3n36(dO6=~bAhWe@_x zRLUtp0vKY{MxK#{qgI82-0o2!qm>E3KmW_`vE8=(mRpuCuB>JxH{W=jMX5vZ7S?-+E-XQj#fAq)#BpouDS`?!3u(c@7Cg95VjQ&b2vN9JQWvKcbFdPnt8J}pEr@5n zq`6xJCxfZgc81vJo3c}=8S%18^em9R#LZ$U%oI{9RTe{521yGdmcrcjuOos1l`g!x zwbhWeMR2%f``h3Bl94#{<~1hOMC|JA@H09e2w!!c2@hab`D(_|W&@?!voaqbNP%Q1UX(2nG}AkhYEM!MKatWoZr z*bWK-5JJm7B1Q{FtPenQiCL?wO{QKP!)1jPUi&- zRwkvvCH#okivwmB(M{P)tBaJyyo`>NC@l+y65v89E6h{!CRormB>(;2{`JP|uC%?s z=E_U2zT*3~J@y!eY%pvy=xxkwKiZ|Rkak31@agTB>H|izSsP~?Mv>u?W+4%U+*i-cdxkWC-*M-?pHtRJIF^Z+V<7^Z}ZxNw)e}} ze(6iG?2n}s#cE_F^K16smaZ<=9=7N6j(Eiu)yvh<#~-}on~&M`UB~bJrlVhW{6V`M z^ztos+Tu?(dHR1`FYmDJ3)IWoZ?m;J_sws7!&zs1fb#Kk&QcxwPvvUl4}IW$ES|O+ zQaG!i&;R7d8&XrNmwkcuYhU@22hEB!m9jPk#$!`;oZ|PrC%xT29anf(-_ezI%0o4* z5>Lt5*M1e470WC}4MA!dRh>#pgu0d0D#X}W)T)MfDMED>ysAr8t@BDbDHiZmKe*(I z%f73q#K{PNR(Xq!BLx@_UuCsXNM1$_1q0wBs62^Lm8>Bxdy~?wn%0H!*w;G-sDvmD zvtZZlu`-hC;*lo{1};6DE)d;Xu`8~{GFk67h{fI#$&*+#LqbUM!Y`hyUjFcDe{;%7 ze|^q}-m6~z+^5g_=2y>GM_+dFR~BFM-DQg}Uvt}a9xgjwz4=oe*VWgB;~KH^XIH9p z-MI`^5G;gJO;nAbF6bKM1q{h+h_0fE3rht~mmxhomr+?5qPv6{1ybJyi=VNJ*JVo} zbrV0q8cj+M1HsRQ z$YD;sY?N=|D7s=zkA)#ok}-WDvLkGT%Z(8UDTmfG45O5^;p>)Ob_^*aB24+P8vjwOB;kW-N76pdb*E>0Hz2!#7g-cOPhfCcO(*hS=t6Na5ZikqW3(FiZN8!pX zUjT}O?jGdUOBfa!mK}RwriNO#%;TwPv;mk=m z*&9rC)t{Bp%EV1sdPdztrQ}xIF2Y>c-G$XMRg2w)n;qS)xw^m(jo5Lu%c9s;*^en; zmBXI9c`&CcQFruUcjc;EtC8ES!9%J<+UcoQh#_nh#Q<&FSgKR(^xEu!6}=X|pjwvl zk%=^LmIg;VJ)AR-m}kFGY}N`xn~m0?>Tl-g%s%o^-Ws)|(=}w>S|2SJc3v|}F&MWo3dA9+RRI(`PYR0Me|@PS6Df`GSHx zek~qb;ubdA+8Nr}M@|l+JZUKBH8QTCceE2e8mYmOqCnzkL?m>NraLSA-{)?62f z&!~I8Z0P_Dc_*Q8PDpG{Ki#%}T!iK&G1B(Flfg-K-St;lqA{cLuSBiBU@-3|je|b^3!#gci&AA*7Z*2h3<0K4~17@|1&4j ztT@Ly6`f`*E?9u^LgB1)-ZJ*ip>8K<=cLvsaT?Q|s~kI-J9Sx*BjiW{(*ls`6ByD03Pc&_iFe_8#b~L8 z;0vQ2m?DC8QE)HqFk0k}EUy3@{SrAIb;+BS1m*ZfTEjE_n)vtK{%Rl|b-rhdn4pZIn zyI*_Hab}N1+9TNx*(tR%X`=!#Fl|NbMkrkB+P{%uY{$alnYBmE?2xFkkFI)IMXWAX zAbZN3?pLxE%gX0Qd$J5ee*UAomBV-6ezSV{u}ALw$wO;(@44d^RkP|t;p{W6T2Gb< z`~B-~@?&4><#o%hxb6DysF&}$?b0*<_VvDlOpZHnyEh!Z%bSjV$%%*WeDuE0f7Py= z9Q8_n#Js(7*h6GBvhFeazJMIQ*Ym5FUw69`@41_IkmK zxBk;-Z}fP*yrIX;aE<@I&77 zH*fDT@%v7Gw@1&4Ur&&YdaUfz+G=MXy;eS}m%nl01%|4TmDQd#v-n~#@o3dyRqpu~ zv+qhP;*@*33PWY7TF-M{qDxmlst^^Viatigpt9GA;m@5FfQA&6hLqJtZNgCjohOo)G=;~IqRm71PWue!_PXw*zWkiLC zi&PD1Whirw3UCpH&?Q1QqvGmf)>MO<^BdmJD;9~50 z1g7wcXGCES%(sYL%b3}H*w_>dxzfE9@M6TnLeb?858N~<^F<|Cg${32TMkVDEs~hE zjHt%!;EifAFb-= z=X&a8Tak*OZs<~#kBu3nd#gJsp+kCXYUj{?zDc#0aD%p+ve&YeYSWcoaZ%Z857tJ` zkm}?2fB3bB9dsB&ZPF@oD@N2K6c&WYZOq1=kR@h-+%dotU7YYL9pNA|R%BD9uc>gJ|L`_HvfOUcz#J(tn%}D87ZfWq1P_LePD@>Mt!}$(r-TW5 ztZWnO%Z5r?$u|r0>P&Xd6BB_H)YQg~uGz5wbBDKpPN#NdKA8G{{Odpa$ReZ9brVQS z=RF6GT$Gbm-fpmC1ZMgpRVUK#e*Y_NWv_LUIY8$OSi)%C%$l9W+IsVZqawm-zzEQX z^C_zDP7h~bM*%sQ!{HQ~|9si%;p8YfDzyojIj?3v(aPSj%RW&6qG{ctXI?)8G;^y|{lI@O))JGTrs!Y@~3{x9ft=C0HbQoWNl4S+pDp zmrnRj9vMonb&$MSn`TNYsbz68WH&@wF-G&^(mI$pPF?n1LahTYqeJ*IS{Y|4>9n`_ z6faO5Ef=1VRf%bN1>*}wblFR_9Lf!q(a1-13JiS5jxWNHhLYS%6}ayFAr?c^8L?ny zVNcg3d?4-U+5$xdFuFZQ zRvjyriLMW3`&n$3MAe)Uz=yF_#Oh@ald?Z^1efOFh9=&Us z&tof@{ROjn`3K+n)E+xM`@r3wci7%rAA7*IuRCN%&zFxoU>pA`b?Ba(sf#^XRuQX? z58dOrhwT2Gqxad`Gi9I0Rzb6qKBwLD<@cVr=xxXDb5 z85^vxUf%M#e{tL~M=F6&eeX#MUiGi1%rD(#N8f1Pdym~bSym>WaoVY;o$_8oo;Z7s z>@(P>zyB0ZmRSJQ&K&#lGO&tQMfdHmUwHAizD^X08g-lptjbuTLR9ejv1p|qMaAdP zSNVde4e2)6%2lJPP7jOK%T>p^3SoHlAZ7(4g%_PD8+G|A6)B2k&xmyul5(J^G(DeX zM*_p0SDY+$v4H`LE>N*xWW^p{9SQ)CdE^5i0d}z%1H;H%@k;>cWnS@XYV3?7is%aA zo+)FKqM~tIBv8Df)=Wy*1%$HH^X=CGnRbs`*l?94TPjDIDe2bmGzKGG>QIrDYUCV!Y;v zlt}Jixl1eo4uGd2EwCI)AQ+1-<9K0g3hXk0i9}m6>~t#+HoK!==gkT)vn=d>evRI% zZj%)hNnQYqbEN3gql_6+D`IK1EUnBK3f5d^l7C{`KpD4Y_Z@ooAor&3T#p0(=h|E> zBxNpPX|o71(h!I)pBA%}=k1R{Ud%u&W+Z=+dj@)$D;C?g0K^;D98-~GY)z3df9X=jv1Mt)pPhv(_(hr zb&1t>8P>8mZ7d_9tWS(O7DEDebQ%(Uh^nxXCp>@yA)i*8&H@ZWD3W9(QXTp5WhtLV zhYmk=8Ph?_T{k>MmobGqJ)svUW8EArnxTX}05AEB2~t!6V|EnXsU<>fE|GW}p?Y~5 zI_9XnTu=&~T|2;$ge+X-#RZeU1tbGGvckitYu#LKjs-A_u2aQ%MV#}5(q-$cb`m-p zT^Ij9XZHcUT~XzG9?3b!mz;A3$p|7DB&mP_MNF6wL_ondU`}mDf`H@tt-jdF;0{N#B5Q|dOdFo$u7(IcWO3_Y|ZLlv6ve3K>;8N!rjUJV&0X@yTiBPBpS zn;S3Mb$jSZRoXNhe4LTUloFFGlGN9zs@U0qub1IoGO0pLZW&KsHyWUZsa310TLVII z+8#wo6Kd6JrM59*a%{d8Q~g>NqTC^141DBcisi9OT?mJ!8BVvh)6Oi36+^6%p1 zLK?y}6P|=p^IKo;|I0pq9VoUd8(y})3^+^p$lz?`J+a3ok29jIhveF3DDpc#;ujoD~@ zsVYJA+Ts`#4kZL=){0^9tiJTf?8bsg4CaM`rJ5u_6(u2bVpPdBOkNf9KwyL_NvC-R zO*u)OQl-;@mu=P7T*8@H_2GBgl6}@`uX)u8N5jh&mp}cncc1^64_@^7k6ix6Pl3!g zU;D*-Z@v{plpwgHypH6cGOmarTX_#)ORp9aNgK7+HF%zTqpH7BOuE;49JSg2~c zqv$NGVV5S7iltn^j4TNW1|B(FYWS4QH`P3adS{6)_&GPylPeO_BY_gjGlsC#SQ-vJ zE7#3N9orWJszIOF%ey3eRWM0x@Qi??mz?4dSQ&JF<4#XD7lVE zW>^ZV#YAYLVFN+nNbJ#Yh-pQ5qU!0VyKYAAMBQ{E+_~J%-QX0*-4HZpn1(c@gigJ% zpDT2oZNn9V$f|~uJq)OJjA&xg%uRoG&Jw{`cUy$G9Xug64DL3{<^T~wRd-c4)<&nf zkVzaR5-Yeu!w#^9AWfi!33@;r@-PLyCgERv;DwA<0l;#S6c(PPA4e0l_&78xCZlRB zWJ4*@xRg)qHwL8v zH*}sMqc*3J6gvTZKmGBK#Dv-kGx*Aip%u~;;tVI#&;)1>Xe+AWPkahhm5fd*;>|N? z$~#3S z2M7;*sDuN>*uVPOPjpvvswk)SpMU&A$K9c1B# zM(D;sILd6{F@7^T8Z9+kk`<-q~QcrWK>m~X3Y9U7)2*C0r*Zr)LqJ_+xDCx&geyvo)S9yGeb@}o$OJS z!ZEt^Oy`8@$UZJL=~W}2C4;&nR5!BjLMn}=q&vxb;fz%m)~rmj?5 zlFf7ck%mJ3Vv^BN(nO-!5SYh@^GuA6j$BpaN+zG+XvGPbQpbH9Q?1Yh2&nKx8p3~$R7R{LyPA38-%@KMeNo}Uq3;Cgn{uI^)`Ha*yF;lUWDh=z7K=x-U zx+ZF$sVaH3l+am6X{(j0+LCmtc0a?6MV^)G%Rpu!iW2rvrdMvO!fEm#uO6oa8j786Fx6$zAzbHp%3K2^z!wbC|< zmXaj^*LU?}}UbeUl zGC%R)4gd0;2OqiPl4B0sevhqJ^8WJC`)zXY?rZO|$>Q6s`>Z{;TI!J9*J$^3h}g=q z^<}78acsthm(h|v+s7ZY6}uT8e!XoXdmnQX*{eE+A#<^OpgXtKz&CNDLg^zxU2z$d@@l}8J6g4P{&W1)N6{eTBOTASjXI^guvPUS1V>#%B*EC*U(x(n{Aj9814q z9l^_9Res0muM01K{DWuPj14SHmY6|iU&*#5+vW$)cI<+1>V-p{XwH}7f4LI`2g)M= zM}!&zDV=VF)xv$wSsXH*x#{WbL*G941~N`e3R5B{88#uHc^DAUxv4E$N)`E(YbD|1 zka0Te7d?8+cmfO?%}J7}NFc*z=em%l(P)~7or=S3&Cw~zwysDx2&s-3CTS@B4ZK_v zW06Ml)Hc{OpmjPd90K@KW0%vADh`*>^8Bxsq=uAa+w7b$4F*zoax#i+K+7pfEk1iZ zZy^s9Q){Z&tI!ob=Mcb@XM^VR3^OsRR1s@Xn0l#G4M)-ze+VOTm$pX4!*cg8H=yoZ z-HhCK+?epi#;w|P2gG-uf|sc^G)a#XHm;t>>Mp~EAF>UfWJV5q@{i)+7>>T?o7@)h zi;CG6LS$#R*K)N9kR0nx?z4pxn8ZX)qAUqflqCRzU+ilIG(=fI2XG3s{_N-fV)4#C zSa20nl_nr?0b6ib`TxN+y zeX-L@aZ#%(W3hApS9pq;mV3DEh}zO#QgXtK5Xpfl8K7YskVsbs*b`b)aEOk zFbL(k8+ZgQC9esBTRQZJjt;*e6eZr9K4gjs$Cs1g1fe^ziUgdGIN>sM0u}KUW7eY_ zv-n?Iv|3E5z8tkNQFT#URn668@atV|+Sr4ZvVYL?m@%3Y(<9pwKq$r}liJZ%7cuc0 z&d@W>S4FavB$^VrBK^Z-BukoO8X5uTzLfrPI%_2ntE17-=|H%?2%~c;JtdOWHVRAv z(|DNx&S*8d&?-qfGd`0DOk6YrSGy86qfEliXn8XYA`VI`nz3>+E*;JqGA6%-V`fe+ zATy8Ph7yw;KUH06RY|6FP3o%?fdN^_Hg0tIG2OZt^Gz9Z9OBG-P4DCq`klswA{p`lB;)n$r-plF-uVPTMR;3=PCGV{|m#me{m6 zRAmM}Iwd})DxmtqZ+~IWo=<)$`gpYfLwq7`5u8Xuk&tg3VTq85K+6`G1d3bGMPfne zdGIo|+9pG6o>mqA>AO$-`1{`ghhgN&>&f4G+(NTo)3!JJsWs)PZ-4pH7j$ehc3;Cr z_vW${XDiDR0!+5P42(=r^UV74{E`1m-pXg?Zfw8|A1ZQ z?7Ha^JFLGDylhLhMPx7;9JZ8DMj%`i(l_$K!^Jh33eE#ewKMnx*vNO~SDYyD8Z{4#MPiPKcgqLADs1|62 z{s7W|9*7r^l!t{01cyOCUxAB;4K)E~5gG!FkR$n!EUaopSdkK2zO65l9Jn&II5hZP zH{c2LVlvM@Z3@w~)m$Q=7^+oE#>%)ZGSw(F#HUd2m~u@dJ+fKC)Ep`&8EV#wPG-|j zOmp@ppm_~>2~9?kJORQul<*0nM-BAwK;02$8-G%0bYvj(iJc$$htK`vxxS1IFWZ$3 zK6`Wdi|2jR%JL0YU2y%C=Sz3oe7X5K@bXhYV`|CLiQ`mqCW5+_4ud@407T<#a;`~a zpd@e!C~zu~fmWtrh31NyJN2C78qy}=qdPa9o{E7T1WDU^Y12^v6O&{KO`(L)kcBP? zBwY$9ENROgR*^xInbt<~ZzR1?PN6*W=p2KhP!;8BDVac_8F>SjvO%s#qZr1pvmP_M zCfcS}6#*6w`w@zm1c+sfoHz0|dL#lP(Ja-M=BaJ$_=qlm_{qrer(Ig}#L&4yUt|>H z=nkJY!)&AusP}8AMX->VOL`7zAUFo)NGalH+dRGNCH;upTU%Z0rE7bJY1n}viO$v9 za#sdGE^cAncieQ`?Bs4?_+owu;l|p13c((#xR+W+dc~8qIOd_Q)#eAeUi65=wh}sf z)aIFF&xsBLX?~>;Y6ky4mb(sObnhGj@&Rw1|kZd3cAq1 ztXzl%&WNez-g@K5KK`M%ochL-UiI=Vw%p{%qYnGvht9RfRXirH6M%{3^Di0V+8OTzJZHC96O^PuCUxJ==EQuBM&<`eCK7N zqo02$=WwYISI(s%o+g4Z&ZkGBgiapIpK?{h;hf2(lY{c~+Ij%Z*n1jaq&y?ym1DLw zeNvox;d*wiulXi!%#-w&cs1p^E>y-ZmZo3K*~GAS+KMo2>kB0Q1!;Zxg3o^jYLEzU z8b=wgAsA!xL~dS5I6T$>O(bv_DCWJ5)?eR^-UD<0%Yc{XthSn^W?f(&d(4s7UU`{S zynM(%y3p2h7}olX%JXj$&gk~f8u(ljtxcQdr4?XNa zQOYW-uDs<|TQ0uD;)^b};IhjtyXIPR7F&FwrI%UU6O}Jr_C-kC(;nWY#DUN@ZTw^U zITm@3LSk%iXjGv&S&z+OGTFm&rxF)A`khCL{FZ0r_(sZarDjD;w1g&~5_-@T3zZAz zr#a~-$&L7R8XS|WO;z>~BcN+j*YgU8)q%rId9_-Z5~<2;8VJykHpDdTWEohJn1<;g zgJu=rBV`_@T6~gv!7QQcWiSzB26OP4SE6LP7R_2`+_cS@y2IpHG&?1Pq+j|~5y+$z zs=}B4zpgJ+!btjQrbLmFdW^#`sjl&jLe6_?b)DG=Q%LG8MwnVnMTCTJ61wn82(!)% zM3|pwwFr-ZY{^!vtyzjDBRO|W;I6CaH0XtljU1Y*s=Ma7t9Pav3)^xO+DIKr6Uik= zj&BqVHn9df1~u)F=|>7F96qNxNey$FUK}I2bTUK2jDx8*vuUVPIf0BH25?KKGvR50x+AQ4rpn3GJgf9I zx}&g8QzEYml9Gl)AkHXOMGyUq$#rCCs3F2}6o*KKH$^^z4UvghMW7&w>fFemSa-g(uNkKFdHM{oVcLpR@k?Rj_I zaKQs}FZ_qoUh4g2`>}1t_Q|hQ+dN9~L_Vy}(&+i@-blUj9|1r`IA|K9qt4Odo;y`c>M7IG}Q-~w)uTW`Jz zunZBewA|8enwxF3J}hiy*{vBg_6jp$cU8}~X|O{Z;_f%iuyf1JOQ=}Q>!~u;ZozkWwhGD9bhykRk2;j3{E2bhy!pJOCE0NTEY{n)a5zrjp z)^0djllTnK5WiLUkT3nL)r9M~NJn90%F%g%HjC+$SXF=dOOGW$mGS!Lvh`(<`Q($1 zeb3pa_&D~3pZl<%GWWeNE6X-x-*?BgK9N0HTz=#ZNC7(NWO1$`oF-19Xt}UINQhAD z%&kdhlcMIGV&hEI+=*60=bDpIF}11?^x&ga)w{M*r)?)GF)|}5N(9oD!E(YQWi9hS zr{p!S6-ieNeTC_|R^X5{x@=HvbaJ$gG_O0F$w-LIGdij?&k~*DxT$0j(#!$x(QEE2A4<_qB5Ir)b}O+3m?as~eMBl8)$PO!iR7Y^XV>)!mT6oaVD9Gg#6@ zV!$}cRk@qH>vjLtFRQvg8ki(7W>xE-ZTS_J222)QXn{o*eU`N9YAagBgIG!0^-9u4L6MkNQt_ME3{Bwn zJ8tnSC=s1rz>qc9TFt{)%f!&lF)uhm!=MT=@>=1D13_lFpbj>NeHgIqc3ZBs_Uhr~ zO*h+c<4xA5YJ&~eS#71|Z3f=&IeXr6!?hMPjSggPn1<{CNt~7|5aMJj%UtR;=Qo}F z8bHq@-=!B{#Qn%OGegj@IJjUA%)FT6zhe2Lw54$N+;;N~^fMbcbupOcj^vi$nbKiJhmt z_04O{S;fKi+~dVBc@ZWcz+)`uE~7C0fNOC+VN8#1Zo2upOD??-&2{@ecQ12tx#gFF znx#GV+~q?be&36ZJ=(5=?Y7@)optA!W6LbNguM{3Ij3*sY|AF!K7_I z6LvJYw}i%NYIVe$RD2?}?cZgg@Eaj8yKC=7pUW*}TD=PQ#3Gn=`$<7g~~ zhM9b>!%T9vCx-x;63LOqw2VV-9DV7L0j9cRib+pLQ}1<$Nrp2C)O&5qvrw*~+D(@Z$3iVUWxPl!t`8O?D7JoZ|9g;@>}!^LPT&K8=#`taSb^G|>D zZ5*h%x0``zuPxhg4Qp7Tv$CvcW47gHo3Zg*bVewa?wXTnb@9QgG>%+@O?Yo}UWa!wka$m*{FYmeKa(=-KID^uD zHRaFCptRg4vK5cncXR(=wjX=W3JWg1=-8F5rU@UM4N5gbbp(;oac!Vy)q^h7=Xfy(51aCEoLm`k0aR)QYfke#kBLPLK#;PkB z3Ux<=9#s*5RrRQqDoJ19DrBlJlI0WzQ8lcK_=K5Fts)s7&^feD*_@n0*|(Kb?8JW1<$}hYRGAlfo$zwo>bq@d0WH zB7g<(gOi;;ikQwj#c>v+oyDDl70hHBqTM_TrVNAt^hd6AQ&%CrySj73ZZQmrRnPuCRO}HSkkW?NF+}uzFP8< zDzv;2B~PIuW@DMwPXLpKlC~wn63LNLmHeP9(oiBih@pXz_#_d0(t%Kn$uLb^GbKzz zP(I;)viB~loP1Htf(oP1&1Q5+b1kF-rXoAiv0*tI^ zqI$c>EyjJPyIc1ow;l-)D_BKO->@1x&7~Ydp-$(UR7s;18BSwNM?B|X4n5sT^~e}j zb#ruoRLne9iPO6C5*BHc@Jm9t5;2$s{3$|im;>j+R^ltc6+-kShgFwcYEcjsMt;_U z&w9m)C!BfKTQ}Q$BS3YjrI!R%-t^{^tup`a_rDT}0X0wtLOhl#v^WhA76BF|QfrwQ zUS4hWmFTog{H}MOjS18U^K^~YL_-?9g6eJM_10gDJh03R#y~(@ZN1rs8?6f~EwSVx z7Q46IaZ1R<+2ww}&cn$YZ% zS*(Ivb;^Eb!@1EzGvgg!$A~sAr47Nc@^GK+>l1}u|NqhVI+WJ&HK)zyu3dKAZvA!E z@etcPQmm7?tF7#I?=giRVcdA_RrYDq zzv@aWt+&=1?(!R~v*tQ$t|_mDmFs0Lv6AGC)HF$%>hQACtn7vbH?^shu8s^z58AnyJ#egiyp} zDq-5>iUaz^sq3oLW-4NKn%!d@k_a|aB}aGqpmmWGI50D~rC!t`)V7fWh<0uUfe~iA zdFcA!%Gtx2*?8Fo38w>osgoTCG^WSobVktN^A0p1{{6rI)r&I;o9a#(!mg#{37h05 zc1p+)$j))>42sQ0frIEE%FzuOp$e^`gh;aD?3{c}rdtuqjhG`5!)zv?bEdq*9T8pI zp*90Br*)be0J1~V5IRQlOtX4Sz(7jFo(MX5L+@i`;CX@`kB-d58UYE*zoe# z?!W5l3*W!%7Rw+0oXN+&yuS=A`#!e+NgcV*x{$ILmIq$8tlU?>2PlZ@127v>`-dXmEk0TB}NKw4um8dN?*$@p%%Pll})a?1QV8+Hd z94pc8vk>=W>-D+sw)*VlWwacE_HAoRrNHLEo1mW;hl7@Kh%W#O<-vMzA|_M_5^4e= zs)Yx^Ly`zM7jQ-ghpnZOAxunFsz68rn#HGYKtsmhP%F_u4@vkm zS`p1i!~n(Y(W)Nfr#Ull)Z$16h^b*18I5V|iUz0)oOX#D9RtAm)Up9l*Pb@8HhE3p zWnu(0RCVf`PfiJ|tR~xo4KJVcvZDZJV7Z?!TVnS9vX6bi%hs2r$)HTcYs>xf~JDpt8f#HkmS zB#)^Uoo4bGnS6XzY`{n4EsyAd`g|OhD4hNV*$|J&cj76%R|Q6b(!&LKX~%eL~7Ydbm%HNg%Osfmo{~ zlmZhlt>rDd+>(ImF1zly<4)VY^B>-R*4s~)@3#BS&@QyH@FEMWzQ)QB^#A#P{YAVd zFe`-+MQ(s0a3n&rN{H@FRR9Ojwq9&i5w7t{vhKonFcDgFD33xQ`u!hx4|yPn87r)~ z40#DYdhP2@VwJ^ZMGKvt!CE8sijd*kbqpF9FK=|rY7)~_(wwswoxS6=lct493&!H;z{@w>c#XsTs#m{a>uoka_PAqS@WLa_C=XAbyWidh zW!cMY5!)N4m1r9h_{yM+MbQKUbuBT&+KVr-uz9%UR-2jz9>Q2(zT?ihAO6VufrWqk zFbic#;wbVbdNMbjel;>CCp!d1)B;^WvP4)SP$E-G6#9?z z+mmC|(@kiQblO4GmWH7OGl9&=fH7w#-g_Dpr?ci6FsfoYG@e?>wQ`Iyc(fu@!YR#T zMxAP?R<4-YiaN@?n&4y@rjx7Q(PJQuuDOv~O^DTyatbr1A)qy{7fOgpX+z-4M=!(d zy2fMFf zTdGr(AZ1m){WVbY51xF$_rC7C`4Zp9{_;In!^;oedeNsp@Rl7nUh3dI)<1O5b@zYv z8i((>&S873ec&#u`aZS=Wgo`T+l-CV#%!PbvJLyV1Gjv|5j*i*}dn^Iz_& zbjaO9eK#8r_Kw)%i%hm@dF*V@w*O4E$_y!c3O(?$eZp{@1l&P?u%UHNxX(H%ObYaY zt_Ua+xRc|z@(Tb4T0@BNW=evG#9CC9rx_5IXw!=(UWF;W#h*+ZUr>;!&2D3A}EjSh!ANlZnxG*FJ<7P%_MV4``6 zHumVz!m*quefwHlU1TsLStZq@R? z_OUNG+Q+{9f7#2+ci(!IUD@6juuTFi4HI=b6!|+ZIujLPJ+A`;GHBW~VI}d75O9lWzgeDMy9nGcsqD`$8M*SklGfYa7ZK;(rgND@749bDn44?;dK+7%^ zsiFj5Lo#7KL^?Gg6HOpf8&vWnOBpbVSxzRtCINf`h9Id*gPafNX-+IoZbv7NP=%TQ z=c#3_vFM9%GR&yE3>$iee>j#aP}me!GFPkQ<7wG zvbiS2aIz3lEv5s+jE+WB$+x&727b;4bAcOu*E;VyU}zz z>;5%6ch_^96K77q#f+=>2_PHIbq*(1{K*32aF zb3t#hy0|wugBUbUXiE-=S-uRB;n$WD_!OWMg9YmZRA|6N=;bbOz=6-T_P5QpTb%p8 zcfmMbIJFTP+J%&@_PR&xf55&F3MCTsS2!nBOCgR$PY8%wVmID&t%W~$83YEG7G7jw zcv-xsYYl0rYxoFZ5%!>ipJzFHr=7QhkMyK#&nAe@f zYd*$qm?9-E;wyeL(gv$2(&*6v4-hQv zG(5;fr2qxBlt{EmO@n3vfr-q_UScB>+zO24dBbo#2LNOF)<}&JF&XL1qCYqpBrgxt z>(_A5)jEg<608fm@v0*Il8!LxEU31@+~*r zNO7m_wst~smtOdsLQg^*Ye?BFG6%e6>?qqj0OX3^!Bra)g0#G(2pVvjFJLPojLQTt z8GQE5BbVpwvxl#Hd0`pi_JjvaAa6!J|A<3u_9nKsMOE@J*El%^ZhB#xcqBYbUkyTluVXy*Ut$4eTJ25sBCZmcN4d}WcFx_>` z%p1la>NGxDlWLP68ZiG?l1??0Qzea}u_$IC;{P{Z&Ys!1j!KMFtBV}MIcQ+n3bIbo zwZ#Em6dSC=VKPh}I?c>LSB7vaBD!g2mYSafEF(J8O=-^$yHvV1B3w@Y?Js{K|GCe9 zYWp3wwynYwq;~Jf?fmhAKcpPi{`oI{V$OGk2b|$(^I4H{MlzU7S!jS++o2hG{Kl2@ za{|tgul9dEe6BiFVx`cg$Q zzuy#ML=<~;{2X-pt;~M1PAo>BPXa67_?a$`l}YW1N6wiyz&|XgP7nU zAW08CLRJ@)yLA6?Gi<$`}FMd-kp;=~EUKt}M9uk!&)m`+Buos3RH;KF%?=*%53LD5Oq ziAFxe(RodSCgDCg04rCeEo~`G9*3UJ^iJ?jb{xt}(V8={O%=ps^NCh}ArA}$D`+Sl6A+eyA>uS$SS1nKo${79bWfoQ-l59drT}#x_!0-Q0Zoq;MmB&{ zesQ4DmEl|(ZmtX2@&tAL*2Hm+Rv?lA%!W_uGlynHjWN;MFBdN`w6UlG; zrSar(0YX?S`sD`>v3U}c&44q97%3*_ShjXVI(FgW4wJ)8Kg(IzZgBjGJu_%-lYP>0ib3F#r@q{HdUTrSfn-^8EG;& z?hdHqYrj3TYcn=DkXxFKj5lEnggidK8Kxq?efl$>F!5}+*4*5%_rSzhaG?bN2mbVJ zx5 ziHc_YP&k-1)GE!6vZM<|OkIplM<`wwayp4+Kz$9|F${|n-U;&H44bPRc|)srnrEl% z)V!()OddniFQ={U@Kcp1r%fd`-!v?lhZrM`KkTFgJ)Aab8);Qwl$33znTuPm52KrS zCR?=GXns!Pnx>$}j|uoSq`L z5&sA(gc&3FDfW;TdsI8(5PT6-kqkuK(h^S16BB-mQZOZiaEKDLkS{Td%NkOZ^1kTh z<>kj8y6gTsZoK!l>mR%Sc16q0L1s(L(6j_cKXmskHnR4mFN??!F_;V*`@NKfW~6$?t%iE1N?S6LVb*ni@RUGNj~PZDM=pigb0a^H zX9If}PZ$xC@-(nhryqU)+iktJ2m91Fyc}w_(EN!HopbRQK4yvelJh@)Ak)xPIsXfdI*dvG$lD) ztJ1XnBz4iTAejctVF?;jLX55lp}VFBzd0^CQbn>O!r?e*9KE22+F?oh>8T4vGIDJj zi(;*4s9%?N-Q^kHqn0*J)Q+(rc$lQ2X|8Id={uJgb=<$)q}-}X(>JUP(*-@$j#Ss# zRxL|9nmR?3tX9{JOR?1O%exi2Ek1Soo4L_5QVftu@xP;O=U_>aHrC?M zhwImT2QILe%e3d^d>XdCcv-!*bVtJd4BxL2i4p3KvrUw`C zm;hDcQcM$ru$*wy1HY+=IZaQ?5SDOC&iawkqsZHmA_F>_a{VSb8fJ_hiJ3@poyu`y zGPv##wbdkr)7sFwGZx7atS@;@5csB(Dw6W~rfg1m8VFCzr=K1Pco~xO5DccOzG|K; zIJ}}ebvD;O!M58xzF6~u;UJgKmTA4 zJiSVD#F2+m;-iZix+v8VZS}%hW;1X21i^1{RI$I5uLi#&0rHp?XXh>{LFVDHE}4B^uDG z3c$Rl+>6No@&k9?bk{A{z{^&XTUG{~tvf$@-)${AlL3}3A_L2o=ip`Q%Zku3XzUAM z{noZ6W_fm1TI@ zer&kgR<7_e8XFj!4YEipM7+-3|jo-fzWU`KQaBKj^_9eDtf8gDxUvcrLue$WJzW3!5*}Xl;{Gt1q0)7?Z6Lk#UQ0-*LNfSCXE(>CC(zhv_~QAN|zrX~xbv zk~ffUD0MMUKHVdEJ!YPKzDX}7H`Wph$8FKwvoM&Q3Cqb9_}Z}6ksH^g*D-d1psd+a=OT%uEW(*|$%6li(#jW@75?9CwK3b~Afh?B>-$dC)(c#>PtLJ?@x(x)v?Yp<~? zZB|I_gK|@IcZ2e@%I!7hz@QNQ_%s(0)!ihFv1MXr*~mbmImhDipZ@&2R)_?y)`rBr z)``5+&7|}@7On7Bbl^H(?Cc;m5NatDFgt4WqxqTlc$gcx#d8n9vj+}-T5UvLDE1jH zc-g0|JpnL7D5Q;kol3f3h9QW}@DY+)l0a@RDHAYrM87_X<{hi;x86bkVC=+hy6$R0 zpnEW2?x}dcn$oZRW&%_Sbs^WOqB(Hg^;dC)gJ>4DKY>FG!GjmWF)q{3pv)-4G)7b-fyv;}jZN3S7zQSIWx&+pPfO|+qmMiO1y&hLIV7c{Wpt$c~TUG|8!^_}vXxfVN6OZ2d&4+LG zy)R45!1DcbF9|R2w&e=X-)EB}_ua^rY+%{?a^L%!{2yxX4J;)?&HY%a_m_jree6pP zINOr_x?}fv{qcJpf8e(8@_x_WaNA8*TVbgQ)cqTH**y_1wk`|;pZ4ZA?7aOpZNah^ zOMu~~De&+e&ZU=F!fR7*&|tFlW&d`vD{R@Nma=*K!t*|BA=A%0d=uLeGo(z_WGD8! z->C@q0d4?k;1Q;S>|i|{2(-0PU=YxggPCDrh!0IAK9DFS4+d%rGpZs9Ck7bRqE)4e zn2$?KwogOr5G8q463wu-s$gbJ3Tb1foGP8tpbA8_{;PMwI?^`Ks+PhquTJs9(P)H` zGCt%>MstW5ffF=lYJ)c`Nz%g@97*qtp_XCzTGgF?^R4D&$g|29$lv;wH!xeR zPpiHCt*^DXY$@6Lvd>~mmYaP6?7UCC-(Q%ox$N`aUj~*IG8?6xd;T>Jn3t@}Gy68GU8yQ7l(?$XZ%WiW& zXTRspxjJ>q`!IfO<+`9#Vt^DxWiSoNsH!`)iK)_KeGLP2ib+zA9{NWYnXMK5h@b$|}j;UEeo(7VVp&1|#uLVqo;-rt@@QQ0=&9xjkvC zYdTp%1A?kJnT?jRN-axrHF?Ek)Mk=AYsr+{_2!#c<+cceNCg#czG&#L1U8lmjMW;E z)V)=nu$WEk1Q!4!Ac??C@CLA0p0ltE5ih#fqQ3McW_!okX9&qGXV|I@>DUb`FcYxZ z{wkJ(cc>DK4KO116QRLz0z!dVo2~&Rk(<@w(2Ad)5Kt?G)3tsDlf`x5NehS}W%jJJ z@=7mx>9Np|^=|Tn(JQa8f*TfuEBEC|p@?H-$edOnfe@ldjv7Q`S=mv8;5bka>&Ws$ z?;g4nZMxA$p-bBQ*}?W>0!4Z`oZsljluw64=%!7Gj}HxpV+|9{dtSH}<@(*QSGKJU zfpR{c=9A9e3#OC8G*HWI2d*R45IS&ZwVWfOrn%6X^6)ZFi$@M3K+4G$WbL2#nGuh& zJxs7}o*o?`bP-sV^MITs;(7mQwmaPxYe?DRa(LNxYpvkoKmPVt?%hi+x+wp=_M}(P zWA$09w8?!gTh)roEhh$W_xGi7{ugRe&UnYgJYkqg0{Nczh0k7m$pxMy;4s@vaL{j= zdjmo+Z)de>X>?WbC&C<)+lK1pcfTZCaG}ZKnu!WH+jyYX{IOfr3^7+tPSfVZSHA4O z{@Wkn4#$F97(lJuq~)}VI1-LTiA0ham|C$muuR@jC^>41de`G2=|rIQUvgCUD_&F?m&Lxe*^M#W#(0r)#t-i8e`dVkNdA;@6jHthte3Q}fL*8hl#!_Dm$}lyQE3!uy33OR76PziY%fZX8 zgl2V=uDgj36Mv|0xpN0m;Se(2F0vmy&6{3LaU&{m>k$e?LcD_3Pu3AiS^ zJvq%4tI}lb_s&SlYhqmWm3etal_sqs35Uc>;*)}?0v=%rLQFLs!w5^l%YqZ~swB(^ zISr)(8#&2h6=LEUMMQjo4Q;x>lqSue{0bm!yY-hJxZ}1PuDJHH3!r8@u_0)S%K)<1 zmVHCp=f5z$zwCQokTTQ^6qnkS{nTpm19L4j1Iyok@)0sX^plU@`JFG{>0@7h!Tgnb zuYs2zzWw40K5^E-%f62dEc@hF-}~xcQ9geSHA^iw+m#J91JU?WIQnIWZFl-f2cCA~ z{@!0ca-Yo)d(KvS?6Be5t1Z36qKhuN*fYMryv&k|9DKmOAmkfQe$`oTd&?Pbee<(- z+-9l87jl<;%}FPKWANtdU;E0#57{5GyZq7%U+kBNC!GMhoqg6@_kZqQFFNM=@b5pI zeTLkN$rf0B0UO){56J+_T0xi490aEd;?ozz7a`YBLg)zMlcR%#ayU^AT~eqPXk}8$ zRZYP1XFv7XPk!9rCxKgp@W4x0;5?+S%3f+ew^G#>aS;CVpZ-Tx#QcN99&*XzxP~?{ z6RuSo%P+s=BK!K7ZFDS$TS4gnEK68rXfeUn;4Q<@6$$Ww)dyr`Oy!(>x1ukn@@g8Z+P|b{>6Ou+fMF#U-oI+jO}}0K9TL`%bx9+oc@OP6w0ts zZRQVa0w_4h>FG>y){NjE;e-W_TFG@bIggxoWIES00T$2{N(e0KYkkpG_M@e8OoaNgr9PTH#lPsTEpoAVx|UGc(48BIDCZ zL;S%U@{~vf28WDPceC?qXXjnPhPUZt6<2UCVYaDaL*1d%fIlo8RY`JA<854xG-Qph z7C)aOXv@_4qSIm>O=GzOBl6;%xWDIsVSIQhDZ@E%b} zhPG}i!62{(^a=|E1YiMoe6bw0!f#m8B8Xl@{;ubcqi3PVzwl@`ZhaBM?CEG#ZDcg6)! zi!cR*LQozMph|*Lxgw^BQLUn`IfT;~&@Z$oq*lynebzh9ylPw$7daM&#?_93WbBw7 z6f``@ct_J=^`eJs-M{|$hd=wtj|^&cA39t5seR+iFSEE0gcy;;hn^?10tw)F6A#hwAYXAn@tIa((ZjT!OZuGB(AeRK@Na$5oda0V;n^78?v4r)j_d1HMI835MN;0vk>Ij{A zln-7jR>C^NwOpZ^sc)U@{d`~PVRDfCy%d)mMfB^N|PK&RSo5;SW+!or&QIQ zqP8?7k5;QnE-BV2njSJkZLmnggh|$2P1NGosV2nC3UkTH=$htP77*$ZCz))jT;H6G z9uLZ00A1^>4x3$`^_f(rB^ehh&njJ!N;pF&wfgcbg5P++b=BPS5nYeJ?Bek8`Hk0K z$4ZkupPp+Oa(K!aErNAM_`-mX2=(Q?tvBDq^|>!L(CLv2c{+2lu^1qlLmVatQLa`( z8Oi&w^Z>)A&SUE515L@Drgro0?DW?dXX0e zxvCKsw5V#KnY`=mZ+`i&F6@}8Qbk_qg;Q*ke!1J?%$-rEId<}GJ81W{_uX;TeRf#whc>(bv-g)zK5oy` zPTcQx$L@CYew!S$=O+8_xy7dIud>*}3wR{uMzg>I3ofw0!li{4@Bkls!L7I0c&F{R zg62+n<7)v!tCJw`K6~x@f}@Uth;d*v38aPN)Y{?;09v<&Ti^VK*PM3B>)|-b-34KuR0o%#0XcbYNT|^HK|oo1xFM8D)K;h*<_H| zpZDa(PIB}`SQn{MrAMCeFri+C7`1tIm@yAg=+eEo+oF4@`ygy9O0fXy#wvbxw{^!9 zHHpMTYaoO8UpyvGLks%_V!2xj^8Nr5_pJv)1CM$`0fPd)8T?|JV#t!wxw zt6&nCh4nxcy@Mw$G_w%yW1%gzPFwFsz+W6_BAtHf8@JtRtL?Yh{IpX}aer}@1D3D3 z@=`MVkGA2~N3-3neCWoys*u%S4V=Q9pgVoxNDVn?sk@v|@2Vgpzhc>I@7*Z0^PEmU zuHs=+(HM4lhtKT@90fqUj1ZnF)@vBmRq@3gRaJ)bK=iW1A!wSpdFUbM@7? z-eQX#x7%`!RaaPQ@x|AkGsm*8M-V=qX?wE7Hm`I=_l zd^b<*ZuNl01TaZjo%bRD2jW27@G>`=r5-_;Drm{k;VA3o4u_-9b)4qxGXZY9`39#z zFU9jSBF=VvFDplz{O_8i6@*uz}^zCP8@OAVSDZNY)XFr>t7g!>kKiP zB`(s+h&Z8(+s9>u!TBvn87u9!{aHUY~4XVCbG$9=R%>?DYq zAmysQ^$ijDL*ISkK`8iJkKgyrFW>X^NACiPAAj(+uRL%o8V7OrEmz)o)8&uca}y{H zL4WnZJ05@F_6P60_JKRE`0f+8f8)#7-FnT(Pdsk7owr)*(C4oI{QWlEfA=|u?78lN zyRG%SXRmqqb2jqdD9g&WTlb}}HfDR5**@*IYD3dLovqf3%qJYa{aaqX?`vPQt9{-3 z?Xv#fJ8!uDn#;k;-D~EHm#rPU-9y1aIk+q+2M7aw;9+c;B&?RfFuQdS{i6Nm z$pBk1=Mc8BoVC*MMkB&uGkLg}<=&VM2q$TD`6>@_9IE2o)N_@XscK^uIW&S-?9E~r98E7sQ-h+ZU2 z8q%pM1E`hgDSjhBjBve>p{<0jyR_9^dImTmP#T#zh5`?bnjvjp_VC>WV zCO>VDTJl4Ws-j@13^iuaM0Rz<#)?)gODp04Z0V^ zk-I@Cc8k$Gwc65<68$>d@;C(0B-x1`DGOC`BS``cHa;ffQ%(j!pziRg(nQW7jK`5C z_CNi=hZ0S2G$Cnj5UY!!gpoANH)%NE}@?K$FJ~4#Cg=^`HLRU;nF{ zwKrAmHnch1x3aBbH!iTqs4X1pC?G8IRE6drp=^*@j-!fO*dS{4NWqV^-2VE;tiMZ7 zK<~`bK;cu9ljr%LqdE%Qh2RZ-fcM!ZcQieFcfSLc`C;cBw%%^*Nswzjv_*4F>*m06 z?@h0?+|q6(zQ-lVp{gUIYdQnVPn`ubKT$2fW9Xop9Xi6M1e=T&daK86_-%G#muDfrAiywXL0lA{}2uI|Oa#uQP6SNzUuqX+VkU_>;~5GF(^>39c8EE=DI|U_k;KRR8;L-P zaIG9fN0ZJlOKKHag~KYHHa|5)%WF$UqC|0|VGJ>5&^8EdOZb@c)q}|)^-d>A3fT~+ z?&JX+<}s20wK~F=8oMSD<bEfN$tFFBb@Um%U;+c~;a#PV{bCt&L(Uyx+SEq4ZqQT|M zMa;2l{^Ui_#0)Q=eBAEv z@?P7ox${2@0UpX}0QAConkdR)OgNWLMW_$~KT2>S%>hqM94~4UQ;V;%A$ZV2wVumkgydsB$mvIc49wUP+Nfh@VRG#%pZF187IEv2=6lc z{1@6!ro6xGdtbI>%YE<5%ga`oL1y6)AnAM&A38^zFK8!S=rX+Qv=V5=AyDd!!>qOw zHVvJ0PA*IhOXTN)BzhV0o#t|FNrK^mGxi^vJS7I3 z7$qS`8Vtd@Hc6>g6o!4&QYEj8X_WrI zSJl>5L&^;SCnX3)Oag{1B@@T>Q@Bf&T72HaK~F54*{0}HFBKcOmAhHE1-Ko!FLW1h z*KkJ=59BDhwpr`wI4+9Ry7IfKvxFycGDDH^V;0qL41_%ypZ$u-6GIcL7=gob9broJ z=K-~2VyKO-3pA73!ql9Rr$kjU)IL)Zs=4cNLN`jD;UC^`o3?sepl;V()WVE9lEk8D zA*i@iF~|%xBQRA7P01^YW6wDAG+*%cEuA&jT9aA>=Uc%i_j10mo0bBSNhmTCSQ@@r zs)+BfrI;{GllV(pUbjtY-IzCeyOQChxU!Y!q4KH)A9LF;XKDXIh*S;b`RTeUs zS-eKEq=d8BkQO`g44X-=N z?MxG+b_5OF`mbC=syr)mvY6^l4XehMk74B7Zolc)+inDtm&$+;C`|H9Rb!H8(m#0q4$Be0bThQ*@e;bY#)Rx;$S%jxRZta0W9P zQ)-SC5|gCCC6PUHok~nX6GPV#8NIVGg9+dV00WXFsbau9o$5~8dK}iqF>`3k+WIYz zLnbH48z4dz8HB_D=%y>l|HIgQK=?i(XI41RlQZSVgSo6u%`CD3+MZ-H%&w_9c?^NOw`LUfcn*Q z*72^)Z`N2_+{GtVgVb)s8lvipUnAGc!lMlpO%A{ItcZvW3=$Mt#&Yp7pl||36E6F- z90hSv@}pp`#A!7UOI_SIyb;68wE(RMBxF{n<4Bf;KEvVEzmf#1S|!H0c%dlO8o#jO za8`@zE;~p5v<^uwWAc@VhlC1}q&oVVZ~Yfg7fygPP2@^L44nFoB1CZ}cDg++$Qc{f zUXJZy3AG>#Sea3>OnTt*t)@yivnTvlTymf_|>Y^hIKPkKFOq+dgsQweQ*fwJUD6!HagEyNdJJ`^{SiUUnXPuN_`B zZ>v=n&Rx?PY#YaRnQbBi%XXH#_{;AvbCNLFfEMuk%cmT+J-mGGvAZ3P%+3qu34A8-_ zAR*jW0FI6y81ziUz=s6kK|G?elSvc?0FVQUfkaUO&u9fa0Gkqcr=#a#Bu zk?O177di%bZS*n)`(RlBSib4{&)Oq)@r$oQg2~K3=olylhe7G`0?2X+mn5$&j(KDD zLVyA^q+3u|nA+;4HK-w=tyG`yygIc70>8r4mMdm;;UtkMNkyn2Kt5LhK(4-0E40=_ zN6~J!jQe2O_Av(?s~;bHM(1E;hp}xmbCbZ8Z?mENo_D_a#G?}_Xr;4}B>KY#zN zH+;f`rmQNbZtqih|My3EK zfNdx&NtB~7he1-rDATE_K@)W(pR3mY_kQy6k0QwAYA$|Z41;qBuPO4=Dxav~G=*^L z5jEJ7FDYpPk`QY~Wuc@A2A$PA%(5#nv6C*Cu|Rst2zw}4`91eNK|BjQ0(vM2qq8|_ zl2eu_eScpEez|40Lwqmu}UGfLmkCUZ+J56oo*EI_Tj2z#1 zyU5yOML+5Yk_~u}R-wY=Gni{7`s>Y1Yn4(p6p-JJRTjA3*DUZd>lzaGnK{%?j&YLx zPa2u#6kqLBM5RRO-&hq;pqZt$b%B&38n0SeZ?3$`3Tv;k z`fE1X0C1kW!`9v{*oF6s{^S#m_AcTVKl{mVer2rvHL>QG)7sW#!^3yYz7wZ2uP(u!)8>LbR>M*Rz^>tZ7;v z5k)R5UiR~#AW9O&)_N!-X%P)J?QxOHlOq1cF4CToYrd!G<6EnKfp*GU0h)PujDq+I_{_wdNYNaF( z{THbM<`hW6h$>~sA+6MkPIzMRkW3~ZuXHY;T`3icGgS#ek%eM`U+b8`6J%~OdS}&< z4+{&tY^ribAlLgY%MLo2Z0UoPv4xkdV`O^6DL_=3D9!-0ESqjdpkqH)TJsue_Q|~N zpsl_(9<1KhWWH{)4ej^}aP7zCcb>Z~?2OavICZ4Fy71MdcF@xjLddUc63!nK44OI#0x3YqtWK@*=qA~zq1IHIl{2D( z2o#o!8vN)<5UpNS2Q$j74nx^V8;+>d9;riql@+C)8HypXqQdE;Xh}8cwD34<2jQw& zO=F9g=4D5r{G_#?2QPa-K zd2B$~CNfa$BsSD6jMAoaN3!{V^U?cmcG4l+p7Hvf;N>@+IR6d%ZMO3kt8Y4Ig_W0| z+}!0+GI6uvW!HRpgxMDc$DGQBlmRqR01dVU358K?`=UUFU?UzkgN1y7Wx%-&(2yWH z9tbf|8A6m4`U4d)fSI5l7l_6qmHc*HWd|O?%%Yejkv5$r@aR)A*L&JoW|P=KhJPVk zuk-<4pN|Q+vkP9v1OJAF`S?8Y}ySSg-33}qu~RbfsbHOL7g_rWrl?CABU zKK>!6ufb$d&Ze`kn89j+$p-XQm-)fv_g;SCDaRdp#Y@aaq@v`ri89VOB zo?MRo;8z&8O{nZBlZa!(m>T9wh$&1V+N?A&@gSO*9J*-p%gl->fR0F1I3)(eoI)!? z6a#zA%Pu^`n$)r*DhP$Zq$!00!kmDpyuv9tltE0wr~+m#w#psZJRy z>4P|Gff5mO{E|3@&gzo?e7;(YA}W_p4q~+y##34RO0OXF8X*PJFb00Ps=jDhQaCwL zBt^tul|^eYNiDPJ(-{j}qY4L4IW^x7HA#GYuuN&$+r! zPg%xe9&Mt~GNN;Z@nhDH@(NVt!KsvrFr17=sW@dp$4qj)hR)%X5mAzGEl`uDWTPbw zDS()6m}hlr?U~3`eeENxS$Z^U5U20p){EcrCMy+#3KqEhic5FjV;6T-L4SU=$GySM z1hU6$oIzE<>HY`oZF%=22)_CCqdym(e~xcY*kix><1CkJDlRjHK7|~8Jv7!j=jnXEzD=HPCG@*?lLIA%Js{BvB=M6@y|T< zBv>O8)Hu{WEGy7MSQlt00a5Kxzq{*(zNuskQ_^T};tSK3nPiP+Bbip_g0t2rcYXQG zu&Vdr4xGRCH5=@@({`@+bePKX7hU(st50EPqBIMn?BnIzZom19x83yZXP*4gkH7c+ ztFBysgSEHZYSX>|qn{Rg4F}W#$gy z)z?_vPe1$poh6rgflJF>^93erH(3l?H@~?=&gH|PH#DT3y+zTUI%we@yLro`gY4St zI3M^s>0z=oJS^?djVsC*Za6iN2D1RMh^>dBC6NI8=tM~b zk|1DkhL|E_!c5*}!M^7yhXh@TZjo8MLfat6)PD1c-0@x6hJ|vKtoWU0J;(pm> zjFYxy6d3YH5ml|}9GgbdaVVl(RhBVLWL(@y#U>Xg`*cCVD5fWY!YZY~J#jepMlK`A zx@`@Q!sf9`4-=XVBPuCFg`WiLtuPZsk}U}mtmbwtvITPRG7#--w*BQ34%+&hV|IPZDGN_HWEhaX+?q=JrV)z=mz@_CkE3Ae*<%Kpz~3Adg<-lj>lR;e|H^>a(+7xC1!p^=y4j?lv1W*8H z=d0}_+i1pi<8{}9!!8~ag_HqWLZsri`QpkiaNDjl1!PhewV*#`I51fiv=0qM(Nk5` zY_GQ!jt9McXQ==+PKURJ!_+8RgIJZVWyq9@Lb7m3xt1YKLvi6{=dnA94JHZ;c}Hl5*`#w3gWaI6=|WUpdm_X%1=|uqLf*kf@XEog-(ab zS2R&pLQax!y;IQ@7ybE?q+*tV6QmJ~4H z`DEg$g41HvjxumlY!!+hk8n>GVv~tNjx0l;MFOht>8UB0zN*g<(hfc!cD~M!4nwJ| zJD;Yqi#W*0$HJ4Pd&c>yx4=p(ui$G|S3uhVvHyr}{}fV?Di^cL28548^nwSpr=SwS zG=&Pg0Sx}N(QLN@vzxIuT7TWwA9k>X)XMpm3(f_N(dpw{u>kYz*rSf{I?L4@QkKG8l`!ST11Y79Cfi_IdaE#SLCLSUl64&N+L1$(A^I;))k-xigP zQHMW!=g@w=&ZWQam!0AC-h58VBOAAZM9G?PqrCm@t-gTD79cAPb= zQnc`TYp)Kc`y_nTm6kU=97p$B#;EWP!KSc1cg8qW(_fz1y z+pxrgS$q0&!R)a0>>z|s5y&jVSzXBb*>bDAb~-MQv9FIWXe0(;mDHEX;_ z5;b#MvY5Dr3cB%wl9h{sj$d>vfOyKKn}j(5Sr7?Kh%6jYh`Mr&z1R>*!-+?jOO8}7 z3Cc|+zEqq_G36BjH4GF}YLtjWXZ%B%^e2R<7^)6|hz(t$0t>N863r5mtXyp>1@T;ZfU3se*4eA*mld!3{HE{-iUh1Ba7;*1ngt8 z-8bB0%atHyco|0a3A6LqoIt-J#Rs45K7-7Dd5Syzh`AS@yx^q6w%cv%HD9yta?j!A z=iOfh=p2NDmpuxhIS|5hfq}3+;lyC_Yc_ zvt?(q1tfKT+o2ch5)d6gmIPCi6U4?tf>fK-aI1=N0a!)D>`M1OK|`qswZUGM63`l2 zNwdJhG*>Sa4M=%m8EuG@5C&Hl8NuN=WeG3iltq%LRBS0AfYT}Kj$Q-KAhZ2t4p;{6 zVPpre-}TOm_$Zn|7%q1D`ii%ocifTtUw+BCoR60QXOP*K%i(4Bec4=gB->A(nP|-^ z_Lb=)YNnWaCI~~zoZ?V8lM(VYza$Cc!OZl^`ZAjY;t3#&B2MGwI0=-IO9E-=a4`jF zg5KODgmuTKhS_cA%80Y+FCZF`91bPR5WT4_6*1){Aw_(xeIm()Q=oTh94x`2ALB9QVF z9z3Z^0y?7Na2Bmb=y)W_J31n17GW0M1ZB)TGFt6>G~}aa-gm&vqonRmTQWyO5w6go zRZ41P0ZvlN*RzcfSrCWjY0Kt znQ>?X6u8^))$2JYy!JY4`+AglY0D3UXH14(wU5=(S3eFj&!4xG9po)uyAkl^_n1~( zZaMz3Z+M-@hmU7nFy)I{HmVw_8Zgdhe?mzE3xmeYS2ze=(M(X3@G|>(rpt9a@ReGB67wjLX6MzCwcO)>>m_Rq%xu&}TG4 z&7icwAROM*tq%7(9-ww!HE2dAt|yU>R-zqYs8$gqi?X$A*6@w9SLNn~JF%a5@?qCs|MlPe z$X(f=x#kncpYVpAcHRzx{_XF61w=#2c9!iyL(?t<=ebf<$7!SsP#sM^Jvg>H=5Ox;UEKUx*Xz3;Vr1)y9ujXZfANY11`tc_m zEekB8F#(qMU-ceMr}v${=-M}T8sB)+b%=9!*iP@*+5<*>pQW>GG4uCXu!kWL%F|5- zzd6g-o}$`TgKPXcvG!*?XZnijRz5aa1o#cDO!0^)+1qmIJH(ux1*Q~K#+J~0M3Tch zWf3MbO5i05$>=2wih}5D*r=DJB=FM>m?8mP-m;4#%2jo+(U5KyaB>JCSs)3RA=xA& za%m+OCkX>eRgPFGvSUUW+F~m&21yhUr3nU6lXsLNFNXo=(4Qz2u2SMj4oZa*B}^rv zl8i1qIY=;b61&Z1L&z{Q@Qk`oefne0V_Wosz0fosVb03KVTPtn=g_oQUNV}+=C#o% zqtPgrqM>qaNJ9$+*z3fIBPdQ;zjB2Yj6I{#>o06-YA(~<3hYxFXCA!R^a-=~Z=M%^ z7SOi?Ru!Du2d4ukbqz}OQb&WZaj4LmsBYy;)uE*-MO4P5^-YwJunJ(5<&bn>E#}E(81Wyh>hk@_uoPi{-#hiJAI4qwY8HiFjS(HK)9zHiy zI--JDgA>>9M)^G{0d#no->K|=TWY{$7#SRP1{*;3!7_mCrZ3pp!E90c%iy!{H}1T} z71{QeANj^D&pz?xNAJ7tCx3PS2d_NU_sa(@+~C0ZbNrsv(Fbht`h7NaKekVpfn^7< zkKAh$yUg=8e+B0%Y%m#Q2Bm$}?6NPAxm{)lvrjr?Yv-}U%Nwur@|Bib(j4??n7G-_ zWBYR16m-5C;semYz;|01`rTVLAp9U+0pPbNH3T^NS4Xb}?9O|`pmtns zB1zpfnpE0Wi}+=Ph;gb2u`*&%Yn&XSBJis|POj>s+cgH0`&3yNOm?-`CqMQ;Z<{KN0Dzx<}NPq_HarvuLJ`*PVA9PNJW>p%11uYT!zc$x3_ zmz~ERh|lzCnwT%lDrTXirmBGbUq+W`5T+R<3!+&jTHeL|Vrh8i@)kS3Bwbvy`!vC*NvixW=)sXYZ0l(f-ecxs0vN5|u#=xnw-v;~|7)HQ52oY)u!l+p*)bo@K~Ngk|Uk?pY)qV1|odvh)&O%sSGnuCTKQjc0^q z1nkpXy~fc6^4e5&dl<|JDu#twq0CW+C=`q$tnM&W0GVkji?XnxXXUbcrP2_7_vNqc zMf=N?1cexu%rN6H>;s1A3uCfoQDv_zbd3Nj1&VZloO&T;4^y8e{licG)<`gO$Ezn} zQrP?@N_J>VcK4*=cWgjcdOX$g+Ezi>7*XCo&`>q0I`F=L*3%9SlQX&XuK{q`J1^12 zju4RMT?YwFWqm|>Mt-Bn%Md%uN>}s%T$bz6Sz?n*5+`FzB1%Nr$C%8bjxJ*$LI9n>WM)X?K^Mk@E=;U) z6%m~>^i+#skXPa&zPu!;7m&hF6rRMAPg?a#^VmwA&Z{hJ(c_m9gQ|(v4z++VA&Mxw zRGdMige!>7m~mng< zEI1CRn1-gLL1hpcQIcAJilSI^5Fu#3>1@zCBqL)>khO@wFu0BYX6dFuyrrS)Nyi;+ z;2DSBgE`?~-M;n4>;1r)728JvCbC+yQLQ2Fk#KOxVQkK-?xc{VtiGl^fqghoOKK3r z+Ez_92sx@hh*g}AC~4#ppjE9Dv3AR+IajO7r4+(7d;BOvR0YLJw>l*?;u%FeUtqdi z8AWN^*ibHkh6f61Qc<|%Bhrdb2SoDG@l&IsVVw#y|)=N;In#o3D~>AGe$ALTrYE z)7S8_kC!L6VbA=e6ds$)!ajDkgA5~&-(Lohh5zcikB{ToD0oESWjoQ2+;hi6U%%b& zFWX;!=B_2+N9I+)ybUJx-{ z3@n4mP%|9e?=RbKZV%d#?9-3f@$92_dFyF=A2e^y_M5CUXZ05^{el;`z;o#r`@C&- zyzHI77ye)!bOr?jZk~ps-uVM`a34$u&_Rn*`OutHfxxUNHn0v`ur6HO<}aC|gvbj3 zhH&LYoPmCmv&VQa^9kgm7p=H}XUwv5&?`}G^5XHT4Z;O@Ei#^QwuWI|fY%BG6#I_P zjW?@+nrM`Whkj=-*eGiMYI*C5j0VI z>)L@!?cNojYa;wPI-~RPlwGPosbr!{RnTyXrp9yCmozzR%?ZHEj$zwkhLJm=4LG|) zdmP(#*_W@CZ7ZL1`q3Ajdy@OU-gm`YAZYX(uK6=ZvK_;I^5L(#>> zFc%mLNJ#-4icLGj;iy`r5|~d^C5kR0uyqCl#ac8-z%vA3V(u6cgvI`?B zg`S*1nvdscPmP`n9{K_v3gfZWlgqO}u4EFM4oX+9Ww|${1LvlyqGL;W%$x^=s9Z#) zDxUPLB2w|hhEfIj6w~+pK14b26`+HHT$GGxXM{a~m@g zNYyAz$!bA!0d;iZ-Ma|&Wp(564ZHreJ?hnp@5;?1UDgW?GL@*mhEt2-nwX7qb)ly) zRW-h)Ub7p+<}ZM3wENkt$tTHPKHPJcl}GLPQf2g>kl(EIy1>A8JE+07%uAMX^!jiA z?yvlM)MJl7_@`H2wev39uQzAS`|khx6Hh)2H9L}h$=lzu!;af*z0GE4pL03@u4n_m z#G#4e$X7I#fN@a^Ech%{(RRS$6rTV%m93Su3Gk~OW^OxQwM*xG1#sd?Cjivm&-gWP z0Z^p@APhdg@vPHiF?|eCzer^Od)0&aJ@383&2`3uT9|rfjesb?;^h-_0$R~QTGHE< zTG)6nPV}rc*99hWX07fvSaZm_RTtX@($%)Fk zP9hf`})+w71XXN6+ZT%y$|Vv>ip@}@uv<2R8tgQfKiv2(T`dgpCxUu&C`+xs89}m~(7;azmZqo{8h^k;!F#XkT zRHVkwM~|oa;z7|SH9nr&C+0e6G*n55C>R7#hKopGbOLL9QOxMZGgse9N=~^dZV?|t z4$rhJZ!-B5P;rvgOIT`hDjjji86_qy24T9%B7kCq!TXIa$cZFQDk2 z8VxgjTFb=A7sW>4w4Nx$QXT`ze9}-VE24~&%A1~|h}Ox>4{K&FdxOtKVRBeMa~`jr zRoxb*wGISih9cTN? zlbgPtv3dQJZDkwEuD=G8?JpO#k30@(52S31*`~AKlj7Wu{nW!>y7$h{_?9%YN!PcXyFF8gvS`*WZEQ@7MU54_A=VMnoH7)zXKBFvOEJGfzSnS&+a zG1tr>0U2$_2w)o%ZPox$=rSgU{Mb-9lQD}l1)7^>!DfOt-;)r3Sy~y;hpOoGBwtdF z%*3K|)d+tsHIgud2&AfvL?H9VS(q~^5{o&>#Of3e7w=;QC_r*8BY;g);6zUWL=qlv+4Q6vqD@k; z+AfA|8q9u@o*Y+Zag2n>xzG)GDBr!-F5}XtJ~v)r#bw>-k!#qTfsJef4FK6L2jTWsQ}1K|A8Kfl^sfJ%%>aK_kxXdJqB z>z4t5A6Q;;sTbH&HWIwv@$ECZLF;4x9k$)dJ1Ykdyk+u9f;SSTp*iB_J$re-mWrq11XK{S03Q4W2v3K}{F-Wf#B`tr&Rjavfg{#Z69hK(pk0H+ z&DcILvCdd|EKOFM_MqEM_H}?agoqX^3lug>pE>Jgmva$bcD0%rnJrn0fRU+f08qw} zY##!61Lht=!_SLN&Z`?Y=NN(Zk$tWF_P1VWXW1HZK*t|`3JW?%cK&yd(_R*Ls zRgDx>Wwne3iCC(dN{WtxlPFF=l0dx^j|Sv0i(*JZSrDh4Trmp_{w5B^5<+-#YVgH< zsf)Own?ku#BPxXi#ij6cxDux|k*X1*kTd$7K+;S%(keng_U0h9rB-eQ73C8suL>Gw zG3x?E%u)%7f0S6`q(AwbFgC8F{3v`vFr<<|GJDE+T(T>&6^H3!Q7K%KRL(j-7JRc! z5(;PSZL?U|V#^7XC5uH=Srr$LWm$BTqO6%PlWg*{%SLpNd0g|=&n~yc3`B>ZVPr`8 zhtE91^{Fz7!1%7YZRPI!dg6hvJbvF-oX7s|Q{Q;=vrFZ^;!Eh^p2C_kjP#df#94=CcJDRath7{3cL&{!^_aIZDl}sU}V`LWjwZZu%XND=?OK1)pDgU zHM~8r=dP7fn|cCLYa9%!Nl6R=!1)LIk4%aTwusQ`WrPHggtMVBil z3>tGVa=h-r_FcDM_tYa_b4xbUC=1FAVn?wFO*hUYVsVLb(X(%}6-^~PC@deT=2!H^ zeQYEUF3ZT>EzsrSWZ~p3d(8P# z34vZm?c^d;ebJNLB4YuO1;1)yz%NQ*WnmT&jfhR4LTeVHs7(}?C=`pBm8~cVoQ#B| zl|YWDoGhGF{-{)C5t6HMgz+S*T#NX!<4H*V$SxOVQKFKTs2(Do5uPC9St-i4_cZZD z=y~CB&~rqCP&=`ePMRuLmvo~%S2ZPJlUkwZBt$`slP`=&EI1~MfM^o(QSj6wNvqj= zJEY@sx)`Qo)ei-dT+7gbolpLB6JelIg(^$Wa{pepX;^5we*rN zV5r;J(sZuavTWOHVB0gg{OG4O98k5f>p-bpW>F>?6E3UFJ{rd))4Wd6I9$r^aw^-m z$-dBh)oMQ9U4cvxn8Bk^#u(=jY=`34zxWwwAfw%97ay@yy?YeZS@gt4Pf;JxU3kF- zev)bXxm(X!f6n{<F^3p4P@WMzUfMBAMaP~WW9c60cmjMZ#c`~hm50Z+mDdikcJ^3V?xioC9J60_)&2Kg;9UeG8G{`(<6poAv8qv(!d02C`YNTz^*cHX z{HOo$U;q4LAAaGBUg%JP)51r+?oh`y!36K1g3J>=XRgI2lT#xFsU0^)W~04D8xW?y zq<&kO{I z8>DPu_d3EMZKZn-dZVIe^{+d=fL}X$UJbb-{jontT=G&q6frCzv%W7{|vEZiNAcIWmv=bot|KJ)*d|Mol2dhmmd$Df3u-XZ})(gbtat7!t0IW1=q%E*PEi>(xFfBdpQzO!t>k^bQK1nlYI;ryaJO)P{t>td9Iy9pTBR z1_{IURz&HkiluZ zaHglgbo-QjR6!9KQlLh|98b~wDEvj#7M&zLx7lG8op#f7mJIV_IIFs4-}=v1vB2WA zD~Ll^~JmSJnLsfO7zU^3KfmpSYVI77$bWn0W3vna^S^|f+% zxl`G;n4Q7yyJjb`JB$rJ_eJw#-?$TG{`O;EhnMfW>)OX3xZ$Y>uD|n^tG3$s#qjcE zb9wi5;pO?;t_m!J%wY1uZCA6Q3=sp!oG83306TX)+qcXBwJX1lKX5B}`Jz+zI$+mz z=We>P@0VRbZ+>}ROx$eWFT0}>>V(eRj1AcVp`p0}*(3qWFrZX`4se6}05@z9V&UMh zCg{)a*!h41hDdY(qzY1bLm!7O-}qcVJt&N>Q!`QM+)l z8o|zC+nNO%9<3p&t#INI#iK#wLJ`HLvXW3#L>8%#xTIR%d3aInQ}zngZW!>(8&X~z zHM=O=hBCYiCJS`mm#CxHfAO)a{EXCjXC4bLzx^#|zU$KS?Ku0c`7^5*M4 z_LbW{cjp&B`@}|nnWLda>_WuMav?WC^=+ScTq$SBMZBSh^ND}O$ZvW~Y$G$+5L>e3@1H7ZmKlTgu(96F?%jB?3Blt5|^9@*uRHw7?M5e(?E z7-Rf+>NVjsBwsGW23;wLa;13%iH`ol!;Ml{!XC=3M2%o4S6jK})G=(=Z8o(iTC0f7&Q_Y4Nj9{A#^b>aVQ=4cuRY3!o!v31CJ6*Nv#uy*FafGyGcF9F= zw#2!%i?ylE*;?#RP>cQfxA6@3`!(QF7{WhC#y~QRLVVY}2<3V>D3{6`; zqDHeFPpqFP(w+|YDpB(~v!ve%2c?xoau!-(nc9@E3M#Amly??XQ=t33V4L-x;C58u zNI_*N>5x~{#a+cps;ishQg3C&0%B%Samp~w^f%9 zID786zM(Z|<>+esm9*RGY{y`}*d${07#2uD|xvQS@ASl@*-LMhBKb zY4lHg@*^joa@^%tyj>HuiOgqoIofUMGag*0cHn*sZQr|1)C1IUH9s_V#;GTO9`@Nm z5(B_CpLYmAilL5fPZ>bAU#vu5BKy59=NU{5L$R+FU?2Devrn7%Trkhc?Kd2Kgpb)T zeaG9HGB&T@eBRlRvXilf`em2Ab>7Z9_}JeYCrt{}@O75qp)uLUMve)~v3pS@W$~I9 zS-Feby?VSX7)q6k6D~`16lEE8jBpg)$Kg>lH-t1e36xZpWTHrldH92Q1W2y*#r>g7 z+7hB_8UHCjNJ;tAGUZJHL|F(ocev02A_jw>oS^{zifHzMdzx1&B2KQT+Tr9kn1#^^ ztkxAL3%b0LlwMNGBTB-CCnp?prBqNEt873RwzYJaiKd~E#s_{Ka^Q?W>yK1RF4t0n zzbuk47_r_QVkV(^Jr*S@Sx8gH+e`K8GYwsDjrALUj-&XX9xk?M_>RMZeBi!&`GIC1 z)Bx08>3CSUPOwTwCd~S>% z;aV7HN%^DMYg;nYP)6gNtE|QSdI3L`WFa<}VhO2GoN9qh6i*$R8s){Dgj7oBN>po3 zxa=y}SS5s5oVtUb9Lh|WPG;3Xj2WkZsH9OeO^TKzN~VfqkUgDcL5~x2E=t6h|Bu#q z&WLScAAU=BHjITmONnodwVqqyae8IVWf8>_@(o94u_OnxM+RRMCnucQgANP8+8N=X zF_=8A{0cGy(J=C~gPnn8r?O#Wka;+p%YCpcFkJ8%WVXxvmr(X&?$EyH=|}GH`^%5r zfBoIJf8?VdID3aJmV=k~o41bpvG?0$9Y?aA$KHMGRqYys#!#~jW!HH*bnS#TbnGy; zeP?^kzGe3PvW;e`;pNNDIp~-JwwS;Dy702^m;YnD?3PO>i3i@`2^>GW_c`J_iwZ`kG}35d*)FGS~|mN8#H$_9X~5bX&=AF`~S7mid8b zd(+HCY?H&-ADqBj<#Ni^ETZZ{RgZN|q)pt0Eg*>#4KJ&yN?}9MCi1cYv;ZD-O^VL1 zWn>rjhCwy8IZjopiAazXCkizdIfmUXv(06nFuUE$$IEt>JFwl?&7d*J-2U1%h~;1gy)Ddkl5-FMyy)0tn*I~El4#xyZm%qc`k zTvG|1L4`*E5hp*R%ls0U)lJVw{7g6{GMI%Icg;|8DA0sOQ94cdLQxbZ zQSD>-9rYc(R%%usGa%P9!oz6nFMB9>7I?(;kdi_It!hsVg+kjD5I`Tw*VmZ26s~kS zD?KGKSI636x;p3yl#3_~qC>bShQ_~C4bDfQlKKugD6-d~z?3kB(;3fb?Li?VA__V1 zl!u_!U=IUU&f~rDw398-cD2CU4cA-Gy5uahW1`D0vy4q=yP`MWas#m7%S}ibG_XO& z|I@2KfN1m7P8nSA;XnJJliTP3vXj`HfP=eU7>^4d<-o=XCvAkWzdEWdFoZ8QM~`naCOKn$$YL;Dm7qVWSEJ zc$usW&fp}Z6Ka-9z7kojh#??WE3~d<;81%*I7MBSwd7JSaDv?nhuwCXt2#hsNK+#) zUyVuQHX~nZQ9Y%P^sn7dczKx@y$I@e0{Mhvk3`X}_3RiS=7lcq%m713Yb(oZ5I1z$ zByw9CM5Ya)S_K7i%>oTojk2qSwXM1f*kV?3)%01egYL_|Y+3zlkg%m$P|(~Y`^;9d zS*B;xKefXb=u~Q6sE7b14O-_VG_~0cEQ@x6BFoB_E+UGHxqNT_x#`giUDLxGx~dG;w+e*Ns{ zKOx+aZ2R^7AeZQ22k*Dl=C9duvyB~hcXWTZowtXYohVc2J_~o>Yw!6^`+IeuAB}c> z?ehc>SV!Ay=HB)0%T`@&RV6xH{rV#hv&-z%x-%GlQpylDLJT3yGCHaa4H>bf3;^z> zgcm*rwzkq=?v3X%n7!*T23XV@Q-H%`))*w55u)W$gk`sy3F}!=4KB=$bq@na!dUbY z61^k>5#>@G>l$a$8jA7ob$U*CNK3tZ^c=P-QZBU{QSxVXvS1$4gtPN0Mj7>0A|9eN zi>4U-=$wqBECHIQd_qz{mT6{LM9n_@rUng@6Eh#3OLNkM;Xwki(I=GbXjvyL@^kyIAsh?OBKLi z96CF#$`+qAF-}ZdFOw-lb7m(SmGf@XU~qEMOH?cQ{M~kT3c`2I&LACn;C^!H0&2@u zl^cO%)>%Z=n4|htGk%rDi2+-3VoRWKy~(d_u{9tY48o%na!8Z0>X2EvgKo4abd;#q z2%&!szPL{mA{~Z6rRPRo5~h2#KNuT$ zJWq30b!?sjQp3fTF@To@4ku!J&FD$sm&%WVO+Z~yP-7^eKwaLxc&@mry4l>(l z7PYP1Mss-Ccg^s!4drh4f{TSYd&&bryA2F*MsYA3UjEJ#_dof-SN)!p@0Xu=;FfPb zddodu`smf~J7edqSA>`M*>x?>k1roGe?#|u*yX!`Ne>uGTAN>BZ4NWl3?|p>zAUk*vj`Oq%34(0l zKBz4y9R>7*cH1_E24KTupWoVffDE#1t3D{IJcQEASnx+fS>d28xcC(LU zc_9>q3YgR!qEh6=&deR#n^^2KmLzG@ z{KAz^Tf!|M#FA4%X`VkaCcom!K9UfX3n{xQ)@`(W#}se6;aYdb%B6j*T`gatNx+X@ zRG~`eOR7OqRVq5RI16Kw8XNw>ndW@CI5px%?p{LCiHd7U z+lboM(r3FF>fB_bISy(2*`S?w*ygm8k8`xiXDMD8*=}Z7vR7G{stoLajRH(iqKXdS z9BO9S%7U0_Dk?9-H4_(|K1UvPH~?j9-fs`tJ=DB4zBUOpi*lk`KtN3!qK2-{WgCEX z^z@^^ei4|?W~*-1(MCuEPv;645_*=|=6t)yik)RW>u|3fOy4q-uOl)wu^Io~KhTi2 zTpQ)0Y0kI%sg7EknGO5veU0oDih*Hg%C;0Gn^PpWBbiOu%&Jlpqhv#rPf077C61~# zmmRcLHE%7fz^o=%*t5n90ie}xuMf36zW`g;b72Fb@%W8wXQz#1Q7r>0-};5m-*)@W zcYOH^Hk4sx_hZxZkVE%J`Tk!5s!#c-8E}T0ufO3MThdfAB(A&mQ;@hHsB-+k4Jw|V zE*|elwgd2rpyB!FoM8ufw|P7Hl_0okTXIQrv-fHVp(>k$WRoZ zVFS!J7ovU~+{erC@~%5>Z&h~y>7qBC=UrJ%q{;;1=Typ&s||&7S|C?7#s^d6&R(Ol9nS0YZ0h7&(Z%WPRlu7yjApD&7` zZYo+W(36iyensF!BtMyWk|v4>tFjDPKAt`~qbakkfUsv%V3{wRiIOQI3yR(0>`77l z;orncP1+*zM-dpZP6fzp6L}G&3@p0}8+L9_nLqZI;bLHU@?rA*cY?zMG7r4mjx!#q zj$}KJ?SAYh9{BPT58n3p{Wm}J#24LgV>5faTAw^eW;>7l z=JU_;O!P(<0)^Uy0X+||!k}CbFMJ2G!F0@?K&vFoe0lp0S4LT2Msy(8F0$aOdZHxJiTd%RX3@Ly6iF^G7vvYcE zDigtc$-?I|dKq1uaP}e7tGUL|;@DtWL5Nw%ETUPRj2UlIo5+a`77;FrP6$!d7-9U1 zU_`|uAS$&|92;pgROnEHdJ0NyLWKFE zgmVI;p{j4q7>^1nu@=KTk_Z{DrIl732~|gG(xk>9noLPbB#Lk@nTQ#RJ-&Nhw9d03 zJ#alHxGY0a)1Z4m1YR+$n#aR1l;_*ZZMgB!+z)thg+sRP?CON7FFQkUiS#anK<_@aw^ zTW#MNtoEZPz%pro8)bkI`2BGs717`fKaIoZVC04+3Vfu4^p1wgk};9> zpnkNw3NJ(YzHtTiRmX%t03jcG`6)iQ8Hqvj&TwwfDTP3RxT8DY}JB5E>6Xv8fj?l``^?uLcCf1&1uWNx9(bUaY&2H|?ITdT3FtoKQ^o5e z4BAjrVB>^QB-J4JtGHakh@3!SY-On#`Q&JS%GY;g=ZliidyItS6JC;XD_XM?n?O7b zhQ<9PW56u6Dky#AB^ggeD?+8D;-m&AAX-g@MdMiJKvsdiG8QX}x?t(vk6peRu| z;i=8%6s^9BZpx5D$&xTf#Dg*vz%ZlF8}M+f(k!ukksv zpWs9EQ&aumR2$8raIu5dkAB19YAD!;$@g>&+qQB$%WXH~@pH@*hDY!HreCLe@?Ph$ zAN=}tzF&Uyo@;Kt_Hy@qO@4xT&-MF$dG8(9gqIJQzYz%BG3<7hfnxC4McGI1yO~R~ zeZLGWgUk+Q+hT^GPd{SrTTWSc_=1g`$KGn=RhC=k#eULa0+yc#FN3R4-kZ*QBcKQU zxy{2SsQ@|<$4L!D1AQFS2G+G(3h{Ax;5)c4P8p%ya349sZNEZ)AR&rpG=B#A{qa^S zC6XE5Bp;@p*+`x2R?AMHL*q*9kTW)gV(@SWF+gAd8DxfbIe`vfvo?WcN3z+X&~cF2 zDg+lJdg1T=e+RJnpfO+GwwqyXsW|OND~_WHB`FZdRk{k&leTINGD{VhX+uERhsIt7 z_~qq`-+lR8{koH5(unVWuk+iN3OGC6WnADbIqFEhW)vo0eSI7qF*H5!wGsg<3?XYg z804h@qJV}4u4z@rVY8JCGCPCqF!uRxJkI^tZp?PyS3kaNfBEZQ{=DCl>L-|=eQE*{ z!)O_SObIYVIN;Dc6-JE0L}Vypi!-_@Mh?39*wky%nQ=)|1fz)0Mlz#KQi0)QT*OI| zQ4uD7&V;1^itJpeO3At#YK+qHNR^8YoPcO~DM@Bk$QKPMS2YE3tt}pKR$c2YQ-p7o zv$$n+p(Ex{ARzZ9F+2Sjjm;F>XT6J17h;9=l(_&k>r?$M%#q2B<()LosSGnqXv?Wo zbUunCR)^;HAm9W<5v{IPZ#ZHLz9QlwMnJ|~56y~74%ae|OgN^23z7x5dZoIyiw~{P1Fq)Nt8fj&2n$Dcb zXo7Q-X**-J2025}0V4PCKvGZ=`_|7x+g$dcE6nA>XVU1vciR=M2(c85i7z_lJ2v%F z1r>o1Z@uX{pD?TPc3W>|m)R2cZ@>FzzRxIRS8*~a%o_2rwq{vl)mN;z+)M19tBy7y zB%KvE)+IVqo=pzc3A3(+8QZRdwu_451qJsH|M=g0Gfsa3wY;T6`%qqkYf=CMuGYcfV@ z-jS0i(q!kX8!Tj!Wv_^=Ye{7C2{FYIl@M7nag&xnb5Lr`){}Hr9eF80!-**0>CAka z-`1qz?M0AxVzwy-8ZF)_IZ5tFIp%@yFrXVh5S(1Hi}n%;1^UAvmn;QLknXBNk&0PB zuHi%xm6#5AxCA0rI*Jm@9zztwL9Zf3D?Qy%DE9sDfB$>m``&lI``z!%{0WF6{_uxC z{Lzno#20?%nPYH=lb`Vc6*U#)idNSq(*s zIeP6arT3Y}HtbA}@Z4>-gpQrQcA#kP4t|SmbKktU=buVm8Bw5qQR{kbbbSxCMXIch z6fg*-)((6{i%OzQbw_E?q8p`%QiTo6MtovWWj?Uo9w32m`zD5SEcZ)5hD4O7+Y09SE%ktth&U5c@c%R8w!DZ{N`z$ zSYEk^lFAn*b2M@-oF?SQ7AH~4B|#PNONtr2?AV4rDx%hW6#O#cuh#@*k)$J{$cU0A zN$?2kiC}f*JPt%77O<*$JXwdm#O8!^J)z?C*us#r@=Nk`@D#961HvG)t>kX{;`c4{ z1R8(GSA-zEx2%3Q$Mv4y@cr^LPkim0kACIJhwgal;V(b^$eo<_t?uE!Q7R1-Euh)*r&=4U=L?=88n8P!_gQ*%0M&(ef)u2oOj&36A#^X-ZpEwAA5zD zPPQAC^#43~+5Rbzb?Lm40m z7^O|;_L13>cA1^BcIFTRIP4QtkD+oxp9jRq%B>_E! zQ-Bao6u)M{Nv0@HKeFt;FQ>BKa^A^)g88EJPqDw;k1|8euKcpU?91hc?!Db*U*G@s zWFwVPlwD^+nH0IqttJslNoG@=V^+#*N}5M3E&=qiWFnf0_&E$SbM24kuBApHcG@*1 zOX5!_kxYI?G(pvi;P>>~%oR5HZ~1f#$o z6jR9;tgw=$M$uLblv{7Q>EHtnAlx3V1G-KtNs^I3d2L3=CObz@seIv(GC$zVp3Hsz z#?QG{+P03bsy+LB2JSK_0YI!J4#1={`9x8G!c=Wd;E&bAZviU^8`v)3i1KPHuLJ|a z!T_d9x%B(@|MJV<{OV@{zH9cN^x3c9T7o?_5zI(`d!|ZJE;-sc;eV`)H~BL#hh^So zmJMZ_=}sNP`>f(Dc9y^2;9kYo%P(I>(QZy-+fW+}Pi?~WS7G#O#I{k2X0!}u=b+=% zM=&z2EKPdcHZJt)A}?luCoCoPpf)Ga3k?i4nk@R16IGprlOXCGwe_tBhg=4Xkpn>! zyKw%lj$0qF?}8&=cd&OOet_F^T=u{Jn;*mA)N8y|9c5``*x3j%CN-4(dh^Jz-*4}| zU7qF*k9XRPLnEW%ZH*Oft{MC$qq);OGDn(I=2~FcfNy@qAR2#k8JqRy7b}!AJxMmr zO&?Y(dPPWL;x_lCX3&~(n47uD5kdB5mfuOh1YZ!oI4QW1z3c*2_ zM!3OXNC>0Y289Zxa-q>MW5|~ubd5n6mXs^JDFAG8QDCl|=x|Yz87PxiAj#z|I#MxI zD1A~1bL9$faq>Bw6XDPE2Q2@1=I;kTuoeA1NSQAT3#<4v(jNtlBBu4x;G9MdC=I{< zHIVcj7u-N3q$+4PjjtnsW&6dVS{^_)H*F$^pv_RP#0^mhHwW+w#o_2Rbo|ug_X$ACPG$4K0= z($bG|KJ?@7KJ{&2|9k1B$F9rwvXHfF@)Q$F%-;tY!eygwZDwPH_IqCs`2E;5pK*FKx4p} zG(OJ2vJLByEQP5}0X0%byYTI~qti`TYM9!FvVCM&*y(64;0!zW`^ez4P3QN%`x1~D zShmru?lP*2s?~-{WGzs{HFK<59h1OE{NgP)=q$t7303`TAn9Vp>R2ZQTK9%Oy>b%X$}Dw+Tb4y5lF(x% zhL%&rmgVG2veH%^lu|9rGWL{I*@Vb7QZdNFSL-;{y*KSe%Yvb@_>||;je3>Er6`-pKV|m;zFSq@c|4YgARR76zzbXj5u`|mOS^RGq9{RwMzMe$3$exC z9=oWhM2Zxt0#XD7L20&FqQ*%kF)TbD#C>yY|{^ zueJ6*d+)P^`Jo9XNehHDK6D$QI7&L#mIHf_OVulG9Vg38UQPPL&7D{3J4d@by<~C=8-{`RbdJlOXAUc`dJmjMqM^FU#(JoPEMhF1O^#e zvP`j{w>`Ga5lK@Uh`@L%)yIL~`ldfLZcPC*!XP&(n#dI5HNYG}hP`pq{G~jm6@5e> zH5r?Ltr(^xp-I%FZc37r%^==V5GXLk5MGrgW;%HGS>ua+DL$R%a+5k`3WW55WkcGf z@0|j?A#Dr)A6xmWnMb_W^)x61xBF- zR=0OSU9~|jnG0tma|>dS+gIt!plA9V2uTV4KvqN>WUjS z4X4HrFMe+)p=Kp_(T!L?#oYh%zY&8&9G_?RL2Q5Ez#540SEW1~v)0wk;4*vK5 z`5&4u27z_OHLxC*;*FD4+S@?YF7kSS1mT4wPMt71Bzb)$Hl=tKHN;?6>X+DN4Y`0x zsF7f%Mr2WTrtu^xO@|8>M8cX#IGXCL!dInuI2DqLQbuas&@Y>7I%QR_A279=T&Y^J zm^6oS?(&jl3i(o0pHxNZ@Wn-gicP|4c)4RCsxCgW47Na#WM9vaXeotUC>rq);ztg5@1 zd~Y#JVP_IJ8uP>N-1hx%-U1pU&=_^%EX6AVnUes^4xPVt>lJt1c9rwxZ{Pi;J8ru8 z?yp?_Ctttnoeqf6agA{bkQbfoYzVvj2gC`xCgQB%L*r zXuN<$qP_Ki00e{A9E`SWxHf76(tB@v%WJ`5XUZ_Lr?SJ#;4{4Jh#8drqc@#N!F$KR zGBuX<1CXnkc*LD^WDV!bRnSbOq2_QgH0?l#1R^G}g%c!UMpS1?kscEpXzY7mZ#nCw zFMRIPPdoKQ4`%y|%#gApW?H&nPTRP zeGcCKN~z6~X+@zTrG!fDh>*RUlqNMrbTIL%P#ar2Mrhi;GET@y2FFxYJqsBrFq&q! zRIM!Y#nf<=uvC>n3*{WKPd+STWD92KSIV7Vk8NyPtgL3p0%d8M9VOp>tDB%wi5*BrJ797tb`3YPF<~XfZaZc7QYoP3^G1iuV?OLQJ6!FJh=& z7)AJEkktt_jeK-69+BszT1qo6JBS)DtWAT#t2=6Cr(UXvDQ(H)v?3isUEy{wFqYe~ zKDJ3~)K$yQ&6bQNwW?5qxLNaedu_uU zQ926owWwgY!PykBls)Si#~X@rZ7112WG0^2(21D%XyXU%3rQ_=)6m|GA`J&d)2 zP^pTD1n6eJHN=`gpFu(p%sm%xE^r4QDK#k^Kzr8WN!xDgtkYV`muI|rr`b$w8uB7b zpAq}m$R0AXX+$>FwzUp`{B@zFTRyJ8SPg_Ss5R%<38FSlG6^cH_3? zV3D0L9%Iw)k?e#Zl2@30^OK@9P$FKog4OHp}3PLyQPI&d0lVw=Aj{5Ddf6kibaKSYpkIeSlh#pG~;wGU(A`Fa+ z1#YvZ7JY4Zxl{IVzR}VTBapIo=_uh*>lYzM$-lk-7f#}QF4(8=EPW@XQg2Fa>uHZ|Aq%|{9(6`3Z1Alc?VCSDBm8R19~X4sLXlf(pI!DP%Y z#tb!QS$6S?VT1%J8@*7l(d$Eh27|F7HOP$j3=C@NSSi>F-(EIg~Ut4)rh6Q?9>fzewZcfqmr3d=>w$&AVW3!+ z&_7TbFD={oavh>BgY0w4K071qigGst(UV_3@h+Lg88s?Kwo5q^t~_B^SG2j%O!L%l zS+J_#vU)?=RqztMo4$){u(_|9Sze_7_Rs&#^6M=Ff5pssw8yaR zMg1*v8;a@?1-F>qUmoB4a+(~GL&~1Vmcq_XmXU|I!C|M#fG~27 zEOmSg5s&j_IGV6yWsn(Oo^>P5nle<&6j@T_Ah+s9WUE!#|;kLW3z+zdaOsX z;pId2-1yMFHacjx*}KX9dep$n&XmFAzL+hAos|O1Fg3aKIY;m4eE9{>*z3@}m-*h; zhHF3E?3+D|{UFa{L&`Sl4oqP{z|H+#3eqBbr+}(EK1gUwFSV_L>A*1rC2DQmE|<7QacgVxTM9Vo-mvQCpbP^P1zQXpCiI7_Lis0d#6xmr!*NRtJr6=7IS z5UPIDUP8fZW)H5z#Rx9$No>zuBe+-!FMF36pvHgpoDVyFe#;wQ&3LWWM%MHUi3Xyu zxzc_&AklWcq0>l&#G`(&d2_K8;f0jJthrdAaesgL#eZ|FMFG|$4X5f(~1INQ;Coy1wuu# zRg@}47-kU#l)haY!jmXUt!du&vI(v(M<`R6w(GKZbunglrNc#zqzpE}*@=ck_4LJ1 z81zR&l#VKxFvz$$wq!}+@2ZdTE z+vBw)S~J;-@NPa8VW3hC33~YXU}ADHkJ#*)3q@pJu3*w2&~03XDWuS?NOK z3@So}B*jysT&qVBW=JRqN!`(8r9#!BejZPOzI_G$!6=)x+MSViMiwdyrr_%VTQb*z zrmQe{sVD0$)oTUUY$~M;N`t$$bE>H5``i8~v+u?TfW8q6hJae0AoA9*+r;la{C6B* zIu$iaK^0TLbTE-Y8`uYMb|^^*Mj^x4-PX)igom#|EtAEZ1&Gu#=4J78@Rec!F~1ar zIw2-EZWb7nQU=K_apD0&o#dJ+j5L2S%~NySR5G*pvUO}T-@d(LIH1G+7Vj@FyYczb z7reZDzrB2p%*iGr!efmE4a7JiZ^~QlgrV&ssYn3*oK8WJw$N83atpG@jQa?fm%EX! zd-lms_oV`FNqg)EJOo0u*AvH)T2`>Xg0TWp}VQ(?RM}(l$umA2B zHpzb9_Pn}_b==X1`|7wuSRpZ3&j=E=21*HqiYew9{Ji;k*AaQFMMVl8?Pkpb_HHYh z?=pF209LgY!PM?Y{?MfN)>j;`k4LHP7Fc7`YNlHZ28f%TZWH>JP%jw_wQ&Si3{q9N zU_ljsoyv^A1emlXh)_%vm0L(jA=rl38j=obt#^(36&{o+@YqwltOhaO;-M+2LUeB;ej>5czvxZEdipx<~8$2#Ok{y(!__H$_ zqM3S|JEWN!E(VOb*?8!>kHN+izY(F*asR@Q_`c#ZPdxs(qmJ+q319H@JqVwk@V2r) zzU&bogWN~;{oSaYwtwO=#~%5T(@*n>MPS*FW>z+j&D8L51@-5k;ditv#!Ue=EJ<`l zL^7q+r0hs_lyvB0$fXq-%ziHLJ)G0ph#!9Gt1C;GHOg6|OqlZMlPpw3C=7&AWl*JsJ%W*~E=JSh z7got#$|6DOljB<-l5{QdUYt^9*`%pmQB}y*nnk-34qJ*_lif54r(l`f%IwB5ZWeA* z-CoBI95YC%8%lQ^NO_P`W?&gWMuhQF*cnoGxGep%@6C>wd+xfl(_{b{WbVOi{N1-- zjUZ*|99&-uFW-9I`L|qq-kmp||DHF!5MDlL&&?0pcZ-Ae*aTiac#nS?TXYB+73(sG1w3V#2phuzCBh=2rqZU zoSLFa0c3UIWnalAQL%$&9f9$EFp-O23<4I0*fiXkyS5N&LS3Ql$nF9b_L6d_858e7 z*%33;3@RbdO_Rq^_d+CbD@7!*KZslM~!^R>K0F8%c9Pg$#n~5{U!z$0Cs95 z;t`~-qd*M-#hoU5dD#)O^JOQ?LFUh$^8sJ|y7~*}&K|?Q?OLxe-}ki}ZDY)uCRa1U z)Clx7r^bE^W4#Oj(it;OE!FKvDKtYXrGT-~Vhz*u%$h8xNLKYUi(#5l8)S238*dj% zzbz=s_Py#BHfv%VHkc-N zLZysnB-dPMBiDA7PHkLSSIxCEoa|t2KGhvGR#!|b>dfFuOqXk6j1`jiTo)U8cXL_F zvKTuz!Ys?WLn&(cB}{D?sila1(7>~N0CKIPRx5NVm<$W&B7@H9 zY|Q+9C|_6Z=~fCpNoPR%MzMRunyatA`%`vyPU?fzu%6ju=o*j?3c;%;nvo0>8OgF{ zpKPPrkai~MHCS+|$X}a&YcdN^SQ)Z9yHW|I>`j|&Q z!l9>`0(DA}>eLV&Gf{RnH|IO^_U-nx&VU@j02DT~HkST+?^@b(6tLfFAU3cEun?)lv$S1wg;SKmsxwV{TP$ zY6cjmlzom{7x@64rwf+vy>tPWSOgANdczxxYbfSy*GW)%=w%<9zHJ8~kR9IbvS4 zm5nEC4w?u}9LlCcv$c7Vrizr4s6uFdk)Sc(c{iHPtN=o56io~r7BGk-_Jtv3ibpyr z6b9ZRY?3lQW^S9|nerUQ8Uqk9!npc`7~=t93>6Hykz>FcAtZsgkSV~ZN)*bYj7*jx z$0#BRW~x}-pkiZ^M5(k{mW70PMI6ZrMgRH`-$N*<@lgkZ8P_}t^! zct_7(XjY}zGN{${Nyeok$l^3qT-N&8<-of%db>*_I|f5Ktg%_=u!bQ)7=C%rZ#}8= zU0l?q%(9W0Qz>G$<=TicJUn`B82sKzCt8Y;d51RNWXZ-GtdH^d#QJML=8%I9%AnI{ zpLyrY-?F!M2~0>%(hZDhduuW^P0s0PRBabS^QFK>t5Yr&Je{b~D2*D-^^aeM={Lgl zp}Ugbvdjpj+BKaHn$TP*hB{nbHW^7szloL${WA?|%2QB<5j5iGR27a$C?Hje1T#7T zBdb*FghYWPYt!s7oANN2rdr9RB!~pnF4;=8h-A1XXQ$;0UnF_2ZWX&a$)&Q=p&;AO z;9QKNDw5FTQ9}l)NLYI!>4X@@n}k zHbe{^!@^FGy{a6+$m8f4)1%rT^H08i=a2s6_MW>AFZZ^xQ)Z{hojCUeFdqSP?mY1F zcka2~_r7kr`rJEiIM4UK_I&dCEB5lomzR1ZJG^|@-m@cSpTG7;mYpDbkvW(Q8hiA* zuX{OPhLIIP(@4?N4qEo2llJnxFXziUZ@Xlpb!X3vJp^8MMhRqj7#m~*KS40y3rK{c z@fcZH7;po@fL2%xuL|nK83UJrdn_RPe>@$U;4SEv9mtNForAk1r&7i#Q@}`hS1MJ+ z4tHV6kTQr3kSYSA!;iot?A+gha$XE3cXsRuwi9R27*Ym@J2eJ}J4D8S&km7gA!Q2G z{J|;b%O1S;O)wEd%D%cFh3>(-n_cbtSj6PvD3qfW)vXNk)MDw*bBJ4K=9g75`E(sm3!^;ek=zL)-eec^}=Xq@J zFTdI-IeFV%U)jo(p4AS{nano^VrXrJ9b>iq=@31 zJIyRLXQAI7pRL1LyAtA(Di`@Q@HOfNWsUIP-f)QH4?S+C-Rx`fXK{>T8#Iu8u zI!y6sy?w8v78^+W#!Qv6Hpqg>T3L=LSeb3T#v)nK$Wjm<1@|MVsH6gOYa^jXpHQ6I zk~PfGXPnTgONU%G8am|cOWiRUEF{$lho(|Qh`3WUs*@<9sk*3GC`$(kk0wT~_)77$ zE3*YQY_5{6iU#sh=U@`b20vwasihjLhv^gmUy&>!VjpFpNL1;Rwr(61bGK-!qFP9) z)aHRxHpxjuQcubjZFg;!JEBT~zD^U2EYg`uwTMJ@jGz5gXJ3(zecf>F?C<9I+nw$O zJ|m;I`Q;8n-KnTgE-R9*_Q;uzV^cR%@2k2+8mNY4FbbZMHD3}!J!Tdipc3!M*bj+T zd+^QVf<68C2BR7I;G{v0fwp$qVOuU7ALq*-6|!E@hm~Mz=!t!p+S|dNZFWA3RHPtn zGEzWhw<>mt)19+@dq>Q6m7ZSqcX)l~OWGgg^|c(4H<9~6jLl53rl_eZZRt>|fnt`W zra5*Ix<%8hJiw;SLq9O;E;>$cr7Y_ZI*_TbwO#Mw!^=vMl_wOr=|s3$+| zioJ#Hv0IJu2q?(xcK`wCF0UKw!@iyMkAHu^E@DuG^-s%PS$sPmfq|>K7XkaQaEJR~ zK0B=WYsebqA}VqrTvoyRG`b?!!c_xcciUwL1~pa}WzDYvdO&0Ko}RG+8ze5eA5S1| zuC*Eo&kqQ?0#8#dfW2UNwDb}B@x;UJo^tmb%|QcOGNg zP&bB~CPu!PWC_ikW}>MEm-TVqhGvI)+)iDzd{Jvqf8|76DNyHJDxQATY*AFrEz$!@wAj8Ur2Xh7`>d56 zUO?kT|Mgz;z{|3DuQQw1>hj24rtmspm4*m1(y`+bxJGy@s~k?X zkc3|ys0nzk*{7Uwmr`{w&{)m5QDJluB8;m{B9%3m?KWTi@-yD}`qwy-F%-t zc6G92N9q-3Yqj@fJgV)tP)}C)%D1j_knJk-%W1Ah1CwZeU7AO&Ymjp-Hl&S;L`x!7 zI_(EVvI1+OX~K};2Ut_lK`;y2f~SMAtk_h@@*-=TH#<@&3`II$MO9d1L}?*4{_!XU zItz&%r5Pzy64_>M)&x&j82IcLfD|#CBWap!;)TQlTBxQpzszPqR?K+%gb|)`PG#dM z&@9WyVDu+7I*gd_dW;%LeU2Zb3Z5OMRncBqa<$?)WvXr&6-O{^y31IEEx&_g38QWe ziY)bmWQR_56qUC86x)3wKHYs z$jN&`JFtAm&6o8&_P6f34qkS|?0M`@y#F=f0uqsgIG#TcC0o}7an++sF zf&n)Q2>5cF24)dHyiA8Co&>>n(x4xPrbp3Wx;_^c-XsKa0d4M>1vX(i0VpL0kSQdA z`#L9;3XHEXI0c7sG|8pFZ@3uvMUbzRha{2>5m$=${5Ei{lp1^uTr09td1o13hQ6IF zgWJgK%t90lFWRnM8Y^N6h3(ef0~1 zzUYE;^Z=ZzX`x-FFnW(i4Ewcuv^RXMuN+5;jbBI^jz)?o>q&L-ObHl3_Vo3;{^-mz zPCMBn*>8UR84$F;!t5hp=X~tl9@W0~it{{)4KLqx?ZrR%_6&ixqqBi&GiiHiuO&4> z%o-_PX_B^-CX{{DfPsWIR1}(nc5Zk?PA5td&%Wvr_tUAIVU_Bbq#FDYXnvgUO~CTJYHf(CQX-i)VyXsH#w43PU*;ib(iM zl8{7TZVEM1RwQ_8>f*au;LE0fe=vQsLF6QXkgz4t4E@DKfCqGr> z$kQ@&;Ir{DM;@k##NTBG0Sta1MUgk=9OrGaWIepYM8nl+hk{`}Of$%!HBgOSBi6ts zhm54|p1yhIyjWcr$HJx5v?W&xf60PTY+9Ef^T#~;(QB>oyGzzzn?8{K@-totVuNp< zWH$9^I`a0SE{q0(aosY)DVw>L=6$+vw3S8eaxcE*f_<0o>126}$8GxXNBqv|XS~q& zvTX0wQrC>u5Y1s(RN6?0n){+fITZl86x^dGHY-MW8R}GIA<|3cHS7!q(t!%e2?0iS z%K#cBV*^V5Sg`%M)x&W- zgzK+nJAe1YvNwWxv@#2r3r)~tjMp?5D2S@DVsO6fa)7!}supPo)UW?NUEq$ab392d za(56vDH zTFtd!M2pug`bv*ssOciV3H$K`BN3zbTCI+Bpd$}4DzuuE2U3p-cdtsFIzgKrVj;#F$6IwG$q*wWb6|x>}DUch)or9 zZDO*SeHB$FH}$e9B$~2TOS4jul(H(#Ukpt%8t{Ss{8ZAw0F*jROekw$C^GB~hQBA{VTHf4iKm8!kTv@3G_5j7NCz|yy~+ww%E*J^n={4D%li@B&~;!^cOnwNbl9{Ki0i z>tZ37#)S*BSXwbg7aEd*>-8QZ*}-c*%YGuL)2t-EQ_Xu9on=?EG2mLexGpy#?=#FY zM`tu!z5P)?SlA}PTg?6zvtN#W`LmxI;V`lh;%SPX{>4uW21A9u030rBou1dd2s0We zu!tI=4&f0M$r!+_B}^ic0@Lt9k`-hfQlHQ)U|CZS)et`jXrKwAy(Fq@sa-@KInrvW zx(sF?PnnQSj+la%`O*oVbixdwBK)GHE(?ewei5@GF_olBi(e@n5(-%^FvqOXYG|~gU}n8Ee+?PPb)Q0*?x&XJDOsW;w-5KMZckH%?kub+itzKS zpR( zGIDlIf`N_+!`ESIRS3h&!D@GEMIzx>Fc^^tUZ`RA@T{gO#cR}@&hpSTT-^7*fMR&r z`7*o=BcK1rXG73%^ktX)F&(_-Pdh=MaD;3;bSdL_Ek#`sz9La6QeBL|vP}yGMN)Eh z#HrKZ>W%kFUd>q@~U;fICmwOo7Bia5Cb7;;U z()2O2Y%D{*B=a$i`W#O`ZwhO|SZ5g-R z37$JnH3Su22-@xxlN=+@xl&<5Xr`?&So_9Rd?THVNgL5W~4Cib_hNz3TCRS z7Rp!NQxk*8)m6$)HZl5Nt(w`*h^8i_plDDMrD$4O*enpT&@BcdONd}h#8K3RXlkM`m1GMh{p2CYae+dbMm+qfcc`N5d|-Bo|9)KgU(Y`D&5 zn{VtLK-*w)n&Qy}2F*n7SYvj&!nC9)B}WwSrvK0(QPGMg0R4d1SfxlSicl4H6d?>d zLgiHC)MQhbd0)BXU4^kR7PBKok7PS2_s68*WvB$ebRl?jIP2A8PqsT3CUL!VT^%I` zUc|<}RB7>RyHkD#yIr`TJxzc0$@|N`V5YkJj#9O}yzCOYYKj^X{ydh)vl|13gv;c) zZZ{?<80NK9YSas#|N9;k_gAX@2xAoaQmCIA{F0#0b&guv&ZCf^a&0kw8eL2Q{=PSE zJj{Q$S=T^vyEo3gt!(1_{jYy{;PSoq-E+6AF1w^du7=^+)@o+{njM7Z1quCj zza*wl6`Q|0hz?;BN3EtLee;MZM8{--)*UVrW(v~;WQ~G2tpn`SVGVCrOo10gRt6Gl zi?rp45UDsw)^cGC`;b)U$Pg%)Ip8y}tkfs$j0$o@Vvs;TA69x#;}gTSxS zs;6e)ft&#`YE>jLv$&S>V&X8O$oxVW5h|rmfgNTkfRn5-B?aGP5fG<{!BK=KQBge5 zu}7~xbS(vz@4owPX&?9klkxY^y7yjuN6dwh$3hLjN(Cb%O-r7p{_iq%-CZmf+7+}s zcGdKG0|2-%PU?aBq?lPFWIw!Yof(Z%WMm@OUCOML-*pf75hCso@5q>;K&@J%3fD>- z5|q^vGi7$f5RGQW=si(I!qC_mSpMO6fl&4t$py0*?Fyr5KC>h|rY6M);*wMsUt3fh z%EhnxXpyedlWu=Gs&z^^%2%B*`e9Tw3nqy`htF5}dr}_8zUrcn-FDr%pFijAzK?zQep?@X(6ZSlvUgd} z`SOb0H`wRNk8zsp#bngsvO{F3nXs>6gVM-xGX+P_(DXA7+2*7}w>@FS6As#a>5fm> zV2h0(v(_39^9MC%k7WOj|2&j8qesl>;mF^=Si1SfzE$$HaYZR@Rd(?|tXnU}t}M*%`Elt|4g3 zp17t7VoRZ8X{XFS7X~upRZ+?aygXBz|1m7SI4!E;(U$s3V99H84_!;aVHnv_GQ9kb zx4zzU+Hkb2cb5IlDUjLszI^pd?=j8``*oK_>3OkWG_!07yGYiqVbCip{Ia9=i6G?B9RZ(Jy-5GkovMTg>ly+iUwuwvT;XfAxhviydC}y|00n zO&pWR2G1tSCdW1@##|$`{cGdbW~@17ezh^HNUepWsHs|!xhb3ebQ&_)grzp*?Bhvz zF;y2O3X4+ihNXz6UfH;0Pu5y-mNdg!KXFXR@`N{ES9TL18_ z_fSia_Tt+-xp^j+o7BI&0IgrZ!u>&t$UEEEHMj##$0WDSu}Lm1h@U_|h;wv^n_7r7!tNSQTe zHH4fbN+E-_np{IjYPedmSs>|(U>GD+8WnICDEs8?rt~vLmOiGK>F8})428PA*krqv z6XI(oO<6|hxLC4-$eHR!tYURZq9nqjk9VhYn{Lk_6_O$rs)Z3xVUToZbbGA{S-{f~ ziH6i~c)w22f5G#f=X+mEH{0Zr3(xbg-%R-Jdtstuk2(q_^4_TTL!JCu^PyC;!)jp6 zn*!#7k&GcRHNYGa7)Pm*>>Xa{#;LTs2?mH$YH9<>#&!@@758PJjKXG{Y~V0@lO-EG zeygQW7_?=wvjeO-skE%o5r!113X`&95{XvY`Xe4*zIe?QHp#Y>w$L`Tz9M_Wjn|&~ z+*6*s<96$;yUyxs{H}Z7D_`|eZ~xiz0~%_nt_fBE5stk9`{p>~EfLLADK)VXjv|tJ zMyRL>u4fQ^n(Q>St0JpuW4_Dgf?1|)bQdK%VHgBJ)||^Ozu0lPKVEa~byw+sy`(es zty_b8x2M+K*??A;j8GU|6kb5oV&IEkwHzxuK?a#!lsXokbzAXUhaXIwCR1ZUZ%}rS zZ2@%34>G&E@#R5%2LJ9?Kl9@TQvh#Yu?F5fCj+p1>dD_(KJoaYd?m~~$bMng!*I7$ z4~UR)O+dVh6JhkB^JNc~`#ELzr##t%-^;e#Z1YVw6^5fX0~JO#Dg4E7ztkK2ww)$| zac%|~)vb*fL>OMCU|yIJB<7JSve8tZ1;X~Fbeg25RXba6D?`dA5yn%pCZj2lqoyG* zTu^BKswKR3s-20apwt|kE^$oBR*MDJ@Fe{iDc9X_MX)MlYTO#+vgDY7bB&xZHIQhu zKzSrmU@FXLMueA1Ldx_5o6;CXA=!qUY`9w~cZxF3j#(xi5i%|dppoENR*P%G!earf zqmT|Y33AMNV?fx!F}&P4vee-+xwNC_PMj&sH0RKMgvww|%QEAbcrL`?Qo5$D zvb5C_GX_s9aHGgDpfJMY2r*LU)ZVaaMe(X2*BxP8dJx7~#Wbj<3S10Wlw2CLkqX0D zEh|U{0iwK2{pP)vYCF>|7(|)8J~!GiybR-bmWM~#^f{svFC`ZPBJaYnxsY^Sjbcz#DC5FV zam2}Bl7cKeDLDyW2$MW_$yrd*^+F`sANm+EON}%{0r9H}BV*}cwj!ybDGm85jOrxy z5~pnPl6NVdWLm|_@2(`Z0=pgb_Obg8zAOpCyF1(SPw}!^5q;I&^)N#sOXxH?$P6P7 z>SJHdlp$pVCU@LCd$oDxm$9Md-d`pGlRH!H8SK9OMNWA8O_$2vdCM2>y6uV^ulnp4 zFFE_xYd-VYkG#S8GQ51uipTrj*Ycg#ftS6!eBiF@b&d=jV?bty%fPbJdi+`^$dDw_~w0LH0}AEPI^$uUn+woL}|wm%xVb z7gP(@;lVLLEQ}@#N5jHEnImRE7(Dt21o)-gm*ay~W(=FbSyZ?3f7>pZNloVQP5Uc`-@g+Jo61w+?Q@ z$c~$xF9)LOgWld^rp$}B2t&+4=31v8Fv1Xz@ccO7z+DO>gT^p2>P2RZlVwQRA6zE! z?Jsy4cK+kfeDvH;e?%3QZFs`D9O*!PMZz*3(=okmB+^$j0c7XwkTPDk%#Ok5F^ET& z4kA<+F81g()a;K*z3PlpoiF=C%pd!}Tb(a^9=pH4>=kBzOv;~N_9C;LOglhx!R)Zf zu_?7jvd_WT2ae6EBAYA9b~SA;sin|%k$fR3YW`A2W}hsf-QA!RnvY^YW2!1ri(isO zX&70BQE^IRDJYDEsp=Zo?oKOs)O@=~Yh6ghJp$i?X~RQkr_}BUgW9Uqd+cMTd^X98 z6T`)hgOD7-b$a%vg3U{1Y^tY9eHhJn4Fvke!k*y22PYvhcFq}sF5(Tcz_ z?-|T5HPu`5F*TepYkC0B2!kQ<@Nx<)u#7Nu4&z9+T6Li6vUoaG;RqE!iK2{SROhSs zvq>`-bW(5+LNu$G*(}q{xYAYJjX2$Iw3k|U%z9(m$@)rB7lK;K(yk5(Us6?Q)^$=m ziNLD6FjXbXZh76=-Tsj4!l0{Eo1j<=oRW@)|F~|BaBD@deZM3@T@~y2l^y*toAxz_nz{s z6YUc`2u;Dw#kHW|9c0yi@0m~YB(|SKfMpn2pTNa_;?Nw!#(}kdz_`}F%tqf;vwwKe z3ri36)e!U0b{cht;Pd{S&K9ak(+xpd)UurIx zJ{Aq5+7ysAjPZuI34(9AkX72$Gk2PCBvmnGkh!LM8&27DFs2pWd^G#~xW^Zp7B@nZ zvAJwoHD|eKqH*2I23h;1(OmQV1|fanQ8p`4^U>@xC1tb52+OJuR12lTo)I2ohF(Ls zVPu+s5(PL~3POeujV0(w5e9%lzz+XcLLrBxuV?>q(j|7`sBH@m4 z_*aSt=OO6KvhQ$QoUECZMWQZcb!AmyElgSr-bxNEgTt8k@N%cj_~}1pYaKhR@hBv5 z3P3ieigZ+2u*Im*=PpF6r4Dxok!>|L0HVf*P%|QIBxc#-my!q`k!#WHta*Y^8c_yY zj-+@*Cp8M1*Yswa+lpWn@c=N07FxxB}&6I6!ij6XALo)FXZ zsx1`MC6O*lwJ77Npkk|a*=JMObx6JnvMHInG*)uo16RYwd*EJW`Cjr^^9D+)XQZatnzIZsh^8I`UN;6Fb$M*JA?Jy!AUxshT6be z1lT$u1?V7RKoSb}V>tNc=B_B9hgV(N6UAV7SdRi^N)9lB;dxOV7~}{NR7(+M_z_q} z;4K1*-CHp}JpJ~!ywT3k)7K8`u)QsR_MF2ItOaaAEBd0HAWcK;5`7_wg#35M$ zAPpCxw3Af64>czKrcrZKuqbIiP;vL3GwM zjS;3WAhR9_FFQ)^U|oB;=24c=nX*pc9eAf%#U%QN1zq}qcfSE-_5e1#>=W7Ve9No7 zybLLW%%A$`yL|P_quSS8{<(pdZN_Y(Z26jKp*)*ROk1y!w!^eNu?cKWQ5$ z5VH*0BXygYu70g=BrS%PFU{eXJEq#`vSZU&gycnqNy29KG1gD+5T@nY0+>6cZNSRn z%L>dM&*`l#fm>3MVAy0wtzeWa!gt$Z9K#Fy_-aYW+A8qM2L4i#|3XxzCd36&qRbY81bSGtj zPPd_MJ>3xT^-M1C^o3MbDTa#-s;*Y0G*N0KG`p{r=0a&jWd$H4P1b}F41H?D6wRrZ zvP!F~Vpo|FA>sWwDB(HwdhWAN_7zrNQnC#Cn@!hVecAu~*MB_XCyd}WSfp-ujb&SIX~Bn&pd|A}6=+CZV9gYO?~Xs_ z2%4U?Ub6oBj)mhLeur(>US|!E*gP9cHI9qqwQ#Xuqq`^p0x%2005x7=zT(PDJ(vx3x&(fyaInkB zzy9+-3c%w3`=^`)!}@Dnvi#Z?ab%r|TnLQL_ms7|V5z#g*N+%3kiSdiiog05XB>Of zVIGzDP`v|U1S5Mu+FQ~tmS54`l>g};f5Uj6Wzz$y1Ix%IHXgXvQa>Ym9@`&;0-42N z7!Yz{^`lnv^#grb&x9j(bOoWPbRC0S`}N+d(9b{h*?!hWHo)HNGgEq@E&YSty!7<((WK*Da$)YAxhUhXbcU(YX-Jq5Rt=%#&aZVcpzi@0mQWO z7#F5fF_gzFVhy^KCMOwQD66iB9gfI@#t9h-FXx>CQ0%8F&xiXl2VN#86t-WbpjWlH zV3R><4`yQ+r^9cd&We6$q<$A7hL?Q-+h3LPYZ|LvLo7*3rWDi$r^$ehVY{Ba&eAYyo`yXR(2TAY1txaDw(Za5pfoSTDcopjEX2p zy(IXuQaqcOMJcCh>5YgoBF_t!^o&YZPZdKTc@QUOE@Y{Lz>#;noe6ItA`@J z(bX^bY!;zz(IJRmn)M-OA<-wyf=FcXE>NvV!ZK2X0O6Kai?G$z67AN2LcSLB7GznP z124-`9>ju*?ibaP#e|oC{HKa$e`Gnx3@Kw|2Wp;T=f3ylEU4P;Amwo&VuQ++9C#>P`N%`K_zB{kK+qSFixAVHd@=^P5 z?h$L~IJ}HM>Co+h%oxC#+_^Kb3^n)6w&$^*dDs)5dBpZd?zgSyu{T)j5s&mZ_9Gtt zV9#TN!2V?9$tRuQhU*Vi_?a5-zIo`1{Q%_ttV5>;HaQNX-NL+=M1IUu4|S{nZbQm1 zJN+~e&xQw}1K->+VLgO#3<~0bneZS2csqyf@RV{O6hoqlEfdtKA}|P6dotYb)c`dk zgwQwr(6khCwB>U;40%Gh7+4r;_SBY_mmLs?g8eid5LVtN zYJtfE3-d01eqmKX+NJp2K$ecj)xygjy#|n>2TXNcr`7cs``j_^R|v?L3!Mh}27YyKk6~+gysXZlhGEC=6@{c09dhXS z><_&iJ?EK6opk&mZ#nCw-e`W`J6`VzZRgGxeD=e=!t8nM{{C`!*=EL6vVoDcN5!eB3Xpoj~ksO}=6kbPW&GZLBU=zc9d+jam!%G!sT326*$OC zwP0FP07e7!RJ6qTW4_#Ze%qO*b~79xbKis# zmDGd+B%Lh74z{nh?#CT}lxMKLv%Ku_OP~IX}@H*YG+~Qnu*p*qD7e^ zVc%*ays^meg~s|oPT6YEvSw?MsU|OZm~?7cz|HH7$VS{nN&`U)Q(NBvh`li$9&vK# zl$m$k?%iQ+XHEC==+_CoBk^kHKe_=^6?3)f4 zqg|@V_-|6<+pZ@xPa{>TMYEz9_E|s}64T1`H^nS3vgVfN2h{>nj;kD#JW12=t`Xn@=Ct19T zm@29j&LY96B6T)84t$L+5^4wv%K}GH!;d_Dmf>hc@F|!aQdVmArC^kboT>4Wui$66 z@<-njkJtpjBR-D+GQ8YV+4vv%{?q(VFoVhd&CZ*@`&|r$p4Jwg>eJTY+01esl4Wvn zx_mlWZ+E?Pau=ej;BpuYz%m$U{hVFdc|%wbVL@sap*GeiKfY;>lu)BoX*gwH>LNDA zro)W9VZe(qqZYL!rA8DY&yKoTt}VQ~Mq1=@XelBmM1rhHwpN#w}z3OHN(z$1SaE= zV`xRPx8HQ}*KWP+D_^?cnoB?NsSmvFm=!y0ztyAm+jSEk``UAdwRhd-QSh?!T6V~=E0>onOb$sV@`lfhxsv)pj8 zXT9iP;AKJvVS5NV(8{kq0b>zd?|8S)5w-|{dNBu?L156>L)Y-KzZm7X8D55tG3R{3 z-_r7mF?2k8VB0IqcxboZ0rZA8>KsjT6|`M5`*DCy6mS+&n@ru(xwCza-DdqsQ^iQf zdJ@~;Uv`fC+}}I)*dvzv`^)cm^Q)j{=gvKn?d@iNNy;a`?zrXZJ8%7>&5xZ-8$}x= zyxorNjcFy@X3W+o2_a4LHj}nwvP#=Dwu4$FHOrczW+gIJWt)ldEDz&NbJh}OmJ4BU zx+tZ&Fy&=REJst}lph{z8eb1IWaTe`7PpqWZUn8Xcq?Q#1xqm8Mf!?r2%FYk z5&>B7YYQPJo2ti*x|A6l!r~-k$F{cD3Gq?ku~xMCU6@j)lPQ|ATnLFIstI|qk?vkb zNK!oFB+N1%b-f4?9!>oYiN^A%sJ_BfMFSNf0Z{IKq{z)nmQ%G9Q4@LhO+~|3Mkp{i zs$*mcYiu!RS&=LY8s*mmgvlBgm8!_EkVsrCcsY;cg#FGCUKX3`&c8e?>dQs@@3ZHc zk6z8+mI6-4N`Cku2Z2IBieYKQn$CuO69FS@-btMZLt93#u?^SY>3I3n*ao}6B2NXw zae*MS2A?V5+7YKk%qiz?yX@j+=QUP)gc}O-i6?J8KVj=FkgpUnd;c%~S}lN!Yje;I zoyRd5TM8)HwR+0hDf8d{95(y0w|yPLI*Rq^vW@y*slM`r4ntFrrW;eW%QS2b_R~7P zmTG>KckXh{qfRiRRZQ3{meo>nrPfSs$B>L#xf@RDM*zs8WN&O&JlpWk&kfuB11b8D zO?7pVi^YXL*EtSw{k9-H-fM*z#72P-iOr7^s=Iue29~0#J7-XNPxcgqA647CqBnz~-ay)J17F*%>J?OiK!;U?8Bb zb;rAY2*Ma4pcyde8417nV2o`9E2VF2qi~Z_RTxnes!A@EB}e$`hPEnVkOJP2V?;_5 zGezP9L=OeE0*@GVxfTgNyetNIlR~9TWsM_}S$N<39yr`lGNkN?S(9{nlqi1h*;}1XEc9=H|xVWny$!@Q?c>JovFbZ z%Yu={K5sa31kWC@j~tQJ!v z7(zu{8)ITfju}QsvcRc|>9dSSnJsum77`sElVj31dTkF{pqQ#5EFqTaKasV0M2f;9&$M!HbynM&am)?E*m3Pe{vyXk9bky!UY`xk(J1;qK_l@`1eyu&W zTm7KjHiVZC*>eeW>~U+~!A5-+8(2oraW67=xQurU?Zg>gcE0@dL!Wr;iXHaaX^V~5 zn|(E929_V>dtcBf^7o_wKrbviEXMZ~w6~I9`LY+2`*~Rk62gqmp#9(s7yI$qcThd1 zRDlT8w5)6g^kNKz4q z3iLsa6d*)JVR=IOgz&Oj5H7q7>Vm+KvcLEs>q%@+aKX+XGZ5`VUSKjrj8{7FwL@eQ zXU(87jO=M`Mm&ZMddrrE(?u1c&#wS9#6hqp&oO=<0GVNYk8Q)t$OG8kP=<@0FFS6A zp!o-uy}}GB2bRm)J!&t=tX*KQJ&dgO#`l{*hs!}`MM6q~7fBVpcZNs$q_caxKK#tt z@AcfZ2eADyDd)@HU-q}9-uA{h4hnT2OmRI#p<@& zk2tGk1E~o%ka~k7ROi$`veOpTu2Kp}^$1UyP5Ru`w<6?Jwk}eN&kII&7%ZyvRYCY^ zc1URQs3%##v&MCNjb#>*YnPt_6zL_DcQ&bsr+ZnwGIJF1Hz zg({Ai2SZ`FR@bdoU1ke%^adlII`AN?7<_QpkB4Wz;-x;Ibkq?C`-#$mY{~;Krlg^W zX%R31n}$YjR4H^L3n&3!ATV6x;L^Z$(CI}~W1cmK!Wfz@Dek#+9E@7WOfL5tA6wpk z-#z^OAP@cSw8OTuhi{*IibdX`C$P-8)j$A30`64dnnY?ok_$izN;p1tvg}|BV_W;v zzy1rr*$c}KlmSc|cw0&jj{3f|J!i9QEFH~3HhE#3Jj;x0xGZ1NYUNkcbWTDQTSnAX zs-;M)5r!$k={hF63M~qj3GMs=7K91lv|HvE%!GV}Au2AAFbWt5)!`7dc;c0I30zbC z?>k+(S6z(XT7hnD@hY^(z?h}!QfAQa9-b3uuSG{)rAYj>olw)d+;yO?pg_*P0GM(S zDFe$s2vkaL#X1A$CXf=@K$U4B33B>}UhA#li1s8~dHGv$@1sxJDaw?T= zw&og7lVzz|vSA+=9Wr%LYxGJxYc`nSWk?xs%E!S5b8?T-?SO262HJu1d#{OIbEGDV~BzQZ6KBAAs=lt6cYXAxfvsEEp0+ z25r7rKpAHWBc7t=RH)pM7pJr)ND2EhPeV9@RHge%Ev1|nng;N!_f~@9#WrG!t*#9#i{BhHA$w1zUoM}%TZtjDNQZm!jvOsMOfXT{J`q*|Na0-({Gin ztJ7N*KD_Lf9SxW)i?;%YmqW_3j0~Z*ojl=q5_^2?3*$hU99Wjh`ZH4Z-gPUy?3CHV z*a75$m!akm^Z?6&XkgjlvhRIioG*JG``+8H{N|lsyyc6Ze$@+)-(}l1_uXZq*~`mM zUdQ3`k;^xCxa@mhJ#h^wBj?C)F$wbKa!1eP0Cfl&WIp+bC;8;p;d^a%#PV(S*=6Zw zOJ=W?Jp^8U@oCR>l;k zb)LlAX4#f}A;1}GcIZ4F)ckWYdg6f6D?+rnqM;@vs32YWybg#HsI`t z*>N)hoEnML=89GKji~a1Ap8AH@yg0Dz-1)=rd$ZE3F8wT+eBak@{KnUBy7$i6Hq16gYGY#$ z*2d4C2NBwIA$uF8$X2KAqUnQaSBWu|ssHMA<(5=DUA43x+QY%S!0oKH9ffbv0+@b6M?LVot_>ZlH;So~+e38fwVrmc_-h~8f z3x-ET$T)qxy87Q;l2i5-5f%&uO!~ukO(+Io$l`0!G)|IS3y+b0f)GjVlGX#Ri;wyE z_P!!j#2{ss0?SIvaxwKw$R-IUgBaPu#;{ODyZ|&o%A*@GQY9BI3d^z>lv!b;HVKNd zBgMNMMu=38F{p|YI)D0eHco#0eMiIYss^WF3FJCAoNLVxAIxGJhFqjU8?)O1qAcVE z6TxMMw|9`6t!9?*Cj)ep6+uZxHKwe2WR1!NaP2gc7g+3H{^`F^6AWT1*UmSao&cBX zCQf&4nuWxgV=T{O+u=L<@m?oOhD3g=Wtn0K9)>BVD3_k!O2gEQv%pT*jz@Mp*zbfILEn(I%U!<}6$Mp7%^JeKS|kh_MT{Gk z2CY%;z)RZW3%T|eqhR6Al;L9N7*h69b4Sd*w(Lo4I2wqaz3e=H zCt5H_FtXW~6$bkPE`C{#j2u_i71X!79q*Fqa97H;=$7F^yJQwaL#VK|C1{K(askJ4 z%7wJHu%i|W4I%asz7<<3GScWo4W5*xvg#@_5>gPJz{JxbZ+$ybP6uiGG)~!B@bg<6 zvljdw%n$%;B*G9CFCwJIuO>GOq!h%HaNuRal<603y@W?hF-Rh98d4L#em0(w?qx7y zDylBCjeIG2y+o6o1w@!}UhKKSi@hs!|WoS(I8*XtF64q>xmR z;%kl;DI(zGO*lx z%?_9E`T8yXu#}gV$$#?wJN*gfFfy2o93OiI8z@E~bKlJ-_ZIUVH(%!MW|G@(yvQre zH(&EP?=N3_$wy8<^~l|~U)S^4&X>KveDEF{9k*g@FE8)E%Q}ECM7&~`CH)EJK7|c9 zcfRa!IVjzu+CcQv4}QWSyDr&(r}Yord)e|`x7zZtYp?dmM?M5zh5;ev@G=JO1D0h0 zF>u(YN*xM2KMlJ<)fFo$yAqe~~1u6@MQ%DmZ zjD@LXK}Y~IFiRd|hvthk5T)QjQJS!12e_(`!spuyH%F>E}Wshfzla9X{ zx zj_i*~o&JJnz4jH)_dK?zvVHH%E6kUifA+OkoaawU-Tl=s{mFe_al~x9VrOOF(hka| z$o>aUXsd%TQ$C@&WB47xY)A8}x>`&zh#`#6 z_}I=h!HbP43>rdevZ`1LxZ{+L){)~a1&U@tu6&Ng+S20IveR)+QMt)9LpL>-lf*nvI0;S^75G;_j*NwA!!+8 z6V2A@`4gM&;J`ws4{qhKLz{>0t#}GHMQt>ZZV; zY?~7)rwBvN2%>41;y%ufqJ((cGh4pmsiIvCWj%@NSS{+dt21@46p`R{tRB!0^L*7pIG;X*FDs8bX3W|9%@UGP=>NF{9%qE$R zbMAC$`P>ml~x~uWe?B1#`im1vjKfb(%;#+NH@Z4K)gc z85gHvuc+#PgLtaNDa)$Rq(-NnAt&K_T#=Z1Nh(|a>mR*`H*WP(ealofdEu1Dvdm(5 zahd4IN<}_&$Yo8*E)&iAs>rpMDI}{2B4mvMWfhB@$6BY9DHlS4i_EE$l7|N08TSmg=6Jz#9@)bxW(}s7tv8Jbi5se%4rgyqx5zAJ4#Frux zF|_PsP%%hZ(4wXx1zG(di-wo#QuZ^KOF-YN79M`?8j-sYAi0Yo01uGp6IHa4!f-7N zHtS^L)^K%c3@sO>6{#6S;sSNQrWRS3j6fIHXjEj{8ieqpp=*sw*}WTYJiFYjCv-3@ zNLAzxPX{%m*~ywv8_6!E;5E=i5v?}10%$M7yY4m0(=zqBhU)?CH7Zmj^kle{LDi`d zY7t*WQb@9dNHAI<3~~XJlN6`YMzSovQoP{t2qUYCy0u6bAw?wQLv!?=e$db4gECdB zi)bohL@E*sMP0<&LQ*uSFxaW?@Q4wltHV^+NEH%tB+mMwXcID0yK-HqB&gUlfleN| z(@P}kDkW5lO}xg|ftY+L%^g$2H3>P&Me?$Y)H4`FQ#PBj9O)vB@zI1p{GE{nHGMs1!6RY3`D*U3ZEfnq?|UKL@S zDH8(Dy}1k~hm;jLV*bIm@A%#~Z~x|9U%uy#>k-B|9rW?-dv3uvw!?h?TQfBbh1+kq z@@qF=^W|$k?-SX#e(7`feD&h1FM8jh%eUBm>xUh@&!)$$*!qCoHe9~rdPnTD#Z#AW zb>LGrK61~cCmgWt@%wLc*lwF2vg@YDET6rkJO|5Gd(-7xV+%f_p%5 zun}^kQzhz&MOf=r2k*q+b0|5A$ZU^Jw2CE6o*D-2|>oq!oa_(fUAy@L1QhYtZ9%eUmOuauOVm>SXf08q;8m< z#3_8wX0uO67lGJ%59yP#x`-pnI+i0OPDDSD&Z!8cH9=q?vnG(h#h?4+$ItuJ+2^10 z34|x{Nwtg4{WNq8G6U2=G_Z^zIro$A2bR6U?0nf@l6w8CPJ8PcUJf#QPWzJ|dFQ1U zeB!2SFZOY4e@V*Fm3BH1Tn(j~8s-boib0quPrp+JICOwkKs7*0!jxHNuF-VRph(t~ z3@@uof~UY8%k;y@B;-Y+3O+TGs$_OrrNGEBT<5)MUgX1_0LW)L^WC`R#bVeB(~8&1 zV>wbZNY*`<8nQ1!Y9SjuMHs1t&NApuN=VH@M_3gKt*hNE%2p&*ltd}QBX;oBq9fWi zlkH>4ZLVw~y6xI!xzE}*xG9rh+Ksd!ar150M~?4aJoYE;dB!%T?IO+g9;~I0VT4mf zbZlDMIFaXzP?0PhxAhzqq>6|{X?AQCG_bZy@nv;*eTx~rK|!lE8KqoNRdh0C@@_F* zRvnl}A+_I=)*Biw5&@ueFxpaJ064R7DIOgh^Pb3n5E+=wN1DG>=d! zT4T&z&uA(5+9g#aa!t?)xz1+xNrcgLVIMKuUX+@9Wp!2O1?eS&7O`gV)X9Xhgcw5n zC{;xa?4W62b*!R>{he(aRI$Y0m)hbd2jbKJR^?+0iLXv8QzyyP|LcmIS7nt7%M!RZM zO;#~|DZW(GH zv2?_l5krx26&ph7D9TaQDU3_Qoo;cVX;PD=!zR$Jf2b8CgXBsZQ3+Q`S)fM9b)Bh- ztZRpw1+HL&pTw9q+I1Z)sSoj$VqEv3+0-xwhGbZyM!AZ4p+ktLuebR^O}Z5Df?SkD z8x_2(tobx^Q92Dw%p&)TTt~fKTtbduB3bVjD3bQYY08DfNCRdT%=|Ap3EOJWf@atw zMXe94ku!+!7DtT^L8biQa@*6nWe&Tv(Cxz(lb z#E`y8AFu5)~=&{HuQXq@N}56oya z3-lpn%DT61)a~p*%^;Qd`3+BDWC=06qtQAFa&rHogLO@z<*G$fWOlVlT9NKz8n z8Y_$E2&pnCp%`YN&hfJCu3a%28}9*@1IU42n`8=jJMHu_vZH#NaA{azu0Q)@QqGsb zVT9?;Wz2aeC54& zT>Z6MJe7Unl^4F}s1?h$+w%WXb{}w`Rn@)!5d@{fz%b0vVSr(%15Be#V`dm&=tu_v zk#2$55Kyd%4HZ-b>Am+BiV7No#;A#=+B1Y1-2J#^I~~3M^dpw;w)gH^ zZaiVMtGlxQS9pFI+=KN1Mo17U4BWwbFmHGn=7mq&+XW#7NwSCmJuwY%B|Hf6g)T9P z0hO?)Gtb_j<#@AW&rU?2`G!+`$K>==PTGIJ1-tGzZM&_v*nXQWy`~jXUVYV(nLcRc z(Whn>Y?dWx^o}TuyG60|1ptQYdfK<6SVzscd%p}IZs$Sd* zO?&fahnCgXrG000Aul7Gi?FNCYFO(O zG{eilG8wTT)sxG>vR7drbNG@soq576=$(Gd z@$-%yVSCD*wS!Cpo8q9T0fHD$gRX^uZR5)r!~~FOv!nuqjXp6u}RAzb+5av2}Ljln&)TAet z+LYM3&aCC`DVUTbnN-Q3=`s6}Y`u-?60^Kw!o(W77)ah~Bi zpr&qCq2vpMhnsjEn}u}BC`q7mKuKtiMJs}GNr_PyZtBxN)Q-|_ak z_V9)v`@{F&zg~SSsp-o;ZH;YE9(t|I^k{$}tBm^i{X^elsuskd&9tfM!5>qHvWE6WsM-r< zz4)sgd-Przfg=GyfMF!-_AZA~NMh>tm}q=74b8y4Wx9>z(d)FFN_GJ1#lPP-P{#fU z;QPzlN)&QMexq}RA;XlFZ{R6{o%BZ2OI^B#)8U~H2ut6?aT%&Y*j5G} z<-+-!zxoT8A(R@u>{QA7*MITfq{^Zo@<6m9(8y9EroJr~x{8bQjCN69bunKJ{Y@&= z3=aDh0xtH03C=J7{%7``RZwNsniI$bpS`O=&!mUP7-JL<&@5nu86im(SjbfM#bgyu zxugR1D%(_$lE59ALDdTN(^;UlP{vn^38yE|$b!R}3|nCs^dkr`o3IgAMW|x7#zH7D z8F6YAftI9Qw3@u-8f+t`c4dstV>pT+FS|g`kbU3}#;}JZK7ttG0;Z5Nxv@f(Qjon+ z5#spd(ewx}u~t&;MHOK>RZUsM1|?{mk>z8bBxr-UrElxrc9!wS<7JEF@$2eV)HGn0 zgh&9h038=VV_~>#qj@}D2Bk-zVE*b8P=){+!^_|P`ZED!Tg;Mh#J<#H=Z=iauRZg~ zSDt+EOOJc&*Mnbt?8Zm$`^+s@zxT)kcievSm3G^HwY_&+4_;ol$40{9S(_}|Yl~&G zHa}#asrHu-n!D8jb2dL@-)%gk>|4wS?X~HVi>BINK5FqcM=zav@*z9D;m8@nsfX`& z?EX6*vEO#f7fheI(-iN=9%1E`UlA{d(I7aOPJsLDazd9tr5NICBQ-HJl$Wq1XpTJa zND?z-=Mod>BS7K%E_wGmf9tKj?&Q&6FWTy@T%P;8^~URfXfN9G;P53EzT@rZzxk}w zPqhJk;&I1#9ru0<_MNohhAbaKZ|4q2W3ILOt8G0Gsdl1$g5;oO`}v$`ulx3044=xf z+sg)f-ynarJL>S|Y*P_QcAPy042OCsn9X*3?dl2vUXT`g6%c9%3!~+78N{ZLH>xH$ zEJ;i=G4lM79w4`!Yakl*mdmdbVopO4784dqB1V1y$Ql9-L36KiKgDBSQ#Q7V3=sp! zmwosH_N{FtOZvoz-Y*qD0eIOue}`m&1aqXwl|;l)^kqw@bafW_FfM%<2#J~ z*aNqC<(HRZkGk&Ip=A##gSu!#45W>QH-*uj?6|YpM&E!~(MFR2XfOiHK)3h3$P2Mb zGQPwVCMFKjii5WS+mP=tNR^d9oRpA>sAxLNHI(8~u0pBgF@w*|1Nfr~zBscXTI39? z#FCnM(C&Kfcg;)Urfbt_OB|_|90hv(rfrqdjhhC- z?!tpu85M*eBd^$!Pq6kw6Gpi8i_tA$2n+7=E~%<4N?PzJLLEn|$}t2a=6ntjFvTu~ ze5lH3PUZxhAqhe9CH^cIF(op9wSsbopy;a4ax!R~kv(H?pvBB`gzB`WWk0uwF$a#N zi@apWqb*vj*_bL@Mo9!=|G$Nf{j!m7$*Z-zlu&MJsxE0LBZ2CuTmp44 z+0{#~$|cZhP4*O495G2b4MBK?fq>H0$nq~?+FWWo6>lE`7-@TPA5*y%9cSQIDW)O@ z+jxMHWs#Ke17}c$BUc(!XMErULEZC}Bzne&GNwu_NujJ#mMrPi1TI$?QZ_6r|e?rqq^>%XAmXc*ZRvrE`Hyxd7_SOZvwn(ZuiFdIhzli_8- zb~C&TEI;$e-M{g&8uXv@r@K zLY*+3OEh2zySXe&d!ur}p7@|8&;9oL% zp2z6d7R2yr>-P!qRL>T{QkHl5*RNIAURj&rcu+purAdUPuLs6!Un zTz=ODXNY}&*&%JO$aW+fQhw;38}7T~8n4LKMU5$-(*6%DX*d`iV!(4eMjlT~A}?ST zx&)lVr{H#hP9S;!whD6ZB22?Tlxl>^u4o}k$OLl5WXdAfP&xu%h&c%0H0sMW-a=Nf zr#oFU9@?F*nb5py4#aOxL*utL?!HMNjujNXNfo~=lvwt=FJoE~$0vht1~U_i(E_II zX>duQ;<}00y0XcIDZ6_&+Ty(Bx40WR<=`r?;J#^TAw*bfSybQ;t8sk6oi}7h5=~}2 z;0<~Za~~H7NTq=ul9pa_xwBJF#wB^TX`zf_@=`4V$+u`BCOd8NN@^8^BjDg$pX5Ci z=RLHlVXB(Y>XfTCBuM%iVWURGz(p-2&-J?9Gr}oMfC*) zNlY>pDk^2^Lb$v*mN6=oyy{q|Q7DU~d?g-&Bu%eVF)TS-Z zOD!Wps^L&WS8++@Dq{+1Q$+eTWrmTY0c}t)PlFQ1MCV2wiSuh&9;lCTi?$H>4M(aV z)g}Ea3~6H?BBv?H81z#B12Dx|RrV@dQY(`xMFWD2B-VjQQcS`M|u&1~LC?GRUm>QXl#Gq_i|Xrbm1qY0B?q~KC61yLxKD^k@$<}J zzAC7@ORXcRBFbo%bE)FQ$h7>UWU%lhgj~cZ$(b$*xYW;8+_(cMga#lcyb#TS#vlv= zF9_i#4wRa28zZuCrr=VN?2rhoA)`QpQjBYXfb8VcZxYaXY6x^zaX#=RAXFcd!D=V4 zx!oTe9e@q=uu2gE&4X2%xSpv{`duP)X2Oh7Rwm)d*lr_2D&xg0l%2ICjRG`zNx?CM zBntDD%D&*7u`e@urG|Auooe!LS&KrFvj=Viqv#WRDD_yOtCfCV0 zR2)+&E-~v!`r*)r>+QCKZ3=(?FaJ9T_^kPgKm-6ZcvSTKIFEl!&>ey`H2gl19uYx?II?%5A;z;sQJO{9ut?nxRZ~y+! zD)Qr>{0;c*9^`oTum9y&D*OE({H2Wl@~{60SrgL%$m6(F9NQX1>}gD4vZOJH6Em?a zS35HJs-p^=HeQTm!kChVkZ{w8TrPziJ`^#YIoLxRd9gyv!Zx9CH0U2sWCM9(;_M+` zy)vw_Fa0V5;u z3!&py;N!QFOC{N|d`N0TIj~$D2S9d-hLMGKpFQCGy>C6|^fg?J_>*sc1q~qEQ~vHB zJ_{YA!Dp8QKxug7_nv;(Bh2u!?@2v=|JC-FpML1d4_$nc=a+ZdX60EsuDRcwjStv+ z^Zn;;dcfXW9=2%vBNy&qe;I0CvWKrqO+Ik$7Dq4H;nc(SJn^8NeP`Lz%AQ~L*z%G4 zZ6$bwdA~g-cr)1i-8PuLv*TM7443j5Ih2pDNVB>2uFI%U4J?-(-^w)?3eq zJAKw_;)DrPCQaI7x1A=eyYA|%tqMVRmOD%xjvk(m_GC2F3{CI4)ApEwXt-Mzl6%eG z{rF>!0NcUmV~#p>*^&j`Jl2_N?*>2lgrhxAeZsLvI^fMPrgEKHpLzQ01$OcSJGmtn zLA(6JX&ehO^9&+y6CkWcc~UNtV!=UozaBU=?s&GMWru~~W!uUE0$7HWJ-_UUW_0JV zfoO02f}O>VUV90)bJsp5<&#q9pZod?&O7DecbtQ1i}|K&KK1DRH{XBfwc+K_4?A## zN2SKzeF|d^xD^gX3yAUb0C)qcF5yrh7h)YkSrPzv3t`)sNrr8Qq1uof@Um1HlLvcc z37Dg^A%WqNBzbd1GeAXlF{2)$MAwFNN`tefoBGDSMA2y?Lex<;Rt_?ZJ7mK zUCQnzNKzJg$s~_&W+Nap8BctE&f0?DLGjOl$Nk39MSzKn8VT6($5%WHKcA2;}CP14E^MXEEKxGiwWq=LPZmkOO>^kSYBD8)9I1{2`iC2nkuQ1 zG7PgYcq1osNFJ-bxQNf|y=V*R`iXMsp@Dv$p*;W7uawF%zMQybHVcVmm0TH2Cfkyy z1Ya#&Qbn`N0vacmq6r9+R0>o7sh_Y@P#smkRGlFZ7(%8}sKk6r3EQfO=57TjRVj)? z6o&TAhvc(FoWa>_=+zq)R2|9Vu$Ik~&_68IXqJl@jyOp%F-g>-vmsS9w48f+Ol*aL zH)>Np5j1Wb4lx!?DPsH}l32Uf^hxG7o*eoKPVzxB!eKXrERrA;_yvI@qshu#p_CL_ zgac6Ek3Ai+%Z6Ca%s;K+(W4rTfn_=abS%W7mU0!zDwnLvNPLp`3MIe{0S>}3L4idN zK6LLbx8CrDYp(p7#@7Z4895?aG9`?-I@s))imEM<|#CDc}YqRnOTD9aeUrCG?HoD=6Id`bCJ%mxPJ0eq_1kQvdLio-vZQeLU*qQ3B> z+t8V3z2W1Z_#IBrF1A)B!G_fGiKE&K04HgXOGwXPDyXX=bU)3cLqfYh`P=VnIG31O zt+_}`X;uIkUlxGZC9q7An9Cph&;Nn{qaXa$uYU2fU;gaJ9l&--8z1Nva0Hi_K0Sc& z**?Lg5@q@64}UPy{Fi?Y|Ni|y{GEURlifpayZxrOdFJt5zxAn4fBg14ZvOP;pYZ#@ zd2c%FvQK{W-1E*=zl%umMypLaX#OSnXK?Jd;IBF%a#3ed~*^I6ttRf(lo;U;s&BT&StQsXorzBOD z7sur=n^hua{EQi&iZ5?ftGI-fTQ$?!rJ@n4X8sWq7*Z*5SwLsD4`CzQ@_AbwJb$Wi(m{J1wP?D-*!Gv<`Lm>$aZD| z2H9~WH2@7dmyKM5w-5vl?Ha8OMp=@Ca|h&$LoCc&u28TTU#jfMr+Iuq%WEYPjlS8J_v2wN@+h-hM#bHs#zXBh*+Xu@Kel|rp0L6{{YOOiSY zFTQ}+eMALSGgt6Gn%nUOG*@sA83f1XYDpxA)=7ZO;9HhhRw-AZ;%cG3XsZi^8c7<= zBTPop5Yk{hMp!Do#fep!OBr1f7$3+8QZWfc=SK9p;D?s zwV(&5g{W9TRZXEeL|#gCn{Jwzb!p0#CFUps*`QO6m&6=W=|Kx+l$RbcPMYxr8c0?s zf$@Q63bm>B5)diZh?>5<1kgA#;-C$XE|o%gS=0heMnS|Xjw^)m{^uEiD|klGz6odo zjVL2+gQ?Y>k+sE8ggP3(HkTR0JYr%+w=)>XNX4Nq`xR zsqyh(zbX{Qr^=-i<)(*PG_}J5DSkzWU%rSMk`T2!<7-WqCbv8|CTPc8fr#UhqlSTCJT_8FElV9>@F8Nz zJ}AVLF{p)*5e>n`Bxa5>=UGkm_;VIbJ#;b7eeRrC~6a;S5 zMgz?VoXSo4x4#V;!@*zw<~P5A{?2!7DMQC!7mp8;3Wm&hC_1PbCY%@4zIdh@1Rgy_ zJ9f-CkIBv?bwQLwCbjqjWX?qyj8=X2!+;(Uy7Kc^_@My0lwB6pv?puGQvxsF|G?ci zptRIN`kBhZ|NiTL)Jq{>Sk;L3eC|K}{m(Vl&wlhbz$}Cd>LSGYvsTlHB#DtR(7gWL zn+n`+E_m~~!u#KIk%N)9-+Y6|3Gch>c6?KW1??M8J^8}7Uw~t}uDRt!zR0s?o02lub6;W!LFbx7~8h zwO+H@YgXQH;(9ynJZ<7e>(80H=gzzAuwdan#~uGVmxMQ(G-21>c64L?(T{&%6!`POAJvV{9^^yi_&yKF2EObT%t*Rm9n7dIB9 zDrK-_$ZKd&m`PIQ4cp?ZFeqp!of0y#3&TV-lsFhbCz?#IkgFna(k4bzl|o`!8{eh= zz$YmonHU)zql}qR1*HxS#AyZ)r;~i4qBCjmjJ#P(VP=e`jdWluhQ)w7RZbI}Dy!pnQjSZ~41iSzc@zfFzT3jf2k-5f z$X^x~yp zA9(M@K(P-WL(2A=L1Npbx7~C-$ZQG{L(K@OciegdGbn_Y83S=Ef|0;G4q7QNCgScp zZYB&%Lh;T>1DG(rZ$dG^lgN)ec+c&(T>sGhclog6y?5Oj-1ebofYh$;aYrA%>Euac z=y*M+vVrKe*LaODSlOy}2;76u4zUj#)3%=1T7C2x>q3`sF_;Wc!^>@3^E0LmYv?-; zdCVQBZF9&$OSy}a`GjM^XLaO874d6;I`S&t3gKndC!*;!`L7H z@VoKvxao7ZUVnwRe(9y7^ABT>Er&NlrckE=Vp}#QVmu9B`vxoE8E6$7o|FKmhG!$o zK%+#GjF{-4YYNc{!VE0SLf(kRAs}v;H^yD2XUIEVrh>>9B&A%r>A}Zz$w*-YoL@ZF zYmFCRF&U!Ga;7YF@J&*DQxIA(*9oN(#&@rD1C`hOW}b9+_Oc`QQ#46Qov(IDvW%ey zmxw{Fi~(w@5Kso?X!gi!c`R@8Xm?3V5qC@r7$L%aQ-~u*1FaSnfnqWi8^KuX5KA&^ z9@v?fC5k0n^2BJi_ON;(fEEZ)C@+{Si$E<-kPTlEgk{MRG=)l`GXoSyiGXms=N8ph z5fv13^5AC#PSbu><^f*flTN1-sF^TxPWL7;ZBpf`MvN&i#v+PULyXU6)v5Y}(_TXG zGfwrTlLnW5E>Ll9ahXnPE%_|u@N*-vn6*?l-#SmmGg6ClLfwGbT3JA7<#JJyZ~2hl zT&jo?l_f^UQ4r1Y)?3hePNm=!Qakd@SSRl$?)KVEIr$9WQvT;#hVi+U@-q=_G{{>P z0)o7%z*=;95iWII^30%$5?D-7v{EF=QeHA>ftVt!QG1n^)a*eB^t22h#s(Z2xn&5P zGlbfO_k@}Tp<52i68%bc6`@KUU1}QZ5OtxMdF+>D{G!>M$E31BM)JhitX`^&&s2sD zu0snXYX$N`*;~)N;J~pCsU3j}-G-5Rmp;kn%pS&_Ru&k{5=GNSgF=OtNt7#2lAvgO z9Ayavs(3}?XH^1{hM>MGUM;vqS#-)i=&7th509ZU22Bsma)~J?95Yk%hNPE=ypfmC z!zIt~Niz@6AY3-Y>ioQf(6Wi;x50ZxL_um93FGi&Gty|tym-Qye&eWs8QiN-bYP8F z8LuL_2#v#R1e;Y~t@(c#%PCAMR*tY}AML+z}_aR3o zmIXAm^eA2Dy#5QHzx(!E?!5IT#~Q6OAH4T2J2=)}7NvG=F1`3dXCO~G@wnY~+QBPl zegAas%-#3cb*C*io$OPm!>Z5MTcX!p^R>QwiuS+phU>4k@=6=5yVj-~PaJ*QWtEkP z$+gvHQ+z>{hRG8r_);pd&8JL4%LwhRxBgliPu_6)j@t=fFI>FpYOAdFn$@=6X7d@l z?>u?R#J0Dl?J)IqM;~sZdFHGcekxI4zhGhMy>3m6gf459fU zg(d_FWh_f0s9A+r%$LVa9Ta8&$%>FYo$?Y&Mnac%K&nh2=W4=o#g>yzeN7e{{*;bP%Wzw=dF$pDDgW#ijo{>n2C z3Yb17^{wA~`uQ(E{G}&ud+4suKKIm(&pmPN`DY#AF!sEe8#;`=*RJb&+1C+^w}+SK z&)CSe@_sWWi+xq<$VJl*o4>8|*ry#n^SEWx?JvX23ummiaK^ezXKe^9@4NGwOJ_}7 zzV{aJ^1_*0?7H2Cn{KoQ1os>Emw{z44Kjt)1PD$*z@A>8Wk=N^XIsqhDOd+4!*?Ap zhJ_tMhGcO@IPQwe&|)YR2LOay?OcP&x7={;J1%(3(MKE#4#UW{n9Wr1vS)*@z3OwG zU-l|6!`5u(m%5-gEGe)7u7uN-jumGZM69VKQj;+b3ohHvq{x9bQ{@TPzGPydY0?p1DALx+txG7+0R;b!qK2H zOw9nl31ls#?8gU5;bp?kTHpMI&%?zX#`aolNErt-2A{p)3!jV=+Bn_=_S^4!`z_a9 z4kP>i@)64yh{0rd*_*z+A{$=5=eDbTQp%T?>!QG~@nukmZ7~l(*myC@!l4ML6WDZY zJ3cf@UO+S$2^502foSrus2igC#gbTsX+6$z zGO{4Zs8I$>B0wQR7L$y*$if&Ru$ULO?6>4I;h4cpP(fXMvyfP5$=ci{HoxHnKwP#0 zcWK5mwYrQ`F2$Ktaij|6YMMp6^@vGYaiIyD713!^wADMN60K~33o8n%qp+Sej75=U zhI?!9P~7s5fBP+SX9g`zFn zm~GXebhU2do8=iMsdX9-!g8yXVar|hQaj5MN+c0LS6OR0lD0x9L`apSIBN3)&6ar_ z>;9{)*`^9A*vcXJU-wWisoAe~a><+L#V!RpYXYS!NMNwy6wMDDSf7y{k!NfoSS?6W z3g2?LEfq-$A|o&+k4ehIEUdp}>TvHSAjoStu(n=Wi%^0oP>2IgYCgjK#1ZzyXG6_e zWeJefv11bJE0b)iF2vGFk{E3Tx$0o*F60XC5^@o@@VQX?j+$)Tv%0Jmn5yCt(afWn zq{cG#t1JNo<5e7=sl;&bNj6$dCaqP7sRbidAuo|f>%G-TK?tp{b262Mj7JE@pkXys z!NAd1YhO%-Y7`}=if3p>3-r5GbY5kcp!7NbQ)QH0UXpSl1pF$3u8zDxo8D4Kl}kXg zne)|@B(*7_e6T_FsU<@mOm zn*%A~UWVD1WV6a@L^b6xG|OGqzBK`wHjvhgp$Mwjqg;k5LRr)bS;&6X0UPlJw8;cS z()vv@VZeqwQ{|Gk2@N2nmd)xSqe9K+IAmNlG?Y$Qxndb9S9isc!GSSNV!);j$-O`S z#ZO35LdI-qX5$~q(W=9K(3NI~he0+wytm$Vqa#Ull6jw}#ym%Vy2qecSD}+QN~)sapZClQvlI(1VuE-eb2-CT%!>?>X@7F4MQ) zdh<=d*fmyLg#a;0Fz;rQH(K*Gub#T)W_#|oGt|88R+|I4B+IBnm#vwYZvhK2j? zwRpk4bM~A;gS=2So$`Xo+i$a_?8?H2qwxXoV~#xRO=q9s&9_cdZ$E9DNgHnf1<&1U z=3aZxI{l5OaMy+Je!HXf&Rt)1^=EIs<@(d!aPkQ!9;0?Ov<9kI{Ou3_+HQO6d3(+J zh8EPRMl=5(eUpcq3SJ(YvYbWNX$^Mc?#F-g*JdY=MmqNS=!5sQ{LpEQ&4!__tDgg6 zIPM(}-FLT)E=dkSDa0DHMsc%WMH|2P#)Uv$DuRaQJ!Tk3(Ik~6n9mCO*AZT=W`KwPo^z|=4{`F@cdG4t@eSg{ezJC9in~pnd#;)6~ zx?uK3_Lm*W_VBXcvG<9e=>i3wN8~lTzO9b>Lo; z7wonUq`coA>o4DDiu2e@_S|^!%#G*ov6=m4@5hGMegj?x)&OL96Cj0ip-!+AY6Bsi zPj(d9W-IIlP)kBUnxH6H3TA@CvIBR}r~u5;0|Uw)lK@=2V4t~W6;qb!#nF@-uDQyC z%uq9>cAueUv@K@mvBfrjnL(Spv_-)0ZNefLARv^&9sw{{1($c-4nAHE1-BCoUD|!N z2MscJc$+p%Vsbf8PK=$1PL0BzvEW8Qu%@U|``%B!-RW2e|d&5nq}*I>0Q z?Yy6N_UVCcDBEYIVd@RnU*D{}?pmW?DdA|4dGxd7i=W3r`>wQypke2Z!S^!-&p&YQ z9h|8wC1Ns?0UXPQ_rCLOp<@p$d!v`_<_}(cp=!g(9%P1*F>Nl}cgC^T?D^#@Km8#u z#6Itg1g_`y%_!UiRhrWe+d=o)o*M1(0(K*Tg*nk~_-ITMf((fUe8G){9YG0Rt%QfY2AP6o{$M2lNG z2{0AM`oyxt2Ci7JiV$47br!5oT9vuCw)AorwwMtRRwf0P2w5=Qnk`6%V0F|Qi~zB2 z+?G;F(oZegiU=)6$84#Orj}LAgS83MLrJSyKI8#G-PMt+RW`@+iPp&#d{XrZ5F?DE zIl3j~Jpx0h73i_Rvz)N%V}@gm46p)e1!fFg#HHCGg4q1dFDwHXplv~FH5Sl6a!d7Mv_2VO(xDxdC`(IC^PE(8IZ#IhSIr? z*>(F2Cd>mv;DjM)EdDV%cpkxxbvzZMP{u4#1*!yP4M{3!m=j|;K0A|9i)@yQ!Q`tE zZGl8OWsE7iypqJs&Mh)lSw-mQQUfn~h*6`%Fk5xhg0=b;TI1?VLfy3@f}aW++8T#W zc3KniIfA4@F&Tg$nGKS3FhvXO(MftvlVJpEDOD?#2MCLGOTv`kr;UCAhklljQEoDn zP^%!9_+m0bgcj5>6hTbui}|Wc#n1w0?n8FgV#bzwF!0_3Fvuxvw{VVG)) z`@5h27?zaXC9r8~BGc9_d3wNM*b_7+`5*uLUkTf|+>2HXx>zW?T1OtE*l z&D1R%40Zr`tF1S4JlJ_)Ak^959e3LP;6n~@68P{V4hB@6C0=Xo*RHzSt7x!K+B(yv zUDUPLS-s1ZEp)tW_Ejr6WBgi6AX~6wm#^`<)-hsytmxI(ST#-xZN&nW?1^b}mIlf} z&OK-D1{bfp-kQ!WZ?NII&NBm{&N-iS@^MnvpRo2Gd+rLT0pT5& z@13_fmF`W^$P8)hiD-2!;jpLuX}4vxf=dj*-sD$9(3RqTW;~L3*V;m5Rg|-AgqZ8 zob?i-(W*kmsBCI7RMn*APW)E^b5quj8{c}YBSrVP_9?vbHxe+ zwJQu3z1F&x#6pW+>t4%aOX3#WnAXNpaqtl>kFBxM7U}|k?6UPav6!u{q2rh1WgK|f zk!%1NOb#sDTz2U-UjTA=*)eQLxntNpu#6LYwg)Y^bQt>^&prV!fAR5Kzx4P`Pd#$= zA3S^8^8KdznAE~Kle`>zpBd{9c-eVuo68`x!`MD41uP%D&(@AxA3AUA1LtmX@IIR# zyziy}GQ2!*m$f>T?TKbzXZHN^KD$lXVas(UtTlQ~^b0f6N|3YJY80FtS!MKD=9M8b z$QLLBNYP-L7&?P1H`-tw2aIhZ+uRIO1DIenN%BA?NddIP0p$=tE^Poc+)9aDc8yI< zrcA-lbS4wecYw^K?JRrEmq(bDxgjbX05KV*d zgN7%pF&%yPj_%hTaj+e2_6RIwDg^D!xl`p2-goD%*Is@9?YE3xHk55E zL&vbQv)Mhi>=J5*m*HZs!WO^dt*4)S?DA8NKg7e!kg^?TvFDnd$A0kc>z{n&4tUwm zJJ7Kq1}_7);G|(<5W&ukuP`RaiHRnOa2dZJpoY{4;2~EF= zR8mYP(PQRxuN=P!bYpFWX{}YTnsQea+@l4HH%oE1W0EBilf@;1fV*?37Sq~{q&v0y zc!&>EakN?C%Yt7nVv@vC(@a1hiAl`Dz@42cmr5xWp(0%2{-SF6Qw4E2+@*jBAmbC} zjczD;43lRZ3>a>p^vKScg6xV`3r1Rg+;i_87TanksE#n85@MM>p2I)+isktPhgeSF zd23*+Q~G6BBfjlt1W6FI&>EN$e#l)+WtICT%h7_qfv%xdD&sY-dbQqGgdn>jc+V2r z>X-3MilcUlK#!VW>_;9hQVMtraR6(aIl%WKi1 z8#9clv{BV@cs+4+@o9Gbzgw6sVV^MA<2nMOk7TN=PaK z3e)G&Mhb0A(jr6Pu9Hf6>zq;E7hm7U9<>%!t$tObFhc{e*<}Lk zVGM)0s8&m%i~YRra@fkik6-HFdTFtV~4eZw#FamI;Zy4yXX}1b}pia zPqdL5)tc0rpu|eSRHFt3dDT%_>t5@BR&iq}9Ykud>pljVB0GHkr8I`fK_E>8h)Z){Vd^0K3MTt3kUi34_nACkJ3VVS{x* z>4l5-b->uJ?tY8sA8_CjmmbswU14pKTW+-}91Z+>L>H3XZu_m_aXYOrFVHQu$8`w< z>;jG_cHv^DlnE=50WxL=tW913z|p(S*a<*}ok=d*ZywallDE9oa_zKPyYFh(82Gi? zyyuQvwTSjI)ffVXs-xO%p?YReoBsM&zr4}<>h3$*NcH5hOVHSBzWN!*sBz{&%MOGA zrMY_!x+58Sg|4eVkm<>2W0Ud*i2k6l^oP!++bXLa4J;O~BNpV$BmJ)I3NtJ?d7|;)TgMNfuxk9LYF)Me_kT&{XqGwP=zCdy)q8hD`pEq6}%f7;3{cKfiC2XB6wq|aPJ>X&j zEu>qMlR=clqT13r$uhS5CX*`5>;WtT$Q`7wtKD z|GAsb-+AKFJva4=Z0E7zWuIVPyvHPO!1nO+Ve_|g9@_zIJIlh7nH%gsd!jdH`#|%u zxtn_5*LIt&x!xKhMArPa(u(kM`yf_VAnA#uFzp!_gGM&u*TW0F(W?<@Yzk7Gba1$!))4LAY=z zkZ3?F&Zf2)$i-BaBEVj$fFywIG_}}nEc=-U2+KtvQ0$U6hp~apQVBDRskWOP-nL_l z77I#Yyns+0K{cJ(#({#F;nJJL4m)_6cc97MMm#Zw*^lYnf-OJuMEu+e*Ik#_Pag z+sYq(|9f3t_qi+VDRX!4$F>XZ5{MRiMfT+%zqD6-c{4V!?B&>(yz^Y}*{1X7FaNM3 z*}l%~tzW*vtc&Wd5G5c9R06+2OCtqs1O-CD&ys?)XaN)*LY&TW6UHB8TzU@}NG@;V zR7@=aBeCF8YRZGOWfx}{F}a2S2>bcV_?3}<$|Wh30VHJs_9bCT!o-Q6VJ6Z-%bwxV z1lVL)>|SHB=f*+I^k_ab2b!y0O2TQ;=yC{DNit8G*wNy0SzEg4$bv%y8HCG+~aG z_KR@@)~dAyjEo3bEKUSj_(7^AP>U9qwzCyvkz@TsUJ@7Ov@BsHtLhsyv`e*9FUv3i z!2>Gdo~VhHlK1%E@=3eoIt1G^A0Mg$8V4a4N999GbidH3E@vUseU<>Gg$Dx=F6&Wp zDLvZ|vS=MG!n~Lq?xeJ)sUd1ClA4bOVmL9Y5rt}}jnvDM(mp1Wa0Gr(x*%0vH5Ih1 zxMc$(wS*O6af`!@VaTW&C8}S@y*yChkg$@={I5-MbT+COrWUHBBJ>EUb;+J;r_d@@ z$HNpD!#w?s26e0l;;0BE;@p+Ctd)gMNfgS$13XU^qphuh^(dD*s*zPE ztd(5~jG>T{QQg1h7m(3c+Qs#&p$U|w2u!vS7TCaAwI(BU8lOu^3aUfiYMB7T)I}*c zf?DV#@(Pj)_ykm{p+~v0^e!=GRmMaxQpTLqC51VkBpFF`;s{y7D%E7U9?LnZFK~ta zuUR?A$jUR?RJ$9?MqESNFfkCwYxg<^ucpBzZ4G<_)@7p!hDC#-;D%Oczt8V2B%24g z9Wa4we(}qnf=&!meVy5kp&xKGqBlhGCqe}!snOt&Ol6x*LmP;{`o~}V-~aqyfH1_= z5ojivTkQN|`a+O%xiFvwo71)rJ$#@3;3e4az4W5+CLjqL!i)ecNNBg0q%Bt%cHaEG zHrssiuxSfgw(9ddtXwWP-E3nAfamVD=ky)7ojqsw-Dd2VfJbO|n7#vg|7DBLIP=u= zFF0@M{tLI=ZmQ+NMHgQ*Ws^ztgU_~cCvG%RLD1-$YpuEcwC#b@)z?^kopslNg27e# zSGH<-)k@$qop9^NKK|QReCCsCD*NB{b7|Ku4!rx|sv<{vMq)=ME-@2vj&zx#=viSrLX|0$-H)mOa-3~eLWr68Bi zXEMp^xK=%2E?DH~i`T)XQS{TgYyYm7Hb=nT#6G&6fNipSpR;kjdkzR@qAYN1=| za{9UMjp?1|dM`0u6w%2seO2FxQw#PMW@hI(A0qz8tF|VjeQvlN)*t= zh_Y0Xk>AF~6oR!`$apr>Mi`%bxr{;0)NdCghXx|RV3I?iyj(KUpPp1PLlPRlz+hqg zCA#t6WayFro$@A3L;59AK7<5PRRzaeB2{=LeyJ~yE9>R4b-NXT)vwr6*`?L5wRAw( zr6e4wXd#&v)no)qYcegx#jT}r$_O0`f#u>j9nZ$G%iN>Q#UMs6N^%}s==605u-j0U z3W?ai`{EP#j8dO{;%m=5^4yd6e)XBVzwz9?&pdX`*S~bjAxpNKG3_;r_nxwN?&N)U zpRjb!CIY;C;67U|*kvPl*;h=to6vhNlL%-z`gu@BmN zvXj`}l5LCm=%w4*eTJ9!ow3QZE!Or+p*gIHafNs}+y^8AbYf^1UWPaEz068pN3y;7 z8T5lR;YwSpKp)f$y+WzBQ^^Y;VSB!IZ0-Mow7|0S$l7$lG;eQU z8B(^V?8RTsw0Mld<}wg{_2;heTImCpE)4%tE>NpzMbODGm>0*6FvtwIGU@(%Zii|i zVgdkiF|b^a1+12eV3@#od2KqYGRSYAT5;;da+=ZkLC2AG$Eee8TGv zIP&0y@4M(N&STqR{?td`;}K?GmGTJloi|?zFF*U#{eJ4{{qtc@NE601Mxa*%N*o}C zG-HZG#5m&vrZFfqtQuKCV2D*nfKD>;lNT!*CuYO5OR*#Z4ee~eF~*H(G1@!;?<B z0lIs-s|q0qH(bGjQ|$KY?kou{b}3hcyLq=`cXq2sOG+HVn8e)N(H8k>OLK*yC9#1e za=D4qDMnW)F?OtSK3MN88yye@f0 zqfmHec+^SamR!k*%Vc$0}=MG7M&z`XUH0yLF2Nj2$2HWaM?}&EW(%go~`D zC6Tm%vCJl*j#U=Ts}$yNrPP9%tsRG6;r{6EY?+~zyP;c(Sgp}dr@aG9MEX0ZNg;s1 zRI72R*7?~Fo~w1;$sUT`69IukrxkwvkgFM>3-J1rIPfWm9qvZkN4CdJz(aHPz9nf^mO8YocF}N_!K%h!m&E9#kf{u-Z&(t`WvK~GSb379s@106 zB74hG1zCsds3dh=e6*w%%!8OBbPtkFrOLu=F*7u%jzl3^M*S1&ggs@%A;vHrUa!{q zg=PCYHc>z#Xw#ii z5T-0GWdoOs>_2Kp9#&1ptGjxs9gZ#`)+_ACaUfSH7tr$>OA zW@tO15GZ^H{Cd9=ENuG~khDt+=i1waogqYUwbwwo3{Bg34RzX_g^`^`_N?x{^XBd_ zeOuaSKu`$A0@sRIykvpH$s{Lkw9X!T?sW3&k2&n{12&$#{`wQvn7-pS({|YEgCDx& z!gpVA=wZv=dj8p0eCFey{oG|Y-gNcFm%RO$V~-S|-QTgUyXrF_@JGE;=KS+OIgs`f zmwm*Hs;}y|`~%<%?hal@=Ho}|j_{1LZwm7vNxupBWx!Lqj(>|aoJ$SiacQk)g!-{U zw>O*p!>@j>W&KuQF7L7UR-G$U-N zQ)wW*PB+u3Z2KD2rZp|tT?3()YC8?8;S7g{wh?ON*0E)mDj0DM9E4G$uTrijqvh3a z>34~s+^E#~<)z0Q!hUw@!0IU0UG+K@M=)DAQ&TnK1a5RD3em9#xe$6X4c)Z~k8UVq zw{N{&Hq(m09rkTNlsEu>qGqZ798Ja(#c^yG557&J!o z6m!_Qc!12dn89HI(14MLU1qeStu~Z#1See|d*Bw38Rwf{dGg5z?|AIK8|^PY_vEd& zUiZN#?z`fsgLbmN9A4gM#`^os+*nw+$HoWkyS2|Qd)b%oFN+t=m^6Qv4dLbayG%G} zuT4F_>@fE7eKt8_(Ut<#3@^JpV9uud&)KB+WBdNH0XQ0+021vd!d`Do1;0_)JMj>)lAhq*;GlMq z!F^VcWDQe5<48)(3IvC?5vCa1XcoH{mqmTSE}jO;2xTLdCbbcd#WaNLBj>);Sut8fD zplK#-qrs}o!bz=}&3Nk*tBc}+uz6&=$EipaB&@Q6)isXn2y1UkXqRHEGQti?iX}NA z3CSXqW!)}20gF1y<+W^+SCS$GcETV&;`#HR{E&I-rJ5>hvqN3<2ao~B3~5=eE>@DX zv6hFKWZ7v+Nv9GiN7HH1Xt5Ns)-4rbmqdr50VbuELPrhx_JMou1bqJEU;jDh@RBtW zf3}VyV1-Z;Pg-vfu*pIpnhac^^VezF#{O0mx~m&~H+Q{M&(wmdCKu%zNG?pa>Agzv zI{+mv#mpmY^+iVBVTDkCkX=Pc!gqvqd)k0a3pPdn;-^2VyVj%hK)p`T12q%3^Ae_Z z*9o-LoBGz9ko zWtx~<=+PWFXywvRECW)!!`oh}jRik6cp4Ii1f_Ufy7Hcl*{yT+QU z%eD13Tia~4nMK?S~(^=hK&e^s1{bzxB53#J~Uim;dBXzx8K-{++-5p*Kta$=APOo^_+|;020K3eMvbm+ zI2c4Y#wJ{;MAL=r)Y71z>wfy!z4zVOkFGjr!<;M70&lb2-5gC!#W_SxF*#f{c|fhZ z(c^QDTIXu_AmeU@K^;vIykWc>k7yNFL6`i~y_ARY69n z6`{~1#Zq0et-xkMgW?lTL;9JN+Hz&XVAzl(bzq`XE{-XdaZo#66`bRDuf!+z zUka8O1^Cyp+2VN!E<2g2BeZyS8K-4-muTx}!E(C1IPw}mf8iI!7r}Dd#{*;@9laid z&p~OIPGyf?|26i^vR!6pt-<8Bm*eK&C)dx@89@4iv*``TsO4gP=M_XYGpqqbRrN5~QSa|zJFo-=pf*|a}=R;dq5 zfrMb7Gsey;hbv(~=+9+n(IpKKC8#)mpV>QXyS1;*dGWPfW)CtSe(-^Sq96VG7Bh5; z@P#RF`m&br4nrjZm{1!?3sE{>O(6^$Fb7&0i3WsWYFoxG5%>_ghm>6c!tk;rfmj%s z9%AU2Ff~FhmSeJ9S@J@@@GSc!DFrPFq~#FG3HO_T-wl{Yxzv;(c+z9e^uWb7k?lm| zFrG{O!aye|%`FJthLmkF3#tZ#A9K{9AT}j3qWv^slil;q;IrqQx7vKNU4A>xoU`UC zD@|U19jCoP<~}BM@2xi>d|t|ivIE#)GUBoie?Wkpfnr<9Hj$4%YS}?c=6>Lk^KCAB zKQ_FKfAh7UhL<0`|7LjE`3D`f-B=?6baf9gMjB;Zg2O>$F&r9bjRTnqpr}j0ZUE7g zloSLu;v{u0+8B(92rR!GFOz4C;oO)NqgiFASXm)rwATQ#M?s3lWCOfR5+}Tj$fSzM z!jS=E2P4gvX7%om=6@5ZSipC8aFcBzif(b}F3`HvOh`;BNgTHuF%Gq2!rg-ipvk0= zm?Fed5tJxeMyXwjUCM%xWclq5j&Ku4P>zn{;U+s!7Je2*Li?xgw$>yCH(9}rwbg~# zE!!Hy(gkgmB3NTstB_AD`EJ~9$L{RCq6Fac^oF2)S$DHZQU z^z@2H?ZlSlR^WbY^mZLj0y=ow zwVbYkSnIW``d|vR-bi!y8&C6j0+%Nrf6VEpoW!4A$;i*+ES_%TPo|LK&<7+r=1Wt-QMyKJ} zpmaeCYB(CKRAEfr0MmyR>hLakZ03wzylLmmH=N?Tv>MK_Pwnf?v06%#3DAL9k{9PX z1uE8jp5jyycO0ji^e8aavX+;0IgbJc_97W9Rw+3;f{ufY;K=ZrUa36h)gTh2Yp4f^#b9EN-c-~&KogdS0y+)~b2*2*|LljD0aDSKT#lmzJmMolCNb`={ zPZhkHn1zgCm0eN5&|za;-Gi~Zx(;vi)T!Y43-$&N1;fyx+@oH17@P|;0*p(RE+lN= z5Cb_OP&>B0uF5H5gtNi0szc7eCdldBuYFtlxwdy9R!qlw4L5tZU@Upt!8U(ESD*P% z$L;MUOxnrf6W5Kr^)vHw^BCv;AVVP&GEp zp0fvQnF>1twnnikm@y1+02%mIYna+hqLX-^)v?DNr6=eZKF16aI^oO-uRrAkAD=R< znZq&rq0iiFS~ly#r@`2#pLrAt{?@l0q4A)N!@%EkDg5ELzV_&2_W{xO-G96Isiz-# z`x-r~G*ri*5TZUfkF5r$}*3B>z zFOHi9BL#5S3@nSA5h7G>W_DX)b9g)^Ve__Mz=%<(bQJ{T*+Zw`en-+RhMkHILI3(+ zenpjAs%ghu1jd@3AQ4c}tmXDPIcNX1*dEAVmnK zeexzWkXaqoDCdY}%tdqxIIVV6ze1IQ#zaugLRJZi9&9VG7*oiBQsZQ*SQ3uBVtQ~C zCy>-qF6k#TfK~)yk_7P487wYI>sRqXrqYK?1!V&bn1cbbr=e5|IYAx8RjslHJuZcW z*@<7E)}=tR@Jf7Q|D|C43@`hhl-R=9iq~@5ucVg2h@dnXNn!vQ3KmO>WAWeeTO7Vd zv{pxq=^Q<_JnSG37`bPbL(SsR&hpp4JbG95*r%5JZWMZS;`*ykz|Otz3vKrqh<4c3 z%ZK5S?|$RiKYIR4y&oH1{@sT@|Fx%Y^!&2#NqOJbqS=$??7a5AyG{`1?KTl!hLpq0 zHk$1(JCD8ptSR2;Wq3kS@7;YhYq*`vpqci(8ij1BkNWn=IA+FCdyDdO!9;$_)!D`2q!M*S@NC-tjj&kAK z%!LZ!L2wcdT6?&6+g$dJFEQ+FjX}VA$;!kn)fbTM)js~n{lR3| z6INvmg|><5XQ2S(5&%vSQh{Gvz&4r{4cLOl5V3f`%Mi3v*@T%#kKJc9ZM_GK6ABP3 zZ_uw&eipDluKLbMJ2)+g4GJZx`fWyg#}`sen22btxGTXLi^D`EQ{Bs70~^d z;5P?(8Qd$fA!RgSFC@1a?ff>J?wMtuwX*N*m^vHg&YHEx$}3IXbh4Ly-NIwnT;(Zd zNZGcsk4cF=(QL;V;Z0wCh}qYpU}O&|do%Wu`Fp(O?32zp{a7coeU#bzzOKLO6F$Ct z*DarS9@~#OfBelaJ1PiZ0-Iuhw7~*t8Y5v_u>g=lskVIrp*Rhqk}zd=7a|6z4j|VU zG#;r9?aG1<_Lf~tz9LGJHx5m6RyFSOEOE!jB%l;ZV0p+C!1UOfMX#5yTC(~8hvlV}=T6hM1?cR^^mWT( z(~ggHczG!qOVeR<$D0FzCTsGE>#sw`R*v;5qOjV^qg@^!3UO(t#xi&``;EPrf!%NZtJK+|u4XjwL}uqGDQzMO{E3j8S`N)mH5XUZ!Vh{{3(iqF}`LRH{) z=E+5svbd$9c}!Wd>t=)*06CJ;C{$^k>@k+N*kofexkAx8x$5H>ygZ`_F}h~0(;EKV zyZzV~d6EH9HKGycjIr+R(pD@#K(&xB$jM!LzF;JB6fYTfF40^{jO)-ww1I=qv3iIh zBT28=WYbM>Oa-v2XLWP?T5GQ*wPA&$BSE!$?=u^c^*e*#n)GCLs+7LX6{>G2^@{rMMUpZ1D%T|J?jRn3*t5?)Y2*0!zV^juzUToH zKN#M7*WEY6&d+<9&b_xvee$V?zxXA^JqI|$%fKA~?jX8f1(jld)1_Z^)XRLXUTzKa zw|%c@rplsH1OQvKJ3{-#Z1CG7j=P2|fFV2>7<7qnA92$mtRS1k@VMG>5qG5rQ$hSg z16nF$oXr5qWrxstM%~%qcH@#-zas&CZj^4LZtftS`>&t8{8Z%@@0VEcNBg2SHg&X`a?s2(a72!_GKlpVId;(!@eF;rqz*#RL{9FlVS!`Hcz`wy^1UrLo zErb9)s|%*Oq#@@7;>0wNM?{Y=f)bi7Q4k4OR=I3(C6}@YSmX%I;B8_-Z2Zh{nK8l1 zp)8VUrL%_~F#$^0NxrhgfG@%nsH#r(Gj{$QYow;}J@BT5r*YmvL%=Z#ptJYa{cJ11= zYggKzQ+3V}#HHf2l}|lv)gn;}?Fj&4O1j42;Rb<~QZ>bs7Bf-8z(3TN&Vrh%1tKBT zpcqR=am6qQDPy7(gkBI&rOJISy6n#yWPjGc83u%V4Vlx+C+A~B#NzyC^Z_7;tbgqN zlrgeyTMAL&Ckw^%u6w7F3q27m!^Ht)L|e%NWJVMRmhZo34vg#z*#5lShrilTw%rVR zhscY4js{C~NRW4~$pReb@>|D?9tWZ5a} zEw|?6C6{^OXklIS`J+QS&x)5pKR6e*h3r6JSXIP+C^+f(vDcRUGT2Gr-evaTDRih1 zT9gZec>AX|Nr6-F8R!!s0W$T?=dGr^8hAtp#^vpYO`t2-2rWXU9Ri1EG5A|CC&WHW z5AeO$Y}do?hIgDjLidY#6g$y?wrA-AdfOtdwx57;5BcG4!P9$4oR;m3pxB|Xvj8c> z(GV_F3uP-59*2TyLMwKFRT1fCZ`UINDsjZw&|Ah3v`PWy_)}Y6w}R>jA}Y=%QbnW^ zC0FJZL1YRHY{E(qG{`T=%WmiyAE)i+o?yr;q^CpgG`zZ zedbA*UgXv6ubz9BPioWLTiBi_eC2CjJ^GlVK6BzRhfO>4m`lz(>FxXM_`Y}Tb@(UW zXJ`4?FCN+_zb-xRw5u+gcKsEX+;ZLJb7o$*aLz4uhd^C}VgRbfmys3T1V4?DhEfB^ zZNUH(22_iOgzzs;kx=u%5b~9`Bs|X=9(llg{^Ya7haUiPA?3OE-+RYnkIb8K^TpR) zd+zL6mtSzf(U)9u{G*TFJbSio^7X5)xXh@ZapN_vhB*)oR?|m-omG$~RGo9jEdmOa zMDDotrogp7ysIw;Gl^-z+wr&0yivrNhnudy%4!P{1FaTEoyvn~>!Y|y%p_@v#9+=e zTOwMNEx0W}W>J%*>AHo(tZt6=6jTI{>Cx^g$xWo_rdf+JY35IPt<)A8M4aY!>$??P zDeph%?MHn2!@`lD|K$18PPyXp^Ugi{q?5k#g)6^x{@2eq{%-JGDidfncW9Om^% z$I~0{-J3cLXU}sgB>{kTy%7tTz7uBy(*}XyG}T*oY2_d zRco5Ty#DV>4UZ6M;(LZGPFlvBCVq_WmP#p)g2%x3sXc0=uR3}8Dq=E}%LBCXlE%+h zS;)ebGVl~{S+I`=?g^fA*-E35q&;_ggL%?t^Pw(SN-(RuV&xT-PD78yyo$nB`|Z0o z3V|XUt-m%+{Q2E^6<;v$WdjVa-sn{_`s%*lzO&Y=)?Pi$H+%I449FUAM>~LkDl`NE z?XkllTt@<*Q)nBpfq-7UJS^4EJ zp1jhFR$Tst%P+UY3X_(6`HG_+!Yqn*wTY)j79GD8P|cNBT25JXz@unaMQCx54O&ra zP0t+K-K~lPH6Qwy_rmyfDx7+SE~+~8r(pt%vw$YhSJj0nq>`^9iV$IwnALZ^b=L6J zM{?A6umc_yM^R;gad2idS8`fPut~zu$0$_CIMTb^RxjnMxUW7+(gCuhh+E~VwFeq{ zHGn394w2ckWdXxMCj{9%m$2!)*Y3M?TtT^gAA@?zRSPC{?#nAbMzoGZC3Rt5AKCDS zM9;Aue(3qOs~u`k+SR7B4Qtbm8R_&>PO`gg1I~K7+v|67n87-0Id-tYt~54}!Qm8N zBt|qNTZj+&_(w>vcH1>J5cEcBL&}E6p`ZAeEpLrw8(MXMWpNC)arIKL)bDGt*{!y< ztz#Qp;>uzKZK}nMJ6BmWz0wtD7am+Ckb08L8%ClHaWGcr_w2&YgFHvt@2&gT#&v|l z3%LNMH*;<2+Uot%QJ=TP@8O>PeNXo6??b!Z!-c<`lw%)<5KPDdG^B!NW^=f$H>clC z-Q05U#Jmk^!9H-1r;c_%fgQ*Q$^-788{iJ`3GV<(C?m8u-s@?t%uN-yN^*2P1h!OJ zF5UJ_+lpxk&?uG?g|<3qAg3C@I*U;cS1mvyZY@ckWWs0^fK@?hwT8cdHC2N;Ax%lZ z7oJp=OA(efm9^ryRay12>Vbhh2PPJ5qy|I@5qoUw>)h})frJwU?sW*tvw}__D^BSU zwCam6QQ%tX%tA?u$tAD2Bmf*6=uN9+;*=_`BBZGTPNgIkQC%pY2*It4;m|6MS*lPj z*k38K3p51$0dtVtEy*$}P7oojjO5UeZ9_mJf`$+*&|xSPCK5t+IDuRN`1O>7Ks73$ zXnl3p69i3wK~eyXS3scb{09Lt3iojy_yggrt^>{*B6te2b0h>B>YVspbytBj{7S)t z7(YrN4kiAI5XYvo#f%5Pq#-+syx8zW6e-Apk_2j~lv^HAmBLe@7|;b`Q81*l?0O^r zD!Qw(CMqcl9-%78D2Nk+Ik72>D1wp@k!KA=VMqdogjWic8XBQvw^HdQh*$T7E4{j; zsyI=IJv2Z}6Io=8Q=BwxNpMZQ=y-$}vQIUA=2b?-p@|jb*3%M}CyGoI5?pbT$yMh4 zHvgE5+?s`W&iT@VCvO%)w{0Cjwyj)%lnZV_W+6b$X}|C8*`wPB=RwMS^($!HhrbZP zWE;vLv%Tivvt4HIFF!EC;+p^PfRBZZHR!hb+QXhWL2Sz@5Xu*mwf= znmI6bRG~tLkZpiMe@-Stg}h}eFHwD2i#HBYLVPk>thwUwwIT?V1Qnv#VNdS8`+*m!(GvC>U_=vFol!9QM)k z&aq2A!pmR#>d~O_*=L^Q_KZ`#AAS6^vz;*a5a4ry+jCC;@?{sEGX3IH-|?2I@7;gT zLq2-I=MVn~$m~eA_nP75YcIcG#g~8Z2A88?{Uc`$DVrXVcPV@>#qFOFMdKkt(?=Q(jR~K1XT@x1N@gi z`;k=hfX8V6`S(Agv@4CGbhYrJp2u$H2N7-w+zm&&C5L6af@hwwAO@MujZS3w7?C$d zMNELE){d(F_KBYhTnn5iQbqJz&wrq75#5+~$C=m7c)3R$`bplM^SX#SyknMje0Yrf zL;f%b3H9WTdT_+aa0p3k@LD)m`BQE`7Jb2Yxxfo!f6L123{0r-p+_A9d&L^SZh8q@lcO?X}lk zYb_KI!0Z8@5C~a_BrtwR#C*c}2Y5S#>B<_QF-MvLoMAgSfB+zj2oAsqwzq~oLH{>j zZRN>r)$yxY!!FoR5u1HWFR_?gS_LQRrWQ0=ddbDbl@4!qg%cr{EjQm}yKS}!%!01w zD5KBk1qd-AcuLUk-aJJ#`x%lLw8h02Tws#<^rt?x-r8$vV(%tfjGR?&jdSu%uo79D zytm7t*2i>915NL`b;k9k2t|7u^V?tj?B}+V#s{KI7e^g=q#lU#nk&C)_;^>y@8lf3 zsVidp)9-&JZdmEbn44S zj+Vv0{P8!2fEm&%;L(t0L{#OaJHK)B4Oa_@QcakE6`*aN^en|x<`;O@4nPd#^zZ~7 z!iB;-9C+t`Z+zQZciZcYyS#nh-QNAKy{GOtWxaJ*-e`l>1Tb0iSf18ibHzDIg6mryXdJ(`V=#W}On<5$=P|~pFssrdJdV+2fN`iYw zePFyNtN-Z3dbqrVfTg4nq9-eBi_JFH0X$&AAj!iVC{m9V(S`JRqks-5hL2vMdy_2a z>f{tpTXCISPpjj(S_POz@UG1P@7&h|5T#(Vl;}&=9X8*6mz^M4z3Y&JKf-{kPM)lP zvV)%~=%1cz`GzELy4UWzI^+yvZnM=^A3W$li<*Pk`k{x$P>h~tZ^eY;`I{EM^72bf zeOmd~zx=82```S+?ANx!-~RkNO|So8(8th!_v@dx@no{nPxK-}^qGJB^-sJkGcck~ zm8+&RW7d(BPvxxnVzx~;qsf&_e{^Wu- zqgHga@Gi5CjfW63Fiu**Xz+-nI7Jb0rW1-C`!Q%_Bacv7qxybtR4PPQN&= z>2|q77c#8 zRB>Lt*-4?mDU_;2c+*wshyW>ssIq`690$8e8X?5-JFO}LfXhyl;DSD`Li=NV**NJ?@gDxD~C z^ogm;bsL>+Sz9hdS42Uig0Lw|sDfR~m=4L2x9o!gWq;;1{Yu z4O%4@vD9uU&{Kkx#GHmwCkk<^T%mfUc?_}$;?+%u zah}C_6!`|03=}?aS5uJ52hrEChgX;f$c!SizZ?Uk3^F^F?f=U*mmy+juse?pCJT;X zL(1W0aj9_h{8`t}n|ZBp_naHGfMq_A@E}Uj~is7kg(JA_j+nVz;|)Iof3gmIu5X1+%Yyd65}j-e!~K*IjMN6<#`e zwm(MI$Cc$M%k5XbieW3Am*ZT_2E(2TF>oVEw=z%m~)AHebr6e06?A zdt|P5&7epULXp@|ToGL%0!SG~CJP+iWTQ3KTw{5ZHC9^=b_PrYsKj0#Fk*KPFfo<^ zWP{p>wi^f$fV9RhIv@ZnNQEW2{pc_SHZXy2kAe5@BLM`<@#>2c8nJ8${p{fmb;^M)0Dg z9U@yu#T6Kv4X(Xvy8p2Ig`6ga{xrI#)TTOs@#WnO zhYAF5cEA^0Q)>jQ3O-m7oIucpAXA7HU5_#rtQ!CJ!DpZ{3* zrcWLPob_9;H~lJIczksO16&E4Uv5iIo`%Kt*netl}f1^;nZ+T6U7-v zf+W}9_>|dW51J(eJUx11I#}BkY?tNlfBnN4E6k+fPk;Q#kAL{kw;x^b{qNoPv!6b8 z-#vHSbi+4qn|YNp4;P+)>d7a4@!|_lzyID@PyFJ0Kl#za_uhRwIsf}V{;E1=W*gsz zv&UIl#Dhw$W^sYpMSA~&-9Q!os>iC{2o^jVwe7{3S34VC`S3ae5aEFW_g>WqBte%j z>Pd63gthezrBK^p604iF$r|CSHtDVs>u+yEUD5vz}BVhyJwXp)FhKGkMYakAADu zCR`^M&p8yKhCT)_pl}q!3vL^CuHgt2dg8&XzsVSU&=qCTQNc_USK7MOGbyh`@)2c` ziio-FAS1YcZsL=*@rjyEa>^~Ta%s5d9A&QX1x*gCco`DlX^2t#_Y-L2mqdDY4i49Q%B~x@; z5jbl*sj93q)b8w%kww@_w`3@Bf!*|K*A#Qj4zA><%utqF@yco-6QwB$H!x8McP*ea zT6J|HhQ?;f&`pp;h>Ud!aXejQIj9kd>Q9DDq|ugSo&EoLAZ zkE?y>w@zKxE6jUsx5^v0U1i59D{s8cGAk{+=*Tbsr+AqU007z`u=)dWpczj9(YZk$ zGV}*BaCg0N1vYY=+fAOVB{3?Uc3tDTEIcAZMuC0^jT<4ZsZ&NC<3*=$}AcmgBao8uk>DRyeb`No~Z z4nBvJaRS$zYS4xUW$QS!MTzkV*KK2!L1@kT~xvDfQ?Q`ZapQy*{N6Na_r5)S3sD-!DkO8@W5iO z;^eSDXP#~8hMs)_&MbDt^a}*&*pck79QOraSrE6y+@3NV?e?%kKJ>ZIe8LIsFMs}H zr=0K^klFt72jBaaBMvzTUOxHw&-t0u`DdMY`Ne15aOK6f-FW5fTdtjZ>-8WrCyIy0 z#p3K0j0?efjcCYlw|F|@0=oD_ww8NW^peVsCkh8Z7;=fYH2{a=o))?PZXb~tO?0CP z{C-=~^Lf|8oLSdB^6;$39=+qC2WMV$&DW<-KV|ORZ(e@+$p$hB+`qKtE^1{B2OGj` z9)s8nax}3mRILfqX)giF!zO6@D8m&bgbn2yc9iU0+Qw?5Q%5ZV0t3Qe?C@Tmi2tbp zfFWXW=nvF&Y|^pADX)3W)>};RwH)9Y=>F8fA01)Fop#!C^U-&8pjjC)J8R)MqKtN* zagyomF>O%nJZ-zpCQ~+Bf2~)p1`AKwY+az(p;qCAONv7zy3e}>+jrtkiql|HP{F;f#;vw@v#!Rt-exz;2W^1;ru)P1xI##O-$&9$YfLD-K#|!f!dy>!1?- zL?=N3L)_}vg3d2h2L@7LG;m2Aerd@N*Q37^v-rM(@Hdc*>8ykf}3pb#+_ElGlL*U+PYyp}CVMpkGSl0}gu(yDFY zHe`f!a7%BiAstw|^;g8LqGe(sj?m+aCY!y+3&T{)Zo&xA3l+cQ3eA1fX`6o(d|ryA{{j(;JFp`L?5S0PXewW&(7IGLmg z^#S@zt#mw0DlVe;>c)D4R6)EXxoQshmk`erTvdS$J-(o%TDTI+bS%uIS{4JbW=W^;nP&iqLBC8qddA07W&cR)$#+^)!P;X9Hx6BUiU9sezQPr^&8g z5#`qC(-mb!@kj)PF?B6(ToOw@>yfPI5K@08->?$uT3{5Ys5qL$W3;kAHm26cGy%(N z3w_cs5zl8cPHQGtl);6Qi(~>_>!NgsNTz_EYlm*UYj?Le2@!BMEu;YStz21@LVsBl zgjo<#L0M?-ZCOwVC=+oCH%JSufNW3|7*#6pVv}(Mn(kQuB)Srw=8P}mR=MmzjnT|9 ze=hrv2L2d*Lv1!Cfh=$g8X^j^BQv;U?=pxqLU3ceOn_dbZ)hEjVzBExuG&k_F>6H; zH^9|J1?j*bu5{?_T75%rp_sZ;iDIg(3ZqiS-`z?pa8>o`Lk$|*mnHC-Nw3)J9{Rn{8bcCuDY|qu@!CERUZkau7w5fLL&gsQD^gxf28Xus`#Pu!;W0 z8k$1{MG!&H{-f3z%dZF}rdvv;a4LyQ#isX$6XjLVsK<_5A%Uef7PP*ABH*bX$|6t$ z5k-U?*XXW@k`n8-QV?SsO2IHORD@E7fRYgimpugtOmazNHUM2yiRKu@naFjJMuIF8 z10$%ND>ia)%7|g8$kQV9Dd4R1T1R%b$tOpU5zo`I54RKRLPB~X$~cHhT8g<2bP7hN%mo$RUkK8rv zFFTJt;9^_J05SwEh=iRnJhb2r-^adp-cA0${J>q;-aYr5vu`?Qv-OwWWSzyf-1x;d zmwg}G$G-ees{Lgc89MIJHFOLwgUSA|j3^>CyzEH!-rKMFj$Jo=+Z)z;%hWaAw8Lt< zY_rDZ8!o@f3X8vxzx&*HnR6?^ZeXBoP)HdD1R*_3w`asF_O2En=>a>q;nbNOC|h-) z9B?OB2RpgYTx&izen%EPwC|5dol#}_nvI;0fdlmpCN^sy+Q>JBieDc(}%|uh<0WhH1=YV2te(Vqyjql z3}xFwq5xpay@zC}q3FOn7z^`K1}K1x00Em0b{Xu2A;J-11Wy%Y1YjvIh=hxwX84*Y zY)+?l!dyno4C~!Mf0%;2?5}xfkl8&$5baSHSs;pFi~IFCFoo;Q8s!~5G` z{?|i4=sdPhevSUWeCCPYyzor#FWX|ax$Hc43$bLz^oIQ&^&Kv*{w}=x{qhJ=qpFND-o62<<`>}`bdGz7=9Pfu7nDy|3vllM7 z@xceYSA5}l(~g~S^F_1nm`=XU8tMuBLsu=LXF5#lge`ohS37=bgH_ySs6GiodJu#z zu6sk!9sxX>j`>=Cnvs({Nkk+dBw`hzG3RtMxsYII-B6s ztHUAsu#GnGRwvZQc<=01v-+w4{9-o^bTUlzb1t)Bn@`U)l*Azj-NUJMO$=LM1`41j zxKeX#eSOI^8=%vR`0e0;&ZB8WtOz_Yt`|3hqs|HE@0rYV)cKrJK^_ynIM;OKf{wcc zkJPO!Rue)v=a#U16+XX5XJ!DLN}-rQNul0YoRZY05@mFI3adChrBYH*yRy);uB(fR z3*;n|8m(Crvz7trz`h8@Xs#~&$&{oF^$Zg#@qUR4xDll+aYb8Jfa?+h zkfmI==$czlD2iKZlSU2+2x_MyDA+XtIui|nZUCm|2}EIF|1^PZA!w=c(ph$rC23spaWXQ=7$j8} z6~xJ6f{F{u%AWYeWf5YNmp~1t7x8oSF_m>&H$znAaGWaZliVsTUaz1eu?khR;FebU zzuGB?L9BFIRzVFhas+aeB5>i9g*AE(TO|VKl5`{)X=MzGC`J2H03D~s^v{-6C<|=~sdk8x+yAj^>kHoY@;nBugb$Z`9ib5vhn3jk#2G z@q#3B1e`JBDX+GUf^(t-3{fTu$1GHqD;}x!F~Xyx$e38#vdIv1o2nzVT(VGO5GW!$ z9vLx^Zz6IPCqyoiF-U#ZAaw{tkyA#@;@#3gK*8{=LSj}*5-_*6${qznHHs4vLlczZ zVoTK_UPia*>g%dff(RlxgVEd)o z8FP+F#yVwvblWP4BD5B_x|xM=TFRZchKnuYR(6oNePmY@w-$P#kA02c^Y}|&-d~1` z2e51(*-32Z*r&iGp~K6rPGvv5aMtXbuexXMjq`51{IUCPe)PWU=G}7MUawzg(^oBF ze|fvjmJ2WMyv57yFN4h9T;5@mjtGYiBvgtnN<>fDGfX z68!F%dXhybBMU4HAUl=qFg8F0FNc&x9L7fR1u#(h*f0MpPL!`6cR0N473Pn=e_xx+ zpZoL&{eRhCn2$f|(j~>VR1)H)M1q`@wk+Juvr&-@o_q z?=HOeo*6&-;XRK$e47t{EnINj4}UQFa+d+l>BY}+wP~X5bO6tD`ofP6tV0SSpt0R# zXOf+|cSb_%cqoKfz<@4`Q6SiuhMj?KA4i3VovF6T3nK%qAZ=NEt_o2Ae!W!dWl4Bh z5{gGa@raB*yEA?{mmJ^HIrgY8e)8iV2dM2G>$l!Y_MskhJCz>qcuC(MY6SmW^VJ>S zX!0E=5ij-oaMPNrukP10PQ3i|M?OtElpM$1om17mm|<|cX2pREhoW8aD@AriC|btWAU;MOvQLSlGIC z6i|dq#uqiobKY?B*|=~)(QeCbMc^)4Aaakb4oWQO&-!o>hVpa^%fN=>IvABqT%l%; zp+G}P>LPEQlPEw#9}~b0hi~CE2e1`KA9^a9@@{F;deo9vz4T<+6+sOa(@~X6rcpM6 zLXK{ZH=PN0izpQ;qDhOFElY8^+S#NMoyoDNv8t;$OSWDsOJH7Crb7()xTTvwG4>%u zaWdM4)Up7ST~W8nCA>)><1~tuE=ddGk+8I{CtW9T=p_0)=~PwJxc=iXu& zg^uUk;q8wRi2l*{?G%5jVM$da!O&)^k zYX!#FI{6_KV@y}l$WaS2^#nzrP*|h8bw$CTdx)zYn+Mx9jV6lQU}KajU6IEvs3@jSE;tP+7i^ z`ns~o=z-GsAVein)one^P*pW!oVM+WOQjDXvKwVOw(N$O2!yDsD4JE7*H{Z1>dkH` zkP9b*$Bd0x2cui|7e^HLP)nm2BiP*53F(O;qH5ns#Fv;e-x!FWvH)I5LJB%^r!&_Cr5pQnarp}1xclYI82IJDNej2`ejwk zflirBl(yo)viFV{>QHw-K}z+1PND-==Dm@&8#?lyjL3J{V$69uMN0^^}%4J3l1 zjufss49gU{fzDhv6W2G;*+*y;FeBq=i^RG@+y!z36p|}$C`)n7OR~H~5i)djVwHlT zP{cYp&U!e0bnRu%*FB88rU(%fNfKmC(ZR) z12zINkWelZ#igFOh7d%^M;!D)5oj*dSmdNbJOmDQrli~G73bQXsRBeeJsSs(Ws~maz4DF9w)Nw}L4c7C zzQW6}0^sS0X2{gmGqDhj9az8+*mRoOPbBTB*=V+p23+_ymJK6ss0Eqr6hqK(u{V^# zcz^=HkbRr7dFSR{gBjVAJlA<&bd|}_*IGuss+vM+rqr`_pE>^fM2jw73?HR zMS*Pv-)RARy}NCv($*xuNOl?90w$!R)a0N;Lmms*uv0IUQldI~+Q3}IVz9x@)Hatp zj}30i&dA`fvRr$ZKw3%UQ$`a&))4X8XF}yC+dBr0VP`weaIx^^FCKo}(f$X00=jR4 zIgbrW`#$!mCw}()vyQXBeAvMU!pp~f=`e3DpLon?pytz0I_jcxPPy(|7dwwV+FYJ9 zI-$os%TLZ{vsiPnICzpQ&_hzow^V`CB@B7-$ZJ3uVloOMjX^_9MyV5%8i^J6v@1u` z>P)gy9=O*b5a7ab);V|ijNZfZ7v6O<)ck|T@40jC^){Lx|K442A2*w`$}cspY2^V$ zXcz3&G&(SttY>{g8LIW&T?eE= zSFhQ6$m11eFWb8HC0OSfdawiTfxvn!9xn*!v7Yw$&D_Q3 z$=7N|b%<2(*&K5ncTtirO8xi${Vy2_;jh_@T{!=aTW-4gzPsl>{+$Q!SulI%&DZ?o zhu@Yo`}Uh8VffMaAGI|vsZ*EiSwpZAC!7}@&gC%c+Z<^v!k5-?S`D6RZe&~78NVdi zAuPF^qj2TFX*dwl1S>C?DXzEZIohIzaLdP&1QCefCnx-?UTV$n(qOJ!P%xQe9MEGH zzNoj75iJw?1G)(N2-YL1tt^ZqdwmAOXzds~oGPOK=$2}#Xn|HLg-D-3v96+~RCUF` zL`)$gid^-4xg?Q8tkpzKZRI$ItwfseOdLA3mT4)g55?#ppkNT_#^7)kSSrG3j;t$= z(ejNMjWOMVFw84L4W$bWu|}T}L=%9HG87Z&!j`c%Df6aAzP574FvS=oauArJv1vRL6KAyg3P$0V1-?X@v ztrHsW%1xgdku&vL z4Layi`Wt>%SulvBi_^+2X>Mf^5NV53k_d_>i>KjF?9hRw;v9yyB)6{Rr7*Gbq6nNr zO<##JqKxK&rvf$s9fJTp!B&8YPk#9hw!a&H^kX0NBlT0hcA~!~!N|@hgVK;PsO2-) zGRodzZ<5srrB0~&6xP{XscQI-w0dg)6gUzO4?<@^AEYKitkMM$sTx|N>z{%K(wzjY zjjo^Q0NM^iV^HT|Iyl0L<9uC4&{Cs$W(*LdsyIyqdX8$XmyS1-6$hQEyQ1pui> zCAR`Zk4<1__0rkw<7*d^t7h{F;fi3!EH480kxUL?3*nNZjv+xXlMyI@A+V`*{7Uh4 zHLoW_>zGmSxZ<~`Z)e|Lzb~!P5Ji@ZmSF>!BV0L#pNOEbLIsfw8$fnj(XOdv7n?wF zGRbkPh_Xz)l37TOtJKORs&2`!dV+?i+v^Xkl+9U;arBuwxe^1&T~oWODi763fl;|* zAV#3<+EfvG`=}kq&yf`8HZ&BEE4GMw0jFbRkPei_S4d5ue&jDjpU zQPfxICB^JID27hqL8~&Bw-O}@#42l0!)=8oYGPb20+pC1vQP{Kk9xU^3(6hj;}oKZ z$dXtQx3NhjQ(c%*97FVHUPoPe{9Gu7%P23RxG*Rn{xl+0>jU_+XAD&#E2Sab+!7@T zPq!%Ih01cR%6b>YlHi)aGIz{?9}U4G+Lr=4`v2VS??q|wK|{5pB7mAt|XFYmU^E8eu@I)Ve(oxui> z!^l4B<=l1eECb7k4rb%A*Zh{9)^Q$t*R59A@ii}-y5*`{Y`o&+NiTRohq0dnFN4W| zn5|6U&S#tjJDXr2hzCA+%`?0VEh0kD@Gn3I-h#d2&bazIioC!bEWGkcD>>~+pxsq( zj6%wGTFJ!kHz)+cEMZTW(Qd50X$1k5;HoPe?VCA}uh08++*^G8F)74jl?DPThDob0X^AOygFy12u zjRkdtroFK2yGE4h&1*;&ZuN-3ucqju)_g+Hm7#3AQybc40hDmAx3+!G6h8*QTyajb zdw1K0rcF$EnTygEv+Z@IlRzJ(E6!`nvcT+aZEb_qRAPL3;!%)&WMCO&2Al;4v+Xte zhm@1pPTo3(?X))fK?lD5qaS+Di1W0g>@T19^sy#oDGo5r23)uI`^#8S=(;Q1=FT|;j^E$Jp>Z+M)1?hrqW;k z^johknl$KUvo(FZCkyJfmDvlDfYQX3$joDAGeJq^f(jVin&pIMCrP|q&ZkKXLo2N5 zju(mtnl_ZmE_Swq{`)4l>u(oT1AG{CP;2$g6H{yTRY4I|W-exwgn<_-FoIMeThUm+V^LrwM4m~` zsufC{2d*2`!RoOZQN%((c{)(6u4rf3t=_KU;%X}1&ZgRlLrI7#YX?v+6%k|sD{MLa z?|=MFkEt$vX%VeMTai{Jah1{~D4+;|4tf_XqX{$$$u|8d!;)-;jp>xAEf9+!;$&5~ zYQdg(1k%`C3rf|07)15ke7C|fqxH81mrW#5gOwaxb(KY4MPwhs)iKJN` z2eLy{G_~c$LpPLAA%SqW?2M*odWTityK|1ucIgAgrbjDawO_Z75Hto6Xi3~d9%?9^ z4qA>xn3WVSRktt;wq6zIu@^q&Q800i7BmCN6Bb zcB2kI&PyK=S8PP-qJ|0i*eKZ;(PL3O8`PkRC?jUl1Qpc2LldVPItA*|Od&#B4V^Md z7ZfT<5kjLtgmEMhVjI*qu>oO!j9P5*(Qh~X(0R%hoBDNz;AM2d5q$q72%4ui0mCU7m#hb2|K{s4k0T2LbFMzMBsUDXa!rY#;QlD04=duk4k z1bI*%^XdaC?zS$+l1!~QeUE$vh5E}*raGc9ooXr$AZ1C&6a+?uscqlG%brMR2>kQ` zja^4jeQfq%GIS}sjDw+w&2$7x0;>giiItH8Y9VjH9J3&A?I=mr+>+4i+xU|_z*urH zyCp=qBABsdAF239RU1DpU|vQL^hO4E)x&i#Hqj@UMf^$RGRE~~rv{kWanCk9D%)S5}(Ac64c8j*S=mxToFi$_62e9spS>NKnFcs#I5X4vnTxp88HYIF|eTz z6ht95baFC+c$$~=v@0GAR5j9RB~?ZiXpFlZ#HMF5OC3t5d8S68t+*gB`kW@+>cuX&*ATx{!N8FrhsVZ(dvOHO~ zEHW)&*c=`kf`g7jaBQse*uiJVvjy?Kj~!~Z*DT_MHeyJ*9p~O(_A@EJVD>*L-~0OE zV+*{$>^%1DTh2fIgpa>*>RMZD^rCGyUv7sf%loC&cCTK}_rBh^-I{^}*#0Ng_p#eY zc6Ic6^eODEUm?KD7`9peB~Y_hnBThdD|gv)dHc(|Z@1QVTdcO)%l(37(SP#(GPnwe zfeauaKw~)6+B0=d7%H^ja-n@t*VlAy91{il`Bs>H zJuig9eUK?^;;TV0qodl2!;kJQY1t)(+5ImAM)uVsFBtn6v9Ailr;fb31%Yin`*((= z+$NpxfO&<}30S96p=pA=ylkr&R0?XXXL%@Wb6hHT%Wkr(l#|}TwcmoMmuL01-~nm@ z3hw!X>f?vRl}I2nvNKU`33n@EW197(Le`o_ki~5QQihHJVW0dueS8=j#o27z%7~I2 z#uh$%#KDJt@`GMsKI7ys*kAVl<$dF)1((}&nOQ{DJ z&bG@O7O)TtEy^4}qlOnJDfhKhOF0Isxm!!QG1Z#x7ST{D;@WU)7k!F(?PCo$uy`eA7utJ0h;AF8##4mBouV$ zkIMlj=ZKp0&`B%s@JX;*$(84iO_ro7j)a_|il!mc2^q?qrj6)nW&<}|hbChaY;JjC z;`v?T0wLNZL_}PsoxZzkPg6l&yvk zg&gwf;7S6m28swZNLF!r>K+CTHt}>r5B(vc=pkC4ZXP(8E~w`=-KwdKsmy}8C}PNnGq}3zIng1qHuOEi@x64< zY=>Ks9*?jmeNtNm=|BN`DzR!Jic?kT*y>j$F#;3SmqgS8&wu~-|A&W7EGV8enRYxU zDHnqnVpdF9%teYOd{n8iG07A%yW#}a)}vHVb@VRP$*omizMKYaN)fLIs=iiWXPPL> z^8r`Nlj*a2zF6XzjWyjdn>}`&>S&FKA2<8bf?p(i@~jUhhp)(Eq!QqaZG@o5Z@xGv<$xcUteFURevh=;B9k`7sc+D-D!Ya{ z$t2m2y3~UN$>9Dl;m5Mo)_2V|xaQ^UXLf_RS_T-}DaiNYyPKAr7N1G9Fb z{V&@W!~B;D17Q2>-~7UXa2eIkEgotxyDW$ms_9kVF>gVYWmL+5lru#NM0b^iG(ltp zd&dw@_4P49uM^nBa}2Aze#<8L1Ku$51TQ)4KrHjRiqpz-0}&-+P%j})gUixuERZQe zxDpdv6X?krSEHjVizeyNb<`r`UB&3w@VlauR>zPX&yaWE!AYiahoFMVDXA*s5foQa zSzHMuzk*x|Q44J16_F<5<&`}NuIWrefqFs$vw*Rt9}-B^paW?Mtm4YjQGIQ|sFat? zq0n>~DCp!=^uRM{6@>%^xei637toa@xTW^MQ!4)Q#zW^pPNF!Yik501OOmTB6K#^J z$zcHzau5X(x9p66C{d*B>O~r{L%GjX1_L>R!9A+AB$;YVT`fSC4{_Imc%da}v~~3W z-vbbE49sU1JeYOh%6BzCVH-g3hi`Pi5LN%{G*?_+y; zxx?51^023DTUqEw%#LK^vA>KB!<%JcLePMo?6NrB?Tuz|GH9ukPJp{A0>)IFLhTy2FYFi0FJ5XX&rUz^zV`sj-stp% z)4R;}j(uv_X>H6Em+A1M$U+od%7soiAgs!h zS6tDCJ2{9_xhpiO?j9XL<%p~VbjwbfQV6Gjh+Kr@@AKD?vYllxSp;5o7#m*p=Vb@5 z5uxUzj{LMQKG}4(xqSLpKks{AZ`pI(1Kz${-}{1>1z*X&_}s5ub=kCUoPXvm*IjMXrmCt6Gnk_l+s3_TlErehlnRHj%|>j zj)kOIr1T|^*||*3fus>Vaky>}JZOLY!-03JUEm1g zTIb~F^Y)D?&(J(Rf}rJPu1cW>Ay5D-g%?CBR|F@kDgrZ9!VjbX`l!(H3c>;^tENIQ zlb;}-9T;Kolkvxv!83~mLPTI0R>hoH+0FdbPSFZgDXJ=3P2FZL3Rh(vUL;lP60%bg zGjqu+F32v(tIsG}qqpAGf{Jccq6|&wLw~yA94%gBSW1Y}vriPaig0C|?9;76l|?JX z6&O<>!7YVdYQ|_7r9c;tv*vPa*Id@1sQ{`;);usp-2{qp}yf3v!FPf zA_@&)VvdJ;^#swefBFPC!B^ljT z!D?64#D^ypj8Kn46yZTZjv3R(&piwB>Om0*n%Na(Yq%y5(Y;4SJoOBLdIC+VJNnQT zRH`CizER`0InC|#>6duLW3*JgdIM{XBoZ{73i>jkgIJyt!$fSwN|2EvJkxWKmXjJEJln3OdjWm+MEhyJtd%UZpptWdg909s5{2#%gKnc=L( zZ8xikpy@p#)-Ft~UQX1wMa%+22M~es2qZSps$N0AfBeT^Js&veU*3~>f#rig@e%M4 zGvv!StONy9FC0n-n#hYGd@6!pD}<~o1<&vQ@N35d4n6D>B)?*{$roR8KH%BEq;WE@ z7IX!u6(kq=lnifS_gt0UQv$*%Mgc+5;wZ^)$kY%zo~AVPpl|<@^OJO`RvCo`z7#zKg$__i zq!BTU7i@@l(oO1+vAEJP>qSDj;zUPLZb4kPZ%~sURlvytA{9{;k}obvc$zd41gXlR zfK++Qt_T!aNNDQAhM}@tlZ>Z;qNAEdni8u;%ovjC7F|ZSF_1Zgp~{jb26SQ6)yFo$ z&+=3fAy9Elf#NpKLEsRH-Zc~97f>ieh=>wJ+@>uyGnimdS8F;;%3R`AUzS3*7@CKx z??h{hcUy^aRmZ2{Wz&%rWLS(^k}Q8iaFWHI@^C8K<3mU}3e+5$?iFUY*z6{c-b%iI zjtH;}DFe>VU3Wqolx}mm!`QA1XWi8Iv1eR+{;ZoXntSVo*IxFOSHAptQ#M>Wyu8H* zOFNIf)8;E~yUC=fQ$|0Hf{260;bj}jz_K0Z=(d<4Ww*{_i@$xBwf$1c_p$fber@Nm z*I#SWDwALQPrmonack?i0Fi-j7EPyry=-fV_r5KR2DL$#5T9q!UjLK@h~q^&#AV}{ zFAdE>g_119z@uPm)#kF}$6N&rAhUf^f1C2Us1IoSvaV8qP)D_u7$zmw@o&e9;AQ_x z?!`}kp7n_&-wifDz`|Z+wwvrj!CuI=xooAdxO*%4Glw1GoOXEGJIkK6Qoi7$Ox`y2 zCtUyEdF$SLI&Q5l`~>xdPhG)V{=ctb)5k6N&yn!BAe7nxbrqCEPhU$WmOF>CyT)2a>2iNyyDFulF+Gv0bQ+?3yA~B=z=YKlCdeY;1(wX(%dGp zOA(h zE;)PpMQ7Vzo`1(pP&3z;6U#m0-0}uP5QYiIHCTZzRj_0W29DIc;;wFo(PX#@C^++$ zpT;TGV9e_tvg02D$K>Jh>y0*-g<*dgy-+H;fPrF4C$KS|Z>%ElzK6R!VZMheIz8%QuV$FIzik0yC;3u}X&v3|=J; zs%8-S4E-m|tDWJ%O;Q|+;xI72QV@CgRHd*%O|lsP9`Rv18_no?CZeDN2qF|{K4c9l z71Xg7Ve@)UEkY%1bsN%b{~03?v3HLA7QxpKEP13c9SJylL||?8Sqw#MJ`|~BI?$rB z6(@opF}2mMj>MQMrH-o1o{X87#f+RE6<6y~xO0{rNqIenwdl}!pS^Z7ihJj1zkT<9 z-+SH(GGn8ghhRJ_E|%snvaGp^(4VuX{bdGZPq%utBU^5(4Rp4>30O+F$y za?}$SQkZ7zmBRT!50@Rj?5R1LY8*FyM|@NdlDGK(-8dfMbAb6#Y;a)yMP=qd+%nQEpt= zFVLF&A5Ea{>gz=z*WLX1y7_z3iU=V8U|`?h`7TYqh+cB!QoKk|#8 zMYlD~up*2Dt^25&Yq{8sLD&ZaL($DO5Dm;qBAK=_KJlxcxpgXlXi<1;-;=JxPM zME9oB5OnfkEFcn6d%4zcyT&iCAj=S4rEli@4NeGK+W_S%aKJ zNhP*a24pm;_2fY}ktC}N2~zRMGU!>PqPr3iO_GDZa@|UGE0x&hQo=>tB4S7l z{3&1Q^c0aQM3Dvmkdb;rr4ZwE9q5>a>RwsmX@y5wZVQMsR3l9W)#sD&yZ0~rqfi-?xSv(4)-2O5;3cPF| z8D0jKzyH`m=vdqf%kyr(wl8C&K+teB)a)wQU-tg;{qtvde|hfAOXuHl$&9N`-DI65 z;N_jSne03^yuAIUll+4DbsI0Y^A@8&FGI&RmpgzBBNtw`!7`vUfDB4Q(7&SUSh z<7#i&dCmSO^_mS=S$oYD{s~?NS0Q4!3?dVPu@+W;M0&v@i}+ns=n_Ko$tfTjV1y05 z`)QVjmk}Lo1Caez7(n)+BoGoaFz9t=6hD@-QbVTXIHA4TDl0*nP8o&MVN#`!w@pvB zzf9&TE3E+4o_F?{&RyG6wjB7*u$QVV2R`kE_@RG!FSeCeToF>XeeA!mK2U7$*fDJ9 zv3&sytoG)zFMHe1#o0Erh=25W3=7~>F9fQ-z^rn;m<$ovYDZxTx4a0R4E5{+x1SCO ztA!o&wO{#4UKU`>5t1Dc76&5wkT~CiDg2HWKidMoUS8$_!Wx_t1_6zkOIhr|+QTc% zn6ZJ$9dUO{6Ai(_^3p^S8-C+r_rAz|j4POIm-+m2Pd)9FW8Hd@8CV9HVQNRReedg- zqdxQXuYJ*Z?2FF*%HF$g4KF*7eb~Y8`^wQr_&&C;egVtgU%ukfX*XScS^r<=g7W1! zy4GcDcee&k6stC(kc)`Ob(D%3C8Arzk{W3Exsg_RULU%kh(wXkQ52#NR{{&qg_ptP zi6Ok~N)+J@Z4%t(!SYD$ka7!~1bDMtV&{aVa!za{;iQsVE(-UOs^nm%yi`0?RUI=- zPQFRT^wLDfuQTDG>!s!*bY2th!c~$hAEO}7iL$boLIpFkAdAo}ZK@>GtyNOM8C?Xi z+1OQ_oIruAcml+ z%(JHd`(OXS#vm9t`C69I@bl#A>GA5#U<{b<* zR3E*?h;h`5kgOzb;Z{wzh?=9;*P6Nz5sUcK0jQbWCl z>+lOm`GqisVOPq7LlPeiUvYYZCNCR>|8n3 zGz?5(pb5KW6BZE3utS8B=^Pue1l9r6PEbvy7k3q>K&TC;c6VE#28yOPl#DOUT3b^? zT%eVcoSf}ZLyZ4UJE%t=BZ$i_iJ=TDR@%<*&^B1dqgPAz-~RkNgH#br4Lek{&L+^1 zWTi;ePqcWAKGd|bR9|Emdd8#eQQ(oPTAAH?;J}8R0EdpzJsiWJ4+tLVs2aMiqCXVa zgt^qTmU0zGjH11)Zq`BHNicRG&;#DNugA{hfX}8Nob)G3S>on=U-mE&DZ*7xQ#Cdx zhp7b=)>ev+s>=DL%qYSGSN&ngGb597lk`9p*8?%g*wYM8ZmnM`Yvo;Sl~wlGb*lF) zEMa6Smr6#crql}lI-y-ilbvu)Bd*U7C@`|3?JzoKqgOL8NCh4imnw|rnWr3S5yP2Q zxqSI%uK8(IL?0koT?B=Slc0yF1&V7u8S@aRBsA7c1&Sb3pl9Bu5=CNjrADwZI)U(o zee-?ZyywQ7tPfS$g@nIs`H7r%`pG`_1tK{@j41s47e4{ApfF*q7;A#4zhVl8WaW{!(6}-zy+3yz?HBn20%#?tLyOES~qYth%}!sMCu+| z$|%qu3;|t2=Qk?7bhhcn8}G5p&OR1nSID%0!YV2G=`fZL=GimPH!@n=)z_ z=CyI`p~o^WU3!s47jdrMUi0sM|10w4(v*-XR4xljaGQn!Uf1fYETUWmsOVtem9lGME`eV$c_p&y#vy5(rM?`P=!*8Cr@Sw%<-v%KdKW7hN12!qK6MbcIr{4xv_yjp9Z6TqSb_k zkG%0$qi$W3DHk?bN_8vaFw9li*hGmfnbc6CE3xtl;*u!SEoK3+Qd}pFXgos^*rbw! zS)tfaXi`8Qlth;?ifaI~reOzpQv+La6fwx8ElxosO=2deTDz61WmQ)8G)Kf7|3sCf z5{@p5xR7o`rOM@sA}ML{40a}5L4`7=n|L7&lb;!fg3?xAk>m^&jK31grH+ER#SGJO zZ-x;}HEylQ=ml$20ljnA1702gaui_MA#I=M?Wnd?NEs;h+Oi)pgT?~9?C7;!=3Zfz zIx*N>hL;~&cxg|Mjn3ZR2&8*>r=I*IRe;5`Luo9N+s26+(R6U;#D+b?q;E zf!FlrX@483ARZvR$%gClXFy0kfK+(dySGl1ftdi~ij$VH1Y4I8L13H3K8wv6fKNeY zUXT-CUjDuQnr{Js2OYRSL}^nM29zC0gb;mw+Anzi<-h|VW$4&!sWL>YzV?n`TN~v>P2FJ|9{^WD=jiaLn*C@s z<@vA@inH8y(j8ufofYSa0lO)J5Ny7arf3va!k|z(FgSE^{&i0pmDEsh33IC+;eVumF=PsUhqVw4Ad)Ho{J?sPC zU-o6}V~+gP#nVo?@v2L%zv7~+zj=XQFwdEJ-NW~cHmWSj`OhLk{xi>w7w0y&Fs~0K zIv$jXL=*$fup-L3&pG6b@dVHlidl)_0c;hSoPiHpBvIex!smz5*C_WMGyx%P85=x$>kN7mz?{24fIX|0Y=lZv&x)I zc-+%$rzQDsjy3W;`I)kSkR$jbs2Ahc*0pI(smU}b;|#XdYPxR!gX^2&I<3i*p}-9l zm4z>dEt`|W<|K=_^7=3U4ovEilpQ?>ei&RTI}>7EP-V5Nj$Cn-GQ&qvQ&p~}tbvFD zr8Px_9b^=dMT@jRppYrAEEEgu)Pm}z=t>XJ(V>t>N;eLwD$tyUdWEiQT^M?TN+Dt{ zS9&ULSj$k9%cY?P^`J$_g3k%B=?vtIdhFQ3S&Tq|B}+mSVyF{VqKYs$^^}W3LprDe zia=Y$kyXHbbch0%pN22bVKSy)l`GfEJRL=L#KI8P4zG(FC zHlMt3yvKY<0mIh82K_CA)Gk$4M>V}LvRVXHjIx?Te-HuX8h|1!uSK|&l%!t0B8!o% zphmZdB-j>$wy7t;&g#$Rnd!h%0SdU9)7_hAsQixW<=QKks;3Dm>M)NLwSf40M@Z(~pp z%j1`-IShos4HGHk>vAUZ@r+gx@&%10Z%-G=(M$r0;xy~v0peG*A{2+NAQ@2_dImmG z%0j_euCm0Y7LcR3B+PO-*$CUNzQ!sxC4c+7Cm>>=DR2kBz)esRWMs3?d2IN_-qAr9z=LVgtpHQTPibBp5KXuWFM~(Sk;=o9P333t>8; zM>mL`-e^AEM(63#ioxio4Bkz9`SQ!!K5#IP!{hxm4w9o9O!W7^{1HskH`E%^XWrgQ zR-*IRKxi=8zBZM@YbZ9CZ9oH|e#Gp_04p_)v`90F212pGLDL*F6=!N~sxvSY7zpSE zMO9O!AjGLVZ@<#ZCrw)Zr7u}#DPJIGUP+)RPBzIJfn^y3kBpz)=p#s#Bxo0wq_DD} zV|e+TbI*jtg|*gR{p@qjkn!LD{%7UFzxe4RklE7-M7UeCB1%ORMCd@EruR%kDy_;G zUM5p&L^@-3#n8H=@3FS#c4Z>nqhaDL<|lsk6K03hG=!s%?BMBy&PE@Sm8Fj4h?Ac_ zG(=RQBA{d4jCmPZ-~0Wqe`!zo9s9o7u}KFkcbU3_w)oF~|BJq@L~pEX=tf-EfS%IR zto8)Yg*ok>bi%2**m4!$G zV-g}2aiGVal2s(#67ozvDUc4XLte}U4VtM5lsYITjv~ZXM5;)IQdLB1S(uu9N+zv} zh~pUo;t;~c7AEsg%a8@gItxcjD%EYyfUCbi3P1QB2EM_Qd(9oaw#y7J`|6huW&7Ky ztI+$)(6J3=?X*m9*; zue-SO*gI~qg7=s0FOOegUS*%DuL6f1!|vr}VAuna9i zzCedJivd93&MVBIoRh{jPn~Lp?>d$&JNyeC!pk1HTaGbs56t<>@@t;k|(1Gi$xu!os`@FV~fce!FWo#}Buq{<-0uDgXYUhdq7xzJLI9k>CJZ{yL zapyFgqNpZd?vi?{pD+>U+9-ociepS_Z}X-0L$yk59FR% z1M?(}9nPCLV)WcOE_$x4+k#s}@qBE|ylCq_=b1FZQOJo6XJKM?Nke!pyiCIXWEs!- z%|Gd{l>9&yp}Aqs=gjPyz+;C#U}dujCl8r!GGc~7-11$;Q3#`7&sLI7X@2>1@8{UG%E`>t{^s92$3Y@aJoguAV`%3XMUr2bSqISvKc!b z5O~2nyD*;aT2}rCFGC{0a(DzCHhJhiCk97Am~C0FR(7j`>RUr+QmJlP0FSpy)p~k;xPa(wZ%i zhEmF)V-TpSq0R7~A_8T6dxJepisumxGXKt7Z@%f;E3dff^658TdkxeKG7H~*)KNo6 zCb%)aIsEWfjvakX(J==9aTBpvK$_MFhyVZM-~XiB2+B%wYd7_ny%w*BsCyof4#42v zy)_J(T`Sa!w7Vu|H!Z6o!&qS0=vr1B=B>|&D3OI-C223#kg?vdLXc{Tal+hr>ZjT&;qU6oQ#kSfTl`E)r!^OSfu`c#_j{^tKwQ4I3P`5 zdI#wuMLJ5aib%C##e#{(UO}+}B1IGsR76F^1~w2y1f`>hib-rK-Y?yo=8Z9GqUkH~ z{{HV1&wUq`^{qAQtT}V`v^{%f_A`5CjglOZF3 z1#)5ZbR_nI3(pzgp3kROOPT5RFMs+7f1pLl@}z4kl_tO$Y_UX!X}(H1ut-uYq+SEi zz@;EQ89{&WGSDPPy)VO5mYQK?@=vIQ6EM=~tFB6Z5D+1T19rx)ojSPV#hooMl)lrb zUTq;K9D9_*YYGfx;fYq@hsZcN0`kwgTF~i(EgPXx5E4e?yfQ08$_&s@bEYS3S1c)L zDNlY4q^7Fr?k~VuBXT$Z;^CyHq*O{}RUbNbm)httBTqW{IJ@7kn0DE3-}@zCuI`X2 zo`b*`B8s>IMaPP;NWnPV!ylB1qWr;|+0uN=;+uix<}I3o0T2qP-^L$ z=)FoO1xSwKhDMRNB;`~3sklTmPAtiaE>ON=#U)dpQqH_0i<3^AC~2DTL?XiBWzrkJ z;-#^W@k@}CMH1vmh-8K2)idQuiy)AA${89((&bfOnzBnQfgFF_UIkW|WFcG>CGnc9 z#gcxwk&J|>f`vXCMN+S%EW0AFG=$V7C7fU=kFV%Mj6foB5>NRNh$3r4ih;$1ZYJUk zI~ivTI088#g{oj~vc;N-IWDJ|!Q_%1x%{%P+>#9}$HB|&%P_KS*qm+HFYkO7UKYZp z9LQYJ(2VBISGMoj`P}!uz5RvFk8N6a@0wK$RxY2}y+gI09jhHSuo=AEuWQ-Ro(=5B zwi$ck;ay@uw(pv!fW|zaap1GdvAF^-1IuR}*ZaIl1128Pt!Jlt9oyDx*QP;P?eZ(K z%a`Cv3vr*H?bYeb*ASoUodGw96?S7zWk@xF`s)5JV2zPf2!Ppo|0Xm6sEz&9&Sc6A zD9wrJ7AzaEK|*A)rHbx)E9p~`ZCXa`ncb~n((hBToBiN+D4&EFFX$%W|g+-8a>o(S2hbZ#C`tR zXPP@qUdAg%XTOV8U{wG=IVf25J(W;ld;*Z!nmHD5g;Z=f3&IkGUg6Fbfg&olim=6> zF!9Ln%D!&lR3im&xLb`>(*goLq*|2{Cs`bD$(?Cd8_rn;%gj& zLgg+3)4$B}CVi$2`X?HROIh5#q9nnjnNR7kUx}AL-!@}R00yb$+*{u*{m(`E91ka1 z9h|bt&|`0d+vh9$@|*0?n_(rDNj6WPgi9e~yFdhmE05*0rd^s& zGD@69wPauB@dTndfI4{D6FgBD4TLRe2!1uCRI8?z3diuD^GS0cQ*r;Wwt>X&X2`7} zg2HcH^$9@~Infg(fzpwhHA!|5+;%^Js}IP3wq%FTcuQSZHQFYQWft?w6?2*D)5>Dq zP;22rjQ?Sd`Ne22A_8~;Z-s~spVBVy%_m?%EFdR%`;`>6nRa zkl_GIe2Z|RS6SF9^fE2Vqf}nY1+s+Sl`3G(lQGT`pR!3x3gK;}R2d@hVSB_r{!C2z z5pD}duV4{ruV{p*6f`G?=+d%Kak(oM#}oPSOt_G^gh-(%fKo{_Q>660Dr!nAT=n_Q zo2r1vn4p!zLnU+sI7UpJoqCTzQ6&xz6Ga&L*{7dc_v9MS%^No`E|W%PWqTgDw_k^t zUcGhE!e)&cx=qfk;Yrf$YT%5n234b<1eu%hM3t3Crdq)iuWP7FdYmdNAE)2N21*ii zD=s}zfwix~tBV?`g475J$%H6HlBSG&#Q|(b=__-y5e$db$S1Ml@syerxTNXWHS@7K z#SsQ0|LDVqw`$SYUl)ZddHRLM(q!Vwf=3EL&|dKkjWVcRiWd=8Z1hr4K|;=H#A4{s zDYSdqS|B;Z7ILV{@=DpVh@pbkH1O8Nn-INR)D8`dj{~MWb0tfq|2ZZjjMc>?C0Tst za8h7GOvHpd!=2oH6^{Ao2vSHt^-Q}UNWml$jOo?*DV9$%qR}Cilt6w-*%AhnSq#^~ zQ%6Z9<)kTYluY_wv_28(f%HSK7-bOPka@)@h8q!qWge%!YVKRrQDql319$AL;1!(0 z2+IQK2C%hHJ>jG+i0Aq*ijzyTl|*?I+h3~;P1vHS>cWvUg~Saj2|BwFb#WUN929H_ z+yO_>7T6Uq2oQON2ce{3F%whzH8JCeq9)UJ&XMU^L_#G9gK*swF#yGy;LsT>vy*FT z#KdfGc7Su21KZPrYy-v+rL~(EB$O%GXjuSE294ojXMUNO1uBMG1Iv)IuguWe8q-un zTZHXA_=($IaYF;CFD78PhErLHGSCl8r3nl&0r%?N&1K`mh7ZEV6Hl5L11oML4o`yC zVOR_S8+ouu^x#>6()48iLH^R{ZH@2y_jpiR;Nvq|<|v2*0EfH_;KG+$;TpCiGr;ka6fG zr2wEA@e?j21>lz^=WC)Ah4Ct|a8gJ_;qiOrFA642PGB6GIDyI_FhhY?Sx8oWd4z`` z)%Ti`6j|`3B*^x1q9=tApiBslQe_M*sxL4u#mln@h)Xh&@yNL=0y{N!^p6DbEyd7&aMkCJ2KJ{h5aoZv2Td zU}7qFr8}_!WUFNyKa6YxHWRbg_$%(mhKudK4rJzmm%(KBdKGv%_GPa*#r*9TH?c4K z|FZqq!OM@WnBA{?o!FP1Up}~V&Hi0#4eVOSeP8}3#gq&eb3AadtFS%cWdNC3IhN&u z2H9y74q{@SII_L_zTA)9^}xmlv};tie)X@iFN2EUEszNFLTZ4sx!y00%<}Gn9(%;0 zP$3Wp!GUikS@&vs1)OOzY{!hM!J8J+nV6-<(12K=Nf|<6C$)agNXl|8eToP8$*u== z%2#%fpm5L**DF- zfukV#V-y;HSYU%@JoW^f=>RjtARN`+J- z;p}UtoHS906vVY?@9r{72jj<#tWmWpVmfwqs=MIDZf$TF0gS9jI3|Q88gP#7-WfO9 zVGC7I^+7o_%79&#EL;Kun9$g1p>m|jK+x>n_FY57ezMHD&Wl0WkEQOv_x7A&0 zW`hQzL1}(q`Q|y7FJC&x|D?`75$1>PU2@m0H{N>l^{ZFj>-_Ti zH7j4)`JYRS&D5sm^p~tG(bl+3;^`>}IDQkiPNXZPH+fB9`WIMXQng4<6%>+05F{?; zke-P~$_Y$rvX&b9ufoeF5u-syr{HB^Ie0mRPx_w9CLPU$^RC|278G)?)JR&QuXGP( zQwEAFk8mF4jA|WZghufYn+l^k8lwzEhB!kLrGiX)(kl#5G$o%5O^A7D5j+aa@pu); zACHg>V(KECi*n)>4ZmdND5K)F&|$;{Mi`OdWw@gvUJh`EmwAFpc#x?`IK!ik%FaU{ z3;NR>LbSMHGb5+wup#!OKF?Ul{#h;1>|Qo6iuMX$5h8tX*&2@o*MT9hEt zB07K|BzQ*@(1ZGix5jz3P}vOoQNW^+VvFjlJGCFL@Rk#H9Cj~IC!E*h9`Qr{;S2&C}-kI2SMaSATky0l6KuK~;OyVD|0m#7QwNlH3O!XtMc3{fgij;wO=3zQwdIm;8VtUa}n zwFpkQqy!RL@kIQ@#Rs{lrY9%CI}lucnUobaSl>dUWcDhtP1qp$}F^RYcn z(N#npJ@FRzc(6`_Y7_?IVM-phc#ENLn{N+st( zEOAv(cz&X5Z*wTs1&zjyi!dh(d_XLE!INl!5=ohn`GiI++|y5;Tt=g5g0n8Zn{> z!xK*ff_&jMU|b>&M8nIlo_)nY6L1vst7rCXy9SEcQ4-qM>FOAAHkToC0Acj~ekyD!oiKjlJybn!8l!o!g+lY-xtdgZsc?Rdb74 z{RXugHmVEK;)Wx}fw9Vi72qM0F&ESC>5wHraFutT1((PnMw7gw4x;sTS^O9-d3Y8{@W zcBC5HF`Oj30^OKaXTKWJa_uOOxSE3d1i@QA1Els(i1t{v92FDk8)vWL(OpC?_Ug1CbFrNI7?TT;a zw?18J=KON+j@A2gsKNE^RBKfKmT)mR%<&BG(SQqH_QU0&-Rk9UQ}%2#KF)^PSuAW+#o2)j8+x_F+4{fJ${;gPi5_ZpVhc{BBvYh8LJ9*&?yI=8YdF#14@Aq07Q`b* z_E~C@?bdc#n|hd-4aL*1p2p&AS2k$uRI)J#{x$yFtn5xO9(dVOs-wyPwOM1z#TTGq zadraBv;{;v=wa3{!6EP(@D9$i7uyakYgJZvh*-&iXs{hOKxP;L!m|Gy0n0Kl%UFOi z+oXA*SJhNzmT^C_WJIvGz%Z@iEDH?B(sXaw@yAY3B%E|1n626r7;zm1MS~{_fbEE- zgUkW&z;G67SepS9jXjD!Nv(Wsq4eu0rer&@p<{lIC&p#luz_eWnZ?-?%|rLxy5y#r z4=i7J-kHZAJ+A+CSDrQVnzI+pzx>YIW~{ht{@TZu+mF5C&c&-Ae&D4oTVL5;Qi-XH zQR#zNbr2pQ=0iGLlw>qKx{Q#hj{H$UbtRPgmk^$c`ktgH1A3yklmTgqo)q}wq$ZZQ z$mGi-1tGqQ#f3~RMR@4bwUfKVQ3}!1`Jr{}CC}s*W10IsX&@TZ^0=|WfCFKfcZ@K` zCBqbL$14rZ00j_bJTyibtc-r12n;W@IK<@8%!QI!!JLKO_~xsm5K9~k%$P~JGFYLJ zvZv9EN2b0?oX6Bm8K6XYMq4nAQZ1+umbfj9TPHW6^P6g^JqqUBfk97R3T&V@Z$fGc zqh1QBV2mlqsI@-*?zjI<|Hm2Z)rdF@=2))LFX+-a7MIHevgzA$N2)nZygC%5}i&i&42=JEiP#Xc624DnA1jLfgXSctxt$33PW9|7V1!% zNqOG;^)CP!1e%ZE(=7q+O>$eG9cVc_sj{lcykgLBm4SUJcy3`|$2X5)tQtyRMTm;g zo8h@}64a_A9X7Xb=rYW;R(!a>I!3WF{ zh{F=WPqhQ?NRdWY3x#BKPz$o53KV7%K6~bMI2Oo5ZYgytf@A2h&crH&!9qKud#+0+VxW!0iGHYfie&Ubj8PnZCYDl_M6n!L+D41BrXdEfo2-Fx3%?$b5`18cHl zD>OLAEGy+)>trzR+O1PSqUzh~j583EjEv!m2N@aaK``8d*#{5lPtU;({3i+c!)VAr zOpu}lMIJB#yVCE_ka;Y!m5!C!PF?5G{K&Ieqf#fL9eMI=KCOwXdOe5jqL>jdR-2Mc z?3E+&7Lo;u(%=cKeP<{4#&=(M>kW4gmi`p;*6U7clnzxHb}X2XgzcL*S^N5-yQB7$ zAS96p>f{FiaUNd2Y|2G#+V1c0GB%DKH_D8Ld%hA9W?wY9*BU<&2fs_l=z&pE(xtv@VGY8F1BQY%=@=&YPrJ>a5vkuZc)1Tm7Rsy5GowQ2Yw!o*x)CI ztGks4OC?GL7i&;i$d}W;@^zql?z~wH@kB`Zg+#!RDLhL+VIW^eIj_KY{T?sS`<=6H>TDDFVGBmYTR+smqs}3nTK~L`+i2y2uEP!L~?n zFHgvltnnm6%3kUC8YXy#KOyi`1+2gvPtI2~ipTjXUoM3duAI_WQ=F8h0xPyxF6B=# zkom-=UZUWP8P9%%;eW7@gBC$u zyDQz4>$VVBkLE@#L`=_NtEDb-Y@RtRP>yF*7tL1i{7UpTlp*HJftF=FK5! zpj&h!iGf9$ri|;ayPC2Mbpp%qDWnhHs;L4fhEjp9^a>%&yMVK*ZwrC(J$3LaOzKLCwL-ZU} z?kAYCkfWWAlH$j3>j@La9*&LGs#L)rSF14`vn=P1FUDnOmWAXAGBZ7U+O5s8IG1*7 zFPS%g)|GeOI{TZa9tkhcxccnrSDm$B-jv%GU48fMv!7VGZ1u|J_usYT$yE>e*Tt*; z+wi%eipEGk57q!Hcqr%eD;_F24IhDUeMdNdUen8vnNQJB;L#xP%JE23bO?p>HGaSW zS>Z(28H2uyK>3QBbmS$YA~lH!Pe%MHs;^Q20wg6Ps}|A2so%{SHzeNLlV}_E2 z!A(+9Ktpe#nPl+vwIW`oM47ui1x6(^nX*Bci0Pl^Wk<9q(#B5_jnxu~UTr*71%)vl z%9>u}wWJLE^hfqZkaI&Hyns>orUfyLBV9st5DQoCv1y~i-k^;6EM)ZA)Gx_nH zp#bX`??8TERSH|IS11F-r#ujulGx{f#`>56(8eCn8??bm^ooSl{6GyxefHTu;2NxO z{uG?FVhIO<`5S1cMr zgEvi@AQB0O`w;lGpT$XLKIxEQG(?x&O2OC6?|2@z5JkMgc`U*!U&$(r3^K#Z)J5Qe zB1$c!ThcIT)O1S{PXCMoN!Ya4vfE8J!&Q?5nGKJ2>_B;zA}uFkNJb38qeyaB7=4Xj zyAY2L{^vK)oL>n;soIhLj}L0}>vw-h)faO(%+U&V98qfnVca+^_Y4AIAaz9>NkRI5{KNYaj6#LC z|M<%%&KB8nsqR*QWg*azVpk_X-M3$_y7l(kzh#q0AA1NihLN{!d+z7I_z};WZ+&~q z){U?3*#$>G^5}z8FTc1~@2=Leq@&|F4MFVv-wNx@KGqCP%_@ik!i|u+D!+`>NNLp? zCzVPsPzJ9&7%R|p42Y2^HPVQ|eNfDTg*R5KUb#lieQsa67%tGzP=Kbljk#*IN?>V~ zeJZiTY6SxkTn3mEKis#UMB)WY)0E*SLBew_h^r(M_q-zJu#QVCW%gBhNK~MQCm9QT zGm3gOc=9MMnN+;T9$yJz_3qQ%zknN4T^7t^8};?fvnDf2-Z+0Y&;;)+y<>^Zy=~gI zfqXfL415Ne8A&CGo>WWF5G_HF$}31kr2U}ky6IQRg00h~&~kynr_Z=rvLFBP1HeiY zrSb~A-*ofbgSvLaCD)m%vfAl=?1_3GWyU2f6c{_M$pZ<;G_@x4(ZB&Et6`#a>wKX9 zZ-R$?d-w3q&2%4nn((VGzYNcboGz&~IrQYp^hByTeoo`+zrZynr%!$7we8<~Yxnox zei>?}?$U(c_|EPfTT3_Q8O9A1Mh|$|7B6@iYF4kTQ9ytAk3Tv%MClB;bpx!cT%{6z z0>A1G|AK^zZn^37$&*~U21ueeuNe*CR`R30z*SHhu_Mp;jH`Z%ebL3|S$U8}1{3M< zPdsj7u`9b{r}jr5Gv2B531dd(N?3Po;gUg21&tUM2_bzRqK>76F#DOcASXeK7z$~; zA}W4ar5H9FXrMcm-Rd$i1=DWQ zSS}W5fKrR}0+ou>BZTN3@$d^LHY(0f7z~ebLcZwoNfgnOf-H1TebXzy@j)z4ugK6R z5uU_K50#NvDTZ*aC}+hi1QP6sXb6&TvM#bn%4ArY$fS^tApPW$EQ)7Jg+@7xz`Z<4 z+*7j3!0~uhm^dj5;pD<4I)X$@8Kh6ZIC&*0^eBZSR#K2u(v*v@svsnp@_0>Pk$7IE zAbJM9V(3e(Jf(>y?c_2i5H7aSde^pfuyC&Va`Bg+FZ<6Fq|EvZDGOm@_LY6v z{n)Q;-|*7br?zf*_{Ghu*q2}1xoY&VHtfqI`ZpQSy>|EZRr+-<1DX4GspE=lNEsHM zFsvOc3?_T^95 zgwp7&E)9_1LN_~XZnXk7nNfgN9;q?lN*1m}W3v-fOR2NE&biaTeBms!wKcn}ITv|3 z*X+MhbYo;vZrif?v?-Segrhl^efi0hQyvUYzH+ogPcxC}1_Djikh`UIkT%6&HG`uS zhH*Lfeyd9kCI>7-SXco~D{2h!Y6>R!+m2WzQT=%MboM|*M6 z8O*{^0tF)jtFc|hCGNVzn`>Z|g5+5itJ+kNdHQgAV|GEmHvoa4+; zvq0u%uL7azd9$y%ZNV&f`SwLKPdjnUiN_AR@rFyTzxJGkH%`6frYr7PJoB-Kmcq*q zF28Nv|4N(e`#1Zwmee!3Q=%O|fiPtheJ zZej(SNJJD4LP>(2Xb|&ofi1MSB%*stP@oX$cqSd$c||WB(dEnWNLi-HD8N!cj7%b& z9gi9d8=+=$%easK);ffkYp5cYQ_QD8CA=J9OKpHDD0P7bhB8AC0x*rofa8;ai_Q?; z!=S_yjoJu?(nM?^MV1p}JO(I%l9DED8|1}L52TU$WcCbp@T7EEV-H?7HJg`d3oGtl z&gwyJGdq_cMtMer|Lkv0hW+&pwU8j>LjUoVp2;uJ?B=>WxI@J)_~425h28K_*P3Rjy%6RXign!Qio$LbSA(4GH+878FGp$y)z{D6N*51;~kC?LcS5Ua#cn33g&w!LZZ- zfRlEPJY-$e6edUc5(DA^G8+a-pp;Wz$U^#rN=5;XDo8~^z!4Z(s5Oy8iF@63 zZPwt1+0#Gz`2Ao0>Ss*MFYVs(`WtLpZ+!5F_jbR$^X3I}&pziHEAGF0_M91pJFMgM zYm={fkt-L87y#Y8p|`5C5P!e2Do%iLOuHKcGjJ)DVGy3u?(hXO7X6$cX$Th2#S7-s z@M-%n5flJ>+5`(BPB?Om5G@OFIIJp!gOXTmT`i)5stEea7hg5b!ie~i*6EgI-F7){LWj!SiMyivD4hD96=>!WRjm$sJkQcN{(GnGch3H+S+i>QZ=QaNWdT(Hu65hOsi{^@pAzU*2TAnwPCL-U|-7`s{=fO|2113~VDk&&iD7_}cYXbLT zY059DqIJTzB4SYmDRB8}s3ZVh6Fq54p(tU&z%a+$`bE4fFj_QE2ts4_1cy0Cl>z76 z{gqq4iV)#|Go)hYtvq9529P0T7}+aN%*_GIB^=$i zVefX84mq&ukX}vt9MrH=yZTKV*Zdm1Ok1FX(MUlvunyegm@@TEE98_ngAMow*-=-} zWFNJStO(4xUSmr3YRjd#*8kzbY(l2@nt~D3+IL?l(v?<@xk1yACU6Q!LhE2D%t#@3 zH5Pd8$4~5iEE=3)91x~FP!(twMt)neO$R1)Gc~1%{?n{+V@Ix76WF6o#I|tTiEWxt z9&^ovv18SO-v$;>WYDhq%3V2Ga^YXEtjWx3&;but_zG52M45XMU0e=curXDcSt!`b z=~uQ~Dz++M6hoUD`Gk_GNP@ZhE7o%h3)DXTdS%brF^n!?8>ID>M_cgE#I}2fA7Q$~cw(Z%A=g(L$=b9xqUw6XM zhfX^F&;|3RUOVlZH_f?p$pS{_S?tSeAHDC9`|sTFg!dPq;uSaH4ZOXyoFP*MeS@5T`uC z6U8f+h{_-@Df<*0BymYm2)#bGY8AaPtqFqh^z7D+D#Am*btt~j;!4o9{4_BSMZ#Fe znY?J@ByG<1(~Uyr0;3J}7>#X;CR|cVe_$7&Y($rkAnEnVkQ*gXxVQ#VgQpOQa8ee< z@;(Ivj-b?8zBY|UXEP~BouFp8Vssz!UAfYlmlm2_ zq1Lr?XX|F|+q4l*q#;3A4L;QbEOg@P7l5#}BPbj2kqG>F=&*rsA{wX%eHNx10Fej> zV{+i9Hl>n*E&0@;UF%x=szD{&0949hJu6_)v7=!QTzKu-wR1E>1BmI#)KF@&^4pW7 z0~e4G@*`i4-zR~fw2jbQAd+U%5S(UZ=7SM1lO&|2GV5t9S|ukOESly{Zi8EMa6tz(H$;ZD}FOq3JffEa_un}9}IFvZtvUw9Dl6~EnspII3ScLx! z&|1d;f=n(A>elstc>^XfHY^xJU>^=5)9z%S)&HHYAu2h(3DCQM<9#L>0}r;q7+4jqkl?SAa>E5 zNisMLEXSZsi>3h6t4TFEC(`+flB)T~4}Yh(GSizlEMKvQni>Z5?Td#BX&7g_S6MBn zr<=rS1#(mu&7`urn_WhIdUREC^bU1m0x?i>>#KS?(K4Kxzdrld-w1<63|zIiVWAQP zusT61vcN(@lmr{qLO;^4K{ir>IoO+FL6r#x0+l5UYV9@Jh^5{As+a6RI;-JAcnYb6 zI*HPn%OD_t+Nnk+LK)$zuaLfym(GpUDsvnO!|AB`-74v?Rl7!=y8G6sS*3TMZi|=P z3@rc8?|ua@Z`k$*-kE2e{DFJWggz<>nWh zG-aEl%t6-4foM{iVdRt(k5d>$!d}J3SOQ}Q7p@{uwNp<%S(-{^1y8HlP#QGBmfEyg z(;-6#1ZiGB>pB2o*zjR}`}MUYb<`1uk}LDFSsp*7NvN=nlM6?jG!|ZAEN1G43Sg)N z*xBgGwvjVV**Kq+vv6KxQ_g^CgEX73%aFk+B4@pk?D?d?pq8%(bv?*H3ehxc-i&1& z9G-Q)ow{`xmuv1@Q$z@e^g=qYo3p?+58H+fjbI&}mV>?TomJXmiiOWJ4c;wNC0=Qr(Ts3r?O&lSIiB39>b~D!S z&Slvj`ZJr53hW|so@fXX*=B7dKbB(HrXehBY}cw)eyOSZYg0zmG%FtB9#&(i(Grlv zYCPu*YvkMl1re13n9Q>L=Ibv4&YPdJ?{CGL#~<<=^K28+^*DEExNH+%{(;}ymhZvV z3Rw;Cs+s`isz)9)ZCRbbYD*~Wup~$IRr%mUdLDhukqr9(`uE=jVhG_eXh4qSil4>V zt3PG4O=CXJONzmo4Vr0Q>saML3E0|j#m@wcr64MNil6F^IgIQ?S68)9gJ_W`j8bbN zr}tSHhdmJk%hG2KPRp{48?`2%af(0!tnfgwtmUhC$}c0~ny*pgMnF0;0$|U8ugYT+ zf+Ye?Z{=kCgwghj1Hvs^wIFz7#vX1p0413uJd#o!ulD)tKGIQB{YBr`SYSRaqEY?& zb`JP!nEQMb)tJn(tT!1++-eK7!pr)L@wsqCa~rPX*GGIcDso0eujxbr(FhD8q-PsuBW7lHA3 za-mOB6<>?+c#7hoQQ#E&OX&r2#0HsIDS$*$1)hW?9W*>xfu5_#jiiGWzKWGsp0F|L z_^LUumrJ|E6AsHrtiAXkFsUUx+KXZcEL^fkbb)dy5-d%rpF?JFI9hrFqbTMnJdRlz zB4$wbG!=VwYQx6toLNqIOw7QtZQIbXE!j-Wwp%kF`~Na@3@o!LL&re0Pj+a7(!P4S z@9Xv5Tfe<)qx-&gKL7CgHMhO^{C#7Fx9!`lEa#W|buAm#tD#-i_G2G*NON!)E-vq{_o&QPIt5f-F@G@nUffb~M zkuSaIT=)@MbjDVH1meu9=1_ifs~yD*!Dv82inKtwhxwOQ%;e#fRmH|^fRwd}eVJ;< zy5!)qH0h0W5|gq`UkEtRQoeGhWJ-|zh2AI^O0V@&24y?0t#vz(Jbdr~xY%$Z3oEER zaE;wH#(6up=^LhC2pT;QZT@9=LTQk+7*5?aiSS1a@c#&F?1~|GJItxLvQ`WR8pVJY z7>HG0HEj(AQ~_jFU`(fCNt1Jw7*d9{CE_49s{|BG-=MMr!l1O&knyXL^#Oj2&7o<) znLbR}qYX-d#f?%8R;goVZEWXZ)oC}+f&U<@@VjW$Le>fgkp#)BmE9H1kkkSUC;n%z1b z&=x&4Cc{?-&!~k_x#KS>E#3YpNDLhEyK3~DA4gqCSi#f8Jm#w&fF1}M-hqtQjg#*8z}ms$;*(o&{8jAfg^Rb&Y zFCpac4WqDyLxVtJh%U7dlwfz`iBt=uMU!n^medR;xSDy#-W5Xu$uU+a zFwq)4a)hzMwibPsBTmXMTwJdtnNtgLuKe23(vXSFls9bnz}9V=Gcj}3s#iLF^2zV~ zm*4Sx_xVlh0cT*jRqJNWTQpkx)Z@-FmrghDdC6>UxFeZX15`8oft1ZDx-F^GqiugO zG4<(r5ZO=wD@9hNz$?O_;+SAqGZ6!8?8{Vig)!iqaq7t|Vp>z%`LV1^qyYzr;Iz|E z*4%*Vl3N!txuUWAbm*{ww#dR_m?I+mi-&NEp;d>)Kn^o?E>DalU6_e8WXa&^bQQEF zJiD@bTdp_Ly)W}l4YaR5#T9k%kP7=&Vn1~U=cn$1OfJvUba!))5zwl{>vy}XEnhf zQNG$xX9^>1lqKOfEFwmku(X=jsk zfoA*C^WXjUj&*BSIpMYR)|$o^RC-1 z6yP={Sa!gH?mfD|vk)}FU{Qrbam5 zmJvdPES|vBDyRxp2@u<*aLu*TY-7L^oPpe3%&w4{jI`52Z>8^ja~6q*H_^iy|pyD~5<9p3)}P7oT#MLbBjb zbP;jGpq*UA@=3Cs9)t!#5sP2AKs3n_bGV8r7llkYkrj!{I!;~!5g?9V8R9}Luy94J zA|0_Ln*zw%v_i-gELrC%I#0^LBd#{}ETvYy8?%7JJbQ&yM9uu5aW-HVyRwD+?8k50 zeQg_dtj}OFul|W4Fyy_``K(XlD?sjxr9w5vnT7bKv4|%P_|jdOp#wu38dJV_n8$ae#ls& z!SI-YSk+mO?RyMbS3)*!>Mlsjubk-Zbp)IlojJ80F?1+fGgXa#i$<~41)jx<;A=KJ z;ZhJ6Opv&6erQ)-DhrI$Lq*4A7Pl-iC;;mcdk4F=)dq?UdkW>yKcf*i@=S%J)JX@c z;RVpxrfbG!O9&i~SH|T4XBe5|&&KT1*M&E@@@x6hc}I^OeCmlK7v4xmIMrrsc-i^o zHILj?{tM>&HmzIhvag-bm428K$Qk%ax2Kc}0cqp+N{HEgXH@P($koCbHCTVg)Xs4~sVTK2cX3BSKF^DKo{MuAu|_(Mg0; zI5YByc;oEr&pqoKS-5uRPd3LI#3XLY7MG(mQWOop997WlN#!u%dQuyrb(mGnt#rR^ z+iS6Zi$Q}2+5JcrjE>D>C~Ip?R3p1A;2=t-)GRpb0#>9aoiLF~pbO}sN)NuVS2Cz! z7lz;xa7VgqnILc2q9;nQMbjo24li5b@v27sOv?lewLESRTcQ%=3RoJ)}AWPsfcinahB`__h}0X zq+}`T7ECy8gr#(N6CxUo3rBdhnOxTf@bz3Nm|SE8n=mD-zOfWQ14EdM;X4vog0!{} z?Aq?MxP>?c2AMzo;|D}nXZH#f8aeTSX%*-FnOCWdA;wP|Xf6BT5@>%&@7@MgmUkC7 zHEGzeSNCpeil0i3A=qL_bhgFv0(BZHQ4u4yR}x}8G&9;W3}6}(WeQU_Yt)Fw=;B1U zlI0=-X|E2j3_Ew~(7}^hZe^0tnrd+a2Y5`ivZKzKn(5fobx)kfB~>NQfQdb^6kvL< zWgoK5ghaNfnzUS~#48CGQo2O9&b}&THZgdzuxmqxcxxTjC35MU>l^V$d+FC0E>rM9 zl{FA_sdfi->g0rt;8&MdPpzx&@VuzI@7 zK^@@eHf>u#%Ilw9yY87aR$s{vLBdJB((A&TZW=Ohp#E-7w+v-|C%p=3YV))bXapnh z+E*8_erS2h9)57lDhgKXR+gPHR-hHad+xmxY=JC*6hMYG4XA+*;7VI(nRme!wlnww z2!#d6p(PhHI|1`FSmC6+v?C_#P`JuPoJbm2O?o|TCxhb<<2fx9&!datTcP`rw;D~$RO~u+%bZ+xHMF?KRg@w+bk75P!A+`ZOECBk z)n>adP{SUr?gX1K18#s1hNz|snIU2pMu_;6Pd_5|Y=yBu$_tv7CP(b?$F^j#Y{s_Q z(7Q(utzd162mwm!tE-ZV>Qzc>{`TpOJ7T2z5@~_-USkyK#3Eo;f|uD$2^j8Sspxv5 z)L0pG2Ohc*OQq>a-1J`!tvTpU`m*N6e{v2i6H!8rhJAUP%XxOJ4_@|%%0)NNwjbL- zYUE55c-a;AZ@=zO2QTBQE}xl8?R0JYgl8dHP!XJTn8?$rFWal#%j5(w^vuy??S&Y( z@eNCPw2=m?vQH%#IbhSu1J1M0!r-`PY{5xDn?c#JWh)O3HH&N5ku@f`@`AI+!_79< zjj_lS9~SAVRwqo;W)17Ej^wlLf~%nYoVnLaT;tlGU%f_kc5t6+)vl$}U3%Gt3Z|N# zu2py5Ss*L9Bg~*Q;Zb;NgH{QgbRIosG`#Hex=!Jfi$|tiIZes2h&4Qi1?1wFSj^Ds zB`HfcgdrO?Z0ay{So)xZILkl=3>uo!E*f1)bu4)5Ab?&cUIjwRu{AR{$I9%< z&zcM`dlrzH9T`lvC7azDSoU;y8FuEkdHb~&p5MK7z5BlQy!iC%FR$CS@!nmVR*V|b zs%!ho!}>NFc5p*Sn1NzuWq6r=IhJKlpqLXrq(^-}oQeaPnV5yMESHSSgIYPi%nj{P z+kWhkgF86CT(kPu;blyg|Gegj5n46a_n(c(L`>~H2Gw8IF4qr6?imRqhwjJIT*|#s6{^*0Z zKmGXqYgU!;virWaZ`epRjafE`)x-i*P#tv;A(SpoAb*f4Lky4JRN&=&1z=GEJ17$o z85&6mM_=?x9#~1p7xA(Xxl4o-O9~Q?%WK6D;l(R@%AtIvtmG20x6><+0=o>BNs*r# zsX&xcYPOk(#fR=;d!+HiKu2ve1W{9sB(^TVXrzc^`hh#ZPq5UqD+PuR3%^VNv`V%B zMk4YFqS8Fco`~ov#Tfbz|=^SNmHxg33{t=7|WQh@NC5NWIOZaYpI0Z3m4N2 zSve`y1P3NEVA7n`k%nx@6=>IlVbjwfiEp?{iqP1-L)nJuaF}TWD=VOowD4}?_^Llr zRM>6p2|=KQ6m|0*u%jUv1&QR z3>xp)x!E0Gmt1+&4*|W%u(Lkv+C{{oAJ`Utz85c`*@Si{;U|d#y zQ6vbvIRay2unJ;6(0BDUSHO3Y^(4<=8dI_p%HWTMAb?R^8Ntzv-EOJE9B>a$+;e3K zG(CbC$;I+1JQ3CTnq$DM8`#(&%_DgtWi1d($ol33qRL?N$HsUi;Af z%kEyb`2KtEu>MS7(mV3Y!q;D8bSs@THidbrHIL421;bK@)B29m zTm1&fLFQb*US*%sF#%7*mmX*I)*RZ={xPQ^tXx?R{p?@=AS7J2VQ2MHFCZ0qM1if+ zJM#1?&`I4<8oGtVA*kq*h!FIMIl~$!9QO*qmGCbTiDWon&g;-=(@Rn%R?;%ia0&?HSoE$Jkm--0N8;V3gJC4KaouOL&B z6i>?NX%SqM^8zdjF=oWk;R~=V-2K=NEEhYm*_wf3NEtfT$$SN$1DSKW*>-Dim^B#| z=JCow$3h@zelXb+i1vi6;br%Iy|Uw(*LJOAU*52Gsr}eTjq2FBO{Kwo>JRPHfPFdW zxcsuOmH=|h%>o&k?ahXl`*zyTlgsVd!rcj$uj2+aJO1$YBYM~C)3MgDerA2btoAI`oktNOHGCQg_Rjz0C|6OGawNb6KGS%^QH!K4cKD(V4-&0Ph8u;d(0~d-tx`?8z4e6PR;2~FXuP&k+e*r;W?8_2J3M5Gj?xQ{tG;Cf zx~{LHNP+!=DUF8aYs@8VZFF5*+2v(RC%mi-+;PW@rxsEXp?kzJMl17^Pdo?_JD6-| z6r^nT^-c3!GB(q~uUDVko_+VSMN1aV5i)DW74v3Z@$kL3xgYz`!QGD;Kj4Px=U;lk ziOX)CKKJ?y7tWjZ;601{Ps;z73%nfQ0M1YsDgQtXz0}MYNb8jYJkxg&Q1w9uiX|7s z>Z=YVkQg8kkxfmADEb#1AvIsUq70cdQ6_q_@brlT892EF;|Nc|^0kN(8d{$;_2`nK zTV7PjDux_|U@v8x+|rG;ku8WUp+*=$5`3VY(mH5>=brTqb5MpRV+c8zVbf)jk zH-`=y1cR7iDUddI(JrJw@vy=_WWWHHAmOHFsEy!Rr3YKcut^z#<9_BE8jit=qs)+G zjwgN!v-y;^K+T|QMeB$LLM5XcP&*y|vfN!(%TBf?rfK>u3~u&^6iE2R7hGVe*{f2~ zEp;M&!Me0^ns)UO0?FcO1588Md$y1ECBqHgw_IsP=yQSutZlF!KKa*8cGFPS^!Z;Q=TViZ0ohd%N-7E1G24N^N7vZuE^f~^3L0C zUo>>s0CwlubNuG@ryw&ZtqM9FHmVfA+L0n_+D1ZaS$08Wtj_v?Do6WD+YuNwwS=M= zRss!N@G|tFvKF72?~QfvrWOPN+>hi(!$u_5G>3elOXHOmcV@(x2GrQt#g?BlSQ1VtCiC*j~y z-FkJa>{Hp&WvrQyFY$(<94L^KP2Gfl73vyQm ztR8jPVMZH#CQj-|Hq0Y6tL{^^a_NvV6Mz%AxI{A5-O7eT2Mz1i5z!O?Sm7#t7=JjN z#5c{9)l^ql_=HzP+E)xWxMh69*kWRm0-h5{z3IiB&*lho2{`Y3ZpYU3kFLCL!Tec< zO1)B2n&8Fl*2=cM_OkMnc8p-Y?hNf_u@`6sd*Nag1POs$cW2=k7GW&Ys$~R_83cw9 zVO5|QmNeht3GUgnU?;Qim8%{ALM?RI8l9aCPK6_u2p(!>JDh=K+q_MnaHyT|EdF{8 zp|u&zLPXXabUmHXq;C?jLT;%L>j;$)Y*9#CvOO8K9Vk#8&1%%jY6e6PI9m&{eOmF< zQ7yFc{w?>n>%b=m5I+0tp9&y~GN{Nm&ph4wfNc!^3QhLR;P|8U2@Ii9eOn!@BZ2?K z=F>laY`LaW=L4$OsH&Q7jWmYPV>PQ445c-yC@n`x{^h$r3q~d?YGm9n`0E-3hT}2j zYal}6tJi!&rsAG}lD0!rIK+E7pAsv>f>>VhlN5>0@pH(MDKa!1PZ5$pQ8-x_Q7Wcf z6nL*3i*RZ3r#fmRYknyR7bPJ{P`LW0NWRKZzT%1^JjMRv(V$T|_d3Qw(*rdNTYaA6vc#L?takpfQ=$y3%e6T|((DVT#{Z zo3Vw&F4d7*pE4w+2fOTpCNATbPR?zxH!3PR32uuyb zI(28Z!$6rzoCTk5O!1?=Fm2G*Sf4ROB8*3L<`}Z&rBQ6@R0ZaMM_0Pjdpc;m>Y=-x zU!HxvwT_uC&9)6Y8?&=F8v%~Z9n#|&09cAI(ZNcWm1ozL~(2r(JlG)9e&C@Kc(F>;5W)`2+(k7q!v^g zL=eMb!cmIitLOz8^;Mt(pahqELXZ%j_(K^rk1ma-NI`;VB%WA4ArOMtGL9|53`x$? zQpoM1mj%d1F`!hi2$PKn9)L-6pobf*JQ;~78w_*AiJgw7N4hxbBAH|rLS`-#*ReiW zn*z+wPHp@llh(8ep7YV@#ga05!&H=QK!fHJxFLk*Y^tT0aXf|JtWDjbs#)%)8<<;7 z+-7iF)MNn&n-nNZOv$9(9BN83TGG0yruvaBe6&-623`WJc)2|Z$n~oxSW*c68GIwd zpe}rhlfZ~3!$8c0G|}K2hlbG^lj%z^8cD!gqlA>1=g2?QmUG9zxSj=}0CWZrd*Dl3 z_{#V2i)cfvQ375*=d8&ZMZW_Zbt}yzFC!1YsNpoXr~Mle(e-?yW2*>#PwN=r)m@`l z!N|~opTbQ6|Cte9+YJ{>il9aw$d!7La}_55o(Kf0sle!&G)Wq}>KSJ=3O$PINTsB> z3h~t^;fjG9io~&@Ln7K%UU-N<@SVLSJ}I`*MnxbOE#s5ytQN8kTWV7n1+eU(0D+1| zA*&X;kNpouCnKkY0*D(KBwjMAzO+L+R94GAj(!*`6hjF1rsVcf!;i*KyKx{66D4CY zzC}2#;gCR}5~FB|HUKMRP8o=9&b0)gcEpmbO2OU0SWr5@z+VZ4)va`59+47_D)2fcuMHV)qsGpKXVurWI zf$Y`CQjLXmswPv|6S}lSAa{C&c5!kOOwgBA&G=74P#!?Q=^<+iWXzDua8FQ)oQ`0L zMi;<&S$LKt;ld~{y4}SP2SnUWikx(N&l*p-$dKoQk?AKMT!&rvGUW-LK z0--Wgss*q{6C)eo`7Jl!NMi^2?eEgjl#m`sJnY2ASak#zfMPm7EHiiBEY>#)222Do zX>n#gKt>Y-9d5@0h;lfE6sa=C%FeioEzmKc;)&$ogNoZKY^mI`m44?5!C}58#(CV( z({+DINk>B{n-T(I@CX@$l`gsT0^45|18f0p0IP_f0^36i&H$>cer~RIP}GRZ<)SCJ zMwuZRNr^&+jlgP@1t0sg)e*k4ecA~EiA$M7CY=3L++bl@2RT|Uz#=#ibVB12Fo4Tf z=|}RRd#nQh^;vrl4C(5HGekulfj-=Zm57t=@h~!ZP-PV-S2QAnJOZYKzyJA9*usgc zS6Xn;aC(8!z;dFkN7^*3!!AP^eiCUzguzXVi>T=dBvxw?p10q8$>HT4+t$I$RAL)1 z{f>Uq#;0uJF0E%8RiAtIiAStkKDuLz-@ccw^3%I<1H&=RLV*gx%SRnOP6fkfR$yDu z@x)j5dMhe^+imfIz25$W^Dj7C(-P5N#fqOKNMu$%c z*m1`n4Vh|sEemm*E(w@QVVAR1SY#-31`h)b2&R4^oLwBG+I-D1^^yk@tCCzX?J}TS z3xfQ1k1Gbu%Z_Z{wXsW}DiHf?ue;Jh2h*_U#N&=Kju8NLN27hM0TO%ru>o);SAO$2 zyv$=E#a?}=JhEinfB~B9*}6|iY*QCYi)$LC)s#kWiiCxIwa}niO=}vW72B{wRMdjB z=vhV#QQ``mW&APPqu0bFM=X9{Nm}8GSUd=%hIJf|SAj`fM&fcjQjlO$Kp=#ZR|v%7 z=XfgmlW#%KBL#u~KQ2k-nOu^Ee5Dgtq~;UO7#XCjM}$A(A}A>Ng{wtM<%vKDE~&3@ zuM$r>VhKrjJ|$LCO9=WT;x*|cJeewz@<>f{Ap27HrF<4Bm!!-i5iW(7%a^*$N8ya< zX?F!E_$D>s$*vS`RQ&vhDbeB0yc94>3l9y4TFCKuEe|Y>2}TA+WX9#I zc5*pj+5Lh$Ha*F3%(xuL{LWjub8=Z=e#*?l1vs-f!^`f+etG*k`>~&S;*PBw?s0zk z!ZSw>IkEv(i}ejcS$Z)T{PYco`fEWDbO+*YUu!03hH7(t>p$ zUf?sW41R={X~pcYfUxVlVt1x6Gq%!-eHCbTBJ?LII25e5>vB;4zWmnhA!Qzv$w-b8 zR}3?FOi>xc*y8{YI=@NN1~8*sj#Qga1k(Ljle6a#y*=2#va@(@-DgmsX#_i4OH@8b zftaDcaoQw)l~QcA^Nb&`RUMF)2CFXos?2XG0QN^|JptPFRB~(b^ma8Bmqtgwg_q%3 zRaPA}1$fzo;SrWKyi#$yyVV!#n3dsZw_eLyrT_v;)@Q%mQU#_c`>|nj#28L3grNCl ztr)R@P`_8)bKCXPFZZJ;9vicHfMR%=-?3$8Wx&~1ZLd6_^pcxq-f_#E1#_=tU$!6n z!gG$Bdhw+BvoCQ)_Trnae)zt{c5AyI8(z+RU*JPYYbw7VDSgB%RbTJmp;P&kSG_4A zo+yKAeWlzY5W?}q`5Gq$8k#5^N{-T7k<4p8QS&&72;nIxrGp^l0Wst#1g1loWOfsa@bnZ{Hei4Gdgg$E?wAzw1veMI-$SnkRm7ylwcz^uo#h0 znyMgZN(lVxI4298q3^k8pS5+0?1|3G>CVz@z0SQoDA8Nuri5e$r;5-F=^F;LXgGON zv`n&`X_})paAsJl1wDisV42-yN>yOU;4$fE?3FUGOnn07I7M}$fmKM2;AJB-GI|1q z***~9vi~86=y$lP!!~Wys7BSw*rFR^3ucyV2j!QXb1&cGl2=r+Yt)^*Q|D{pvuK<}jVh?p%XPY*4YSyC& z27!d5bpeSW##=SDOo4Cej)w+$I;3V|K5&gZ{S9wT@jlTBr642udJ?>VFA~BLDun?k zIBcQ%aaac##gPq3NkpRymtD!=vV9AzeByWrr%AP@Xb{+5kS2H?yQqHYE}Ot2zv?CRUYFN1u=p z8&WMPI=Nq&KzX<=B260&N!AMZL)btVA;LMFA=q3A`?!SMU{byW#6z&@x8#{#9jT@? z0w%)Gq8NaS3lETJei zi_Eyu{|QtbAJNy(3YXtu6~Zh)67bn-p$du^T%h%h72qW7 z77mD}J6F)5yhjogiv;uW`Y?rC5Nm>MqbTJ3JFc~U=Pwk$Cd7VmzbjTYTyKNya z$d|ER$uGX-d{~dw57+^?BE3A|k8mjj1oa=#pBdc-V<^c2rgWt2cT{Dy%HTCdPsLE> zz;jWEH%2jB!d$iM)Rq*uBu(if<2SamnJNIahXvW(p=#=@UaFk+So4myFxDCUh#7f6 zLjiW_6FG0*qzU7KU#k-&JRxQgPJFY!h%}mju+jJ+u21+ZSh zFkwNOwq!de4kp8!TFQXnR5&0BrLw5v01?7);h4i-uW7(ZSl9}KG;QHtykr6F3DR1B zaNZo}F;Em+9b719Yd2N{t)$Kbs^1YaT`zY+nurZzx|{jkS`dD^{J|Cu9h?;X^^=dq zg3YxB^r|klWCOp9!VZ{2$`)I71feU85-OFo7yGp3OCIdLCJ&tQlOhqPT{#8v zwGv_xNtHP=i7jZPEV{eS)R9fu&U1?@)&yK4SHD61oNJhT$|-sZRYVo+nX%8uQ?VVj zw(Uea>aAK_jnb9)RfMjRT6+dw2_zLd2r_=1C4C37tLPBN8AVabG_O2f1&WJ4uQHXU zED|D+6B37>^C_>%rJ`R*k)1^1ijc4OBsI^9vAn8OoRUaE2#<1dUNJ|1@kCQGePjvb zNjQ(1B2Ml2Md45R6No0RNJ==eB9O;xGL=FS5s1FvRS|bDPf94e6rKlxz@lFA$|oLK zB>gWwRU!GJF~8{Go?2fzMO`F|M!^KqBFuX9H*+g>#na@QS5Kc1RHU&JJDagVV_UB| zz&Xd63%tzM{A&6Cm*Hh5W4o;3WoBg#SoX@e%*34gu{}A)W#;D6&h0(^a{JuwZBM)J z3tryx?D9=dEj|7CzV7>CUp`{+{v3nyQ6mmG=CDpj4(|XO2OWdPaiDZ;%3dL8F!{tW zUBPP4Q;+O*>i90_PwGEmXv=}!8;%*#rF*BQY|GVZlyAxYGW#+Z$WgZd#u!rBiz%*P zEJ)}HKf;XQEyTw#%1_Pa2M>8zLW7rqZsudUHyR=I7rU<+m#H7Q#|9o`Z)GlFW@V2C z7{O0YBC+tgM;c^iCZOgS5G;q9qnQTSh(TjYk)vL844+IW#yaEPnUhcTZi?~A%^}uT zt%M_>Y}f*4{HNGhU!_V3w;2fzf?-ukb!4iXDZEO7x_|)W8x*fp49XxaGJFW7uPnUG z5OiZI78@Cjm+8l<0@Vu&R(J50HUc%!_f#Joc21cZhGSG^_}WSWK1)^zF2Thv^HF9e zG-bs3Bte=pWmpQ3yTsuL`9gUQ+_%&&Yi4D?lww~lA>u`IpyRvlSQvoJvMk(|Y_GO$ zFIznKzGVv+&bw-G-%jkyS4};`eP1B6`YpX>hW{_KFF*OniftR7`R+@*P3p9C8dRV} zaHuC$UkF$P2zVk$Z}N%r%F#U0cv8B&=96cEg#%J(zk#tlD2o&dGM*xCJjf&~&7{C1 zBYuIBq5+DM@}wAI`6^bFK!Liv#>Hgo6|`j@yoFFWGd^%ZXE2r6z0Bg{s}0o|j+_GU zN+4N$Xc`6<^RLAjdaQ8jF0nNJ(2)Q3=^riWfNY3KDPc3d8Igz|G9mV|(lZ@G1Zk;2 zW;##PwAqjXBa#0&TW~+e%k+%^4bv-)i=Ude>X8TSPov#Ac1OFQHyRQpB0FWalbrG+ zaeLPXp)ncsX#!2QU>kLeCYV8~W zB#8N15OEeH^%o0AjCUj-!nGSgD@b3@5|MLZeoUy8_Oyrs#MrIZ>u6SoKu9~Z^$YE5 ztfrF5V*S&r!SfinWnoyL$Wh&m{8V=Ugl_5L0tGV;Sa^s2Ow$;Gy=DQDKn!FWc~fi! z%M{=f;UUrr3{3#kYD!0?ffRTdC$)}rWF(y9kum}vQw)i+FsOnG0KiH?M&PiFsyWpGZ;Jk**VP^z} zo%8}KE=dN*u!1nthwa`pKZ0P6^xBXkge03ic?b``u{2^2bz+_9bYQZi;_Mfe8KsZ6 zR84j+Dic8sS|q(%K_d8oQ3A0giwp|tQ%Sb}m$3VQ_PV(C1#SaWKu{srP$JR=6h%Qq zx*(v4ynw=5Yt8zd`Oj}=&z?PdX7+dX{N_|1ZKcw*7u@iKW!RZXr`U`}s7fL~ln-F- zM$$$#`o7-z1Xbx5^AF>kY{|qN2vY~`k}z&@W!2DL)4RO_Ey7P6pzeM&v}uUWGI$vn zHYcgEqfjUiyE!})h$_VGT~K&ymP*ZpI#KILPjfWQ$Rb`q67C=(Y7k`*7sRz!9u*Ot zMaL`wY4XnkedZ0b0f`A@Xls`yJ6;PfOP8XoU=-t@%oQYXs)K*(N4)5ZFJG|;Om5zy zv3s!Zy>AFm>?37J`IRXXfn^hde>LT~r^0dQLmM`(F>+~(FZXFURX42onhDTxbnA6? zBqUNT4X1AyP#5)}>P~?YslE^#Fz}&=gI+#4vFw^N%_oEd66#HO*;d~D4-5r%02y}| zL!}ScI|L|a-DAY!yKx1cp2}E#NA|%Tph#C6kxHTq4yyEwC;@5dvu!yb#N&qkL z0)Fg%JnV&t=YfAaGlLO}8(<&G~P+TkCUfWdJ}QFz7CQI?drEV)p@MpZz0 zNRb{)2Yjih_5N zeS{`LS1kdKURZP;%q60*Y{{I(3umoa^>zt0f8@NG zNy&0t+ITdf$xmqy?5%L$*H1nKjt&uf@Uo5|7Of*6=$Ip(#C&3G(||WwADICJBJ#hL zooeTLe@dRCRst6@}pq+LyG$tCgp=BJ-z9kZFn5$=3nNhCHB&a z`ToJQ%RU1&wy|IvnZj5>Y=)ch3{NK*$qoD@3Vv{VAhtgA7b5Vi$Zvl8?-U7M=In;1 z^(-yI5BmBY#0H}EJI!~d+CZRq8ZHSj zoHd!YIPE~ZrZ_X0j%GsG8}Z+kO&$K>GRUt(GilHSE%nnVF#l+v2|Pi%Al*;XyJ4$~ zlT(V!|1ij=lvgqfB7UZY9{H3(I5Isc zd8-OE6T*Lrh@*@dKU1ANGFL5yhpLHQg_;J66IX_Y>WbDC%!E!sEfH@;5GmRyQiOuf zg2-VJWu>UA5ON4=MG=WG{Vb>*8DW~H38`8J5zWCEQYffGT`4GH;;|I6F)XzSF_xqV zUKJrJxd9tw4qkR1+bki@BH&rJ;(1p6|H~g8+VPY3cY(>A$Z#=4oV&lIK+RHIi|y%D zcHY9vd$+CKv2p3{txHxfp2okt@}2R0yPezmykk3FP=|lHPv?gIGZcq1yxjka#tvQk zgxR;rps{qpY3;mk_j4;Gsk)TR~4LL3Z4-h_E36 zWKJSb(MdwSIQ~`!9zYFg%8a<`;AKx_Wm5{qG#fZX>hdJs7guCEivnc;&gL{1C^6ei zdgcng5{HXedZrG>o{N>7d9%>O;|#VqyJG!iNIOI5a4aY-1G@#2_}3^^vswXCty49F z-@XB6nC{>z4#9j)bL+LAUP-qn;FxH;w&%0krLf_7mt3}^rP@UZ=%CMt1q3vTPm^=m zz5zOlJN^e>Gp*&!2h=$fM>0MgWiZQ60{8*U`9b6)Oq_sqJlb_?l|F@Dyui25KK2~u za%`X&I);}8=$NB9|4b_PV|yhhygdJnDN`ms2QLr4z0ZhezCY~oAwFTAI_c^BKdG{1 zZ>?N3ck9~K{{OQ3vDu|D)LD$oCw7!&(HLR4GN_pA(1#T1f(*%QiMG5mXiIBorGvPL z%fphFnN=ppi2_mx#=`eX=Hv<4BD4HnCCCs8ek!XLHHvuVl~U>|nYdr$cW;vulQ|g+ zltIe;Xx+P9CZ{29F=Z8SlvsUys!6-}D4l0u-!d(z4gVl@qH<Oei5cla(UHs>cel#z|MT2*ZZuQ-rL-tO7Qbm73XR zd9^Mx9$22t42vv}iS?9edhBt>B8xFJO`EKGblCIEEs0VL6E?YaefjyPpuGd+;0Bi^ zB*NPZ5rAFf$#}MJVOPeAm9ERXz+Tiib9IUyQJim-+uCtr*=!{lBepKx%oa5ZmH}!UZ#E)G zRenzcnb?VrX-(|*-pvC(?qd*|A0QE+iCBo&ia;eO8>E0AJs$PIyq{4Iwjq{0br(I? zFM^I^rkX$m5kJY6kQ4Vaok+s)m)RhAnS)pZkur8j58M6^JE)6iyJ0Go?k!N}W@c_j z7lk=~Y_2f&4Rh_~ktYGpXxZ#G;D@4eUL(@4PY;_L*hYtZNFD+*y?9mam}5*$5gG;K zesUf=ddRf|-MF*MuA{clYT}~CnnOO8OO}|uARHcFts0DJ_0%ZqrdPU(7`ZcCt1g3J z5~f--7APkCh(#WYFM{$1?}+G~zpHd5UZ%-Z8tCu@6X-UW%$Q_SvJ$1xaB-L3@GR%L zW?!keHe9FZJ_B6q=!`8F0w*;4rjGaMT-d?(Y1Rd&ZJeNRh{*kKU|qjHeb@%Mi-0Ap zY2r@)L&}b1+kT_~fF#PQpz3sk>iQ|DS9j=5tGN(~Wv5hssB1qaYXcv_u-bh;>r>w8) z+p}5ob9-IcwbSL7a5wwcQ8Q*vgQFjR;*o0A9a25&#F{4#z4yEO4>+B&-P)mD)P|B< zuqcm6He)Sqz7Y4yNF)lgL1{9tJeXmo(h$hql?$}XwakD<<}o|idOtS2R_i-x1s;Tw z2H$muqo-V`Jb*B%4)qH5F^TaoxAHu8s>gsb9Z_)9hf>8+r#K7WR7AzUB`#xXgAIUW4Z`gX0QgE?9a!BKKxip~R2 zh+qH~1Gu(|Y$p1!+b%FM+LZ#OiIj2Zr!TChOr7M`YZT(4 zMxRna(1^q1x=*|Gn%!*lt;j$4510;?tm3Sjo|jfokG2X>ZxCp z5{!-nBiheQ??FtS$>y0*6qi|wpYj-4LWY|}ucRZTQnD39kgXb5V`>i(Z-o%A8b?#t zWJ(lQ3aJ!4@)-{E%-ggp1QRbRl`TA^()>)$N_8zYCk^owPXm*pNY%Q+xl)1*Ua7^i zs5d`TQ7EK%kskV93F7$~f}Y{|$R{G2pcXQTg#AqIq3l_-T6!2b@=4ESi+aeWo6;gp z_A|s~P%z9J)B(ZLh6>5d-Rq=j+%^z3HeRq*KwOHXE(3WBbnG3Mb}gw-3o4=MAi6NMwG z7?T&DtSRa8aNlsu5s0Q~i%Q`yaUXHpt7p?{IKMDik&kbU86+|T(VWY`f>(%hth11r zYO?XL=LkA87))Tiv1<9JNQ^+hWc0cG$V6Eu=zMD?43#~{<`OXJ{{Qk@vtD^^^2p0CX+G$dUhwkpVfVTs+rN}@Kla;mCi5>ZduR4W|3HK# zv2aB`w0U?M_)S`H_ z1q_!CC<$J9s;H!IU;ld`eaD86yO|BI6ZJK1@f5rzZL3ch1MvjIlQ0oM--6=5ErTR@ zrf+s6s1JR!P2sJ{FwOC_bw-#(N>M>z($hgiYA{w99IXAh9sEBL{Mp9X4Wren8ErEhd41P z?0~s1qNsw9DWTryacyVtF^ zxzf3AhmIFr*}L1j`~Axv|21iutG%X9o8+HKomA%pZ*RZjrvLTxpMLP+0Uw!aCXLck z%Ne!NE=#jL2MZYm^AnEdxDJ|D9dTh&gf8J6ho(f_+5_~8NTVdgO9y}*7y;gaOkpB; z+0+Cbz&m;w)_@N5wa#@S7nbqWwjDh30o*8|$q*ef1d2gp=~Rm-$dg!PAaHx;oTL&9 zYm>OdJ@qM>CtY_4KnIioi+NH!moLG#+{X^e@;~zbLN}BUSdIgfPVz|l=oktMF2=SN zN%y3;0{&6c1W$lkL3ep^ak@bwwqG*dZ2!NY`tWncTdWZ-q#S{56~|X*;i|exWdCKbtVo6B`|h z6ChPI5Ny2UTX;7WNXaOsjf78`NgiI-l}4^k&&WkwUz7QgMQ^-1bsW4rf8ML>%UqtY z!arYjB-`p>bRa_b2ls(gc3SrNe^Oec(>-BHf)cSCV9nxqwOaW<*UlchEE-PK>Jpy) z*+(?QPxo9|(QHA`A2Np=rRV=Al8jp}M~@i}ZbQ(d>a$V}MIiYAkl8H^mb)af1}SVvffE;(dOn)>V(I$)-M%}Bh)o;=jl+g_hD(mDt; z`i?J{Zh*s@hQ3#TaKHY&od(xHXS4AmU2zdw;mSC!aGYDU?HXa2`eEKt2kvG&HWbf& zUp&-4jy6U#-L&Fy5V3^bF@Gcd~NLlQovczUwSRmxZ!YQpeOn$5GmuulSUcf zI*v>j58ev0$t;A3^x{=ABqeV{Q+ko7$bT3eb)~pS)p&+2&S_dEPn6{h4@W-bPms-9 zwSrk%D+?IjpW^C@M`|Vc^K8S||xi#wgItq7$Vn=%)!mMdE)Cl`uJwx$2V< z7+c+%D=>2WtW4Zz_g%>M%XZV_Q1&cPGeBKH=F%t0`?uwCY#3Q2=dqo>=5dyi>%Tai z#ijV?%X_x0;a_$hd-Iyv`*ti?v*@KRmo&Jrd5z01tlQ<{dalU!{W6Dgzs?ZxxA~Wy z$1eQKV6qU%3{#8T*t;dL3^jY+dF@4a^ly1fpQcw`c6yhKPIo^xy!@?tHBPK`^f%#U zNDwIIJBEDWI4DmL0Ve@Bu+A$O5XO`UV0y|Km<)pel>$r$^9BereOWS&Wjbu^U(5i@ zoUn=qEF&J=&7mBiiijXxjTUs9W&p3_L*Ov04_G!!nCGpVyhv7)1aqC`06O;5Qo@^{ zpoPXH|Ih>Xom{(S!_!NyC|j~llp$r!wdnX7OGUxU5V7Wja}40hCzhbAR=~@?ru3mP z5J3VOD@7hn^PO@|_JX$@9DLAD_wmh1_ttIzsLNmGRH37oj93OOE1D#=js)EI#fyiU zdf{d0gTKRFMp$JS1Kr@cJp?8+pzU0us__X&^Qs(Q?HFZUe?4mW6VS0k*ZF5sx!}tQ zZFpGDW257f-jJnESrIgkCyglGAeZ@r=0u*dI)m!Es;9-lBz81tC#m*M4Ai|6q} zID)ouDSaew0 zRLDUHmZ1;irPC!R6Ld|3Q%|+XLL9WtHonj5fn{@}RhV9x@J%~R6jq1Kj1#_2co|5^ zB#e~#igKFhOiePQZ0<_Pk|D*k#y+u+Fbn<~9x zR(OIcc5EoQ3=lD`rWo=dqWkm$=ncvl>@tu5{1Yhff&a9AzH@P^w$2mT5s}X`?0+KI_qktB{3&l#}rK!Yd-R z5Mvchr~yI1L0j-ZC4l9?WGNy-V1-gB3#J!R)pwryg@%YTM|GvA&QgW7#{8*L2hjx) zc!U=i&w_K;<_b91VH>Z|cl;F_Skf;4N7zn+ujgD`tbvvu={(Cst>HvOKK3qbIFKqI z9||()j&z(I3C_XzuQ|BgtDv)hZh(tk-EzY~`JqVQk={}{+@>KEh8|iRrDq|g9=!Z! zQD3!Co2VY2Nuv<27nZ0D>7*yLJn+laiSnDTBnGVNR0GIQlPz5m+ePyJsF?0 zIz}~6!=}v!ktZjV|1x9rL?tv$8*#14bc|BbNjoPFNWglw6UnEvf*;!N+!51}CFw>o z&Cg;1N8~e45KMdVh9zp_hz`lPWzG`?vxF*ihbmO37t9q@QKdAbH1kw`>MF28=GXYXdw|-GbuN(uj^bmqUO-hlKHkz!6G@Ly4$iuoAhh{?h?-f?TSilqR zgNb~pV8V3!6)yK4Y4O zLwqU4o9saEan+9FgNCZyfhK>P@-!bm#)xCa?sm3zkbpfvGNE#sscFv4=mp>PcpAmD zMWUWEzZtOJ@;Oo!{X*x7PB+mpKM`@X$$>p++I8_M>n%Yk?!=Lf$d}`Vf)Qpa6`^v@ zV~-j2#KR8`89VyP1@mXFTK@K`NhN z$oT@A3{OAfTmFfdKFaE8yMP2VmM+sn@H5$YBxm|6wRZBoc1X#8oIC`6La>Z{S-;ytO_G8iD~e5=a^) zo(5X`m>+igt*r?`&Eg_n#RwPiQ=$)+VPs(0vp9*(kIb_inCuxZvyt%U>QsnfM`!gvN@OGW%qsUEnf~V@87j({jyj1moI2moqzd?i%x}?UE~E6 z-*Ba`l3T&fe)2f;Ci5@*PTA3Gu4F$eF?FM)IF$`D4;s+UWnVY@5%{NCYrB=HN|!Se{Uc^2nbjw8-v- zM0igeCuW2@i4A7PNecJzw~BMK0*``HU?U<3g4+1BAY|bGVv?FR*`{DUvRd zJ4I~?au!>xoy4;c063;Q5QcwQqQh_I6Dx~g=g{=XT?pv}dTS#lVOyLC;H`i51jz;$ z1FG2L3uFEAjI&fZceucKfwfLh9K7WbbA(b^m{%>#crIoK2+0Zs$5tgWVtwo^8&VFb zk`f7UJwqI@wzwLDmq`)@+3Yj8b`k|LgqKlpRGZK^vcU~jBE0$RZ+95h29=Ea=N7L-1X4dR3J2wxA;qZao1a$RgmPV4F_zTM<6&<^(LQlMTc$ zldvmHCa@&`k#E8DozFi_Rtz)8`>jyU!ub%=wWZGX(>I!Ckz&YMUKM1MSkz3R3=gWv zLT2bX?O?{1!5qZ^<}a{bLlcfks1fC2Vi!>s1)0rk7FmlmlwrYTOZaKYW`r=PEYN<6 zJofk_R_$Ck!+Hk~fda%CT4sJzpQ^$0-aWc9P4UBx#qMRp|K}e*2E@qOe8_kff$to^ zrni)hR=b}H)Fm0%fiPu&g$Z1Kz|d7oG!R~vrvRAxofzG-ZA*uQUDxcbQWEurBIHT8 z2M@YMMN(|YFwsl4)`1H9QI%JCRdYcx#-O3#BfQBYB{&z3HUw-;VUodP)N3-ytANBf z?%`$M_pu+;#dBvxupAJwprR+2Gr$3%fQquBfal0*qL$1WU}z!OjN2KA0>-lrK-5{!4!=G~*QIJ7+_;FkQQ zo1J*B^GPrCO=uJz%0tS(CpPWy(Q`4|ZPQ1@4&mBA;WF0mWMxN@VrvNIiOXAvPakN4 zc2RgK@G=dT*}ZV=F0XVsZB)=+gcIF9oD4N;W{r-fWeF}sM2E?Wd>HvL)FU@=03=~I z!plrtaXL(~p_OPjRhRDRf0FvpKyyfvWOzSSrJ0`EOENS~ux|tH`fz}%5CxnSIG{++ zuATL}mO7-Wu^7;bKv68=oMar6ur**-SBripv_cceUkVM$?16Y`B6_z%d$lVHV2Gvb zrN>C425M1P2DLi@Zk6}Dx;HR%S5rMi(vzk^-G3Qq?2L9Zl}vzo#G^w zx0--|uTc2aum6wRupx-E8a5a?>e(GTxk1;v?90Jy*tvI~?yk=E_IvjX_Me)6@~@?T zpPusOiL*P@2h%0*$<}^$?BChT1D|-$mG<$=# z1c@|#YRckDg9yw^P9t*&LWtb8fFZLVeBdc_;8{9s;^QgPAUayaNFzXX5ij}_jQUX| z(jZBa*L=)SS`yZLMI?F}df3BHj7D_K*hrZkK`QD5M)6w0CLk@e<;zo-M3Icsw)}=z z0@*lueX#5UXMRK9T!Tfh66N$3WQJ}4ULULa7#a4b$#mFMECUZ9Co*a@Hkz!x{Db_b zxwgyB51>r~I8h2GDuRg$9=Kcs^f(+@w*EesxK0ANF^LW{_g5|W0&=pzMd7Ior0bwo-LBGtvzmo^O<%|s&$|@I-FcREA52PxKKuNS zq^%>&Cwf^w!l#7HOKWii#$6i5Q423a=<21MluFYm37v7^(5Kww;}|o!<@})=r`eNc z2Io2`w$2JvrvoCYqZQEy&R-iEj`UM$-R&*r&04-tmX1lx>u!u`tOgPS6>>!wJ>mF< zslBBkFRd88y2&g} z6J*e>nnp3?*MsRwKjkq@1d(Wt%tf{gqI7prFk{LSks>6Kf~817M5(GFRd~uEC{;wN z#*?jTjg-ygQxrce5@xhZ5wD6yDV2iCQg{+bAQS1y>8W7iDK4HM9c4K+Gm*q)Q-zia zX=#c}^h!?8A{5l*42NDx^epHnv!6mCQjwzUVG$JxMb#onQ9LB)Ym1rp~Esk=>hpD7tQ6`5Jh6!^)X^%imeM zaOVyM5_$YeM@>@^b8N_ zGkkN$?9X?~8U;AGtP%NNXX9(&80vL*B03YGyegp3O*&MN{J3{=qR z?`GuJ_*XWs3@*sa@QFQdh$$-+nL%BFY3eCmPOrcUmau0KSq5XtMy&+nsuLvoDG&Id zxOg&%ppYWTfQ?Y|jL}=Q2!oL1K@YZ~zOdJ!lne|K_LynM)WP1c9{E?@><#k?gV6D< zoTy}zvlxB5XHGU>S)0r-TrFlN^9(c3EX~rZaqKa=iIq!5rK8N8kePnU!$gcJ7%0JL zQ3}D#^L`2}Pm3w5&zj5Fup)~vJmQsOW4Il~Kj6p(H7cXhSuq2PSV3rNj=2%Tj%t9+ zPG8`}y-(O6QUcVcI?ngSgr?(cCQx{w&((9_o{}AT>Si z6*-$xWspLujBtBF@uG6o`G?E&mS31m_(@SlkkQWJs28lQH`D& zq7{NJK$ZZBQQ>r-TG;c9L%1o{JN%LrnZF(c({SV0DE1XL31gc8v2BKNHGwUfdMBT3 z|1blDWK1;|v~JzKOBW3L208!ONP95@wh%S>7(_^!JT;k!e0*mUQdyM4s^iLF@(IS_({f3Sn&}cS`;h)^y@Q?Z*EQOCsSqIt zasFkVWfy8uB$=5CBu^SK&j>+;pd^wBB_|(NI{FezDqVR-f=V+)1qLCNW)Mne_c9JC zIc=xdzzYYH#+ofX?c5sRWT>;KD^b9Ddd*a92wO+Ih4l=?xye-|-aqLl)3LOMVl8)&V-%ao{ zq5!L6Kv`r}DjS#a+hnyg;*x|s2hEOf%lx#p*cO0*_U10U0NZK3ymy?`LXqsat)L8Ig=#atgWZ=xg zEkB*Ngc8{_^p_{y zP1hXFkP+;|>1n&bv|RZWZ9U)!9RxFVgKs|WCKkZl9oo+5ax9wa)dG=WmA~6*WLS@! zP3cCc`P?o6nea8Uc3iZc^Vl(cNkFS(`nnyCpHxA!J-s3{hbu_Qt`Qa8Mqe*D31mwnB7WAfZNQ zbUF6E-Og`2UQ+tUa2h3OIK-+i#DQ7g6l;Fz!nZT(bFw>`Z0u_|2uZ3$`o~W`_Wig# zZqL>WW(k6IeBE}3tqE6z1z-}If~F37CCH{eZ%#3dy;4J#pKB~m%2g%od7k-Ab{A`(+J#S!#V5UCpXHo+@JywU`XP39w? zDV`R2iyu5>6cmk>hhis~OSM83VO1Wr6qQB-eo9QMi?%C{j&ym@H&=TmTU843AJ*Q` zOht00U4Diu`4iHR;(26ClTvV)pIA)_B1sV;F+mmD_)T<7jQE!!0Uh~C#!u;)HYNI* zop#GX>~nS(&9*UZHnq?~8)asV~p3@?Avd2Ih!>Zc#>0iw5W zT()K1;vCQBbl$aTl_S{)cCCY#%a*>jYs9S!j$^E%sZ#p}<}rcv zRKyY1- z-080T+P-1s)^*F7+>9v5)WBgzdNLy!tE^UuA_1)u*;jH(PfnSWUy8Q|T`|4_oReNP zk8DNp49Qfdiqw*y$yVe2by#-uciFx z$8Cx@92Bsl6#7^aQM;h_aGUn3Ivy^_QQXr{8Pu*)Ae5`f_nW!93@H@Q=ZnOKDp;jFV5CXlN1|CNZOr zbewEyre}Hw4?_AyW(5(EBEN|OHIZa6JMoHQ0Gy`)<_9hP^bh|}Sw*DN2OVJxQ3{zW zWoY=2*6n{#8=gr(y*iQ0M!=6w#GAgrB#37^btr=Bn%^jiod<8f`szzF3HmD4ccw}R znlwOoLLe^MOJepn^rQT`Ri_}Rfw~}lp&{lvJAUZcDfNnmQy#U5>r+FEXT!p zEr0o|U+`wWvtTZycJL5eca5j-XPpY(yv1$6{y)@O-*)&3%`whlA0{7L2snbU0BR{# ze`dKwpY6&_wPdpi0rrT|lV<8=XWEG+!l7EEr*qA&5-~$72s2kBnq3J;7b_*d#u{J5 zsF`FUUBjt_i_s8A!6K!Bx|K}iG#os0*&Q~COF*3Rn8vMoCWp9abqeWuON$HvUFoN? zrVD}GNZSCkWI*LcpFx3Dx{YT=ROD?XXgv<+HlVC@hWMbia#&QG00X_TMmT*saK`Mg_#6#DWy>Pzj7G8 z$WkpvAk!2icM}C~zI`E1I-3~TUb9(Ym{`FP@f`|=wO!Aui_oMPN@_PF5mY3j_whi0 zx>a+`8_3f!ndymi9V*>HGBU)EQs!-Tg0Hls8%?eerdw#) zKluY6HLqSdf7XnbB>LCP^6%ZVamV&G@9y0k<<}hsg?14m(GijR9w`!~#j1}-e6*qy z@vn_pI^}l^59dUoQCdxU_84h9(GgC@j`avZr7|MW*TaO#CN#&AfCYuAmf=tFpaZ>c z=f|951IoD$Dq*Vm>TiGXX%%-fyy@#dY6Ih#qo|FDSxW?(Ff{=eRLDnIG{^T@PHn{# zO?B-^xSbDfwO0>~OXZOMJ+{*srIiB<7krM^PMVURFB0yX=#XZFXuN23;CS*WK zRH;OHqz4-kk^Dtseue@eh{&n2h{!L5rU^?NSp;z_kDPu+fPSLmm8bk(i3=irO2kQ6 zl7bG&3#!1FAWspB6r~bE5CwI4i)>Xs!y%TeFP>pQqEN6AW+I|IHBbhb3; zPz&C|o}Uua@bDJ0DTO!0S-`UuY*^!y#=!1pQ2>hLwSqsmVAsoD&R55qTuaVCZox zgUSE))A!s2=vm-pE@mJaQs!2MnmL_4{TJq+efaK=59~TrvF*LR+dtf2{=vK3-TDP1 zJCf~+?0&nh&xBT`| z&SPKM>CE1jH}Y?ndw2Num6x8|a6a*z& z`Yo)eNnzxG#yIg)&?xy~*kEnN<*}C8EJei9j6Q%YV9EL-Fd_JWSOQ+b^*nKwCoX2~ zLc~wo3m3L&l@E4c0a}1+8e)(z0WK3lpl#NM{oZc4slUS94oYqb0iHIkJ zf?=p^@m69{R^-T#vg|Y#j-}HA$~>^7O2=N7X{C&70a-yOvRGn>!pp|H%*^sMnSH{l z1Q^@3ZEO8yv)ZPmhv z#TfPwR6Az0-5@0p1;f0_q-C>GIyUr?@q!a3D6VhnF&flvdPR^UWt zbaWg$y!MGF0HhF*ZY3-7XL|Y8l>-+Y{fi(6C5u!+g2D`|Ove>qN87bmIu6CX(jz28 zj5NV3iNxZW)}=4JQWp*>jK+E;_-Rc+f5_lLP0l@+w4piJ)&UjGp$rhHbo>x+<_+@4 zInBvlfkP?0&n!vDkD1&Aiw(@!4N#q&*f7IDE9CF15ClmBd$<}mZY11vdB@TRxvDUq zs1ZwqZikZDC*zhN7 zXD~PlC0ghiGwcWJINQ=<4MXvTVLPV?YzGmeIn%TZ=hASPi_9gG6+I^?jim!L7KcD0 zkYcgWSSk(FXflJ@w4j+XOSGS0F>2GQ6*y_X0!^x``wVUR>GBG3i6pOxvijN+%%uO6 zmZ~5wBm)&9Oe#ufQHK2V`G6$`|Mp#-gsp$dNjRtNST$ty4&pE}d4e-`Dr`V0V*Wvp z#!>@2LQd0jF_Q+)U6g0C(h%*XIXaZuXfl9cg|+HwEcHeoldYOJr)icO3Zq?8uxI76 zkjZI$;jKga_Ld<;EMYRxIyDB!;Ee=`E^W8!nGy{vfCaiY!_Y|_YOfa2QV~U{7WT|Y zGDBx|YoCZEYNk2^+0hYsoTSnmA592cdo7zb_TT!Lc3kIN zrUGSAp!_Ctny-tQANpM)9<_xUkg$$0a4gm*)vRH-ne#EG?NJQ>)Lt72)_AuKN` z%25nv2HVCQ$Pbw_yjaouw(&1KGkMa;H)p@HeCeAY^M-Ycm3n~*G5)zl3uZ#f@G`Lc z!F#1SlZK>e1nVc!QI%{&n~~_eK}}-XZmP$Ux#-J3eL`1>?!J5Ok)M)h+Gk2xbbNwg z{?QkNCcm=esV@*GCAApEEIsOHz1DD+m}$*Wl9?94q*Q{k*)4JY5I~?67BFA#P(_Yf z^}6NetG|Bn>F<9BM*z9tl7jGvoWM1NYb(#8M;8g&?cvWgQ$gH3^5AgwV@jTV=#{tE zwv1U0455eqM-|-dJY?{)Lr}1`v)>%VuF*SCiDBVuA5gnN)OY<*upkFH%I{lS9<`RsZ?|NbUbd8mD6TFh$#KxAGGN=ifuQJ8zZ z)jB~$iacH=JwFR9=BGHyNf8lD;F@Ot6Nl2_GJ+CGU8w>=LO>r`DSlS1U0h~dd4)*E zX{e_xt2#2zUIF?@ zh3#;KNP^7XYDF57x8j*gO{a#Z+1oJmG}b(gYZ2*!h-F3KN6uwmIhYM21IRv#;!Vy+ z%_5L;4r80nfn`W}|E|sN?%Z^6&*noFTO7~kbau6u^VlC9+J0#7#^7a;dGC%@728(r z+PoZI-nL<0#kK|I8{ha~@4}m}Y1``Dquuw_`|`62yxhA(V|cmyMQ3-t@T?veH|%$L zldG?2*0( zzWDt5&CjV@yT(!f1TUL-IYz-lC^$Z17B8cM{}g)l%G3M<_kno9n~=51%@S;mVxL&D z6_;NI6qv+{Flj(n%dxV-xxk|genOPuC^PDiO?63^D6l0!W|pB^P*ABQ9(}}>Mb^xz zQf1lR79%UNN>M{rL<;TP3zUmmvJ|XOOfI7ju*Hkd;VPKw8uD=?WV8k zuTC8P?4!Bk%OUN2(d?~nna7QIWbyo|Z_aqJQ~M@259~4exrZmc@XWXokG=fjvlGTV z^~S3cmM?gNa~V=D-?++$%h;X4B15Ptbz}%lMZ%k$8QRKv1w&bOAqchyePH=8A;L4O z%o6W_8SKDpW*{l(t<%MJ56> zw5MiN4nJCiAgco>8iId61pM<6etB)Dx6-dGwfm;m|CY@V)dXen%$z0=J^}*aqQVB^;17vq(|}l5^#R%pasvuiOhn)X9B}3qeVGkC zlv(&1s0VSvK4cD&iH2yY)=7jJ5fR~cW7FnyX+qOpW|bhKMG-8rIQLdMK6M{ETp)8A z=&e^GIsy+ws;p+RiYW<6$|hZ6v`BMI3t)xzl9hfT&j39-q*Pm}RIeQ>TnSI5m1C9BbE#ek*CPY?k z85}}pA~HmEpRQMm zgkYK9;0DTs`PnBQ>kIPHI+~;JG8_nBFo6@7u^`xwwHD|R6i}u)#ZwVPq!StqAw4SL zjJ+y!3J^+YfU|Xhc2O8&H=ryxKDVJW6ay#L4nsz9GFOX@5pB_-bl#wlu9=jOPU=K$ z1j8d7^Ai2BcB>t6r?aG6Svc}&g}0_J!8~Bz$tZwHoo zXRzyEwgg$3?o3nW!)*Rb1ON>{@-*bFDP(0avJZx`=}UEHFyo81C`*P!O}1tm#AP6J z)S{*O7xpw!XgjjzM$?PFrbSMhV3 z1a{(D1Sp>I`UIf3eA{Xnz-oCw=8?l6eR13{nZ;#*k>#8;arl(Uqap6~YZv+N%<%;7x&bb@cb!ZCM!*x`nu?mZY_s^tg&!HD<)=1X!pm;anLP1DnhbR7Ghmia z$|qfp(10y)w@TD)%YS%Okvu%e{v8+w+^Lu^I1q8Z;%{I6$t!wm zIEe5zLi9H75tq{-0=?bi;lmBk+w};4_}y>i)Y2$C0E>P=kX8udRYO3QXY$}Ad89Cj z#PuHuq=<+Cm7F4(y;YQvt0Kr$?b78@JOL5HS2ndIQ3gMgD8*ou5}KY~c_t#Bkd&fe zRf38ae5NdBWC-OjrnpG(I3ie8L}KVDB_fajY9ca+h@TP_NeDB=PdyUmrD&;~p`fgs z7?6&dibTR^$QBet!gIk)^2>(9fc=77IT2A&LBvl%MP4Z=DCKS1E)jk5mx8{ZNlz(H z`DF;7-UY?vD5Ol^@)*b#M8G-CNgGY%l#wDV}BDHQRds@PKQ+{8yB%OAsVs#dA*UHTZ&??X0Em(S6CBMdP)b0}s$nJaWX20JAZtGK z3_O=jb>?=f10&FyA_YxmBpXRKKNS}jREiobj1)HTqY5dE9JU9O31xZYS1NRrk{?Sk zfn>(E>O3)^76ln#%zGA;ZCl9QPfNeq%<>nA&AG<>VDn>43i^tRD~ljH`0*3-Ry5dw zY0At~iisQKZc%es)Vj=?GON#-Ib-B=&rW@L(p`h@;85^Y^grUVXZ^yZg>-AHILkvrjxQW9qmm6NkggK5h2>GUxKP^(%I4TD^M7TPz&r zjg^iG5wnmvkzf$Ta>!2$B12R*5w9d>5an4ly`Ydt#v@Xp=BIQN67rKVknva~7U_AF zvfd^gh1gCwXB!B3^DMG?>(Bx_oP9`!)Q=?=?-Db~vI{BbJ{#bS3iANtN1`>|D$Isq zVcR5O9WiT}MtEaMGJ%N8OhRHRQnmHkO~U9ywd`J&fB^ToZk=%zLjou40Z%+*UJysn zQ(PVq)Pyw6Q(Ok-lVzN@g=>s~#Dw5Mu(Je_X$~gY$*f#CnXj4IfE}inCzco_Iwkf7 z2AI2Agn336PAD_46TgYoSP={ZhJX3HR5SC

;?K!AZxXhuIhlff$y1o7srYyvtY zL-^vl7{fZ<$klL45>nm*Vzw7Fmg5^5L87@u zqd*A;o;WSlDFO{dL@RWV$%ITaJZCbYZ1YTfk!DL;t&6EHiK!8BhN!ue@oX#4Y{SxK zZMv~PP?5~F)Mf?dP3CNP=Bq6Tj?SE%@s=H{`OJ8f&4B`omsSk9`%eD?_<#P}zm)IT z3>R}NgT^2;=dvrB<6oA+d2Hz%&|nKU1cbsCWp=W%{A64~F_vuWH5-67Sg$!29gOUF z$t7nnF)Q~y+ydcWr4t>SABUQ6aUph{PJ#7>UcjXJmRM+xL7}BsHR5OvoMq7243XJS z@-z>VfLYh)oZtxvY>lA0h{Sw=mNz}I}}L-E^`o?0#HR7MHy@z zNI(0`Go2ej-`K}9=Cu(8^?GYGQ4$J3!ThuVWpkS@QFu9xBN>#{a1hwowEblo#~$UU zKTr>$X`SjwfY3AnxAYS=MeNLo*jm&VWF;qTa^`9Ez&i5R9oQyPP1Xx!LV$>vMJrda zqq*uhk##y-j#gw-#aVsF2x&#o3T#VfKvAo$5r;Q(DdSCDT8d^7vLHhm@=)%n~Vjgn>~ys`117l(PEJYBd^YI)z%A z1`#(HHB+Z+HPa%3RopC1s!HJ*^M->_Z>xt2WpvAfbC{I==oH$kH#Ln4;kIJ;2JYa5 zb!7{oT@GbP*G zLdt?8+{S`_(HHdGTxvSfJ9?1j5UdRx|AC@<jqu}sM1(2 zX8S)Ttx70;9PCWTKvxqT3Y^+>9w;`GcDnRpcPF~$n%bi*n~$bJc$Z$gRumA%IpV&v zcG(Vpw3{V>bUXtxZX?kOXTtFykHh3{_;FG~+^x2sfAV`Xt!$=N4GgNvC{R3zOe84* zN@^FRc$$R4C2@JY^|Jt3eujd#^2jX35DS(U@m3H~q$=gm_e#McT_m+s&0Nq|M0%>0 zEg>li7G*;-^wk?kD6wi?$`VC!AXy&8bF_ebQbkpoX;(P$R>a%zgOrH2r}=)$KtwX6 zQg8i?4E1<~@0Ey<;+{fk^vuu0o~d1tLaI_ys$ig)-&Ci&1Jb><+w2+6MT9U=P{4e_ zq(%7OKFxt;&#QZ!b7Q||{kxyjv%}dvI-Pf6bGHf9 zs8zkXF)JK-9y@T-Gq{!cBv>MuzOXQ(OGt{RI9IaS8#uLUh>LgvrzilARs{RW(5~WM zL8B0~ISncFB}$PKeThl&6l7LBI9e^t7{Jz3P@N(&2uvermlnAJP zM5b4a1=shM5-DO2(4ic#Lsd#7Hm?TMAA1u!BWhSvAU_ zuFxSLKO-Lv#1(tkL3g$1DMdrz6ct&#kTt!#^xM4cLKxp|nF*cMDS_}8#<=9f9Ba$b zrl>rR|L7r`isnY^;P;2#=?u2x)?-IK4Hxq@gUL=}3qEHCpQ{GnFOL}Zz~c|!)vHTe zczMX60T13cXr?9dA%bd$QH?M)089$aPiyhm;suv@KVIbluJ@G7v zGW+=t@v;mtDGj=eufn_$v4*k)1d*gCzd@Qr>3LPu=#>L2x&#Q zMf0y;{3iu~6u4tR=JY9fGAbwlvWy~jsU_IVz!YKRuy6n$d1C%q?rnoew5<(Xvm}|X znKV3EM9g5&Bc?u^lhama#2H8oA-&1~&`gFXwlN=hO2K?4qBPlCufn-(GKjMZuw=?I z#NtTR5dJCb`3Y07>*yhtWMhVDvN+$63_t|qgb~H*U?f64tT=`lAgrY(Lx|hc?hp%~ znGu@Ie*5`ON`PAIKn_zWJQtA(I|uZ}Hd}?gr`5_Daj|oVf|+Ny;Y!;XJRmcJkV&JB zc+;*JRPeRxSmW@%oZ__HXw`$nqSZvrT4qqmkhlH?o1uhZiWgFPHf+maWHD~hmjN?G z5f>F(mt>?wUFnIqK!}*uv`Ei_Q9upjnV@7~Hk(V8rI4DhuCNDmv!7+4xB6NhTxpsv zfUJ2lNf`2iP~yHv!GC`?cOj0Zg5>Xs4YJbd>{x>JG&SJ?fYN<(nqKrc{wX1w+|Gx62P_*z6rMauhG30Wfh4t*@ExvTQR5$<%8EGEL*dQX zW_V(xjmp}w(HwcAEi_8oNl&1wnymLlc#}yWPD618sfVW;iCw=?A(?Sv;OqTpl(9e` zEO2_0tZXiUFoCOpj6ENkfDIk0kBHq;qIbL^d5aYN(GYJf3baToqBDBdfThE!$hME= zNfk2C84C@KGTR`IXM>R_%x}h{rfIJ@PVn!oVJ;iQ2kmC_iiFd@8tawjXjkSOoFGe8 z0C|ce8-jEUWjg@$&_pDI8A%sVLd&H&fg+h$beXwGijhU6Xlk17pt>LvI|RgFySmw) zTbU-fx6>l%2}DB~G5CQR=Uw%+H67=Kj6L$ln0_bx$X}v}P9dKX^Od&-(JKZaqY-h= zJ`Ov3LC1Y zPv%W5{_B=A20r?LEGSsifQT4f9KOB_9`Wqcyv9<5`@Z*Gy5S5A1Y!5a(S1PGQEHG@ z{v*iw?tI5~av5v$&j!hzDK;j!6jxk>t!<@z76tD$O(0g8PaZQ`XOv5;ZsTxk`l7Xskn$L1dJu5AxX?r7aRWi8XKNU5o7if!m#QvATw(6r0uC!20x3$ z@Fu@VUdg7$5El=7NTtTGlAf6#D5l5-UJf)iE#!)9 zuY#SO*%m3?_qBVABiaA@{$9X&&$hDd8m&vOzRUcR|+%YNM&_V0dnw@d4}@9U~=EjzSrRKL!# z{y&FdC>)8Gp}c@uE3c`TQ`H0|(KDx`iz=^=GHAQ~epyQ-VvAmkkC8|ZKfZx;z+X`NxDGz82%(7~*;fVp6MZB^o zSX*tRi!+-gA_e2%RyI|rU5z5jiX*5j;-1>BxCloutDe=tz*ec^F3)M)s1c*lJjPT2 z94vf_YXuI0Voy1>T`g+V9MbUCD@KLIPDP#))goZbeJ|_>RfF9AQIT2Ezriw>UO%9} z1L&Bx8E&R^$j84;^wEvm5zV?7=+(WOok+xkED(0BQl9manBl`#%x^Di%%KLbqWU*5fS?Yb2u3&&E$AYsfi zKLXY)$B8pqlJ2KC8>~pNY8zsLh#-T7&(91o&%83$ig=1-03vH)RV2kdWB=fUjieR? zCCY64#s*6jDNPzLtk*u+6vQQ3TYXoh->Ho%m>KvqZ!k?| zV9P=_@eQc-;G`4m#huA!B*oItKDf#@9RY2v~(u^+ou_~?zKADkP zheM5$j+EN9$XiX8S=`f4PHilyg@UmM7`p6-N+(D|j5)9>Pp*4HN~us|XZpBF27w(V zBn~e_d`vRg0D;uV>R>L);1ftsRtlqw83S%O+G-FZ*k(o$nP`40B}Us3JP4Z1f@0~BgBFb&Jvv@C((8QLWouS0L!3-J@kRjvf3h;- zToizmKH|PtwO7Q22oV8RdK2e#QthOXk6!bWJuFC9BvKWp5`w}<#0R=+SM^Z44E9oMdAU`6s!t7yUbZC}Hby;}xO51tBwVKfMqf!VeXnvq7Y)I|W zIunQmOVt>oX6TFKP@;I{0f3|W_35D?8(SOUZq%q*35nFLNkrHa^t;Yg3jlTco=&35Yr&y8cQF{mUU{?V$d?*1UeAUvX7L(VIMEU z&U-5SyQs~OGM}=*%j_)|GpDl)!2lvlJd8|05DIAQCkSKFw^9d0vrE}FoPb;^pqvH8 zwhTuz=Ajkf5n}>w$(blJaP*h%pd^rpML`CGLNn>M=J3Zc!kNv613?MeJM`|smt9)Y zM#i?2JT`FH*rs|*hJKM#&t_53cealNwWv;UnqY3sXpIVyydaLN3}HX1JzX+LQC1P= z`LdmVz&3bAE9k9?EMf2h-~dEW8<^J-g&bS~GcB5W4gWk}WDaytc(tHx_-Lbo8ffA| zkRlm+)C07%f1>xzW~vtI)GB?RU@0d$(~*jdcLy6ZJ;REkQarP( zR-mIg4acwySNH8}W-|=}IQqq6XME92^1%b?shXgO6qb~ef|>3J%`2Sye{+1Da2zAQ zU7I$Lg5GD&S>~8y5SXVVP{ea%0t6#QsD#<9a ziDM`-j8%aSn!L%b-FC~g0Nwa0a9*<#iJdWEkH|D-S=aVYe*Yh430o+c^EDRiwI-r( z>~L9HDV#gP3{G^Jtp-~PginhMaNTN;BJw9sJ&OgIDXM;?-sbH_4I3)rzCL|Nb4XH) z?!IeKc1V~L=351o!if>87H(Gk#SS3{+gv&naombT$g)uqd}@o06q&CZFu*HZ8PV7? z8&viM(fM$o!reP-RxYsF4*vqegdH61*reb~!wqYfg3SJhlmIx(vvrg1n(OH;tHbF^ z=5Z`!X0sERXky8vM`uI~4MFeFE;?z?(@c^jl2j>@9WaxR1?8pjV+r%WzWj{7=|mbu zX!?}46E9V_KchQ(bnQf$EjLzP#uYWDs02tR%qoethO(_{DzMm*1BB~;qHz#YHh_XY zIR6bk01L3HDc98I`uFRu>E~dxtvaq=chT5g`Qo!rsF0~ien#5RA%j_BoYYQ41J3%_ zUV}@v=^Il1DV5=Bf_?JQM<8fAtU8FsEJw%ua~3#PS*6T!G9XJic|WN-NAo@3{Z7rw zV`(sQqq9l}sF;F|Tl*J~sz5<4_<_Fr_3q`S4*#$KxaQGTog?3RMTPDbQR2$_rIQ)>($sEy%SPONH+bTkE# zG%d`iI;@I>zVu}96i=zp2{Y>TN{HN4ghCjWO$4cgB&rb|Pj8D1#Va&1PpO>7F`-#T zF^qDNIr2wAHqSIohH$QSA&HTsr?RP0o@5X)X`87ssT#sr7EOJbU5!l<1Jf%*PtL$$ z0NFg?&^0GAqzo4eUIiwb8O#sjGVIy5&ShWx%TV+FT^m7WU>Q>O)E0*G;>!rr0g)ZBiU~Kx~|W8T+ChCp8_xU?cS>W`3>uw z@Xx+qwzF+@HVX+bonWnp^^#H~N*6aNfeDN{7MfX|VZ%a$pruGx5HuEm!GKeA6!ca$ zb_h@za0)oHDkaLmBopyUHWWn6glI~)Wr{bp5EQH+3UVrF36N-+3))o#iczCPi$24^ zEBV1v=?;g7V9>hE(!KRc?br_IDngm3j1R?WD|0y8lIfzj+O303I9>^>AqW6OGf{}n zlUT3~p=+t1*oZX9iYQn+;I^vCM2pnoDZkrSxKY?+S|kyUh+tKR4HQuGFRQG2PG@oR zVcR;$-tejNyg6<#dl4>n^jgFx%sk5wbV6>)=5h8pvrn7d^yR)Us2N`7U-td-*x`=~ z)2ED`JpQ>^)5b4;Yr6j@wYh95P2RG$^c@!a!nicq8yyxSOPdAW4+d zPdO9Blaj;`NutadcXCQ8GDs|l$S-(RROeMfQAF`0Q;OPgg@46E1U1XIy&agsdd-ft zm|96a$+OZ3DcN{TP+DyskeMCGa%Kur2J0`wHM=FUS<9@;@SSuBLPe|>%&0V|M}`<$ zcGP81mNxQkv+8Az)yJyAEvd406ja<#^7PYOgl%y-izYr8+h_ zMY1`M4%^Zb~T9(P5?2SW;z-A;K_@mYO^npJ^lJ1;!p-GEu+_s>T~2$plg)4VPkMv9zf-A_kX1 z$EScjuh?B8>?Qf*rvT^KjZ~B<7)u1Dr7_H~hbu!>_u*OX=6pFt)Frc^jq+=+f_{=D z_F{p{$tD-iTXIlJ235W4g(W{xV9-JhrUhPO{$B!Dm;737f|3()d}6~?L(^D> zx`WK6pa}?w6C;N&{~goObYuX(TXcU5%S(2rlh1Dx^+%YG#t^QVa)}Wtc zPD3=%L`nk#M}Ztm`_Mm{&1))3Mhj%^c?EcBUURJR6z%1BLv6$O=2=wJLI?I z86z+k-9ujw%5Q15B}jVaw7|NkF(Mj5$DA!O8B$B`Z1N;_bBMR(NjJ1hT^QDI6N|X! zpq9N0qBF!)q+T^DE1M~TYWX;qCR?f`8i}eVD>a(^$hjnqQ|r2V3kEUu0Id$;Gyi-9 z>Oic6smw*1z@}x+2{u^BM2`@cmI^wEP7*bNME|iG;d`o!7P0-9m+VQVV=QC}#-R){ z^K1LZNnnT@#+=jkmP$}>nxL=IBtt=UK2N}{SqriQkoj#h?Nco(q-q6eo#qo77c=SU zDFe`2uj2&Zj_7bjdL%&pU;Xt@HoL3~w%iGXoJpS0WbjkOwkIF4h*x;5fsF<0 z3oF9tAZp#AcZh<7sgs853r}>Sb(kkN!oeg3W>KgMd+#4~h{6#{n@G+qm91N|bnWUT zo*UP#*s*Q>dk1_+;}-poWZ2_M$hFItF0#ikwQSYmn-qc~4Td6FLD}eY(b{$4eS2ii9BB{bNWb;g7cnD=h!fJ}BR0Gu|f0`_V zQpxNo9tPyE%DV@^i#-N!&Aga_X<*miy`8;n4xBP*@he7EG64$ z-o^**l`oflzs$K@dF$84eLFajH~L4+?#K3omn+Iw@7lV|OLbN?$inwz@ptMs3j z`?b35>ej&WpldI17<*vv#=Sb7)~n+=y}LATeO`T^PyCbnzW9QnKZc*#OkiWMZ_W7u zlH!asZy9LW4mI zer3Y*?)zFa@AW0~XKh@)xMKTyr?MG}OeW@##V%un{bmgktY4Pz7%F~>TclVHa>|n+ zF2&oV`X{Q< zkYJ{53d;?n1uBI8;78^kEXmT7O@73!+RPPCm`Fdm3YFF5gFzPK84VgJZmqXrZZ$V= zz{{RIq5Rj}qYdi$9?H4U(-cAT$L}A2JFrSt5DHreefh~2tEv$2DJ}zogdmQMa1yzR zqo5!wRCQ)5R)@AY@^^lV7L<%9wVG%=|r<7xB6-=3cXwibh214Es?^gq69g z18Q*9ScmKGGhg1d_5!w@WuO>L2AN-&`98Kcu^)KwZlC&W^z#ONN|;P$pi4 zJEse<2VWB6HtQh<3kDSd7)&_t1_LY3ru>v5 zgG!-_fB`_W9CAjY@Hspa%|e%(VL-;&G?Dq-vB|Rhom`@p!ydnU@2I1AOD!?Ndht!jk|2{N zO1CEZwHiMzA2khH3A;Ud)71rFwK5ZfBeCRAG{0D)=MEq32k(~_Ocnu`{9#+cXFZ?L2;E)XNp%$OA%UzEn_?Z z^MK2j>OI63ta;XWQw%LR)YmB>>=ws!>2e3~1k7Y@RJ*6iq?{Odl0ZP<4uz?sL$phK zyE$9A%t2Hr(up=0+++X*H0p)E1@LqAC{ zGHoIw4m~8<}|MH*z>wo^2t``w$ z5}gwGa;TtjLPqMh7Ok!nUgem>t{@#FwAKs387K^2fxif|E1&o{x^NYfDy@1s91heA zlfY0;EW0?Gx@JcpB$WaomM3*5pJsdfm#Hf7r-ua-<4y||B%n1V&No0Lh757RN-eXJ?O+zRg8okrV5cNcS0rQ^@>icGh#R? z2AijalyHemc$rv|ICabuH4~ZBS4XXUa}l|3xNO}{yAstdOqT+}-Nrpf)41gV#jvp7 zDgW*7>^NK2THfpKA5td2U-rz20KAM3S;NtP_nX&#|Hdmem%W(nV_yoSY=0SQ_I>Q| z@{)xQ_$Mh~8ESsxl?C3%UNrwsc=_2UZhG|oOBX(N<*wVTx!q=~?z!VSZ^Fx7$!?b! zUOsH!Ex&xgoJ029#P_j5X1{UXf9H?%U(CIUZHpOd29|9x??3lX_S@y7pWbD|jXu1_ zyJyc@ZN(K`&dOWg$A$qxR4CF6YfWN3Y>p18Su>md;9?LJSkq)pYwc%_Hvd{_m}N{K zZt`TnJ$f|37vLTZ2s+z-C2ZkG37qS8MShJ8tzWB%9_EYsdUpwJ%_U%yyX(FtWF@ zL1~8&G`{U-konXTj=KHEi$Ug{x83mbpPl;`-#+}jdB@pb_OUPJmg_F{e^U0By@~Dr zq~Hr7ZI@qYR0~LPJHKpM1qrQ(eYN@c+g~3 zNmwfxd6SS@8)@KGa9FD*`U&%pd=}|JORvQ0;>-$x4+`J%K}Sx~z??LKy1m2iT#(blV8}zH0=A>3}agU_OAQp$x>&Xm%2*@G*`Vuof zb;Nh4vZS}{bo+7&IN#`^hZ|}>xUEUP(1d`aln320(eBJ%T0pqyoG*NaMC8<1lOs~ zY+(t)s4Y#M6mf}86!5Y;EzH6teu`QW_`rXJv)~*Vc)AE2J~a(F7cN!P7CteF6$sC1 zFaV4_8E}iU_9p2$r1MD3Stfw*Y9dS_TimcvM>|c-Ijzo%?mQ`(vLs`=D~OWmw!jK! zoGcTBxkD>e!Wqe@hgw70?8!s=ji2-wjcoD9tN~V&WJ^tkODPe!lV$RPb6wGdHBrt7 z-Q|RpQH%hcdWl$~WaI_JxoC6F!ANd=1{Ej{q82XSUhSGvA#J8~jj z;?+@r`KjiiV2vVsG;W5@f{V-4ckaCdOk2}!8 z6WCyKno!xm&HZ+ZOBiCn}G>S_JA#MmRLI#mqx8H1nBG&{b%* zcVS_wg10naavMiqViqTwDB&$VvZU7Pkp7~^q?T~4Vi%MqOz}d{x(f#fZLiZ(M-#k! z`YFd6pLt(BQ~3Pd^ck29^=?PCEg1_J_+rF=z}CgUQe_ zTwO=dScecyy$IA#WX5*ZA%(pJT=;U)w|I5!Eu+uT$|I-tX`LlCQKNeoT;H(oa zIq&4#e|EY5le+8XYiuz;^~k+Yvn7G4&D3KuHSd_q9FBEV>jAS^^RL=`Y5_x1K`2^X zYnswf)2L0gl(*(FjgDy%jjyAtl11_^(pCkG5>pVwxJ1&Snb+K^AhK?{B%ov+ks*9) z$>`_~@ZbOIe}ZM4i(Z`eqbEy|X6md+f(4-^G(DaWqX~FeZ7x&%#* zc~{?)a4UhGw%^{O%gi3XwB!K)G10U_I%!j=ZAr-iR!9m*?a;=$Eq)(0wQ_^YEC^vte9EC| zw<&w0-e>l7#p+J@OjD~bf+wPLA{374q9cxMs4c49lf4h%9&0!`eLSH=M}`RSiVTje zVY`H$|Hw=`jKa}M51lxj@hoa8C^fnmEn-FF%^4n2VyJV;`Lg3iz=<|;+y+G?EwC_L zY~vVM_PVwD#hLGY*;Ce}b_hEwWNbj2*3I$;qC$Ji66=#-5e~vq(`(y3|98>3vh0v@ zUVyF$Yjri#q8?|kTnWJ`Gd#pg##uP&gcDrXTE#0y=Us{jC{ChPHziJ@*Z#k7$RQ>p z(HR)PLc@%JR%)FisgM@(Wh^E)Sd<)B3YwE76@)`fKFgkzy7sAM*WE)af0867&p5fq zbB0n*fahZ54CLPFChRPbQ8TS3259T9FW6hKqF`6+v?1~ig`j&QKLPhTH$c!s7w*Z7 zIgv#iddL$lC=nruu_7BJsp1kQkfoPLNrWk+IR_l(tr*dm4(V~fa*iy_+~ZpKIr}{) zF)EA!Ov}7!ysXu;N~hIe&L0YSB6dC0luMTw4RR-i5A?U@Y{?o}rW2Mi>I}1f2*3~l z%ScNOpJDftSlRX?Ys$Gw6@^k^gh(|j#tA?tpOX+~9?!BRE=|(K8Nu?9dz_IUVru6a zK_Mmq9cfk&gq8RN2xF!hvnG6;^cYAk4MsVpmBpI0W*QUGn501=tWK>~zG=lFMsr?? z*C-OHGfSXz)DYE?Btyc~>N0e7IHW=5G3iV>j=mHc206oYsnDcLBg%E1b(e;bW|77a z4b5e`>nofx<`ST!zW4@!S;kxOt4o7QX|pc!mH1VGwdC}adZ z86@42v>G1%8Oo28{UQK*g&K?v@?_k;LLUYa!h6v;egbbh*x=ETWHJQU3YbKjXd#hW z11W47cr=sLG4MhyTmwpTk`G6J?|Ysgfrs3njf_GjRY98$)tUzQA`b@-61{Xi;?x1Y0$)+yN&ZcPEAd`)nrs;nj}uDC@G&LF}q`^(~NKu8%_j9R-H1c zw01X3n|fORFcDLCL?s7CP-z~@b;Ox2Vu1~m>hqV>XIC0LCRAi6VE(tA!2*YU^2XHiy1mbEBIOy@EUgiC*^fkFJu4equ&XZNkw-hC@SVcy_?oj(CG``#CrTmhMZ^X;Zr~lDg+_nKHDX5D~Jv8`TQW74BQ7f>IE#@eziW-ogRJZ2eCcyH}R37 z3X%;zqXVH1Lzi$XuI(!g9G7w{LpM**0u2tgAiG}B}ME|@P00+OH>6tGN{nN(;95%F2- z%9~beHewXA!Za(Kp;>-PxL1-y$4{6Ss-}bt-LvLAo85<&29D4N{Ue+Cs);)#jIA@&ny^GE{&Mxzp4&8I!$w!}h!gtR; z?HFL$$FcpT)IGOeV}JS9>o33armKNvkhwL0Wq>)+gyqmQ?IExk(@L_5&s113XPMDV zTlfmXgxCy-)|~*3LOw}^CUktg%-760RO@2a)7sQ&mc-Fz=ply2iJuZ2hfU&$nv%(= zCXyavgAX_iHMwIt9VT2q-PL62rIoPRmJ$m{w+L$qwF%mf^8E7APln~eTgolnP0*Yq z0L9qNv8JLNQntK-o9`7CUoOcN%`CF(CZ<^K z(!fdD=wyy#EgN7rv|bJ^KjITZ(2!%DCd%=ZlBSE2U1DVTLkXtwfZt8kkOr20Cr9cm zSIE?>9Hu#iJVa!L1`d!>5SkEL3mM^1Uj?lYkW!M|lS8en5T=bw2r@YQOjSO_B%D`T zQ&KFljYFq=N`*2!!O@ohwLc#qo zIZ!l_ElEzP>9^`%xx37r%w2u!9~ zbc-7!B;n2}rY3qEI)iHYHy#j5k~EMpiU{MdDAM$+(Zb;^-_Qgw3%n7c3CF!cT6iKr z=JQXAqJV5 z;|bxB8E&8nbB0$^A#f{6C)#;$V0cL4(6Y^bpW$I_7$?StRx)+~&BA=6nJR_;f}++q z;WL*wneqwo$M-g^5-Dtv+yKL23rfOQ3`=WobGi>td9LNx+~$26B!r*tY3}*%t_Y`6 zESj7|HM&F2NT9$uqsbv!oml5i-$~Rn13+IPpp#Dwr}5B9sTZ~B8SLMijn6+69SGJ7PD)q#uv3%x;r648I)+E z0m`6_67beo!qlC@dO6gJ)>IerG)P)`BiW(W1UjTpFIPcX^(MB>Q%u@IKsr*TWGmsC zGC@xQ3~?h#!gbU-t;gYz@7%q$ev=;ZID9UdT%~X*&!U} zu}t@hmJp9AY%$ZX$N-Kr!UqC4g+Lo6TDgj&iGaf)mg(}Z=7!;&o~#d58A@{a>=2(7 zdhAwv!Cqhb8DfqGBaJO|k~vR4BAwN7s&%Z8)T%p+Lj#4QkSCK^YIV$Fj@3P9oFgNd ztC$%g$wR4&liC#0BM=v(0q$PZeFNc8>F}*}?_&eS0CIcEU^2Yy=Ll^mV?xT_#8xn0 zdH(6wmM&2HyztA5pZxVJ-oajIe;HWz$5Kn5eF$C#mVN0W!?y%(tH~aXS?_X`! zyO)2L4`r^j?6ND)USalf%iC5SkQs^u^k6r&0^h0aR@hyINg+N z0*7YPnO$I;9l$IznxOl#GJ1FWMJ z%K6DRexfV>0FVvn8=fn_x--i6bh=YXwZcNor9 z;9{89SG=HRc-aq=yKsZ}XSm{mSt_{FcpSVEM{R=3RB^nRnlEHN0$p`N6wxdiKemE2c9`vQ|Fk zFB6lM7_i)83BRcv-;~|*z$9)$w5Y?>#e}8ckZck(e~~0$-DL`Eax}XUCZ;>ul}Ld8 zF>fL|M9?rw-E*TRb^6IOp{5FFx}%ww2G+G&Vm0CN)0_)y;9H!FRMG0DAqiC3g>h(> zx9D{$4S1_W6FtpViNt6a%SEb$gXsZywL^t1`fK@%EZiK%0;2-7~b zi^Z$#bGDp(eak;*S=F#YLq69XE!fmbCc>d6s#1y%WRa~rREtg_NnjLRaENuv z^1r_LsbvL$nQ(a{DHScQOb1Sc(u^PkGK96tp@JZfNq}kQ2y@B#GHmR*c%8uO=w2`M z+OTJRu6^j$*ZgqQ?oKkV```ZUZv~qt^w6w!sL)TFD0szTD4^E>n1;tC*#}V#yf%Q` zf7}VZeq-C(6HEJIJ}cyYMIl3iO`Aii6jp@8ZGn4a2@dt@OpVkb^u?^8 zM;RPSXr{rH(rXy>lt99%l2SpDQKPeqZnTRU2R|QlSI`PS1?3i`uZZNqHBMOIXv$No zuR})bg*n_tXp)L@RgBd3kPsh-o;=K3trAs{F4|ahWAvQCbPTlWf~kwFb}6BU#WIsl zr&!*d?$(58g!P+2VgM+xMy+bw!De%}Tt-WgsMG9_!=7~rkWowoi&lLtDXC?P!q&so zwyIY$g}njdxt1SI_=T*Oem%FRjR~I9>})FhckRcNSVbCH9AOq9fjAE3L>H3u zV3Ox5TGQwlGlgzPDJhwCA)ik9H!yOE7*)g^>SY!%*lz#F zI1+^^duplEBwg0qS{;@~#G4k_$;&5si)~we1<2gQ*35GubpGS;$FOl@kyWk66?ub4 zkpX@>9cW$nZ_ahov+mN}P1Y4)sxjRJJu8)IC}VX>eM z3jLP<%d(`9R05LeDOaVaR&|FoXORr|(EQ|{9QH!E zglCk1E(9DZxui>m*wrON)no`~D30z5Nr%}Q0{T)o3Vuzx=OKB0byOF^z{~n#62R9B z-C3>ixF6(FNA zaSB9@qf10da5&`oUw54PYMuB$g_m8SUBWKRZcuNn?Jrkc5Pd50E&cA!%<9EOK?88eI z+`IUhJG_s*bm1-c-g3rxAA8rW-#3ky58izfkXe23?i)|O^tGd(GjFi}&Kvj(=EL^e z>4+5hW(Y&dxWm2ET6 zT4vehmR)Z7&1@fg57YbD5TEJN z${8O}g(8E8&?1lsal?kTXtlPg1igt_QbMu_GOQC09n(gG)gTlJnd${s!pkrzuWZGF zu23+H9JZwaOlDCN0)P$Mg2Vcf0gx%=USQnJ%N!=6hW zHi`HN_3P`QH@+{f5YxSgbO(yL%BYTrJSRgylNyr)B9(3abLp&k$6a*6sTZDq$|V<_ zUb*UubFaAU>@&~!@mG&H=$x}oIQz`wfB1uMU3Tf2nCG4I6W_*Wb0*~PD+3*%y6)%3-q7|1UAIR!CA}CjU}%$n$)TT4CKUR4sL)Kn;AvbLs0IoBI=XlH=#xD6 z-n-9s*$eZXI)=QO=o>_C%kDyy*sn1#{`Eip2OR~1?o83i2j&Da3X3_w2e$auQXHW# zyLFdXt(0>~mo&r0$~0bvi@k%52|?R>hLpXP?M-ZaXxj2pu@tl@ZDrEJ)gc6p-+p3? z6O%Y|j2clcL)gudm}w=KxQ7-9LI|B@QIt%%)f&H3-zXyHQ0)v#wJQ`&p`3TMGOCG8 zDXOn{iQ&*mKPEA?;t(@Wrn4G{|Je$~kY^OriQu-rwX(>BE}e_?Nhd%>n!UKtGCD9V^j z8mm1{X`D-%v&oZ`>#mHYDHBE=D;9?YtPk0ZcJ)K=MQSnzF2fo&?y8L zk#Q)Q0udK0=uT+Sm4B^e*r76UX66fEJVa~dwqA9Vw{lxk8c3$3vxcclst`^$f5_la z;^vclG!GpPoqXnuQ$S&!J4FBfw?2gH7mFYUY+|Ww2@MU|kAs!0z5V>w4yhlmPFgi7 zZ~Oun0l}=;K_pvg6J{ONwW>8y&K5Tm4)eiH4~N4{3yPDN0x^UN%}{Y>HIXFav?`*I zXTn;AtcwZS`pT0T>7ty@QA(-kaCD)EIT~;#Zp^%Z3RM{cMwGjaL1kyq*f1Cj31G@# za3eA>Jh#EoWuOneNNO@#FujJ2(+-_po)73AWNSQUj*>LPDWhm=_!-_jY2@QjtFPKE zkuaSNQO@XsLna?|(S+thOA-v}^WM9E7<&gg<){t*uG^N>shCoy_(YK@8i zrh-pqxP->ZblDJvk%p$H){LSxp{Mm@PKMAQ@v>{jb)+CB*U>w_j}0$-4clAU2%F3B zvH}{n;~ZWFpWAePC%o)e%)a{d>e44(eja3gc_U%vR1|6g9R@UG|P z-}1tUKci3R7jXr39 z+5a!wUw+%~eF1L16zZ));MA{meB;7o;SE6l3JOgCkN$wwY+@2ItpK5LpMQPz^3uVd z*|$AnpwCnbZ_)t#LZAU>=n`NAV=+Cm)ddg+7`&^iti;3_;z8`GX74>u@O$Phpfpba9x z@nIj~3k1lm_7NV>r5PgT$pAO-$I%KU22z>cZ&w`FYV+8>k z!^o$f`lDlh_-Dr*d(;`H9rKeD{({&I*IkT;k^7>TJ>^?&yz<(sF0zkoquI8y9q0C! zZ90$lv5!9L@M|tRi7{QvS#j{UaH<#{LnC9rIx`O1sWyzAzxeD4cV_J2}P zv*nNp%lu{XGU>E_v%o`RwhD^IG?(GvH|aM)Te_G5&D8iLN5_^l4hfS)bQv;X-;>>x z(3y3$tF>8GQ+lXPvpQ8|xS9Yae{wRKlZ8NlId;v|0>p1Z*V-*2ZF+G4)0|F*W3=gO z70MqAc(f)K&Cb@A{N10lP;9-Uq^&O8!;ishGhvQOmHvH$?Mf( zhH&OIQ7#ax7hlRXq`h-qW^8!TBX9!rkXPu;T*0!4Q)v7ohHmY|lT6@O(x6L4$4-#C zqWdViFnQ+)Cizr@vW69Y8z`fV){^J+%2kg6- z{jYbgG#fm(u=kD%%&wOo0+1Aox--a=ly}Hg;gm!2h@vB-$ODdIct}Das~d{K;gY~P zPdL0hHkSe9@G^{y*81x=UW2AFL1r7x11#&`;-r;{^{2IKt?1EYJhuS0s|XM|7lPum zG~#muUFOxYjc=TjhQI4o6ibP0jen>PZ&W4p@>mJQp<#-l_l~riH{)AsX-{Y zp_QfL%0AHgDcg6~%5BJwL{OCV3W%=p#}))i7@RJbCfNOVkyg#&^r zrQ&M5i5C&=WH}@}TJbUU?L4|&gz0ivEH&vRdliyQ3_Y~rljMw{i=P7qRq~lr*QAvO z4sg|lpd%khViI^0UIvOQ!Csj6AAbKgfB!c=c=gL)z6yW4+wf zUD^sCr?eOVfPgcuAe?u@%$>(!kL^0%=1tK`kW)GeR%S;Y>rC4>-DL9X_LJ=0mLn zrhDOQO<-IX9csc!A)U-og`h{%Xw8k(rkQ|_^{v-%HQ$DC@>Geg3mmQcFoDQ44R0-) zQYcuGGDC@tU58wyoDaFG)gj?5#xL;B-QYMJ2^ndrZ{5+!=z>##%pn~1BLzmCI~;b^ zB&n)b5vj@G*OWzd943ZEItk;GFRp1$N~)*3p!Fip`p||WKeFUIiteI~Onnti)UrZY zmpqRtG#705E>}<&vf9PvLVMfN=da;q-^W(N$hMWC=J9&AP3Q6ZWz2VaAKO>5y^=k% zx%^oB%Zs16Z^09H&VT5(*Oolt``8PfxY_ry7d(FLbwB;#j$6KWhb`W->o)J-bNjW! z%X`lGlkoCqcHL-Zi+SU|_vMe6Key*bU)X0;o6FwE_J2~7zm(c-V)RIsyr~veHOqjCo(7wCp+fKnko5C9 zP|>@}Ai1{_VNaM5Vg+U`wsp}8X`%s7tqx6ClynHULf>ebVO~va5yMX)2#kYICjxe6 z7s4g4)*ZngwV78qU@UDciUsZG*uxFo`PPOqd96ILrAUttBl{2;D}cG*Zu_j0moRZC zlsDRf$+`#u7REsX&T6WtB|vl2aj01n_b$Hhv}>=v;N+hi4ItZVo_G4OS6`{&$H)Kp zJDAETC;dRv`RATQQmZ$&!C?hJ29{xGv^TM%?J|SRUg3tF(Y}xUlVgwcmr`e*^8Nkx z-1-OKIl}LkZ7!d2(hvNE8D75b^7HP$@OkH}<+=5&nAr6Gwg?mMTSeIJfFvKf;xu?4+ z8#WY+(rPWV#Vf+5_+FiL$bZh%U1BYGsHGuo>XaZdM-t&skuHa253N`+vYUolWeF39 zWTiwU%$&?PcOnu9f0!edHjc3(Y64u6ZKZr7a0nBS5IX76Q5Su6vB-1*!pMG2Mur12 z!yK8;Mb4x%RqCM=(cM@_)|sG|m8|mH zP>dXCDNZdt+H+z=;+VwW4Texi5VMmO*3Xxy8?@{f*QcfAu<< zHkv_ZyE%bkYg8pTWBP73uL+j1z`#LtxiQo@7aD)W#f|O;T*ZtQ0>bIDkQT4RYSK}> zJQtDB6ohyS8GmTd7i|EPLw!3_V&?#wFrqP_)uDoDfT_u7RS+?yE{jRx)22koA*1MF zE9Hc_$IpBtkOSEwGyEYyf8i5T1gadSCS?qrm4qpmu2eCHz%K+!+@0LS=qZiv z7_<>1kpJaGt81SmZF)&HJvq--4v0~A9cjZvD+r$J;w-mTv_flj=g@g~V+CXk4|Rh9 z4bed8z;dIMA$ml?4IDlcHQ!1sqk1*Q(nEQxL?)~f(;PM-rnR?3xN3O-fcrUsx!WN0 z@rQ<@7z>sO*CroCQBGJdpE8Mq)`Xd)sN-XDTkVhrrTmn9z4X+O_{7x7qf?bv#CUFO3k|K{s|P?HM1U6-Zj1Tve&-Xre~tP2%XTyE8~UF41TPZ+ zlYjAwt+xqtbRQedI1>$jyn8bzOu-5*$2xb00XHx?=E~JnfKKa}3p5GPp{Z6ac7YrA zVl*BQdeN3x7Rlo@LNZrMyVW?+1{+KPemeVbmcii4@s&Aa%m^W1Jm3Tz!VzQ|qG~_4 zCaiYLQVb-&ceXMhqmWmphcFZLbckRV_{Bu116=ByQR}W(95SimIV)N#_yjaj7}&{f z+R&IRsueO6CYc*u2=NIk1Qh(~CZ153q4X!I;|QmYDJfBEuCkjjg$~IlIqH;XDl`H( zF{5?Q`de$tWq=_v2uhr<4l~M1+Nv{(){$}r5gjotj3!SHg-oY5;ao*0kS+2E%n@dX z1@6w%&8J?8sritAUgbQ2p_!85a6XK~BnjwNeskskoZnw%AIBg1wCGr!BCv_pV=B>WQoU9aEBN+D(rJQ?>=3 zDdUBA`rg-z&pqb%%P%Z?7-a63%ihQKee6f?yV3tiJ@eSDPd4)ZQw!!W@Zgu#f2ko@YHt&D$`zBxcT6We7 zE3LTv3bVbK?Hxbh*IR!!N$vdF$XIn{+tHK1&VfBaKmQRqd)Z|j+KPqm4%l~3t%{Gb zR)d*9B0LBwTh96C5&!4`7kjfBoCJscGRR->fZHTNUHHg%!aq*(L4o~usf2vF_DA2g(X2k{#P6Sb_KCKePdmkKxSv;_ zWJCG-YrH*fqx@9p*elt-3&$GXL(vSb_z`eKi<7NBHHkZdI zzkDD2u?KE@=L&5S)I~wZCRju%{4stQ6&h{y(?*qCne+*Z z7hQ6w(vfvCYE8OOVpUHVr?glq3y*eKLW#)6zM5ci+{&;NrG?|wTJ zuiGpudW}PBN66AngYI+^cBo_}A21P}R$^iq0+^$f!kiQrLeZqK-hDnFa+Pp>$C%_B zqx9<%6NfFnIScXACcXU8B$>{g!XM5{7y(RH%iCuNEWg zG^n|4kT{8S64peR4@Nc3ImsDfI`%CuFIrE7WwbK+wessPhLpXPO~!V!pP+igZK+uS zpRI$N!7VB*P_0)@_bnvC%RG@ENpro(7&mfQN=3&Xfe4Ma{B%ah3*Ru-L{c(kujF7B zqo%S%l5$0uQlXWG#)(>?fg#D%su!XW#dM|aB@;($j_DYaPBJoy&l#>#OB+>a9FjCE zjWkW24jdYW&iK-s&R(%$J2AnZ~V>}ob z4Tflk&Q~W_Hh98{f#J?f6(>uACWC|(&g=M2g+&8QYui$kqd8oR-UgvZ9{j+BA+b}K z34CWzP^{yt1tm*5YC@D5bgjBetb!m(41aXQp+V8HVRZwlQ7r%n=i^ zCcQAZC&LI$IfEu|csOx+m@O#S#xYUc5Z75ESF@5&bzN< zG{7(^R~Qm0Z8-WeXJTqFI2|-Qmyp7MCu87s3&QUfWNZKn>W1GmgfT=l`4eX!oAQ}2 z3Kf#@vbz}#bWVWA%yx5zbg@7&*c2SC@N%OrjH8QTuU3*&gDqOU+)J_tiGClQ^&m4n z9Mh4L@o^L#>A_@ws3sntrD5`=C09k*P4?lvFre^PsH^TB<#|A?P0AHLrlex7y^xyKcAnk$-x`uG??_o>f+7UP_F1O5?r=6_$1P$zrv(B1p_`4II3x*;=Lhm&C zN|<-K@oh=NlAt0c(7NLC@AB$0Y-@E5Q39A?DPRkMLX>bW4Gw`;_!Qp6aR}rhXtt9T zfkOc1RqXtFX$q3P;?j$JH_uznH(Yb2HATz zPDFT_&x!{Ek{|;J{Gx-AJp|B7Gef!{V0JPx<5>YH9h&xbw`UaawZja7%n)06qj z+|*oVUp$yREz8 zOxO51fEb1lWbms^uao7|%&M3e2h@r{@M~fv4qfgZWQ#?omUD;A()!=GRwPr zAixU3JU2%ZAdgSRBB7aG$SKr>PCsoeV`!k$5;W7g&`cpEIi}_YVGAm~D4`8OKMyIa zmD>OIfBlQB{*V9rUlDdmpsE%U-iTm@fT8PVh^`Brly^TYby_uXfC=)JjyOzE1ykU6 z5^c@vOOL+%L35i}@p4S~JWQvW7*A-XQ=S>c6g65cfvzLk^s+#r(6S0pN?6S(c?uPJ z%0p^n>caF0hap`Y;wQ#=v9P5(>jYn_j80#y)-rY6T~ ze3MrboT&;kfhOF#n5r}>i8XbaqtlN`iDejc$+V8*H5f`DCkw6UoTj2EO#VBRZE2Cu zPEY}A!Z$=|)2ktbDJ^VOMu3d+fhPs1Ra2{u+{Pz^CN_#-(c&c^M%GJo4#fg8E9lmN zATy#vUC@0e*{tnQVWM?lRyacj6G1H+pAYmbcIA4K;}4Xdc|!e?`;1@O`W1uA&5pT` z#m#UWqL+9d#vMSS+$g}|3H*Oalk!_nBRqkakw^T}!yMmUTf6`^GziP8jLE$0IaH)w zdK)~A9|BGmJ{g>Pb?#)oGt$XY6QHe33Q9HvF3*xyu*F;E4V#uUS{pO9N(cdkp21J2 zdLlEmt=YY+8x&_E)MSht*Q1NEYihJ|TX9i~qC!~I0_40)Ndl!yN|$;R=GiGMNf>^C zUJ1CeW?Fyq+h5^4;}w)wU;1zV`XANMhgBTU{f({_ol_}%LrBvIg#Vo{C(*fE^xQMf z2t!VdL+0=aIF;;Hm{Z2d?L^}cR{zR-3)1d#aGLzYU?vJ@ty3upA`+TI!IrMBpsp!= zt(qL7rGku7PSo-ihd^T^v9hY8L+72LG|;9D2Q)d$70cIF?=FO{1)R4Q1AzQwp*yn? ziEubZMQdZVJWHm;b_tVd%s9kP0Fy-~rVk0|MPaHCDbbyz6Q^WR)5Pu{x67qMjOEH0 zbMt8w2{xw73jSR%|H-zqZC)>ZZqbX+Fa8hz`@d7e@6SB>#G^ld@U@pb-|#tKZ`ymA ze}dqcRx-_^0i!l})RJIOsYmMU2=mkRWoY3;4f?vSFAi!O@<=%ksd0#xV5sK?3DMbRZy z$)_AM>zd@v-NwyFiX0NqN{Ofu(h{?AZvGAtUCXYdQM!tI@7n!4XbdvjUtYA}QG{PK`x`fSxmU8`WioHP zy!h8IJ@?x4&%E^PldnDh)Ehpd{o*q(EPO=y^~(!?_0rQzo_TP|g8N^8;qe!qeeluy zZ}`Ov^L-!trRN@e^nt7GFZ=!SQxE-g^9@(<=gWKSu=Y+{tiH|0t9^RA_54)Xm$7Xw zAGqsB_uX-Qzi8fhv-c_fnt7icKIG4r?LqIh<(hkKyRKbkzhypr|IK{??8^skdH82H zKIqdQ@jmuWn}1;2&DQe!#ioCIE1EQPZr@~d*uKuoD03SBMi6l4K@dX)egZ`z?oM^e&Z|T34_f3 zIL6!QB5Hq`GZezJ3IZ;cBA^@k2C)UmdC(gnA);fBfI}s^Cjgl2qB+AfZIsx1Wfz)8 zE@%uvUwQG_u=Dj-TzK(0r(SgSDOX%{7UGoSj`q*X zmtA<$cfNJ#-gCF;_sgf9_K z%x*3aQ-7C<=^m#U-+D=L9cjWL9Be_8uaHORVpY)k%B0Bz{WxAUwPN?MLbmnl0wk=U zsU?#nf&e~ktw09H;V9YCktAmn9IDh6O1Oc~TXde|n7g4WMeIdE1AZ`&dK6G;0Y;lbh5*kwqa#(+GNC)8IGSiCgP*WoXnZw3Kh?TB)TE1{ z;`7u5$Qxk3)VIU7TlzS&kwYyHZJ@i|u_82tzB-cEH|10*4h6z_;2od5jbqO=^m3UX zfN^4C#8IP%D%w&=iL{h(^faGN&Ih{jDX}@iQ=!saR;&3@UMx8P zLr~GjWPHXGM;8K=nEUkA%b|{~9GVa@HJPmt&3an(wT5Zh%|pKNTtO(9TrI+&~a0W686~L<<0Xjz#0cO+b?pMh2Q-r63i&h5)F; z)CAz8X*>lw)sYw^1xAIJ16hReT@J>EB+I9|K`7zlbl^z4R;R$=SI{^LF~haXVVod* zdKBkT{)KEkqb`R=g3wC7x~w)-8k2Fp$Gi->Jt*GG@bo2~3(13PPcQb!Wd~Bv{8P zEyW$aQz2m4Y^^(4?*8!w%OOmg0cJc%sq`WoQj2e>7)ZtsVKfdx-H1Zd++w??TI1-5 zAkcant(Uq)l{)k|EVlxbC9yZH_-Whx#~!u2ZQELT{NaZldGG;988o(M{lens7BBRN zbieq=zx_R#=bv2&TpJaJiidB$w&VlF{!QC0LQo?Ru69^N?#4!ucx9$Ab6Yk>(U;>`z=2n91GgM%2h@`{Xtp94&nqU>to^N7*m z&|H%`hY`~>-(+AqQVNxkKsp&MWTvUDRl1biz@amXgmZrADYV>919MuF9Ch6ZyltUB zyF3}1InbT0J5u+qZdtBb1??W#?J*9dJc{jOFxl^y+i?y=L(RRj9fJPNt4sdsmFIu4 zbm7a3=Ko^pGr#dS%`Y!}Y0+cw@-JR^`jy3xEqLs%1&`nH(sK_jUHHHgKfmeqrB6c8 z{(t$or*D7p*}Il3xcRaBF5Kc1E6&|+%>(xMi1)F#-*gRo$KJ=@ZHsmHoU=ZF3>Pa1 z=onHyWUq~U=?jkDZ>JBd(O|NlGatOiCzQ|a^U=@jxqjcr-g2Y&escZQKluI?S6^wF z)mK?z)fH9%mfK(c17x1HjE^aAx#`A0?#3H_^y2f+fssG<;XiS>(Z@INPr%n)@zbw< z>5FT=_dT0!{0TI?3_SXa6EA67ki(vKKVevlb}w@KlPQqd3*Ppf!9Cc}kD#ei>^S$U zXI~Gq4+}3ti{K;~2oX-SCb!Gn3)^is<3O4O6gX6q9cr&kbD;OG5d`kM^(G(K0+1o% z+itqy=midAhMl>A z)}2~9)j)T`B;jSk$Nk{X;bn+8$P5=l&G0hR9A1W_HQ`@)*2y-b?Jr+?{)yi>;()z& z-RwKx`oekhjy-SQagg%m7oC3dbr<{pWd&ZI{PXg2^M7gJK77Arwn^OdZ8_Ka!C{w4 z*O4hN0Wz*H7m?Y}sm;fmO zFTOHiE0zFMf)F0>Wu0l3cc7ZYF2b%#W3cHCRdpk zIo4hKeF*>C>zS30-g`*p=dLzzZQYWWc{Od^P$(a#i7k&ZI`s&i_+_6z;%WT#lO z0f1ISmS+p*KmPO+kDAR>)wD|wF&s+jf`c}_QbHTCp{*r{dLlUkliymGVGavf@=Lpj zWK4|YC00c4U2dCF)Y?WtSXbWS#)Re0kQtFPQIJCJ8AHm29I0i#O zLpCB|r9hTV=ck^29CibKAtoq^7$S%Vh1s$sKTs7oMj<3sF-{s0*?8wN{eKI4=mN$w^6@E{eVm%dKUSaL`7AJR4dizuY!dv?}OvDBEpG@vxHTn1e4I#c-;0Tcd#hVX2cY`ILV(=qhhg>b!)P!Q`Gl{N3OG?jQc~ z_h9njpa0xqlF;4E}tSAXHBF^;YSm5;-u6Ksv21K0xe15AN!%UDd9*7S%1wqyPWct-bZ7XWU*S8I9Aj9IdcLt+8bApm7>b#KM>{ z6HZ=}6V%Yn5Kcae>urlfl@QaV`@DE9^K3(0e#yo6+;RJpk3Iq#-*?xYciwu-FJ1xR zp{?I|3H;R;-H|-K1Rq|v`%J7`lN+oRoh9Fc_ul1R!E`>WyGGPp6{fh<-ca@GsQ?Wv zHJ=LytxJ6iO_L(<0fa~RglVIB0AUeztCXViUq3Lv3(@2FTG`=D^N)8jt);ra8 z$+x;x54UMh*d6|(rMFq8zn99f>}qwGhY_QrL#nc5^dZdv70kEJoh^Xe(#(BV}93Xth_efoOZs z-q4Qo+t-$Y%)fl$*;f}o{mT~?{PpWg{_535uRj0e%g;Uj`qHQTb=1?3-0{p~x4*c^ z_lJMJ@QFKLTJ#764KKg4qfwc|W@ z=N(pBeik&1KK0b`}SLG zh1)*n?J6rQcifMD;H~UO9=y-jx)1s60d}14zxxg|wTJ*TMs3%^w)F7C7tGM~_bRyU z7{6Qwb=%UlH5`ETd_YItZJI+Rz7K4#c_@(w^0XC^phCewE|^TSLm|tL7zrzAO$79^ z)y}BCBBB#IX0%oqS$@b9)8+hmKY@;6WE;vAG=Plnt!!XfD?$^(``CWGtQ_^tL;U~p z55M=-%Pu_S((_Nb;^H&Bl6}u@S3i93&HjJc_r9Kd=x(3Lw##hq$*gUbcSTsvSyVez zEcRL)w&H4*H#54dEEP0aRVtPfIQR)`8tX1ZG%+g&wPn1eyK}9JCcUUl6)RM=yd*ho zT@$rhBlI056FkS!Mad#b%GCk^A}C~qat;g=1q&%dtk4AEJV_u2aA0A|TmQ88=>a)R%rbfj%a49ABET)94# z&uw9HvXc_})`f4WQlqt!p(aF= zi|GuFKbHCq3G0i)ZwtBfH1m>F>N1+l$b_j?&|I+~B+nK;dODe06_+SPjTS^XwEFceG#CBaG# zz4r^RA;2YsLj~-DrxXYaP|^f^X$9^o;8c#5L$I4Nv7q>x(Yz4O&+3NnX+onHd@9GEVcyb3ip z63}9C%En!*LmW|;x?00}Xd#m!PXN(GfS4{?_0`2SC~4?XMh>&6j-$K5;39RERmKY0 zGWg#pooEA~)FC?iV}(4H#0@ISaVpccwANSMxFQ2YG_G*E;u6C&ZgCI{Wg^W2(P%iS zOOe{4H=W8JTG9-j9+jVe_E}e4ddY+L-UBZ`{J{Mn^O8l+!pQcQ{bKs%rAzE7Yl4@d zW{0j3MvaZOLOgYG&jA*uM}D4Q+Hn8pfB#Ps;`ZV(CP|s(t&tYK$a4ynvLWO*?bK@; z7nJbzEeYhsuBp~(C7Q@^sU}uNd8bo3x|5`Ki19O52{NN|Ko%T+tCf z0Zr1&C`0<98=nY=nB`~4BeI*%WN;KjRt8?i(U<2Gx+x8r9H!ZW7jN}UJ8GF+^{%V2V!`~sQb=zin;2gv-&^Iplezx>K`Pfg?H z*Ott;zx?uZk1l@de(z(yy5v#i`DgBP_}Kk7`9G-_pSySQQ@7rI%UL&Ee$1vHTYk=` zR^D^Rb#|Nc{_Qqd-MiL%ZSx`f%X@D(`JFPb3 zE4$2I%!Zo%c-c>w_t^debLV`}_r5;1?z`7m8fu^|y3fZ~rp zz*|kafYM@d2;1^p-_R~^+b<>~#?Z7t2!P3Ie`)P^%@a60vmdP&uq?N2E+bM%Pp@06 zfnrD*Kn^CKd-@3%oOu#{Xxe+)wwP^0U;We5ufOuF@BjH>zW4PP-#+}@Gmf#p?9Zfp z@@ulieC;Lo-gfO{58MGM``DN7V>@dW;Vu)Gh8Y~rvc_wf*P_va!Mdvn(VS?C*Gdwz z`O($X%-Gs2=2%WpNCV}xHFfIi(j$i1vVjBoQkV=vX?aE%Q%QgZ9Y^6$ZXn3eQ(r_J zSV?iH@I1gvl3ImtK_{E+PzIz)Mk|MvOzX}``BA_tY7tJTOTb&ux|Er60`U>nmpKlX z8BOq}jZ~@jxWFiaV!VEA@6YhdH@0|Axn9|&hbM|)ncym#X{mD^*;!s`_A<7WeYV{n zxA|`r&vRUEp2JwS`PQqr08a`7+FYVUDY0s|KJE#`cVfMXtpLkuu(SLxGk=XFC-4^! zX=9yZodw;|_`Kq$!b!H$U-m>@DHWRZMe|DRWXP-O@e^!mab>klpAz?Q?ny5JV*0B2 zKqsS46TX6zW8;tjnmj&9B{wF}c&5n-6_?W?G!`WYD6@bM@mZdkH_gkf51U3?8<=7c zfxBQ%b9A#$*asiDqe{W4yDqe8sukZP$O;(%0eB1#sx1qpr6LbPmNCIPiGyf7Pv=Ci zis(wUz$F8MN?ilxyO!7}bI@t>mo$a01bS z0&>H-0~Ax9Bu?>0uvnthnI7HQ&9RztNj*uLF);Hob^r=Ka@Y@D5#c7lN~=RCDNY~` z{tALYWFYg~;$^ui^F~prbDMI}x%A$;gy;^|B`{8$LrR>H#-}qyPZx*_M>Yh~SSoLV zl08FW6!{-TtD;Luz)#HCK}!^V7?IUDRr=#ONs^ znS@Uz7ED$UK&VusfoO*^Pv)Qg-9Jk8NhcqFz&?9<8JnNhd*T(oa4hW#htEY}Xf-PI zg?yE9%7V-xU-dYlPNF6QfMoPIzxc$sM4n4vHJ@|4hzc29ap=^byDK8xf22`1Bnne0 zGo_L6o5q7KnuraB!vub%rnG4_FkISWwQTu1y-uE9>rT^Mn9g{^VBCVnBD^KHn&xgz zbr}L3P7g4{&q*P`0j+duZQvmsVmd*M(rMH9LTh*sDG5R|T4YZXLYSHqH*>#ed-Uh` zdq3au%*zH3KX5Msw)L=1I2?A>+e&tqU`5mh38Pl^YA+Ug>H;sjy$~)OVU`?(F60d` zdnML~x8M9TG^As%nV%sJ!eyqkqYcY{12324%;_RO?2b(0(9l_9y`mMLp)}{-Tk9OC zP6-*2>ZPts_o95*WpM~+orY232)QzC2#raAezcP-V>pbaGmB}6^Gp{!^cF=l#L zH|(i34iyA92x}bzZ=$vGc`!4|s{-KwqD(S`_60d(QXuGB(UP2ooXj_Tg^a`Tu`j*I z$VT#Nhv`rLo$5MD6oITDCL@35BPT9Z7q44Wx3%bTH~atIxICiwXmPTS3?q9P+ZHpJ ztcH}^bPg|jN!yZ7#pK@QK%#Jnen#H(q(x=CWd!*{85U=2xD7#2vYyX}V4;N`70TKQ8OEaUswJ8!v$-!B8j{(l)>_7movHhpiu zQ-+uAAP1Ru+5G(o`_A^4{doE7hivz?gSYWkHsJjE{WtvdPHXM5_3C?V{~`asy!nP} z`TynF?|Rot%e`yGS+ka#wd}IX{IAVrc-fXKEWGJPpYYx_IC;v6$5|wQaLxCb)V%KE zt!Axjth(y4vmdGkes|tsj!o%r{^^%NT`P9q(*_@G2m5L2 za?32U((GCOLUfPab~|9-eL+7mfFb;7o!`sM2tX20RcnQmX$HoDP-1|mLYs|gtvCQC z4MApiLYvFbF}w`1aDJB^wsp((vt{Azw$t`Fd(GX&HndlF9=PXD@2~cM)6hytoKaYn z4-|@Ji%maCz`NEqlpV5Vf6Y7CT;&gR>>+?nH~-aWjm;k&1qHd2x5MrgBiNRcFt3P! zV$SmxBo`5j&@>L3W@3=IxCpZ)nZP?M#7YQZrOZ&Gi&`#+=&v2T|Bl~1@{99MJ{nSnm(Mx<|>@k8Bc$2%?O)<4u544(c_=jcE%uha{54giDZxSTXb)+wvm|ps}uE2}{AzQ0! z>SA;mx=6LUNY&Qx*7n(@1c$bKV8xnKYe0a0VmVe#Oqb-7bf_0UJDf&L!sI+vERs<; zHt@2iI(B>QIo4Ne5mAn&jkkh=CCM1`Ub?Wi3zG1G4COka`JZN1I82Eq08N1tgUsP& zAko{i1VTDO)7N3~f;5HH93Wu);u*H*-Aa-#PD|X>$g&|^SI`ax$P!1&| zKIICIz@36lP`S#8UiS3T($}h3 zP`W4qB8OxqkWmY!p7zT7inX0%|Weisxp$G%I z!OxsxJXdzPay17U5|Y~}h;;!!G;;VBe{@#^(k-tlRnbU8Bu7IQSZIZY)X;PqQiDn@ zP?>uYhE&Se5^WdwM`QpCoRQK84{oj|b;8ozI#=-erDBGN`ZJDwXG0oUWzl$-yjf7Y0ejt)iS+Q4~e|X%;UP z_m0q)$CjBeEHm+E!$2@RVrHD=7sI<2T^lMm4IW8%m{WSh)NUZDjt$R9)zU{8>a1p> zTa9uE138LCoW@bZKUQ+bgfKK3jXevLl?Pt#e4rrB0;t~!CzDo^j5%U3YYVeHP2?eK z@87|~HjdG!oj}8wNfQxQha+f57b1=tG2)5y< z+`ANU)Ea%URHk;Tl+vmt0duPvN99W~o|6RHBlM6Pqd$N<=NDTixxp6vMby>Kb!;D%FrHfqGm@GGbaO zN*|dTk|{0K$`FX3&Ll|OlGv`yF%HuKPnyU~=SRDv6%Z?MPU4j(!W}-ykbMtG#e|GF)u7lX0Ev@$+V8in8mGZ)~m6I zA;QRzvX6R!%+N8sjADy9u-u!=;bng{CD~bSbNQ)<=lR~(z{^iBzF%_sI=l=lFIjkp zgV_-D;sdjf4lEv+g}EdcX{s`vo~LTkFD2p z=z4O>dZ!J&s_f-uc`&)1Wn0fmXZ}^{Jx*nNulYlJZRUGld(K{cr>$1qcjs9L?77AE z@7-XHx4aHs9_O*g`^)}<*;exj$9@W^{L<%+_vY`9FS+pKFMdv(yy3cQO)-u(Qu$L43ANo!oXV z)MvpUN^jCZozSIiEJ)KQ;)5g3Xa^`{Q1$Zhv{z zH>{ZQz_La1gZuCEwl_^qgS&qEn2-5E65R{baLq;=u0sfHNT44%0a6N(zw_*E@Kd-C ze*sN?1n>&ycJF^L<-u6B>^^J9SF#}pwU|R~7~iMNfF_?o>qBJNPB`w99sdTR!4Mw6 z1|WmU^k=?`kTSg7#`FXZpK-dTF=&f#fpKo(Nt{aQ2_~QZmCv7c@(E|2{$(4=AhW~R z(D4-)p9?k1+isTWsJ81hm!EUvH5YvK#A6OUV5cvA_DJt9`~PJ~*~hVOx&BAaW8Zh@ zPwg-JKPmgm60{#Gd&x%l`_@RisS$`w%4z)JbdR*$Gr1vF&WJfMX^a#IkOGgRTIH* zs0UQEzf1y(ww#zHJS?M@<|w8XBBUcj6Q!Gzl>QDozo^kqoj#{2vHHUgeb}6 z!|21GGD`7lk<~KfXd0B`B1D4?12&~KL0f0beKyb>^uO)8Pkfc%@Wab0GJ(!4YDmau zMXf7EZzPb6NQhS0oB5>iw(97ZF(j>9Ar`18q%TPG{1vx%TWUVgyn=?8Iym z&M-^QZfr7$1%V!Co{WW;=$^kZqmuv~fQKL<{-Q*+sLeBt0e+Ton=P)KX<(oz$`*DR z8%7Ru8nH5*#!TMeLt~Qp$#kp58cjngQH+7%WqF#U4^{cZHT}Ed%n(;atU#A9zKx<5 zI=kRyiJAN+aVZd842(x2ElzY=(Lt1ppH>on6b!XRD=9Ptwb{o6&EO!rhrrYTyfOhL zs0uy`c|lU4V;K_KcEpAl0K}#UI^#p5%VNc=#!0tEsZ0rxx2;`f3c6eTqYF4+k0?cN zfJJE~k>`g(9V5*!5W2YKL$`b=)gw&KY2%8LTVfOc$kZJIEpZe%ZjA~G(4TJ9kWXz* z56wp)l%?HbW(5jWCHWFcLP*vpT$HOhpZ9{XDDw8}L#&9-V?VNZ!FlJO?Q7T+qq*_G z7CIXw3`b-#qv(uMk|&3>dbwWHK`7d-NZ;vk-{1D4v}6E;gTAZL=v+m)PNrLqvTRB+cvNo;1POCvS4Adq`6il2@$8+RE$V?^t44BI#Wq?NoG1f-PSZedi*5d*PBXF zTv^TGCY|BxY1ji)(NKxDDd38k+a;yyO4%~aj8eiM&nvH0PkE(jr3XztGp?tDy!EOT z%v_erTL5LEz{QaA-~8UK=Ym*kavmFew#6(HUIwD=EdTkhnV(wR{<805``(xLmw)^G zV;#nRZpnk*Uj~``*q6=a#Sh);dtZnzJo_NL{O|+UEWH2nd3Rkf`+aYD-#b^>Yp1pM zn!VOe?^_KV9(ehn-8MdO*NuQ>kQrEZ02@XQL{DCG-exTz+A(ZAl7E=8zdUD$)%>5- z!Fz6Y@ILQ@mwoSR`u*jBmphNW%k1qAJNQ5wuTZBCeEIjN*Dk;O4%=+y-?8M7k zS6$f&ZSMv5{x4AMlVSch%HrmQP5AEk&m8mix2?MUHd}&tFdfJVK10gZK;T-`?Pw@E zR0#J$fbz&ephrWt^U@@7m)0dJGl+-g8*(FsGhUijx+=i=y2Spk9O!9@UIG6 zH0?w*=*k?s$v{MZHO1<~4?WNca|lK2FfXuAALKGi|UTE;vvCyGb9CZQ&<8< zJQ@IPhrJnYS6V|hrU7#XvfBS-5lbr$X^$H}!(F+~mNCeD#%U+{(bXl%eT5bZEF$m~4!fgjlJi^m^v{#jq^dtX1k=zF*R^fF-i?pv3S9}L6$*h&pc`iJAE%#`?C|R1|M&m-PdGsmiwWEck7$CbBnNW@2>^~N5#eQ{ z#SbqU#iK4vCy#+Xw56C9K5A((_4tH*>XBK$ z5Ga%Wl))*l77s`$u1;w92!JLSC}~9%P5V5c*`bsV5wit@9e?H%LO8XlA!(Ncx9Z~2 zic&;HZe=o1bIhk62@FvMKgoz0!Y;p8`1BBrCTR3v@-*=%5d?acfo}pZ9&{BYDwPy@ zu6azObE_hca!(=^<5AtMyi(VmbTatWa!Z2v1dqBJg$)DU7)s2^r8zUd={p&Hty|ki zW+?_HGd2&j1%|GAzd6(+YB9OIx}r@ttAm0Y58bT=~%Evcy28c!<>s$w_lXCrvGMy&xWT)ouX>2Fw_QFmqJyYvIg0JWmrimpri=ws&050)Ivu=mr|(FPc2{;~=@<-kUr<-C=@fYlX`&X(S5$}Maa$Gr2u*9+ zHA=9W!YUdM%w%2N$6ki#3adnY$7&RA!HHttVO9klzggx%#*GoNp#-8iHut*bRaZo% ze$Pa#X`&i1d3yxL92Gp#vr=H>l~-;Yv93~_ZV}b3qN_Y<#seqA)NbWb@Y_)`R8(oa z8DPyy6R;n$3?U34n;cAs~AY*Z&kl+CAaHA$Pmp@g+P#^J9qk0sPBWY-vsiuiiOWl0#R?=?z z;L)6^VTF`0EhP=J7VD;jxJ69QRCPP-tw^I7aPpphw@T|`6s4*lHViN@oQCvAvQ z#P;avMncRoB+%Rh&od-TF)NjZ;X|1ulOSmez4gIWUK5&2NYzJ0M5U^$m2jCMEjnf? zdh+F8nb=NYbgD|E>qiv`dGY*8}ZTFR_xt!Axbc$&_LeFWQE%zX#j z<}&#F_kZhuO9lUb#>+r-Tg=|B@MltvWcxoU?=Qdb^do=pvhyz=_vZ2+UU>3_XCAh> z9A5TkQofJv>WkTkFFwBjUS9m*b&oH)ZsEO`Y`euOTW-*QRgX}!^ zF7Nd(%(Hgcay7}0vlo_uWuVw5av%7Dpq}hI4ffdnZN86v_`dIj zm$!NMy72N_SAN}VU-hcjEx-J$XOf-ez_RQ6x7@<3%C?g2FWXc07tG>S=n?{Tbl4Zb z{57+ETdx2+-u%TAj&oGf{<2fe_I5jm>{VquT);2bD0KH%#Ru%WC+KG-bSRWGC!B50 zLVSp-IGGF-1ISP_3OP1Xy~Yb3!jI5pFj)qWgexT(oDwJvazd1l0-#}mwX^I!YrD)o zk?pl>&Dlx!FVeoidB}nLo%DqhQ2d>=U2=Qn-o*xfQQ+man6&~XL*KT=(HY_j;M-ZY zP3&~E9|!CvqwlrH&fqHW4%ianQ)YCqS`26WlO#b28jvnm5MhqRjW~Lt;*vPlq|OeIf1R}U`_K|zHp{-O&EJqvP?7*FeoY%af?VMBF^YCZfl4rbxBTmlG7(Poa8I@{FRYtB9?Q-AS1p-lrhu& zpp73$m&um!X<*Q3)bEg*A1(}F&=U9sPgJ28n5kX+t)B0629`a_|I2^+58fBH#ca$7 zgy^6aB%(#~3oNZXYN9HJD$#jnVfU8)s2f)1^3s%S;RPY#_GkXdB)%VY`J?hGd54XM@BoGc$@N8a2W}U@?*|z~;?mi-32rkNMa~xQmTy(OgDb zBL4kLdKYfXI)8leU@A z;BjSL2I8NjnU8A)S2cmXuft<;QC&ty^F~-;8k-bjhsETrOGU_fe^A+VJB>SB{DQx>=JoHAi8P}VxhgjFij%HRBT7+V63A?03P z2Aq8~+XGyRf+6^f=kI^_%Rl_;8K~K*Y~TCx|I7B5C11(*ee9_WrW(eLeKEE3Uo#^joj}_WG+Y2Q2$O_5r(ZxaSV*+F$m)FCY8bbDIrp8N z^2<1n9c2E(XOH!XY;P<3GPVun<3ByYeRFo*$!@aumwgcXEvvlYwXa&vv26$%gHy*g zmwo0M=yR4BtgydqPYT{dbPU_qv}`)td$pl#gLQ@1yv85G+P8w2?U@40&K`@3p+#+~ z4hfP25_(Y=eX@mm;d$s_GB682VsKR*SeCTgMy;)0Mb3~rC@#Uv{z;1Rl7Fo5E{sn} zh|1Pkb4`?W*IXT@_E%YRcH7zCNjp6bz!?cl=?J@nX5ecZRf{T~)Q3#fvw zYpy=Q;D9h-3rPXO3`DU9%_tVZ%l<{n-%D*WYZkFistaiSAmH6kyV8vE?)GYL@yYSm z`l*40Y~dotas?+GeBeF=dgEJ*+~U^*4&ek_YyzUYjjKe~_qU;g@6jy>z!C!Koov5@j*7k>AW zADj_h{@HEUdul!X=tH)c!8G`ypxR7n?g$QL#>6p+JQjN(npq>oqtu*|H?34D$)l*H z2t(6QQTajacKRyDSgO=5Vu35MF%ypB`5FA7n}ib`EW)E{u+C?!{9AFTO~eRyuorrT zC2hs30=|L0Zgrx>6-q&Ab*&HNpul&a8Ng!eR@md|Zy@<;C6z!<6*bDb0w-csM{y+? zD3iy`Z;J4wwuo9=HA*XJhy?nRPkDKn@DWXTfEtFYk>gX?)-n8q%a92NQI95?U@VKc zg=BI>bUy#(WObZc6x0dT)M6d^e(Z7_+7S`|!)~J)k*ZQ2rUOn2xGfg?=YRS~%J8JC zQbo+smWn*g5NZ&cw-tf6V6u+@aCJWdK-u8)fBu($Z`SO#^Y&h9@FKRp6HRe1TXe|Bm> zQ37$g=6RZPtVDklp4V|SXF@?Kb2wZ-GI=D-BgKHHAxB7F^2>lU@7{ZWQ=ulUC|`y? z@~&kl!|JXygkk0JX9&(7@ziFWDvUyOO(te3N{@1mRT?ozk*PX4s?dik+{*LPc+e*q zQAPSdWPlQ61pdHKZDN8&_9dOrhNgY{O9t{CTa%cfFbGaUgq{Gd09|k|JQXx+*mG&% zNEhe}b4Mm;a-&}0Pp(!JzlnxHcc4fz=!D==OOmf2c|c-ai62V3VEBr${4gR(#?*XTNvfxs^fbKTTt6x}0bJpp{>`L>xHRb6q!zjc7mg^Mq>z9x($>@mK41<~* z%E+XiE6!jw{(8TzXCQCXU%Hs>%w#e;S1?wRS;`6dZLqnu_?THlS+#NzX%;9&WadnA z2v3?7Pf^1rEQv>^Nu;a@*0qQ_da*G|M7J2G)1Z&|8KuE635I75?Ke(zgz6B4h+4*+ z)i17=AgiCeelfrn%)8G_H1_leQJ8H>14r^|HTIO^sTL;~75vGx9&PgZ&{e6m3x(}U zt+$t6bnX?G{Sain^`>in_571hJ-!Gi{{3&ADKf46TIkIq)5l!%pr4>-qr%{6fol9T z3z)_hrPhk31FbMH$E;yjv#I&Q11J~}<<(_K68I{7F}&&)QAPHa&vv(}4CXQ@lSwy& zsv5?wh*goNBu=+dO(?B(73<}S62*ijh0;>(s+-JeRZ(6ONgMiOBdVlsLh#^}_^OdG zOv}TWQ{9@tjI(ypr^OPkl%6JR^E#cif=#0`>f#{^g>aM-Vv<#nh>AZtp6o4CyIFxV zovD;{We72PSy6K-U-f)a6Zs+G=-s~Zn&ffCU(yQZoIrSOkuw;U%3#2zN_3fOB>}Of zWweEuK)2H-Bc^H_KP+p{u2- z_0ZJ8fYOo(2un_1hnHR5+HQ^#YDNU8OMiOt`QQKYX`jfp%lzwS7Qgu11S$LEm(As0 zJ-rAzcKX_9u_gZ}j9(&$Bmn^#P(s!)+>TS1Jd6#Y8HfQ@a z>@WM?*WTN0uY@G`KBC}V&5L;G%Qiy2<_{_>n1 zH`;o$wg1xl%i(1Z`1{}cjxA=d&e~}Ng+Z+hGJ*iRn4;%i>zJT@Nt%dYUU zSCU71$v88fH|9zqs@VvtaT3~K<(Hc}Dc zL`gLgm*IUN8pih>GZdT7Hqg)f*69E# zeNgP6fxvFzWqD8+_|=4PU8$XM&=|Z0gx&h+x6NNx*hAlEuia1}SvFDwy!CT|FFrYf zj&6HS;Ef0%^9e*F`1g|n`QC$O4z=4lcj($*S)tI!0dISGeq>O}MVz2HP56|P-Dfe3N?F?8Ab+>ue{_u7#XEcez{(J{^?g<`rQ*g{h>ql z-|_TQjywB|lkG2Gc=K82sF@cgNOPoM z+a!|EN4(^js0fe8pSWFWD)oqr8NVctN9j^=;}D20kD*3o+#)tB5m72_s(KQ7_-ZKm zI48l|43rSQ(m#GG1^^A+7=(E$<}TNq zfG87DCI(JG=K*r$i-a{nG081~yyQxyoQz*`ONW7dprPmtd9qie4=Lr;o; zIKT>nBok;-7w+!pFSdcL&^eszxY6_p{&2Nq5c0Fpo-s)iDYhz%zl=It48)Jh*I00uJ#2Z8jepEpb8WsQ0p=1)#q$%HEke4W?2oH-4 z4=IZIMeXT?f>S9gNHC8klW67yd&rb~NAmMUcv%M_LaeU;Q@l)hniusMA#4ors;g55 zI!4r@-q8x8CUm7Mx{P+yYG7@2oLYA!N@;8u7+$9^pMkE!A-Ri-WEfu2xXl&AW1a}s zAG6XN#A#p<%X`%&IS;$7A-{I2%Z-|=6vKr7Wmr^(EA*+b$v^u^Cy{KK69PLIeSv>FN z8?OA>T{p@Iww>pE=1FJXeDmZv8?1(Rix2&2ViwQ@k!fz_T`d%racn*aW_^R@nPBe7 zAmX90p90J&CbU42#!(!?InAA}ZV}neQ5XWeoLL7cE#avnhUck4D~V*xZV9QPJYs#P z2oFjLu@=!OCZW(o#q^X(2gEorB%CYMayzVu!7XA5gQWG6ZY352(+I?aPMSf2qWVyH zC^JMUDyiPyx0C9f@JG=KIW?yWPDBFps#X+5bd~3Xj7^83Va+MULjvX~^65;bBu{5V zoch$2ggA4o+tD2QQi^r^%4-r-Tq@y2$rg>KjDA^)ZQSA~%5BUt>EL$cr+M;ERR%*Q zR8f8@9&98mMMs$yJN%EiM1dTbEuz7eLF<^1%%ao^CNI;oPX><#&@473C3Ch{mCaU2 z+55|Xe97a+!%md$Fv+7z;pIuuub%N2%rDOTtCWv@!OOpX_7Uf?y}~T>o97;O0Nb|m z!}D&o#r*ph9-VAAFL~g($LB75;9C33k34wQ;|s2`zr5pCZ}xvu&SUSs?OO2i?pv+x z``A0a`|Vy;ZgY93_q-Kkb}HK$>^7I-Wk|WbW}D9Tmp^sb4o4lb#i9FbwBK&)?z7W7 z;N{L^FY|j};bjM~PdnvguK?R(hCRK`4D@~e_|G_u?Eo_b>iy+4-u7lizK`t%XPcxL z98QLF0Y-7ApdH1&szcg<9+)WsjM7Xz|7u^)7F2_x_K4N;PA=RB146#yc6oT&W-HtZ zD%wkJ?-hjv{GoD?nb_idP+f*VMeQ&9x)*>C&U^WqGLB)}H1P^HwL#qV)?OR&-~;yc zkIdkbU0t`%R0G8zJbm0k#BhXLr?0*83`z(5vrhC5QUSAc-hZDt_MrVr&~F0Jz3|>9 zGJO(@0t?H7%yx~P*|u@K`8#L1Wwj>Q$TVBnnt+{cZ9DZ%A1AK4i1~a%PmL1dyJ6mM zMdVwKf{Pu=wu$TyqIU5Z774xGD@LlNd{i_iR=?kIO4E_b~)uspKu=g+oyfO zUoiVh_66she#>>2J#hDp@bbKSZh@B{Uvxjn;OQs)^SC!7%oh)Ik33hiL#ANQ)!cFG zAz0)f_Ld63JT&pkV~8!fPyvPH9-!u(jN8Pzs)a|YQLU&b#Z%oV+7+#NC?{W$Oj?x@ zd&n@SChP_41*hnJ+TGA{c$fswZb1%vjR8O77*3W2wyPzvg8761UqGKK_(_9jHD|>@ z$867t=tL^#7L@`3xQefY6Tq|IbtnrC`KCWMllt^LqP>g;Z2&5 zEKe~R7}GB;e(Lc`zU@&P9O^O(03=g&(AkPY!|Mp-11vEBU{4``> z`v@M3Sk0xjr+Xm+Sk|;3*{DYiw|;cOrX#dP2mD-$Vwah;qmTNq>4S|xnzNm673mRT zX^Y=j;sMp!$QD<Y06uAM1yN$E;y^){0VKW>RCo-DvSh?0QVgD4F?-M} zMHCT9*yQ7hztZTKd#))$DTuO{3Mq@?+RagfO;LlNy#|SHr8DK3NmY6Mg2>g}&P-$8 zRbqP;$HXx(2;l@iVY`YlC?ZDf3Umd=!ei}fl;*6KP>fwR8dW$Y3UiAh(KR+<2`6~w zswiLBhR$C4qqvSlvV>DA0>_!0p7Q8${nq}OD4Lq$c+DOH4*rj?#;sWBy$CJ?8LD>;c$-Ac>V#t({8@^f-M z*f4N4FHA0>Y@Qk6E{eROF}W}g9}aq>!ma8U3?vy9yCq!Su+bNizJMsrbaRkcw-zO% zuE}n$HSrBa{X()L{D?A+<%qca*l2OKN~>y|OU5}Dahjpx=^%s?WqB|G&Zd%w+V{S5 z>i+xe`r!}lci|7d`|yIf0&LJ2c0TjlU;X}B-?;L}7eLLAEWG#eM;{b;3%MC?Y%qlK ztf00<$y(vcK(;UzbfPo-ZcX!tQ!+{^NplLz(AnA|8g#l_#z->=RiDh4nMtG-LV{yj zLDZaUJ|DL^7Ew~S&dQ~u7NW#mwcO@LX>gCbv z!RVt%X_}xfGielamQsmhwM*1sPXBRDNqTA#KYE;Hh?1#;h?102fKP@$a~d|x(OHKl zw~Tg|%4jZ8nKSB=MhK(~t4i`IFxtg0Pxz=lBoHFGjR(UkQ#{k0TC6V;*sXROGu^)O zstH7FBe{*}N`6hJPeqjuQEp4qDhZYZ zl4V(QZ7FjFmL0(ExV7K4`ra3!FMuI}&t^0tyezo|mVf*6C*Wn*-}$!o&zIO%_D@p& zPf7+}wwpZp+vSCK_)95x`H^|Ibsl@syz3sg>#}?A_~9cDT=vfOSK4j6)%V$TJ?F9a z-f?{Z8D94O^7ikV0A%nPQihl9FZUL+-DEtyybLcx%{G@mcF@)*9JSk#2frI$K5)(k zzV~&|-tXOGhs`!#|E;U6xZ*3nj~!lyi|x94PuZz#Y0G!N%Z9SAN%y&I$FpaxzmCJq z*!*V`aO&%|LQ`9xKp!v*w?fdsrp+LyqpgHsFWd)`z=pPmgvTObK`A&1Z^DV^$~~pZ&xqk36*f<&ZMS>?C$zIlK&3dy9GU zcg*LW_M;zu>!i;e2`~FUsWZRvMc?~!9@`aazWs(P9-4d0y?5N;FQuHveroaLaI)vV zCtZO-^F`jQDLnAtlL`(|LZE?Dk9(2L*Evsb_tZnAZ7qy$>9*1z%>%ssMK)a!UPvqEv>$x&tcYcCQJTUyM3v|Gn+iI%-aH?baz& z5|x>i#ux^_{J@pW$L}qBXRUcgEe!>}cu9A7xd2ebV`7s4tk5so)p7PyjMCos z_Y(wu$!G9^R)d_eG=V_za}i5@Ow8Qrry*+i4b5*vRsc;awKDW!fxZCXmm=>nIO&1N zK<024ftC@>=B9<3b(#aKbt|!$DTAU@FdJrC^x>YDu zz)_T2)7Id0)hP@yNc1?jV4I#+T*UJd2~snK?Ko8-idR+f`@l#- zN7hk*{xpZ&jbn-lN7>FI(I%CY;z6;%B4#6^R4KzVKhv0#2V7Z-jnfqIurzK_3S|i- zT%%F}d1BFPi*r=`vowb;W{zHiPT?sSZT^A>05q6rq=DChSMXF&I(Q`?*6T`6cp1pz z#=L5FaK0a}j2V4ls2IAuYOqO44M@XGlHq3+C3LA?s2`i`#w4D}>i6;{6pQL~cJUYo z@>OPXOQS;Y!(B=_1Op=GhN2^~#+A_}TrAvDcOqfxY|j`EG-+a{BsxmU%SY^r4V?*t zGh5JAkwV10+V2d&Y@P>T!^<6puOqP0kn;MmQ6bL+lM9_8l$X++)q4Lg;bk4?QYDY|_kk-gI9b`bxY^j#AyKbIbcN#P`0eW(=$^g0~E`zV%ScMz;5;Qj{w~xGEX| zD7@k7)_SX=$z@8$ql;0bNng**M4~)dqeng#O1u*4-U+aLi2m}V1<#j}J7F&{uH3F3ygfiBy z30R&#&zf&)SeJSXnJE?=Ub1xiwN{wt0U^Q>=XkDp%-wwB<@@ck(+=Bhe$H8^J#gPG zOCG&{-u<`UaQ%-izVN$efA3VpYkqR!L-THb`pNmf`1#^rK0o=~Uv@Fy;L)O{Ez5Ms zSmrCTrU~{KVhy3cv1GiZITQE+ga5Je=^I-u)-gr-b2u<>^9+HApoF{(*Qrig#I8J? za6Bp^qLivmc&S^ex|OeMPTVhj5UR_D8k)kO2>#17gODrO=c{dEFzeZzt14Os2>gwo38oJg1Hy(vBO$hZ?=;EU-o@$?=O2vdEeQyoX6g4 z+YJF^hp~O{O9Gt1WIN7~vP=iF?KR7|`rg+uhiv=d1KxSa2WB0-_pFcX|Gpy*nZ4ic zTfJ-6+gDlPFTTI*tI_>+lnglR^Iw~9x{-I8y}WGS*^A8HUw+SKn<~2QCs)`2hACEE zd1a6gSO)jtIOmPwM_W;_GcX7XOJZY7BA5w=_;9o^Lr5Hd=vZ|S#uXxlA6?bO19c*T zlde!FI%1p4z&LmtQq~svlEc{jpA?1RWuN@=kIzSa$+5#59!@ux23@hwsku{$VR(MDX1&Dy1?G(dPegr`AyMYBCwD+YiA-y-X z!30Ek&Y|S{-t#V+z`}qA1IdJ!$>bS^!y5LWVKHVpacaMQ-L=+oc)P#oVl}+rR|wnX zkho_M=it=D^;cfJ^}9D(b;aerc-+y>UHkuK|H%B)t1f|;Z774u_LL<^xqq9w;GD17 zV)p;bA30?AV~;t+|1W!O*`G=I3ugZ+6<)souA3g7cejsY!^?tjk4+D_9`0fQ$&~5g zZl)-bWTML#OL#0wrWH=lN>{b;OjnV|U=EseBe_K+N61k_)pd(FV46oO`%>McnUTc>bR0M$`Q(o;S5v9hea|J?Ks{!z9eei>fH+dBq zD)_yt3oP4~g>wNh`^8oar-&UMfDi>m28l6)M<#5G(@%Cd^=w|?FAE|KWQc(~`Y1EKj zW3|;F*3CBE^aFeD*Esyo(Odo8bZXqpoMz)v0X zu{XW(jX=7i;NI+@uqm_moZaL#M-5%6Yq*OVWHQ#m!t=JvrG5<2G`{6dIZ zW}4dE5=J)KrNASnSs6qf;zmIrVB$IJ2`GL5-^3>ZLLrt9WQiW#Vk@HxW;{~yzfz%& zP)+Ewn|Hgi@|-QEKBRn9ouS zY-EvZksb!OOdv`v7P-okpW);nVvuBbyD;7@2Gu2si52-%8Vb}eJV_v^3$jVD7&K%9 z5g-EA{(8a;Uqe;kRnIgxPqS%)-w2tM%8PtN zL5e10>E-kMtkAk7BQZgq!c3!H!z>~?Vw@->7&aK>CCozE{^aoS>%_Jb)_NY_MKBH{*+7& zbVQ>?EsJ(Ga)l0ZWw;rHo@dxAgHjP@=A%ftF`wvk)vf9(8WlRXXj84yU{(<)HWW#I zaE6!lqh*JqB%Tq^_<671j~U$v(fioj z)5_1DPA4u}aQ8`H`qUvGoO9{LXWe=GHTTWE>9$+0dhmhU7S6xxmYc4)^78XxXSYu* znfKf?i$G=;Win-GXn~@n1&8vA=!VV;`q(H5_*+BFCi-)i8PSCDQ-JsN>;Yl3=&8s* zIn6a>(KRCI$FbPRA)gQq#K09zw?=Qyry*sOa=xp)t{D^pry1NTr8eC{$GlBv)>&Y! z$ubfXY7sv*Ff&J^40k(H&T)%bk!wYoC}F5|`bZ5A1BT{V>N;&W0}0XH+M;AdT^p0G z>i(O5{dfQSzxtC?*<{kK)k(%Jf$`)ht&=(E3Bf}>^psRO(oj1&n$AWH@~$KV3^5Gr zXiIGZnctGqZMNgg1Q`;lwan{i9b2`gZHX8qW$^r^gkjW`ahpKbp~GlpK;87LBH~~` z8A2kumJ-X`_)B>nosIcfzLZwU$0h=_Kw0#xc$OxMVJjYrSQnZ`EDk{tF@nj^u_V(b za(EfVA?->#unjLeA_6aad0Co)%#-uj@G_)qi`oA#JALgBmw)r~g)ct8$ji%inZ3?D zf9{RXJa*4ZFD&qV?5i&Q+WmK&zyBVy;N_jRe#;))`3vR=K;CQH^?mPa&imJgoxQU> zl3RG$8_hoU)qhCIBRZQ6GE0Z=zv;nyzXM)A;=rvxcG%7b%z6K&8?N?OzV`(z`%5OE zGQ4az*+-=P?~)DW-FKetv(jR6c-bB_1p3vJzXU%D(La9V;lLy0D*^{4@CaFf!Om2R z*hSjnb1y0?vZp3~_S2$6#a2tGxov1*6DQ<|Vk_B3m?|K!D;SFo%Y>;x15i)7#a2#_J?BZ zOhGgsJ^WxF2e!9sJ=Z8b+d%_qmC_dS@{TrXKvu|?D?krGfIi;hwAaQK&jl14#Dv=! zWRBn;NP&?FVH966pHCPo52T3v?I5#`94Ff$5no<%%H7M+{6KANN{{}?2mSitfc=+_ zKgQ>;LFVs#{bc*gUR8EZyFZy4co}N`!P#H+ee7?Ydi*Dk+W+`Ze#rN}Y%WV6vuo$E z;bq^)hL;`57K97@%mp(=4B)9JSaNID2$?1U-b^ZdLdPQ?e<1;i35CC;Gzw9o2$Qi$ ztZ2d2Eh4sMCZ+LfLW+M7-6Gad44M$12w%xWH|z1(T6Gv+clYauO@EPzE;jvrC(20h zD+fOt*!E%7^^~_1cp5vg-_}1I2Z}m+%?jp}2=~NyP{EHIdYo3km+ppnAu{-j>rk}i z)=1E&9Yyf-i1XouXe)srnVAy z#s<5RMmDFd_fa9Gt(RkXEa=CsNqU{(hO9BgBV7EJRTy2CkELz4u|NMZM{+LyPvI{I# z6tNU^$s@8gHtJ~NpEbr>JDy)#OsWmN=uln&}I5zL;1wTQ6rNAGx9Z}zep>hRtPz3}Lp)V9g&Pcnlcu677AQ%kd{MNxx9GjzEKVNru+DAp z^A%#Zyoack(6=C2Ms)&5%4sLQF|Qh$6(43?m2GMyD8fhMXm$0xEy-i8ir1333 zCtqJ6j?N`KX3}VwWLM5-Q4a_T_vG=u1ejOu+=o4nCZi= zZe^G$MRc1qRa7TOs&tGf_Lht*MK~$xopBrY-rw{(WD2AnwIyPcJhauiig+Xq#XL$Q zs;Hsiy}_n#GG(ZzIikon4BQTx_|b8?4r8YcL|z9FkLZK4dC3KSiN@CG4 zUnbv5E?K**c&%wIQ`Rd* z7B_VB+B43=h{1m@2a2I%c}WI9_6jq|3_G{Kj7L%Dv4LgZ$Ci8?TY{H?Wgo|W=F!P+ zvj3C%<JG+k0~e8eaCjFXyrCFMs^-T@KxM z_LiHi_tuqP^Ow$Jf8*3sd=bkF%l3{z%Yx4%4nNc%F0cNURkz*B3zBbq^BZ5k^A6kk zQ)A2mT|97y=oHof_XNYfI4#Nr04;`KuTWM38-voosfYsTg9q)bfy}~h08p@P2U!)L zeFhI4L zVCE-8KLXHsuibaSql!&r2Tg%l`;7k42|BioYnvDvwJ8iTi|{D`Ak#C1B{C1r!$<8e z1GCUBGi^-6LB8n4G_ClFz(WBYFmaj`O0wHd6n>ju4kJ0g?W8xmSm$uKQf=`SXiQ)Z zQ41LHa|9=o=}k|*`pQY4$F{<9%e~<>%e`yXEMNWdy)VbJ!DOeh?JS@D?XUR`7>sPA z8O7E!Kn*Wndcil(KI6pCede&wAN!H-pZS%`FFwl_bN?shdtc-Kq~PTx3-9w>6HfQ| z{xgD@K`tX+8X=Q8C5a)6=c`gkk2uX{exZmWz$k3Z#{`nld`w!zQB-LG8-dAmtI7yq zGW4n44HZqU{`?Gm7b|85TcT#3cDa#;T%qC7q>H6MihsK zf!u;KnTVp7idd&ei#dcGdHmFn7Y`5;PeQO2{D_5;!^0AR=unRY2Zf$G{RT8+t%&0INWsBB{MdGF9YQGtQtgHjEz~!THT@<5L%)=p#eJ6wn>~+*L)M zuP-cCqnpfkRg6i4S*h_LiL_ZgF=y)Ne)*z45f0lMJm}e8a44Wcam_hJNAjwO;u-+1 z27~5|kjWsQ`Qb%|lF=365&txP5UrflF3GF7%oGCU&uQ23s*QgCbPO_crcy|j|MAeA z6$VE3%2Qr%3btvUE&YHHgMJy1d7xcHk|i<`-AaamMuXL^=+g;jRcKNv%Krjh<|yk1 z*YO6oK)!rsVAxqkRBAbOYpxjvig;K;N91;O-SR4L%edOL5s2a}KOOhV^GR{*foA^@qLb7U-9GfoX39g*H7Mh z^Obg%FT3=dpImvtb=O>c`DN#R|Lm`yd(P?Tm@N_1Yiwu^=JhU~Jz{{pvLqI3OSiT0-{>WOhdO+ zQYD$hq6{@iV52dxOd%1Iz)@1Bp-<=C4jvgY$v1K&nKX}q8WfOZ5=E=baVrs&fZ_1=6UHC zPuW@aaqM3`wb=Kur9SrM6fPd@xJklB~9ee4Tf_W#T9^5YAgeVw~x(am#jyZGj7 z&z%3Wi{AInm9}~J8=S{(f7yQVLA$>5@V(zBwTax-xog;Y8Zz5!#_Z)~|0LyOUq|e} z|nAYJ7 z?qZ+=sKBz#<%9R#9UMOOq|e%1w#5t=`!lJIXQRtZzVvn8nevxi_-)BwN`3g?-M)DI z5#C?+Wo&OQ``FhFS6}qxBlj=3@3x2L-saDjA9?T|-^UindB!z6WW!}N}^?Ho7-yufP;7@P)D`k+9&B6e(qtMD@s9VlQn8J3hG!4F?%fgdF(tRmgeK>%%86Fzs`vH$$< z{_Rss?9$q&c=#93K6BNNFI(Z&%dflU8s9wa)PMRv{_%hN`@cKl@WbA^>M9$qJNXsM zdIGgI0NhfEC+#n52vUZQO&=-=!`#Yf8eo*?4d{=%eDcfgzg~u7s1dF-oG50PC0rHZ z845EcrEzkmWXy1j7?0cG&=5D3@UxM1wP-akZMBHO@JNX2I)!3Xr5m-;iApkMMg|+? zt5jW*DaN-}Ji^H|q7>ngv`8g9>XM*2NhxIva@|3`ge~Fnv?Z1m@t7S=2h34iwXPrO z$#29;v?YNnDB~8hq%91Jc*7_vuap8*>Z(P0D#D;HA?US56CTzUI?0q&#GEW5T#<@O zNx+FmTZpdZ6}jb^gv6E*2DU_)U&Fv`N9Tdjv|6Jg+f}5%M46Y54FhpuXA}UcZ)ihU zfhmYj9t2A)!&TR!L=%#EQpa%-TlBu-$;sM6SBn)AV~HAmAd@R7X?UBAML49Xt_tQPYe#k?qU_h*1;T^b!jT)7RRks14U2Y_wKu#tZMfcSl zTN22wVrflCAqAx|;K|Q2BNZfb5oT3}C=6~>%=l!WSA^htndUVOb-F}P_VSy-Lmv(a z9qTEH+gZdezmg@>Rg6s&SkHP5u{Ks!m(c~RGnma~$%^ecW$ETsCTk0WuY28i{pCLP z^~3Ye6kR*C`Q(!M_ug~!56(Tq=dyk2%ZIWteB-neeMx)aL-#zk#NWd?;s3}JPcD{u z@S%HezUBJMuejva+ir0C+|RwB@#xdfcniSWzxOYGbb;ggeiiu}|8@<@&78wGwZ{kZ z{dj^d`7OndEzai;r{ytYjCFaKdv!WXZIgSm4;c4@A=~u3VXV{nPU2_$u2O?!$gEH6 zRzyt@-nivnt~P8WqDU~{fn}~X3(N(m*>1RPARFQ&tE-m-o`jGl)pXh}b)9Zh-Jz1U z(kf6EZlaRbQ3z=`xnfhXsj528k|Izvio6s(_0Xj(btO>gU{jR&qe#0bsl=|dDuY=? zh*@+z`gt%fLbP&A=b(i~B4CrEGxJL{>@r?rBuDsJ(+ty75212G_$Fr$!0EPl7> zG2@q%W?%ww@=q!;Egl(cDP|Oj0Q9n3DTf$HEOO1F8d5|Wu^Np{#%;S>DbB1W zWK>VLOv@srh@McEDRa}BV0{>CmnEwO-?CuxHh(RuV6v@b0NM7j`D-6pf|1eNQ%0BM z!_j|y;aN!exyKgx|K%5+e&pv%=0EkwebSNz_dGK1jz#z1^4R=4fo1#4l8t7c`Fi}} zTjt+)^+o4><)*98n*62IotJF6$s6bFu)zm*-e~__H=d-O-(lkzG;RkOK$bapk4;g$ z&g{))8C%S;O~CbT8+&v4xR34(GJpJ__kQfa_Z)NRR&I|!YWE`!+-~+(>&{whB_K=s zA5imh%P+Uw^6>1tHrd2^U>mRYSpA8xcopQFz1?<`uQ6ZnePGj@jF3XO)oz-w){Yf) z40KcrMFO+~%dny7UHA%&3xEMnt0m|PUjwQjs^HeGM#Z1{@o*ckd z>kzJuB$<$MSDL^Gu&U}9#Ig2sEycEOskinTZ>KFeusJ!Hp&mPN;%c>^B;WKxjcIy1agC^Tmiq5$ zO+Z69wE}<9kXH7T@$i5~=_8{eV9p#BH4TGnf(OF&cJP5^`uOd_cfmGVZ#^*CcfG#y zh0oYi?thk{K~u?{WHIM z)sMcv@c!HHx%Ha6Z@Csy_Lou%?z=S0&#(8JDcVTQR=h-DsO=y7k#U_)xRplJnByc?29J4dfElh{gLRz1 z2c&eCF%SPj!A8Z2U-$wWicF@qm5^wiF?w^zl!vw5NG{bds2 z!W(#Y*PV7!q+(-0sUl&&m;{ahmi4UZ1K4_pS`|kTpj2lV)Y9EXEMvtLFdDZDU1%bf z9umwN5d{NB&ukkihL%>g(CKQRI)CGIZ<+a}acEgWv!MZ>JWi;n!0q+ZRo}8B;1+G@n z%Lst>$3FTIp!m6`Jing&-LE~TUcBfB=fC-lE5CE2S#Do=-j2ra9$mE1Lc|GMZn1?; ze3}~%JYY(`Va3UpuF0nc3dAI~XFHd&OIv=6;H997K}8LN!Th2j2a+S+3n*h)Ow`GFsO} zi8M(bgA8TrYZ>yHNilSFHA(@)fLL@gC8eZcKv6|0V$LB@6+OivUAa`JFq007D1%4S zgt%%#l1VZXq9|6#E{O-L08PG^OCD3uwl+9SIlbB9}#D+ zK+(SvqiR=OJSd7#YSEQBV~#UWa~|Sp)qKZ5ZMDd!8-Z@MOL@&r+%>b^8CdpetpqjO zowOgVNJL!{RFC~f@>xL+3FN!FZ3L3fQh8l(@JPl9&J@mYRb;5GcPL323^c)`o6yO( zZdeKwF~>rIn6I$aF8vYff<)oR>`Dm9X1DoE&ggn#HLVE0c56bTl-Don#+lgU*L@f` zuy&PV(@N6noirzN;)OvymiG0zcBAkRnZQ3PSzUBnXB_`0B;Z)6goS*VpN@x00o>8t67iHscCL-{d)tE^` z;W5Fa=I9^^GK(`aK_J>Km>^_LD-`-j)2)yrH5j66=)b3_IDa~(6>=Br3Tp+%inOa0 znb^?(9P!SegYv2bF;qdBXgGs{8N%d-6HSa9oz6c>5qV59NDKq*lFzr|dDt!ZR==R= zqk2|%*cg?#owp@^Yu=%Ff>`3LkbY4#U-+}(r!Akq{>c>={`k`KF8|T_;%aX=I_M1- z`zNV?_~l~2`Zv!@ik^Dt1C5HhD;LVNtPNR9BX*@ON;T$ zpHdLXm50f9&AkpdIqHl;ju}Ce{&l6HtE9y;ue@pzn?|Pz6cMQ9im+d|ub>LQ?ICMLo$UgdmTTUSJ2jAtR9d-g`}+HmE! zyUeycOaVMBY?&@`M&Cfo`n5(1lmFqmNqnxFu!@HHWDce}0tqa-%g zVt5&32681;tj2`E%M{RDU=|J~hgiuQ%9@7OQM3-oJ2DNEQqu1OGUT8$nH&|hi_@of z!7J}Y(i8l#tpNFw27m~jIU(VIFr%nVzPgY#W5EY_nI`CJ_0s`1zcXL}jpca&gIW-Z z)=^mPcLsZ%fF7LzcCGjg0kY86imzJkLkH{$;rgrPlRx(<=-AG(mzO(|-7#!m`a18s zrz?V>{h8E7=Y8Fg>?04`6J9>?groeE)U7|g%%3mA%RY{M*Udk<>*j0VWuM4)^_29O z^BgxFOpTs>u4a&Fg08gq!QAmw#Umq)h^rVNdQW84Wl$8AnueNaRw|O$MDw&-WDb6r z1Zvu2M9ed--jJ+{u#ij?dXi>4*pD8NtlxZW@3yH5@zrY%2>^PcT;ocJ4iTd_4pl)U zLOQ7T{tR8{n;E4a@)x%RJVWgFLU)d2L`p#?_~ zsF(5>(wSslrHmLuG+wkOZ-+E;0BQH+=wE- z40r(nv;;u7h>|>v-gy#}1(8d&(l&=*4Ct)XL~za!76=pwq1np>!?IJ{qO5)y@Fg@a zFLT|yHr~kQ^1OTJE_&!eTgm_McYiwL8>er);RZ(^d4$`?9$xgv-~D#(U3aeX`js|$ z$A+hW?W^J_%0P~OG$6T^#II*Mv5uyjB=m(!%yCPB-B`6GUg89~N<2o+o7Igd^<%&o$ew?ZZ=Q38FuT<0>u83L(*wJz7T-6%`JcCGV45Xpc!T8Lp zs%w!NDw+sxHR`I0gkKQ`iA+^kfen%5@N%U%6BS)^Mw+H6uf;}Dv&mKK$`G!Ie08am)r}MC;ioX7M4A?xl89VKEQQfY8xjan9ffciTA{dN zK$P+l$ry%zaALzF5$;xtJem!&mG~0h@oZyGq5aR zhMLQ}mBPzHXmd*I^dZNU4n#>BLYL&pH-L4P5u#Jlgfc1FFlcb9MFIxO(0{C`O5G~L z49g5Q3Nu+NN+kku)``|1)m?c2rHp){$ib{4ij3QeXj`wTQc5wL8dcgbqe*p5E3M#k zRhpUfPjiNgRH-W!?Th;41IWTxQGJx6JJFzwQglg16j!vSD0+~g09akyZLUgP3yEd$ z=n_1tXs(88i#b_DTY55tUCc-10Y2n;MQ(X)_)VsYGT0OqA98~6sTO-tfN0GX#1@^p z7LLkfOOdTSwn+f|^1DJZHPMw0+9H#Osizg~TA344>`JCoSX;E58}UdPUUR}@Y@=4U z>dIhXvme=Wkgx5{9@@jOROGIloJ?!&i79Dmr|Epc(%Wea&FT{y!>0M1S$XNkG8S& z_M*9W-*o*oKf3*vYaV_0{&^41efpUtfBUDGe*3#$-F(aS-}vU&&OPsYbML)Ve&M2d zww0x4o_*ZD^JPE2=m-jBsAjf$8L8JwF<}ccPU;SY}>p4fH(}Cr` zjF(5Jb2594!1meMCIj-YJr5X@c#POaJMYymJSh!qf{v5lpl`mN2HogNRaeTryd9*n zqwPqxs}`MwGT<9~m>VY)lV4_vlNHm7A=1t8pcuM#F%Y6fx8$IMz>>j(IVTLFM)4p@ zcxsdnGeOa`aII9UOrUIo zW3z)~Jhhcw<%b*$IGJf&a#vm&{FF}t6h(Mi<%k-#53xic@@*7(tz<|)j-`yN=IVl4 z$2qi;aa-@yv|FjVN@Zejje#`shndq}LbM`z0JSWzQnCn0lb;G-{+EsEb;F@+e;p-( z$#Ah-2pT$;+E$JZK|lMHm|=nMec4}@9L)ZMe`I!Adj>E6?iWjb`@H{R_TN(TAmvBr z-?sRnTV0=7a_7^J-SOn&+wQvMB4GLcJAZWBwP$U!`CIm!y&=53@6NOQZ!#3@ptWsf z`^aJE_LsqDp91r*Qm%kA)Lc^f;eEDn7~5YkAFm%`3_n6`Z&`I!6i62QM2Dk0 z8an<~#yMxu0B0X11}JP}0nwTO2F2&Vscl?&P0J7kFxv1E!`sNirs+0(fmX3D3bsNR+vGjMPp;n+~@SL6ih<86dP}suBP|2HDd4uhKTcV&k$sLU6AD#H& z4}Rd*>#wk(eA>w;xH@`muh|)FSHRh}vaf!f^{ub^gJzJ~&a#)6&;9O~k2-v>FMak% zc=jf6dA3UGnv$B(V$ETbAq3AzNkA{UAY5J=p$JZL z)MABb89z9TfmgXs$2COlpZfl@O=rgmaOykf2ejxLZa$puCzm<9?WUV79i;hH#E%|8 zt`BSbvB)6N3Yh{A41QT6t#K~i5HknOgw4+I1F8n3F~CM~i0F5(J$7|0oHCLl;+!Cy zusSgT1D%C@gMQ(TqH70V-kk|vkWvBfFRmxx7eQY;mSPRKnlXp zoZ9TtYxr?y3r>+6;pDg?;&(;oWJQ{Yf!n!@1QQ=xN6c{26zMqpdWU31qY7^rd%6-^ zEn*;tA+CrTRf|`ZqGS$!wUmNpAukMWOWKv8r-~S?72$Vun&^lcF`UawVi~zWn2s@+uOW zlieypPgGq@;B>;A8cL@V=aevOHz6o`L@J{#nFfryc<9;SA(Met7>NX>GOl@5#nwjZ zg;HHbdO(D$d{N@S%RwpuWHF>@d~g<`i`lBBhy+OnC1|WvK&WmMRYoAHNK7c;6sclx zn{!yfH0ER@b109<6}&1jwjRMBGm4~^TSQglRgZIcgDn~oSW-z6g;D^80u(ERPM=iL z%1Cq=iDa$tD#@DADT;Vi-Ha-@b(J)sEg40X)+l8Pqzi@-rK(VAS`?zlS)z>Qw9ZVe zYtgN4H5A$FS6MNVU-HF8^0ewxS}05n>`+x&=!F znkG6WC&=iC^S}S?bI<<&`MMM6J*(>8z3pmmd)FO$*Shz9-v1ZQdT&DR zyUsf6Jp1gk_dffa=Xv(;Ip=v^+wH6xd>vjEQTM{5zn-zgTz=_A-=2H%ITo7z3-b+2 z7emAke(xUpuD^Wd>3{g<*PVFM`>vjU`I76eS-0N)klWEW+`YD?=S$~a5MK5wJ-pn5 zGDz8?vyCOoZ(geQ7H`matYjy)x5zy`*v$HJgKJEARpNObfOFwxKCxNa!vs5}Jx|~$ z-r)Z+1Vg4_;x2~@l}%}G>`iLICj&I|K&i1K!MLM4Ux@X+3-2#W28B8e$9&U}*kN8q zPUqCcGbV{c#B7se)|^JU-VHm!D+#BDcxY6`s)&RPIe5?=Vyo|(44|jnR8gR=RP!O3 z-{PsZCQ%4iV4FNe3iwSg1HoX-v}b_wbQ}>rgF<6tkR)A!oV@aR^i{5sQc| z;c|MDFpZZ{BvmPLL6CyXE={7Tl52%4hV!~h2L@~yJlFE7vIHjEbnOsihLlS!I=8s& zT(*0}`f?xpf|vc7RQs`g>C58s3~;{LUrJeA_7}`@pUA#*&BEJny%J-*v%i#rmu<tLii1XK-1tM9viSlxv+!WnipvTPDCL$n2c!k&FjMissa@uM54e?;8bj^0H=$V zamo={0#n1h){-c~0If?}f%cjJp@0bnu+*V0Xu?Pme0>Ynrb`t-3P)RUw!FwxD}aPx zmY^p>NYFvR0Td!oNNm-Q91RK7gh0z)J~&JcMU3&DrhWmi$^`min2tj(puMhX!ch#i z_^z#g%%K9C<#mU4Yl8;-84qLVvV3eo+5i4{^1y6WHt_-uHh6nCmd99Sm^25#;pIJd zefG&GzT2n0fMQ_TYI1KY`^*=-3^;?y)|V|VPhMm`>(ie;`CX@c{5|$#fAqvZJNcxy zf97NF2bLw@$G+x@3w$Db{$=O;KPl_W@Uk16T^>(A;651|?nSF=M0JZIs4ttev6qgkzvi!cPvOq!3Z9vMT(- zX1SQesqNvpd1Wewm$}hn5R1J0Zxz^(89sXLls0)JPk2P^M6uAeYHO*sbDP74z>^}z zTXmIdf`Z>OB~N>V%Ej{x(8_zzb|zpjqhM9n7II7j{OF#L7-(z0>{P`Mg%k9O@Fj4D z2PDCavxz$RMF$BF*71@_YpvV{3D2WEXmV<3^Jem2ibmT$jT$llk0zZO=n5F5%)_H$vl>x>>+ox3P)L&_DiCFThdO7y5!XCX zW`f!p62%o%XAE@`SCGu$&IOeCH6g(m8AlzYNTNj_3282Q;GjTP2Isey+6mz$v%<`w zf*IK8Wq*d%LDCR0laQ7e@YBmG_9*9J^NqUr$xN&qC)Lx5ove*>^qr7;+U13CDf93U z;uO$|VGb$M5s6qT+JFrv1IWt$!`C19dY{oDfZS`0&@?EGg6?!9oI0ZPj)c6;$%s6l zQ>u&w75wQY;yS==L{-|dM>z{g(03EnwA87Yc;h}qJQT_G$Z1PO-lI;x8c+p741|Qa zzLz`9Dhw&AiQKrOkR!*#W>yhJY37ljISTD+xlR*AZ4+K|WqRd=sA6O@N`NPrZ+%U{ z3XwEWvsxG`@audRa5}3cWvQ4`P$!lnK`Q|;ViCKx0p~(KVdRj5t;ktSC}Akx6DVf3 zJhSUKvvo~bubcl3oP4_PcsPM8@+baUk*2c-OJX)lvYFX5(q2f6D6vFRcqm?-xepSU zGs_9&Ofscm0M&e}U!)}=fpRK@^g$0wgD~V{Q%1=N!w(9D!PAj29@b7&Jbag=ojvsO zgWN22sdOoBWkKlWvJg7+BCYGW6g)95{9u4sOD4$<8+0?@#3qmy$*JlhX0{XM98t@q zjgHgP)o;n-E5P9k&+}g=UlmtdO|}l@6=v8OI!5spv#r`{moM|hum!8vc!6T&-S@16 zh|l`!m;U^nZ~e?EAGhdi_qAo^`ybc0Gz3x_9d4Q$rsfiv!z4DutT4*y#iJFI#stHl z?{bul@UkXNFs%sG1;*g?^|O#5ingk-1qL$)k2G{m6SFf4bqboNodF8NGdM9!?;B_N zbjm+jC{L!GoN2tAmts&W@%c!N%joHFjsjx6)|O#7wUR5Vo%U5?u}MkL7F&na;ukyB zWmSl3<*`+cv#KFV6TX!cbdCI)IK*Q_3_-rCd8+X!L~&tv3y>5fS3Fk<7b_90LMOFC z+fjh%&Ee&im7T?H7g2znVQTBk;bp+tX6!p}Tju3u|G#|4itFUSvh`)(`;z?mvOkux z8GFUDOIF={`O?MTy?M#`bI&{V>@R(2*IAD}dd@D#9P&JyvEk)bjR9d8`Ax^{VY_t* z8bUf%@(W(ZiN~wW@N(&x19mv!u;={IaeLUF?UP@J@3-@Av!C?Lr)+jVwiQzG zDrgDzLV1z`)abh*py*`nh8_u0qw78=!r z5c^j_OY}Jh9zY9#87dbWlZ=gCoXWPpVzcD;(iiW62Q%zxRfrjYDxPd(0QoScu$eR~ zQ?BjGL|MbNYD|LVV9i~k?We^96ZP>T0Kp;=ftq{Y6O_fUQI3F`F^Uo&ccxLm!4w1ZO$)O1Qp2&H9Lw7B&Op zU;-EgOtnRvG^hdAwxO3IP54}MKn7lxUj2$Wpa0~C`?QzUWUI{WzHV2xC1w=M&Gu~j z|K(48^qnB{>yMiQET8i6_gP>5$`?LiarxY{zW^`KzwEo$UVW)AV}r~aZl7$PES4xP z5RCv;S{nD>YcI^EoUeFUq(|iV0J6T*t};^&=TF{P6xfA%nu$8(8$A-GF#U?uF=Zd+`cft457Od zhy9l0`OuOy*D+NS_UJG7Qfi-u8T*zuzRq3D)m{|gPUP{s|8n*p&AaS8%ez;i0{=

K3EF&KKvJ$4wTmJ=Sn8c!FdF+#s51l1?#$AB-kT;AI_T6*@O^g<4%O zQXy7E17Sc_@d+cHk8%i<>*C>3J#xXX~bCh zkfGo{=Ik{_kU6l7B9OKSWsNDkT&hh`yX?7VhcN|lZEkeE2N|NJQ*J@vCETU@pX zCA5Z`dkfM&Px1EUm!5m&<>#-u^#<=XTRw)D*RERy5npoY`CmHY)bq~&Htc-<1?Swl z@}_(5^AFwYtTIE;7hiJT1s8tj>iP3pLGJAK;V*l$Ehr;OZOnGQ1IsYAy(L_iyYy?! zE0hLT#|@CqN&zBM(fs$k%>F0O7J{)&PR$LICPStX(W)U4lB*VCr{V`|!wEXF^R&@j$nVM*5#eoWJG|E*; z=^D087gE%?l*d1O$ifDid5B3?7%4^KI>{wMQgB&tnR0=$!h+JB#kC83?n<(`{n+RL zvb`eKlqDy1Z#o0ZHe>rf_6%@ddH0&-zK?A)wk2kG`L5OS>%+^w`gPCRrS@Zk%+{Be zUw`51<#QKZ@wM~5e)4Y5e%!H#?)s|3Uf^S2_G3fGlAYK#TH9r9`FL!-2ARXiV6p

9h>eAPvgFMX}M?M5#z-?x7986TmMdw2Ic1yO{19J-LhiG$)W z2Dvk6B!Ne_LUhc^DAg(>66FLe_8=OCdo^`5rs)wucf)HjV(G+1}TrRr%hvp`qI9v$kH&^$^LE9}Nw( zSyV`Gv$tm*&?w2qQNRr4i2;Po0tQpaq#{jF4N)3;NZig1$ik(D1l=J@bRn6&63??X z=)qTdfx`3RD9jFtg0N8Z5sJ4&4q!4HRyE(af)KdC05Kke=PmY5cn)AWTl}LffwYTN zF#9YXM6a`yie$7U8NDY$W}{BIHmY?3>XBzwy5mKAvl=J&QUr#38eqZQ_HwUaIEjej zH~d5iz%*%98v6EpfIwAx#{-On8k`-@5l-_Y!Rf|YqI_2kCkc|iGg#jx<4dXL=7xz> zt+8f&8VkdW0{K#{$Tt(5zXNMyHu@xx7TT3-MUH}2=!8?mW64wnH5c+35e%j!IYL1$ z3XU~J@nFyvk1~N!l7d>qsuY-KthH5eNE(wUtx4)=Z|LReYQm7IU=*@bTMZ?LIEUq$ zFg|Ok$N)s%U@&i(v^}3;12#$D33t_X#WN^4lR%fAvPU9;kvQd=(4Nzh5T{yfm=UR# z$RR<$^qTQ%>sr><7EcD4s}k+?(YTxcI5nw*4iSm1%WR;FeEiC!p-T=4;bclB02wrH zyS3apa%Jx_TVHNJcJ$t2?!uCGVktFg%{?a|Q#p+adYxP#xzm|Txq^14Ho?rXhBYBP zHf9Sp1{(rC!Jm9=Bu}U46lOxB5Iet9Zlbv#hl7>=trWa9xM#4Ew8FOq^5lg9l zAts0A&cOmFgQbaDRSf|lIxYqaFN3imxFE2=9;Of>hol^e`GsmgR&WDFLszu;!x5y3 z9t$8*;!Ft6U^2S0oI0GEj8I(c$1OREwRy>*4l-Luit%*HRoyjNcetqjVyj_()3#|w z4nM>>kuw7gbVU3qjc5}^2Bm3$p0#GR^B2rpeC+~{$U4V84g2X|{piO(`9AF2#*%iMIQLQP*8cyjFY9s; z!+e``bJeYKu7Go!#_d;!e7T}&*<@}U9|9u}@36(dfu@R?tl`*HjJa0vJESyea{PdK zo3TBPZkol9&ERUq&PPm5l8p>QG*9G`<`Ru~fG-6JeOQhtsYSu5A*HpIqYoE3)Yc*X zVG<%r($H%-v{a>*SUG74#7T=<8ez#$YLp}><4Sh@&th zpd=w9qldfEb%BKXTU&~vIK&)}yxNV#qK{M`q`2$+_IsQW-_y*M-z!D0tKFeRhV8s|Qct#L zuV3YRUlP;|EU#a=)ITr(=)T+K{!eP%%IkdZ3tnEoYVy^uJ8r$^?zM~k^YVr}7QxHw zR$ggK_LUc&_M+!J;WbCTP&p^O zd$8Yl)UK~PVwd9&pZT8loaY|6=k_~q|Cm2`>}K&Y;07VeA!6tZgoKMBQvkurGK34d zVhB*TBnm2GzzG7lz{0H%t~l8u9VF;VD*z@9kAU3YH(P;q{UjW-1Tc~VFDEmQ4uo#? z_{W1V)_;ZNU^O}<4rptL0%AK1HOtk)mBL_hn8!|+_rBv~u}`9i4cJ&^fw(1RumU)O zKz+{0O@Y}CH7}(12B<|YVUM+7IEbA-`5Rug1PKVUgj-@3(hH3!ZQ+=F8@$17rs^;7 zCD$W4+eDr%H(Os;)+tDY@j9h141nV>gJaf_#j)Opg~A|l#$eV9PPPILmuG-4l^Ku{cvW{#&_Ickt(|&AV+4gL?WZl`G?Js`rq%%+d$XCDgvDX~)viH3G^`HCX z2mF~7r2O4)e$l@$Uv=qstS>KFaGCd)?Z@`7Qi4@ckb9YP#I3EE({0WOy1zSF)C)gX7uI;EN07xZv{O4sguta7)?;xFzK(6Vx$6oF5(bpkS9?#dvhIRjiDLBJla{# z0J#oo#U~h8OPY>wh~Y3&C~760#jijMq^y<-1?QC0Hb_Fgj)&|NG*LwbP=lW+ZTUn_ zX;qlDMWL5E-K2tc(I#A`iH0a_mdGKE6R1X1l|Zd<5{1sadPiEKDB@B{!AwBcQyw;3 zv?atGXp*p&%~~1CaSGr;p-7&tDAW;^ZIrT>UIx43(M6zc>KB8lAQ=%`u1Fv`YH?y< zu(nNCbTdlx!KN0QoIrhKBp%*ys4baP3!#-YVU_0WuM0S7r`j+{I7u)}K@6PuN74jc z*DxSDtk(|f2s1P%lWGP}dSpL<4~sEo=3`$rW-EZkC1q7Ma5}VmbFxeGVQ9m3#L|Ug z8q0OC#~?bwRChgBiJYmkd82;SFtbTu5Azr%>8vMCeA0Y2r6syBH}3K}Xp}2w37H$Q zsby8kgeeJoaxc09)eXlm2@xly1xT?WroD+rk?!crFosEpyejk(^Fs|lt3XlY2?&VF z#P%p1DofD|+XQk#N@z<`5V>ih3V-0YwyGpVzyv(O92-bNnkr4?%6MoO^|4T243O?H zUUwWiPnpW44(k^RItn(0d{yocvB7GBkruzUv{0H}xkG&^<`m3}LRrpQbfzlvTO-9| zWD=NfqYq+MQLU|R>Q2H5DUF3EhtyF@MQj<9H`HbiOR(X|fz>*sh#6fm>aVg{VFsGI z=CFv!v$CG~FjCyk^LC+!OWV}Ii055D(Ze{$jn-T^O*uDt~*%$B#r>>FRUVN1UH84?_Jj1A0TI)##>m%v!OEgfwDixfG{EJVsp7 zL3%YuDQTr`%s3^rl7gyXyz(^0q=*MHL$2J3gQuQiK&K*s3Ra(4%Yi8P*;cJWUdkbi z#F-OFP%GiDN9@VN7|2ODhRWsjt9&?(gc9ZqlQWW1NtL#B5Z&RBNw1Mj9htc?%Oh&V zVE!3;iE>B|QwgVqG|W=kYluw&8{+2Z&R-g1AfH&Rn4}=7N~uDW$t7;)n%RX>*(J*r zL$GHsJE>g^9jbEqa*{sc|49MM0J3G}0hvAcn(P~y`QDfBWBW^~HOsHP^@as&ZeDDO z8D6%;?5~z3c=@3}U%q+1mzPV6t~&d+8!v&E=biugU3PfX2}kX5{3~Acn!{gUOSbLS z<3FVO8!4dJBC-`^2pV3tvJ6D`?JtLjUUOFar^oN}N3VL3_2pv@+7bN~2R!?r7e8~C zXFYzKt$x3Kyqo@?6aWZHfWS5(!MjfWjxe?8DxgA zeEW+fa&iD$Yhl#s5lE6``@Nyv-j${_WZiqCto6n>*aB7QzFy=sPS9meUv7ihT1J3qcTW*GzeJk7FOZCaGFMjr8 zl7C+Q+8G~z*V~TweeBPC?7e54`jO9n>O||ym!AK%i_ZPZ)pO5Xvf$Diu9;<`nC+s#dw?MKa3n zq1`=|r6LMq3o3|0kvn;vp#?2!Swf%!`9$HNPIH{9&?SM(*&3$a6SpfH zF0;n~Ruvd{Vc%5L+|+7)69YL(6G#w6i{@GxdZO|P9?Q*UlsV)1hQ}IUV~BwL_&v9g z!>ZS{_}=h z-w7AY|I0u9J%n!7nVC8sW+Wd2IW&p`J?#Oy<5#6Xtlbnx9WnVXnI}}?Wv8AhHt3F1 z$vmXmvOA6F_Ued{YzXfxJN@&Y{Qd(5&b9Qeg;&qr>hZs~{kBiub(dKu|K$hlBfD#z z*z8)hKmG9ot5@C#5IH*pPUJXuoCT7z55*EQqV6-8&%<9i0nuDC_qYh%s=Y^T048ho zqT%MHfhbA9i4sy{t-c!((wgl=N!&t+TJot@g+Z?+?h;QjL#weyql`@x*8!I!3=T1C z2%QmVZc}QYRT);(Ci~eeVN0n3wx&7>!b^t{l?gF9cn`;MUvAvVnnLNX`(ohn|RNmsz*$sJ9NTZ z&QmT%)m9mHBcBj#MUR}8wd_>TI|V&rvo!6xC`~_}afq{|j8fvUIIBwIbOg)(5whkD z_DFbG*yJQ3W7v~plPM<_MXehgbJ_R4Y`6Z*C;#HAc^8PN3$uMFJJc-XwiEmMYp;~-$F{!g z%h>)jYW12G@UqW*`PdiK?Bm$?-0Nd~>nuIX?b%jkiFw(LOPtGKvL$A?7;3g3yU%6Y zjE&-g04%!*xFA@7)(egb@5mkULW6EfJCyVnQI~sCWM*PemK4r>2#Y~HCVn`0QFgOMcnWTV6QYGbH%u#gBG$X-b+R2n+))qe@ zL?z83qUPDDxmr#(EeXjI49rHC6f`MECzh7FlQBd|suHV=pK}y6mut)Uw5LVy&^!DN1pa!K!?K#!_Fa1fjXj073Sg0!wL=&)uQMHn-Ou6$p!@p zkEDPN9fOO*Bc^T&u2)?`6tuH%6J?3)S`eP~Iw6{OR*RUmYL>CXs%JDd<`{(6e zVZQf{TYT@!vhs#CH?Fwh>Qy&gV{zGEO2Nw(ms@4FvJ5Y;zGdFxtG{{K`KK+t=A3iC z`jJETo8|wPy~1pL*+;GWz!wzkm1G~j?w_RWyGD@|plKVntuIRs@x0|#d&z<8_x;g9 z#~(WTl?OiyfXw^bUk1k@I|;x9-$3T@va;n@2?dS-C_q<`Sq|-@ zV*vV8L4OW`RzbDU7itJZgLu%WkQ-5spAZEC$$<)q@;pIJ6D$tu)%mii< z6we^n#~2U-DIC`7y6JQbQ0MQl?Sv z&!K!q7!*y#Go0{w` zx3cD6s(Ax)?*(M z+9(b+bYFGGY2r>wX1PLThZ02+er&}RT4{??ZFE%`lWJNX&m`No*$80+>w*$GBP`N#L&Ib61mN&j`^2s&NNrd7;dXix}V`suxZp4sR$BTY{GX8IdY2 zrWtD`W)M!PhJ+BV%oTP)Fxbkk8BSM#NVtwczg!soF`Q>O|xH!)cl=qoptw}R%4g_;K2>IuUdZ1{L27p4V~J$qtLS~LQwwefBC2X^*{c- z=(o4O4UmrOAf<+$_x}A~|FhwHRZ$2;(W7t^^T07xJfKR$CJCh`gG9C5A-Xh;qtK|Gf=(nyxG z7LQtOGlPo~*!07ucaj1I{H)S1CtXfZ6{r)^JeAVA`3X*4Sk^N@mM3;FOqzhkSP;OU%lqlO|3C6PSb|69vhMX{u*~ zoZgbMCivNdZFqoKs#R&FRsrYeg+rVuR3urR_Ns8EJvKtf=Rkf&cUa%C6?X~D#PA~g zg168_0lz8*Nid1Iyns-&S(xXr$ZgY{Ks;bY@-UEqayKn= z#IcCt|Dt8xuh(kP5p$XX8&KFFl9yK&Ejuh)#;GsE5;hoL78+Kw(KSjO<=X2aiO5b2 zI>OJ>crajs;@cFy(yTxQb#UM0J=~f9kJG%#W=@*(grhrC9Fpl2keunxO{-2`VfIlk zU;VP3c!tdvH|5o9%$a0X{Cn1?Od`i z8p<1(m#;K8SWN(VqmWoR3dx~4(li9PBn537to(`1sNtbSEij%IIe?ZxZXXGyCJF{x zLuRXuvq>^NqiU#0DYlU}-xx1NR3HSiLv0<_$9Paw)h~34u(4lXFzA|u$6*rG;z7(I z4tZ6-218=y!%jR7hf;?`)kj3lH*z3mwWH&SLcu)5CJ-@tRS}gD$>e`?9#P^|0vS29 zSDQmz9AeuvpH>=k^5s=cH>t~VT2iE^lu9uVqJoBq!;^e2;g3j&R>mcp(}JjL%Ggxl zZ{Q|+H-neE(YSiJQb`J3$DE-h=d5pkNeU>wJm6f0ql41UWd)~so3Y!Eec##@_xS&~ z+aH3=mYDBbx7;VcAZ7o&?9ZemVA)?Vuf6SR`SR<|yZx5Szj?-qFPy!lmzQ6C_-@A? zwyRf|+jI>SD_Av#f~_WRgqPKNc^O{DW&CSO*jzj}H^vUn}!e{^Dw9ov7&Dg;5Wfy+K;_|Y^^L+2?)@6(CS$nIun8&?L#8rsp-X~IW z=&oAyI~|xgYf2*ksDKA^fwx;H9=B$Ed@#MsPUs7wsvq?)Dv0l+NVRV_VsM8li4=-D=_N^^Ty=-P4!?cA6rM_4aiDL23zAxSFInSQp*;zY0)xNuxw=DV9&wuE_qwD7{e)fH%2Cs24EqK~z zV)5E(V(W$1`CV9a%^>ms=g2u3if1QG?PRg&uN7P2ZNj_J(3h6l0Y(w9yvA8S<49= z0`BR%S`?P2f@H*8M1qDW42XzLm9msKl8-L2pIC`Nt+JuByS{6siUd{Ev0N=J^_^8r z;+F6`fwbp044TMug;unyMbTVB$1sfNLvzM0hQRvbb6&Pv`_zxEm||w|S@0}{mwSh? za5@mpw#=hTf$Q{?x4XPhts{h+06OJlL6I_&J(^%Pf=tSIW*9z60Y#PBWs+!%vwq3- zYm~VNn`;yX+R4mU7+8hq>XZi>edVe!YdBhISQkbwRAG>cg5=6%l2a9aLfA%NLx};b zV~m1a^e8YBMnXYOQDK`hY%BCdDTqZW&J>|xC^Rh=jZVVm68U%pqC7~NvI5EYNl1H9 zCE*TP#o#)qJ1msz3yP#L(x7O9M;}X?NQA_xHU>Nr`$rds4Ja+86~3}WEmwK0|BHrn$7z&JOEa+9x7E!{SCCVdOt~+&&2UuccO;-ju zZQTU~PF>R3$q%73kkEh{R-Ts>e(C-(V<)|nZq6)kS=wu2>vf%?qM74Nbefpe6zO;_ zb!uCUntb(orH|NO>AOI!O!W%0s9TurBVb-$_8IM^OKk613=Ugd_62NU*;2B@8*f^2 z)AH+m{tMBTYz14jRfU&58FM=KS?pf2aLRi`Cf(p7bJO(-C;)Y9(D|9i8i<^)+*liE zGjJq_@N&DO1G6ToalxO2F`DLT(-nogXi?xaUiH+#4Tc;Nv^8X%8Z_cj#>VH#Z#dv< zwNm4Yjk-ozj@aB#P}MZZW^}FURAw}pan_t+O(Z$W;B5YAi#TKs8!|&xO`>C~n-1yK z5z^vOB}JDflZED0WdhMPkrbvo;xJ0Kk*px;h@^Q+^8iY!M+c{6rUC=D#BQ3qrm@O3 zXQ(qA_~n@)S4A($U6(c$f|5NoVN4x#n1vZxZQaR5#7dmsVCkBMSyB}?he=bw=0YSX zI3!RLoYPIB*JW&$o}IRayAkR7uxaje?wsxN=qz@KXcGur3?ti^EvR!S@N?RV@enO2 zJAa+jVP~g#`?0MuZ&-aZy!^oWl{RBrVt(+hmEYTN+xIrydhgns;N?}zufBEJ6_W3L z`8c-RC%?SEeBZjozK=co8C&@OWlPM5@As@DU$zr;3?o}smaH0EWp+3yfG`Hhj%$#) z{n#KgW~IOcG-Lk2iRRAxYs8ER%N8%iG3!?SX6rYdlr7Lg1Cw0hm~$8ERGHeP3jbKMkR z{CY&4+eDb%3bkvvwwllddRu5?9=IXlx6r0*Pk!PP$)Si4eL;EIzWWmHb7$1$A|M*9 z5XmEI1@nRf_N;;)4AXbsV3jV@;xB>h+=e)SFV7;lLR)2Pc0dYdOYl&Sf5q}*vK`kj zGQ8}aWq8?AvcHjP#Tk?iEMq{o(ER1o{?aG1Pkis2-~Pw1vc7!!DJPwN%7-sF=gT%@ z&%5}WH(qvF}~C(h{?%j|bd9yU`1r-0wtyMQ9@qbf1mlhUoUUX%e%E zuZpEHV{`x2B+1HB(Vg1SJT??bt<+hGVWTP#&pDeMwF-=M$7ijw6B#if$(I`oo0Efe zoC#<7;InX7L(F<@pr7=F#{s`kvx(T;QlWQZ8%;dr04;sjOB3@- z#3f*0wuYXl7(ulLU2IIwh=CpEX9i2$LK>UV1r=aIKm%Q0G^Cy13b{=%)0-A3-b}Mg z-+zl!WL}9$U=6i6<-Yz63p0S(ChBkh*0)rdFtQcZm6%f@Wlz0Tlc1-s1No;t6j*_k8Y`TM{7 z**o4gdBxl6{BF;mb^NQ35EuRJuYQ8=61w23i=BF`m3&*<4wG%4Hu?KG$+II59K0~@ z`IT(|JizA$k#pF?Gd%39_Ye%{;~w+q7d&saM_ZnE=}0ryu<92i;!uJ8v}l{Ugmhm} zWjMm8;1#6CjVR3}oDdDVFcJoI$PD8mm-vJVN{x#_VHhG6+A140sMbRUNv*>{cSvCB z@R$Y+R*T2Ha>(Z-BkFjgPar?wQN_Y03F8qZCMn}dh=$V8$OGsZh7+6S%;rxyP7>m0 zKLe;9R%yi^4q=J@svRkR(xSOsEs8{V&2eh3A={*EKy3bNg2JP8_dIID~Zy{XV} zB*se`GPSlfPdEv*Yo(xfuAd&rkVfM~6z|P^7Q1cOLS@P75_AkKZMjDbyXY~g z{zi8?I}I_QlOR`?l)I`hAc|HajerM#4N;1Ph@~_M8BdO?=!uo<+9XZwR1_`>BgK}j zg~1L*gYprs7w?JLB;|2fdDFc3w!Fxed>3n!!b@RZ0U-)43@+8FdZcT4pw5;@8FW38 zR1FD3PNLM3LuqN+Bzgy4u2x4FCLATC=ELDM8ii>^F)(!k*r(n*)?d<+gI=n$=y^kJ zu87V#!h4>NZaAdmXo8a&)Q!Hg#N0FMP~^@LGt(3`U7Nk(1pv*P%PwSnA6xIl;YJXS zUgR0&Mdy3Lxy8XwJ#BToE-RQPE_cpBQ`$MAr`g~X@c7iq@+B93YyOoNDSKO4jO~p` zF}CmMk3&dOj`SKF*i-=!p zlIMA%q$Wnv$cKNu{;Wx@3>G4vcLpg0WHju|LeYdS1C7EGCJmX>a|3;tooac^(J`A* z^^pXJ#+15J^AH_#oNU$+<>`K*;28#t{*HzUcuMKjJPC;Gk$1?v<}J3WGCLmm=*1`? z@JeJoywysViaYG!y5v62|*z$BG3xJhELHo*9wXxLAa}lggA65;|EDZGGcP! zw^tXzPi^TXjaVu$kkEQG6sZoU2<62av5bQ|&WKs{Bpxnp>`ig+KA( z_rb!jvn6IPFZ<+|1!aH73`EN&x)6`<*Dxea$Ve0I3kp{j|ewqwcdEimc*vPejC4m{ELKOoc&GLNstPTz)g)=?4)G^Jtuo6sN68WliIoe&4JCN%wPh&GYrjAl zuZqdtwZ#bv(6n6v7MJBl3U2zhfB7AOLSmhQ%fM1o#qFLV6yt7C@F&e7Oe)@B_CNmi zZ@eksp$E7MIC~DG7ZeFa1Pt5@X7a}9a^W!^c5XBTc?qW~!XYp5w(!FPepB6Yxy{)6 zqRc1Sir#e`ocztNf9_3Wonkp?Z0b@?g=JZ)Y4k_x{wK?VwLUr&bvp-JTn5AWs_)|g z1!T(Yi~il;|GFnXz$vAkIe^LLxsPUhE@b2P&O1HrNn364M{j=h-~93i-+yrJ&whH} zB^RCj=-=A{@yNsXJN(dnUVHozm(M+C#m$T6UvMwy2fB@%6w(QI}Q7uwighb zCLsJj{N`7NzyM)mNl?ouILDnP9#-gM>+D7rL(s#Z8weg#;2CZVfat8@Bg&lN4LLf$ zQ6ZuOd?;!Z(6JGs3by3|(m09)^yaWy!-~QBGS%j7bZxPz)j?H8jo8}iuUab5i5)#+ z6@Cd@Wt`1qT_Bl21?t$z5_3rzfAmb!oeXeTN`joU+VG#)xegI44kv2L)YQ1WQh?JA;Zf8W5Kh$0=ztsa>Q|HhEPEr`I$-`3S)TB!zE9O z$ft-8F=Qv^EL6aW$R4%&%cb}`$2C#N5BN=KC#JdTn7j~KHIJB?PhF%vyIvQG3rM@e$Wc?Ei0B;V)ojye_md&@Cu*yLbMYb z5m>gkY@aqFybKZBbnT1=iX~6XY|@62Z8GsM%WuCgkBYjDk_{Tn#0R5a%hfB#GZTbKOSWXz428m zHP9G_aPlQ$8#U!|YA8Hns%?ESyG9xfNmJJSBscv~9BMmM=btD;Cbqh-FcQv~laP^d zGJD9;aN1XubKR(N$Sn?8MUF#qNMJyEvuBv7h)a7ydk`?7=|H;c@Za4lq!Mk<$_5L!vZ1*6T?1`G>(7H3cEdAvEJHyL& z-geXd>u&wN$71VOcz@Z)zGhxzUh4m(z+_Ry|q1irdOUcSXW*^4}ncw=_ zmpFu%Uva>5=j^-ltfxO_b9fo(0}=r@z_u;fkR2#zEmg1znL4Z%1cppiX`)cWKrv3? zI$&5^IdBb%YG`c~S_CRB*8-t9RmBfi3KwVI9o_BXwtw83V&ec1Lz! zF64IeC5@0Q#FNm!y?9{O z*R#C{00^5%P`oq3toCrm2iSE-P#p%>&?2wpb!M|!6a505%|`xr3NS;#BtQDdBe~JK zwnq?LdeR5qr_2B6fB#RCx7u=xt^eTBa}Ij(kA86HUF&bT_wL(%^^5PH_`wf6_R(8B zf0ym|-*>mY_I%Fv+imrjM{RNNfqPzc#d+N5EYoqg2404~eeQd^ZMNQd)-2~2+qe{1 zV_Q5tqmM>}F`iC*`|OF6Z4tNW{eAC#w=+}|zT!q6;8l)dn=&F`qRgx4a)+eJE0d2I zQbR<^=gh-^9vjChhri6U#u~G>hR8!=y&<_TpXvf@4 zR$#m`1`eT7s)@F0*<*C%n@ie~Kr9Io1B|WB934+rNCj@x5voxfQZW(r@cNF}EPMfCxLi6cg$TxaSy3DwQKC?-fK9ou zbJJX0R}8O;Kn*2@B3{iCIATt{U|T3vK^Iu7Q+WVoq$p~o_`4M6jeIEQwBf*sv>BvO zxJzjrsV_yI^|~sx$=^sz>JYPV_!&J96!exynU0x<4Rmoe=u`lYo=fwXCd|mTA;i?8 z&LA4Qe5H3yC%Jjpi8C%Kpd5Tb5ugFnd1o0sB-5}m4_fij?5pR^g{F;&+>|y+Bn30y zEaF2NT~b_Xx|vd~)xa zz2-I_x4+qUy?oBgnlfArHT$lYPhnqw!y@$iAFvhsHjo)!_T6ky8Z;i?zV4LwjV~+9 zy_DhU*`mes=U#S^z1eCf-@|_xLFjVM45|b`ns0K4x(|SG4u?n~*|ujJ_h~2;zah{> zRdWUrMUImjeNN3|P^Sq$8&K#QkxUNvj6$+3~mmj#bIVEgsS9h z7?VNmG$jjB%pcN7Hqi1qD^E@mlAP+~$T4S6D%6r7&sT_eT2UvZqC5+y z4KU?&mcgnp$DsLWn3klmjqAEh4Q3jLQbYzz3K~+BE)=a0!<#gsXHo@}QJcVxa%_X5 zoQ3Ssik#`*#g-ZQbMRnefX?C}B$*Bo@x(UWUli-h7!ZfGHJsjqtric84T#g%tBuNC zth#KDOBS#U5ktp7v6q*7ZP}W#=UvcoOV7Q02Qn-C@Se$EO8GwaJ-1IbW4FF+aar>I zvX_^A?+aM={_>hz768r`mu<#=@b0BP{Iz=dTwAj3$9}<1zwiBJ-}|zI`sI61zTyQe zL(NjJEw|hZ0^4y79m|zrWaW_%C-g&Szy8SQ{pksl&wmm1^1Wy6x92l=+V-)V!^>cr z&>au}4M0Np)L{@6lv9AwP@uoCu!1JwAi-egvZ1zsG6DafKK`jc& zurLq6IQoJ?747i5C1W{e{Q|(fr%9cqc5j<9TR>&u4|W4~zyW)Jwc-$pfMqDy+Oh4{ zpZdrL;AKb|#aFWXGPW(*l3FXy{!i-cuYBt3U;e}g-~Fa{{K>0r#y;zdANT(9H_!Uq zcfN7DzhGXv=<*dy7hJzkevLnq@~=|vXhOIGKe=0;Ft3>ajxoT1vf0xTi_7q}Wlc>y zFfq#XGJsO+wr%FO2uqhO_nNar&~7Of(G~?t$+1!_r-&=U5yBN+@jlC^0l|Yenx+ zM3jj~X)@76oenz%vKE8GL8)AKI6*>TfTX!ek`0L5m2h$hXR!I!V~QjvQ>5C`dUvb!;|9^1%?^WSF6003H-=F$n3!sKP8kS8=8wP4uWNS1b4z z=K~Z%eb4}#AW#8AAzb|EkC=)Hgro(_z2 z3q=tflAs}vWd;hz5+cXrf~iv`Ky!>zhlZXFjBE~!X@&=gvs&5+%E1$YNcSzu)}2PvV^Y`BOf6-6H=Vcd2z` z>FO(dG5%swEiOaTmtT4=>}+qR6jX-Ue*juv=WVlx$5Ij=uhe#lZ4qmtTCrh3AHsF>n_zvJdrAAq= zO>%zFut^T2pf@EFsJ4a<6Xg&{A>X0N>>sWnVxj7l=Y^CX`f*G3?RnUnvhjm%2OhwenVSOi7LX57gFOow%auQgA zuDPUD{Di2@B!!3$QH$n4}Ja+=9s6Ni{9)60^2gjrHWV8fx1*oY1jh`%l< zM9c$n0#UqnAvZ3x#K2vI7!=+{9AY;r4Ey<^Q#+qzn_S!JL}RA1q_)&a)OA^?p(YLy ztCgpB9L7%<3Q>}Vh`m3ed8!F9X(u1U-^}`QS5&oKDivH{5y51*7+wa40cTsXQ5+&F zpuor$njJp$YV-P4eH{D2J6FTYmYDBbcMGI!m09xg@?EQM0FwjDlTT!?Uf|={_G2sD zv0|S6*kAv`2X@|Wi#dDmU_bUDFWp``Z13$MW#HG!vd?){u(;eC$_^oA3(!8J4Ncp3 zEmwXh9k}CB`)~i+BPZY1CTjl|ZoBUu+wVU6DVxK~ARdeizCi>+UyvE5gLYw3!9P0K zE9wrzqC<1a;Yx+dm_^#yAWFy%{KN(i!l#z8FpGWxRXpG!jc^|Tiw#8e-${aG(NRbi zbI6n^1&0A-1^84-qie6E3Y|1ct?nsglFKq(m{H(XX0#ZcKp=%K@DqN)Z;6@ZAOkFn z&T=3czbF^lWu%TET1tR|!FGJln-Ev9J| zir@j8L}v2Q$E_BF(-V(Vl_<8kDNFeA5bN$-RE***#zuR$X@%k+bK5$sO~12u+6lt4 zEc@(dJxjrux@$9uZynkR46Io-_N2o4sqbhRBWD4~=>)NfTEvLo-QTB}jff`jGH?wt zD}$vR$N)VRnSil~bp63rTPavM_33Gwmi=v+kDr^FLS?8Ba{-24=-u$c@PICGLfDFS zY@iAQnBk+}B%3X4gT#3QksBL0lhb?gdIZk<+bKx74_{}kCpl1R-4VlkXkfcFKkF}~ ztcl$bh-AV|!Ekefa67$CX?k5Qpj3yvE$Q+}A9_1byvGZly=vu>pa1LuTe9z1v+SM? zD{Z}AIR7H+&h}=%^Br%p?yPF%iff70U%4*)$AAAf9)p+J%rn;F$yCcZP>J42B8VUu z*zOF)@cI)@@QQ@BbzU+$A!P+~2ET$ipi_E5RMXNJai&_&G?bg|P>;)z>wrrW6_}ff zYB@Z&8!4W5dZH+sgyc}cD#Xqm)=K=JSZtUH#DmCtYBdRR63g?Bny`dDorZ+hS+!E`B3!Lr%c;{A4@D@n;6W#pDoOTAazLfI)!-cIqZ|lYuZUGhh3$OVjs%CK2+?7R`Z|ZQYctFNc>CSij^B>qTQ~ zfZP#0H8mEh8ejI;+5B9N!a}W#wO0JVJVX?O!bn66E??x-DFqq}RCJ`VM#P3;ToR3M z-IrI!j7PZ(CPny5`6MCACJ7$7G9nekGS~oRp;l@QilkOI^-h_csRWZMMPJiPzGh%`VX z6UaOQ6*f}DGlUerieMESA`XqyO2xY3UiL(!5nhuVY9cR;S7r|h4tYk$_0e!Euz>+= z(^JNnY#1bE43qi1?snG&sKm-?o+nm;KnM=8zF)iW>ctDLGC-z{S*@pp=RE~e5mCS< z>~ZO!j&K)zw9(#-^YfF z{h1W(?915xPs+Y)AN%q@Q;@QP4ciLWEV}B#i_X2+>ABzv=-BCOSBdl5Sua^xUa;`W z%jaDJEK3eu2Rtj|LyqE7vyOY^I@oAV(^a8k<6FJvbDR>-@DJR9SyEdfp;Rkn5)7`{ zfMTl13xeI?X6QYdBT^Zw$WGS|hXAdK>Av(+txxyhLN@i3bgjhiaMa*z$tT~gb z(NTzc#FWgMIA=Lo!`f;tWnnqJ9F?y$$$XGv_i^l7mR{`r<$KpI@_lT0 z+55{!9yr^_zAOsQdGXT^*=t)L@`8)OVaWH*$G*6Kj8c~1Xqeg>G&J3xPhoC7`mmR7 zbHMITJaYeMXn4RL+q~p?Tf@uyzG%nI;bl8E!?proNEUnm5EQ^SAQYVRk|WTE2dsu+ z(Ovfh3GFL}G6KRVgMlaz&&3cnbcMu;hgfZuVM#zkTndwF?n_p1B3%F}@EDM^)~tzj zRiF~AfQ4~t!XCC^R!cZ(0C-hhjZ>kyy|O5pPzOFcRL~p)!-!=qv2yR0q8BP#WyVIP zhLTq`)2RC?_0wq&OYyd&s)#@(kX5$UpeEm9>#hl**`|Lh#Wxf zE7@LHZcQ1{inH&4p@7vkV|#_!`ttkV`Gzl__EE`xY=6Go`^yXGoqyf@OT4`7wP=4S z1uqNR1gq}y-Ob(C3OJo0?s~-%Y6~sg1X0`&JCEE4QPkor0#PX8DXC3}=qqNNc-*z+ zu10Rza(b~9!_Y;thLVCqiAIT$T45$l(nP|8s4adfQsjw*=MNT?y|WDAdgIvtB3a}$ zH-x(Yg4|lFC1(4s!AX!=**m;=EJiy87)GElUeG8i03~2LpNZ=~yMagBykTmpE!x&c zkk<58MF<3{zy|BK-iWtOt($t?Wrww73ZAJDrSLhkN52qt7SU4|dIkUFi~nI3_}A8G zo2B^#LLo#72a$#oDm0}{a%kQ9vcwF_#~$Voh#4*u!*d!NsQ%Y~{s(6k9?uqB59tNy z2q8_!DRsd_cW&AjQ7c5KADd1&xwQ3{)BpUt-xzWJr$R@-Y6#c=TX8-g0_M*5?9@A_ zC)GBwQ>sU*Y$oR)-Q)++yJ_hRWrmT`DaH^>ILVq1r{Oev;Ul?n;XNTG{ICE;nhV<` z2J{p{!J&q(0G_8QlWBD1G;$Gv3L4UgUKJaR2se_f2{t@*;V~TH4ix^uP+xdVa)_=| zy*{j8tU^>pBe5)(sAG~tL!%0X>o9XQb#lhjLRWrhMr9u7*NWb>$3_vCD$5Bmo#e{o z<0LIJIHVNVWJ8_8lX)DXp}gT>HU@3gG8<6`)gop+t|tA9HmJrSfUo2-kAnw zrg79(ce+zMzwji7&8`52g+5%@VE1M4?M7JSYNY>&pUW>rGx)w7%?6vK3P} z9cpeVNX005QP>|HKX0RB;2-MH zQ5+VE7cnMQtx(@vv+fBw@S8zvOmolAfBtDh5+BJ#DZFAP9Tc&gP3 zFC~S-)N&M@_|aXhG8IKqasm@*-+5D>!5c!)U}N#qW^(Nvu0_ zyKIaMU)Lqm&v2{i{)kwtYfOurvx<;0g|aFNBH_cTJRbmIOeV?erUEEu0`oRxjkzjz zVi;n9>kGeo&R4#4`lT11hfUYCGAm4b-wIoJ)jSNe8zXvwM;2;JDW1t|(KGv#(Y4Yx zv+1H2W|w2;Z4;heMykRLOhNG^Gl^IYc~VZ1*~nmRxm{;b8j(CqNU1K!36%35tU*z1 z^2sSSgGMvBUQ>ZtZDTa;&vXqf3YsIPf-xwPLK@LCj7({0VS_|+!U-v{hcss3loT8y z)|?-tVcuxsWYIO8R1@e>l>!wJ2c4+&VnED1iXvi8rb5l-nh-8mj;=g9k|GpJ`8M|^ zJZ5FRkUNw*)Lcglv{5#c3M`3@sPLe}5<4|SX9kMHAuWL@^;9j%`B@p8G9I?kg6D63 z{#UMM-clb8@#vRC2xgXOsFobFS8~W6l#$|>=QP4yo#aW-P+>YzF;Y#2ggT`yO7d}P zODTsmI>SN*bhQc^N<)r96rr8ua^dPB6=Qc9>89Y+aczk{bCK~5(F7*TVPrY@+$uAG zJeHN+;(%rUuD*8VGB18UxM9tY?qC0-``12j*KK#*expB2zjMv94R_ph&$^rLUBCS9 zwM*BpzV7y0uUU2T{5x)1xMAgzwac%*apAeEmtFF}x&>c3^&LArb&Dep+F{N<+Z^=L zryjch4o4g~%Re%|e4m{T-+%V8FWdPIhwuL8qxO`){_q#P`RF~~aKx_19x&^;gLgV= z-)9`M=O4`3<4K3TWb32$d-^di+wSQ7x1F=w;|_SSqO{)J*!a=A|A{@ zA5H}cas+ZEORnG~Bxo5LPQ+lzl^hHbI>|t&h#FD`R54J2Sv;>QP)UL^=GL{tx!^6m zLRoE9;iryF6ei(Uu&a}_+G0bY3$rr#+#AYjwROlUJyMo4k~I7vt$t~xwkG_5hY%Es z%%Lx3kz1p!3V0(+;1kO@)zDW+1!BWhB1o9@;eMHS{wE@kIefiYrqO~@%> zivf|e+J>{llhtBKox=o1kxLpDkQ0bOTk7P>Lxn?8wBhqwE;Q%sYF_5`;;jw9=r~;> z6f7)S-+a!qCkxOpGHeSw+aYg?x;_vRJb?w6;m{uMUibw+cbPq#Gys9wK_nL$i=LXi z=2fq<3mW<+7N>_Q%oasTx?I$%)g+joK^$W2AlT>{HdXWj5k|xqDT}%dkX2w}&3p{- z0y&t8(pFfD;xRsf?AJlLsr#;XyaS#^v?NWSscyuv1;DY9<_%@1Fwdx^QAZ?T4K~p) z4(DXl%tCgka!F10mtmFrRP>!(!6~Y$Z)JOeO|S#%ai?W=u(cDMG|RS5u2* zefM+%T{+dN_zh7ncuy;iGA`)NR@zG1O2p!4E#dJn04J+DAJI`v zCr*%9(lkKVeHv@7fKsnJLCR4Ir>jL|5(xl18>q-L%*fRI!+3>Z8%1n$$grw3L>ZRT z&b=g{Cqa4oFlm}FfJu~U+la)5f{vnt4C7BS=^Z^%K#>&C%|^P6uW4rVNpr8)+@YQ- zNHIH{s3RPHoLOuFmaQ^NK=co-yZMU3%v;P8m^||Zu-2P|K&9zWQ$Q<*7wvm_6J8cX ziR4kHXBuOqw99GNan|ylT$L^q+h|S{ErJ#~QKF{JB~ef@PoYskQF1iNNO4gikP}MB zr7pjt%yrX=yD&Ig+24ez7QGa4RK-7uqh{L%oFwpOoWa_blAWa4%Ac3qiwOA zj?@=)wVI&o6gg6O2L&$1#!+ERVI=-_(Tmad1hOh8ppPJ&B6NquqT?hPF}EaHtCGW%j>BmN ztqhs-+PD}mBdE+PrdF}8T7@3Tx=Oo-(?voCYhIA7K>kG9!!dk3EMzCc*q~hepEri( zI8hoMW6H^@hP4`EAQ>IXn197(*Dqcetd6jagK%x1P;}i#GeFWc1VbF z1!aZY6(=`J_{mIft)>rAG)!;wQ6@RdF_ct|TFjtdmJG|YA(=H%&dG+7mz2>7L1$zQ z2|-a6Pa}d&VR!(uga@TQRy8E(DU-}1Y^um~!MXVSbG@^Vo?&T}Q-l+&mK#c(X0>E; zYKtLG!lx4=JBLrILaC=CA5BzECuV5d@F`2Rjh}F;38^ZBRVj=nNgHxp(o5YhTu(-F zk?AgsGQ|TvKa`;6z_LYWo3X>oEiMndeE*%R9=L0D>E3lKHr&44Kfl4t_pH15-u1V@ z%XhE!LF^@WtXu>v-+JSfH!qsIdg;}7+;r9QMdz)!?))`3U3kW4-?sbgtq(t7`-As> z8pwR)K|39N$ZX5a@bY2%?R?CBJH6)MU5-2Gxkv5utRwe+<||*e)6x6waOjJ-Ire}Z zkKS+lBldpUEB1Z*u?KGdnnRu?9XDsk!}ok5$b872PlA^Ze9_iNAMotM_L)6<`^Rq% zFZ+j9m>2W|_X4@f)<#9t(4X&6iN=*-MytRUk)cIw&@Q+KfRqE9Ag53nML>=@NC;@O zsEQ6VLYLU&z~=wu>pp-!t*Qk7H#tfW6(k2q4N4A8H*}!MIV(vrN)iMK5+oQv)KNhl zMHB>OKoFEHIZ6;m-PzsR+O65$Nsi9w?9A@$)<4vL|DX5$%B^2@!@hOu*16}NbkBXB z=RN0lpI}I;tjN{c<5Nr0%(NO)HLYsq1GTgnVnsEL8IK`sQu?gWhDafbT$e05V3B4m z2C{}Y57DZfxq(MlE4OC|1Du|QP;v$x!h|T93umwuhvvdJE*YW+B1()3h5b8op%C!I zD9(Oifas>}mOuTI4>iCMw&bNu|3k$b@#{=3v&3xreU-s&-Yc9X^V>X?? z_SGAzhgoVdVX2S z?2}SI`0m$iF+X_koxZ>P^rQECidh9Amy;69rER&2s-G;>%l;jYOx1tnvMFbf1PtyB z(v}k;by2!QuWxi4A;M??(*h)bZzsvxYLX9OJiN@gp?)jW}Y$m)d!KchTx z$1*l|Tstt7U2L_jCQDIB3ZrMAh14C<*xV8Ab`D944QR}20bUUiB{8QNE|_3a~c@6^UIJRV-q(orXy!zOrD3qI# zDO&}$;FiGM-5pfT-89)!J&Czv(hzZ~s3jG^(c>J&2-6)2MgtUV=3Yp(Bc6iL!eQvg z6CykWMY4Js4`=d$9^yi$tg{m;Vy4q5uXxCuDWn0NIi92N&~$*%QLd$y9z#R`&7_C` zY#R-14GOeD=p5LQ)P5O{{eFh4>zcCb}RGwz&$=>l%b-8!jpU}>@gq~ zLslphtJLD5dB`SCge^xJFc->+=W_u}Gby8(jTRzdf#*MdwoDMK+DoSFG=;EGHYRR(V*Osz+rrX?6HKVAJ@wiaB9}=A&vfN;W zAVai~wWmp8j-{N=GWiyTAizIh&`IUP$Yaxtfe1q~Ri^3ZNv2UiY#0LFNf>kp6X1`O zH@`P+IV=jWOjf-AWr&Jpts~!JdHRMFpLs$94^4_Q?N3d{9}f>j8$aKK4yAze8Gjmt zg_Bh2f~2&FhO%Weo{j`(ilZHU4Y66I0b3JkE-@4_cs`SbYjwj@l2;8);*&h0R}j)% zFqsaCsEth=3<26w$dJ+4;EQ+J%G}^d7I8QUryqkM5w^-e+|q^!k7yKb`uQ*{W~DYm zq;!Q>i=Vg_f3iaybI{7d@nge~b=?$B+A26q(3aW}E@!#}n>fKqHj55AoOlJ%70-xc zl?eu>DNS+c77&+u^^e<%a*XDnOzRO7eN~SLR(+@~$Z6$k~y&n<) zMAz*hJ-Mt#RxDR9qd>fW;8(9buVnTi9`6=@>1nrKKWu;bwHF`z^H-mG<7ZE(mw)!$ zgD*b)qi275&r^@yrCxsVv3p5pDRdHD99-1U{aZoT%Rvk#oP(UJ>htyR6e_wF0+ zH*XW4V^%NE-+9Bs=WTJs{4JHt>g9zyym#MS)>AL(?Kkj#?6<3zCH!hqrJoX5edoSn^__Ch>xq;V zHka)d*)3IdYO6_&y5Csohp6-eHepGjs4Eqrb(M{VFjw%Z8I86bOb}hGELAwGm#eRh zD#j}(RlX26IV$IKiL0dYQaK(WjdB4(i$=BJh*PWp*c3x}AOp+FU^M&4z|t=xcp z;&F%jSgi{B8#mwhg&VH6?ayeQk@`F8j?HhcF@GC!#)r(8*g-nmcEXhxC=e7*sd3aT{9s4EPconIT+0?KD0a@d^xkyE{T zOJM)$AAh6lrIR+`x=Ig=oPo2FA&bt67z(LU+U!g`2XF%J=|Z?I=KM)vQ`0uee+cHJ z5;RKT;2g}N!LBZTf^g38Eyw{*k}bk;aYN?-W`@uK4GhVAO;gD!m^4#}uHwdE$^d^R z3?b5np%p=)p;d$Phf&a^;$fG2*m77L1i|^pD5PV?XsYng&uH!y5JP}YqeC;7Ootvj z{?*HA=m=pkbNPT79Rr1ibn}N5hbYKg`kBBMU;~tpN-!&krimw==^?J`aIz4l<}A== z2;HHM4MH0tS#Drv4pH#&a}tp_RoFUa1dtw_96(VotAf?Sz*~U4{72X3vJK@5qG9@pdGi<)0|r;AC|cS7Xlq;dyRTVic6}MQj#kL^GX|&!$YXg(*0 zMWNOW*b+bd1ZmJx?nX*(7!q*%o86?%3UfyB&|D3U4bVed{=+F$V$1f~3J}vz2@Tp1 z3Na5rq$h}>Ig)LC4Mj1ULhXPeU?^ZHTlqFMyXkYVb>SVbnr3e|xiis_W6#fh&XK9n zH#I=GstRz(0SZw@{B`+Yj(7m;aG?`bxKQLOHr<%>85^&%0%?vA5Sx{9f3>?Qlp$gs z2x&(Po&Z11T0@L#vzUjulyYpMO%KEjXI2J-nkV37YEM|20Pl-|8G~@{$bd(fE z93o?7W5W{zWRl-#6Bj)qoC$Qhu;`8zBnZc5h-p8d8FlV+-c|a0-?>HnS&0oN-?W%r zH-w8JLx^OXAO!yaU4SQGNPM&yimlNRVMN_bu#!1~DH&~oNlmu+X=v_{1R;1@J~${o zh%Oqg>cHiFTw7d2q%sg%H+0~E0)#Eh~1JsSugkevYWBJSlC_JFFg5> zdfEQ+FJ5`_SFb-aT`WKT%JUDs^o&nO-K$=PHedf8Z&xoX z^ppl_!zw-{v(i;rCoz^Dd+U*J&?xj2cd9&fpQ=+yXtN1L^5xR5oUF?t!FR$@`~(>&OP1Bu|2A+C=Psf*|5jPFxsNR~dDKXl z7de$m1WVYLwXb=MkmGbFLFlHh5|Z^%S)rs z$)&6)$J{ZiweLx%k2)!1-BC?6E9DF+OgLj7$tJrBk!B`bI{3!4 zr+w|-+A~#AO3T{-r9l`eRFY^R*V@V^Tul-1lxYAf&9IPzw=5v%_i1tYnTMGvXmOb}{N&|qSU?_BAD0=8E0sj2aH4z@tnFCFM6pB$I z^oS4KG{uKYh*?C$)&$XU#ve|hDPYiru*g-Kr>eYZhnqDK6i1E-x5`|_K#)a1KMhS5 zGd($ zK(nJzGDa3SRirm8%4AB77H@eX=PAW1%hHVqStPB}&zbpyei z6meYPK|#l$r3!^k*Qfi$ZEypDJT*{Lu!-`Q>EU56BJ0FNwLp8zL z=7IsF2L;~Jn-^l5X|rU4@5JeM32cZ)EgWDGvwCfm9vEmaWi?BVF^Y%J1mQ%O)AFsz z)H-1y#33|re(1zWgCTC`PU@Vrm6bxIEsTtY>ttT#cX zIX@szaFTMR(hAq8mveP^OHUqhgSLVU$sA;GPC9^Y1zQf{5LS}yjpFWM;5It($x(QK zUp&WAnm7%JeC|31cL3mLnnl01*}P60en`$Up^JeyuK;A&Z3=bKaRNL7P6HEgJf=?@ z2K3fK7ab2Gz?M|qxt1$f1+NtzZ|^u%>5o- ze&YT+o_ydo`^!JQ|C{&R_L)y!aQK`Z-nDS{+KT0!wq9x0wrlLZ;t?Y%|>QB;UpRiqXhqEfN)P(7GtL^UHIj!m_zEXAxubvL;0-FVbjSqfB? zs#HU5%N~8i!3?QEJZme(Z}G#DZxo0wUWuAK_NXcqZ3r~rkN)wX@=9Rb}%)}aI-cAN)Qu=2#7cf6x=?t zS3zF-iF4Lld-YXTT2k%Y=CT_3=CAl3)+Las+pupC)z2%R)xzH5^@;P&@P2H2%AQ%a zn{3nBgUsq>1@i6R`l^!I*0Ty)d(#)M{N_#9ee$ByK6>gA*IoIs8$WYN-;=uimOr`g zu3H}d(Vadi<^9<1#P-Us=bxCmp;Z3to@B+NEt^tPnO^yxf5HAi%e6rNsi#uVH}hxz znw>#GwU2+?C0Zk4!z0}y?q7MiajIyO6~jx(Le!fOMhy+nZ8SspbIG}k!c`22ETS}| zEotYEiB!X4@PaAnH2*otD+NdihBRBr)BdSU18xhI0~o4sX=X*&zA6f}5@1iE+N74Z zrvyJevSKDwmo7MfhZM4f?!cDkQ=-dZQhd`Db|q@LYt!dH1~{H(M^8R@q53jQF6ltE z!5zg+ffAnE=0#9f*~=CWX|`l3q(l(-$wLTc6R~joVVyP(3qtWxlKYOc$GW{I&HK=t zBmBqX1ayze{5@v5(H(_2DSj$jm}Y`F&($w}tiW%Gt0=ZL8PfH`g|gbbw0x)t!a$r? zcz6rZ#Y4jYbQBtxfE30=5KYRWqy!NVOvIFn5<#3~3kV+NKQ^NorPCDJ`!>owT|^ql zHnm(elprDmGfn`zKhwCm)QC zIWhz%(%gp|#HrQdcTa@_IOSwh&N15HL?{6eT||b87)qlkjwsrK_-k*jupC*;~^T7AEFp7XTxJU5u1*ozz|B& z#!zBOgMv%q+C9CVW%lweNi1y&E&+u6)nEQc_+5A0Hh1=}%Pu+fS;qC(UejkSq?DK? zz*Na6bJZ+R0715_GJlxn1BW?)6JX2vf=`1{L@S6hKXY=-my#IDesyV}zeOiP!taGJO}MVJ1GKDG*=kPxwmDr?nrS{~0+DGxk1k4v z80D>yDsOo^qSR#99F>%a7A-upj0vbXrybcw=GL3LiWv0yW#Nu|Gm>|H3LIWN! zLTzZ$95-4cJp6>4P880Nih&7T)6WE+T;&Noz)VR2UnJ(FjE*5}#Pfu#E{Gvy5rqWb zj?O)!#So8foZ8H3Y0hoL03{RL0F!h>qYEKk1jMA($1hOXML{oaIO!w~@Q92>F~_05 zp~;qh-8@-j%DN_{Wyq1-yr=?`oO!5?f5f?2H_FKXF^gK1bh6^m>Ui^*jC=kax18r# z%rxio=(0izkKb?{X2jtr=z&>_i7N~SL+X7L7ZyB_V!|0d&{?byhlVlD3>l(CFd6Ek z=qDg-7)C(&xyUpN6o!=RGFK4b=Jhkr1TG)}djAV_ zUHrS?sFyvqJYI<1o!D(R8){Ekiw=~`KY!_|0g7cM^UKdZ^y1S$@xHHVc+maWo?jkd zf7$cPPyP7zXCAuksh@oNfgjv>#f3-i{QgxuzdY*YJ$714z3k!TeRtp7ZPy8*p}jUf>)2gCcH+F#j-GYe z(Q}U6XUG3Tz3fp_NNPSj9&QCr}A0 z6&00ulz~c46wHcKl_UNH@vCZ?!=NH_uXSIyaXV{;ssfWOda$WRZD#rYj(XE$q`uVC z1JpG0fl(zYz@~a7r6RWBOLKIp46(?6zE#5OBGS;k*#vXmJ-VEv0TAZ^6FkQTX?A4& zsz3xl2y!w!wN>K8Ib*&mZhEweaWO&UhEdvxQ%efPmlV{<-tqOJqYv`puLmEv%U1Gd zu6573N3gFq>K^R2o4<6UZS>1j$4X^pBzsrE3=Y(mIul_{r@fhG$g0R=4_I5m3Q@zTRTp@&Nyco3c7rZNZR~QFFiwn zJ0<;RHv~~H8$yhEl0OkVslq^ta^Av+R5**~f|&!{R!j279I;izkp(8g1apFb%^?cG zV2+Ahl$c#8sHF{so_339Q+f!U!qAY+L@SNg5@!f5q$+e22tO&8IXB8E3J0XqG#CCMGG4H(a7HeC{K4$G{W!60j?)Nn2sBLN-wsxY;E^)Di^mBn}@}FiN z@}c>fvbhj14qSpXW&D5!^Ci_v!6B>^A`EaqDvF7S2ImvR4<`8y0X!Ya1Ti>HL{Bu( z{lvi^&ci*ajln3-c?IVQz?=!o!B7VeD-g^mI6*$t|4Mx&(CQ@h0hQs;1-2%)bgCT$yyoYKgPLXruTMb+Msn;by@NM_s--BszezjvoU-%qC*8V(x0A@N_c_X2&NF zjfx&ldT7HBT5CfXyct`i3?QIgW{)%Xz%q)$xgy%;GNSZmK)H(K#9@l43zv|{%Bq(o zLLB2T2h5^1<+-)vMqNjgc}1}nE=+P5O6ZiB(VzyDx}dxzWRdB-f?z`&zLgP`1!?)i z3V=-FoDr5*LBz&&yJJslt$`y4ba*3%I|~MIl||NF<|>ZN$Sh=x=rXjDwA|Km+bAFs z11HhTFd>M+5HUfcfXE~Wm)okPg%aHgXE$B{(?9$@L8~l*lMHo)|Sbv z^{&;IUwVn1w)=orPT68sTuXgv<};W0pjfE38gnr^GR&z=a=KVX#K8YCO|k`nhtVlC z1@6hKfvwUsBN7i%$W@Sr;!umHfG}!;qh5wnMHU-|sbIeO-@(#2yq$wuc*_m9h@B}; z5Z5$5@Rpu@W*x#+lVX%w%8hE#Sw~?-l*|`1Iw&k001=5G(lDmO5W#U&Sfkbs0t`{; z#8HL_TBVem{t5~#As5JbVU4>PE%_-eH$=h^Tj`4(xve9@I0U= z2kPQUc}PLfB4XgPn8eQnx+8&x!5n8Muwl*<_#%*9qj?3vfEaz?6avJ-xrqP7IaWM# zgDhqu$qsRXIE|t-3*qVn)OPm*1P}3ehW{vlhSsi_V+Qb683X0S#l+=;LSDIOIBq$E z2WQOmaFwd`5Xsf7FiHwT4liQSD4;NBjFMW!PZZ(-AMn5n2oK>b<^w#pjmD6|x`twl zC$%V(#4hZv7=bHE;7ZcP#J>`$9l+^7f)d!1%Y96$4P|vPiXz%A*`r=I6nK8w%dtJe ztZE+hvXa^UvZt7PKlbY{c~|yS)og$HS1&#O`ZEu{`qX{TJ@UQB?)%1lcU*t*xd(5# z;S%$9d5?M-ESR;fN0>do>}G8H%ZKi@5^V}A# z57eri)vzi%HKu1g`)I6MQj)B=Qy@!~6@VD36WfGU0Jc@CV6>g8@KhD52Wdm6RxYi6 zR7zJfs#r0&IaVS3fvq;1x%uYqXmzuzQ3}igxW|9)xfz>UK9K510Q7L;>|#deFgI`#^s@jWJxrtMs*aV$9#8fl^W8tV)!VSI z|BO4;KkKe+&oAp%O=Cl}*X$l_m9qP;zjf1%_L>#THkW;E+2*njF)NwfuB?_QK}&Zf^O7$Awf$d~fH<2V{%hk&9A2X3{3>&ECZhO< zZb<7i)CDy6?;f!I4lcb#T_DN;6K&EOsoHWEGn)y#j zwwObtkhrQ%Hg3zjC)o7yo8y>;9}v)Wh13Ljq8g=K zB~G?g&TVuItr{hCqI0GwK@0`MLn}f#2A<;wG#e#OIe=@8q8sG~kpM*yFc8OYnc#eI z=Kwccqr5WR#V3EPoe=R>7eAwvm?ofv3DYlhD0m7xwj!Tq8W;lTR1p+OvXIzpwJbZK@R(nh%{V;i#w;g4Cs8EUOb;qQ1h1 zorgt9Kn9!mIA{S^5#t};(utzmL>w4+tO$B>Hp&p9_0pmY?jgZ1=<^W%@tJQ|jVABc(d_wPkITMe1eS%}VBq<=0<)(u2$%URE-@8QaUg zx+~jf)qnZglfQW7G4--rvY&h6htEBHcklbMzx?n$H``yn@}i@**=#v4`%)~=^|I>) zn;x?Fmiy1!T)o`$%j)HQcV1t~tXQ7E?R&f&+b!9L&fVzn`J1>I+b!9)l@FW0!Lj>q zamwM_o_)-Ym!3BNi&q}^Cs&^xa!=uDdsOMCLR3o{RRE$WGigu{0>v_-dRZatF=V^9*xG=W zGk9`&^;K4oj>^56z`zY9DmPTbUb=hm0ejoMato+!Xi1R*nX3>+6}(Dj<^U54Rh|>1 zM+T+fRXa1v9A>V<@z^oc?WS%#6q4nY8oy0;qgCAIOQ9iCM&T9B+=CM|-;C190bLHW zqKgeMXKxyX90oMRNM_XODP)%6q-k!k$@%L+m5lT<$Ycw|?s@N@+E< zy=Gg?3S`wWP$t_=*3~MFRm#BDvr5@^vm3Luc&`5BS)ab@JkKv5e&C#see}4`fBIte zvYWAeObWOk`_5Z$ax?Y=ci-*+_GCd7r4mn;-o}%hQ_5$(xMxRcTU_qr&M{@us3ccwHv&!~o$oAR?I*D?AuT2@?p+# zWmGM13Y_ut#XtYyw=~ehVJ5T=4@#I8cD@->yffiP0AVyi)4{3h;CJy^XRS5-Z0L(! z9!|+FDZnVrC{&3I%@Eqia*RSaN`P$usr(!hEULVs6Pwm7xPi_||C`JOZXukKDB^(4 zFNc0$bo{VLq}4&k%qRzn408ZQ7biMbfo{G*0J<(tJOfOGTE~nKvEC0a*cfGzA$}6< zU{1M7bsSF8Igf|7LZ{7K^Cie_#2mwkuFJH^vaV$@B{^x7Hc}j5(UqBp0*->sC=K** z7@#oM)Y5E2c|e7;&E;N>tzHg1(X495_N>Ky{u$8zTIsh;3CK^RHz*Oyov?8!2h6C0 zK^GKf?m2gyuvU|k!bp$3Gx?I6guQvyp((kN3xTq}tf)9B2P37#=_b{PcH*#6HBuvq zP#GQ5#mX^ebp(os0>U($4}OPn_X47G$MJ?pgrB6WPR`?ph(976Ky(phiR`jfu?#`6 zImi?Aa#0~dHyzWIn;~)=Kc7q3aKdvw&vTwCg=aUq%OOn}k8kw~jv~}8mpg2;_4~Kn zY~8iq?OtoJ#;PlSVCLqZ|Lk>Y)&Ki{{`WoS&QVHB7iR6Wy<4)u*=L^i*Z=uH{o!}N zv8KGT(X3^3!%X%ovB5iJRXPuJkgy6V4SuPONn9~TRzLAYxP_DRFyx| zz+BKm41Z$KGGu7@#@i0{+)e|YywW;)5fu)ONgy~yn3ZISH#>BPfiCfqd2~dFM{eK; z!vUVFIHE+PldW7$tl*mTL7 zujz9vo>9}qPYGKCvVxf)oDY))U~KVZ0$ZWTNkm;ZPJ%e!5|~a9o8r`5NI6?w>$qx^ zT?|b>ot(!^+nCyL#spnmi5q1U1OF+6kXqb<^rQ;gh>W8Mu$~AcPD(P$B?u^#F8ltgVspQWO1aNS*;7^^R}|Y~_C&LXmtT0+eUkRw{C4d%o?e^tc>Bw~CuNKI zjhCPFNhx2F0>A$Gvp})z`^$=D@W#&`{nhJF*k69-$p@Z&=+0N4zUTQzZ-3?KJ5E1- zckjpcC8_zluIVexHkW-;%Kq}+vp01=w(m)K<<|)Z?BEm3M=aRF`@ZZi&;7tV7w+(G z;ALOx<)il6M6rC@k=vhl;+*r2-Q^3H9(Cj8AG-MTea`sc>|^%Z#`l*GSg_qD>#XMc z%RU0T#1hlrUtVHa-(Ox~<<*sn3P4|2c8{gItaa6i3cBvcR?m8BSlK9_ROlH}9x5Bv zyvk(tpcY+)s1j5XDg$vERVTKwYX}9{z*S0PnBG*q)}FGh?b*BTptx31wkzF!E@oET zII8^g*z(FNEUkFgWkU5zIh}mA!yCUnab3l-=b*_lp;lFm;$)N(qlo-rh0m4JJV&f_ zRnO9lQst}CHz^b%CTm_y8^XXb8VqSinamcRYG=xUE+4Q7snoq$BjUuyNp@@1#Ws;u z%65=Hea)o`XU{JyncahJyIGm6UcT`BGf`|b<5V5nTn3OyT$v2K=F1J+71ZkGDW2;u z^?vO0&pP(FqxQP^{8PN|%lD+b@9R%)xYAp)AGrIw_uTQ#m!5v)@t@q|W^6D1k{c@) zfV@uPBcUp7mb*e^eVr_2tr9{!aHkLYNW+|55~`s8;D#`lY?t@;Z{5Fg2q6BJ`PgJ8 zFo~D5F@Z=4fJ|zbOzODTC|AuW;13vnlI4R58a1irj-jr^8#Gx;unkpBUvO|TL?P3F z!t6FjUE0#XP}a4D6onop;z6WGTh33L5Yd1Ve{Pc!QWq|lGC1Wpb#nk`zH!X5T2RVS z-VTI_ASrI=tx1hIkfjHt1oHsSu%0=Ba2j+|rHh!ujYeUMh9Z{fr10oEgmR`Sz&l{* z!Nvzb|FGrwG(iqKbnRtx+XMIt0XLkh0_b#|hmcZ&;Z!DQtEW^7zMbv)Yu!Km!{6CW z_O`=!zw;en^Y?AwQs9R*F0obUI56x|+bj@S$xqJ10dI%r)WaHz~{K2P-_e;dehZR~lG>a4cK2!dSZ)rxin+)8KZKHYIdToE%o^qSZDrMj?XVo+w3BPnp{;zZ@ zNt0d3G-M-^8I+MA=}OPI!GCsH=MRekpL4)b=k62$F=Pvp5}DywzjUK=&%?H=3?DzD zjb=kiunM`bazm_BXhRVJxh(dv%t z1$)f)CWWmwf8VAXt^e-TS5Yr-zR89wF1wU>qewm}+#doYiAu>XhhwP zI-{`}D%H=B`3_Yh?2N+Eiv}l6Im+M(wr=23SB7P5?oCBXYG>WUz`8AF8;13J*wQtr23we zk{O^VlV5(}r&>GCUc~t7%kC0n=R(W5?}!-Nz1uTyn;~ zXP+?dlp}Zh@ZodMJn^9Uvp%3+wq?A`GD|P9#8NY+>*ZxvSVak`V)a!T4?#tr@Cb9y zFS`xf&Dg%beDC*fl^$0w%XmwiB=hQy65#&-PNhlHrJob0Bv$SuiE>VXkT{)C?Q1yK za8g1)g~Tx!1&L&f9&}JL)+rg=A}yYA3PVOZM`Ut{GkIH%H#ApMXi)+db*ObQ7928c>L6I51@!i>jg*Q0OfEZ1e-WI5ky@G=kuS3e)jYeoR^Zhra8rmAwh?t z>cHGhgdiY7rvSQ2ti9$t-Gl9GVh8QNmrD8AV-DYcp9KnM)+xlsPqNu%6izd`)D+|x zPY}gW7Z3pu(=l0r#mO!QsMTd!+i}GioMb7-uhpfWLJXalh!l2jHk?k~h}7~e%|sB3 zVe>Tv;P_cAe})V?&zl`tzFGH<8Hb;)QGyg2#YTAn9Bn+hMlAt=@Lx+77^0z2(9?m$ ze*w{=4^@QQz_A2)2q1!1 z+#Cje#&LFVo0G*557`#c4^2ayAiN+?O=`N(ke=~2-{8V!UUqhXoSb`rd#XhU58X`8xBvRje-JV5X~&)sTbyKFKFI{5tT4(nhxrM| zUsSQ7Xp1mGv%uh24)^5SL1xH{xVPDA3(olU>Rv!gJJOn|RGtGd`jIz}nwb{ki za$qPptfwJaEqY2wVb1v>MG($;ZL18ypAU^{hfa7psclq<5;5!c_@=V@iAq1m*d4NI zizk0Zv~$C%wc?{Y^~=)q<}&L!prtTzK1}LQbE7e{$g6@g>08=Tg+C7yEL??}aura_ z#1XmYFhlPgA(DOwXCiWbc!i$Yff%AEfP*wdXA45lh!Rc;!K6rnlkMaj*&066GbTRi zL)RsSOa~6&ti-c;yND7DIXqc@;t_BW^D`rVAPx)+==iy06b{Hb>XPK zi8rZyGeij-(%DLixlG_RRD9xRXcCIKcG6;;Nzs%3q|%1i`Ob4jJ9?W4J!8rcvf}CJ zC6X;Jah0HLp&^|^N%5Vn|j&50GAq96_-!{$4drWVbsN=SXM7rGPlvJOa|`3 zRwh?0+jRanKYy`LFuNuD^%sAtUjF54Pup33$wSI7JnDYzU%md6`>{R3{Mw5T0WbUV zitIO@`|%sk|LBn)e(mx5Z$9m~Io|iR&mQaTH*bRj7Hs6>%hk*6FS{Ar$Cs7N!2UAu zvM=@W&YLc`<0i{&yWv#HJZI|_J-KXi`RelzzUJKhKYji|*M9ua%g;Jcz5L z(RFdn6f~T2kexZ4X*@s!~=s+lNJ|mQ*JyFqMb)dA-`J zEmvi=eOUKYQ>d;~4og`f>PnR2pJ77k9reDpgt%x>XAF(pWQ8F8X1J<=BZkqYo zWYO6*O$a4yQA$(~}?s+R){c9z>*_P#Im@|QpV$y1I$P`&KoWngpJ{n)<0 z{Da$W_A#mZ|Li-D+<%u_vX#sbsi}I|W)?&`uJH<+gNf}kJ$o%ohd!>s+;w+yWT}YW54Q~mgsFf;61~|h#KxZ0w6=0*8A==A5%0>xae z7E&z`@aXgq!JJw+DQy=tbZmgm*yLy!h|~F&K)GOgdcDWA)l(9CoJplhe6Z2kER1r# zX|`B~>gH8#Y=#E376LUczlQimKW|AHLLmsz8Zskof}DW^Ji{(;xvJ~J{`=qktrM*i z#~0)s%+%VVwX*Jmbx#paN-(2Sg6>+h*1K2pD98W)fB%>3q2K7dY0y>9IqUXpBHWm? zTR?~lNM6CY#QCz;JjaG^Uce|EbG53jvmgrG1Rc+!2LW6nVxt+6&Vt~u;E7{{3~w_yZ6Zb? zi9mE&1Q6@R3TDor7}aHts{w_0TrMo1A5O=SkXJbUi04O9%gO-Se7{rYAl)jBYs`b=a(b+-#GB_S;)2tz=fu(nH*6u_ewi<`~T`oL3dp zIEnD8Y?FwgpbVBCoLaJe^x)oiZ~4=$x7hTZtE^-l?RT&KjuqeY&Q)EA-nH604%~My zyUdqga*^LgeC3KBN}6MfDBa~A3ZT%ab}+0R_Jox)WoD;6}VlLM6XHVA)YD-W^|))KnZ~G zb5bWmeCQ_*$3vsTdR_dGT!l1EZKKrU*UhM3Lab~Kb9JmE=3sb;r-kU48Gh;-n)D%~ z+(3*CG8l$G7>x3Vh5&9-(XkO}^&uFvG+PA#iAV$tD7%L?cJ@bBJGFWX#JEc>3+&z^n6^UEGy zRxf*sS>gO|-gpN5@-<(Wdi3WnKcao%(I39?+{3?k{wJ?J^TUVkzDd1&>anx8+hPU9 z^8WKTR4?0K_Wk7ryKdm&WhJvR*_*MaiskuR9)G}gCmpiW5eqh3u-&`89Q%OX)?K*c zyXI`Y(){gKRWExd_Qhu`{M?0ye(LOfrwZru4?pYpxyK!}!*K`CKK_UW`^?>OxuuqL z>7M>ua;c?PP>0P}YWWk6J*utbdAskbv{VDy3{@D~*p)zfh)*qSzg0o1{8T))UyMfu zZ6j4-i>MY>fsz6=ctzNUk3UAWXpdNJNDxK-S%ImvRBTq6Dj3z3y6Rl@sRCBYN>F+0 z25ctypq8IAYe!a?P$1g~R&r8ef4RDthW(Cx6f_*&a+*9jlks!a!Z@1z(8Wo?WqwV$)e!?dfLUlTt5xc=^bK_qgoB z(?5C98Q%2ut(&jE@iUj*eaAN*ec%U5=DWZ5E$_!xFS{jMlBtxFi^?-)u%*6`@?oi< zf4(J4hJdcD3F6%G-&uYoA(WTuVlz~p3@3|C9*al%N}*O4kI{ez3IQZaRu*rKGU3d1 z3Nt|`kdaQG!hiHI$JoUz6{IA!a5Dn<(~J|5GhC9TS6it*ASm?bm=-h<2wP2)?4+$c zuf-hb4khsFg{+s`3y3KQ-DGeu5T_g+6fSybU=+m^FcdNLvRP8OM6gNWG+%gLE#Atw z5dHY$M8j#{Q%f_V=@Vp#HUQx#dedpWbTPA=Z}0(e@+Y8;5~BopTL2NM#gGO=S+~Bf z(z=6zwtT>kh+h{{WrWiW*gmf9YV*F0*7f#-xpQ`r?aL}fl}>AFYW--#y0EB86)$Gw z0_b)cSCUm%Ue067-u`Wkn4{do!i99iVa%6Bnw>I~b5&(6l0?M893e!Kf>VOul#L=1 z;XGC7JOR!fUH_X<91s1sqKB*KMoE$7El6Q49+ZCOB1o0^P=YS1hS(~+^klKPVbIkA z6g)WbPx?$x%tj$JYHkX)ja+?No4B*z((zq6R zxtXZqOpq)d??7{<;XOY)-_&KhGLWpw7P{$++#Hb^gZvx9( zJn)v)?1B_pWgh03R`vxTz@+>L;00VZUJ*-ZIVNZW#1JP962C+g*HbZk^KMdz3{ZHZ z82X3b{EZJZy0_W?Z+ER5^1BHW!r~He$9KPN>sjK*5Q`oke&2@coqx_pz4gnj+9GTR z-|cZ?g@K1MOyS}{BGpZKg;KqYxone%3AQM8QFsOSa$(cX9^L9@+ozr$pXnf4OxZtyQzE zPNL>Q7SK#jK$TFBy2B8PJRb#JpmY;4G+fHbw84c!KrEs~5fJO*1Tk4d?2>9mO_mhX zlX1|+qs5jvI9Y(6hg=#(LHzU>YF-eo4sQ*iCz8*mgThIR>i}@faMgjTeC8+3lNxa5 zfS4Ed{Le#PwS2=3=7#?`W6myBGzV}*qfISNj-@tE41hm$4z-II!hn)gNH{t}6at~h zoK_sv%lTl4a{NO%H-=zkHIYWqA-c2`B9#13YOuwrI{<|sqbP}>=njV$LDHZV94jKi z1Q~4!GM8g0PF16{aSSpmYB z#1OVtVbbW3%5BQ=fUF?$hiq{(5vPeE8W15`Jh3^3{a{F8hKj9;V~d#G#oILGNp?xO z{=X%-lK6+^ADMp#a2FGo6c^FAtCv03tWxf7ZST-_W466!#5R|ESJF#QKjeE--t^V` zz96r?IQ0o;ulaiB@w>HCUtxaiUi-^0J%0BKkKOs`yX&~FJHZ^_3-li zUDuhr)4IODtYkiN!AzC1dKscvKI5p_$L{xjTg-~(!{%-5V^ZFa?PXscUcT_u`Inrz z*JY}mb>f|HmAGYr<+ibq>yH{Uc`B}ZZ%(5%ZnBHPm$@(0O zV$%c5>OLi*dzcl8c4KX_AgWIll}cuHqN-UsZL8Ov+e&7f3P9zs>aR`bID1!^NB#yHp(+#;)S*>&>t7ugPV^AX- zQc73Ss^x7kp;X#)%uInen6Eg{tmb8?dfD88T3E{mdmr8buId*MJkurZ1bz-tpzhWmWS%cYSA_wN~A9{R9ENhj^$ulbz%ReQ_5>B|x%4y5%9SgF=R+L$M7_-CWQz=iAb9+pG|PgcM(L5hQXokjIi`-OtMTcof5JT zZeyd6cwL?}mlW1DG{9)y(q^&%Vid8MG>RTk7qCK5g6E*p;-nvsA&60s9$knJWV@%- zK?qk&`KtRWD=q89QEj=!I-^usUmB_c6wW5%z2bP>iS5+Jzx*;&FF90e@4MGLXSV}} zh=-c9n;wqw2S6}XeXEqcq8aXII={}LC&C{dVs@x>j_3-ez%uPoYmpaiH9mwteB&5O zv!evFmWD=~pgD>FRY2T$!cWyDactx8HiUuOg$o;?0R<#p8_{t>1Mzgmuf-F?Kyb2% zSqoh2T%wuA7C#1ER*dEkoo1os47y1Pg0MhPVoO^-5I0R{0T55XI<-Jd9HmNT<|rJ% zG;IJU9vT3hO%~gDc)2^S+g8@9l&8KP3hUp?2 z)uPL1qBs+?&aq;}4by>}J1|ohSk9Y;8Of~Vv0eh~*5bY9@8R(q`Iww1D^9{P8TxUm zJ`BlfsxnxV^D9r#OOP;2rKCV|p%SDb(>ii2AG89(6TY<&+y>}MV=Ll}gW$(tA}9&s zv19N%yW||bdctXU1DpGi; z^}ejV7tD2@s+Zkd?Tb+&fQ|k~AACTKMvzf4M3+X|vfpKgZ9PhC&sWiU^x=p2o|Nq& zMg5*INn3A6v{en(Zv3 zE898zo4@`q_JdmAR;~4F$+RI1v0E0cg=5R8ITA0_ihVe3B*v`s4|NtMI6)} z`csYw$yQFLT&qjGdJTh6aDbl)Hp)GKE!k$r7O6r_KO&bbv;)S^7j6mgGmCZMB+fKo z8+uZr9X?FzPi>>kg-e4tDV64-Hs*BlmTAlc#~g7EqSy*`(Z+)_A0WdVBn=R1hm-gP z3ZT%5XNV)#)g~fPR?5K)X)VxmQWtTkC6dKt(=(V`4$K)%75;qVMx2YcT1y zFPOPXPpe5kI?fcf+{q>qu4MvGr%ZMyeSik8O+j$p?IV`8!Yk_?EwT?f%QoKg|8uDrMlV z>^*l|Z|=_T_5Ee_@?N{W&&$44%Iz{AxBs?E=A#yF;SuHq+pVEq{?L9~9J9}69$vP& zeEvy$oOAqc7oRfsf)jT8+$BeOKel@LBgf7=;n3Z8+IpQ0)?9hx_22C|Sfj9iN_u7gH7sLWu|gZ-3la}s=Pht&6Y+Ys_)c^%4Ef|Do=~9qEs(y<=9n` z@@>1r?c{E?#U`p_o2qqHzPf5mJZe(Aw~Aufsr_7)tP)bi3fy|l8K$M340({yUDk?v z74FeT9K7y(-{rAHm9l+XO4Oc8Y`eEU(7fN?^KDYAYCU+U(57FyORZO#xv5e~3plgs z#_O@@0cX`Qe-y1Ez%2Uz?3G_Yz3et@t=D|b-gO&al~PKtyuyqRY_&0p zO=pF$&1IjIy7ux5eTCV!vfX5$Qm$lHBlj#b3Wm>JbHQ!j`usur?6T|j8y~v=oMR5# z^XgBYciDw!UU|vccisLq&oBSvhqwFwl>4!(m!+m9x6(O?Y*VsVg6fp<&sXl`|FQ(E zOia=xr*$4FMieYkEOfb+GtLPWPv=+pl}wBh;w5ZaJfuo=aSn)B4zPIZR0U-5m*8n> z&PuXniK8{dE8V8XG}-iMrG;G(%GJ$CPU}fSRX}xXjWIWhDPyuC1{vp!07diX5u*ODbj3A-FlIhTl7FRofLIA#nyq`w6sNF4Dk!IE*t z{n$t+ITIx&P#io`Q1n|O3qsA3v1qv-JFg<_^%y&*ABg&z>a zoGnALK}1i6ii6GKu4#&8N2VZP7DmiOs1hO+l`_ONMBVDt_TYmL*x8kaxRQ8m`5%A# zH)5g+?vTzaEue=6;sS12X|N6b!J`gUYx@MAf|rQHAP>3M;ph0z0f+<470gA$)ke%K zoLek&!O)aBPh=beEb<3H@}#f`TC(^%ZAGIYE!^B8R26@+0*hEow5DT-TE_{_MIc)# zF@(**#tp55%@2c47Due$w=}6@jy6Os4Q8P=gr_)|BSe=%Z9|yN0zd{48|x_5zv!l^ zMImd5Dlq&sw8$xAw8xXjL(0nJc9!ipL;9o?yvLleRo2PeKq;#%s6xO&jv~Y(aTx-ZN;9vs|SI|+%&rUiB9`u~=7^Y+Z_f$z{AsVu{ zq9k)u5-OS=df+~lh_X>F%t`U#InL&y3*eMU^Gw7}9|q#|P;1ttP>7q6V)+|4ec2Yg zC(BeiQa%YG4Mtf}m`nc@>9&|r>k<(!-;x0G6Zt$%<76TD&L2e2hsIq`~?U+TpQ;_xo5Hj@rxA- zI!+J+dTVDWXHpC2gBBeFhyiiZ5F%i75>O~7o5J{WLxdeIW7V)N4mh!OT#=;>B|llO z$|HryC?p;TPPoY$$}2dEA)Iu=i4Va%)DGE(nhU(R4gJ^<0p(3?$R>gg1P~{wVy=ag zTEtu)5`^R@L4a9z=z-AI5Dn2u;n4~RfV*mmn84H`-_+V;*FQye1!R@?#EUpgP%R;O<$0QUwQr^ zFZ;5y-21-lFaPX``@J8#dime|{KuDn{0Q~3o3Yi)-uLBZ?0LJaJ#XjrY%Y60_Hp}d zckI5~_P(#97S23+udRH1dCpd=?zZJh2k*YYaR;m)(-B^7Ton<(8h>#&zeg z{7p^gK4f=6tDqHnib|EEtz>nj5)Yyjm0PxZt2|U`Vo=-K_9d%sRRyp1j(6A;R!(}s zwfu9%SFsii0VpGYFN9&KDdms;&RK%TJ3uC&As9a=g%SN7Hh?_ zQCrFKOy#G-R?8pEwxoHYuthY-!w=bCi7g9NEE^@F)Ky=5*SGu3-9ha}J{0_Z8Q{5j z(NET7QFN=X%~Z`xv+|ZR=y*(1&5J=sO$pz)p+H6v3ipts5K+;qpb^#bwz|xbU^O+n zH2A4Oz3fe2-r{xD^|*wpWTcNS6M5ouekV(J8u2r5eMzQ`!1U+md`lpaIg8g;krw1 z`PvPSKX}&<@BEhUFRQQZFS{SxZ$5H48J&Nx{tL@Cg}Rhj?pKyoq9r>mvz3&VFgv0A zJ1$4P>#Q4__Dd!xC*&+xjBFt&2q7XmNwOP+fY+S`d z1LsjnxpC@BXaOYvLSdsEkc}(8luOmE7>G0@svy*+Sw%LkC)w5JFvoD_Qr8(cDMYpu zIDkc_5&19LkgQjIu&ZkbTbwkclRuOgN})N@BRRJs6Zx_I%t}cLnuux%~@eo^x%d7gkyl`EK-OWkR9e6 zs47-c`2Xdfe&;%~%1SF9ap(cog@~K3DZvA$gtr7y{KBIshT9$XtJ+G-FY99Ez8cpV zR_s~Z@mB?_WHDG}K8(nlg?M5^!9clUnV{eLd~1Mg_hwU$&7znTz%P1jCA;vt_nU}I z#QSG%>WX2PnZx)^1Y&}`bsBQp$wgJ6bC7V%OsVPsYepy-@LN6>3mpx3;7pq`CB=;i z8`(6I#Zy>xG2qk=gcKfgC(?W|i$6E~;9%k`c6WrT-?2Bsf{j9)Ca%@(>cIi-VQV^k zgXg>!@L=+Ct07XLd`9&;C#1NK=cN}eq9g(lm$_G(pU<}2|lO{GMvS{J6J zg{))nkofqPlZ{0_)5Z!T;44ZIol<68}-kIsG``mhY98U*4#C z`N8||{rkW9ubk(VPhXLu$Il?#AcY8=d%(N^Js_i$vjq?G$0%k}^gs}K>oDk~(6&HU zt@G4D*h*>P0DbBT_GTToh+PUPccfuRGasUuJ|buG1d^-ROi&EbQTkb<9b3=fdrP@$ zR9NtvxKZ8$@xg2sO@s;E=9_P{bqwe-U3f8LGn6MS83#~?+lJr(5_4)*NuXdl*G$|Y z$}vB%Xe|dT?yxmPDkUgF(Rr7v3^nl)H{GmbScE?xa%PbhCrBs6>>Q%pbC5I5Vfu+s z-eI7HqvX#R(xn7*ZfH5LYZf5SG1HmDT+L5i#2APqg)^5@$PK<_3tkv?5&1(EwnVhF z4LvDu2s1FM3-Hi0#51>e%Q16tkZ1+RkrRbxU4hC6OHbigvf+G=Wje3&fkH!W<0(*i zHEA6)h5(K&Z?(EjGh#;qwZx+*MJI@ddtFb68=W+i2+koj6w*mt*Qm*+jdgTgN{pH^ z4G^P_9Pu$q#N~)Z2>$%n;+Zr(i5m)Nn*^7Ow*>z|{O{1ZhV)O|CB=V)xBH%yU1qnL zJo}XA)TZvl_Lgj~{DS<4U%&S6fBA~4*@Mi#e0A!RQtd21{n))PJoRJ6^6M`>>NQ{9 zj17EM>gUfs^y`;@`s!2nJ@?4>9{i+>h{AwP2G>ZVAi^O&f3uX zu@9Ix^OQq(J@v3zCmy)tRLQ(xi^Jw^w%3kp?Xua5Gv7PI7W0t{H?hC0Ot!h~{n%$7 zv(vGAZ*uZMGq1VekWZa==!u8!aP|p%df(SttIqIxuVt5o$pT6{l&s~1w4OhB_ z7<}P|YrpvU>y*G#uh+ir>V@-mmjSN0>@u@<+VR9=Kjf!iIe-r<3BD`^2`|$}8-ind@#a~frf6s z^;=ZAZ+y9>XV`<@ew&$Ub014puyb2=@B7ZOPzsgIJhzRm^!6UJX5`jr1+@zItTRuM zDo=0qUZh@D$-0|b2`nm9euc1K5>)-d z$N?1J%yQ2;AN=@W=bc)=AsAJj+j6 z_?DZ$pjdW4_C@EOZg*Jsx+^YHM0b0(P3OMFY$v++V_$aRsi&QI@YSC@=S$ar@~TVE z`RW(026ukvtGyrF!^O>W;veLxm+TLGldxV zQ^sD7O#=$e5T2(8o(3&uIju5NHZIXsKbpQ4vFTSnSf1B-%gG@J+?qrdp`SHYkE^0i<6`sV=t%BwHD@Z!_prI&qi$!ns$GFoXlg}?Ut z)T)tXw4+Ej6TEeTaEyBljZvHonNfg0B!6^sNiZQaJ1^)FA{0DLhlnmiC=8hbiuqz5 z`H8$;!cclC6M*QdQ{={KC|Pkin0u}5BliKR9%1g~*a~EKV!I_<>&azoisfJZ`VGt~ z>mGT=sh~#hb5x_^MptE%o!Y9Nu3Y%ZVNO~#b98_?KKU;!X4F-5gyMI<`Kx^v&h=l+ zG2j^`N3WYA9Kam_jyJ#907sO=W~xM*-sv_w)rzi^Ou}Qn(jl!ll*<`{Q{q^1_(7Z< zG;@Pvt)PR1hk;`Zv@VtmgX1TplQaetv*09WOo53~bP={5yyBi80a&*Rh$=_%=NnG> zs-x@WpFQ{LYcIa>i&q>@9cgtPd$0f8SG}yxtAFut|NNJ~dfidykZ=OB*so!J`u1Ob zsTni;^2|Mf<-A`tMb*;{jvV|R(Gq;eM#qVVDJ%-S$wKfrVV&S2<2M9)h&$Q+{^Npx z=zmFIC#Kys2GS(1%pBxh)#$x%>If<#3Tl`J%& z2@)hK2$&EVa%`IJ1{#_Mx|@v5=!`S7GrK!`X2YD>2|K$pvomM+oc+A-SD*baVb7^k zr=F@?w{G1^_g7WV^C*H2r8ei#jN;s*V@KKbLZG??R+a#x!bzrD$cs7s^A$vaGB#3> z5iboR!<WQf`1*h`BA#gtlwqzQtxO)- z6_IYm=P*~gg2dBFU4?i86T>uC6)-_;)SFcC^rD=5bSy>&A?t-&5Gbzd;HlOb-YTms zoQD$eN6ZPJM4;AuG_t26oeC)6GX6sc>Z^_c5VdZApwu&qAm=U;WC@>|CgGBgpT||L|RNOEZkfKl;rdh_V0n z+?-kFjelnd{|<&XNGk=mK~908T*MlswU37NK#yUTltYT)K zA+L% zCFM|>!Q?`Z6(`_Pq_PC$#Q`5-MN5@M7(+BQqDa-W0g@yYq={uzoYZQ8o0q86_n{Nn(;*USXmlD5Dsz>QokAOsUBrtY~4%F`{wi zXg+Qw$yL)JrYqG%j|{yH2dM;8D4Me8dlacJ=~22Izp*LCoWti#9pW4K!g`}i*Z#l! zn_v6zAQQzTHhO0nh~~R}?Mq*yJJ{n$-}{1^-+ucQ?ya|8e)G+j-hBOq*A}`QHEY33 zGy4nk{O6x|cKYwldFtK;&p-C++$ZMEe(;6a_x#bj&++Haeejtl?|O0e&#$=f#GSTW z_29iXIbe^C587j+!zOQX=$;$xzw^2W?)u3iCQmwSuMLmdchWKYZ~D0dHv8fsTYcf6 zE%x7e?d>Nmy~TPT*>#H*r|h)WfxE7I#N>@Z(xbIu8S-*VYWmz;Uj z^;1v&>aqJCw)d86t-L6_-2bE&@&D!Dx{oZf=tp2N$PQ9wBCfOMssJ6-2H80hwJ2DFo_>T=i`ZxGkmDe2Ca=?*Q5w2L+dfDj;ZNz9;4&+v&MeA$jQvm~|W6 z!8areK|3OMgWaO(fE$#yWWdg@l>391+o7`4l3WB(kfUdMKJM6~X_XVhIi3c(kvW}L zIzSC{6HGy5ATzw|>=;JA@Y`pA%-m0IzY$WNcG1}(XxFd>rk?i=F%F}DcnfGe_1v>g z@QLipzVr3(U44Q3*#5ly^B>*h``C}&{}cDIpPBxU%h>ato$iR)y-L$^P^E<|GnKt3 zp7Fp25MzmK0Yzgqhmso}rRHR85tPwL^fB5_ije49rm6lzVZJPNRy%;hp3x8sx@!qb; z|Ni%X?VopLTz9Rt$i~2CFncW;8CBD#W`R9289rXa31^u#f-4<+phvFzzb+Re}OMz zjc`o>2U&PoS#jnID-=(9NGgK`bCMIg)Jhq$h%YQ7g2>QQLNNlInD;oP$cSF8wQ0Rq zT^Iu}9y^Lf#203$FdGEKKs%jpf|g>~3C}(^vxDP~m>IF*TtJwi+u^d0eIc9UEo1k+ z$LP4PgmoUx2dE{%%aF3{eD0TKfZzRhiKsNJz4RObqkvjOYDAB8MPn+$X=44vt47N# zxtI>tFCJ7AjfsmY%>4e7xYPDq)6b~4jPED2f>buVtX}Htu#B&Qu&Ps9y9DClQv*k# z#NZnyaJNErup*nX_#PXB`iWA6#TXQtPgr(+9xI*JBPWZbQKO9aCETNSsnzA%wkAA6 zaNb~Lqh^M&#DqsUdc6I@*VA;ZmtKCJ^Z2V@zw-ybdRq^yr`fsznZ*!;-V|gGhs6vr zFJ$oc`e+w^EEc&goWKo>P zov8NaJra_wA(Ni<1WZuMqIM$1=sbG90iqOgYXaJKu&dEo$l3XNf=*%x1THd<`D%m@ zp7`hn#1IhE;8HXv#v_pkBdj9TMVJ8M)wk}gIwXOVX`7*9GNgZ78EhEJvFD(55UV&J zPt37Wo$5jikMYE|DyIr%2~>13Wz?M*|EeiaSbaeNyI<3G^h5v2;(MN@@QqaT_U zAZ6+bEF(Y}%8-{6<}0PLgu#<07HpeLM9K&(ziZ7kSX9cn>XB1WB@V^OTM|FEl8|xois3NiA`pnlAZPJO7<~-I zP>VION6Kj&r6rMil%C!MWjq?N{3cI>FaKJ9>?YHCF2k*YYG5c@+`2#mU_TVkSb&-R}XBmSh%?r`eoc0TEltu8!e%8gS`xcr;PT==ykjy-7m9XDNl zxy453%O8lB!9&iUMuAF5FbD}I1Bp&-!DOegd;qgk&RcG};-(uezv22z0cYQHzVh-5 zF2Cgb%chRu zOkHY;MXgi7K1wehLZI*_bgvdnV#mszGD~HVf}HB>3|+k}d5|?Su+$6>kXrQsZD*{i zv-ci5%i>---x7cls&!0kV}Wr?V_>zjU?M%GXsN@Q5XT0GU^1%SdzSn zq1?nva#aXK2t+RFB z=uH?kHF>J3Tpls0Q53-;=4&zpDM#tDE4N0J>M_pplB()w63|A?&Ggd$eJ9Wz3}kU2 z12Swyy7*zduwZRy&}3&%$OsP$^W81clhuNWn}j0Su_DPYV6rm4+_y2T2#FO?V=MzK zV2jDnb{4~!ZgmD9Fcj(R*x#mV>#9J0G!BJH?)?!5#O5NGu`%T6hnC2oL|8+B{4`)J z8|gTsA-sTg^uo1i!uJdD@$tXK)E*c1q{oOEcQ~=sJf_*0OMaxGb5Qy z%psPKi^psvsjMtRgPwIfTQ^GotN-*D-d6CPG&6<{mTOX#n)1nCyZ|xy2qXy@#bP`L z-*9H>Ij3{c%Ui>bK6oEQ%r1qRo$r7o#G`YI2ZTZ@_L$CAU$Q@=%guXW=s%s~TgpH2>`Hz23;a-lg(5tVm zVvrIWF@k^jr~gL7HYm{0vs{|Sw4+ic0Wp?xGfeT4E=&TZ4jm5|)V;L` z{z~^beIHXLn>WZT97>eM>|`CW;$y4#Y61mO8kb_}lT$m7>A%Tbj3!9!BoRg+FD3@B zB?)JJ1F;roB)eD4K%!)i6s}Pk0{TXe+1n#CevE$`zt}2W`-mixf;a&k6?5{`+{)z$ zz!8aTQU&;|0{YjT%$817>+*>zpe?GPD(g$Sx<^h}j5>ywJvJ6383>Z-L01fFdF1p-t6DPy6-6l{4q*owQ<@}t zb2cXzAVab8;)XzcVA(#K;SU|d%O(N7u*o6U9ThXo=(zcJ7+EQ~0E_tdl$f0Ktl}Do zs;yA;MhPYjU5-yP6h*fhlqP9OT~xCdFSS!R#Xw855vohH@MH8WG13z2vRfe`R_24 zXLYcP3!_1H@UnOy^wyiNgqIyIUvu?$`~}Hxf-k-J+>0;#HcV}g@uusq0dgHHue;_N z>#XsK3(q~rBRG8Zw7xyNxNl!@Q1j{^Ur}*h+JcL%vwrm8kq=y-e)5THud#+NdpK?F zhPKb4_#P%O?y|R|XV81vB^UXcggZ#ECy46SG@EwYt+(EDw_VKDASa|elJ=(S;7$LO z!F;);m-f=MgJV1Pl2lESs=>=~tN&^<5DnX&N2*&s73&gy1bHbwKz!-Ml^fHZ;( zCG)e#mNsi&D|)S-6aWf`~;T<0J`ZX(*Q?KeNP2N^Fk&q*DEX zSfR-vFJhDmP0y)XMZg?M9tr6Y!)k#EN+r?&q$!?|xK($uDXRvmI4 zI*oGBY;M=ee5DWu20>$dx%J`y@``Ya(u);}#=n<4-1%H#*(F)U&VLPa#ewJEt}&l2_5@Nizm(Ux~ycKat!Hb<(}@ z2J5+k!f0pzYY|E&HkH=BiB~mgL(#N~tx8d;B(ByJgN!m|mn5%vVOjVZR4z_f(#VbP zV>{OajTek@F&|n9)Cj;bFTe5vP{S9A8@93&Llvk(uiz@N($V0Yz~TietF9=l--`5D zd_0Mbt*|7yJXR5pA{C?x^%=z}x*pC7Vi&OiSVTEPVsPR)kCGTmj3`hS=5^u&_D;mh z%v~|FWP4cR*YAWcFt88XV^go#mS6K;+bqZT4-FZEKR1d~x%dlA6lrZ=jr#oh~s zlp*>e1&F~UW6rVA1e`nTPIa*NoG*{M*4uQ>o^yOZ2Wl4Mwzg?dejG|UAtw&%XvSkr z(LDY*R2KnN<78Bvf!uWDQHgv-t0q6P=&QJbQcg6+yS}7r)#25P8vun0dQ|c13pl8? z$0h^;1R^UYW-bU_l&SHV<&nvBTv~nY)SY zu_I<=;zyUae}_;EieOaesj>!Np+r@prlEftplw}|PCRndB8d~=G>~3uuXt#8a^es} zbx~Xe1=GZf39eQVkD!xy#7ak2On53$LtXbA%DDc6<(HY>UK%8)5{CkxMONg>$(o zLMe$ia4XfLBqa_6SQSYs@#W&zqw28=5|aQj#;Qm0oJcObEIxJNz~Mh|!#{Z{(Q=^r z!ZDXo0L>tt#~kJ&>s0lthJ16BJ|@5k)B~mJjj%AJ3?TD?<=}G{v->`FN6aOSAoKWe z`M3TXbKiLF`PUcN^nT{0IZuGh4wo0a@Z=jWJ@wWrPxIZ%es;!BpLyyh9MlXi&wBih z(~sN7*RT)V^Hclovi`n1t-IgO>%q%=Z~KWUJFRp0-k)+A`{*g329_aW;ea!MyxW#5 z0n6)ud=aP2E@pEsW;EJ{Jup8IOGO&E@cbz+r?qg58)K8JlMg#F(ap}c??KWF% zzT{#bU1QZ%Zt-I7Rny>H?=@SY)?a7MpM!k2+~{+W9{p#*;j-gq(AY;T0pZh5I>EFL z#GZV@mn^|9X?Fn|UiN~syceB+P6yK=Yp*;L6)P`a+u(I&+F7j~PUzBl>MnFJ-*w0B zw%OdPVcsTIg|KSK6mWLp476Gh!BRk*LQMYR?I$~8cIJ-1R9nOVHG|eMu}B4pQMxm0 zD=)Urm8{WjakId^dJLQ6#_DDvvz)D=99H(96=8AdM4Mu1*!lu2cf@Q5gW7FhI4fuQ zBYxob?ndC0S?Q}RxA;cutO^^~PI`hx&xEV$UqMbNP5_Ut|DKpf3^L5{W zmyiGa{-+#&=#`gzbL#nD_5bCc{P243FHe8uZvS8Y#gA`&@SeN;5bdo6FFf_gy%tI~ zo&}4uI$O>-F^wAIs1?wo!Ggw2K5;n{w&Yn-q*@1YW7%>79z|O9-tX{H1gAt~I3xgH zamwmZc7xP_fK&r#rHDZ+QUxvF>`C?#V}?P(ssxjR zmPD#_SxA_#I@T|~)TC0Dq#^}Ga*E@Nqyc&HQ5;i;gbXMA6-{jP0(@bj<}xaVM;wHe zn83=dE=)v9D3T1+r9_XyxG~IB3UlkJNbp~+$s>%L06qj0=F392Vrnu1$W9NLPiSgK z+-gnPIT=wZg`q{~oJf55BqR^TDsHHz?6Po}R7Y%66k?XXCNN;zr-GN60W{+z#kg<= zX9RU^mC6fc#2QmIEO<;x|KPc3Re z1GI`>q|};#FUFCvLp|UHGzL$?DI$dzjIU~sFw4B>o_}HX3v-_lh_k%Jki8b<6F?+4 zQKXt!q+E2xC#?Xf5}Q0Zxo}XyBrNQ4aH?RmGQu5ZsXGm%hwyTzu0Sz6HjG?2JDJZO zX7jLopjzk1L14G6yJ9W!kN@P?!cWb3{P8Cq_Ojs+j~J|WvOMwJSxhh)`sO-}2APEk zR(;h`O;v=PoIylODa7zMUUYj35ll@8Vx zjs9jI#v`QcO1AgtJw7*Qrp{ImwBk?{JupkC$k;T33`t9^T`8N}0GfHgH8WdM#tz5; zy4tNW`V4U0z7A<9k(n9~KV*N?h&38Fqtz6ri29OusT%uqCD;G{*MHPf(6L(|_T-hj z)#j7H;XQWULAmr`2OCthOKD@g64?ee%ng6V(HKB#bUR|^w4<^#w;rxUiftean)gW6 z9U4+K;BV5_KnlXmL`Mk-lt?NZ@F!%zQ0s$!`h#vStdlUI8Nm99MiFCB=o`#t-1JEe zXYy$%>8=V@5auKlX`qo4*@*x6wl;alsr5;MQW8oPw+}!bRig?zu~-C2;jjvH-o9%bjtt3`@0ESD^tu}n&SNI`u=yZnZEgMtR4w39$H;o=C& z!p_un^-Kijkdox0BE1)L+M`3AHZY>=N~LWol}kSt!vVs%uA10n^9Un5W#(Yy9-TNt z(4(X0F-*f#Ud)4ve3(ViQYCQ$VH^t7)@hFdwF3R8D@i)Mtt4m7Xef&IWF$X1>llF` zO+OSTP(e!X@UemU{}lSW1IhtRVH<}eFI+}pk$A?oN98IsF~}oHjK_2+Mqa+g;=^>o zz@Q&u<)Y_OOZ*c?n7VAlDJU|2;)$vBgcM)VDN|B!SSwmg+yp$vO|B@bNK9~05l$dt ze1_~X;SX6P#bjbqHHwuwMz40lMbgZB_{V1=s}xdK5hfm&iiR5XNJ25mSyFj%^H@?v zM^7FB0*v%O-mKH#a*cXjP6sQ@N5df)$IHU(G&5K={_pF|;BzO=FtWFpdyyHq4k`Bv z^V@H}=!p5vSLeLpi(aqHowxoKj+chE6+XRWclgG?|km*pS||-qc6<- znfI4Z`_g28Ti$>7jhrvVL(2JHWEMDJmvvmj-e$uk zHeGuW7qd5CcTs1}du;u2=gaP6f9p$oobiQSt~vj>Z=Z7DStlN}^@c0{f9_+0V_;gg zsMl;^XU=D_fi=dYFSxpd?6}9tj%5gn933lPd4W%Ud4>7r8?N--=BuV%1TRZvIyy&o z?CTo#X!T!ro$udtBdF_q89-ig@x^`k%Oz@mcKF5_rz3E7eCb6OSkq@_ z&+vUK2hh;5NXXjPW1S}ZK3HE|6yr+v$}6mJ)L{o(pdn~CoUyWil8tprcnbljOL(gu z$PA5IY9*Q5p;u%UO*Ik&RUj0}hnWJneXp*~h+Iy@r&14cnP=c-d`jSF(Ks?1t~2 z|AQMZ`NC0?zk2df*IjwuMdzLlDgW%w8*ab(%9&5xJLl<#@B7(p5B=gt4wpHB)-S6h zXSp1fFps8d1H>%Nx44_15r`DS&j1jXWJy6I#sr>4E#R^2aS)b^LmxK`(c}_<->B@d zvMPOGB~nHXjYvzXvJC3%4P{AcT}}Y>abPzx`P7$VN?G)CtOY$mnB;g?BWrgy6>f!P zm$Ah?`KzpzU6L^6aLV#XsuY`={nLwLmdu zvy2lJ2QZyUk=#nk#ulS?1VAj1{2tjh@`_Av{ANf5gyB>lnSRbLQiPZUQ+)!X7a)%+ zsJ{7=ImgbYWLp6cA3jtUPvVJ}3q2=eIvo0jBs7|gA?Fykk)r<_Xd<1%8ZWGUF+PJ} zGm6CvoEr6ZkN{Y>9~DO-1`&G|+4}3Q>pOc4T}`YkiY1%-kj59D#`S?yrXcHTih!!Q z@Clwfx~d?P29=Uu-N)rFXA91@n+-0AMq98_D8UOCQjN{dmB!&E4BL+_wwOrohR`z( zz&JHZH4jL#!vXBwRRiI~%_7u$_@JN86@6~Fuqc2je zaX?&K*Pk(uN$orPnWtc3AcdPZKG$==2R<(tgJof`W$ug5&UtdyOi)kJx40TH5W`a*x17y z!;EF?v3Oy&8MDWEF$fI&&UP;L%nSiI7jSkwS}?&x0~)w*`95oyERV(6)q7&yoYsmsGFEkC-w(ThPLKn)B-rqC%Aj zACXWeQHnuIR6gfXLr4vYNthLRRw)sPj*ws_$+uC$}uV_nZorhk8+t| z3{UI0qAk0J9JsI5pG9PNTGL%ELuR6|<3R(|g}JSGkz^o`smVP7_??i-GA)qMnhv=e zhV*apw*gMKv`VGvzb5c#y3@)WI4mr+QA`-FS&sfG1dXB5VXG6*4n6r!`AvmX%?vgW zK*bJH4FNF*j$uW3%bcz?8bPnj#aSuZnt1fk5ECCUQ4<3VIX$W}4*IaVm|#u3j4S%r zD=0)f!SJlxS5uGpqofCU5f}*gaI#2JDaDyFw1pTo9moIW5`yi%%NB}zgwKqpxS*NB#sgfhP0VS<2@gq19*lrK`M zycp8h2$YSBCPUM*N06B>X`&biDs+hSD4f#bSzS<8K}^J_TQL|alrS=iG$Vn`sYn(M zH;*FalDC2;vXXI7CIU?O2*h(S5fD?+d=8(1Rg~pI?=eaQWhW#Fg~ccNz#%4a7|NCEuf#goZo-*#n)b*<$GVwmwo9A zQhxc_hn+9K{p$4RXZ`$@7as5;^W0f?zcv3c=gYfp^9f+t`SO9gZ|HkpZemZ~ZjC*+ zS&NZC-K16Ev=2L9cG?0z!pnA6nQh+QWhh!EO!Za`NdP9i*7-93sw>Xt zpyr!zoaXmN{uF%m6&GK0!M9w(aY_r-dX;I1&unA)IAGs>etgFdJ6}Ha%f~~$pFR8# zN6Fx@Z+W$pL(ul0O=|+anC&}Y4wt?13>QP$POI^8#O&XNu5Du_`{tW$?1700B+T@9Ig|}hGkWRD2@zeR@7E6xtKOYbD7?a>>`H(e*yy4+C|U}d%S0abY!2FrhJkxy^1=FG?M2b0hG%8A}w zcAD%1UyhsoKniO1g0mP1di3+<%P+j{%5!eJ>C$5k-_tA1H(z)0cg{J@%gcA)b>ok2 zyY|6f{BZiiKj)xt-}~|k^Yn**VPUd_nU+n$)+}M`CTEuBTZKxJ%61awj2hzzu^2;4 z7$x5*<4|&iMH*{pIHRwOBDsm-h=(kd6%)RKETB>=f&ipSFp>B(GhjCsg+Yx*mN-Zy zp}Y|b1SGB97J7@gWypHXnq>w^;;i5q!1O=^Qk9;Kq>hsKlCYABm_2}%Eq7Ld#T~tR zDNZV4k4z@UQkyNzEk($SNd!^iqZJTy0v@F*MQTNhCn~XJ2Z|Np{6f!Rt+^55>Zaae zJQ~{MVRxxDvyfmEqBxRNN5EDj&e(Ek2?3{?q*4S`s7or6ONm33YkeBFOhcnYQlh;e z*FNc=kod@=MU+VSwUpvGOj4I5q~=1JtP~JOiMBY_pk#4XdViRx$IQX5=+fA zi}_r5xpQZiv^~PnkTM52E1CfKOq2&_+z24G#!Vjlbsj$Aq4kNUGmr|gBo492>Vi_z z5SB#S1jg2|(i>UhdYW8{BS{7@$Q*=Z3=hzlZN|hc9$7XCJJ!)KU<)k!s23~@2t&<| zjKSpTPdzqs)^uSeu2-9(W7n`tg@|+DHirzc!)Qm+XfP?h(W(}jn@>o(VuW~9M`5*A zouHFydNdj8LVB$I6Bm}8kfKMdnyPPbNrn6Lw-L|ri+D_Z&be6jCsRilboF7()mC0% zy|q5E*KRv+@##MQhbymX`2(! zKpI6u*zp6Kp)+E%YhkWoxiP@R_`1k3M;->b+ZgvHPll_cwq^MGl`dzmrtO-pH`u*- z^sxsXf8rrW%u+e+t~eDpI=tCrVKofZ#pV?&#qe>^43Hjq*nw0{lP**nRx~MK7@70$U@+2u>ZRLMO~Xx7d`TXyV&P>3 zB#e?sIt*zMXepDJQ(o8&`r~T?k6v#U)+H!h{}&d?SOOSz9-T_Yh85vkV$uM%QcaS; zuYgn?)wtFK`i4|~(>OM?%^8xM!H815#LK8-V2e^lPC%bgDfC?tF)JdFL>r?aJf7CX zs1D62QKYg|nKp*LC%b|`fXx27nF@xN*^ufe+(-x0bW6b2xB#4k%$*}6;EF*L3Z||B z^60}<2gN8NY!#wu!vUrJhx936;F~A_VJFCLUw6vPH&+zdOd=+{jN#xQnw)}^4(SAx z2ta;<5tx=V7Dp%4+qkoE+dy^;k#p3-E@dUsJS8VE4qQC>Ls${Qs=!w?{HVkVjZBit zR$p^aP9&WV;t3!goJS5o!&63K#N-btBNH>R6p!jFMiS1*%oP+ayGVI!-+_NIu}yMh z1BW4A(WT0WKs=X7l8TgT$T$=B7KuPil=x39H0JMTI8;qQ_JOlU z4s&{rXOc&Qj2lE6h|?8?+hj;Q^ofc_3^8;A{=|X?o&rjYn=tx{D5f)lCZ%R11JLvv zKrY^N(W9gemtDXP8Uw|`fBc7r?Qbt>!^=L4&6VV{*dTLX$^PZrFO4Ac>wen){JU?= z^~tY=bEm&GfA-t+XZq6DOV2z6HUIMU8R6y8t?Vc7`r~(>J>$eF+i$iqusnIE^?dKk z`7*rhN_L<8f{}gfi|dHlqa)@c_xY4lW)5)PbDLEhG;g-f=%Cr>zxLf~W$yD+Hu>7I zdtUbK&!7JJ$&q zeeCP|w_J7S?Kj+Z>$ULmh3B1RC6WSx#MqnfN!2Kpa{p4c+@_u{oVPUnrLeox-L1Ul4KI>~=L1wjeY|S~U_E8bYTI5Eb zTn8CeKC$J|8doh?q!6p^Uy)p|()8sntEUNBj5A_80Gu;%tDz%lwS!4V_sK82NOoJ< zENT_EfMR0#vxswah%WE)Yg8+t)ZV>=BD063XOX{39PBjLDE4zU5|{(ElpSE#Zh67WU6&AHnuN zDPf<;_6RTc$uHNiMfQiOYo>i0UOxJ;J-+?*FWq+2MigR{^3sOXI4e$A_Q8okMnCwo9M}xl%3IyjB%?zAv>U@Q;aGr~ z2dXnTeDEZRqB#MN6{l#`;VW8;@Ciu+`V3&G!>}aTdqKqxN$t|;gb<%;5I3@s%EAgR zsUs)gQ6z3drLa|B)j>}%dLVfpu0c;E^%6-E^&*l+q(GZI0He~00Zxi!pknS)G_`02 zJY`XeNAhEW8*SsDAgs{%kOIlqTzz#Nz8Q&u>f$AX|0j3dh1fQTohy|^Fs57?#>9}G zm1r>9_XDo0zAY?Sv_1lR6kWDlewk&IMMC4w>yR$eNF^st8nZb2z%^fek%@G!=Msn* zJXKAA4J=X%5*Pq+16(BMSlLKd>(fs;0j$<=iuOo{y)EL9i~NquX@1(JmwJKP472eD z8_)?YX(l~~D(Y-NlP1VgLZDH&L3`?jKLU~D*BrGAAC1;5fQhlG>Cq$OLuzKTu43Zg zAP*3RRzTzV3w+FK^xmkj>#}`tNf>N#@W%np*s2cx6|F2x#E2(qa!OK`M@~)ENPPV^ znQ4{Sa@ENRFNf4Umx=@oaKh`HHIwR-U#&H?x|jyFwE!Td(}G&8Uo}NWW9}pf@Qq?Omt5>0u^j^h zN4gv+q`ui&>i(5ezTC@7D=$Af1Lam-X$8wodrcNBtS~1Pc}xiUhFRCBX<%wo#ie05bHnZA9o!n$nD6??Oj{JJ@ez0990;73X)u2LBnSoovN-H*CZ8)^;Uqla1E@ ztN-{PG?pzF09jwEQ`xI|_PM7$ir4qxXz{K$_&&Do=aE$8G#JelhLuWzqM$3C*tqs6 zo+K7fw?|aOt2C1~+DNUf(PjhtX};E_BKi`|+W})Im{_doK1l9J)b0TeMx{F{E{U1G zY-@jVopnH9aM;@IGKm&6-Yw8f2&+FEV5udRFq03qC40%tU=lX!!Q^Hk;VzAkfznI` z&c2C3+NKUtiRV(XYHA&kYEc9Ah(HtMY7wbu^bmsuPSgZ|TSxqsE_V!qjg1SPL{~uu zJc?HrGRSMjGceU%f6x!-6q-q+ggMcm}9v}T# zKOteGRjy(XSd8f06ojXEYz5>(AQda62f&qX4nd<70rD6%N M<4k=PzB(G+IzLAb z!?-XZpw!=}#b(0H>5tpkNmuBl4(H3lCJJZ;xX^E55P`-N3<6Xt1s{H@Z@0D|^GY>y_B(aG}oJ~sZGVS*3lO5_wYIQ251NTuOMcz=AF zOgWUyH*f1abZ9vNT1bfA`95*iH3| zMH^bCy*GjM^FWkc@#OZtcfy2fGsCJ8I*-yl1JHNNQ|LDD^843 zA{#O$VHxp}caU5p`Y027kZK|m9FRE^r!E26i-{*+E&?cBt_0wVsRi{tVPvINP-G&9 zIjtVm0%7LYwM;?HR{}a4XFBTBUeNLA-Q?F@c=LzQg}?c=wd4QdWq)4g;APZ>~Q&)uRiztuRRMdzc}jw|C3rc@8KDb-7#n8-H+UN^HUH1;NSgb-ZxG; zXxmS(=!eUD?J)Y-mn+%+B<1C0sQKtAn?cIH_XQ&#vWNI}y~W%?b2yss_flS7?hjKN zuDZzNEmqun^A%1wbgPq(+;QqzM}KzTt#;mgjg^=BP`u0(1Qp>#uijX%;l$o}GUvOs z$TW3I($T85V|4m?^Od*%;2ORo=BuV%=oRMcuesbWrSJhBo&Wm4*M{q@>-hMp%P+h2 z#v5+_-nFh+yKe1L)|Hn{bsyU{IN)rhv!2?5c1C>PJwLOa`cafy)}}Q_$zZjkYQ#Qz ztxCM9ZtKInw4ZUup|Es>er4z)pJ>!~e*&Y&G+TTh{2928w~ zRz^0t``aXO%#FWQ*xQaC;erm1E%Z<@J_0n1g9V*PR&A>-O1smhN9Ecu2kqBdWm$OH zv9fbypcrI!vg{@{-wo~CZoHabIQV?U)HA1D{B4A#YJ(vd;K}APHLIU8!Ho!2RBq5J@G&E+#%-+tLa>nXz11Jag zG0gN6BNqw-CxzVxWG&m10qY#FDu@_r!BtRJlj-9r5U^nf)pSjpCrzn$HZZIMPy1svFEIoaHsv@K?0H$1c@aDgt`5X*=yT+-N2} z6HhLqvLWXDh*UUJHKW&vX3esPRi9-mM!A?fr1R|p+>BPcRm4^b@m%dNra)TNMB_Gy zLv0mNdcBke1jy!d1mX$C=X5jla>;8fD^W(R;3S%&)Vg7!u?S$^Ag!Xc0&S?iTLEM< z12~g1@Udb})0m``LqZ%Vgplg4xSU4>l&01KbRu5$Nr;R*2pS7@{)Pz}@lrXdbSPdH z*=Nn3J~}lX|6v(y3Cn=YAsX>83xa3>8I7(u4o@W}jP|YQB85qqkXl>-SyIU^fIz@P zW1?d`6IEnPsE!juV7fx4dz4+f8;{KCI&JOFFbfp(nYD}uzA&RYgBF5dNHcN8um=LX z@UIKi-cxoo>^^oDFSFKZkw>Y>pkpUTogYa;115_UhMn2T9X*Sb#o07&i1kB#hVYc5 z;=+Uwv0H6y?c24L=1IcA@X(MpVSt^F3x#Gp=8A5D+)ik2m znHhUt5Z-drN%pFa`@%6csI8v9qUp0C{T0!6E^e@)egzonlZw^>l1ER|jXBdx`hUN# zE$UHxZEB2nJB4DQFqW$Mg(U2J7sFOraXGibAZ07H?QW~TNY)wxxB3C(&Xjet_Gk1f zftjM{^0iV~U=6sJa|isy*WS(s3wClZE)!A+!T zT)6{|5HOq#Y8AA1V?^kRhJy(KeBc`?ASVYJt3HHFKL}8Apspm@QS#eQgLeirr>_DO z##-2CfEWV61rD$|RhtKFteG543th(UK$*|Em~A6&a2{pHHs-=g^k~)+!;xIW1(xx5 zmSjsxQ3gJJ(ijb}9O%ao=8Q^;;_5Rx57|D@DZ+FiNukM5S#(GM@$bP+6hbX%NC;se zC_8$rD6RQNyzG+jtVODYj3N<-&X94G$;HZyF>F(yR2;U-k|mLn3UPu>lpjXslo+>| zzsHw^TVz0o+>;li>M-T5a0yF zbNJH_0+bO{Bab4H4FU*w6j{c?9tSen)4YmQZW@TgAW{NHkN|NW80u7ve?z!wr}1va z(6v6$!NpLp17%mT9WjHlY93+F!Y2XD+c`3rmQ_L((&?+aw!Z|C)#FCVhU zh7OmZW_a0AvX6bi%Unp=m24o|;j(zCy~zCOwHEP5=IuVU)FvNaWZQKYJ#w!N&ivBe zUq61IlRvwM^W~2({h@dnUiOrK)!Daer8wAOkP%*{JsA~2 zu)h7pkPa-k5=Iycu}hmc?7qX6@ba-o9Rw6RTy}^IICm4#9q?<;`V>%Rc$#ms0*G_2ff$J6ZN8DTm9?&wR|XWm;~@W1+QlTGywOpHptuvGDuYylY~7B7sOfK=rsNo0wE zp-B9*I?7lPl5#OeCLX1ZVM0hq27~3Exo$nO7F&a?&emlaF|?+0Y(o7?b}<%u8P%5! zf{a5T7snnzfKLJOa3wsh2%;uo841Af!!vV&d4L-Nj@pI!m|(6rPQargtc=7H5Gj`> zQcLMDUbxe@A+^<48NGl2E$*`8wk}@ka#?*aEKo@vlF%L|N+BU>b8NYx6hf*9moNxa zIU5ECS~4`u55m;gO05qAgNBLIQt(^{wszB@l^r)~*U%Uu))wT)Q|TO4U`QLZB$8|* znXkwk&5&RJhfjCfetQ}aPjY(GWY`*pwxOI9yCQ;f+Zs54+s>8THX(`E8r`DQf7DJ# z9l!th+b?n1Gts8#Az8KaNB{uVWpX3erH&4raaMhKRo28vBshKbRaRMJwbjfXY#5zf zy8!8`Pb37kxonllB^6tBl%3fOO=~GLEy!#$L45te&}OpYPXN_4BADeA=utBffW}n3 zcEPrGp(ydf6kFfcxb>VUsa+E^xDe=F<;6;%EFuxl8-Ms3S^;E$6levU@z{xThs%(Q z`>`N1fP;gAWXyRa2~j;_h%-rqMJBmQk(Q5=v`Hc?pxxD6~HM9USF>+BC%MB=Sy2KA7@L76nV+J!**>M{X!M@F$0BIpyzT@DaF$m0#<%?(f z_D1tS#tdfevV@t$A_u%2;$|(w-N5n_Pd?JcYy}3RRF;snk>=opRY$y$s}|(v)btPV|>!rGfc1J8#$53l)fjt?N!pvnB4*Q2)#Q0aozxeY%W#oH7$qUQgY4Xoy zCfR@ZxBup^|MOom#tfVv-EoImPRFZXk%0oma|%)lRTIx}&}aA`=p@y?#L#?D5}sQ%P=mLPV|4V8eCt1WE9qNtWg>U;sAJe1w-dtX=aGtYfc$xE# zfPU!OL;ExO4^1l0`tJ5Mw&1E=FLRv%XvhS)Bl!glZ&Fm9^YB~V8+;*D1D z)&@8}7GhW5X`43agsKf#@YZZ*`)h}7x2>_{U4~h)Av)X(?fIBc}<3 zHIuM@u1f%Jp+bid!a7D~v*{iIv}pWj1whP5Bo8snJof9%X3x;AiGYpm5f0*IjtMM=D?68?D0?YQ(94j|(hzv@1?hM2! zB?+rba)zV=Q(`onswqPKD2h)ld>WuSeK_-}Za{~$#L*bw^MQ{1+yIeOA>>ue0G9+x zA_EObHHWl^Kw83+5P?+O#CQziRWC{6OO11YG?7q791UgCvm}&-r3#l3t3fISp~O~F zk+B-+%QexLJ@NSB6Di5%sumuF5vb|lDu{%fj6*>M25uf@kEYrombZ+9k0ybo3RGMf z`MA*!Kl&(3ji>P7hM36Y;)D@kK9ndhl%<;Qr;$3im``MAhBT?_QKlDtOjy9s7WhEt zSh-JR^MPVu*{jL|-Ov{3L$h|f1&DUN1@^`k`4wv1>erMsc@Uo9% z`;(NnnE5Y0dv8Bq2ASdIKYV-ESB~EoQg#^|YTkXD)kjCnJFn|}`H0Drjz4@m2g;za z%h=E{e{eqe<<(|M4xN4JYnRPe@;bBdZj)BoYqRA)x8Edq`OFjc^*^b7cid=|WfuLg z_m|;KGn=c(78jeVCQJ~rjal2>mT86}Bbga&A&}~`*jG+Fk3agc)c3BKcIkP3zI^S~ zm!gr;DKJFr%U#Zw&pzuL58r>E6Je2Vh`DYJ^SZ=rJJh{vhs$s#>@3D~2T&uhH;%^j zZ1=idx4!46KeF;#AJ0DP>u|JJn|<@sH^H1ZZ@bmz@Uo2VMk25SZ~kY=LzFPA`PoZ? z_Ip7FJJJ9z`xzO>6jxd1uEiHh=27TcH!WHIXXYGyXP+=;fMa0IL)0+W_vN4pT$UWq5{Ov7$v zTd*7`I2LDzJCepzTQCn;=HEKwBu(iRW*4x(deX7pPzH_tL#mHsdvh6N7Iq~YUgqC& z{YBSbb?(W>9dgDgpPhE`H$Z0Buzl~#5%c{&zs=8=pMLCKZ$mp-_6oEANm;ho1}#-q z##VOAmYG>N%c}X=II^gT5s#h|usCwjo4^H(HT1*8?vab0-#pJp5PeyMkqHk0kHT2V zB|aHM4uuME;s+Ncc~qnlEj`56Jy3?!Ed8or1ur`SvjqP1VC{Cc#Z+j0XB{aHF&be@ zJm2DF?G>X?%NZ1mp(F+gtBKWtEq$V~=*z3<$i&DSXXb%;PJpPAFv%2=OJo+EjL9#f z;&ddvTGw)G!YPt1o$AOolf}5!mw?A^EgAwU!eW%y%A3K18`&@?Mzs**aiXKWEG4Fn zBJk925t~;z=d+G`j1iE~_ibEgRJt3C)lqw?K4o&W>QU_!k@i%Oqb0sb!YdaUbJ3tk zY<=F)c8ikcSyP%rtC872ZU@ZJaI~bQh@=+(`p^I1EfTpLyBqj&A*Ky%jdZzU3mTI| zmy=YdA_HsLtyeVN0AZdH2?62t839(heF+4{uRHJ}X+U;@4f0S0NffHBsgAVBh}5-M zQW2F2O)aX-#-=?RdNhp8XcAvTu#1?8bug4MtcPg9dV)u?=?Uo>jl3dNO+4Yn(|~Y8 zKoSkBNV16(07<+_3K0V-_djq?pIUObJiNc`ek>>r%bJW6#UZIYDocsV6)%!J zm6*VMMT;p(SbK2-C7@*aPeIb6qQFEYSHf&iFHdv>K_XIvSe) zKv%OO#*O4AI#b-JImSnIfBEnK+ycGnr#2*jv0#_mP3&EE*w!mlB=mo)1ZGc$mto-r zRKa>O}2bl$QLN5i>`BR!gO=uQe8wg$JP$^YV7X+lLL#m(+LtEu8X!**4 zd)k_V{*6|`8|DVGL5Bd^RdENthQA8Zu-CinOWOmti7iz_$P!xt7@UKq^;;R0ThZoK z=7Gt9so>N1w(?A{2ncUJq+Wy6TELN$-pl|=ORAjG@mC#`0-A;|%n6GJk@%Vh+agt5 zlj-wtqVBE3J2Q3CDj-t#&`%5jsbaccA)YYi7PgAh4MvL=q?`t(35T)gm;lZdua1Ny zt*V*xsR*Ug^#Q2{BWEI#R}8@tSR2XCF5(1~g@6LdY-sEG7|LjvaI}G;QYZ>5b0Zll z2tBq{mLwK0))cMkNUVJwNx!0BBU-K5fyM%KX^cxUA|M5n!o~|nKlA`BQwSH@1)`}- zD&?SrEP^u-N7IPN&_})!DNvZh1fJ!LdT(TS9b?!`GntqiT*h_>8+^9I20n{~m%R)^ zFc7IzS7=2M%FgH9$T*p zG9S6mCP49Fdu<3cJ4)sRT+H^duYGq|!^_Lyvq&+8-P}HMxAhL)X{}=q*z_C6P5#!& z2l(FCfxCTr_2obGeA%i2HUeT`pv3~FGiTbKwd$Gw?WV$X@FNsy$rgZ=FTeC`hs!^@ z^M)V(;QH|L%{N?)4A}hc6_)|X@^VKUa`2tE{lGHnSlR8Yb=FwTJu#=rZjPOD;&F?O z?_&eY2pj>x%PwsDTnWqxad*Ca-PKn(bjHfDGD=|Ci_czamUsJYwzQAR78>Ag2{AZJ zGsy`<5*oI_ODafY@sBKu4DeB@j_|BpKJ(>#d6~r)arN5yvM+r>$6&HkW-DP~he+H88|?iVrJDH-Nr54IYh_{#tcFEF>cEEqV8JC7 z*;uM*l%AN7ETuNXi!pY5;6=>(i<@t9Umlru89pL&cV1Ho{{vsxr)W(-lqOBEoSNca(d zU0Rrmu%aBdIP~q!Hu;nfj}QhLrSMK()SZD!czn1TlrM}S6(PWiB#Kbwl!+~Kmnv)m zxF2FezfR}^8?t6V1xD6Hf9}cX&@Dpir#)Bf2HM@h4A z6(=)a-Kj;Sw?ttU$H%_2bl?3yfAFDuz0nN0xQ4x8{E2LV&X?!CGzVo(k2AetNOdI9 zd<2kWm?sR*92%u4jvA3Ex^hLT1z)bDLL;VugPZD?q((F&BQoJCSXiXenZC?sPJn?o zxBvnp*&1O3*3!eMwmSpig3LkV?o_jN1sJp}TUeNJ%XQW)$>EU`WFT5bxk?f<5pjdk zE@K1BoJtXoukNZq^TH`y1tswr|3(wxRfm>LtrlNB)BTVk7Y)N}d;``es*G!|w~x4u zU(n8!Wl=MS)C)fKR>nb7_JLCEB*V)p4#~0uWuZ)^)J9;J&Iy>9)<~jP>xu+GBfuXf z>h}bsYF~{jKuF^e2O|*@aggmjESJ74;yZ1(jn{{Ljl@B-CEF!!eI0Vo3MGWDG7g!^(vtHsn~EuuzRM3v>`SUzwrZCG9{+~SQ~^O&!lNHXdTM&;s6dt@-d!s%9a z6R%aizKwuFs*FdE~~a-$*Cv?*QxQ9*UlsZ`3KuHUK)dV4<1C#Nu0K>EQMH`1ci?G>T(D62uC z2&uAr#1?@DRWw%oR3ynHz~Kf;sFzwptg55#?qhek+Hk1Wumguce^Yllp&V*4Y9LcI z0!9aQu{=2etN@l^g#a236UA`?y(_9{j|ecw0RT5ea7_{B1apYXDxk4%fANJ)EL?Z9 zNAcs2MLV$tl&~Vtz_YS2uY%f6jp*63Ybl`?1}EcI=g}Dq)!fh%ejE6I;ne`O(h25D zA%mKlI+9@`XbMRsNlGRmm+GVj3=za|@Zm}fvBfQhDDe*u4lx6bR2rs6B8kL#kS&?9 z;*jOytXyOq8o3mjsPFgDC{(UElM{`wNEx|UML^*p;z}ta<~5Tf0e*5;m!Vz}6H-8- zLqQY8PMGQynHXhFR9aP#I?_pkK92p)`~fffKK5G+ zpBuUPv)_DW*6Z`8cfRb1*|9Rb%zt6_J-(0q_JYSxIC|Hewp{gyDO-TdzK`vE8D8FP z%a8k?6vzxO=OAUL$*{9SYseI({GD z$M*l_!zXXG#)=>Lu=8aXlfXb9n6h3w*@PS+HDM>4tT@}SraqQ7AC!X={YeU5hMMoX z!!_n>E}MGJ<(Hg)>rK~mGV2H!)RmE&yvOc0U4NbTmoGTyTaKG;fV)r)9s664kAe7f z=TeJ*1h}@U+T!!>mDRT2F1wG7lAY6Ur?9eT?u-CZb`0!O+YY9^5@l~tb_*x~3o-zt z4r-aErfayekMOvCZIS_>S*Kl?bneU&RuOm>ltvG$vZcLfETc#jQB%H`8Qlm4x=s7- zuR0_42%*rD<)(F(couFG8cSGquvZKk<~FH;IzHpAl&+zl*i&Hf&B@wsO@Q1+QG(AX(6Cjc)4%Y5ORuD$Sk zSD*KllaBPguWPS3_v*{O?SE3PWdGpCX#-yN+hs@04wt>cY;|K@v}(6z1uF~_YqupV zix+{gHPm94Lu~D9`IBl1=1UTvlZ1mvi(oE-;7{>)t@6vnOZA;xPnf znH)MHqeWSA!E{VG!&xT;A(LN?+To`+%=tJgUD23S1(HjmH+2y&BRk4sO$<&)?R{Q- zlBw@p4ANvM>h`)fC95yCY%@O2B;iD|&9qt-;VXhLl2n$G1rRd^_<*FTiEV7uF!Cvl z0Tx;(iGm|o#oiUq=3`paYPIMb93sftY>O0S3QN!IVb4e71Gm+HVww|I5eB3mUITkCq zDo$OTdD$*g1?OLCL@Kf=S1FiKlq!%}SOd`&{s_b)CZ{Ay^C)9F7oO;bYSLA`rGX5s zI6;O}I@+85)T7`GXdGT%u+aOwqpR8oocDPo04FcOG)i4vuTxg-OP+)wk3cdjk=U{% znuiY8>$IqYW2wNhBW6gsBpDqlIZ*Z~Ff^SNsm4y*;vDq8iy_< z{DF?)lvFiM(8k0CHTci6g&S0#wBaYcuxv9NgeRMu;qbC1U>s_Tj5^&(U(%zXV;DK- zx658thK@z*W-v7}Vf~HjEIqW>VJIybzn1mpJuh77Bphw4k*=be6y-bB2oRez)KQIe zPJC#9&Ry=}{E-R-28x*nmSCFK3B8fQ=5atQ3}_pGG$srExHjVgp6QMNnorSIaN5wZ z{m05SxDln7cu_um#|I}3KI}-T=9WBjuH$vlCvdr1Cr=zwZ1U8O1zTPi%u9eraRV% zIj1RzHjwE>Icjg5%B~{11P-P++bMduhSM~PPzwY~9N93B+%FdmSAC^($PEARvPL8) zpQs8IP!yCR;_c!zWS=T3aYFD6MPU>SwPrEfxZV;qm)-y>N`U>R_J{guZYF2A|p3I6LZJ^V-S%s%DVy>{7pbw|uTk?mt&Q+D~J``CWL ze9&&|dyyGH?)Vrw2AK!E?0nfNGX&kCvv>j6`G{TDJz(3_j^1Zuc=@6;kGk-TqmSC} zGi$8)q36raVF5$)yv4;XE2PE*1=BjcvUWg@RudMoX%iT98GG7g=lIyyO*dSA%~cm) zeBn3YW#`MtpvX>#z0d0~_o%}Tz5Ui(d~(F0uoGcxI_%7DvA%!p^wGUX&f7AsUORE# zb;s=-CHvtN`0V4@fUtLyk2vH&G!BKm$m~+Lzfk$`m!%oyZaZ!7$2`6w;))l_Ry0PJ(}Bs?7S@V!6YaP3d5u;h~Pvd?1s-j{$QW`ACW znqAN41VnP~V*|@xVTPB_KJz&LUxt@&x&BgM+4-{j*e+wwc=Q*3BjtSgrRSdV3ubGd z$(fO6owCfeL|WH`Tca}q#aQAQGcC2k(U-+i*y`C*C^eG+P1(id#N^_T4N#J>5`{6r zc1S{B68m9@EW0F2iPDE$GUk-4QW%zlqEzR0=WDt&zU5sMTPnX-@tk#JUu z<&v4_{dau>sI>9I@KX>+VBB<0W=V3AN54X;6M})5ClXPsFR8>-E&&v+#43nDl@%u! zV@CgX`-0exRk8Ge)!{8ofdm&D8D_N+p&> zDuw7jkRwZYWVnvM+fz8>5KQQ}%?XqpSKq$^?i>Nb$N(~^%gPN4cL2on1&SRpbB>Y` zfWZ8&aEmF@51Q}#(d~Et{KpUZxz)@k`|1}~-ekP*{(DR;nudB)2vwv6kA&B<#MZL9 zkcy-__cU~#hL3nkP*aLR$zf;}_6eyv2E(3pq=GCTKD&tB?thDGe`53{I)9S#T9dO~ zNWm>=1XM==`ZV>KFtOEPfJAnYL2B$sC74M3MWXpYheI8yDb#Qv7*Uz5>@NSgaDU9M zxJ_^PdQXGG{Hw&uW6W86TtWW%Jo2O9OFPjyiX8&)nwyHVy4nVjlqfYT(&(Hn{4 z4Ms!Nk-f_vvYSRQ*QvaKW`1yO#{Aj+96T|hB1*FhX9$~od`A>UHqFX~Ej;TBDq0X{ zb27YaStk-<%rmq~y?QmU%)T;xiNRLyt%(cZ6QwM)EzIXYAyrTi;=+>9XbzDX%&?xm zO|U4pwS}AC76aYL6P4uB%8nJ6r>2I2&HLmGmnlf z3o;z2$Y^aK>&fIHq0S?tnlf)x870-J8p)L+SNsJ$ij+n5xumkn$gWVEJd#98A{%H~Ftet1W~3;-;vHMGLuS7k}mqT&>yHv-F)Lj#CIE+{>wG5a~3{z*A<8Bjn(Py#oP z(1b^DDb7;Gqr_yy?f2g@X&C-?&$^fmKHGUShnP>mWOuN;n+-mTNrnNc3N1v3C=Ec)0*R)Vq;bZuL&QwsB0H)MMy^vL+q7$QvC&@&-EH!Pbcsd!> zf|!0LK9V0>Ste?4VvP7;T6K)FLW|GGc4Bd1Km`qvVz?gP?<*0X;sum~A$o-hb2T3V zk(I?KMmg*y7#I_M2vxv8Qaa3cT$7W#`N1oO<|SdvCegavzG9p*$9# zEmNkTqgWUaG`1eG!=0u2QV{IMUX{cFJCq7qV8ieq=8>o zvX@_asTG%9=A`3~yX*GboH$>4(FI;>vH|XXH9+l3_Gu@Lehk5V<&-bm_IGyd2A4;u z8BDgwiUFKAo3xQgyVhQ6MtT0(--MUHdg@8Oqy`@__VgG|x*`m`w;Cd|V*I<=fJN^R7e{Mn_js$EcaFALwwi-Wwd270)gT14VZ6_!hNrx`J}5t!51 ziUEhI#p39-O{abUIBrljK+Rl7fR$5Y5E~y1o#Ug2>*Me*A-LGy7haPd4>6oTd(%Lug4$wDfi?<_dGXa^jE3ZU;gcvrmTaOGqbVP zsz3{?S=yr&R=|?oBI_};P$bs^CrJQ@VzRZ$%eNHD-dZUMvE=|Ck(`Xee0il7Ks*$L z#<0b_$bs0}A&kE)l5)aguu>_xFjSU(4Ekqc{U5^a1NzgdJn*B%8CFO|oewn{3+IY{`Cp z|F7Kh&)~S{-1DCMKKJRjJ5ph;>7&-Ct4R5b*<^ zxDh18B?uFHy&bLSLOq?~K!C4FJWwH&0wQD)lqPK55`?+1Buwg&?pEM|6$G)L0LFKv zWM{^ec$jJUt6-wg*;wKtQ5?es=SHAZ(U40Af^tz>QG=eG;Hu6pM!|#@#FbodO95qi z_1YK~8PdV9^zwt?Qw~TgPt|D1&+rq+BA#)rYh2awtS}84Geh7>$mS0I&VW3`GmAl3 zM;|_+ry~#RmDS06j~-iD*m1xX0-<#949)Wap*3^Zh+M0OPV^M+ny?JiQ>ZjyXm^N% z`6ZBc7SFL_GgL6?$@$z!khRgLj6Kvc**&!|Bsdu+C*X|GQXnR$iB3ewWBONj%iJA3 zw-m^IxJWCWPQ5J`KC0l?=oTnX^Yo@zk>(WR6Z)$(l2z!*2_))hI3egAdX8Hp6(S&@ zQbI^EUP4~zD>soK%a>)!vKYBDh@(A4!lINf`(s=P>ZC9Y|G_!_7*Yd&LbbYr8)mM6 zniRC^>3|6cb5hc+dWFZ>3t|$H0-wpc09*a#ijS)@;rU!HAUt-^rl^QBUCm)dnjkhV zch-?o;7)=h!cLi;BMWZDw7rb&9J!<9-n*75_x`qPFJ8*ErO8)bb;V6L-ylJjEaM~T zR^-k%Onnlcs%e;|Oh|1Cw?Ih>OnJ()-8i%j8Fh+L$96N9`C54ozE)?N28NNwneqZr zkg!JxR7CA?PWQx0oZQ16r69HKt~SE9tW1y>{D~^6z{1#olximPeah<3lAn%QUwP!pEQJie8X}+R9z^vQ|C9nM!KY9E2d9 z0;2A!RUfMJxr$z^D{-UPWQ0rw>IS*7?IB6;9?KK;f)Lsu5yNXOoHk$@lkB2{`Ng#O z@E*>za;xwg9Lf-M;|*UEP(ijzGdWF+Vf|u~(jhC-6NhH5h^XXF-@(BXE8@VV%z6zU z5>eGk0ezhmb-e~34(1qI@erb7j?(yNois#7Y_Q3B)dl>TnCZ&u9vxUd6lkW&Ak-Qm zTB}5h2EtMpUNy^bE17Z|XQIni_!Eao5<2xibf&+hz``PN?uX3sWlM@2xp%X@p6!&m z_pxOwN+O7>2PuXp{&?a{p9I;ZS`_ew`djk zm{-=g)dT`_Z3RPeYgMU^l}dz6Q(~L>t_lQNF`<5;7!(U20yzHCKmtTL*0$=)?H)kuK$d)gAAA8=uoA^Gqm$6kG zD@>$A|9Y7tNbXjmwwk&)nD%vlp}F(caL8=7V7J<a9qzpNy|BeSPk;AN}ZM?|$EV zF8a_1-udZIeB|=aebNRYCzbqq*8OWQ`eon8e$DykUH-eDmEn5B*pp`ry-n@6$sYBe zamtC#pqV3Tg9}lJF#V=WBokI@T&x!Mq3+H{{l6R~0r_8<# z=10ww4`^?*`R1E!BEhtzNIY$PmR4!6=#(K+VDW|wLdq-%CPJcQ&ypQWlUd}*jCB%Z zhrDdJ1c%nC%~?oG*JT+pQlmL-@>ZUm{=jeQZa}AHMW0m%aZY zhs$5N;uHRb+5bs>=Z3E+x7~D|A1|+7aqr^~PeNAPmhIARWf8W`wQ1V>Y<6v70u0Nw zsid|2&c0^XYjVLsqRDL&4YFHBQ`azEK?;fO3z!6lkDDgU0ad`&(Lzp;xm>H=WQa4P z$3CJJA!Hm-SpgUYq43-5b4I(dEo{i-G}PBfK;$@j+_=(Ba8TLY?Niu7P!T-X+@ZC3 zaSkbDks`TkYoiRgMLU#C6ZoPISDM2#vH9AEl6kO}LT z1>0`5l@TSbqfCB~r3uA#D#A_yo?H2{COeC4PI@b*brs)AfhEGi+Jasou4H*cxqy8)Tkh;+ z8Kx@)v|h%>B;@jIRJb{M<^<+5iqBc}8G=mA!%@!=0Ieod5hsKar4n+uS(xMzmL>ZJpl2^0u~7-T1Kz zDHs~Je5h`SYT{=qsZf}nfIqibWRSqo9n`Qi8Nfy9#gAwR<)gzN{r0ad7 zf@q$x-8*fUv|7#5ma93I&W498G)b|?+7}kXghg)c3%aJ^-O8i8-6A22&K3vDk)~QTK|mW-9Raf#TNlXt3;-#!UZ9g(9bx<% zDoRnbTfX8<>QTOo*gQ%ui9|3HTV34vGzUd8ykUhz1qgu-!Wd1uK%KS}LyNHWL0?#= zh-ghPzzj1wa|J5#7;ftDxdKD}cZ<1kMT)vE>q<8>$ckq<#XLiV{RY3Qq|&@*xOb`z zMZrH~c>~;#1LOxjQKZb^+=UPtS8$Y1ei@cXKujo9DiBI2${^fo>OBh5lWQ8vvT^7a z%9ajkvf^;ra^W1=f}p7M%Vo*3Q)d6sj3?&!P`*?Ma|)D3kq!<5p~6wD_Ap1RX#C`A zJ*C&UPT@nI*9*E}(hFjG2>9gws_vNn4$OjU%*8Ybh04?W?;7Q&FI zNuBZ$5?-$!#)%Lm0=?aFjTZNa6o)zKmy9!>{0v z3Ed*ka5%9gD|_9-cU9Opy*|7O(-1PcELKLE(PcR@(i&jK*z=1&lPYEI8()r+`+KSB zXUfyhntkSL@};k59+_00oE$OteQf!%?|nV`;9XCyy7vc<-lzELm&Ez;Rk!*5@{0Rz zkT0)V{;mJ?qvdaZ-LW#|S-Y)&@XXEU&DwnKo*N#t*M<(4eeBC8vL(pQk>~Edt|CWv zpe#EciL<(V84z0UW54G38D}os>6~Nsc>Spdyynyc&RaD1;2B%J{8!d`zAUu%g&awr zEx?vxcb2tD+3dW|j3jOn!`^3hHtOipRcdSrb#m%fb_v09+MCY;a&e>0$V*c~Q)#KE zsp5gsOfOCgw(YZh)Y`5G;`~T>+@Y2I2(pmdGV7Hp`x-Swp?LZ*CK1wM2`soYp$wGf zn8<>a*S`83aV(3RF(H(?T4w17HpqmlTj z6ecoj2qmzNy=(_CDdR0H$ITu!FbYFC@4*MmX81vKXF)*aL>?0)@)X%LC2ceyJcFDp zkGrYI?eOin9IH-4IdbomvI3k3oTST_oiCrZ=!i|%e~Db!``FHrZ@T^}pZRjW>^IB( zY`I@CtHb%i?@Ur=hs%Dy{Gs=~*hK7HVvhs2*-bCaueM+a(2x{1sMs_^wh4|B1glX% zw6+(h+c$0TSU~{CjaF^Ya6sZP{R$I~q7poEwA5gu{xa&ym+eQ1D4@jUfMZc1am7ke$xE(0292sLHB)%Z5^X_d zAh*kEO>LjJw;+Hcw6>U)!Z6{MT-bE*-nHrMoY(SB95j`FN!>9JWq6Dr4yTnx3EaUG z_sVMJWpW4NYDM0x_pruE zqk@BjF2)=@KT`&>@xTfdSL7iF>PnpPQPAQHO%rZI zlk@@CikxWt83=-dR#!zUmCQ8|1sdcSs?d$q5a_@zM#VM*)nU47#T+-}9P5^PsOX=< z@F@lex#6dd8{{VP2n_g>Alx)5_~V%e^d@fDreRGW5Y)+1^qSk0%sQgAl8%r|F@Zxg zLEZ*2a!gX!mJ!OJH6ZDuugy3+i)=9^DW=eh8zImNXSWhG6VM39$KGKKdy@fGQ-B&2 zb_>7gxb*kAnVfAL(x!*Ivke#tLq_Z2d) zjd@1#mw)yrqBuovf;K`P%MhNS3AS|{w+!DlZ0OH#T0Kd#^4MNIZDt^Sa9|Xk{GhHk zm4E#Ezk$OKrkk}zV#}Edavdt+#YC%+z*8)6K4G1SX-zP@WxG~Oi;Tqq&vfvKeqCV6 zwR%`aY~_|3YI79LEd`!|aJ$EqmNI`z1(`}M+Bh@<;Zu+6^fFD{!fC}*r-NHY#nvjJ zFF1z|b;`hm0N5-5R0pvNSxKyi7BM~{kz49q2>8T`6r;rmq=qh_eB9`x4hI5A%sB@R zR%V|y&^|@;xEa8@39G0Os<2c-(X$b6hCB0>sLE0W?8ajUREo zrM8zq#;eizz%QebMDQSnc8i{3IjH@yUlVvh`s@?l67k&tFMCK-rKSCvxa`$4F{yNa zCMCBeiy&iFf+GQROI3L^O;pl^;d+4w6cdm)*kuF)<{1^8KHSL3My8@?d82N1MQh_2 zRJiF`x2|YMkJKs2A_Y~p6Y89BwCS7a>-|( z!j}1%1k?&14py4{&K(C8B0y-R4ksz;oDK=?8Zvy9FoT2869+}btw*3G@98kK?GO8m z4KG~5BoP8CuE0}W37NJyq!=lgkWb(nqK#4mwGzaz9L1V5itl}SA6t$rDeiFDS+flK zkN@!5Klm|{N4xC|+PSkMxhEgFU#_9Ddc|VJo7g|_m2AIaUU|69q1zsQ&=w2kZaQ!FM)PKG=5YCl z{kA=P&Z`cay{&TO{@YL8ZgbS!?T$Nmrz7^?`oKLmbo%UP&fd^=tbE#0yPSOZjwc~mPo zLF9T`C6yL`hj9`{C!umlxAHzXox@7>+)6fWB9JAFHV%QbPuxfb@kAo2++x#B-~5Kx z+feL8p7Fw|NE>C`xH)kKaT@Dcx@W(h58Iw*&Y0nILf}4dByFUUjnZe12(mHJ*~Yi- z%U>3``qwbdodPS5G!;eaOw0qt~NlJvk| zOm1iCNE%N+RJ5Op;e|1sVsEzb1`-D$q8wZl2TjztOtueEdW2_3HJ9E_u6OD?4H?JJzaz zco|!=?DxyR`^iiFe%YTdzvInkl`s16xRt1iq??F!On;Z zf-5F%l$g|td94CJ42Sd-(~1N`r8+4xEaS&5e0(%v9)u>D42>X77O}HVKV4ufXf#=b zZK5fLK!~%EX5a~B^`L_fttO+XB>g#=t@kt>$= zV8J>-F0aCo&r+MiIS>bONs*fiw#`IOx&+DGB`Bg5{8w$gwI$9P=<$NVNoQ*v2a>C6 z#PGhU=s};lP9eQVBA7t1;z^bz)cQy(wsa%6{E>7{Q((GUnk+s>h;DMLm2;@a?RA{n zhRtw0G(gH=n@@;P0aQ_#AQ%=!hp7|C*hT`Ov8|P_QiBKu6E1>-11b!IAfXscR|Sx6 z(CC1bA)%6JP1r_70XSh|3!VsmpbvZuiRg+6H>M2H`2oHP=7vx{!5L2-U2&^GAVnbp znn=L4(n6H$sEQ#-8miNQCUiiH59tc&;2^FDA*%pJ>k1N_T=lro6K6w3j^jn9soy}o z&~(VUx`u&mUOeEqGZK2@hPFsX9h?dBWgCRSx!pF~c!q4S8SR)GNW4K1>4q!hOy)pR z6s#a9Di!sZt3U%pqT-e~dWSj3#_>nxAW?fy1#x`F3UV}qUyG^%-kfvjV7e$K<{NC5c+gdwg-l$<}h24)_^ z77hhC&1&OuI#m@G#RNm1p(M2U)0rlA9KB$=q2(XQ7^)1d5&;fkWwzi51!*1{LJ}3^ zT*Q0euJBE4Npatzm*JMB7WT{aq{H3`^w}9#4^Iq?@){xiWJOrA7>S`G$QRF-1VNB- z%nQIvzLU2?dFkRz` zn_KxZr`3-WFHI%6u39T-q0tZr$Y>?SD$~^~H6u>ui@!3+Re}tBw|FL09b5^g+vz4Z zR4@T_rAdU5P*_2g0|Ao@0-shxpTU1rVi>J!;|B+<;szlp@x(3XWGoV`Tu}w`AdG6z zMukb3!2wggtJ^h%4g`5LT4Teg4h@H>R4AjURJSIp5Hw1P!7)}FmPTWxVeGmFyjH(o zmNH9<<-(4bCCDni>!t1mY?VIz)p4_&TC)7~_g6go*h9}e`rwm~EPML<_y6drhyLJ) ztL4kS_x0$?yQj+c9((w%iZ5m_zSYk~ufO}YtM0q=>c=0xNxpo*J{!#2Z{tJfNR~G` zbk?TxXKuQ1)>iUmneqwqc3*VpZb$C_s`>kFaoFswk3VGRMe}w#`oLH9!nSv_{lHnK zT(W%15j&lI_%2?__CEGG$ILk6=)I3QaHkpDzv37CpOgg6GgZeWw%XEO>9F)oW-3V( z081nld9)l;=IiuRmRE)=*RwC+fUvV^yM~C-w&K@?;zLc=a7`eNgeKXzeW6k}mzUA;!mn znmEuWv+R$4VPs8{_(Py1-8|U$zO=C4YmjuHn;e#sfb8`vNX+FLw1wkwfmm87PL?f| zUHo8sr4J2l$Ff>6FwMzANF0WueN@hf<>{P;Gfca=gCyqgjKgK375gkVcJ|Hx!gWyr zSN|Kz*7Hm;&pbI0OuPqc(|NXKz--sHWmKYV?`V5!H?jL5!2trE zqiBUTwi}hGvL!xnP@z@m@0L(HGaP;#?0w<{L50vlaOl?H(RV@-6;!Um2JR|gix4d) zaKK%;kwi@u^`syd>cGTA`ctYBgdkK!_{3oj8M$8IN#vqLgx*+CjB{fKTZa+FMf`w- z!u%s};v!b~ZGus19I^$nRzk7m8LUQy2s#i-tJHR@;7lt@;w(y(bQ|8rrxk+&hNuTq zCW1Vj+ElGaRPZ+{Xwpyt$wuDh`FzfYCW2>QkcAVJfeZ~>bIl2nIWA5XU~?(W<)M(Z zdR*WsoF{ny{_p>W8!Gw>YduVJJmjGkdvT*M8x>laN~W_%Po8W2!{7at^CKk1v?edt zSPt&e?bV&?F?}h^0QrTE5DZNvKQN)fXUFUIVQ66EC*HeapZ(5_x|-? z{8x*NF4G-2b26QI=5PM;Pt6f)g$eFglcMo|{ipxV&p2ZO8J`9fJ}Q(kt}L^f>LxwK zb=1(B_mH?%AS?W=xW$vVbtS9Wf)HGtjey)sD{y=?spy52ho&xwGbm@Mf`kjm&j8e*kjl>)pCpF~~R zBr^7Bhze&TjUeOMZ-wlVX1zLO9!^zPql@OQ>f{>9MwG3}tTY7_Y<(5KfFDCT1HwUhK7(#SbOY|hk5myIK?Nwd|kw!Vn}@c4#{c3UrFnC4zkX9bVZhw?C4Y9&;7 zz+*jrBM2dKglBsd2xJg` zfP))bPyymYbA!rAwu;gLb4#V$@R3k5H6pL?9J}i~u4{M`1V@uwy#Nzi9CQaDo)#SZ z;S&l0#1*pX?+T5XIB2!X_~Q~EmT@F1|JCDinX)ZSj(UCW)lsQm;W;ceDm1wY@+Gak zRxLL~BIcL!C~_YI$tT}^6;>K91y=kr!mG{>mmN2k7t5C=&OY2HQ^2<2f`7&8V2H!@SDZsK`xo?$GUhK(ynxwSGV@HsPYa7dnq zGx`@f5xQ>_kh~3TxG~S`eO}^?GJR{uOyZmE@+3`cQykvFZwe zAmfBenp`0(;Hj!oUF(pjP7_K>9wnOt{`?AB%8-Q{=MxkyNaB)$(5+U=Ah8zfuhQG{ zaji%)KiWuKaUeob2{@Porhf?2{SXzm5DMSzXeA4uNK^(lIGQqC1?OA`lfqnKFgI2k zmd2`Ez{a_X#i8k-N$C(-b}ValrtEXsj*|Ugx#MP^|MFImFMtJ^d>?zoy}pv_NKetbx2^N#MThQEzU-S| z-r1HfJ7PXz!FDp`(~sWu%wuYHsm$#YS8+F(=mpn#2>LmYR>sYT;x z?Ll1RRGPOrMDEN$5b3#FZj^@jp(qzIQ}T|A=2w3HlSj>;`|@>OAXAnXyULOMirG8Z z-p%$CW*^FyJ}Z4O`^qnT)c-HP<#ngN=k4eF^X2kohs)P|=~EKt?|$=Y|G&KC&TlWi z?M8norD9vR_$$H>+qbEqVqTag?W%6A>SI%|pIY1P3m^d@w42xwa4_$pHRDvYwuuaU zw}6^V#=#aaw1DbqHLet!PLfKO_- zl4S>%0vlj5O2d-!*m@1ml);LI22c3rg7=ntb|h7#Ck|STJrYqB!l_h92Pcls*ybNs z+IF>|dMdTaE7$k4VlR*w=Lhb;zqx><@D*n6Rl$=)YJ=z(Jn;uquR9Va6IYBD9SWHU zzktBGu-~llt3yuUe+FtbO|;60ciVY;>(!r3eRm5vS>hRshKa*Dp0>fH6_ceKB3o~* zm_!_+0$D;zqWMU9(emH^^`CoWx4qwgZv0Bm<@AeI)6;PkS8x;@lT^O!`J1yUCSW3Q z=5y{&hh+QQGMMWp)FvIfn9V=Z!g%oZdV8XM%Q@SgbGx*=J%1ksAS5H@b} z2{cx4V8c?XqMLZw2#2h-3LJi**q8;F#D@+{KuFt0nvOt-9NRh?91x_z>{bUMrvo@N z>i|CFYI0Rr!BO=N9Y}|dT-_B_I7TBh+%U(X0>LfJlvIZdIBwMyRJwrL21!Sgv7)?I z*Q`)aC_Yr>iVO_LEpOA4=B^@tDyhc=36IeVGFl}#hNOc-$OGJtoQE-tN}a+9g#%t; zOXlN1wA+fh;ceQQuyjZ(BWLP~){9u$NS3A<@KRvoUlPaj1~BzT&(f-S1Yd!mFW}J7 z#4!ad4t|Dj_n{sH(LN=0bP8LRkqlb(3bycZPEan*$>4C1yET@qGe)?eOSIajN*1K$~k<38_o(@G|cmE>1O+&kD^}M>oJ%2 z%;n3rV9%hS84{SNU`st{@C6%w=FD&2*02I#>l+B3hS|qGfd|*>hZ~;w*`VPN#4{dV z+2SGC`fbF%fQ=hkh4+lQOq!vq@L7x850^=Ueu2 z5!)NLysCf$)(SzB+vX__dJTVNbh+fw_?lS3UTbZ6rvCb?5 zFXwU{`H_0JASyf{X_6vMOn&Z+IOK9GBEpwR4MFP9Z%qN+q_0V@Voku`HReg5etkYX z2ocaocS|mAAZ}F%(lkBagu?_tn6;)JIQ)RfHMs)9?3#!?(0MlzKexa~?us)i7U8TO zpFt95K)s{zLsUa<5)n?rsnhE}#< zXfA-0ILYg$p4wSe1euD8iO}FQJ{zteCW5A^RuB5TgW)exp8BJoJS}}zWX*tbWSO!e zcOL&I_5BBz$d{iha%Va6l0J^@_siwWlI4}lZ+UR(cUIi@ZT~QJ`%PDSAN%P?zb9XI zzWm;|9>3R)>m0DpD;zN&e!%7n_n&;`OTO$?>eG+hSJo`&J?)6SPd$9EQx4nn#QD1& zId>cHV;^(StByZ(2cP-!%jHG$UnN;S_k_Kb*PXDhSF-(n+4-_Jx6e9e_NhnC+UVC` zw6^!L9SnI*QIaUGbA&2*lbuN|ZPRv>(qvazv!WHyLs}WGWEn&zD6Ix`)#_y@Nn$6_ z_>@_TX}u&VF&CQ3m#sqmdo0niNHHyjc0ETt_CW!sqivW}^6edQul>tOF1Dgvk-rpG zU;tSLE$sK_vmVRKsU^Igno54Dq}9o?%}Wp~M)eU78q${9B2t7@#nAaL&cr$TQITAT zhbaS71r=o+Hc8>VJ}t!Z+9aO7ye(CyA$QRYq(dvlih@Kb^g%)~FKtf!t<{U1GLyw2 zuEfEIsg*vSRM?tTWX+s|r)^zb>g_23J`xjzvVNTT?UrwSb>Ts?)_d_UIZc)?dn@}r zZ+pGe*w?-K*|JLMGobV3tH1bhpU8gWg(to1t>^mQmm}sYzwlc=j_qHiZuP2sA)AWQnX&l2JyH-CuNJe*`2npdeN_ zfmV!BJb>0hfe#KKNQ(bO5f~zb3?~la2b*ls5#(VY=F^MFgaXNO#nUZXOz@-<=5QiO zlUQl8SQ#N6Ng1F1o?95>?8BO7zm5rIWQe*??$#J~DC0lMS%JUE$4nuGTkyx6X$hi0 zn&c%o;Jm^4_`6yqq;=+$8Rr1&tocu4OSQ3UY)zj9X5iEj0KQGmE_#Y9%zC$;Fex2mw7= zwFenRa9AhJF*fWPGC}-_!w_2}uI7T@_!wjAXf;hQfOr;%+cFGhiW#&1zF zbB+NPm98AIIwlH80TO)3Jk!u(c*cgNiA8n_noD7fQTejJem;2aoOiwbA_17cjuPof zf4oEM6i=Kc)R(Xb=NM>EAt%I$@`n=T4pmnD}mCTCX6( z!IOIb^bbGvB|wH-np{1$5+vB!I}6}OliFOfg5l6XGoFnBnKihX>rD?EH^L?(pLlR# z^>DasRe|7{p--XeJ60s!3Ipg_oOjQxA^Ofz#yTU7ZoFsIv7NT&0xC`QZv&4GoIsok zNI>R*D7q>C=(xrPGp_QzlJ}WR9fR5;7^m8;Z4lVOIf);`ncYkN+U%bg|5KD7rfiIzS~qGifn%yyMK8Y! zf>YN_7X*2*f~mks2Ui{Ku>Fy^W5X-oOR4hS#$aFL3_Q@;d1%1 z?|mJ8(3XC}e9of1oG<&{mwfrW~rG685_K0t>63dI8<8d;KrUJ zm6P;IEhW6JGD@kgv}I9{xpQXQXvh_=bl^oOwxs*DxEIKoz%#NQ zK6mwif`>^gUv>+h<~a8oaOa8A6aQ8tN1nOI4!do?g=1y!V}JO4@9-1mku3Y_m*UXb zS+iE}W?yyrZ~oS0?|Aju$N1#erSE#(B^STuv!D14$?~T^e(BBM`0|oFZ}Q16C(FL~ z_0ZDq$(I>r2ehTO=h_8q3a-|3Q$|N9R&sS~KAcu{_;w1V7@!5eI01f}f?FF3RxyXE z;NTWeWfUas;Pwu6gl<*BMCF!rS53;m89w!~1U|_QYbQLX# zenlXftP(7xCk_a;f)^^FVi#pJwOwL@r=pT3#A$-oma48v;N&rpafO=^OQ=wU2%L%5 zihmm`&V1;a6v3N^-X1`S_lyvc8+<9VGZYys5>kv$AFLCEd&~kyjAy`;LUdMBJNMEJ zTd|BnXK@h)vecE;AVlj^v|fI|P1@+{tqg9Fz|sv9X*;<{aH9hfr5@2roU0NX8aEPT zBBn|XHHuu!#nrl1;W2FS z1ji8V?z`-wF5Wh~*}^|Wd-4Xj;VULTsiu!*K@a}VfB6RpUJ>uHfN4$;=OEz_oQ)9f zLav)cXqrS~#l3_Y8ywQGq?=aysOSp?i2^}s2)RYVOE}aWNa`I_L9itn8dl`eK|SU= zMFBSq5LVaG(SoCOoet521BTY#<_pn)RKg@oe27r8*8rd)(OMPiLC$bfBW1t=Gtiah zN)V~OANEZh0Pu;FK2 zQ%|WB3qfEn@6~0fK)8-K#D0Kvu3l)?sNi6nj1r+%JjvBc5MU7Gc;-(ga8xP3G^Z?V zWI!PDoeIqI<$N#{(R z3put)PZmB&ABtfZPjEnJY2unPID7PO0dcJ|@R24VqJU>bM^8+-`Kg%fhhq(E1EU@9u9HysBsxF#Z? zp@J|BcMGT$P>JW@)2%D~tWb}fKiNWISH*2()wKm=+%}%44{3u|akAV8v1QHoFTL}Y zZ(qM;@h!$e`LZR4skC>qS3R`kM^CQu<7MCb@$_xB9{F+j7L`M5*2 zcep(HW2reC`~9*b<`Wlg|LSA+I%C0(uRdX)3l`1tGiOQn+&wmX=?mAIFWX$K{L)5w zuFSC?_Bz1n@ifiAzlE7g|;PTSWYk*iigB z%e_T6k*vx=WB%aolyD=OZ{{^)O9 zblw?9UiA9Y%3OGiMsXQRtv7En^WdmaJMQ0Cvs?LM~M)Jn6!6?gM7pWTGdCGp$UhmT-B4c27ENB zXrh}d5wOt^nw{t4P)(qii*8bkL!2&EMz323sDdfJqw>t(v3>+v86MIKya(I zl0L{a5uxDL#)`A>W)fpe<=3uSp@A5^6fmN)H-)JR2XjE{81?)L&r1k$VFBbJ2hK4j zO(IX6ymkm^w0QkkMrrV1$LJ4xhbzMturR&mBK=S zfh664fiD4+szab^1AyU5u^(1-Hy5!{NQNhIT#B4T5JpjwUBWZwP@DeFYs@bb*&Lya zR{SB9oQj-1Ofx$i5ECJ`6x&$4ysL@U`IPE(@+3Gjc}bXU{BSt8SW9v9bpjbZWy}IP>rIh4 zLvYq)aj=GXL}~f*Qwx?_z9`899(NcsoW?C2#ZtH20zwu8xz4S=&}BvfZxg|>n&4-n zD`akOzf*^xH4gN_)-68ulq+DIv8AnEsJk`Q>J|xcsBm^=IA9e@Hg4g-*W{K;S~bNS zkWqA2Ng1us02OEo4-n1OAjq+TnROrr44mLZW2;`PE3L8=6+L;ZHAhoplDITMrGgb% zO(;#nY4}tIIS%SnMM5tBn6vbuTlNArgYpS9WXfBEN{95?g1b}n*dPC-}BoPJF@B@@ahK9Q#w9_BUc%vO_~pIvFr`L3*U8fYt* zx|-(9H`A>ZVuo}^?qwKX`tmTr-+yq<|M|CnrUw29Ektr;55)XJ%|7ne8HNgn7%J@L z2UHdfO_l;w9(e)sIW>s(B;2y&ms1vD-1L!Fo-`zk8LI^Us|ZxMt}do?IMmDkJj^HR zyy0q{Q2^KI6l{_359X|B>9!zK#=byd{2|1gINsK3^`j~hn(!E_hbB!ZXvk&U9u7b@ z>g+@mwhz)A4mAjE?yv~Z+9m8!R3OW3WurR1tBUbp07&s?md#RZFi%hE;MT4;tr>AX-te8e9QUk5EMc z!Hv~QsT_PoVg(7WUJKnRM8Tfm_( zDXEIQEH75^aUE>c17?(ZJmcWX9GT!SDs#fYB-(DDe@!bSnjoj%B(Hof(ay|a3st!S zLWD>Vs^_$BW0l;YIUEJsFlh^Iv;qbXp}JND&q~NOfz;|YO}Y z4Nl`wY0Nj^4ammu^s9VXrtEN8S-Ik#+i&p?Cf|`GyOlM26T4TkUHy=G^}|d1GIsA{ zOO|~fTe7@t@wdH<{p4!DWnSidxnC~JnxA>>KKb(RJ-OoEJHB@Rys&D&?og|oIfdEs8pmz^vhd(e))`XyyP`LNwi zIed2?$DaDbsU7?U^Wpn%>dV-Un2$Sjn{$rX^@3yeeBCLt-*DQ2XC5=d=e2j);>Ew< z_sg~(JF?)d{817oag)Hhm8*&vMUuTUtO?vc1qfdfExhzHq3l%>D$^7tqJk!*?CNDm z2_R(QuIw1C)fkm@SfZ*Bfy%OMy%j=`CC7xR387_K{wv|NbUR$OPe~yy%Th?ll5S|0 zX!sT=e9~ONOhd=hc1d+xv-Mw-tAvzp2x%f0R`Bt7-&KN)vxHi~md@bFX>OBcom)rh z5?dM~#0`nuME;8{aVg-zKISyn3Qa04koGfVS8=#wXmL0lXvj3~;=K$ZSA|zOLE;1M z-Evf%58v}}`@Y*>eZdQM-EMOq`*MOTQ-0S)uT}SJ^4>#hfUU$0kp$vwGr4R>?yVeK=o#p$u&ybeY+0?+c!+DA>$mT3R*z7DsT`&w%xcY zHuS`40#qQIu&xSVMIAYAn#^n5)RWRQZ)0+W&nN*#(YpXcm{gL;(kg5$zz{aTWGOYY z8X<#fisHi26vS0=OLV@)9|sUt@G~2YA4A#|CjNBR3Z7A7 zPI~%_6T?sb<#h$Iu^?HIW_IN2;gT^UD^v_-zxy+8@e%xx%LDpO!QT}_;uLHpu!U7A ztm~-(M|5OQ7mIGT@y3R+0qkYwtl(j^G>4FqAazjP3aw7zB96tMAZ%I9IV94}%Ag|b z)PoMxM4lcHvCf#6QA!+S76^IXG4ab^`fE;*A()cB_obWgCxs4{6bpsa!wHdj?V59LVrQ9+?~$+BTXRX_-ajb5WC*Xdu2gSs9> zXf1V>-OEtwbhc1C9}2c{aN0$#N-v^_%(;#rDL?ax;~!pP4o|Ao`;9RWUL}hYQ02Ww zot`DD;Y~Dhbr1taQ>+jw@Ldf6`s2oF{G+(7Ygnh1Y4{*-FstZFIHE^&45_4mKyLH` zsxVejQTU2D3c$pN+C+ep>xvKpSJG zSxL(&_is4(F)S9Qpg7q}|qvJMuERR;vB!>45ZK1x{j3)7SwF3x&OyW36%?rrC4n&Y4-?u@rxUN>c3I8yh-9kPywW?2>w;^~SYF-2Tpzdb)#` zdWc~nBQ$Nj@j|zM{vUs9Rnga&$c`x~Q|5Dz!r)tN2%@k#1v6Xoz2EaI=6=Eq zNAVa(F-mbpE=RzVXh8WgV4_JkJ|!azdOQ<>Bt9m%rx|=m0X?W7gr872I3P(S z9M+aEBS*yq*V1Psn7FP1#FJ5gsT6)}A!{N|ldEK~_g?5Qv znV9pB0nBP>7|N8ZK+sAz3=ta4R8pZpB8E3&8*;XUnctwft-!3fosJ(cdvQ=eB8N$N z{X$7Pa1jUuRtzS2+ikiTcnx6VwxR1bpi0Ha97VD$DV8<+&9Ve}#d2Q=@ixxSK9sFV zamwt)>~iO+&tl&%(Qv-}%%czZKDNJ+TDkm=hnC&G`oX(>?8{$DJ+S0vMb`Y3xbl|k ze)7!9TW|d0Z8u-O_|`Aqb<5}9a^V8s`#Rx>9gjM6>-lpwK4_nf4wRad7p~)}$qps6Cd(S@8LXF+70D^I5>;8PUkQ0BTKsIG zM%X$mixdDjL9-^4STZg@9cgdLuUp)Z$U`wC0>l+C>6l|nD6JH%v=T}^SGviLX-+*M z2Z(}Cm?~PS1c-`;AQWRPhS(Ac!2z7?n?6bOIGX0da#tI@t2Z!-5XqCy+cBiGLeKPJ zAeH^RtBj;$ivi9bwKu2JC}0+OK;AOuB|sHP@tDTb}0LQn`INu6RO3wqfuHt;+|Kr3ue zM@zKW%#n~VTMBQWsrdD5xl$bH$S;{03QJEVVJAk0{8q z3pl9?h(A_jsQ_Z=YH#&irI)V7at>cy#mOr2GAElxBYb%Q16fBp<|;JAK_ORNgijm~ zKp?B$VKs5I5<5X)wN_Rj&+~dAm7u9mzN?C+Zg~bDfCs+Q3}AWPQ>gTZwlhk&@{Ro>=v8Zr|@ChJ)WP|OcY z@u#roX;uTG&2Doatr44lcm^gZi~cCnW~6@)Z_NtU95keN|IOHUB>5eCGH z=r~WmLPN{Ov|^nRV(9XlE5*P$jT+EgVWu0xTmhm`%m^_UjC>WcJRmi+3`hr*vfcbi zTTalko=rI?wOJVF%xoCPpE%4($%IV9!3K((^v!_upiH6%foQTCD)@{+f}DSH+NeOt zZz?rG6VjUqD&*or9}GoVf-&hv`Hs9s)+0jeh)^Ucq!dFIl?m05=LGK3DMlJ2Ur2fz&&O|;cy=|DwK9C*)3v4@9dC}Z6kV4h0Bl<@kP z11j05?lT66$Um4^G;F@u(hC2^j1<_Ew4`_tM>K9^>B?H^NjJaa@Jx*7g+^{6L{24E zAh+lV6-ac;f$7Y{3b}3pyQLe?kV5nP)9eOO)FHs+f8)V`F_5q&!bmd|DUTtoh}QM- zbj#;%`GMM81j6bDkDgUAq_M)p@U!M~oly#?u9YSjvO2{mj1>eA0c@96#mT_F?Yoj< zV_>AcLUB)23eg4aMo0&yVt%7SF+v(7R!oVZ92S~EWwgiw3@~t19>vHU@A90`FNvOg z!r`*SS-vb;{*LckE?um6CENShKX6v-DUe@cdM-4T#Nj|!1zQl}diR@>fFRNe8cqi4 z)DI$>Wqim|y6YG4#ysb9g&qXa#ei_&yB(!lt-RXUGZNHY-PV0@(AKK1EBRATVG?=0 zX6@h*lZ&2uy2K~fajfKve>jTffZf&#s7c;b>I7I$G$3xnbd_O2R#XD2ODA<4H2xu1 zPw5V~A+M45`M0=``If3p|2i?zsFZ}#?PuP8?)ZDC3YjgsqaJC5fFMym(pp_|);>GMyB*@ZdO%9q}{hyRWXTM+ev*jN@z1myZQfBAN&px)| z*~cI7GWJv7U*=z!pMG@d%H_8$z5AQX@43;(zLeDu+_~c3@Ay9U@_Vj-c-c1}yzi?Y zdEc4aZT5nLXKm>Jm*vae$9BFvZ=cPN*l#g*}H7C?ys)(eQfDo z*`?ggMo{J`cLsEo7ne4IizH>xZe?@o&YH!0a!k8}H_G1g&bQjrZ3o~oS#a4gh!heN zL9cM#qN;+GY*db1GK(C7TM1)St_a1w_(&D;lw+><>%Z;|D{HbQ(zU`D?v|<}p|GL? zqR`XTEs4-75>on*o#x5@I7=eApIsR=_d)n2>r?wne_ z{Fc|A;}^ zhs$5N;uBY0{@dTU<_qP^eIMJuFxzcyg0{sbj;pP~-f9x5n>aQLMJ0q5{Lu=oSo{Z0 z%f2i8HU-!C;2c_1ZWZ-N)I|aX8v`E@c~g~8^SAoY3MN5`Q;(->kebvrF^Z+d2I2=p zi=TCw=-0k4NKmxq3i#p#O(F*+s@PIPNtW=Va2iAS_`^vzO@Q2sT$5~(tm)C3=!Igg ztcD}_5%#DEUFe|T13^)7OMh3YddI^Xn@22MC^ z7X|2p16k^c6ZvogOmI^83V-^;VJ|pYg=FlaL%o?w;e!MmUEmfbCJLZJuC!>VA=cst z%o}yw7!y&7e_6WV+;d4}0zyuvhPo)1z3;!|5_3>4Ks28#)YfX9S#ao~=bU+}-puD5 zNC)G@SZ%l(HXr@)hrPLxrDzE?D_!-7Imks^mohZ*6$PoTlRl|BKGdKs{yc+&x+^!z zQ@l|lOrnxoW@bM3inkR{-t>{z0KA@UopI7l6QhDfG*t3iE;3Z=Bh2e2T5%MIXhOv< zeVV(_s_QRa`ni2k7r2IoGjfLOQ|91CTRP)rL&StpghI{<4R5yyT@wcZ2Ul`2!B8s* z^->Z;%4eDiLf+Fyt}wZUbQWccD+A37mGnfSLM76T6CFgB9@Ha%=>~ze3LWqyEAz3! zKNlefhY2StOKp+wsAW6!@;Gm+kN=Mhc)SLL^*- zL=$uDGE;9Itm{k@j&b6Oo8(ThgLui`KE-Mzw2F_bI-OPUVFJ?(B+r^0SkIg>L$d6r zLpX@iDL`9jI=B9XA1#&e(IalFjC%atDwyYa72F^jx)b2kr-mJP6(o!8<-NxeYgvAoI1G#w3%XPPg72tD`H z;WiB-2 z3cb%#1{!twU!9jYaCovN{m!3tL`|-BA7o4*)Kf;C$2d5<&3mqOC34(!gnr~?96*vg z>6ED-cXL<%q_7GT=*$Yvp|e{~W`fB$@*qG><%6X{5Rd@EaaG(x3mz?qTR^wbf^eXs zBCB$ZL-?@*93)yPQ@?{x7dnsu>8}jw@l4N}YK5Svr&UcLI76l(p6W@gE3pC(Cy8-z z)g%jMMdQltV5<%xI45I>KUb$e5tUeJO(K1up$(!QT5Lm3XDVq;TR0HnoDOhsa79vy ze{KvDTrb2~MUyKa4)qk~TGIra6p*BcMu9X0Op`&*a1gf*zZL|;&p2!Vt2E9>lUsva zQe1JSJiSPe%a>))@@46B`Lay;C(k}AWp=(SU!HvOYxPne`|9`0UdEO$mn{3-1I#3R${mJCpax z3h^%ogjsGF1Wvay(9&~hsd%!SQ6!8OE2}ZM4`?`CMk1PpBViX~5Q-IA93+)mE9wf} zz+K@WQB;yeF{uQ#DG;G1>8jkE)kJ4F60Ne@2qpD&BNtqDj57{-2@cT^Fob{uLW+68 z60L^`9`(zY9WD8MkY~0+`dms7ZkWL5QXJ@3dM+CGy)W-}(iwlUFch)-{j%RI``FiI zmtHJcmJv&krN-*xGhYs!`vRECCx7!@-pBU6uZ!Puwm*~deQdv9_Dc4*uDfFLuTo$9 z^tZopmGk8lOYZRdWyNk-$k+OB0Wa3F`Bp%backBnkU=c?_F4NiCIu%j%_-YKMIFS} zqqU-r5ZtXTqo6weuHisHK4{$%*Z$SkQ&X~#KyEH$NFNYraHt2O)#e2vx)4wa?6CsD z7DV(VHh>1`%63$mFvki)VTlSeLEtFVP6u_~E5L9Sevpopf)8#;AOn`1x?)vK4#!#q zM**q8cZ&o!6-?rTbA@i;I)!K? z_`qaK_-`8$L1+kqobvSoo)w0VzJL!0rovH_rz#CGfz08q zgFjaFU{rOVI$DM|J^_)t4M=mUy7DB=A=mX!_4}T=N@-1S!J=@I=Ew%NDex(?n4L z$tVc=F&)4&A4wAR5v!Su9GYS?@_@^+sUPo1I=qulVU|kV6yLY>u85~A>R6>G_2_Jb z>nw_a%h!+?QO2hi7(ABoWdpm0^qJZ;q>r6dA-Pp<^|1@EJKe%R$%rrcHi>qy=$4WB)ZqL!KKMR z{L#~W@=HDDNi;YOQ4nbb2w8X2BPMlyZ4)Ue4*%;aQN^ImS`1die^O&qB1s?>>H zH7UFrIUodlu+?$uoUdplO98>Q)njxMI1+_Q%%}4tmwDpRvVIjMwQcz|4J$yj0x~KO ztArDwI-E>aN7yZhirauH@JIfoRd)ZCH}@Q0u@nNEUL+~ABdG><=c znuh@2ZR#arKn9SY+X4Nnea2pk$VsL~uH8}Wzl|Kj~kJX-)^B3T0syaJA=f(oWv znz&YQORli3Y)hIeZ@f!3~FDT8gDo^%#bX zFo!^xFr2pR7=%Ww#n8|-TpP)aYH*l_?l@dtqf)*sg9ev4moNM3*Y7{`{XhKSx zAAi^%G2hqw*j~o=uTtK}erVaPUdEne%2WTp{P42dRxZ8up`|x3z58o^zr5`3t3G<^ zSvzn20w2fr6XwI_ZsvV#-^X4!Yx1!##h*!?v|vyFB<264{EB%}j@(l@X`v5)?R>)g z9TXq?^8001f5g0K?v_&K;}6>U%n8n}GH(CByxux%Jzti%$;gz>g>6GlZ=G04{bX>x z`7DVOuE^TtiSXr>(nDx~c4V#KegrLFMj~MZ)Z`UqIj>9*M6M^HCPIdY-0lyEkQ5Ny zT=ADG;tvQ%LW)*6EsOX1qDSSu#wpsAw`vtyAS6hMReEBDc{wb*)FrTD5)cfl)6gv@ z9tg;kC900jSpiNuZ9!l%2e;5*;^r1YIzW)^f+#&$AdCJOs$hbU6U5u(zHS}<4dOOG}-TzOOXAL*{xTy zLDU^Uf8+yidH=g#d)6rnoiBgtWAFX=hcEV}FDJ{AW&bL5<>x0y%y-@LP5JVw`xpC4 zwxG)<*q&=uw~Px^Y@My=;MR6U#Wo6P%cxlD-3l)ui-Cm@?Zw~_uo`G%>rnwW4b8z2 z;_MnSpn5pwW`r@sTm?{p3@vW8g3!=jPYq2((2ZTx12@wl*xw~DhT`O@_*CdC&Qa$9 zx7flICy1lsqmEoF1JSw)!bJ#$;-pXsVI;1o2nAP$yFj3UU?uRtgnKFGiiGkwOfTUh zAqbi#kTk(T(L_}SrT`8KoQ_B92*y&|h$>u6F@%E^6EwL}2~XPQd6wyXlM}Co3vgp(2_xb;RJ1rAVwi_~7Qpj9!Q}F}+ib7Apnkh7HHk z5EBeVSrBX`5J+b%pj$v}iJ&lJv1Ni*;ix9Nk|rg;sUW8(bLdGBFM%)(6CGsi=~lN2 zIOmX3r-xkk;e&I`bGR!Jkdcgnp?)Wh36voVTUSglujAA~8VMnU!vsJ@MS+jGD?a%d zGPasj0AXTSZ+6wnfl4}{$|zUJsEB5@PPdSH$e@A*BBH?vl4zUg$M_O1 zyMugaz(_a-&oy(ueX) zgUOt1Ar5l|M5&&=L0m~01(V1zREVwu0abNZ^~zWZ3_Nwp69IvquBdRsGr20zrgy7b zeq(G-Xe3h!kWr+k9`ZcNnMmB~V!rNaNm0GJ>=@7)u_vAZJ1}+FCsr>JK&YY^Cq~gE zbbs!B+9H3qkR`$nm*vIY$M(jxjM(isaRz^yGo9vJqOD_g!>QkI5=XQmO$Nb`O6s(7 zYkU${iopa9%s@8sm8-r};UyIa+0I^Ti{7kjMo;Bit>7>r#M3~+L_L@CKS2;cjMX)m zXpK-R^s_`}Dh1;uB~@LeKg#r5sZjXS9S8ykBIotuWfF=6&(Kto z5_t?$1A=MKWIx2FDoj_2aIA9ObXnkTIR}%{)#;pvdMZhoE)m3S_`?Zq8G>g@hTuvN zHPR#!%2eE9m7dw2lhvnlgOe3FD_Tc>4X&UHVZ!jajll40h*~i$vn>}!so`yQv|y_L z{*Szm;mPgfdtb_UAG>_Hqi1z5bo7;MC(DxM-+Stjsjp-&e`3{A`La)B%a{G1RQa;k zu>F&ieEFee-<2#oVqSIsckjFFt4qIk&GN-x`S@>~>;I(6mk&Q+^FwBC;`hsk&EC3v z*t%C3IM{DzZe zz5T3%E-L z;BFDhm$kYDcP8skSfq;LC7E*RDpo=W+=K)bIV@U+;0BI~I((Qaa$9IB;4mRzP6VJP zX);6P`y;K^3P^dC3bsTOfeJZR@X@MZBE#hnO>pGG;0m5@<@JuD{0!MI(YVe5mNXm{ z5*#9lr)z{NPS5=fjTD;>-XCLye!=#pn{f(6G zecf~WO%9p`azcpqN4tZ4)}|nSXwL<=5hyB}0Il{-A`{J=ilp7sE+9^@JB*@L-L=xj zF@2{{#}HKk1_TuloE4Qph`Cnyk;J(uq%ywRPP2FH#R zB{Y~6S#;MlvgXA}JVgUf*6{~f#DGpMHar|a570-<`9QN4XY;~Lr1|Eg(Ok_URWx8D z!vCOoO{*n>XcWH{g`aygkTr$$t2Zf8{$O+Iwv}UvnjKD4R)i`UaFRkX{qPYnQYbhj ze#~V;Cc_Q&lq-*OoI(@yNgt=M>f;k1JKCo=y}#z^7-FT_GYn=GeSY zSTQ8#5NZJpW@o6Np^PC2&LD+MvxT!MG=1I%;sC@aK%^Wq5iv9bZ@#P;}eTd!{}osUbduG@W2i1fcdQ@Xj3V*}Gr@oZ zWp^W=l<$>h%l%5v3_(f-4KcBV8w_!hssSCKiUpg6^ z&zo4SgIGh%Ta02u^q+RMjn9jpD~Z?a+V+-T|7} zo!MU&0bvB4RwiW@plFBJ#V=6N@#EAr)KTb6qyz=MDhZPET3yf~D>yO7mMS>SDJO;K z{a7Y896Yh%^oIE5mwd_p;G(c@c@W`)cp4>A=rWW`7O?efsF)iXkck{H3y^hlY-lKC zu29g4ht7^pzZGBu^rR%BE~%!FC%Wn32_6d3=_xV|X>*Ub3F<=d1N`YkYyn%bPJGKx z*Tt~Ak`)V3oC}Dq2Ji}Go5($ktw2^CPlKIh#d06}vcIfM_Q@};e_{6WvgfgFD+9%{ z2eZ}7p2vRTk=vere6cNN4`$n6e)fs`zVn2any-2E{;U1}^7&^Ry4pLI@;tWv<(;>B zyZ>L_Zo{|iwAo7PWxK;(UiKf#>SdMkVSB!}dilWl8`)p>Jho!l_rB(C^NzqDF(0|d z#_DC?$NuO+vyMJ+`=j>TVZXVXzvXo=xWBA+t5CHUY%9~BF8Qd1(#$?-uL(;aA)XEP zppiH4w7vkT?z7uVR}f-5}Lq=9b5GF3F|s+ZdrhQ}j?;5=~vaobgm zBGRmSrL&5d9&R8~mBSPQA~9$&ux>_L{xgKLZ-t?xldDW)cKgPl#zzr1&g7VG(-(gg zH_jqJe6QW-dJB~*6}Mj={G*n3@h#W-u9w%7mBYYe*uARk(QAmV8rlA`f?CyVC;E~v zoS|esaPQf^_jUGZANT(9h3A~;6=o%~uYP%X`L0FR+p%vuZ2?w~hu7Z#qxkhkIJRCQy8 z;4vB$Ql7+*6Tq234ClW*Uphw{hv^|j2?pB66fmocV;dSqpfDE-K9`Xy_l7urIvr@X z06ZWp%a^+3K=3XZCEr zQ`y)uikVR{XB1~TWz?z_N=(Xc7kFq8YWf*+bj$(;h_LFuh>-%kRrr%7h2GTQloP~H zGdxvVJOj{a)}>h_O$`IY^aD5#jq=v)!izJCE@Ba&q=N#X1QEg+L|y)nB0?1e#ZUnZ z=V#M5N`&X!&VNJ6riYUNPlo73Cu=Sg>SmWz6c#avQf%nN%?=%BY}`vQA9VAr87(md zQYeW~)e#(_5V3+rVNT-VKZTT&g|qI!Vc9K>Bb7&Sbd}&CnYpf;-?|EFu%28rcFy5&6K1KTh5od)8-az`_-$^ZY#tGLlYrO zZL?^m+Ez;p@eg7=h&!$`A}=Em>yvloiVg zVb!sHWRm_x%B=NtrSz z)I#J6fG%hb86t{VJ0b$xF-0VW$54j@gF}D^L&pvUkDy^j(ZcaWNma*X(mwaxuURw^ zPKr!HD>6JE-4QfXLpS~A1t}D|0TC4Xr|E-;UL6-h;#$^;$2J5J9WTYB50xrBnO2eU zgnNUTo@rat%4y3bdRha$WsE;@s+Q0*RCPc}<=D^z;T|^1jgG_3PZcGFX~0I1!z%qt zF22a0*Xk;hsU_vvY@99KoYV%`T;;IQK$UJjj1`r2;>LJn zU8S*#xT3hfT5cb?!g&MYmrs68*UKv9HkTj1 zZ=ucQ-+$_XXP>;w_r9KbWa00B>z2=*eYo#^`SWG(FWX^--C&k+l@{J?@O z{TJ1o_pbihm%L!TEEQC~DGWV{txUFkY_1?4?=r)*bm_ScB*wd_E{VIq~2Ec z%9St2;=$~HSUim6p;lX>h^lNgsdCQ{fauyMRdRZAOkJr|tRhC26(~6kDI_a()u9+v z-+(Q+U0wW`@u*eRp>|_wCTK{liV{@eV#90{py*bOR-(c|4nXjrt6_OaC+nOM8K8?z zSBp~~f|F(xb}?{XtKc<9&co9twtiKXXs!Uc6U;2gfRL4M9rQCM2?_=w#l)+?k`OFi)cFDP)J@-?-kL~}LJ&dhh z{>+)jU2ygZO6KdY{NkNAU*-R#Zd-Vz?|nV>jr*)*DVf~E4Y!=rg(PtLxOJAJx?_&C z6CS6N%UwW=A7`hV?w}CqK5mpqsnD8T_uw}>;0E8ja2f0K^Zsc%8EoKwZ8l@_u!@c1-TZm?)Ma-p`hdJ6} zi-A`tlsg0+h&XA`bvw$Xd?<7rpn(+^F*w0;Ia70qE0_|+DnWTpZZHJ!QzF7dni0)B zY$#83v7xXM=pxe0tEQ8b(T+vRb<@+-@Dv#iFse1m!@)*XwoquM9}ll0!r8^oTLpzt z8#*~JGU&0Pz}d=^4mxciET)qZfr0}ff-HdSI_aE`_(hC4Khyb>RzvO=G$@lxgB=JO zXhU@4z)%<6Bcp{SPw4LlC_}6e;VltWvms$;MNvbFs<8&l1;1(LCqYAqF%X20xeHOA zXiePosVb@{fo?>C#M2P5(9w*6=QIFAv1v2SPecq9qOgvQHgr6E;BacuTZCGkgJdxS z8tB2}_XHw_03LXDF>nbF6Vx`%UieiK;=}-=V@N+GG_Z)y3J)C=dT<(|*{FC5I2?}_ zkGW(H=fEwk1nFr;DV&ajSmn8)F^U1OLb*|NI*TN`rcjM+J@kNqmB16`+%Mc!eb(M> zR1GLHh(_tLPJCCykI&YgB92{Zn*!hH@2If?=QcrKiS!7!Ta^ofmohLLMF)6DHE~NC zgD3&W809&Bsv3gmB9hWFhf~mWQzh?`Ye~bTj*>l@9$cCSN+6|$BPm6h4k_PEheD^- zmFDCT&?lLq0<1ZF<; z`-@}B6tZwUF;mD8KWRYeC~MJ~gCIuN1;i=A49tS^A((r*`HZvq z3K~6_AudX$EJTc^JX_7HY`^v3+NXMIPgyO1D80u}F=AC%Jl0Hf4Crt4#7CY6w(HiFr7ZmGSGC? zI6N0UlQK07{5K@FhQf`aV?&2mWR2oznm*1mOeaW)-RvOcsm#4hUu&CZa;0MUgLY%UP8h|@rSvnVY1b>SjIGyH|V_hr=Ib@>cC zoyCyR7NPEdj8coAPNq47$OKBb?(ahWr`*&sK@UoCCL+Rq=F>J!-qOhgo-{)e!CVk( zb)#c|YvZJ$;6Tuug&{;%avm||+z=c~&T8!1VtrbWD6K|#%ed99YZ=>C9;LBODk?bG+!PDwx+sf)?|G(^$U*CS>Ui-^7m!Ey=UhOv?xXI@7_n&_7 zyHDNy_(M1R{!_RA$q(-N;(5RKj^&nm?}l&i|I6NA?s;tQFDsV)pVa(q*6_`)J!Y-7 z=MHP{wc|RF-Dj=tFQw*g{qDVHuX*77_4l8#R| z_Y)s{-+}Wso44)SD=fS83q6mmOjF+W6))wOYEkX1)b&ATsiQj1XEv*pmtAI=6_#Iq z?KRfWwacus_Rq*lX@AY&d1HmD=aKxSf+BH?O*im4S&4zI(*5_@c+U2UsXP;1>` zF&lz&4>O7`5k64Jd8V~~C{V^SEg-rw8C@+N&g48nc8zM;VjWPcZHJmIbo?g8VdiWW zi!(%JO_j8qIX_Tfh=Lgq7ynjjp%iCEr7i9<-CBLbs< za_d1f4COEl=ng?T;RG=Z5!QjO7Be@DvWTBHbOLrIveyqhKNy4-c(jh=+Uq|B*Wv z$1u|gxWOwF{D>x^OSvCjtYJ?qsFb;pSB9K8phbe8pwOX|`E&0qJ$N6KdezMXlWB(?^YB>JoFoFF8M|wRf6ASowIoImIen= z7*U9bu*i_YsXG49Q6KQ5hZ{GZ|M?&P>Y!7C(^!1h7$fF64S_t| z8jyrb^;M=oCjVDI{fSCMt!jnz1?-lM*0m3VHA7HkZIO^Ix7QlNQ!Nm#E>;oSP5$Z6 zeyp{RtbB&pQ&u4BDw%C7+jNGjqpK0pNfj%V%nYTO+Mx9*2puB<1aOlT4Ri016(X}n z1Th!|j)f?Rm?#-?SS2$MBJmrwJ}E?L@egstlt2(iRPA*NP9g2g;wOa9B5XQNG!q#` zkg5l6s8Y)ierKWx7^((@$3vE?;@tG9)kP!%!H=SdbJYCE0EumcF8zpfs-iXBvs2XL_EI>O)9%uY$k}Pe1_;Uht3wr^WndtyrLEl>kQ={!MbC1q*5X#g~bq_ z9I!$Nn!5!VC!7y}6y@0Hq#tf5b7mKhmQI2^$$^dqDdI+jXqpGJE+sCZOgH6%lYVQU zC&P1(AxMtpe1{2fz!2vE>$#d~Lrgdm5mTZClsJ4Ap%rK)TZF;5!a&A?hg+gf`5*s@ zmsVVXohEJ>|F3o)_425oC!5R9dI0_de$L{f!>~B5h zo#n}!%inwEf$uyy`6FhV%RhMLA@%YTkKFjf@7(>A7&Hd-Bsb2o@!8<6ox7pww%f03W_m|aTK)L5S?|!X3RL7~#{6CbfQh(|4juqZ) zL)jlCcqH2=H{bBu$wu?5UiRXH_S<`>S=-LtdB-~z-E`4~=Uwx)%RY4E2iIPGl`~I0 z3Bz8y&AZ^-vo=^~O|rWF0Kz}rC<9fF9)X6aH&vHy=_;sI=!!=*qoUGig)^6=toRkh zh{|LYE)hi~PB;V+j~Z4nE2%J=5;ZSk#WFSu@%QvK5fwV+=ytk2%;PyWIMr};A}-}|z^eB;$$x%t{l zJ&f(2r2PM~e_?j1mv_0bx*K-oms&V4w6ZUENkj4kLoWVKpKhtTuK(@=I0?pJC}MOt z3I^Q(o;c-32cT2lEe4TY=_P;|>PF9sA#4CK5&js3=TD<>{=}9`F*oX(Km^^y>8I8x z3J(zr0t%cGDG)^0DC;y2H}Z;3Ll~M1W^}~CoMUN-Gb=++2!`Bd6oXJWC_F^Z1Y&@V z+X6_Np$$OlBFH2oMDLV+7~7L%2wWF4Exki|d=qS+CF zr@zS>mDSMTi5F+9*-?^dL&;Ju!z8E@p$9;85#3xE;vPY2nbSptPy%S9oCpSNr06Uu zGTPh``Jfw{u9*_-H0*eFl$js`j5>&jr0}DokYaa0oV8=_*my#UpW5!}?*4Ap(jn=q z7tmz6ZmCs3at3i6=?6}UlWn7Z3D+8RQ+C7cZr)8lB{(_50q!v>4gtKQV}gd7co!Ca zwhj?Z$J8*Jrc=)nh3Hb)93lRI)dzCF?Q{@KYjx&ZK(?H|X-O+=EprhyjOx&H2Ne$EQh7#QLb2e63}2gI{= z#if>|E}|g}P6~-Sx-LJ3T40SDDm&8cFd1zT7XPtOF(i^9g257*p_O16Nb_0@;s8uj zZe~SQ5WxkQIjH+!U-0^YxClU0r1?-%Lua%@aKNPqU;{ zE5eqs<;_rQIJKNHnv*SBe;rE%oiq3$MjazA$uLR%;xGQ(FKD)(B)U1Dc+;c}k*g0s za4#O>6mp&*24I?xeEq(x|N2)yr?zNNLZML_oOh-?G3Scmmf?~$^Kx!`rtPSr*i>y_ z)#|lkvH9;L6eT3iAi?%qm5o-i^=vUyf?@2&sxgzQ{QT1&{}H6qr&Fo330_TVh!W?N zE@qm6(XpkAAH`fu5Onw?;Xpm|u>rNcZ9XNJd4_!Z7kd}>gsMUqEY^;_5ry)x;L3k_EsMVsifPqy6pRDJZ zYF*27K6F6gp7iDniUSHTn7Day2{9=X!P%@4A>oLk5ujibF{fUO)9m^kKtPzVLUiG= zkrFhV#o6Hqn5MQc#)F?x%rWE)9u%?=6nInPMp8r=rQb{ePCBs}#mNdka}|*WqeLLA z7}aJd4~a(^;zqH#$WTHdS&%b^K%t{Nw=L1J{H#)|(Q>sK5h1{GZE+g4V!!o-KLL5> zCqGhRy9fC?qF-VqX{01VvQVj%jK%Nv_I+g6k1CDrCi|=90FLqp&pxVN_Q@|>%r=_g z0AlO;kH7c$cb~l9_r9Kfe9<3%_ue0T`!@e4^`=)(>Sd2)+h5-4z3=q?@|J7)MD|Ml ze0lCxt0|fHowI>gm=Bn{F~q~zwwP@$AF}%<>g5CGt*26+>@3e;=iuGf-+k7K`_EnV zumu|&b-ka)7))|>CM*RFo#C&irS z-vmKz+XL!S=z>!f&jcR8d8KjnG9`dGPzb|~!ueLAP0GYg7J`8)bO8C{=rHisPFfv|vJX+w~ajpRfo`wj5ZUoc#NO#kWpP+LS9djas zhY!+J05KXH>)7BR{Sc>f4rIMicx45J!v}by_~EoQ(>$lp)c}3y8ElAfqZ!Jd?4}Sy z{$vX=3RMI0e9&2MI#IL$GiT6gV@rxVrjKIOkQPxI?-&tbD|!gFh`Q4v1Zf`8BO*R2 zY5FJ-IuWr!S`q{)OclGsBA%|LC`E=v#GKIr-2ua~qKcW63v$v@gp|VWVI+|N6lRe*rKgzDxGM+DFTKlq=8o$beW(TgD%rc z0Of#4c2k>C?MQ?qA#U6Q1WqavQQK|03_+m>5CKGj;TDV^U;{&tHa}wcb;a>=;P}AR z+)x3TnpKU+CAbyEf3py3t=X9f<{UOb4A{_{I6)LSy!uz_B{9>&D-d*LXaQWAfKx3D z;+LLPaY)ii^JIFXMxcraPPX!mEf#qg=$eb8RxaTv25fgr4&C6$?K#dxA^lD2%KK2pEwZQ9|K>h=EoGw)E8E z9CU`PCG^CT(l%c+h3Mh|g9ifA*3fj$HWF0X8qgha2)8g=Z(6BZIi7^AQLP20HL8`u z$?+^3>xWVPc<@6d`uBhP3%^#mM|&Ll|NQU&K(yVgYPO=_;$ISI9a5rnRRvq0Or!8; zq>Ckrd~g(u`fuOA`|DpwEEQ8$wHle=fByTw(54XP34a_r;?$ZQI%t}RrNs1`0!~AS zx-?T&5_|9B+p|SmNZRD*{ckl7Eyd}w7mi|~TEiBnl0?N3Q44SNVhD8ONy;K01x{y# z7H1_)g?2b1=1z(zl|LxX1rXx&ab6hW49(~{mN`T-YUPWJ2m=8@nJo30_-n(3(WpRa z#b+#};-)o(2#O~*NbTU)9fyFzfW-kA=+QMqXYM&J)OL(ehN~!W03nzx!47LW2Lql7 zJhu(ksiAdVR8YX3lFmWrQKuUuwt*1GZ*-iPkfFh7e)56!yn^s&I4LH@rwM8s(k5jV zlxQo6Am|+<$Zu!ST&7ygG++an#yQdKLr*h5(KzA`eai_ESQXZ!g=857tw{wkzz_UN z^W&fVaSIQRMMb3Lq>J7$>M-Rb>+E_y!Nt>=;`D?#t(rwsXkkPlH9ar$EksSbRc^p53T zwce^P-E^JT%-ZZNJ8!kpj+?Hq&3eo4wAs6T9NYK4JdZtZ>s5E#ZVmq`RlRI~dGDRq z-fx%n_L;MmSD5{olwuk9Bj!DKeCK|mI)EX6ohr=Wn#>x^I2+YhSQlR(Mt& zDUxAIDj$xjI216*dSAXP7OyUkn_RWqnWHL5hGL2aux@7ZaUI3iA_Q9Qcl{Y0-_ z`*Jm-Y;nwx-wNol)y{ccCen#W8*>12UU3!9P}a>$vCKn$q8Q}~i=IW-RZ5Rq8_}q( zWaY3OWP8V#U80Wuj16U5$;TdZ)F~%^T(PWzKKslQ@E~eEbPcMS{qwSKVr$jT9>cb? zY^XPv8#4OE3s3g`@?P_|{`k@RpK;QMzx27&)yrSG=&aA5cgi)FfByCxzjnt>SKfK^ zH9n5*d2DYM$dpR^yOO)$OH@kI;N^r6Nh7xEsS-%U?gCD+(m5^JkzvB4Xr~#Zql`NH zh-YX>Wm=1&6podYkb>YJA~~QNkCrF8iN`s_bD+`8QJ5#$;tZZ)7bglxZQiEMkZ$_5 z5y=9YL$t$mR`LyfIzIS|Ud{}0Je3>iL5~3)B`4_zl5NMuXz4v9C0c+#W(ekdz(aZb zG1CdbiHKA2B4t&XDn&cu1VhX>mkJ1}qG9nsa)So=u%01Ag3&|7mXh?q@$(RplX*+6 z$`QXga!*;If>1}0vg6zinrdrAfGnIXo|pxE7)>n)GGWw*wOs_<4p>1n#OMITToMF8 z97xeAq~D|{FI)uqlk=poVGu60A?S#twwTb6Go)D8qQq8w%*af6NSj?Y%y67Wn~s)s zCd35K^8#DwZhYy~nj<%)0c8T(^4PKzcWt==N(}Jma)#0d3=p??2~5nAjd-+W#TYnN zWcX%MLctT@hO6sn_Co|5G!e4{_;ouv6F>i}c*hh`NgF7KtBBRhK=vhDD@oIq#YxW$ zVUzRA^mNPXv~{tS@~H)=#VlyN<&T5Q4Z-JCYgku29T9WEma8WIykNGfF#Gb-Gx?sl zL1apv_V)<44?b_Q;X3XJEoO(CPV3pT#NK%F_Yi)}VS>ecut+l3am-1M8ASmQ-QsK% zrLaSEty{qP)<8PZQQ!`QwbXLL&z2z*#|(6%AXt0b^=S5_t&)0#Dal#nuxt6u)1-RAA=zooqV<&iLVTR-W$ zO$#h5nd5`{mlbigL;*vbp}Z`S327u}{^{@k*3of9;MTlRtNM@w_WkU6=fH_abS|V} z+Eoe>=~Q0>T@H`}q?|Y?fFU}yj3OFki%W@5&XlLViLO?y?r7;&O`uqf)+A!1=s1-j z)-ZYiZvo?}!o{CLbQCKePtZ`63vE<|r=LQ*(%*RWL4uuMDR-LaQbhy>PN%LjD2JPo z$eW-jHOHbVEJn?-ONwrB!zm^f5^%7{5(Mbet4oAai{8qK86C5s0Vn~w1%RI|c9}~S zCyUw+4Q6xzK@6un3MJTvMMOM9eEN7oT4|jZT@yNMI#aaZc~2tz=$*Di5_d9A zbUyb0a85Sm{K?NH{4vxlwD1u|f`VD)HHJ*&g}_23LW7{CgeRa?XSF!uR*2^fAWGor zb!*ME7%UQqbp_y-mU_7d8$6<5s6~a}dec%{v>CN#v`qogx#I{TY5{sjfs?;=Y^J(5 z)M-BEB|dlt+n@^qwJCI-c8*gUKQ`8>ssfEO&2X{|X+;SGgq~y@B@Ss8kRpk#gN1^* zt}8}YCkl=F!kD#chz4C#>lDx>!U|*>a0b`{qu4svI6w(HKeceCLHg5!I31r>vbEQm z)OF#714yIR_E_F3VwJKzW&6wOWyP|#N_mN5*)KGTV^82)cARf{`q7(wA6vcb``8<<@rupYd)*G3FF$v?mEZT?l{}B_ z{bf&L@4w3?KKbQ!W?ul?b=%c__-nyVYgaG(-q)NhR|LxBL-*R~z+Kn%RJLOIfO)GO zwA$m%n zKNz;fTv7b87ftq&@4WS9|3vk<^Ut-*{LU5LX6RKff4S$e&pGonKdAc7m7QFhwl90} ziw(KEsVKdDOrero?d)5Z=onO;9N>mgb*1fH)hi-r6uGKSqa08&WV0i-C1RRI$F<#_M@d+|3s!9#*hbk5euVZo#^=wPj3DwMz-WuZxXB z;wV+eG@vH}0c=slVJ$poIB8zQ)yoRui!ZXbZj;*Y0LpV;EI(JV{OQv_g>v3Gr+nt@ zlkGlV@l_ks=c|a7#$HcWGAoL0B5Q3Zdp5gzS?yc}4e{9amp^~nMdzQmVApLvdepur z9ec#*K6Bzl=bvhS*;Cp6Qp)?wzW3$-q}0p)g4zDEG^q@>q)5^wDV3c%Q-F(kS9RH! zw5kFD13)p_IfZAM4QF{WZ37t@U8!%%2az_+U|2LXMu`C3MzP_q8yo9}0u-ZB@+xss zy6|iz2san=GQ@46P%5^&m&In99?VURs>DZ&5~LrU2uOr}z9Hrb;-Dik;V7sw5aWT% z*x4F7@nqU96%RUvT7uOerS#b14AG{!xe#Om=crk9Gm$^|bAXg0(R-INc{uSx03f|A!A{b?^!m3Si&~4$Fex>bW)c~7KyoYKq&kh zb?JwXVfqIykHGne{VMNXQJ|y~LP|dk+2xq{`UHd3DL~RI8EP@4*{E&`jY6_JQU1(RX3>%`{Z4@{CLIEJR~17J zO+gbbB4aIyMifCq7cp$017|eAgCcIlF!4+`WVARt=^SMiQ(K!_vV%DAR1M`2lxh8W zNYIJXg~(avQec%jXD!;U30)-&buvP%;Z8K?j&3l0GGaiiV4)|Y&DYe%?5ccq`4GfY zlu*X((1np6BBO3HijFwMmp}}Oh65yM4%tv^ZCEW1v?b({YbC&y!2Qf(c?6aXiuE}t z77QW^IuUCLg(?iKCAiUCVy+X74*;ISO+U{~s^vihg$Qp!%!wnWfq1aQG$|xbk2Oga zbCeWz!or2FU&C-(#?D_Yh50i))I~2=S~v~EpQgY8Lx`l{7}({5JE{AEtBoNSF-B<% zuwhOcPDB*i5Q8+w45wkZfk-5vGeJC%U>J(WlIz0Wn$(SAO+qYPbSvEQ?jGbRYjtsz zvd!i3_fq}CRF7oaU+$yXnC&S84`8>mJoAmO`)A%2s^dKHTuoOQtC~Y`2j6%zSYr3eBkx3dF3*%eudkX zhprc1dzE7Ort7ac<|9X|pb@v(ax-r<`!BQ0zH+fIXn7pkgU~(&X2>_J)RjJY&5%67 zZ^izL%KKRgNrk8VUql<(%1;G5igHycJyTaso^#gCL;N^ZxcoE)4Aknv)uCE=+Ti@h ztk%Xx1AqKnk7&P}OIvNRvGf3gvR7M)E%yduLWEsPbjc3D38&K#1_;M^r9d_WAcjy> z%7%p0WFhvZn-_72C$W{x-dt8DLsY~{<~E&sPTP?3`I;-gJlS0S;(67}-dR>4+e%gx zE0$Htwvs)Vt%&wsa}~5v)wE*y3m2Spz=9o*`OpE!ee6)5$o|}CPWJw?&E=b~yTtyo zN3wk!`-z9G$@`VeLSd^;9xE>`cY@m&bF#RS%ht-f%3~3+jYK!1+ee5r3_?jFwfG^3 zhNc;GV;)1@S!$i9sWm#F`;-=iPU7(C+>aFTz>scw_)wi- z1xrKB83in&jOjE58m6Jqydpw&=!bIy-~p-zai#>N!T~?8q6jjcOr#+}Jc;KICDL8Y zlxGVYB88wDggAf9efWb1po6r9X95L6)=B{4&=KbhB~(FDWr!7B3j>7|PKXj9PA<_# zIV36k+ygiVeL8@|$#(1@uF_-$D!Vi@>H`R}=st_&^+wtpI8Q;+*}6?bKCqjtmU2M3 zy8zAZ0({8rT;eS@f{ezgi14EUq9Ht~YF^ryDWnIUT0X#yg65cAo)iQOMJx()xynQc zmrU7DH|Awh*r6O8=c(nf-`fBl2yTZ2-7m)Pnub= zlX8PbYp0# zP#9D_IeaaoRB0Vqrd_EVUq)LVqa_9z=MH%{pndP=XUsXu%^o zSS-3iY8lP94ioDr;mtJMOo1V?ju+fyhr@WL*?fq=*(V4lh>k5NR@0oRMF-p<;xwlkF8#Q{K1#CRcpSK3ekq2(w{<8mHexZ7~(%9#=Y}BeWRfslTRe9>%%{N)! z!_tTz9>r~>6MXXr$f zU?@VXSk<}Jp++H8RTPt=T=hzWT9rc9mB#WACHyO1`cf~5c_I#j`WiDHt;wR8ctvqV zI+q5p&Ij76qcOlGHo62kz#l~JahMWBLnt7g3TGwowO4(mlG*dvmtX3wXZ8Mt=bwA3 z&E=aHdaIfbL@qez(m)?5Ar);*ow>;L6)Juy$fE%JJSxN?jq4-hofQH6xrSvT8Mw2y4DjsdJTISFv zcbS=NmWQ3-x`uQi_?r}MIJ0OJT}w%GBuJGwsFc;qenU{O7{ZUU%Sc`&o7+S{6l@?$ zL~Z&x0N2uAc;WQ31py3Upr0Xb^RtPk0Rt&5XGET$)(~^_1A=gFU=VYo3G&r8|68y zH631M4zm-@Cj~v9>BS1aQ`v4-&)SbuFVjg+UZnw!m_Io%>`uGay!byrs^c|Dhrf6tj!AuI|@hx2iU3=mS&Ny zlw)I#OLfg1UYsY9nfZO&O~1^xJXSZlZuOmt7>-_PY*gCs<}MMH@Z)jEH-w?wSSy#9 zh&*>jvj`U@%SkQF2i7|TyycsD`EAN#j;K;|I+(TDnHO<%QZ(R1*8+)FQ1)J}A;TX9 zFG@rdg)kiF63L!!TOks*n6-$L;$mhi-?88%>zskO?rDX(Fp4k=a2(6N%9I2_K#Btj z90(nXj7!AevJUqY|2^^6H^0H{Mjm3C6gs}H!(P$#Uy9xg5;4CyD`1OTrVG^Yp%vcR z5Kr9>eT%Kp;WiC825#Sn9kRcl$UMjHtoJ2SpCYkS zZ+|%}twXj(&dD^F00xlH3`w5x*w#_fzTzcIpLh0|{)5$vW(+O5@p`Ww^8qskoaO>2 z-W=1JHc79uz+t6RnyNLb#h)$hzx~rc^eHe?#%vc``E1csi!;ZnShZuJT7+tl+UjLh ziN)FaT%umaS;5jt5t~&FXaE%^ogHbQ4P9wNuu*O1Q1JNa$=CBBY@u+>FI4&IRKlqU z0vM=u%6G)gR|H!B4&_(|(NTcWW?c*gL1fD0L#z{sQ2{qvZXa1vy|B0jEL;?6?NXl8`)cEbWF!K{M+<7;-6wF-KoY8RC4S?Qx z7eiMJI1#6nA!l@_GYWBC{^TK}IRG&!8iA++VH|PBpOacRxA9w?wN@sa=ek;5%AXgE z($H~%;Kw%6_S{1VmBw1@!7r1ov6dO)sF(4#>Z}~A7lXyq#~8IP$VTy7PAw|;6h9*M zGOwv|!(?;V;X~<=wIphrGP*9{=cMj?ZeGnaJwufl;U`##;D&G(v=r(hI`Ks;Lq_ui zKW94MwfONAWFBJ136jEzp%yVeM;DJ@(kSuuCw0@;@4br&*CM8ex8{-{*)gw>4AnMM zbZmeg#}_~b!?eycED;|!i|!OwAl%YzeOjv_M&TZ%05fabLiMZiWQ*A)FuYflYdwQ) zs10c4v%2_qOJ)>wm9m$YJ&&yo`R2oS{^$o!cpm%7M;3kKfoq?6?B?%2b+i9pUi&>S z*>=aW;h-DPKO{MMZ|Tgj)bZ7%Qf-c`NDeDJQD9W;MaC3E%i0rNJTv*kN? z+kVwOcU;Z?FZX3^^|BiIkloihaQ^E1?y{QN`Jg>ES}C71rI(Tw(5B0fvQd%Fv~6ouq#9GH%gQKG5v35S zR~4?RlUD!_56Q}nrOMTwi6~czsK@0PI7z8VAvh_4sF96&2TB*wlh{7r?SXR+G|GDQ zGAC8s)x1nApH;qqX%3(WOC>PDtka3e9|%E2%qEB#5k#cQkP@DtMCkCt_EQeqWwyVp zSnl&*is%bIbDFBz>&&10_>n&LrFK?9tC#y4cIB|WW)N z1stL+Ju`%7iK}|WLUfmXqv7rX-3!P{#^k;38Z>lYz!@|Cfsdhbv(ZvW14Ec|Ealx+ z(n$$%7MMr z^v8h6{{cARe2|NZNk&zmZRp5OQPM^^B|OKUW=Q=yL>E8+B^Y=pb>|fghBAu6G>Vp$;yfmc%@ElR83YeO3gdwzayx}a@dF4-v*tkF zrd->kNRicrI11_r-xl_FJpL5kF;ZeM>ZozMwr$x|85%}&K+9sbjB+o?7AY>_F@1Y{ z((;y*dffMlStkqf9~%vs7!ygespUXZb3h#rTpB0>m zFd<6V_-xjhjz4qSk!zJwmc+X0yP<0l-O7~=TIFz~hTsyLZ#;Ovsu>T4AU2fpVQroi zOs09L5Oy{-5n*wdM2y=a>=kmSK*v|HtX_lT#4~aAxpG@KpiORH+Ar?{aQU0;3%wz& ztRAs?Iij>pk`|ECmGH~x!jt`NJF_O4F0dvdq@DYw)}nPdTv$}TKoToHvY?VB;T0>v z5hw_v)J1&kcA@CfW)?o9q4aal^)>+tg$|ihj3M_34<2{R@NyH2Ei*_5nj3s;F%aY- zfQ&=mki`&0=jwF2PK34&kOR^A&aoD=Xb-hC4?R>_B&J;2X=?H`xOthliM3`h6pfO} zvOaAIzo9ZWlu1BlGJIJa-1^~++5P74e&z1@`q#d~7Bk=e?VtZ%pez+PaX-8Zj=B8v zZ~oTOVz(7bwxpClfazRi6c`F@@RG!Jjy*==`Y*rvI~lmLT4bC)(qk)uo|Kror8vN1 zwsb9KOLw|5wQliPtJSmEKorYZRid!1A|};nttE}_T<9DLCrGNjW14YhR1xIk^NQt% z{ru>1>KZK?j20q(q=ozz^F zA7 z`Rjx-q^vONR07Ty=Mm0sBF}pQTJ2pki95$xXC((9qQM8NQB;Mo$Y|kWF380#KjF#Z zN3r}MCY!dp=+6uBm}zWjpm1C#9c#O0H*hRi5;m4rcF`D)Vg+(n+cHt&M zC!9a*a^Ut`ZoK)1YZ0+E@m!)OXckE^RM${CAyIUv^>f}3od7bu04M=Nu=tcVTcXyg zb!&-QYq}xbJK()~;5SS^A}W?A4_bRS`G@X4@;7J1YJt>)YT}!>Z z-~9D@Mo-AIY(dF(4MyJW%c^Zn11B3jpt6k;H@xnZs&aXe+aE)!=C+9y=89RRWqZVIDJR_uyBjJtF}p276rtgq)Wr{!)#!?4 zEm`^%ycpnys8S|trPj(-&OmH=_dND4JI`W;X;UW7AA%M2Dr-6osgVI(LCq+&j8cdK z3{i_tL!L87inhGsxgcoGng(H1S&M^Gef@P-KK+zW_K#8am%XIyxoc&z&wnYRef4X* ztvCL?BMw6MA~W!}%eI(3YYh~|>Sc(!*uO1bd-=sSn*FiVS*ILpbJ-rWFJ}8bwvyTV z%g24}5Sz=skF8$5@SGFBcF6^|F1+miI~U$}#|`(~c7saUBia6c*#*B11s8wmldhDh zl&tiu#3_pFxwFYl!9~6V7!L60;&Faqkg&nioZ4|y7}`d@hm)ZJLm^x4INgt&$lZ77 zcP?XQE}d{hXY@G1F-s2d^TgG_C?BXILe}ic3A>|Br{X9=A-8XTS(g$HFx2&AOg!uP z%ztc#@C5KYGzFY+JP@`j<#IrQ;2B6?z-yN+B6B#?kY?7qFVhCkhY$>;sKqcW!T}|T zQ>cqgA}YP)01DwYer%XUfKmLq5DE$E(w1^oV&DTh90TCq=w<>ZZBpMM zqDx9k8$qV&B&g+sgNTx2&4nJ~HhKNX>3e-|d&{z(5Ak#>x}!{kPYQdpLsDH!vx%5B zpn*RS#}|=i`W-Ch3~`teMq9c@4dJBUQ=o5r{j2@x=h7h{RX0ZQ=PkPw0!mP{C{3*h zG&_c(^PwrzMv5LoZ0X|H#Rll%PZb$NmlXX1LJC9PqLX4GB2_4{iLEYv%oxz&+7efZ z*2r?*ZQYc$ZvBW=9}Aan_Q6tFjZv9fX#)g5+^8}EKr9t=~c0t7K5dA2TtGec83LP@7E^!B|hpuJvY6rQQbWZ9nfiHhUuZ}lY_Z4_hr>FtDojMeAm#-V4~p*O&^-47c*&ct5DA4OP&6bhQWTMt zZmHD7nTG%IZ~y3yrWic_*pGRJ!FpqkPGDcvt=Xz`fAJENdfBYSLAb=k;cMYh!ZFrc ziir6$hyvb9puB9y()H}O6=gd315#8mp-nkDH_Y8p3sR1gDljS|^OdTVyqRX%ySbZe zF|ll{5UtUIgU5nF(M1#uAU<+DNvZS?k)Is!lEs2O=6M6m)mX9YBI&F*aQVz4Oi;V++|2Mz%pq;$v;QjZx^`;w~Sw<;z`aE#YV!u1VNr}dU zf96GdwT6{!HL4&i!p9u$nUcA6+oHvQO}POY#gC{+@!cea3A*|P(aLX0V?*rMBrUuu z*ogzFi%!bvKy8}&;CvwkFlU9Kt^j6aI>O?}ingxB-%2n?Ll#DB zW2F6#0_dno-HU4Lq$^Sj13zS%4j1HXg{LP~tQ!hPho`xPir-L`!dNPZII;1N=QQY! z;w4*M9t}A)oC^-M^TKKC+GPAmog3dl& z%B?XzbAVkg0SIR}ke}04Qp_0QFD53aN*XgQfOw+gd|tp0u;F(;vZ(9WS&dc^upX=m zFu6#6@7pL=PK(E4>uPKE`Pr5`V;VM2|fL=)r7W z)`<(ImPK~?X(FZ|+Ey2rnhs7lo+g4*mxtIuN4W?z5u?cxHzNQ^KYC#tEuDphENxaS zb4Kyg$qip^a{dZ9PC!FeIDi2KGa@{Q9vfA!)|UjHBwQu zaC(d;0_T+xy(bm$nm|fc~PTlTz0~vP^+XpkBVf5b|a+rq{ z$bBMPf$WRfpr^9OE6mDjrS$aQNGY7{Ecd;y>g6_HbeKwjrp1te(>g5gAc+>LB zy!3^h$M*BJa@fXkTg>W2TcWBwW!-yLTEUmGm4UuLsSI>a_jB&2Pdiz$Z0~qQU)_G? z%hp`=J${Bg{p1ra{``eoZN7>Bt9sj;-)IBcr@K_mp4|34wj$B(UTN$*pB|gE(X3Wg zv9e_|SG}oR#lQ_IfpV206{wnKuavZicye3(6JYj-hQy=*CU^V5)8m?7p2Au{dtZC#Gh=@$mh8xk>svS=nu zp?Y6Y3OB(?W$ z+kWWq1!tc85q~M=V_%ni;p{~>e0A}ntM0k&y1N!#Wj>8p;Q<%w>y- zloTK4bcM?yD~y65AC?u1k4T!JB0`|hl>>NO$(^vS7@phYA?0E&2ynnLs;CuDKSvOB z>fL?kB6^If(OmPZ*Rm+Ram0B`v!C6l@(w9xuSRmf%aT5U=>L8EJD%62Ef_9QZo#lv zwE0gJFeDc;!R9M0=18qB=bQczVG-2DkVsJ?LR*pNGjlxA7I_p>_#xtkj*}0%7ytxN zg=lsPjbyop)BU#;SCU_r(w$xMhi=r(7eGn_+>YIRlad1~6Vw{_+<6;W{AB_uOas&G z0=9CDOJZWOToN>%^G8<<9UVtRE3kg(=yxiyGAaVt%rqsW@Ea<(uMm*WL#mXupcF2O zWK1GgbqoR0JYB^?b%)HqtWOK7AXsFj(~4=HgC^x@T7iIXh`&8dGf%A-G!c`Bh>6%% zBcQ{33fCy2qu@6mKb-p0M)%p7U+jO#kl@g7`F-%v1AX7aH!!VU_qEn-7IVNs7azf~ zZfSF}=nAqGTfOX{i@)m9qZb%l*xl^B24z$GU;oK}DE+KeXHukEndaiO;*1utbtvXI zv9Xxu$$Fm2T1`{lRqADVxFvWC*iQr=*VpyqfSIZ{b!BKRD$5L;n$&8Mr|Gs}Fi2%3 zmSTm0C(cmE+8HMfBI72m%O$}vHSV=M!~~HNl`mNu6gyN|qPjMN;8w3i%Ugk97v0Vd zA0`Ll558v?SOT-*x@&pY%tipuDbezrlCn)ljjJYN;wEC$Guu>wzxl;q(!&HP{H$;Y z-M_ZaNm!VqGoQa>^Xrq}{VF&6=s2*F1bKwZIs`?<5%LPb*aP(;s>5^aK_tt%{! z7gcz~JbDOO%oKKT5u+2wPh4P))}_456rF}TQVtVHIfgVi7MUh!lmqGxEjF?dwQ%P` z;LNfhoi9!el&%J@{}HtqbZardaiVJ@TEG#VR!(Q0GW2ofIGlTd`Jh;C=zDXUnoBYFP%B@t^&vf*O6YxjflEp84x2l`=d~ zJ{xV{x$k|ox$K{p`%tzQnR|b^4P`sa>S&|hZiYX2*A34;ao_izzW3q#u73RM*Z%O? zJ1+m?CpK99mFi_%%WFX8*su?fP%sVdHoD^W_8PZmMdYyVWXSk6CLenfKgr z&H3A|vd^?B!1FRN8mvdVC&CI)XI`S&Q*t4f?^d$?*(=`Q`M zRHdcLR2?dL@G(qXrL-;wjM7R^{z#JT(i)-%UcId7#-mF=qv})juk;`vbQR$0U#b}O zr7!;AuP$cD#QDP(1e*$1fy@V0wkU}ei?k`34YA8rqts%cM9Un7renXJ23@u&#EgLn zl@~S{yA5|c_%@NRyZVyTPX2^#WgqoYFIO6C70z#Y)2laFcNK3jfA;({>^@(3-s$I_ ze*CGQ`sk-m{-hmbrLnJjdHULW$_nRRZH9O@Tcr#X&R@Cc^fOL6`oO(sfB1;KPd@HQ z;QypNkF8|(3iI8!UTcfl$FX}D+l$On5h<&zNoL{faM5;+l#`SYI%^PIl;z zn5@ykIs7qYS*W@24Q}p+AX>8M>M7Z~bd;@WNHfp*nODtSJA5`8v591ztBCYZ61AEU}01*!<9uCMmEbF97VLF=O?%Gs_+(&!ll(sDnUv>MEo@1;Vt3_(ezx%Vs41r zDJ12hYDN%%ze8`bcmkY53WXjfia$gbkk#gY@yV9MXO7Z!B`_XGgb4pBWEbw}sQYMh z;9SfpAt-sqENGk&UPLVs+DNhD+;{SdFoguMLFlpIn}P%FP|}Zw0~AWmo#|?jRs|)l zflg$XN|z4WSm(xwBw5`YptFb-3ParE3Wex*q z2!&d9b*G_2Ko18nXbYgMRmvpGRZsxcoRq1grQ5w$SL!8!aktfl=wcInBFZ9r&9VrT zw4nn$6XxgYWk!R1)AHXOTLuv4monKQu<~6b{aE3oa5SU9i90&2G1DaCz*Q7i+l~z~ z#A;_1M74u@Ax>Fb=^P5swI!!&paE1VS1j9Kn{0VJchU*Cc(M*eoBx8zpImY@5htTF zn+tLkIZwHepb3t^Dv?S@+g!$J$k8`Juj2Z$hr4jwsdRP`cA$5kKgaV7eq|LC^)kAM zvCc8AGYT;;$(S`D0aH$j21IMMXyXwa%flkkmDCX_NiKWT zH3XFNUQBj}<+*>Q6bBPGN1j_S=F4pp6hmjI=vmC%@c9%D_?IZS4P2TPtu1e!=*o4u z&5Cr91DI92q*zfbdmj6JvuCNhNm8VR2XmRwybcwBn;H?1n_rG<87>M%O_Bh06+YXy75jYX~Pm6ewK` z$r^P8J3uJxGU0%x3WXKjG?2{`U4nofkO_i3j56qaNV$WEsGC;`jk;*h2!?)LA)=sG zBU_Ma?K`A8ci(>XHCK?*wbF3%fn%9tSCGx9xi=j{=q8R080t7e8iEkP(>f%Df=LY4#vf;%k1{!AtZhrt%0#rtWe#Jh|MVwb&!1Gs zKJcZri44@vJ%(MYUbg!@_LuuawvFcgTT1!dE_2Ud+f!CSM{Ixj`%gdk{b&5))OC+N zaLqH1-{Ada-}{=g?K^z$YwHb{-FD;Ux8G>_>SZr4tC#%+v&XahIJW&||0lKktko6E zd(B?c%gb}OUTK%DSKMvZyKOGpQ}(?t_40z9SKfD*H9e2L+YW2^|K)eT^%XBvFQ0nS zaVlkCXI4?E`cvyE>y&uD_vQQ87^2G?5l=hmc(t(KNDth1f$RTrZ+IPwTYw_^!$%$N zsj}MH_OYs5&5oWw*jUt>4hP^oF{FsbY=Tm86ykmj z@O1hSAK3rao3Fm>j)gXnJ%;U@*!GdnJncA$4}WoQ_xU?6zWoLt`|=92mzr%CdpV_S%GxL zwOw*0VUs&Kxw_grxkhd*zXC*Pz(%A@FCq#$3e7Yy*PQ}RGi^~glO6~LL=Z8GmJ-Sd zQUx&NSo)3PY19fpmH+TFD@v|Eoi!`HX z;Z3l~PW#Lv{V0YSrItCnY23&GK0sJ3KKR*65HsLBB8sFDTevQT+MLvyFG2j+fEmH* z#Mbo5!ehfZ4IKzG9U?Rj;HtFTKDgYKS`Gl5G_ac|!x=>85QU}4VCDuO3ye0DNQaL$ zRs>T%FZuWEB`GPf;8aZTPhTdd!Kt(_v;^7XyD#=y8vh(8>X1lI2L!Cz(SD zhP%1@et9MaNj`um2b9=~1?PztkOdNnh#07K3m*QM<23NikXdkvV?1}P9VsW8WH2qCa@n z)LiF1YjpIHhk1^|d`(a*0s`VYnLe`N2IL`RoLv?~29LOLk0)X%dT>`@CrEE47!@vM zGAXw(|J(SAmrlMctpN9EgpZT>^J01|=GLS`09@)@y25ML%^mKzVAgG>W4@9uv$G^7 zbD7z(vUp@`=Bv829t6Y?D4%Ra>6W%(77zs{N}SjXNz}zh07cu6W*){C1fMBzA9uoz z`+_M0>)&Z8_|^k$|NhUv5#C!CURNG#M}|>OdJ`F;P#~#PMR!L{KP{8Ta(GE<=p{io_VnC+v}gY5nG?_7ps(8z!0n5TD4*= zY|JPj?LIp-R4aH0s&gqx1D%~IiD#wrh!w?ZB;10SLUbz8xzF53vK3#`LiD8wZbh+rdp*tupCfz0s zAzHxBJJ%36!A2+R{J9tH`QN;vnC$Zc&pB*HTrGO|&C4mqZ8Hjt7F7Z1RCTrjs}xw4 zwotW>n#j;50$L(2iT0Fj5UYzJy4Hj1s)D!uVvEH3X)#z-s$fO2&1lT(YrD*VxQg5F zFoymwVfO+4S#jO>A0Z*S0E1{tS=eQlvWQ-mg@t8d=|zAj5}l=sCVCOQ_bRFcq9b~5 z0#-DkV#h_cBwLA-$BzI1#CB}Qa%}(O{GHeL_ZBz5P?G1F=RR|1&YU^t+$rZXGxuIi z%K--4);QopRCSR&3F$&3l8f{pl7ukH(-WbTNLeBoAw#(pvX$pmB1=k1uQ+R0<298f z9sLl&L|k!Xg%%T3O+2~itCtv6SJ?=#N)lw!5%fT#sd&%VRiYF{l*LmJ891b7)zuEG zcPKItqY)G&VH?Wgn5zQKc(TE&+TnnOhgPjk8eNFc~6-m%-#-TZW)T ziVu+47V`*7|Mm~;9fQp6FS`YeZ6$-oj$yaa3_c6aW4i^GfBNprKYr)gd9OWafBE|_ z-g(hk2TWOczVPzQjh3FZ>59Jhwao@A+Fyp3y~4cbtZ6&Xm~4O9_pyEQ%lEP2%SIE;CozZQIFvZnyT98?QQP#f3i&F9#|iVwhAM z&=bH+!S*Xa=hPOcYu^^ivuEnn9?915j@|u%ZyX>O#Z7w^5Eq&d!MhMi4%+74v zgC3#g>#p2ukDWP|_GJA`YSooSo6E2&fC)$Y{jpVUTgh%=Nn&`p7^O4Y_z2LbX_{>0 z0g3_7FfTj{GQ+L#FNh5wlkk)ik5v{#?JHYOciYvKNDHUA`idqFfmWy-Bwupz#c9VG zaO{l>;glK_%mNBx?y45z;SY%ufDwRnyG|lj7@&T&qfswOpkSpWqR*tnmJ3ibojob6 z+7svh?6Tu*3NA1|z zx0Su23@O`628!)C-*ek_ciw!p_8?m6fd1f zIFp=zi%DXHZPlB(VuJo7ff(pralpCff{%h2O7ukVS3FoMT|l1Q1{Z4nIpehuPiHa1I!kMWGfTEpc+*Bvm!k48pz=Y|L5lq}slKiRZmtef;eJV@tBKy#1iqu1ic`)Nc7p0kj$Q7u~M~(((K7=3Zb})^9P!>&8<8H zz62=2t#ak$3qbLB=L!IkE0l{&5d|? zS15<4!!axH)rH|U-<}%}K7v|wFk@Q&+Y6=Di0&65*7FN+YXPoN%+r4++ z!SeV>A(%GFZ&Zs`N<5z=^|msP#g}%#VOzjbRACDMWOjDWCt2(%`!%y&X0I#yU8WC$ zY3ej3UXRr+1qI*&6Lr}~_R+BZ95T(F72GNDMYkp-|Cd|eG# z+tVzy!)TwQw77OF4%-gIwcf5uiGaX(Ldw7R<_8)usIKCQD9bqPOgj&qi%vV1XMaIDO~XD6 zs7ZL%o_IlA*I)<6WENMQNTSd~bSqMWDTTE~RHPto2*lAV>$bKeOQ;Qa?qoow7AY}A zMwnU%0`Z|u;#NQ}B#8(eX@Tu1<->tpztAZJ5s4b4_Nk@lBT`%?y8SqPS?G}&ZQNGs zH<1s|Y>-Nz8i5m@{2vb=u}s!rPcmnUuC$1fK|CNB*&5`mqCQObL-y#MC2{=W<_`~UJQ&)(+!<#kqHVB6`-dw$7Lzr5XM%gx?=#a*^uWBLXwt-Q=dn z8r-%eUQhy(z%yYp?(8w$mJmn{T{<6z@2H=dRnmBfR_< zm)vB-bxqthaB%}I`I#_j$Dz?f)3#q7bcC8kz*s2K$F%tg2kiT0uopNb4$i|#ui3~j@gr@#I_0{i(tHZ>5x7( zeb&Z1Yie9_@y{*2OCJ!0;`ySrX~;WsWl|KxAqe$D;& z-01t*zV{Vge)PL{`sCN!uRs6O_eMt?xncRV+^~EzSAH6|L^si8@^bYceM*;+E+vnV zbH$P4S;@_LGX><*4A+WtiA5?aG7uLTx~Htj#ut}DjA|4igSbR;4rJ<$Nn~|=#Q0Y! z0R%;m5t!?!!SGbMXSQ-UvG!#kEC5FeN(E73Ta6-_MV2Z)GK`=Rq{~G)5F`LmrMqIN zplcqlI6H?9dU;dTK!kvRZAm^D3Wuys3=!PqmVzE1^)^9E&-a#6MOh%8YDXqZC_T<{ z1$=~_sdNO{fz6rJh)U+zq7gDt90(&KS7HWHRw9APBOQ}WAsrtyZiP%|-2N|r`4_6t zzbtkc8MyQnwA<_H5SylYt`Wv31?>M4fT^+7GXM6k|6k`rbU-Utdc8-fBjd~i2g7ww z-C4I)1io<9@-ZYEncaE{1lV`)-G6I|MSu&ZVvHnlMy8$UyeA%i6!9oRs+1KG1{JbQ zJeiAouWG1V6bmN;NnsWkTQ(mYB!BTm6ru1RvYqwG|U_FI|^pY~@#W?i{OlTdw2LV|?4Ic>-LET~dEDRDN7$2A7R>Y)eiZ`l@szJm#`Uf>{UmIgh|QZ24*TklpQ{(6Th$VN@Mk&Ub|w%Eea; zAmIG?w2R_iOv6e+w+?u(zRCp5MV!8K2Ah@ep*u~5w05oO0;k}E0Dz1dp+R75?-*Pa zz>optLcUUf6jx=T7>(J^a?My-%~%$sECsF^y&`H8^TQwG#?4hhRRpmS2%r$iY-bQy z{@&vc!#HT9tJ-O8B8gXT;7D;Pnt|u2h0_IIrkY#IGL#6F8QGI{Wi+i~q%x_;h8^8O zx|GbqyqO43ta$#?5ulINz(IPjE{6ChtKlJ*F5>AJtb8}XQya!8vf-etTmk^g*p@4i z7-}qFixMB9N=s3ZR;T$jd1;M?I~_z%Gq{ zmni+?0hwIl(Y;b`84#ru>WI4{D7Y120c}z*(6D`jPRGes?2pM%ortGDJZ`N^bFx#c zd1$dfu?=E_C#0+;G_-13TA;S1St78ZENEAqTY!`SXXmsL_d&Uy7yGj@ZGFk}Yo-d^^R7HudjHZY-X7qq7ktsM-=TCZmr} z1mrJQsewt>3oL7mJSbCuhN{JCAWa7nn=H&1B0_u0urRRf8(*Pgkx#?Rj$s4HaB+vO z#rx`4V7c1?FWX=Ko%dgbm;F!5_pzPFe)*YO&p&)w%MLy8fW;PG*y5^>W5df1Y2#q-@TD*Rkiye0ZKxb{HT}sFX3d-) zz*Lb5ebbG;wAscRF1_RuJXHs=fEQb|Kq%STTqdCa!h@H%^`xE*0mDKVt9 zXwP9rW{cdqw+(Jqa?3v#xBE;0fxfDPqg9QqdXd51Ru8_g=%OpFFtX1g3(n`mU%vX~ zRb|`CKKunWUv&QIj%3?zb{O01%u?(=`!<(PVS~o5Hka)(L(S)$dc4hL0NJ*3!4`A5 z_LskT=F#x-S>HJPvI|b>dtZ0le7RHEP&2&jlV8t%Z*(NvFQuHywu?bWBevn1!qzEvQ%Vs+J|v zpayP|B`$?Xgh@iz1S>)kxyhNn(o;vupU;lAeED!v>@kS{Mo+~v2=z;_p~23_pk#S zVWe!m91nAn+xQVJ%g6me`lM<$YJmLVSFt3stLFU}wY6O_$3*+-V*F>ysK zppot>ZVD3=L{EO4kqL5p#IEA%Wf1L|Is}H7RUNn{22V+VZup7?aZZJhKVW_raa($T z2&IVqh+$WP6M_#{mV01k7epkIhlFHMmuvE?J?-4mQCvWYMphBRlUWBOGu1q%;uJTh z(!`M~5DbEt03w2js#&JqXRv~HrTqHu{>rwp8OqnO^a}^DS6uFkx`4yj7JSS*YSB}$ zQdV8eh#T!Z0^nT}H5$acXD2SA`xuclGsJX9LrB$W1_i27llSF#v%Ol)Mw`(j=u3BI z;g50N#JPfkbTlj=McELjh^%}$PMz&t9o)`1;q^Oxc9jXs>Uug9smNoXW+kJ6B?7=% zXPkTJL4MM)@B;JOt+Phh^+C!!_8}gX(c= zAK#UR-b5*8gtk1;+CR&iDH-3&0~Zl)r71Eh8%A7FdUM={CWC7a5Qo63va|=pT7oCY z)fbnJ8&e}5+E2}_mZp^3s#c4LB8*G`Z;QLi)#PMJ7clWGdaopT@ELW?51}O*x9Bku z#6uRCV5JM_J9?9@s$b|7CNH<842VO25!}ixt^pfc8vLawj*lTl&?WUv9c@JXzwfL5 zUvG&A9O-DTWTu@q1_@>>aeC1xK5j)&GOQImPr{8B*SFN57HU!1u8o#vXy?S!;iJ_F6vqHTH4r$=>6qAwwq#q8D3s_na_RN{xWc9_Y<14Aqy{qa_}Zh zCl2xPCt*n#)`2Js)Bvng*1)o*SFbIb8XR8o#q3^TmU!f02m1)5bf>kQZt`9;r0F$e zi^Cmq=KZQs6(HoYOMii%U_$qHRaIO1R@Mn>@<jY%3Y+v>*?(3L@~S zh}^tcTA+dbZS{iT5b?$vOtS-J!sR(*Xvdi=0%hAHwn0n)FLrS)xWsa;z&m>>*9tsr zYa6(e1YcScDxA4ECpD$xoofbq+<@91 z7*f9EywmI^kIr3Re2(2@8_j}<6dTZTZ@%`t^Upfwzhy)}tM?=&h$gN=FMI1UP zu3oot;~WBVD;E=zixY;ZAqeSgjwfY9AIP%L_^Oe)tx{L?@mHPntU3rOg1&%K$V!M4 zPen>0zzQFMVBBmjtE$nsRSKDcf{0s*6%c$sZn1xeFy}m5k=D7tq3&> z!_$c%)3LLH!{d`E1%2&D6Jk0Ar3^NSUShcwQIQC;YN?1TK0O_A@?egk6-y>!TdXh< zFz1Sl58MfX&*@q6E1JRGaw++)0zXPzmQsKc)F6eQX)fj)R9vojrPNMBatDWXTK*4C)5I%Jb zTSK+zij}TyJ*UM1Pf!*6e`Qazr?rlZRtu@$sZoT*B$CId#Xibfi`FSj+~zeM$M2{d zy*B6m)qnc)fBlz##HV464XrY%RKsve%3%x*L-W9jM%sqpS!g-fvcCl=kX%_()pW6f zjCk5!4YoN&_dom7U-8iBYazu(6_*U=)jX)oEC{HdH&`Q);*oby0X<)xiSiF!`F`H% zHuF+PA2d7r6*c$_^=ntO))<@{A&)+5O64tVHGR|Vw%H0kg-ji*wrI}>Ry&b2W>3W7 zvoG(qv)?N_j15*p!OQ;E66;S}OH020J9op*g13yfpEVOGGmd!KgO1Lt0pMh?BR~Z% zD|%gk{9IYOP~1suTid`jB#tNC%{%hlu<4s@s3Nzsx84#TBbW6A@-PSmabq8XQC#=2 zHEhiX1O`At8Cs@h2|I_8;SPsd-GaTfRk^a@*+(9I;Ep?Qwb@rM2*POmIQU@KjW=EE z)5Ic*qZa@iS@>xABHl>#)g}QeT}ki20lJZ*I()!zK&iTPB7H<$?TQb9O*}^@;r(XR zr_T%#RC9$@y~O7Ou(I5G7PLmGBY9pjVM+iYHd#6wl7BBB~bTRuDH?o|Hfs!~vEfQgBd^3VNTW z2soHNbPA)K?&H=~(4oW&4d6ztB1-95b!r9myT}lbNH5d%Bo3mE7e*p^J~5CDC3CqV zU_*^Fm1Rk1OvYzMK`s*^qllmXYX7<|3)Hrep%-Y)Aa0dyO;X$N+_g{ZGuoSm*LsUq zp`EmP*=TJ2M+5T~GXm{O8-toftls$cms=e0>4bygD$2;Jqb4mzm40{-T~-QurVuV{ zD>r>bD1tzwT$F-%D&Q6?oKaQ~f%JMYK2R-siTDh4jlUh}MaE0Wm^IFbW#K9Gq|`-J zoJFv$U@}yT^cX54S0b6yUAmC8DI#8OtnfiOo@RX6G>TRBA(9~F6NAP%K4_VRCTq=G z`>bt?2r#leW$zfl%R-;RZkKtuj{n}*FMj`hkhxEO0nUB$tBB2IuPp=1p=Nm5|Cenp zdw=;yZ$IF&#V|Fr>j<~4YThZJ z9m)ocVO2~JL#E@mYo$QAfVPO)AC3l}&9ly0n?@Z1C&sXYY{?n{p1c$*@Cm~=whfJp zcR_J{IL}T^b7LF}Uwi6QJ6tAff=P794dtYX6Y;U}#FKZcE?^k}N18DFZMm3}Geqz3 zwiDX6o?9HSnJ(@R%j>MUs#Do3uej9Q!}f0<*_W{|IPcUGk3SrOzTsLwvAXHBQ;u;+ z`|LAL`0C!fc$Ay1zvjJn+z2Uqk=dKeu(J(iw>||X_^XuPOZC++-}^e}w7Gljy5(VC z+vT(q5B13}c=?v=F7p572flsNWB1?j_=9&jjO}Mq&STGe_1WI>)I0c5oG}rdqHe-{ zGCLT_jq}oVE6yILOFGZ3g{iz^U93{bB5)Pw*q9U$=ZuNydMKp}Za>~1OTn<&WZ*1< z9$P`#$hxWsf9Wyo*7Ot^8Yw0tP9X0zHPg$jKtZ0loePdSO%pYS3Sw(IQx=U<2m}#z zp^>hXwv7ogl}cByQYuoZ#DXkyrGg?Ug-l>`OaKHyL=lxrH+7;U9>MZjDFUTBT@33n zP9{Q@Wd{3HFFrV{5fhaXl7ysUe|!cVKORwvf{7|Zcr=(Og|nb)=#r2YptNTdA!D43 zDdJY5h_VzJ?2jh}af^RuQ5_oIFQs_t4^md=8Z@QtyF4)=X7D))1!F5#yso;%H ziANoA#D4qiVMKyE0B2Q$)hx_mXd$s@9S7;EAb{5m1a%P!6IN0Y-%{Yg3bt%Q zc3dX2KQG0^YWn_-XE=5YJ2y3FfP;#jM~c84jZ$0+?~Ai4SA;22kou}7Oy&}oA|R-Z z(UeUHPlehE8j0-R165YsaB4;%d4ms+JzUI%69zY)0}o9A4jeqr80QT%M(m0PWtbwL zpGPOeN*0$7eSx&(XI{oe5eSU_WLz#$I-JY`lgsUh}Q5Blm?v~BaIC2q&O zamMx^fBiQERtly0i|HUue7qLT_!gJIVZqr0DySD{^sdymIsu=W4OIcQTVO;(POLly z2p8O-2&};$vkwSecJ^$!rI+;IVfwOzz;j~?K|9g3GL&|rxcw|>#SSAJ#CrTdrI?PZ zZ&cZRmpNVpTV%lnt=@Z^+ZvZM>X1IMshlo8C~D%SEdD;UWlI+v2AQqf`>vNN2mqzR zzjliKF^c`kj9%Da-L)N%`^UfkTQK2EYfn{B;wdK_M-p-p!z;|Eojm%q__}Ma&EfHu znJ37-p((a?kZotq*leTqy~6C%UbKde)ed5BJ!A8=)>wU;EvGBO0)PFV{=(bPlqCu6 zvJi>K#lfy0K&>tebq82RZaaf84gu;Ncn$}` zB@U1oH-_~zEi&+dEKR^)9<&jL${a;bBOGm50CpHwu15n_JIC&AaRn7O@@N$yh{3}* z;4qYQWtpymx|QOtK^0}CXIqJ^DrlA-5I!FF!}s4i?3_x}%&^rlJx6ODf|gTp1&OCr zKqIu`T129<9vNZuRy{~}Fb_69)P3~pI+1ZZ*T?`;pDLmg>AAY3HY^=YvmQPe>YKWr z6fKab+DYg}#H~b;PYhuwdWn#q78)J}5gVelKq}PxJQsFzYe7j|tG@aAYZ!rfbwgPy zk~?V1_#y#j`xL<5zUXDhm8J0*JdL8o?I3HD+LeR-Hlw`}?;lduVccq3 z6|_uL^JZ0=284S?WDsc7?Vt!6OLMZ&Q%$7SA`)H}LHrTCtc&$nr4InM)`h<;MdIU% z1I{AjB@t&Fm14y>B8q67Y*;TwP>_oQn-euMDMP7tw52GXs7eiLDuRH73JM~v4|oJs zs)&eC6){0cU_m6Fu5MFSB6>|CBM-y_0WwW-2pZ}TB#LXCwdb0&xL-tl@E&Kzr#OFX zuh~`jlV3Qb^vVB|`qdx(7-R;M9n1!t|LAw$75?zwyz}$ldk=`Vtqd}^=?pKw_3D$a zJ@?QD^PacA{M-}Yf|q~t?xVL~f6|6)E#iA$v$t4byXh_MPW!zRHeUOmrCAk?b$ep5hbPK90TT>`D9VFx8&&j$3?jry0xc zwf*Y;Ep_nTo7i98Xq{!(nz+QL;bjO8hBc+yqJ?C6ynv+5P`7qhK~#<+e^T%&A7m}S zk~~JIwR2**no>YT7#274K9|x*Ma;jzqYq9lvFJk9Y|To>tB!L!yzTRq8?86Z7sK}2 zeHRm@m!J)>rYfkMFhFr{H>>yL6OO4~{CP?cz3O<*U@1980@TMJa|C~w%%*j)nq%xM zpq96GJ|V(gAv_~AdqT(NXm#1Wasj&F zzu@|7Mn|$iW;(*rEbN$^xgMM+v)yVw3}HyF7Qo!&paOcnu6MLO$3;Ik-}DXF_9F9r zciwctSttA0S04ehv+OXo6WYE27FZU!`qHzmy5!9L_nLFY$#c&?>+9mzUwNJ{WB&#( zKXCiw58e6fX&Te+sjb0fB)o4=!{m<2>6nh4Aex(^poigYQ0xRIX| zC5Ye<6Y1m0a8P7$FVVkxTy;0x$dh+>a9(m!}M~S-c={R1B*I+a)oarl3WaQS4{h9}Jj|M}+U`009s*u=&>meebCXE#2qKhy-O1C1i zxTzTvan4AnXv-B}bP5|uR-a?-k0GE#vLtb}QqL{*l> zj3?^S--!yI(}lpqGjtf8ydi==AypwrCk!(pla8`B$)4$}?_e8Qk1`9pBKFW_sR*%K z9JCFhG9Bhp$Rih-t2pB`nsy}%TUR_KB47}=b&z;wr3i@Eb4*T&=+&j&iqsugKv1tL z2%r|kT7_Hbnx!l)tq2aZ*0iuiqc7m5#_^9BXBv?${MskxY5 z%py^idR38Gq^MolVO(My^-{koREH1FB3Ln$TUscUcb)I8p>R?7fm}q+_TVH-gq22M z-^sl(%uZn1VV-yd69%PJgbZ^QBPyAZF^hOU2EmvDSNpkx*cyoNWEnzY zi>D$Y5_=+;sIeBs$l~}v!kPqreelkluzSop7h^PgdJDXK8+c*IPkgJ_DD z7o?k``2)5X&`V)PGJ8hwyIjXkSj?J(EZgEHcSl?n|H8tHExwQ=&EUhI{OafM<}d&F z4*^mEB8Ggy4KNeFuq%fK2I&a2ET~zoTXEn*7Gw&iNXIas@xu?^p7#TL#9nir_sXl! zKm5pdzxVhWURFn2Jt?_DU+KDPW zIFH!Fl}_@99=`8ex8Fqa*IsvO_obI!e)feIeT)11zyAln{qtY^{!f1DTLk_Y{XPX> ze&sm`%K`#<7y;fVi3KsBUfi+(bIB5M+wLbJ6+ys^4^r)W57`W5aMDT?kq*J#aN{+$ z=H<3?ZIBIE!51RM15@ECDWDDT!dChPV`|cRO>Mg|N7tsK=Ar#)?b@@RA|TU5wJyX0 z`?%GfOH@__H(4l+LWZVR*hr;PAuLax0!$R1f*++MFgYc{MCd`eRB$VxF~pHcXsk2b zVQv_-3KV2+eGdWRkXI@bj0%L*csxY?$V1e4oQk0%O{rYxqP_?|cpfwNFuQDCqzHoyNu^XWR~&J-D0?pG z6HnHVODgpuK!(1ou4MbfkmQ4=s>4KarXH_?vWzn_>{!dxI5kU8QxNgcOE+d}mf~8b zpc%Ax>@8;B`0}l6d&*8?yM>o+JqrlJ)Ia@^W2^6j!vHd*?5y=WZ#)Y%gU|MtgoEp{V`^*sLpYJpCL!IDTr-jWs z058{33TP%SH#E&NH)#s8Oo9+x#~QbsKAkTLGYWtwXvw8ii*t;SATno=oY1x^YZu1? zqg6z~T9#uu&P1W~4M`ox(v~G$>#;U`K}EAaS5~fr8XtcxF1QREQ0bD`Hs+V1QNx+jX^o1-q+e<9A7*J1meCEuaz1%X(s1yLf z93`JwDOy_-kS?eQvDr{r+u!D2qh%g~$&veU>&=&3dHI>wTy@@c*PMUJMWY$IT;dPQ=b!rxc==nmjOxN49~H5%2(Zs7!rwHFp*e+-&A;3p^3z=rvyZ*7@bdCY zEetG!$$bYKQnsP|tsAbevm9#nIzLevR|SJtMrhAY$n2*0o-v6G?1rlp<0Q1S0qtKZS~y zD1E#yRAQY`yntprTO8c#zd|(*1mnpFP>}nHfg{Id5{&Z*3<3l*xxezF6NWqor$B*6 zU)BN-N5Kg)cv<+tn=i`JO!X^G!CXAPG}J zM8KiPhck1z#VT{rS1tIv%Ej5*6cw;NdhQX3y_u+w=o(IO&`w7t2_x`t4NbgaJ(Lf8 z?>+Zu0GtyR#+g6QH`{jWt-VD8P#X$>Wg{C8kWDlsJJh^oD`pnEEbgh-NpGT>1?)$w z&iDPC#P*cU4#}nnEpR~0EcmFA-3Un?L1eI@h+9U;5oI)%A-Uq>vItoUVX(zh5fvf8 z%GE7;WD*kqCB<1xqFe&tqlNSk36G)l9;LX7;1n__jz9BaD2Sku>p@U3k%ot&pj_z+ zCbr^gaf>pt7!t#+L`Bk_Y?wTtP(~ zdwohAD`nAm3LXJU^@?N}wP!axm7o<^ z;!|T5DuHqT_`pFRhAw7kQoDD)JSne-lYvyey?`5cYm-ekfRX>*pZ+m)2KGW~P##!k ziwzWo*KF*9b#2r_d0}K%>5AARwzumx7$^=ngWIkMf|GFZ_uqIK@Vw>L>mPXVJHTcD z6Ve2n!OVN_yX&5B-~Q-h5AgBq7w~ylkNv6O0nwe;A%$nfwwk*6%VVMlSSqMay#OZ3 z$xl>x*)_4j+%7LT5Z@W)&g(plB5#D>r}-+J}g=bY(%2u6nM-+9;R z?$_b{=bnF3oEGGFg`HiY>2}*eVAfz6^$IXM@ub-3bY(Q6!t(4xPQu&PJo}u}&+s3X zZ{GnFqk*yoD*$L9=)3P>oCn2$Q^Nv45lDeezzQK$0&WZ_CV)d7%|rKONeeP2AZ7w64pq-> z*kKcIYFu)`d7EtTCI6pw>;zKA-!Xvng3xJ~!7*23_%Yk29r;U4{@=j55BJ$oMj&?p zjN<7oMZ7GEON@R8!* zeH`94ec06nX5l_4yJOEzkvV{E%slR>Bc`mrsyBL|^@G2=oSfv|Am&sf z7R=)gRAUZ`(wbXtkC>-zLNtRoj&3sx3MLZCEtjzdNCNo7%N9$e^Fw&uYGeW2Y_D6s zHnTv@a_v}|Yi_ye;;XMb8(4;v&pYRY%Pu+N_S>!!yor3qsmI-L?PcHn&gfUpm=M^S z0C$~8jBE$oh8lV3MN?BVo|er``s{CllcL6{6PI^Be&PA&gN}XCtCyEuZ79RY5?zDO z(DX&;d;?g%_VRNM+;?Yq`O@>ham7WaUvTybAhWMzKYZVN9`@Y7E`V1N0&x9#)VPW7_n)4^xAFFntn3Vb-C z)ah|lmIbV$*cO58EUIWTP9{4|VReON^cSs#b3#bMn7i|kMO1;gL=|Ns&%&)l{0IAs zqm+&V)x=#U_0?B!vmZzXsYXTCzH$f5dL&Sz1-EhX{8##$>fC2}M_Badt7Jyp! zcZk{>ANrJ)E}O;1f5plF5C8SA^=k_bI<=ReJ)_=?CZv%;MYiXRZ;T0I(1>xU?@Rtv zHdYXo(WJAj>L}Zjl}@ir=h^WRoebwdqM>7b!BxR%n5dA!@pLN}O%DivDV3EnwDv^O zNVG8~6;#2Vh-TS>_7&s(E_HYpdyI`5aDy?4Yq;8ljNjH1eQC)DX!;3i99+k1T|I7$T|ENl0l25 z9fm4$C86qwN;Z{x;sT8bla=IVdGtiZFr`Sv>#)eIy=mcIrI)4Y*5q_04OP7+Q&*gI z7S$D)4ihzcYII}@^f)-GM~(eY2IT6w+d-xPsWS42{T%Eg1;L zn1n}07?i3I)aB7AB})V!8o}vxTcFjBwuy`nKq)was31II>SLt{=86bzG1M4X2_H3* zO>ir>B1#F2z-UZl+gFD*;8<9Y?m>_oPm9bZO5>qh_Eenkt=k(Ik>SQTrd>=7R!qd6 zD4AM&BcySI4ED0EqB9*bK->6082k-QRvUb8KhMN;Z!9ug*hEqMu)Ksl_h1@LUI;|Lo8I)GjTg4B|n2oni%hCBmNJW$|zy0*DV3Z6{YcNC?mg zBKA)O5h(J=SK<9ZGMY%&Bx0b^o8bkZGgj5$v2AC-hle*Fc>Le#=(P#;^6Kt7+*%@D7 zFbLEyRIN2)-a{YTGWKSxuSGfUJ&)vc5vRrTFFYmvt$D9O{dBY-Kz@r1?9;wBcphG6 zI#o*-fMtnJjN7jlu_4ZqVDjNd9DM9?M}o!>H)2@`1crs6`}Pz?;0Q=0{1UPND?%}$ z6&R-wa?x_A#%<=pSq)$@EmI2=an*Qg!|b4`AlBU|F1;XbxYGzLrQK@7h?UhPwM-ob z{5Al=m_cSwC9p$IA`#V997dBKCXz_J5TdJi_%7V!8d6K6mIqrxR1pkogmjA`#Vd

T77IDQF2kZeVBGe{;Tm%#r?~<2<}^l~NdzR$R#D@h5+> zAxMiP7Z({c5+{5`MBEyyp~aMy;#NFvaj5KI3Iqk|mBLNI3>bC>B;%lVxr|&8ue#1i z4+p1{(5;A}(+p%zQPzp~L0X(Kk%%)UC<_FWSU^04QsfrssG!_|4Uy| z?FttI%dUX4z!>6!2!L!u*;casWuVx>Y+xBQwyGxtqQlG1V@nS#L(L%bkKTOr$8SA$ z%|*w2Y4V~wZ!=Zcapoj=dDi9=gdj733=7*_7DRyMU1zMe)8?y)1Ir@2Z#5}2eZY=i zI(WB@4%>CTxqH}do_gE?TOa$iS@80VFHKl+sZYhr9dd?+ZK2ldb$npi$O)o~gNpDY z;0zkO^+_&Xs0dFLEH#6GwD2S68CHeqnr>|M!kZ(gd*x*Zd}S}#(j4ULk=I>yIWX+o z4&L$cev`z54%oMMqHQ$$UjrF{UoT2Srf{Os8z?3NfWSSr6wmb)fq6}u>Xn`hXkd9V zsMo~Gg*Lg9(=3UDqnqj?mD!E5IQy=b6p-8uLSJlUnM_w-byYS|5rRb*TEN=1jVhoO zvGZV-myxNZFJ@b3Hc0a4z5C2u(r>)}f@`ln_r~ily5h34PCxaiZ=QSN_19h~_&?O) zhwOXJl^5NA?;UJQY`%@DpZm?81Y_hqciYvROJG{V>1q^gl)EJlgLBsp-fxeEJ~Q9a zi!E~62}icYY#-S!v#Yn5`$V>PnZf5i`2`x=R^DsZZB9Swh>OlW$$4zIu71G`CfiVk zl!aHGd-#o)A9pI-T$b0wMagwB`ibZ%A`*?^JORUd1Ae(uM2wus1X*qirpxgGjog7o zLCg^}t;mfa8f4>n#z$5oc1+v~6?Bcy$FDlOptGY^EO` z#Lk|SgO_iB(homym*d%PC8Dv#%-<8Z6MT$(W)p|0ZRCoNx%aXLCzhK{RF#q(L0Qbp zjO;Aru_(x@>_BXZ+AZd;_>dDZ4wRLPp)A9cnU{Gn*Uov`ZSq=cvZ+BT1m)Qi@dSGQY#-7O9Bsp|B@m%nQ;duDtTtj@1(4H7yWH z2TWf>oWX$@)WIr4;VeRTtVp6324^wP5e=;=i$GgtYPGwKGLaC(D%sGGM-?<8KrLKc zqAa(HOGnJSuIj~H3Z4SnDvLAQ3d}_eG}Y_Y6CoiQWo2ijD+}* z0nHFplxoT{j)DlJ;K_SeJ0bDGO%~@+U?x2+&of7{bXVS~%`hMG_0k_QT?scrG9zx9 zLA{COxKUQ^DWy6n%+;)CjzCXlf$$8480DZfO-bl!a!V|n`u{Sd84TpB&;l`az%G-Q z?@Z#XYAVFA2G&Di5S3ai(pEK5r%kZZS+rHlzh*S2GOQk31#-&5Jfbg93Lo7B2Mt~q zqYm>DNjl~#VqRiUoW4C8^eSQv!pk+(DCkzpv1X+@OHx+3y)>a9o1me!r4qFu!;>9U z#AsAg7lv3dg1#wB)q)~|h!lZ%95&n*{`}{cSZpC%%qnsq1aktj6?F^(w6==%umAr) zTgO8$5t-MG`3BYp)MzyDP?lbDNv~;GSEP}8jh{F(pa=;a{KlE-mRV{kXG=H+B$t)h z>FYtj(d>!2$H8MHKhMDl7GpxMcRAbkFaP{&586Hg`>!!&HCxP|UvU28jz0>@b0pgp z<_!gl2l@cFwr<@9QPF^YZqY~?&@`+kKzWsgB_U#P6105qp?iViXP$lh-~8^6-+j-I zrTj_C%lkjFEo*=I#+$BjB%7KPXAoCy%=0xt=>8gy^#MMNYtO|~@aQ4Xw!&?2tC5`E zV1-5jWC6eoby9~&V6|qixS-!*j)}TFN6kPRHVz*|9EZp# zCD)7FwxNORb_PQ$1Au}xiUSnT4FFOQw;rWQ+h1-;P>-lFYZ!V#Q>Vr(QEM@*X-)+x zx}oMRT`3X7vP4`J(Lpr4Mklb#8uV}s#+sgmJ{~}5h%iu;o$x?qg zH?9hX2Av;&s)^|NtW6AoNJAtPIiwIEjUtmPMZunn+wlcaQgEP@8y|`B8R(-aSC$ZG znu^1c@sWsU1g_a6CgUrTMI^h)8ccZE41!oKuClPDxG+3BOg<7~POns}Qi+ifRYYP0 zA_UvBvXHsL&cFPFAKGGeD%){uh`4Wjz4^)$15y?NoMi#j?Jxi6 z&F}qW-V^Y$&1GB6SDt_5l$Aa^XUj=D&Rk<>zf_(%2~ys6`bu`0x8HJADA>{K19#cL z-m!gTd(H6jj+?D)b6I5P>8rxaF!gX?`{13X9kKfc`^;YB$bGgrYX6zeV{fz3#8sDF z;M4Y(ZDj&z03*B%NZO5Ua~YzPXa*E9(Qr*UpP;C9V+2;b9cpr*5%g0^Kn(YFNE_$_ zX|Xc=I(O}3US3}I`3tj*EoR@%)`PsQ?2xuiXP*djLfaaYliO}hH+)g^uiakWk*ZZ^ zQwb~RSeI0bNgpyLwiFQ~JXFi;2)W~+A`)_M2#-v+^=%*XpcE4*r6hU}5gAl&x|f2d zDZ_F!|6g6;zg2pNIi;I7af5BmfM|>fKnizSaeNTi*CsPmtJGCjoMnIc<{L%MIrH>m zF8Joj!17gBoO{ZNNA5IdtBWr<>z+GrCNrN3UWV%l#v{U+C4qVZ$ThRHQ((Q?5`>|+ z4Q?w5XPt5^yljj4hy(Yro9zF~URd@)?7MHh)?sWfH4l&(g0|6o`Gu!l^37A=<~-c_ufJIEO4C!1-XlEy*thL0zy9K5AhRhfSI|tCgJAvW%7z z1p|9zA|)C{^N1pFl`;ez6vxw*Q(;EPpBxA(r8+_1lwMpqwsAIG%No)vtM>mxP$LPe zy{fr|CUgs8j$XsUj*;l^ox28>MV@)`L3r7LZM)A11pCWMnF?9Syw73Fl}Zs=TrYu_ z#U+T6ki=7?Nkl)8RuQ%+wlFE{)T*Zx1vYgK(fLygODFc#3-`6+Q zXbFx~x+G*xSqD5g%zSA(XjUg!jP){?;slez=AIE5L{!JZwNGRc3(|YCip%w|L~xK! zHbqzl0RzhNGzGF%SGoAquFz-3QdBA^OBfMNlsZ+%;EJe4>y?g13n7(U&&aM*yP^q( zOzkySRfr3+Fd;@y&@F>gR$b_ck){X;iF3M3M`lb@3re@x$`!|1oIFAeTP@_l4Ri9# zl^#7BzP*QqKeI51>Rb`iy}Ae{B4CbB)=)craUO|1cHEj9!&~F3@fAhDWa(T8{*2qv z&h%psd$*3I@W)D}(zH6ANRt*+L6KA-fWXcSND`LR(sUUDAV@I6D~<_aP~sz^lqU;t z*&a3WBLI-Kd47niCaUpkf#x*@@j*a^9vRitC1fQ$xbYTnciQ1@+k+rp9C|3Xn0wc2(bCY zlaJhS=PlRYa8(_Fc{L<Mg;L)KOUhyfad!(cmekwIWXXR}$14B`-Uwh|{86+mo2+di_gXb21%`*pRiH#2)W zg3>+!4oU<)TmcFg1daeGii0g7kpXAvcMMfKXEd79S92X2SRGayMy5q+W*VMSdbieF z(7H4vSGmmvG1MPB^kRTAQVAmC*ZN46NCOTM86QYH03vbfpa=0Jx}ubbtwlm(oz3OG zbq_Kd2{Q$HLBVjAiBf0L<_F2CqahKPb8bNJa6MMHbkP!=CgtsWCmIG7!L9jku`{l?Lgz9NS6|sh!oN$&v;7J336TA zM26xAMbHaF75F1#G%jW6Sj4jy+-j>`JyW5k>pIr>e}jmf%C>#Xg$9#-_{({05e{=_ zv%|}<^S}N1Pi!&!0ybRiYJb@a%doS!AP!2y%cGs;vG&d%rJ9Xz-lfA+WH4DHpq%4R4#ZF@Hvc<$5HeJcp{xZ}I zK?jzhW+?}3H`NyNUb7}0{^iZ)?z^SWh0U2hW%2|+`2FPfmrd4?njIE^*VQI6oD2V& zeE?N>69zN^nH|JU1b{Pm2uSji1I6GftQwwm^l|Y;7O@9y?3IFEfAMO+pBPS^G|^1w zGm_r-*MoeK(ti>^KmYv4A3fK{zU(`1y5V{YeC9J8U@sh*1i?>ZxY`MWj}*)A0*Gq= z7it!VG(}))fEvE$S`cQJ9cQb^XUcdlhZU-!E1igSB^x&393`OaIe zkUsjK)FXET%Ywt$!1BD;o-)*%Obyybcq6|N+Z2#@+Q{Ej%Zu}xuvUJK=6q{6**w0X@ShN zGlfCH_LK!HV-8(|$+|r33^Lny2ARFWY=2phYXl%OAHzhzm*7msgb}h8>*Z~#OQK2% zqn2t+UzFKUZUqU$4h1S;&b#2ouu@sBI2fJGQ*6r>v4p~j#n2?Bop2_-x2WB6QxQ~! zdht}2eOy^S@3czM;F*MMs5%6MM~Q>7*^`~q*F(`1HBsIsKhVeF)u?KFE-H2S84fjv z(idA*2o!-G1nv_Vuq6+fRgHNMQKizeB%i@b6rNm7ZxLF>!kI``t2SDHeM?mo#}hZ* z2SZ{z;^IWQ&S-r}?d;EJlwvDRGTAt>zO1VYGo7-uAQ(~k&jt{6ag5cDo+JvAjbp4o z%Cc+EKeviS(~Z_&7gFX7OXrPq-5Eg_?)fUnVglxLBr~PNanllN3{)m?w;3OeM2e6K zA554|mf$Hc6m?=dTn8GRNFojxN<>d_OtP?B>5BJENmA|VLf~2RU2%|(i7bg+S%5;L zQdXiY#nE{B3F8{QN(-{^XVsJ{moVffyj;Pp2d%6Y>+uo5Jd0kXQKC@`>{i22DPh<_ zKqgU;BHp8SOHGz^#mzNJ-0&oW+KK0u7IIzjq_SG1+tJ{vN#d$;Gq^4RXAVq03@<{8 z;}ino_!H7I6|iz8EmrEHm!Muc5<|;WFB@u~x-ep7gp?bCAVy}<qx88nqwY+3;oAO%=4jpGJh_7BZLZX}*UloH@aeRH`7Pvc?wc zZyKopn`i!qe30?afJ8K#0lt|1+$8FrR#5DWDrEdtUG;{7ii&a5L_~ur`;i6RHks25hlS_v%F{hbH|H;Sn#BJSmLxGE!7B#L)r~Sh}f;H$k3-zN?{&e z4l|-pcYGp1k9h&{|3MgblUt3DGT@A-xkkJyFhN7p0?sjvr-B9Pg>naJo21;Pom_lO zSZ+TtP((TiFAzXM{H3@mD_3#ysEeEk8tWROL1tc4T~*wGP>VG+h`WxpST4kykTSds zBlqeenB3`W=_1nm4j8;_bJ=0+K(QB=`?{B{t+fF`g&rRXweYRhJ#~Eu*T5;h|!^;pQ#1~A4l!r(=%aF1ZFby6w zBXiE(LYn3sb1n}S#MVuKYp4^@^YzGH`~hiWMWM&l1Mr%uYeD1 zI+IIm1t|!4a;mMs<4H}c1bu%S&?dZs#(7glpT~qdECuic9J=;Kw#_DXD8pLv`YneVyl`Wvph$giM|ICQ_;ZuZaU(FZEk z<#4q#i6n~qd<~;8*y39@@3)K_G+`MH!_R^I=i(pni8HTpuqqOv=|JP9T?Coi6+&CT$5(C6CfGT3uHj72zjtY z=E{M|wUDkx1yhs0ig19<&X|~FwMOlPW9CG(9xnbYKn7Q}r@K-^kSO3l24$6sLzEI_)dmz{XAV`wJyTb~V?vJ@jYmL? zo~cB)%pxFTL(hx89*`6=r@lsoK8Bd6qT(b`98WdI*{wt^wqBq}%o9HGG#fP1>7^9I zRvj(K!w$)h3?FroAtvNWi)vv*>3AxR605<6S*%jHiD07AIJjn2DHP$zsfABy55<+Y z$y?xlOm60!aBJmTeC?a6fHZq6b8MJYymvQSF|)r6DcZ>q0oow`~h{PnNws@qM&bTnTyGT zSm4fC&eHwOq$29lm)QzuX4Kzf&dAU@gBV`u4ir1KZ9wxBc!S!Ny2R}-n}YH0Qwg;h z>JU%kVHsxR4?x^TT4z`2%QzmcTErWv2x;u{UJ2RcPMNxTNO{uaRi~}H=F~MO*{%im zMq9CCpctkD_kzrzA2bK|d9^vb+-YaP5R?Ou1Is?Z1>CtEgRXDGl7g*R+sd|UZ6(9S zz_I{M+pdK`?bUiy*cPqF#5iPbte})GY&f+RN=Hr9lch2OHfK+GQ>8viMkH~Pk2Yb) ze$X~RW)W~6nnq*5w@*34=T%inTLI5CO-KQ;6ag}FOOc9D5HgBD zqFm{=n0>Ut*a+SNxpA)SP^o%JTZ2z20&CxxVm&tIu*g-ocrk5@_sdI^flr68~@4Dv`Mtz49a>WcZG$iP3Xt9`r-NiG)?76$Em zri0dTz+Bb9vMpvs(7Vzx72H-YV(H3$B3?Eu8_l$Bt+c7Zm@IKD4jPPJQnrKaPf~W2 z>^HTw1S30@J-o=we-?&!lbyKk4Q0?+r0-*Yu=Tq=-|_;GA!Xl?^tQ6k<=u4M)h8Zzw3!7!hL`;d`Krq< z_RmBqV6P$9=&daNKqz6jser2SN03Oc(aGn_FXCyqLh*7>Iq_Ji*;vhuH(_xzXhbBe zj9t7DM<}gD1NAtY&P;i#!(T3L^p)$aVXu-=)>5IDkMMzN=>k?8`qTn$+lAr4n4_&B z1G!Q#v9sJe$T#13k@uJVM#^FAlTSR%Tg*Q8b;V_8`HpnZ3Tg?;ABVeIqIJjpMm zoWAyfFG$%gvrm56Vs=zp{G8K{@r|#!2k!+b+h2AhTe$h!3vayo`~fdN_r!hnm%T*L z|D^Z{{5`%3H^A^H80rNBph41YqrG8Yid)m4C(b)63#GVRx57}+(AwyVt%9Yy>U4Tv z#7YTL1hoj5qtQ3fxUH1pc~;2Kml%IppBPBa-xH7#&#X;H<2iH;3+v`q$zWvg89EmC z+H(8OL1rU>MKeQzStgE2jv`$)8qgNJzAi{8M z1j8!Io?{}pO0Nht$>xi8-XgS{!==>tRmb!6-9HhXPS3&+2eoHg!stf^3K~hLFNW&% z)Kp3rYI-g@6neybT3+bqKRf!FGL0Ol&~g*|z zwOp{_dZVAP=L%xTP+3gvj0vZxi@t&sK?_$k$qPOGT7@jN*99;^tnpwdQKjV)z6ncu z=9PlETB64ge~L4&f@P7MY;1)Lf$HK z3g(_8&d9_SaU}rBRV{*>tFj)t>WCpr5$PyZS1o7;J|rZ(fB;)G#Vea4)GVTuidaS| zvXLZVq##Ha?i^_yw6@35Yjk`Go(5l$&%o=j-(lB-J6ED~-cIu1P@sY;D9*@yet{zD zLPi)7Dv$&*X(=TU0r^SA$aL4hlog?uT+h_o_ZpIwA5)bg7Qq-C2Y?PYC!D=B#K=r1 zAizx(%F>QJD#g(8rzw*rX_j7+0u?2y2p=@2Hgan4wwMu^!EJyN1`Q@?Fd;vY^h6!D zr>_=eOJ_H-Nzob+la1KEf2D6OvDhM(cBME9LULL==|LUqSNNwUEwoHDxTU_hTF8JK z8K}cb+Otr@P*)8Dfw+POy3c*)Ge$XQ*lfE~88JT2W?S{LxMs#wBYI-U^4wLb0qePV zGA2l?tZiSE`g>`Du$ z6S1{Z!!l>d6v;AdDBz9bYTDK zXTE2lfE{37LGAX4|L~7~k5r5X^U4h`+Y@Iic$rkzA{fg5e_6iu2Q*Ag1;F1vH5Kp_ zFs%E-5y2u?AY!!dHinczkBL$@kiyQj zc~(LuSG)3b7)J`IWoQcfgp6E6J!A{-7{@|AZjD`t2`q>pHi)?r8yRkfwFqbwACw}7 zK5w8H9*vEFGd_ZXIHL!q(w%HHA~4Bm;g~Sl0+3M8lap+RlAiaK7NSEUlAz?$UKBhR4rg>7+6xdn8K@k;I7fH|{?g8QEHXS3zP^1)DRW}GcWJQp8 z6{%`$nO!`k!pMkmu?c+f~-n(#%bk_r0&3wwbiYoVB1}Tg>*3VPOE-7IS;b z;bn>7v)n^=-&jET)$P|jcAw3@w)3>Pdrv>&D>IzOp0)96%P#(@-}?gAz(A-I>^07V ze)es_Oi0jfDGwPE)bUM%z@x1nJDz}>m1Ka%d{U4pz*0Z4-#O5KG<7f)k!^_z6 zPwa;Cy}@LG;ugnxeFxxD`BE`1neNF+VM{4+q{`Ph%F8fSeE@|pZvQ1+6ykd z_;jfGmYXiU`_Ahe$v*4!ufxmNUgd9G*V-;8yd&bgmkxw8wTWieowg$Fw6hWl@Wia`W19m&< z*n_;w?Bm!@V&8t#Ww+gMiQh=|&az$RK9LPF8|!#wd1B@@gM1yt0BGVfDjLJ3=uiep zxefA-nN12(nvM;a+zde^N*r{$W&}(Oyp5}JaS+dUlJ3@+?6!0S#dT7-ads`DU;&LP zTn7%)#npv_35fg!Q#my?RI>z_3<$UU1sB_5?l)2$)wcVLKso{=gUt&eJj-|GU|=OA zt=hF->6oiXrLq{4O)^3@WZ3)%&I3;vCI5?;r9h7A z5};#?W)6{B#?hzjYH6qdCfP8AymTcz)nbUy)YXLvhMuPCq~pd2JT!?!{hxf@2l(ez zb?0$oV!;+A*pMy*C<99Gb!M1xh8ac}K&p%&U@RaO5JXUrB1lt;O7Fc%k&aXW#hPT@ zB%5q@v)LrO$!?Nov+a3yU%&qwZvGi&{5|*iKKFa?x#ym9Zabgvz2EPTUqMtBuu+A- zhg}X@tHq?mG6c(vgrr3PjVa7dhz(LB#Q^l604ofMfg1wT2m*V-oNBjhNO!uhEz=o# za$$dLEedi8X7ieBO$!s6T#vye92G@#+Fjv4W*_0)}bj-Ci zvn!=)K?+J8QY1;JHnfYaYkY{I2vM01J!SDBsvtd65G-93L14&$V=9%a3D8qkPy`dX zS{Z#7AU2I;xg~=Z7r~k2SkWC5@e&CmNEB$7f)OBAiaJ7unml#7{TBanVvCmtVsn|t z#Yf6Hl;V~b$FY<(Nu0uQu5lm|$+2b08imXf5s*PdbyGE5ZxqnNEqV&l&Mg_FBozsb zP+Hip)R)Zsy1IUtWe~eHkl|&k57S8zbV3CM@-R`Tgfj!^;b7qd2LI--{sL#6oP9j} zVLilBAR|YSt$LP7i>A7g2arT;%xZn&p(ilyXhaw*f>qBNRO_hPBy9K)4|oI;mBq@; zqn4%!bCE~uxEYdUkj_4YFYcBF+!9{nx>cNbYD_g(1SV(w(=arg0c7rk@#xsI^CaqO zCXov%cY>Njl6R-9frBCKsDRQfnZ1JH1qwhOy?U`FUTVs%7N$0pX$~j4CDztPy=GDu zL-91&*+W8J0RqJ-#bWv%AIWs#8NxsR)88|rdTH&%?+!a|<5V`hyve2;t+x8R&N%bb zKmB*V4qyVs05ZG=wLx%j8~`|QF#rheL8(DwSWm>sVW*?vWq=W7o5;3bA!3l(7A@cj zfj;&0W8wnfX>%ET_7Sa{Zl1iwLsOQiBWY;EQwti!+KbI8(rBVG8t@!a_Wm-+OlDx2 zkPs+=0aXW^rH~(@Cz8Mvp7!f?qpRa1u2FPX{9U!U))Cm}m%n;Z@OM$5y;qe<-flR7 zVQ0e2qLIC3uV#A`&}K0K=&oK!nOWMD_ZR_2hS|wUJeCg&BU1{3X3WlDgU^sMgCmfF zUMYwhUJieSLCgl=23P@ch!o1Q3>0(#oy3r*nh4bxP?ylREhQdANM`{iMOi;j_hqh3 zAw@^6ue#MPxP>cCM1)Z_8A#|>?EntELp)hwF1gW&B)nS$Q6df#DVr9^{`A*>2-DeI zmVz_L>^P&nV5k-clsH$aNP^LbY{^V){UohT5gneSxFNs}7!r>J?dTK+is5;r42?Z6>Wc*Q|td z@|3G~JV8#yrN=4+ihy3kKp-7u1w{r=S?WSBj5bmQX*0zjPNeC4@XumsMnEuv>f$dK z&nzsrie#0W;=sOHy~)$$CZfmYv5OQR4!mhZ0F41;U>P*FzYHS-#de^4@(Yxf?mV^> z&a?d{SNq7YFqrJ)*bp(K>@S!L(6Ixq-U+pB=Y+ORXEgqN*%q^xm+!yz66dk+yXC?o z51!-u*fy7aAA8a4_xa?PFMavmms8n$&7T~!_W5f8a2DWYhprvQp1s+-cmBZJXKnf} zd(9C5%XXRlnUqg{EnU3nQ3ucVe^UF-+ics7R$YIMH?L@anMVq~Stcxquq61*1DD7R zwxmLuQaF~q_hiAc^}D&ZEibP2?dh@2V2QWND|&JPzlW1WtLSORwb$p zXj@(IGHHowMYE~`p=7h>!rc}!fr98Q@m4*a1DcRBsH-BAyj4;_22eY_=C~=KYBL%u zzJUleRm~u>>_qc12u1);709Xfi!VIw{Buu+l)b#{V_)Am`}1X6%>Ga6 z+vh8ABK!C=AWRsL!2w}H+DEVu^J>hrB=qosL7;vMkgWl6C%%>}oO8nGKLrc>uPE;? z!^=Ri(6+K2X8~BY-3(Ae(B4*d3_HB+Y__t$ zAo~cI{bgVJa<#uKMY`u>CI>45UyEyDi4^Av;H(zR@pQ}az!r1z&_eBkctgV=1i$8zcnyZvtcFz59t2i2Nk3@5tF87u(0)K^fA z)DPWA#84vH6v0Z39An-N0+L8nmJPX}>Y@%Bqiox)w%Ts%t$puH?*-D05#xxBT(JjeO5(Np# z!6ROHxuenEq~QJ1ogxD5?6Rr-ZEtxChMpu~P6gV@qLlxWGL85Ow~9;=-g9InsEy)8 zWgjwE5IuvCK{}vE0JDl6C=cAY)&UkO6w*s^#M)7f8I8cF9+7to) z($Om@h>V~{A=z-QW6NDeWuBqe==f~BME-+^|6B;(0QpmxM0WERpYQgLEXEny_q%Bk$ui7dyAJ%pi=4y+cGGzHT%fj4oi7ghw7)^KW3 zm!aWSCqbze&sUwbw(uF7d>%cAqMo~2O^9Up=t71MZ0-!0Cs8WJLAO^NA6N7(JzC1G zDqI^)#)1*8UX)6SkWek8A}ulnJvKKB8BP9R0%o*KQ&V@9%Rbc9loZsN1$^AbTm(aj za-|FC*-+mzFj>j1ac9}}*=kRiJsgwR!!bQ}t<_g;25{Een-l_8#KVU%A#1m0S_T2S zX>H z{Y|e^1y&*r9DP-Fy0I!`F_8$23=TV$EkO4~h59>KP8Y%_Klo2az@I7DYO)b*tl8>v z$lw1v`_jVH|8p@8d$Llvn0V|7Cey;h2SQ4bWoKC=n~P(mR1MMACj$kAT#E#Ax`j~8 z5@rv94cW~Ap*S0|Tc$H|rBhrF(GPV$5o5}Axq+3M+tG-)WgN^s)`PZG2k(S#L1k%Gq6?YqlG^&Vcu!+%&#{S*aAFexjfB-QaT`>f2T^pVR(`~e1 z?I=!QUxmIUG+n6$w0LUDEdt}a(`j(N8{hG4~k1eGzOvn@wml#MaUDX1~-44o9 zQfEciI9EL8;?qImD+|h91}~!-WfjkEwSKHhnJ%i7Dvcg7Wi2RCl1j>S1&5sK!m}Vg z^u;GpWofM9uGNBVVN|+Ix>6}6Qtjvi=2E6}_LRULG}lOX)d>X=GnfhAjNvLs5!~v} zF9-iB{v5FEU!~ej2A}OKNpupst!MDrhLj)@UY-EqC+-7~$F{N$f7x7aPuUfwmJ3$@ z{F%FfW#QhNFM9gHYwa(8`q26Im*;G^j?j7RPGT>f`~D>hHnwXFCVN8}7WS2FTgd>j zJ!Rkf+HH&X*k!iGEYYcK`_2w#AF^QMN!VkP!}s3q__%z?hl?)f}#+}SW5 zCTnzhobTlHwZDrJzWjwxAA9tX-ctsaZ7YYA1ItpJ$97cv)DtKFw(Osz7SG>t=>dEA z3+CgFIo!eQv%dbNOE3Dy<(Hi8U!|_Q;#WNMQIil5V)0YDQOk3jua}6p+&?Mv8(mMEuL3EfD<7DF^FLjXKg2cfH%@QEag@Cxb! z+1XJIT@ga$UwL;xrF>VuH-FWLAdH~B6vRXzhIcCrA8b`ho~$8)VJ?I?MHFFAx$OuW zn3R&jgH2k(W6spXHg@o2F{8#XG>8z}k0xi8*57#4q_qz&mw27HW(p>P!Fk{&4MP}I z@jQwt7=iF~S1-pHf%y#(@X{i4X%_;>#gJ(6qKc-AsB z1T@UT&!G;n5o5^a`Xo`sabR#9WMLxEQ9(3nPZ9(u>sQH8+aXpQbDhC#tS*#N5?c`l zm#h1!juB{0M?8_CQH3r?ym~KsS>$JGoK=Jdw^C#gQtzT&xd@6744D^+ZJqe64ZO^a zaVC&E$~ocH!fpMjmjnCsyjk#LCOgMk^Vl$THFeO;e?`00zmaQV{`8q>J5cDp+$J$FhJD zlBgK!JnY6eG!h}0bO9f^m@on%F{fz(rJxE-NG_2uoHXm8;b!O8(T@15m*p*^Oy|k7 zC;9pbZq#8qvdOKtV5lof@i4(L80#$lUSZZi0_Js88_-r>J^)=jyCxemW(PH*^qPhV z!9g_-u2~O5<^scUz^yl|scGs^kOM;!SnS{b#h=dm&}@*|7V}DPc+GdechO(}&0pFQ zEkJW{DEJH-3*ruA+tRgLDhoaZ>%z;hpFFKFS8y%W})-22vMi$etXPunzLQMc>`txG{#AJ(^ zFc5vaw034e&H||jLj@Y?sFn$)V>dlei>nS>2GleSVQtDXwXz*#px&WtV40k7F|lnf z-*T((U{6#dxrkHcKAkO>474u5%L1cy5}VxsX9ko@Fj=G`Fc9tG1IR2(b3oIcS0G?j zDF8K^9$!$+uCP6F(*ke-S-6@Ij7c!Wt%(BIf%Qxg@S~}Nm6+6n9W$7bBF>~Dx;G2! z>NHJX*6;+3HI^F?kDv(q*Yn4@<>VQ#hzukND}}9KAIJ!qFwho)iiC`&gQ;jR>2$G; zU@sS2S)H2pFUby3BS24g5k_DWl@hEGYH$%a?Q|sZx)Uea1TvXvAAiVd?kK7AJg)Yv zo$4|DdTj&S@E=V#|W>(lm7j${VF#8NpwogU8MeRordw@K*elR|F{8 zPzX^*Lt6EcXS!{pL9k3##3-pi5n>c7TVe{9GL%(tR5#+KOxLcISEffJeMp?FXT;L`ny!-|)KMmM+ZNc{}iP0w>Qp1042}@*WH3`E;j0Vmt5bGwn6c z-g)x>!mt0G-`e>7>wC4?qo*CW-Rh06n|$b!7%(^|!Rcu4c3HvzWa!xFGZ+PNoFzh< z5HSgP0*-CNxmHzMw0vNE{C}(2+xZZD`r@U1Va^FMyK+`uG<<%K1z=UoK>$yl?enUd zYv<4!huHx$E{6vQV6}ABx#fQI(%-bwYuvIJk%IHw_;b!a@e7|j?68mR{i)wS$e%C6 z%jcha$|V>1$7)Bxj|Y~&{;& zU3?apeEpT*7Hl!!a@}{l!tDFlPC;-5P4zh#COw{v6~Rnsm~a@(;ciPYA99DxljeT| z$I2*f7Bz*UG_Q(}fT6P1Ob4Zcsb&%H1785LL)X&-m@L?69-q#BC0?dJ6_VdoaLeo97zpemMWro# zG@Y)mSx9bRM7l`61UX5S;SqnIQNg%#J% zvW_{$)ria$XIZMY#LKnP;jAKjNRE#z1X|xMdTyZ?74=DP0;<;-@eIcUC|XNb1u{KB z@hRI*9=~w5s}$R;q0-vU#EY`6D@cv*9Q)V5<~4y_XD{h!6NjjZV8R>05MDMvXkv4e zQEYZ135d;OA)%lm@({zc1{!z=*%ZOx`i2_ya0GUrna&65J(Y5pggksaW$SEIHgB-1 zDj1pTs}upyaFRW3<1=r#!3LaXl`?YXs_$he0%$~5C45@ri9a=2i6U-Qs=B%~byP$! z^s<;;jj~u^w7%lnPOc!`RV%AVkWPjeipWAtU`*5{p5xkRszy=?3aU|c466f>O&mQ* zGPo=LuIj>>Fcn9GGlQT{W+`q{;9)XL_SZm-VgjI4Fa8K9 z(tyHHNO&|N#Lxs7DwqJeE6!vl(m59M9!oe_< zs0wVABD_E^ajUAUKrp=&v0y+UFc>}KH3oSF7Hby%KmYR|N$yEk-2)9rt7gS5wv0v# zmeg>rqvvEjxdLLh`XmWSWjtDQ4N3A>P`c+@yahYb`p8tRrCiY<@F<%Z9ZK=|ou&c{*VfvCQYHjTvl#$Pt}UQ!%!%a8$>y6( zKG*398~Nq3fInPzlG!J})_Lz5zLNdDOE3PjKmTJ8&weO$7EE?60>MFZ(t$g{ekjBz zC7=obLW1yer;edKw_v7|%&u^y&tbvK4lM)D5Hx`Kt3U8RES|0F!c5ol%q!2~IL^B3xL_!-P3JW0es=HCD9;It7%z7KwZ+8!OJ$0DPoJRAEUE5cmxGO%oS8(@cj znMD?(5tW3};bl36)J`PoyCz9^lRR#TDu@V5xuT?>rkL~}YcGzrHczLHSR`oYhUS3`}dwrYK zj%UNmwwML`%f658dtV=4vL(EH@Zv2GU9!{eJI(yf`^$D+!+lU;Tesm*aZ5QI3LSgs zs`J~gF%~$V&LZZe+k}NrtzuS8D?UnbS%NJXQ-@*Vv6cUmT4#;-K+OiD&1GB6+ip4e z+?KG?YhLZ2q>NL5+AThw(<Iqa5TfB4EUSJeB{LWl; zk{Mp+18_FH4w+L85#xZqT{&)p4JOMR8IdXIVLk=f!E^^o<6|L)h&c>kG8%{#e)lln zs!C3<8aHRQ!Ej{k^X&^xIqAgD*kAtqF(3Dd>{Gt_`LBHW=;M$3)S0K*BA;w7pLyCz z&Tq4f=MVHHB#r2ZEddrUoF_sl5f9Ss?8{Y)ul2Sm051!6knJoxjP1m=)7SodS)}iM zg_`@`m-u(Tea4xm9P9t2;N@?9^Q!`|Y+G6SRo^?;E6ninwU=K2I6IQAv@b4*#{@U@CR?}SI0z!G(WSV`9qYv) zkXXPFhXkk=#RZAP5J6Ba;-d;Fh%vW#(Tk4UKg%3MJc6^?UT5x$*?sKGN$jx!-FXsa zEdT zkp;x)>p7KPt#`ywP*5*X&5bCN zO|D>+sZQrqoj%Mr&UK`tT2g^o^2D&!qcTpWkR>3d3+B3tT`jk2SEHv@h9t+>;L_oZ zDNbyQFD(RYRpDx4w=}mwY_71kB^?UZQ3>#>d02#b7eiIFc$$JEFen@ zN}-5`lfs^?K_1-_y*RewBC?2@-IztM7Gw$NrQlXKP>Vi>zMfVsh|%DqI+Ei)@n3T7MY+(fk7fB8%jdzeyo8$ z1x*@Q5sY|j=!t|*ND`a{Wo1<_0(Mgc&a%*8E|M51<)UF9Jk^DavX~2U5o=a7FPYRn z*kcbHLR@10OfC)oBg0wJsEdhPiLuocsbfFXGgZWLaljLElDK7C!VpiQg5Hp_Ogk0qCZ&@Uj|H5$G8QnjR+5vz zzA8pk3dWq}GYfFd^&+!oQXQW7sGSOE(t@F+t2aA~V?|{JNlQZC0927@$oMl6&ag0U zUf(dxI|XMl0bsqs?Q8btCCpWd7!x?!V%@VibAqz8U>OjbJtwpQ zUhoY}gVzASFd+B_8Vf+5{pJ4Q2t8y6^g+h~=gA(fWy=I&O_>13r{HBrnjK>XErCst z69}~@3o=Vb@buInXCDE(@uq9Yrg7;)W#So(8eO2fo-pi>KyV8w!<(uDND1I=Wx}YJ zRLKLJN^ylop-z_3e{fS4e{G}@wRO)4^&)L8U8~{m3h4vF3p*m@=zOmpzD31sERiZ{lU-%u9>{`BPz} zQ&UJ<0tiu-zM|_;7T|@N+hxW%4!_|#y&`1cWr?gh-L~`Gv=CKJGJn&s=Avowr=wd2Ii`>`1nkm%YNgVAmPmUmhFEFfzyt zFN@f5b{KnynJaI<;oBU^?l5-OJ$G2^ust?Ca^GzZ-E(XICw0U}X2Hwrt^TIpu)hp1 zduj?uS|&{Xz#2ajq7>)8dg8jqd){T8wU2BOv1Ma>lt1aMNlUynq5wkLX&Sd8-XnG* z`u7h%^niVrtp4tIz{N(57nywl%y?Whe=Zu^)@#1!U0z`}Eqw8qqx@4U4_11s52w_M zOaSs&w)how%%f~i5u`mPn_6 z{fl2a`Lo9#_etUFr+jhhkEM<|>eEY4Kh*yLj!Ck*Fgyqj$A7+w}^DEsqe=|bQ5a$0-rGM|0g@y=i`n6nMY>?F2N ze))ST8_hnE4LHNgH(z@R$n5H0r5<}=vIS`kHUC@VtbOLd=I|zSDS0?%NM22|rHNhK zJg)w?XXq#CT$#R51)Rs=%H?ZL+j3p$2T<7ND}ue`FY z4K#`?MOnqEDZ)$9S3@fnGC_^5$j|^v^<^b&<6=Vu~dJ-XOcu{*;jRHHC&&K#0J znwmq6tWia#Q#vXoNM|b|2@K^*W~?$DN=_-~N+L=%ax+$W+nbF^J3-!pGy|B0xUzsM z*OXz&NjAj#p65=);6n3Y4PhR;S2LIzPm*wP5%7KuPX|SaAsq)YXa%`ETgBIV@7fN@ zDCpsvT4b3|QgEF$*RXAE@==}IY06>tfWq4urX351!NOwHOhMQBE$s-&;9MrQ9hM=3 zhLBYtBtyx?UtC!&BJQdWC>R0bAgkxZs?N-w8nMoXA!3F#5JY4NaZ5bRlMOdP9M49L zc*M8~Nmz@=Q!XZYApR8;L8jqygGK5~#AAf6iCr8qLz%~18G(+-FeE%HSynu0WK11^ zBIy`G3ZmE3(Br9UQjxqRJ4s|whs>3gg@DB}6t?8ou?0_~!prS(@MpLvzX31fk0%SL zR29^BOIiJ}*36isO)3#78P-+2AXk=vVK$WRR=gu3hLu{32v=25!LVW+BhEuVR`-)Q88qEZQ-^n)mV~_h-Il%sQK_$ zP${{Ed^Qiw480x**d@1t&WmFT9H?enIWhPo&b?Sk&83!Xt302?nvRK{=BmjY9bbEm zH9SAJ$Xc?=Y|hlX_zz5E=CKz*9RSP;dD-Qccwt!te3l4x_K9py z`K%qZZj5M6?cRpq=vs2MOASX1bpethAT#(0GLs)BEs#NgK%r5)w+!X+m+mA|pV61Z zO%#oNO{OO^DFO6$U|(=(9BS@kUm!o=PjY`LWux5wxDSGn-%HI17(qp>QN4`dX@lEN zRs+k@Su6>e6anG;2QCFe_q4z#4hqWk1cP=k_1O0`Cj?HR3E(2+0(ZC#Uj#99071WP zNK#oRG0_>h>|peX{xWL>OoYfJ&Iq z5bWb&@kn8%Ndu)5%0HnAj%+YQP{rNKC$)h3?;^T@rNpO}v!Vqs6Q>UqGW=}h%l5M$H z0;>fd$FS`(qtT{=&oU9YQV6H2k zN+%(+YlxV~Ulk*~GCgjJPnR=##Idau!3LSGqG~c%s`QBpFMsb#7uBZbObxB;iWGM% zT}r_>zVski7-|;8dzTq-Zm+p?@s}|5j~)&qPshc!m7UYJ?+iHi_fk^YgYL~`N3xyA ze&p^;ee&zrPcN~#?Bm#b%$d2@hc@10&inoCvj3B^zr63FO>7zWWo&=J3@P__%pyMg zWs7;2558mmHf!2n7J9FFuN~L^==_cBFCVeb_DAl&v;F0{+phl`@G{70o#iW9JH+*N zA&d#BN#w6O&uo#h?}-2;1WQ54Agy)EPN;>+s_n6ouTr!`OzW;JaNLuD>oyft$)nfR0V(t`I#e4^5~YSQaFbO^vSL zrz~DBoJ&Xo_x>XpoUcY{`~trqKTr&ZS}3V(_bLKHS|u&|^kN6PsLS%|nLxsjga?AT zC0Ios7tkF-fxeyEQrs%Z-{wNwdOP+Dhr`QXJNa{7UY`0hsWZNL)=9@4eVCV*&pFHg zTfxN_vM^8#8spEltPP1sB&0;kJqeu|vv=<{HUbb>TtFb#ZEU+O#T92$`pYQTBJLEr9o0$jszn>k zz%qgmw6~k5!emFx($A}G=_dm|d?)E51%8Msl0m%$a1{tM3`l0_yp-&wq4gr&70Akm z#1J3+Rpcrzf}T6dd6SDFS4Te)KuZsZtW%sxEl4}e6)BFG$1-q@Tzm%e&>6sDl}9O; zNNU7EL@nH7#bHvN(1xrBW!@%W1lvh8;uWAVkt#xK^QvYQp=pYkTCtLXOv5ot{571! z%t%gIWbkgFf$3opbvU2)%M_RExfv%myi9$>hP)#vgmEy#9uwI(@EXE1Qd&^ZcsEY5 z@}S8H5YvT3+7aV_|MNc>Vg?ku*=$l4Fd?u5oDP#1l@6BlJP?g(#2Nuym?As|q^4We zqlM~RYt}5L8{@AcR;`&v!_gG6TF4--6hpBFD@jyJ=OH1;p!H`weB1I{IVCXTg!{aY8gQ$gsIJhmOlp2T3 zRqd+wp}q&RJX*m<5-&8D*mXtw%gPF=QxzDBkTb1i(Si&jRXt=Nq2Lxhg08e93|(l4 zv%adc>F3rXS?KFd=yiMjM}L#0X{uCnBg0d5A_7T_YXuQlC~d!@*X?b&TDA;ZRUq@k z)srP)&?>`kCxe2hEVBfj zhOp;Ov`{Yvu}vc~s28Q;YQe4kraEzyQa5E~M zzXE|hEATn5zPn{ad#nx25{Y2yqJpPh#A6n@1~nJO-*0gzAlwD>X7$%p61fT%Tn1DZ zcfwXOjqGuoqE*2sp-3YOTfJ9L=t%{~z-)uLfD8b~* zF8_{iTnk_tj0U^4*9qLYmFN_-TycmGKK;W#`UM)u4)l{K0wltQNqMum$S=;Qb6~rwiY~>^~ z*zTW;Z8WPL#soH@QGsCo$e3*6xdE5$t6jxdln$Vbz)^rr1a!&}2FA@FRvz zd<8W@<0ZR!av_+h2sW@h7$~Scm_wfM+Q7@w$s8b(HIegW%q1SZ^oj#hn41rQSpj7B z3bSiq(-kX73Wo`}o5E9+6-?IxMJOxcsz*6QL|HDkfiH0E?JPUs9ptyatS%a<6vHG* z4A->iR$M?(=(enoX)v?&4zLnwT$_Ucr%$Oes~1lo)iS zyW5dFWRr-1&6godW~{0=ZiwTVB}dluTt(!9;TU34Q;IBum!+VbgsG2xnH+?g*i5U5 z^-n>rpsgjo96b1XDYTmmFAKhjZM(S>+OBT-)qL#UO%5#EYwn+mrr5j38@8V+Ex*L*gZF$29R*Ro>YRWj_s3QC!hGmtFO2OA5t-tFKh$dkhm3}0X?RM2-s6D z*}NZZ)1kGD8=z7S!#%fM=iinA;Z9$J%pwqUU;0`GEKlBUKIdyE9{b7NciYzYzI^5j zWbV(TfMuu|n)cx@`^$ITa)rNO_SG*Q2nWM*)^we(*up1nN;i&7j3R=FP>LnEC9<`+ z^u}ETct#V!SYK3RfoN2sH08>2>*^Ly^wlVCsZ9*-ncQv_F`}hgj=5gQ)I~f6F?TJF zK{CTH6rMDnj;H-n{b)^M)@&oe<)Luoh zSd}lq^K?b;N*IY87`b4YB6_md6?MOSNV$ZhiwFftD}ujT1k5GskUm!b_LbkF8}nTq zd$6xzX0z-nizkm)UHZ1sL}10$i&cCu5#jGyBt4elAn~8cgFsdmcE!P?A-&N{^fqKU z$e*JMKd4MUV}RZ3sC^{ATF zZ6OYd3#vw=Y5}*jP#0?tGFgO|E2!E)(0hYa#E^xpf&^AXoD7PvkEFlX1ihFKj2q-Q~A{sj4jE}~x?~y?kFo;Yk%OU`1kXl4q3XLGQomd`DqB^(Lt`ib~`o@Tdd+&b7%GN$hhN&P06{)!mG(~92MCfZ} zqcU63i}0?9kx_>LdLTvcL7cvtk{Hc1lrmJoQHRN~RhA@*2-M*l80jWJlZI#e?_9+u z`=VwZX2Dz)Zq<&P&3AvEZtwWbZ&=CH;hhTyRQUNAiW}@UskI0QtY?}=r0e8F^>q<$ z!2kHizhe+7K(u}<-GtDeX#3=XIm#c$6T{?VHlDHG+J}AokdGg}^gsUPpF@kS_oz0{uCvEzx$T{m@`3hzBGB+MsmrHOqqdF#Pqu^^ZU(ugeE`Dca_A3+Ge9KW`8Qq)N_d%&Uc2^{ zZ}8JjH;f5y+NcGb-I7f_$q8wp(Ng*nqZtBSHK=CN;zqZAtCgi|5go(MIIs)|1Iy4c zyzEK~*FZFNY%bHWU1o|0Y#9nhW~E+C_86h_?QB9qfmGpQh603H9^Ma41I}3w6HuCA zNgijg(cA$t+vaO>05>?OuS7H+vIz~w<_Ag%`UCUo9-cY4%F^-JmWB0O^vexQp&$Z% zm7$uln9G|Zf|*JHGlTP{Ch`nK9LNeZnZZN=AJ}#RZ>Ny{Adn>vPYrwoM%jKfs(p+L zu!U;X;-hE09As)yB#?#DEm~t-+X)mgLbHHALw;FQrUHF+9{tLBY@D6$hQvetRmXHH z4H&8db8$gkRYwIv2K^8WfhRId>5hzo)LBtL1{EShkQV+t3!mkNs!I~Wj2a0bYt$%? z8-j$yD&`eYDn8POkrO!16r}ZZ#T8$MG7d2;cL-2}#xSz%gKT<`I09l>*;$b|h-ifo z9|oE5eE9*0sj~KM64vg58Ap$h{PdqcUc&3H z$@jtb-1@`2Zsh-@X1;gj6`jWh4?UEIH{msLgW6PS^^s`81RJ3_A;j8FE}#m-N_39V z;b(ib)?9lxoK<+%>EfQ>T6fy#<)-=zCwLq~SR0KQm7njKFt&oh1u1|D#8Ao;TEuR7 zu*fWAA{KfDAz~F(od{_amxZ&(p>n0bG9WFqZpj1i&04A>1J2xIs{>H)%J21RwnTv& zOfa>pmn1kNATuTc0$zuu1Rr3Q;!-?Bc7>xe6baQ!Fy~`gfF#K9Bpai7ZAx5VWZPu; zJ1pl7O^y&g3`kDf%8rUT>y;_&d2tT_2TL}kz-ra;*sr+cJfHd6Zu6Nwj(z#X=Zar; z(K!&bEoP|MF>LQF`}~(ghqV2x)cI$faO`K7?!RRA319g51!sTd%u~K_k^i?k_Z0if z-e11v(hDxT=xoF{UiF=OZ@>QGdwm@BMt~7Wgd&B>6QSq%c$4p9xz*u4Eb^qt=xhE+iS*F(hBt~#pW zPdgP^AKmg2t&(oVi6qM+qRT1Emk<{?T?M)1api0n3m*9JE@Tlnl`-LFs7`0jKA3}A zzP<(4=|wt*4H2kN+!G-)8!^;t!5xl@RaiXnFqai8LdxRwhJC~}A9)0gA{Q|odG&U1 zjA-4`aAQNd*XT8YQse|}aIQ5K)O(qN8&D5%%~?{lh1Gzvs_OY_p()A^F?v)5l7vzXz-lEh7-8of)yVFH$dbw-?90#H2bRTnX^Vj>M)jhKs=8C)%wo@skc!@%*i0Sl$N z^cH8ENunSDpjzvuWy?ZuomDSLi#fprOvI@mPAXlCDQN8$AGMQ;BC>EJSjvhcPzC&Nl)QCJf|bcwSswx zKVTAo&FRhvv}1W{V(!)?W>N51uZfHAVgKD15^|4SDF}s88E|mC!AFqb2zFo)i5w0YKdw670aXxRD4>-s)`dy>|u= z0Y*4Tab_nEM;f1w?_O-H@xHt72t|h75U857%@yaC zXIehm3X(^bUk(2uT%=6lrDl;5oA)bHq+4>rSvch8YTridvwPBltQmpVIm$B5|Je+ z>snc*l5H6fgg8@352Qq?-K7Uo1~y?UWYUR*b9T_6Oiff`7jzX;sa=zmj;FHZS1K)F zH^2~o_Q6M_+L37{x2gy?DT0q$h^mxR%4A?Mx9s4Cci4M#glEZ#V1>3C~~Sua67;tu~jXrKw7s5+{zL# zp$=Yg6-^#%;DY zf0`QwiKTO{^Z`?D={|oA5rmO}0?@cq+2a6qDH4$({*zxm|KlegeEN~QfAaK$zxweH zfA*tCUU>4pA3t^fi_hNw)FZb&`}pmD@Z!V2{P6=neDH>c?z!s8hi~$wuRna@et*9F z^n=$wasO36xb5Q49JXNd_rJ}{%XXO;?6RKy<-oF!W7}Vzx6QgA+IGf=cir%yMO%Gz z@izO--)zZlGxwgm(f+${vShan7x^rhk7@6^&WC5O>$>|+YwoqvYKQFpzI}FE_n_T3 zUb@$opFC*Rq5I5Q`(3a57rgu`zgIg}S%B0mG?s~wogNM|LU3>&#OIa|i2wjZW6hOi zGuo}?SR78|vhtsSP4u9s{ZR05;|({od1^`8Yw==l{Hew8u`VN49GL~$>q4LbfFND% zP@#16&Zk+s(F4oU(O8K{rBd>UBO?q}Dg|I=N#uJ=#En~JrCa4~Zw1@&1poQu_L98a zNibyv^s-3uv5$Uq>A{mPRO3Sn>2&l!0ItF9BYQ9L#cqGnYVG4%5XQl2iXVKy0dJn# zH+T4roRi0B-#%AGYGl=IK1rE z$G!v+27#L8icp!|s1}M~n_~>BJ9FrL0b4ElNsn|uO90UB2KU$wiy{hQ!kbkENdz2HoNRfy z<5rD)PEw&E58Kj+t?-dzhBP_^-u0v=b^nd#A|HTronW&>8G6N26oLU*}(h*oB8R|TaDh}jLX6jyOUydmH^j9k0a zC}>owxFYo;X2D9FRaGH~L__Pes|ipl9`F>=uP{*&8dp{0gHlBd<(8(xq)i<%^lIMS zw{gWez<{&B4F;L{=R9PAKP)~0r&B`%XMy5_Ep@OZL*s>n@n^aWg@C36ki}5sRMxmd zARRa2>63`V3-*#m2p#jr0}KvcS@O9U&5t6E%d_qfJm8INSO5&wj~)*1xj%6i)Ky5D)VL_>p| z1dB5<#Ia%xx#}e+$yFhXahTNTuozh=Ej@_w*VVBVM@eQ6=yYRCrwL6Tkdj^m8+B2N z+%{Jsj;7E*Zef*Ir%ag~Q zg47K1=*Qrj;8vobA0y!XiZjCGLBLJdL#a#={^FCPQ3O~9U~#A*4m~LfGEauv)ZaRL zmSqgO1x`KPvMcS;mO&~2dV=B%u0~g;hS5~&Lr>l_#atreYag6cXq4!=t=2Ad+rt{fou@>%!eA(#*f{qD zfNbg|XJm=JqI%Ixm!zsA79Y1#3fi%yL_?d<%%7%uewXGJwEIXe3mgCqU;$T1!B!f` z04elVBez+^Z7Ce-e*;Hwr+;|gPoI7GmoGg2;xi9D|HQrU@^eqz{q&=^KmX)izj)z+ z=byau!S7%B@V!^R@YG$8e*X#|`?}-W^PYS3=4T$d?yl?4JK~_(KKAARq;}i>z4LdT zv2fM~PGV2K^tH#P2ky1yg6-D_nHOw7W67M2_nG^_y=HH;*PIPMGXH}g+HS2mTdlV1 z=I@-f#k=Nh_nw72ueE5GwO#kwb_X zq}Bo$5Lkl;fje<1Pn?(NRs;=bY}o~1!9c;DsashvtboiSB@ru_(Hd=^)Olm;5#Js{ z#8A4R2r^g56b*;jCXT8hSxb!Kf`MjPS42hJR2&A-R*}tFd{l=Zq|7T2+MedpD+|>+ z&W)8lU*h55p)~hcRtQ>g^&$pN~X9np+49K*ua^=h^`?SzZYz34G04 zaQj*xcY5`!7R}uWOoon+|J*0PxiSZ{`2e8{d4kNRGFJ?o^58P+V)n4#qSKU{HGXe5`{ zXYK+pJ*ctSp-bB~b%5DGb8geO*1Zt`M_Z?GBPu3UAuc2XKB^{(fFVi9)2(!ssw-2Y z=W~YmNX0321kq|Z2r_j>PDdOB43UAT98?AAsP@rziko|-sG4}lbbFC%@42P8!3!yS zro^B3u~}cVhStf`QO}HTX!v3tG_49RQ)9x5RyHK43WLtBlQGVPR*?u;%>p)zjAFhq zw1O22BTgiqHo-I;KiOdSs0v#G6O6vH*b2r}kw~`~5`gJkv$+&OF7^4fp0Ra@zf~r7FU{#1O8}6c zg*C3YfKo-&R0|rc6cNaP16C->3@>}L(&0=IJ4YOHNVy2d*tSu~6kg^VPj8Y0A=C?ZQxBdPb{KWwSF5slb@{&_xjkP!)= zj9_{(^el_U3s!ugvf@t{(n=?A>PtzrL6EIRqAH}&lvHd|sNK=1nj0S~34DojdUITP zfE;ouvN+h%CqAJwz*8pxiR#RiHIWY>v5tW=o-AX`db(xn+~R9!`yLlvNFtpb2-7g6 zERoCr(g_KMD^(m#Oms!wj%7WSX{tI$zbyA;P`pM|wRXq0QKl?0_?9MNQ;Md~Ci<*z{ zL~RTj{vb1`ZgLNO#~1x@Qf< zBJ3klw?WCa@F^1w%VVf)JSED)!Jix{s}zkgjyV2U)n4L?$i=g^sNq~kVnSjbe`E#e z6+sy`ah)D8DIimOW+$gWJOcq2g|Ty07^hm1q= zXS%K|CNWb})@za-CvY$uPwA>AgDTWmb;iQSwV|V=InR|Zz{12XbbNR!N^)Bl!-gU;_^ETLT_n8MQ+W5ddKCs`Mbv`nC z?S(tM=a4-%Idt!>9mzgu@eUiU`+L6uFY^g`mB1Yb627xoK!xqJhW_AXuT{diHl3|U z7A$c^+Tn$--AZ>w2^d0!V5uSF>LD$jup|s90*aznqbkJtY|baEMeT~KR5~&R@rN`4 zVf1ROAkH?X`OokyX}fl-2pUDW=!%eyca0dvmqjoyV5+WzXKIAj)d-xD6R|3k!V`8T z9#(`YIP$6r@(_jtLsUT9S#&$UA`;tvQV}C0Lnc*3MdAq5g}EwxT$hf-umr=uw&36W zj>+eS*p}adUKM2XAYG!M7V@hFZ1-B$DsOqqqPcUtvkc+dQ~vT7K5ci{e@i)-4KF)` zZCkmof%)W@6nOc|Up##20Sk`*{9*9&8DIOn_m}N2yLxT;`?p?w_bpdJ++K@@mmj@< z@^`~}yMFGf=l8G(Mus9gqAbfN;$3aUPCZ0=$)D#AahA9-90{IAKu_E{FOlo=lD{vGH#RGhm~h=0zQbUb5=Qos#?pz3Z>iWvU5i6c--NDNsC1}RUSD!1L^ z<>HtAUJ6DQx62GN1I|n_9u(luvl5aaEpQVgh7R(n;*UT?DM1l5ECcEAwYasMcI!(b zxuPVk;`mer0tH#Xt%xj$m6NZt>9h?oI8iW_`Oqr^+MW1^zyBMN7^+kiI135jT7&=z zoFwW}3#%$f43dkhOYI0`NhCJhM3T&lCSqC0#MLDi88N^y55!b~SeAgEp(^D{B)J%} ziQ=YZf<4)`VG%{}1nLP!RP=5U69yAx zZgl{;DBa2;k3_lg6p_H`wN~EoR%wz%u7t=+vbvcRg4ti_`89=BOPLg5jnX z{3%FH0b61O*)oXSlyya{AaychSu7)n6Oa2XdDCM7V-=&@Kyl#3x9#RtRby2ujLu81qox)v&h6}FRs^V1(2Ib3lru3)m`dGg=5 zbNtV;%9YN&RIjoMday8S=k1)Lv8-by#B;O=G00}HlU#3c>v>m$onG`6m|cfNAcd(t zf=fuZg1AIbH&d_-ZXOakUFQ2C9zsf2T!&OO{*0y~0b5p*r3INqx(V5JV&tiPbMS<+ zuJ8gi8G*;k0j9WHK}5(6hFnZEt`Rdr^lq6epjQ@pU0R0+Je_TJ1(`jz0#3b>AP%U? zwc+c7U52e^M~T7CIKTxt@c2lF96=kqv|$HVWKM=lSLqO25y7oe!wDCWsE8W$Ri+R$D2V11nh}2iMATD4imtYc0 z=^9PxW#J~I<4{vs;xPgRUDG|uhuKBYr=Y7qR6L19aalOv3@i&QfF9erP&#_lKu3_= zqms_Iy9z$i4{bnQWfgo$@_&t&4W&X4i+V<95-=z{n)A@f;e@+xyV+Vr7);`m zzSPMM(=C%V0&&u-)ODmptf1m_bjvst$G^54i8Vo$if7>%#>6$7$W<1l+e{4<*Te%L zkBD?Zb%<;Igu%I6qT)O%a!aXKCJq{uIE+w1tPl{1Y*KKLA`rthZgDPsxm8HSWO`7f zAgdy=8Z@S?8q~Z>OF^3oZd(*ai#MY}%Kc+hc$up#*jWRRrytKo6J!RKe(VD7AqW-jR0~C4u`?j zA`9NcRuQ+#!inKiiQ@34tL4%r5$1{`Gb9a6St2;21c9YPxT-^D%NVMah=v1?IVPC< zmNk)NB^5HoiLFL)S&51uCXADW&Rk)FKNYYg2K%51zevPTB5vr(qEbF2EN0!rTw;nK!!`wR#z8;95E%%WiKLr~ zl@j6cs#?6gR0R44&Lvu-Z4Z%wuPYtB{)4AlB*KFV<_Uv@@H=8;B3xpuMrUg|P(;1Z zM8@PN?xB7(BR&g@Af^$Kbcm#d1JK&3!*sGPYlM8GoC7gQ<*P14d=5La9+$mmkl;z_6$ z9MEupJAggeJNBxzKSy>*VCo=fXRtx$Ja%|_a%Ow#gKt_{jTFpK_)}1I0`ctVv2X@S zS-KEF)yR~kAYHN~CYf3U4VODm038S%b+-}+QeGy#vV@^aHqq!hz=YM5wW%x&o3kgG zRqe`jdcJFol919#NO%m>PHw$jgbKLDSzI6hZp?xn2OY^l5un&72*5WFc67@uBiCbU zuUA+I^d;$3b|pYPJRy;}RU=k2fR1N#l`n*EyVmh}%3s9wT2jbi;j>E+uQI{r~;n zfA*~{$F7WP6>54N7zYy4n*Bj+;460R@p!YSr)ZZIEJxc070b` zAxv^2h@L!XXpv1Y7bH>&&ZR=k(*nwK>k)xm#j!=;aREH*c;$uXpL50cF7X<>9bv(# zQ0Mby=@r}yMN~AY{pEAH#$7>I7Yq$U$>dWob_@qo2fR=!oGb{nJ56h%)CSnRf zN{Osdh^(yIE*okHiLU5z?&t+gt6Zm!V^uXm-$SD~r3@V{$7Ilomul33%BSvy<4)V^3`^ zPw?`ykKGL{Kl}KdFFbWOy!_&`_khe#KYEL+lh|HihMFI`^HMJ_Kl||YpIkc6%gX}1 z3@rQPmp@+yndk4a?xHyxe01@4`!Cq?BOluIfZexnwZFV@rx}ZP-2h%*>`mt#*Pgr0 zYO}X`=dN44eb$!m5b^%!xo#VE)X57jJgJ!p)X0*=46KXRHV>L$Yu#dw8T`~I4wC}GNfewPhWH>~z!~U655z&BB6d-w3y9sqckm#XDXxes5G@M` z1!>_&DfWeB0g}oB?BJmQ!y*Gb6%T2Oz=>!?ly$`f+EpBXMck^+nM_%NIR3;AW6}$; z^^U-b;6P>tA>SfWU|aRdm7-F)AhU?F$>2EvH3>mk zkXaUKaZ?sUSLwq3`z~?N3kK)X%O#1e7N`V*_Wf|6UUir-3zx~^+xfG$v$O2;T*rO( z6X%`zRR^$rB75?l@);*X$~KoJI)>dJOW9~X?;FRTcJk4mK4RaKzVr!r`Rvn<^S8?v zop^$}Zci-&IWq289?=Rb0g%f*H)D=$jfJmReDprf&jB39?v-swS&-6G0;67prmlv6f~lGoF~pPbJaBBy z*$6BXvefHVz0#|62*bZpwSvebntWVQnxMF{ILqaRA>eUI%w1<35oJ|}AqP*yjs{`) zkN`zHs&<*N#Z$cqGz8fMW$~0H5obaQ<>DDC4x zag`SEiNES-gdqY35!|X#UGbNKk|XMtzKXkrN%3@ulwcxs_o5{LjzGPGCpF!oCzV7? zHe&qoBq4?hdIClT5i+1rwLN#Ec#=?1)>W2tijXS1DXWwqBAq2g6j#=5wIFtz z*%?ho!VN1ah(~*6A4O_(1R{bIlq7Mbh)CRAtGaX>$B=T~uLvi+BbRoOxaHCX@Yz+a ztn?y}id2C!NzyviB$AT=5>-JwfxrY{E(O`S2QSKIBDqA8OXU^e?CSg{-=R&=J(ppO0Rt_6TxCcC4hjTkPL(`(XFzAETSqdT`2@ol*LvR zQrwETqCu%3sYbiFEgj7;6xDE+HL8oP;v*zRSGkCB$evXvk@Ln0Yv=E>*#aoZ%y z4Hs|}-5Sw)2v(pgC&Xi9k5)_zS%Y|_Qe*lunrqN83SM-x(9ZUtL6XMeMdvrP?cby3GksHdTYet{$e@Vs<3k^R#jDggk^H zVM2)jTq*u^XF1Dj~G^61p zB5OcNS5O>Eg_lJ_b95B1)jSUYf$2+(P%vW9*q3vCP0n!wSC376>doPNZy)1>0Lp&J zAphzt?!SzEecmu^Tr?OPfW#v_6$mD--bAGxWo1b(5I)_>IZ_aeAQ2zA3JxQ~m4dGX z&{V4tn6%>AU)k8Y4nETpYZt^!HXy&{qU~m zo|r&puPs0K#2xUm?|rqi{KDh6-h1;!ci(XSi%;Hm*#5itK6ZGy{pGnku5EMKE6n>X z-fX}5Ti9RTcitukE}Wp|MLTUUZ`-xzZ@S=-{zGM6l!Of%bxVvmit$&z>^eXLA!G2J0N3 z20y_)iSQ;Irz{}p7E}c8f|<}OtP07Z2^gXjP$Gj0?Y{(3gTRRG*utvOs~0h#je{*$ z%%Nc6U%A$8=ge=g8JFlP-`bHW;V+S2WAc=kd;MX+K?S8P=(f3X>D#WTS?4$raSAYlmOr@dUE0eoq@?J zC4;Nt;Ti(C%?Jb*k)8nwEM&lO1@Yu267@+UWLpt+5t4S=0xKyDBDfv4rIA2W^;TA? zVNCKMMnJ;<&)I!IZCX_czDI3s)Ha}^tstW0P~=bol2eg0NR%WXf;oW-7*Gr&(jY2= zfFK!)Dk!KbLRCSLL2cVMck5^;yX||OxzlTU?wZy2_kMpl^R234)|$)OoZ4rf9nX2f z-tT#y=Znaz>C4R%nj2zQmu4<$d7E_;2nhy1%@_!VXo;tReu=jYBe{14vP{^va*tff z#&wm+F=*k`(#CE8=M`=Ab68xB<|n*$#DFa)t&WAWSVsK>VO?fPb z=%k21E?1gYJF?Ts=+y6=lsj$Rlzr~o>FipkOMhx}m42pe@0x1he8+G4JXe*wO}pp1M^nqF!;ccc^Z?AV}dds1@x4d>ZEi){C{QnV+Zb$j{(n-jq)sn8%> z7EMES2gGtruDlEH^(KjW$_8s5W~gE<>n$8NFa){8NorZ-4|D9gGT7Jlkcr>Tx+bXZ zouoW5n&;`!5^rW8x@<8GA6>T4dBO@#U2~iNCWlujrU*dDqJS!2f(1gYQFIiXX+VUi zm_$t>ZIyL+SRa}TACZP|LxZ6yq6g$dY&fR_ex9J@IUW?@2r93NdKit4$nF4AJfs0V zZ7k9WkDm`1@T=x1*A)~5kEg6XU@ubFg`Oqgz~n`v+iq4GI{>u|aP{bir|dg$Kkyv3|uzWe4&pImhF;rni}$=WaV$E188+xyE( zX7#dHnAOYPU!Hmxds9zhAHDBZK922q>;raQ*Z%VE+pM2{Bch`cA2+YEvnz+SGu`J;f|KXwOvPscuE5LB&_eS+$6$V)Zm~btMMnr4c%ORNTg?8L)E7SUVb5cGf7w^R6wc~ppU74wE0%jCTfOY1U7M{v zN}*bW^fS@l{QNI%J=|Nv&>agYgCpJ z54Poj2sY;kC9U*HwaM)5fJJx>DXl603sZ9X%X@|@v@D~U8ri)}>8E^`4-SeFc!UF$+< z?slV!k(ilSvK)p8kQH5XA_=r7geD5IiUm>9@UQhAE*R zgCcRvVLoV^E+N>w*5yCty2R$J{p`?(l0-ovJ^6;sdU0DDFv?t$AQlr9j)*}^u&GE1qL~B)@wY&UP>v_% z0^|wz_)G~vhimzX(|nRbr@=(AOCei^07MrN1OBF+IQ^uEAo3*Vhh}sG*t7V|I5 zmHOIcmiJ1GRi+iHa$xDPy(BM7_C~YX8C#xH)hwkMk=-KV63v`peNtac``L{M&lz># zb+~$h!l6#woEV)P{8N>(Qe7$zAXUv$bpGU9L@B!6H@2AO26JqgpVnpAMQo9=u5Z<~Mg|1_ah~u0%wdiJ*i6Sxt7YjcLA_90s%#IBa z@t+1oC{95+4Vk8A@Q2WlV{E~35;sd@(wvi4PAg$6*DV7qO&y6}so*4soT`HTghj2} zqUWL=FidldClD@iOqd|A46&|jCi&pkHVzo#L-Rlt=)~9J$AI7Dh=N*dm5IoDi!r(R_CBa*8dDKjqvr^kecSJzku`v;>e8mH;0cQy@Kmy1qlWWD)A?*}UR0@zPz$HMZc|#NefGmR>^an(C{sO;uzw=4>%ySqT@h;U>=4FN|!?OKyy|K2fr>orWfP` zV)e4P=){>ICAz7?!;R@8*p|1;C{8d&^M4R&Loow6U|9o8Il#5iQo19!8oNSTSrnr< zN4;!EsSPHGtz@8TXqT=1Wm&YLpa13KfBp2swv`{e=T;@Nr?Q`3dhg=-x7uI!C#8HJ zd*NNz-hRWSb8h*%_m@4Gz3A?1Zo29Vb8o!#uA46N{_^*i-r@aaAIJ9hm%*%^*7o<8 zdmh^svp*?y`m5r&2!buYC0dnQsqH>)WW;8HkVb+KD}!rRRyQeQ$Koa zTZIVmR|tLePWfB~rvOw?D+#f=-K#6z)Kr2v@k7+1c4jf?5@DqRRt<<^cbf)0Yytc# zM0x7aa8GOI}ePU4D9?Wk&5ad%@~Ju4hMIGLlIHf)1PEnNADlLiH>5*4%36`gRtgGc71_;k-z+jZD=qO=}4|sAiD|j%AQA;OIRj{Gv1Wu5LNojK)BG?fNTZ}fz zzV4+c_Q#~Wzx=6lKcHClJhr;HN3#3-%L?kr&hk{reEC-|xaiX#@I1EtWgq+c^e5iq zFE4-VYhUnvZ1u9|v2VTp3iYywvDM3}=G$+%)~<0ot!l=erf{g+TI~e{L+F3~!zX_B zV>!tmpSY7tIGdb75*4j;N)qDaa$0p=HCo<|4)6nr)39@JoR1Lsq|3G(rtBJL`7I?h zYs*3*nBicE4;(_jmIxmp)sk?YFsgOpi%WEV`vT5xr@Uv;#R$>0&#sg<2}YglW`It< z$=Qgfx%&cV3Z&bIAzj1~X^ss)%iAew9>$!Ylzz)a3*Qi;iKT&QL?R{!7}6R|4+cj0 zV+myAx?Sib%<}D$Tg790Th%FuvP#VPxr{Tq`NJg$g@8pOOc#F3EiQn9E^3G_PWtni zAbNLIZ8L+xjTO!rPByCo1Zkj+AY34z3pH`d4P^__nwBBsq|GF>3R+U}7vOM@2qog; zp`kc2)6l{hEnWznKdYoAPNVF5QG>tBrd9kS11gr`@{$*g`p*bH^UH0*fK;j zS)gkw=%g7DWCF1;6VXi@HURjPcLZ;TQLGz+tksRae4g|o#g{Md*fa$D=PRfoDH zy6L!7>I#at9am#BOO`GP2RsByV~+8PCjf#$P0?XLqGR!Y4~{F2;n#L7aY>iMCf3p0 z+*(;@map3h)07%@Uf_qcvc;||z-9y88Y%a@W#bQs17H2}dWGKvl*zo(y6pQC;U3j? z&MMzp$__0@0v^^86@4_D6S&dnR-XoxCLGZZkghh{+I1MS+vW4P+ zRH!xy+H47p8@gnzi{r%OCHzMOcu=fS>q6Xe(22p)kad7rR)B~{Al%mV8)i#@r&thq zVph$=FiJmF+#^M{OjS}^Mk???HxRo>0iHnY0B(Ltcj1umMyZYBsCKBUi`AzPr-PHu z`3N9J`zcB5)Nn?O=^c}wQ3_G`W`S%hDoZVs6goO5dCN&geLU({zxZhjgo44-u!uD4 zj_U*KDq=ra`Q6NqSD;5eMx|VV&nQ)tWQYbnC=)Vg6nzBFRW2FjTRb^W3TFTx-lAY@ zGQ|Z~cL2o{JxOVGDbzBm#Re#0nk-IR&nOV)a&>ru2=ZCs2B8p7$}`qY(!i zuPO#(zykqd*K&qtzG<5owp6}?V!craPD-?J4C6%0pQ1w~1t5506JT8?Qb;-Q7`B#R zRo>*G<*9Sr@gF#$P@HQni4J?Gq|?E9;dIb-0&+%-)Y1a1uiXAc2=q9T9 z)S?q_mN0iDU_jT>06~W<`E4%SG55B0B{M0WGipd6;!2!L1c{_Umj;|vWiC3NW|H9Y z5Py#09159mI)H3hj?Rl=E9)tr9#LUpix_`6Rip}M=rIJ+hKL6Pgt;v8kd?tREE?t2 z^y7_`!0`*SJEJwVg05gzx$Ek#3AIiGApv>jHm#+24~oV8_*W-gFgzZEC+A*WP{GwKrSuwX44D z`78g)P-(%0OkKvQEW!+|h@- zryu+F!}@*~DfOLuy0RJ@2Kfv6__?{*`iaIh{BUNX4()dY-ymU8l2nMh)`}+ zg$pT6f{p5$88{7yOrRt})v!fRf>H2u4AF$3^N_4rH5&{OjGrf*M5$QL8Okx6Ms>F@ zef3A9Y#%F%Rn2PTwwpbGt!i#tSq0rg+P?So=}(;Xu2T;4y)X6h1?QgCA7A!;Y|mrc zT=qQn?3=Ff3iE^a&Q>zpU$)cQFGX5KB7pclb}u!nB_S%us!QjQ456!NXI3XvZ6}=; zgNvtx1V506fMju&+YEZ6TKtqp*Txy8t2Bx(&03%3WdftBHh9@P5r9}X{)pN% z(-{sC6leyT=R8Ll4!{wy6{9Hfq%ipw=q6QOO@#i!hZyQY9VL-?vN+mY(h3YiWCg412 z<}HV;o_nNBqZ!jC8(V6P0{V3cigdW$5PFlK+nh(ihCdz-G^Ha<=2*cA5;RN1hv&^( z#EBkkQ+rhsV@WuWlgrZE$yGtyMUoho8{=V)eoFEo>rG%T83k}efQ_xZYNAYzS5dqV z?((j1#%VclItQs`J(u!IHqMi(1Bm*)N5r)pQ#BLGnQ_E6jXAc$)|{KzR%A>IpAj7r zJmD>8oMlcfbRp2=fSF#O79-~&`M?3PKKtw|j{XR^FIo95+Ec;mHC9Z5F#RTMe_1Jw zS@}lRGV!OwEN?M$LllEfEzs?VZJu41xRf9fIjvD$N-!smj-nMBehfr#0t`*mG7K5Q z3=Dw`aXv~;b|_LP2bPtMY_s%FE?1hTmH6q(-{jAdc~xq^9^|B{Q=-yoHuz(fcql+T ziemG_E^oDL;Zfmgc@>3AJfSLWA|;N}yT%qSwSJv&)EQ-qTI);Ds4i9Pa)1=#P|KW2 zLtG}_(%{97Uk{5e2CcLP2TidOh~c_46f8>R zbD9|f!%BAJ)aEt>Jv^N(5fzE@G2b7bPP4n5g9u8t~^#Y7-PfQ5Y?JzVXm#`Z4Q*>BunJP@0LGE08#gIi_o< zIT@x|;bbDjF=ryX!*mQ;iD78K+2zZcT0ziqYX{wmM|S{BfgeIACG&tuQ9Sj8Hp(cX zL+ni1T=oic^|H5^{Yk0nWiK!LxqdF+FBUSrR#UuS>WliPc5zy8)6ulDMfuH+9C zEnhDy#C%jrX{nBL8~t->HRB z@N3DcDDi+8Siy`(0jLs``52FDC*0 z7|3#iLPPP>&${}T9>A!OQM|=08Os6Xwx9R;Y!Vc+p2f;8m#89AZA&NRtP>Y+4iqw5 z#0!niN@H(K6E~|yi;=f96XCx$mk44gOoA9_V3B_2OrT#I{KY7BvAtszvDQ|y*7Mki z)1j()!tmJ-U2yK1>Sg=Op2t4_oOgfqi|2jiqEC1p``h36vgfgFG5b@_>gB}?=J@KD zy4X|4J(dhnCwfX*&3MmUxAc^dXq!ItM|?o5Uv2 zC5}#3S|aD9K{A6M9Z`!1bDX+xMwv?|=5%V|lP$1ml`JBK)Y@Q`+YkrkiV&%(pv35O z_L9X<|5|0_iA06T<{< zK(JwsU$;w>5V*|>1VAtp2*LPqiipl(CH+R5JyK>vIYt91-Ku7Kgpg@%;wZz(p|kmq zho95}U380;TFkltQWQ-EoI*SeH8W|*e+tv-;3+NdR*T4l=vrcJIQ>Gfu(f!aB@VNQ0W&{i(@k5e!y?2GAjKjIAWKBI zxC)cE(qOr++*{tOES35KT|@{u>lQeRd0v3% z>C6Y#RR=)2j>7av%iD1q?lCG};sBCMO$dB>;0cS40c$E2IKT!+A!xmLSn-aTPB@|= zN0-@Vk*(Tclm?vU*;@u4<$BZVuY37RU*s_8$RSRZC2;ZgDKMi{^@1%1b4Z**K8*S7 zFlH1mimipN)un;L0@S6=v7yVQVlj$21{8X*A&M7gny0sz|w;6qzoZ>gY5yKSfmFlMxmiU|3nEkCTPIS6I~|@aBO$tIF%gjPBFJ0XOOc1?gHpk z!se{uIT$|EKo78vd7ht~FBV<4bU{(%^H_JPh+{LFZzA z5sBnBJS1ndLp=I)g;e2p5wmbDs}}D1=GqGx(CXKEPgz#1K$av|DeEFuKFg)m$TpfsbQ*i+KtrpZxm%1(T(58Puz_42Hp*Yag-4`XW&+hemS*u<0A-d|QV`}@nP;3M{)p=9>4FI&vM z^mWJ{>u6QYhwrk+akJJvdY|f@K z=<`cBN_vu22GXGFtnNGU__rT+@O}zvTd2xRf)$v=;W*Wp=+)P%ML1&0m5PvxR>}>j zIuU8$JnKv-Gig>VbDol9v5_)V6x52>5G5R=ry7U8{%{$f{<5 z*lx1VeD$R--9NH@;hfKW^6WEDJ-m9^{<2CLeDSj%`}!ppe&<`4sF(fyWq*8Gu?*T= zu3%IoDj2oCf91_(C8UF26{%Rprd2yjDoQ(DCm|)V5{%9`X$)cvoqgB_5gm>q{h&v2 z=U08%j&8XJ26!S`S7u$xr9)2MQkO>Q35a7TUBM4`^_Kro<$I@i)X2I(24!8f zG(@DzXCSn8nFVgI*%d(SM*Sfz4p34=T})gzn4_@UL_j~;Y@|**J`0&bT{CNncIet8V zaw22SIR-dMiJ_qNijKL-Awnmj`>e|{B!vVq#AAB9h8Iyo*`kvO3aNAwf#`CNT|>lU zYiO9pfbJF#@StPXHi22ez%<87iFMh{v3z44Ot+{@Tl|?`X0&cI+0?!S*p=!)hJ~+BMS3!K^&0#D2*$R(MB%#a9A09AbRTbxQwfLx`- zr9cI32m>d%-Ic*8&~0@vm>Jub2Ols?MeLXAfBUchYDkiapP(btlQ6#bHDkkdSob|= zqvJGT2#*7@#YGRHmbcd3s9=e3!+`-%jB*|WD|jeDK}Uh}hkKw|LIL<`W(eQ}bfN%5 z0EMdQ#~vaIyVR1>!g+{iLIc2t&R=*!k*T)%ZyD#q^ z6R=M`ZVxVcGlr59MBB=h#^X?TkUJgJs?I?tGCw zv!(G^GCoAKWSmru@)m^~{2BFfJ~$=S%azPr0zA>>Cp`nGV!8=os2J(w*nsLLE{#!0 zWk|AGqmG|ISNY&T;-M#XJ(tTIZFo{bmc`;V|C5qoYBc?LI8YmcK1`z!52(!(tH~Ce z9zJLdaRWf8s>>)CL=M1>cI1;vo9?ii30`Cc-qZ7rM=-e46bqBewa|G@*G3qihwk??E8!J4aARB3Ac#AJvbqbP~Mj^CS;rGo-sh z7_wuO2oQ@y6x3xJjz>$IsmP_4m520XB?oZo<{Jc?*nv?)>4cy!<1k__HDhGOm=*Yd zAv-8Tr=iK{vmeB8eFLs(UAMIjp#(A5U>YT}s<~nran#Ed%d%?^X96F`{+@3LFP`_v z1OANEU5_ogd-BJm9=P*I&phyhCl>fPwvySGzC4T__|n%S3vckpm+zl*)qQtfVSo9A z!*|$jv)6eZd!L=x+jp0BC$Q@}2ko`tVf$`!#QvMvT<-hWie>xDKnd)vv*jiVXU}H$r=vVdtzSaR| z!s(}hX7B&@Vl@$^sAAa$F@U6*EPpCInM)yYL?(Ppj82~r@wpW<6WC-oA@0Q#s-TMk zKY+8V@=dLIfPCovXL*C0P8Mmy#t^rw$}#iVkW$wyVIX1%(bQo>X-^rEtggAjpOX=Z zU}hS$zpNbg3^x4hU%d$832h~F-}};qR7!hb`)e1U?|WZooP6j<-+z*qm(Tmynf{p6 zH!uCX_m_Pl+w<6W+<4Vpw_V@&z9#L$Icj7Fwzk)JD;0ZxdDP4O%_--C3`BP4@+jGG zsdN&#qK3PK%505-vJ{C#S(YJLN&uI-xW>vkr7N;cJWk0n8ABKVV&YU$Xmq-^l#~L+ z4Cfn901?XNHohD4FaPvgX%%yLRAIcDih)hpZS$P4dHO=`?Lk7v^Q?%}+}V zhiIDwb}?&75$`VSc!=0+FQY9hh?avY&LmFf3i;8q(k*~shUC4B63->7K(;tA);c=P zC__U=n+Nm`gE$*PVU!^zM5+q`5Yt%f5R@zqg(##D^HbN*@X!#onL|v!EoG{ z{!EhF?!W%?-;<)BTJE_z`PQ88W~-My1m#U<2Y^G=LDd1k7AOQRagP;k(cz&V&IdZR zI1y3kL16-p7-FFnp4zce{4~lOD|i5$EFwXw3M38mTa^6aOhv&Ta- zNswiB{LeADI-Yz$$EgnGD$eR<)p{G>os!PM9MI(z z9**(Kk6pz@khVe_(IEzSxKTdOG#oP+#FX%a4_aMO01UK2$cjik=m0}W2{yPu5^M>w zLNJ{ifYX2zZnXLZqKjy0XoKfZMjOhFI9UPs5piaeU5GBCV_r8C)0MEEGk5?z5AhJB z5QS7nQTpT5<`p8y3R!|$qr(jdC3wt$b#xG#M*iTeUPjTyM$pC0+3oaqcz4P>3xMOl z6Wre^aSNy?gZUMW`RWD;Rij&X$rv#_ZnjsqMcz{=Q zLzs&nCm%ZKg!M*GWepS7OvW6#=tjXh_svE{n2e#0s&t*~BR=>;pR z#guyrG4<&&M;-bnQ+v=VM}HL3pF32^+IUqwdUF}kjY%nosEAeoR?OO2MpQnl=_-LM z6?K7vv4@=1%d5Qn#iTsrWdBrQsL0hS4Edm?mMWg8C23aMx&yY^ib5xTZ0cp=DC%X! zz7JC~Q7KBBD1dGyC(Ssul=ICH96wH5>KSbg9&o)9hJ$pQz6P^KMiSB%Ie2{CQ>>23YJmzvcyA% zCKYsrbgntYWD72suBfi7kdZ3OPb4QUr-n!w4d8&dRIPjuosu*R9)cw#^k7a8g=Dp9 zMuZU5Vv`*K8qoj!-~Qki634B~>cIW>^0b3__Bfv?l*zyU-LDnJS6_MQmo7U0`fI<@ zp0dJO)GFnk&^94cPnAq;Wrf<29pwhoh(T_|tS!vK(Yo*Our5SbYR6l-x6<3Z)fFim z&;U2p-LP=#YKI`6m=S@=q=8WkmVnMuN8RUwCaN?~2sxPvA`tokJ-LlSttF#BTbv}Q z#lUp4F}h8WF8Y|L%;{!MYf4>;GeM+o87VeH4l~3I(U!s?=<<$YO3C8Urb^p9i-ott z0SJDaGx!-0H5hfr87v9X9Vzz_HwAaW1I zDS+91oS^cP(YBb|v67kd2Zf(Gz@MC~HPx)pAbc?jpMDg|R|r{g&+-f~HVX}iH_7~Q z@bk@*RX0(Kh*NZk*vuzznoPW%I$4b3A!Syvx&JE7w%uw|49xM$5E0`3=3O*PxqMq& z25ywkN+fQ8oa7;jl`_PrEPX5x1Rb=pOaP}pfN+3a=cY>pPZ0AE-TCO`Emv>##SSU1 z3W4F9Zh1Z=m&SZ@crp*q{`SJ3JS3Yo3`Ee)uqX(M)Ra<%v&lp(#!46>NfCtLM28c! zb^xM_sG`I`IdQ6T*pM!rhEQ060}QE!lLe4WOve*JH%bc4WYeHagIEZf7AxD4327sq6f0Dzd4-cU7cOUzXt;CE8E?g3Bh={j3oH{>wR z7pH*VtT;vwi!C38yse84&_Q^CFvJI56|$BO{3HdziH!*V@i_1xM$xST*;F+H+|F~| zrYq~ko_=Z(2j~WP{x=}5oEbdnM@PvSQNU?7RLfybj`_qq^dwG@+LYv&NXgO@eHowe z7(&FB1~UxkNeS5zQ3{QkuB3=$G366~rZyc~P|Mei!!^y~PKVN-srLWud2D}A$`-RP zeR*@a_L)cK`;$^jAMgP7obNq0&-cE37P~*e?2&B6vPZIQF?%F?$vxLRwfL4}4xVX$ zdDc$r+Fw>IAF}rh_3}QutgSurfX%CyJ%;TS=E)zE+G%|s`#Nx!_0-G99w4Ym7Bxy|MG zoqYyR)IRD;qv~sFA#`$paGf>U1P88`2zFgv#~Cr)p1=y zom`TTNvbe4BquZ~^>QsO-@sr9Ug`n?*n*PDhP0FqLHP+2m`mhJQOZ(WoDE^1dFUrd zgD$$XcJcS>rXlH zC`4~G+io7ulh|+UzD4?J9+Hxdql$^;QI6>ltCayx`Bt%3_l-h)=*ht|-)OGhE zi^ohFqKee?iAqTdN#P-46(EQ-00^&8FyK!lBnlHeH&+0n8&3*}w?K#-pbAC)FVCzt zjLi_lkT$mTaE2i3;tJ4pCA7mU6gbnCQro;~TiGa$@{l09QA+X??vic@C38)b(E%JY zWa`MW#Uda`Z2(74zcwD?(_KnRTY$NT6C2H$hUmsXC&0#MHy~r zMw>%kHL-|5Xu60H$^ir?fMkUsz@J9Z2MnJJHnwb~0ZzXVsNz4DpO8~B`xTT$o*ShWq)JOLAWN00%TOmEB0nR-^EO$D znWRxBSt?tKi&7#jiyQe{nIWURrIrsKnHJiar=RQ@^)*&|txDPD+7J8gj4nj}ESKA; z-$h+QeRBLIfBJm&vJLv4-_ANu_(>I@mTzX2PKZO@Y>U^&PRUYh0@=kzIR<65kWC|s z&>*_a3B;07mD=W($ON8!UKb*EL6xi^tG_MytOP zuO;aB0ReJ@)HoF3X&Y--4WerZ;5S`jrw36>)iQbz0n=mMg#b{9L>vm~L?56V4>v~n zV=Cfr9b?7-&~?+V&2B>((x!?w^*#pkCJIDqb?w4C=*ePWJyj-9Tn_%OBu=QV3AO6w zP9kTY>xR=o*Xc8C@iU+kpfki(3jy+ZIB$sEWDV&O86ltp^n5eKZImW~pO!+|%YQkre`$=%gWWZEOW)Xfjt*e@lxVqjBPKOghZWaHIKvpWx7mm;>Sh zaDrBWEQGeQRdeZc8`A2!xLHaqXwhScr#F{- z`ntBan8)+jHk$_6e)6#|`^zfjzj|u%kDgwl{obPsyuYko_9vL9K8`*A`;X7_eQZ@T z#FN|mi z*0XllVB@u3^<3ZkQk0#2<~tQ={?4yT_Ow$@P}nNjblq-%{bf9EQnp|H=;~8lwwsl2 z=-4Xmv?@Ebu-e&1v-^GpEH+>ZSV_3wtlc-?WCL`MmZ>@+70Zf2HKP)e2C}2%q!W(K zkRlyoySehy=ehbS7mFT;9`!GUcvRDn3QS7W#p+NY@QND{tw_oC81|c1U)>J{mtT76 zyG}c8yKT2w{q?V3`Gqf3Qu(-#%q?+4v;hcjsb%`QD=+iJwHmqCmV55H$FqBj*^s}#98CUF z^Tp@;ds1hge$@HryvrY7{>&%e>$PS3%U6H%KVARrzqtOYORxFn#n)f;wYj%mw`l&I z58vJ*(^)3_GocRM2t*;3}$hA=Szyqz%q2XBk`;;o@1k zD2bA>pvR_LT7^v-Sz;D{>4(t-F%Wd}Qp*S9$phU$e@}tG?=04stk(KQQeb?VtSUDeq|8Oow|m+r!)R zXR#1Crps0_Hmb`3cENN{aN^H%i5URWzOZ=g2Hj)X&MU_k88Ee9}k?8rqL)7S9KI2(@ArXAe?5S{1IcO z4N*c0!oxg)GYue^(J|*U4JZLs`0+Ff&l3p!fZD<#g{O&44`(1oA@nd&Wr|o(LSO|> z7EcNb0nrdT(^0fk2_J$8jBkyyA~T zu`_};#C#Swb1@XG7RBjo<6g1P4SSj{?|z8(W3m^Z=<%>IE6i2LDgAsR+efIV&AOrH zt%+r;`Gf!_bhX&n!XvsS&^jvWoj6nhJca-sPGSbU;+{j%+=`utjs(!*w|qO6hC-@L z0$|9*a$xv_Es*}n%H)a_UnPx*GCg=Dm{hZL7pF;Z0JBS#RNOD*e$2Oh?8kghU!QW~ ziGECwvG{}t#3SphyCW-jZOSYE{e3PW@JFy5;2}Mxk|CYaB}GL{kNbi@;3ZeF z*NopJVCcLyu_*L}jH?i%rU;J-6g8c#0yZH?N6U4^H9#sW*Oj9h5_kovL|UD4*X-MT z;}=mKLADyOq!)jgaLFxVb*Qq`vE8`8xHESry>JHd|v_?a{G=#CoMxgS);N1r|#ofHkUF)=B(pWVl#lXc!=$Y(Rj z=TYaP15T1+*L-55goXkn4kwifU3egT{Ma&02~G#3w|4<0X=t{!_>~x}6W0Y0T|{_p zpr}1P42({pf@oCGhA0G>jnW3;4}i259S`Y`IsV$r;TeD;UH}Q_286-^WSNMK6{9G+ z!!B*Qh9HQh(Gt_+d~_T_@|GYT43n9eRyR4VbRFctc?x&%yAGgtJ#?mIcNxd>Fz3x! z&sy^h8&49^nqH7;tsz})f#j9H+|(97Z>e>7V1QGO`EP&z6My@R{?rO5(_$GAg?@km zCm?GQx>zx^JP>bb64VZ!Y~=%-d-UK>Il7a`s{szVrk4Xo(E&u4wyX;WLJ6YLj_2uq zrh#e48?~0NS!$vm)MS5onZGBc5L23Yuvyin98<@s`>Oa*)N~(w-@BD~ia4@< zbakWmdpJ5C#T{IU8bujsPgW_4AEmn zQ=@M)_nC7)`>Bt8n2)~you57L17EuE9Q(`HU3JNKuKenEufBBl%~#!g-PgUyY_ECo!aMGpceBf# ziyn|pboG;9NC0F4C=ln7^QF89f=8Yp193t*oAAgk;D*W>oP9*nMk;NF7{W==5L}xq zL+J6N;{@!YGz53f0(9E6^aD7C_?K^wKJpOD8hGn1#9s5(TWql2+NYx4D zi&r}MfISq&b7$Y+X8~vT)C<}?oII57$1x{X$2fJ*E!7vyT;XOTRo217x}zBk&SJLrrq}%jj}- zc+w=Nia3XPORyNf{{vZw;^;x`<4<_l)t?3Ng(Z*RKKS6G1pxBsW407!O8)5WWPK@d zBxtj)MFdS0{bNoL9UeH7;)$S4f=InZ$R(2?aA`0Gqh-$MrpRWKX+d|CYJV<`rYkYeXjxMcoyK#5p z04J@W!bh!Po|#5uQ3}B&n#E|gW#j10u$Em!05KtyNV&)9P$1Y$EO_#%N1lGhUlV?C z{(`#}Ena9*9(q`jIDsV(*re?Gs+9-w5)hC*Yb@OEDBNc+=O%6B=oAUA#}4UG^1iS?M~Z{4MPqL z{OBeD1DMRlRKm=8^Sof*Tz}|9a?Vu?A%~ev_|*GfaszY=A@RT|sOTu5)B=&XPruS> z6u+H2d463lIVUE;7XVNi8-&#M)7q9<44x0m50N0h<%e zG0y}F2IA;sO~O=WH8I^BYp%gz4bH@`AzZtLr=wT81v!7D&+i;k;J z7bhV`Edji#Hg8T_Uku~$3szdmS5N)fYct@S#Vi~MB@m~%aD*mskphuA$A#bNj7|gt zeuzJdJl`LW{_7w7`al2jUeBpM^uXW#>gP&V-|12?dixlIA9;G^fA*cX-ZA?YJVtHB zdUV+~v7e+Ao|u)u3gPNsqwOF+^Mt8Wf+>7dzH)eFloI=4l>;EB%diej;MVF!?2F3Q%a~ zDq>2|{dy+Gl=DA93~)e%A&>|*=Cs2@CXDjgbU8ef0M^~n;wk}mWpLP(?JE#QiJ>kq z>Y{-Sg98k%1$d0oK$akiqtRLK!0X~b#IXl3u%+bD#+gNQkSD~ca-i0_u+Y}kgC3*o zlBLa2o!Xd*8|6vPe2p{jKMIG+?4T7ZXE&M?S%vHv zx?63q>Z~2t-*fwQ_T26*`|P;xp}Wtpo9yo_`&!q2+pT-R4(q>tug%|i*p84hq=UCS?TDH0di(aL95ln9U_NBmHPy>L zo$cdb`|mvCt((2+6@R)?e}8%Ef$SHo^t>1KiEL%BmpWCK)u)P3HLn`YPBFRyt&cy$ z-A8QC`OQ`lr^;4lD)ic2RU1NVk}6c&3030RB%M6=eAy+cVAb)5-~XO-Kk`9;PU+$= zU8G)CNBaxJ5LL5!8IH#T)--Ubx1lLiU{;m;?VTs;P(%f+>QbethUF^fvB7(uSqQ++ zIOnT3)t-jv5hIH2X;wB^_su)MZ6>;UgwD!g1+0&F`wP^91^##zD;`n1;ks`_fav(9 zu!U&T&m3fjw{B(cQhr6JArnFy(~NriTUUX6(EfY-W}1C%oY=Tc=ct$IK|~Q054CD! zT^bCTQWVAVUtIjTi@*FCW%4J^dEduB_TGx+%P#RHG#@$tq)}xu;-}90@I@DXLV>JO zKKt}z&-vi#pS$2gU;f;=>Se{U8rc@}E!ThJ#%sR@x$~B9KeFg9M{HSvv|&2P6Qo!Y z42YbkBuXnckQkt2?wlfCp5Wxdi70P$(qSMANgN%5f{q_P?3Q(C6B)MR$A*#~c;egu zv`G%bmv8+@^2u|(L~!B9KlU*%r@#0`FM8QaUSc7X$*cYOD>j?4wlCeS{`yzmdHeNV zXKwf02Kv+oz}2LadU@*M^GTQ-;EccnIOt+xGzJbBv2ktfJz?_?Zb#8Dtus%L9bxj(yq%}F@t9Y3OQEa`H%BS&;5 z0f?&@3kpvRx<)O5d4uBzM!D3qOE~RhnP(LD^l|`0!2Mljs{}>IR(XY9T%s;;BKGO- zyzOQTMybLqqtKcnYRh@z*9GXn91hI%R ze)_W?DV!CONAaUVZoA+3(ju)R-zqr6NxCV;^{j1I42neDad=lQxTO z$^EZ-S&ghZMuC9p)~ ztWXpff;;4#{t$u@UAi4wh+PM?E+(!vPI(6;`t&O&CHQHct`?C7hbtnyYpm`#fa!7l z;TQ_em}97oNEYyFbm2J}h0jccqi2qWE?=D^DZzuDc&?%&!c7<8kGq&#mp?vsBEqW2PvW3Q_uBZ$Z0uh>V0tmr4Aq4prLqq9w)dH?*flGRqP3;Q7VKu<# zaP2WEjpfg_me8j@kn8u`C4Dy=QKbx?cxd5ci~Rwqh2MYt!N(uE$3C6XW66E9AG-gJ zM;FgkFF*b0J?iBp_uV@0jw^iuY|;E19$zwh;hd}0%lqvzW2ddw+-Ii^_I}%Xd+)IJ zzB{dV$Zi{ZTiM@@I(WAY4%>ATaLlYNPd)T)Cm+1M!ug2ZH$G<8=11){Ov$pZ~q@Jr@_OP_N z((b2yWDjO5@l+EEG;c2Vbo6(wzCzinTy-Dup-wk$#h?;T9j9)^uT<ej9z%&L|4tZ+>m^lro)aZ3U*9 zQANoa#XDT>*$2B+5b8(DRn6EkBnme>beqwtPF*E1RRCQ}vyu}XqT02OeCkQZNeo2l z_R5MRbHpqEo1yT-B?gWt}z@1G_u7)Hb199FN!c*2E@i2-~8kRzM3S%1% zK+xmYlGVjb+)#-+Bqj9ANVB_q>zmjz83~LWkBE%OTKN6vRj+u_EB@>SYpmu;?3Ir^ zY~Mfp>)-VgneU!?G~L5-Ug92Av%*<8!W_;FLn9nccFi%wL-gD>Mf@y4u2v(<$J9{v zbR^Hx)HdU|Co2O)w1|c5Se3m~!U03FV*0g#*vDc;*D{ngsXlQ$a&iiZ7~%||iif%o zaREqzWS{xo<7%j}eW{F6Oxf#1hZs^%0f-6?eJG@ptH3-o3Dl182b-wPfVf!1hB*;A ztrXOHAzCu2so$*NVz-V?RWpM}bu1i@uwuqWt*EusnpGil!_iVuhRgs%tPsS5PRg7Z zl0usnH=Q}R-{MB*_945FNBP5)C=fS6H#;{MX_ctf!_l3>$@frp&0T&=J*?fgf97{b79Lw>ce9Py(!gi@|T#L(P3ZoU2H z8!Ecx5)ujNVp%bM%%&xPW1v=>T5Qf1Q3%kwh{aI~boo4Sb*jK`y@X?-L3N@oAZ2~c z20Avlz;y{WZxA!!+HOPB*Pbc&1wuy-Hb=EXT0!T%W_K312F$J!lt8jYKb&TVv6F&M zQVwiV=#Po;F%wk%G34OaipVI>X&4J(QE;|8StOP7h?qgjIfD*}pn&{g zw7{DpI6<>*6&#tZrrB;GByvtD$D|<)@mOfxfBuKxs%17_f1Q`T_(f-(@y$d*MIjK6t%NLvPxO2#MY`PC)KeZ`lfb%QPhvq>t&nEKy?hBe%wzD z()$TT$*fXdy4X)Zeyn(u=1OC#fMQmWuI#N?9+biJGk>3JYP*^95XJH>x888q-LsX< z_RU8XEKH+q5L3cE6c9qck{p8}MD2*hP!U0t)nJ%TA;&0TofQ2te5t;p#R={Za(FOBPNo=bg6@ME;D}bAI?~h=yt_mry?iJC(SE zA)qY}wS2$~k{WjNR$Qhre7KDm5D&OylyZ7nCVJAD|CFPrXZh9;hUh6FJG@;UVcjp= z0)&tfz6Iq;w&JI-h^UIbd@D3kId3$6YKdq8RZNT+@h~^$0gi%4i-@yHpa;V z_yN`N>?QZzKDEVsm;Gg%%ZnG@^1waU+g#TAyUg=vU-it=xrfc#dY_#(+HaSQl*~4l z_uF}W-}O>5E0+D8O2LqV)=vbJ?oPH^u;T`@ii2C%Jy_^%DT;=ctZQqi@)41 zbJZxdRMB5qs@m;i-HKDCw2D~CX-K81kjKWltOJp_!c+CiD{d=|sUogxRF??<)t(-K z^Lnxmo2XBT07Kq}7O#BctaqJi3)1Jm6yu_!mKA!8s%^P}LJwK`g=z2GC#00#K!I$O zpWINP1D_(Xm+i~U+itb#tv6jKKUes3$?pJF@%X7#jO+4~q3UHCtcY%bAbJv8vFxdA zTge~&(7QkM{xdvxtzK3mE08bvP##qxq%8xK34!W7w+ZJ$8NTf_rZN?XQ1w$F1M-DKof|+1Jl{ZMmx1iJ#{@ z5nYh;@@naB2`wI18+0BHHl_v3$^4W}8RfiW3>31-rw5)kGa#wMN!I*W6m&|=Dz(@^ z^N67nPlI!;;}Nb#hVpzh9*geJmHnHA<3f zeHGVqs!T1h3OZSIYvC3kI)%B|iA#eiG!-a@xJM78lM4E&$34XBF6PcFwfMVV|BX#6 zw;p#Kw<;WPxr$QSZ3uJEiVK0mW{i6*dcfP}AdA*c)x%ZKwD^@zEyGX&YB4ljgJ@Yf z(-JfleyLCktB4`i!c=ROt{#@|32F~f+gxs=-;kupT5{g4vwJHifqbAc5M9?Tvir57 zSiKA&708q`2UO+iWe*rITD=V6Ft-J3s=2L&1Gw1(aFZ-pziC)OXN04ghk8J4 zX({Odp3KJ7J9O$+fxrP=!KYyLy4+7hMq{q{D*1uiUsg<6H9Q1`JT&W? z5cB}fqFl@D5l4heFNNU9%v*lKor5B>%3ztRK?f2xYX^^C><$1pDf44;gehJVC442A z=cCkU9=b+%{5W6?(cs8*pYj}(A8*~@>{^;t;--;Zz$3xyJPK{EiMaB}WX zZ4j$=fAZ76QuL_@6^1|l$qyCIK*_1_R7-l%{>GcGyK~NMI91Tf*x&v0-z&5iy7*6R zxhgXu6_r{H>c=4H$%;8wMy*$5Tz(Hc^o!8Y_UFag>bUa||c&n?`br9mzc9GFKz+6%w zE;X(Ir04|L03<6ER&hAk5Mwjsh}90?40X*xfwKts?6;j|Dj>=Uf>Hipn9joxCHa5{ zz?sW5e+2H{#4AH=3B;9cXhWw>7bhQ_EV@o8HD+`o`A;od@uybHG$5{xEnJu2aGo=m zi5o>95F26HXoL6n^J;IPZ!|; z=bIKv3>EcojEL}u)Ub;gkCrXO@jC!K>qHWSEDtP>wMa>^5GlIpAs8CI!Py0LT5$;f zoBx4hp01NBL(KjEsF!UbtAc&$2zc(=Rhwd`th}}0)DQgegb>qW#+vuo0H?hBb`0g9pT;6|& zH4k{(n|ux1{;~(NJ&X-bJa|*jW}p3z-A+C7ZTs%Hk@uIMt6o+et8ncp+e`J8PDPv= zP<6KV9y{BBQ$)LEqj=xeUnHzv_U$jsYK@cMag6&o2HT)MHS8Z?fTf?>y;*TW+{crTit_AOD!0{wopo(4+lf|It2fo8EUG$H@v2B= z>3h#Q-JgemAhvz1hE*@4s4i8?@V1^gqpQeOV=7dI2GpTc0Tkt`(wG%L+I#X`;*DQ| zL4(TFhA*A!UtP1|`(}!0I-3NJD`0_N1K_rvG4ocWzLVxAYa3elTw>A0;$h2CDIa1P7YaRyK+*-L2jGVT zNSsj;rva2{XE)9GA%?KwU&bGl6@o;Uh!^x^O7@y9mgR7Szwr&Lyy0~(S1%uR_1t*;U}h{C1SQS7kWy~un#N*5gdjGb z1;wkSk1SG}-FN>zDl$c}jZUC0gQFN#WQ}^N{pGRllngzP%;hIrx`^y5NdP)m<)8U1 zQc=rJm7;#rGa)ikEtg0^ETRP1dm=t%@3$!#vdo`aN|eJ6W7iO8P|l#68T{4J!n9aK zXfuH_Tgp4;-8bb?(upcEcMvxnkH_e`6RDx>Z7G)BwmhT_r&F5kq578N5C8g4R4Ibc zArfYY7+o1$YJ?107g4l!TEu1^5nyO_4E30_>eXzU>Z(`Me%IZeioE&O+itl1j+^ee zcaECcx~OVL8CU|K1wzmo5}LNI+-cH%a;a4tD`;6Q_nf)6-FVY??w&WhG8~ab1^cA? zY;{~L+M-Z0m|=&jR=j=yF(*!nexJc3D9mOFKso>-!mTwat$fuaOI-QJPpz)4A4x2S z(IFBB#8EF7ntIs~eryCQpS=O?@cOi$p9C6&DU;qwF2cA<5v%?s_QPDXj9SkNxjGfWur)!17*v;1mrrdBHz`)T+Em2yTv@`|5YoP~yNIRR6;>jGllsCBWjMux=BE)nOx zl2h5JRYH1zS*83B|M(l#pDkJyv{KQw^6hurtXfoc{_5Ai_|&IAdHNZrochiaF{rQY zJF7PhfhtxNWp(HuE8MAVL33y-nhlpJb0{B11m*|b`2rmI#Feh;q(OPx+r&es`qwCL zImvCKqj0XIRl+NozkT%;Hky_B1S@7C!cjqM;g}&*seCiRRu7pY5&;TfoB)O0C{@Q~ zb^BGJ&1kKzaEeQeeyRF5Dw4-TOU2RxnThLUyonF{nS1xDeDEQgR*z3B>oNIYs{7rN=x8s$}W2OffsTWYIN3{i+bSuJ{ODWM@N@mJkoK%@#!>!=Gbi_s`% zQ7aXTJykAQ;u@Y)g_HFxVs`F${ZvkWrmmE+Q0kty*pW!U)cE= zgWbDtzVOz8XJ2}5({tO_GA^?+`+k{adFRus9La{4U*Gd2`FMH3#DP9up34k9x+e!L*Nr<4SUzn^za_QDJCA+V%>Mp=+5Om` zJ^j>+mXBXPXZZM`-TQTG^=Wt+5c8Kzc2liH{-c3am!9cdF*qs2IWcPl$O$i37OYO~ zY9@@f2g?sWPn$d*tV3Ykfd^S*A!RE&KF~CiideQ|h`33^!%022;+O&Ms}SSZJbVmfcZGkTXArJm)ReEIA#NZFcK;k4>9rz zCbKe2f|XcbC?9p?kwhY%Qp3XPL|SkgUWR%59M?mosa5MZ{t5z>Gn!+fq{yVIkC(Me z3X&29N82s{f!U5Zco|Z5;+nzKv{B~fJ)@gNSSi$lqR~s$ zm@O$^cRDDtVeP6Mzm&T#KruOO!5pdV;HePAcycb(RY0ly0k{8}R~!H9KmQGZQ~?(R zpezx{@<%7Jd{Cbj&YgXDgTodtoM-i+nb5T?SU#v^yIitpzD+hJW|}jQlGZdA$z3>U z$*h1TE>E$VHI<|6>V!zbIe~F0yQf#-1lw1=dP{Bduu89W z{)ufBrXUp;1=5oq6})LBpA*j0ly4zl2UA`OOb*ti-b&$)tNY7JeHq1s!Dz!6d% z3eigy+3dk-&_F_gVilIF6sOwZwIC;Lnp=26cq&JrtA-vDDhRQ8==mB#&5QM<01)1; zg>W=nXuR@pQc-=;py#w^lBko=q>7^Iq@sjtU(7xErPU3b(oDHvtMolhLr8;2Q<_Eq z=j;qu!i z1u9U*3gJ>+Dwao;lE)LhyeunlAH1eR%2bEvkqbX!%)LT(`O+aHQC>LU@3sCQ^{S0< zHa2BVDje`hYhr~!EHMO1NC4TTu*!lg#QX#hRq53qoAuMO8iGDaGxVw>;Pli1f@ z_vIrywr$tGRg0EQZ@lTNfBN(9e)a30gVkUr>62hMR{ss=_g>7;61d}n316N57eJS;WMhHKTSd)qBGDUbwBFeze*Ea$YBrS$P85V+MnpfHBe*CH4bW~4) zda|eRfNqpRP?qFaaUm0)elBhDk~U>2{Oc1WP!S9w{;X)!0{%Jc>g+N|$h=nAr zBnUi1leG2gS**My)DT60gFKQS zW;V{u7MCgA++IQs@;Hy}tK?kj1uQ#{&E+IEj9iXt^W-)#F!}ojUVQu2mq6x%m-F%R zTL*T%|Ms4@U)u#QKf84eyzDDxc-f8FySA&l%gZZd?zaFmsM%FPzYI!T2sq zXY{{#+1Sg@opR>flgFLZt#8+spN5yA$Xp!^?SjJ&H$unDmn`%j78WMQR;ia3iv@FM zdbMQOcNN9ML4VF6J5=oCwpakuF=jxOAAELgHIpd3?4~avPExTiGa@^N&A80(o8{Tl zr;6fcPMa)BAlQf@11=m2R645`crJuC0^7=hoKq%^b!wVYYS-7QGsE6uhA`| zQz7swN_Z4AnoL|0@+VVIeyrl*mnvBci6(}Fp>0G<8)D%l`9U58C?%mT(qweG=tZ46 z904i&ez|A2j?l5g*fz>GZFtBT5+_W+B zuXB=zcu9^1B`0}WQ2lVJL(D~0d3?1{cFmfPUOw2G^PV@2z1gXf7w4VHO_f!gO zKheouVQpL4s7c?5{8W0cZrw328Ht8o3_4h0NF%i!2S=ENeC!~K|h9FQ)XG_Sf9aOm0v-=)tjs+50*%aW*k2S$?8cP zq{vaXtkh^|hImSwmgUD5x2Lx}28jdC{2(&|UomrHiNukU<56TytlrW~^)%g#gBcy# zvH7VJB2ab?aXPRf>!0Z=VZtw#A4(-;pewc3D`}}5@v|!O&;baqT9r-_5O`Ih6v``y zgD2=6dP&qwvi7y)*k#PZ0IFk+*8rs32!3QdDF;6-ypy+lR@MP_44tfOegV6X077Szph zDtq+E;S#1x7?0s5q-WR8$QI3?O&CL4?mV1)GXM==I}%n{r`&UVPQ@ZqeC2gegr&fg zVk?!x1M@=GDu)^~YUEJi{+e0s!ONH{vT6x)^(G5`B^7H5(kuLdCSg`wd?Bz2_5!4C z#&!T3Qa-GK|0y|a!NNM`SRo-pV{*wRGbQ1%s}+ybJ6l2}Cah z!YvMTl^vWeM?yliu&vLl-Q(>%wkLVS1}{sChF}C{3zili8HvS2l(e!C2&xN54-r51 z_{Q8GhIxQChWv1}E5#iDCRiyl`iy-Ontr0U7^KE{hPGFIAVb(#<~OeO4V37G3q*rB zTWv-;-6g5@Jp+I)=ztoKg1G-XQ_>Z zq&S5wVtyGVFRbKFNMsQ*$-`6A)MtVwd@42Y|B08`mjPjr8AkSmi*vOXvvLBR$>b6% z5MBnBfAIRtZvD#r*kJO3J=>kie*3lOfAGfhZ|>je7&frH*WXLMyoGW3t^L~=m*3dC zW&iV=KYVY`vbkfJnB9-$~?ejUkiQU zTs@CHb5s{@_Ly!CX1nie?wDg1)b#w!qLC}kn83a~X~c0u`gQs=ybS9x*B;)mVaw*t zOz)0Zn_7Ko3xwLsbxsT-wq*@k`fFPMS_|UwGu$AConcRg*LicMw`<)R0P#_?6{khB zW-XdEp)p#%Z@BL2(-+J|d}P~J*I)Y;_GO2z-Nr=`U3U7yZk>;EO|e*xmDxp?fuB@a znL?Ddi+vX?*2XVHX?4pol-p69yQXPSr$KdKnFuXj(m@Pq1jGF79;C*T@AR{We|;tbdlkNo`d3V}cB5(||x zgt57^YEgE$?0%KnWmfe!gxcuU?k5?{ruWEEwVe2f% z6K%>8oOM}9(T=RYXfnjIG+U!h&yt|Y!mT?2M06?~vUYolqEjKJV#S5Os5sn&)Q^=f zCNvY_(jve_*8D#A59pj4Z6OZrdB=%2!^p^jV z%1vJ|G7rZCL^C%7(fBAGM>+9~NMxg~aB7bd#fc&eEGDG5(B!Eg2-Fo1R-P#!JzR}} ze)$MdII&eACp^s2BNl}f4vO!!h{Uf3(10o&Ef*`tA$;UZnWrQt5L*n%h8_p?8F@VU zIh@rktOS=iqZ^pa3dsRhIT*~K{O)^i1xW#P@Dx@OY8rgBNQA*H9wA}QLr8)2Po)5_ zNTozl=@puly;$8E_~xaK?b{fMpE>VLY-!y}D%P+P>!S&$M3`~q%i(4c@PBQL&QJzu zY<>VA=(p)74!}s&#$Sh3fwoD8p^YSJ!&9|ec4;G=gKc*3`uFaMe)`m@r;ZrPpqwa+ z(U-pXxvs}_Vt1B{%N*#{N%z6=)w(U9zpXM9VzG3T+1^`8b9UC~-ue>JBH(g`_wqh(qYXEoQ9 zoGdh}T-@j-7ynwtY>ht?GwcjF>jq*g*tybw3F`F-ZlchDodb84X?7MIR=rLUH9VJTl?Rs5sPJB~q4X2>f9pM0x4P zi9$xt!Gs4vV6`W|kc4~68s}ncN_%C#^3-pI5LoF9E2CAPQGBJ-!OSh9glKm)fKtKJ zrnL#T6At-ESOdYUPKs;~ajZ?!m&rbOvU zVQAl`O^c?D=gyiwbkIO*!!=*J0-AOCIUAW4bceP>Yszk7cMXg6RCO75Do{fx2x%FM z$?v$DeSX_qR112GC6!=$sVMbhL%V6?!|ZH}RXST-#Ts$DfjE&uSLrimt1-gD9!3R zYj$n3=8apnYSyN0%hsU!!ny*e*`Q(7U;Oez02AH>A#>mvgp9Q}y~|T2rztf7!Bn`0 z6GAFID!o)g2dF}|@GMp=#Vp{EFJu}+b3kr^%qW3kjt7!O16;$c4r5Eg3M?EkA;p5g zJRmu|EHL;N0a0-vyh(q0^^y$L!YJcaX_4MX+%yX z=osqU6La%YRt>MEdtjI*OSO1axPeO2rOTNjP zzvM1S2sE^C&fbNxLY-T1g6PA#JJOw60$1e5aK<3wC9x?)&lyGqWDgWyWP!WS={C%!P|4o;!CGusnNoA5R}I`-ItjU(03; zSum-uBia5s3Rtcfmuvc5FmI46zh;f;?1c8R*+U@i$s>FJH}_*hVb)Cx4QE?5-{b;l zv{NC#Y0xDzE_}h1L+fT6b;)y*IH3e@!q>ncMUnQ%{_AL&SO^hkA1oiyq*1fRmA`&= zxup}=)-``J_2lM_ja@+9oPt@&x^t(>B^$G9r+|~NAs8-SOB+=o1lU560-2m>7yju9 z8I!J62@5p7n0By@ebyRhMFSAXsdNm@ti}Lucq$sHXb}OS>~1^~S}0kfnX~1>aBNQi zPn*mzY?AW;&WdjXAB0yka8!M z{7My`NsE$fhBh?@rB@lr9TP4^0Hy@eX=R~t?XggtS~NS%pGo=uWq-u%&!j&5$vaT9 z|6hLU@wIHtJYEBq8JA5C@{NQMhNen4rm_-7EEp8l@Owq8SwJM#A=y=i3B_xb+u_{Uw4`HGyNgI8MNA(g+lv0SL zNOJ;9%L>O$S$Oay9xw$F15>_c1(yJ1==i5Ud!Jdka{9XZkIcd$IFL$+4|ws^7K|+N zk_-MRW|~G`nop5s!4W3%$W(Q4A*R*SR1Iwti_aqb9jYdyeq)ORdztegLQDlTThjvA zt8cJM(^O+YrsCSNLjyijl^v?2@!Hc9lA{GY1~}DMb)^O>HUG>uij&2QLLPIKzjnRg zoHM_C#phkSZ}kD<{`~vj%$QokxXjc=Lb)h1Wt(vs3?~v9T)mFJxdl|$rqGHaD|nii zsH!E+p*v|ZPIU=a8|l*N82k1+16jeUkTRDFw|4=LNsbLUimqmqNN~=)s;~x$gk^i0ud;Sa9`zESqyVSScs_fYK`kvou~>?obG3-wVXK{Y{M2tmONu$ z1*#BhlTU}#kDBKUaBR&0Mv6?$X;`UJS!)h}&cO+v-&z)}*AvH`suMbEfq6bJG8QI{ zAEp0WjF4G070@I`S{$^FC(aoTJlc-5IWVB1(>XPPMf0<^eji7FpPwlCpTRj08u2VIUzPlcoT+cUZS>o*?Np{a zdPB`r1?rz(VLa0G>Sop|X@@o~t5;HVWIiUd=}}|Jov2FW@0IBWmTwC*VzY$#fA9(Y zw1`F7KHJ~^&;QaEs?I1@QBH-q5mcj?TU_MA(2pKxgPJ8eEuxAviq%|`sYzRN_^V;M zkV$~nw07OYfBCmRfSHbD!+?GJ9oM#9%k~{wJ8|thWftd~ZvGl{e8!neuKVg&8a6uY zn9d!KKIX`N{d)}>e0+x^+l)MAnE#&wFkxG^X}A(xg*w4c+$yN{b*BPsTLH_?KRAg^;A!qlI7<<1!A6%Lrg$A^1qkAt1IQ2q(N(B|;1i`xII(!j%Z6RDiSU z!k<-LD#c8Wl4D+GHhsnzhnMY{JpSl<-5{9P0U^4jXAr8c{kdZk=Dj{fu#Lr_RPAn%1 z&w^QS69Um#Ujkl|5kKP0ON7LS%wCh1SNUd&LoaRkkctzM4;gsyAvTYMoY&+Mo`g&R zCFewW&5T-zg^)xb0(`<#AcB-AA)yZqKM9=-@gzw~jX9Z#Cy>LigySF~o@jXDyb=#3 z`MGc|xr@b(Q@5V=8RCpgop3yaK`eQs^~`Ikg`YzvJhjc^Rbc9Y3`xktg?Z{wcmhKp z)hD`7liysY-3cZyLPk!Uxk%-i3eoFOdgb8+$|wa)@=v1=he(AolHiaA^2Edji*N<8 zpqRkT)jTFotD9M!CyRpBCu+83!W!h2O8nCgP`Wq6ZfeD*>{K?q92^~6^N-%6oTFz7 z+4ttY=VM>4nwWRHA{$tai5Xsg?=9DVRqV@g{!fZonTh$uo$DRU-o5n^S7iG?scB>S z_)iFx|iKE7XGJbmbr8N)!`nPYoRAKA;5UvtOxuRXcPoH2dRoOSZqb58b`QcG$F z<^QC7zwA8rf=RtkpW1)v^nvg)6SMzchMgBq9pF6nyvZjGJg(ib9h$XjUa@^Na2DvW zhWYbl|H2%6H)`ZaHW#tBok31!7kkv!6q7%jDfDL@vHqSiVu&4Sz|G$-v}jhbM+>Jp zz>L-rudF#%N!ZqIu)i3lKAf8~aa&7io{4wS7*QGpkO@F6P^>xUI2j8dC>R_O{$T@2U9Sp{Z?49u(`#Tcv6uN zY9_M~!hpswxZ1!6TTmK-EauFd29O2v_;Oi@C-_`3H{X7p z&z6DZe7x)oHq`7ew(pwVlKtqK@8!#7|0l(`?2CJf5WOg>k&$AKkA_TBGC&L#fkNm8 zv^CGDg3%#`h&9Bd^5hT*5zd7Ye+mo^74Ug-1MDCoiL0pm~4>s4Xastd>2xcI?}$bFUspu`+MoxXREDK!%;SJh|SN%WnFD zmj!~)8`iGGD)mEhd~_b|DGW^DR4(2;2- z9?Co!D4c1Fw4+?`PhnFb1oNsmX*IFrWLZ1*=#Jng#a>HkGQ%O39sli;9W4_M6Ejto zimp!VK7qhk`LAvCHUJ%8vAL&27z&}mKD2UWj>njh5UV{k0E3W<7?u_K&(hNtOc--2 ztJWDym)hmAJKRcK8uZ8=HAdlA3_6N?`TSf=lGuAA9uC z!ilO}+EHs#;PsO@O^~Kh1952=v5FtZ4;MRq4ZW)Q0B81PPAjl1Z`!;Ls$~Tht0A-k zs903NqD4eGp`}o_f=1C$41@r}u~z#U8al=k!`$@c)saGw`3dr$z6rHtnd?8cV^AXd{$g z$4;m9OcxTK?E>RVJ)ki#buCW9;u0c>O;kHVuB|h@s}`J9k(x2tI*?%B)run=e|&Uu zJz0IKG_{S1#ZJYdw3c;f+ZrDoe&p~Wx#!t3ZH!pE4I&<63V^lyI!*gOq*tMFjQ?Ls9WvTWz-P zamH3Pg3^+J%o>O%`yQ$>|G7DS%t-Zc**{+FYY`3Md*myw{M_$-2mQgjEU?pRYuJ%H zc53G%<+IOOe%a+0&z(2xmRoNDoZ&||c*4$-3>`LT-u&6i&sfX`%{&cGLXjXOW2-0h zniI|#G9J1u9!BP%oW!t#Q1(ZH}!wzPC9E zXF@Qj%N&T0aAZ23BnZ5QUJ{=8Ar9O!iwf84yh?%|f2bS>G@fY0$^uy^z4DlU&$iz?l&6Sc&99R5PJs%0(F!jpLU!p0dN12eIVR;+Cvs!IMc$p@fS~6pvVf zNiHO*a7d=2LJ&kIJZVz{{BflSh`B;0Hhcu)?~InoTr9sN5i(h0D0hC8Qla6{gnt-D z7&NJzaIP;vDgi+_c;c2~@&lM$*kZ*aHZJtZMF?Sppdn{O^(rvpArKW|_=`MAk|@u@ z6WfCv$s>HiQN}5k5Io5&yOLvK!#QkG2ITT^i7EvYf(#48TN)J6(YpJ6hU1QU>1 z@=B_zl5fuBOn{tOj?VmPco`6OCez+04cYNaFq!+{hi|~nfOAl@CqOO4{XYTA?91>n z`*P(-_N%)g6zGfzL_6K4Oy{DU`kF)Q!hzUJ924{v|sft^o2xNY-& z!19CcNlI<)Ly)-srC=axRk*yatU2VpKZ|t^KLrc8fY?|tZwW|a=l!!T18M=gI z8JZn^q&C3FJ^*B2c8tn4Aj`7Pmi?ua9plLp#`-fUH)EeRzmBoSvPeO-oXMTaVK3Lh z=d8Fh-hezj4pah?0p?bGS+hL>l2#~^Sokb+)O8>fdPQt~#0vfeNI_sBFmC{uA zgg}*!{0z8c&NX6Exvqx}OvkXj46CY>)RHG4K!KMXWy6*NRGZq9oUTqhAXhFpgT3H9 zRR=UClBHJtK=f8vc*p%|_WV_KP&^6wp=>5;$H{0=s+7#6bpTBLD<sEc2Wf@p@-xufO z<(Hm&0$!#CMUkT^(I28B3goC9MvV{}Ak8Y;zK}Svv^Wzy0>sg~qD)4UjFu(VD@v(6 z5nx5xkVF(33Yc&nQ9{B&V4QHtIjl?WS3*R^6Qyxn{(5btusfmS)5m|x`(n#L84sYsRn3eR6$up zixw+Nh*t!%qjAMxq*l?(It2Wwmeg&^B``GD%Ec2y+&syjta-Bgr-v}D>QtQGr9ZQx zvg6<&M;S?oSPiHadcF|4pWJx}Bi1ORnX`%5*P_@9*C>=p6@`hkBy=ebo^COsWc@e7^%pa3Nh-H z%NW#5Jd8mGo!u~a837@qoq_;j@I00rJxA$U%iIPO3H{m*~EA6}-v5s#WgMLwS zJYnQ$hU`R-ZCvL@3xA-@`KX*?9J3Mmu~R^rh7u_YSJ0O5xah!%s;|l>xmrRX95GLE z_;BHgc_t9UQwLTwr8bC^MX8lnlVy(bj7OB(Fq#x!XUj2Z0}3%GR&xDZ&&Nt~la+H6 z7CQ$LY^$mbWI7UOMwPD}V@ImgTjm7YG6oTb$q_@j=-c?{`RRV1;4q$sfPTpDX~opx z^-POqx+uFdF*KaAki?p(fzm(2y!21)QsxkTc~PR!q(mB*U!gc=W#H7{wRQuH4uKeS z7DQ#SX{JsnY3f#Ot6MuEumF!8l2p5=b_tvjj_fag`0d8EkNAv>ec9I7$wLRvn>~X% z;R+JRN{vmRiN5EWl=IK7{N<$~PZMATFiF}rR4&y8%Uj}ogX}z*12Fa$9N#hns2|m0 zM*^`x#P%F8QC${#WlXmuyoFx4y$BGS0h*d&rqvdjNrNh~6&!(5Q()8~1%Mc8n~nIy zK?4|WxWMUnQSPlHg&B~mMw>%D5X9aWa4pgrOP*L^IEwq zXYSviwN^~D)sxaevN>PPbPEW}0*D53IUykc%;91VL2?9mWBDZ&8VtFZo^c?XKv@X% ziL@d^!65m=k32wl$TuikI5K4+KxBZCgR+B{1DQPuKo}fkiWRM17cy*h5B)^XFlUL_G5K7a#cA*0|99>lnXVP;g_7*9H_ zX^H?LKnc_Y^I&!!kPKU7DMg`|yyPX2^GX0t z^YMqEcmyVw5a*c?lwSD@2Oh3IWqtoad{U5PCwCPkFOH`OAO0nw8c-auNgJ_KKr{!* zgDmaE6Ncy`VHhTmKWsf^hrnzZO`a!6IlwuK4!^+ASOvU-#%xR`AJ!#$GHA@;;OI55 z%$aaF9+Q%($&3UkzxU=|?w!~60Lxs}th~pcN%@4?<=6-IY-I{Ku;*!x2UvDF_A^gC z1TQl#!^=}f^>rTGeP45@3|?3}1YUL+d&cPAzF$7Ac94&k=IPTi>-u zK6CakkomMJ1N;SZZu(+kW>a=QHoUxKdOvq%&#UR{ME9JDgGQWiY{ym&n>GFzFIQB& z!z->)vmiCfCykUf$NnnB$5}5Bx!@welU>HEuYMpn+K%PJY72BYbQH5M3oHV7P+ES# z+4s2=0C?G-Nr|%3nOOV1#C&?jb9J*i-7Uh9)j?3qfhSDqhexqywkdzy#?QW%bGr~ zW~{m7XQ8%s6U=f9+K|>q;`9e1S*#&7TM94^{_+K=@h267z9ed!B;*92ZD{*Mn&iaD z=V|_P`QZoeVqEqKbMC>0mpMnZ%eC12K67Sb_Mgjc`r?9@L1q?b$FLu|_YQx)oSU&d z{RQ)L+cv)PeC1G?3ZX{@;829<3=}7Wi0YVEAyk6sTEgQfR0yPsmPLs=$kF{cW6eOL z?{WM>&{I?p^J9|Hij@%Dyz|woWxN(1(XqbnBNMLoaf_(3PLR%n?vZi_5-f zTvU;mw9HFPc&Z&+d@eZm+>Y(r8~DHi{+yfhwFre=6OfG&0jd?-arNVI9$=XtSaw)C|Ms=7`o~`US$1g=JhUmA5>XYwlR`+vq7!<0&FY*( z0?rBG0D5ca?1yOLPqZ&IAvGxlwFc2u0=7X zpQtOf6#BwPjd4;%{*>A?O{R?6%Yf3Fr71;miX%$t>=7%G^&DzR(hkR)Ye|%j%faxgGZ}*(%&5Cpu&0jRM|HXWTqsr zFY|c))z5zhE1At9lt2FVUo>v>CQa;L*?BP16-@=|V^FQxz%GHg!aDvx{^QR|B;1xb zYrCm~!Ci?==7zA%oPquOTNfrz97Ett(e~)nkK-b05@bHQV>^ZeYrR2DV^mAZl%-j? z{dNhtSny98dScUNjc&XBX2+^QPJbo^5d+aqWivFNd)}Ggp^tNY4h(VvhE1C_?b7w= zBU&^8nH|-Juf_Tt7`g-=oyP`O;nu&~bKkI2;lR90=Qg>Wl8FacE~r`5_AQRvt%8~@ zUUq|HKE?s|1Xo##IZza~C7y%X*3H>|$z{<|H$<_C%cs1YlRTk|R#OJd$t- z__>IkKum-SNiOxX4#A;MIm!rS{Ti#}QZ*%;+v(L&)T6LjmCTTb;${1C05WaarkqP( za`zW4nnyTiLl0z*aoN-aDTB;au>AaMFYk0kwrjrLduz|G?T`ACOn8}NS@us-!1Au` z>-_VwkC)?K+_Cn#r&qb`YtqPGlTYn6WBh>J_vQQLnPd96AA7-+ffp8rQROqI=AM zQwDcy*ZeRhT?>t0gN8mGuHfYcjhkB~;ZW+g>D^jD@q;cQI8f9Q!_W!yT1KHiiv>7L z)e%BFbtK!`L|udp!992r^O?0Z03a2V4q)l2V8d&^3@3exQ{Kt+1dp0-UMhpfSu#W;+JZEBI{BS-27j2W&A1W7Pn9#$pSi z1LsgPP{7iy?6_HWRhP3~)=));L4b6`v~i;Pa#PP5#KL!j$|`2(-5MP@dTMt6{rMM4k45) zu|^OLf$~FrGO~(`K1tmYmB8p-^`8<-UY`;%p2nl7fM2m0uu)_92_Sa_#%#u~kcbiH z{K$yv8CEnFG>Mf(q9}NjI%Q#ep`+64z+~sKq2}gI9Im+8F!!I!4r9|m=otQ9>cRWJ z%>yqR`$j#*PiL@6LJ1@i{&>=mQ)&hINbRqhlW3CGaFr%d6iPZLKiSA5IY%}wet}%n zIdvOi960$3Cpny``|0pH7=2imPzs%j2R77o5la}UBvA>nle=(2;xCFwk;+p>v99tc z2uW0+B%;UDMA%^>DF>&A=`k?C?HRRDl(xxVMTXT%g z&Pl?jMxQduNC&W?p+p)wn|`S!133^LB+-k4qk$9(vti35P5UJw{l~Y-D$_L!6msel zUFDSFC%KK#)8`~vL#<6T0T&RN2VfBLlS(_15KqYsWFXyS0+rCW6;`z?mnS*BDk;Y! zm4YjyCZ@)#cCToJYb=`*Djbr{!E6WP;bkEAr9HbG!)6GEk;Nul8)e-j9*;FK=p)dN zauw&5E|=9AQf4&{7}vPi$}T6s;bog7@|BC`c8WkQs+UW@RcZn&GMQ5@B~lL>J=Rr# zF=f%XEUl(%+dcxtn&(krUd zNIDgULagI@ojJ|rRK8s@4_FN?>)@)|W&qV{C<&GgXT+8~^9fdRB$9SCr=>-dDC1OF zh$;%1BB#8HgIj6?gjZ^1v?*j(%pq`K*=RLE*?j)v?|$t(b2_&3*fh6N!~z6pdLVo-z9 z)^3Zkdi(uveyM1Vmyk-4J>g~J9rV^BW*)O5_k}NB_KRPB2$n*=UYVF%wQhb~uWnGM zubbghfYiT7-F5f3#P;afg^9WUfZpz)77j2XfEGKD%T#$wliy|885mi1eo zUibX2ClBm<_Knw`f9=37?)z`PS-pms;Z{ZGEyF&nBlVgyW+iDO! zRcHzb($xtZK4{{Qk%xp{QSxJ}fEJ+Gl=+pN-=4sx^=n9r09hcF?PfQYJ z(D9HQ=Y$(C0*Mh1%7_$tG#QMZqEI3ONa0pd9I_IXSkGkV8J=i}M6o7+@(PEf4WGg- zf#Jz7JoPLDH~9)IGy;ngE632MbfO{*&!MTX6&?xsNyx>634dmOp1c-e(ngW01R_)C zYTJ|ZS_C`DgBw@mNv)<@gkvr(7x5D7S&ASbzYw{^k65ZuN>$3PXl3gl#gxmT4j-u^ z4*}xG&D<3o+tf$<0+1<1ruryvhvH>em`&MEouio4W47iPlmnT8WwGqb!ON`7kTU18 zFaN^4|HbVGUfRK|{DU`M^e@cZ-WQ&79((tW^}b(z?Uk)e%zK{O_`);me80@S!oPjZ zoSMN?NB65a<+!P%`^=s=VE)uWr%fBe0n77h2AnZ-&Ru(9V6BWjQ;sjDi5YSAD z#^_ZzN@?SawP;o$vu9-_hBGn_y;K4Ng+hjaqLM+%lU#U8a+({ql7wEAB(yt$QF0DG4Zn390Deq*7$(Oq}*q5K?gtTcSU%dsp@{1BcQp5V1zVL`8wy zwzgWgXd%|@VUEICofFBlZ$P=yUp3-vhLIzI@wBby7B41yrc7%JZO4hw31cevCix>~ zsyp32YqbBC@-?OXE87?Pp)#6|v=MZ};%s-(bPBwfGsP++$rayIo02>=ppxY)1WpNF zR{sj3T@X}iX5WiYYK+Kgp2w>|*p_iwqhtre(QcSNL4`Nz$ zIoC~nrQfD1XuSNA#1%9r6@rx4Q(I`1G?q3}mb8?@5}qS`D#755aT!4-C`DFLUKLiI zaH$aCFdF1#1+k!5q2Mwai|lk`a7=cV4_i7I%3Y1gNMA*uFL~l`k~A0D_?#TCxj%VMS!zGp;4@=vX-xQ#5@g6Y);lPO}+{% z3V|(O|7a{BkZwjYOjHt9p)X|wez&Q?jBLf9@L=uASN0$&0Lgi2R9qoWK*{U&S3Zbh0`g{q|q)&Vvs7V z;R!EOClpO_JT)ScVq5`vc0;kX`APm*Qs)+(!g#)T+WG#@i@&#`pU$Vi}q!LKTKxRpR zWzu>kIXH}r^?dc3hZwTu63W0~v339eVVUwvf`GYMZOD`d|Jau?As&IXxUsD1btIic z)9XA@`wjdIb9NbXh*&7VGZTnOL&wXcC(yK@zX2VdnIGU~I21#aW)G=!MVvFYm}hW9 zoKzBKYLaQdvd(O3k{p4w>Et-*w?YV}qn61E&-7|M<8X^G1mlV1N@3PA6UxFfos>elQ^^vDueh_T|r{KxT&K zJv-OC@@w{_LGZH6zA8tuC!gR*_R^Wdm(3cn*l}x@dQBd3`jnIW`7-13;;BRCjUV98 zq+s zsCpQiiMc@&U=5msG2uj@4^)H&AObd1N4c1Zy*de;&za#kC>W08U{a9Q*(dw105#xg zDPfOB99RmjTAF?D?^;XtWq-)xB=#c@-VYBC9@t-qQ`t-g5Hw6JElt(s9$48Dr$0ci zILOFpG>aEYt5>EyYBQajm3hP3RTN$5H3zmCqb+%WExhlDn$-$S_9Q;OX-yi>%rs zp3~Y?Ws9<%YL(^`x9S3}0cvR4&NGdIR3Ni5G9?4-HUWHt#y+JQm5YXNQv#;5(8K98 ze63&!R#(%fj&%Urk!&9?`!gv9Wk;{!WvJO5U+^-doQuD1z2QoieO++Yg3tfsIk7DJ zhbiZ=U5@P&W-!??Y)|fmosan@k_JV!g68MM>}! z$Wf)Z1bRh)ENLa@FysoojKWZ=M1=$OEZK!;(s~jm>Y{K=3}>k@5g6wDijaJxqhcsk zb-VYK>N(ycs-O6^I$=PM}c){hP+cHAhR#RG6?|7&SN|01~LQDYDOp2WK=IW zjuuZ}B10-lMqYyp@NMFR#c_UHqzr10E$OB#V!#HogoTok6vzL$MK(S=j)|29yCS5V8O|WU9t|M`>fw2;~ zx!2gVwP+JYS>)&xsQn80Qn{rvqZ;*0S2(BUwTQwJBSS+;HG$@kyX0sTIhF8HjaBa< zhhzg|Res#Q{gtc009zg*Vr^nT&C%1MB^Cf^v_uW?$iLZ$`9;*i=%4dA9(X{yvt8ZQmA zl!BbJ*%*NAsrs%5;H7^mi{_w0So*E_Q2>-nLXL+rz^R8EYh;>XpQ38gLbV_cvP%=E zt~`?#=TsV*OP}OP|IuE`TY9XP5>9@=5ZF}Y;ANv&62!`-$eK9i6`~SS1A2~?i&<7j z(!(7d)_bK=W8fG{{nIpMMblvf;>pPija)LW^Pv#b3=ShpyO?A+1Vj=-Do|19%yA1# z5^Haz!mCr3NeM$U8<$By+eHkSDxJbAra@isvRHlIIMTfxXgoEqAFCN z%{sB#!Y-v^TH;Mj_-iJtq~d^R3{BMPghJ&S2Qbjb%l=6U#6;{a+|g-%$(<) zf7ajr_Ft^JFev~FErOy>V&^t6sM#@Q$D{#9xVV^uAx)To=kS}AgkOYFLJR|{Z z@%Pa&N|tEZF=ueVZ z2*79PI0wZOj!ar-#Vg>w`oX)|mTqsnn;CQ%uZFqF&7=|7YXZ^7M5SvP{z0tiUAk5{ z1g!`(Z9=1_1wBea^g1N|6$J4y4AjRYlo5K4N6(Zgk@RG#^i@wZGQ~lxTr3mm$&wV9 zgH>F*x~G{(c0A!M2?5AVB&@_{rXn6ug~(KP@+F2;=sElWC44ZEHK%?utb}`&DPm+P zR0`--l<-8MR4oZ9LZS&GONDp}i9^XrRVoT)vLl9nkJT0kunH4VjNnOCp2@w)hE*6! zf<}0egkOm8NP>)Cti05KCqJ4L!7JxA0`sUz1YxfDYA=-_F!BrGWZ^ZGg@2fPmE7bb zi?9_E7Y?OR^*tddAtZ?dZ}Ac9RdyU2uaS_ygoi%TMmEIKrld&{$Q*`ZRY;MrJ}XR6 zdg^1klTLy;%8L9|q--+e3UD?tnz|Dukl(CmA%LkFj$vdrW$H1Tvd@`gajuV-D=_(W zXOJqE=r{K6hL_#?<>@f?t9zb-mzkK^>|8kf`raLHzvheP=N!pqW!|**uGe0E>XqG_ z7?*v>>^!!=l!BLYKlZ$-Ll#UM?EB?8lfe*EH9jN zyvwnxZouhNk9W=2lG@{4j?FEv?ZdtdFP~j^Qr*PfQ$}`WUmi2;xPq76kNtPNeAwX@ z7dVt56@sv!z>*v+%pOYFvsD}5Ne#A0L2cGkC=U$~YCjoXMhrqie+U3QfENg5^MzaC zQ{Mx!Yr7xYe?U-j7?=GEvx_bnan4wBnpa1%-3cHq`!ejjVeKj=VCWUtgjKPk58J=B zhy#_ND+q6C20yL%j*f%F^bNbaG-qN+qC|E85L?{{i33SeOtB!gkf2cj!D;OvWyL2$ zyVe!$TERhD$(h}i4x~jdobz!Oo{Z^I5e#lpKcKx2lUV_f(Rpk>FgaOp-GRoorRpdQ z*-ucP{HlclZ_V|Jp4f_vn}kFq36w2RO;S9PuMffCAaJ{B`39-S@Tf?prsmzGvGLt086geffTweHmV+ z#nJ0Hss%kE`XSYbT1EBpq>oXYctja5p2!5!hzz3QC>uhAhd$a@_=Iy}h0qD3I`ZJ0 zf$1rsSRM|yuu3jQF8vM9;)He@6yEv z4y{|a)WPH^m5hi)49W4N@(CdczYyVQh{1VPBDysFnV;A=Ad@^u&1bC~I0m4BPCxtb zN1wUyoL;@V(fZo8Z|5*}_a2@9;iB_lF2IlgI1r3@S}IUcUUF2%~>Q4v*Ca;avGbOk_M~z#yFi;aVV?$4EwUdTa9f=VK#N(pgFW4 zF^H$_{82Jf5px(sPikPny83Wg*Z_XDm89c1us*o)rtC%9A`TrsSr=x zxKvc7(yF^v8bx#HRAvudniC$eMQL1q31t#J-8SP^t-m5hU@`qX*Av?M1LT!w>r3Eak9T4dxQK>DOQPZGPk zI^@)v8d(^ahCKNVJ$VVS=m^m?#M2@XGXBgP_$aI-@!)A*@^=`PGeFf>|Mq)j>f%91 zHH;~3Wg|{W`PBe=&0+gM<*Rm2^{mxYxZ$SlRI4UP3u<480KHgOd!cvNJw%*;d1JRS z9yPHxlJGab`e&DByIIt>HmELlVyIi=owSa8SXcph+gYlM8eorIqbN1m6kMed&w&6zaIHdWLpS}-f zLW2D8@9_is4m_dXh?7US1p5zv_zmN%FO*?SI26WgA zEI!BtLY=I=Z@&J*3%j3s`pGq0o?83j^H0CJ|2be;2-M8J{Qi3fc$~xrsGZj4hd>D< zQ+&Wra2_8Fy|N3-hyz!hpl0C}8~7ZsOi1Qn$oHP_edm$Y5Bei29DrG3NQDwTKP(Is z!}vlT-Q@n(2b|vqnPFb|T6SVX24c$vlN5>=I4Cs-l0)(81Wj;v?m9DyIU`rk$(ZHw zs-Ji@z`;n)D5&0E1tUNi{L&((o%$$Pcf!Ogwx$%*evpwT zVj(!_u^hjIl1qY^RAv?V+U~O>hXYEeM=mI(4T=%S3E?miivx#)SM;9xKe4ej1emLh{xfnl@(T?D8=zq3a=qZNO-2MGyzt+xZiq~PTP zd$vO#&SSs$?B+eYHam~)itHUvJ^c2oJ6_%Mq+7C?m|tRL-nM$~l#_hI?Ej<|)DC4_ z_LoxQ1|L0k&{4C-^?IokA2?!;qJ$-9o1vnsD9%{^sbMW%lFGZVTSlP3ocFj(u*&&=CYYuPy&#x z8ql;g!f7k(2>50_^~&@K*g8=RbWjex1`i_hof`cXVr5(w1 z-!DJ)=&Bu0u6u6l=9hPE^-tm)lf+myP6-f(@(s5lfV_~QF{p9FoI^|}q|IjdQ)yDG zY_e%cS;Ey3W~o)mZM@xD;6ta z5J|1#i5odRX<29xc$Eq#tBgpB@C*7DTF6-b{;rOTu1?PAq7fS&7ruecGC?g^ffIpRHie|vFePn9# zO(HVNyiu$<6em+U)zpNb9Ubk!T$Pestc1ukF*yloZ;C#QEdsI00#pgsj>S_TN;_A+ z@zkuJIqyt6V`@gB6kI9PLzt6ObJ)I+uS&pBLzrRo2PLwDhmRIv@#isp5lmXbE4Wf{ zB=PjB>C+ZCYf~8s2@Q{obc4W8eicYaPC~EpdLs^K zl9wc5%R?|xlJF!6K4I%AJFmz*#YTPt)aM*wlts@(Nq`UwN2#h)9J8hVhJ%ch5iyTc zb}bN~OtmN!=9tLclK@)8&ZMfwP{l@OS)`;ww3HR!Gx;Ve#eruvL@EvcuoWd2PR+(;^^=?A+}`%$yzG(x}pMl<;T#g&LBr2W)hvm(!=Pf zOS4X*B&3Aa7q?_sCd~YJ%8|f0q~X*RN0PKJ0uxb6#H09Po#0}Mr4~AQ@d-GQe{yKPvw=IsH#Y~R@OcA4NuU_x~u$b9FD#-b7apdttK{#x7xtUQ++C|;b3SO zbVj;iYow4+g@jCAUQ_lo4e_+NtOfEAFR6%>;(L-`eR`&_X?=N-R@REFT;>pz$S_e$ z91j|~1PjPWLVix5gz85^9ZxYmffv28;H>n_L!Wj{^=D8e6{W5WBA*$;>~`S<;v*JA z&Ono?F?Em=Xy9n;$VP0%C#@m?;D)Vg4+s1sm@z_4F^Iep(knbg5)$$oWr2!3UW?el zN}iz?D=laA6_*gKxcJM!#7B7pArt6{Oj-;Pdq&RknyitZroet0H-k1f?7W;vk&EM4-HUkR==? zo;)e3RuEg9aFn7d(A9ORzLKc^$(miMI2%Dc+2*n<6&IRZ7#^8Z1b!YkI>ICufwIUx z13!7xK&gZXPmE7<(xTxaBsraoQS&yD>}Dt$0vXBIoT)b&p}L}B2-z94wjVQI0;R>G zF={wSIY1Z|=7EcI8}wXNTOb;}$3p4_u*-R^At-m7}K+MRQGA8c3U!Y2)A_BNz3O9J9F*`&(ms8oLAF-(Xop$k84$GKd=iqcEHZs9GrD zWoA>i;`pN|_FsRPXoAfL%dWWe36dj@dM}JTe@?}CJ$%TCCSkfIwZSVuiUWHt)mV&cl)L__dU7sp{sNe({xgA2PKX806M`d}eg*8%PyF{ckG8O4T3byN+Co>Sf$%G@ zMIRq}prs!Z{lTC}xh!g;RFt-p1%fzfvC0Z=vF1@-kxG5yDI^5^LU1EFRzwv-PGa+T z6@`EjSXn@yeoT~Rvg^VxM3m8LFzQaUEU9PazHV&n!zh0 zi-@tW+Ym$daBH`4oPEozBUyb2uUcfGbg2qYuSy~JG<51&J#a#lLSAXXREylxaO6>| zB?^3Pj8) zspOK#okOpOTP`iNPW4L_omsdX4Kzau_%hIVKr^-+^ddfz^2BeRU4xG zA%#6^_?#}}ID~&dyXD5OG1pSMf`e#H(Z1;9nn6KySHmjcPI=|XZ@6g`@@ELhMRr88 z1RF0_H^O^0h)j|Of)LwWcD%F0iH{N)BT$V7^#5Gzg)uIkbg zxFqqa-tgfhPY;m<2Mom;cZw!Tg=lD%sWv20AvzD)G)EL^O{1T1?5D0);plSw8dQ5} zJ9{r;WuZAZY4OBL7P^xqhJ@-Q{ZrQbvM}n^5;D1~1gVIpcW_FfTzaEv0kN_xh2)|< zL$5`21GOYDg_0@escqpH>IR;v7Fnpb@aGYwq9jBw3sqq_1d}RMeFwaJqEjvKmyW8t zhF-@8ay5w!A7e_*;E7B{RVuRzmgQSqLQ*XROJ_)N1e)ydSGPFCsbm>x6r4e3;~LnB zYygocA!uaBRa&Hq{)D3mvI}&|vxGoWoIu<@4R#5)B%|TMEj&q**N71z#pYFP2)u@K z5jClZr&=OpqUt}B+*8_MJd70RN1Wc4NW^?5=<=wWL5+@5-BYe$u%6jjBUvX4N4PM0@0^& zLy&qXW&A`B@ggDOu+lB{F5Ok8sSVs{O-6({hsW@QO(=UtnO$maS}UV`M_S9>T|Bb29ts1`lwlmR~Kb7G&`PM`CHqntKI;%@#Wp? zU)Z^N$5Z!jd+hElo4z}Jd|!~cZpsOBCJ&r5sXx42H?i-W3GT$P-7-!tp_&Ysk`xx4 zh(6~587O-WFM$tsyumb>5i&J_Ta2iPIp<7Q&+^2yN{fbz8CWd={&N|oW0GTE26Ykp zenCS2RqAVH6Rf@NuU`En_v8q4e-6CN=xiOQ>x0#mgLWI&z?fp4mCa4v;cxd?E|x-y zvDn;AZLx-hU4&&<)`Df7QWjvDzyyPrY46IyS+-m;jR_7qfI=yen}S>uWHJYlWOSZge==5Xtdr=`lOB+828X;v2-$B!VC7QRgWd3w!i z$a7E>`uddh{i2LbPDZj26(&g%o(T~p5Sdpa+Ikd?BHXkxv8}nnh0toKJ(@s2r@&B< z{vTm?0{mH39e5lR1Y}Ps4hx?c7>St?6zV$EvC$X$}MlhD)BLimkbv4VZ3;m#the#d@sHLN5g@9WM0Ytqpm~%SLz^$ z7a16YpX?f6KnOK`f}x=8^pkLy)UB!Bt)VK>eaNh09Rq~$wARY34MY(p+Z+)U{Y(E)TbtwuHmG=we3kO^yw$|Bbn4R=eGxtN`9kEB(ANW5OiXPfz&yir z|NA%pT|?^(jER(v$gInOQ3 zy$MrwVvtQgQAH_IYBkW*QWr$KR|QQ1YmvNh4W!|?>-vel?MK1|X+Q~qJlU@-dIb^y3qJt53lm*vSvIF_?#fWW7 zNGYWtt+9YSQEle;lx`JeTh&FN6rrY6RN^XHq2sG25r-khkCh1l1u=;5Sr3=oGM%CV zG4jx^PDMf)w}5m>wt(Qegey8MY#>O6iD;Cmks_WtD9ab@$QU~q5xC}nAmcW{Mr>vH z3&(_tk}8E!2A!m>-2y_EVl;3@DefVJ18sPVb#iHY%|M)vT^CdtY38YPDS9!tm%y#pA&pmU`x4wSY*B<%OlMgO={Qm0?-e+5p z@*(?fH%W805icLH?`#pVF!`8yZxuUBAo8MPN$fmIMa>}XJ|B1Zt~Q`QEWVzJmybJa z>qGY0VE=cox%WHQ-C^t1mlrP!`mFU1KYM@5Uaer$D^3Dc0i)x`=6nzfkk|MuQv6{N zD2NbJlZF}RG_QwS*_wM}eAxO(B_thtcA7WI|ZE4`RZ7Fn> z2S=7;;0oeu3APY(NCZ=SQE`ok%wrI z%dMh2IgwQhFMW&+nQI7;w74^-wO&!qfW@7Th)9eNh-T9iNdRe$c)lp9IJ6oF1@ui~ ziu7&$XUoku_n;ner4l+dhij}AscyU&VkAd_sAQI8;6M;BBa#70g^p5JDpQ=A{mG$kIz>rvq3p|wOKf`x+!Yap04)T&WjS%xGkW-+y3swm0mGYH0oZDruQ zsuE=tB}I%w1_Y(iQ5C_5X*DD#QV5bw22c@s;sxTCoca00)A9Ho3J>dWFG8xp4C26F*{& zGc%BcnkveeLvl@B3n*OWapw(6!)fOSf{QgMrMl3Snm%anB(sI}O&e}#(L|6Lt-mbk z@6Y1p4c1-9!%y>%NR}}%C7}h^{9wg5ji|*BWErM2{Pi)`rk!@gNG)~k9b0p)<1kqj za-D(j#6Y&VSmbM3K!nZhBY5s=zd%oLoZcj07Gf^+^#{WrJHklXk$pUB6OwZt5KKXj z^4Lmqb29|&lILvXL5=!Sb0c}!0Y?OprJXAY^=Dnn6$=dnZbHlQW=M;XVWqCdqDweR zL5w37K$94X(iB{k`m;zu=iy6h4n0ImiP{|F^o-R279>K*+t`9dRP(z+%v;7G_eUm>oFD>MXUP=l6` zh|4EGBJv<3spwe&%vhA^d<)|6VZs4d5e~|X8MU|o!c68^tCh46guqh4vbcDp5l0s$ z`yhZ9^S#B79U`upLkwH|jt)qCi7_OBBd-*@?OgVX>qpOh5UKlNK9$Qf+Wu}E2fEZJ5TL5syf2GB0EBzBg>!uFAc!)+xC zilv!z*U#Bv7D^XQD-|ri^ukjwJnM_ZcRl;monL?K)+Zjg;eora+GqDo=k2wnU|HaN z@ZMV=^xmxw-FvG-W-)T7tR1u#K6mb%c4%nGUUl^d!1sse$nVTD({+0Y)NujSXd!caaL@%!5j0=Ot4~ByWFL)}h>>@e5lMa<+=|GL z9r1|j{pGiAx%mVB!*#>8HkU6x_bl=9tv6hC+meNobmNt`F1f~5n_}-l`7pb>t!^uzRZo&v3O7`4z63LrbYbDwL5Wvs(fnn9)>`$jqZd0V%>O zRgnc4O)cycsby5s(T@Z{qdHSIzU2TGkjhmx5|fl z+6XotH4{?Q@8Jo257{8I9-~=Akd;T70GdD>nKAG+jTr1I>ZnW?n!M31*>z$%PIbg- z$FS&VN)n<|f6J$3VVU{>Ix_02$T+MdsaQWjgfW$3#5r_jZHZKX45$`-6=hU$1x&W! zu9`;Ux|WDUo>2vYCPR~ISjcF4@&n?|4l)SJXbV&QPz{6_%&VeI9R@Y@5l?W=U+waL{S~an-D7Vhzronm(p788#ym9s*KQ z7DI}HSrVj#QHrn_{49?YWhIKRl8N0A1H@N~2mvrcjd5Jl(G@$atxEcv z0H!0=g;zesob{NiK0ux%k%6xXa92eOf@hF2{3Rj47tZ*c&E+#r`;6DtI9hxp{(LFd zT1V%TlagVPA^;*1g095Ch45%j_ZCImDtu{Y`Sa35idg+Y3`X`>OY5McqOpTrak{8P zG|}!`9-ym~csy8-U4&z*v#KXo>e_X+syh7anK`=|-B??%8Q-HY5A!`UH|Q*v{wIYy zjs!N5f_rYp!*Lh}9MFhnI!!<|D(h5k&1ULSf%_rjAppKo>T7XmTpiEqE#iV%Gn+&V z4a5gPpD>gK)Hg^@JlDj~vZ7zTdKO9wzUj%!- zkt1CIsONOv*cP+H*rG))x6YHlt#RpY+GJiVNYbi-CL=Qi!Cb+KhnJC}^XvBHR3yPP zF``gsrg%NyEm8)T@mqBkfKLk<-3(tD#EA_Pz9My_sS3)}2Z4q@@l;U}gd`fb-cEBG zi79BLD^g9L6dW~7UU`M-Rb`&7c$qa+?-_#bU4+YSNEwExO(OA`8HtYgN;1hWLtrU^ zi9{D2uVFI1Rh?`kAsEUd6%aLuq>_2nlqD4n3m`~Z?6Rl4(m~D0)gg^LW5ggeWDFd)fM#H$Mv@6<2Kegna^Z8= z;^nUGCJT^@lna&t+js6rc9F6_Oi52aew&?Thq0e|@^<^n%a&fc$Bu7!@2+odi&?OI zz@D=X+;em9FMEsGd2D;f?J4)da+}NGlUJ4Zot)fm_ql*t=^q@m`-k@1?gIyGDPFd} z>`3+@`)<1Du5Vgiyll3#1PC0sogRA%Aw_u(Dx1}X5(mF;E+BWvI%N^z{)v$Vs-{`< zxs6@Zorj|0RX#2}{ zoZZ5KY!Ojw<=3pp7ZqJ%Bu)VlFghY7l=fXpZx|6w%)+M?>Ut^IZ;madklW9-KZ_38 zEZpLENyaVNgsu)bdlX<($*qu_oQUw5J?Ez^m(`vxVeGKmb`%Sk&mD8cg5S#jP_ zhVm^pT-?XLE;{E^7ku$k=YQdo-du)efBF7qxaqM*!^Gr=93S4e_6cT+F`ko zX^AmpEG34_n9|MjCU&=gP4Z3t({On+zq%v24Z#&&Ga`|ctq?um$pNp$aVm=GEV#sA1Z$Qx-P^(S2i*n) zQ;`v5i!6_Vr{PK+_*@RRoDJ;@f!OF!mZrpTb(`sANJSWRWfY;tQIT;ANZK421W_Y~ z+t&9uk_1zJpb+nn0}sSq{75y)(7+T&Rmh+SCe_3jWJ8|{#OMkWggmTA9zF>_D#>|q zYMDYIC{kDO2+e6^ezN8;%pH>uBeRc(Z2G5);9Lgt#%3IDFn!e(u8Py`2pgC}V16ka1j#^DgW9}AJWry0 z?~6-J0PRaE_OTtd<69@*FJV}KZ){tKJ#E%&Yl9bkbjU_>VJ`_0M}cM3Le>!WEQKeW zOAnvq#Pj@6*K&qIo&haAEQt%%@Fkv}>B~o|l}a*6h>>BUR!rTh z7(3PV?TFJ39CviUA-jbz!Wu#o@vPt#Hk2V#kfOA z7*+Ibd=;TAztyZ+LL_}v2UZI;aDfsnL}8xn0TQ?tRFpibgJcjiArl@)Tojf6&;Rky zIq+V9T78w3I7kj6@{Ty++(ijQlF2RSH!>`O*hz6gmZ-zp;E3ZQv6#W=DY{2n7C~Nw zonLQ8m>dii0%%G--=I^A5hi}u%n`?2K>>w8z?BO}jC@XRp~xLvq&C^?o0(S)E}B|Z zO*O{5Wzns^K~w#cgxEP0ZKRuyJg#>hn_wg~8)zx6i{)N*XJFXY;5Dda$Rp_zC?MGW zvO&Gx+G}wwAXuy+cyOUD^e)RcO*s;RZb03P1%CajzjL_7p%kpW*M^Y^kcoI2uHKH} zRq7!o0beY%C4s)#%p}9D5|Rt;>1Z-xC{!II!Xabq>UO(2GLT`wB~pK0s)*DQWirh7LY-bhno6)b`$XGdwOH>MSaqc zk$KfLzMJITe@}Q+7c~pM5E)TeLrkv*!Sq!V6;jGop1|bP&`5be!N`V#>Wy3kQ8Q|^ z%Lup#38EoDpk-qk( z(;Wg9iKJA;WDqagL3Rwg)7OG!kur$eS3Qqi-@qu=Sfm%4_$CrA(4;uJl}SO!CGz~6 zsajnzN_b6%0(NYoC?8uFHS`Qp(~AVCLmD!Co9TENWO?VYy|yg*lhjB~WlKWif@NK=7Z*Jvd1nP{+@W5(`#}T9Xqq+TUAyHk8s_Z*g2y1-Ariq zOudoURBu1mQAsC<1=>PdQKyU|ONNEahp@bUi;jrT#N{K{YrTFIyU8*R@Q99Yz3GNI zdriI`_SV^3`D%on<8!}orVS*JqYpns5Nh_u*AYr|1e<;eBMwE)S|j6Y*orKUHol4K zfvu;t))xV(xs;15ez%t-Mpmkdz_wu1%>k$E?#)!)mL19)uq>5aiMtG%gbJ2_HSj$ z{<5!R&wL8|?Avd?{EMGI=?kZy=)+&0%lh8e_rLS? zm!6-z4``J!QM<}_YatucxV*7rp)sqsPE`al4ez!U(9Az-8HNTU%viW83K{T~$%1I1 z024cT}8z#UJW?}?Wds#ry z$6eoO4-uMf<{1&1i10254;d!<1b{qoFlItj4uEAHBS4lM@<3y*D66ZY>QoyBM_Ec{ zS9xgMB!bDCyU>(JM~p@tzO*jN8}SAo#gSrR{#A<9i0DWdk(p}piQy{6f&eNK-k60J zYxF6CCLE>UB*})nDGwYRLFDHfxgMkazkm7-@02H5)Id%1ZC6!DE-#_u$}Cv8B3@@; zE)7k9fYkIeStFWz%oR7}`=o$DkxRxeLR0nHkr>W|vvY?8=#{M;N6~5?Mw!z@H+l#m zapZwJyrHf~Ve%Yo_9VIX_3%XOZ$$I1nF|~>RhXB|gk&qOwIPWtWDl|!KgEq)l5?~5 zD512eQWecjYof|DxatqCZsEgWXOvw<0U#LxoIz9^-2+@L1{@s^0wN@&8ZHnZlNMUF zw4Gb{)opYr3XYVz68K0_Qw!^a2^mC1l#wE*9u+U8L@kl04xqQ_B*I253jxjatxAeR zmaqWC3qc0w6?3HZL!Au;Hh~jUm=e~2kjaCox+#T%TfEp=6}M_7Gl6k*YozEo+;|9V zVwgy3WjA!xot|3|R||Yt$t@OS(>X;Ml%*hQfC-?;Ok$E}foh?zNFswoerjrNxfX zqn=a8IW!m~{=eWtAV2`_q9Wq);@oz$6#|V!ng*9;8W%go<^gLit+8fI(&EL7^GIJD z>LHa^m5JdggtlW(IF@4%ML88hT3=UyOh^-jtC$g;WHZunVQ>?U$7OT7ij}7hI3h8#PcpR>l}X99)0XLqJHPmIhMA zD5D*B$ZDvk>8>g&Ld57Z^eCty_|hC{{Y27C8pU2JPs<*7ufx5NgQa3KtiR3;vcKpLvhLSR7JxrJF*M}%Mvd{vB;MlnbO5ClPnB%;TXZF zp#uj&K*_C&OvgC1me(7ptcwvnvT!v49tzg-JD%8@?6-nLFk*2If z9!Vy}Wg1h->9!^&46-c64eGUM)<$2R%>_H8(m;(J83KcJYc$EY4M@%buV~U#M2igiel zvdPD~)65Xi6$Fl=;$^3@WqOO*p0cgvPG66ZxmTFI#q7{^hp~mm)8F;7vn*-`aR&Pb z&wovl@rmroXR*I=w_y24-+TD^Z+!W|dlos5ZFBj^19lWI+g~nT_Hk^%vV+-^SCS9e zrAS$fED4S6CWDBV}6hG0JA8l zIOGwkBgG>XiV8M`&)zd#Wu+Cx%RQVFFI!^J7i?PW#HZFu;jkru7OF@jEYtaI6C*US zCro5z&=->mZ-uVvirGCSK+{IHrP(aa@3nY=U}p~ng`ve zS&7&Oag)65Zz|J;!98JcdaVzac^Z%P8`ocZ|9$5WkEcy4TmVJ%-U7pv43rh5^YF#X zJ8!$SmzS@({E{V$FTZZ#B?~V;=k}YfUb6V&8?U}ZAlzYWpZxN1>?;-lFU!BkF2O9|aAtgSePj76$a z`I3RtBIvd?D7bYYh$@mIw}6J2;buXMRMfzim*A6ux1iPFSX`pth;}>H-!Ya!20L6V zf>va5@^I+l$}>TYo5!}|EUuJI94GBt!Ld_I9y_(P7c@^x+A);xfG*dC8(_splQ5*h zh)=7!OjkCP6g9oLqM72Y^)MM64q$MSvkCF2jJR9oRb)YTHGNSTzAMMTkp?}Dgd#pN zpszO?2%H}ShcH|<&LL##a4JBIFW0zZosL*Akvz&+V?<^ua3pxDiU1MkHmk_1G-|w4 z$rSuaraR+vpF@^w%CK5NMPg&BA_YM(A|8lT!=aB0jtI)&3z?HcNO>l8^&=`zjkJy! zcC$@3e#3fe`7f%+P>AqX*#{231?+?oq8b(iW+knR5t^QIDDp6ho9#iC!Nn$g5=ORt z!&by&JvQM#D>4Q-tAcAYAs?8t2iXQ0QxHaP{OAAkzwm9&6cHnC2pi)bd^vLC(V54O zW)Chm#~3@IG#2cjHHlQuq(0qU-BO=0>_GRrjm84W4s62sAkd@?iw=U|dIibltD;tv z4FQ>msN%M9jj05pNJ6kiX%M-()fu$4yiyGzMK)>KfnbOL4mk;cej<^FRP3f{bQn#5 z0-B6~FEj}SiOTdLJuR4x!@N%sp9A?2t6?s7Pr37|Myi}{R(S`7? z7%5V*o`$Dj zdBY7H8RdIB?aafg{1&Cm2#hLYCMF64(<4EQXQUBtJ^VLTqL9$ogX^ zoIq1!)*vDz-QCtd(Nx{oDchqah$ zT4!Red+vZ>TpPSS1Tgs+#asxEfibRDGb-cIPO!{A#9#r9sHHsFGdG%k!R<#|#0lU5 zW^y{vVj}Vkkf&twB$zD*EpBmVOrbIEjx*V?Hk0sz%tEFYOm$Qv0;Ud+b)Az{73sz- z973H0DXjp9U=RTHRjE-!Z0)O+^)9{^V{(VI7dnxJCgfzINoSk+>a|9Z(Qtfqk)Xra z_LsFCpj+*wsPMr+j&&GSi|WI5wC24z`VEJI?*_{ADjSD+0kWMr6Rc0G3UV zLLa2YOc|R+FX?eNso2WQ-=g z)JW&=36llOURai7#{Z;b`b72!pWAVsK6foT77^yeCvQ1A33_dLl5wD;M@_pwhr za@%7L+{Afo-}~Bflh-USUKaY9s|CMiZ*w&t(#**Z_j0H$9?6Op&4l6_wH#*l6jyXWetvTFt!O-X!6pipDz2{#`30?3 zPfMQFL2$vjwH%O8-0IeX=&7%z6DiA_y=gMoEEb9IVJxQRYJRnKkotDgMat}BBiKU2 zbR-lqSOGm)z>8G8Wkw)|&36KbYe>#wgv7!E!Kx%-QV>EiqvsEHSVFE?RoyH4Dx;)BmKtblsIpZ@lW(8zv{Q+dlUGvi;>N zFFy0Uvp*qNb|m|ZQ$M=k+*7Y#bbbM~c-i~Qg5{?lyZ4zV9&jF8yzEFe@yybaL1Exn zL9E5bNUP5nH4U6GcFgwjW_Sybj8SIMG~$dkNvZK=dCIjw8IFP33Mda#DF`klc@P9K zf@V464_L>w1$;v_n#jtiJ_Jx5zH1PRDLUk#7vBaxC&4K=QH*BdUwPk+>!Jm1L^^b5 z+2x73da^-%E2&KrPmP2>JTXuEHJLsGM-g@45ON5Q9Z~rwGO3`bd4@<7CzUExAPEQx z1XBe^d8AZiaAwy8fT~lL4du~M9a>GJhZ~Xv)7f(Q^h}2q^xKX`=;2zy8012r#&n1i z8-3Jj1=NBo0wjrBOlzQcxm5*g4jVcGQckk%4^xEASx*mylT3#o`JvH^?rB|{0wNek zdCkEHiufwnm2j6p5P?i`!mzfn?~tmkH}0$UG7PbULob5aTZdy(^3^XH5eEr5?fL*Y zSpbmRsUlZap8v_+<*3*Y=d2pP|M2&JqyOORIRH1z5RPE@ItHx`NkE~xcJZbpdp4Yn zdeJ)i5`(}r-ifNic&=zlp|a4PFnTY2$;PCvB_<+?#s$gDeGI1uXPZ3WtOQzy(2%opmH*)`J?-vJBlNXf##iVYP}Vr~(162vP))3`Po4 zgj7mpK&*8<`==IGUDpqD|Msu{ic7{L;%;$P`P;dmVpXSV{czW5SDzC>QzE1epmS0m zWqN4{5J{FAABm$ZUBs!D3;ItF8-q7Ru~c zORTE(RIk*nhSLpLUK0>fTc|Ev)t&TM{Ra;BhC$rAayi(LZFy@37pt@}97S@9AWm5$g0~#V-Pul3R&!^inCb;@JXcd5KL#BGH+QxZizkEER~9%6f=Ajbz}= z49dw@+Kf$u0>J`#)I`l7{$|aW_Q*m6gg8csi+zT81d|dH!gM5M27smr98OgNx&=(} zq2R!QIU+AXmOQe6)yf)C1Bw11G7SgJq^cvLD74@N0Ey9HAC`VO}3W=W(hYPPLBVrPf3pL^!vpM3vOANzV>+120u*1a!0bNAi1UhGR>#~-uj z``)|te(#<&clWpK|E|rvp4=&GA+ud(L9vL~)l1FNbPz3S7D3yCKI!N^>@VA8w!iF` zQt#h?mhWSK=&)G_?6vmJ+q`Dhn^xFlgVhA2{=fX%*9f^*T4BW%Uc1`lUhrt{5}ny2 z_70#YsODsgh=-+?X%8L+bV_Y;9(2H5leszk@Iwv}V2a=@E<8!q#c^A2v4x^_)?CA! zZ*G)Csfrv1^D9#6fc^IKr7wSv^8ZI~F?S@}7cc#1%8M{U>GjuM+sDCh7fW);QIkX{ zRxWO^BTonlW348>%k3(V@$gnCYDrpab@BH~B46{QC|Kw!f}k4tQB$#9W|59$b$}p4 z3-aS=nx&449Bhws)k4{EQv!%VtS`Cc9y?ozVVaZGH9wAt+%!|snXiSV@IC=UUL@sus=5dKKDsft_etJtD4PwD8ZPj*k4|M^;H*N zxQi;nREK9VBqk&3E>pvbV2 z2UJvDc?%y>2AwM$5X2#rmq8(9S9uV^pu-JlQG*|s)=Fqf8^7`hx{V_NkPV+Air87d z(py+npvPsh-QdZL=!SE`9qI@rYSwNG7Dk#r4~H8jqaSKAorF^g$MVYku;SxlwdH02 zTH~z#fCT0RQ8}fkAQ4F-p`;csR4oZRm=M5`a;qpYQeUaWuTxZH=cW7g*O`2Fz+)vb zn#02!JcGqkoekDqd-J!vML!oaB2F08^e+8YZ|4PZ70AXWu8Qy_3|-KNoY>&J?IAK&YIUDkuY`n2S z?XN3hRhI9|U4*0vbx3Gv?6kw=&Ztk*U#r1`00^|oW zAaC5>kayuV2ZL~a7*LDrf$B0oUw$;hb~Fla*$kuW zf+$raRSW%wh9VF&B^hdJGDR>6Qx%yKc?d(!x`eJ$e^#1E;7a+Z>;_pYkX%o|1x@rp zgu0Z~xQMGoFtzH#uJTlt5YegJ!a-k=&S^F_VbVxmy8yC7!b8^XOsy_pVnE1TX@?G} z0I9DtBvN)D48_5fVok^fh3=#lFH|lfyJ#^K_^g#L&#+5us;g#7@AV$LhM*%Vf z93A~NFpZ!!S9{&uIdkkO^QAe}oNX?p*^sAgBTQVtAy+DN6*Bu=G7B>THPO+aOd2nY zSVqsGprn*ZA^TLmr^iI49cxGgBc~#2x|OFC*@&$c$4tUkyewEIhCF9afK005Wt~G@ zKs+&mv(RdJ$yaAsW3^Sy06tejL36KHGxQUA#629aKAM*rw$@M&*~}q~Pylk?{P~JR zzNSjCt*Dl3!JW_|bt*Q8kDbx9!*<)4>Mi<^EzR;5oO><{P^30>Wf>MATb;zhjXRnq z1e1noS*Ytox7K7pmLvwdS;2jW0fIef@wJEK+y*1X5Oajv%EH02b0Mx*dh`)T5YIe@ zQO1NuPWgtFQpAlBlK@jQMIALE>1ArwQ8`6eK!8kOis<>^93D30X5`Xn_lB@5%7zBA zd~czIhLE(8c-aFDKRV&F$%rmBAAeg5y24~bni@V`br@t(J^1HxXT2ADHG1X1dKJ|3j-@s3w#it7C1DdaIb0P;WMu)f<6e~Zfikx z!3Ea@hNe^l;evRn>d+La{xZE_LV#ICXoyHM#l;ZZ$`{PK)&Yu_du>^;EEOFWI17`1 zVMhApAIpfN0dYZUkc}Nu;$>F?&;kOu@D^}j;#g874S|ONVMa}+lnSnpQ_cP|BJ$KH z$%Np{;f)4wT~z-XNq3dp)`1Q4S4PS~`aZU+c)28A_CG1_FN>Fb1iKGqJA*A)c1XL} zS!oa%=dr*0t;c@)(&ImV@sV#o{pBBh@8NGhb%*_Bo6ACG@$&wAZc)7KOJ9BdTHq`M z_Wp9OFpG|hooza|zwBz$*)0TH%#cs;rt`kDy}$gSgJvCj@TSgV@3zCL+i$trrf*z* zf;V&&U`C~7F0G3 zTB`b<6^|V$tGub0J8kJX`}3!Hd3lXhSN4+fBO=T@Zu1LfXTy#<>|pEH_SI4JI!s#(2>-EwKIq$$@07kNy@P22-vuanZWMnnd8bda^)p<{Wq zXj&*)i~wy(3)3vq=(7n$Ea*aSODc#rvMr7RHxG50x@Qr66`WOh_(+-974h@M%Nwq} z#_6B@gpm2XvrfC@yt8M1CbiJZ%af4#>IJ^^<-~R0`&xYY7w@@!@%%GB_K6b?cfI_g z&-y&|SsLk9_rZN3y^6@K6CrvHAPEXVS^?VZr z$%#Qh$gUP#xAGi0>u<|CA~IBy;7A}+D!?yIn~0Sxl;}tneb?}t!HA?Yh~fo?B31(1 zp>QRbWP64D4IHO}5f@owD#OLnKP8Szu0G&sAoAc#49A#|(XAtVX~ZE=#Oo6J3{vJ0-GeavbM`T5aK+U!s1a8S*|ernjmx&#gtF!w z4Qf7NPtIPwB{RAjI50<2opEr%j$bOzl1${Gf%#b}yN$7_IwKIZ1T%baR-%RR0$(i< zweUh&A}5+`qB%I9W*+mBUJaAwUA0{;UIxAlc-7I-E;?z48QU%|Je`?Ch(3BX|cZ)I{dCIyfDuW+J8nP}2~ zW?tvCUnb(UJ4pZ#o*_qbU!O>6Y^tiUla z`dVrUCQ>H30f@Vq%Hbl&o-z7-sMXnQY5CT%(I>eE(v)aAZDLyB!1&}f2g6csbXyU$ zmxCp&jhzNfD&0fU$5;bU3yQ~0Q04@Ff{Ki0&sr%1OpU%_WOMH*eB5CeXrNrr?JAu0$=8iD9wTF12D zid4f5HRdG-k)(PrWTr-+O>_VW7wV%qY0a4cvskHUMF3I^;+Cc(xs^mBAghw;p^YjA3hN zgF{dhBfN@?!jT6Vr4>ly=U4ukqP#E}X;X4V? zeiv!NS242Fd=oor)1^sh)?yr`Z83)|A1Trn2oH9H@(@WCDZK&)iQ@=4yNx~|u|ZH% zrCS;HpWa;d+VUhKHWq&Q$MSvk>ra32N0J?9x0Cmpmme>qkD&fzV#JqH!dqHnL^}e2 zq_x|`Q&gIk6J}aK0}qqF(M%Dw;Mi_9eoeLwVf|3I)J028bb!W@Br}sHe_MWK`VW6* zxLG|N%CeqLUTyXY^AEoFP0_Jv3>IEfcH#ZUUnY4FJlXqy}vwpCHaVViiLgh%Q0-5%bmft^(@oN%XXhd z%@Ck{45m+K&)MXd{Wm#s-p1nPz23goJ9pe*^NrRNYKfOun(Ql2_Lo~Sp3Y(g>9n#Ec~EXIW3Ata~HuFCt5IzsSh-s7Z+7L_{7ZN7#1zWw+2wAtD%Z!r=| zBQpQ2&kKtETlt!+t`IEyIQEij7ki+6*6H2`I#LjQ|5xsQ;NE+L)mFNjZ@5+!Q74XK z84#N|2hN*^QQNXIyX?3l_!gUO!aXLEn3UHFVnc`6!c~!xsTii^z^&L>-dv3=uTr2Y z9G3S5GaKkf9(I_zPUMP(sc+dbdBTxA=Jgk5{s2Wp*&D1oiQhd#wijs&$&LWs(eZ@A z4_Fl;s}^9NQ})CFJBMav*w%))v$C`Rv_4vqt%-KNEL&b=w#ZsTX#|1UsZhKxh!zmx zVnJaOlviJQ<&D-^^N{`aS-9YWbIv&R=4*ZR>oT9n_VZ<*{3=-PRQ8S6T;%livOBK! z=jBg-{Fs?few}8E8ICV~`98M&Wj~X8?EX8Sc<`S7C-vj+Oq%RSjZ zU`9kH1ULkMHW(=av<6v8J)LlBLfFfz;oCT+pdI-q64zzVJ@$D0D*y5pzfNT;U~@j0 z$NP2}?DBi2p_M=Uf%kJTu(K#zWGPN^Vw2OgRU4dOsX*H7#tykOiey^WWmHc`qA~?r zS;|SvUEwO=k~%q@VnhD3o@sMjSDd}oRy6Wnybi20i_}QyI&bb=b#q+^gBOc|plJz~ zPb!jNmm(~N1!Scl5VX5nKvHpCV8Y>Bl9tT8O7x>l!rH=2%l`6Ak@__#U~_EKF9Z}2 zCyWutUy`9RgUcv_W-L3o;p9BOmq@B}{(EKxjpX5rVTpR&hd=zOQ%+$w>=dCO!%ZSK z*PSF(kn!|sw|Bh5SdO60ccchF9vM@XmLiq9^3KUHru44ecGDkpAyVZ-%42Grnk}Je z6!AO0cELT1q3>(UV;*HZQ$R~1o^lxhy$n>x4N9QCD z0f=OA*Vc6JmUd)Qu?1b8m8cKj6)BjC@RcCL)R*v*Xa@%!@QkSzgz8p>JZ@P=E%Fl> zzW(V-aY3Irs>Jr2HYv@Td)sEmqt)&Pfnh-~jN3cwiocx^sNi6q` zrwGPNEP|$;_BC(}I#ZlbmyaP&7*2>%d0gCjs>a~Qf8-+uX8Y`96XY_HKo!KEyQtYz zZ}KFeiJdOQGkl0`2y0O06?(hXQXZOFER0ZGw5-Iu41yQ);!7h&a7D+hvF~~Q!S9>v znJUYB$nUSvEQTh6YC(2XhKfRd;jpEjUhHOw5krq6t>meRuNIHzb zM5D;-FnSA!R^xc*y>q`g|LSHI60*NMh{QFbj%mt#OA!$L$}AB_GU(gzCGyh|ToY&! zDr;6+Fh0qlku4l5{4Mh}NbtN6rw5E$1IFehb zt_yCYG^f2XQV#Oxzc4*c4r9B5IEnp}AKFvhZUxH%Wb)_SwkCvG;q|8t>h89siS>weec3%v-i3tbb`GAI78*F`)1f9r%!f+3j_?K7;!}wS=;-yQ zPdWLZd2{c$^=7XvUw-k07oPh?2eT!+%wN6du6yoW>RIJQ=gk+Z*lQL7BQ9X{D7X`| zrdg~BiJjQf(MPL^DAjyor6Z~UR*Zn1sZVH33>3tzyioWO;Q>Lcuz+OlEHt1Ms)y1d z4*OX=LyV}S7N?i|st`uVY$3y!GwkZ)9&{8+&&2QBiM0WF-mF)+wQh_*#hc~S`)s<5&Up=wngX}HMkvZ%96wZ0Fk+)rL#BNWWoHi zZ7x5!?2fx`x$epZ^KV$R;JSsriG8l$MO}B*1x{uApOpVE-*oMTmtOGMvraqS`^!G| z<)voPv|xEUUiQ5&N3xyAw!dsuZ6H`v%-a?btB-}r`r=xWw_sVJj2g2w9961SkvU%q z0-=>oU6^RXu^@uGf*=L6H4a2+XfP#^xC2(P&~QP3#n3K<;r4Y#ectqO~m-=Fi z$3(VP2|TlYPCiN_X;xQK4W{^#gaCjPbgP*NiJ*+4+L|!9gCM9)@d7~^KEWh;yVy#h z@#{>j)X`YXF4dHP5M1+Z(g+Jpsl$^^VmE%BYrG5@2jJnv%kHf)>)8?Ad0gB)iohg1 zB7Kb(X8sbUGUQZBBQ$x-HhPS1&1%brU|eVjAgU8sJYpo$J@whsJf^GgnN5i%H^?}; zvK2c}nZebyA%(l)GOEu#LR%Wq`z_UUCD!m>KQ*| zx3pl2`bo_Jt>+9%Yg2GIHOjaWtOO#1g3K76sz?>d)M~D1>U-$hnPO@^2M`*W0YNB* z2#s*mQQSgdyg3FK7%PHjDPi-`n5sxu~yu#=Kpza>AQ z0F8tpgu?=)B^3+EYc&>7k!qwO8;fNho)`&puz7tP-S)*eoPZ?ZuyF-N2TF6o`E~fJ zh&a~%1z@3fpv!OuK^!lgP)jkX1~SGW8;=;Vm5f@rLm-kO5_$^9XYea_w{+2(Y(-ku z7C}QpM=evRE{A(G;ZTj5X{REbSnEq}aUp|@CgTkF%8EBT08y*UkyZyV`4G%EPat@_ z28e}<_)4niTp*+z$iPYhKcLqG?8I3N?W-~G%;i)tPA(vh^0jHEkhqg-)L`vmg411)KUVC?jE z9h(fSN*KJ*CtHeSLqHi7H3~VE!e<1un^yXP5@Dmu?pfKu=qUq)s#FfgABDFwV0UM9H zVqMgblyq9%Rq9re;c567GRupXMam9fyA~{aow+xf+h2ASFZ-gG&{*E_Y;P;uQ1%vc zAO7lHWePwDDmN+SP+rM~g*mmUVt0@6OILe2%5P=5W-NG3N^m^8mf-Vnf-N zvHd#Am$pv)=!bTH$Ic)5z%lleMaSM{M%S8t@(CaHrqyK&F0lP7Ko+E+?=?WNr!@k1 z@hJ$_f>}Gxh=+!?bK`z`$cr|;02V1*Zr}@75ma4h%-1UJM#1~V_TYGf!gGGOPjeUW z3V}t_NLf-r_+}hwH0g-i_OoYm*S#-^xQYlOF5p^k?KL2K7LC3z$^&pG#~iU^Qoe*J z*y%6dHnTJ6R~PDLOkNOMS+l%X3*SE~shZ0X|5&bPVj{bjE$OM+zwvu{{@fspyCOV9KtDTlFz%om;W89?!}(&FWZ z?_KJXU-p;#rPPbxp6nb+Ea__bFeuFDmQ0^8+!kZkdS#HUXBYlZyhr(o{S$VuzWDUY6ExwBK zrE_mcNFE7}L>`G7pcJxFw}?=aoRT~b0Rm)EgU$~DrmKw)3#*=KU@9V{znl=p&InJB z2gpAplDect8HE@jC@IpcTwgR3#+7Pts2NAM7?C!4(7{pTf`p@#St(hoD3pQaH5ST_;lo7`l0(EHTiULKn1vyd3bU2chPm6`| z?n#;nL0%O_W)FdnAx{|2us(yPgoRSP%%75J#Dr;zGw*RMH7*KNBNBGnt0u!Jc@#lN zM@1PEPzFMY{A4bvNOCO#sI*L$*Ro-{g$xY>)tG{sTwKO+%VT0ooTKK|Iu`%TtqqeB$tA&Enb%c}3mIxK**~g(5QaiVGc?iz1pv z9w}lgbxR{ty-|Y#nslUyB3Fy4`N#1U%d3v0TVkO|*lVTmywq7H&u0ivhLjrWXI1tY z)312|UlAFamtB1Eum1kO@}>khJR;7dz&ZblYs3fRcX9mqVEk}f%u01~#7Rg_R&~`h zdP1$K8%Pj_O;VioB(kY)h9?z#x@(akFlvO1Dd(0j`L{-Lftb^PWaEF5Dl{G)3M%5l z)E;b7%9~0?I=CU`N<4$w8dUWEHcO>S{p)uPZh# zXhcDYvUE@VAg)0a8Cme{ElPl_Ar*%z4Iv7M6M$f(q6=j+buuSgnkFFyZRH!AMM;K+ zsmj2KJEqLe=2}M{Q;h{85lO;yNShdnpa}yQtEYoSAqT+Q0#kh9uxbEy*h)4Ea@ogCK~!X~ZKz5P7ZOR)8;$ z1p_jXt30XHRRlpoFuth_KSR07k0P0h@Y0&Kiz=zCR4m*lvW3htuHt3E@{cS`78K)j zW&>KrEno@`va!Pg%}f_NdGq9C|N3_}fiPWb+`=E#Gf0q^T@!d16DbrV-A`(;m}d+t zgVfA0x}{!fL`!wo<;BZB@MSky$ZUVvN$lsp&1JF^_o6VlHDWWw;Y(lkmk*u0{So_ZFJvxW?%iasBs*o@ zE6lEfVi2hq*-31XvSgPTn#lT;BX>K_9`sDS>|=j=(dF@sp zDG(Mf%kcNwXy&{L6NJxVPti0NUBGDTL}X_-goDQe6fLSHZZ-Ma(t>7^2S>+FjX+fr zehP|3#CDmT$M&n`_wBcjC2!yN?&%enD=%HJ_{z&}y8ap;0RvHV?V>A1-26s<5!T*f zCI)KU1`q^^N)kfKL*Qw+aDSC<3 zVJf`T0u-lv4T|T36l<`IK-fD?f-J|U`6mcsikiJ(5iYo`J-jTG#O`FQEon4S=JVLU z#?%%khZBAKR>Eq?=-Vh1sga@279oX#Al-v>RV4AUTJ%y~u!*@dcmAKJ&B3`Ja@Jec5sL{<42Gi{raA;;x z_)0TKdI2I%(~8uRrxz}i0z`1w1Q7&{0>Pukg2t;Po;|n@awrAVzBD2OMuHL5wN#3L zM-X>1B}HkLsJU^E14GbggSt$A_+cB zs=2xijt(;>fV$w(ge;A^6eI}*YNDWZTwzM84DrZM*}@+{RZviiMp{rJIU$)MQ4xU< zCKhS!Hg+;0tEh>|8Nry!LtwNruJR}|uY@DP2t!#drM(OREepa{np2fA{M~U^ zT1xQcI?|21#YN=ciFC9gM^WNof|y4ja3jUku4K+u*^}a!63JC!J$xa!LeMDszguY9 zLWVjzKh+3E3&0FcWf{rbArU5rIqt-eYzA}cxS+YduH0SJuy7?qpR8wXu_S_&#SshE zs09r=2G@v+v@AFY;i#_XkiUv14OY??q=jrzU|yxPpcl$oSzV+sbqk*i^wm{RgQG|W zaa{0%jD;(jifkflSre4HJ;y^3g?cF>IFiJzZlje^hKT|hNTMkvVKRAiD%?#()=fZH z?C?dbLRJ@k!YfteR+1?XsCuoF zf>;2irhJ&_$e^h%pxco$ks=7Rmf#Ru&rB)m6hzRPvL)kaq##TYLJxn`Vhu$ZL=pVr zn(RZ2jO2L`(390lQ6v_k-hf;Wr!8^?;wBoO>gP=^+2O|~969Vz-NPsE^9Ce{l4Z<`9c?c<( z46cCYG?A}qMCy43AWh2?n;AWD&{7DZ0AG9hT*-sRl*!eMN$Fwfg-ORq2= zy3g!G=4|8r<&In1KDLi6C@xGE7rPy=Eq5?maNS{SIFrcwfE|4A%m0^;+;{TwvUqv- zx2?AO&TpK2?`sl#z5IJ$!rQhxIc(Nz+sa~0{+qDTPOG>{unb2~?C>z4V405xnd2n3 zg)h8>NK=^X0JfvozUAc~1VT0jaQ594ZOP!9|WB!eo9aUh)=DQt>*RM<@$w_VEO7xY%V*J?dnMO zwF{lio}9`*gxp{zYLr^No2a!iJ ze;~4v8fXJSMip1=cozhKiF=MKBF4B!;hWf4Vm`d3haX{4M!5P_Y zS6pNOVM3q_Eo_2>j|*1>4lz)3mB$MZDS1XHuX;GBfulpmR7OMs#08E#9Z}%Bv0&Sz<$T zB>=Jc!-zXKVzB!epK2i7? z7C1sfBwdJ2DpsNbNO=;X!|XCNwYtHVva=${5;+0Y1#t}rTrHRw0ix+PAhVz^?~1QP z9vYF>xfhJ+p8OD^T;G5r(UHZnXEG>-S&~N?Uzx$wZ3$~|@}Q#|)V^*BS@qa49R$Qt zNTV3ZBd)YoPPI6y4qo5`2R=CZfNtgE%jRYSJ&ILFbwzrnuC6*c)ry+_X35a3nvCQG z&Rh@(hDFk%&wz-qa0r;HtHsr_p88CO6~V%%x1$dSvet9}N#yG>5rTvvQZxyJvb+Rd zyOUWMXkoH=`7*=CZ+xS7pACLak+YgIoXNlWtN-NFUOXrpE8HzEw^B*Q5S5`9${1;4 z#w0KZF4Q3d!A759sN~TQ$p%3Jq(zNFPIRIs4~=dH5=V~1mOF|m*Nyrh$CLX$wKMt)k5Sq8pFQAH`TwPtpJ2?9rgBT~jf9#9c{+=)b5VxvsLEUfhljdcKU zyv!A9S+-Edk#@prr@=j*%5?B0IFd|~<3)H7SNYMCnO!X+4TwgBfHJK~rBM{gyGlw` zk-=RCH3GobSV9aBQe;?4t7#W)REKc|)M_;)e;Y@NQxkneq70~|Fh!8QW(Ik!hUwtW zJ}l!3M@7}qk&#jvTq&i92Ld@uhE^$pZXvkER0h5T__Bn4!@M>{5QLD_(IKZb0x2M^ zNNLHf#4`F;eb{Z`OhGh5NygeVH4R6q+9Hm`AR7Tl0z&L0r-Cjg1kU(O9W}ga1;z4) zHl`qwha1V}TA-AWDpo5=3@b^KNz*Y@!dEwft|6$XR2rG`lL%{u*EA*^Nl`?mSqyk| zT;UKhD+ygu8~94y5}y4Bccp`}x+$VmUPG{nb247~Mhe*iWM%<3;ra_eHbi0cSgf0l zQY4GX;~tL8;3(r3JGH`BS941lSqMD&ikJD*H1a2ZC?koT<%P*YX<@P?<932D^+aXp zIIVcHk)&3uh(U}&Nd^T{MTZd*l@==MGp>?b(ke}O)PlQK9i*fOmh{r;WN4UhKy=i) zXajS3@v`rId12WHz8uN+3+5MJa9qtBj!$_pSvV|Swx=vimM>&(bGbNLuHQ8Slui3*D7YhRgN;2G3 zp{0ziWLF5BJs~7Cfu37Yuux9E5L>B;+kURsY%TrbO@Xbr)#@+k6Ku{u`*fvlZRw(8 zr_~21g}qi5S8pQwsuhST5X8K=BM4uf8!1i{=StYg1CkR{jsg1603{g}F)A;<4kDwX zD&lAe0wTw}9d#xu6~xhV=j>?}mXgXxw)yG;UW|}ZoeD@%T!K#|a*~#F%M@H1JH-^C zIFXdfc(&kGS^LX43i%=1Ccu;y7D|kS#v*aAej^Sj?`26c*zD$gp(Bb%hub4zXECL$ zEq*vB9smA2m)@}SmTT|6^QIfGzw+`+&T)I|&CZ-(vUs6g{wr@@a@EyWEx6*+bFaMY zyhWFtC0_pQAAG=9zplFUZ1J)eoCV85ZJ)^g>RmVaKDJ2tTTeaw{qKDJ-~DiM{*zJ! z;|CfVd=F8fsaw8D-sy?put!Ur6b6A5uZ6ucO>uOfB^9kvmM5nKEjn=INrhb%D}^a3 zbqh`aI52}ut2}H12nW6l0qiE*t2Gz#cz^w?zY|f~fOCX{56QtmhX9a=sDlo6k|GFz zs7*@?{{Q*94=Bv4^3eOnFUA{hjGwU~y~EHu4806x24;X^=%AvApfPIHt5GpxZ`eQ- zMUf8DVd!OsGE}?f#uRHbMpM3<@8+(%?p=3%`F`*FXLjB-#kHQb*K?lz^u5nH`+4^{ z=XIEG5QiD1P_Xy|sc?v>mkItSf-9hmA&)1bxA1kSvIP?#aOb0-Cudtw`IDqx-~mQeCXl(AAjQEr=DJ+1y4Tp=wpvRv~2mk=np^gz$1@7_~?pd zD;`^p7z1144dA@xOj81a&TRm3$Y^a6;(330mo@N8S)&@cR+ za~nVJd-BZ!MsIWrjF$RhY&jtV77|tzx)cQ#S6cCEEw^TU<{^v2y-pn2t^~0D2QKeg z&WlK}#QD!#v0y_-f%{#>HQ#R|MoQJ}GgdZ{eg083XIbLnsiuHvy@R9v^+O+RU2h?) z1D&iWuRNmQX(jg;?+5wtln15wsg9-!BEuY@C-+2@hk`=1n0InPu$g!q&zH6MOZRASLmHkfo$WW>ZeWrQH(FQV^B*A}aA!%Bm$=w@$S=;_o;LsoQkXeOi(1!ogLSIbMkEvQO3Nn6X{ zW>-5$9=80UFX8(qFT15~e+J!>Qmv=z{O;u6tEHmyRvv&{$+`6>dgF))mn$jiqtiST zf^9OU`M{G_!tentjESTc(hO;mlzD?SAPP3qz!WtfoI!{UH>5$tPY*Uu0>zp$!in(A z;|J4?InKxi2c)a?$~KDO7=Ct6)yrq&jvt6cwku(%Wh;wpnS`b#PgIVA;)Cn~NY3-n zwF19%!RsYh8nuWx%O@o!bJZvjT_PQMh8J@DT2?r~?KMInn4 zp)RW6iK7H&J5Mk>%qvKz03R4CaDom&em0uA*kuKbq!C941&EVHKrkiT01;PjnMO<& z!Xhb3hW=F$oOzBXTa-J0$CW|C{pO&E_LU^{vSd!K>eB+elZ$?bvuaaSOOF_t;0#Vs z2G}J6Co8w5m$w2(k6BAla{Lmgg%d9#T_prFI&EcA3kqMGHlD;xJZC8B_M~On;gBLi zA+`BG4p|J)^$z930g+DpoL=*tL7f*6=Z;g%i37$t)o(jfqSf7hd>>nNY)@HrtWx$e z%@6;=1<-=o_ZjT3d8Fjt?|za?lMmnP-Opur-}K1+x7lv~!M7&QX{()|USapy zj`JOlKXmJ|d#=0Z_A7kv>&XYd{^0GGF1_ya_uu;ErPp0>@E#i;vG3-K4%m9({#zZj z?`B8tv+2TpH$CQnEsi^Q>*MEb^U8&DPdQ?@6XxvzPB~(i(-!P@$`L!Acy?#>XDG>2U{de%ky^PC0y|V-H^MsD0Nxa{mo&RUf$9`a5j- z!p+wIgWrG7q$>Pf+sr;p%1eqheDHu0_Yp5JZ?6Vs;j#DTc23?hnK zm|L)$Z#OJ#4y^FbDvsyf27c<+MTiWCr?Hh>o%v$`i)dK z$NZs?S7d7;^kd5?qC-3!Km?F{aN@_zZ9E2@w-Eko>mn{R42WrP~uNXnuo(Nq{;-a>{l-<7TTETm2;=Im8V{g0nA<4 z3zqWq(;TkF#)OfQ9C-BMIl+J`#F3Vwqo)v9Ne~5KPKwW7ct7%pL%cfS)aEMp8YxB_ zVuJVw5YG@goFP-r3^%n5K{Kjs=cHQ_E7_&^n`QeOoJ2= zyRu;r#g!-M5Qm8IXa(4)!6$yMZRtynDbN%^_MjYBD|SG~*!){WyDL6=LnUz4QK@3elN^{emtFMpX^m^o={ zm2b@!4qz+uk_^hfpc(>g2#Z1{WfnCqBC3nIV2#consQCH!B78Wl&7yttHOXGkaLuB z-Zaz7e{_>m7yK7L{gD^A)_v)VPCV`yHG-ubqJhHzX=ai_fj4%kO&f}ZrLHs^Zp%w6 zSnaR;!u6Lw(B(k^H3(cUHfv73R+~(~S%jlEo%-!X>(IngAp4IPqh#RSb0&|sN7fWkCWI3SAAG+q0p*Gy=N z@@=p|jMM-Llu3cR6jW%G@G=7jr6_;KA%_hI*?RF;1(e=ua0<;UJ~WFM9TTkCV!}iz zMPlThK=2b32oObA4_)YbW1cSNCyRWCrQMu#2*;n2M5qPmY;kXmP`ttslt>*Gpm-6Z z7cYw8GkF9KkvhFZ3ST}EM^1*4k)PP`0i|8q8ue9G^3-?BS35ryVV;xt|?MPQ(HvA~~d(rY8p+ zmcU#ZL?WdKXE|gSIK)Y6q_KpL>_LhGTOtl2ln9|=N;%HJVNgUvkQ7{3%4eD3+!K+R zu(o>HNi=&}b+wsNyZ|_XzxbY&o=b;YTpodPzniF$d_ej9<458LphLp_YW-opLXTXfJSww~wjx5eC@*I!$` zY!g{gsSxvcu!>JDY;VzGRmEpdwk>8gsX9@s=2g&XNA2u}u1iaa8kUrz*zU7Z5gmd; zt-H38R+X);RJ8gpCvW(A?biyyx2-$~t#nnR;cpLDZCKf>maGtlu!646rOhFQm=Wo( zUd{pdD73Mum$lr0Fm0jY8FCf6T2&pYP_<(5*BiF3Y!aztt$uwNO3kRyV|utjL&2)n zrNW_>AyuuGY4K|H3X@BEMMQ*kU21BDGvCy|-kcSJr@Qq^GAVAn`Hg3*+-*8YzAk#~ zU;B9h1JC7LD?jI)b^0||e)+nq{YUn@)x~y}FZ}FBFS@`VDf-K4&%>XqWVX4iURErB z?$aMuFME0U>{l;-$6L@ z7_wA#Uq-in(02Rl#H7SN3yPb&M`Tss+~~$fmM3I+)s1y%qmv$hflI9hJIq@|!0nW{ zfu;cuN>zz)EN+E>n3dux%rp!oq(Fz|M_XDr&jC0+Tm=w^<%bB)Oyq;Zx)Q8cMD)u^ zJ|vC}(2#g)Ia~ruf|iB>l#mVJDId`hBt9Dy&h!_Z7ITX(o;XZNwuT-EXDF{1F6yLo zVFnO@hkj9{x#ScM{jS?4zhc2Jc%I+z1DBH7!-Ou>G;kF_(mZlYX8{%+I{ik?u$CG6 z1G3q|!<;s+5OoDH=w*e%Rp{~iMw5%DYlVM`=~+kom`6U#1ImDrne3u-(%cr2F8-5k zrm<*^mvz1A#DL!Dax&jA!X?yB9mySclATUS&jD=`Kj zT41R3Iyp5!+?1tp4V=p7~@nZ*#Z_#wFHA&T~G>NHU&8 zP`F(Zcph^hAgYd)!__QG74?f^`4RiM#@%}#enfJCQhiU0Wo^q0;mj_XEF@4`y~sw9 z%8Vt1>qBb;J_L+{+-SBKLH=+!$Tw~ex~N^mJV@ZGVi5iM=U(8J=L+cuRbi@#z5MQm zXrv6ZK{PaeesN>+`>kG@5m(7rKGb4xrN!fQ021rwl7!+T?fEq!v=!l>tu|tTZApLNy^&!&`Lu}6u(f)O# z|Fa+cwUPvLZ$a`+uP*$UneZdb_#I{x%j#mA%XX97R&MvX2OBETB-tfM!W{ycln{s! zkth?a{227vB;=||C`%kzBM_tkG&d$1rVo&$5OF{hBWQl|$AC6-47#gCxHkSVpu!h8 zPbRTX&k+>EhuA=9;Xvl)SxJ&e8Pi*yTab$d9RoH>w5-bpw`D_H6gg?~@EI|Xr6Mgr zf-aJvNt?sPSew+q$@Uus6Jp1oEeOr5<7Bs3R&UK2fiyFP;1EyIq0bHtI*NF;B9#+T z7%GL}DbWjtA-y9-5QojuteAo2)^zQ8WA8KGa>I3g)if^Yq>YuVlQoB%y##e>DaqmC zEoQd-M$`mzNJKBr;SW`$1Ogb+1x5&|DjUQBibFsHh&Z~2>3CLe*~$teDTkSctPVn3 zonNpyJX|es3dPGP5qT?h4oypXz^u37lM-_JNim+IU~ZTOJvP09o#mvB(bC14QZegl zo)5=HDoRn=8$?L-{2#K2D28vi5!(k#0N}zSMD!6eViA-3DTK3dvXnY z6wAut|N4Euf%|dwKF?kIz?Vwdp<)?et6u*06U)`hKltVfh4XixTK=6Um-#rh{blvC z2eY4ecqw@5Q6K)Al+2GUzr~ln9=h{#^)mS8!#6y9*JbKipk4;{mly3nDTGzZix1k$ z7V`=7c3OPkc1Q2G6;LuCd%&Dy_TTbo{Bt)_Rx6elA7m$bb0xD~<|FrBfAIk?JMO@B zj@s{~3-(@T;ei`_<@xACwmo?7E!I{q+sahoDk0Tk3O%0+@jYkNoI24BS3zf^Q^}@Y za|;I5gD6VoD#yxabu=k-JScFQ711Fe-3G5kZKeiJAIH^)^ zvQ7}LGF5W&2ZCA6t4oQh^rleFs|y28huRPt5VasTIHodH9b2e)_{}$~{l&9G*IQ~; ztSWH}7^{vnb4I;PmR$fzA-Y4Aul0PjIhO>^rM!|xbmp+(;T}3eT1C3`k0cAzZxKn^ z#4ldvl-NZry@(v<6+MzkrvR0}s$-kTpZ%0S#rAgixt_be>T;?0sCv0dSqp4842slN?qkx1&_B zS`=&zNU|gnJ`V8PbbFgxe5@1czgE z@;S%!mH};?KK3OE0(UA2*;~wZ*>vGA;Y?&Vg^3Tx1`%Zy|4Hf9(#AK+LF=oci(!_h zNDFpohA3*R#B5SQs)kHXLnE$?vYza4EG3!NQld*l=w*qlz$J$>E#!33mQKt$He%O{ z*pNBJ#8SdB5YelQDp;*UoLX!=chhw99jhRBcI#Ms8ScN)Tc@=jG}+*o?k}sCZ79Q4 z#G_tD@l>{FvzI?~AKd1$UJm4TQ%frlPzWRW5WXRD43eLixr(S)OG=gsvw4nKy{uAJ zo>VWZkstPh!?ai)^)j}q<|-*GrPfpew~zn=6uYP`Fl4}?E(?h43P+$+f(!;r(A z86b)=+!0L)zy7%@8d;YYKTG;)RO@ylKmYghpKm+Z@1>r{?)7=sNw|91cOd`MbDrZu zYJxFW_^Y>O4iV09%xU6e@!G7fG?+iSnq4-pImS9-2`}5td{Izan6q=NShNC3KNA5Z zYXegYPqGBz+ygz>+@j8!vqz5Q*;1`FVG~a~7JeSd#%VZgF~_EAFgaT^8plSnp|oT* znk|MQlr*M4{oB9Nm0E@%0OI@MiF@8yRsN5^`h|x!Q7kFd5zJ8wI!)x!Gz*l*RmZ)b zI9n`xX1iyz{gNY}k|uCTmi9J(5#{Vg|bbL zaiqTiayTFg6Ohbt2~KL9k7VmYZ&oO2dD5i-k`EG6{kCYX7n;&Es%KT(y14~8Es;$ z-tu1{Ja1At!~<_E#5G%VbckP=<|nDLgpNNwhyY$@sDcl>aFlqo;a!~VFZ%;2>md*D znUEzpRrKeW)+wXy>>w`N@#HY6Y<23vbASk79mBxMGEE#CJXLu$>vIZ+AZGxzY1TX2 zmGafw+|B{G(~y!5iLVa2c#>~8g@Xq%yLHv(xj=FQ&jip_ln_eQkCFDbhPkHnj^qDW4%eI)kzwGccYd*6BCL30k-XUnzK%W6+s zBx*F3n)|i8vqEy-LHl~b5M6<$WUDl;X0&ap)>9IyqoX@i_hGXiS|MpmNUeviRRcnl zjUMx6MGZwt?W3wyBddT^kt$Fnr%DY@4-u5gMhX?PcB@sLM9{4qFwl$wp#+5#&tENreoHLQr4+;-@{2 zt!lpX5*fI#()ik|F0q@eUREG0lU38fq-y@ypS|S`r@rR&g?5?kFMA%_zfE}_`{D~e za``2nx#nvZU47~2ufOt=K8|gRx%Y(?Dcz5(~4iw3wP(m;HINj zHn`1aOGHFGR*ao%DWN$*`U%qDu1ydg(8Dewu*UN=QRZd=ksg2}JcM6b&Le<#iDu9>g$OJam>NBFq7tgJB5PHI_Zm zS>)~Xx;nY)W|V9i95U^&v5TmsO6|#R&zFkMpRHIpIb{lg+XS`T8-b*UIWXAx6OJv- z*hpnjOF7^n&asF>#~g5G_AnwP0TG1>qThwEx!q(a$MEPXV~}x(6VYMLmt+*$I3u)C zFO$L}6)2{ewlgISY0i_B3naCYFe!(p3hAsj{ydDK+iq(Kt@Ty)a(CEnwU!fB!U`=l zg<9BFvRe3wCvEI1A3T8i1YGTGb6K(cz=L;d;fQvh8>6h}m|z8%4;*W}AVrFYlSPMT zu<0cwcH*EUOo&EPCa2HA+GTcFy=?CfI0S7jS27F4BGZyyP-DGh%n*faIiXc7`(*${ za`=xWs3Ej)CCz7>+zI76BRsvVPs_sTJesX&F~D_Zq(hx=r{qDX-^4`mmL_ zsPz!F-&bGu;urW9rDqt-|HhHp)qxc!3s+sh{ou(W9~g4xLePm+6bPYIGl9*7;kMq4 zjsP14JdA=4Aq){2qgL^1YYY3_Kl?A6Y_RS|>#c*$WWYemG-!c3 zsRyUeDm1T}W3{7v_6;A;W^2t!6VDLHmB$7QUsI)4!qE{&r6z}Pc2|Mi7JL5G%Q1(D zG*Irh6hld&v{PcR8X&d?M8RPA@}bd;PGprK#x|KnDm7wp5z9^4CDj5f?}GhE~nvooAVf&%oqqdt;}!~_$=TL=qxRM znxKku8k`Yui7G}2ZTQi-x<=sb@Vw;D1~nOom2dKzU2uTE`7B-{rH-BiyZGQiC(>*$ zGmSVL4}P+Vq^+~Vy8|xNddX6PsJ8*iJ#>iSYXo&+qX#j72LuhAcJx9h%tNg%kgL23 zax%4PPFvPFR=l&!t2i(`nML}M3MZlv9BOH%aE*X1yLwG0wIqJH78nI?I2>%^N>6iD zas;TI4Tr*9iWANsffOtvooOCw5m7i$j2-5{;M6s%fvm%!b{HMuWT-S|ktzYQqAP8Q zr#92n4#7-29Da`8kbI^P63?iYI{}@85NA;5i1Ve(#W-<#dAY}})yod;CO@+LF7GdU zky**CM%F5ptDP0g9>xaWdVHCOvA?lm@@G=^mwoRGOgESP1@q)#?ECM$`i`3~z4iLf zE0!O)?W_0S`jw}be|^QhSE`p!Sg^|>yKOLU_YDu*V}m31+NfH%s`<19dz?CdcW*AA zc<7GPhq1Ri{*Y}?n75r$T1|cQ{%Yh&rM%>jElxaa>ywYzMzL&*dC>vu*k4|--v+)3 zcH{wD{g(Y@_h?m{l2Zle3n3o;0zNPTeDlgH%*tCE&}u*>p^{UPs3e4=tLCOVo*!4u z;jsWv;;Alu1)4UslIM!?D@B!oL@alejoy{E*{Wt#{n3C9AUM^DmACj+TYzIKQ%^_h zVv#w55VAPgQez^bz%e7@q$&zMD4fAY8zP>1MGgGI&u2gN+wHZ7Ym*%h6WI7q6(naI z$`$`rxFSN=%P~2jnLmC6kfm}(gI!~G@rxQWCl$rM@VWTtBT;mz^vQ}{m+<8f!cT@A zvZbXKXFmcImx}nSU;f+`Uqd`!;jDZfZ!RmMy~vC~?W|tb+I_y@{Bym(?0M{Wz3t5D zpGlqT&!n#S%4ffH;YZcWzV~(IS1!2u+OI9Y=Tkz}x zj7TKVm8+u{5@pa)W+8Ec`5>r_YO$e@q>Z@n7!HS=VV59IcmSWZ2(LFojW3(DQ?gV3WQ)oKwIu*PV11iOqX+q=(ffj`hF2@R)g3mQ4#2ht>{L} zB0+g@c?S@Oh%Jz?Wm;P}{Amu4s8KLSWP&(=07eIlF1>scqseIku;r^tD2gB}*de}( zphG4Agjbv)2%$$yc{hdy$rXTykhz=w*6%VwH8Iatg9Td zJEG!KUr`2H5 z!R5h%!Lr1v!}3Y6c;K8FBMxD`SaM^w9i03GBk+dCp^K^!G_1zj1N)_w*eXdQ^I*K*Es z-3Bq-?-)LO^GF=`|MZ}B~G|CPk=&c?JTE|+4+oZE}VB)|1)32|)>hdEG zpLgKCa}Pae|9J=Qcl@!7DiHJ%w1OpKda%+H3kqQ~YWybSaJF9Vn_$(;aw~fV)QmOy zPF@L-oH3NLjnddnhG{x8*=<3q%?Hj~_wvLcfH*`n7L+^F8!s9FZ6&-B9OH-x&tkJQ zyA4g_nBaem^7PSS<5jkEfRh}9Yx!V+^WQ@h%(Rf1`a~dD2%z3S6bPY@G3W$tU z%!<$AY^8*tc{u9jc!o>q$uT_q@q>defVn8^D*s~$NEJmPz-h}Rg7r#ouIh?NIm$2u zr#6=+^L*uOfiykvC#yxwH*Fd+J&1#jIjK^bGkPK6!+~V^kgL&0Fa5buo98riUN~i( z8qN!coraww<3a&(%1pbgJ-lVZ91dlfp{3ZSC|#f zie)9UZ(@5SyL$Pt<+s^l_87J|mwoK(wi_bg%Zf5}qC(H3U)~n<*A?#0Rn1--biY>6 z*_Kt!DI}GX4ppXZ@ybjTB_14|9{Z$T{#D|sh*gNHJVmHpo3J$aPhmfj`@vkjslX(N z&0){kl4@I4d8oq002He9paABSqUun+Y*AZ1n==*5m8H7UU!9(ax>c_(J`0M&wtcC! z5lnL~k>Unb5S1)Yy{cO?gow={2j~RcKoLkuK&Kgl0-xu8YETh76h2#=@rLrQJ8g@i z%4QV6webs|=`2F{#v;=UWkTksru|UhXN1pI7pt9tPh_i(J*Vy4UyzS}=sk9pmCW8{ z{@f?uq%^2m)XszwYoct zv&QYo04MR-HF_w=|q80g@TPahwfhlTQ)dM zLv+rNa`$xpVoQ(|JWeeW-3JFHbNPdFI56CUV9o~&5QYFE6CALB6hg71+t+q$#wl<- zF=I;uPCk?$B7lcQgeQb-hVlV_hqOg0YB*UM98x&gT$;p)nCUVgCpoMOCufL98G0P* zl>ypPn;|VF1BM=)_(v@0cw|EhFABY@0Mh{;n(_0~FAQ4AamYPgB?sb=A;3?n@*koL zKr-hi7e6lPtsq4&z+nr5SnMH~hc>+hPKsB_YAJ656C?%11t$n6l^fdh*E?cI96Xk7 zt+fo0;ZU}C>=tJl5+6F5*2N01AbApzOJiI@(1DDVOc?+prMgB$^i#+Ir^&37=pz;M z$HvL5$7wCpGOU%A#g*lgO(Kh>RlrgW*hjW&Y)@Ie3?O5FS(mC=+sD2X%M;F(^;ZdJ zRAx9+bR*A@D1u5PPDEXK9#3vZ7l`LzJs+zmRU8F zK?8@h#2pMwv)GUcAz&9h$e+=!P9GE(km_Y)YLeJu_Dq5WiHp2q8JO+;hY@q=z+c0A z;>%~Qb)7c!u*~}RUZ#dA43nvZQOhC+VT>#zu z0T73})&|q1)MOtMBuoEfWT($ITclae!ObXbD@LoeR)Y||cr5lXSl=2HD^HI%SjKx8 z`|p18w~dsx5j1Jlzy2!Aq{SQ&LcED>L1=H9LWfjQ?p5f0_u3t&$GqokvFYA>?0Wao zTWR(RGebPFwzMeC!xo3Te&ts!wmP77Kpd`CE7z=6d zQX?Ck-G&33OiC0JC4}|aH7(B_$_$rUpF2b(SbAw|l<9G%L@fWaqkxA<6e9vp(0~p| z_L+9N&(5v@0Hm~xMZ3e6+`1$ay|Lq-F2oKYK(FZN5vD`_^Ms*hRC5V#=2s3kQ8^}u zdL4EVAc9Bs{OII7GRccVID(?HWMYG-kOqhKLcpxK|0vH_)_plVT2XLbfRZmRhq~a6 zS0jQrKu+-M6)Y(t64D3`oe=zpm}$U2D2S;t`GFZr!_cM+b3m|jK`*F_o@`~Eew5r! zEgXf0&IqRu3M+^vypsrpHVo)mR*KPKfOCzYHxnEfQLHW%&+08(sg*PibS_{+Un5}7 zJtmrmF%zVr@hZ1z*6Sw-xIeb;bJ;S05`Ua@V$1)o6_jUXB*CB%%vMqbj>rx4LWZDF zlDIa?kVP5h2#%x;gq{)O2!vFE8KrYTc9SAFs|SxmR&r@|S1~xPT&$dNoi)x26nI2u zlT)k<2%af%QU!ZYS=Fp4RyC`O9eR0Lu?&>YRnr)7dLG;6vZ~qh*y`mc9=gjvO!)&T z&tpR#zW=6&?z>U3JpIzwwbL!;i4h z&r8iVf>m=WHHDh+jFW?o$zaz}q8d z)cVqwiyIBLU$yp3>EQzmLFjVTI z`zgTgDsz5DV2BbFUlQ}6GN0AjIJLSs38924KCsINExHg`5xzqR$8Ng*@+xK9%0TU` zUREjFT(;w^Ubd(Ff%m;b>%nZbbMG&E9{bPU`kGIF?A=P{&wcWJUS9tA2j6-1*DmzE zFYhn+^78$6+<41%ms!YJcg@~++=&dCdy^Zzwkp`k;*R66wN`g43=V<&nnFp7o)nx; zB2>BMb*n;wM|Vm(88LTHBnaV2XRg~ETcG=08sdR>f7F!&T0}qtJpB$+K0J&~o1n#v zMJD$!Sk+kEij)Sfl65E|{#TN4%we+JAi~f5ry(WiDA@EefdN1qri$h=zzSj@#*|Y+ zKP3f1mURk=AnGC&WT-C87>bAns!-(CY5|>H#72!QC3=Yi*5Q<}!Z8$V@<0lkUM;+V z!^VWe0V&Zk#2h6tXfcx-E>X)63N|pSmNs1gVx!GQts=rta7t!uF3uiWAkK3{DHE6hH~HZDLF}?fvPnE2G=N8Z@sL$OmP*zQ^v2gu56Y)JY+66 z^ir5J{B&})B9hXA#mp0+x2R1UNC3z7TOe&h0GPE6{aE|8q9>De*;CtWoj^BIsv+#WV z>MORNv!%n|{QckA17^{FQ0rt?%nHA;(9juSYBinArBN)KehTE-SD01J{fr~YQqgco zsbrecMnM|o7Gl(#4(QJ$b2=vr-}03kT&>dQznm0!D`6{MQqE{ls*N%z;V83nG6w!Y z@NlWC7B|e9CIGLJ&6&v@nEs$4Z;}A#gXAa{xPb^DGQ-t;!@w($xA`dwhXAMHFctu! zmvvTn!lf~1_`p3~lG7T%jc3WJ5T@&<*P9`bje>@O^DMxR53lk?u z6+-K>YHfw-l9ul;cSWwb5z7Mg&8>wV5k* z7)n~x8>d4+YUm+FBnk!&IF$ccbC@0K#pYblVl!mQMnnVc9N{*4wbh871MmTPSW7UN zHN@>ELk>I4G@j_Ix0ED>r|2N&8{!&4bfu+4VTK6DEN!B8B4?U)bGTC1#nh834)ZNr zV+e2((IqGNAv8+}_aJ)LMrTw|xj|cAQ3U}skmXWm#88+mf(+#k3c$=FwqoQY!Px4e zQx_Og=&UD|et2$ENLDzU=Vv~|jDg_9jg_ZiXN@yrP@GE67bhC%JaUSukyXJyi>*dh z7u#Y6s%Atx%RPy$OV!-7*>>gpnN&|@+gyJ10spXk!+m#LXO~&kyzHLq@4Mrg2kyG& z;rnm!NVb2Ky6%eeRm%QM3i#Mp`^&a~k3Ve3#fNNXf7$!X9<)C3uF z+Ur$|_I}l(y_C$o#cXr=#zs zo;^kf*nGhDdCz^eGSFr&wf=Tl4d{zrd+xTQJH7|J{Z5X-JG>YafH>0zAr2Ikx>Spn z5ZWqJg(?OX*~P#Uvh9g0idCuH(@R{@s{B-YVuL8dIUow!u(`A%Dqt(IwfzWyhyLnc zL`qmB0#Us(iip7>Rc#q#U?nHf>ERNi>TYzu9uU-vh?C}guApX)tGf71VVoG~p+Sp* z6jx<6C+SDgtAbV(E0BHNOF7)b*q*`mF0ZzEU~5fwm}hL$`Qy z&+6vu7SmnH`O+Py#hW`mWOi{&q*XCa_bGR=M4WUu-3O^CYlme$jm%}u}c|sdb&R_}p9hi0N?{5X2Ak&T5`Dmo`2F?s18fXf2Se`m7vk-F&;fx;v{P zTiLqvcGp%8E0b3|wp?{wv8*UY{QeKVWoOyb+MdLAXzSS_5HjExTPz9zlnsK(QErp3mmSkUKX-=B>_NM zX=pjmJ+@?TcqM^GT+1;|25=PorI&}GCQ{Pa`4b=cV3jFm3MsLG6 zdmX^H+4bT<*P0JP>SC2BYk1-{?$6cKWYaOaqAm>~et5EyDRQabx8+ zt|SMP2T~1;AUZ3C-bKtI+@#7nPLy~sH*l=u2@p+77u){!*Ut!YtCa_yNPZrU#Wo}g z2EBlXoFDYD$R8p_*CgT90M6u(>1y(*=enL_F#ULLGY-*BZAmaNJ-j| zEd*U7Myf_;u)&?qsf~GvkV+>JNMk_{izsUZU7%S_IaNs^G89FmrA{w;ffL~d2A*gE zLj%fQZfmmw!9c&X;KvhN0qO;CL~Q(DJ-Va{WQb>aFqedJ5pdOVqB(J#7o8(c7N<<7 z3AR9uY~xr(4AjV#%zgN)?PkRGnw8HJp6QpqdWG5dz9ygidg6ZY!|y$&UcPtf)%V|d zt@oF|`?SwxFZI1I?=O28`?hN?xcSP@eE*5tuK42n?Jpm9_>RXMG%HR0&0crX&S#&r$8m>kdEhQDJ81U}79FxiFF$+o z|I5$%o!iKvuSlv+E; z>QkoGiHc1wag2V~Mu(Nv1oh%~NCO8T%1Q3w1a#8i5V4w+2|MPPIc#Nufm9`Gl`lFI z=|`tTjIPFv27I%NO%FO|JbE4SmcnXk7STZn=K=i$5$T5rkV1zX=tlrb9Ad-3VK@pL zg&vX8A1I3LDOVkPD%%!wJInCiX!azw=dqQ{TJYu1f9Sk-zu|Rf9P4{upZe&#KJlS< zeCW^L>S1j4vR9aW?CX~6F1_QX$riI>`GGrcQZKvvw6xQ@kGDGQ&f-SXx)Bn%AGIQK z2yvgq>F(1?7;{8)0H@GO4`3h?v(s>hcW%avh|N-M=+T=hM7`m99mYl#9+X7V#x8_c z08S@2asV4WK$r@pQMbbJSWntCloLxD6u|Sb6wFYjQ$m|wnrQKn~DmE#yz%Jn;fNr(wzGvsxBT z7-WWNl?BI)4q0|7M{LL}*R8B5Ng)O%XdsKzAt^o(kq3wH0o(?BYxsmF3KR^DQJi`m z61N1Ulj(*-+preSY|9WRnLMcrVo*vxZ4P78!pj&s=G=e-R#5O10!o~z$AeO&yb`HHbPRBa zAvQoD3LaMyms#;Pnf+6Pe@u4SH%n=2ZjkjmcHjH*rn5TP)-EJPfY*x z>tEn$IynVUoF-(QK^|N6W>D1={bn!Dn0eLgN)>!|hBy=&9&HHm;$_!`aZE@fh{A0Q z1LrM#xN1dbQu&oelBM0D1)Je8D6*}qp+U5M(`$54%u;~g8V{n>wd0Jz3eb2d(&|C4a{0r&yEx!GC&=*57CP5V!3%B9^H{K$WtGv|V&) z=81Y4JZSjb2>jglS3mgK&wupuzx(lzfAZHq`te`=;u2a5S_pqS)WtmjafVKo%F`DNjY~sDx%_8{X=!iN#)xFy zA!HU$s^LQ?$U}3?71mD;-XHKaX1HPEq1MmU zX3ika^5b-f7z~#Zs@0h;lQDK~3T7r{h?49OO$$;_Oo<4C2%hdF~eqdBQJS>gcBTT_Co4(SJJ7>-eu=@cfOCovDsS%i|b zWoV*&dV$h9_0-Ftlc+PLa|#8luf74mLGZG#(S4udH%aC%AN#uK6K_9Y z*Y#`xS1;RKK77v&58Z9u!}nOxW&h`JxzW1eAK6U=Cc9~C|zw?5oi+0}_QZIx-dJOiwzR@*8zE9($-0o5G-4(ai}vvRlsu!{A>B}Xqh@-Vd_&UT;e zFS|F{!lp9~YR2}dGpDvyQQ}waD&*nnM|E*K-6(D`j4B@S(9ES#y7EMgiKoh!LWe+U ztc#SO)XO|%#rAHo(_dwZU$M+0=L-!p1VdG`F4?dtoCDZ6z+8u1Qrikgfs7)kJk(Yf zvx`|Q%n8Tp1)V3P@bKAT=4j?Hw>iMc0?AKrC98`)fURV<(fq~FdszMbwwvuWBYFVa z-%HtC1`tnWU-+r_zWeR3^*r{QUw?}4eSPx7@BGMlZ~x-wKJ2M%U&+4VsxNzm`L^q? zP%?WS+b6%=yVZ;>?cAZc|rFqhpqh}b)na##nzcMm}8@=UbiH-r*TJaX@_T) z0naeE3VpTf zB3UjK0{!p;v;d|gI<+j~)Vi?Xfy*RF98p(kgfK@x4VJI+4=izQ0C7?;V3+3flrcCf zAdBTQhw(Fn0pweTa)8gdfv8t2eAmW2B;rt_03;YNc-qOHgE{y?-Epu@65-a(D&khlTk@kAE zEr7dSwu-Qxad+;P4e7A;$z-82Es$p)x}JOl`{@;)%C274I&2SG?aZS5ONddE2XSdp zMvO)6uwVr}S*ud^gtjj60&9;Em#BeC z%Fd%f1yyqvQHT;)4ojVkab7I5zQOTC=NQ~eC(q6 zZk_%Dpg$&S(ACbK&T_kSBDf$B0VV^5lmJ$kkO%3dN9Bd+jPOQ)+0Mx<{`>#;zkE7O z`K;C3yp?vUu;C#!(#sh-$?_jQLMu9p>SfhtnOc?AIs0Su(bO10!m ztn`{))^(=VSh!jcg2^%+|vu5VNe$JtP6Hl_kk{`LdRpl z1djpjjIhgGy=>GZq2guuHj9+@*~L63L4!3oXV>Rk;=t~J7O}~|B3Ch>)C)(XB+Ug<8>P1S zrcKZ=;UQ%wG{?}v5E=X^4B>ayaF0|4K4dT_8yn9946{n&(fSoDH?-6`2fG?Oiz$@z z(HjGiS^FC+4r&4%O1D3 z#q7asFE6W?mC5Sm6XxyUk?a!>-TuhAo9MO0tX`ge@@wOh4xjU?qj!GwvAdpg!Va%L zaYy^hXB@kudik)uHt;>MeLsuec01}nY97I-FDv2jS0BKsZ3Rl zcq%8AjS55Mp)wPZ2^K8?;Pfk0;Y#BQQoUJMmhvBwQ3wqva8F`mR$v19F{~^$KU@VW zG(1d@iW3iSYw~HN)*JfAG9_0v`c` z``(u)vHL$M4`W|&{-66lDPP7`FZ)WikA10^eII-2jaMzZ`%G@a3JKSE~ z$K7AtO0)rr-q!7{g<^9L0(u?dblY`Hf=7o?h%)Y9bYiYpP$&_J0Uh9UhzJ3*#LQ)) zQ@C?Hyxz=_qJ+098vM)+Co`wb7JjQfw_ToC#0DtIT2)rFXlbC+wSrMG=A@Phtt$*b zXDbiklqZD&;tB>tZ;zQ>Mt#G0D^}?U3d~iYzX*I8z@+8jSedV5C`yzAXR#E zo}fV~$*F?t#l|{*3PA%=0tpHqqzZzKOWOG9XHk|63TA9P$ADf2u;rc!fB|#mK)v#? z@7C|>%aKiq8N^;}haRKYVaE=pP#lmGtT{*h*v5gxLxRS0;-)zZiU(na2x#RMKj zw4flK=r*_MuRtg?JZurfhQbXkaS6x9e=QOIgW}ayD#XHbz%--oBM`~#*A;psO^hYB z7%8ER6?Opxg$X{SD$Q_$I2%d~X=AY&f~~t%ob+H0(!hc;;d0E&)@~CIvAPk)ab-Fow|+*1Vip|CMOZiL!1C72GIf03192PH+5A> zCS>#0!(Hybrj{Rn(q>}lI)3p&XMtYU^_DKU5oC)M%oH-gALTG}^7%jh-+%Vf(+mFK zxm#?yfjheg$|!l?x$pcx|Lfm4p!D`~v;@NGY#NVwLRD^$(4rJLVu;xe=LtVC@Wdf! zI7Ss7xL#e2R2iGJ(?y-NACP3-3)9wXA~5<*bL46ci-VqF>8IUg43F} zmV89?nz2n|W10y=oLY6CHK7_=7qzyGtzF;rhO@WdW}D46T3>!X_M!JvYqd{_!r2d( zkYE4u=kOMjTD#2Z;+t;%x|!zZ7x>Ai9KX}f+a0y=h+q8jrz&L=OxqLMdM7W}Pk*nI zoF;Q)C-c%S`Bq#O6*ijXtekFkCaj#NIPHxxD7Q%x*RsC z_`nL69L5Y8!Ix+J~<|SF$ZQ{c_WwdT!ZI+T?obGF&N1yX z8J=DV#lrJKHWsQ#m7UhyYg{lFT|ZOO*A#y0&#oD1dL3^%KZ$Ddv1Op?LO zTxn;KOL&?&h=3seoYVyw;854>V8QW|%55ev7b9k%DAS8R{2y*Ozsw5!PAhE;^aqL0 zwxcTtMvG^NP$E+2yK4em7iR*ImVe2?!?be*KpY}c$c?H>xVX87mR#3SC zSsP$ZRYB3|5K-#D$kO0p9gaDN@v~cRvV)V)y7-0}kS!O+ zUaSg8LnP;44+t=+?HK)2%3ww@Qt3W>ItTU=))r`tFtujza3n0p5pwLr(0((H7 zA5(8)a~II%bI|Has^GR$@oc`yvc5J%J!tSH6? z)WY-#htaz9LJE^|I6a)9N^Ut1S1d~lp6R3MFHr(Wf(Pxm#{@baesT=LE+u|XU;;&| z1q;UpQ55$)w!*m=met61oR!RhI@%vG>-8eD{pBxw_PqDL^K8#!`&TK&vXc2zAAQf) zzVs>2WBcCMv{?S?jaN@zVYa#KNeQbsL*F-J#9TuW!`e+s519Af$g*c;D zs+O0OJNI=Fq>7#ZgPX7Qq}8mSiF%bA1@KU*qNXE4RrgkY<`{+aq}G*$MGAGXlEa*I zkpU20h(O@*9MA%OQj&Od42Y~lxL3W*4K6vP0W&B|WEpjs{?W!$OPdT(E_|*M0g%#; z2vEr4!AUa&Kysc@4A>;0Sj2^&ll16?b0f`KnlaOWx!ydaKc<2yAZfj`}rf~dTK?H*ssiNE{)6;N3@Zc}W*vdf6f)!dgo*c`>Y{uaE zS-KiB%*{a7*%dNXl*d^zwT)vr*A7*>XbW;)@5I~b9-n(9aHDVSVb#%1U)%D}?Y=To zu?)s8v(mWFeA!d>=5l|#j0jgduXt=SY5C7d-ilP7or4_Ui57iCDmi0@Ao4tj2QwI9 zLZT>76fY`bbuk1ejoV^wE3mE})ke4aGE`y_Q7bDM5F=s>Gi{4542XFm&yC|qZ%YWH zP?B>->(D$w&uvjlGF+Ebn3;{EYpm@>R~UL+aJ-;oV!M^7d38~O-=oxiaMtBlBZqF9 zW}0~=OY}%4DDuy)Hh=W;7C_J?gg;fviHvL%J1IPkl$;sti2s&^5klVXI=r zmHL+nEpdXo?X>-l+iv~5-}}El`oZ%o_3`jZS^Yo$^B_u|Mo{eG}G)a+gbKrvy$0_G!-R5x+KI|0A$-R`Ngl~ z`&rMr$npk5iS10(G6$IM1cW0x9H}dTV`j3_C`ffPP_`k612}_{2n0h?hQ_McP$C8m zrBSvoZ-?Cj@J6B9ExX*{43`XOW14UIIieUy;nYi3P|h;r&ZS`Z&op4#T7CL4s>u}M z8=S+sxGEyk60S>Dr4EirmFTo^fsn!sDP%lwy_BcHSztJTap3^{Jmj891t8(g0Tgpd zE2wG>tqCLiI15&O@B=Zz>l%nGtr76!(UrKFCgG-Eb!eIyssf6cCrkhcLCz?e_@9!| z#R1~z#PJMpSg`qn2U5IbUHz!VfM1&fkj^Gu{0wq~OJwN;IiSr1sp!LWQKWNd#s*3P zWcb4(XSl=!JchEG#h8aShs^1MusHllaMphwl8RsI9A;vu9ah$e%!*S2Xp2+VEFj8~ zmJf}c3^a4fOyXJ&>*9=FEqtU74ps?MZ&{6#Q6`F17Y*^Fv#xcBj(Lnl2;wuGD>sgv zEKVz@59reE>x2cnD3+DX#~rfG z;)CZ*H;(6cdHI!#cJn;;$%pT%Sbpts`<;CF&ft{`celmtQEiW8AHK)B^LBmNF$ZjN z#xc8`ed65LF4;q|{F-C7KJ)k;UUS^erysM^2}f-2|CfF5Yi;$iPe30vf1WRKxxL!H zam!XM`h2wQ*J?CHA6!|d7}SfXrmKuq7{;S2L~$=xfm-M)Cl!6KJncm99x4^#dKt1j zIDFm#>%8P8G$;vGi0W1~AhouStCY!d0Fhl3oJ?S#M4^hVC{{wUm}x#JBx!S4Y3$G* zu1Zt&jUTgKs&G;w5Nsl~bxRegWJv)c7{tqAr7?UIu&e{jxq%a25lt=SY)KI*%Hbj^ zaH>$aB+nbI|FRQ~TSWilXYGyGv()DRB~&p)igFr&keMJ0^1KYFk^3OFN3Z)(wtCsa z*a~E2vSJy7)}cR>vcLS5FMQ+!=f2VZN%j8n$3OUAK5*_^F8tK{KmX|uTyxpv<>jwm zaq;z6UgDpme7SDLgLjU4*&U>16{Pi^n~rnDeac4H1mzO8HE=e5ePa#fYUVxK%%&ncgv=MPR#W4ga)ec z6kvg4C@yTRw=u(MLx*67IOJ81#ZM40k)N~?&o|cPCuZT`$1|%T$A}XF>4}Z3L($=( zfu4M@+J~@$s273(Qnn%V7m?N>$ZbfRt#GuSDRo9{H)oF3KJaz@Ia_Yx^G818Z@U@A zR|IA0fBxJ5LscatuS%&mogp-9t+L7_6Ao#%4CB0kCdkipoDkh?&alpNUHaPv<(1k~sx}<_OxGqraE!c=)kX61FxZXHv7|e;|F=YHS z{j9XjSmI*IDEF`d2$1RlcxcNmk$5O4s0-V~nU#w=H*}+DWumpH(AI_S7GJTf*PgNu ze5sf1EVqwrLs`iT6wAuwDrm)W|H8}=uef27K%D795IC<8`GA2-IgiLs&|xjNBNjrz z4x;*{SD0<*RV*u$l`|g6hA3DnqSZ3Cn^7!8l}nRArVCn;LL90Ebk$q=rWQranY`sN z;B9~yI7Ba{1tr5~lnI~gOq6+uNr=bk<07Vudraduf_RD)-kijsw=9)FGmY#-?aDVm zTfAAMwU#pqrrelj#$~K96}g5Cn}Yg|TW?Y_dj|U-fAtIh9(C=Nm(lPqzxkCFpz%r- zX0?VY52umEU`J$Lhm%j>=D#UJ?d_h4>iYq1L6c+OcT zEIwMP-cpl$tlW9mZMKpX#lQZ|FAq3ypN%$N&j(>t&_Dgz->Q+#w`Ls%(^5?>|B~a% zVCaoF*Wj)&e&6UB18V?X*o-D%m$$85En-{8=782pSX$(;c`IoW4w`$CY((T4NV`#J z92--^#}lnCEl%bP88D_UZVqXPLXZ@%gn#38SNnvx%(uVXxa1P|#(KNt=Q+H?eCBri zdeI&7+-N$#%oF}F#HAeOK(mEcoJ2SM_=6aOsm@h&9*PLK$m^O-MN!o1!kqqiP}nsV zPJ!NjGAMk&Kb!xm7@kXbEw^PJUYA(#!!ZDdaJ_g;GZUuD1qCvafNgaklDIZou0`fB zdMRak^7F#VSN@H{fUIB}Ug;tT@MkwA&8RW25ZMC5AIxM)A<|}WiwFhiDg%(zY7=Rq z!efKv&(I9XhuT4pEzc<_4~S_-htuiM=~)KK4^Oxu|J9YvLuY8kCjKmH;Y29cm7&z; zgDybaVX+U{TwP;JB>hy;oI;#wtIHt-g~H|uN{M1f7os+v1TmygAOXi__3I4~!H7B{ z@t`gm&{6V13nwx%R&PG&nkbn*Dbyte?i>VND7w5jB>~(iiLFZ6)i)ULGNbqMvKraW zvSQicWQ+OH`=?K0-}lrb_dWUW-N0wQfMQwU{Kx}0KXBJ|_uX+7@Mlu0=9{m(z{kEG zy7O{X^Sw7;{M8H2wYlt*UmnI*FZ(koaQwV&`b()-AG_!4PnzfZ*k>-@SH0{nrR*>J zIQF7_H$Qd3PD*B*%ZKi^&fXlPTuqNr|);xN&6i? zf9Ky)FMGP!OM>pEKEvgMv?@fK%l2pO;3(NtaCVtbIdQRDIUJ>e(Nb2$s*Y3NDeJU~ z&9h$f3Ji)+RWr&iJ8t7fuUF-$_$A_{T|~c`I|M4hc2w!aR>jJ+N4VRTRb!I!gE@tG zC|5Hos7X~X>mBvAr2^*@RFIB^OA_PR@7=|sX zn5dW~#UDHX4s;QhB6M!E!dtQ!DDf{^J8Zk96_E{h8eFA4k8SVXhQ35`4^c96Af0eZ z0N->mAp~q{Wc6~N`LdzhKC=B~yU7Y?MfBu9q(0`azC2Dp`Rdn~F8tv8-u3#kUUke{ z-f*gymp}Ucx4r*8Z}u>@SD4ky-e11zs~28(`IqindV^xw7PH^FE!bMdyOFsWb$fL; znVucbJYHy(h2oUyta0b+zUJ1}y(z)&gL(s9l*Fk;#GoqqE%h>mLuZ2gX)(t1ICH7$ zju{UThi-p(bQJ<3b7V!we=R57G%a7}9=XglQp@Q9c$_)|u9e_)&LLQs(4W&LE zW`!var0fvkQMs$Kg@Z^rWMp2dOO#vzhh=Ft=jAHSh5$}EPELvraFvJViLR0#10Dz> z;O78(&}n8!j9kKrDD9mZI9uLmtwyZzEMl!g+~46L?(C2X8KbTo_VTi-S?g701+rGL ztnC488_fBj8`B|FPRPCI#tUp_tE?UeK;f*-4WjkCRGXAQTBn@db+4GN3Ab*50I zi#do8=tsw|g$u{@#vme49!xD=BWgUxS}!C9F_yhau+yI@tlVZ7iVZ|7{ISkAG{A(! ziRgJ?4}usVGt;K$*=L@y(R%B=UdJaVqp|A7dtq0cG2*HkX-G zPh4W3+QziaXdgP+bJv{?+;{E^pZ~l+d;6cJ$D^=cyX4|6H{D1P{f5__sW9iQ)C23! zrFY!oaoX+J=XX64I2s+^= zq{9Zn7&Z4?&*8dS2FK?6g9Hw@St=@9T z1YG4pNk35F*G0pq78%lujdFMZ2mB|CqBkBm4Yl-4YNDClTa-aWkCUDXuCzI%N{e$C z9a3q~^4x^vB+g7EG8kr&{3*0tD%bj7E=zkJV4U%Kmti!Qt9eXm%&w@-d8I$-k2ugT`}%Kc>}^Jz!! ztX`h}1@msFAG!OIgSVT)Hiz!IoLludL+`5!#0AI zz15_4Y#pjeF)J-qg+OKJ9*sxAYhkGdTyLEhJ@(u{!=2P%!VJn`qzS_P(xSU7ZYj7v&#xVo5T zh{B&XY*oZ~Fjt)F1vE1xa13l&8*`qlDpfdj@l)Z9fj?vsF$Cz8Gs+?nh>da!6`JK% zr7ZT|<|f5eCTL@h9zd2qd_d8b5SksL3mFJXIjmmxHEdt{QYL@;lOOQVwJ&|Co$W05 zB(@?N9j>^(;QVvn^|mu#ecDm#W&gkI6WRWG`I3u1e(?n#@jUi*mw)lHFMa0P%P#h1 zY%ec+h1u`kt@hoI+(B9_b?fX5a^fJm!MQ^@Wpu^q#?+ch3tB|cfPsiRSDci)36e#O zuB91;9y~Y)bHugLv9(Uff$oMhkea-K!Zc#ZAqaHwjSox%NNZYtCXRs= z{g`D>>ku$quC$mz&X8ge5l9+(vWR4vc0J5JErr+!<{?f_f|fLcpUy6r#0BCp5@$Ce zar%pke&!JKR$>cBbkwra@WCl(+F@?fjG|uVhJVsgwEpoq=XhAfW$KS!@H|%)PnK@5 z?#ms%;?xsZG_1C16waJ6vWt&ipi4oG&IhYdJllJ6@&(VEyT{H<;N;Bz&)9u{jaAio z9;f#%f+#Y82!eD(K&40*se*_CqI8iaZD`U%hXg{4^co}(AcO>x0HGv+fW3|5?##~Y zl%1lpIy2AC>^?jDdEc+z$%bTSpXWUHIrp6Z`L|Pl_ndn#wJlsNmPj!Li0f{!iGpk= z*J=t9r=F`9Lh-bz%@)Lp1JwRq)n>O>T3~8=Vjj3=bs^7FV03c%-M4G#$uk9Ufn(8E zL5PKN>6YJeoyNd3^*g1|$3KQjNhwIjkUsQhPBnsCA~3IP)1?)K1#M{2jBn;0K&1st zOEpy@0vChKy|z4Ot}kefyre7zH0}WQfS3EX6b0Y~HO~AVKF~p3*IZMQhHRu{ZEWzH zTy8;+iEA#BSgGG-L8IQa_e_8r?Qhy4b(nMX8(%;)%pwK40iS{8=bQ!~{bm$RnWXufYL21s5iJ*3eB}NDa;gM<4`Z+7XP04^pj;} zJ&MtQ!C12ot39S(d#$y!NyD5(kF zOmh>8d3>!k*6?f`TXmscoT4!oV?rqD4Ub9*nDa0V2@+vcy;v3}NI@^Fx{yc&51^4n z^xy{s(h<;*{uXAf>#X92re&EeF32sY7wu3jMCLs3w;z7MGVfph={MlpR-0|&p}+^% zTEkzsmS1{_gAdqO_5JqOf7kj@Lxq$9WLIaZ1sl!#@3WU8JhSF|R=M`d%e}2kkZ)oi zxbJ5U`0U;ief!HHZHav}d&^BXUg!O5?zP)4lOLOCx7v%$7P=TZcm3!8^-q5LkH21N z<>gK|@t9+dJ?fhmopm`Id` zhE}SEO0I6Db1Am?4Bk^vDY-*LF6o0Na^D^1Ra-%~=rK`Q_@|pvsmJPG=T1J1RwkIeQYl;f9JJn@4P%g5p;_Bt) z-L_iG%gYGh&^YaEbJ^T|_8F&01R>GD(Ne%$>j9fMiYE*9gbJZZz&Yp&cVkE&G(k=h zeHMeP6OKK~-ZzG5B;GlGobPizeD}9TBDak5qh^Z%vThlF<9+vjYwWmdZ@q2YU3cGp z$As}GpLXJ-lOAG74+IDmu`Gc`;ZTNHJSa=^;3`V%XRx<@YRZcMt+2Bzglpp%aJKzT zN!|ebr7O#aQW%028o?t1C*FO1g)Qftc`65TuNAnl+A@X$3>Oqv&=uU~Ln7qjkEcY= z7XgIf;{GQk4jKc+ZiOJT+<-G2?JL>#moGf$_>+!3#QV!vTy*+1m!Iu>U-0t16K?Q7 zsfqX9=96E(_vJjc&1Ij+Ho03nS=X7rOflvl!Bk>4AA%I!$GoHan1BS7a+`VUQ&t5M zrI@WuMv;k_&70TsK3M`jI;q6y&3AgJTMEb}2otwS7jau+>18P^G#g@w`B18KMMPwY z&@JZH4-%S1}hYa&CeW;h0@RV!b=QoLXB6BOGl0*bl<$)H@ zuKY|K;)bIN5~@{5uXs~Amnusc>mOuz@)86ZCA6j!Y4Wt_T=KB@tJeh$M~` z2nI^GSjmb&gp?YCIOPQqWPD=P!O0l%wkt}8P&HT3R<)vU@mzH%t6*NGl3U_%E0-&T zlo63`yA5vr%Rm3mDvmfuGK@vQg^|JVve5>A0Nd&~1Th>?74eAxgN!$w4P3e^>6~!J zX(vg9mzQ5=af6QrMh6P0aS_rv7Up$?6`H``_lj*v11WM@ghPl1;?NUK8NjmY)3ptr zbg=P_m@F1?9sg5UPiCnA%iNfH7>NIa-f}VAjcC`wwmX&`LsVt{eD*sEQ z15J1vr7YrH(We-Cxmc-OepZ97RY#@J^9kKVq_}F3B2sX`2V1u^ktK-WiO&!al#U)N z`XC@eS^m$2)J_dP!BCLM3Z*D4k%p{D5T4T07Mb=oXd+9xT1o?oUGv(gRhG zmpyx-m^KeyLBUM6W$ZtUtFO8O4!oq`&;RI0bLP&T^}=*l;rSU;r#}1S%vsZZ^3(6H zxBlAet~YW#_>418`TJk}^@}e(Z;Ky88_lF^JUWGg-;QLT^^KGHfj3Ao>+wttgRy!L zVz;DIQp2M&>C_fjsvrZ6oTg)(8HFr|Akay9rVj$>x%wC8(itlqPplZ&fpAhdQ{W|6 zWH=K+ft<`aXt6u7|>(?H# z>Dtb=>9FDstH}NDzxr?Me(-%>lETe;gW3iGFHG^0p+w*-Ir>H?0p*qzb zMChp%F<-M8`Uod*&VX3aut=!kahe5~_KA$KEMln$yD&G>CPIpajo3O%eL`XqEDCO}7--KAnf&twsD+`Tui9#tNa#K$( z&RCVgSTyJZfN}$*B}x|=P-ir5h4?TWO_~o9-Nta>i8%U1$CCm=3Z#!*4$-MxL-*=M zkMO|OqgvOxhHHsEywy$WJ8nlF@c#agKw70dqueeq%v)I$$Yi!G%q~r9Xsep2UZeR* z)VefI5h>kjby{GJ_T5)yHCmy9^$P_-;E`S9w@tny8_KRiwJY)^U5ZP)trmaBj2qbu*Y=_+1f-e-po`t}$2 z?Bm#{9q}>W$NtKJn;-jy?Y@5GuFhkBY|}MAyv|C?E;;h>dGRGif5E)y;)^Y{>>`UU zVRkTqF17gLrfk1%fkPc9_Li|}#R)FQ8qG3*v&jVrg=)b}6BFRpVQHuksDvdUYp)if z0rU)Cjx{(aJa zov}29LCq9U3ZmR<;|)(b`l#^obyr??_2m~^3_z^o#$J2-~o$v@L3U>@E%X%Ng_=Ea4Dp8%~G5>0cW-2 z-DJDTHkWU|#ToLEPkW7QB9HT3GTYpaaE}3^!_iJ?JCf~tU-p+zKK@W&{knDRr4z=F z@$==|ZyLEem3`lYv5sVWf4T2{!OM<~nZnF6x`vL{%&W6B?7J;=tA8~Ci3}Y|zf;gO z*R0`+20{JNEe>wc6cp44O-EBN;}aza6=#^G6l1UhSn+u zmUK>{L`otif+o1h6>+6Ag#{K7O0)=}W_EluMXDO{jB+TYJBKN5&i#kK|LbO6un`UB zF+nEK$}Mh0yB*vmLasy+>EhiMaix+h9Z(RQyZ3I!0Ymiv_@{sH6)%5>x8l{0pk`NVSBBe| zQ++y-L1v3hH+7r?pwOV;x~2x7L%0@fzxvz1bPIYLZ~&9lR$js4 z8CxR*Wq$p4e?_h(y4>ZKUK&rsm-nNUGEyNkMyE&ZMhL(qV9Nxw*@LG!f#bjBBDnos2%2Df}vZ)_-H&t$cnjiG+6OG&Wd9w zQ5Cof5@ktLyxLJsn!2o-f#Qg95LiS+b`3@iDk+5me;&ekm8F57h>&Q|gt`buf z+u8<8#VMc&TPn-X@yrhrMN&pY7Rp30ghVNVI6er_CkP*gq>@~MDjq?#BP9-Q#am$0 zymnheLHcviz&|||LBJJq%W|7U;#8}L)xC5;Juty2VK@R^P0gB?2Qmg3l(e;#56(b6 zPQ2UD2XF+DKmO%US6X@5WtUrG)zwy=HvQ>8`O{yxfTx>h8K<4bGrFWF02{2gp3ciCvnpTZ zHzJJT^%tWD%5R){(tB1}(ep0j!+Td-$uBDOVgr}{t4AviI4dQQT>rjA>D3QM>?~0< z4_4EGdXVoX8?8?bMNH^i>U_J$YYxSEPedge4;zpfXkOq49u_z)?`aV7n&42~;xciRk#0)cCa) zkA^rzyV8ENa~lv+R2he4Ae~vkYTE=BT^z_>i8%^StTY%VOXMiG%IY1~Z<-kcSxN_# z0cbF++pr$kB?Y4ef-H#x&I)0{ zfY4SPhOBub$cBItf6|E<67j@D2zbUVtrFpip%htx)>0I7YtPhGidw?Nx#rd1lV~9r6|h~#pl%=UZ&Ur1L8r+3T94vW;-PZyR=`HinmF9eeP0Up??+U^1`_DSzoRn;o{- zM?quoxsQDT&S3KBUzj&UaKa&*p8UnF>@UO1#~rpkynN_BTW`0;`xl0n!9b9?yr`e0IL_XUh?xiygn|J#5YNicEszK*f^{IC|19^#Fu>C=D1FzRKLLQ7=q+?r z7TAa$E7K4uW+-tRtS}%T3>Dh75s90HD8Pt-R_G0z(haaSiOh@^$)-ZXBi+Eyr50J_ z{VAha?hQkFG*hoFWbVt?bdOZ_+j*2x7lB|%e?7k8@*?Zm5(^;u!#@dYm~OD z?&-0|$b{0(jLZ*S?*h!_!pn4k*}+&TCOa=tT4S)Cb5IfuFI$|$%MvZi{rH3J{Nt*u zAQ27zW>`PcGQT^_dCXT1XIFs^#%ctLV9Q3Dh_FrsTUkV7V&MVAaS_EpW4IVP?k~)4 zp=QAeZ3r5C?k#5f&Vs|+6UJYD?ioiPchmv)mmS8w{ie%(8QT`K&1Ikbdho8BCr`Zl z@rUn*mtUSSWyUj;n!*g=`j;h&8QJ{n))Y`M9Tdz#f|)|PzE&?2QBe9YUyC;t>WnCr zRYW2(9Z>JnizS-!1T(2zDFRmdwTOcCcSS_pmRS10vw3i!EIkeJB!WtA1+!|>Ol0MVN2lftdMUhz|V~#EV(2xksg{;db5=HW`Ys#c5GGQ=;_bfG4pNK?)#8HZ9?fNMNI8Dgs(k+VE z!Y8bD6_V~sE?HHaNOAn_<9GzuSYctTF0==>SYzlBV6T3pXIb&;s+_~^1|M5I-Xc*- zx(@}bQ%J$sWy6SK_JEYCTIs+odYo53BpR|j*AlUEHsa~OJjpH<8p_rZI8Y>y$x=L? z;);_fhzOjZc-;_xoK0$ky2?U^NzSjjrKFf!$`FKS+=y_CCx*3T98@AQ1ejNq3ZitS zVL`ltSgCi?-71^ai1B18C0Q-vRuK(Mah=E@P-7dcLx6*d&|e4@FCd^B{V~sVn0G58 zmy+3wAEw_3G2fUpp30Phax<(ModZS&6ztJoN?B@KU@PiHjzOakA>|l zud?d$uIHY2rd`7S@@K!4Wj0ZeVx)Tpt^s4{d9_65t5y}{N)xw?5@^U>Bf$kKOu^O`LHudz$>;}(K=vby#pN^#&?S>T=FRfh;Z z)ut-~bDyh~iLT8dykrF6b8-ESMRu8WMq9ns8_Y?66Q~i%>BKEatX_O|KLicYbDFgWAZ!7!<0rON#b6!fxkY{XPK&X{gl^kfK?4HRY?E3Xv8F`< zCb;ssMoT3=iK3RX}JIUqH; zz^^V1JuNUmWYU9sN@0teYe4VaR}6z=fS7>5t%BVS=xhLJ2;|Zs8nVy~98#&2$P^q1 zB9=}ciL%-?mY!k~agI5%*vgVF47v?jB}R{%kkE=?D}^iO#gz#{HiS~%Z9?MGO)gpZ z1f{WE7+#jHrwk95MBGZmVO|j_7PmBU&PF4(S7TwF8OJ~d9wOk!E-=Wg-Bu8d<}Eme-AU{|@C7V8cMTW24Klaa>>6JF;dfvC{@XrAIG(|_>B^FMj}g&)81oc(2+%d?+-5MG}3qOw7+`_$#h0X1B<3IL#-#YvEEoRucW7s~CJ@5W<_~Qv+g$bwW~Z_rx%bv5AHC*HVoCP41=uktRgBmC}F3(GX;XX67OV zPgkK1?kXjriBn3VkQ&6{fOE{<3X02(9wlYaNOWaRxz-h`-aKo{wxpqeU>#`cM3x5- zi^PzsM2k105ocNA35$inyP}bHYqGb^>>@XAAJA&tx+AGG_WgT@r=NR&PvNKZ?lQjh62SYz(4zc zc266`c9iru&(=gd{h&Nh?AQ$A?LPJ~`m=)0YRYj+OX$;1K3Q4gkJm-<RYnpO2VRI%+>p5ZGRqh_;cMyYLQm%qzTz>CVtdVOXoPA|1K?6ALX>n(t%808Vd~oAK1+_GZ+{HAaF~p&Imz&&rqN`RXk-g#q5eTTN zl!)P1Q^Y)n;4fYiO(k*6T~i*JpezpcAl40PNWtH&dUwT&2*k>28)}CpJgh7xS%)(n zw0sRfoHt~l7jR~skmk(SPF&sQGz=BNhakifQ%PA&@`LpJmNKq1l!yr>1-GsmjJ78Z z;?>@8)9h8N=1ovtOsPSTI>p%)W!kb!uMZ~RCRZOfC27N~P!Jgo-i|ldXm6{mxEzNQ zP41l0cgA~*+%BQLX6VMI^Eb{o8B$*RgKN49pFi}#zxc~Pl~rH!^vNWpl{QM4$hfzV zE2?F#$VC*Wsc0?3=tMCJppvB^ISzrB)mjab6Qq#B z7JJalpC*V(oDcKju)?xSn{9c?SYc>lh((b_h#hUu!E}E!H7110 zLaCH-$&z5>4L7h7FylMHa?t+!5^X%>C6;P(ELG{eL~Y)dSi3?nG#LJuE`>@0_aZh0 zYv;0RNLneA<;GBYZAp`kXYO)KmKtZiHi?xYIMIFkSohnHPFa?z+XC&&Bk zL2G7mN6#?WiGIz7@+=L;uNX&9nYMj4vf3X#T-wS%L_i#6$R$)@FEz9!`qYr9!5r3^ zd9@;|J8|Y9bLBL-3Tjk5=2k>SXQ{II3%SKrki{WdqY7d>V$xMEGR`SrE*<9~aB_a; zYKl?Mts-2cxE3g2BF-F*lmE5S%;97nCRCQIQpgBW7NtanR8U-QmF~)}>IyTWdvBsfbDUsym1d9`t=U~oI67BHOe z8rYN=9K%**-Z01w&{ceZeo;!4i*jI&fd1AZ0$bQ6CBgAV31pwFE_C4 z{pA3%{bjfqWOmyz?2c!b?)OsmmmSG|$Jemuj=Z_-Ghg3%cXWG3`vy<45+(_ojd_jBE!PE*7!T983n81-BBz%eJ0|R3bRHOq=5FDOx zCI0lZKn2)gOlu4&4p%vp`MG`ey6wj6>@Qz)#U;$KmcHFSxZU9!uJWXP*WGqpV#!6| zAC*F65UobyA`4LxBo9Tc=Q;gBuz}_^`bWn;}pkPwaee^Ok!xW}Vc8f;D zJZ09yuoCBO)h9+USjFVMURgn2o@N0_Dpc42|)^) zm9!$kq#0zUp7~d9voLR%OeL}+RjDm*dNrw;oz2mfSemyrq`-AF5oD=24TkNQ5G2cF zZRx1Tsd2ZMNO#pL1`n{2Ae5Apg};DIx*}F|nw(8l%}JZ=s*i|Rrh49`ty;2LyW)_D zrd>QKOeKpGmc~|`o;0L@z+_VRR^S0CvJkhxG`*+L(2`d~;dCW2e4@)%+~^MjfNMs!AY)%$cMN1%Bgj#j^F6J(siwY+X%5nqY#Jmga~HRIYpKo0ikMyf}siALEQ+2SdtJ zQun7%LkH&etqfL%GQ{@2*=~?jt-PTKzwwppU>cn%%&R@4z>os0&fap}sscqwlv{h_ zF^RIIKz@@hpyK|u`FCX)%m6Cd$98e3RFe_2YoN)ZvNbUe}HMqHNG zbn%o}St-V96A%X?RwO~_Gr?6NKVZuPOlDV{(MxBS6hWLa`AwEcTIt%gE2XLz%w?%i z%*B<}ss$dvfiuk%`lC)Q&P%jaI}9&~Bg0K`k!~@Rf*6f-tRO5Kl%8~{FOHQ?CG^tG zdZT!G8OBOfYR1fGZ4r)c{LKvAIGYU@3tMlq8Kk`OD$BX9xY9BZG)fk!7q1eVlr>b% zRfl4cDia2siOZ7&C5tU;R-lh8&S|AYdDz4tHjHD^5x_nUIX7$TDK>4LTa&IM)0~+# z2Fw$r4?5A|xt~YP+5;xEs%cr^Ra2DrA2$HmfoGa~60UbctkhPCo*as!)cbgeIK2-C z{9WnkQzuGMMpxCAbZ|;q9$3i`hk&;&Av{upf30@N#WppJYz7cnqes5i28t=kliH!f zZ$=XJWxU1BxFqnfEboutNxG*5bsyf-fW?QFKw zf?g>!Sg99f1-V*O4O>|R46h%_MaI?4RB^RoH28qPwHA@^vWG1qp1o*P!15E5A9aPM zfoQwTeQF+3b{N~)Z2MyV;AXWS9PkS*K_Up?3LTKx^1I%|rQ~v*iG%ls0fE&FG#|Or zmT9zQ9iwXhZfPqpO8dcq0~ME2-{cmY<#Iya(4Kh`v2@arDZ(Lw2oAEitAG`KbY4v& ziE=5Azp~U8=TfdyDZ0Ew5(R8YF@zfWIN5N7d7j4sv9gp`5M`>mB4DGdy$>d{R6Eg_ zai%SLoQY6M7GE_))49*q^<{F157>k;gMJ{bxiFwKpeiV7fdM591uvr%S5`qN&vk)e zz{|KPSO|heKcpvGVsb?W7%MJU`jA5OfR`<#=EKVa&%`M!f{9XL(zqo;B5W%zBzjOl zN>3FKFeF=CB}R;GYKtf(@&ApN2M{ent`G-W4F)Dlzkq^0P;#wPgrI}T;<|}2JYtI5 z;2)Y4Qt#c@B&K%mie_PUX;fO8X4bf*N!OlTweYg!>a*HyiPHbWfU;l{87_v7=L5(? z3cL&~k9_HCG>n4f7vFk)X8X(jB;`Ez8?&Ezb;hK*FHD~E{I_R5dGEAI6X0e0%fjS) zuYU5rYlKH9T=v4`+g*?S{7y$5xWi{Z_2J#OU1!&A)^;%4x4(d8sM$-+4qywmn7?ws zHp1ciY#w9=lMme4C%;BOw6~jKYGht^w!i$<{Wr9~e9XaH*XA==ee;wH&OXjzY^Sm>yYSS}BiUo@ zEVsYx``FJs{y>|{ug;q4{bfVCQQB-`TG7|)M+Wm|G~;;#yDa0p-iJWPk=~48cT1R>)eZc#+#8Y)$vcxntlfEq>vnp^7d0B#nm zmY{l~)xHI%g{9pbd%m`VQMO3HA2Iz~(ugR?E`fCFF&P0_LLTM{(*O7W{AW*)*rM;S z5!&^`CNxS}fm_XTl%BQn3M&w&hk9?Cr3xbRNj+~MIx%9=Za8v$8c!9CjH+5JLDbVi zq*xW(UGk7Y%Q2w6cePb~%)#l(RaRWqVjdX|itD7hxlvIO-h*ZM%Ig)*itst}b#UF; zikUcwn#vm|6A|2=~!W`A4BIs6)RO3X8xZA|($7)~O!&AB@tsv$#{z0z9+Nbn@q3Tlvfg*UijT>`t)}mBs z<3pA17MUUBL@n?biEepY=SAgNHFbXk2i_#bP z7O@_oxWIgMfmlI4aYd;hwdE?#W6`jVP`Ap~L1IGr@UoU8(kslaz_Lc=B=(eNp0Hke zcG^>~&3Va=vj8cB%)s(wFB2~h7nb#>(7l1*4gbo_nb_jq{(JOBQqkOgW6yp5(K*jegpnV=`^txJyLjRq zmx|kE_LB0~kJtk`b}AcW-g}4jf#nWkAFC-q{T9g+nbtqdVbn=&w+aobd7U&iEa@hn)X*$-06WMmC6tSj8Y%jl(e zW6NL#0K(3gz@gxzV66*KTb|i{22s6-Xiz1Hp5`x#Q4+C5thJLz$_`_1zvX5V@4f5! zrzc-?#U-7O&Oqh1bL3-cb#4kO!i z7SI6B$YAR5@);)|apoyUUUL4)e!)EUnr~iz<@pcaJKh$v&E@G&Jz{71#pzGJ`N|BR z$ZlRUTbXN2#71&g(^K(g5Lf9E%|SX&V?F}G)X*$ora&Xj)U@86RYr zNXNlif`(RH=3-2&6*WKYU)!)8^~!_hs+s9%PDfa&O0@ozYk@}=eMq4|-ou|eb{SFqO27{wy@U_YCv!jl0-o(^&Cm3Mh4GPfLz|doQ=fc zqbz^2i&1z|iG~zNhdfNQvKGBwf%J-E5Y-6~P{Q!82@VD88Z$o~nM93O2AiGpY2|@~QqB9G_ERYraUkZ_ zlPa2rS?5ue=E3CFsIGDu$bobqj!=H)ZMG{Spy4`hXC1aR9%@sGwkYLl`RR#s9-|d= zaF#+zE>e$bN^zl@MX9V!WMEm)a)x8r5}%nm@+bymc9jc61IwdZ&m%7{PxIoES#rt~ zfPkH{e&6PxDZFguFQwfy5P(o>2r0&*v{GeS8nu}~H->YL$wG=-8ZtpyaRmIMbBRk7 zu#zR6uQUwlJj`BM;u5)eC3cqS!Z;eZBb9vjhW#&pi| zXc9CArov^wk=uflaGcyg(*ki8NQGdabPIF2E|6H_z->NCA_{0^l^c9>ZU0(X5Ykm{ zY6mS91P>J)kY;F6I?9BGK&1?1;?z!-M7ILwm6avpR#qI`N=&-Aof|;}2Z?bOr|sZ@ z7|Nyg5Znq$RJ?SN`8c?(Qe$m|Pa8Ibx*B984k<>k(zQd03k_@{TlnsGUUVe;o!4i+HRpNfu|a0v$9{A6({H`>kczV{Vg zb`tyf$HqIA4K+Xe$k=h0p5(PbA8C z(3zpy4>gfl4|{@YGV-lRBN$>2jp0PV)*J-)neTLM2vHGG6peIbUST$MdcO}Cw8yHH zfDc$|tOJnYN&wST+xxkm*%C*z%~_9yfkdLwTY_44BC`aQ1%X24odh%HP-D$iSKVTx z4SZtEA;@ptdecqU+j1TGS+nnB+hX?q^0)51_2b)Zwe&KJIgh>V_FLIAf@-l{VOgiJ zR-vjLaH8qs**hz|=V(9!&%MNLA?=}oTQCH~w$Fl|TbPxN~Cbt+NiTcmuKX z=axMlGBnF$Yt7HMaJMG|KA{Oav#a;P4cEk3DM|4)FoQ6kaD`iY>*%=-L$~i=b4?JQ z6a0J`H173eh`8Nk*LmMxmL70+z3*GsUVr5|_Ln=4eZy7f!OK@)cFz5Gj=l5NYlioi zr%ZZq=G4cZdHf+YXt*}Tm=uimDofneEYM`pyxdG=#&2>g7lBAAx>BhNV`NFIr zlp?VS#T9?0qze^qQWGC|DlSM#R%C-##OcL)I=0P^IG{&B;c_u2T}lzttEOeKsus+i zW?tzuk**@}F-}PE6dVLy^x|fCZxOaVat0XOcFE1EUDefWi>L) zmBqngz*Md*CYW1A=vL~&TpY1Hu2%Iu`64f2^2;tsssSm}xP_ zKm^rDCR>9cV48HYTr)&*=_*DenYDrvm(tp==B-@`L`aMY>li|!Va+22>r%$zsp7Du20@q<>L%I9 zGYF($MY<}(R>P>Q;?l`tpez3P$km20R0+jCW{)1lS>nUwirr8CZdRUd^{A=t~};)V1p}? zC89&=U!IJqeeF#J0W#h*ZI=+tvC9l5PkQ_jfB*UJ_ul-Qzx|6NzkHaFb=Y5CcfED} z3jUOwj$QA}_9@KEvkc^F3UGyrRy*uvK0|~mg8rPtMd|EG6iX$tPH~CaExXY0 zMlQufdP9Leu2HeN-ik~FnSdt_Qs^d<2yGj~{Ew0j*lIJ#1VU-7DGV5FeD@WSMK)}<1})*rcT<+KF#ILGfQ-Zec)A63`{NQp17Z!U zwmYm~pwj=O(2EE5#i?hrTM%)JLk!(2g=~Pa;Y0)lc)Z+hk-3#vafu$O2qN<$@j-17 zltc{02SKGoAYQr%`Vw6QDJcNSj@acp>H%Rd1^@I19l!vPbs`-^-|1ksh+BzC8Nlx# zeMp%vlB$E5nNI=fDMpPjb$01)8eW~TQL&+3+mod=x|Qx~_^P)`e|Io)3R7HG@^8L4==9j)V3YmTE3uK-%>xs8sdB*$8uTGx` zFZ<-ztJ5BK9{ZVxZg}ab32)AL(Ep_DFW-LkH^5{V*~hVW+3NkyV+WrhWdVr(+F_sa zee5sqx7E=HZ3_`Y%FbrP#kQE8#O__@&uqW8h@;x}mk-2*gQ;Of5~#I3|=(q|If36v##9kP-qTk4acPPAY<;VwReJ~m=JBV z+PNl$L@?Z%*`gG#6h};!toN<5;vxHfX7VEsLh`rYeB;<_ufzoIzT@_ruN-rU{bk?B zKKq>0M-Hkjw#dhK*xFLZ&NZzV1#1}M;8Um{E9yA~ZK^5-=&)r5Pf>+bPTgFORY`3ZhjVaF$fUO8-?{zCs> z29{4f@vt!$f5ZOrr5Bub$IVwdlI=s;Q=fRydF%)7^cM5TC$jxXT&HNxHU8^mI)zEW z)Yjl%zccrlTm%!4?&GQm`j#1Tg(jWKni`rlq?;f{d)Y`|b8E6tRxM!CbY_YX6f7mV z;uyNRC6^!t(IhL%wAJOMv!eOP9EzJP^hBe_fzAd8Yge_S{cC9&qzP9M#nGFWwQ5?K z$V^_AE+$sFvYM1-SxG8Fc??}mbBKwirwFnrY3mkEv!R0KO1_dP3tN0NIcoz(b*mtu zm?syprmi>|-Nyo0Z}nhDlknDq=AmzDq8``uD8@n#4Jg;lo1?gh@c$^iiB+ zE!PnkPqcO^Dn$jMp+J;UEJ5`eOL5&$m+={~-~9S-=t&=J8Ddm%tkgP|Xaj}qD$3(V zkYz2!^lq*o9kgeQT*?rk_DQib<>7Qx9`*j||BG&R(PF0ZL*`I>>CF6cv5Tggt4 zVyl`oFx=>4AR^>j6f;1P`#tcE~a59 z7at6}R(HyZl!zhEQ=T6@Hgp(B>sSmd*m9Qkahq0%V90P^o}zAq8ad zkA@)^@nC7@V9RhEs#!Fa$3);PMNnL+bO;3?#-zEJ4vChiET?frtdvrT6<1{%m0Oj< z(p*skW)T_^)uRrDVa_C?fru4JuY#C{Mhb2jPAj7084V8X)dlLvv=Y!LNIDu!&=_8j z$s$XZ2uIO}{P2R3JxsGb$|1%C97Hs;+7h+%A)PsdXpSX| z?NntW4e8G=+R{y;BKo*2bp$NquSd9=>&<$YBhy2@cU+|y4KTq!xvn_$@P{9mU#$_L zE(AK{G3h$8BV-_LhfH`CD>cFunwBoF6uDLf6tLuwLJ-B+B5q=BXfiNLr#8_>vw8=d z%MS|LJ~lO*Z5eJNF!JyT6BvT0G1ePf_L1FkpyKK)ysWHI*N48brHLw|xj5+zF}`VX z9EDqrjcFnRWh62|6JnH7+(Ij*C`pa_f?Q;BrCZY)xMXQR3aUpnh`B_8uW(RnQjCy_ z3))!zOuDivQG;<5!BZCvNdz>u%PjcJi}vG7UmnHSdj8>${3Z5n2zuh9587OI9($4x zRzLBm-!WU~J9OI0zt=FO2;%TEi3#NpDsfj;oZ&nmNEeuAlAE02d;cl|B^0fU<#tdnOv+Sx@zRAi4Te3e<|3~0q22Cb+}D2 zm6HA$C0zt_CTNhViQ5I*R$rI|v_%kOg;-Fknvzbd9EEd2OQ8(iv`p%;*Wv8vratX6 zB0418r*W_uN`i?%B)G|~5Y7?-uVleWA*slGSt~px9YIB;fRON-6vbVopa)w0omm7K zwCmuvEWpq;oQ_E%<}E03fdSlfOm+j601lnV^nvsP+|!w{Ll(AI;Mm3?FZCSk2{k6=8KQN{qmD<%zVrzzw9jg znbbXF&jp;HxaTU@E!JK3(_4SQF>H|8Tg+`MyZZ3g7x&r57rpEqkGzomn9Rr;4 zV0md`GUK@F=5}E1p!JreiQYjU*X#~yR#CAZ&k z^R?HFS!v~!)>(I*ub+AXOln$(eC;peW<)amktKzCrcPrx00jt}g*}-!HCx`>P4+OE zZYDt|+!O?vZ8U@YcGa!t57>8K$Fw+*$wW}hT7nbI!hULF1G>wp(v)US>7y z?~u7kSvn7U3IRcjdkNG#n>H65mEUXk-925GD4>7vzT3g!KRoN}mtJ%>unZ{!#aCT% zfw z7MX&rE6^00!V(mpuBAkS^Ss2(KFA~@bBm{|IQ=PLW5Ud(kL%asY@tR&1WIJ|G#_ct zo}XCPH8&~CC|5JDiqJNc(xgQ!QOsg)u|i;8R!UPbMV5}Oz-j>*hKi(B>5Vq6k#JCo z!mUfKa7Yp7oU%;fL}0=S3~6N*Cl|OIZ63>f?tuL@MeE63cHGgUEgcGHoy$1{1lD*) zGa_)yIdo7QLxC(=Qt~tYXoyJdtd>FtHX@Uy7ciEFhF0+&;{eE(%64*{Th?=R6Mw)l z*m67Nts>M8=2Cb&hFGp2ZPGbMmU@?EY9P!0HJFUQKz}~b4V9$~r!kYlEaF(BfwJPd zyuk-s+~m6Arv2ccQ$x+VuxEYN&n<&njGE@FsWSH(F{5Ghe{1gzjF?Me&xT#m{33+kO3$<5fD#-GD_k7x`vJggF-2TUW)T=EqA~l(HD&gIKzPReCyCgg7*ugA9LJu9-uMETSu(f6Af; z=`}WHVue_i2tnx6(4v|*aH#!f1u}ZNVo25yr$FM6MTr2Pu4Ea#3B^34(95E*mWB`0 zT$#+J%wcTGh*K#k$_fhV9W|jfS6>}ohR6`8J5E#kz@XNpp=yv7xBUms^xCorL^Jux zZ@UUV`RVt8AUlJ<`R(6-W}n^7@*90*BQNl3mIegx`P#h3#Nb9msEYpL)dEjao2sm! zvl{0s=2uy?rBxo&unj(zULF%USOSf1w#g=XiYHnITP9e8G2bI80b3$4DZ-7K1P+=U zWpWowabu&P5)~)Qi03Jmr_)v@hPV&B?|nI0K_<`#0q;4-#Y=KM{xU@1lTp@k-pep_ z=$NK;Ps37%L^|M4PYS$uwblHNij%D<8k&qea_R5;(A=5CL^>91xDZq^SS^H$1E z0U8dJE|6j|$qE+XRx6U5MI1|E%(1Fjas?09auktX6Vgtk^LACkdrGMq9*|CL=8GVc zs9=6Wj3LcsX`e($Q8p%OK|wpqp0_w_-RHczYDjRj&wTYMIqx!q(puWLAA1N!2A2Iy z>PcU8pZcVoWsCgw$Q&{4xT$S2LE^~H+UQQVD?nrNyE|hG7<|Ys7{`eC)`WvJtR-kd>3kq6tTd5Fsd~>k)x1m!_>N&M2c% zWr7+P)SBGl2;7!}Qa7QkF@b0riV%uYGj>IQURf#$iEx$91c6opO1im4!&v>!w55?7 zXW(T|mjoCPsv4jlloSRGCAki;E*KT=6BhzvK`LZr!Be5IXu1Xlf!RvIp5bL>0W`=i zadH)h9|LZ2Uf>nO1qP6FkY2$-jes6;LN>}uG>LZD1zC!_Vvb$}lXy}I_M|x!$z7fl z3q5CG3u2TQrj>}>Y{C59A!VpBC^Hlv%E}!=pN{X@k-g>ufNZaM0L~J19JkVOOG&z6 zl21gWyAnzPs+KNM)nu|dZ2xol_IXTJP@8D4h%_(rSrjW6#nJC6-1_e&|) zBlq8~zeG8D?Nm0r3=~7kZXsxgwB5Ebl&DL@izl!B*oQ>0kIT9+E!Ffl_}2D`$-fmSdX zP_-vwn_9 zAdu)+m^`TN`|`0b|C74v(z9`3-Kw~uug+sn(Z zy)d%fY2-Jt>I*`1u8!6qZ=5fpzsS{nh5=tc(x>W8%?_pjWae~C6hs+Ai6%`n%@-yM zJmZ6G2&FWIAwV|BYJQ0_DLOAH*jf~kXgR?OSr%8)$u+4dWhfxslqAcQu^Ol82>}k~ zKC`W08uh%|^w+Yat8{LmgR3k7a}8c8Wm5?s{1He;Ps1ib5d=(@MH5-AMMdaoTGUPm zr4nv}b%GRaPdr}{M|o|`YE56j#Q4T7sz77Y58CXI2sP5t(GYlrQQlD8!j|;&y=iYakf;JrJW$|YeJbr76tN8 z%CMK)sX5B8?Ee9;#O|na|Nsk1TN`09L#q zR}a+-Six_Zqu!B`h^JapkWZMeC5U5-fY6*HLPLJW9I-fy*o&Ax9z61cIQ|L>qzoOW z6jGFu&RwpERVDo?GXyC_Bcl}`ijzX3DvpMFnu>V3(O(uCI?#%$$yFRHWUc}-(q##< zkf~Eeuu_~9iLOO(rZ63pN)uOE_>+#O+YGFE;3kpl2$DsRbP>hTOXoT!OGHT!w@4uc zWtwmWeL`w%;baAILrjo@A~B?fA{oUNOk5GTVk@1mNJNwJO2tI3vegIzhPNQcK`B|3 zkvr&|yJ({Qm|;zz@TNrlh&Z)ol4?%TXo(x)_Qoq6l!bBZ5(3UoJw3_Q{@}>huz&Op z1oVqP{>iGVtzgP?HQ{Qv8Z9tx()nNi@{jz$aDEnQ9{H1EJVq!tiql`Mxylvt4?|=% zUQts2GUkA^Mp4hoZ0Ra)^7R3KvmUf-P$Gr$Tvr9*$qFWG7~5e4p$2P zrMc0@6{^)A300P3RV8`^kgOqsM3!O#MB^`4q&Zz#n3^&%uW~(VLmGz-1o8bl2J?LQh?tW&Mvjd9D=$sih)E3 zWRc>k`5|BqSBS_;x`7$e@DMK%y40<$*Gn{B;<0(tc(u8zz0gWrIoJ_E%+_$_u zS`i5>Pn-U}PfVnbW`l>wMd1y^k4o29^hyKofzsl*uS2 z2ow-#%LH19s7Irj#g(VB6w44cB11!4ma0b<393e)6*L$$IIx{FQ7W!sn-g(EAcev zL|KB{ArdMKh@liP6hv?f2D%~x;xLSfD~5wOxpa_%Gc*_TcVF>YUU9`%OG*JXbRz!fE#Zjhk+_1`!K>>Ug7Alf@;wd&1kAqxUBe_m` zjDFgy!`Pk278iQHG+!Jy7PXI00c2`O61v@0-}rv3+FkDZ|SSX1lea z3^l{c;ByGt9<;ZZZAPE`#jQZ*Q@*_2ImhpQ=J9(Tf7p)BWAC@emLJ_<^<_ujdHi?0 zyx5ZPGDK!i#BQc#ww=(P)S0_YL$D#lXL$_3nv(368jpmQ?QtYk0P z*k$$Zr5<3g@7CIlRSK?b7uBP3i+QE6g|)#tu1hYp8$C$fF)%P*zwn{dpOJQQ(TSh5~DFaHPh)(%}Q>~)MyaM6_+wh2gu?hSGph)+c;oSh>~2= zi(?|9+v@U4xs4TNNJ%Bbf{3*c>|-TEe{-?kZBj#z6-rYQ8nZBCwIX{@$OMMVqO)8z zr5??FQd;Yx7cpG8n$b+*Kiou0)y&yE=D36Dm5_+ek5IZKtwODGwFL+gAB(33J8 z;!2skCn76fiIat(+*}Q_W1@d)jT@}Dj;rTCMhv;E)7rgUpfOi{c>_J>ydjZL^+gA! zG1KY-L%K1LdT1z2ZIz3ml`2-=FSb+6DBmtn5l#zm2%adh#Zzz6i+yd+%ADr72}Bsp z^dO629%zBh5GpCC{gA?$8*jLwCsf9h6_#BJRFQ%qww!>8Dj-PxidZBo0=($R21dH^ z8xu4D+~`kF1A@RO20%hpldDpu7*-?GGV?YaYE)E`BA317iVL`P!qAmZ*vm`QV1-)d zG+NQHnyouvUS&y)8-``MvKn(Cxr$@UR~QP^L(E=r7D*RGNT*3vsh}c4?!}N@`3kr6 z;R+(s3d3AYPc|wp*R|duE{ID(AczQjf)x?2L?g%qK9Q>k0zm|GiDZdTRY1>hDhh^+bpiW%Y5vM7-L{D}stNL`-2o#;eL8Gh8EXTX5o|4}i?x zV*c?@z9;c#KmQ@5>^$~SUpeY4Up>5?P<%x66q9vNmT;p3_3)$v#rTT!+{kvdAgj92 zJ>^QIc{VbiO3q(-_G{<*-~R1ib!xk-SOJp80gs3cm8yW@3?$Ai7gfJ%q@fkJ`UnrB zsof@sL>{2C=~Z8L=FKB_iF6=cam04a`3>6joQn614@Q2h2&Ig(+~#zHfT#Cvc~xsC z4u6&srz2yoxQF3NX=ZrRjaH5G9xr%=&K3^zC_~jyz>&GmhkHKenYkyEgu2Rt-_dX^ zR=TZDgcWfNr#9beKNJurNMfx;oHGRkMhWz+t8vQWLy3Y2opHvW2NXw-KqBSkx|J(P zK|@NOM2SqCTb#JIaojO%FT8`y5cF_FL5tFgr2pv0-}ThRk!-uh&XGD|I@2>24_Yx9 z{TFk%05kxf;breHyQ1WT1qSNnJ*pyLtfr;S1aj#nQ5Ma`U3rN)-7*BRNzH_)#k$Qj z&J+n+YS@j9_^4&#c%Bn@5>1Z6pANEkgO{`iLkCZiD4-_}C3lI_FddX42v21tDvKeu zDwjn1Q;Z;rnF`(N^|~iMpN!peZ_j?2$Uu>?(MR83u%g zT*FEr5Yf?a}ihzY84g?0fkStB@Z6ikJ=#1#pMA;T@m9A1uCdQeXB%AzL`O<+x& zEE3U6M8Cj5;sQf}P7!Pi_zYo46e*Fy@k#U4phMQVWip-^QqPfcx>0*DLD|&C4UJpG zgAOGwQ2!K&-qmg1j>(WNA_bq}Ig$=i2qxE4#BDCyR_=9%5fJ_Mn^bKdpDi(OADr@T zT?Z-n)0we3JoCwAud;3tr`TYfb}Pu&hHL!wKDPyl|39GyjM6}-2Wh3+yiNoZ_X}ni z*@kl4&9;&yiaV0+U(AkSy9)h(`R6~@_FkDi>!}x?d)zOW;pKPcJm=+Q0bcgKuUDo& zY>PR(Y=7B#>}eCn-FoF|Pu+j*x2`+OYs+?);bm7F&5mSyg;}uCY=0RXwrdO^gUtOj z%7(IA=vcZz!pLQQGsp;ux%*2VkJPzHkZAkD-nWr z($a%p2eu&s8Y1Y^ALeKy3B(O*YhL`WX{gyj#yZPoDe)l;tAIG-EY;*a_ zt1i9j>dQPO_=EL7F!8Ym=|*kaQgkx~1Ely{V4G5{hi(4S4JZa(Un|(3oPXyyTg{*Of!Lk`IxhvCxLE@0q!y?QU-qK zSc`L-Sa^GM?>x5EG7eA)0;{Ic-F(~0mz@8HrwVYfePr(`Km5Q1`_6q68(zNR(sLnY z*!k))7v6aFxxV*x{8tV<@62PyUUR{v7o6gIUpHNM@k96AY>WBX$qzkv*Uk5T>*m*I zKj-_{FFiN%+};dgzA%jISO7n=|7Fkn6^M!dqk!Fq9x-~bU zRH`5anNkR%sg#uF5%f)GZV@Q!cHmqbu^F(b&9e>_t*JPer(ZWMBGc0ais|pXtwz)Z zKbt*O1Tw?|J|;d>p?T2!RXXeV!K$NKm<}y$u)+{q1<@eDN|s1&kxOkh%9SFH7+X?^ z5JVKgCx-NqLV35=)rwGn1I5#X<`hP+#{qim(vme`M-Y{=8l+nba-|5uN6l)P*+M!3 zTH#D*lvt%3ouw$nx7Z?+C57@dMBn2b4H9zHBP=XAP~X&mwR4>Uhel=`6g0YuV4ex= z(hW}m8Z>gE7Ep~bwF8|@D)u`&vMrflxbp6;)K@2c;cfc>OmZ0 z&xbQZX-Dt0A3CfXq>yd=U#>27gWmbUn(wp)o( zYH8xpxZ;eJg4oiEtMTD~K`FH?my0-x2)WciMngX5ZK7+RnOr(S*eWF#XGPo&MiH7s z9Eebm&WJ@~BA4Ol@gKSdR+YjEhfEV0+=4(nDb$vwd*Y0qK3K_$rv`>Vx;Af_O*GDm zOGz2Fvs4@t3Lq}fwt$lJliTHXTUk12 z>Shg1%j^F-iw@;sUFK-~x`3HfDe0sDJ3tLwY;)OOvu)*j@B0?WJZJ7~PzY*XZ~e8+ z?|z~9SAYFywg??_!dd&Y87kt*l6FpEfg!w?&#A;%O_Ye-SUYj0Hd|<`V^V{{nzvHC zWPml;(%G89;=xR3C14zIGRi!TObzOHVNc-KJj(c&ZGQGj=#%)G$3g7l=7d6`hbdd53w%tX(hDi?nN83fI}))Ms#oCR7b z&P>F-=gRFhX0i(9=5{_;1UHGUbw+$RU-MH|5m{)k5@Zo7S60=U2v?NII9v;{YwX;< zCiK1U`)Tr9;!a{ar|lTFM0i;P6Xa@O*7+bajO@*2|CSQ*h9>O1FuY8PK%CA(;gqK& z!v*y#r7QpIn8wvcA&KZ!ji79OjhEQMSRKkNW7Ve#DabSYhV6(&qi4Om6{y#+S`8B9 z${X6MYDLTg*wQM|;@HYH;#KdsB^Sf^6HN!UxXueV+$u|FgMemlL1+=k?YL3EEKEa{ zjs{Pm2yUbcB6Op=c9Q}|Lenb}RLLgU&xKaUCBBT@gAL3<{3k>sW z5-J>|5JZO|xD{BW2uebUPE|#?!d0#mWC)PKk0Qwxkq%A&@y~vY3|r}F21MyrqLc*& zWy?)sBDi|q^s?d;I_~IouPqGM;T?auJzwfPHdguBl|R**4RJ!*CB-#`U6U?CqASw` zao-tJum&&~m^lre&O-@VG5ehZM|M04mP9$JrJg>Kk2a`j}U~-$twv~nAc9!ig zgUqkLW@$a+J9B5+VuqJred!qw>g+EA%X43RdSQ5Z#-v-|W!F2eKih}0{aDJI%eIw2 zx7((Wvbd}NNx{qBU$(z&Te;xGHGn(>@Hxl~BincG1!tSlekSES_Nhm0fBs2(pMT1J zryRBWkq7PY`Te#Z#mggu{E|*E|KE7oey-=YOD?vk9pwMR*L^^LTGaO*4=9QVDmD}p z=^zMFq)8E2SeCZ*jwnsB5ff2VL{U(rC`eg)@5s_SyRfiy0ZU9Y(Ugy7WGCT^C>WMRQROwS@^i+w%Y(!AZ507E5OI6fz-f6! zp8fV-BnkF?n53>%s4ZF)JHG}iwgHbc@@BUkxA$1>obR6fy>q^O);GUS4%_Bjblz20 zUgBe4mt1;*zc5c+Uix=F_KBknakTp;Hc>s|#b(nkT+D0cfdgEyDz-{t22L2nQ`|zq zJVPSz?$IyMz*aRELi)s_lW7B89%{ghLEf!zGrnDNLcH;f0%BWfkZ(4}!@FgcUfOhT zGny8b@SSaD2je+o%=`W`>m4-fmWwYu8(xN#J&!E|FM9yn<}%a_Ao~=U_m@Ft$>Z6l zo^s-M&N%7ZGr#oF!*=&PHoSbwNuTiA@`d01hVNrvdchfY-*(N-*UY*8iVL53z#0S3pFCK061%9@aH>z4TPVyzR>G!M-RPNmNOEYX2QuN656P8 z`->2|6sbrEb&hz%O0D9_L7pngSz5@^9Q?w7=v2heRjgxB$1fF?Y7W(l6(PxqGCe3tV%kWj8Q56Kd{zU!J7>#V`NJOy>b?e}Z`Z@~`(gvuCrx zA99X7{O~5zb=F+dtszqO0fZj{*|25FaFW@&e~ z%$usX(rK5>xh_dppPAGehHHpWHjQkvco02?nazpf5y`x$#W5ICyvm`vaX_hxjB)#d zJtU}t$2?*fu|)aTfBr{w_unj9Jdfwa@D-MOo#l`{XrC;wYcJG=h?^W?iTOnUMYTRq zsQE=p6qZQDVn(b3Q{6a#j#7LOt0I6phJ;d-Fc4BLQFX29?T%e3=;Tn1O>v~o}if8VjlZw;K20fvGYY6 zu}#Mia9z6Tz$qIPWsGhBY3kT6PwpT#LSm4?>_~{{i6M7Pc|fu5C(iX%RpgsPtr7uh z?h6GiVnwD?9Z$;=!grF{qn11d1`vyl%^9o|XTcwK;t$UbcTgCh4>QSPl3sEsa*f#R zc8xO_0~L(P+S->Qi5vkbCQKzwh z5?Bn^?EW~EOn*0MkJ>gY%aTwTv_lkt4c+c5C#jZ z4EY(KmQ)YDPVq==Qx_fc^yp!b+|4s6>1_J8j;FIZ){{|2(gRC_vPed0omnrHPavML zp$se!ybLK%UR8eCi#EUhw-m^1b9u7M{PZLKC*^tU=bwJq$G+fYAIJ8|uix}M_G5Qm z^29xtdxiP7%TF)k``9*@?IwfCfHS1*k!rynUrvIrNPz;-@FDCqj)*m zse6WQ_yffrgob>9VosSc?GYOr`EK9J7CgEH$V-9~UbcW|D!(v8Z30MCPoZZ(GoYXnh} z!G>snQflZkFH?t3zN6Ox+!oM(^bmcOOos1sKzt&Qpe2;pzF~k@l(k<~6YKB=kQ|CGrVQ;UfOC4P_qH8-6Y~i`b zNH|^ST;pSoJFQ%n#4z&7i9rxZP*;+1s#e!2 zAu>1>u^+#R5+P1gM=}P&S%RpPQ%=bdL!}adO2t8|Njeoa6!M2^dDVyH%*Zx8iB(tM z)1XczjueEnvxg9QolU7cgK2jpA^D1!Au+PhWq`3oY8Y{p=ajnUbY8HZA}yuQFp^d5 zq=GI<#%b;=mFOZ?Evj{Ia;ic?zB^@rYs9s$-Fd{YB=dqOVsUCX`RrG!h**|j&T)>& zV^&I$ok}yTdDES}9M`YZ(y!e4(M$DPY$QWE0hyM!sv0XKVX@45ttC5?laYe5uZxlb(LrOdz}s*1?xyXF&rVR1?hJT(QD zC-)7^xt?JYSjOPTX0Al%luJ*bT$_9m>e>`?H=h-e3LW-x@0fmI@j*r~uZ)bV-81 zM;0RDk+?r1pp+Sm*ME#5+{_a3AuTplsOv%?q;L^PI0kfmQfjDIi!`N_3YR8^i=TdG z0kG_men)*tlJTD@PxaencAG_Cl6*c6aJHc=1(O|H=eK3nR$6!&7Lb7ygjb@DFB0>3 zB;_fCvxljC6dz+a)LHLp*x?9%dF{n!y`}3H52u_W7C;%HKs3@?XxWI7(ZL9pDB`|= zh+&sGev+Aot|Eb2hhdDAvfPny0nhS$sprI&ZCs;*9L5_58XI{;CXH;Mkr1NrNb>xm zOA;J?jDt0qA{6wr$M8{8xX;vg>x zq(Z?HNDO&}tzrv&p+v+P+L}(64>qbIIStUMGT`LmnIUKK3@u48(gpXXBfg;p8x?={ zCqE;+9c`2hK#xrkqUy;Zwjy*y2^-(>>dK<=oy*8D8?nj z+TtB}dS_OuLrOSx%OxfkLcLc;-7atygUO+0$G-a2m%bFCz|MhX-^!lsJO5yTPh|V~ zvj0iF{M`})Dc2RxGfo4)t;+{4$s{P-=ln0+YQv)L~_cC%NNgUmgT?UP@Cb3bDC zODX?~0*(99mzR{`<#v{VX#38#n-RfhXu3C=kKTLTqh_yj?13Bo?nk!!>@hR?-k1I5 zsqcMFrpy0&fBBW~ec44dnOb>z7YU|==CIlB4Bwe>J&A3th;F8U0ioJn&IL)@jRgsp zd+lpIxn!(}=y_~H06kNxiY8otgfZW_32YDm60rf59=Vi2X45XQ=ytJTNo=Z1o*x5X zZO%f+0Oo66wNwv{VCb%JLSnrQ)|zwa`3yF+PW>=?DGO$UEjd=>L{aA}z&10lqlw;rvDYWbWDE!S z4lkPy`ns2!1D4!sIRYbW>nmZ`UNw0h`;;&Lp1orkklB~MfMt(l`#ARG2U1h7E!$j% zogL3R>&tV_JN57bcd@_xy|YiTzuc49w_G>J{_?~3-169ibNxxmlh_L$zWc?eC-3l@ zN*dK&8YTs^j7v%01Y{O4u$wFl^71Bv7?fgD%Y@kYZ(330)RpD>l2<*7BOzU|M9Hgy z=tvHVBolGa@sru5nmWfOn#9s>#xZkAjR00Vtine15IdL=&1=C!Gcw0jYz?dxxNM$9 zkyk|`pGi~@PNsGN%&koLNQ(s+M^qtG62oG}U~~+QB%muLjT{Um+D-Hnagd#OoJ!4` z3{%%2M!3<>@)c)hxVS9#Y(H7`*`Km>w7evoG0amPr#4cN1jcjLbW9)$v-3-a89YM7 zpvPLns95L_=^~9)MG}t(PJncM(sk-Vi?vo?-8$2Ap0;Dy$z?nw7=BYhNUbTVv~vxG z*CdMxmsP>ZRHw9PR&9squ?jcYC~~*W$iO!HJ4AT|S}hlyt!+bx7OAe_@ZA=$MY6EC zsJ-EcB5qdNu32N9Hl^Jvml-4r+Uu8H)&?A>BW zg)ecF%b0C)854Ywz$TPf38^Au3q``H#hJ%hr}E^t0@MSw3rDoOkrN#oe!2t-ou`68 zny{#-xiueV#$zjbN@H+}C=^0V%c;C7 z;?|^J^vGnqM%0wrMRkcNdYPTexk*I!r2` zJwprN+<4P9K(QxmVdUjkczQF|Qs<#Vl2bnI~ndHhfmD?sTLAwdh5+jKrLQI&UR7?zO7|h7LcIS=d?v+^u z*FL{O9&J|dZqd#qD&M=Rq~x)bPE!T0h;6N{6h6}<1?VV|q5@RHNpd@r$?po`j7AqH#3$tO3 z`>tGd`JxJgWn5VzU@YY+uXQ&9W z=*R%}H$bsx61o?k_YU$C^B-}0O9GtXWgE@cUU}KQciyfj2n?)+I-Q1*WjvM~9_;;A z3zV24GW-&XeOsmI@^nqdT-YS^m;p4>oFd{+Nc>^9_!l`UT4GA9>Y3d!(3c9jGTte+ z@G>^1aHYEhGWZ$qG{dlkF}p<&TgEfEhp{sif1yal5-l9W49GS_6-n+3U5H8%<6+*o zBHB=PnYgXgo-zatEW^lnTtu##pZR;>D;wg>ld{T@+4+(RQKbb@hAtVWdF_6>x7uq6 zMI{VH!&0FMQGe5E9rZmOyFTnx-*&3EW5cX;iOChDqw4UORneJAGbM$T0c2arKK#|| z%s=}n2IG}AhUwdy$2}_>a2DZ{kA1<*{wL*;>>vN&ad_GD*uVG#e@0yL|D=An;MPBU z=FYpXJKHPFQ1c%=HP_~{J!Nk$`}wjTOZlsm{4QI($Ftdr`k$27lkFV?#bEO8Tff_; zGaT(LW{??J4k`QmmyFHjWA<4OUOwT-ZBICA$Kwv(_ORJoz{^YgpOh_Tcnv5sBb#^J z95y+Eu_h){Dr9F<##{-|!K%=o0UWF|t-yor3xl6FK-;6iX8chF#WThPx@T^ZT|F@TqtdpsRml4g*-a3?Lm2uMS1}V6W%nX}} zrawXqzog+o2WCwleTmv8I3DPjg{C&<`8qCRxbN6ArQRK8El})+vu}Kvju}amBbiXM zwTGM7mKr>-}&Zc7o6<_*N&H- zf7T`6JJa#<3(s+S(RaV~y|cbye;Ho3xqR~HkNfsFPW;AKPB{OZQz2zfW#4wgB@f(r zqb=r#?!MLczWhwe7IPrW=M~a}R1@AQ%Hm0{;DW^u;Rz(4E*kSibxB34QOt3sV^f|}6RT9IA~T&P z&?0)ZQql;iqKr~gnHdcIaj7+W1HMOXqT0&$-Lo`SD z%Mn5A{y8mTQfE+cgvh$rYx~0~e$i;J+3OhIM#RPuPk`9p=`T5a6futQ!ez)XRoLco z^D!cWj~fdvKn+1VP8M3*i<QRvl+M75cec`8!P07WV? zrYjv$6-J^HMGNgJD#gYrbVL$bPm&|Ax}+{MFgW7WCCI_xSOhu}rHFYN-H{v>^H;?} zK`A*nspfm7PBCC6Gx<6my&0gCd@fUmt%z}9L_kHIUx(4e<_IC8OBF;zu~`f%VHjOF z$q{XtqDml6Dl`h)XiQPeI!TsTa-ish?zfoW{N}7iL|03H^SvpvGq2q#B7u(>xPzjP zfe9{5m#wa7l5j`bq^W;uHD)u|c!=3rNQMd>6+AQ{;vrv57|ehgPDYP%L^lK({n+UW zGGm%y-NGV@Biz^jJ&hKI^)R`)3DIpOdO0ovjzxgMgck?V^BSz=lgoBktQajFrB;=W z^0r_AG_T5Hu(nj!Ltlm(LxE^WaIr;*`_kr6O=iA!OTqrK?_~PUn5deO;bKp@aL`q3 zln`DPntXZJjW`TKXKJ=|G1nVHz4T`$a{-^MJb(1kXFvUk z*DbU3dscbJ^2;u>|2`u65wPvpp`0!ETYIFI?kw6z!su2&M>dUjQ|tn6tC~ zfFfK}@P!sJxzCe4w_JbM-M9OTlTT8MRgZKE4)p32-Pfx7EizF;q~j%=ks3nBa5G1* z;DV&dv-MjIx_Nw!jp52HR@i(L2j*Q_&Z*t^C_u?pPys zY*pXtU0$-_w=LMJ-p08MitRbD-|0PWWSK4n!8*Y$We>C%@!X|1^ts(+T!L*Mx|hG$ zsAh?x0En-Uq1^43&dt}&x%hm4h(=)&J9&ZAsi%%S(hPXR5%5KN;R_EsxP)xi0-?;) zYLJWe!j{1>m(0K&=qUWsvY74Y^_;Vd#Rwqgjz%XdMi-jvuCb^Q#(1KAHIod$OACz> z3C0Ct8D{*|ZD>@A!m7J&zg3@<4$p(Q++d;&R` zc^IVViOnQh7+h>A{qtY^97U|+iGTVrFNQS2RoBYVaaNSBgvgUAF<6qJXfR|3&gwcP zli3`pKj6$q$I=J|X0cL{!Q;7c6fH|2HhF1Ory89=MG>8*YnU-C!4{_?D%53|R@RMm z9_BekCx;59*(O6cA$j4L#!3}Q#Hv^_urDzgKn$MEt%e%Ocp~|46pRck2b@7>?^6Hb z=iWwf``Q9#@;Vt-RNa z^>^Q4-90|E_Fg-#yVr--nz_}xXKcRm&ReXq_jYR^wDbE8-Sz$Z@A%$*KIFeqlfOsV zV%}@pRS%uD{tmifWsx zW?>6atL{K0X>g(hJCo^|Ns3^BJ{d}gS5PepOxf-&zwOO$WQkKepuPFgtHBIrCzF6v z7?~n=y47q}wUTF~MKw7lcXV3Jq5IC>?d@-Qi-kACP%vYVJe6&taYMjL+v>v9Y_*VW z@Ul1%2+!i=a*n8LpUd)z7PZ_o0HRrfpMw%PHsj3Bykmf9!1dg>1_0sXvE@w;O9a!V zbC$4;@w{;AhPGSimOwXLdC_@ie*NoT{{3%#?Id6sG?uQo=sUh~?FcVJ)1hV=klFXK z&phpO-d{f9_(SY3U-;b#SoXayzm&T9+KcYI<*M7Rzx2*qu7Q*n&bx2k{gZzrSu8c$ z8{|y~trm>@lKe6w$S|Zqo`$}m4P*1Uk0Jw@Q&( zftxGgBY71WX>mf1l?hHs6)8_Sn-@4Gx-4(BDVuRty(lR{*A}FyB3b-!_MiXf-?=mb zs+Pw(hUGCFXW<7PS}mhk;CU2*<5qX75XgOf$%RQsVx3ZNLA!%Dak4#G6s>S;>mFl6B%|Q%Fr@mrIrhg zU-0bI*zy`1*)Dc>+bo;GVHaATtmls5U0njT8Uu`AHjDjTT@@Bu@ou~AmPA<(Q*HZ3 zv|WrwetBL<*4v&8F*r9yJKX18y+WJvT}-0m7d;g5=YoI;lp;ySBsEz@Pu-^?4G%AzGK>q- zxBwApDXECTDjGwo9SNj}kP&L9YSDI(8-yCy)42htUmm z8x*aHfoe%D>c}UM<>ZTiD)QNYS!o8K2ZTu$91KcT%t95(#~`tRZM5(a8!a-}XeTOD zWi*F8Vy!?d`^iz4E-EroMb+_lDwAHd@M}uYMl!XjThr)urU{b{G}N`vB<5v<>XM*2 zk|v>RSv+Kx7c^HvKDtE%?&(n`P~8s?EsoOWn{Mi2_hvzyj>J|f4*EPZG?30F|JqkC z1sl4D2Jx}n#0~9Iil>n3r41Tx9B~%0%TOc*inSu6-N~n@W+I2Pd^f%3zIY4B$uhywQmw1;Waq;Jd2Ce>}dGsfeA0>k{V)IAhMm#Yf*CgUi6POm9?(_P6<7f`2)|Rm*Ecwh02^M3JvEXs;qbmni9?*fa-?5Ei@)gm%jUKGhJ2 zj({iC5U6%>iXmqCsz9;~a5^0zPY2HOR^362R`pIT(M1C=w6KR3WvuNX8W}K_FA`4y z!|-x91X9F7p1Lz20gTx6A8?o=I8mOpbMLtIYG2*C=5l|CM$vyl(N{HG`uX6No7=#!PcpY+>b%gC{34AO-UNbc-(!`d-a?&V#YSz zyTqao7m}Pb8l_638ylCH%nOEh%Lzk$$(SR-%P=x5Y%AFt%94F#`2mnYX2AKsPJjID zbI&~nFTeQWlRy69!XG?8*=zP}Hn9BUyt@`YddKrm+~?b0@Uk7}Cmy=>;d`%~|KPO` z+;y4E9XFkO>$PVcxW}g3e_)k8c3e-Iwe1=^Z?!70y!*DR@45Y&(t$HJ`0yT^eRTE~ zhwo;8dBekZ|MjP^_xRwu_TGNAeRo`K?+>kdz|LzNzQ=nHnYGSd+pY4^-QIiHj5Uwk z{k@+(Vw;nWopJo3A3R{@dw1P#jdfO8CcNysUeob1uxa)**I4!WX^Z*7E~-gKLNUwO z=rpqcq4qk#K!{I*h~Yjhg zFw=U>-QX}d)sknrD6&UpPc~dY2-rxM4Iryl(o8^>ncLlFe4Ws7r=+ycqkz#VB5qC3w-;AQ_`{_w%O!pj$Y z=W7?8`}NB%{I>r|`8f96n=kjhuiJ0D9A2LPz#U#*_Q@|3u9XzBBz|PK56A+S)vFXr|3Bb4e*SL z&6^`8ETN_4jUzLd&8h4pnFMku&0p0y%PCk`U$wNe`~w?x533;k$}KPb#N_}~$}pP{ zT1@3KDlJSoL#aJJBOdyS9Wkt~>98Wy)*oA9p9UpVLhjaq@jG+AI-9SM#oC5oE#J-ZpMN~0Lz~j90(ua$bJpj0X05W9K zi%sMi<9?uZwP;Y`)NYq$k_KYGRx?7nRP_XrEbJ`glpZ2VAsuF5W`Qi$FfA z#Um97bV)LRF{T^SCA@3|P*+!%3<9(mB3ve_5T%n-qzWzSnw1z|oEa~MH5o13*I2yf zCLYk+%>hd(E{LAGq`{%)ERz53Z~l^s%u|eU)Cz_dhyN-4j%sf-~JV)Y!F*FqAeTT-BQ;L zd_gvZny)pw$z3fA1Bly0CZ>B{>jvr)Zm97vn*j2f<%(EhLudVUCLc3@+lp^kW~s@z zWOEti+oyjW5c`L}{co1UKoHby{b>;^o}$fKF%bjx0&8fx`1HjvLNU-S&oOzmtS!)2x-3+4+cf#uuxwDgq%s0Cl6i?;Y{nyv!Z zwVte%$yc3r0inVXa{vC#H*_a~J$ey4bu$!bNT@|dG(?{J-9X@}n;Y&VDDA=#ko;v5 zpbb(%nvAa>2A1;p*&(4RMf#nyTo9A6-&t^aI=$-H~u>8YkAAy&F<@xvB_~gU4-8=W@43@^cim?7ZQlR34{h@H**mO#@NOF&Fmr=_wqNIv znHvGi2k)}sfjh4cGJELSGuQ|1vhF?~T791#-gEG->m0WGdI#>Z=KeEQcLbIX*>Uv` z@3!6vhiwfnpYV|#j@)On{bp^r!D`E`@S4f~vS+Qs%dc8$d4oMfW*fx|$lmdB_Z2Dx zW8b*^vTu0(WIx5~(L&KY0@#{%O^%^j85mPtYbsc;r6ztGG@!Iu*jBa0r-|MqiwCZ> zYQ@lM(;5{#gbg86OMM^|+C@>x zydtJrfB^;m8cCXE2}?weIuYiDl>of_?XQ37lrNkBF9XN}Cih78WJCGv(b_pM#s20tWA0?R346)iG6WoHX=F`*89ID&!MYB5p}6j91lM-@eN z>MDMVJ7We_yD%!E+ZVH7&i)#URjSAnQhWq~k%Xg2RG_d;9-WGj)KZr;uN;cc>^=6d zUrevz$d-`ImLK>4`)OyqIO~C=2^XTsyPymmK4=Or2$!9+KNfv_JZj|BorCisq2q5xWi&%Jm@4KI`YK@qPkPMSdvHe1pI_! zqZen<-~|^@(xucX?M6%8QYmSYx z367eTU*xE$$Z>i+6RXd#M+Ee!>>;O6CzB!_!yRLNSarI{DW1HRF9_u0q@6(C$#6km zB@;7ID5f4WjvJ={peP9i20hUH@|st@%8Wx=;}82uD`gEO7&=mC5pM;E7-1I9mDe> zLI)9Vve8BcJy)VTT5fLFh1g#|;frT-%=KO7;$W)9WAyOShe^3xUY-weYe*!uolR}a z5_7jjj)!MZ>ajBI2A*`{i8iEHc-^wrCw7*B<$wQ|Us)l8QI@aP$ z9|HqWbYrST9tZ`N9lI$(I3Om0rBE1pc(W}_mUuE8G@}ck2AQ=BSn4RTC%ghnE`-6k?n zEWyQ2rCw@w9C#Um{@Kr7`q7UjPi2RfUwq-QmtT0y{_+o|K9~L6f_r@~`-XC5*a`uKF-De|P%)s)|`)z#0E^A9i&)yJT z{=zXkefFrGjyZ6vgZA8Hv-RHm#$}gY950)Z;Zx5enP<(Uu%X!mT(y5{evzzE?Wlsi z=)kP0CwOZc8j6JeU^+X?W_)w8dCe4T#IRmf*S3pE*}YtV)N+)lp5*kO;GMWy<21fpooR0Hn-yKy)qAv?H;?>6!}Up#7YzkZ77@gOsTM~ zS&vw3cmQ_W%4R^EHmA(3Bm>l3;2Ib3fFZs0Y`0F{y#g57IsyEL(5=HkUD9|#m%2*r z&f5*QtO9`=h)4yVWxyP_QdfofWUS2K%&e$MpKa)D^^bMqr3@>|s z`Mk5f>igKzjaOfA$IVxUmmj?A7U}*wZn8jfrIzYVOp8}vL_XCSoW@LLUe`EXi_k4v7>vPWN0IeX z>rYD>cjood=vF%+h2r371%)VKP#s0wT3VyXy9z8?^4c*T>a3d7Qe6f^LgY=dS?kz} zS}Vvn6>1YPW=lk3^>EKo=rByhz)pTCa=Gc5u0)$AIwZd=H5tPx@+C&nLJrB?X-Qc- zqDvTY7g*1k7(m=OvfHbcb+UCkAxshzbsdTI#VuBW9$ht#z5w7MOhWjZ zBI`!i8k77DM-1IP>pB%W*QTcT?Ob*;MxrPe3^Ewp1H=GOd%f2n;|7IISn+XdLKRnN z`wN6?S*I*#Bu7S6v9VkbgsyNYN`;ov98S?y!QjYdtx(%>fk~zD3TeI zT2~?-bQ0=v6gee1;jRlA7K)x;K|76@Wnec&sop`T}Go&Cxe-s;#r_*R0|smS;DZ)Xkc;7Bvz43h`EW)L=_MW@<~p3^4Tng{Hir& zcuk9CiOA@s?%0@oT~ltEii74*>Q;}5$;`zPS47n_AhC?Z$wJ-R^B5*9qI70N*9D&T zMooF9OM`gY8DvCy!iqUsC~H&rA; zFe74+cdV|y0hfVn6QHMiL(^~srR19zrXI$23_Aw|+~%T!F4HmB;AImuJWDJaG`&2i ztH@HpwTs}}Q?_O>iMw)Pe73nf*~~U~vwhJEv#{{meLqFk28L7q zdd^(G5;RR92yo4>{^5UEXVU1w?5#K7)aD?H2Vbo}E$>@Y11uJw-oC7Vsu*H*B)rxa z6WUp#R1t>xdqRrWb>T1B$g>wHC+ABE#{P@vX+?K?7@bk+IsBphaQS zli*hJSrXhMa{yteP=bHilg$iZZFpI5FxPXK1kroryXTWJfR^gYwBJ1t7QHr_fm2V7 z*lrG@vd7#O24OHXfj=xGlP@x14K65hM3+o{`k^@MHjdh{(Mg-iptvG5jbm4q)=`TD z{X`E54+6z@-kD+JTi#Jv*e0WoYOF#bqun)n-iaFr1*FjF8(m6VxZ#>B$$`{J;8!k$ zI^{ViVdD`%hqA;Jp2Au6Dk7pdhBqrED!eIEEg41HUEG+VF67PtM99vm{NRi^QHsD! z37hKJBtkHUQbk@uQCh;Bc6Cu4iJER9VzsK64cSjMen;w5$r6-=I7RFOm?glu)ai7*3`CFDnSc1hr=^!(TIhM~AHMX2 z^x|{#o?ZB$_m}-%%KOWYKX{vMWt+?J@&j|PTJXpXkKTXvUAJ8bH9zt2b<)v?e8~ID zyKc7@yu9ZQ>+HDsDqdUOXQy=!-1U8X@3_ukvo=0z?=6qsXDeIG2ko-K!80eD%U)qV zc;Sibz=QTq3Gpcs5isf7%)%_-ZiAyso*ows-cc$s2H5eovO5pt zpo{jh%Pb=iVk)$u>~ZU}PCMBSGQ8ZjvgfhkWyy{+1T87D%M4Td*w-m1eSF_Nw)ynO z4w`e`H@w2^dtV^)6?4w=p=|Fj`{bAJeMv!PQ@IJwi(CJg0=i6OObUpS z1&P$e-PL6JFg29o8ON?QM-n8Q5>cHlepJ+C)D=1^GEY+aTT(o!mX~2C*MzZ@n^sMV ztiBL6ac`q*FYRIlWv!)drOb0G;j|vp)%6EOY-ph(U#W#C2CI||<^mzKW3ZA@YO!kd z(((xfCu0b~lPC?L6g`0!((>$7D#*qz%|xu$VL!0uGITe|9+Qr(Us=Z2j)Yj5*Fb6w z-$|oa5sFf$k~|f}N?D7-01k52+S}UI)!o`y-(ZZGv@+*ftAhGK?WGitA_9qG3^`mA z0Y`aIOAP74NlVV+Y`M!q*R@)Tn1{zTuGErP9uLXlhK-iQ3I$!RoekGt-|o-8vu6{| zLOfhkH^&VQ=v1R~2j=yaY(HDK-Ug7hI|(FU3g*~2{a5_&~=GKP;iLcTnC{9E4Z(|=$&n%yw?pxehN86I(zMHXbTXMu zpp3*3M4a?WG7^)B>h=OVQLqUS70i&wP>PO$I`UH_4ww^C!9gY0*dEF0dv z^2!z)|L5QT*G!0hN+dp%XwLu_A#bL6nGclqa93j>4XNerZREb`Mv> zT;vz$accsFSojn!b5Fx5OiCj~@EWBkG-|0|{PJf&eH8<1A+?&!B3y7<;kPEYIEM=a z4J?HU#YC@#7Gb&xb~cRs!lViCR#`0 zpcWA^v2x**CavR@`FrmYaKRU3riuXJVvgvvga||Fv4gTmhU57z6yY*eP{btg6%s5& z1x>ydmnrO`Dp2xNOG&7Q4EavuS1J*k7WRm?M7w;Yjy26V4l_`w8!39mQ=Jf}!(dvH z=Ew%8(=ExDc;q|HazcugjJl&p#;N$Lg@GlhORT(1-pLozvyAA*J!kQF8jTpZX1-kAk{uJ^AF<*l4z= z-1FEd!18#h*^}5m_`wtKvh*XL$o2~JvyVP8|897B;rzQFyz{z8?wLG>?ak%K?!O6Y ze(JGXJdAzIb?116dBH>1E`0QcqYmDFtBu~~V_%cB{aTXOmVNTeBiYiCdw$@!{kQqp z0o#6LugwqJ?fu?g_8n}H`G6T~&)V{xv$k5v_r84Q3tqOr{PBY~2bMp!_xnG#-zGl! z_0hfFci0~9+v`JXY`57vcG`0Fw=6%|UtR<+185+m_w~FlY~=&G+PwmzEf-BgHdB1G z6B@+^GFutSK%k(m(~hQc)lJ9tRh3$(107JLHwOVjKoWX2ZGqC3m+&vK*7jf{fJ~7D z6{><_F^uTLo3?U*W%jVj3r7^8+Z_hCVNy4lJ)CS6XUo-XXW$xxDG$;&$Ej!+SUyWs z?13vF7p|ws##}Jg`oR;?Fg35)Z^PHy$M~&VteyTn=N@ckx$1Hm+eyGFj`7ve? zqAu3}VR#u9mhtj(co|X#nfuHax@WfSGUM?9?JF-n%qHjdib8(Bp>_oJhqjFwNSIIE5dYa9bLk*qW1LUb#`76v2!9M9`?5y%oKgYN_~ zEV0-mF+|B_R)wAb2d2WOMr=3~15Tl!H^aEkcXDL(-psc8)qHfZ6F^|Iii|AU+%zsR z$&peY&{k^b5F$$*Y&6PX4lr^juwZdcJG$6H+%8oQ(y)zTRLGD+so{=Z&N7lanIu!! zRb8GOJerlAY@n1=TyUga&`>y7Oo&Pv2YJeJ46G7=0e8e%%teVMgiA6yhNB{?)#7;V zMMYN@A{BWm;DoacQK{BMXT0hvBJz$RMKa_I0tUmB)?_w{C^07OI7L721UHH)MMe@>7*iBKGw`F66Idoq(-4JCY^CI|N@9;9L69}RkzU)Y1h#Dvnx?!aaMfDkqBW` z?j)AMC2i+Ic2Cp<%In+_aNVe%qH(!5^O>oxI#r*(s$J&m+LLqdLj93*7M{?BS z1ygBZr;JltazO?~qExB)EHNp{cWi?n1@kBhY854UNnVOhU9P2(YvM-Qh$=Z!{8h=4 zDq@zXR#Bh^8wt}HoJzxmQA#qII5VCNc(RZe0+KHn&U2TnZl|L#h8eIwyG#v|+N8`Y1`@i^Ye4AKVLIs2c z`67DkydNvu=39G>H2`h52zUWagY6bAsUy(ca{TD_on>tC6Jklz8=By@Q;RWlrIL_Z zoClWsz}L;!T+&uDjO+nyMLwNv-x*StdVl%qOV9KRW}C}U^Tppg?OUh*zRhLtFJF84 zxf95I&78S6U2)f~*WUB}>p^CCS$wQRnp+M2El@2p%}MqHyjGDM zml9A$IDrL+8CK&h=dcM2dJa;A(;Af=i!l|gliA$DuMlAtYSPrTC~XDE3vx=?&r}uV z0Ri_4Km+#IVI29zYg({5s;HE~w2LQoEO9vyTeI>`FvSovrwFWd5E;P7POY4ChmQr{ zDYMCB27?jVKY#v14EWFg_!nNQLMeVLf1QD9MVJYc$1n1p);0jTGv*Oc548A^7Q>V( z;v?G_=9Di~kgpb*JYp(G6gd)@Q>?{CbG#x38S+Ju8F{zE-&II@t4Yy#a6x2?|$zkVh&Aylib)cV#A zpfFkYLd1Xd=f6a;F16;g{`6oiq9;@!<(8~8swLp$v&5+*y?O}YT*#g*k%vZsM@f8y zB0%aWP>O=CK$9e5uo7R2GYO<|hY-4W=TwrRG5SQK3(Xxr9l(Tc#rw)L-bn^a+hWF` zrR>{ki$C)vx2X-^8Y?3DNQ8_OP7#gV7ZVL5M3(f9^5F$guKiP9NKkO?&PQTN(>LT1 z!DPlrIthbO1V`Usuv5n}zx>mmtHoxV!V(V&nugv$E-0bls1BPThDkB7Hux|M&RUL0 zDl(fW_Nzq#Il!`SSzFxd8?~&+>r2K2m#Td6*ZR`Ps~9*UKjAV2lICMy6gl;{xHY9m zvJJ}9@p5@_(1N_?rnm`$OmUdRaLN~hi11RxX1BMP%|VIA)D;Bk_`Adu+Tyjk87xDZ zD$`rmDVc??7?v++8eO} zCOIk+a7hL|bv319Mi!h=H%nqhEKsUr!|%v`r;Ln6Hh$o&1GQu>8Brk{@F)uG5g-vlaJl= zlNT5K@cH>Z_Vw~J4?p|Zo&8J-WPX0(+=cURzVD7peDw=ne(Zs(k3DS1UA9@{fZaFQ zVT*Tfzv(;np7GxOcUj*jvf<^A?zg4Se8J1d?)O2*kIeqSG5c+G+yPr1y|2CI4M1jK z`GB3*^d$CvJFVua>|+nu^mh;0>hni#|9c%Z*{ zOW9xc*NIn6K9T)vylmkIY}%Q!l?-44uz(@ZY83=~Is&$m6%nwB84m@&l0XYi8>TqZ0`O9S2OThkh;33^XyL~o1GnObvdP5EFj&=F%vNP~Y@vAH zw}k8AR)dmKTY&(ty3D49M*T=f8Y<&|98@As%EoUG*CV)AEZAr< z08*{4N3yM9yshkxf#p3;8vC`Y$HMV@oZG!Gf42NZiM`}P~Jw7(1&JGGna(_Zpk zWcG<{880ux%dqoz&N%6`Q$BO_5qtUt^I4~V;map}?5xwj2rplK>343w=`x$kp2xm# z?u`%Jd9z2d9j)Gt_J(~EdaFt{SY5P6YHIF!F$*}g(vWc-xpth!X1l9Iu>J(`jz1nW3 zQZ|s^>blhwArYxgED08ecv=fNTA5f=p#(Zeu$EI%6`bVYQHvbO${8mfPH}-5*ifvx z)yhJ(B(>BvBP*ps(j01OCPx*M;BiDfL~@J9)zRusT@nby+``@|rFn-E4k9^wGO|E1 zvQT8OBjZP^$uMw`JuS&tA|sxx%7y$L?y!U+Ruz1D6)iETC?F%zPB>kTN#g=*2@JFF zC9#N@Tg($Oy$S}}EpITZE){6vxIGWf@lzu%%Y8f zC@I&dE*g&VI2DmVyM)4>%)?I+EfOJuZ*X?u7-q#v5p|B)EMi3?Me53^g@{Lrl1D{@ z(k#zTa#F1{3&p4ETq8jwS%S^o6-A>Tq7Y7(S{hw-6fU5UpG?O@kvm z5~7MuiAug>M06QEyjF!wses@oDhi%qs=88;2ebi;z!?xMBq+b+H03EpW9TE3ig9IS zUeRz=Dy6+621&P+$jJ=HWD1CT1$vH#5FbCUbZNV{4?Dx)+~gG1N(mRYo+7qlZ^3Nw z**X)YwP>d;P!We=;vfz4%CLa}jG;&=jiM1?l=-8d{X`NLfXGwO@`_-hQgDg02oOU?6?sU+Q64|G0o-`u6j;*}Jk(teX~?Rl$?GRtQ{=C4wj zJcZq4V5KD{mY{1*Jd(_GRHby0L^3ZV@<&~@s1`1z5edP7t?*))E=Os>LClLvB88YzYyC`;I7iG3-|nMJkhI4dJwB>{VwldOQM1j>M7heqs}tE)Ag;&gPn9_$0BStDc**GKV*iK<&U>O#+ckI(%US0-}eIFZL-u|+kWkvEH z+ji=;Wlv)J-j@fnee4Tf_VeY(AH8$l1Gjm9`6n;T|MBzlo?meH!biUEt6wiYb^p(Q zGVhrMxAlE&AIJ7TsU!B=+WX6g?!AToNzK^u-9C}+XHxL;tPj4&Tg?0IupY?lVeH9M z*#~ZO^uAjjvB&!lp1HpFmnAPRAG+Imkh0BXe_{UcUK`k4{`7&HAAi7RAK!n|qxatE zklohzJoZjoz54_2eM|2z+ZO5lW&6wCUxsrfKQ^*G3dh+m24?MnMh8WqOE9)mkk*c^ zBOYK7T`8an$-(-BV$nF*YMxTM~i+X8>768Eh&Nr3e^@PoZ)YlFeDvL7<4M zykjLM5lA?w3%Yt#n_kN^{HlYWq*<_GCR0YKB4R1B6%PD*wi#{(KNVp>v4?J_yH}M# zSEzsYU3bN7r5oM;YY|K8kvE)ia1;OI3|#^ zVcW14QLU^(EICCqg=N3^Tlw?dSV5ZgttDBjGV1WeKZ4*?l*C6V3muIxh8I;VjIj-q zaAHGm(XL2BR0SmiI>p9}Vs0TyjX?K`MYd%Obr?$3l^-^%SoE|YuQ5v~qm`=?Jw>C5u_atai>OYT z(j}(Rj@U}c(aLnmkBBjzcdir^MHzrWEzY8pQ0Qe6H^o6|oQ_G(ydoB7l2u`hfZP5f z$}5dV4|lY@h(W1VXf{b82g#>*oRTk}!Ai+@%yD9eo8n10z3OTa9$|A@qtpaW$;ln| zs7taWQ&?n1uS+Rr=INPns$Rlb>Pkc;oK=paq71!KbdE5o-ZElBow`mL%yK+ZRhUHY z5STj5#3Bv@qEJCNXQ$gRm=Vb=u-Hf-JeReRQc6Rk@?6F#VUzGnM4=B|q#-(DKrd)R zCW@4W7=)mrvV9nxOeI?Eh8S) zY^&A$=kcv8zR7J;X_J2+)(xF{6K=#vIdbY`HarPUath}fEpK=%fr=_=+6 zHnrqQka(>+Ei%~TV~e73;k&9ABJ_AV?MPjTYRSSrq#U9N8(R6xEICb0A0()oRUKJE ze)OC|$y#QVP)2HSj&s;Ul(1+Hqo~ODi51@}F%gJ%SB-$+Z&J|Emd#eD)|OVJmZK$$ zPixUA@)Ek!$?A7%QkZUec@~n!U?y>-JZ1=}n23*ER7nO%1eO&ERDjVDN}3?wY2Vf+ zlW+oygQlrhyoo} z=JEw?KpSCcsgJqcQYIzGk%~@b+B~g@?@no9EsZ)-3_-|{a!33l(IsiG5`a)*)v%|c zIF%0b7%7j9FIrPkemUmUWDob10Zj|m>tysOO;AxyP;5+=XNK3M@;j) zgVNO^5Hr=Dg|;sY{DQ`A_M!u-z^o(5D&ipurUrEhcZ%qg4Hy&=2=m%FMocVNofkCH zf*&1jg@W5mcZ7LW=LN)}6mnM`Qnp`!h=Q3%w$6cB&rv&dLjVsfZ&#WG*21=ZotQcm zeMJogma%aR=>)wq67&uT+lXg5pu}>x-n(qth$xb|053bWndKG)V{WY1l~?${$|pLoKNXP)-?uYU29p2zkuHoSc6^%viD z>(xGyeaFpL`!crom;F_W%~qZ*>RNWXI$R^B0HeLU>A(~)rU^Xgh?0d|%QtFyfX`X>3#7!j6C-^Rb@5{+s_*wVrP7>ivQe8zdQ$b*%aEiz<802fdDt1%_oi0U&R!7y7kVoVo{-&ffI|)%sUaE&ELSfHvL8qmT zSUiZS#-r}2-f0d_w+CHPMI3~V*MlOjItxJq*hj#PIW9(zoq!wOcFYkBnM6oIV@8S! znp1L=vJC|@N1A~0NL~h;U}|*tF)&9_F;OyA)Oqlyk*P8xo~o2khHcF2bUG!Q$xMDK zvXGIfMwF;vn4>)Acx3RHRl6e{zalXd0i3E(fvxI{l#+ue6D8shOF|maRWHdSva0AI z=A(=_s4F0fjK(fK+TE6{OH)mtl$KhPD3nBD5EDYOhgkW-TdXjoOJ;hWz+r=oDvU|B zN6LE-T_b{}8WgiDQP*;$7n=-vGVL!L^UVdmP|-dnIyRPgHOX%mJYpoT2dc<&{u0Q% zVL-;PK|;i=T`=H>$NB5(=mJrMfdp5s4s3*>NcAe};e}Ln33FOx(kPPz{My}68Jk+t zbdy4bndDf-N!=+(*b%4+0K!27;4G3MbJ#$pwH%LxLOvrMZLg45cv6|BW@E8(%AoxVm*?=W>NM@5e` zDp07)3&ap8_#+bn6%d$cAVm3sX4?^H(y6pBIT9mD;~KM_YO-354dg53NMJK9B+H;s zlokLPfSrgn3VBI<{zK3xjTn5A%R|m#ZB^tm&#~70_3@Qwp2N!mHQ<1ZMgC+>J_TEP ztQ0nf{)o+ALikRjl&(TC%uC&fhMp5%#voxHNt7fSO92o^21sCnb_{fY5`hkl%ixS0 z_=4GiSyiB?Dq>Tek3!m*Ce#{Kl5u@$0!K+{ts^rTd7fEVK)|&2^$0{HD&Y{EJ?PtR zy8gy%uW}R|cPcn#b6M*+?Nx3W-id=Kb@>RL2r#>po5Sn8s8JAycN|$_Dp1(;>bf_9 zL)zk*giH^022>Y6h8!0+nE>zfYp2UcX%%Ir=W*&VQkHj#jh3peqf~YLG8A#SW>o~n z4!o?C{DGH=%}6ryPKM2R2+6xlQc8Gi>FS7%s1cp|f`Lb-Wz;2^*=&e`JzBhIU}hBt z6>-+hFi0p;dL7jz4JXx^iXu~9UCdE13?9Tz(?UWv5E#Ex`7{#Zv;nH|%D7=LDK(bL z*Td`8h#;^8BimdqIfb1a`vREVXZy=el{!B2%sk)6{^?I&@K>q6_w}O}7x?6t=doXU z_EG;|e)jQu;AM|w`{b9WvY(uHlP9sIXP@}~!*^fy;GLKFKK2JUUdj8*K9OyIdFBUK z^Sv)$`tlaD_m_Pc+mqP9vhQQV%TteJZ{d+_-~00Z@_sw5zVD8!_2=bKvyXj!e4mYd z?<>4~=xB6zv)V}oGusn2u4%TODrXa^belR!}v41g^%wuo5H+M`u`Za6X;Eg>fZMP(TIXz01=fz=CPSU zkeO!aZe|1oK?XqultGQe2_q_^f+z?oGRi3P4BZIGC{Cy`CQ)M&HAZ8Ixk*mWUF+Q8 z+|T>}(sf?D+q=$Md)3;tYuDbpcGdGd^?RS;g_i|DpAeK5HrHURlBORD7|{w53oP$U zk-O{d=Y<`%+t$N3Bh%#mRF=mmcHDk@ zA0g)p84l3xMRFS_+b17k1K&I~*SFN%XmID3cNiHc29x``7ns~_Wx`%zcB2_mc4rxm zCb{VR4}Rh!XC8Od+;_h1h>w2g^behN@`a!Lkms@C<(s~8DX@IYb(h_8-R1D|BM;o+ zFPH_%X{e#SnX|pa$F{1f3xR_Lbw#ixb5ZEhoJ6VGwplONh)8#Yw^3}nSVtuzdbQ8d zS1&s^TTQ?rU)di{8KF$57k*gI;80Vgph~mr8cE6+azckcroDuiNt`8w*UnaVn!?k> z5j(2WG>p*9h>o_Vb=NWbMub|S4C+331J|Vc$St+{G8N^{AwaFh zqu;|^QbDEaKe;D8tcW4*|H&%Jl}FDKX;G}9||Moq_Q zu$(D@Q40nJ0BywsDZr5wl_fiTLU%qRG5NH^w#aGC83PTPjGxn^{R#b#AeikSW$g zNOLmX-~X=1t=(Mq`E>8O^C)92r+~A^yyxF{cdsztb=%EuD;rL8jgbVCZm_U(1MgP_ zQ(zJ)FttQ7s74Vil7!Z6Ia$0cGfmbs+G!*z@$tL7-o~ERy=07#?TB%79^emYn0(xRXTa(h;o5tZMrX>K4l$~XI7Qa zBx0;>YhDbDAVw^e_+}^L_<%nkK zI2-J+?_7bLE70^gGhXQ=W-+&0}*~cf4`MX}Ue`Mh0ryse?li2Pr_layU*(b8mx1YLW!QEd5nc?N_HeJcX*xq9H zXHt7?_Zm-PyT1%FdmbB1c7M6M%s%#Y;-Nd7a^%jZ9kcr(yRYN_mk-!w_5F5w^#QxQ z#w*Oe`gP=-4Nf{>7Rc=7Wsv#!12#Ks?+twK>-Af{Vyg{azQT)t@BVUl83Y3%eJ0wS zWq=esgy!5t1|%UwU=U^mPQg708mt2|>42PwCX@ydJWkQM_-Xc z+zf8L#U@_6E{%qd;hJUPd9ahjeuFS+F_>b6q0fE_ zc*G-W6fB5#I!?+)J#;Xmmo6hA;=xl!#SDq>KulrpO7|5diOa5*YwtWDLr17^-^com_1CvNkruT~M;w zV3|sdDH2UN#k0evuz9C5JTjNNu1VCZv@Kwhm zY2b?}5DLKX^~I!*K_m(JEnU3iLSosmW~^STQRKq~Wt3~eX?c-rLb6d3I8v?Gv;y9^ z8k(r+I9gS?b`(gLdxgnqY86#Oy`+UE5c0k^oy{(ag3YM;)L%qW64ImGP2!JC7|LwV7yNLM6hTmsg2pBO& z@`4No4#=!EqR<>MA}y+dPuaNWa4H5QHYIoT5vIh_r&i|2tZ~g3WT$Lt(I=$L1v|_Z zSQ2HK$rK$Vr0|Zdz8(G``+rzzi4;Q5qwp||CF4qt$OWTGJkt`@lo=VLYmCDV1!HH# zdlvpP&f=lKh-foa#^^jRSqQA!w6_awL)%6aAE{0xGmB@sECALlNGn9@Bw1!K6MhQ) zh5M2yt4dn;SXIF&eHmn5{+j|}vo>Z>Q*)=J8D8#QB%6as22c&1xw(9=e;k&Yk(HGaw*8@?ftHIJn(rpFgO~RO+QK?`Ge)m}Ml)j>#|kBevF$ zswOnTs5)KJK_CMZgPNmaFl)_GO~H+LVNZ9NRseaRp@v2$1U>RB?G_c1WI2%sco`#6n8uEg6A3gy zm;#*v$Sk-QY&s~Qpl)_Do<6xQEe{wo`y`@57+nZ;QR;#z zU<<~a>PUi0MKR-r-Go3@GtDV8DYU{jDVF!(SFdJCLbc>lBMyAs@!SJ5Pc7ORV{^A**$bS0C z2a!Kte(JINzWKxhp2z<76ZgBn?B!)(xmTDykL?!oV-H-wI@c8eKa?k}Y#sClP#58QdZgLmEF4SRTJc~k!<_2&JzJnq1)y}axe^Fh0< zx&O|uIdIq2-C_oo-+aKPzK`wxvj1Q9e^STH-RPj**WP2BRec|O%k@`$*&qKNUS4LY z<-EcSsUfHhM()lsfC-s~G~qs^3e7=zkS0J1W@=UH2=|3{=|H#uBajGP`V=-W=m&pj zG-WOj)X4(Eu$(WJVNX13N@}bDq@KqHpBZ73gq(ewRrr^8-NmCXcikF>TLl9_vqOOk z5z?XQZZAknQ%R!Lc**Mm5vRt5NVIaKFKaB*97J|0&Fo3%7H-eO+*=Z8M=OGP!F#!l+^#HC?jtUA1dUQ|llX_l`1} zc!yyfDe%?Bg$2A#CbHkAHo9YmU-ChO5s!pJXGBX_U|h58onxB}vty60Pp|abn5ow0<6`@faB?AsuO9O;di!T{hcIk<;-bhWkzStJaB61NEgi zKdo3W|Ih#UFHLrOzJCj11IxPe&YFqSmrd>nnQCX%R?ViJuxZM5!z4*azp1B|LPJPF z*sL``t?4vts&pC`9i%EG#OBDJQD<=RlC|Mf7n&x>X&k>q;t`T{gc{la5zt3rH=Geb zG2~P*94SR>?(8Z{k%2HJZ`3rgLxN`$#-YgwM~$v>tZnY_5jDM%=p_c$B2Ae!5iDpL zA@K{&m`V@LntBV81sPz0rtWz9gb0&sjMmg>VrWt@I!%N49h8O2gsG8;&FmxHHIXzB z)g<}Kh1RO{g)L^;6F^;b&+M?nf~MpnS4^yBf5wguVhv!HUozi zks%OrO+mG(q0kRaf@y%f(`-1(k{}*DIsFc<%z7@GkU@kI1H$MvNXIDqZSUb4n}3`9 zK}!V7gyUFdSWZNv5U(+qgc$s(9bl|8W zI+Iy``UwjINbyh5U%KQX)#rZmDXf=*%R4EW+nO(9HV8H^vZ|FsO~REW&82FXctncE%SeDWinNkiqa^zQ_||hP+;N zr$!-`3avW9j3zv}4AU$;9!Yv&K&u&168Z&$j%NwXML@C{ z$5ZfI2NE02j2GbuKvY`w6+=yUAJ-V;;+-91K^b8O6W_Q5!%pMX8f8t)SzwS0u^H45 z(qsW)c%hl)#mk0J*wk3a$T(9?pRAl!RF#%-yhZM0Xp#2}wnwkMtsIen$w6j}CO4O# ze!`86hv8)~*^}7MKKrBxv!8tY{%4+E;Co-sJo(_0kK8?Zh57OOp85JcUR(AT%t}vX zKlAvl-+1hnP_y@!zjp5xryjqr?|mJ(*QOrH-hcNEyuS=A`$RT!iy2HlY|l*&pS|gX z_S)pI+5Sdq{X=$N@0k5&P5x49&$SQSbqzO{kJx8DVEHYFZs}uRUSW2B+5buTKK8+T ztTTJN)ppo?s9as<$TJ~}WR;1<-y zkN~#8BUFe$MGBxF{UEcCMW6Ma)A711A>6Pq9%f`muV7{Y@I}HPQ@|#n?~l1d%^>jT z&ELFm0l=mfEX9*RngA)wyy(ImA)X{5d`f{3@NZ~b2nNwd5V5F)WcW(G*w?EPgLn}e zCTNe*OJ8GzVv-vOh8kigA!~TO!~hNxstQu@Mg+@~7$(umRzw+ga-9heoY)sE%R|S?mk3D%X`%~|Sm)&26m(M%rJzin{{AbQVUSalq z?3s^!UF~~cKK2DKyT9ztNP8!zcRRiIcz_$;71{aUF`@}|MNqb{a4vB; z)e^G7w&n8-b~EMP7T_HI(wDNnnC7J_zs*r06&@W7qJxS}IW1Qe7!ro4I)Cb7;9&$! zlF4|E(xHaf8JWq2z>;fPoC-VdN>x*g3Q}1Y^2oaNOO$sa$nB*G3o>7$S3@X`3B_DC zbQxEHA*7!#*9+Te06`2=O-`aqNf^_eJ0i}f4j0k!2Fo;=Wk&u#r&!AqymIo4ieY1~mL^M;QL!woc zDe)shk&vgRH3d~dtz6?-n_ef@5(^n*tzOe5WoDJSi`iUD8U+GL^4RPf-LwH5)NM2^hM(6v&W!`iiDR~nhomvQxdx=fEEn`HyfxHXKp2y8aegyNyuvBMe~6Gtry$k^)J-bZ1#QC*Z-I8ssZ zu0$O1&M65V)k;!zl<6y*xJuO#zZr4D?1wdD?~4Rx%0NoBu=Ub~O^ofdy=WV9`%+aq zTM|1`8&tc{cAgkj!g5ud63AMaVM=mHM8Y^ZezHWT@G|&=NT4AQ2^ds`3h|(mL`j`h zrz2}ci-{+1sq@l!#V~1I*pZPj7wj+#b~P8|0Z)wYjP?F3OiIBo7<77X0!EiHoSO6+co}18(%!Kz&sIniZ8lX+^jJb9c*mE7Fzbe~mKu)1~8HZlLQ<=dm zu-}j~J{=EvmNiieFXJ7>Txi9ZAb;lNd>v?2l^+;ZtEutDHZhF&bEFhrLiP)?LGfZv zSs3FYXBp{*M}?Grb{Y|dhL9SbCLu>kHj9n?f0Oiz*wF>gvIqsNoCs%5u;X;+jN$as z8KzZ@uhj~-o_)ou7n7BL^CR!9IYnFieyyDlAq9B($;UirwLtzRuPuN7Y2U|w;)(gd zvj1QH(GUE^*yH|u`Kd?m@i6vyf7$)zNAJJUZRPOtqw}x5{e}y@zxi72ID2 znGe`w!#O*z<4a#}oVz*l3iISMUvszeNcJ1|o&_(z!M{z-;AQ_Ob=d6nJ&!$i$JOS& zel`CmI z#GpxEMuxTS?GhTAu?PUs#M_5wT&V0w1)KWH8}Ed4gle$VfI>Lr|#b3q*+|L4% zMj1fnT@w(kW6=sjM{$z-4-sAPMp3A;;Y(j&GO+B=mpzZ|Ui0ObT!2yf>X+xVA?3+q z*cW~RSU&F~@AEviTg<@nd(SxTqVvzb_<|36dHJ%>pXbk)Z@e4@GkKSQt_58w@QxjF8(qpa8_IUAZNl}{gxauCb{Hz# zSt(h_h(i*S7drM5B&cJy1C|0`>}Rcx&tHsY{u?NTVk_FhBk#cPB#;_jV1;CuC}naA zGQbYMRfX!BNRC08@U|r^2$c@awrUtnOwpl`9o3AhW>8bR8gXW)s-f9S8zUnO3R_8{ zS9?PCS@y)HbCCvn%LT(iHZi&@DIko#C6gRwGv!gzL7H+c7(|jC1K~Q$ z9e3hP$UC($iQ5h*Oj5nf;XNnxdMX^y}8D&j`6cA>b7YXA!nlkQqVVoLA zdAAP5kWF&-{QziYEgl^icf6Qlc7FToU-+okipwwWD&Srxn{))vvWuYa`1uHicqn_@ zqL{TdeSDEHqROT)IMPZ|v>gt3t%9k$l0-c8%{3!PFFKZYv(Eyx?AMVCAt8(rWr`gg zAbC0US*zI7Y}?Ib=t`=26ah zDKnbnO=%Qj6TfW0K?no>AZY;-Lq^aO#592FM04gTeX%iu=SA3Nwl%CYcbZC?kq*3> zmoXj8^4PpI5j217?SL78?1nO!CK&LUToo@2MpWtOh}SWI9F}w^4(I?$?CPZ=P$qlf zf_C_$Bkw2x&Sv%*gqn@IJK9=5nqHx7!LU3rK6AHG01$zw%u915Fw-O$ z(8HVpDZ--@(Zm#X0jt(g8Awr149*6MPDr5D-Hy18ge&9kaYyS_WmXMr|yn1LKWvz`5zu1TRZP(_|8L32+$~vxWxf zA=wUINk}7VynhI1HAh)v5JNN6w#1Uc&|$y1=1bo2`WBCJ+M1>@K^WC$gc@d=&yJi= zBV1~YuF`lJ=SxSDjOZw(Itp8ansQVaJQ8}5JmuoFh99`+PB-@%G*c8P`(Jf7HJK*8 zBvuzqWdozy$)U*UG#oZ{=QKN7b!lqqPC}DHRIkFLS?jQ-i@tuQQ#Mj1K7D-gU~;F5 zA4C|@7ckP4ahik{aBJ9N?&R5Nv-R9!)`alZvwO;Lu`hZlL1U2FJ!MZ{tAfe!vcHkS zJpQQfc+BtqvR9bD{`LD8EV$GCWqA3of9S`wC%*Ia0(kkc`L{mx@SXlf3V9^^zFV)j zrXWd^u<+y!bH)|CyFZ(|B++Ei7y)WO#K62g`NA0)eA+t9E zligqTuTq}MK5&Rw z4nLCcUN9^RBREMJE`~0_Pq%1wA%RaR=%SqMNSy0=(?w*uM8Q`9$`D`)um`>TV-CuJ10p4x&!@ zE)DG~D761{)R0hcQO4Ly+cJegG_~tc+9cTDGJ-PFHC+?xn&SErxzy_lkw4oRB4bWuxfT2484hKV!@gJ1fTkt&^Hz_3q} zM?#LoUz~Z-iXm_Kjhfj=qU498k+CeN%-!fqpH76AXMDoogDwS@kRuX-De;&{B!d_y zRMk<~O8oQPpYjKJlc&gB#O;5&pgY030CnbfJ#rDzWFtXh$VfJ_)2P|UAeN(sGl^>E z7kgV5M(|GBt{QlzY-wBxE45^~BhGzEjt z5r$2J#b_E91csQZR{IXD74MRW>C`tAQAxOuEgdsI0DMy{MN;_W(efXzFn1uwO{G+2OZSm+C+M!_P2M z$9OAWtFh~nalU$(gc?&B$2U6OF({ovqE_%MG!bP?6+?%R{dAaOghVii!Z4m?5;QWL zG98f+3YCOq21m?^`mE>Mt}2zZ~n!%pU{LM{PvSy`_5AjlYjqPk9_;7 z`QBFcHSE89_VFKm?}=xh@ej-QKK9TZN^(zSKk@Kgs^GIXnjgOF`uo0m#pCziaQWvx zboY&)d+_#)58r>*ygk-AV6S!e-uX4Vzi!ppuU})IU0&;5=7aa%;^>24@8f`EZvj!G0sX{0lVX3$;$EVwsKhhLUn2Y7U2MWtBf`{JK<(U8RIogq`Nd z54A!y%!nrAd@-)6`0>tW`ld`LQ>u(-t#UdN682d$(PJJbv;X*`_5;ZF&-P5!1*?;` z)3WQAT|ql8<%C&yCy}X$wIZBu!XqA?y^*swvrJ(avCs64P_J>>$2z5xyNOX!3DtFF9)bDcxjhq>tM1%Obt!B%Z)SyU}8a*Xv^iYU}Y zgc9Nc;fYPVag1tBWlz8^vLY0xEcsLm$n1J#6q>rmVPs6Y=+w6v!OM&+VVLFJK$K?6 z{2B-s7_H)@Mj}oUd^&i{n7bxU%sMJ{q{(vKRU<;0ja@5#gi&AJK_?lONlv-QU^eSQ zh_b^Vh1}ss0Le8iPDP$U9Wh2h$fFV&Dc3BZ#+U-3Iie;}h?-XVctVbHs?6p{b=6Ga zDHzD0q0fR=B}Z&3SrFlLmME+0TMk=7rG;eJOu@luS?a9e63ag zx!|2;mJKlbzEnW&dFGh|5sPEiZSQo(n(I*HgoKS(fA4JztxMX|4o z`>Lv%bSZmyj=X2h1z+UUDCb?01;b8icyekCik7?Vq(Dc#$xr`Ec-MU8iVuA7tg}D-e(oYMP(U1^yp(FZA&eud zr6p{wh9fo>S}kHQiPOids-}KRm#f9gx>L{*&%P30mm;xZb!5;%oi%4F*brf87J#yjBpFmig70iVA$LrfFJ}fv zN6H3^C@9TcS>?3sF@o1hxzUvneBCvTq2cM*G~}aiatb5~nKFuTq!Vh5@XTh74$3b{ zBf=O_8Pi0FA6iJbpik5@de-Yj=Kq!R<;s zWc}3q+a9{(+Nb7!^_ol1x&M|+=HK!8Hy*gf?ypaSc6v3iyzj1S!^;Ql zwfRv8?%@8iPh@+GdEf2UIAG^>X8wYCvw1tN;pJtJdG_`aL$xWy{lZoKm9uXxdmm;ODx?EW%5=vz`Nt@zScuCk)< zbAo9gVR#v8gXY{qwdVxcfHi280(=MV;mN~*fF3kQ0xIe?fFY=;BcUcbLydx)+HNju zVgzL7G*CPP2`B&DqzhhnJO(*ARaL^vC~MqNqk!tGqyxiJfM?mKgH$z5Q}7lb<$}S{ zgcqSMVqlgvn$ya{fQ1q)FvOCUDb5PpN<@3&Y7rgwg zcN~5Gxqs&U! zZa+MFk>ino1yFeUlF%19q0&an5D;bvnG%w*AWoSg5tvdhRg^*Jmi&_FQiS8ILXzgt zDR?Dn)I%ZfX)+$Eih+*S5EFUhEt6w7G)e4!DdXuQnkgHd$WfAMFY$U6)iBuh()Gdx z#e-z3_{lzS<_3)o0KW6Qv#x5!v&P66BYqGsgSz54fwSY<#HP8rN~5R#lQY_TlyH6kfy%91Q; zEc24VQlTu8G*w4bOJuT1S*uX_asVpJNe3f?DR5L^Rmlw;QIiJ?yMRejz%a{2QH{V7 zGG+MV2rWOFQY&(~EhMJO7zMn(GGMXkK602fLf)s14;R*+UB~D`jeWtx%N~btDSYA5 zFYwoapx}iJX@?IN1Z^nT*i!%m5X<_u z&0|fpj5Mf(22$jX7e|d5abWtT%kUdW=)QbPwE-FtAzu8841c=XsfIgd9jcDd{8kF4 zz^Jt$pB{4OrQ~d445a**iuRwH7)M%l=Svb`oDd&qBM&wQ@1WTRckB%AnOEQRYhm zOPx8AWMz|8Vr4rL7^fTw%wJkdf}8c%UUwA<;s`O0N;vPB#-d4gL?S$PLiegNAf8FD znJY41mWQ=8jw#jFKgqPPg|v9df;C`7N<(NYglu*~U_`4Wnwo=%eLM-G>G%>1ox0Gc zP*jx5)#9bTglT4qP1Q8BpGQKLCBNZthZ-_r`qQHhV2=1Pf;4gVGtLHOfRI_^6}eKJ z9@SQggiU2?0+^0L0L7p?wUN4%i-cK3G94JnG{&E{X)FR@ias@#qv`G>xmFsTD*MHT zQ6;30&wBw-Fc5wzV47J%%6>LQ|F!-#5i!n-fmLHve8@$}-za}91Yj?WE3@_h$%>|D? zaLqH1T>Fl<%-(CK)sHw}Gq;%M?6&5d-PS?-?y}zgdu)36ylvfC?vZTIWBb_G@ds_^ z``8}Ie$(7dLFPBiUdR7Q?X&$V;pL6fksH zXe+BqAyX9WZQ8Ke-cUoDP`r{H(dken(LE#N3A3OVgNTAs3_KQq>aVhzoomd#584A#s4he>RR2bRhH}7K< z6f~)FL6a8|=ABTLel#63r@ZJ$m|Ur&nXSsgaKUWh6;f4xDNu9icR{hmas|Oxb(KN= zX6~%sdES*`@=NSOkHbY~mDAL~1?jq=V^=6PnR0q}nP4hx&LbVCdgpd>;pxST8_m`j zJMwIi*!e4sQB_)2e))1vXnqOwB{X|!3qvn^YzoxO3+O?J%#?qu)16QzOSs_irB~RT z;wg7%1SH9sDtNKEFp1yIWnrrd!j(wClb9FYY%)xdvt|K^hYL2FP#(!moM;U}E8`em z@adZo-IX-;(mkF6D)QEwUNlq4PDZr0o-xKdiGLw+F-Vi~=8IWP@g-&wHrdygL1U?^ zm85{PlPMC)qZQ8x%gv51qm|Qw(NQl=8B`S>UuCTDQealA$QuH_YIgqXKmOH8{#C24 z;_^pRN1Clw&q%DW+;aYx*$40~4CFwt(kw6w4_RG!M~%&oGGVKc8i`;;&K+O63xItR zWd<>2gQnH!0SU)DYksP-4P${F)!|Fk5jrS13|S0(fw9SNhp#oDL>M_PgvT-qjtfgs*8@C1JQm)P%Yu5S3<3*si>Sn) zm0T+wjA}^`Dyo+zVLRCC!V$}zpe6y&9Q9?1@bCZOr(Qb+^pI=>l;E237FgB z>zrv3JM1IIr|t9J02vn{J32yh(u-#Y$$2QxEC5ql>F|Yw%`ziwGCoXAOUed?$w(LC zZl;i`Auxym&;qJlvrG<$2Iw%rT~3RH4hgxA%aT^pZxq6KWn5cBe5Zxi0k}{Rtza%3SR4EPG^Ru z%OqC|{~?<9;k2bv)p|xENhr1@k~J{8Gu1lLOJ4^?$3-Jb2!Clxv-RUx5DW>0x}@)j zb8aDDB1tBBv9UA!5|+lIQl>-5nuU7*-FG02Dq($Go6AS+wHfjj^O5_` zI_|)2kJ)bv?=K&|*9IW-ab8kBWXod@+-!oB=WT?J++*#xAH3x$hi`k_ew%n6`-DTc zIc%SeeII*+*Su(@<(|jO%PhUr(o2EP%OY25&ttn6V|xjfdXgEw>z*>q2*buhf4~}8 z2Z4gI;2vaG6I@A+GE7GeoP_u&Q=<$LrVNwP$CI<8yI$eJGfz9&2SV3)^($Wd!WZnc z!#1`J!fb3GEgDvrL6*=@9Bmfm0TWlTaSS04yu%GlIy)U%J?lI+@oUVRWa! zk%0rzj59*0WWk-~aPg-;akg8@Krv{HJf7W~%U)iFqk(9*o84&s%H`)@e(AYqp8BSD zzw;OmV*AeXnha9gAH`o7i?=n(VCX zNZSXhYNg}gB5B8HyGzG5warYs%1qgghn9ivLfCgLIsc~lxBBSS&D7-Xu!>O#Q@hh;*2RrS(^=CDjo zl8Z`4*ra3sPc5^A$u+S_Q+OPOgEA=WVx~!<0csLC2~$~9(ljZW5Q7EK07XcWJMyTi zouMsU`#_gJJAounA;$H^$yO_!wqeirnz^H0gj@@_*c#(>0Nt%-87}rSFH>C+oWPyE zJ&*m{fBL1r@6^QI|c=mKQqaaIFwJuqu%nheBCj_y0JxAy9rZnUl=$V1c~rKTX~ z&hX0S1s~j|?@dxwiPc22TsaCzN-sx@Yn2&eAsUFRt_D62^Y{W=5nai+NA3^3}Q6tMW40O-oRUgtxQ)sG}fmsk?zwZ zLc~;rr^X$#c~m6~yyUdjJPMeaF%J8SFqE@EbBs>d63YTAIl1GAel)$nsAZ&%y71W8 zFYRGt$)2zuh%Opmcrh!`$@r+|$wmstx<9LKAf)yc-una!v zVOI}g1OaU335Cjz?o5RZxnQR=i6-t4Mi@w75-UYb8O9I@BbF9q*m}=pb_L*wmB`Q^}1T z>M?Mj7;saCv!FR}OmdKnK|Kfvk|HVljxnPQm;GvRn z7>X}W8E=}(i8zt&s!^6lH8sM3r@+i37YGBOkps)d!bQf5gfEbpTx)*GYoe)1HqsYu z233Wc8b146pq!@A(rTELwTh}7;*FOAO$y2gPr(lel8<3cNlmHC2#j(`8}Vq$T}fl1 zCZ&!I#2e7adT!F?$~0MHs!bVQYow`!>&qAWf?=FBne2FBoDdNrqeGO1SL7HHO?6i- z$Q0_!QD*?l!wR%qqj;;xp*Id*!aqcI53t?0VF)e9jsTh5S1#+_kGW#02&z1YbYfsmJ z&vKX!?_as{%h0sB6Sf#wQ;J{TV&khMG`hS<)HKA_D}zdQ(5UjI)mZ2=tBD;hhGxpl z(jiBsrIIj@m*WZ2P#Sh(i*+X|-dfG{Qud&Jt4%=^&{b#SU@2+bA%X{y<)*i20bFUrOowwWM z&!mprXLH}jo|JPo_dNCydwUXl9eDYT`)qW~{+piohOJLI>UHR(!?r%|pjlpCK4$Or zkDa&i+Ya68q(ip!ee8KVuQhweHP>BjnU}xhdA$5b%Ph6@gq`5!z9YTe*0Yv>$xFaP zh!0fsurvq>qk(mR9t;TF1%boGnt;NH6bp1$f=^1{(v&`ofaB6dPW^V5aa2KoblL;gSdp zKn}(#Sx^l-$Lk{0Ft4&EI>Igq1_(rht_-5Ua`6kGU?>z)V;3_ycL<{@t5Shy->(z) z@J#8I<-(R33T%3hyA&CSp;2a>5fr}mMK37$+>e6u?}wLx;%+5hb-CBNO_tB8P3|w_ z!DpJj_jSpIAAZMM4|j|CqaQjwyzG%|f5Gf7^X)fY;r{a7w_NLy?1%5a-Th_zOi!h{ zSz|+D6QZ<@n254g-BDX3YFQW^3$-or5T=t0HuV~I=#w}tuxU6t+7cs-Dq*qJN}?lc#l}9$ zxPj-i9W}KPf1|FX&xqkG3y*vZ5R;nj#gOLku1ZHp7}+*Qa2(eZSX0JOP!$gaJRu## zDI%}ebQnfaB*RGIiG&5z`Mh(i3$Z-_!obTIa%X#6M;kns9v7+Sd(F&OLoP)PkQd%b zl*P;l3mNq8Wbm2rcfbAhLl4}0@rCERmf6|5V2K23b~4uy%tY*nlPTlH_%9gVB?`E3 z;P}nWYU~aaoiRbW(>F{KR|i!o^&KH7+maA-0ZW%&Rpo7xikU$XQWzw#hA%wVESD!a z!4sA`qr*r}c@vTdnPP!ombowsB$_Npj*{Sm15$_RwB*XL?jjMqjwGX%alFwng0$*u zTp4FGDhy2&rb)+0mx#gC_~KFOGE0r4uB5DqA*UlG)%a=qgJ&cQ>~oYC2L0|)FSCO; zOqK<}^%6;|?kSkEMlGx2Y{vD%sIrDI(%$59B!)sNZ~1EAY-%NGrgRZrdDErsgyCC| zs5}rh97#@Ismhwt8ImUZEC`iljMDY-HLI=cr&RARTa%PMr0ojmYUumep3~NsWy+S5 z1=dPaI^tSv_E)w;jw4lT60a4BSw~?Pq!Ql%Z8IGyB&zHer0#6$$O48h8#WImX3*wJ zwJn^bZi@wa*zI>u6B7opjSmBow8;mZ?B}&Is3QwX_Jx{63Z)5UjoF$w zRaFjxhz%2#?1zFDLmgAbi_HpAwlD-t4Pm%T$kEUo%ZY_Fb2oS?rL1q!YUN!F;v|PU zOEMk_nDN}v)YJk_$QOOFscKDG4)G+a!jtgGB}p1B3E7iIWP;2ZA>IOWMN{IXnvRDJ z1Ry``{O!;FhHE#e8Rrf)oPyMZ@Z1?oLu)n(8z!bMG#Q+XW$Kp@b8V@#K4UE47H2D& z@DGs@X-ciNSJgpeIH*w%LI?AwK5`7uL^P7UI{M2#rQi^m*UTiwjS7sKUwGkVb z32e4{L!AX|2xUWQQrKjqUWG&%;pv+o`;8l~z51FfE`yqNaf)!dxcrNkROOdi7jdQ- zEKc2-%`#puR7m2?IOWt99|FI8G01+sm{O$~Rb{~dEunlZQZi1V391)~YJORZR~0i- zRnnPuN+M&fK3xmCRJjs$<#HazxS9-7CH&hTefOt-`5j2v&1GO2aQ0mtjN51ZqBepwq_ImL0!TW5w{;Oa3Coi4c zRCWnoX6Y9UyzGNi;M;CHzYbOf%J6_KeCL)ZfCj9AmVhS^r;6O-B*cW5gPf{?Q_66k zD%1%lGNOtHKl`XxxKHU@pdg_gf``p^+sWn@}*0G(UgbaIlu5SnEdf`-Ulyx+*;Mc*r0Kr z{{oOd^XYTQ-C~BCRWCaKgUI)>(K#PH&0jD>%E(=2-^T`)@4w@EVA=g;?=OGz@yVyH z-MQ=5ntLbPWL=G#x+8^R03aFz` zD{D#;`t9hbrIQf~wyd_kcDb5#BpJPE;<*??3YoR7(q-CFI+dCXfn}pWVK6q;CN`ap|Rtl|LqqHB)2Pg9yc0-gGZHEhlBs3RsX`iClE# zK{iUAD#kDgCs#(>!WrPxq?9f;StjR{uXrP6e}qJ*tm;Uo94cEUgjy%@Ec_X&s_twu z)#cBSOJEBR&nca0FH8+<4U=mgIpWT0#}orO3y3c<6OyE?s!P=+mMuRWr@gaa`g9~@ z!L(8<35{!%1*RMddg;QK((yQ`<678GYrbUY;|v&cO~Jy@oqiozDCRt7s(OTR|TRi#c6ykV+Lrjyd!JXw>x~!-6exuN5lM=-~5u$`hWS$mOb(KqrE<0 ztxo1^>V}}@)Jk*o+4f8B~FvyOwJsTGhcZN2qc%u-EoQoJeOw65fG*Mo!?bGJ^q&+> z9){&WE`1@X8YwyNbfSSn3sTi>U-~7XJSY(fBf8`DrHS-njXaxlPyq<9JII`(@a%$z z?!WV{TS4aH)Qh!@&{xHi;FY?|SdrvWtBy=jAUC2IJchgorEZ!e(@HaCrm5U^K;bPL z$%mKO_jbRzwlMhR2yj*+Q57dIgNz6^=Pti87W{8GtRcNbOLKbqMAB01a8n8(sMQi` zF)y(caDBDnTa$Rhv3R=Hw-gCEVzyDM;vEZI;89MM^ru6jzc_JJ0*jCWQ;x5&q{BhA zAt-a9B=5Y08A7g$eAujGaZ+Q`V<$kdz8+&!`nAg6d%4l<6rrlg3sw%v2BkE=D1+sP zoy=m8UIkf|uNcZB6(e%ONE1p~YuU@}P?nZf(NJq3f-xyIQRW?C7$l^FsqaXBmU&4z zchm&qT-vp)3z;jCE1-*r>qYw$*AYb0&f#Z&^=!A5d-@t)hLqh$R+0eGZa0I>uybG1 z4i1mUu;Jy%on`lz-C0H$cbO3cJ#%yUtMKxp_g?+=`>yi-^5qx5=k;5y4%l-uAIFZ4-G5s*mpzjG#(g}Pz3E#H*#;ed@D^?{dkp)S{Wm$`kS(C* zw;i;FPh|Tuskgsj>k|*&!uPQc-D|@`=giu8&1F|!{(rr{Jn(YJZriOkb7{79gdF{4 zF!<&kE35}U;^E40E@i+C0CeNkolxKo#Df8;4G0MXqA)BA@HoUrA6|A(xm(iiU-L!I zh%fVOy~QRZzR674e#W0@*d(0s&bNnNl{OM?w!)_Z2$_mOV4$=nzEG{95IXM|7_!W)s%rO69-h@6C9rH)Z|jrEsz5DJ zEI*iAZn~BoY9PFKx^*`?KNJw!G8u~|0U#ZH*DJhy-p4=aVQjB0lVIRwz!^-2ktryl zW~H0UU-;Zd&N$_$(@#F~;~zfrQy+UTuzcxfKla&Ao$aY??=RnV^VPTAaOLecUgi7P zPd+^P#}jD6Cez{2&dsh>wL`{6(Gg^GLW1|CE4h7(4W|RjaYX3&!qXwNpJ|Irm|6#8 zM_dOTM<`9q^zq0VQ|8$1D&s7tIpR!{ePbLksG{`QPg!6#{GcDksLhuH z$6LCd-ZXL4B+$7ogZi?NFC`vqM7#tR=+`;|u4;hIMO&%PrOwQ%?X^gprTw}N=Y|c* zCwGSzf~K;>2oh(%r5FN_hF@;+EVQFw$Bh;Co#CCp*I#>0XCx23c>mumArDQtgiS6A z-~YA;R^5h!aE(;141`%D;)p?ii_Dj_s9K+WaT?qZkNk*)g+|SSj^1&0cv29OF0D_K zYwr!nu)rF*(xn8xKo|;ABv(f5D%0huD_6rIZ|@{#jB$};*w;}oY%+qx(0VIlVv|Ed zqaY+gUI>$G8cV{!jk*QHf~J%gnL!?rn41D-$K=+e%w2)$j#@eSRSu^PlU7vm=u-Sb z6+jjm4h2nFTfC%0fn3a}aWUdh!iz_fYDO}frjpq~7PEM=PzH1nq=n&J!J1NvlnxXB z8g}2^xB0(3hmWQpGcSTc7{nSoykj~>@Z2%WCMvwt_!V2>krSFw`LxXB8R3J>mW}+A zBRY7cVXFGCC@&v*RoQvo!nW#!>aUhvl^gLrz1{p`j($UdD)Yg&N#z^SIV(tX#P*s{E)mDXhH#>6TsS-k0 zWG6sxc{9rbFRZaMs=xl_KM>l@+5n$_-p6nF%2jTf1G)Lq#W_b8i8Xd=CBb2fA=m1* zk=->TvWj%ZE0F|} z!#k|;?tH?O;Q8fCD$KPRFHvoG1u*fo@BwDs;U@Si0=`sDN}!BK)8T)sVMKbx|{CjV`?I!C`YjI{--@IcQonB>0HC)U% zGKvDwQ9K4nVaPKgbrwuFk)_U3nIvuM|K0FNGR>oGL-G8Ir@<;qW4^dkMUJCLma5X^ zS^&BSx-kMn7)|tHaE_4aYX-&Bimf!)Egp<`2>%f2Sis8GhLW=GN-jDmEGLq9nwUD) zOAM5AIwbW~T_VXLN1BQd;}8IBL02VrstrMjcd*dsDAyQO!VV(dRrAOoeFv$IgeZwp zUxyN93d)>PaCX(#VeSM=fka=Hg@JFRx)%>7UUyAHfld0xRT%6e3aW(0rBxN**mI$* zV`IrFNoOQ|**91x1g8bNqcTw6#?B_;@KqPpEXzY`q9s%oydP1!Ds_2s?Q_v_f$PGd z)xHQ}T-PSTXP@qoYz%-5JHy5JXC9jmDR;XWeC~_c6Da-Ug8;IBmGXfvCB7TY&~Z;_ z1Iz9#BMeyW{bis0@;vs#cVFrMr0%^LxH)nC)nDLY?0t4w-Th_nFVEX!J>UCsbNP^c zwmj~D?I2}z{DIpad%!ktn!Cl(^EN;3z^#uzXsc6?+UebI+2izMcY&0B8T-gN8$ivT z$3FE9+w^_xBllj<|1Tf4-{$-8x#7(BzLpu^`wA~FwbV<_{=j=7SwC)rsE`_n2OW2d z8E}S(AzYFmvyG)|I?xBSYI@I^?||&!IN*^Qq01H>SR3@C=KeA~NJkYVK84DfvAY7gkUctioO5qO;-*x`E???Vp%J;r}9Q&5*F7tm<3+}na z_r9L^+T_l%tw`HKhl!1%O`r{l{Z*uD$3nqwlcw!fTTP5Djbm*bV(n?k36RMXELWlVg#K9&ticOH#&7~#|y+ZIyLv7}MZopOXHXNtb4 z@aB#+$VgRph=7j0w>(fIY;+9fm%-v3-m_2WWJt(@eVU6!J42TYe0u|@Ym8PqOs$%b z6TaAZ1T#g&LZfbg0zkus{7eQi^H8_bHB%Y!W$bsJ{MHxEe98ap_n-BffBd;tS1xy- zT>>0p`_J9W`y*0ha@kknnKc}+2(o~Q_L;q_e^d`sb7c5*%(b5yI5m9de4A}Q%B}gD zRUPuW3)rYx^A0IfK1%Ej=tB>hM~%=}U>SG%JOkt3e~htDFm;)8VZzuK8IFX=l2D}u z2_9kE>$aTafz6VSvEG^#fguqJfLuKE%OOcCN+jj5ff%yi7{%k)0AsWOw5Bj^&3IXR zKIaSV^NR$-CgP$zSV$RXfkY&hkk+b<=%UIOQe`k>-*}jT4ncLcKC1q{v3mPF>><_*NVSromjZ|pa1Xwb@k<6n!ER& zlV2fTwQ8=#z#ZWHpa1sU```QSjn`l2m?ICDIspLG-fHxe=70R|U!d$WJqdUGnDswj?qhO%$h#`aco+d^?z7*-*dZJzU%JWZoKJQw{fYN{gmm$ zeR%P}p~fp13|NOjhB;6koiX;26Is%o5v|PXh%lu{lVCKdvZg8|2020)2FU}MBpB5^ z21uDA9IkUg!TK~(a(Ux|9g+-ATV{kotvXs@9V})91x75bve!~HU#R(F)>vdicagKh z2p8;=7kTy@pg4=xj}`qF6}j=&s*4CweJS%|3FT3V7fHdW3Qv$p@IX_`$q>j*x7lYw z6UISm;o-+{@C;JZOPMd$RC7w9gs`Sn6A4~x)m-O-ya?;7N_i+`d<;aDLUGdJU6a7X zS&}Ira;v&cMRBScMT8&Wu@)s^s?imIkW>kIA!)pXbs1rkpj?l#z?=k77Df}ME~6x& zF9z=L#mr+-CFgWBHCSB;8BsM&P8af^SMo}(&9AeQFxV;b2IVwPXT*Tfs%heG<|@!N z3m-04MXnAmLy@X$?&RX~y>B2GIf9g7WC*$seDyUjFxf3;kQsUOx<6L_@ejZ8lfQfh zScZ#z1k9~u{~`781Go6z7vKyjgVKagFSy-bN-enk$_00P`Qf`Szx<-JPCjn;L)-

ckI3!o^ZfsZ#`s7_m_{Fx9LH9the`0uib6iHTypHA5A`vJ^4QN(l2JF(^62uN2eU*S@mVv?dzWX%)lxk-SMY`AQ!WVw`W4lMPZ5SX#;L%kQc;uZXxXJ?i z;sK>~VVpwuYT3kq=X~jc_tQNikg2GsF@k4P85uH68U%((G4@gH@Iv263F+&|0yu1= zrK75*J$BvEzS@=RfwjPk-QT#~q{-C7*#?#s7-9$+W>1_BI%wS~O*Na5XX$A+BB>b|^FS*m5?oM&R7D#~wkr1-LV{tN0$DnlQoq{GM z=wVychRwCcZVKO4*0(jBt6i9EDSZ&5jWmV@Y3JGy*klC5K98C>Ey=uKMCH8QHNq9m zNt7p?O071mpOZJSj$m-|mxZ!}}jQ(%hYl{^D7Nzz6*b52tWDN`^($A}WW#k1K46Zp z-2s%SZMNLhPTP_0r$tZ2$gTK|5V=|@4Pm56a_}TBMlN>A>2M*KmP#wcY7u-hD_w|! zjPWScW=afVPQR7FjwXbWVT7-VCWaABtu!_X88pQPj!FWju8D*!qt-2<=_?*3Q?-hL z9kZsDaaFC#6d1>AqJRpIzQB}P8851A;tA_T**tdU$uIjEXER4)#;``spbDTSri_lH z6=k{+NM*HKz!pH3_2Nh{g0xipa*_@OO=+^94(~;x2{j9vbmYj?w~>%K<3jRj5=&-6 zLo08Kl+Dga8q5A4+!gkftE}SE;I^_RLcmsd>%R6iKI`FoUu?2(yw;UPML6LcUHTH{E#M_FHd3bGc=gU3tajoxv!G1pSZy{-2-!>;-GS`jvmY%;bf& zOE0<*;AH{*AJ6?WBS##1(4V~Qr7wHQANvK5eO|;5fGgke_LE+{>dJb3>fDdoo$4a( zM)aOD-nr5W%k4RP&vn;ZYq{lLyxOZ)IsJ@NVN5x3(-Sh1x%MDb3BPOx}8 z;K$gj(%}?h4Nj_Ngc@GSh1TSf$+(idv15!~U-A;tBFtk=`VyiHvahdYJXW$BwSHuv zK+YhB4rSeWZ;W-9&k|)uFfC~Z2woaxn4A!)PG;6`Q#0@TH%3NO0p8}Z%mjf8n7#nG zHe@NKk1)a$2EOFzPLq5(3~LFE2=N`fVxuNVSM9<}4d0STLA0u17quk1)1=l4X|Rpd zq9P;-c6AKmbw{JxsbfTv-|T3jpJgtXMVcZ!1$LBp3i`5KFCC){Qiy3pMiDX20!_Ur zpt@(4BMJsG#!@SJ@u-f0a1o6NN1V)Z(ZDgPG#zfvGo_R@%6Z4+MYU*aC5*`F)TBFx zL>MLovCp`$JELlC2sx^WBvQ?(?wKv-5d*o4g3E*pf$IXk%Uo6RHV+uBUEux@UUqYN zP&c5JliSTtFMx=>nA}Ibe9;Sm%-}QB+_TwkJ)>S>c5@kIo`2gF@UrKzuld3U{h!o9 z`)uf+q-MYVReSEZ8occO^1R(QJaG1`Blg?w;9WQHiR}JW>X`ku@_$lqIb=d79K7|3 zhi>a*UmnT!acuXOyTuGIqc`od{?YqxaP*uFeeVlip0n$^yKb|_D^{31kG%+92GHyq zeTUN*Wx+;J4y1+M0=8fs;OTd7uoSYhv81m_Ne&f4rZ6uk2NEKHju6Ct|5@*Xrtt{w zLt3eY8C79P)($&l|IIhu;QyoSPM|lfsyp8cGJ~KX^H>xV6jU)!3dK-GQBYM>6+;co zlOl@qND>E}6%iB%P*gO`lL9g;ieuE)IK-HA?AzVxF=R3W7 zBp$w^1kNP7(Rrkmsy>38X9jEs!P64-#Fkb`aVd?t1j>r!MBoyu!Jm34dBr?r`O*?k z^Pg43$^@dZ$-*|~Vy(_-S*fC=3_*zy3nZ35^ihRGZmm*9?2`i0srR&s<}UhYL(V)| zZUjwyFGvFVqpG@`WPAV$#PN|y*0`Ja&|DE#ks-j4AH}&c{R-xMj+`K;1JZe$94*eR zEf}#0me&e{z>;>v7!VSn30X8Dj)+>+tv8dmlvo|O`r-@EJ&W_nJKf>MFY@v-1#tF^ z)C_M-xSFqBJ|Vbr`4V6m@}0fM z&ONfj&F&t0@Prgl1zvv38()J7|NM2YeuYe6*}Md;OIuBN@>!smx5+YL8I8&*Z3&as z5YK@FU-V=UH%shhjlPt6Oe9e$ZATh$wG`FU#8~0kV*wySOI24(KVfGBYBU0|bvfxdPrce8f0{YA7!pJW zu{27(1y5lptV)U@z(K4bdM3F<5ENT+jg_hSwuytuVPR}l$pRP(jHB)dN|}z_Vy%;| z|EDK<)k$%w7BYg;KXYXwoTE=lnNQPSEi@4f`kq}}ddd0TLZZL730OlW;B4>PI_SB! z1+fW7ON|#DtWPNC*U8}sLB}vM0B8h3Rqwm<9Y-8;;8vS&GHbUTjR;6QXpE0JK{ud` z*l2CwOA8c^Ti#S%H!n-ROP*C8S3W0Moc2zKLW0idO7ru9Dab+&+`*sc$%6o1;N|w` z;Bj9mFUD5f^nyQzVsnco*ff{V8vftmV`xU)HZR=VYHJiT4qJJa1tS~6issDT&A+PG ztzPxOm+z*b@z?+ic>e8w`j6g8JnFE6%_!9AIllm}g6kg~uK(f2ZxX z!R?K&y&S~0rE5g{+RvG*PTgad9laqkd-q*7*p%(q3c_ncX8e#`4wEPMpxFo*yU1=+wQ z%2S5^<_bvp?%Qt}FI2j`=Z;&ya`zqmAlrRmx^Nb2!%8^gCP!M15WE_(oUDz%M1Hub zNp~VnziP^2_HH*3c)G2?vOQ(H$#5~e{ORkj`;c$H*pKirQeN2&pHPHSL)w5Q(Ito{ zAyeG`Y2Yq|$OsbCZCC1&{^-@kAuODs#D#$z(MPP7hG#Wr*C85+%(XjH!MXZV(wUlZXCT{D{p`j`+7+e66n0hHGtw>sx?l^5HooeRUwKU3b zUsW3Q(lttTIXJj1N4iY;5qF6Ra3_wQ#7fcQCX9;dKa@dPKocvmv`keniEZstRFIE= zO89hN#5fBRNce1c8GXdsE}(ZQlg@Zb5IQYxTEn3~%4y3aC{0@ZO-BtqnTR78WNDZn zacsqi(@i&%9u1#vdkWWNBJ!zZk~kiMVU&w#M~ zWq4WacTtZ&c*oZtzRO|kKmO@=6m>ScO=qzBv9I3ldtdPKBX{5U=)Ir$-s892dDHt| zcj=O43nz!Mfn|`{dF)e9+-Ld1x$yGprH3th$t>a2qi3JFaKH0UJ@nGmN5AT<1(%ii}9_yzNtg|mcpi}zZ4@?8IuT6NO?#~!-twp&i{vh}z9 z<>#-rh5cnP_N+Ch@h}%1e>7-jllOvi*K!WIlprUV3>V9^?F;llbGB;%OhsTa&t$FM*6zTfj=vg6kFl!XpsL&v_14JOOC2ki@BwwU2%VEO6~y!GO9mtT6}s&`)T zDv%jo{?xVa{rvSG@e5|>u?6R`y}bP7WA_W~v+x-CW_f*lMNaI*m6~y?OrPmCvICnW z2qljZu^_FekX5OKk|ia4%ZM{C&Dq9Hf7X|Hm3Coh0tF21)eUY#dM<#&dH@i=o^1pS`^IW zXRkS9pS||laoepoT<>{&PJXGKWrrKz{f^(=bGMy0-((XrgF)x0s$QgZo^Ihxf6jkJ zylYPdFMIigPR?W;;sh!|4F*1J;+y>TsvzI(w%O8@XfQf%%`$eZ)||1zPjAhNN==5C z8>eQHV~;-C$BK!Dn#G{BO+ZE%fm!&R?9egwu})bBrLCCq*~cC1=4NDfflXR{=qEJ`ZFb>sbX45*Wc7K zQ~}an0j{f%E>n?O)FxWKAfZ+UC1H-z zB{B(@TCM-<30`9DWHpt-wwPyLZJ_|IR7sHPN~Kjn>=^$&ci!T~9G3(s6^N#=SY39& zmMm2$NjI5nn;B&qWvXa0iKZM4gFu-<4X)Xj8G zGk-UNwmOo>M?pFLN|);-tq}w-b9>HNd!|~w&amx^wuCYDa5>I(ThpCh_7n?b?sn)B zO~qQFEi^X_4Q<14_*8gGla&LRXDd^X=A{Eu-1Wcs%YVnuhnws({=IY2|N56u)or)` zJ}eOS@lhk_BeuDJ=8f38jGvtar3;WC z1;T@Ep^B&59yryap|*s}AR)GbG@*cD%v}oDsvBo%WYr>_DKuYB(&g)oRaX%~wP+Xt zKJqb-t^63GOuBrDA&waZ<`UPRyDxe|Fvn#HG!x64?Pm_&Q%pl}V+3O)iHZ`Z)HpJ- z%14%2#G0eDF(oaBKGaJ?Y8X@L(3l!=<62BN_nlUPOT_7)s#-x$wa}ofdi3I|t5~`L zD-F8^-y*<@8p9y?psc8|FO74WsGcC1YK=JolT=FFP^OA5g?J(u6(jDZ82GxnAfb)I zbIB!$V(FaPE@d`f;VG!g+YzSqOdOSD3MpKRs3(Cyrb(vfQFI_hX3A$4)zOgKOvsY8 zPJWGziHgXE2+Rk6nW_w8d{l9nIAnCg*4)@_O1C(OQ6iWq^Bnuj7Ko>X){f`G%V6?v z0rHQ&?J%|@*`3X{kLzQ{ciiys-PixY z*KhmEtyjJN6(=t~W>??Gb{^X&vL|49(H!ToPda+?3Nxg<^r$_ARmaY882ggbkGgdA zON3XfIZC*6&EfW!A?2~Z?EPi?%WD_ydG<+rpTBIs^Hv<_{pB;49(c^bJNLb>jh?^W zG+u`0@(jHd2o!T8Ejr+8cnz#;a~Zrv$!P=$rNKkQ0*DvZ1MXl_Iw)m{8-A3(w)1@cET03Ri-bc;W_8+7XU3 zC`w{`%5+N~uvb1ENTjWxF8D7WLk@6wnJYqsGy;^;WO-G}lj`=7%MlYQpsASw_O_xp z-u410?{!G+r2|=6Mq4~>P(@u#yeWx4<+BoH$aP8Q;IOntxl}|em-q;^Ziezb;IOxq z?J0+si=EAu4=i^c8(xN;eMbAL_r3X=4_#q%*)OGhAKUxOz_M`DXFh6w+0U1SuikS@ zKVP=R%s*+jg~!Od6F7BTS*{cpOW^5=bAxmF@<keVT!bkCq5EbRf;SoYE?ZBDJdSz2~taG?z&o~sU$~WE~;`CMz}MP%k}I=Y_@=e?sLTa8u0lmYY@(bFXyn7I)5+ zYnJ9^S&TTSCR>i2M{Qmt^a?P=@TX_ih$ptB3z>@ue25URrG$9NqT_7??X}0Q+ix@3 z#=Y|Gzhjwr-Q}0Ph{wO)^V$@4BpWC;inWfy81j6L&8@fEj8pB^5{|SXeB3cdYA?$m zuqOo&*_;B0eKSEOc-6iYDP|izfhGUuul~b%XRqdpgWgyXWQ;>$TI7+P!?8_|s&LfN zYx@Y4A9z{2n2X(n*6teE9b^`5XAO1`BmzX4JrmKq{!mAXa>USC;{`EpZl4`hdKya4 zikE|RSm7xUB<Xbtwc@F> z2{ETLJ{dtp1W6I64@1>J3iLcALGtm}MzOS3hm_%5j|<#iGZ^WiZyZP=5fjQ}D9U>( zh#=|Z1Kja>6Kf*J^}Pp@jAmRF1gUa zamM&fufN=zO-@P}8i1#VJnktxE@l?GNiZ*ow4&s2K_ln)8JdZi*aNF+YmR)z;4g%#p8owSg*f@rTO*`UKPdUl=a_L4TPa!DdpbM0(6cC1C zZCKlrwKwfZnH1m0F33A+m5R;hwVXKg7C=56GK$#+_ zO3~ztSTnnaI*dS;Hg-oaBo6bQROnh-jTkiZr7~O0P7B;|=Plk=zWJ7$KKAjeJ6P_d zJ&^8Q=DY5`O$^`JU)BKtz;K_NE-lp30Z!9Xhe0M&$O7tW1+^unL)FX6#2NIJ8qKKN5uby8H38C<0yl~h+Np6ZqpL&PJbOqw%MJ-H<^d+NY~yQ4@> zBc(1)IzB{DOpvBPx5klmYS00H1lggcl~z~KGz8N$N?qcEgL_ISi4@R#pBBP&Vq>IX zh=7K4Q&BCMs_PpPI7Bl#34&2qX$T7k>3|p?pv#(^WmG3FVhKg^U7~cUt{~x3CgK7z zfdXkV$j}s2QB`7-lvnDq zoT|rD4$-5B)GkGouPsB0A~`+zlwi+-G~%j=k&%vJtridUP^JuAj}Ky(n7BdX9u00v z3sph(TzI*au2oO$Qb7DK;N>85uPq0eq2@pN`S*VLhu;@vWtYKZXRsZ~ z9xpHZ>KCy5ok#A5mmj+8(?59P_6P3x=o>CO<@h6aSaa&URZHe9U$iH@Y>Ro(OZHfJ zNo>!R9K4tPtDVxjAW5dgiWS_rc9=yDI@$54e&GIv;vlq=i zYw=!w!R-Cz3r{;_?J4t?FWl#)2kyAd7L)yDPGER>gY{qF@h9A87ls$g!RAJC2rWsx zO$Qp`KH$`^Q@xs_+@kJ?qRpci#z)Ru?GdrUHxrCJYXTCIaSF1c%BLg2@0f zHC&R#ccK`q>MI?{fKWhHE$XR=i5#`GLXf~&TfF=T`Er#V^nz`*h@R*z%GgTdNeU)v zdH#RU{C&6HYAd_sIBS$=3z$%q0xq$ITm*G(NvjL>Ym59~H62vMRwngmRWnh_#6&_7 zRuUPogVykJp6)cz7kme^vAAzW-SMXQR|RP~2)K?2q;n1@ z9^Joq9-AY|;pFJ5C6FSAe=8xAuZR^9e7U^b#EnZ%U5;ou&y4c}F&TVtkmkW6mZo`O zaIO^^l0!~C&96FfDx%L6GNq9rPSq5^N&@9juZl9!M>8rWD29VA&IS5Q5D_=&WU&?d z6r@R4w<<6)ouL43Fpu5iug)kM$S@;KaA01T{`-*HM?*uVP4&wGj3WaXKjsly0`aP=Pz@o@vQ$Nc@~fV4Ypza6kl z2RN74Njl>ghNc=7)dIBrvImsdY~zhL-Do4zn&(|uu?gI?==ftPu+#RF!W)&}4FdpZb=F1mo~&S&TJuI9W_it|^_QFBl}O7JcY~L9r^4dQK!-$ zMqe5|)_p{iJ!PtnApq-vfF@T&VVQyi0x?S5CTkqea0V$e3swa*DhkL*6jLnSQVD@e zPZeD1Y#un6C?qgRA_8htz=#$|#5pG7^h8rYDZ~?*QWbSM%mr!p)QuCPp)g|kHHt)( ziu87BgqPiYkl8bk<}S6+*PLbyXD39i4pQ3Fr@+Ki{cce@)WsBs?afr{~h}I1v^s3;N7Qs(zLN znv*ZoUBHkV#``i7$r_iuwGp^~2#_g-r)(v2)a}`|7wO3H9`Rz`grA1<{}e)GU&uzdTQjKfy*_uG$)VvugI-g@RV3!>GTsp+hN zJO^)8c(IH30@#|7IDvTL)_1Mk%YA+X?TaY;w16H54 z`gGQVrWL*IO|Ms1pb~xl*Z=(Q&PBt^wvtyYT>=P0(;A|PO-y=DaqlyG5ATnB=zZ_L z_s-kg9Y`6-b)@`jUw_2=%RBA7<3<~=&n7Ny!L!4}6i-1sn23E`PEg*=e3E5|7>5U% zhvhH}&Yqd{pb~%rm&5@>))t_raF%@c1;8A5S#Mz+HF`5fQ-DqrjiV1}_SSy=LfG@EtI* z%W^uBZI}6558VkeKYGvS9=Yd+AAaMGhwlE^n=fB}!cjY(xokgR*)OHMzdZgYM{`oZU~JZR+!dmVT9PDjn(e(TMj1uyduU|TCi0h*SuFp1?ub`Bw1p6ou$8QU?h zy!0X)qu?!k2ju~G(&53jaqZ*=pW#*@5xNwsg1(JuIGuxPC&;dmhr^IKSs_R7B9Y}` zGH;e6;g~d@>OzhoXfQKGNs2UxN)`+&Z5tOKAk;3K1QC|kH@xu5Rd}WA|{j?kT|!>cY*jJejY6cbKCIzG;8|m7 z0xyg4B$r8Jh^MBZ#2NaQ_FRG`wM7%;(^JC*34uOdUIvG+y;_qe!Jaat>=kB-4}aht zL1r}0Y=@V9?8_&=-u&A0Uwhd)hq3)n%5S7V<}clN?LD`B_Q88^4ljS}@dteD%m1Xp z%RvlYBA*2BgH6l7o#c7u|bR#o&vv7THO(NiPDOYC1MD| zp)@8VOw&xK4d-0bp@vc%rUMZ}2V|5Xo!={HDc6_3NE<%$yoxf>R73$BMlK-f`TH8SA{%z_vf18aI-71p z$+Tm|P>BEG@BRj!Vm+U9@Od#srUjWGl5;OEJuqY+JA+(vLMVXI76cY>a>FfuvrKDbA1tfIt(l5V9U zJjIHz(g#>WEtxueBCt{#y?lbwN?Kq;JPX)j1*qD_1vyy^69i&SLrkJl!6hlxmB6#M zqrs}i)>5*Rk{*CXCb$t=H0gme|M|j4f)r$G2?U`>X0)ON-eKW3mlKVrMiH$yY9_g~ z>cq7U09l*s@W&7thWL2kV8OB`;wGP-wWu^J$)PRd(}xuL)L4^7rcSpJ<4g^;#=N13 zN;qU3ib1bdD7Y8~g#MCDt*3ggHCMNXL+QqP^6@85y98adzN$Y<6K9M|Pdis65>cP;)Tnop|LfoV zjaON1q=L!2?zW?I$l&4Y-|%YgeU81Y=Luw`q2Zaw$WN1Q1m!d$Gmc1igS^vi z?W4IzRd&hYVtUeFe-Mm8_mK->A-2)%$s4SSo9V*q$v%eAd&f83c!S3aCM*15TimwT zWJ4Rl8p4hL=l}5=`@%Bk?Yp-(L7Zl>gY3k$BKVi9X-^qFg7?1b?J_YjQ`z~Yj0?v(uPd`R=zfh%ILEe*4b>YTAblj=6+=#4e#Ez=uBSvTY8nw7cL^ z>_&8Hdg+NA`>0Yy7*HDW62e>|tO(o!`4e$7Fd=65#(=X+nal_g+dIDZ z%Xd1J4GX)Q&?SjjL27N(f_0cXJm674Tg*@)9F4f9p$sRcgZTf|XXdmvvLJ1Mv4*fo z_!l1uRpc<2cGXzQa?y4&|daOb9|C3^+D0Ac9PX zNqnLyU6pcB3i1gO#~E{JiVaG{F+`RQ5+fi}k#vbzp%LOKGd{^Gj+-?4+8Yx&iUwz; z)1lJp##35mB8sQ!@j);Fsxxd$XSXV5u|Y8KGL_sTO-QSecA0JxqgI)t$q3|3t1)Cf z7haa%V^u4K^};3hiAVHYczM8NiGiJcL))HmuPWO|_Q@}}xNm$7)GWs?voB-YaTcC@ z@D4|^pM3BZ|C9Rhx9)!Q%b&R7b*r4m_I+%JvHgGfqy>9S!ZEX#pRoU$Wrtt9>L_2v zw#)1*+1_7vBzwhzSql%|cG*jJnLy@~_C9;*TwvLeY`e@Ct~$WSv7N^r`^)|(wfxvw zFFk16Bj;`RqRl3F*>l?UC!fgPu>EDbr9L)dhsFCrK1h4$LG!kM@r$8lJ3IEBJB$tL zawaVb0N~<<3nd^#zz_yh9jt=^f$OzruCz0&$d0NipyINV7kMq(=X91XS%{S?IKZ~h zC2VL187o`KRFc2}UPdpy@dg`o_=?x+MQQm02&tBnlkhB{>it9v<%la42UPjD@T`iE zsa8OsTxD`m3S4fiVAYhTzXSlwO+w>QfamaZ@Eo24qahY+e4t&=u~EX{;b;iAj|=gi zVQ@|eo`_MV26Rj`D@iCK8h<X&bPfypkN$F}usmsvo0-PLdZ%qQOK=gTj@VCCE1aLN1L`T83_^{K1vCf* zo*ti2jDUx!C|8qv$=ehpxTA^)QiLEWRe!FSpl1yRayq0fxdLTyE(b#ivIr%8S|8q{ zkl(9Rm0FPn%7>CPQ;|${HLWLa=}cByB~&I#u}j3-7!y9T{1JwCEkXZ;nv`%>mmvA{ zDR2?-Q8z=0<$x_3#lbe#bX_mu^~lSc1>huB@u?z)5_qNbj&YUYY zISZC2K3i@Z&&@l<*3fIN`rsi4>`&;X8?L|SZo6?=K~Z(X<~owTK>;GzigDb2G z7rn@XGIg0tL+C7Mg_uXAArmKdW)CR0)>8#d<0+vLGpV-LyC_DJ`bIZ}p2CFf%qHeo zp`u|!p}Q6nF?%YdjEZiY+bSm`Pjib#T^u5aTivzl?o%%+VWNqSruh z##k^I9k&n+Ll_6v>yuL;^NrVk(&jQ>ND)9sTMa>_x@CIiG{t(BFhcR{u?GF?hm=uE zPtF;W)}Qt2oUz=?oH6NWxb%Pa>t8`qZ+PQt;K41odcj5;Z^XSg;K2PZyy#r$>979! zFSzmid``Y|$at#DkrViA@swyBVJ=^6qzFn`l=g&yi8Rds(FDt} z2WTL(S1p{rHYsVC*e{~aSh?J}YY^DMX0IfN6mI$A=Qr7KLoYmg7uaSoTN$C69y0yZoxjSswx zvpWvfL40yZ2bTNNmwb;`kOtf3zD2sH1$izALK0?m;?Q)koXy)mIbV2EC+I9Y1BG~CcYC(nEpi{FtX< z3P&$4AP`6tFdv*{Hj34v8|D(DZt69IrRPu;2V_YcTBTB%5hQm|5-lB>B9}sTAgiEs zgcL@pmLO5iAgBsig^XVt#nH2N64A&R+`6od+OC5rl2c96w3 zl$`B6#^H-+Ji#kKgs}M{fSUOw^Y z-6r9sv%<@MyL|4_L)M*izYguPEZbo(m+QTn9?cj@-?SI}W`(AYFy!Mw@96x)(p*tM0|F$pO^jYvSXtw;66S;-nI)oxQ zguRn1$DS;_1~bB*_GJ*jxt1h8r?h-uyMljVJpj?MmwC(F4_f*C6YOax zPTN(aErbizT9jdD2%XxV7udrJUQBk>6J7@Ao%58iBMe`AK0qI+ga~SQ@<0bn zpl_nB<1Vf5+%ubQjKg80FkGxU1)y)zxn9nJ`-3D(&m36NjZl<@om$r%IdC$#&SlpI^kHkUu~(RcXX7s&jHkG|`}?|sX)AAX0;W#9Yq zODR8Jw#Do`_9OS*;j3SFeCac}yKS^_NVt4ro|VDEb;(^6a}@btd_qI11R72?8VP}m z$Qu+SWGXTM4KxgMCJExf@^-j${Jh!3bDFm2@=(f zxuRMmCS4J3Xyi-OI)WIAws<1Y3Y=*|I_IDF$ip{x8L)bz4y=<4cC2hp`N9uvv5L7g zCh-}9WTuQ9atoEx#N0?X(<2UgG6m4v)?qF`Q!^Xb&-HepghBLw{quiUS~Kx+7jU4k zy0vIn5`RIv(TXcT;SYc?OX(n+_R(slJm~j3{#K(v>Q}w*d z(Q8gMZ8g@1tQ`w6uGKPTfo>zwC&Ufm9@cp=1pLub%CjV%<0my*hd=2=;6nlM90zC% zXH7_ibQ;#c_BzpMU~02rJ-=>GoExerJ4nzU2bVao#x!U45s0838kD+g>1-=gLKV!V zQw&cD3e!-K&}Xz!CJu?Fm3Fz6p;2r}cdwPY3rr!Oo@5!ko)X9xs3%hsv<`twGlsPG zdT0Y|n$yw;F-juRmLOy-jElga%~~Yog%_o;9-s&U5;+Fbx7M7)Nk=Yti9gc z_vqM&&|@8z0E#udKA<6nq?Jfreb|{UPoW)|@Elvy=&4-r=)0(fJuQJ1PT+xlB9?W(t054xhWcGBsx#-erd8QX!JTU z^*kpu@I8)ZC>tMWWE!ph{h^{)R4DA#3=;wyTG`D5*66?g+rLI8;HGI-c55_x$8wlU z6}OLN1oJ~^-QrFn>t$he-Bz z9nXX*2bl)C@2XIk5$Glms;;Ivxc<=nU*@U-0=fD;bF;9;BfvPGz_^4cU4rUBO(+0P zmm?j_QA+|e2({tUrHWONBW<@`f<$Q#1OR`0q=^7IgO)+vfr}wf`7Q-Ob--adrx+bz z4QyQsm=~xd$QRIv=|-GZAWLDQ5lbTwBot{EGV1lv15ItLMbYCXfnlXCQ<8=~KyVqL zp2qu{HsE&nD1;Rz!bBiFo@u41Ko;HTARifRp)HqbhydGKG|0kP@(07GCCF7h z`ic;PoP&c)0UxC@v%|D9@|A{>waz8o5R9N!s@C@S$V7%)3|%JIrS>vKH!D^&q!QFQ zLaeYIiVdL>aZX9%3PcDVMASMwhhgPE*L|T+r5acp>4_mG!cZfL7&?#F0HLZU7>T1~ z#44tzSX#cZpjK^{Lmb7#l_skya)>i|S`N&UCSF0prOYuYHBFgV%po#=`eUxIU4|bD zfAljemzR}HFP6(m{Hs5*ox}|`#c^-AG`~HG?S*BBu|vmVco{l|k?lCURO+<0OP~Mx z(YGIZ^uC+F_lq762-=-9p8 z$UDronm2vk`kRAhmJj>6XRkfo7AJ`3NU$yD7j3yE6lryUpl!K&VGu^P<7xi4vfF&& zPDIewDqqxVy!KnI-PV1m(30e2^+o5Meahm6FW77|e_numKX&zpZAg9ay;trxXSRjO zJC7dr+D>u~>go@@|05rKpMy{S!Z2C0PFn#}|K1f>cnA!}n}=-@D?09&W1P?ShLNv! z_%sIy4=KaHFuQ}ADjs$CL3XZr4puR)0{yuVaIW{SxynF<@A=39R5iyoZit(wE#W;c z92`p5Mrm#dm~5jQ9|#wi)gq@fIUX#-bghy$gdl>)Z*uPxk+Ln{@{=w@(@9_gZk|{0d>kO;^I!i-zx49@^W;>vcbOf= z29sfAJIm4%GM&vv1~miJHkUtn%{wnS?^JmCx~tzg@Um0cH-G-)cisG%`|tYV!(X`_ zWPbdscTd724{-LxR_9z7tKnErEuey>)uAwgkq;pjiAxL4OC6J}I7(Fu109PSpB&L}o z7HSdB=to=lCn_RVibeuMK^o8WtfC^KlU|WHwi0L95S?M`;~iU!v%~r=^-YFR;ib6DJ;fEqZCaCGzG20UlAMPqv2qtOFh{r zd+PO#$?sIWvhjcauYbagO-!oXDisq-c@

$xW70ZDDOOmzXQd0VdZJZ5HZHVVpr) zqfZq!F-!CPZoJ_JL(f=7W2Dd@MliQa^G*$3yHS3@46{(1O6^LAm(5_nK7EW?Qx*^0 zgv7$&A+;%wo3X|{#-ssGAJR2nku`z>=&6ccb2JLK8mLA2jeO49C82dP1vz3SCB-`X)R^CftTrtImOgXbFHRunM!LZhIMi|j38vX zc&a;1hFffFEaG_5oc`u6TFFtDxyX{Q&u9YOu#zJdGy$cSQrji<6scwE(*)bl67p5Y ztsCL)V~64JK(xceDtS3vs3m5 zKlq_fMLjD1*0;a@YpL|4R|T$j^1GFeOxiDnY*<)ZjhsYhfQ4ytE% zdNMCPy_ca;p8P9PQDb34oxzH%A{WIbHJ*-~`30mWY1UJS*g+Ad#)JacGS1A>|9o#$ znGJ|=<4DJoV`KJUUTzPRZBnH<{f#3`%(Ny7(*-^hcGKBVb(50gZ6;*ylj^(kwF2-#KBJq1aN^OxK+S5Bp5CpaJWrad((jDfTBtV z0o;L=x}hO_2oi7>#?S1$?wxkXDrbScR0+FvzS$ zAhX?kTj92wwP?CD!?Lyj0mU;VaifeJ`6DDfRTZg)10#eS-4vM_Hf>`VlcbDWIe1Pd zw1o&ch^s5DN*u~hRcV>=6vwuXks4L6;vmRw<5D6%@`vT0*6@nNMBps0%Cs`1RQ+iT zWvVD>RB5$FP>bnm9RhNNDDf#w6DLKih$iZalUVbMcdXTFRLAlo}0N3wzC=`CiLfHSb{w^QH$#{G{!aO-!ze%BAaeb23*|G@osU32-x ziw@p*Q=7|vCbjU$?N44X>(t}+T6WCdOOKv&>alZIE!-DgzTmVY&s}j?c-i-^L1tii z%_(5=e&?*1cg~9af#nO&IOvi!hrVphp_i>b%%7LfTRQi3=N|p4wJ%wH!km>y&pvkk zjsq`mWa57cFN0$oOH;ZHP-#9ScnEA-8sJBlfTZOElmqUp*FDPShue6y79+4NYxkCq zicA`cwAt#LU3dbR9yU7eyzgGK4m)VRo#ksk@*yt(+ZXnx)vR52`od?g_nMC*WHyc+ zc>;vtWgp1(MGiaBK34)J-*L;A>@WMP0!&RS-@1gLef+cci6B$1y9JRlj#eUo6vTqH z6&&_J9$fY89VWGD#mTn|Y~_MU;bmL8+S{pVWT3RB#liV%xR|4EtC#N)6YHsV0l8W# zcpMF+;NTE-uR3DoL|=fKR$L%x7jjP}@R^l7^QW5}iz!b4kgt^J_L1p^Ion#+p=nL9 zINF17N1$UH$3Eu;FGIvmU&F`%veVZRzV~&_M|_|BJrXWu`v3B$u6>uo*xq0M_($FW zFMsT+ciLaRN~^9)VXWpTu~JKD`9^7JR(Gtws^jBQqI7JZ77sGj8^Ut%X;7z3 z+Nww{O;UqI3DN~*s#A%P1#RJjAt(&#_!P=f1y4?bfKqiKh7y>|L`h;cbeSExOgEV> z#pqdsLlQ%LB*d{5^G_%yAa;qf$qJJs_Fk=C#4!_lGL!S{0TKRK(OfO*+{=vMQWMzC z^eBO+oN==Vl1ZY9*^Q4ahfr>(fnf96p$Ou5;QUDw0{zkO;_yeN77r@>sMw9D;RIo; zh(0_qNLk>{sp8(%Slc~40q`ivgCmr=#|U`8PO}irQ#G9Fj}olLxZUKhWHhq~PwE}@!Vgx8_rW;34UNq^n!i{uw zNwlfTO75x4+E`m$s)Ya_N@f9U@l>Qbg~geRG7`Dy@&$DTb#c==ff_CatOTX{o{B85 zA{``%!=L_)fVrlT^~{hm+M-c{imGdfbQbGQF@->Rfg0?_hM8CYk%@!(oDM1y?UF=k za6>6obpx4HrC}|_fj*ww=<1#kxlK^8<3=de^$^>mY3meXukmU^jdE*n6RWE$8jr?^ zV;CNkQTVU__kR!2S*&Aj4eE}-db%_KZnRQf6KH~`*12RQ3F@V}B07xW>=qkem}{@Q zX3~Ht5j%SQ;%&DQZ?NI>9(wpb`^bOvOaGJg(xsEM-qw8L4}a>LQBQpH$;a(41J3SR z_e&yfnQ`BMa9RM(qvMncvSKwaLnQ?Rmd)8KG z?|_&C8P@G{qm5;zrU{FgUNCH)w4kf2>t;jzg)TLpAf*jA{9P8hHOLS%9k~?d7(n{E zXnnG7iAI0WwX}uM_H8@(HC08skeEe+gs_6Garzi}XF2xCsc{6oRTN zA+VBIccLDL%`4=&=$vq7<(At8%U#b`m*(Dw4&W(HyAM;?T;toT>)rfZcf_iLX#omx zUOnbEY9VXD+^g26umBxa1+}U;$=AjiMA;^+G`tzil%7VTZ9oGcq^KC^fLlZ5PvcC7 z0wpCXRZCr$Raewyl+tn(3Di?XEOWGu9crVZ0QIEe)H!e(j$LMl$vfBmOuP&n6=H~| zP;=x=7s-r9f^KQzzPJ~izD{9kNDP`74ptM8_1yc*>Z+Jf72~7sa|QM<#CEg=6JdxR z(&dyM^wFp&Uplo%UHLL=tSZwmvx>zCx_3|Glm57MHxUm_BnmPU8i($9YzNK%uXaTo zhnmSm5~D2yP3KI@BE81Slt_OSOO#pvlrs@DbGXI8H^2o6lakD4Br}OiK|+ztX;2If zx7}!}OptUO`P@&$A3FQ2va@YPG^1IsU8yWqmp zk2-7FL1!;N_`H>eoWJVOb5882U$*uz;i5ARdd=A{x%7-fE?O}k zaK7-=1J5{auXQKRJMMs;_SkyE_Lsf7JhRU{*}_NE@Z95i5G8Iu6Z2g-#tGIB%{g+Uf(qf^|-OaX6vK-~YmmKG)-LwOV$T?GyvWzQWDywrZXH3M5FZo3*Y59VZlqc+I+Lme)<#UUWc@O;!*;o1K3_Wdg&2|`fG;oSo-OV zZEL?^wk>WC*TnAevWLk~=fMXY;B{PT6>a^;yiitjI%tLY1;A(G(-jS5~a! zR7Kz@salSEdD^_kuDcRMtFSYJBVaTpRlW1r-UMf4E)C}&w`ECVQ#X@Y+lxf|x79+6W%8^!a1edB+yc%~0- z2_n>_K?E|@m5K{;rfXp$ z=y;bMck~rm&%RbJU+$%e@Und?+=!qsP4Hp)RKj17W70DpVuj(fRw~M2knXHY16&Mg z>d!lFx3w|F7d>a)I*)gZ6yFr$x~t;W@I~ER#Pk$6U~%h)qM;JD`hZv|@0XiLF-0^H zY~mQ&TWq?q5pA#-Q^@L>SlOlllZ_X?C?7cQS0<=pkkU%N7enwTh|%ncJDY)C0D+A! zI-9JRlw2Frm}{NtVGz6a@j>tJt~j7H7>#8HG}SPRTZVsa8F!gDN^)iS%ot*ezZ|s` zF>hU{JFX!T?4uMt(Ts*nz))s3L>8s$h*^yEnwIJ0BOr)ptBAI8;v=8UTkKdh`@0le zQl`4N0T=Fqy6zosA40(}-O`+@`iUxdQjbB}Kb6*tR1_$z70jrVo{A8wPAl9L33QgD zbqMfRYcxtGp6sR}ltGCheQ+y{k{atL%j0c;N!anLK}11^A+XDq8A(X%TJ`@-4y897H*H&fcC0)7DC-Ifg1G zmN_dC)JX@wKJ-Az>83V1al-*y$}43|q8YgxqzRPlZpv}@GlI1CVrbNFzr*&x^1?+6 z{buMt{?(t~ea~&qU_-}OedPU+vW?~+{^+|u`?)VrdE4}R-}&x@GtCL=?h{J1uE>(? z$r^%QDw7&?bN4+cuuAet%mbb(5RHHV-6jOcbl08}D#e7J0wY+vJ&5SjE2bu%37g>7 z1m&Fztz%e2k2nqY4p)b+t+_BYC8)!h^dyT8D) z+eZ`nXdQZ{kmG(4jXqp2N1EG{9dTe;Oqn>~34_!GFb57|^Ul znV~J|p^0>=rht5{W^I&mvMMzK(PG2}l+!$u^2}IWnaS05Jcpq8$P}h|#zaoGni%1! z6A0aXMKYO=4bx%7>c)zyg0@sN6vJxLE1r7!H!0l(l+S9Pcn)P!m;w@tsx_pPAB}j3 zD~GsIg{(T5BDD&G;A#A8I4N$8cDWo{#d%uuEKpo2ZZr{Gs+im~K~|B(go!UI%sfap zKQt-1l-cZL?lqYR5+)p(wvyXihLoMS{>hIVMsgx(f|p@rmx3ah$OI7Bx4i5u2aO@+ z!1Cni^~}$tT>8wHFJrqz^Nok^6rOnamajkj#d~kP=9`ax?k%rAb>T~P^06=fUp{@w zKC71=aK_^ORvfqQnk9!^e8#bttUmhEb;rWXP&2%|Zt3KI%IB^)`25ojhL_JcX|L5M z&#}2|XZhvp4i7KC;rs=cuRHRB<@?)ZKI8a3Pd|3{#Vd|D;h>IWlJl`Ls zoXeizWfQw?Su2s_$Ch2wsZCE0a-F_5?L*L>U0ZOiAofOWp4zyvG<#HQ35Gfa0MIwM zU{zDP?ONDbrX6QApr7N`&S3j1B7XoJ{`7U%f~n4J+h2am8(s?t`(Ac98gNG7JkS37 z?1lK<@AzFGw3O+bws)LuH}@X2ujVQp!N#{K-9o^@BhiY<>!2I2!+b8Lwl8X;hd?z1 zA_iKyzVIx}3)+hnX(qRXH7pZ&dXfwyYnnCJ0%X~R5j-nq8B3}?Vy+Yp9_i!E+p!lW z!Hm2SFDAp_g=rHt>uc8#)+-}bcSwasN_ z$Cf+f$TuMD&1HDmC%?R{eA~^Rz3Fo>zW+y0PG!T$Hl1DCgLZ6N?0a7~T=$;KU%vV^ zuUPA6QtM)JB&TPj}0sf4rBYPl$)4KW|_A9S=}**Yu6jfGWdQO;|%+FH0VM=vcFnUQ+!P7Y zDKNQV>d)s)Q8)9#Y#|%8l(_WPKlV~PjmTM zU=+D@XI&CaBFD@aAw_@2pD)T?_hD+))y=JPVlx9$RlBgQ3n9o#slhV5vbupZDQb07OdyHUa<5JSC zT_iR{Dd?6!4Xy0Z8IO%w_?p{^xq)dPUn`I?Nb3l>nTTPtp0R*Gk5oct_N?tST;{k7 z_%IHM$(3dYar#K8SU{yg+2QIW3k4f2zEEOfN!Y5_iCTe%1S%w*< z(bJaBXb|J9y*Ohl;htiW4&Bf+L54<9MH?xAhI-VX5)I|6P7Q((kOG9}`Q)$Vg`p zI%B0TnTJPpS|#1^WHSC)CIy`_m9xtVq-|nFu0lu!zZ}*l+%i|I%#*Y&}p?%OmlQ zPI;b25D{*Ny7-f*UDz_L=1@#RTM#fQDeSE0kVO+ypD{=Q3q7!W`KvE=(AtLb*PnRg z&wl;KwwMn(c>lfU>;WCizwiFLoy4~3{EcruHaVyL)LhNf-FJLQ#F-J$P%d(HwQmFzN4P0z>qhAxWwOcGa_^k{8`CecIkHL z8*F9B_x>`shs9D)EzcI17Za;C|{Kf~bv~DpgcV=r%EzJ_CAizxn66 z>(Ve6%{ck|bC;n}WD*cA2fm#Af8bXpvgg9f$btq$pu`+2I6^{QoiYV*keGz0&RUlk z`?CWePbbjOM=i`@iUDUNFf1q%f*}KIBu+yFX{Cz9(zQ+%IrJPwq@=JG0YTE$l^>cz ziT=6pavubH+L3bl<2D^|C}cDZk&rXg8;Oano;t9N02BIzm(kGB9dQ$-3kaq;PYaz+ z*NWsyCxuzi4AH}4@vM5KSsMXaH6i6QETfGU#c-&XZsH**R+(4NVPs5X#wyW6giG2g zeI^I_1+tJ;C95J%3WA|O{)A3fK~^owmtQS$JQ1Xu%RxM0%XgQ}Y36n-Li3M=%O>ZR373NP!llUyULJ2Kdp#Ls29pEPVDiX;l*=4I z*m-Ol%H!kMKs2P>32k^;koFeyw;#Xvdr#c=_*cL5_=8_~;-N46;$MBm_py&Xe7ni! z@^QPZSTy^zllEJF?Cg_{++)RY`<;90QJ0^$2xN8`d*&@>FESqjFQ2vSfb&)zw07x! zr!Ss;=90ag$G&*=At1BZ{xZC5XL;S?y-q)Fw`E7|blQSFUvb9Kiw@as&lhhTUVgsq z<)`qnO%_kEEi@KWFw@fGiKvIAFeaA+mINEE{(MC^8nQ#V`6e5~r~E5uF68J~qm5Bd zv*m!y))WYI0+zot`F@x8jt`nQcZ*Fog?XK<7VHiKzkskm(KxMbE7^X==Wh6v?|eFs z?JtKGM?20QRQduKh%NK|?|x_gz*0Wz(MS6t<(|9mZco;sN=}HCh>~qNIRtKr0q{LX z1&uA59uC`j1E_sy3;yMM)8CqCj|pD(lmH;5w&%|hycD>5r|n;i80J+Af7sT7$RM_^ z7zZNeO)2HE+y8=&LFsL`-rA~dW6KN3gksAMo;=&AvWzmaPmZ$^ld?0{U2}Zp+j9mg zQP`Ktu$nV_Hnq{HP7@U^aNf&~UZZ+>8Ds{W{dW0|TR-RH*fy7iHk1R-eHP5Y?2ldb zyFU44bNSVmuDR-cZ-tjXdChy^<0Lw|&n3viFxCz5mW{Jo1(Ae0}2YS?eu= z7Dr3Jgmuu^5z8?^3=_+%^;eFC(h_f#v{F}!QjTC@McJjbbp(rY%dyz9j)RKm@e~iY z!KJ#1Yhb#JCNc~WS5dw&>Pje8q)G}9L1FnA3W~~+Y2a%n`)h>+eQ3o=c9|O>6WI_K zLrj`6@KIDthqluS#|ll#qsNfHK?j*Q$6pQ`MqHzkUK5xab2+KOnV}>fiRYG6z10?z z&(zpie)+{0^P-JY4|co}=}kTUEfJc7c{ee;B|Wo{fJg{L&;d`kh1MhRJ!5R({yrj#a>7vm;& zgTy+F841c`0ZfRHNjIOFB7KFVVo=yfTQx!)M~PbpsGs=rK*sK zryxQ38m@{og^?jvQIHU*CzC$i-&mzKaA7fGaGYzqS~yd+2Pt~2iCv2T7EGWjC!kFJ zPJ{T8Rd&FH@;mRay>9GZW+0npa#Yd;TRb(B5ikJ{ou=?r{B2%1d6CM)TZkAYqK|ci zs^oeg4A3dHXj+$CVgq+NI0LFO<1=qLBwdwY2Kkw+hpqdL(7(`jk;bT2T(mcpd# zqFO302PPeOd86mAZ)QM@EDfoljW&OG*mi5k6bGD*Uk~xjMb5xjxV74hp$TrQx~5GE zxap*12L|L7fWwgQ=9_H7)TT|+X^6j@Wp%~`Wab^2ZA=$z#US9h4FZqQ0czhI^DvxM zm9m&dIi|vk;1zfw%WEL~lX?gOQyH^U2Dhw1Z9xvUtW+R{o*`C#Nd3o9m7pXGU`0<# zqIa|0GC`5P;gXd!0b8BTB+xHBxIG>Y)Pd>a77$8XIwO-)6?Ns{rujOv7!8Z*Zx~XZ z=FzA{R_5i0bISuGdHBGB!S6^m=bT%flP>`sgd>8z^3lr=DM%v-Im(=N`{g69I!qVL zEK!WDfX{^0%%7^D&ViRfW}%OAw`m(>jv&QSMhq|t`0x@0`lM1Kl9E1XhBBB8z@oH( zTdk{9W;`WWM!JUJnF#b+O{-}{Kkz=JTxbJb{?H%)M5tb1%xP4WZsca7iItvGawNJJ zUD9M!iTN~tl<`ON%uwBMnRF7d9SpOioOq%v#C9}+O;E;7rs{E|c%1YzPbQ;4Ob z8O&<}1tbP#WGSo`;>>s*%&t`2oi?8OtW#92EGu1%yAyO{bc~z zscc8D<%gH;FH75T2ASoL4_~*x+(xr+e~mYqJB%HyR{96ueBdYFd-SmfzWCL9uYdB< zFa7c-_rLDaC5O!2%*)FsE!buG3A23fYuSQ5;bku`pS|pe%g#O?SoYN~-}^d$)!`SM zc7(&&!pvdp{cJ8fk_|6=c{#{@;pqoDkL{CRs~67l)vr_icIuc}FI#og;=}gXYr9Ru z%l;=d@UqDh81xX^XK*Z|c3^$}%gG|UGyKC2EyI$Q1DPC1-lJ#K;IP@--Y-AdL-<@} zD-OTQQ+?}{y-`5hlT1&9{p7-0BY5f9b~EsM+|iRac7OM+Z*s~SF7{G0DDB*}-DG>s zK8r0V0-wF$Y%;9D^_# ziQbHE_t_(5m!w!9t-sFH@svnsQcEY3QdLFGvCbl*aVEH2-dY4UP`^z@o`+*d^EXTq2ftP`0?=@fZp(}3u)carmsn?CbV zc=^uXzrp*+b@{mWiFy6vSl=fL~LcWbXpuO%2sphyCqeyQX51$S~3kHBdCR5 zqIKWeicF9YFt<`$Ua{3`Imj?keYg!035p2!-R)#$46V3QPXh7kTO2{&Hpzjycnp}1Th80jO)gW@dZt57gp?M zhWBL808~Y6)B_-#WC1tTHI20?6Hp?{NJoqt1>_49#*k|&cB+$o8YUTvZKs1wEz&N{ zsa^PUVjEwCB9>2QBZ5n;2WDFdrHC6Ksmf5zKSZlr+tXjnhFT||Rz`|eyUQ95=Hp-WDk`Fe zS)F1OU^ML&W8Ryy9>6-^i;w(J5(ozT0%d4qN?=u8_M|Q4C3ItV&=VmmMg8AmApzg% z?FK@_)5D@xC`+V9(T5b(sqMxwBZak2qg-a6`W}f)(cO_xE*cXH4$fNt@vnXc5!+4P zckXORd8eJX-D~gN-*&|tW&Y|!7?CA|< z!-T~w^nx}HtuS;N%|mjfW|iLl&>?A?Zd@G`lp{Ud40q7YrB&9W3C&d$%*Oly-4a&S zTf)n&m~QR}a->;9tlT9wH+&z@ilerD!Or3MSH_7_9J@iEN_k z4BSJOEZk(Kb68j!I_`U4rCkwce`KbrOZmd! z@HGA|pEancO3V`~M`Fm*Od-xGT&#*vKM}JDG8L1HCq9&K*E?=@hrf^Nu5Xz&kv9V1ef|QXkR86PP6--SAe)G13OO!$5urO%chBC+;Ubfu~ zHQQPKlVAEsrcI3rE*{X>iR%ts+ea2A==fU?Ik5fRZ#*EO$d0qkF!&v zef)tNfBJ)ayuW<-ycYn=!m{J{ShaZWX^Zwy}MEj(y%Ld(HDNIQ>9)*^%sXSM2}FHHTlk@_@6K%sGGAerr#hV}JSF zlMg!S@L2;dPvG)X`^z>royWFDc&7;-^f;8`%?q{ig2PZJDC%IO-4-hXKJvfW-l5B8K}jSzEaog((XLf`eWUhKHtcKtod{&G3cu5IN$iwzX}Xtp9R zEWcn=>(cWa%$5UO1JvT>Coh)i&1FZpk-^b@2h4^zxC)zFBYe?ovyC^fn%GwMG?kKC z#PjK}bVsc$tTL%5@SwEL(`iRoJ;CNt+n)k;!F-$4wE`cH{ka^ru;nlgHNxXIsVQn5 zSCKV*;*1E0*p@L77AB7oxGN4p5wYy#ML^BgTWx8@WTh=O-N<$TRvcP5-D1RLa%mGk z|5kIf!hNy7X0djU%J9(AZ&Y=-2^l?<@-vg3OP8@NJ*_ z^atK>`8n|Nhu-sMc==P;zSsWp-M4<`u3K*Kee5sa{`rT#eB0L^yvNU%1q-xb9TY6G z94c<4w58ArYXNQjwQlC=88|J|iVP*IwuB&o5)H}`Z&LcXMSToNtC2Z;H>xR zZ$_4557+{6`0!)(?GEtkpL}d0d(XR6Tz<3c}# zi&gZLfQfK6Ot_#P?#T~FIBcnxzVHPvFkZIYd=rCF4*_pAoDOQS5>~#;=jop=#}l^G z#H*Z{_9s1!Lr4uwz!p%2bMED3E;s&^RO;!Oj}Lgfh6bhPvn_=)7v+&r5-YKwlw9{t zzA9QIQ5BX^%PnSM&aG0EI0)VAY@Q_%t0gBxk&A}fnkJ!0tUnm4&cHZ$X=e7c)9lT` zRz>rvWPx*|1r-|zVs+gJYM|tVyTsEa5yWXDo^+N@qT5GpJc*;ZnA((-sl5_72&!<) zwaYZ9F6o+p12X(w5{f1p5+{v7(0qK9V#{3ZqbX${MffXqsZ_vFj6aDP3Jsom3%PW& zBr$@73E6atn9tc-PzC?qRQGZvTDvJ$dmW-_tNs)G|CRA{aV+V#y{2TLA}}XrMc*V^BtdbgUG0 zyECaw+BW=|i!x>axcL1axN^6etKuO)^LnmGG{X|jR_+~fn!}Qa?LK?Dj(JXkNz8qePd5+loHyeR znF_YrVpC^VFrfpT-A#AgDDb^6GZi0+OTeV26#64j*J*7lxF0uTjxAyy1-aVD>=FfO zIT?fn$gRIok{!aT>XN9{dZsQ!JuM~9t#oTh!~_SrX*gm%Tqd@dFcCgl&F4`ajr&Wq zyV>NXu3L{r&ll@d*otuw*KfrtGPrq_dWuw`1H)24d&Pm8{P{d~-Z;-(n$sSHQOYxi zbj0DBJb5(mqa5@obMhtRM1}yRoM0>nz{$_y7eft-04ptt9zpxa(|9@T>EFr&GPeg^ zJ`Mt%=|&A4q`NsEB#2TuKnZ2;D4~GSQaaB~8P=uxy$?0$f;&={*2&U~nZb1a% z+B;nbeXzjPi;br^F&SMLJ zv!6+MwR!C+^WbIQ$M(sui_Sdc;?;-RT!xpO$G+?w-vry&$G+fY-}?fbFIsVs_m|H+ ze%~cW?78>$o3+0@jhA_Va2$7+6UfoB^I{{_Q)ml~nG?{nW;xttz2HzGwhe9Ow=VF< z!~th0()n#?|7<+ThabJajDtn$;Rn9rK(+5(JA?h@yKV=W;oN;@?_me|^{;-V9c1q; z`>T|1g4r?_yta(L!0m&k{h0|mzVDtp{gkRq`L>}^UV8Bb&T`u(CeeCo9r5fJumztj zvXHOk$$Dye^26nxx7#q%5cD9b7~D|NqHhBoJ(R%$+6aA1w?plDf^R5UteHY42+ZGx zj+LsLs_-?5Oh-C^Y-fs4=K6mqyASA1iz?6Kh=8J^m@tkBl$>)Eker&FX`(~{5d=XH z5KMppOytnyoSG&yp$UzG0UZYwb?1z`vuDrF?3ta}NxQRQ_VfO~L(PAhaGZ1M)LYN3 zTle0&RnJrR_tf(|ulNLmg%~KF!tx7)(gIp8wYKuPSFOpxjA)l%Faw5gBckS*4Fm}! z8w)EP1HO5|Ij5a+oE%=3JdABa8EW=m_KnwFX2%&!_G&ZiY_D1FV_*J%`K04!o^{&M z-}vfjXP$aY-^cd7ue)!(%AYUWU-rE(kl7>I9?Z5@3Lve07C}LgrQdRC9keW397Pwc zjMhpCo3qC0gKqsD@vze^HfyV;UyjqNO{P^|URmC$r#8uCVwMz0K$NgyK*uy;hnAPHBWPQw0a-6hxAyS(_ZP5)1W_>IKvQdJ;tgvBZVuQ?1Lt7*! z3A#7+Jf&bRdGf&&5?hLh^o8fynO^pA3xU+>JHxV50Y#1>YYRe$P7dd&mp|3Ya(!Q9V2xXB6cQHL5(Ykmy|uK zsufp2e;xw&h@*3v$0RDq(Mm6vRtpWxwH9#dg;R3^rFzHnMe^jEGl-3s0>2e1yMUMp1lF|%qVtp!BowVA{-l(h{H8f| z>?cH9{gOyfm2auTS&o^V5(Z6l3LE~RlzAjex{QZ!bp#v3XmLoCPNh*p0yW7neW^B+ zjJOCN1)U{Y(Mz3zhU^)Mx;jFm(+yg8J?C3XgF?Kr9CI%&9(kjx{K5Pry$dSve9E0=xvJ+MWmjor=HtZl2g)(kJ+#qLABiF z0}iU(C3B&0M*qX#{uTe|k%rD+5Lj|Ln-44psL>%=x&HExarc>PH^W^>#X_zgJ+N)y z6m-0uYZqG#-b8LHT8{NXcie|@O>iBi#U%@|3Gb3(9J@9eL}7!5wBTf(`AI$_b=l<6 z_Yyjpl3G=IYAlSk=|q}yUsB+ihS&w6I~baCBNe8Tt27>-G+2adu8)*rZf256GWimp zm@3?e!lh0PZS}%js!A>GPGmujT6!~#169$8)43owl>Ug_qfvlffJF{`fi|M?k{BL7 z!cdSOTc|`CQRwbaS)ssQ!vR=?^lB4Aj)Fr(h1zOWit_v~e-2vOlZ7aifBnLrK*|GM zLdpPj8_hkNO&VRagK{jFXUVXC7(;$=PeGGfDG$pdG6s>U@GlfRfH2`W@z5w?OC#n~ zXyTR}jGVQZ7qp-3Bl`r?i=WHJqmQa|WSEy^o&nav`zWvyUGo~_@GV5lRM`_>;x3;WR#iX1woIn&x zwWIA2sGEqI*9+yEYn5lFdk+szhQ*x0I2|IEl&9C?8O~4U5a{~TrOEY2%+S*CDmkzW z8q4h|gTs=7?PlA`(-YKO`u%U8^Y_Z0%C?)_r@a(j_{9%;of(MsB(^Q)Cmz15^vvV4 zfBe+_kKTX%&z_y_{pCAvy1@TQeSDpF`9G|-hwQGf-r;+1=>6sWXRK;}`M_N#Phv|mcUi@Q+5S(;&hnu

Qe^S`!yU_ zoB?AKd@DoMPgAM$;K1=a1xy zN6kg3z3{2MR@=Fen)%Yw^sIwRQ^mH6?&?nS_j{UIRRVFPZm-%NrAKq5s;F~IO$^;r z{qN+*+rg!8?eHwqcEe)Wg*_%vV@M#x`op)phZDwA8me?Y*CbgpA9M6&LFD}Z0A`Qh zT)G;@Il3b_U)Ttiv%UoG! z%`>!mV<{wmV5C0Y7scqq_x_`W($G8K!6I8JrAyU1vJ#_VM(Eju<*(Oz);Ku$fQl3x z+F=kyb+`0z|GI3eh^2O16dkpUXGc;WtOybI%aKZ$#6o>vMGc(R8D8Gxb*% z%sckQb^plPMx^`Y1$Z!M>g-1wN;xdx=^}frRjWwWHZWffjgmtk0h4FUmln5hS_ zJByXa82oa#rLvDumOw&069EH(N4lTk{Yv*&KNKs+(Xtj`=k=4B=ofK{R~FHU4dM29 z;v>D(el_1ult%;~tIm#!(UVufGFR%p`!8sr%OP+BX13$SmCAw%Djf?$P zu3TyknKROF;dm;nO3VBo7eN><^Ito+UrxX6{_GS4oe#0#nP-?QV1yhS!z;~_rr3+U zHOb%HkLxEI=GhpqO7=VUQKp#~LbO$2Jlt+#nfz3?{Q(OE`QpN=sIVHF!jKB%Hy0|n zfV{u|^@(O-kDZ2MAe2)M9%;O0=R)${;K~2eCuwcOVuK_7M(To)z|*&dF7fEE@2+W;u%>Bl*;(6E})tCKC0VoDIRC)##K%eFk@X zLwqLdezJ!JlCHfEpP`N+iLCnVEYh3U!E3Srh1S*EO4i=9I&aA)YoP?*5FU?LSTzVw zs1ESS8w({B<>Ya8`nPd%bMl_2KlFWEewyUDJTZw><`@>0tNy0Nc=)|MV^|(Frz;@8 zM+JH1-~wV8LpkIxT!d1ypKacPvFbmvY`x764Xk`$^4vB*rSO7e+eL_GEQHkjEfH|o z(&H7dJXbMc}PSod2v<3Z6oP56rTo3;{>Qi z`f$8gZ_wG*#yX%q-S~v zB*Y95Md{w%uT29)8??xW0K6wD3^_+_!i*ywqbdtHmNC(JTz@pljtV^(?U|%V)J%oP z>;PuMz9##rnB-}NbKfpsSV2WI!+NyX>Xy;54PKrOx$w*!dC?NTBj}%=ExmB%~SWiXrITdsiSyUb?BeCWAcnk=rv(gcoz%=w%e0#N+pKe78kk&u)b$LJ`q3EgI3EA7C zM|rp8{Z4Oo-&)31(>Ka7gynrX-czFc_kYyVi#n4U{4_~BIvi#2b8}c}P3fPAQd>~Ejbn@&IG;G2xiR2cg zhCE01fj1sz@{1yMY25VsvkP}`8tz6*75!^bQGfcmFoeg?*fVE#d!>oAR@bnfH+9d; zC*c$N!;b31N+6F^VMNFKE=eI9N{}GRoV7w5old9C&!Fey^|!7I4PNSb!gH@|H{`)> zz~;aC4f>kYwbeW``o%`!Aq|ssovB@0_@q=B;H>h99Th*7{-b(;OuHZ7>c;a--r+}r z#}b#`OW`w_1l0A-6%EsR5LXusY-SxF6Zw&{M0Y)1$(nI1QdPPwovMPa7@$x%9|aty z#_gN(mHKKS_cPRB{87Ui!am?ag^&`)!_yH6H@_T#9hYZdgruHSIG+$-r`+g0ZeU4` zv7lpYQO<;rNcyN_68d5R)b8_&sfIMmR{ZV(! zm#9Xk<{<6d7U<8_SYQ9K-ik1h-yB)qzP?&~g@MfVWMl?UiP|g06pw>2hEj4A!^lZV zH31M~I#bx()d%U1WQs#@=U1fvQ!&O|YRi;K5;G&B{yt_icB6sJ`)B*+)m#N1!%!CLuUnD#3P3`ig51cBDVFYS&zsU* zW`xGWn1x|2$UkCAY(H*SjjVJKbW1^#+Hv$M&mpg|jhRl{D!v~!yHOwkf>~!S_q3;xl@;fH3=8x5k)tI47u3jb}Tq(O$melARViaEb8y)sxHDrC9 z?;x{p`8@q2VqCH+$^95Qn}hgg^P&Q%P6kko&XdaTtOO8~0XuEx^B3$7&ZXDY9z{ro}@A^XkS9ks*m@Q_{dw#d5qg=9D zUdnZ(BJY#SlH89D8+<`t@aI3saA*-^Ln~eT>C!8E904&u^7zrRO4BmIp0S0KITK{$ zNzCQzQg;MlzK#QUjTP`gC?lAugTKDQY^@-VB)6Nbl>fWnec~gFgvp^cd)F<%`Q^;1 z_Y;A*+kp_j=>3)1VH)Yp)E)p=bpkr!tRcLB62|CpU=(FRez2Vr&jg#H$R( z(g&kw4ly@FVU9=+(P%s8z&>`+u92N;uzZE*Ik>3e)};Tq^u8Sx=_2U5GNaGz3sgMN zE{+}{?CX1S@aFS)&o6sl8`A#!CHPxUc^g8qTF)ml!ibkH1tTj!;yR6oN(ZIClt-Mp z-Bvrd&|;&n^vCao&VV>T-lMF0TX^W5*k#%WB>%MLm~T)Tbb5IFONS+t&{*CNVl2q( zF(UTfw^xjJN2E$obM{(meTHHK_8-1JA677=X&BdGpAecF_tiNds+Tt(*liZdL+ESM z%j5gK&nq}pI6S|%HrKg=aNI(=<@WT0jY{2QnT%U@zJ}aEGA3=S%P&dZx zrhWX5)u%=W3Ex2{tmq`=SA5n#p;rna_z569Q+l<@9Jcm$MmM6JEpJ-lX*=lB^~I5O z$`%>O>Me!x{Uo9<>7j4a5fX$ahQIJnftIV?t47I54$0h?3fc@QM`Wsy_-Obj>5MB9 z1+pzYWAtZq8KC*7z_qWKHeK5rjG2LdXazY%xIxu0!sa#U>4=wcuvcJ?%Xz*%IeY<4 zj>ot+AA%*QKKSxmNbs|S*O$HJiRd(TLD$fgMl+fS{fGAVzBT?3D1L20cY90zW;ej` zmh-&=e4KHZ#TWc`&#Q*Nxyi)=Bi!@X{5%T!D#YW;QNX5a(6=sH@P)oaL;~Af%4CT0 z{wT+hsskWKGR;HZUm|$Yt$fLGs_@6In|mu(?q7R_;mbNlk#LKqVXm-QK$MqlzZN8$ z6|&q1yK(=&Y3B582LPRcU}k{o;ILV2kUZ>Sf;oZib)n);pAIMroUxi0Xy&!)ShTNP>_qm4uZwE=DCn#~v~h&S z?`9%oIXbSN`ys&slSH2lLafgZ0l%j9b zv75qc`>jtz2*qIL3ZwdUb-ilV3EKnxYlb!JQ{MsVJb}A0z6=JCc{yF8JiGh|Wjfl_ zS?=JWfnmDCZW8=3dgctybezHU2Vi&VRHDD;w`$};a}h2=+; zJc_A780O=0RkH+A93q4Ut7Kz=-CnPr44dY=#nR(%S#(?lx&^ePi!Jgv0`-d(Co5xg zHld6H(E>r_**uc$#iluB15KdOKYsdD%Zh(dE?s!hm;gh~u0X>kB9G*{$w9-2oS&-w zsFYJvm0T1+Oa*;R!O}h@V$dp-J=#?lUXFDO3|aelgPQ&54H)dW`pONQ0*=U(SUO1q zi;#P6MvT7avc&QxV4IebJMU45YDYv~4;o zWlmpF0_`f3H3_1!bbojL53uT(78)H0&u=yu^a2h+k2uoSavMi`7+tr7ns6*ddTe)-Z`qVtkn> zJshxkM}s-np3(dNhD5wttj^_}Q~*ZD?YGN!3V2!=7LHP@_P4YA8J-k*Fp>{e`2)b- z?mtGy){j&xQLbQgfX;6aMwm2c?=+?RyXL3MC7qFREECs+p}RRRzjrGjim^59D>!>8 zgyZsib`042M=DK0^m;@!<+}D{$U1N-YShq|Qg{N-l|ZruB1gOi{R*P>xAF^E)0Rr} zzXWp_Y177ShV%__cmgD(+_Mm=yfj53X){Ac|xTF8C#~^L#Ss;1HrKyh_u8$ z#762c&+1E&czdvCfGI4axy7U?lyBi6THCf6V^0b*XOAEFyw%ORf!%n35&NU-XG3s` zAF-1pX2BS68U0UmoC`ETuHkokRTha570BS$zE|MHodmZiAH0Q-h;B zaUO?zLd)TEyz`?w37oB)n4`2%!sB?7f&RzFhf7hHD73v~Ff1paqX zo3VC~KAE6O-C8`H(v_<&~ zfAzw9(1h8O>w#Y@1vQB8*-V=x&kNrJabK#VWY2e*PdIF;QXM}7!m?KKb{miy83S6GRF~oEshw;-9(B@F?D;)k9Uy|$KhX@P#!{CO`!TNVk`Bd<_ntu_WCJ99f z3RTzz&tzzy!(zT}9Z$}a80Ll_zOcJ2lU{9MhBozAE?Dg|QnNj4DSnYf<}>c&ttrI` zV6MZ8<1)EX5>L;xraWJ~-Z+E&X}OvZ2kIu=ki)SJwoVw5A_{ET)RaXKKVs|ymb-5; z@(0GygTfLOu)+9bUPN4!f`9js_Q)n)eyXW~eG{=BdpXUqq|%4jj*bN#HjF9Ermp|& z!bI%OvVh6cTF9V85HwlU2GrqqG675dl1q=5sha;0Gf*U7?3r{w9+3l`QTQM??h1#4 zhJ;un4Gng+noj@uoeqISJK6!Ut8Y0MTxp8}R1aiI%A_6u!Wwv5!sgN?41YFHg;F^3 zZbrSBeffs=Xt}@C)ORpbl6X=9Obf)d{wB94M1`QYqd{7Smk-x)9@30rR+h}0hBErS z5)LYVB$bVwu-dwq40cCn%q@zPpfh9*v+5LwP1?7d(gmsU@@XxyO>S-Ip@GNYkU}nt z&HWcpQw*huR50nV6A$rBs{jo1r?%cg1deWJ4-yNMkQL7T6i$LW$tGW*+9AbWv2>K4 z_z5PX&W?U`xDoq&4eUrQ4uD-%IW1P1qPX}Y{V>XafpKkuUHkLisy6CK`Idx2Q485{ z2UsA^7uSy-jYtP^mNja>1q&Vu#=VhZKoMl(ZX?0)36YOZje?|c zF_6L>+99}*ck38$Jk%P!G8?CiL`4XL%g0onwsBQcz|}f#^p>%a*})nnq&rDn64(dg z7H$i^ky^Rm!4(JO!_xGAi7o1dbi(1=k)*xoX)-p(JeC>V5~;q}T)yZzsOK`|r{57N zZ{cC{I0^vf(WO7A8J?TUlQbWHQHwd#6wqln9f9k5Bavz1LRoej)&nU-88Xx1V19bz$>(%b1Wa(LtAj*UhU#*z zK%k)8cC`dq4#!A?X!s()kGK)rNDd{ec7ewh0iaDoq6f!9fGG^(N^OD=3Fe4hsU$YP53RI{nn4 zL0wekyqUgbTa10NW%M&+%WxPafso*8uc4c5)fqYMD>szzwD*;^GevGCu}_HJb|!D* z*iNUzP~xtuqcdziP{RIJ8FWXxsO`Gp3}q27n0d$WYu?nJ?7UJZhcOh zUM2IVB8UKU?533q#*rHW($8(Ombsse^LXok(4u|qOV!;|fZzs{Mv^Ov4(d|OK@)Sk zo5_3ZHV*RcHWDPWf*}!@0fi!I51J@~54MwGkfTXqxke=kkng`TJJqtnu=8sAd^jMo zy?T@ektQ)}9?&xmOIQ^9IEzrwK7id^N1 z^fD|s`4(-|m-evPp^N5RkyT-p`YrY)15fDOjJE@m09+U!UW#9{ajV$TA+wh z>eejV<;EbB$t<9P*qgfGM5~#3G7}W@%ANk>D22O?Xv1oT7rMU;z1cNF6D`>P!9i`)bcRg?}KJaMdp0%`zcm?Uwk9X&u$=g8Qv!o zuSr+Eg4~SoQ;#r~(!QzF_UuGL{gH&0;4*?BEZM?hWIgCW9k`YW=8nwfHn2>4O@z!h zHq$XMhPx(*&BM0^V>fATGhBnA{(7Lhr2I@L*9u;-Mkx=F7O0gMTo$+0cUtW91=z_! zI;A*o4M^H{H!Y>Pqg>NNiY>Rk@EE2+Dxkp8{e6{f9SB#i1P*($!KE8BkTF`mGH74G zy=KiO_LwtaWg3xq70(`pid8R?qFl=$)mg`rpQYIEM=H$nW!0q0L~W-yMCl%3 z4Nw+|%CYb?vgrF{62Vu-_Dq`6*u{P2{?}ZI;3yr^1X9)p*tzW)Ff2lz?zHL6qpN+& z6oipLk%b%WCp|8>(U7z9_wByEG6P?CmqlX-zhnFG@=LduWj7p`?rV{1oYW9x-<}Xq zmqkKU5-WX71AvaNEWMYQB1lX;QvN;?~7hkFu>`wLFhiC z&_r$Ei$_KdQj3z%jA&)f(osaPkzoxUX6ouHVq{^Kd(u>dT`%{%FQSiE$P}v^ComgvIj^_0+1GE};z%Zl2BlELd`c3z_VpB@6y+2p)jh+cKe(7LvXRcF@e_gV=l?Hizpemek`+51YJ z9$YL^8yJu0YEmB92Hann1)<;U>4%fJL1Io=>+~q9Bi;PKw}kCWIy2QhWh4Oy>V{?b zzNlJCk@Qy?<=%Mt`hcM1LGBLZSdu?dgZFNUXeApV!Zax(vsKNJV@z5~p+Ky1NlZ10 z0Ez1G!if@N^X~2$Y_cEd#$9lw%?$zCBT7algUgT8YAu@p*v~1?G7|bA_c)a}hZT=j zi#5_NV#NRlL!#@x)KjY&j(t3mj;gjQ@*HTL1JiqBkgec-ZD*iv?fFLf^?ES(zY~Je zEG6=^SY1pt(_n7L&eQ$^ag?Qj5e#P~K*X5Af=VVT8xn8u$CdqpDDS6QyD$#g_VaS( z$6~=#C_|c5ZE^n5_i6K@aoRUe;E_Q)vIT-FDg7*z??dJfYEnF+To>t2>g4SFIK*b zZY`&^9IAUV1dovVsr@3F0qRC)UXyE~hcE8Mko3B2r%xpOdirjo2zg=6bj$^(Lz*sN zL?vao$(ZKPPWh?G^U{>NspbA2kHW?ssmefWyN~3?>sN@16G%=cFV2`us;O?5cin5| z$JvALTY*SZJXac9Z(By+p08G`=-Xq=J9x`c2OM`=A4`XY2v8m}P>@Q3XTL7js6A{b zE`V2OWgL&vk*I)2bseOkDsN^=Zhcc++URNFz+F0Z^dxW}DL8{)`5#nGUZYt)g(m(SmP$fSFa_Ts`wpXhs4(4g2vRBKfw z9j4iLsm&FN+SgsO)$Y_=EfjTwTOG+;15O$q;2RNX$YQto2cy4TA-kT6Sn#*#_AfCJ zs$di#1otiJYot9RPbkkAgKcEX1{2Slgx>IJ>J;*$)9rL2uJiqs8SVb>n@{@*dC+Qd zsGJsARE8yUg1u6xfW5@K-GZZ&%2z;aFlgm_x5Vsia(i(S`Sd;L4LJdhI4^e|<=0-H z14O-}TApH;@$GcXq-K+m|6Lq^_u~a}UthBw0Uo)U^BlWK>+R797V(#22zROW)1?5k zAxNjU_&32o3fUa6yJIwdLcUo3qLXJe(WtFMzJexoswO7PB`!nqqT}Y5l|}ChZ?~#k zgdm^6bm4p&gM?aIgUkX9J(nH_JpB@T)1lAYYTtHRxQb_~UH|T{Ne_i#eGBV76iK zk)8O+B5eUF5taHj8N=JptwF{bJpx`xHe3#fZs%byp}TNiq7Zpkyh(Sk8kJ;hmcWjR z5>^W<*-r1ONpEsG8x~fdrQ3j~?Fn%@WJ)Ed^pgf`3Jj*C?zuuf$MU2*V>N)6ohOt` z5WH=I3w>hIZ!CS>;GNz8SE>+_^2^4xknI8JfJN_VnK=EPz*u!ZtD;R}6bxepkv8cO z)znGL8B>Z)t1rK_1VbLeAz*fnQOXz#S6D5n;T~qlQ@iQtB1yV~F&6wGJi32fodFnp z|GsvN@k4t+xI%QE7&?16Be7FM3uEMbA{`*scw!(tSF-yxegy#8@h zH^vFrQ+m%1;ASKbP#qgbd*XXnKgJAGeifuw9vj%-NCEu6md?eW>G%Ktn_)sDO>G$y za;}_W%UL;>v!oOmIh1oT=gp~N%$zEX5ppPJ!bgf6W{FiKv5<|u$?~4_A^Pq8z5V`x z-Cnz0ujlK!UDxCJc-|jAnLYk*@l8fAaF`#>3n4I_PS-SRv|Q=~0ElFb2&)k=n~>Q6 z0!Kv1!l!WrpZeU(4W>9MYE}7(it`$msFQlq9KN#`VW#U>aIl`7^c|6CuhRs>06IAg z6J$)Z@Wl`Yp7|~YwQt1AoQ_^Gt0DXNV9(7hX2T~D2#nvB}i z&>0s|Vxr{2k2a$?l1Y=Gw}}H-y2`#vEC%5DHdL%NHdOZTk2jx=TwyA8@n(Uo7D!`3 zqP~XJ$8Ucun8R(!VHEbh1hes%)tC!_h>4>YrPd^kA9sPt4a-{E>5iQ|g-buotmom5 zWsJiUfN^3VF1rqGSOt0iv2p=cA7m!TUb#7m(?pP?YrxsoE<7&~R7g=C;}vcWLz!4B z87_86E%Zu&!!MhZx@aEMaO3-GGGFop(;JaREpS8but}4~MJEQ=asD<4XST0AuWw=N z*KbkmP-9DJIH&#@%0+7Xh?T+Uko5VHz1ytC73z51`UTe;$w(E`-gh(>mq6b+{vuPiA z?=R4lrd}cj9*)<;kvMGm^{oon0F1ArN9Im zd|l=(B4#EF!=qS=+Kk)9UZNJJ#XnMQXNlD@u)*=oyH^oZ7<0Xj@Q_U+Y%6vU&GZw+ znw%hpupOjtT_(utgZ3(szj$L!sc_e*t!MQV#1;iwJ<$1_DxD`HQ;a)xNuORu~ROLqgKq#tVgg7+EqU&>gOu1uUNh&UBg1lK3D$ zbIm?<+E-?NpG{Y%vJOvMmr7Y!dx*#HrTbqI2L9IG7#4>0hElV}VwtxmKa(hO&x(>F z8fKBfmg9Lx)@QnncAiV|Z6RCr5DzDI>_0KPpwFNPK5a(qCkY{%(#){P5X@sXn7eG$ zl02%~PJ2PZ`1#&ta;TGqNJoj+tKkzYUM$)9;1M9bRrgZhKLAXzm|^vRzq_#tf23i} z11(&`APCuYNZBPzbB;GbnD`Re6&ub6j*Bdj{oL%;NF8%Xl58-#wPM4om+d<(!?f7Yy{d*?D=C`#C<+%0XmePGVOQA1W-oYz3R zli8IO>!49ijw{E!`;sqOOM}B@S&ri2;ew`{&mo>!Kq{)_@G=ZI#kts(PVk8wR%6Y| z`11{Fft3&rf>gNS8{vpEjp8PWF>d5hU%pm^uuPW@3|IRwinF!71PHdNd?ECp-lj{X(P1mXi%8#LOZd!4b)Wa>5 z-){-<6mwyp7l-)22y00dO$ulf-7t7b&?0ot5v7sBI~gf&47oG(;E^zD@WzUbvBD9K14MppCEYKz4N5!B!mCMvKvZzo2*^>Yf{K8Ys<_7Q3vh}k2RHZd! zEkHCJo(hmoXNg;l0Ku&&SfsFhE0%gfv`q&GdMuAOIsB$lEeSpM)XV-<^DJw0xC=)G zwe1JwyAmS~FwDZ_&sA343VV*);$m`Tj2r!;KDos4=E4=%s}m3IWVED+f!7kg>Rs~u z!LSrHpI~iEK3Y0x{IYHvNpb9Oy(^KOBU z1+P4SX{q9aJ!b0O{?8%D22i)g`>N6d?%v6SkXEXvra(N>|e zwNpMMl$W`mPi}-?Qs9>{iuqNdkm4V{VLiZx>9cP%i$55*!0^LJ58a{6>dKIuam^u0>LJep0Ay7{ZHci*@7dwgt&GpA}Ebubuhj4=;ur2KUaSpYz|uc@X0D$0JqpB%*M= zljWx;OxGmB^)cy;uw_WaR;#nFzTSC#2^6VnT;Mbt0ix`}~TlmXo<^zr9UNBO3I^bvlU$Cyo`%>gKVL6}r;aKiaZ-CtOSC2K)HlRj5G)qVQDArE23RFWg=O z%L9aERm)@o!Htv;*JZ{YX@dw7`m>)zlG{|tQ40m=w(t*HSY|(8!Hu4z*-hwKT^}KD zZ_S)(0a?<)-5~i^(I{XBVIJQLoYE1~ynt^>GMAU^nM51kR6gPVNx~9Vu|X;oJ3*G| z+o|ZbnfF++Lrt11XS4o1m}l!SMl~E)Hy2V*#iyvU_G|u$_uv~b?HK?_SI1j@TL8^( zRnc^)vL$6_CI)(PU5u9i2@a5-J<`?u4?atE&kTf_K2|;MohF!<`rE?lx3XA*@yji! ztvVLPpSJnpf+qEYQAg>i{PqD$SV^|e<0t@g#ilNzn>Qa2!B;t=zc%>>!CE76=I~n~ zPah4g5k?(tY|#@IQiLof6wSbLh?2`>jM9D3f`wP8kd925T4=ak*ha~rGZ1!=hy~@HMG!FXGOMf+ zG;BGsU;Dvn++RRK06-Ss37)QsJ?fQcXCMsw{`5&OoQ}ENtRZ`dr!NdO3iOIcQ_~9- zG`yawU!Q!2UyvAV@>2d&o>(iK2H)9&d_OmDVUEs4a|^*XZxeN+>%|;=_#zJSfT2Kl zyMz|JDQhU17*qN^nFG$YZ!AwO!YUvlq2Rv&5f!885^vK%S|SxBi{-KwaDoPK{jCL3HY-s(*+IqK{uEtFj-Yb5 zg~q#(m&i*|0L9AbuQxp%j%?L}2fcb12Kz;PyppU83Io~*ncZ00bH`QDb{)Z?%(5^x zW7O_hFbED-OSZtk_>C-gzv%e+PtMduIA#AT`rPyqFu`28Y<~2W4dEkumpJ~&4Z&hh z=UdF=cOvTnUj@_4r*0Z&&!4W>eRpGOq7QJ%d}Nj53i`R`Ea{bc(#TRpLk3?l-A2RS%n)&JRUPFUs>2 zK(GC(3=0UekUvTwG>jNhi-k=jxANHXV(Pb02#}!V;S>KHq}n{M4dYd zgwC+N(eS!I#|*YWlB@vJ0U&$2FjY1?TqAM#%=oQ=JHt!j;e7j5N6J0_7+}v`cy{VE zE)J7x)6m`{h27Lq?f>r(%n~;?vP2I9HFg^847m#_98iuv_p+u_NkI=)@Y#%b_Zd`m z!}%5AAsXNar3-5fH*C`EmXugn6;N8;*9vo(A8*+{e(#%jKlP(%a(Y09uTp{;XLoD( zh@VmeFK@Hx+s*Z_JY%L?3#d&3w4dte8Fy>ZDnlWvR#z~=YFKdw^xQUA7HwSmf>9aD zI(U1B5SS>f^Rexxx>8^<%N79CBmrz3}S0#Ild1Sw2&00=DY$ zwTf3FZ{3+LGVZwHxW*bDVeeW0_diZod$=c3B@4&S71`}Y+?Zy3h#uKFRBLZpU9=n?03`&f5}NHwnEX)ElyNVWyeL; z<$j{5A58vp?Ooq6uQ`U*&baq)V;FV1sco|VV?Uuu`zTFep_-6VzfiAv3TU*wazm+7 zsrPtMr2uUMJ|grN(7#Yvc=B|o#Rokf%4ol3#9C0#3dbp^=PZLIcJ9qU5T?!>`@7g6 z97b$-8S8ssMgUR^ldBL*t3ga&>ij2fbw4G^JK(jHiOMh#x|F|h-XcyImH6_#TRZ+8BaNwj zABITXRl&~l^z>lUiLm6!7m$q6t{|DBAgJDvc}KpNiihNX-4L_et|-0h8eoHnAWgoF z@yw5yDi%<%>%osKW9tmMAiBR^ZPfY3pZ*K*|DoGE|JFf!1ba>u@^*1Rxy<=Nv9X@y z8uSjiK-O>1DYIs{Z2`qIckLsW4raI;-uK_xgZ1z0M*!5N+)r2`Rpd$hV<~y{bIxYdw&5%=;Q7Ia%b*~ zXdS2Q|BOD^;Eu0$7yKabh&os2jh@12oWewzeah>2qrTwQ&t=5erAU9U!AQ%{#)^IA z$I1S*@xK60?>X+C{xJ@FbsFs$L-w)c(i?#2g!`(%oqzP%A*hX^+o2kb@I z@Ysmw!@})4UX+3nA0df>J4oSh2Lx!)$RC29&5GS~UXNP!yb^E3yQH_+J=@T2b8z?L z&8J24Iv?T2kEFFnV$%8(cirnrTiOzb3>>txO?nL0+}0DT_xwhG&FtAN61cS!dVc}0 z`lbF@Ek0oV+QotDIPd{IQn};s$ti#vE}Z6D?UK1+YM@y{I+~(L{Jghw6SG> z_QU;WeBXOM{vv-HXlzs9%{##>k)Ysl_OLNA;oBy6H$LZlTjiG<^t#2y$ACYhS>_`5 z-`QF8CAORh;E&wskMqA;we##BP1_u2IASlILVf4QncG9%r(E_yZW8UBrbp9(p#}Yx~GHOih_xF?!y5BPL^{D`)boHOu&4g)V`=+V7n7e1`V<#eT+6UWyQ0iXP zD4cntk+*!Dvh?kL?b~zgl@@PGs3|fK2VWD33G^xI;-=jF8QV_G_I%b1cpUcJu642E zbY}2JV&YXds^E=R!lm%JB8Uw$cy#8RF)@m^hut?C_zMWyjRKwcfp?}4Yw*fd=$Bow z&zZvc>`g&OPwAMa#(Cm~na8V;Ycla%w{_3v|JEP_>3(k%VAp}kWqm-^n}01rchR{+ zXX0NVzER~&j-%z*pGyFCq{k_due9>7ZB{Z^O-tcq%RjY z`L)1!uWH3pW^nL`RX3X+vk8IWcPuge#^!tfl^mhVeKa*HQK|{R8x~VW*VG?4VKioK$ynn6hMdDsneTH6|Gy}<# z%pDta^;ug(&giZAdTOIlCX;jLuNkO{Nkv+z*;h$Jr_MICeCFL!eDI z+dm(!*?&%;fA8Pq!8x7%$Nx>saBeBQec#nlgQ}F1fOF=I?ehC0w_A(tZ~YS9u7zJS zEr`G;r>~drzaRWHBbfVX-pF=|yB+_$%dzB5O5(8={xe}q>G#h+GsUd_I`(vP!}8Cs zxhEe>?boK}9SNVbFG)uz{x)1XDE9oXMt$zP+2uGajo*5+;L9rm@ulFRTYmw}mokZa zF@FIgZx!5fLq7Ixv1G<(eT~(Oz;9J{(eaeF432G9&EmG3l6xb7d$6C#zVsLHT3Q!) zx^|+9illq)eCyv5TgAP?)*J0V%g_ZQP9$V(Qkvq8hJzaypSwz3Z%`LQj@k`WH}tZF}Unw~GvWupWIfcGfS++@3kpyh*S8?pJr-&Hz*wutQJ08u{Y(uY(*{ zI=558u*+_P+g;!grRF0l?5J-H=GnO4QQzI!^VyEaCCIH);NR4HR(iRYthst+*_`vk zZ^5KoPx(g^uVLhW6ULmHo7j<1?Eh`4yltfaK>gmm3!p0&N(a!89hZQ&XV(jpu58dz zbBgr^lCKrr_p(sWqZ?tu*CQd;2FO-V-qcjZhTrHCh?`p>{QKxgSPr;6@Em>MccHnQ z<~`1`9Ci2GNBgs?CB?D(clvweWw-V(8xxJ_Ka41r*4!B@lgRHkC3rgNMGCXOV3^lJ zm(hO;sHd=esS8Sfoa4W~&aoVwe6=o@FvDg1Hq!q=`cr$7kGTEfFCe>y_Ev90M4l#) zS}02upq(2;$_DLp-Y(E;_d19Z{`SwD=aoO_J!atN|L4(Udxw`h@H3Kfmj{b6*wyst9vS4%5 z}RweZqPe*XC{li?Rd!B7eA($Ru5GPD}9(fNF!fpz` zFp$}qd#EZj$Od(V$oWGU2qb0i#HvplOfV(kAi2ZDiyfKY{eo%Rq!`DgJq(JB|DGGhGiSA!<}@=JiJInj4nuCFwd`UA`yNU zku*>%{!^^I<`Mox?@4+bQ__#rhOE8f(JU+0L4C>UWT=a?b+5!woAKjCC$aKI4fVe5U1xXN&nCbu&pUj!^)c*V-89O& zSZq4gX7?G-W}mKM@v~*su*y*OWZD?aVD4G4phkFHZegU87Q_ntZoEr@G$hV1iDKay zpR^tX#(h`pVidr8c1IhZA0;|qM*R7HHXz%0Nv;ica6Qr3xm=2~u-H4LR#ifW?_CJv zvvhF@t!Ka4gt#Q#cX$GB^uExcA1|*E>)ezh1ro3yf0K3mRD)p^!upBwhu@&&LP<~n zDmczTJB~t*50Ljh9;>mkfXjVwn;jf2n3q2ym~gOetLbHiC;RD)*di+f`SM8FTGqRc zAtCu!i@~7D!l25RT%@Pn!#lu zkRkn?sac9N2FQr%ypG^y)c%s-Qxo`w9Df(R8vFE!&>3MFi$Yn zA(SAz>Uges>)YNYGV7{`f<+=24o4;2{FS=Sw|5VDk!IRoKulqSOjy09QE{jR#Q(vA z4_fsbtf&g7YnP6ac?P^dT{?_zbXlCoXm`OrFVZ?ILjQJdUKZa5QbLSg`T$UWyWOwa z2FhJ8i8S}xL!nrsPw4X?W9MOwFFyH`)`6$;YP{wx#~j97^)`W~__yk{fU>SP%GY@6GrzLBbV6B2=|(e-fr9{yTcJ@W zBU4?RG->o0qO$*3v!L1l58%H>@Byw&k_ms(qXbZcVDcya5Rh1oCUXt@JiY>^x;A^e zt1W=R{mquoQo(oV^I?C!`3#wlb{!hQVl?p*(;*fApAgp^{%Y^WR)E|oLCB7 zaA7t1z1H#a5=g2gvXc{82gi(rPgw3?2t7xGa(QuFbK>iIByqkvv zeB&05Rzjhlr|SrVK$|ifoU%LOvEml0IP)7R$K(ZU41>` zPsvs(FI=E+HkYtw7sBt$#c6hzcsz0s>#0AApL(Z=YbuUb>=GJ!K!2N%KkKsAZKU== zUdWVRCgIlP!&1tfz%QEIjp{N1Q&`V{D$sd*k)GM-vUz0?1d|N(w#jYC4qLCt1ZglL z$%s$99-WeLd%eVv;<%IM-1FXa%`=wrZE{^FL-+IxgQQb%n>4O{=aEzlY07BK0qs>r z(YZchp<$8OQLhWl3K+Qhp&CT+ei&pD+7sUt{XAX8&;%Nm6){N?54yWLtqjsrqsZ$B zU0v~2=ee&C-t&JwuE|yvkULwj*cBzsHNhr1)&Q_bu22H-XAhe3ExDO&xTBF@b zMz?wlkTJ@Fmwedk6=v_Xv#g;Pntj5Uzicx0VG7$i!u1`HWf@KUWqqWBCQZKB|FcHl zZ^LGKml{-5tdhhi6oU)fwhXj)555>u0cR!lLwiL#?d)5XCoHb!GaE`Il>&g)6`&)l zBEm6>4z&Zz#C`7+0-vsiQu}6!&~?#-n|~nw`EGEw5^C4!BFIWZllg9|QeGI|OvX^U zK*7M^L|w4p8z{jlqJ=cz|J0iSZ3NFiZ4sxHUVLeg-U5;xbO#J^0>+*dFemT_3n8Ws zq+IN=!73!cMIkK7T1f{|U%8m-3Z@wa;5*1w0KcB+4}cLokL1Qb?{_JAuQjLfu()kF zjC-@dC@M%km%RNv^okQ}7Auz%D*Dty-Fg9ETlsnlJ5OmVCV@Y#6yn}uD+IN$rzsVBonccKUkyh6 zsZ5?31~PYi@q>ocRc#hlEgSVrJ;CvM~O4U#&xRxU;d^*6<_ zzIc_s7mtF>b+yDi`9UD0gDVoCcR~KF|13tG6bB8*je2=ZC`&@)vgkuSP!8{dfDG<6 z^Z|<#)}@AIuXvmQ<-_Hd{qZL)m7r!U6@Ex>*KyH9(zF$u{&1jF<8OVZ7hj|l&hCD` zTaqL!5j@UwGO|)UE!m|em?ut$7 zAqC#%0d*2j4h2#8Kb_%x)Jz^YM#dgEE%h`KzdTdHzY84!_UkLY5C6{c14Nc^bN$s? z&4Jl5CBPZOYQu8|tTjA6J;=2e!R7HU^t)~vs~A@MxNQI!roa8&bvbC-s)~>A>F%B_ z9bw;Fa`{pZrqUIIhPsl-Ym~s-W^7Q{9MOL&KN!vWBtgr+{iC(hy8vww>fQuKZR&NY*~n{#m8JQ9n2lYtIe1QL$pNE{{11@!p^cV@vFPM zS|{&JnVa&ket|#BwB&9!)TfA19Q~Yl%X4_Wd+oBk<%p?l@qvIUN5YTi(idy0?iRL| zPY)SoL}pntk=K-`aE;5y+~>1bax|d*0DYO$u@^X26)4 + + + SBB_QURATOR + 2025-10-30T16:38:21.180191 + 2025-10-30T16:38:21.180191 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + From f902756ce11f0d59ef655377651b81faddc4cb47 Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 6 Nov 2025 13:10:35 +0100 Subject: [PATCH 446/492] try importing torch, then shapely, then tensorflow --- src/eynollah/cli.py | 2 ++ src/eynollah/eynollah_imports.py | 8 ++++++ tests/cli_tests/test_layout.py | 44 ++++++++++++++++---------------- 3 files changed, 32 insertions(+), 22 deletions(-) create mode 100644 src/eynollah/eynollah_imports.py diff --git a/src/eynollah/cli.py b/src/eynollah/cli.py index 5ab3c9f..9787054 100644 --- a/src/eynollah/cli.py +++ b/src/eynollah/cli.py @@ -6,6 +6,8 @@ from typing import Union import click +# NOTE: For debugging/predictable order of imports +from .eynollah_imports import imported_libs from .model_zoo import EynollahModelZoo from .cli_models import models_cli diff --git a/src/eynollah/eynollah_imports.py b/src/eynollah/eynollah_imports.py new file mode 100644 index 0000000..a57f87d --- /dev/null +++ b/src/eynollah/eynollah_imports.py @@ -0,0 +1,8 @@ +""" +Load libraries with possible race conditions once. This must be imported as the first module of eynollah. +""" +from torch import * +import tensorflow.keras +from shapely import * +imported_libs = True +__all__ = ['imported_libs'] diff --git a/tests/cli_tests/test_layout.py b/tests/cli_tests/test_layout.py index c3076fd..2e4cd49 100644 --- a/tests/cli_tests/test_layout.py +++ b/tests/cli_tests/test_layout.py @@ -104,25 +104,25 @@ def test_run_eynollah_layout_directory( ) assert len(list(outdir.iterdir())) == 2 -def test_run_eynollah_layout_marginalia( - tmp_path, - resources_dir, - run_eynollah_ok_and_check_logs, -): - outdir = tmp_path - outfile = outdir / 'estor_rechtsgelehrsamkeit02_1758_0880_800px.xml' - run_eynollah_ok_and_check_logs( - 'layout', - [ - '-i', str(resources_dir / 'estor_rechtsgelehrsamkeit02_1758_0880_800px.jpg'), - '-o', str(outdir), - ], - [ - 'Job done in', - 'All jobs done in', - ] - ) - assert outfile.exists() - tree = page_from_file(str(outfile)).etree - regions = tree.xpath('//page:TextRegion[type="marginalia"]', namespaces=NS) - assert len(regions) == 5, "expected 5 marginalia regions" +# def test_run_eynollah_layout_marginalia( +# tmp_path, +# resources_dir, +# run_eynollah_ok_and_check_logs, +# ): +# outdir = tmp_path +# outfile = outdir / 'estor_rechtsgelehrsamkeit02_1758_0880_800px.xml' +# run_eynollah_ok_and_check_logs( +# 'layout', +# [ +# '-i', str(resources_dir / 'estor_rechtsgelehrsamkeit02_1758_0880_800px.jpg'), +# '-o', str(outdir), +# ], +# [ +# 'Job done in', +# 'All jobs done in', +# ] +# ) +# assert outfile.exists() +# tree = page_from_file(str(outfile)).etree +# regions = tree.xpath('//page:TextRegion[type="marginalia"]', namespaces=NS) +# assert len(regions) == 5, "expected 5 marginalia regions" From 8732007aafc1e6ef9c2ad6c54c0e92e356c50624 Mon Sep 17 00:00:00 2001 From: kba Date: Thu, 6 Nov 2025 14:15:33 +0100 Subject: [PATCH 447/492] . --- .github/workflows/test-eynollah.yml | 8 ++++---- tests/conftest.py | 2 +- .../euler_rechenkunst01_1738_0025.xml | 0 .../kant_aufklaerung_1784_0020.xml | 0 .../euler_rechenkunst01_1738_0025.tif | Bin 7223024 -> 0 bytes .../resources/kant_aufklaerung_1784_0020.tif | Bin 2076497 -> 0 bytes ..._rechtsgelehrsamkeit02_1758_0880_800px.jpg | Bin ..._rechtsgelehrsamkeit02_1758_0880_800px.xml | 0 8 files changed, 5 insertions(+), 5 deletions(-) rename tests/resources/{ => 2files}/euler_rechenkunst01_1738_0025.xml (100%) rename tests/resources/{ => 2files}/kant_aufklaerung_1784_0020.xml (100%) delete mode 100644 tests/resources/euler_rechenkunst01_1738_0025.tif delete mode 100644 tests/resources/kant_aufklaerung_1784_0020.tif rename tests/resources/{ => marginalia}/estor_rechtsgelehrsamkeit02_1758_0880_800px.jpg (100%) rename tests/resources/{ => marginalia}/estor_rechtsgelehrsamkeit02_1758_0880_800px.xml (100%) diff --git a/.github/workflows/test-eynollah.yml b/.github/workflows/test-eynollah.yml index 4e5cf6c..e5be3e6 100644 --- a/.github/workflows/test-eynollah.yml +++ b/.github/workflows/test-eynollah.yml @@ -25,10 +25,10 @@ jobs: df -h - uses: actions/checkout@v4 - - name: Lint with ruff - uses: astral-sh/ruff-action@v3 - with: - src: "./src" + # - name: Lint with ruff + # uses: astral-sh/ruff-action@v3 + # with: + # src: "./src" - name: Try to restore models_eynollah uses: actions/cache/restore@v4 diff --git a/tests/conftest.py b/tests/conftest.py index 703095e..347b7ee 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -14,7 +14,7 @@ def model_dir(tests_dir): @pytest.fixture() def resources_dir(tests_dir): - return tests_dir / 'resources' + return tests_dir / 'resources/2files' @pytest.fixture() def image_resources(resources_dir): diff --git a/tests/resources/euler_rechenkunst01_1738_0025.xml b/tests/resources/2files/euler_rechenkunst01_1738_0025.xml similarity index 100% rename from tests/resources/euler_rechenkunst01_1738_0025.xml rename to tests/resources/2files/euler_rechenkunst01_1738_0025.xml diff --git a/tests/resources/kant_aufklaerung_1784_0020.xml b/tests/resources/2files/kant_aufklaerung_1784_0020.xml similarity index 100% rename from tests/resources/kant_aufklaerung_1784_0020.xml rename to tests/resources/2files/kant_aufklaerung_1784_0020.xml diff --git a/tests/resources/euler_rechenkunst01_1738_0025.tif b/tests/resources/euler_rechenkunst01_1738_0025.tif deleted file mode 100644 index db6bae130a9fe8208d920cac4bcfc3611b014d61..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7223024 zcmeFa_m^Zzc9@smd+)uk%q;JHbyZh;@20)?-rMnJXJ%*C0E-0(vseNIK#&k9fdn7{ zlt}4FB1KV0`c3EPpV9X+s$OSTb@g`6PGj3d?cJCgH|)FlGCRM#5%D%|tZDvV$5o9+ ztAD;|beiu+H1bb+KzcxWKzcxWKzcxWKzcxWKzcxWKzcxW;LY=Z4HMwkNuhSOvy25@B7K?#r zO=*OfbQ-f>Lu$}yj9Nt@D=cI&Vo@s#MN~7k{tO0#PN!q(x9XxbmVrO5pw^ViJK_Ou z5{t#c{i0qkx?a!SwDgB=GMU;f%O&!^(gV^1zex|s&-34;_vP5q1JVQ11JVQ11JVPp z(gPlcRS47!CMTnzd^%CerK`nUCJ_z#Fxx?AD`DtWM7$D^bvkH=$_R=mRp-tZDo6(= z0(tlocJ*5pG%fzr`cioZJ-|J{Ey0CD%08bjkw^rCL5IVE9V03CkErktI!9iz^uTND z0riu7h4R`?SWZlOKzcxWKzcxWKzcxW;3Yj^)@dwyjn$xWnDt(VCG2x0BY{jjTu8^$ zu~5YC4tg9&$m^tt)MnID-=x(L1w>HDVkE>YgbwLVts@BN1z(~lq7Eo6X!<3mEvvoV z9^gkBV7XSS#bPm!#{(vd>*ZGA@9n;Jd3DkQZ=45Ke8Q{QH||2@grx_h2c!q22c!q2 z2c!p9ct9oJcKC&zJ$AFpYDC;Nliy{F20VhJVQ0U~?seEbc2ZE<9Siy>a@*|=i^W6) za2cFY%rAj9DlfxCDi*Dnq|Cnc9$?7J!MdvbWohFUlps^8XCOX^_pWW!S89Y|K zD5U~5v4q^#pvw|)nn@#Gdotw7Cqkukbf8i0spe6lJ_lW~-WoBlnxf#Hb}kmYEbQ%M zw5q8zyv-f}koju|VRUufzPNI6tB}6Uj*!vUGV z9d$7g z!1C%pv0oKxmJvgFE8+p}g!Z3J1SUWC-k^xe(rMBI(gUxL2UI{_p}amblrxbYkRFg8 zkRFg8kRFg8_--C>>UDO`xNe1&MQ~Z94xJXEREW!<7ZP-PEv${zXlw?P!(=gOlmos< zT^5_y?hLp*ey1D7ZMA!B4xhu7i(5-+XEo=k<-JIw6zJ=U4s|C78u5D3p9&kp9!(;s zr&`pftPHpFSm#xf5f+<FWg|K* zqSfmVok5Q**+uEElYY0CLAH_}kRFg8kRFg8kRFg8kRFg8kRFg8kREu=JYdym6wwkX z*4gxW#BMMkDlDs{EWjBA7QAfL8|_B3QKLLb3SKsA^<<%DNEwcXrV%Vqa57}bM9riT zuLgmUiz!F59E6ua=|a-kRq%n&aC9}7u4IyhWRlg}=}3g_&+$MY;B>mo<~Byw5C<2t zwHqvgmqn^Val4||nT-ak*<`nv9mw~_#cMJ3Yxere=|~Sq4@eJ44@eJ44@eJ44@eJ4 z4@eJ44}3Qdz@Y?}L1v=N4xJ(>LU-!5E`v@LQR=o@%`Au|1gzDvkejDap^KEP(+o0O z93~Sw6%e;Y6LM=}er?dDoD#;CXOwKrQb^hfIz!3?ox3Zc(Z0;&P+@!^H`bpY?aK{! zD>B%~^;DA8OgJC&`m7p{S?4q;Q*CEwfvq}?R)}7!6QbhmiY(_vAzF&%pY(wAfb@X$ zfb@X$fb@X$fb@X$fb@X$fb_uH9&nmWE|bY^GI>l!w^0ux1Ispz0?z1Igs67A(FhCk zoMo|X8B*rVYZIc=QRKDT{Z1#Rf`QC7g8_k&;bnr$QJ=wW;lOQUI$|TZ91rLSF2l>D zEXFQpTrAZFoSWrvZ!I?1$c*+D#`?;`-G%;Iw!4%hZ4}alcsSwrhdmyj-A>S%IJnBi z?Fv9_(CZOaTesWM^?EIWnZ2>%Pi(kb+e5NMdO&(WdO&(WdO&(WdO&(WdO&(WdO&*M zUG#w4q$O_awdjSQV{!4sD7}VM6p6sDt+mbBMgY3C%b2J%$gISswTfVLn+*YnHR!bY z?G|*GNw1`4Weqm#vDu{TcW4lT%RSXF>$D+dmShuXMkfHBj+g-FWQe`YmTJ!3EC;9t zK{raFdNBw-_jje5rFf$l>n%rk=S&?Ojv(2n=a6DNX zKNFMI=@rovBX6@mqicA8QAhGF-bM17r3a)3qz9x2qz9x2qz9x2qz9x2qzB$U57>1Y zK$u0&knhsMY*uCKpYpQLY5=9xh0xRigb}tp!^<|-U>gk%gC34{8g(9v5k^JR)Tgj80cD?^M>Z??3>SmXtiw9UUqT^k%y2@vcg^Rt}LO`HUOMBpvaHIp9@-%S4Xe%Isg}Ol)GxxwtJC zXScGAs4=Uu;bqRmCKbHQ7G|g!Ixb~w)w~mA29xtiGvM4+bi>O8ond51xu+WH>xy(0 zykzUu2y|R61rhX~Mgn#o87%boq=yIc6QdP?nrx$%&!&>GaLDI#c)fO)%WCJqY);NL z=wAfOp!7?VvZ(pi-5~OUqz9x2qz9x2qz9x2qz9x2qz9x2qzB$n4{To?+qy79x^LU; z;XR9cx6aIt)_W@PxZf1>86zHjz`@337FR3cG%A5(r(WYVE8;RJ;^PEtho1An0yZr| zDaN2(hoG}z84xC#j6zz;x(PIkUCiQH%!HL;XCcbM?5G}O&cqCfkR}z;5o<=s0?~kT zchx)67aQyjS8}#&Twh3;Qc-<8q{${sz4g%8V0v^QH8q-FoU2cc2EI_(;VO=B`>^z4J?c~op_ZYe94bBiq7oh=4E zkEgO4d2+L|Y1+(VtMu%ICRDAQB*rpu{!rf0yGmZR^uW9A0p9z36sSH>KNeJrRHO%_ z2VR{A#5+#?xA@f!Bzu+~kRFg8c(DiCzYM4~pKDZ0rSckj;M}oYrw(m9wtv&{1Dnqr z*@2)O+_iwf&Rgb(XGZG%U4=>}k&OmZAurMAfXy7V8+;a?Hnqggpt0X-j=D{V&tmjj zjWLfo?zMy*R`x8z!Ysg!`OTb$&4y+0xtQ_+&g@(!CDu&zne)Eb;0#KO_1Hv~VPwFW zKy$6&64VSYSM$W0osF^^K_Lq%M+2HzP*cpM<=^z{Ajg*;kRDj_03S-~pLEKb=z(*`x1Bw<^~}*Nr05q< z?Ywe!&((8#&!5-Yh&aTvqF8awVb|*>YCK_?he-`I;$!_;@EKSZPhn;yHVWIB z(-9uc%rRfiOwfmri!wxe0hEo?+rO; z=>h41@8$tM1Plg4`{%%S>qoYd9*`dR4j$m2UEY2CDJ#9?zrWwXgR+VAfb_uk=K%o% zd@JCOPb9M6U*EE)e;{%WW`ykNBb!ei+DH*{>hLDALe9Lm^~lbJ z9rGhI1C@bFs*z3AGfEn9*dq>e$Yuyw^dXxz=F+8nCa4)+W*IhIJULX@wRv=Uqy#1p zHWRaB6(E|e&6J`LfF{b!UgoGz*CJ(GB51^zPuSSDteo@}H?pIdHP}I##B{969_fpe zGN#_HARH}9`|6>AW(0x;uCX*LK1g|Xyg1sQ1f}6<7JK{LnpoHp4VhDM2T+`h+5H|Z zI^YZ)5A^1sX_T?y%Jf77>F+L0jC9XU4bDyuOpWzUj`oZX4-WVD5B7F-m2-(m#N%+- zc@nhQY~gLN{GzGOa8>ElXa0on9I_ z7wLg_&;xuT@s)spnuw`MUVanrfs=Veylz-P#SHd`^(XK z##f3t@?k3?o+JenPYoC0<tza{E5 zq{B9zCxt-M%4uM63r%2QV#j^;Kq{i?%Dak5OFFE}$4$+Wx4$b?FSxT&Ltia8+#Q=7 z%*>4yXGaU;{i)t+pq6uj+wq_=;?t!fmRcc@iP=I*heWQO#k9Me^>!75BYnAbla1N& zuA!dHaBp^ItTsQ@Bt;fydp9o(ZkX$%0$J)%G&fQ2ZzSo^{k&z@{KVZx(r@BDBIhhU zAU)8T2P7|d<}X=Wdfh41U&{lMmwzqyy&jG4Ts?%`y8PbNb9>I8 zAhNs-Mixc^%L1P%y>y!B^VUnpcAj}}^O2qFAmzh5<{)M8`Qp(Xrw?oboUu^!-qz`z z3&Y#y26oI3v5I^Bbc3)mYq^PKvkaT9$_PvESI&{`S=ax328JesRy`vpX)F+KQlD zJ--X(^wA9`53fIWV)Knld+*;oeDB5~D&IMO;L7nGr}nKsx^wo}uDO$Y7ld3sw*A84 zEu?oZym#;7!5gRdTsyhz_PPC!@0_}M={>Qo`@+c``?gQAg?XemO;{O5cJpLslag{Q z7@|C^*=5Z}{DpWR?z0CSdcw_#fHf6@myM7zQD!9`oiK3%7$;^Ug}AkxaugG`OxTzu za-DMIqUMNO6LM-|UJXVnP)aJKyB02HoPL)k1XGs-&1$Heb7QO)yrrxw9Wy7whCXuDNNUcW$z}b;AG_N*k5Ha9?U-IFE6|y6(lr#mULZ zMxy~WI~)#z%Wv$hAtx?9AU*IMJ;1+=y!YFGC=*}czhif@ne>44KqnrMyxfVmWL@b2 z>48`70X~$}zgIq(9N-mr;L@4x&@spi4&S|g5HyCAPaRqO!N-^1zkd=y#)4eCup4kb zesKQD_2UpREDSXR#o#kpk&?Z2cHiS`#~xleLh1QKn=c*RcK^mPm6z{cJ3^L?(wrxD z>GZDiCw3g%y?Oog=tyt1x0Wtvg7E;4el{ip?o8MdcA4EKjmNABI(1>U0a#{hGl6NA zc5|GUIN*y@yn+r*GN4NZ^)ND$4jIsaWuH}(3L0uzccb8|q@AUty({PGt%SNtet0?J zH|CO#@uB?aK(>qRMs$2AH_#m)>`4I5Q=>&J z+cyot$S6QGq>LQ}0hXs)e#g$u|K{=@47)N1J6Z(=NjLnt9tk5di}ya_9@@Jc9<2{Y+Js4 zvWxk%_pd&>efs95LzmC&J$r2Xsl!_?o!)cr`f)6G zt{(m1{&{qa7f$Yact8z-)u-FN2bwv&f8W4Q}T-#T^U;z1f7+A~kl=_6aIeEZ4~ z(oO67_imdyad6YV?bCDPU5!#G9xxDbcA1p`G%L3qMkN^Sv1)=Y4dLcu%GFf}z|k?k z9+YmK55{w;6rGgPY|KU}op9!MDeZ2S!~OLH$8?M5Sg|0S4bcENtHEJ#!qyx+hM*{q zwTiJ0T-kg*&`8p-obfQ$Sbu&4`$SSpL!Nog^SgJ)$t#u~czZm+7xESV z>T#Nf)9EA?7fGr7y8iaO-ttPM2c!q22c!q22c!qyJrA%+8CXUDXTi?kGnkAnHY{@j z7`lMyQ-?P`xqBAug?X=EV9WDeF~pLl%d8lInh7xzZU&jD2@b=^JlUB*Gf>PnYEB+Q5dmew%>bYjM;_z|r?_93}J&4ui%7kqH*V z&cu}AUoe>vGb!t_Ng-T}RDg@AK$hS#3avzQbeI}m7E7{;K7-5%;byoPSO$R<=1i9WLmn$^w_G-2Oj%99A%Qej6X>I^t_EQKTn4H^T&Vm&snj6lutfGH6) z5lZISRS0opYQopRGIR_kQ^aCyU>QLHzzI(iWrmIsR%H`t29Q~yO>e+5tFp0^WdS*s z{%U%(yGU?(uqzEdD{$SPAM8$-3&Bz@oJj^EL65_3*XvlIZItV=-?Dd;yqtH<1H_J3 zgpWD>(rUH({eE>xw(1kPFk5wnRHO%_2c!q22c!q22c!qy6c1d#z|w0rDIeq{F3z%k z|K6F89-ja7$)%4UU3hXA{yi)7hqq236cO-+c?BAawb@WJp=9=>zo9l-)PAAfHnfHkb@bQxjOG zEg@#&%Y?I8&P~sB1$q;|CXfv{PYss|Ta#r(ESZp_mI?Ki<2|Jq91Y9!1ZN&ZRV{_9 z#aJ;HO+*7uhfVHZepB8ca-PxyFYtii<@UfaP;9YSd_Lc5UIwLK(3LEb9*`c89*`c8 z9*`c89*`b*4LxxC%Ap$<55UXdFn}zO`IE;NAKW|+L4Ww*+?O9;C(F^+2oMeb0>S|D zr%$eY^7t|enEd$%*FJo30a6xGX7CpHg`GuB>>x7{XV?}NrbsX~7QxqGH4sfdfHrMm zZDP|9G^@CYHIrfojfGBp8C~Er?92pcOWYYFMOaYSWei;J*f=rP-(4?e${9aTaE5bz zb}iJ*dThX%bHiAAO-z|MGGS%09vf0d!N_WD^pKm=!;Cb6g)3PvowP6Zj{3}yGIoF( zJN+j^c8^ID_nPxjXEE-k2%zR+&1_+Ab|vbSWIhuP`kY3Cj{VC7m*xKD*YF)7rzAbl zkq5*MWdY8t!R9=C7Grbdml{o0XP!~p~G7Zsb0`O9qQr1z!!bF?_Y5~6(sYVa5t$=9i zfW|aoAV3(DrZ;F8ANX0arjC^Mi<9fqI+Yqj%0?k;6DU0|rI&~VQKx@smlNH^ZA;ywy zvHck|CJRB6Ws5YW;&?G4(S)Jl<*-v9_ZpLaHbmQ5zs>pB<*b+W*p))Gkc~t`UaQ4K zd|9tyFSFxyxCi7Mqz9x2zOx6wVbECo!ONWV#TI6h$)t8jkm>z!c(G3V4pvF!s;1?EVVuG(xs6#1(0?72w zD6GWha4?Kwwj37*EDNBfxj<|b4%{Z_j6x5m4s-tY{I zlFgQ9LeFG5IvZrJ<-IJ!=14KD)dEG>< zSq|Ft8g?!dWfrTi*|RJ*L<7H2Gjz<+UZf(pOc5bwcv~EVzn@h%*F=1?)u#5EEY} z1`SOUO-3k!TL}#Vt<)q1Y~fr|>QD-61H51_buf}eghDCxS#eFQ8P>)w)?=fHP&En= zOsqc*sJeGX^|d~9*`c8 z9*`c89*`c89*`c89*`b*eLV2-!)uRkgURPUxOe%Z2gH;wKDcp~>}OAIqTjuG>h|Rm zSg7;p*7^I_&xr6btP1ubD1fsF_(IB*il{LS5-kY}jwofliHQD}5h?#&g;0!f8 zP0HeJaa=ZAl{wvu1F_k>T#S3mNpB_PqmIk0X;vb|w7XW0)JpM2E!$l$=W`ja+edtv zM=-xWcY>UW^nmn$^nmn$^nmn$^nmn$^nmn$^nmn0y9dBzxEKMJKY4UrATzxD)yMb9 z!pOifrBr+W?j^EdDU=G9wqmtMfLsI)#mKT}Ld=jdEDX|;73@rw=(AvI7?vzWR0Dwl zWcmS|X)cJE#nn_s7a?O>p)(w#P!rImrWk@EFqxrQ2aZmkfHPgukdz=b{h$zWW~cM9 z0~^@COhY2eCk}29TufM*`11Z8Q#>;Zh$h5Ld>Nqr?ETAE&+Vm%h%>F|igD=&?q*T< zhS^>o{X8|?9Bft^r39Eve3_No4x>WHPQ51TRu21$y7grJ4r3|fspNdcoIe?L1$|bp z%W5|9@MYzEY`K5A{Z^E^^nmn$^nmn$^nmn$^nmn$^nmn$^uTN70nnJ(u^?gqnXobu zXR=>@bXSC)DHWk-co}vEfgx6b$s!O8%!<>!KwXGcU^2XnK&)SUcwJZ^W$KIQFo9r7 zfm~1&3P!+QO6i#%0BW)zDp6?&7r{=JQAB*1Zn4l6Jy7RIKe$hdPM{urmv`p=J)vX2Y`AtO?i@v1v4%3C8;Epj#95>xnO?Vs4+? zsi0xXjvYSQP-iu8c=fb@X$fb@X$fb@X$fb@X$fb_uY=7GnzE`D(L%9God?q56m z=;nn_AKgSSA`fqz$MWI5tDil-^~u8<=(jGO0Fc3Cpjd2GhDPCBa213kl+1c;2p1#O z5Q?B_6zG-uf_$lrkzh3S=?ZoxLXC({L_>7QSFEX~XP^~Q7GY=@nf|dLv;}5=`t>7W zM5hjIvC}g{Frw%X;KlemKl*_1F_;V#Kf28_Z_XJz$R=eJc$t{8;AH~MM4u6ijKw_Z z1X?Ff4KhQ@7?~RY4%A~O6*LV>-??<++L=S=kM7z$JILZ~o@2%Hqz0O)fLCWTX?PB+ zSc)yzW4~^11UVn+0qFth0qFth0qFth0qFth0qFthffXJAhhgFO?_9onS+>pcY^C(j&htn2+&FjW=$;KskAVBh!&~lNKYr!np?$lyj13Q0iiL0} zNPJnZGaK~syss7aqs&SVNDoL4NDoL4NDoL4NDoL4NDoL4ye=Mia`);NAKphkxPSe_ z2RFX__~9pyZ{NCn8hLz&J;-;+zJKo;I?C6dKE8M3%+1TkpWM6j@#E|04{n|N@ZnX` zAAj}!Z+-L84?lnS{{71jZ=e6@(KRYOzI&0BB9WztnqPi$59Q9aQy4#aasvzU*$20N z@bO(Nw51jOBPfsVToCfrr}wFL_xfoXUOj*K{>`(rr2_ixt0zS(jLZ$o<+BH|&?gpA z=jeegG^Ytd(X|UlE}eOg6#erLZ~y4aCtS;kL)&Tl^=A(li$NKP3M{t;42{qQl+%_?s8Q}@>soT_cN z>y1W@mUy#H<91o~I*nGNF&Z=mz1C#Zn~f%uf$h#ly%vK(QH%zI$!Ij2P?W06qGZ_> z&HwZ|DyXGIv|p(ZFn_h8(`Y)8S9-CsbLoL!(*x=o_7y3==JbAL^Ny$8(Yu$*%O7Yj zt901EUZd+sUUnLtPJ6ljuXOmT3a@zn9s2Xte)xZfe_WOw`?4=wr$emOzc`X)b-Gsl zwd^mRkCyKWMy<|-==5ep`A5Ga>Ew&2jxQw|Mr1g~vLGmAF`_K#t*lZ_h~A{p(S$E2 z&x+BaF_<;&rBp)-ej`76r;E(qeW-7sru9F z;&RlsD=uI4iMZ<3=?dr2T+~_7&x$N{#Dtga;uP7{>2u$w%6YT&u-x{glr`>8tHs$q z1e*jRIc+0f@c)7@u@+WSM)o*r-RWFR(KHOPCH=35d zX^R(}P7v+|q`mWJEN$;oF_wCht{9Y3is+|Z7|3AIm`oZAcN5NT-HhUn6V)(^B9W!C zzyY$Nvf3M2`ca19!WpGC9N87qXwOoI4%Hso6$Wk33Z4FW&DcO{W6l~y@S@NiTmlnl z8A;7r#ZFC8nQCg0XeF{#LqvUQx)_xC^M+$qtqT#C!#u4v4R1$0)8_Bi2V3i{tGM75#_PFecU_?7pYxYJgrk8bn0WQCN0;*TEQ z1dz$zy>{;Q6+*_B&;jau*U#U-aRL1YA1N#e)qA=nixYmcpJN zJh*}#=A{S%1(3;NBnwkh4IMy6rxis&H$r`QmQs-wO++Uc8IGP2!N`EHu%J*%Lt3FT z0R#|cN=%RD*DoGDesCKW>OkwHG`w(X|CMuxn2#7yMR=(H+aEl}$mke}YlFi?wBcev zn2<49NEtLHATHwC=q%DExJ(g7s=>%?XU0MiDbT%p%f$X2GxUZIETe$WbbI;o=~JhU zPEAdw)5&-&=<~YUZ0d;6S=>+DIVOwQV&(rJHTTl9_nC52X?1!X_ElfEzGr-1 zz7NIS_zKe1H~0%Juljmr|I!1$jt8Fk_h9+${XH6gow>bC+m5HY{I$02U#J(pfjjj{ zhy8a1%T(_8dOMu|%lQ1+I()_GKsug(hyHx!pZ{xoKVNg)4*kDoKWmMss~K zvadDnTK2X2S;GzjxBuyv|1drUlGfKmvJ@H&)~A@f49*z!ExRbvVppkoE&dW%w_K`4 z5yE%n7l`_O8a!eyX1*Uk|IPlc5b^5=|t^znU0r6R7dB9K=klVOL%p}D=6&GOP}6{;tmvds2~?Ctv{9Z zw9EGniXy8&?nDJ)M}P6(vz>GAJpc1w^tto}n~NiswLG*6p?cz06t%csHLj&7oT%lN z6`VkGK?+w=;{3ju!B3)DgS+zV{)Y`xQ=00L_WlVM^=%OT3`&ooSg*eEyB zx!L1!heAQRqGz+&C za%q3iYcSy&joD&!xt)mOQgNBBOXLb#fkEZwQH1!P(M+OT8ozItn0+e{C~mhF7Pmfx z@Wrd;Wdg@44uAaU7Ss$TBM)xEhF5@PbSfYeLDS!S@kqcdWD0K*&xL#irILc5@GO*y zQ6MJ~VThO_0mTp~;bua*lwyZWMOIL@;APsvwg_2--iRPW=yW2um_Dh`G)U2@Nw)~2 zQ&~&^h02uD3SK_;-d60?0n!l~o;kLg5Hod{3|WLybby+%=$vU_Asnm%GCIrAA!2A3 z1=YyGqNO0Mx$`|#K6f2}WSYuP(>(OUhyt@c;z zpH6%4wEnMmFa7G#eszr%Gg#yM?f3Qu?y&z3qjAswzSOZ3R`{eX+orx`ubsW`Ub54t z)jIU&ny=$;s-I3B+UY#k>VK_&r`D>!mi_yh&l>%6_pDK}hF!#To^8X=^{TXjKy96_ z{2HR<)i3<2q7v#Fqdw{x&)c#i}YKN;8db1c52va&B4o?P-T9<9 zm++t@!}fU48u6KsC{LU9n?oLh->D6Ijp?Yfkn&NpnD$rm;c_;ZiMhOXO~A#ob@bG6 zTbCla1bH1cjmx3&y7d8{Dd;zo`aA}*xlACN_GePQRKk;pxndDVG;EKAY>dLtOdu6; zFf?K6z+S);L~{6#G_x|vYBeZ7IVpeqGKS8tNciTOwA#xA#U!M~PUo^ijvrJO;bP;wI9gCmgO9O;FUCsQs)^M2Dh@+SY0^ zp}Xw%pwHv?xNK&ABY{|T*wINkRo_0k6mB=5ekLzsDK$xkG+WByG<*rJxn9Gr^SBnF^^-MCHiicuhYI@SUWN%lJsTR{jsNG?Y-S4mw;6`{gc^!H6dBw#? zh4_!MbXNHYQa%o!x#OQ=vf$^X1r^Jjvif7Xvv&X+mWd;S$)CM{_wx_#iD)ut%sOrC z05TlS&SjQrV*#ZFNJ6`?Ehq|piZ#{1G6Kdzoft`lf`yQ@h$ch06cJDckO||$vjTxJ z!pra}^hzDF#!k{C>C4{Tr)%L7+H@EFN4y+ zINdUUVKy>jUE$=w?lM>cI3 zzkKn?;e*@GpFMd0?!^yT_kQ!j-d!8JDp{A^;PpAwtr^@od=8o|IxBk#S~nA){9?bx z(|;Y-`1iNd@0qgx>*|3{f1OzUiSc4fr~SX_^e5&Je(!kZ&u(M< z)hieY?Q|p^_W$&!=9hcnI-5*KGjCPy`1N|ZX)j-9r@s#no9a-OzQ(Mq1zI^)PySW{T zMa}lrmaBCuA|zn#57x%Af_$!_BlJ)aN63|sZ4|A zh&qkSsPULJPNJn;2u^6(DMCT0#;n{HMNk-*o*BqveZF`t8oy18xJ^sF(JlS^tja+2 zPZmAuF((5AA05e%Ar&?v>4=GxY&@Wg`L)8hIt@6qQLiZ#vZuq2WYCrfSkbdlS25|U z7oxq@6lpHzj`_?KWg^aM&R5BLOKEo@>Et+|Ow^hRn~{iDn+%z-6jQEp#zP%a>d;Rm z6Qq-#O0ut(?(fQw((Q0}VQQ#r-FVN;NMp3OG}2QfyMC&FYPdW;P#Ee?_ts*~a=1|n z386!JrvGfrmQOh7oJYs@RHN&L%X34e>HhpgPiCx{8mT9SyW$A?P%YM540h-JjjXqp za+PAXT*yQPil~6Snez=+qgX^b-kqN6%h3;|{pB!ik!s#q$vH|HTP|VB#0wOR2E zc1MSM<3fgdVq}qlW&~j-^g|EyNs5kTcCaX9{a9`DboYjdI@3nqzOH}Y#-Y6%2KOuu zAlqh}>qkq|ed&qr#7I|Us2Uosg=hLRTc+!~7W%f&b#I)gzPDv`|E6JDiH0;;HlaRdtAuU^@WPnjC(H|$qKh~$e2QQZS?Y6|mZ%R41HxiO_iumu zF$!1;rD6nisR_x7=A?8>k%<2ilK$xrKg38KMkEAG6F8a*2(3gvl!~A*yo_B8L=pCj zrw_otG^7(Oqy(M8XNa4cv;xLaAZUWlM4aiIPM8%)3)u2VDae;a*T65p%h_Kf z=d(r4tSf5$l-%+6vDHK%Ew8f&Ui^(?b!n&9vbu-2+wzM2^V;!Ohu7Qj{5#~?SKa?I zNG|{2{Ht!}j_1=MGI#v>{HrGX>;+0*UiHu8>VGi52TME8w(I{iqrdzpEAF1>#$ej( ztd4}f*uqy?vsP;}7@TIa%VHsA^P`<0kzS7xU0BemMrF2|vB3MOQ zy4Y?uy6sl4)8V$;9fT|`7LV16cx^VH-7duMaJbB7(vZ^`a5$(#mI@)43!~3YO~i@> zoQ{y20G21}^Th-HWGIjdBf(@S6b}ZYet%rqYw2g7X3*_MLT*>Y>x%kFJ#5M3irA-_ zi-&9ZL@6E5#A1nHAQ23*LsJ#97|Eui(Of*5iAF@v7_+fZt3FLuN-`X7)sGjF@nR~0 z6q1|=8%u@#>4-lQrELHu8S=;23hMPneeQ^t3vmZsE=8EC$AhGTzPQgD@pu%Y(;0R- zVqQnW??{Jy6eaxL7;S~%1e&T=25Ve(*dv-Wwb9h(wl{6IX)L$1!lVn0U#QfMd_|YfL!c@ z&D206i9)x6jwc6ev!l(qv2Icr89){m^ofBg$UNR(0hu?g>tCGdSvOG!oF|7$$n;2M zcB}?I(>X)b@_s?e%weoQ4~+u3lf79umvm;J0E#Y*R>_Vwk`Su^WVlsiH%>MnR#;VF z?3S6H-HXG>o(&`WHjN$N)T7Pg=)2YrA=uYVmXX=<;`m^Cq%T3bex|m4V?U`X3)59( zeyV~jj(0HuF%8DrHrEFfZ=LPkyK!{q!XOV))q#JQ1gy;Js>a&Suhz`rW8homq`U*lVXRL#~LvdDg(|SHoQDPT-Y#PQFzOPjtSEP!DS%&XFvLcwy-n( zbCxxn3;B|=;u?iVM1A|kJrOk~3oir9U^3BU5mN@Ji9Qo|7SU(y05Z$7MYx%h!N6xK zusj=x{`t?||N6_DcWxcNcz);E)7uXoSa<%+uAhAS=*q?SE}Y$a_SBy9XZBsbaPZ=} z11FAde{b(5&Ih}8?cB|q7q4GGfBg7?$;m;V&&EGt9*>2ef5pFL{P2rTN_Ne28-8`5 z=jFcWcJ}kkcc|U|ulGyn-`91?exwIpi3d95saMTw?^^$z^6Bg5pAP5GF}tr=p5^nG z{TbqIYb90%uTEd$U#yN@K`hupRMeoc8JAWguO4?Ti-=>A5}Fi|L;{yYONE_iqcDn? zCILsMO=Gtx`!vPoO%$>i{T@XEUQN)aMM8dEIG{(8UTr2|$cN1Yi7Le1;&$R_g@^?y zMy+HKlx)yQ&A3~Wi`udgYbI<-1x*RRA@0*hJz60Vw}vdG<&>KZlEs7*Jr^)nV$Pmi zaJZTntEWe5$$?U|Hy|IF@_D0%Qjyj5ATQ%7YO7OR*Ma?Nj zY$OND;U+=Mq!X#9-HnU~OMfZUODira<57h`9#p~*2t9ML^ohb)&lP&0E75;BW-CT4 zT`5-|q0d5Kuo4-qCy-$rf-73mTe9_)?W*(?t=$DnGjAq+u2d8D-hzLukr;0#hilQn zN_eCzHqo6zFrs5YhN_Ge$2dEfo*76@_a`TNiQz^@>Y?GT;7~0_bKGXeBUNkIeNZHw@>uPE>YGckP;OtRKuS3}oi}(g=9t9^O2BV#mbs?c)a)`wwm$JhW*Dp#u7mEhE@BOlG&t z7PqY{Z<@|8jHTvBlbdG>yEoK#EL6A8SFlrxW#d$inu`+|23;J=Z5$~u7P5J)#JG%( zakeMHB)3ggnZurS-6+`C4<)C&LlbrXOiy^S5$Mm`db5_Hl5@5}AP!kKKBNf-;aB|=J`jM^uN4E_g+|;{e zB2Ve=b+v6%#q~q!$wp|f=xV0SwV1vV(N?3nM$%LaY2pq|&adgq`MH6!eoM-0Nw7NI zX$|ljn$h6U8m$`6?tj5wdt9LXQ~3o=o>K%A1Iq{sjQq{#?|=Kn2gI5IXE6EZCHC!} zVv#mXEg%}I1xvw6I270fZ^2KIVn>I9#g=4&vCu0`U{yh>G@&8&g@rm`HATQL4FxRI zIXFy8hX{NPz+w@E3)n)qWLcq2TZ{sAsZT%X5IPjkxacgXhJvXNFVhwMQzSqhown#y zV8YCViBX@4{o?n&p*NZ^E{qKJ@&GBI7#xOz#fD{ZgcsN=D42!TfHMz}f}^P~;>gf6 ztq|hT*ja;(LPJ(}(+>}v0=*yJKk?!F=iueb7kA&he(1`jeP>VYxPIl}!+U3MUONgV zBR8%dyL;>O?HebrUp{f^(y6m&kDfk#_~ONrQ1jipS8v?7aOu*;Mxz*u1vm;1Mi#s* z*je)OEBC-Fp{keru|f|0=6OK!@^91w9r9*}i^E_2rFA&}R}9NbalrQPAMd8V7N34! z-ca_0>h;87?Pjx+LvHOBufyVVS{xR0M4=wzDsff9tl3yN zp9mL{5hS08bX8KtTr3$2vVRjnA&YTju!xKf6~~54<0IvX(F!s-Rt2?SI#3H#6i^8U z!g>e_MuCvP9hlb4`Pt#fu0}RUa;6Yx1a;;7M3gueh^>(v5QLFZwm>2r9K;SvKo|<1 z>d9}O?Ab9lxOKX3VYoKkTNtXuCz{!rz9L1S^k6wQ+Lco5H4rt{QwTM4f!<qK{Y9O7PvS;4%d%!)aS)w#@p*G{b3!QymZ4jitq z8aZG9nW1vVnjbFjSl5rBh=CyY(WY;#+po$ zJJX*d-7sFmGCPn5b&bYE6< zxOJv``&_SpVtAS1=o3K3f&kGB2eX6W;I@En*qk1A%s2OL>{s!5(^LhBCQCISeXJUs z=n8|PpfQ3n)rgMQLNncQ6pBa%s0s@y)zG*Z1^YYJ^&ob-Y}9D#63R{pC3rh^~I-}5VyB4XNQ_;3oloq`mE2? zNZIH5^6Q35gJpg>;SS8zv&noo9N~y>PX5+t{|R`Rs4hHCZNpRe&@%ZlF}P}GA=clAt`f)l)+x88Q?`HG%OZcfAjfWU>PHu zm$3lLa5NQ2sq^D6AHdO|Fzh~ie)-wu`?n9BJGJTY zy%QfkI&=Nf{yWzX-@1C}!R-^buO7W{YWL-{`ySmn^W@$+;`k?z?>TyS$E6ELZeBlq z?#z4aalU-<=!s*y+2;J{!Ij_rov%Ls^!`9!$>Y-by*#y<-OaWYOBA%qcbxVgTb|K5 zww=S-M}x^^W~Yl%4ZXdTZ2M0ztphQZ5@BbXzH-9F@}D?2V>ze%m-93@ri=~! z{GliZ_^ zPgi6?yK;^_*C?)<>{2VWW}6%Fn`lnu_RiHlm-=sYyHuyWpXK4y`qVL+EC>ps+LxxG zwrwrN$GZRwTGvjvxjB*LF6c_M;E=3 z(%aKXs+YjkaAE|f=`EiDt4xEu@~AeO@`5fqnfACsq)RV^`qDq!o464AKoRp4mZx)W z-QI2Q2_?^6&k^@4jOzQqW@D}5()&U^8^3k1A7d+jH0OP`^ww#c&(lHq4Q=_s6HAXO zZB^qIP_IK#REd&EG$Y^omiVfOqg*+k=15IR9)2K1zpLQUQRTLFCsD=jDc8O;VWAHc22qo@x} z5N|u)F&4AF?RHi!&C&YoP@{)@bP9b%id_`>Y|6715Qjl06txM8b$J&jimi2osdx&f zc*rL8#X~kp(JA6USJ4S6&*;R?`LXH~Ingm9Cyvw#-MBILI{|Oh9VXFLq#>UL)wbTusM!1TIrvYX+1XOuvT$oD`(zvd@Co- z#sa29$U+))YeOC#IuiEkqkbdCWZ0Tdx+}RrS20{Ggiy$$H!HDn){kCDaab-(3e7Yj z#X`yuD1+TuYWCDpVnlJQu6lN^INO#(bZdnOrwMU@56AUP4tJF@zG^;5|9$mTeF#1R{S55m%2~S@oG1{FcJ2g<*GB-3o)?66xp6oC8 zmgCJrWT>8*9<0s|clFjogFP`0vmNS<_cg-(gz);4Q=@s#xgF?^_H+eF$u7*)=BKN( z6Q%jV%v^tJv=;2kyGS>T7B-ESX8V#`r>Z;W8e1kSGrft)W|V3RL)qz``1C*ynHw(7 zkCrFRU(%0uK0R_umgonW3>YtM)ofb zZyc?z>(66cWcMhmVH?Srp3LU)u6^r=7TBuUOW3O_>g<^5IlN_J=WH)_nv>nVu74Mc zRHwVCPo4F{xpf0+=6QJQ$jRMP2R07un5}Q0>Do43+qc+vXv^^a4gHjIU0l)L^}R!7 zpSTt-hiaQA%0k&a-%XS8dU&K7z(|pB1wEh)mb?Q+4+HI5*Q8r2)774Z9y&j`c?d@> zj^r7I%BT0v9NRHYdiB^QQsF>Gq;n=l6PnYCdA_%4khUmCwv8U$KF0h>&+ebQaBzWi z<5-b7Fbb{ke$vRMYv8YcS>eJ8hof9Zbn5$z5t_49C3iF|d6T2o6dYkHw@e1<# zGGYA2&<`)|qbp{DaGBKEHk(~P8r?7+Ul<`G?3nKJEsn*edcEj#gTeXX@OaZTRJD?> z8wydg5;ZU@u8ZEdSneEV$mE$eDK`%;9qS6IGE(zw9MA4uZ0wk;%niiI4wM}IMSE}F zI#_m2Gy~JU;jy|8<8alDPKVr+Dc3_RB?4D&4YR8aM9hHwxaiE?R^WlL#O_%s7K$7EMf zbTU%nVpF4CuHL+9q~@IN<%c6b>HJW1>vZnWR(?S$l3f^%ZJx{^yXMR52U0@?7t@$& zM%MMl$E*H{T42La=F590_5uFu8b>w{QfJ3h<@mOdh5qEhO+7O`p`Ekk!&~~cO%*O4 zoOyC(_x{DkgA3blp4@P5|K!na1B8Z8?i!)!#)-vKyGK8}bLh7|x$x1A{rApo{o>w{ z-}(IVZ-07`>@R+F=U@N9$G`vejUPTfiJj(q7rM5LdP8?658RsWnEtgrR2QxUd%;O`I2Y0cNLeopU6K_XO(_DOQUt(KA4NojiQ9suf_7$b94SfqOTP-njf8 z$b93{p=%f3yT!wrZ!=qE|Mj&iCtzfF8F1dZd3x85`3vU`U1z`cC-<&iKK{`Mw|?;X z!?ULkK+va7?46sLE|sgC_Sa~1cXc(KPB%9UIAF6m6i^YT%eB5hFCDfiPTUt+*Q!Ou0;A^^~x%guO;%HDYlXjg6 zs8bTxqFf!k-?|D?biveAbGNo1krhUM0RY>uEA^LuA83utGwl>nFMLU9@vOp+Jo}BW zt^97#@}9zet*pvcl;UB!LKu#8WmnH23UA(LgkJ`@#udirX2lRszwV1K`pc<;rLBkY zk@ApktIcY&+mPpWzGQsvRF^wc2WmanuMM1^tso9H6{i=r4lrCgMsqml z+?1!Zo0Vr5GL5G=ru-<;!Z9ZG?29O}+~E9F!-E}_X}2oSoOcutw-fL9)pLNCdB`Jw z;w`Mu<@0)8VWfXv7Y0Ja+rQPx(ob!Cs;rI~;^VatMFQbluMVF)D_>ta!baatC!07GvPN$W zwbU`!uK2n2R#hg^`uO344GWjRJJJlKtG_R`z3sSFmtPi?rxT`~N%^5vae%e;pg}=} z7LJoev|9F%x~*0v+uBmPr08^Z!k1WD6pS(KcJcsC!jbHS!Qy@h$cZE=$T6-N*N@{lReQ3saPo!DW+rjWTN$~&rCI+ zt>%-3R3sB)_hP=7PWM#v1C4Sem#gOUjZ(glO4SO5W~I{KXbkr>k^XvDqg=*VFO}-0 zT&<8QXVdjkzOSBdR_H1OmIhqP&y*l@Cg$bLTL6}4#K5P3Feq9~d%@xEY8*rbNTJ=~ z-u!5PacqE-W_dUZ=i=s(fhNb~=EeuhlfzZ)P%W)sSa_CJjdB#WO-EhyGdIzk8mVza zZN0>)wn|IfbvfjgvvXl;&f2BI#87Qws4~)5pf`l!7zhh8J=#T< z;TU~(ys>#O0-HY4iN5*;z@bbFR z26iZT{cIPqZmKdlk^`2(XJm9BMG7pV%#0TyWlp|@rhyR{3K)S0feyd~{F!J(050$c z1cQ1I6tWn1Ec9=m@7uY4VE2Zh%`;7qd8jK2Ca)VQ%?=gFg2|)J1Qs~DuN<82PH&rP z0=&>MXbjSS?11-ko+2 zm%~(GwvayD#GHXvW+*NPJE#TWa%nI!b+|yT4jl{FMOMN8o;cVAd~xlNS3PM#nrR!Q z*kL_*5DTR+8Wvy>KY)kuCSAd32qcTnSa2w;in}NkGhswL3^roH4%mXFbb^JHIt;|j z1!hu*&gp?VSm;xX%WP3}xLEPRkuqmtp6bi){^Slc#J0;Ly6(_5#D2R8K_ z+c|uE*9Z>5X$Y6h&lLRHz%KwOr}j+qLjjP^j|?FHjgwn%p4xWp_?9Ev2aoO;I<w_!%e{lCWmQQXR`o%X7fAP)ZU;gN$fAL$N{L7z{KKc1)cfPuR^N+v%;&(rP z@;jg2_`|R7{PDN<|KO|J$Upz_!+-qC-~RKTeelnIaQBx#dVun0zy1Dy^9P?R#@~DV z%ip^D$3MOECqKJS`upEpC;eA{_~GCF#n=Dyzy0~&|K&G-^^1@H?qB}!-~aVb|KYEG z{2%_ipZ(1*zy8yoKBnit{N0cL>i0iGN9gMB|Ln&Mgz@kG#gCZG-~Q>h|M~BJ`EUN{ z2mj|k{_}tMS3mi$|Nc+@pa1yhfBzTX{^$Sx7ysXX`EUQHzy8^u|NP0{{K=R9%isLo z|MfrqDS|@wFMjv^fBUCj{jdM-_y5a3{PI8io8S4zfBn<{?Qj18{eScO#}e0cWQwt;i| zCN3SEIlFiK$d;ZR)A_NgvlP_yCUyO3!(4BW-wlQe?uGtzZ^qF`IA(iubG_xo(VmTC zgQN9Q*6$10Z2s1_2=R~U)2{)~{1Yy|Y=3uNCb+C_VJ6lro-IWj86%|3@@xcZ7IEhv zfBBK%Stu0+0aT$*@RkMDgq#0=*6zbit76L^J!1fsAW1~fBj$)6bHJQ)&Z43yii(1g zljPh$1D$iuIY(%k91Q22ncv*sbD#US+|S!f-|2B??w#k}>1RFFt5&UAwJPmh-(9u$ z3y{K?fngz23b9QLiqeEkF?MyKQG8%nU=~=Gg@iIYv3JZGq7N@ig@5VG2rS#b2BoQm zrcj~sMwBxFmgR+!X*zCA2yC*0I1IDfxhAv$twcJqlk*J)UgmJZ)Y5tIG6)Rw;z3;u zqzo_HGRACMzX-*_?ATKVsHMg^Z3nOcWCT;g&eY;-U|_ZHebHR}-Fq8W&s()bxo@xd z!56lDYWss#i$4RU?Mi?D&&6Uf(TDaijci(&s&HL;V;j}4}fVck+8Badh zAHQ!X^jnAjRj%QK(W1>#(BjsDG~}Ygmyn5t>K{Kx=$Q|anpo=n@fXX>esXA%HH;FHp`WYv6X*X{J08T3P5f7$hkSU|qRLLzNg_}Fu;c%;g-E3wjB@`geg04j z3j0ykaQ!&f$4HQ2mGJcUyjQY;%s01Ub3lbgb zB<0EypMO`WjU{i4M$~YfGWI+kqD#k}kK$XkQ!gIrgS5U_%k$|IFZ5+v zY9!ZApE&u#b1$DT`kJW|ufBA`6%)tcubMLc`stTkI%XuAI(F2wi!Yo${vvXzS56*x z!xfXKPq^Tk%f{bw)zqt}UW~cz+UYZ|nRegIYwy49x_fWE=B}G&;2*m4CQ(Vw(E80y zzaHXeN8&d(ouPG@);ZfaKK%d9)D6W%xTD(KIKsW3_Q zRmnj@${b_LL0@@U-~ey0oHEL*vO-^HbvD=uVV+|+cI!10?!4*JyKbKP*Bkt_=%o+d zapl8zUp?#28BaWL!_yD>QI%UByYIStXHLKU`pLIlH%a*NQ+GaemkLg!^T<8d+<*HO z_uY0mhO+F4GMk7^S^S2JcJM2xniS*rS^UC_+9^7DT$zVaeGaqF6d%}B`b$sV@xtS` zW0ZKujZ+vDKJeGSJo(^_&p&qCi%ePRQk!DfixX*#mc6{vgbC+hw=kF!83V#@uqvICR_f6Yha-?zo)%)@v`mX8Opx zuN(LHoztJaALhFBj;lxi^{R{Ry#a!oVykZEUq(s2??yNgh`#iJnRIap^@P zgre78I&$jh5!1#dh{DA46Li^_5i=&8bHk;huAZ0(ubFi2)C)$;81HoGsA(6CxPHn= zF!_G(*j{(ZExyNlnO8C=0po8yecKavT^@d7p1E(vQ}_Jk$-A$3{=sX|a}QjtY9S;? zvu=ePr>NSEmz|@gDs|8G7pvVn&;Iq5M{k_-`h%ap{RoEH`5!zv_pOIzfA9G_zj)`- z4_~^6SuJqawPRWM;KjRNe&hzJZ$35i(>G>O5PtN^ebJc8efr)jIl#`xuih_7cJ%%W zcfIkXjaRt-N_k(Hb?xg<&-~z(dp~(|7W(wdpQ;x8x&Nk#DuRi+L{r>6{k$0yM%;SE$h&U1_?k;c zTy@C^%wu<7{?K2iV(z?toN}Ljz_Dj1#;;HiA6|U)hL;|@@#QD3e`eN|Ua;BK)orOfbUHitfx6FS1o_QZWy5#FuHZGsNb@iML%Rbz-?(5YHKlp0)GatY4(5G)d z`pqZL|M=zWOTT_=+v<6nmVdTx@y9>?_|Zm~bk!F-)_#S`vVO|gx#+;Q?=Wpud%EiO zch?^{*ph{5D%)AGe?{8%?~Bt{6ArZH5VltC>ut$89EtNPnO-RbV~U;3+sl1P3#R4aPCz4%Z=&c7UO zI@VUyQ=Lv3GuWKpU6s~avgb@sg=k-GW_wv`NBO?PO?h(tpQk(enz9bJ=bz{;J>FB& z+nCu}xtHYh!HSc8WxpM*BOmN2`1^6*wu)lT_Esu|eHIw!7#>ryZ*AVLH|<8X9OQCH;wr5tG~INAz6r*%~99c;<{=cDz#wfoQXmi+Hijp&y{ zCH)QiDzjFnZJED&G35SBHU@2S)hk~5sKyE-_pcMiYN`gIUf~7RUUYHk3jn{`6 zjBVcl$gpt0XF4*<4)exlG#D$&NF=ez6GApafu>TBsGyzxzc9crL=67|#ZF=i$%Q0z;Ojlxmdj|n z*>|u*0b?f`SO%^!)TFY!Y|)3n^2)^@FaN>MRDHa7!3Rseo4s~fViy~rhL_haoxfq_ z0;u`NZ|1kn5gpAR^{{l=58zVtBQ{N?9w!^_`&GaE+svh!;%KSB;NL(nfh z_w)zvyt!chJa~D!?{UCK=Ykt!5?iE0TV;03@P6hjqJGaVe(Wr{U`sN72hDHT`B}Y? zU?z+kH)hP3i!PFh4ZFSnZp;ns0ofh?T?!i~arD2_8;Y>tFR|H^_yuXYzwguxZ8~{k zuqdqkKaiN$6DM<;h!L_?DNT%Mh(;VaVNl~oZQ^B~Ir~xgha7afFF4eA=!W+E>=LiOtBNFZdnoSluoQYf{oR6=DdL9r8V=laq z&&h6LE#7DweSty~O;1gz=;&Dt<#35|7#VJ#8d6Xs4G*cw61fsJqV|V{;cA3RNm?G| z(DfhD;cEXMcrb1J*fFf>ps^Q?R4c zJJE(p6gu>11#bZOgzOM=&iFBjugK!Z`%vsK5SPp)DLmmGf06@0lvmLdQQr$a4Y1)n zZ14}cofy?aZGKVYy>KMw6Bi2BjKJ|@JxNGR88mtPMVC%;Z2A1L7mm8(vhkD0UwHB8 z^Wj9Nn8#goE)V%=E=(ME!Gy7+#*Z0w@#v8h#*aB~!q{^sj6LVFOD0X5GFdof;`j+; zM>FC>xT8m%mv}w%Ldv5&vcRjm*E8`G$6csIM43FkqdR1O%s5>zYUH?!&YwJf)Fl%x z9PfhUN;PiWv?&)~cFDL2W5=RtQ^roebnGM_gPk$ z#rh4DX%j9Q>!ZHsoj1v;jFIPF=r5DT-f;Pp>!wW>&b<2a zJFcJc*Xypl>&C0^yY>2~9=sz7jX`GU_~}`9KKt<9Pd@P1C+@%fk-Kl1b;pgu$M3uC z!N1-BF9U8cCJYO20>YprWJd@l1DIeY$SDNzDB!P{e13pXl0d0KNKnT3pc9npD^I@l z*SGq1Vy>ZxOHxMITX*IfoUJ6#MlJAMo!-{nV0 zue#*gY2zqdH~nHf9e&0OvmiW%ag3rb88c$qgme6q37aH;8TuL(f3BQz9@KWjU&erM z=;o^c|4Gnp$T2s~@O|Ksf4Ssba1VdofP0-lKl;R6ZgkJOX@}@5@J#PE*!RwxV^yXI_7=G!#*Pp-pnMZDV z=+(;9_zf$*T)SlMrd9I~>|9Zpu^HtZ*pRz_{pQtQ@7b~# zQekYXEp>%^n#&H<796NAI?zy@R$r7^ zlb=;vklj?8*L;miygk4ox{q_06mXiHV zMXACQUB%rs2WoORm!Fj`{gu<(a;6 zQMoX`9I86mT|#)Yy`ZgpkLbS~Z1oI~)S8RB z>(e`G0Ga(Q6?>zi;LKt>154_Q}32-!+w`ROD<-+rCKZu619L zmt_1TOyBWCW#0Od%+)QGyAHKwb=IY}*X-%4-`7;Wqq<;YUGbKN(rsT{^m@=ywqhBpgAC}ws1>T-q!Z&1M*hpZmBEUInY+XvMA#{4Ou{R7^f2i@paaM z*y&)PvRDQZQDyFi0Ev=dOlrA%mu98>*jcf^rFeH$=BDD*HI*3~;ZT?tL&6t zt1v}*m{fhJrh#gTveRr@W=U8{~(XJv$IikR_ekX6Q z-iM&(Q~l+lh(VUQ#4(nkcm$G@!{hL~r0nfWI1spArCKUdwBC`84s7hN|u=|INKYX{~ zW_myUrbq9(&Nm9Knmlg&M5jA{KO_h*kFsF0i}w59{{UY0T`#!U`^#W5L<}{H0@3*R z>Q|)3d2Bi`EQ(EAxD_UaD-j6-e(fp4r_eMtU=}(Kp^yTI8Yq>HRH!!aGUUq|5`|lZ z_@shVD3~dDHcEkc6{@U&k{L_m08`mswRQs#X-tczw=O4ead)xBmKYaG}=Woq((tFNp3%-0;>KC89`qZqu z-+bZ84_<%a{@ZUHzPIuxKCzQBY|q7r^HX|m^ph?+_mVN^PZ^sSgTod4)?P#tFm)Jt zZP43-qrq{HaQ?kKqU4C$$WiBvy5L+PO1^A3JPY1?h<{qrC!?|WxA_uVgdvAh)GH|I zt!UIPm_OB&8cF;#VPv5un#>*yo%kKYq@09iGWs8x#Or=uskq>ukbR&3X!B25!aZ${ zlto_=e`@wWWf2|yA6y--6R#O2B@SEurv?dm=qDPLh!h=sGGj2*6nF8c5fd&m4*2O0fDW0d9h`1}+jVvvGgW{h8>@beIU55mt+gy-C*z+nz3g*9>(P$(#c zJfZeSJLry>0?~t^_PerQ()^<(IXOH@HQD49KjQ}?`NOMl8$abD59deSa^(acZuLZT z+PD!jCXe*hRv)6d=cehm&X_3G*IYd>z5iBUUzvWzgmZ=N1y@cwKhz{q^Q~9kePu_~ z2W^B5`Ub3@o}51Uyr~oZY1*W7{Q#xt<(Hg)*~D}F{)Jz=Kz?__4{$vF$Zb!|y2Vdy zTsiduKOgBwG4Oseym%6y&3WaCyI+6$KIBzIKN4~O%;}CbDx!j_roO@ZR*#0#0@qI)Lpbv)ABIhQuE)oG zywZ3R@G^De)t8=s{qzg{dgCoK#*z40iW_ZCO~3F~cwxpk3fEpXiv4RRpJzMi4&S`> z3z$G1l;WbZc;d6VDyYyKFCQnWp&u?p zn9n|ZQRsf*2H`l z@-Z{77;C4@Tby3@yyD_>W=uR^IOT#7Q!g5U5sn>^;3u9pV$%5|ri~df-LBjP36g5V z&JNx5aS57mUSh-UhG`dGeaU%Oj8E*=T{-ceFjq}J*Nz^3@~DIYO}l%ST#(@DP#{O4 zDWpJt&6M-mp^2F}~3N{CXF z%VC>|vM6Q7Nz)FKGJuX<0H2|nft&@AAPz6VSG>*PV`|P zUlW2NAGu=&AQ^D-$9Zqtb(Lhl^^5nO{raPq-hXM9F9P|nk8l3?$d8W*`7p0f48`Y$ z?zrw^9|iK!p-6q@iCf=)`M$aDJ}!Lev73D$Xwet1uKND{`5!&|!7KNF_^Kasxt+4q z<=?)u^1JtJqJ2O2<#``GHTRvT7tDEe+1KwYan74haAW0y*`L1kt4=v0QK9Co?G?pyFV@b&`DGWP}l$PQ6zo&-jf>_f0DCn*@3N#QTq0! zIlEWo?OlV}zIy)JMIRUJ`zb$lZSJ1cJUABtOfOTmZ8Ia z)u)a&|9ZMJB#kwhCl59J%h}%l>z|JZPYpC4?X5X+u>SPnrncIgj{3Z=#)9sq!k*@$ z-j?En?PZ6%Dt|lD`tv~3KaO|&{aE|S-dfTBb*dZFT$a{emDSf+c(k)p^jKHbp_Y>F zx;!uh!JBXgM5zj0b!noYWpnv%Q9F8pI@JXx1x2Z;GC)cbYylqWsHTd`XzwqODNrLj zT!}OY#44&3*5DfUwHP~jsVfQ(tgg)6r0I}oWTz|(9eH#1EYqH$uUw8PTA>u$8`3(P zGMZ~rD~q<4=4~u5*o=y@*QIRvHf!JV`ihkPjyx17JIF^rEaynR{0iXKyac+t$&L-By>`RCS=aI_+Rb@tGqHXt1Zet0`AfP-kOKdwmw2 zy0X2fx;Uk>aA#G~uKMy+xfnzp4Vks2Db>Y0o2&Pu3XdT2w$zmD>Tb?C++A|8qp+ps zKx1WUdwoW0ZCX{)4*oFMSh*j=T2D)UZ)*Yfl&ClqOm}l0uMT&YHB{_np{psow>7W7 zy?{YxkM@G*%n(C3RG1(U(1<n%Rb-eL!@ShoMHcc=zmBK9hVVpsh2{~}AtCCk0+1vMS{Gnjq`qqkc_K)>f zsN-Ny>4`&C>UI2J<;j6+QPn3u(pyGCMnFIs|SAo!g#u%8Fnp`j}VI1cs2ZWJVi2+{ODL}8FF>CBVxH1B{ zu{A7-S-4n6rDMMO>>bvqiGt^hvp|Q3QW=q*W%`78I*JSbW3QPfqR2invK?#zV|N&2 zj_qUU*oHDW;0*l6wlchohnE3iFd1I9n=B)kEN|>S)1-hf35dP=n{PgQec_kyY*_Kt zsvqXU%d2c^dy=r|%O&4@`s4gLE54t%`Umj&OJI4`LOWq) zef1vr3`C25^Tpdh^p~H$4m*>7^w!I>-+bxAH(q@9k^64D?z%f}zWM$;?(j;pUn=z5 zg?^^cuF552##}mXtW6d_To}X1yKEPaJTD1~L+A$$ZOV8xR}?PrDr|zeF!2g!@(sr% z;2lEEqsNXL>z!E&Lm2sd?@?lYAHBoSv|Kbtj~UIHm-XW9SI>pcADP(OvQ;Ovk!M@n zCRglp4F}E_vHHgbXt)aOcv+9D$ggdUIXCgCc{KVQ=r!@nj4D3HR+>H5Kk%{|g^uvz zk>}FU8Wf_wk(2;^b`pm)$N7l;Pzyv_Opil zcBE+H7a_-wpEkihksnDMd&!tl6E7S&W$c(4Q>V_n{`z}vyY0?fpqrTw+GcP^;H0FgTpM3SX=iYqjrPp70;pJzZne+bpUw!)NqWNDf zS@6x`1q+sZ=j#MNtX;Zz-;NzSH*MOmYSpU6ixEbYupoU;*4|zFwr|_JbxZ2D&AT_R z-}aLiF;;E)>8H(W*KS&~25nipcKiCZyEd)avvuvJ)hqXGU9)%Fnhh(LtzNQZh`tkSFG5%aqYex>r=O*jTw8k=BMv0%uLDLyFFvi z_VnG`(^9sj@7|V`x+C|%PDzcGc_lg7B{^9r=fHvTe7D!)mYTwvlFX{2bQv@DP);e$ zO|2|UE6dwgo}b=SmET%h)KHPzTwT!DR(-Iew!OY&{ucdTEvecg&3>sOW+6qM%W z3G2$s+v{q(n;JSB8(V5?YfDS2ii^wgEgteJ3JV)5D!Q5yMQ5dM$=pFAwe+Ktak z+fkmsza%52p(L%dHov_(x27PqENf?V{@&*D%#Iq5{XNl5FU{OR(%a|(a?RoP$~_yG z(67qdvvvi*56!8B6_^1ZlKNXKq(>5>5vb!mmX>>>2E3OY0U4c z&rMQY@!sZ&w2s>B_L{7w@&mO+dq^5e_o-J&#`f}T3(J%UwMJdh{Q9*|t1@=2%-OqEl(68yhTR(%?f7Xy+K%P>wl3MYeBP!NUt$)2 z{?2r&py|Ltrw^x5Vd&w7{?p?Pqch~COYrosFbe?eEh99UYH!pQ# z!T!x0O`@(0hc;&iLubiKo++QNN^&!T#Sys69)HH+|a?W1v`(R7?D zbTk$=+TN-)>vAV9mmB*DRd7bpCsQ+nR-Q z0JNQJ7wr1!`#l>L9oV`Q?Owlf=h{WP*Z-KhX+`?>^#%Jj7pHAHuyxt`#S2o_t;pK7 zx^Vx7oZV|PcC5}$S)HG{&Z;asWrLl;n*0>Ig!{KF-??_#{w*sc(aGBNlO&5nRFl8k z9-=j5cU^Hubzy5o?hxg*Rundu6*m+YHJ0Qx6z9|yWLD>;Rp;4QOz*78Ni04abDPWa zJ8KL2n@y-}+eS`WcGnl!SF)XCuPU~X@THj>^1aQMvBfIU>d`t@i59f6a6NOdzP@O0 zRI}*7#_GIXs4{2A;r7B){gtN=R$**N#Q{!dI7J=fl!Q+7mfI%#Ww7p_PqxscW7CQ! zbntT7)v{&AfrgS4^6vUf!b7dxR*P)QNTtVO(wg?SBQ1woiY;q7X3ZSS-P8Sb*5Q`B ze?Q)&Lv{w6p5K!N#0{W_y%r%_X~AO82yu?G^UaW*qA% zKHgc$zuEExV-wxNcS8*w`w;}g%Tk(;O(!utUzQ(+c>Wou;)yKNZ5499_ z*JZa=rZt!CYbs4`EJ>UWwdVzY!NR{Dsr_ZJ#%>pA4158Gl7gVK*c$uoOxKCQ#=-uY zV*~Za4>w>C=3sY4M{_|_ZB|=j-l3jKsjzcTdkGXU*j)tG=o6oacEtxzt?Ah5iuOafqn$qZ_Qza6W`XcKC%Elf&5I|$)ij9hFg6k)Wa z>{9V;(ud6@DU?v_i591?g{)=t2!g&O5EW>JSV88d>b)>501H7^6mBWVTw9#8zNK~_ z1(3P1Y7ZzaSM-ZgxTn_?suV7XA~j_k6J2&s?Lx{G1v{W-*conxUJrGaP@onl7^I~PBL_5wni-)+C`lBo z25E($alm1y4LYkY-vi-dinBM?mF))KU^)trGALbHxE<(bsMmCObvjA zsY&43q03z^h8mKDz-M4NCOb%LcpK#Hb-4z-aqeLeu#5@R9E0C;B=DI|0$Dc~g=Gdo zc+T(yb=T!W>;E|3?h2!bz|j=oT+act7__T`Dy0E~hkR9#0$dz&k01bNav&N;rmyAc z!^K)&U$E2HmQP2h8D#{MO*iwdwIYKLLq+Egv=$lChN4Yv$L9N*)4Ln?w^pUp6mDuN z*jk&tp{HhFeg5Wx)MZ{3|Ki<;KYaPF?>>7DWPbk9YvJXG@16F-lQ+El%+1d|cGGgr{muUqw%)Q{eK`n4DDfB&t=e^~Ir zf;sQL_u>^+#{L_3R^$-+jw{H(&M8?bkE1a>3_cefY-fPtAJ!xktbKPnvMv)F~I`|JnQBeEjzO z*>9$9TBWjDX6wpDTUIRGzWT@A>z6z9Ri3rCIBi$X?#&0buBEW+r=@g|R?1J^rj;;s z(zmZ;A#>-3){6X&>Z0bdT&-G~v0JKDoVH!{-4{F7ET(|=Zd{3E*V@z&1w>!STJ%Wa z{vA<-XiItCp_b|sJSZL2I`1Ytx8&m3$&+S!2VYpgifT!rbZDON;pVdQ1T>TM`L-rb~gb+P6mKhjZ8GSFIs|CdwU zza4Em-c!T4`l^0K&d#!|?c7jPg+?9mJW=Ao`uxL9MMqmp5k|DP#_L7dm?Q8`eSULM zYI|AQv9_{PT~z~(h4pzU1^+?W)Wa)?|C_Xm?}5o)*ngkPl34V2v-I_;!l#oCtklC3DNd zob5jn9@y}$525(rNln(qf|O;1KD1JqzAkg?GA*wc$_{KQ-M>+rx0N0c)d*@Ap}vU5 z&reyyf5ND~uGCpl5xLGnd{#UD%c8q=Q+-LQ2L&2m@8~$~tS>M~`y-4V(Gijk)n|5B z?(?;lo~r#m|03+KO+VJ^3o2Rou8P!ybs3_^oil98!CQ=->Z`TlvT{S1SRno5c>4)^ z6J3>7Vic@fET|H@Ap>S*F@S(9?CmQ-2f2HfLj+dNqJV()GX?~p38Mu_gb=;)_uJ71 zQ5X<>1KfZ$3SJMyW8gRvYF-Z{p#vmJC3GYbi2{0}fh*}S8%Hro!Ymz3C;-3CWI~;Q zFbPQ5+mz;nrxn*gyGJp5thO?3c+oz#g^1A9WoWR60 z5(KOe3pgf*ZW*vhsY zd1TkHfv^~1A;3{h!FEs@g63F7&Std>9~>X$3B(;$j>?KcQkI%}2c8N$ijJOAx>Pub zWnIRxguE?3W}<0m)_8xgwKT=7Inh~U;y4>w;~;MN9y(oB`>L}yR^{&W+`tnIt8R;J zR{^!H39tO5n?dRL!j-rlQ*S$TeGuT;tqOameR|%n{K4Y&br`y?G zXM?K`s4SjZSAC{>(ZNt7VTyvNEGiq>BX8*AHI5`yp0l~Bd~etkhFVMIKBeQ^ofED$ zV)qFdKYggW*(v3cl)B1x$Y`z0^T5_aKYVjdjz@x?)_QE~ajrh{BoIj|$=!9d zzvfU^d3Q@;dqXbjX)QY3Q+fQ5jxO(OFX?J7=xoZ5P-;tUR%7*n{?6iop0f7F?3TKW z-u6P#;G1jHdptxMs5#tQ-ddkYc=TYEyo@u&;E}%S!CphfILXJK7^purXi!!6wiQ!5 z*jbh+x4X2vHJ|0CnlvGs^_Baob-JriXEC#c>{G+EHDm>J1!Hag;yu-|+xzp;*5mzk zhwMA|RR4COgmDGlLPICyBq8X z9O-TP<#_MU$9gd2XO47Xj`lU5J=#6k)7aBuYq(u5rLYEx06f!=bGUrx22KGt~TU{!xtNw2fb-yGwDuj5N`2Tfw@YJF9{?^Lw#?tn>{HDsRrpoj-_qaM6 z(|Mk^c-Ye0meuF#-LS8=Y+F3#CG=r(zR($&2 z^YdoEwt3a}j$yC(e(q08zFGd==XRDs<{!TN^vl`reE<0zU^!*O+HGrAfWY5>{qD*o zpMlAsG02Qx{Qd0tpT92Y+b`exYVK=SF zz*l61WT7rI1CtQh-D7=Qq1LR^j2?NTg2!s(O)Cig`z<4wQKRFDj0+@kva;D*;KG>|g*@HWeYGDlipEs&M+C#f-05 zSUfk#$O3%LI18eDPy~bvs|o{>(?@ug3C|pA&85(BPs50A5H(RMSaS9BZ;GE1m!V$Qbs?xgB^Juk=&>_0V5{N+BNl z1DZfE{BMUE&-T`e0#oD^$iXYf5!wo*R+qc0WZ#DBtnGx??>K(eGRFzdJP35=d{ZeJd zR`^;s#d8m-ba=~cm6AQ4GemY3(bS$rfW!Lha7UTP6uQ6@43L>n@BDVGRn+EtbwLW@ zul<#<@ZrX602xx|ZD8cnT_wQ2ik#{!{te#r{$1(rqb&)H1Kk|&uCjj$`?Qp&d8!ZK zkYEtVZ2JT1K|m022Vuc?Ldec`*gsA*1~vrlplLAKW)lbof1!Ul)f&5~Ff2d^Y{J4Y zug%r~$lz@tPz*#I_!lgN_(-S)CAx-23%N{ojr? za$C772;^!a4+@}B_!J-|31|$B;>js^+Mum~WkN0H+dEp4Bpyf5aq<>+j^_^$w|z-G zi6W44H3ewQ9}aj5!NYjepb-L4YZ+d_;E-}`4l0+Cz~GJnD@7704U7{a{q?hrFq>jS z)5PpBeauZW<-n!|c_~Xx5E!|oWS3V9%_C^qfUy^5<0-bfY*!gMp0NPS@$5wudG-=p z-cTEy2wMS|U>&>+CjyfA0F2NX@CB670sRg^0sVEykF#+Q8-x%j{K#jZJ}|yoApV25@en`=z$SsPDyxxPp#yBvgf|0gGsq5*3w5%i zWhmpd7`4y=TB|)Uaq3`5nc6P^OM4+T02W>btE)V^sZFmfa$t2YeLNquc>vx+-Ni>d z>FO&B)C?B`wy-ndKu?KOB#97n=2(65@kJEDbM;ky1_Mg#DGu;NHTit%NbTvPb*!~H zwq2DP09nX50Is+`Bh9m;fhr1wrv_U9;il^Bc*+4Fd!PY0d$OVLkf$8+tfSr8nDX?1 ztF3j}uy99X9$*X43ZZ5I7K1R*E;%&a)slC@6{EirjI9Cl+X^7r%HmzXY(v$4AUD7) z9=1Jwv;iKE?6B~$gSD`8Q*}l|Wg7GiCJ%I1updyJeF&O;K$vmi!LE{HhP08~Ti)56 z3nMFz@VC=#Kc8q;A_6>tT*x=x4saGCwtxvq5D9`z;brd;;9+WbHc~-sFuAcJy`ek} zfMpGyl^QP^hyuvy_`%jVK?f0^I@|$1lLU?inMwNFYr2~&0B2xaDU5JHSztTD0N~_` z#{A=SHz$E~a?lvOZ8E4Dv%%g$4?nv~&Yoz7h!Myv1frp1Fj;Ct^#K^U$8)^q+*S{f zhIn9^BmgyQqC)zNAMUIl=%^d)syo`#(AQelT$SBep{6-UdMoVaI}XuMxx29{rQQB= zoudm0r0n&1@6mgM-iy-Sr(Ut}&6l6L;gfeB`2531K79M07oNKIv4<{$lmjn6_T+63 zKX&`Q4_trKEmust+z&JRzK`Ey9r~S){|UV8b6(3A`EJ$MySM(7wtI_n*S4D-y|(Wh zNErh@gUkRmg+1HW0n`*^v>z=aeV+we^4&Z_zd}WmzN6hT`p!0ku(rIAI5i5g(_vh8 z!T{;yarT>zli~YzdNX1b3+a2d%KQCSbD|(wIKWj#q{a)^lylQkgh?U$vhUxuDYC2J z!ug*H6~R8>9~39d`A;6F&`ILOZE8y473>d#+dcQw(m*#54gMp5I`+7sZ#)Uu9*4SZ zOb48&Z}S>#_swkzkT`Py``Uo;hF` zM1-;^n#98dmI=T7^zFGHy*_{LyWf2AzJ~*zGkiPewYQ&t;D>qdlxOeQz2OHtfLSSP z030|AJPD{38+m{Oa&hzY&t=WMCVT(JwB2hR2=sW+V?gBSU}aG%9XBHvKx!^`E{2{N zx(p(`RGQ9nhO+aVp7**KGU$n-J6c>Mg|4N}917j?=zC)9CMYDK8Is>lb-1j#aJjy@ zJi40271iZ5o&mc0#nVVv*N}Kfh-k*M#H!*wd$%uLw_+Z(Urw|Ux*AK3>#%EMJh5aP z(V;-nR-fbg$+(c5GKO+IX}4Oi7h_Y#k(-L_?GEha?fnUFQDJEz$ypb>HkPAqB5qG7 z4!g`&x|?_y>!GWgj?i7E)+>W1Y3uTY2T3p)7F ziRZnV%dJE&oIcW|J2Xq&bTn6EReEjNK6e~UO=&9q6d+!`nZ4O6gL`F>=X^WYI6Ole4UMWEYKbLf=g`ZpW&ln(C#32A+YM zTu`YdN!-m1&gf_cS&I?H0e5G;FA2jY3V()_6Pp^y@YaUrj?j8Qi6(_9K(1V*sfD6a zLqRm=f@n-AgP(7F7P4SA@qs5Qr8<1jq|t^NT+@kim5%99UvWVDE0HzX)s)TXM5&ad-5?#y<{c`50Q{1(bktRxzouhht z^!Y#;Xl8;SD{VsK7D?TPlWE;XnPTjuXNrtSvL-c&+2O&H$6d9C0F-#PML~ieoy9_MJkCp8UxHKNEC14 z8yonrBaA{Bu}(YI-(rzwM+0F*3Gvo!T}|bkjb**9RRdl1Ha;Y|!kc=?bq2+JR4+_S zhrmG^M5?=h_B?y6C0dmO`kJfynh!iD*A7~h^W5ezSEHukq&CqqDo7r4R!B|nSSMrT zWdWm_EK^oB1?lCeJ2_MAXk=GEox*u{C_bA_+>X1IITiOW@^GHqTMu{=by2UVdsI0p ztAc+!-6IuJaNndE*DJlxxcN&tZisZ^8pQ}T*}35s>4Ia}(4;^@9wR5NBnDL!=du$Q zI@Qros-wjWR0Bzfw2z95s<^(^Aecns)HouWppFJ-Qxp9dy)49g@kFRwt<{4xtYOIx z`)T!@Z5@_(0p~qh09aV8^K=oj;%G{rlO0 z-%j=`4nR0@s4W1(q0YJ(S}_Qv!V9+BD3Cx66zB^LNTe2o;k>9Ss0#~}2|1}9v_Nve zYYOdSZ16TfmO;XQ6;w?vkF+yN_T3*%U8C`%{dF0_8xMLmx+8MwKAo7fBUN@n^Bs8# z;U=DZ*oAkgbg{CP@6L|5*g!6-uB9Gs@H0ABEuyvcV_E)A^&&Ua&3#F6A6Er(-J!CQ zkPC?Viju1%M#s5X)7M{8O$rh%5EX#A zx#Ge_HGk*YYF zCM=#cnso54A#oYO8%tr912MH}B0h7{QzJw?YE6oLMtC5ovL0^yn z)W4qSL-wX)S}8h8#6%OZMq*g9A5JpMe{!L6sYNR()QA#<;zXYh?m2$Y=JZiwzr(hIXzXIx$nb`duke+ot@9#DOWDq>or~80_|6LtUHQ&) zx1g6E@#|)n`Of^CPu=0iAS4jEmxgUbq%!V>hEJ``5BO^vNSteu_Ply0{mcto3HohTNyU)m8 zHiBD0T_K?AZD`;Z&V`iim7{>M;4t(`nF0cbp=m^u0yqr$B8()K83BHkMItYT8oAU! z$DtO+70qTqX8Q7qg3RB2H5)(%mi<(g9d^-qbKd%D_Diolb@#&0-*RYn*SdvHil%K} zk(at2e0C}oUIv^EPA69p#(Dr5;U-roL23>pc zvEUbipRwk54$aaUR&_6f4&2D_@$8)r*b5_joQoh|A*g$}*OSRSfVZYB1wfATOOjw` z=bb1()6g*-jR&P+?Fc>EJvmr|XA_zR#x3=|bH|e?9s1ya`zBA^OgZ4L>P`wr(-DpC z5W1`d#dS|F=p7v@#K(2dB`)q$noDbF%xKTJ=eiSVAMI)7sU@`@J{F=$h!OWMni^1( z(h<6sF&lR@n)0%*c@?4YMRhAilsx(|N>^Om0pq^RCWYue3X-Cm!c;`#O73#4UQtt$ zp?WnK&F()M9Dxbpwn+%e0HAY29C`3V`a=9ZQ3_FN@QDfK$H~tmMtWaz7!5t?w z@s!D9y%f15afv+cGDE#!p__mz3RRrO(IBGyiHawO?jNH!(T}n#$Rr)AQDU7&5-ku8 zhf5?n!lZ~Asi}q~u`Fa<9m%6~Er6n%G!D6F^g?v5B&!k2W7d)ilI%ShiU?RXDU44; z(e#Po3Mr!)s|KoxqmF79?JT>3qO!`0f}&`paDa!gj-`+Ayfrv!@DFA5Dr_4#75NgjD z{NY&ea+zzcv@sa;J0?kSZInvKSQ6EDQR`?EHu<2GC|xwBVYHqk&f}xSq9vm@WueSN z3iucR6f~faHH48Q)bhrIa-{}d2*#@Q=i{wl2$W&8^CGCrXY~Q$>U*x<2^upQbvJ@L!da^^IOo zK`kkis+uu$qZx(d$|4U_c*U8;e6~Q18Dxs#kq5urjviDs&RZd)s1!955J{3Tx?3oV zH95IECeS1)d3|B+4=x9$XR)o8f>c&A7-@L!qX0)rm@}toDo+Fe(gP zO=0e;><$6+T`8 z3yX#jgFxdXSPSRn3g93#L`9Q0PQ7?&Fz_uVPPl6PXf73ziZF(Rni^f2Jt#UW*?rN` zx%dGZ{c%{ zYCJ!6rS|8u>w}xMu}ux>qZsy-)sZF%!bcr3+CklAkJ&*ImXqU0lx5B0;%|Cl!T~}p z39luaSNrP`gG4*)l;|d1h9?OS%{~e@_=DKzOgMl?>?<9yPm=^g7^(OeY@vfE3AcHu z{moVr(=65t8d+$vxW?)Z^5oPO#hb{&$P%d}$kbzsPCVIQ1adT&4A4 z%8kK*VURowMk&IG4o%g>=R`!QF&@OMEZnXwOS9NE-l8IO!m^BnyfT*LO+Gl%@@A_x zk6EBojC{{1T&RI%-9#>x1FFVUJPpt?3@TAY3}$jX1#&$5JW)-AQ6fg0$V-_HO}U=kb8(z|@32jtjyI#VJSIiw?JD*A;Bux9Q6_p1R@d z*^kbB`~KISxWVtB`OUT09>49i$NuV<*57!1=1a3~c>3OJ{6O=AGq0F=<>X7po;&ol zrT-bc3?71t047umib9=0A_yspNu~zug(7VS$L1^?N(TsyH=S94VJXNJ_*zshQH(5h zY!Lt}7swp=6fY?x)PjL+DKHYyc=3WS@YIA#QG}w|p^qrz0eNyZk-U_{S{UR*xIvQz z6x9)i+p?<&upIi4%7PLh+}P3uV=+J|7;9hH<}$<@P&f9D<7hXG48sDvU@VX;YNr@U zr4}d~Ga9ppG@xSm&e`OXp3p?yhnC#D8X(;@B=n7Hi3#a}ce=8;^4lwFj3yT(k1ok0nUKsJMC zAMPe$IVP!c>9}B8e_H3b$Yb20;#TDr5bIJ))4*#KD3c3g)#~oxQt6K1qDsf*l#Vr3 zT#c;|D8!vn7D-`2YTU|&>@X-)sO%VLQ=OcQmkJ%8#5*MEEm3DFoqZkbEccF~W3D9l z#Ocw2F}F^QqtRf5xJgBu=#*$Q&8VFB!}Z2E)Qk#^5IPjVH#{dL$r7G~w}zQkO5myXQuD$@Fg19B9D6ojd?>6uXL}rhrVvh= z#nJDEsO)i_Q9G@n2$l&APO0IxqCpH2Q-E)Dq76A2qg2$A5{L*7wUSGmp%R@QLq(^= zloTRST^2fqgn~4P>_MW@)GDhKo+uYjho5*QB5H&&dg*IJ+4)ZanbBHCO=(82h4F}z z$PxvqctlCm!fr1JY$X=Z1Ny*-#=CVE#<4sjw=hGP80W^My2Kzf)LmypV~C1MRco4- zh)M~N5=BSp9OI$VZ5;C?+E(3d>kAPHO=@bVYC2A3bqYqafQ0fgOJTTib(E?ZH3cS8 zl}oNY8Ps>Y;vvsXC4`Bon%wTuWrUW^Dis|nR6C?zam~@8E);PsvbJQwMc|L;A-_M( zi^hp8F$QE&V(=`)*kGIxZwSTImPK9#iN?4J1=%HOE;Z7sL)anF0#Rk{BWb9D_S$0S zG_ivZCdbLo=t{W8(%K?CASa}Q^teBs zfn`V;BML0rgO&<7JB(_buPmc0fEb1|zVQfoanCRd2yFc?8fq~oBF);YaZEb=v*o+DdWf%|&4KTyZ^w+9E`XxwfBTb#`wHN?;L^l}*qLSniH3lq= ztbziIB~$fX+?`!2U9wPIAu;aeE~oK*30ErDOhUJQug{!5^!tTHZ|X#sBTn*MQ|k_O zA?LG;p&F@(P^TbCD5^#nqg};ilm%f#=|m&Q5(Z_#Yq6M9rj?;3dt8f@9vhd@7jbdW z)EMJRv&R)fDkgDp=yiVM_d{Zw$FrkQ#ubMY#{rX?OUhLoZ=&h$&jLG? zqi2l=`X;6jkEn$w6oN>CWaNw@7@;3th372LvE}4zNEv(A8AVDo%_$@PkXXFMT3U7k z-oS>GJvh?l2}C@Qm_%CJ_?N1K0xz2^`h_Rqr&=RLD^bym^BEsiNP5K?d}N8%FmA({ za9&c>H#|Q(&}0)i-d_Wa@!&J^ULF{N35iW-U?j1BSDp?s`ygR|Lw3Jke6C0-NL~EF zOSgaa)_rr{xaZBMZ}wy8etG=mM{e=k5Plfqfg2~^f5W8vZ<-SO%kc6QlSWPQdoJhx z&){Xdv4JJQLvYe=FW^ZCDMQV$upL`Kk`U1WEx}l7wshfEc>{9Fi{MSlfm?;_z@Bm? z@iIJ4jj6z-_`tS6E|Q%*aI0L|`*$kqt54r2R3bTatVFn*9Y&%!Za8{PLXD7zj$zAd zZ#oJRhQs76sG73E0!H?n1G`jKiTD8Zk*X}`oS{(^XRRSz&=ns$#ULsS8;7y&7vt^o z0ML~c_U$~IvMuPTux@RTtU%uNDs97px8aFkoZpatB z1%x47SQz+4Hi(g^9c1X31Q53A91>xWBp~ziH4?DePP7m&Dej~^aqCPvAnNwY zC5(Hkw@`l`Y~qrrrq*RHWx4~S&Eqzq-(BURb7OHHSID@lYLmF*Xa_B$owb|x6xFuT zJ9O|!y9u?kej$v$z{rS_8Fhj7q@z z2M$t1+<;=Vn-Ok6LULIUH+V=AjF275l0qI!g{U$IIKwMZ)#MUa!(qI>;FyezCuaH#YIc4m;i_Mu=* zqL?NYbj2Np9Q9Fq}v`GHF@$vCo zND(S73M?xthLA8SA_;@|!$S^7RQn7KIeCBIBE%RuE|M`wbYJuxxn8s6;Ea07$ViMp zwT_BM(ogIxTPDywD;v%q)Jr|Qd%z{@G_DxY=^8;QO+t2J!m=F8OLnxF#z{_OIw%SfMjK*c;!BFs zhqrC$3$?8ZCP}?Cl=jhdDyxMxnsMT(0+RIW$@X|261}Wv|Ad!gf7v2jM?;``T-(a! z$rV#bA0@$#;*#Skx>Xbw4uXXdm^_?-WznP##t<^bqBx-yKCdkP!Cpzm7zhmZ1}+8! z5)gRkK@FY`KB~am$VJWqJIt~WQz?c-G*k=>&0==oC21>lAu(n2OLUDc(84+{#<2D$ zkGlhjPLXOnhrZ6z-BOJKR{&GqAVJCAlu9{qBNzh6u9iuPrA1sF;XLNO7df_g1-X#ostW)RC!aYhUk4JAbSS9gm>(?u5cb*^xD z^J;KihA`3Kak_|d8w+$;u` z<1wZlaWytgl4RJ&F0)y~AZw}tPGt)b=n)_qWX1y~@w#YdrH^?Q+F$lD|K`#i@N!wk%1>Uud-f}LeE9NT zUwQ1hhi|(SJ^$bheq9k{zWK8AZ<#i7=3hqNe&zU?GsfR=*|^Imj=W^_Isa35InXXd z2|0qUa3#P9c#7Ix1uX+C2a1F!MWIMQHDIqOHOhdk>=>z$5m=7hUdoanP(~0&Ui!AG zVOu2zIORiNXZnPoYeD8-;k-}YWhmnIE$u3U!BM6BvC;KW9va<{|I}~B88r3GF ziErKo&Nb{!z_@@DqPWOg&sckti((>_m$K}Zd}fn5Zu&?yYg`jTSt^1oOe+%lo`zAt zlZ(Q10md2W?CWX=l}JVMwm0Mr z`esp|w=fbC%Iu(@kGt*xI3~EYic7A$7%X6Y<#yvxkQ<_98-!v@)T=HwUj5)gwTANG z3z^qi%rwVnKrdzB<2zR_2ng?c5x&{zqY%Ch;VX{56fx*yn|=Z058gK$eZmo8BvGU6 z!x2I{B;+VQThZ6y%a%}fAtr1(-{NY+C8c}yBQ8F!Mf?f(T#;O$JhkHtp|(w2MTZ{x z>B{JZxO8iOeHz;wB)V^?;}VS-6b&0KudR+9v~mNn6Ro6iFdcTZeLQNYkqNc!@k17V z9yljDOB5rEj7;I9Np))rv|@X)12A!Z%mGWU!y6tRfG461Mz=;cg)_2;LDo(UIu`^M zB|46gWD%THS=anHC>1qQLA54VUn3B?$m3&dDR-Qlvn{H(08lY+k z;f9s)Q!_?VwDXYs_v(cG;l;702z{!i?zwJrqR~xZhe4H%nkpBgH}wl)C}Aud zk2HmO6iKq^1^QCS2~Ci=Gseg?eGJHW!Ym0Hx`w5z5I!+b|CocSsg5ilLQ$5ZBUF=d zMX0qT9idzm2_H!K6M4BuDAm9)s2Zy>%uu9I7d@|{cxD)1nTy%#k{Cj7PMJQ0t{$qx zh|-&?Gi%HC8?Mm{BpOH5Wruy5>>J5=Nt(`Ohr{7NBlsBALf$emRIlGRl3YLIy6Q-e zhm!_I^q@X6cnk);HZ*M$Jsw@47n1Fu%Gx2?go27|fhaV})d3{P%MY(9IR)9(h$%*7iR_Uj!ssIw)J{wOda@%q ze&l74Wg$Lry-q=9F6T{H#Q9{sLM{nIsLFE1GFz=9i#~M;LhMAvG3*TX)DLvk_O(^^ zwpIuach{Xb)N2ohgYBY1(c=f9Mb9g2r2gNm-RakDMV0UQ0|<(uC<04^=#)t)@KY&QIgjXgp*UVqNQb*@)JIY}>12+v$vKapNI2K#+ms)x)PlM8U86%8wk=lr; zjYQchh7G+;q7)A#N-4-1K@WY4(W0~TtjA^Zd|BSt=lwI{8Le6Cw-stJ60TY_Y!R9^ z;w+%4)1#thiqxB*NbE4K6^zz(vqMhyx!S==qe!Wkhz1?h(L&uwm9dsN+>~|(XjXeG+jF11BEp`| zUVQa9cDcOz$@_eHdBG#Up8v>=^B%t8-d|tle)-g64mjz^eGmQ6-|hY8ZPBMc^3F3) z__Ti(dGP!9{KP(cd}Qz4KKRbv_I>*s-o3|b-}KsTcHaKK?S2{RgpL72X&1o&W8fE} ztW*(z3I9Tx-OtijR4KIpEFmykwz|?#Fa>4<%K_5bN=q|y@31xX0nr1eGvJ67dZo{*n3u=(G5eJaN-HO;p0dp74kg_5e*-wOFK-sA%cDd{*=vvup*uT5!$9C7@ z+X1kxmv1NkAqbu_hphsjskP?N;s}?-USTIu6r@tr!F$|)TDGlU>&v3EMr5trjIa}M zCQ;f^fezl4bcaLBu>Ao;+5TgT3$c2#dwknM?Z!@<(#|kF(d?*-C=lAxZI5<)yeBec zMUJm+`*@qb!vMx6&#bzc!YHDzMWu=mMwT4mJ)taT$9r)L8khA@>p>>4;;9sRCGiW5 zey}l^32 zVN_%kXkv=9!BVA%S4Co5w3%%nD630f#Rk#nsDfBVx&NS=+d7 z>Z7?$h)g+sM}f-b-c;XMW=iWQzSenFi=NaTUXcaTgRfwk^BQ7P)7FS0gwY(K=Lm)a z`Jf6P$gLM@Dw_G`|D?)*VmdYmT_KVkL&OuK!zD$!%oIC}_oS1>>vfWNMFoU?wU~93 zLfK)^pof=Dc*CmK4Y&SkLMWOK>Wa23e}sq~grQG?5vI&Q&qowYmYS{>PGs$Xr3gb# zxq!2bVXcO0T|kyj7e7nxkWBn^6K%^*Y6%h?x&96K-(8I`y%kwQQ;O>kYEr z!$I&eo{%YB<_t35L_?M}3};A$sIia&)Q))=rGPU}_|S>2naoH&Q&3l_aW~xdKoh3< zfbnSwzA4&(##^KvO~+GWW_jXZYy`!8jF`3rheAZ&fjKo6C|87ke$%4O>-<0N0T=KJ zHLr>uQgm*=(a}u`@bcKGx>Je69Ls-< z;j^Y%wjHPWY?u_itb=w5MXFb74eBFeiXdJ2o;&w_H%d{s5HvX z(r^vFA;ueSkW;GEVLK2cZU#aCrGhEENMJ?Y1T!>+T@&X2uy7_{US76fx*z!R{pH@f z29u%VAhY{rZ)N+jFJE5vhPGDBC6mh3;u81_YH{FCTUMK0bEe#xsF}L-iMWIvn z2V*3Q$4HHcp*Kks>1k^U!y361r7vw=C^f9lJUx#xo;(FD4rV8K+0Y2Sup2nkkeNb; z#=WOmq|{IvwRRY7**x=vSxmu~nocGX5AmF7KGznB>hzKBNGTglypAiH%=Z8K`fa<7 zw4F!%t>sqqIChmbUev5&!0?Zomm1}czN*-qjann!z7GA+9-sN=JwNq<-S_*4 z9ru67PDgxf@6(St2xRtqUkB~?mJhybm-qj}8}{34mwn#)#<%UZ^;j~KWTL&5K2`5v)f_o7Cq7C4E3O~BUGEoEw|D}qcFWdZPzpstFj_pvec0#je| zpU*%vp>}!uyWd=|A=v75>rDfQ&KuQN$MC=0yty4VZ4voI1VfC>yB5y6XW3lU|LbRhyds6X*W~>ouXlRQ$?HRi#9LA* zhEA^z_A(Wr7m#|*DK#%CA=SOfykO=ZIIwErqiYsVmoA_8kSxbEJqk%UhRF3@zjUNc z3?nR!1x8W(q?iaVc_9)d4CP|Nh$1>9!jesl@ui8VH8C(qVCiERpGxiOwK#6b4J(@% zgkf1*6tYI2Tsk(*4O=t?tq>}r$pK?m#X5^Gt1ooal4UC2@FqFuDJNtKRZ2)f6-826 z^N{`+AClUoP$>q{@ibvpbt#>ge8@O8!kSZ85&va1lt0pp6aiVYS7frLF1a9!e;$rL zWQsC5gQ`&GxmN0h)EV9f!d>vuFDq5VzP1o1zBSHHp~hG^NVMs*%zj?sRWu2oUOo%0 zT=0m=xq8uKmWiby9j7Hm5ece;Ebvh1!WV4H5^7F2rS&DA=YmtH`OgzhRz;Q%()z;Z z9Iy~tr&4QqY@GxAL>Mcm^&)y~YKw1LVFYh$vg1{x`-FI#3^|5^4NWawFf-NSyy|0! z3R>ON7n*8O;5OxYM~8&-`d(ep&{L_{lBcW)k!TAQcBM$)9q1e-!&?4uDx_%;Qf{UHUq9omGI0-uyLoQBbj>H&EsH>0cu&FH` zHi9)FtB-lhLrvs{ifSoUMP&2>lVe;ZN0^+~i>8Wz;wh>^78jnh~pkZxTjoiW-oNnYG7=RKrI7AZn%*-YAz*k?+MvYIx~x`0Q`WD1{vs3C2s2$mu^7q(Rdx1!zyr$eG45*GX$DOtwZR7G`izDc2DYKWWz zeVLj9*b57@Hr_R>?)7X!J?f{)bb%VGU$Vcs;TrF3lL&%na7+`bD1uVm2>WNvUd)D< z@$fH{>UJ2Ib^DAU+yR+hdUhDL_pa%<=M4)(zHqMUe)Wq5IF}JZ47m$pP&dn7s8)3U zT{o*sfiTqU>X|0g>?RsW$JCZmJZGTeFCG8clfQ5{Q~XgKWCo=#zu#V%?I(wPKouN-Mmu=R zBF`_N0EKY-J`o@+3;Ft{kMG$L;(f&iBE~DifYQEb0}lJ14!&;*VHEie(EyMqUo(WV zsS)zW@jkHJLlZ*rY$friMHtzQtDfAvX6`fVhOfGyqSt;XE8s${zwNq0K|;qUD@`u! z=}Yl+CoQHzxGM2qBv zJmjjZ>MCMLFnlm%lxvrq=Z4!RKN_hm${fgB3e;H04oz7_n!r>MPr^90BuzP4&6$Uu z7nEy}CZUEY%f;H;(PGXXNM;oU>q^16k(Xsgi@B(qMXm#0y)jVK237Q zGZm3D-ue}@$uY<*UAyFQ^VS4bR}*aNpsw}ImE~tmOLH5er}XodO9G-JD(02GApX=Z zMv%o~RZjlx9VdMDn>mGXfnK7E?=*zd>@2B~X`+C6Ms7_A7SUD5vz=r>i`;zYni z_5Bf=#a~v$+K~iDU0DtY(GZr&Pk31-6m)@d>!m)XELD{M;;AnJLV++MWRQX`vpID> z&=d-h0b8_4Qxz8QW*0dLeWXVmW}$V85p%VSd#)^54eD|Q~I8qveG&$<%}?j7$If6(KnO~YMPMShQEAUm8-)pQabsqZQRl3k(F4~CC!Xm=`cfWu-^ zWWgU#f_oNJBI`wlPbt zISP2=Zsv%yAR8TnCBp3(QXdWd`?G6F`~bV3Yx9Ft2F@36#YA-m`+1zAYRQX?rZp#? zN=2*?(HGt#OrkC##B-P%{8tgrh>ECZUz$o~mC|YD@?S4VP)*RRX%dz-Es}}|i$iTG zvd{vgQhduqXH8eB>8HOzX$qE-PGyiyJUfCZ_*Nok8iLUiebV$mO3u6HS1IW*qLT1jA>3}oH3?|FspY@76pUr%$U~&Qd{OTvYj}0&P``FX( z`pKQwUv$nX2Yu%opZofW2mZ@P_I&@_U;Xia+Wq)XA8_&!2OjmweZO?XhYvg8ZTtNF ztM}exNB7I{^1JtZ{T{E|{=W+^gM?rsoY?I!lnPfOfD}QFP;($RP1qAerB9P&yz_;Q zNyz=DV;~p&BvFw*BTUf*dtqCEmx3E)Sr#yqNhm<9DN{o%xYve)VP5j|fqHVNmcz^v zLdSFfWbm0mjZ)4%ydp51WvM0@REt6F%Bsa@47J|bCJ_(_T6XB>_s(~f41Pkpf4Jj@ z-{1BNh&8Yb*mn5~zG)dNf?58*#{Em@dRNW2LI!J$_k|P{)j}AheT~sO zoL*J(UQjO%^~RT1p1eKf9U;PAVM>9d7b(3#)XQwLUOiF^VJMJ$-H1(1ivU*<2l8L) zeLSuTORodzV`_qLqqBY4$Lw;lZP$eD{DwySkwF|b>ZSf?pZ`SQi!i=t!R?etj|M^Jm^GAl`@69G?+8QNc8O z3*h*S6Jb2Lcygn*H1{%MM3LoCuNR4lH;j06Y%3iVuS;%j{ST$<;*)fnFu`eZ&DxyBFkSZh>oslMXDkC+(3BUR}o)uR2^TbZ8sMCnl zL^VU`=t4_?0~+Fbe?d75B!XIZv7H{6w+s(Ufjae9cqR^T~6vt9ppR(wKzYHLdrVM zCJWXS1!0*Q0d`o+AXlTjZJA4rPFtw;WoyM7te$5LB;C}mPXmxfb(u>AB>W{N;wDf^ixLl2(RUP@hK{GsPil0UpJHN4AdG3CFM45O z2t`va|Hmm|tc`vr79(zm@y6ZskhSYN$rxhOgQnD?HXleZlw0G9NYYnV6>=7a88tc2 zw7D598&SKZ=-IVIo2$7bm~Chn?x9wVP}6B=%kApwTJt;$QW7>bF>l5_vYM7!EEbMv zXp3dzNR6Fp5gJVjeAAHX82*SD;~HvOHj6~qv@uvFlF?Cw@qtXAW*7paraFV_j&YX7 z=bI+*e5jO%MSy$!G(dHn5YI@S<4IV+z?|PfbJq{P`z`N#`)+&fvGXpk+3q#lO&k8{^0aCG|MIrO ze_k&4U;kVA{|^TmgO)%jf)j(KKqySw6)glR3uD5kgn%a=kd%U%2z-WiNu+QnYoIO& z%reyJH@mv23;XmM8U6HBcK(? zB`*u)!mw~F35*QYx};WJU63{lkTNh1*s92^EOabo0U}m~ag6kq>#sy^nPsO^Ewazx z@ZXlddIE#Jj}5JV^~=Xxamm-t`qGi`^0nXprvD`6$Fbq!P_yso`9`z1vV9T8``8#0 z)pA3Yo*CY5FKgjgr1pf7g3{w@oK8;xivrV@(&3*SWEmn7c03W-2`{^~LGES7|5rr; zB&UE;WdF1Y+jtS)rq>?c&TRke5uk{I{oE$meqLIw_IA4^q3zc;i}0$e*mHp$Hrr_x zVcL_Ofx%>_WzP{j9j)NU2M|qlZP)ZQfikF31nT;tmeiMPpxwUp10Vyucz9V+I(Xmr z@l~Cv6q*hugU`N(15+bk+sQ&sa)8fu6QkEoh8*+6l%*r(Ihwkc+H&oL#?H7%qc*q3 z$SCLwJEA03y2g}N>SjHqfT6>9%@v{oYdDnRrF@_!{_&g?PgUsiKe?2Swi=q8&jNYY zWHnm5>8mcwHwxS%B&jXixY=kv>p1FJKxmWqxbZ2j6T2P0hmlfsQLZwAa)$BxrPNq! zh{S_i45O(J7f~hDO2;IR4~#4!wXsUs9yb)!YRMP~Mcngz5jq{wv{)fQL5>Fkytg*Y_fpZY>YJFoTE`y)YVX(QksJ5<5;)8(y3jh41wy|%xy(<2r&o~C1GZ3#aESn&kw!U2QKiyR z&S3^Mln}2-j~u`p!yF48y&S`WAx>iB!3^&_=IH1!=EP{ZSq}bl=)HITLa$A7b6yaN zgDzuCeqC;^YC@O$*?q+W1ar}Rzn2qpSL5dn0!E9Jv8T*E^>*Fq(gUr6U3?SnXhQj!(U)&f@ z^^^ZuCe=Z$3x%eg{QtOVWgir9hLFUS8FcKGY)rADuN2Q`yzU@OJuMpC=BoL_PgFEy zTSp?pc{DJSPhxw%m;eUXl#sQ-n%Hf{JzmL81{1^dK+PgqXvpj9idz0D%Qi90$%cbJ zVr$?uifOKLjC-SF7$*~sHIxNeIxYwLEVP=~q2D&82_0t>>AUeFQJUpOO-0)yH3sn% z5Jr~1Mn}tkF^&7GT>BaA#As=!_Z(##oTF(~b+%Fd5TEmX2&yoEg;WIAP#C zcJYIeNl%+&I)$m@ZI8`2y)HXUxtE|QYvzoGNX!AVB1DgYI;Gs>i6e23hS96$i7HJ( za*|5fv}2g+8sf${5ZvH{QOFZTMgbWV#+l*?BW5WdG@TE*fzPpGHM-GErc+d_9^b^) zFFNCU*RY2BWng*hx_Qq#fI%6KJ+90qP};A4c^`Xt-}RB(=RWY8NAJ7_WS)KBb^fK) zj6YoMm%c7N>xfIv{Oq}>9(wB0AN~9x?>p*K`y6}7{$D)&qbGm%Lq{I??tgyon?CUN zUEDA4_x9c0FTZWKopycg_OIJvyZ?Q>+yyHj2_6ENP$azRD)w9FpXIt1v?N3n;8Vgd zrfe`-3TaAR_yVN9HB6a=1(so9SXCC3#;D7<`{m?mg3mBA0LuZIu6^NareuSzL3k3B zZve#z+o$YF4I&HXqSAvKMv z&W?%*H0G5wo*Qbh<6UkD9peRWFJsGo{j}rGKjXMlK7a5@pZf&J?7uKybNRWy`sugt zyzxhnaz79OCS#lO_qm_8N6CL|1s|IMCvzHhU{dd8s%IfF@iSRig+b!_`E{k zGYx?%r0u$ePB87UJqV$cGR!z_$Z*;1d z4@^1#Xo!$YJ&Z3~l@HGP0{v6M352)dUB0hf7kt zPW6aaM1f_b6;D0m(wN4KYe5*bSe$#N^eDvl*fpknkflSJ5TiO$(X^p}G^bxu&IlAw z^t9L#@{LeB>ZqjqzNvz<+*Qd7!Wb)|}EHe_^AO=Q1z z4S`-bQ$~dBUN{BY~_G4@htO}0*OEwYs3vjLysm{cwD(WG*U*5+lbGKI!UIZvyRq!XH1PO z1r`*kB@{)VEmw7#PcNUhe#QJ~!^#CyY4xJnD;Lb9uxa%o@{Ow&KC^Bq;ku=B=#x}S zDLJ*ZOP*A^X7Ll`Th=al_NishJ-uAlJjF~=9n@1D7Z5?vwZc_2^tTO%!Z^)TDS9G9 zSY#*^G}Ybkj`1I7kt&GJNZLilYoHZ#EHqCs7s}6VT<%WU7GZ@BkIq%|!poj$o?jND zB4&|ZXt(vHMKGC5b*HJKEpqKr(-*GlavjkqA)>DqQwWos8v6nwYxK#Ps;(-8jA)mH zSrQ*CH~L1?f*p3$$&@hAveG6tIlfIeIf-#^&^?2wHGisThEN_R+uDVCjXcl=L>z2c zf@6ac7J-s2IO0RjHxBD~V_K;7LZ_s9L_u2j$I@b@ii$>=veB&Xti{xEMIuS}TqNtVJY-yFa3gqm=s%~ zFhF7`AVMT1F>uXx#~7y6EheI52ScG|*&-kmMXY&CnY?fDkaIPki(XyB551V+VySy% z7s_syy_*d(!_i*A?$TL`CxM#R%(>tD*vn?#v0?Fp>lWO#WyOO}FZ%rtFZ{x>hrQ># z(+|Dm%p*@a;(){dd9ULR`+)bck2~~$;||$(_zLrT-?Y!3uYJevJG}Mv+wZ>9v|V4l z&F(wD@(nxg@T$obv-{<3Uc6iep#QeNe(xH11Qj7ia1vSsO94hg0Je)-yc=Y~h-M%Z zE>6dL&UCze4TJ`?p;vXGPF2{X3FNZxHW^{)v(x1-1r9Jp;(tH+0WRnm1U~ocr@+PR z$Vw^D%r5wQPDN@C~IUPVAfys)P0^T80 zcens7#0tHRc$UPg)Ih6yXJ2gwcmY+AmIT688cYVUq3N4`euc|rJbcX{{0lp0K?+$@ zW(UbCl|tM#*9+UAw7Rf1XEfnT8s!_jOybV^oUfnca#+%I4F_2XSJ zpL_Z-{ui_BW*5!A&fMGA-gy9%{X(p}W-n&@c*op+abizCDYYuwD=a;Xg+LO-v$F8g z@o+vIZzc~5%Fti<7f-FHgHm;2LohH2Xxy)9DYBZZbjqC=foQ1Nc8aX!ag(w z0#Zs@gge#u#Ka@*_H&~=3DkR%@`r~Qo8dSj>~M@J676awzBa#gS%NTZQhS{i(?a7c!am6pu1l)!oRxm4@=i#J^v&$pbx^F&(_ z9UdZb{veWg7DNVN$O%nLBVvqNeN7cpv^ijsG*6lyrcib?msyOibr8}`W_1%0HVR3k zO0_FhQFlmC!5QPDV(u~6I%1?!jFT!V>*P7|8A-DsOY;*$pUn(P^~<{Jb1G}F!&Q^h z%tz*}IZRG-*eWLrOX(g`*j3UOtedxGgJLn%McKbU)G~vh(IkCl&VE2I0&N( ziPpu6O~wg_wx+x&!o$Xk+bvk7Jm-JQaLi&TDAHvIOSKH81t=6f^6UU6fLg#WdTQBR zC++ZWkQu*f;j9(&XM~O^ppNAozvC%8hr`A6DS*uEBzb~SH&Y%5&?ieG>Qy1{{Q5^7k;Z%e4qRB6e z~fjVE%U`^op{+dPXU(CKIL=pvY*TT!9`#7 z^V`0^><7O5V7u=Kp#G7H7qjj8)`h8Vp5yHpC}C?vsjVJ5uv#q^=RG`$Y|0qag{eiY z$dZpJSnL(Ci7fnZA~lU#R#FXFVy!sqeIzTo{onFO?X*3=!e|b+9tU6!#?!%usR{)= z!k{(~sg2#y*G7>w3O)VRVj0;u%4(zK-vfQPuE9M2$mo1vH(l3)hpi5?vto5n* z(%Bet!Zq;n@+TE}DacDrUQCj%oqQK~<>bX?5^obNoqR9P+n!H829Mw8EhF;YYm%zs zRU$$!9VI73EU4?{&%wgN$2GUkUD%2_d>b8G4AeXPyHj=v4jrtVi@0PUgx$a)Lm5M2 z$7_GX8+!X>b3gziffAw~Mjk+7B3m#Uh!jW!$|#7K)X^fIoPr`jMi^$rmYimH_Y4{f zLYg8^fkdh~5{&paG<2exW~8xe$ZY#c6-iA_AILm-_l-W0(05a-UN_Tpw&{vdq}Lqd zvx#8}A-=K94e}o1Jj9T6o-9yJS}7tidpq!SUXo>8sB47|NrcZIjIcK-DJ0Pg-O;ZC zSg-LIE{QGo1cxM-oCiF<^^O}FWpgAwwH_^4ZfKp(@MxQYf3*^|B6O2k#D~TuNmUB3 zln@anr^$#^cJOe~46DGzGyR@&>a2!F(sxC| zLb=fq0kyb6Ld^&^KS}b3OKJ^8UIPmBu&PaSCG#qwCV>Hf8yrGvxm?oG>(iW-; zYw9AFrMlMnv!04T6GlTy3z{NwPD%;uQKi(BN@%qs>*;|*nf&v`zNZu@{s@91a_>4h|C zkMCg5t42_D2Xe}+5z01Lg+dn)KI44g39nG2GmepA;xM;c^%yHFGH~=;;})IsBpumC zmZb3=vuhU5S-S+zox5(S!?r6bx5n{BazKA?|!s#9hu!VB`$7)~Ac=3Dyd2v;OHDvoi(h%n8f z_p$*ZOfH6WSZHW`Yz6DNJOzziE@MDK(2pDp3@-;2r9Mw{&NmQ-nCDD!i7A%4UTp{?5tk(xpXV+*oFZuIlue(}t!>8s~^(A!blSMlVV zo?b$yP0S3}YZ1_?9B%E%@>w+OS_V$g=oHC%MS+zWON)#F-DM3Dc&fnf^XBVuIdJnn(EIzz0z{CDy>JW z&-#t5+|`SoB(X3Rjcc<_gUvo^(P%9)7^4WUiDm`fnb}WGIz>0rIK#d6oT`wh#nr*R z$>X3viHWgj-5IQ++5j0TVN|3RV&`RKKI?Mv$v96mN+Qd*qKZ*nD3lfu&g4HiKjCH9 z$^f!!Wsn&<28!J^qv5vBhXa$FWj_mse2dxr^0FCs!ONSLKI~=eIS>AF&V$$e;DX~$ z{OmsGf90^Ro$#>(-?!_B-o5jQhkxjd;|@LgQ~SAJKI#+yc*p_&aKJlu-D|h)|8A#k z{Qu=Q@4SQiiw_pE57s`F7-q!@YlI@DwT{C=PU9`hnBaU;<*oaUz za=dA)w<73U>3@xvEo!%)Du$*hL@gq#%zA8PS#H)H%NPsX`cf*TsTDh<%@f%ME$Y^F zJES6;p@)}!81)pR;E4c7U~C8~Izjn&678Z=b=yl-QPgA9e!`lqp1z8B5)4Y2nzjf% zsvNLK6yShO(rLe^7(SpHR`op^P#5Oy`#zv<-?$0$;^ALk3j&#?bbSBFuWcYuTGlsi zpk`{mn*(mMBa76+`%2EjxsN#Zdy;|xT3q0Qd%^U)@-PiVb8O^oStB*drS>%P#t75s z7?CmTA|pOXaD-)K#AJLXiY;DBN5l*S8i%Ipvf*g)WQSQ2=NXT3wbVr#wa?dLL#Bv6 z-f(CUVXEF7P+g08E2WPxCZ`ws{ov>DUHF$?n>4k?nT$6NO}u8ND%xd}w^Dp_Kt~km zopyOh$ZhjVteUGmUxtXibekZgh;c${R!x&!cBJvCq!v5W@~5R?Xt<$W%@sY&$QQ=C z$xmK+-qm`hM&C52b%WcdTX+(_S$9+W*icRlFXLHGsEG!&;1pyjBRSdVG&JPYEEXT; zIMnr#j#+BDnxdQ6FLlN6RyaEeS+M<(NFK4_Pn!NTOiNYfGFRBF7asUowwaj4DLQ_Pa#DO#W4zZYwH*Hv89Jo+K zQgzm{kbQO>{LDkW)@8jj2>}FIeY6%VLlP-o5QgP2*|8a695I_sP3O5RxoN{UP5ZD^ zv_ddX|a3-VNJY@4PTb~L7)U`&R+te&KE3rja z>r&3-BtGYjII^_K`K&woP7O5^ii|W(71?j>q(-?B_lYI!FihRa2e8fLctbquH~Qol zqwmpR)2@`n)RM~DpzKRFD4S#-^z}?996#<$PGV=%aYG=&{O6~%XzNs->l9&K#t&VX ztf>M*EmDhI9Hd!O3sEzg4JgfKg@y#*(8!8aD@%OH>10{mMvXP2#w=Gc(i~8?xl~py zlCgCyGNo>{@b!)kUR#RrN~2mg`ux#Z)|f)wFN1Y0H!(Lrt6d;}qF6=&`!}owa7(@8&;p=!>8Iu>Zg8ee5qB zx*xoJ@cwVxf3G)sAA8T&ZMVnkwuP79xYH|kdG#xIdgUv2nD$@y%iC_V!{3IMr%eM1 zfla9kS?@L@s1wEvNRC(%{DgMN!@T6+B%m5}1$cccnFSIGu6=2Os2L}5!wdXUrs+G* zu(pOm(_Q{*4rmR)-u>H~RPoQGSfdHODxwC%hI~0pfuAg>Qa1EWA8B3FngIXtZoQe} zFeDBa=bO5WFsO)qMtGqungC#sYsZtktyActyZ69NlZ! zj6>5rbhG@!i_h>rw*M;SqS@uLTW0@`*|(VehbfTRFMatK#}swl3_jbptyvQuSqrir z^vG0~dXL(ocA~Qos0E^>Bos&x2C3*=)Q4s&hN($dz>j9L09G^DvchDr(_`Pj@?Kdl zs+IQ5EbpiJK9^V2r1*5a<$^(de?;lxCmvbyj6TwdC{&D3z4se5n zZ_AylZIb?3#>C-x@09E7x&h{UHji>*+Im~i1KOnv>2 zkfz8lo9iD2`9DFk%2q{|P?3-pbMi=5s8!b}gEZ;jy~rdTOH+_i&ZThz5TeW~RZFML zE{_=f>iMGY$P{hT+Qxe>)a5Lwi0YcYBQ|AoL+XVqk_9s#^mc?NR}i3hGTR~&Rgwr# zZerJNt3xW=lSs6qo;5i$RGB|RL%h;cj8G$8tMBA|!wV?|Us2SZse`L^W=ZNSH*x?& z(~8Sfovnj-X7M$lQIgsvY)l2b(X6^OCn-d#L>7pdX%a4iVZ8|I&3P|T&wFg^meuAY zISIzpZQlBlmo8hCy2FBgEt>8BG7qnzqDH6Yv`7c9MRK!+OErqIa+l6|3~^ZXyrpJc zXp|4Fk+Q8~Y3qn^{*w<|!cWY&Z~lx2)Xfd4?92y$Z`|>Mz_KQIt2xdX3xUFD)V8CD z0>>h*GNn5RBTH_8jJV;(v|(a8br>Vmq;1p;8RL4TLSfBb2k%?EE85_WnN1Ue)<%QZr>&JhUA~Oto`H0 zRYE87hDPwm+0s{i^@tJr>guWBVCd7#n(Az7aa^a?6yoGa9VU&Pz7S$MYC=ys(g+fY zfPc2q`PSg7rOUE(WSycTC+!I4MStA5h8%*HI+K&&DL8m;@TFhK{#xT0ufCo5ZWyXn1FP0=2Zo2d&E@ zQ!ZR!2J!)Q{x|o?xr)hi-2tFrXPR`#*-_Nwl<#AQ2hiBIh>eN$P@8Go}r_ zK{VnGCC0ESiYS;ZE$1XHdE0V3vtyMR(#&b9m}zR+GwoE!lO5T(deBS99SqYhD&<(j zy|EXZuV0*R2D#5Ov{D7pSpHbbN=SRF?3jztbR#evniAC2yGeXD4$PR^J z3QgYfHAY}sqhNIK21RP%ka1Lm3GWm*!RuF5l+A6D5L*`Ow)MV3&$AsCUQ8SHZ(`Ixm&t;WzugHtA z*^WfGuuIK+>&QT~Mk*bziJRE9*pwdm!R+MiYs#aCoTR}2x)2jz#B$XO*skXwj_`7~ z$>C*}&PrW3hnMM~rx!fz_r7M`^UJ3f-oNm%Up;)sj~~9{hu=N-=yOj!_$$YK_}Ih# z(U+IezkcGqE|>j3sS`iF|K~ow_b2v!i{Hn7`>wBWzx>7>r@i5o({_Hzl=jpj0|gDrI)MuGJ7_RowjIDP$+i%LfJ@`onKw?S9J(ybXO*W;10_*UQ+jFmJzk z=BXGG7DB|@f}Lq9^$lqLxHxv{tdx5!E7ChU+)#x2aDaterJzW6w0Pg|e|76kKY?Mv zUavqyqo6U+`n6M!1yS7sz z!{D$pjLeR#>WWyH%GJ|KIaiX93Vw@(gyoT)^>@OYtslz|M5B{59 z`8@3GSHD~_KXChXzQycX*#|y8_AyaW5BgSw`7dkbT9RhI<9)Zr4m|c5uoqZRgr{TS zMAk(_PBX|HUS@&Bl4qY~`goQD)N0`~l8!albG!|F+|1hs?cqv0wRV6{job!n!?eRY z$J)$otu&(!OOCR0r{Cx3`@|!Ez-L*imMxkc8?1^By{y@38SHuC+2p+4k=x^`lePyL z`FbCRIxDW44-N#DJ>6hr*w&4(n_|3|sNEn#$K4b|zJ3Z)>5@5K{e2ii$5pZvI)<8Q zg4n>alnx<%zX!`E$pED%m|a7d{NbAe|H8SC5(Y{5w$T&HKOeDezO>A}UYSQY)(@dfw3u2BkCGBCy zU}M+QoB}x`XlxhXfb!EYag}db!wgT-$%~yjYc4sQn^$$*hHtWYN}7}6K#hWFnepZi zn@FRGal|)nv+udANFRMO+A1);8Sw~1Jr~ABLE?Eak|&*yj8qxFuwX@~+mhiMuQ($m z92VN5*;?0^Qtcw1h>6~n<+D$IM4L;x;OWLfo^ZnoMeLI#m(_6+Ua^cAl-5~&7v6?L z>+z}aLH9=?{pzaDhsKc(Qe-R!4>zycux_za-`v>`nwHOOTwxxXqMO$*1CYT|Ncm}R z4^GJP`OoJ!Xj&`vQS0Ua%d!kcd8-wg6e{9ClZyZ3YMF#20o$<`e6Sp>3uMXQTRWWL zBxlIE!DpItW<9`^zBHe;$PUO1FPrCeQxmfvzS}m!$^Wr+y_J&xs_>y$snx2%gqJZw zM`-#PBV%Di%pxOhQ2G&R695@9H1aC4-(WRTh21gIM-8ph#sx3_MTsUxppa5oRM(20 zz*bs>)kO)%wp7%XP8l86D5&DNH6Df?#-Jw>*E$`>0x41lvyQ~po!#ChgVAg0|M};q znwA!zj!TP%={hXZm(Kq&e28){-$r2}RpfLnAbDtwz{}3njZ|aTbfvEgMMGx`mk1+r zsh-kQ#9AkFUZEadRx4K<8f&V>Yvn4XW2>>@kn5dJkvGuQwZT#tY0P#J>^KbwJ6CUB zGu&07|9E!2B3UW$tB8;er2OYCPyOY&4HWPcU}R8wIEqi+=mwlwQxs&@FXL|a&>e9T zBGX1s^_@_xWD%wv#=sd?pvD2)2!<;Y0zSt2Pg_6M;cy)kMkH%+jjZ9uwDB5co4kFPq!}V5H0^Es zR<}VnAEauLTk^<!f%G02@v| zH=4$`HETP<$QlJ{!PghUs4ZfuTn9}OMBiw(E(D=TVu%X}#xh}~hzXRvk-jViTY>rG z8^`~_$;t0@nJ1w(<6@+Yh_NsV$S`q3yG_3=$m$DT6H58qN-X|=dG2YW)4ZaCG|h(? z`rK261^SANg|MRrMUbsuOVSXKbn6zsk z9ZP-eMw#2D4aZbrhcnsX;fplowu?bJG&9WG{LAx7{z# zyyx2KcU|dyZ2x@ugu~x=%pvbN;WO_)_RxI}`_Nv8?7x@)Cw2PKA9Kar``GV(^Nw$O z!w$Q>W?Oi9r|qY`cKdC2e8o2XKdFB2>lNF*>Tku%KfL_gmtS_tkA863Pp|snkAL{x zZ(V#I`oZ_U_5JT&dgT@0yY8ARfAO=cfA-TK|K!RaAUaq5=yJ-^-`srt%{Tu1!t>5P z_w29z>W1rN)w=bT8`Qe&TNj>p&Y8FU`osOnyzc$WpInKqx#IhmUvlBa z=lt8Z&p%huh2Qw9yI?>VphjI@!`(nMz}susZk&N)I2s~`tbt{6&{$R#2tBw>;)4Qe z7(!;fo((elD8N^$d(9inXS|OE!q?zG2Ci4+DqO9zPXD5;y3BGzTNp(wlT%g&md7*2 zNsW&0kdsq}->G?ZTy@IuJ9Lj2SHz$$aIDW9bnWb~o=nqyy03emch+e>YVbR0(m{UF zS!bR0g=3HS#F2;Xf5OopKkF->z3ALyzjeV0KfCH1zrFdp58Qq2Blq4o^Wj@zG0zTb z$7@lZS$?RhUz6}S^Tw1P-yrei!bJFVq!sy~*vnMvQt*2ns;ej~?T4pSnfkL8vUJj9 zfsT}Nlr;~^)k}P3%#+1l@zw@ z96Vg2VrpqBUANqqEWjoI8*cG}*)|-=-C|$-)QSZvE}QQqb9X(SaX<%L|Dup&(474C zh}2Ww1HHZrS}&6M{SDohiknZ%Dg5c#HF`~`3LU-HL9|%BBts!3vBA_oW~zvdGoquh=eAc!dPGl#b=};&XZs7)LA2lN0=@Sih#(7 z4{C_n&obwcCS(Z_-ass!=S1rJfP+h%IobiF67DiMnHp%$8NXc_PccDzdsjKL_-sp^?>!Qbb63 zYDnoTO*wtpn&1yjRO@u2Tq4J)LLbkptR^^C&%4q0*OG;IJs!G!&^aaP*U@G6(^H0j8)pj7L!ImQ{5^PwgfFKDtVs%05c#&FDfG%Fa-H>UKB%5 zHvi@MjV1$AQlv`#B0*YIg+czX%wZC0c8PprAK|r9ucaiM`SY_+tCCq3s+&Qr+hN*@ z=`gO8j<&LvL4N+n)(yI!GkMZhPmMmpAVqnOFW?1p;S-P2;Sz^YzA-*B*`b^d>|}}& z{_~1lZ0YlXsk~LI{wCKnqCg*EbPzG6u3ADZ{v1#-Qj~E9wMC7NirHseb;hX)AO>NK zVDpwH2~)MGM;s{BcnoWr%PX3!$&#}?)pPclLTVufNjO~4#J@o#%t>-;vW3McH|vxt z%59D2Se{Up#gFagLsOld6flfP$th!ULqI6-d~{OvdP+ivaSVM+n=^QX$>3BE>w=G@ z`q2PqZwFW8H4h(Qm34%3enZZ#B#NADF}?dinS$yLz2gC%4x-a>7ku`i5CrWL2AJ9> zOFk&}IRMlT`}?p+Y1kk19?5>JETRS<~TV_w4-$xh%7)z|^7dekYuJ zTmqt@W>@@_HTS)X|LyFroN)Z-4m$eq51)M8L1&+FG z@baujZgY?6lRzKXn&*MOa33Bd?Dr~3z#mx@)E(v2EF$a0qQD21TWhcqNw~?XVILwU z8A&#wwWdmoZd{Vqz75S9Z?kG|k+tKI+Z=2lblO$ydw3G1(kjxiN1?Lnwj*VeKBmow z@v(?0ysb$o9ouOQ)ddc>3Fwy~6g0e!_xrkd{n8__T3=-AkzNZUeG*iVivR@#BL|9C zxNq`BKlg(^a9;3mco|Gy=UX9OuXU~O?VaJ(py4sZ|D2TSX|5I@Y2DWsYV}ChlGBgU zsmtxL8Kf_^DoT}(VJzTE2}c@YHTf;>2C;z_1VQuBfrv?)A@!XCJB)Bfz#G1Nu#+pw zH~x$d;{1{FL~4^qGbWVAkpvO$?!;+RX@_oZGTG$UW6%b4IOJN_F$6qRgtEur5g1QI!5v zPe>GvYAjbuh-ut0h7kmgsZyNv zsOp>{pL#0GAGKaa!JK%#Xg+bk>MJR^!o z(&QmhQFxgmF9M8YO_mQRxs;kx!~&fNpFyQ*s=`}sp-STkn;4jy$YB*x^C1ZZ6}hBh zjj~)+N$9}B8{D0(UjYsS%aHO4?*J^F^US8zPp@49GQ-ON_10(BkqnS|@~IS44H@Gc zV5*dm@xkn5`w|xVvItWQ^H5q{MXDD=sp=yoEdHuDj-x@@`bEtEQDY~eEH!dMIv5C= zSzeW;AWJ!~WRWfyS&q?HB-@1HCuJ6p>f~)2>XO%Y6^#f@O_0Q}L!TU}g~*XEYejX` z0F@=zq6uc%lG9JKSdp+JC1e?X(U5jILxL3L{3y>2@+coldG1pJMpVi3BEuR5-DiY* zIV>d~drec3rfp@a(`P}J^BEtTQ;|Z}q<9FCWswXoGnEfYxnvX?N19Xn&W;u-AVut} z7-e?!lMgQ=z7;p^vM+Qj(_sXoh|j8zy+~PBL#h;qoT)7pg+hO`K$A5JJRJ4oc}oY? z1fA@#tcp^d#cN$(_(sA#!bnz&s;Eu@Pm_DKK6NKmU zFLXxdnIb3QOx>p+UMA--Q{;KhU_O(m7`*MMHXhr6Xy;g{85VYgbEHR|)iFxpWhsf{ zpHy`RUA)5{1$H_R4wA{cH}0K#zrxKm)a*SRsJXlT@G{68ZCL4bzQ^HZH22Y8ub6xH z%DH!5_RZtGkL~-*=bd)Y*H8G!*S@g->BsDQ#DTl-`_^fn`RJSA<~N&4u-yx7-AHfA8W8+#_FiDSw+P?kfpS$O!OybVh3*>aPq85E_K)5ukDCK#0ISEY?{)6El zvrg$kc$sgi^Wht3e95)4A}E_Cc<$O=$|d*9=bZ7ylRkgg@kf39)DsUo_spX%J^%Rc zT=Yd(%-3Cg&fT|P`Plt_3h54b*)LuA{VP})iZRcd;if%kLkRVl?eJZYFjR;q4DL~A z5s?H2p+6F)szpeXDOpVTQ7h5Zm8ED@7Q>7!fdT*ows~Lg{aQzk?!Yqp97GOJ)*4dN$A7~-582O7or4> z!^>c@D`ub=Mh+TF!DLw!vtf8+XE+n;8V}p{`R`1N$NY7_bRkkiftuiS6enRvm&t`i z)|N{FQ+TULs6`T4MKniJQG5(~GeKvOIE)*wK2i{YaXTH2XsGzt5j#BNh8Qthb9qJ1 z=O9VGsSEp5QD2mjvrMi>97bcm$O%WEvq@4UJyv=cXOtbA^SBd%>k__RZ(>c{CCjG`HWXxs1aqxtE*k4U258@ITlz;N9*7fqCldquHjL= zpve^BR5_t6x$0so9hDW;A|WGu<|@7x74d-siZDv0cye7}L=ip(SwdcwYLQ&udCMTV zlyV)Zic~|%F&U(wu54}5A*X{dEGQz(pPb1Dats~9DCOMFT1}8g)Qn5e)d{nanm5yz z$fiTbJb{-zL<|#`fMq(YshgAaMOG=ER|rqPXloQ{RJP$(Dn5ivX#%y~F~WaEG1LsM zS{cWqW{VA2QE+%uiyW`$Wd`Xq#NdurxnaZy-o0*BqhYFv_x7pp5dLCrYbT>vXJWXL}6NoVs+12`3v1)#>Bw zPS)@>$2~=aOsR<2K}r!LD6?bJN>xX-#ltGbOm%WJm@`_zu#;s?BNA#P+>qv^&JN1H z$mwlW|3GTpQn$(QGQ#}%j~fZSgN}!kU)Z`{I)Lj*0NGL9h_JvKugImMO3wLO6tUV> zilKliO+l6gX-3pVj5v^sRVRFsDzcy!Dmbz+ET~e`8B~Rk1&|P5e>sCmndkH=vw)bA z)hVMOOGhdzjJd6pGxfKksSe`{i%_e}!&+ehvB0d9TD5YL4x&(xC}hN5(Qc-7ZB<1n zwQ+jr6lWCKDBAxOHO+KW$601gmH+9!q=*h;!B(s}Zm7a+4(D5qk|1h`wY(|_g^QZ3 zwoFSB3V6=Qs;Dgrk>=dF^RV%yMuL~(>4=PUG{pjmQfXao)Y!yG`BQ9*q^*@FNRg&_ zl1rRnM8qs)y=F}lh-TfX6}76O7DjfohS5Z=;~B3yvm7Sjp3>$OO$McU%1)$8wNzvo zse)>h5&Aiq5p_w5hKe+kux7j(G7{ZKg*qeL5K^-up9v97W~D`vL@i;_x<;ki8k?w7 zBzcmbc#MiHh#_7_7*SV|g+`xmPHvsnKw}U%!o$l5E{-7&Q!7I8o%)>jkQ1*nUdKO2 zK9UfG7rcSx-d%@?y|eA2+4pW%PrlSS6)fW^uUh!fQ_H5WoOkbvx%VuZdHech54&GJ z`;{Ps?Y_q@uXyt=uNv_3HvaiCf|sMe?Tu{U*Q?R+a(Be|Zj*uKP%tzN zG83jL1&R^0>s@Qe7n%lm;b`~E?wUbhm(lDfCE=mg^@UB1QlL-52+~3Nbf92hoN=WvK7A|b`_neg%{#~=R1|A({t@ZP(s@;x3wbZjGv1r?=?qavf@=!^vm7Mc}NkR~NcB$QA? z3sq4R5D)<=QUeJDLV$!82qlf)#Cn~XJL}H?z(+sb7``z2>@n}?0LZxE9_>Qd+Wn1nFM{YST@Ho z+0Fs8qmWk28RajQ7+U;73n!mGAF-ty3S+cmP>(1vs+uZ5J-0K<@$&X5bo)Ck9B zVTe6|H0XrF0w4!e80hT0*aeamvFwo8(6U=Yfbip}#i?zA1u8U5`5R661~Wu2+k!R` z_Q3$qP>%}q4s;2WGcaa6mejInWeesEq^oDG*z#L%SorIUAx#ic%h#HF2qCV=&M31k zEBtguFnY^E=@4`jERN9vi6tn0?oBhjHRKhEA$DHz;3srm8wg`53C)G>v>y*+Xz7f+ zqAHmYjvd~uhDGRtirYakU$Zug=7@*FB>+8sAt?Au9t?nvB#fa}o{>dF!X+wEFhs(4 zh=a&$kbor|k+8SgFle=b#WIVGFs)&M;5*{n%?pwMY#@$%Cl8VqggY7M-f%QJj5y1@ zxwEshAYhbeBa|2mnVy|0*qhkICbE*bSFde1|KYb!s-xA*%4Z6g)^@X$nxF;o*xW%B zRM$dTVfhGW)2NV$fTF+jXj*OcQ40bl zLztF=F@8ajW0py2N0?pv=ifesr!yx8dNr5Prb*$2#1VvdUF-57lMKxJhv zETxSBMuylrKoTfz=R8ix5iu95$H-BVQBgVsfwde9M`^k6cNvy8&b+#^Vn*Tyi&-Wt zFeZYn6=38$&R77#2%wEzEG{Ku7CB1Nq_l=Q7FsSutkSD3Nr_G0$1Ps_j!j?jrq|7S%NsWS``2vz;`f&~+Gz4>wvzd8`%hi0q$MiWLCJ8X_BSfvc3e6|7Io7ni=0G3B2gjfFZ?P~MWwP= zm8x1)a;k+@xT;QeHKuJ`x{Y1Nz9y`&ZJ$@o3zIvH(NVFgKz$k66A=YKxvE4p8U~eO zmacSVR748|R<+yKvlpaoBCCQ`vI<@p0%L1f=4iFj#)HI={7niEUCqlEn7hPumB|A2 zCjy0b8_gDAZa)brR&0(ukfj+qqt;T2sQ_u@YTGdo^`#r7T7sfD~0LWnSP6- z{ZpI(TsRJ2h1IB|0V=mC6V4awsd1-DaX;xE=#pn~3nCOZ0rwLM$L?Kq;k)gyb9A?X z$ye)^(p`m~mlgoBn_Gma+z0$6!GchH<5+9frY2kEbD58prTLnOInpa3*;$CC$Uc16 ztXj7A!Q0n8WKY=*$KUAgcw+6{&pvLEeG^;}Eu0&dlGEGo_dNgPUv`AMS4+9gGqa*H$l2 zTxJA=SREP(xOF>}WlQO9FK(d$q*_@A!sr1hi8wZiNKVzS+kdIV0K2jm#7 zoSOwIQryy$FfT6U>>yH*$Q%HYyIaaIaxY5m%_SJ!s6Z{dZuannZWN~>0%K)d>_ULC z445I7*3&43a2%3Fi&OquR8AXg6fi@SXaNugW;7jk7+@~!0EHfj!Sq<`QH_92-&(nl zk<3t#H*stLNDRr1G$Q~!Q2{U#E0)yNmN2lm6@vgc5%Q8ej4oKq*mO!sEJw!>rPgRh zc8*92$G(AdQFQ3JfHgIijAbZjmHCf@#^q7OoWb#JrS$*kwc3hR?)Tc1dw*AqTIzl~I_)sL+m-r6?1tCm(ic!!Y9EQgn7g zmsJsQa?i+-SJSjyr7g)u1v1nr*OW`FGY8mczI0%YFDtU*7!{tpw4!Lc*l}WrUz;Lf zD@-UXd=p+p*X^X`PU0L3HJwbxT5N_|M$st+GRiJ17R-h$vOoLiG8$^Z4YBC@7LQS| z)(y!x2P7jgIWwZwIw&T>i?J3a-@?H+1zi(~K^u8lway4Be2HX$`MNGV&3ZP|df-