From f44c39667e39456e292e16a77ebaaaa15e2b03e2 Mon Sep 17 00:00:00 2001
From: Robert Sachunsky
Date: Mon, 13 Apr 2026 01:14:49 +0200
Subject: [PATCH] predictor: disable rebatching (until we have flexible batch
 sizes)

---
 src/eynollah/predictor.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/eynollah/predictor.py b/src/eynollah/predictor.py
index 94afea5..2ab62a0 100644
--- a/src/eynollah/predictor.py
+++ b/src/eynollah/predictor.py
@@ -111,7 +111,7 @@ class Predictor(mp.context.SpawnProcess):
             "binarization": 4,
             "enhancement": 4,
             "reading_order": 4,
-            # medium size (672x672)...
+            # medium size (672x672x3)...
             "textline": 2,
             # large models...
             "table": 1,
@@ -119,6 +119,7 @@ class Predictor(mp.context.SpawnProcess):
             "region_fl_np": 1,
             "region_fl": 1,
         }.get(self.name, 1)
+        REBATCH_SIZE = 1 # save VRAM; FIXME: re-enable w/ runtime parameter
         if not len(shared_data):
             #self.logger.debug("getting '%d' output shape of model '%s'", jobid, self.name)
             result = self.model.output_shape