diff --git a/src/DataLoader.py b/src/DataLoader.py
index 76b2bad220..f2e3d85eae 100644
--- a/src/DataLoader.py
+++ b/src/DataLoader.py
@@ -51,15 +51,17 @@ def __init__(self, filePath, batchSize, imgSize, maxTextLen):
 
         # Read json lables file
         # Dataset folder should contain a labels.json file inside, with key is the file name of images and value is the label
-        with open(filePath + 'labels.json') as json_data:
+        with open(f'{filePath}labels.json') as json_data:
            label_file = json.load(json_data)
 
         # Log
         print("Loaded", len(label_file), "images")
 
         # Put sample into list
-        for fileName, gtText in label_file.items():
-            self.samples.append(Sample(gtText, filePath + fileName))
+        self.samples.extend(
+            Sample(gtText, filePath + fileName)
+            for fileName, gtText in label_file.items()
+        )
 
         self.charList = list(open(FilePaths.fnCharList).read())
 
diff --git a/src/Model.py b/src/Model.py
index f68388064b..21ed1815b3 100644
--- a/src/Model.py
+++ b/src/Model.py
@@ -199,8 +199,8 @@ def setupCTC(self, ctcIn3d):
 
     def setupTF(self):
         """ Initialize TensorFlow """
-        print('Python: ' + sys.version)
-        print('Tensorflow: ' + tf.__version__)
+        print(f'Python: {sys.version}')
+        print(f'Tensorflow: {tf.__version__}')
         sess = tf.Session()  # Tensorflow session
         saver = tf.train.Saver(max_to_keep=5)  # Saver saves model to file
         modelDir = '../model/'
@@ -208,10 +208,10 @@ def setupTF(self):
             modelDir)  # Is there a saved model?
         # If model must be restored (for inference), there must be a snapshot
         if self.mustRestore and not latestSnapshot:
-            raise Exception('No saved model found in: ' + modelDir)
+            raise Exception(f'No saved model found in: {modelDir}')
         # Load saved model if available
         if latestSnapshot:
-            print('Init with stored values from ' + latestSnapshot)
+            print(f'Init with stored values from {latestSnapshot}')
             saver.restore(sess, latestSnapshot)
         else:
             print('Init with new values')
@@ -247,7 +247,7 @@ def toSpare(self, texts):
     def decoderOutputToText(self, ctcOutput):
         """ Extract texts from output of CTC decoder """
         # Contains string of labels for each batch element
-        encodedLabelStrs = [[] for i in range(Model.batchSize)]
+        encodedLabelStrs = [[] for _ in range(Model.batchSize)]
         # Word beam search: label strings terminated by blank
         if self.decoderType == DecoderType.WordBeamSearch:
             blank = len(self.charList)
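
A minimal standalone sketch of how the refactored loader in src/DataLoader.py consumes labels.json; the Sample stand-in and the ../data/ path below are assumptions used only for illustration, not part of this diff.

# Illustrative sketch only -- Sample and the dataset path are assumed, not taken from the diff.
import json
from collections import namedtuple

Sample = namedtuple('Sample', ['gtText', 'filePath'])  # stand-in for DataLoader's Sample class

filePath = '../data/'  # assumed dataset folder; must end with a path separator

# labels.json maps each image file name to its ground-truth text, e.g.
# {"word-001.png": "house", "word-002.png": "tree"}
with open(f'{filePath}labels.json') as json_data:
    label_file = json.load(json_data)

samples = []
samples.extend(
    Sample(gtText, filePath + fileName)  # one Sample per labelled image
    for fileName, gtText in label_file.items()
)
print("Loaded", len(label_file), "images")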