
Commit e7e51b6

cuixiaomashahba authored and committed
Fixed the nightly testing failure of transformer_lt_official model (#54)
* Fixed the nightly testing failure of the transformer_lt_official model
* Commented out unnecessary, excessive logging info
* Updated the in_graph parameters and removed the KMP settings, which can differ across hardware
1 parent e4c0242 commit e7e51b6

File tree

4 files changed: +16 -13 lines changed

  • benchmarks
  • models/language_translation/tensorflow/transformer_lt_official/inference/fp32/utils

benchmarks/common/tensorflow/start.sh

Lines changed: 0 additions & 1 deletion
@@ -1280,7 +1280,6 @@ function transformer_lt_official() {
     fi
 
     CMD="${CMD}
-    --input_graph=${input_graph} \
     --vocab_file=${DATASET_LOCATION}/${vocab_file} \
     --file=${DATASET_LOCATION}/${file} \
     --file_out=${OUTPUT_DIR}/${file_out} \
Lines changed: 0 additions & 5 deletions
@@ -1,7 +1,2 @@
 {
-    "optimization_parameters": {
-        "KMP_AFFINITY": "granularity=fine,compact,1,0",
-        "KMP_BLOCKTIME": 0,
-        "KMP_SETTINGS": 1
-    }
 }
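
The KMP_* entries removed above are Intel OpenMP tuning knobs whose best values depend on the host hardware, which is why the commit drops them from the config instead of shipping one-size-fits-all defaults. As a minimal sketch (not part of this commit), the same variables can still be set per run from the shell, using the removed values only as a starting point:

    # Hedged example: these are the values deleted from the config above;
    # treat them as a starting point and tune per machine.
    export KMP_AFFINITY="granularity=fine,compact,1,0"
    export KMP_BLOCKTIME=0
    export KMP_SETTINGS=1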

benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py

Lines changed: 14 additions & 5 deletions
@@ -18,6 +18,7 @@
 #
 
 import os
+import sys
 from argparse import ArgumentParser
 
 from common.base_model_init import BaseModelInitializer
@@ -26,10 +27,22 @@
 
 class ModelInitializer(BaseModelInitializer):
     """Model initializer for Transformer LT FP32 inference"""
+    def run_inference_sanity_checks(self, args, custom_args):
+        if not args.input_graph:
+            sys.exit("Please provide a path to the frozen graph directory"
+                     " via the '--in-graph' flag.")
+        if not args.data_location:
+            sys.exit("Please provide a path to the data directory via the "
+                     "'--data-location' flag.")
+        if args.socket_id == -1 and args.num_cores == -1:
+            print("***Warning***: Running inference on all cores could degrade"
+                  " performance. Pass a '--socket-id' to specify running on a"
+                  " single socket instead.\n")
 
     def __init__(self, args, custom_args, platform_util=None):
         super(ModelInitializer, self).__init__(args, custom_args, platform_util)
 
+        self.run_inference_sanity_checks(self.args, self.custom_args)
         self.cmd = self.get_command_prefix(self.args.socket_id)
         self.bleu_params = ""
 
@@ -61,10 +74,6 @@ def __init__(self, args, custom_args, platform_util=None):
                                 help='input vocable file for translation',
                                 dest="vocab_file",
                                 default="vocab.txt")
-        arg_parser.add_argument('--in_graph',
-                                help='input fp32 frozen graph file for inference',
-                                dest="fp32_graph",
-                                default="fp32_graphdef.pb")
         arg_parser.add_argument('--file',
                                 help='decode input file with path',
                                 dest="decode_from_file",
@@ -85,7 +94,7 @@ def __init__(self, args, custom_args, platform_util=None):
         translate_file = os.path.join(self.args.output_dir,
                                       self.args.decode_to_file)
         cmd_args = " --param_set=" + self.args.param_set + \
-                   " --in_graph=" + self.args.fp32_graph + \
+                   " --in_graph=" + self.args.input_graph + \
                    " --batch_size=" + \
                    (str(self.args.batch_size)
                     if self.args.batch_size != -1 else "1") + \
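
With the model-local '--in_graph' argument removed above, the init script now reads the graph path from the common args.input_graph, which the benchmark launcher fills in from its '--in-graph' flag. A hypothetical invocation satisfying the new sanity checks (the launch_benchmark.py entry point is assumed from the repo's usual layout, and all paths are placeholders):

    # Hypothetical run; replace the placeholder paths with real locations.
    python launch_benchmark.py \
        --model-name transformer_lt_official \
        --precision fp32 \
        --mode inference \
        --framework tensorflow \
        --in-graph /home/user/fp32_graphdef.pb \
        --data-location /home/user/transformer_lt_dataset \
        --socket-id 0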

models/language_translation/tensorflow/transformer_lt_official/inference/fp32/utils/tokenizer.py

Lines changed: 2 additions & 2 deletions
@@ -63,8 +63,8 @@ class Subtokenizer(object):
 
   def __init__(self, vocab_file, reserved_tokens=None):
     """Initializes class, creating a vocab file if data_files is provided."""
-    tf.compat.v1.logging.info("Initializing Subtokenizer from file %s." %
-                              vocab_file)
+    # tf.compat.v1.logging.info("Initializing Subtokenizer from file %s." %
+    #                           vocab_file)
 
     if reserved_tokens is None:
       reserved_tokens = RESERVED_TOKENS
