fix_pir_pre-commit #1889

Open · wants to merge 3 commits into base: develop
Changes from all commits
6 changes: 3 additions & 3 deletions .pre-commit-config.yaml
@@ -8,13 +8,13 @@ repos:
         files: \.(md|yml)$
     -   id: remove-tabs
         files: \.(md|yml)$
--   repo: https://github.com/PaddlePaddle/mirrors-yapf.git
-    rev: v0.16.2
+-   repo: https://github.com/pre-commit/mirrors-yapf.git
+    rev: v0.32.0
     hooks:
     -   id: yapf
         files: (.*\.(py|bzl)|BUILD|.*\.BUILD|WORKSPACE)$
 -   repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.4.0
+    rev: v4.6.0
     hooks:
     -   id: check-added-large-files
     -   id: check-merge-conflict
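Note on the hook bumps: the yapf hook now points at the maintained pre-commit mirror instead of the stale PaddlePaddle fork, and pre-commit-hooks moves from v4.4.0 to v4.6.0. After pulling this change, contributors can refresh and re-check the tree locally with `pre-commit install` followed by `pre-commit run --all-files`; the newer pinned yapf may reformat files the old rev accepted.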
11 changes: 8 additions & 3 deletions ce_tests/dygraph/quant/src/eval.py
@@ -17,9 +17,14 @@


 def eval(args):
-    model_file = os.path.join(args.model_path, args.model_filename)
-    params_file = os.path.join(args.model_path, args.params_filename)
-    config = paddle_infer.Config(model_file, params_file)
+    if '2' in paddle.__version__.split('.')[0]:
+        config = paddle.inference.Config(
+            os.path.join(args.model_path, args.model_filename),
+            os.path.join(args.model_path, args.params_filename))
+    else:
+        model_prefix = args.model_filename.split('.')[0]
+        config = paddle.inference.Config(
+            os.path.join(args.model_path, model_prefix))
     config.enable_mkldnn()
     config.switch_ir_optim(False)

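The same version gate recurs in every predictor touched below: Paddle 2.x builds the Config from an explicit model file plus params file, while newer PIR-era releases take a single path prefix. A minimal sketch of the shared pattern as a helper — the name build_inference_config is hypothetical, for illustration only, not something this PR adds:

import os

import paddle


def build_inference_config(model_dir, model_filename, params_filename):
    """Hypothetical helper mirroring the version gate used throughout this PR."""
    # paddle.__version__ is e.g. "2.6.1" on 2.x or "3.0.0" on PIR-era builds.
    if '2' in paddle.__version__.split('.')[0]:
        # Paddle 2.x: pass the model and params files explicitly.
        return paddle.inference.Config(
            os.path.join(model_dir, model_filename),
            os.path.join(model_dir, params_filename))
    # Newer releases: pass the common prefix and let Paddle resolve suffixes.
    model_prefix = model_filename.split('.')[0]
    return paddle.inference.Config(os.path.join(model_dir, model_prefix))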
11 changes: 8 additions & 3 deletions ce_tests/dygraph/quant/src/test.py
@@ -18,9 +18,14 @@

 def eval(args):
     # create predictor
-    model_file = os.path.join(args.model_path, args.model_filename)
-    params_file = os.path.join(args.model_path, args.params_filename)
-    config = paddle_infer.Config(model_file, params_file)
+    if '2' in paddle.__version__.split('.')[0]:
+        config = paddle.inference.Config(
+            os.path.join(args.model_path, args.model_filename),
+            os.path.join(args.model_path, args.params_filename))
+    else:
+        model_prefix = args.model_filename.split('.')[0]
+        config = paddle.inference.Config(
+            os.path.join(args.model_path, model_prefix))
     if args.use_gpu:
         config.enable_use_gpu(1000, 0)
     if not args.ir_optim:
16 changes: 11 additions & 5 deletions demo/dygraph/post_quant/eval.py
@@ -30,9 +30,15 @@

 def eval():
     # create predictor
-    model_file = os.path.join(FLAGS.model_path, FLAGS.model_filename)
-    params_file = os.path.join(FLAGS.model_path, FLAGS.params_filename)
-    config = paddle_infer.Config(model_file, params_file)
+    if '2' in paddle.__version__.split('.')[0]:
+        config = paddle.inference.Config(
+            os.path.join(FLAGS.model_path, FLAGS.model_filename),
+            os.path.join(FLAGS.model_path, FLAGS.params_filename))
+    else:
+        model_prefix = FLAGS.model_filename.split('.')[0]
+        config = paddle.inference.Config(
+            os.path.join(FLAGS.model_path, model_prefix))
+
     if FLAGS.use_gpu:
         config.enable_use_gpu(1000, 0)
     if not FLAGS.ir_optim:
@@ -97,8 +103,8 @@ def eval():
     acc5 = correct_5_num / total_num
     avg_time = cost_time / total_num
     print("End test: test image {}".format(total_num))
-    print("test_acc1: {:.4f}; test_acc5: {:.4f}; avg time: {:.5f} sec/img".format(
-        acc1, acc5, avg_time))
+    print("test_acc1: {:.4f}; test_acc5: {:.4f}; avg time: {:.5f} sec/img".
+          format(acc1, acc5, avg_time))
     print("\n")

26 changes: 14 additions & 12 deletions example/auto_compression/detection/paddle_inference_eval.py
@@ -65,8 +65,7 @@ def argsparser():
         "--device",
         type=str,
         default="GPU",
-        help=
-        "Choose the device you want to run, it can be: CPU/GPU/XPU, default is GPU",
+        help="Choose the device you want to run, it can be: CPU/GPU/XPU, default is GPU",
     )
     parser.add_argument(
         "--use_dynamic_shape",
@@ -243,9 +242,13 @@ def load_predictor(
         raise ValueError(
             "Predict by TensorRT mode: {}, expect device=='GPU', but device == {}".
             format(precision, device))
-    config = Config(
-        os.path.join(model_dir, "model.pdmodel"),
-        os.path.join(model_dir, "model.pdiparams"))
+    # support paddle 2.x
+    if '2' in paddle.__version__.split('.')[0]:
+        config = Config(
+            os.path.join(model_dir, "model.pdmodel"),
+            os.path.join(model_dir, "model.pdiparams"))
+    else:
+        config = Config(os.path.join(model_dir, "model"))

     config.enable_memory_optim()
     if device == "GPU":
@@ -281,8 +284,8 @@ def load_predictor(
             dynamic_shape_file = os.path.join(FLAGS.model_path,
                                               "dynamic_shape.txt")
             if os.path.exists(dynamic_shape_file):
-                config.enable_tuned_tensorrt_dynamic_shape(
-                    dynamic_shape_file, True)
+                config.enable_tuned_tensorrt_dynamic_shape(dynamic_shape_file,
+                                                           True)
                 print("trt set dynamic shape done!")
             else:
                 config.collect_shape_range_info(dynamic_shape_file)
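The rewrapped call above sits inside the usual two-pass TensorRT shape-tuning flow: a first run records min/opt/max tensor shapes into dynamic_shape.txt, and later runs replay them. A rough sketch of that flow, assuming config and model_path are set up as in this script:

import os

# `config` is a paddle.inference.Config with TensorRT enabled.
dynamic_shape_file = os.path.join(model_path, "dynamic_shape.txt")
if os.path.exists(dynamic_shape_file):
    # Later runs: replay the tuned shape ranges; True permits rebuilding the
    # engine at runtime if an input falls outside the recorded ranges.
    config.enable_tuned_tensorrt_dynamic_shape(dynamic_shape_file, True)
else:
    # First run: only collect shape ranges, then rerun the script to use them.
    config.collect_shape_range_info(dynamic_shape_file)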
@@ -434,10 +437,9 @@ def main():
     reader_cfg = load_config(FLAGS.reader_config)
     dataset = reader_cfg["EvalDataset"]
     global val_loader
-    val_loader = create("EvalReader")(
-        reader_cfg["EvalDataset"],
-        reader_cfg["worker_num"],
-        return_list=True)
+    val_loader = create("EvalReader")(reader_cfg["EvalDataset"],
+                                      reader_cfg["worker_num"],
+                                      return_list=True)
     clsid2catid = {v: k for k, v in dataset.catid2clsid.items()}
     anno_file = dataset.get_anno()
     metric = COCOMetric(
@@ -463,4 +465,4 @@ def main():
     # DataLoader need run on cpu
     paddle.set_device("cpu")

-    main()
\ No newline at end of file
+    main()
@@ -113,9 +113,15 @@ def __init__(self):

     def _create_paddle_predictor(self):
         inference_model_dir = args.model_path
-        model_file = os.path.join(inference_model_dir, args.model_filename)
-        params_file = os.path.join(inference_model_dir, args.params_filename)
-        config = paddle.inference.Config(model_file, params_file)
+        if '2' in paddle.__version__.split('.')[0]:
+            config = paddle.inference.Config(
+                os.path.join(inference_model_dir, args.model_filename),
+                os.path.join(inference_model_dir, args.params_filename))
+        else:
+            model_prefix = args.model_filename.split('.')[0]
+            config = paddle.inference.Config(
+                os.path.join(inference_model_dir, model_prefix))

         precision = paddle.inference.Config.Precision.Float32
         if args.use_int8:
             precision = paddle.inference.Config.Precision.Int8
26 changes: 15 additions & 11 deletions example/auto_compression/nlp/paddle_inference_eval.py
@@ -91,8 +91,7 @@ def parse_args():
         "--max_seq_length",
         default=128,
         type=int,
-        help=
-        "The maximum total input sequence length after tokenization. Sequences longer "
+        help="The maximum total input sequence length after tokenization. Sequences longer "
         "than this will be truncated, sequences shorter will be padded.", )
     parser.add_argument(
         "--perf_warmup_steps",
@@ -108,8 +107,7 @@
         type=str,
         default="fp32",
         choices=["fp32", "fp16", "int8"],
-        help=
-        "The precision of inference. It can be 'fp32', 'fp16' or 'int8'. Default is 'fp16'.",
+        help="The precision of inference. It can be 'fp32', 'fp16' or 'int8'. Default is 'fp16'.",
     )
     parser.add_argument(
         "--use_mkldnn",
@@ -158,7 +156,8 @@ def _convert_example(example,
         }
     elif "target" in example:  # wsc
         text, query, pronoun, query_idx, pronoun_idx = (
-            example["text"], example["target"]["span1_text"],
+            example["text"],
+            example["target"]["span1_text"],
             example["target"]["span2_text"],
             example["target"]["span1_index"],
             example["target"]["span2_index"], )
@@ -207,9 +206,14 @@ def create_predictor(cls, args):
         create_predictor func
         """
         cls.rerun_flag = False
-        config = paddle.inference.Config(
-            os.path.join(args.model_path, args.model_filename),
-            os.path.join(args.model_path, args.params_filename))
+        if '2' in paddle.__version__.split('.')[0]:
+            config = paddle.inference.Config(
+                os.path.join(args.model_path, args.model_filename),
+                os.path.join(args.model_path, args.params_filename))
+        else:
+            model_prefix = args.model_filename.split('.')[0]
+            config = paddle.inference.Config(
+                os.path.join(args.model_path, model_prefix))
         config.switch_ir_debug(True)
         # for the ERNIE 3.0-Medium model
         # config.exp_disable_tensorrt_ops(["elementwise_add"])
@@ -246,8 +250,8 @@ def create_predictor(cls, args):
             dynamic_shape_file = os.path.join(args.model_path,
                                               "dynamic_shape.txt")
             if os.path.exists(dynamic_shape_file):
-                config.enable_tuned_tensorrt_dynamic_shape(
-                    dynamic_shape_file, True)
+                config.enable_tuned_tensorrt_dynamic_shape(dynamic_shape_file,
+                                                           True)
                 print("trt set dynamic shape done!")
             else:
                 config.collect_shape_range_info(dynamic_shape_file)
@@ -372,4 +376,4 @@ def main():

 if __name__ == "__main__":
     paddle.set_device("cpu")
-    main()
\ No newline at end of file
+    main()
11 changes: 8 additions & 3 deletions example/auto_compression/nlp/paddle_inference_eval_uie.py
@@ -163,9 +163,14 @@ def create_predictor(cls, args):
         create_predictor func
         """
         cls.rerun_flag = False
-        config = paddle.inference.Config(
-            os.path.join(args.model_path, args.model_filename),
-            os.path.join(args.model_path, args.params_filename))
+        if '2' in paddle.__version__.split('.')[0]:
+            config = paddle.inference.Config(
+                os.path.join(args.model_path, args.model_filename),
+                os.path.join(args.model_path, args.params_filename))
+        else:
+            model_prefix = args.model_filename.split('.')[0]
+            config = paddle.inference.Config(
+                os.path.join(args.model_path, model_prefix))
         if args.device == "gpu":
             # set GPU configs accordingly
             config.enable_use_gpu(100, 0)
@@ -103,8 +103,7 @@ def parse_args():
         "--max_seq_length",
         default=128,
         type=int,
-        help=
-        "The maximum total input sequence length after tokenization. Sequences longer "
+        help="The maximum total input sequence length after tokenization. Sequences longer "
         "than this will be truncated, sequences shorter will be padded.", )
     parser.add_argument(
         "--perf_warmup_steps",
@@ -120,8 +119,7 @@
         type=str,
         default="fp32",
         choices=["fp32", "fp16", "int8"],
-        help=
-        "The precision of inference. It can be 'fp32', 'fp16' or 'int8'. Default is 'fp16'.",
+        help="The precision of inference. It can be 'fp32', 'fp16' or 'int8'. Default is 'fp16'.",
     )
     parser.add_argument(
         "--use_mkldnn",
@@ -188,9 +186,15 @@ def create_predictor(cls, args):
         create_predictor func
         """
         cls.rerun_flag = False
-        config = paddle.inference.Config(
-            os.path.join(args.model_path, args.model_filename),
-            os.path.join(args.model_path, args.params_filename))
+
+        if '2' in paddle.__version__.split('.')[0]:
+            config = paddle.inference.Config(
+                os.path.join(args.model_path, args.model_filename),
+                os.path.join(args.model_path, args.params_filename))
+        else:
+            model_prefix = args.model_filename.split('.')[0]
+            config = paddle.inference.Config(
+                os.path.join(args.model_path, model_prefix))

         if args.device == "gpu":
             # set GPU configs accordingly
@@ -223,8 +227,8 @@ def create_predictor(cls, args):
             dynamic_shape_file = os.path.join(args.model_path,
                                               "dynamic_shape.txt")
             if os.path.exists(dynamic_shape_file):
-                config.enable_tuned_tensorrt_dynamic_shape(
-                    dynamic_shape_file, True)
+                config.enable_tuned_tensorrt_dynamic_shape(dynamic_shape_file,
+                                                           True)
                 print("trt set dynamic shape done!")
             else:
                 config.collect_shape_range_info(dynamic_shape_file)
@@ -253,9 +253,13 @@ def load_predictor(
         raise ValueError(
             "Predict by TensorRT mode: {}, expect device=='GPU', but device == {}".
             format(precision, device))
-    config = Config(
-        os.path.join(model_dir, "model.pdmodel"),
-        os.path.join(model_dir, "model.pdiparams"))
+    if '2' in paddle.__version__.split('.')[0]:
+        config = Config(
+            os.path.join(model_dir, "model.pdmodel"),
+            os.path.join(model_dir, "model.pdiparams"))
+    else:
+        config = Config(os.path.join(model_dir, "model"))

     if device == "GPU":
         # initial GPU memory(M), device ID
         config.enable_use_gpu(200, 0)
@@ -244,9 +244,12 @@ def load_predictor(model_dir,
         raise ValueError(
             "Predict by TensorRT mode: {}, expect device=='GPU', but device == {}"
             .format(run_mode, device))
-    config = Config(
-        os.path.join(model_dir, 'model.pdmodel'),
-        os.path.join(model_dir, 'model.pdiparams'))
+    if '2' in paddle.__version__.split('.')[0]:
+        config = Config(
+            os.path.join(model_dir, "model.pdmodel"),
+            os.path.join(model_dir, "model.pdiparams"))
+    else:
+        config = Config(os.path.join(model_dir, "model"))
     if device == 'GPU':
         # initial GPU memory(M), device ID
         config.enable_use_gpu(200, 0)
@@ -1,8 +1,8 @@
 Global:
   reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml
   model_dir: ./RES-paddle2-Deeplabv3-ResNet50
-  model_filename: model
-  params_filename: params
+  model_filename: model.pdmodel
+  params_filename: model.pdiparams
   batch_size: 4

 Distillation:
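These Global fixes (repeated across the segmentation configs below) align the YAML with what a Paddle 2.x static export actually writes to disk: saving with prefix "model" produces model.pdmodel and model.pdiparams, so the old bare model/params values pointed at files that never existed. A hedged sketch of the export that yields those names — the Linear layer is a stand-in, not the real segmentation model:

import os

import paddle

# Illustration only (Paddle 2.x behavior): exporting with prefix "model"
# produces the filenames the corrected configs now reference.
net = paddle.nn.Linear(4, 4)
paddle.jit.save(
    net,
    os.path.join("./RES-paddle2-Deeplabv3-ResNet50", "model"),
    input_spec=[paddle.static.InputSpec([None, 4], 'float32')])
# -> writes model.pdmodel and model.pdiparams under model_dir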
@@ -1,9 +1,8 @@
 Global:
   reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml
   model_dir: ./RES-paddle2-HRNetW18-Seg
-  model_filename: model
-  params_filename: params
-
+  model_filename: model.pdmodel
+  params_filename: model.pdiparams
 Distillation:
   alpha: 1.0
   loss: l2
@@ -1,8 +1,8 @@
 Global:
   reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml
   model_dir: ./RES-paddle2-HRNetW18-Seg
-  model_filename: model
-  params_filename: params
+  model_filename: model.pdmodel
+  params_filename: model.pdiparams

 Distillation:
   alpha: 1.0
@@ -1,8 +1,8 @@
 Global:
   reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml
   model_dir: ./RES-paddle2-PPLIteSegSTDC1
-  model_filename: model
-  params_filename: params
+  model_filename: model.pdmodel
+  params_filename: model.pdiparams

 TrainConfig:
   epochs: 14
@@ -1,8 +1,8 @@
 Global:
   reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml
   model_dir: ./RES-paddle2-PPLIteSegSTDC1
-  model_filename: model
-  params_filename: params
+  model_filename: model.pdmodel
+  params_filename: model.pdiparams

 Distillation:
   alpha: 1.0
@@ -1,8 +1,8 @@
 Global:
   reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml
   model_dir: ./RES-paddle2-PPLIteSegSTDC1
-  model_filename: model
-  params_filename: params
+  model_filename: model.pdmodel
+  params_filename: model.pdiparams

 Distillation:
   alpha: 1.0