diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e58400d0f..feca1dedb 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -8,13 +8,13 @@ repos:
         files: \.(md|yml)$
     -   id: remove-tabs
         files: \.(md|yml)$
--   repo: https://github.com/PaddlePaddle/mirrors-yapf.git
-    rev: v0.16.2
+-   repo: https://github.com/pre-commit/mirrors-yapf.git
+    rev: v0.32.0
     hooks:
     -   id: yapf
         files: (.*\.(py|bzl)|BUILD|.*\.BUILD|WORKSPACE)$
 -   repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.4.0
+    rev: v4.6.0
     hooks:
     -   id: check-added-large-files
     -   id: check-merge-conflict
diff --git a/ce_tests/dygraph/quant/src/eval.py b/ce_tests/dygraph/quant/src/eval.py
index b6afdc1dd..9a01feb1a 100644
--- a/ce_tests/dygraph/quant/src/eval.py
+++ b/ce_tests/dygraph/quant/src/eval.py
@@ -17,9 +17,14 @@ def eval(args):
-    model_file = os.path.join(args.model_path, args.model_filename)
-    params_file = os.path.join(args.model_path, args.params_filename)
-    config = paddle_infer.Config(model_file, params_file)
+    if '2' in paddle.__version__.split('.')[0]:
+        config = paddle.inference.Config(
+            os.path.join(args.model_path, args.model_filename),
+            os.path.join(args.model_path, args.params_filename))
+    else:
+        model_prefix = args.model_filename.split('.')[0]
+        config = paddle.inference.Config(
+            os.path.join(args.model_path, model_prefix))
     config.enable_mkldnn()
     config.switch_ir_optim(False)
diff --git a/ce_tests/dygraph/quant/src/test.py b/ce_tests/dygraph/quant/src/test.py
index 1dbd85d75..6300d22df 100644
--- a/ce_tests/dygraph/quant/src/test.py
+++ b/ce_tests/dygraph/quant/src/test.py
@@ -18,9 +18,14 @@ def eval(args):
     # create predictor
-    model_file = os.path.join(args.model_path, args.model_filename)
-    params_file = os.path.join(args.model_path, args.params_filename)
-    config = paddle_infer.Config(model_file, params_file)
+    if '2' in paddle.__version__.split('.')[0]:
+        config = paddle.inference.Config(
+            os.path.join(args.model_path, args.model_filename),
+            os.path.join(args.model_path, args.params_filename))
+    else:
+        model_prefix = args.model_filename.split('.')[0]
+        config = paddle.inference.Config(
+            os.path.join(args.model_path, model_prefix))
     if args.use_gpu:
         config.enable_use_gpu(1000, 0)
     if not args.ir_optim:
diff --git a/demo/dygraph/post_quant/eval.py b/demo/dygraph/post_quant/eval.py
index f9af9e39f..b5c1cb2f9 100644
--- a/demo/dygraph/post_quant/eval.py
+++ b/demo/dygraph/post_quant/eval.py
@@ -30,9 +30,15 @@ def eval():
     # create predictor
-    model_file = os.path.join(FLAGS.model_path, FLAGS.model_filename)
-    params_file = os.path.join(FLAGS.model_path, FLAGS.params_filename)
-    config = paddle_infer.Config(model_file, params_file)
+    if '2' in paddle.__version__.split('.')[0]:
+        config = paddle.inference.Config(
+            os.path.join(FLAGS.model_path, FLAGS.model_filename),
+            os.path.join(FLAGS.model_path, FLAGS.params_filename))
+    else:
+        model_prefix = FLAGS.model_filename.split('.')[0]
+        config = paddle.inference.Config(
+            os.path.join(FLAGS.model_path, model_prefix))
+
     if FLAGS.use_gpu:
         config.enable_use_gpu(1000, 0)
     if not FLAGS.ir_optim:
@@ -97,8 +103,8 @@ def eval():
     acc5 = correct_5_num / total_num
     avg_time = cost_time / total_num
     print("End test: test image {}".format(total_num))
-    print("test_acc1: {:.4f}; test_acc5: {:.4f}; avg time: {:.5f} sec/img".format(
-        acc1, acc5, avg_time))
+    print("test_acc1: {:.4f}; test_acc5: {:.4f}; avg time: {:.5f} sec/img".
+          format(acc1, acc5, avg_time))
     print("\n")
diff --git a/example/auto_compression/detection/paddle_inference_eval.py b/example/auto_compression/detection/paddle_inference_eval.py
index 838d37e13..8fde3fd88 100644
--- a/example/auto_compression/detection/paddle_inference_eval.py
+++ b/example/auto_compression/detection/paddle_inference_eval.py
@@ -65,8 +65,7 @@ def argsparser():
         "--device",
         type=str,
         default="GPU",
-        help=
-        "Choose the device you want to run, it can be: CPU/GPU/XPU, default is GPU",
+        help="Choose the device you want to run, it can be: CPU/GPU/XPU, default is GPU",
     )
     parser.add_argument(
         "--use_dynamic_shape",
@@ -243,9 +242,13 @@ def load_predictor(
         raise ValueError(
             "Predict by TensorRT mode: {}, expect device=='GPU', but device == {}".
             format(precision, device))
-    config = Config(
-        os.path.join(model_dir, "model.pdmodel"),
-        os.path.join(model_dir, "model.pdiparams"))
+    # support paddle 2.x
+    if '2' in paddle.__version__.split('.')[0]:
+        config = Config(
+            os.path.join(model_dir, "model.pdmodel"),
+            os.path.join(model_dir, "model.pdiparams"))
+    else:
+        config = Config(os.path.join(model_dir, "model"))
 
     config.enable_memory_optim()
     if device == "GPU":
@@ -281,8 +284,8 @@ def load_predictor(
             dynamic_shape_file = os.path.join(FLAGS.model_path,
                                               "dynamic_shape.txt")
             if os.path.exists(dynamic_shape_file):
-                config.enable_tuned_tensorrt_dynamic_shape(
-                    dynamic_shape_file, True)
+                config.enable_tuned_tensorrt_dynamic_shape(dynamic_shape_file,
+                                                           True)
                 print("trt set dynamic shape done!")
             else:
                 config.collect_shape_range_info(dynamic_shape_file)
@@ -434,10 +437,9 @@ def main():
     reader_cfg = load_config(FLAGS.reader_config)
     dataset = reader_cfg["EvalDataset"]
     global val_loader
-    val_loader = create("EvalReader")(
-        reader_cfg["EvalDataset"],
-        reader_cfg["worker_num"],
-        return_list=True)
+    val_loader = create("EvalReader")(reader_cfg["EvalDataset"],
+                                      reader_cfg["worker_num"],
+                                      return_list=True)
     clsid2catid = {v: k for k, v in dataset.catid2clsid.items()}
     anno_file = dataset.get_anno()
     metric = COCOMetric(
@@ -463,4 +465,4 @@ def main():
 
     # DataLoader need run on cpu
     paddle.set_device("cpu")
-    main()
\ No newline at end of file
+    main()
diff --git a/example/auto_compression/image_classification/paddle_inference_eval.py b/example/auto_compression/image_classification/paddle_inference_eval.py
index d36073875..60ad968ba 100644
--- a/example/auto_compression/image_classification/paddle_inference_eval.py
+++ b/example/auto_compression/image_classification/paddle_inference_eval.py
@@ -113,9 +113,15 @@ def __init__(self):
     def _create_paddle_predictor(self):
         inference_model_dir = args.model_path
-        model_file = os.path.join(inference_model_dir, args.model_filename)
-        params_file = os.path.join(inference_model_dir, args.params_filename)
-        config = paddle.inference.Config(model_file, params_file)
+        if '2' in paddle.__version__.split('.')[0]:
+            config = paddle.inference.Config(
+                os.path.join(inference_model_dir, args.model_filename),
+                os.path.join(inference_model_dir, args.params_filename))
+        else:
+            model_prefix = args.model_filename.split('.')[0]
+            config = paddle.inference.Config(
+                os.path.join(inference_model_dir, model_prefix))
+
         precision = paddle.inference.Config.Precision.Float32
         if args.use_int8:
             precision = paddle.inference.Config.Precision.Int8
diff --git a/example/auto_compression/nlp/paddle_inference_eval.py b/example/auto_compression/nlp/paddle_inference_eval.py
index 073f032e5..9f822bcf1 100644
--- a/example/auto_compression/nlp/paddle_inference_eval.py
+++ b/example/auto_compression/nlp/paddle_inference_eval.py
@@ -91,8 +91,7 @@ def parse_args():
         "--max_seq_length",
         default=128,
         type=int,
-        help=
-        "The maximum total input sequence length after tokenization. Sequences longer "
+        help="The maximum total input sequence length after tokenization. Sequences longer "
         "than this will be truncated, sequences shorter will be padded.", )
     parser.add_argument(
         "--perf_warmup_steps",
@@ -108,8 +107,7 @@ def parse_args():
         type=str,
         default="fp32",
         choices=["fp32", "fp16", "int8"],
-        help=
-        "The precision of inference. It can be 'fp32', 'fp16' or 'int8'. Default is 'fp16'.",
+        help="The precision of inference. It can be 'fp32', 'fp16' or 'int8'. Default is 'fp16'.",
     )
     parser.add_argument(
         "--use_mkldnn",
@@ -158,7 +156,8 @@ def _convert_example(example,
         }
     elif "target" in example:  # wsc
         text, query, pronoun, query_idx, pronoun_idx = (
-            example["text"], example["target"]["span1_text"],
+            example["text"],
+            example["target"]["span1_text"],
             example["target"]["span2_text"],
             example["target"]["span1_index"],
             example["target"]["span2_index"], )
@@ -207,9 +206,14 @@ def create_predictor(cls, args):
         create_predictor func
         """
         cls.rerun_flag = False
-        config = paddle.inference.Config(
-            os.path.join(args.model_path, args.model_filename),
-            os.path.join(args.model_path, args.params_filename))
+        if '2' in paddle.__version__.split('.')[0]:
+            config = paddle.inference.Config(
+                os.path.join(args.model_path, args.model_filename),
+                os.path.join(args.model_path, args.params_filename))
+        else:
+            model_prefix = args.model_filename.split('.')[0]
+            config = paddle.inference.Config(
+                os.path.join(args.model_path, model_prefix))
         config.switch_ir_debug(True)
         # 适用于ERNIE 3.0-Medium模型
         # config.exp_disable_tensorrt_ops(["elementwise_add"])
@@ -246,8 +250,8 @@ def create_predictor(cls, args):
            dynamic_shape_file = os.path.join(args.model_path,
                                              "dynamic_shape.txt")
            if os.path.exists(dynamic_shape_file):
-                config.enable_tuned_tensorrt_dynamic_shape(
-                    dynamic_shape_file, True)
+                config.enable_tuned_tensorrt_dynamic_shape(dynamic_shape_file,
+                                                           True)
                print("trt set dynamic shape done!")
            else:
                config.collect_shape_range_info(dynamic_shape_file)
@@ -372,4 +376,4 @@ def main():
 
 if __name__ == "__main__":
     paddle.set_device("cpu")
-    main()
\ No newline at end of file
+    main()
diff --git a/example/auto_compression/nlp/paddle_inference_eval_uie.py b/example/auto_compression/nlp/paddle_inference_eval_uie.py
index 2f378ef02..581d9bf43 100644
--- a/example/auto_compression/nlp/paddle_inference_eval_uie.py
+++ b/example/auto_compression/nlp/paddle_inference_eval_uie.py
@@ -163,9 +163,14 @@ def create_predictor(cls, args):
         create_predictor func
         """
         cls.rerun_flag = False
-        config = paddle.inference.Config(
-            os.path.join(args.model_path, args.model_filename),
-            os.path.join(args.model_path, args.params_filename))
+        if '2' in paddle.__version__.split('.')[0]:
+            config = paddle.inference.Config(
+                os.path.join(args.model_path, args.model_filename),
+                os.path.join(args.model_path, args.params_filename))
+        else:
+            model_prefix = args.model_filename.split('.')[0]
+            config = paddle.inference.Config(
+                os.path.join(args.model_path, model_prefix))
         if args.device == "gpu":
             # set GPU configs accordingly
             config.enable_use_gpu(100, 0)
diff --git a/example/auto_compression/pytorch_huggingface/paddle_inference_eval.py b/example/auto_compression/pytorch_huggingface/paddle_inference_eval.py
index 338825a66..5d06a9066 100644
--- a/example/auto_compression/pytorch_huggingface/paddle_inference_eval.py
+++ b/example/auto_compression/pytorch_huggingface/paddle_inference_eval.py
@@ -103,8 +103,7 @@ def parse_args():
         "--max_seq_length",
         default=128,
         type=int,
-        help=
-        "The maximum total input sequence length after tokenization. Sequences longer "
+        help="The maximum total input sequence length after tokenization. Sequences longer "
         "than this will be truncated, sequences shorter will be padded.", )
     parser.add_argument(
         "--perf_warmup_steps",
@@ -120,8 +119,7 @@ def parse_args():
         type=str,
         default="fp32",
         choices=["fp32", "fp16", "int8"],
-        help=
-        "The precision of inference. It can be 'fp32', 'fp16' or 'int8'. Default is 'fp16'.",
+        help="The precision of inference. It can be 'fp32', 'fp16' or 'int8'. Default is 'fp16'.",
     )
     parser.add_argument(
         "--use_mkldnn",
@@ -188,9 +186,15 @@ def create_predictor(cls, args):
         create_predictor func
         """
         cls.rerun_flag = False
-        config = paddle.inference.Config(
-            os.path.join(args.model_path, args.model_filename),
-            os.path.join(args.model_path, args.params_filename))
+
+        if '2' in paddle.__version__.split('.')[0]:
+            config = paddle.inference.Config(
+                os.path.join(args.model_path, args.model_filename),
+                os.path.join(args.model_path, args.params_filename))
+        else:
+            model_prefix = args.model_filename.split('.')[0]
+            config = paddle.inference.Config(
+                os.path.join(args.model_path, model_prefix))
 
         if args.device == "gpu":
             # set GPU configs accordingly
@@ -223,8 +227,8 @@ def create_predictor(cls, args):
            dynamic_shape_file = os.path.join(args.model_path,
                                              "dynamic_shape.txt")
            if os.path.exists(dynamic_shape_file):
-                config.enable_tuned_tensorrt_dynamic_shape(
-                    dynamic_shape_file, True)
+                config.enable_tuned_tensorrt_dynamic_shape(dynamic_shape_file,
+                                                           True)
                print("trt set dynamic shape done!")
            else:
                config.collect_shape_range_info(dynamic_shape_file)
diff --git a/example/auto_compression/pytorch_yolo_series/paddle_inference_eval.py b/example/auto_compression/pytorch_yolo_series/paddle_inference_eval.py
index a1df31b78..6b3504874 100644
--- a/example/auto_compression/pytorch_yolo_series/paddle_inference_eval.py
+++ b/example/auto_compression/pytorch_yolo_series/paddle_inference_eval.py
@@ -253,9 +253,13 @@ def load_predictor(
         raise ValueError(
             "Predict by TensorRT mode: {}, expect device=='GPU', but device == {}".
             format(precision, device))
-    config = Config(
-        os.path.join(model_dir, "model.pdmodel"),
-        os.path.join(model_dir, "model.pdiparams"))
+    if '2' in paddle.__version__.split('.')[0]:
+        config = Config(
+            os.path.join(model_dir, "model.pdmodel"),
+            os.path.join(model_dir, "model.pdiparams"))
+    else:
+        config = Config(os.path.join(model_dir, "model"))
+
     if device == "GPU":
         # initial GPU memory(M), device ID
         config.enable_use_gpu(200, 0)
diff --git a/example/auto_compression/pytorch_yolo_series/paddle_trt_infer.py b/example/auto_compression/pytorch_yolo_series/paddle_trt_infer.py
index 5b471690c..3fa841e53 100644
--- a/example/auto_compression/pytorch_yolo_series/paddle_trt_infer.py
+++ b/example/auto_compression/pytorch_yolo_series/paddle_trt_infer.py
@@ -244,9 +244,12 @@ def load_predictor(model_dir,
         raise ValueError(
             "Predict by TensorRT mode: {}, expect device=='GPU', but device == {}"
             .format(run_mode, device))
-    config = Config(
-        os.path.join(model_dir, 'model.pdmodel'),
-        os.path.join(model_dir, 'model.pdiparams'))
+    if '2' in paddle.__version__.split('.')[0]:
+        config = Config(
+            os.path.join(model_dir, "model.pdmodel"),
+            os.path.join(model_dir, "model.pdiparams"))
+    else:
+        config = Config(os.path.join(model_dir, "model"))
     if device == 'GPU':
         # initial GPU memory(M), device ID
         config.enable_use_gpu(200, 0)
diff --git a/example/auto_compression/semantic_segmentation/configs/deeplabv3/deeplabv3_qat.yaml b/example/auto_compression/semantic_segmentation/configs/deeplabv3/deeplabv3_qat.yaml
index 3a2e8c620..661fdb829 100644
--- a/example/auto_compression/semantic_segmentation/configs/deeplabv3/deeplabv3_qat.yaml
+++ b/example/auto_compression/semantic_segmentation/configs/deeplabv3/deeplabv3_qat.yaml
@@ -1,8 +1,8 @@
 Global:
   reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml
   model_dir: ./RES-paddle2-Deeplabv3-ResNet50
-  model_filename: model
-  params_filename: params
+  model_filename: model.pdmodel
+  params_filename: model.pdiparams
   batch_size: 4
 
 Distillation:
diff --git a/example/auto_compression/semantic_segmentation/configs/hrnet/hrnet_qat.yaml b/example/auto_compression/semantic_segmentation/configs/hrnet/hrnet_qat.yaml
index 8f852cdf7..a27bbae16 100644
--- a/example/auto_compression/semantic_segmentation/configs/hrnet/hrnet_qat.yaml
+++ b/example/auto_compression/semantic_segmentation/configs/hrnet/hrnet_qat.yaml
@@ -1,9 +1,8 @@
 Global:
   reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml
   model_dir: ./RES-paddle2-HRNetW18-Seg
-  model_filename: model
-  params_filename: params
-
+  model_filename: model.pdmodel
+  params_filename: model.pdiparams
 Distillation:
   alpha: 1.0
   loss: l2
diff --git a/example/auto_compression/semantic_segmentation/configs/hrnet/hrnet_sparse.yaml b/example/auto_compression/semantic_segmentation/configs/hrnet/hrnet_sparse.yaml
index 922589c3b..82b5e272a 100644
--- a/example/auto_compression/semantic_segmentation/configs/hrnet/hrnet_sparse.yaml
+++ b/example/auto_compression/semantic_segmentation/configs/hrnet/hrnet_sparse.yaml
@@ -1,8 +1,8 @@
 Global:
   reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml
   model_dir: ./RES-paddle2-HRNetW18-Seg
-  model_filename: model
-  params_filename: params
+  model_filename: model.pdmodel
+  params_filename: model.pdiparams
 
 Distillation:
   alpha: 1.0
diff --git a/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_auto.yaml b/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_auto.yaml
index 003078aa8..edb471f40 100644
--- a/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_auto.yaml
+++ b/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_auto.yaml
@@ -1,8 +1,8 @@
 Global:
   reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml
   model_dir: ./RES-paddle2-PPLIteSegSTDC1
-  model_filename: model
-  params_filename: params
+  model_filename: model.pdmodel
+  params_filename: model.pdiparams
 
 TrainConfig:
   epochs: 14
diff --git a/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_qat.yaml b/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_qat.yaml
index f739354a1..96ce086b2 100644
--- a/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_qat.yaml
+++ b/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_qat.yaml
@@ -1,8 +1,8 @@
 Global:
   reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml
   model_dir: ./RES-paddle2-PPLIteSegSTDC1
-  model_filename: model
-  params_filename: params
+  model_filename: model.pdmodel
+  params_filename: model.pdiparams
 
 Distillation:
   alpha: 1.0
diff --git a/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_sparse.yaml b/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_sparse.yaml
index 52f256da8..37b7569d8 100644
--- a/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_sparse.yaml
+++ b/example/auto_compression/semantic_segmentation/configs/pp_liteseg/pp_liteseg_sparse.yaml
@@ -1,8 +1,8 @@
 Global:
   reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml
   model_dir: ./RES-paddle2-PPLIteSegSTDC1
-  model_filename: model
-  params_filename: params
+  model_filename: model.pdmodel
+  params_filename: model.pdiparams
 
 Distillation:
   alpha: 1.0
diff --git a/example/auto_compression/semantic_segmentation/configs/unet/unet_channel_prune.yaml b/example/auto_compression/semantic_segmentation/configs/unet/unet_channel_prune.yaml
index 920c3b4d7..898dac1ec 100644
--- a/example/auto_compression/semantic_segmentation/configs/unet/unet_channel_prune.yaml
+++ b/example/auto_compression/semantic_segmentation/configs/unet/unet_channel_prune.yaml
@@ -1,8 +1,8 @@
 Global:
   reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml
   model_dir: ./RES-paddle2-UNet
-  model_filename: model
-  params_filename: params
+  model_filename: model.pdmodel
+  params_filename: model.pdiparams
 
 Distillation:
   alpha: 1.0
diff --git a/example/auto_compression/semantic_segmentation/configs/unet/unet_qat.yaml b/example/auto_compression/semantic_segmentation/configs/unet/unet_qat.yaml
index c25033f9e..ce067f68b 100644
--- a/example/auto_compression/semantic_segmentation/configs/unet/unet_qat.yaml
+++ b/example/auto_compression/semantic_segmentation/configs/unet/unet_qat.yaml
@@ -1,8 +1,8 @@
 Global:
   reader_config: configs/dataset/cityscapes_1024x512_scale1.0.yml
   model_dir: ./RES-paddle2-UNet
-  model_filename: model
-  params_filename: params
+  model_filename: model.pdmodel
+  params_filename: model.pdiparams
 
 Distillation:
   alpha: 1.0
diff --git a/example/auto_compression/semantic_segmentation/paddle_inference_eval.py b/example/auto_compression/semantic_segmentation/paddle_inference_eval.py
index f9066389b..73b003d71 100644
--- a/example/auto_compression/semantic_segmentation/paddle_inference_eval.py
+++ b/example/auto_compression/semantic_segmentation/paddle_inference_eval.py
@@ -44,9 +44,13 @@ def load_predictor(args):
     load predictor func
     """
     rerun_flag = False
-    model_file = os.path.join(args.model_path, args.model_filename)
-    params_file = os.path.join(args.model_path, args.params_filename)
-    pred_cfg = PredictConfig(model_file, params_file)
+    if '2' in paddle.__version__.split('.')[0]:
+        pred_cfg = PredictConfig(
+            os.path.join(args.model_path, args.model_filename),
+            os.path.join(args.model_path, args.params_filename))
+    else:
+        model_prefix = args.model_filename.split(".")[0]
+        pred_cfg = PredictConfig(os.path.join(args.model_path, model_prefix))
     pred_cfg.enable_memory_optim()
     pred_cfg.switch_ir_optim(True)
     if args.device == "GPU":
diff --git a/example/quantization/ptq/classification/eval.py b/example/quantization/ptq/classification/eval.py
index ef7dc749e..7d4c21b9c 100644
--- a/example/quantization/ptq/classification/eval.py
+++ b/example/quantization/ptq/classification/eval.py
@@ -30,9 +30,14 @@ def eval():
     # create predictor
-    model_file = os.path.join(FLAGS.model_path, FLAGS.model_filename)
-    params_file = os.path.join(FLAGS.model_path, FLAGS.params_filename)
-    config = paddle_infer.Config(model_file, params_file)
+    if '2' in paddle.__version__.split('.')[0]:
+        config = paddle.inference.Config(
+            os.path.join(FLAGS.model_path, FLAGS.model_filename),
+            os.path.join(FLAGS.model_path, FLAGS.params_filename))
+    else:
+        model_prefix = FLAGS.model_filename.split('.')[0]
+        config = paddle.inference.Config(
+            os.path.join(FLAGS.model_path, model_prefix))
     if FLAGS.use_gpu:
         config.enable_use_gpu(1000, 0)
     if not FLAGS.ir_optim:
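
Note: every Python hunk above applies the same change: on Paddle 2.x the inference config is built from explicit model/params file paths, otherwise it is built from the model prefix. The following is a minimal standalone sketch of that pattern for reference; the make_inference_config helper name is illustrative and not part of the patch.

    import os

    import paddle


    def make_inference_config(model_dir, model_filename, params_filename):
        # Paddle 2.x: pass the explicit model and params file paths.
        if '2' in paddle.__version__.split('.')[0]:
            return paddle.inference.Config(
                os.path.join(model_dir, model_filename),
                os.path.join(model_dir, params_filename))
        # Newer Paddle: pass only the model prefix (e.g. "model" for model.*).
        model_prefix = model_filename.split('.')[0]
        return paddle.inference.Config(os.path.join(model_dir, model_prefix))

    # Example usage mirroring the eval scripts touched by this diff:
    # config = make_inference_config("./RES-paddle2-UNet", "model.pdmodel",
    #                                "model.pdiparams")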