
Commit fd22e8f

[PIR] Fix test cases (#10456)
* [PIR] Fix pir in comments
* [PIR] re-run pre-commit for pir in comments
1 parent c8ba30d commit fd22e8f

3 files changed: +23 -12

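The change itself is mechanical: every test that asserted on the hard-coded "model.pdmodel" filename now builds the expected name from PADDLE_INFERENCE_MODEL_SUFFIX (and, in the commented-out blocks, PADDLE_INFERENCE_WEIGHTS_SUFFIX) from paddlenlp.utils.env, so the assertions still hold when PIR changes how exported inference programs are serialized. The sketch below only illustrates the idea; the detection helper and the ".json" fallback are assumptions, not the actual paddlenlp.utils.env source.

# Illustrative sketch only -- not the real paddlenlp.utils.env implementation.
# It shows why tests assert on f"model{PADDLE_INFERENCE_MODEL_SUFFIX}" rather
# than the literal "model.pdmodel": the suffix depends on whether PIR is on.
import os

import paddle


def _inference_model_suffix() -> str:
    # Assumption: under PIR the exported inference program is serialized as
    # JSON, while the legacy static-graph exporter writes a .pdmodel file.
    try:
        use_pir = paddle.base.framework.use_pir_api()
    except AttributeError:  # older Paddle builds without the PIR helper
        use_pir = False
    return ".json" if use_pir else ".pdmodel"


PADDLE_INFERENCE_MODEL_SUFFIX = _inference_model_suffix()
PADDLE_INFERENCE_WEIGHTS_SUFFIX = ".pdiparams"


def expected_export_files(export_path: str):
    """Filenames a test can assert on regardless of the serialization format."""
    return [
        os.path.join(export_path, f"model{PADDLE_INFERENCE_MODEL_SUFFIX}"),
        os.path.join(export_path, f"model{PADDLE_INFERENCE_WEIGHTS_SUFFIX}"),
    ]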

tests/experimental/autonlp/test_text_classification.py

Lines changed: 5 additions & 4 deletions
@@ -24,6 +24,7 @@
 
 from paddlenlp.datasets import load_dataset
 from paddlenlp.experimental.autonlp import AutoTrainerForTextClassification
+from paddlenlp.utils.env import PADDLE_INFERENCE_MODEL_SUFFIX
 from tests.testing_utils import get_tests_dir, slow
 
 finetune_model_candidate = {
@@ -175,13 +176,13 @@ def test_multiclass(self, custom_model_candidate, hp_overrides):
 # test export
 temp_export_path = os.path.join(temp_dir_path, "test_export")
 auto_trainer.export(export_path=temp_export_path)
-self.assertTrue(os.path.exists(os.path.join(temp_export_path, "model.pdmodel")))
+self.assertTrue(os.path.exists(os.path.join(temp_export_path, f"model{PADDLE_INFERENCE_MODEL_SUFFIX}")))
 self.assertTrue(os.path.exists(os.path.join(temp_export_path, "taskflow_config.json")))
 self.assertTrue(os.path.exists(os.path.join(temp_export_path, "tokenizer_config.json")))
 
 # test export compress model
 auto_trainer.export(export_path=temp_export_path, compress=True)
-self.assertTrue(os.path.exists(os.path.join(temp_export_path, "model.pdmodel")))
+self.assertTrue(os.path.exists(os.path.join(temp_export_path, f"model{PADDLE_INFERENCE_MODEL_SUFFIX}")))
 self.assertTrue(os.path.exists(os.path.join(temp_export_path, "taskflow_config.json")))
 self.assertTrue(os.path.exists(os.path.join(temp_export_path, "tokenizer_config.json")))
 
@@ -400,13 +401,13 @@ def test_default_model_candidate(self, language, hp_overrides):
 # test export
 temp_export_path = os.path.join(temp_dir_path, "test_export")
 auto_trainer.export(export_path=temp_export_path)
-self.assertTrue(os.path.exists(os.path.join(temp_export_path, "model.pdmodel")))
+self.assertTrue(os.path.exists(os.path.join(temp_export_path, f"model{PADDLE_INFERENCE_MODEL_SUFFIX}")))
 self.assertTrue(os.path.exists(os.path.join(temp_export_path, "taskflow_config.json")))
 self.assertTrue(os.path.exists(os.path.join(temp_export_path, "tokenizer_config.json")))
 
 # test export compress model
 auto_trainer.export(export_path=temp_export_path, compress=True)
-self.assertTrue(os.path.exists(os.path.join(temp_export_path, "model.pdmodel")))
+self.assertTrue(os.path.exists(os.path.join(temp_export_path, f"model{PADDLE_INFERENCE_MODEL_SUFFIX}")))
 self.assertTrue(os.path.exists(os.path.join(temp_export_path, "taskflow_config.json")))
 self.assertTrue(os.path.exists(os.path.join(temp_export_path, "tokenizer_config.json")))
 

tests/transformers/chatglm/test_modeling.py

Lines changed: 9 additions & 4 deletions
@@ -19,6 +19,11 @@
 import paddle
 
 from paddlenlp.transformers import ChatGLMConfig, ChatGLMForCausalLM, ChatGLMModel
+
+# from paddlenlp.utils.env import (
+#     PADDLE_INFERENCE_MODEL_SUFFIX,
+#     PADDLE_INFERENCE_WEIGHTS_SUFFIX,
+# )
 from tests.transformers.test_configuration_common import ConfigTester
 from tests.transformers.test_generation_utils import GenerationTesterMixin
 from tests.transformers.test_modeling_common import (
@@ -380,8 +385,8 @@ def test_chatglm_lm_head_model(self):
 # ),
 # )
 
-# model_path = os.path.join(tempdir, "model.pdmodel")
-# params_path = os.path.join(tempdir, "model.pdiparams")
+# model_path = os.path.join(tempdir, f"model{PADDLE_INFERENCE_MODEL_SUFFIX}")
+# params_path = os.path.join(tempdir, f"model{PADDLE_INFERENCE_WEIGHTS_SUFFIX}")
 # config = paddle.inference.Config(model_path, params_path)
 
 # config.disable_gpu()
@@ -450,8 +455,8 @@ def test_chatglm_lm_head_model(self):
 # ),
 # )
 
-# model_path = os.path.join(tempdir, "model.pdmodel")
-# params_path = os.path.join(tempdir, "model.pdiparams")
+# model_path = os.path.join(tempdir, f"model{PADDLE_INFERENCE_MODEL_SUFFIX}")
+# params_path = os.path.join(tempdir, f"model{PADDLE_INFERENCE_WEIGHTS_SUFFIX}")
 # config = paddle.inference.Config(model_path, params_path)
 
 # config.disable_gpu()
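The commented-out blocks above are the export-then-infer halves of these tests: they build a paddle.inference.Config from the exported program and weights files. If they were re-enabled, loading the export would look roughly like the sketch below. The export directory name and dummy input are placeholders; only the paddle.inference calls and the suffix constants mirror the code above.

# Rough sketch of loading an exported model with Paddle Inference, mirroring
# the commented-out test code above. Directory name and input shape are
# placeholders, not values taken from the actual tests.
import os

import numpy as np
import paddle.inference as paddle_infer

from paddlenlp.utils.env import (
    PADDLE_INFERENCE_MODEL_SUFFIX,
    PADDLE_INFERENCE_WEIGHTS_SUFFIX,
)

export_dir = "exported_model"  # hypothetical export directory
model_path = os.path.join(export_dir, f"model{PADDLE_INFERENCE_MODEL_SUFFIX}")
params_path = os.path.join(export_dir, f"model{PADDLE_INFERENCE_WEIGHTS_SUFFIX}")

config = paddle_infer.Config(model_path, params_path)
config.disable_gpu()  # run on CPU, as the commented-out tests do

predictor = paddle_infer.create_predictor(config)

# Feed a dummy batch of token ids; a real test would use tokenizer output.
input_name = predictor.get_input_names()[0]
input_handle = predictor.get_input_handle(input_name)
input_handle.copy_from_cpu(np.ones((1, 16), dtype="int64"))

predictor.run()
output_name = predictor.get_output_names()[0]
logits = predictor.get_output_handle(output_name).copy_to_cpu()
print(logits.shape)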

tests/transformers/test_generation_utils.py

Lines changed: 9 additions & 4 deletions
@@ -39,6 +39,11 @@
     PretrainedConfig,
     PretrainedTokenizer,
 )
+
+# from paddlenlp.utils.env import (
+#     PADDLE_INFERENCE_MODEL_SUFFIX,
+#     PADDLE_INFERENCE_WEIGHTS_SUFFIX,
+# )
 from tests.testing_utils import slow
 
 
@@ -1199,8 +1204,8 @@ def test_min_new_tokens(self):
 # ),
 # )
 
-# model_path = os.path.join(tempdir, "model.pdmodel")
-# params_path = os.path.join(tempdir, "model.pdiparams")
+# model_path = os.path.join(tempdir, f"model{PADDLE_INFERENCE_MODEL_SUFFIX}")
+# params_path = os.path.join(tempdir, f"model{PADDLE_INFERENCE_WEIGHTS_SUFFIX}")
 # config = paddle.inference.Config(model_path, params_path)
 
 # config.disable_gpu()
@@ -1265,8 +1270,8 @@ def test_min_new_tokens(self):
 # ),
 # )
 
-# model_path = os.path.join(tempdir, "model.pdmodel")
-# params_path = os.path.join(tempdir, "model.pdiparams")
+# model_path = os.path.join(tempdir, f"model{PADDLE_INFERENCE_MODEL_SUFFIX}")
+# params_path = os.path.join(tempdir, f"model{PADDLE_INFERENCE_WEIGHTS_SUFFIX}")
 # config = paddle.inference.Config(model_path, params_path)
 
 # config.disable_gpu()
