Use f32 inference for some OpenVINO stable diffusion/training tests (huggingface#518)

* Use f32 precision for compare-to-diffusers tests

* Add f32 ov_config to training/stable diffusion tests
helena-intel authored Jan 17, 2024
1 parent e22a2ac commit 76ce9de
Showing 2 changed files with 15 additions and 9 deletions.
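
Note: the change is small but deliberate. Both test files define F32_CONFIG = {"INFERENCE_PRECISION_HINT": "f32"} and pass it as ov_config wherever a test model is loaded, so OpenVINO runs inference in f32 even on hardware whose default execution precision is f16 or bf16, keeping outputs numerically comparable to the fp32 diffusers/PyTorch references. A minimal sketch of the same pattern outside the test suite (the model id is only an illustrative tiny test checkpoint):

from optimum.intel import OVStableDiffusionPipeline

F32_CONFIG = {"INFERENCE_PRECISION_HINT": "f32"}

# Passing ov_config at load time forwards the precision hint to the compiled OpenVINO
# models, keeping the UNet/VAE/text encoder in f32 instead of a device-default lower precision.
pipeline = OVStableDiffusionPipeline.from_pretrained(
    "hf-internal-testing/tiny-stable-diffusion-torch",  # illustrative test model
    export=True,
    ov_config=F32_CONFIG,
)
image = pipeline("sailing ship in storm by Leonardo da Vinci", num_inference_steps=2).images[0]
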
17 changes: 10 additions & 7 deletions tests/openvino/test_stable_diffusion.py
@@ -55,6 +55,9 @@
 from optimum.utils.import_utils import _diffusers_version
 
 
+F32_CONFIG = {"INFERENCE_PRECISION_HINT": "f32"}
+
+
 def _generate_inputs(batch_size=1):
     inputs = {
         "prompt": ["sailing ship in storm by Leonardo da Vinci"] * batch_size,
@@ -170,7 +173,7 @@ class OVStableDiffusionImg2ImgPipelineTest(OVStableDiffusionPipelineBaseTest):
     @parameterized.expand(SUPPORTED_ARCHITECTURES)
     def test_compare_diffusers_pipeline(self, model_arch: str):
         model_id = MODEL_NAMES[model_arch]
-        pipeline = self.MODEL_CLASS.from_pretrained(model_id, export=True)
+        pipeline = self.MODEL_CLASS.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
         height, width, batch_size = 128, 128, 1
         inputs = self.generate_inputs(height=height, width=width, batch_size=batch_size)
         inputs["prompt"] = "A painting of a squirrel eating a burger"
@@ -208,7 +211,7 @@ class OVStableDiffusionPipelineTest(unittest.TestCase):
     @parameterized.expand(SUPPORTED_ARCHITECTURES)
     def test_compare_to_diffusers(self, model_arch: str):
         model_id = MODEL_NAMES[model_arch]
-        ov_pipeline = self.MODEL_CLASS.from_pretrained(model_id, export=True)
+        ov_pipeline = self.MODEL_CLASS.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
         self.assertIsInstance(ov_pipeline.text_encoder, OVModelTextEncoder)
         self.assertIsInstance(ov_pipeline.vae_encoder, OVModelVaeEncoder)
         self.assertIsInstance(ov_pipeline.vae_decoder, OVModelVaeDecoder)
@@ -301,7 +304,7 @@ class OVStableDiffusionInpaintPipelineTest(OVStableDiffusionPipelineBaseTest):
     @parameterized.expand(SUPPORTED_ARCHITECTURES)
     def test_compare_diffusers_pipeline(self, model_arch: str):
         model_id = MODEL_NAMES[model_arch]
-        pipeline = self.MODEL_CLASS.from_pretrained(model_id, export=True)
+        pipeline = self.MODEL_CLASS.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
         batch_size, num_images, height, width = 1, 1, 64, 64
         latents = pipeline.prepare_latents(
             batch_size * num_images,
@@ -361,7 +364,7 @@ class OVtableDiffusionXLPipelineTest(unittest.TestCase):
 
     @parameterized.expand(SUPPORTED_ARCHITECTURES)
     def test_compare_to_diffusers(self, model_arch: str):
-        ov_pipeline = self.MODEL_CLASS.from_pretrained(MODEL_NAMES[model_arch], export=True)
+        ov_pipeline = self.MODEL_CLASS.from_pretrained(MODEL_NAMES[model_arch], export=True, ov_config=F32_CONFIG)
         self.assertIsInstance(ov_pipeline.text_encoder, OVModelTextEncoder)
         self.assertIsInstance(ov_pipeline.text_encoder_2, OVModelTextEncoder)
         self.assertIsInstance(ov_pipeline.vae_encoder, OVModelVaeEncoder)
@@ -447,11 +450,11 @@ class OVStableDiffusionXLImg2ImgPipelineTest(unittest.TestCase):
 
     def test_inference(self):
         model_id = "hf-internal-testing/tiny-stable-diffusion-xl-pipe"
-        pipeline = self.MODEL_CLASS.from_pretrained(model_id)
+        pipeline = self.MODEL_CLASS.from_pretrained(model_id, ov_config=F32_CONFIG)
 
         with tempfile.TemporaryDirectory() as tmp_dir:
             pipeline.save_pretrained(tmp_dir)
-            pipeline = self.MODEL_CLASS.from_pretrained(tmp_dir)
+            pipeline = self.MODEL_CLASS.from_pretrained(tmp_dir, ov_config=F32_CONFIG)
 
         batch_size, height, width = 1, 128, 128
         inputs = self.generate_inputs(height=height, width=width, batch_size=batch_size)
@@ -488,7 +491,7 @@ class OVLatentConsistencyModelPipelineTest(unittest.TestCase):
     @parameterized.expand(SUPPORTED_ARCHITECTURES)
     @unittest.skipIf(parse(_diffusers_version) <= Version("0.21.4"), "not supported with this diffusers version")
     def test_compare_to_diffusers(self, model_arch: str):
-        ov_pipeline = self.MODEL_CLASS.from_pretrained(MODEL_NAMES[model_arch], export=True)
+        ov_pipeline = self.MODEL_CLASS.from_pretrained(MODEL_NAMES[model_arch], export=True, ov_config=F32_CONFIG)
         self.assertIsInstance(ov_pipeline.text_encoder, OVModelTextEncoder)
         self.assertIsInstance(ov_pipeline.vae_encoder, OVModelVaeEncoder)
         self.assertIsInstance(ov_pipeline.vae_decoder, OVModelVaeDecoder)
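
Note: the compare-to-diffusers tests touched above run the OpenVINO pipeline and the original diffusers pipeline on identical inputs and pre-generated latents, then assert the outputs are close; with the f32 hint the tolerance can stay tight. A rough sketch of that pattern, where the prepare_latents helper, its signature, and the 1e-4 tolerance are assumptions modeled on the test code rather than guaranteed API:

import numpy as np
import torch
from diffusers import StableDiffusionPipeline
from optimum.intel import OVStableDiffusionPipeline

model_id = "hf-internal-testing/tiny-stable-diffusion-torch"  # illustrative tiny checkpoint
prompt = "A painting of a squirrel eating a burger"
height = width = 128

ov_pipe = OVStableDiffusionPipeline.from_pretrained(
    model_id, export=True, ov_config={"INFERENCE_PRECISION_HINT": "f32"}
)
pt_pipe = StableDiffusionPipeline.from_pretrained(model_id)

# Fix the initial latents so both pipelines denoise from the same noise (assumed helper).
latents = ov_pipe.prepare_latents(
    1, ov_pipe.unet.config["in_channels"], height, width,
    dtype=np.float32, generator=np.random.RandomState(0),
)

ov_images = ov_pipe(prompt, height=height, width=width, num_inference_steps=2,
                    output_type="np", latents=latents).images
with torch.no_grad():
    pt_images = pt_pipe(prompt, height=height, width=width, num_inference_steps=2,
                        output_type="np", latents=torch.from_numpy(latents)).images

# With f32 inference on both sides the difference should stay within a small tolerance.
assert np.allclose(ov_images, pt_images, atol=1e-4)
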
7 changes: 5 additions & 2 deletions tests/openvino/test_training.py
@@ -56,6 +56,9 @@
 from optimum.intel.openvino.utils import OV_XML_FILE_NAME
 
 
+F32_CONFIG = {"INFERENCE_PRECISION_HINT": "f32"}
+
+
 def initialize_movement_sparsifier_parameters_by_sparsity(
     movement_controller: MovementSparsityController,
     sparsity: float = 0.95,
@@ -250,7 +253,7 @@ def get_ov_config(self, nncf_compression_config: Union[List[Dict], Dict, None])
 
     def get_ov_model(self, model_id=None) -> OVModel:
         model_id = model_id or self.output_dir
-        return self.ovmodel_cls.from_pretrained(model_id)
+        return self.ovmodel_cls.from_pretrained(model_id, ov_config=F32_CONFIG)
 
     def get_nncf_config_with_overflow_fix_override(
         self, nncf_compression_config: Union[List[Dict], Dict, None], value: str = "enable"
@@ -628,7 +631,7 @@ def get_ov_model(self, model_id=None) -> OVModel:
         # image models, e.g. swin, may require a determined image size
         model_id = model_id or self.output_dir
         size = (self.feature_extractor.size["height"], self.feature_extractor.size["width"])
-        ovmodel = self.ovmodel_cls.from_pretrained(model_id, compile=False)
+        ovmodel = self.ovmodel_cls.from_pretrained(model_id, compile=False, ov_config=F32_CONFIG)
         # dynamic batch size for tiny-swin does not work in OpenVINO 2023.0
         batch_size = 1 if self.is_swin else -1
         ovmodel.reshape(batch_size, 3, *size)
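
Note: the training-test helpers follow the same idea as the diffusion tests: the exported OVModel is reloaded with the f32 hint and, for image models, reshaped to a static input before compilation. A small sketch under the assumption of an image-classification checkpoint saved by a training run (the path and input size are hypothetical):

from optimum.intel import OVModelForImageClassification

F32_CONFIG = {"INFERENCE_PRECISION_HINT": "f32"}

# compile=False delays compilation so the model can be reshaped to static shapes first.
ovmodel = OVModelForImageClassification.from_pretrained(
    "path/to/trainer/output_dir",  # hypothetical directory written by the training run
    compile=False,
    ov_config=F32_CONFIG,
)
ovmodel.reshape(1, 3, 224, 224)  # static batch and image size, mirroring the swin workaround
ovmodel.compile()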
