update readme
xufang-lisa committed Feb 15, 2025
1 parent a27c55d commit 9b1fff1
Showing 9 changed files with 17 additions and 23 deletions.
2 changes: 1 addition & 1 deletion samples/cpp/image_generation/README.md
@@ -8,7 +8,7 @@ There are several sample files:
- [`heterogeneous_stable_diffusion.cpp`](./heterogeneous_stable_diffusion.cpp) shows how to assemble a heterogeneous txt2image pipeline from individual subcomponents (scheduler, text encoder, unet, vae decoder)
- [`image2image.cpp`](./image2image.cpp) demonstrates basic usage of the image to image pipeline
- [`inpainting.cpp`](./inpainting.cpp) demonstrates basic usage of the inpainting pipeline
- [`benchmark_text2image.cpp`](./benchmark_text2image.cpp) demonstrates how to benchmark the text to image pipeline
- [`benchmark_image.cpp`](./benchmark_image.cpp) demonstrates how to benchmark the text to image / image to image / inpainting pipeline

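For context, the renamed benchmark sample times calls to the public image generation pipelines. The sketch below is a minimal, hypothetical example of measuring a single text-to-image `generate()` call with `ov::genai::Text2ImagePipeline` (the same header `benchmark_image.cpp` includes); the model directory, device, and generation parameters are placeholders, not the sample's actual defaults.

```cpp
// Minimal timing sketch; model path, device and parameters are illustrative only.
#include "openvino/genai/image_generation/text2image_pipeline.hpp"

#include <chrono>
#include <iostream>
#include <string>

int main(int argc, char* argv[]) {
    if (argc < 3) {
        std::cerr << "Usage: " << argv[0] << " <models_path> '<prompt>'" << std::endl;
        return 1;
    }
    const std::string models_path = argv[1];  // directory with an exported OpenVINO image generation model
    const std::string prompt = argv[2];       // text prompt to render

    ov::genai::Text2ImagePipeline pipe(models_path, "CPU");

    auto start = std::chrono::steady_clock::now();
    ov::Tensor image = pipe.generate(prompt,
                                     ov::genai::width(512),
                                     ov::genai::height(512),
                                     ov::genai::num_inference_steps(20));
    auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
                  std::chrono::steady_clock::now() - start).count();

    std::cout << "generate() took " << ms << " ms" << std::endl;
    return 0;
}
```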
Users can change the sample code and play with the following generation parameters:

2 changes: 1 addition & 1 deletion samples/cpp/image_generation/benchmark_image.cpp
@@ -1,4 +1,4 @@
// Copyright (C) 2023-2024 Intel Corporation
// Copyright (C) 2023-2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#include "openvino/genai/image_generation/text2image_pipeline.hpp"
2 changes: 1 addition & 1 deletion samples/python/image_generation/README.md
@@ -8,7 +8,7 @@ There are several sample files:
- [`heterogeneous_stable_diffusion.py`](./heterogeneous_stable_diffusion.py) shows how to assemble a heterogeneous text2image pipeline from individual subcomponents (scheduler, text encoder, unet, vae decoder)
- [`image2image.py`](./image2image.py) demonstrates basic usage of the image to image pipeline
- [`inpainting.py`](./inpainting.py) demonstrates basic usage of the inpainting pipeline
- [`benchmark_text2image.py`](./benchmark_text2image.py) demonstrates how to benchmark the text to image pipeline
- [`benchmark_image.py`](./benchmark_image.py) demonstrates how to benchmark the text to image / image to image / inpainting pipeline

Users can change the sample code and play with the following generation parameters:

2 changes: 1 addition & 1 deletion samples/python/image_generation/benchmark_image.py
@@ -1,4 +1,4 @@
# Copyright (C) 2023-2024 Intel Corporation
# Copyright (C) 2023-2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import argparse
@@ -1,3 +1,6 @@
// Copyright (C) 2023-2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

#include <vector>
1 change: 1 addition & 0 deletions src/cpp/src/image_generation/flux_pipeline.hpp
@@ -677,6 +677,7 @@ class FluxPipeline : public DiffusionPipeline {
std::shared_ptr<IImageProcessor> m_image_processor = nullptr, m_mask_processor_rgb = nullptr, m_mask_processor_gray = nullptr;
std::shared_ptr<ImageResizer> m_image_resizer = nullptr, m_mask_resizer = nullptr;
ImageGenerationConfig m_custom_generation_config;

float m_latent_timestep = -1;
};

@@ -1,3 +1,6 @@
// Copyright (C) 2023-2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#include <numeric>
#include <cmath>

18 changes: 4 additions & 14 deletions src/cpp/src/image_generation/stable_diffusion_3_pipeline.hpp
@@ -274,13 +274,8 @@ class StableDiffusion3Pipeline : public DiffusionPipeline {

// text_encoder_1_output - stores positive and negative pooled_prompt_embeds
auto infer_start = std::chrono::steady_clock::now();
ov::Tensor text_encoder_1_output =
m_clip_text_encoder_1->infer(positive_prompt,
negative_prompt_1_str,
do_classifier_free_guidance(generation_config.guidance_scale));
auto infer_duration =
std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - infer_start)
.count();
ov::Tensor text_encoder_1_output = m_clip_text_encoder_1->infer(positive_prompt, negative_prompt_1_str, do_classifier_free_guidance(generation_config.guidance_scale));
auto infer_duration = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - infer_start).count();
m_perf_metrics.encoder_inference_duration["text_encode"] = infer_duration;

// text_encoder_1_hidden_state - stores positive and negative prompt_embeds
@@ -289,13 +284,8 @@ class StableDiffusion3Pipeline : public DiffusionPipeline {

// text_encoder_2_output - stores positive and negative pooled_prompt_2_embeds
infer_start = std::chrono::steady_clock::now();
ov::Tensor text_encoder_2_output =
m_clip_text_encoder_2->infer(prompt_2_str,
negative_prompt_2_str,
do_classifier_free_guidance(generation_config.guidance_scale));
infer_duration =
std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - infer_start)
.count();
ov::Tensor text_encoder_2_output = m_clip_text_encoder_2->infer(prompt_2_str, negative_prompt_2_str, do_classifier_free_guidance(generation_config.guidance_scale));
infer_duration = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - infer_start).count();
m_perf_metrics.encoder_inference_duration["text_encode_2"] = infer_duration;

// text_encoder_2_hidden_state - stores positive and negative prompt_2_embeds
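The two hunks above only collapse the wrapped encoder-timing calls onto single lines; the measurement pattern itself is unchanged: wrap the encoder `infer()` call in `steady_clock` timestamps and store the elapsed milliseconds in a per-stage map. Below is a self-contained sketch of that pattern; `fake_infer()` and the local map are stand-ins for the real encoder call and `m_perf_metrics.encoder_inference_duration`.

```cpp
// Self-contained sketch of the per-stage timing pattern used by the pipelines.
#include <chrono>
#include <iostream>
#include <map>
#include <string>
#include <thread>

// Stand-in for a text encoder inference call.
static void fake_infer() {
    std::this_thread::sleep_for(std::chrono::milliseconds(5));
}

int main() {
    std::map<std::string, float> encoder_inference_duration;  // stage name -> milliseconds

    auto infer_start = std::chrono::steady_clock::now();
    fake_infer();
    auto infer_duration = std::chrono::duration_cast<std::chrono::milliseconds>(
                              std::chrono::steady_clock::now() - infer_start).count();
    encoder_inference_duration["text_encode"] = infer_duration;

    std::cout << "text_encode: " << encoder_inference_duration["text_encode"] << " ms" << std::endl;
    return 0;
}
```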
7 changes: 2 additions & 5 deletions src/cpp/src/image_generation/stable_diffusion_xl_pipeline.hpp
@@ -182,9 +182,7 @@ class StableDiffusionXLPipeline : public StableDiffusionPipeline {

if (compute_negative_prompt) {
auto infer_start = std::chrono::steady_clock::now();
add_text_embeds = m_clip_text_encoder_with_projection->infer(positive_prompt,
negative_prompt_1_str,
batch_size_multiplier > 1);
add_text_embeds = m_clip_text_encoder_with_projection->infer(positive_prompt, negative_prompt_1_str, batch_size_multiplier > 1);
auto infer_duration = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - infer_start).count();
m_perf_metrics.encoder_inference_duration["text_encoder_2"] = infer_duration;
infer_start = std::chrono::steady_clock::now();
@@ -199,8 +197,7 @@ class StableDiffusionXLPipeline : public StableDiffusionPipeline {
encoder_hidden_states = numpy_utils::concat(encoder_hidden_states_1, encoder_hidden_states_2, -1);
} else {
auto infer_start = std::chrono::steady_clock::now();
ov::Tensor add_text_embeds_positive =
m_clip_text_encoder_with_projection->infer(positive_prompt, negative_prompt_1_str, false);
ov::Tensor add_text_embeds_positive = m_clip_text_encoder_with_projection->infer(positive_prompt, negative_prompt_1_str, false);
auto infer_duration = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - infer_start).count();
m_perf_metrics.encoder_inference_duration["text_encoder_2"] = infer_duration;
infer_start = std::chrono::steady_clock::now();
