Merge branch 'master' into cb-by-default
andrei-kochin authored Jan 8, 2025
2 parents 6038663 + 3e5c889 commit 7fc5b4e
Showing 32 changed files with 334 additions and 75 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/genai-tools.yml
@@ -44,7 +44,7 @@ jobs:
with:
platform: ubuntu22
commit_packages_to_provide: wheels
revision: latest_available_commit
revision: 345163f87953fb0dd8dd590257eb7fc84378da8e

llm_bench:
name: 'LLM bench tests'
2 changes: 1 addition & 1 deletion .github/workflows/linux.yml
@@ -52,7 +52,7 @@ jobs:
with:
platform: ubuntu22
commit_packages_to_provide: wheels
revision: latest_available_commit
revision: 345163f87953fb0dd8dd590257eb7fc84378da8e

- name: Clone docker tag from OpenVINO repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
2 changes: 1 addition & 1 deletion .github/workflows/mac.yml
@@ -17,7 +17,7 @@ concurrency:

env:
PYTHON_VERSION: '3.10'
OV_BRANCH: master
OV_BRANCH: 345163f87953fb0dd8dd590257eb7fc84378da8e
OV_TARBALL: ''

jobs:
4 changes: 2 additions & 2 deletions .github/workflows/stable_diffusion_1_5_cpp.yml
@@ -45,7 +45,7 @@ jobs:
with:
platform: ubuntu22
commit_packages_to_provide: wheels
revision: latest_available_commit
revision: 345163f87953fb0dd8dd590257eb7fc84378da8e

openvino_download_windows:
name: Download OpenVINO for Windows
@@ -71,7 +71,7 @@ jobs:
with:
platform: windows
commit_packages_to_provide: wheels
revision: latest_available_commit
revision: 345163f87953fb0dd8dd590257eb7fc84378da8e

stable_diffusion_1_5_cpp-linux:
runs-on: ubuntu-22.04-8-cores
2 changes: 1 addition & 1 deletion .github/workflows/windows.yml
@@ -17,7 +17,7 @@ concurrency:

env:
PYTHON_VERSION: '3.11'
OV_BRANCH: master
OV_BRANCH: 345163f87953fb0dd8dd590257eb7fc84378da8e
OV_TARBALL: ''

jobs:
1 change: 1 addition & 0 deletions CMakeLists.txt
@@ -60,6 +60,7 @@ if(NOT OpenVINODeveloperPackage_FOUND)
endif()

include(cmake/features.cmake)
include(cmake/version.cmake)

if(ENABLE_PYTHON)
# the following two calls are required for cross-compilation
6 changes: 4 additions & 2 deletions README.md
@@ -133,13 +133,15 @@ from PIL import Image

# Choose GPU instead of CPU in the line below to run the model on Intel integrated or discrete GPU
pipe = ov_genai.VLMPipeline("./InternVL2-1B", "CPU")
pipe.start_chat()

image = Image.open("dog.jpg")
image_data = np.array(image.getdata()).reshape(1, image.size[1], image.size[0], 3).astype(np.uint8)
image_data = ov.Tensor(image_data)

prompt = "Can you describe the image?"
print(pipe.generate(prompt, image=image_data, max_new_tokens=100))
result = pipe.generate(prompt, image=image_data, max_new_tokens=100)
print(result.texts[0])
```

### Run generation using VLMPipeline in C++
@@ -392,7 +394,7 @@ See [here](https://openvinotoolkit.github.io/openvino_notebooks/?search=Automati
## Additional materials
- [List of supported models](https://github.com/openvinotoolkit/openvino.genai/blob/master/src/docs/SUPPORTED_MODELS.md) (NOTE: models can work, but were not tried yet)
- [List of supported models](https://github.com/openvinotoolkit/openvino.genai/blob/master/SUPPORTED_MODELS.md) (NOTE: models can work, but were not tried yet)
- [OpenVINO Generative AI workflow](https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide.html)
- [Optimum-intel and OpenVINO](https://huggingface.co/docs/optimum/intel/openvino/export)
29 changes: 28 additions & 1 deletion src/docs/SUPPORTED_MODELS.md → SUPPORTED_MODELS.md
@@ -147,6 +147,8 @@
</tbody>
</table>

> [!NOTE]
> LoRA adapters are supported.
The pipeline can work with other similar topologies produced by `optimum-intel` with the same model signature. The model is required to have the following inputs after the conversion:
1. `input_ids` contains the tokens.
@@ -165,12 +167,14 @@ The pipeline can work with other similar topologies produced by `optimum-intel`
<th>Architecture</th>
<th>Text 2 image</th>
<th>Image 2 image</th>
<th>LoRA support</th>
<th>Example HuggingFace Models</th>
</tr>
<tr>
<td><code>Latent Consistency Model</code></td>
<td>Supported</td>
<td>Supported</td>
<td>Supported</td>
<td>
<ul>
<li><a href="https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7"><code>SimianLuo/LCM_Dreamshaper_v7</code></a></li>
@@ -181,6 +185,7 @@ The pipeline can work with other similar topologies produced by `optimum-intel`
<td><code>Stable Diffusion</code></td>
<td>Supported</td>
<td>Supported</td>
<td>Supported</td>
<td>
<ul>
<li><a href="https://huggingface.co/CompVis/stable-diffusion-v1-1"><code>CompVis/stable-diffusion-v1-1</code></a></li>
@@ -213,6 +218,7 @@ The pipeline can work with other similar topologies produced by `optimum-intel`
<td><code>Stable Diffusion XL</code></td>
<td>Supported</td>
<td>Supported</td>
<td>Supported</td>
<td>
<ul>
<li><a href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-0.9"><code>stabilityai/stable-diffusion-xl-base-0.9</code></a></li>
@@ -225,6 +231,7 @@ The pipeline can work with other similar topologies produced by `optimum-intel`
<td><code>Stable Diffusion 3</code></td>
<td>Supported</td>
<td>Not supported</td>
<td>Not supported</td>
<td>
<ul>
<li><a href="https://huggingface.co/stabilityai/stable-diffusion-3-medium-diffusers"><code>stabilityai/stable-diffusion-3-medium-diffusers</code></a></li>
@@ -237,6 +244,7 @@ The pipeline can work with other similar topologies produced by `optimum-intel`
<td><code>Flux</code></td>
<td>Supported</td>
<td>Not supported</td>
<td>Not supported</td>
<td>
<ul>
<li><a href="https://huggingface.co/black-forest-labs/FLUX.1-schnell"><code>black-forest-labs/FLUX.1-schnell</code></a></li>
@@ -260,10 +268,12 @@ In addition to image generation models, `InpaintingPipeline` supports specialize
<tbody style="vertical-align: top;">
<tr>
<th>Architecture</th>
<th>LoRA support</th>
<th>Example HuggingFace Models</th>
</tr>
<tr>
<td><code>Stable Diffusion</code></td>
<td>Supported</td>
<td>
<ul>
<li><a href="https://huggingface.co/stabilityai/stable-diffusion-2-inpainting"><code>stabilityai/stable-diffusion-2-inpainting</code></a></li>
@@ -275,13 +285,22 @@ In addition to image generation models, `InpaintingPipeline` supports specialize
</tr>
<tr>
<td><code>Stable Diffusion XL</code></td>
<td>Supported</td>
<td>
<ul>
<li><a href="https://huggingface.co/diffusers/stable-diffusion-xl-1.0-inpainting-0.1"><code>diffusers/stable-diffusion-xl-1.0-inpainting-0.1</code></a></li>
</ul>
</td>
</tr>
</tr>
<!-- <tr>
<td><code>FLUX</code></td>
<td>Not supported</td>
<td>
<ul>
<li><a href="https://huggingface.co/black-forest-labs/FLUX.1-Fill-dev"><code>black-forest-labs/FLUX.1-Fill-dev</code></a></li>
</ul>
</td>
</tr> -->
</tbody>
</table>

@@ -292,11 +311,13 @@ In addition to image generation models, `InpaintingPipeline` supports specialize
<tr>
<th>Architecture</th>
<th>Models</th>
<th>LoRA support</th>
<th>Example HuggingFace Models</th>
</tr>
<tr>
<td><code>InternVL2</code></td>
<td>InternVL2</td>
<td>Not supported</td>
<td>
<ul>
<li><a href="https://huggingface.co/OpenGVLab/InternVL2-1B"><code>OpenGVLab/InternVL2-1B</code></a></li>
@@ -309,6 +330,7 @@ In addition to image generation models, `InpaintingPipeline` supports specialize
<tr>
<td><code>LLaVA</code></td>
<td>LLaVA-v1.5</td>
<td>Not supported</td>
<td>
<ul>
<li><a href="https://huggingface.co/llava-hf/llava-1.5-7b-hf"><code>llava-hf/llava-1.5-7b-hf</code></a></li>
@@ -318,6 +340,7 @@ In addition to image generation models, `InpaintingPipeline` supports specialize
<tr>
<td><code>LLaVA-NeXT</code></td>
<td>LLaVa-v1.6</td>
<td>Not supported</td>
<td>
<ul>
<li><a href="https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf"><code>llava-hf/llava-v1.6-mistral-7b-hf</code></a></li>
@@ -329,6 +352,7 @@ In addition to image generation models, `InpaintingPipeline` supports specialize
<tr>
<td><code>MiniCPMV</code></td>
<td>MiniCPM-V-2_6</td>
<td>Not supported</td>
<td>
<ul>
<li><a href="https://huggingface.co/openbmb/MiniCPM-V-2_6"><code>openbmb/MiniCPM-V-2_6</code></a></li>
@@ -345,11 +369,13 @@ In addition to image generation models, `InpaintingPipeline` supports specialize
<tr>
<th>Architecture</th>
<th>Models</th>
<th>LoRA support</th>
<th>Example HuggingFace Models</th>
</tr>
<tr>
<td rowspan=2><code>WhisperForConditionalGeneration</code></td>
<td>Whisper</td>
<td>Not supported</td>
<td>
<ul>
<li><a href="https://huggingface.co/openai/whisper-tiny"><code>openai/whisper-tiny</code></a></li>
@@ -366,6 +392,7 @@ In addition to image generation models, `InpaintingPipeline` supports specialize
</tr>
<tr>
<td>Distil-Whisper</td>
<td>Not supported</td>
<td>
<ul>
<li><a href="https://huggingface.co/distil-whisper/distil-small.en"><code>distil-whisper/distil-small.en</code></a></li>
5 changes: 0 additions & 5 deletions cmake/templates/__version__.py.in

This file was deleted.

19 changes: 19 additions & 0 deletions cmake/templates/version.cpp.in
@@ -0,0 +1,19 @@
// Copyright (C) 2023-2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#include "openvino/genai/version.hpp"

namespace ov {
namespace genai {

const Version get_version() {
const static Version version = {
"@OpenVINOGenAI_FULL_VERSION@",
"OpenVINO GenAI version",
};

return version;
}

} // namespace genai
} // namespace ov
34 changes: 34 additions & 0 deletions cmake/templates/version.hpp.in
@@ -0,0 +1,34 @@
// Copyright (C) 2023-2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

#include "openvino/core/version.hpp"
#include "openvino/genai/visibility.hpp"

/**
* OpenVINO GenAI major version
*/
#define OPENVINO_GENAI_VERSION_MAJOR @OpenVINOGenAI_VERSION_MAJOR@

/**
* OpenVINO GenAI minor version
*/
#define OPENVINO_GENAI_VERSION_MINOR @OpenVINOGenAI_VERSION_MINOR@

/**
* OpenVINO GenAI patch version
*/
#define OPENVINO_GENAI_VERSION_PATCH @OpenVINOGenAI_VERSION_PATCH@

namespace ov {
namespace genai {

/**
* Returns OpenVINO GenAI full version including git commit and hash information in form of:
* <MAJOR>.<MINOR>.<PATCH>.<REVISION>-<COMMIT NUMBER>-<COMMIT HASH>[-<BRANCH SUFFIX>]
*/
OPENVINO_EXTERN_C OPENVINO_GENAI_EXPORTS const ov::Version OPENVINO_CDECL get_version();

} // namespace genai
} // namespace ov
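
A minimal usage sketch (not part of this commit) of the new version API follows. It assumes the generated header installs as `openvino/genai/version.hpp` and relies on the `description`/`buildNumber` fields of `ov::Version` from OpenVINO core:

```cpp
#include <iostream>

#include "openvino/genai/version.hpp"

int main() {
    // get_version() returns an ov::Version whose buildNumber carries the full
    // string assembled by cmake/version.cmake, i.e.
    // <MAJOR>.<MINOR>.<PATCH>.<REVISION>-<COMMIT NUMBER>-<COMMIT HASH>[-<BRANCH SUFFIX>].
    const ov::Version version = ov::genai::get_version();
    std::cout << version.description << ": " << version.buildNumber << std::endl;

    // The compile-time macros allow version-dependent code paths.
    std::cout << "Built against GenAI "
              << OPENVINO_GENAI_VERSION_MAJOR << "."
              << OPENVINO_GENAI_VERSION_MINOR << "."
              << OPENVINO_GENAI_VERSION_PATCH << std::endl;
    return 0;
}
```

Since `get_version()` is declared with `OPENVINO_GENAI_EXPORTS`, linking against the GenAI CMake target (typically `openvino::genai`) should be all that is required.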
72 changes: 72 additions & 0 deletions cmake/version.cmake
@@ -0,0 +1,72 @@
# Copyright (C) 2018-2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

find_package(Git QUIET)

function(ov_genai_branch_name VAR)
if(GIT_FOUND)
execute_process(
COMMAND ${GIT_EXECUTABLE} rev-parse --abbrev-ref HEAD
WORKING_DIRECTORY ${OpenVINOGenAI_SOURCE_DIR}
OUTPUT_VARIABLE GIT_BRANCH
RESULT_VARIABLE EXIT_CODE
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(EXIT_CODE EQUAL 0)
set(${VAR} ${GIT_BRANCH} PARENT_SCOPE)
endif()
endif()
endfunction()

function(ov_genai_commit_hash VAR)
if(GIT_FOUND)
execute_process(
COMMAND ${GIT_EXECUTABLE} rev-parse --short=11 HEAD
WORKING_DIRECTORY ${OpenVINOGenAI_SOURCE_DIR}
OUTPUT_VARIABLE GIT_COMMIT_HASH
RESULT_VARIABLE EXIT_CODE
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(EXIT_CODE EQUAL 0)
set(${VAR} ${GIT_COMMIT_HASH} PARENT_SCOPE)
endif()
endif()
endfunction()

function(ov_genai_commit_number VAR)
set(GIT_COMMIT_NUMBER_FOUND OFF)
if(GIT_FOUND)
execute_process(
COMMAND ${GIT_EXECUTABLE} rev-list --count HEAD
WORKING_DIRECTORY ${OpenVINOGenAI_SOURCE_DIR}
OUTPUT_VARIABLE GIT_COMMIT_NUMBER
RESULT_VARIABLE EXIT_CODE
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(EXIT_CODE EQUAL 0)
set(GIT_COMMIT_NUMBER_FOUND ON)
set(${VAR} ${GIT_COMMIT_NUMBER} PARENT_SCOPE)
endif()
endif()
if(NOT GIT_COMMIT_NUMBER_FOUND)
# set zeros since git is not available
set(${VAR} "000" PARENT_SCOPE)
endif()
endfunction()

function(ov_genai_full_version full_version)
if(GIT_FOUND)
ov_genai_branch_name(GIT_BRANCH)
ov_genai_commit_hash(GIT_COMMIT_HASH)
ov_genai_commit_number(GIT_COMMIT_NUMBER)

if(NOT GIT_BRANCH MATCHES "^(master|HEAD)$")
set(GIT_BRANCH_POSTFIX "-${GIT_BRANCH}")
endif()

set(${full_version} "${OpenVINOGenAI_VERSION}-${GIT_COMMIT_NUMBER}-${GIT_COMMIT_HASH}${GIT_BRANCH_POSTFIX}" PARENT_SCOPE)
else()
set(${full_version} "${OpenVINOGenAI_VERSION}" PARENT_SCOPE)
endif()
endfunction()

ov_genai_full_version(OpenVINOGenAI_FULL_VERSION)
message(STATUS "OpenVINO GenAI full version: ${OpenVINOGenAI_FULL_VERSION}")
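
For illustration only (all concrete values below are hypothetical), the full version computed here feeds `version.cpp.in` above — presumably via `configure_file()` elsewhere in the build — so the configured source could end up looking roughly like this:

```cpp
// Hypothetical result of configuring cmake/templates/version.cpp.in with the
// OpenVINOGenAI_FULL_VERSION assembled by ov_genai_full_version(); the version
// number, commit count, and hash are placeholders, not taken from this commit.
#include "openvino/genai/version.hpp"

namespace ov {
namespace genai {

const Version get_version() {
    const static Version version = {
        "2025.0.0.0-1234-abc1234def5-cb-by-default",  // <version>-<commit number>-<short hash>[-<branch suffix>]
        "OpenVINO GenAI version",
    };

    return version;
}

}  // namespace genai
}  // namespace ov
```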
4 changes: 0 additions & 4 deletions llm_bench/python/README.md

This file was deleted.

4 changes: 0 additions & 4 deletions llm_bench/python/who_what_benchmark/README.md

This file was deleted.

2 changes: 1 addition & 1 deletion samples/cpp/visual_language_chat/README.md
@@ -29,7 +29,7 @@ Follow [Get Started with Samples](https://docs.openvino.ai/2024/learn-openvino/o

Discrete GPUs (dGPUs) usually provide better performance compared to CPUs. It is recommended to run larger models on a dGPU with 32GB+ RAM. For example, the model `llava-hf/llava-v1.6-mistral-7b-hf` can benefit from being run on a dGPU. Modify the source code to change the device for inference to the `GPU`.

See [SUPPORTED_MODELS.md](../../../src/docs/SUPPORTED_MODELS.md#visual-language-models) for the list of supported models.
See [SUPPORTED_MODELS.md](../../../SUPPORTED_MODELS.md#visual-language-models) for the list of supported models.

## Run benchmark:
