diff --git a/src/cpp/src/llm_pipeline.cpp b/src/cpp/src/llm_pipeline.cpp
index 62a72b1cbd..15a6ee4a12 100644
--- a/src/cpp/src/llm_pipeline.cpp
+++ b/src/cpp/src/llm_pipeline.cpp
@@ -36,13 +36,13 @@ std::pair<EncodedResults, std::optional<int64_t>> beam_search(
 class StatefulLLMPipeline final : public LLMPipelineImplBase {
 public:
     ov::InferRequest m_model_runner;
-    bool is_chat_conversation = false;
-    bool m_is_cache_empty = true;
+    bool m_trust_encoded_history = true;
     std::optional<int64_t> m_selected_beam = std::nullopt;
     ChatHistory m_history;
     std::string m_templated_chat_history = {};
-    TokenizedInputs m_tokenized_chat_history;
+    std::vector<int64_t> m_tokenized_chat_history;
+    ov::genai::utils::GenerationChatInputsType m_chat_input_type = ov::genai::utils::GenerationChatInputsType::UNDEF;
 
     StatefulLLMPipeline(
         const ov::InferRequest& request,
@@ -94,6 +94,13 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase {
         OptionalGenerationConfig generation_config,
         StreamerVariant streamer
     ) override {
+        if (is_chat_conversation && m_chat_input_type == ov::genai::utils::GenerationChatInputsType::UNDEF)
+            m_chat_input_type = ov::genai::utils::GenerationChatInputsType::STRING;
+
+        if (is_chat_conversation)
+            OPENVINO_ASSERT(m_chat_input_type != ov::genai::utils::GenerationChatInputsType::ENCODED_INPUTS,
+                            "Chat doesn't support switching between input types. Please, continue using EncodedInputs or restart the chat.");
+
         auto start_time = std::chrono::steady_clock::now();
         GenerationConfig config = (generation_config.has_value()) ? *generation_config : m_generation_config;
         TokenizedInputs encoded_input;
@@ -119,14 +126,36 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase {
             auto new_templated_chat_history = m_tokenizer.apply_chat_template(m_history, add_generation_prompt);
             // Do not add special tokens in chat scenario to be aligned with HF.
             auto new_chat_tokens = m_tokenizer.encode(new_templated_chat_history, ov::genai::add_special_tokens(false));
-            if (m_is_cache_empty) {
-                encoded_input = new_chat_tokens;
-            } else {
-                auto prev_chat_tokens = m_tokenizer.encode(m_templated_chat_history, ov::genai::add_special_tokens(false));
+            auto prev_chat_tokens = m_tokenizer.encode(m_templated_chat_history, ov::genai::add_special_tokens(false));
+
+            // Some combinations of symbols can be encoded by the tokenizer in different ways.
+            // If we hit a sequence with such a combination, we cannot reliably subtract the old history from the new one
+            // to recover the new prompt, so detect this case and fall back to using the whole history.
+            if (!m_tokenized_chat_history.empty()) {
+                auto stop_tokens = config.stop_token_ids;
+                // the config could have been reset by the user and stop_tokens could be empty,
+                // but the model/tokenizer still relies on the eos token, so add it
+                stop_tokens.insert(m_tokenizer.get_eos_token_id());
+                size_t last_same_hist_token = ov::genai::utils::get_first_history_difference(prev_chat_tokens.input_ids, m_tokenized_chat_history, stop_tokens);
+                m_trust_encoded_history = last_same_hist_token == SIZE_MAX;
+            }
+
+            if (!m_trust_encoded_history) {
+                reset_kv_state();
+                m_selected_beam = std::nullopt;
+            }
+
+            if (!m_tokenized_chat_history.empty() && m_trust_encoded_history) {
                 encoded_input = utils::subtract_chat_tokenized_inputs(new_chat_tokens, prev_chat_tokens);
+            } else {
+                encoded_input = new_chat_tokens;
             }
             m_templated_chat_history = new_templated_chat_history;
-            m_tokenized_chat_history = new_chat_tokens;
+            m_tokenized_chat_history.clear();
+            m_tokenized_chat_history.reserve(new_chat_tokens.input_ids.get_size());
+            std::copy_n(new_chat_tokens.input_ids.data<int64_t>(), new_chat_tokens.input_ids.get_size(),
+                        std::back_inserter(m_tokenized_chat_history));
+
             // TODO: Forbid LoRA config change if we are in the chat mode, because it requires regenerating the history with LoRA applied
         } else {
             encoded_input = m_tokenizer.encode(prompt);
@@ -180,6 +209,14 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase {
         OptionalGenerationConfig generation_config,
         StreamerVariant streamer
     ) override {
+        if (is_chat_conversation && m_chat_input_type == ov::genai::utils::GenerationChatInputsType::UNDEF)
+            m_chat_input_type = ov::genai::utils::GenerationChatInputsType::ENCODED_INPUTS;
+
+        if (is_chat_conversation)
+            // if the chat was run in StringInputs mode but generate() is now called with EncodedInputs, the last m_history entry will have the assistant role
+            OPENVINO_ASSERT(m_chat_input_type == ov::genai::utils::GenerationChatInputsType::ENCODED_INPUTS || m_history.back()["role"] == "user",
+                            "Chat doesn't support switching between input types. Please, continue using StringInputs or restart the chat.");
+
         auto start_time = std::chrono::steady_clock::now();
         ov::Tensor input_ids;
         ov::Tensor attention_mask;
@@ -191,6 +228,9 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase {
             attention_mask = data->attention_mask;
         }
 
+        if (is_chat_conversation && m_chat_input_type == ov::genai::utils::GenerationChatInputsType::ENCODED_INPUTS)
+            std::copy(input_ids.data<int64_t>(), input_ids.data<int64_t>() + input_ids.get_size(), std::back_inserter(m_tokenized_chat_history));
+
         GenerationConfig config = (generation_config.has_value()) ?
             *generation_config : m_generation_config;
 
         // If eos_token_id was not provided, take value from default m_generation_config
@@ -222,24 +262,29 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase {
                         "(input_ids, attention_mask, position_ids, beam_idx) "
                         "but you have '" + std::to_string(num_inputs) + "' inputs");
-
+        ov::Tensor tokenized_chat_history = ov::Tensor(ov::element::i64, {1, m_tokenized_chat_history.size()}, m_tokenized_chat_history.data());
         size_t kv_cache_len = 0;
         ov::Tensor concatenated_attention_mask;
-        if (is_chat_conversation && !m_is_cache_empty) {
-            OPENVINO_ASSERT(batch_size == 1, "continuation of generation is possible only for batch 1");
-            // If history is saved in KV cache, concatenate new attention_mask with the already existing.
-            // Between subsequent runs attention_mask should not be modified.
-            auto atten_mask_history = m_model_runner.get_tensor("attention_mask");
-            auto prompt_len = attention_mask.get_shape()[1];
-            kv_cache_len = atten_mask_history.get_shape()[1];
-
-            ov::Tensor new_atten_mask = ov::Tensor{ov::element::i64, {batch_size, kv_cache_len + prompt_len}};
-            auto start_atten_hst = atten_mask_history.data<int64_t>() + kv_cache_len * (*m_selected_beam);
-            std::copy(start_atten_hst, start_atten_hst + kv_cache_len,
-                      new_atten_mask.data<int64_t>());
-            std::copy(attention_mask.data<int64_t>(), attention_mask.data<int64_t>() + prompt_len,
-                      new_atten_mask.data<int64_t>() + kv_cache_len);
-            concatenated_attention_mask = new_atten_mask;
+        if (is_chat_conversation && !m_tokenized_chat_history.empty()) {
+            if (m_trust_encoded_history) {
+                OPENVINO_ASSERT(batch_size == 1, "continuation of generation is possible only for batch 1");
+                // If history is saved in KV cache, concatenate new attention_mask with the already existing.
+                // Between subsequent runs attention_mask should not be modified.
+                auto atten_mask_history = m_model_runner.get_tensor("attention_mask");
+                auto prompt_len = attention_mask.get_shape()[1];
+                kv_cache_len = atten_mask_history.get_shape()[1];
+
+                ov::Tensor new_atten_mask = ov::Tensor{ov::element::i64, {batch_size, kv_cache_len + prompt_len}};
+                auto start_atten_hst = atten_mask_history.data<int64_t>() + kv_cache_len * (*m_selected_beam);
+                std::copy(start_atten_hst, start_atten_hst + kv_cache_len,
+                          new_atten_mask.data<int64_t>());
+                std::copy(attention_mask.data<int64_t>(), attention_mask.data<int64_t>() + prompt_len,
+                          new_atten_mask.data<int64_t>() + kv_cache_len);
+                concatenated_attention_mask = new_atten_mask;
+            } else {
+                attention_mask = ov::genai::utils::init_attention_mask(tokenized_chat_history);
+                concatenated_attention_mask = attention_mask;
+            }
         } else {
             concatenated_attention_mask = attention_mask;
         }
@@ -247,28 +292,36 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase {
         bool position_ids_available = (num_inputs == 4);
         std::optional<ov::Tensor> position_ids = std::nullopt;
         if (position_ids_available) {
-            position_ids = ov::Tensor{ov::element::i64, input_ids.get_shape()};
-            utils::initialize_position_ids(*position_ids, attention_mask, kv_cache_len);
+            if (is_chat_conversation && !m_trust_encoded_history) {
+                position_ids = ov::Tensor{ov::element::i64, tokenized_chat_history.get_shape()};
+            } else {
+                position_ids = ov::Tensor{ov::element::i64, input_ids.get_shape()};
+            }
+            utils::initialize_position_ids(*position_ids, attention_mask, kv_cache_len);
         }
 
         if(m_adapter_controller) {
             m_adapter_controller->apply(m_model_runner, config.adapters);
         }
 
+        auto input_tokens = input_ids;
+        if (is_chat_conversation && !m_trust_encoded_history) {
+            input_tokens = tokenized_chat_history;
+            m_trust_encoded_history = true;
+        }
+
         ov::genai::EncodedResults result;
         if (config.is_beam_search() && is_chat_conversation) {
-            std::tie(result, m_selected_beam) = beam_search(m_model_runner, input_ids, concatenated_attention_mask,
+            std::tie(result, m_selected_beam) = beam_search(m_model_runner, input_tokens, concatenated_attention_mask,
                                                             config, position_ids, m_selected_beam);
         } else {
            std::vector<SequenceGroup::Ptr> requests;
            size_t block_size = 1;
            bool enable_prefix_caching = false;
-
-           config.stop_token_ids.insert(config.eos_token_id);
            for (size_t request_id = 0; request_id < batch_size; request_id++) {
                SequenceGroup::Ptr sequence_group;
-               if (is_chat_conversation && !m_is_cache_empty) {
-                   sequence_group = std::make_shared<SequenceGroup>(request_id, m_tokenized_chat_history.input_ids, config, block_size, enable_prefix_caching);
+               if (is_chat_conversation) {
+                   sequence_group = std::make_shared<SequenceGroup>(request_id, tokenized_chat_history, config, block_size, enable_prefix_caching);
                } else {
                    size_t seq_len = input_ids.get_shape().at(1);
                    size_t batch_offset = request_id * seq_len;
@@ -283,16 +336,17 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase {
            }
 
            Sampler sampler = Sampler(m_tokenizer);
-           std::tie(result, m_selected_beam) = ov::genai::get_lm_encoded_results(m_model_runner, input_ids, concatenated_attention_mask, streamer_ptr,
-                                                                                 sampler, requests, position_ids, std::nullopt, m_selected_beam);
+           std::tie(result, m_selected_beam) = ov::genai::get_lm_encoded_results(m_model_runner, input_tokens, concatenated_attention_mask, streamer_ptr,
+                                                                                 sampler, requests, position_ids, std::nullopt, m_selected_beam);
        }
 
-        if (!is_chat_conversation) {
+        if (is_chat_conversation) {
+            std::copy(result.tokens[0].begin(), result.tokens[0].end(), std::back_inserter(m_tokenized_chat_history));
+        } else {
             reset_kv_state();
             m_selected_beam = std::nullopt;
-        } else {
-            m_is_cache_empty = false;
         }
+
         auto stop_time = std::chrono::steady_clock::now();
 
         // If is called without tokenization then that stat will not be reported.
@@ -306,12 +360,14 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase {
    void start_chat(const std::string& system_message) override {
        is_chat_conversation = true;
-        m_selected_beam = std::nullopt;
-        if (!m_is_cache_empty) {
+        m_selected_beam = std::nullopt;
+        m_trust_encoded_history = true;
+        m_chat_input_type = ov::genai::utils::GenerationChatInputsType::UNDEF;
+        if (!m_tokenized_chat_history.empty()) {
            reset_kv_state();
-            m_is_cache_empty = true;
            m_history = {};
            m_templated_chat_history = "";
+            m_tokenized_chat_history.clear();
        }
        if (system_message.empty())
            return;
@@ -325,11 +381,13 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase {
    void finish_chat() override {
        is_chat_conversation = false;
        m_selected_beam = std::nullopt;
-        if (!m_is_cache_empty) {
+        m_trust_encoded_history = true;
+        m_chat_input_type = ov::genai::utils::GenerationChatInputsType::UNDEF;
+        if (!m_tokenized_chat_history.empty()) {
            reset_kv_state();
-            m_is_cache_empty = true;
            m_history.clear();
            m_templated_chat_history.clear();
+            m_tokenized_chat_history.clear();
        }
    }
};
diff --git a/src/cpp/src/utils.cpp b/src/cpp/src/utils.cpp
index dcc73f2ea3..bc3bef8e0c 100644
--- a/src/cpp/src/utils.cpp
+++ b/src/cpp/src/utils.cpp
@@ -13,6 +13,8 @@
 #include "openvino/op/tanh.hpp"
 #include "openvino/op/transpose.hpp"
 
+#include "sampler.hpp"
+
 namespace ov {
 namespace genai {
 namespace utils {
@@ -265,6 +267,41 @@ ov::Core singleton_core() {
     return core;
 }
 
+size_t get_first_history_difference(const ov::Tensor& encoded_history, const std::vector<int64_t> tokenized_history, std::set<int64_t> stop_tokens) {
+    size_t idx = 0;
+    auto encoded_history_data = encoded_history.data<int64_t>();
+    while(idx < encoded_history.get_size() && idx < tokenized_history.size()) {
+        if (encoded_history_data[idx] != tokenized_history[idx])
+            break;
+        idx++;
+    }
+
+    // after the tokenizer's decode/encode round trip, encoded_history may lose its last (eos/stop) token
+    if ((idx == tokenized_history.size() && idx == encoded_history.get_size()) ||
+        (encoded_history.get_size() < tokenized_history.size() && idx == tokenized_history.size() - 1 && stop_tokens.find(tokenized_history.back()) != stop_tokens.end()))
+        return SIZE_MAX;
+    else
+        return idx;
+}
+
+void trim_kv_cache(ov::InferRequest request, uint64_t remove_from_end) {
+    // TODO: add handling for the case with LoRA adapters enabled
+    auto states = request.query_state();
+    for (auto& state : states) {
+        ov::Tensor old_tensor = state.get_state();
+        // [BATCH_SIZE, num_kv_heads, seq_len, head_size]
+        auto shape = old_tensor.get_shape();
+        shape[2] -= remove_from_end;
+
+        ov::Coordinate new_shape_begin{0, 0, 0, 0};
+        ov::Coordinate new_shape_end{shape};
+
+        auto new_tensor = ov::Tensor(old_tensor, new_shape_begin, new_shape_end);
+
+        state.set_state(new_tensor);
+    }
+}
+
 }  // namespace utils
 }  // namespace genai
 }  // namespace ov
diff --git a/src/cpp/src/utils.hpp b/src/cpp/src/utils.hpp
index 9adc46c87a..bbb51174a3 100644
--- a/src/cpp/src/utils.hpp
+++ b/src/cpp/src/utils.hpp
@@ -12,6 +12,12 @@ namespace ov {
 namespace genai {
 namespace utils {
 
+enum class GenerationChatInputsType {
+    UNDEF = 0, // Default value, type of inputs is not defined
+    STRING = 1, // Type of inputs is StringInputs
+    ENCODED_INPUTS = 2, // Type of inputs is EncodedInputs
+};
+
 Tensor init_attention_mask(const Tensor& position_ids);
 
 void print_tensor(const ov::Tensor& tensor);
@@ -66,6 +72,10 @@
 void slice_matmul_statefull_model(std::shared_ptr<ov::Model> model);
 
 ov::Core singleton_core();
 
+size_t get_first_history_difference(const ov::Tensor& encoded_history, const std::vector<int64_t> tokenized_history, std::set<int64_t> stop_tokens);
+
+void trim_kv_cache(ov::InferRequest request, uint64_t remove_from_end);
+
 }  // namespace utils
 }  // namespace genai
 }  // namespace ov
diff --git a/src/cpp/src/visual_language/inputs_embedder.cpp b/src/cpp/src/visual_language/inputs_embedder.cpp
index e823130642..0a67e8e01e 100644
--- a/src/cpp/src/visual_language/inputs_embedder.cpp
+++ b/src/cpp/src/visual_language/inputs_embedder.cpp
@@ -38,8 +38,11 @@ class InputsEmbedder::IInputsEmbedder {
     ChatHistory m_history;
     // Templated chat history
     std::string m_templated_chat_history;
-    // Whether we have computed some inputs already
-    bool m_is_cache_empty = true;
+    // Tokenized chat history
+    std::vector<int64_t> m_tokenized_chat_history;
+    // The number of elements that need to be removed from the end of the KV cache;
+    // the removed elements will be added back to input_ids
+    size_t m_to_remove_from_hist = 0;
 
 public:
     virtual ov::Tensor get_inputs_embeds(const std::string& prompt, const std::vector<ov::Tensor>& images) = 0;
@@ -52,12 +55,26 @@ class InputsEmbedder::IInputsEmbedder {
         return m_tokenizer;
     }
 
+    std::vector<int64_t> get_tokenized_chat_history() const {
+        return m_tokenized_chat_history;
+    }
+
+    size_t get_amount_to_remove_from_hist() const {
+        return m_to_remove_from_hist;
+    }
+
+    void update_tokenized_chat_history(std::vector<int64_t> encoded_result) {
+        std::copy(encoded_result.begin(), encoded_result.end(), std::back_inserter(m_tokenized_chat_history));
+        m_to_remove_from_hist = 0;
+    }
+
     virtual void start_chat(const std::string& system_message) {
         m_is_chat_conversation = true;
-        if (!m_is_cache_empty) {
+        m_to_remove_from_hist = 0;
+        if (!m_tokenized_chat_history.empty()) {
             m_history.clear();
             m_templated_chat_history.clear();
-            m_is_cache_empty = true;
+            m_tokenized_chat_history.clear();
         }
         if (system_message.empty()) {
             return;
@@ -76,10 +93,11 @@ class InputsEmbedder::IInputsEmbedder {
     virtual void finish_chat() {
         m_is_chat_conversation = false;
-        m_is_cache_empty = true;
+        m_to_remove_from_hist = 0;
 
         m_history.clear();
         m_templated_chat_history.clear();
+        m_tokenized_chat_history.clear();
     }
 
 protected:
@@ -91,7 +109,7 @@ class InputsEmbedder::IInputsEmbedder {
         m_vlm_config{vlm_config},
         m_vision_encoder(model_dir, m_vlm_config.model_type, device, device_config),
         m_embedding(model_dir, m_vlm_config.scale_emb, device, device_config),
-        m_tokenizer{model_dir.string(), device_config} { }
+        m_tokenizer{model_dir, device_config} { }
 
     IInputsEmbedder(
         const VLMConfig& vlm_config,
@@ -118,6 +136,7 @@ class InputsEmbedder::IInputsEmbedder {
         ),
         m_tokenizer(tokenizer) { }
 
+
     ov::Tensor get_encoded_input_ids(const std::string& prompt, const std::string& chat_template_fallback = "") {
         ov::Tensor encoded_input_ids;
         if (m_is_chat_conversation) {
@@ -139,19 +158,35 @@ class InputsEmbedder::IInputsEmbedder {
                 new_templated_chat_history = m_tokenizer.apply_chat_template(m_history, add_generation_prompt, chat_template_fallback);
             }
             ov::Tensor new_chat_tokens = m_tokenizer.encode(new_templated_chat_history).input_ids;
-            if (m_is_cache_empty) {
+            TokenizedInputs prev_chat_tokens = m_tokenizer.encode(m_templated_chat_history);
+
+            // Some combinations of symbols can be encoded by the tokenizer in different ways.
+            // If we hit a sequence with such a combination, we cannot reliably subtract the old history from the new one,
+            // so find the trusted (matching) part of the history and use it on the next step.
+            size_t last_same_hist_token = 0;
+            if (!m_tokenized_chat_history.empty()) {
+                std::set<int64_t> stop_tokens = {m_tokenizer.get_eos_token_id()};
+                last_same_hist_token = ov::genai::utils::get_first_history_difference(prev_chat_tokens.input_ids, m_tokenized_chat_history, stop_tokens);
+            }
+
+            if (m_tokenized_chat_history.empty()) {
                 encoded_input_ids = new_chat_tokens;
-                // after first `get_inputs_embeds` is called, we supposed LLM is inferred and cache is not empty
-                m_is_cache_empty = false;
+            } else if (last_same_hist_token != SIZE_MAX) {
+                m_to_remove_from_hist = m_tokenized_chat_history.size() - 1 - last_same_hist_token;
+
+                ov::Tensor new_tensor = ov::Tensor(new_chat_tokens.get_element_type(),
+                                                   {1, new_chat_tokens.get_shape().at(1) - last_same_hist_token},
+                                                   new_chat_tokens.data<int64_t>() + last_same_hist_token);
+                encoded_input_ids = new_tensor;
             } else {
-                TokenizedInputs prev_chat_tokens = m_tokenizer.encode(
-                    m_templated_chat_history
-                );
                 encoded_input_ids = utils::subtract_chat_tokenized_inputs(
                     {new_chat_tokens}, prev_chat_tokens
                 ).input_ids;
             }
             m_templated_chat_history = std::move(new_templated_chat_history);
+            m_tokenized_chat_history.clear();
+            std::copy(new_chat_tokens.data<int64_t>(), new_chat_tokens.data<int64_t>() + new_chat_tokens.get_size(),
+                      std::back_inserter(m_tokenized_chat_history));
         } else {
             encoded_input_ids = m_tokenizer.encode(prompt).input_ids;
         }
@@ -628,7 +663,6 @@ class InputsEmbedderLLaVA : public InputsEmbedder::IInputsEmbedder {
                 merged_idx++;
             }
         }
-
         return merged_embeds;
     }
 };
@@ -1123,6 +1157,18 @@ EmbeddingsModel InputsEmbedder::get_embedding_model() const {
     return m_impl->get_embedding_model();
 }
 
+std::vector<int64_t> InputsEmbedder::get_tokenized_chat_history() const {
+    return m_impl->get_tokenized_chat_history();
+}
+
+void InputsEmbedder::update_tokenized_chat_history(std::vector<int64_t> encoded_result) {
+    return m_impl->update_tokenized_chat_history(encoded_result);
+}
+
+size_t InputsEmbedder::get_amount_to_remove_from_hist() const {
+    return m_impl->get_amount_to_remove_from_hist();
+}
+
 Tokenizer InputsEmbedder::get_tokenizer() const {
     return m_impl->get_tokenizer();
 }
diff --git a/src/cpp/src/visual_language/inputs_embedder.hpp b/src/cpp/src/visual_language/inputs_embedder.hpp
index 2cc2e7dc2a..bd969302a4 100644
--- a/src/cpp/src/visual_language/inputs_embedder.hpp
+++ b/src/cpp/src/visual_language/inputs_embedder.hpp
@@ -39,6 +39,13 @@ class InputsEmbedder {
     // returns tokenizer
     Tokenizer get_tokenizer() const;
 
+    // returns tokenized chat history
+    std::vector<int64_t> get_tokenized_chat_history() const;
+    // adds new results to the tokenized chat history
+    void update_tokenized_chat_history(std::vector<int64_t> encoded_result);
+    // returns the number of elements that need to be removed from the end of the KV cache
+    size_t get_amount_to_remove_from_hist() const;
+
     // starts chat and adds optional system_message to chat history
     void start_chat(const std::string& system_message);
     // adds currently generated text to chat history
diff --git a/src/cpp/src/visual_language/pipeline.cpp b/src/cpp/src/visual_language/pipeline.cpp
index 5a39f4b743..427a5c9229 100644
--- a/src/cpp/src/visual_language/pipeline.cpp
+++ b/src/cpp/src/visual_language/pipeline.cpp
@@ -144,14 +144,22 @@ class ov::genai::VLMPipeline::VLMPipelineImpl {
 
         ov::Tensor inputs_embeds = m_inputs_embedder->get_inputs_embeds(prompt, rgbs);
 
+        auto to_remove_from_hist = m_inputs_embedder->get_amount_to_remove_from_hist();
+        if (to_remove_from_hist > 0) {
+            ov::genai::utils::trim_kv_cache(m_language, to_remove_from_hist);
+        }
+
         Sampler sampler = Sampler(m_tokenizer);
         std::vector<SequenceGroup::Ptr> requests;
         size_t request_id = 0;
         size_t block_size = 1; // not used
         bool enable_prefix_caching = false;
-        size_t history_size = m_language.get_tensor("attention_mask").get_shape().at(1);
+
+        auto tokenized_chat_history = m_inputs_embedder->get_tokenized_chat_history();
+        size_t history_size = m_language.get_tensor("attention_mask").get_shape().at(1) - to_remove_from_hist;
         size_t inputs_embeds_size = inputs_embeds.get_shape().at(1);
+
         ov::Tensor prompt_ids(ov::element::i64, { history_size + inputs_embeds_size });
         std::fill_n(prompt_ids.data<int64_t>(), prompt_ids.get_size(), 0);
@@ -176,10 +184,10 @@ class ov::genai::VLMPipeline::VLMPipelineImpl {
         OPENVINO_ASSERT((generation_config.is_greedy_decoding() || generation_config.is_multinomial() || !streamer_ptr),
                         "Currently streaming is possible only for greedy or multinomial decoding");
 
-        ov::Tensor new_atten_mask = ov::Tensor{ov::element::i64, { 1, history_size + inputs_embeds.get_shape()[1] }};
+        ov::Tensor new_atten_mask = ov::Tensor{ov::element::i64, { 1, history_size + inputs_embeds_size }};
         std::fill_n(new_atten_mask.data<int64_t>(), new_atten_mask.get_size(), 1);
 
-        ov::Tensor position_ids = ov::Tensor{ov::element::i64, { 1, inputs_embeds.get_shape()[1] }};
+        ov::Tensor position_ids = ov::Tensor{ov::element::i64, { 1, inputs_embeds_size }};
         std::iota(position_ids.data<int64_t>(), position_ids.data<int64_t>() + position_ids.get_size(), history_size);
 
         ov::genai::EncodedResults encoded_result;
@@ -200,6 +208,8 @@ class ov::genai::VLMPipeline::VLMPipelineImpl {
             m_language.reset_state();
             m_language.get_tensor("attention_mask").set_shape({1, 0});
         }
+
+        m_inputs_embedder->update_tokenized_chat_history(encoded_result.tokens[0]);
 
         return decoded;
     }
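// ---------------------------------------------------------------------------
// Reviewer note (illustration only, not part of the patch): a minimal
// standalone sketch of the history-matching idea behind
// utils::get_first_history_difference and the KV-cache trimming that follows
// from it. It uses plain std::vector<int64_t> in place of ov::Tensor and a
// hypothetical helper name, so it shows the intended behavior rather than the
// patch's actual API: SIZE_MAX means the stored token history is trusted;
// otherwise the returned index is the length of the trusted prefix, and
// everything after it would be trimmed from the KV cache and re-prompted.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <iostream>
#include <set>
#include <vector>

// Returns SIZE_MAX when the re-encoded history fully matches the stored token
// history (optionally missing a trailing stop token); otherwise returns the
// index of the first mismatch, i.e. the length of the trusted prefix.
size_t first_history_difference(const std::vector<int64_t>& encoded_history,
                                const std::vector<int64_t>& tokenized_history,
                                const std::set<int64_t>& stop_tokens) {
    size_t idx = 0;
    while (idx < encoded_history.size() && idx < tokenized_history.size() &&
           encoded_history[idx] == tokenized_history[idx]) {
        ++idx;
    }
    bool full_match = idx == encoded_history.size() && idx == tokenized_history.size();
    bool match_without_stop = encoded_history.size() < tokenized_history.size() &&
                              idx == tokenized_history.size() - 1 &&
                              stop_tokens.count(tokenized_history.back()) > 0;
    return (full_match || match_without_stop) ? SIZE_MAX : idx;
}

int main() {
    const int64_t eos = 2;
    std::vector<int64_t> tokenized_history = {1, 10, 11, 12, eos}; // what generate() stored
    std::vector<int64_t> re_encoded        = {1, 10, 11, 12};      // re-tokenized chat template, eos dropped
    size_t diff = first_history_difference(re_encoded, tokenized_history, {eos});
    if (diff == SIZE_MAX) {
        std::cout << "history is trusted: only the new prompt needs to be fed\n";
    } else {
        // Mirrors m_to_remove_from_hist in the VLM path: tokens after the
        // trusted prefix are trimmed from the KV cache and re-prompted.
        std::cout << "trusted prefix length: " << diff << "\n";
    }
}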