From 1ed79b13d5b8a0c39c4947946898de65e0cdeaae Mon Sep 17 00:00:00 2001 From: Mark Ericksen Date: Thu, 23 Jan 2025 08:05:29 -0700 Subject: [PATCH] expanded documentation --- lib/chains/llm_chain.ex | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/lib/chains/llm_chain.ex b/lib/chains/llm_chain.ex index f1646e52..56e805a1 100644 --- a/lib/chains/llm_chain.ex +++ b/lib/chains/llm_chain.ex @@ -338,6 +338,21 @@ defmodule LangChain.Chains.LLMChain do replaced before running against the configured LLM. This is helpful, for example, when a different system prompt is needed for Anthropic vs OpenAI. + ## Mode Examples + + **Use Case**: A chat with an LLM where functions are available to the LLM: + + LLMChain.run(chain, mode: :while_needs_response) + + This will execute any LLM-called functions, returning the result to the LLM, + and giving it a chance to respond to the results. + + **Use Case**: An application that exposes a function to the LLM, but we want + to stop once the function is successfully executed. When errors are + encountered, the LLM should be given error feedback and allowed to try again. + + LLMChain.run(chain, mode: :until_success) + """ @spec run(t(), Keyword.t()) :: {:ok, t()} | {:error, t(), LangChainError.t()} def run(chain, opts \\ [])