diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/service/adapter/ChatCompletionSettingsConversions.scala b/openai-core/src/main/scala/io/cequence/openaiscala/service/adapter/ChatCompletionSettingsConversions.scala
index 94ebc79d..ccc0f63f 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/service/adapter/ChatCompletionSettingsConversions.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/service/adapter/ChatCompletionSettingsConversions.scala
@@ -90,7 +90,7 @@ object ChatCompletionSettingsConversions {
       settings.response_format_type.isDefined && settings.response_format_type.get != ChatCompletionResponseFormatType.text,
       _.copy(response_format_type = None),
       Some(
-        "O1 models don't support json object/schema response format, converting to None."
+        "O1 (preview) models don't support json object/schema response format, converting to None."
       ),
       warning = true
     )
diff --git a/openai-examples/src/main/scala/io/cequence/openaiscala/examples/CreateChatCompletionWithO1.scala b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/CreateChatCompletionWithO1.scala
new file mode 100644
index 00000000..a95eed99
--- /dev/null
+++ b/openai-examples/src/main/scala/io/cequence/openaiscala/examples/CreateChatCompletionWithO1.scala
@@ -0,0 +1,33 @@
+package io.cequence.openaiscala.examples
+
+import io.cequence.openaiscala.domain._
+import io.cequence.openaiscala.domain.settings.{
+  ChatCompletionResponseFormatType,
+  CreateChatCompletionSettings
+}
+
+import scala.concurrent.Future
+
+object CreateChatCompletionWithO1 extends Example {
+
+  private val messages = Seq(
+    // a system message still works for O1 models, but going forward DeveloperMessage should be used instead
+    SystemMessage("You are a helpful weather assistant who likes to make jokes."),
+    UserMessage("What is the weather like in Norway per major cities? Answer in json format.")
+  )
+
+  override protected def run: Future[_] =
+    service
+      .createChatCompletion(
+        messages = messages,
+        settings = CreateChatCompletionSettings(
+          model = ModelId.o1,
+          temperature = Some(0.1),
+          response_format_type = Some(ChatCompletionResponseFormatType.json_object),
+          max_tokens = Some(4000)
+        )
+      )
+      .map { content =>
+        printMessageContent(content)
+      }
+}
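For readers unfamiliar with the conversion mechanism the first hunk touches, below is a minimal, self-contained sketch of the predicate/convert/message/warning pattern it follows. The names here (`FieldConversion`, `Settings`, `ResponseFormatType`, `applyConversions`) are simplified stand-ins invented for illustration, not the library's actual API; only the shape of the rule and the warning text are taken from the diff.

```scala
// Illustrative sketch only: shows how a field-level settings conversion
// like the one changed above can be modeled. Not the library's real types.
object O1SettingsConversionSketch extends App {

  // Simplified stand-ins for the library's settings types (assumption)
  sealed trait ResponseFormatType
  case object Text extends ResponseFormatType
  case object JsonObject extends ResponseFormatType

  case class Settings(
    model: String,
    responseFormatType: Option[ResponseFormatType]
  )

  // One conversion rule: a predicate deciding when it fires, a rewrite of
  // the settings, an optional message, and a flag marking it as a warning
  case class FieldConversion(
    fires: Settings => Boolean,
    convert: Settings => Settings,
    message: Option[String],
    warning: Boolean
  )

  // The rule from the diff: O1 (preview) models don't support a
  // json object/schema response format, so the format is dropped
  val dropJsonResponseFormat = FieldConversion(
    fires = s => s.responseFormatType.exists(_ != Text),
    convert = _.copy(responseFormatType = None),
    message = Some(
      "O1 (preview) models don't support json object/schema response format, converting to None."
    ),
    warning = true
  )

  // Apply each rule in order, logging its message when it fires
  def applyConversions(conversions: Seq[FieldConversion])(settings: Settings): Settings =
    conversions.foldLeft(settings) { (s, c) =>
      if (c.fires(s)) {
        c.message.foreach(m => println(if (c.warning) s"WARN: $m" else m))
        c.convert(s)
      } else s
    }

  val original = Settings("o1-preview", Some(JsonObject))
  val converted = applyConversions(Seq(dropJsonResponseFormat))(original)
  println(converted) // Settings(o1-preview,None)
}
```

Running the sketch prints the warning and yields settings with the response format cleared, mirroring how the adapter downgrades an unsupported json_object request, such as the one in the new CreateChatCompletionWithO1 example, before it reaches the API.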