diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 6026ce13..ffa789c2 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -7,6 +7,10 @@ on:
- 'integrated/**'
- 'stl-preview-head/**'
- 'stl-preview-base/**'
+ pull_request:
+ branches-ignore:
+ - 'stl-preview-head/**'
+ - 'stl-preview-base/**'
jobs:
lint:
diff --git a/.github/workflows/publish-gem.yml b/.github/workflows/publish-gem.yml
index b3a5a647..b5a623ca 100644
--- a/.github/workflows/publish-gem.yml
+++ b/.github/workflows/publish-gem.yml
@@ -1,5 +1,5 @@
# Workflow for re-publishing to rubygems.org in case it failed for some reason.
-# You can run this workflow by navigating to https://www.github.com/openai/openai-python/actions/workflows/publish-gem.yml
+# You can run this workflow by navigating to https://www.github.com/openai/openai-ruby/actions/workflows/publish-gem.yml
name: Publish Gem
on:
workflow_dispatch:
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 6538ca91..6d78745c 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.8.0"
+ ".": "0.9.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index df602bb9..c67f2762 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 109
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-3ae9c18dd7ccfc3ac5206f24394665f563a19015cfa8847b2801a2694d012abc.yml
-openapi_spec_hash: 48175b03b58805cd5c80793c66fd54e5
-config_hash: 4caff63b74a41f71006987db702f2918
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-9e41d2d5471d2c28bff0d616f4476f5b0e6c541ef4cb51bdaaef5fdf5e13c8b2.yml
+openapi_spec_hash: 86f765e18d00e32cf2ce9db7ab84d946
+config_hash: dc5515e257676a27cb1ace1784aa92b3
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fa2951ea..299d5e37 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,26 @@
# Changelog
+## 0.9.0 (2025-06-17)
+
+Full Changelog: [v0.8.0...v0.9.0](https://github.com/openai/openai-ruby/compare/v0.8.0...v0.9.0)
+
+### Features
+
+* **api:** add reusable prompt IDs ([72e35ad](https://github.com/openai/openai-ruby/commit/72e35ad4a677a70a98db291a20aa212e53c367ea))
+* **api:** manual updates ([a4bcab7](https://github.com/openai/openai-ruby/commit/a4bcab736d59404c61b148a468d3bf0bc570fa39))
+
+
+### Chores
+
+* **ci:** enable for pull requests ([e8dfcf9](https://github.com/openai/openai-ruby/commit/e8dfcf97f3af426d3ad83472fa6eaac718acbd4d))
+* **ci:** link to correct github repo ([7b34316](https://github.com/openai/openai-ruby/commit/7b3431612ea66d123bc114ec55bdf07f6081439e))
+
+
+### Documentation
+
+* structured outputs in README ([#723](https://github.com/openai/openai-ruby/issues/723)) ([7212e61](https://github.com/openai/openai-ruby/commit/7212e61ee2fb9ebff0576b0bff4424f43ae54af2))
+* use image edit example in readme ([#722](https://github.com/openai/openai-ruby/issues/722)) ([eaa5055](https://github.com/openai/openai-ruby/commit/eaa5055eebca620c261c749ae4945845532c012d))
+
## 0.8.0 (2025-06-10)
Full Changelog: [v0.7.0...v0.8.0](https://github.com/openai/openai-ruby/compare/v0.7.0...v0.8.0)
diff --git a/Gemfile.lock b/Gemfile.lock
index 1316055a..48d1c60f 100644
--- a/Gemfile.lock
+++ b/Gemfile.lock
@@ -11,7 +11,7 @@ GIT
PATH
remote: .
specs:
- openai (0.8.0)
+ openai (0.9.0)
connection_pool
GEM
diff --git a/README.md b/README.md
index aa388af0..9f277784 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
```ruby
-gem "openai", "~> 0.8.0"
+gem "openai", "~> 0.9.0"
```
@@ -96,15 +96,126 @@ file_object = openai.files.create(file: Pathname("input.jsonl"), purpose: "fine-
# Alternatively, pass file contents or a `StringIO` directly:
file_object = openai.files.create(file: File.read("input.jsonl"), purpose: "fine-tune")
+puts(file_object.id)
+
# Or, to control the filename and/or content type:
-file = OpenAI::FilePart.new(File.read("input.jsonl"), filename: "input.jsonl", content_type: "…")
-file_object = openai.files.create(file: file, purpose: "fine-tune")
+image = OpenAI::FilePart.new(Pathname("dog.jpg"), content_type: "image/jpeg")
+edited = openai.images.edit(
+  prompt: "make this image look like a painting",
+  model: "gpt-image-1",
+  size: "1024x1024",
+  image: image
+)
-puts(file_object.id)
+puts(edited.data.first)
```
Note that you can also pass a raw `IO` descriptor, but this disables retries, as the library can't be sure if the descriptor is a file or pipe (which cannot be rewound).
+### [Structured outputs](https://platform.openai.com/docs/guides/structured-outputs) and function calling
+
+This SDK ships with the `OpenAI::BaseModel`, `OpenAI::ArrayOf`, `OpenAI::EnumOf`, and `OpenAI::UnionOf` helpers for defining the JSON schemas used in structured outputs and function calling requests (a function-calling sketch appears at the end of this section).
+
+```ruby
+# Participant model with an optional last_name and an enum for status
+class Participant < OpenAI::BaseModel
+ required :first_name, String
+ required :last_name, String, nil?: true
+ required :status, OpenAI::EnumOf[:confirmed, :unconfirmed, :tentative]
+end
+
+# CalendarEvent model with a list of participants.
+class CalendarEvent < OpenAI::BaseModel
+ required :name, String
+ required :date, String
+ required :participants, OpenAI::ArrayOf[Participant]
+end
+
+
+client = OpenAI::Client.new
+
+response = client.responses.create(
+ model: "gpt-4o-2024-08-06",
+ input: [
+ {role: :system, content: "Extract the event information."},
+ {
+ role: :user,
+ content: <<~CONTENT
+ Alice Shah and Lena are going to a science fair on Friday at 123 Main St. in San Diego.
+ They have also invited Jasper Vellani and Talia Groves - Jasper has not responded and Talia said she is thinking about it.
+ CONTENT
+ }
+ ],
+ text: CalendarEvent
+)
+
+response
+ .output
+ .flat_map { _1.content }
+ # filter out refusal responses
+ .grep_v(OpenAI::Models::Responses::ResponseOutputRefusal)
+ .each do |content|
+ # parsed is an instance of `CalendarEvent`
+ pp(content.parsed)
+ end
+```
+
+See the [examples](https://github.com/openai/openai-ruby/tree/main/examples) directory for more examples of helper usage.
+
+To make the equivalent request using the raw JSON schema format, you would do the following:
+
+```ruby
+response = client.responses.create(
+ model: "gpt-4o-2024-08-06",
+ input: [
+ {role: :system, content: "Extract the event information."},
+ {
+ role: :user,
+ content: "..."
+ }
+ ],
+ text: {
+ format: {
+ type: :json_schema,
+ name: "CalendarEvent",
+ strict: true,
+ schema: {
+ type: "object",
+ properties: {
+ name: {type: "string"},
+ date: {type: "string"},
+ participants: {
+ type: "array",
+ items: {
+ type: "object",
+ properties: {
+ first_name: {type: "string"},
+ last_name: {type: %w[string null]},
+ status: {type: "string", enum: %w[confirmed unconfirmed tentative]}
+ },
+ required: %w[first_name last_name status],
+ additionalProperties: false
+ }
+ }
+ },
+ required: %w[name date participants],
+ additionalProperties: false
+ }
+ }
+ }
+)
+```
+
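+The same helpers drive function calling. Below is a minimal sketch in the spirit of `examples/structured_outputs_responses_function_calling.rb` in this repo (see that file for the canonical version; the model name and prompt here are illustrative):
+
+```ruby
+class GetWeather < OpenAI::BaseModel
+  required :location, String
+end
+
+client = OpenAI::Client.new
+
+response = client.responses.create(
+  model: "gpt-4o-2024-08-06",
+  input: [{role: :user, content: "What is the weather like in Berlin today?"}],
+  # the BaseModel class doubles as the function tool definition
+  tools: [GetWeather]
+)
+
+response
+  .output
+  .each do |output|
+    # parsed is an instance of `GetWeather`
+    pp(output.parsed)
+  end
+```
+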
### Handling errors
When the library is unable to connect to the API, or if the API returns a non-success status code (i.e., 4xx or 5xx response), a subclass of `OpenAI::Errors::APIError` will be thrown:
diff --git a/examples/structured_outputs_chat_completions.rb b/examples/structured_outputs_chat_completions.rb
index 3debd078..fa6ec9cd 100755
--- a/examples/structured_outputs_chat_completions.rb
+++ b/examples/structured_outputs_chat_completions.rb
@@ -51,5 +51,6 @@ class CalendarEvent < OpenAI::BaseModel
.choices
.reject { _1.message.refusal }
.each do |choice|
+ # parsed is an instance of `CalendarEvent`
pp(choice.message.parsed)
end
diff --git a/examples/structured_outputs_chat_completions_function_calling.rb b/examples/structured_outputs_chat_completions_function_calling.rb
index 847781a3..456c22a2 100755
--- a/examples/structured_outputs_chat_completions_function_calling.rb
+++ b/examples/structured_outputs_chat_completions_function_calling.rb
@@ -27,5 +27,6 @@ class GetWeather < OpenAI::BaseModel
.reject { _1.message.refusal }
.flat_map { _1.message.tool_calls.to_a }
.each do |tool_call|
+ # parsed is an instance of `GetWeather`
pp(tool_call.function.parsed)
end
diff --git a/examples/structured_outputs_responses.rb b/examples/structured_outputs_responses.rb
index 9fedcfd7..fd8a7b5a 100755
--- a/examples/structured_outputs_responses.rb
+++ b/examples/structured_outputs_responses.rb
@@ -51,5 +51,6 @@ class CalendarEvent < OpenAI::BaseModel
# filter out refusal responses
.grep_v(OpenAI::Models::Responses::ResponseOutputRefusal)
.each do |content|
+ # parsed is an instance of `CalendarEvent`
pp(content.parsed)
end
diff --git a/examples/structured_outputs_responses_function_calling.rb b/examples/structured_outputs_responses_function_calling.rb
index 78b79ce7..dd92d350 100755
--- a/examples/structured_outputs_responses_function_calling.rb
+++ b/examples/structured_outputs_responses_function_calling.rb
@@ -24,5 +24,6 @@ class GetWeather < OpenAI::BaseModel
response
.output
.each do |output|
+ # parsed is an instance of `GetWeather`
pp(output.parsed)
end
diff --git a/lib/openai.rb b/lib/openai.rb
index 296a5f3d..60ae2c65 100644
--- a/lib/openai.rb
+++ b/lib/openai.rb
@@ -416,6 +416,7 @@
require_relative "openai/models/responses/response_output_refusal"
require_relative "openai/models/responses/response_output_text"
require_relative "openai/models/responses/response_output_text_annotation_added_event"
+require_relative "openai/models/responses/response_prompt"
require_relative "openai/models/responses/response_queued_event"
require_relative "openai/models/responses/response_reasoning_delta_event"
require_relative "openai/models/responses/response_reasoning_done_event"
diff --git a/lib/openai/models/chat/chat_completion.rb b/lib/openai/models/chat/chat_completion.rb
index b1a17a6f..79d3e56e 100644
--- a/lib/openai/models/chat/chat_completion.rb
+++ b/lib/openai/models/chat/chat_completion.rb
@@ -213,6 +213,7 @@ module ServiceTier
AUTO = :auto
DEFAULT = :default
FLEX = :flex
+ SCALE = :scale
# @!method self.values
# @return [Array]
diff --git a/lib/openai/models/chat/chat_completion_chunk.rb b/lib/openai/models/chat/chat_completion_chunk.rb
index 63c1109e..bdce048f 100644
--- a/lib/openai/models/chat/chat_completion_chunk.rb
+++ b/lib/openai/models/chat/chat_completion_chunk.rb
@@ -396,6 +396,7 @@ module ServiceTier
AUTO = :auto
DEFAULT = :default
FLEX = :flex
+ SCALE = :scale
# @!method self.values
# @return [Array]
diff --git a/lib/openai/models/chat/completion_create_params.rb b/lib/openai/models/chat/completion_create_params.rb
index 97c5a09a..a89fcfb7 100644
--- a/lib/openai/models/chat/completion_create_params.rb
+++ b/lib/openai/models/chat/completion_create_params.rb
@@ -569,6 +569,7 @@ module ServiceTier
AUTO = :auto
DEFAULT = :default
FLEX = :flex
+ SCALE = :scale
# @!method self.values
# @return [Array]
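The new `:scale` tier is requested like any other service tier. A minimal sketch (the model name is illustrative, and tier availability depends on your account):

```ruby
client = OpenAI::Client.new

chat_completion = client.chat.completions.create(
  model: "gpt-4o",
  messages: [{role: :user, content: "Say this is a test"}],
  service_tier: :scale # newly added alongside :auto, :default, and :flex
)
puts(chat_completion.service_tier)
```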
diff --git a/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb b/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb
index 6ffbdf4d..e22c36d8 100644
--- a/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb
+++ b/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb
@@ -6,76 +6,41 @@ module FineTuning
module Checkpoints
# @see OpenAI::Resources::FineTuning::Checkpoints::Permissions#retrieve
class PermissionRetrieveResponse < OpenAI::Internal::Type::BaseModel
- # @!attribute data
+ # @!attribute id
+ # The permission identifier, which can be referenced in the API endpoints.
#
- # @return [Array]
- required :data,
- -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data] }
+ # @return [String]
+ required :id, String
- # @!attribute has_more
+ # @!attribute created_at
+ # The Unix timestamp (in seconds) for when the permission was created.
#
- # @return [Boolean]
- required :has_more, OpenAI::Internal::Type::Boolean
+ # @return [Integer]
+ required :created_at, Integer
# @!attribute object
+ # The object type, which is always "checkpoint.permission".
#
- # @return [Symbol, :list]
- required :object, const: :list
+ # @return [Symbol, :"checkpoint.permission"]
+ required :object, const: :"checkpoint.permission"
- # @!attribute first_id
+ # @!attribute project_id
+ # The project identifier that the permission is for.
#
- # @return [String, nil]
- optional :first_id, String, nil?: true
+ # @return [String]
+ required :project_id, String
- # @!attribute last_id
+ # @!method initialize(id:, created_at:, project_id:, object: :"checkpoint.permission")
+ # The `checkpoint.permission` object represents a permission for a fine-tuned
+ # model checkpoint.
#
- # @return [String, nil]
- optional :last_id, String, nil?: true
-
- # @!method initialize(data:, has_more:, first_id: nil, last_id: nil, object: :list)
- # @param data [Array]
- # @param has_more [Boolean]
- # @param first_id [String, nil]
- # @param last_id [String, nil]
- # @param object [Symbol, :list]
-
- class Data < OpenAI::Internal::Type::BaseModel
- # @!attribute id
- # The permission identifier, which can be referenced in the API endpoints.
- #
- # @return [String]
- required :id, String
-
- # @!attribute created_at
- # The Unix timestamp (in seconds) for when the permission was created.
- #
- # @return [Integer]
- required :created_at, Integer
-
- # @!attribute object
- # The object type, which is always "checkpoint.permission".
- #
- # @return [Symbol, :"checkpoint.permission"]
- required :object, const: :"checkpoint.permission"
-
- # @!attribute project_id
- # The project identifier that the permission is for.
- #
- # @return [String]
- required :project_id, String
-
- # @!method initialize(id:, created_at:, project_id:, object: :"checkpoint.permission")
- # The `checkpoint.permission` object represents a permission for a fine-tuned
- # model checkpoint.
- #
- # @param id [String] The permission identifier, which can be referenced in the API endpoints.
- #
- # @param created_at [Integer] The Unix timestamp (in seconds) for when the permission was created.
- #
- # @param project_id [String] The project identifier that the permission is for.
- #
- # @param object [Symbol, :"checkpoint.permission"] The object type, which is always "checkpoint.permission".
- end
+ # @param id [String] The permission identifier, which can be referenced in the API endpoints.
+ #
+ # @param created_at [Integer] The Unix timestamp (in seconds) for when the permission was created.
+ #
+ # @param project_id [String] The project identifier that the permission is for.
+ #
+ # @param object [Symbol, :"checkpoint.permission"] The object type, which is always "checkpoint.permission".
end
end
end
diff --git a/lib/openai/models/fine_tuning/job_create_params.rb b/lib/openai/models/fine_tuning/job_create_params.rb
index 193bc2d0..1d4258c1 100644
--- a/lib/openai/models/fine_tuning/job_create_params.rb
+++ b/lib/openai/models/fine_tuning/job_create_params.rb
@@ -31,7 +31,8 @@ class JobCreateParams < OpenAI::Internal::Type::BaseModel
# [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
# format.
#
- # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+ # See the
+ # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
# for more details.
#
# @return [String]
@@ -100,7 +101,8 @@ class JobCreateParams < OpenAI::Internal::Type::BaseModel
# Your dataset must be formatted as a JSONL file. You must upload your file with
# the purpose `fine-tune`.
#
- # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+ # See the
+ # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
# for more details.
#
# @return [String, nil]
diff --git a/lib/openai/models/image_edit_params.rb b/lib/openai/models/image_edit_params.rb
index 6162af1a..833d7a5a 100644
--- a/lib/openai/models/image_edit_params.rb
+++ b/lib/openai/models/image_edit_params.rb
@@ -61,6 +61,22 @@ class ImageEditParams < OpenAI::Internal::Type::BaseModel
# @return [Integer, nil]
optional :n, Integer, nil?: true
+ # @!attribute output_compression
+ # The compression level (0-100%) for the generated images. This parameter is only
+ # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ # defaults to 100.
+ #
+ # @return [Integer, nil]
+ optional :output_compression, Integer, nil?: true
+
+ # @!attribute output_format
+ # The format in which the generated images are returned. This parameter is only
+ # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+ # default value is `png`.
+ #
+ # @return [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil]
+ optional :output_format, enum: -> { OpenAI::ImageEditParams::OutputFormat }, nil?: true
+
# @!attribute quality
# The quality of the image that will be generated. `high`, `medium` and `low` are
# only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
@@ -94,7 +110,7 @@ class ImageEditParams < OpenAI::Internal::Type::BaseModel
# @return [String, nil]
optional :user, String
- # @!method initialize(image:, prompt:, background: nil, mask: nil, model: nil, n: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
+ # @!method initialize(image:, prompt:, background: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
# Some parameter documentations has been truncated, see
# {OpenAI::Models::ImageEditParams} for more details.
#
@@ -110,6 +126,10 @@ class ImageEditParams < OpenAI::Internal::Type::BaseModel
#
# @param n [Integer, nil] The number of images to generate. Must be between 1 and 10.
#
+ # @param output_compression [Integer, nil] The compression level (0-100%) for the generated images. This parameter
+ #
+ # @param output_format [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is
+ #
# @param quality [Symbol, OpenAI::Models::ImageEditParams::Quality, nil] The quality of the image that will be generated. `high`, `medium` and `low` are
#
# @param response_format [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or `
@@ -174,6 +194,20 @@ module Model
# @return [Array(String, Symbol, OpenAI::Models::ImageModel)]
end
+ # The format in which the generated images are returned. This parameter is only
+ # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+ # default value is `png`.
+ module OutputFormat
+ extend OpenAI::Internal::Type::Enum
+
+ PNG = :png
+ JPEG = :jpeg
+ WEBP = :webp
+
+ # @!method self.values
+ # @return [Array]
+ end
+
# The quality of the image that will be generated. `high`, `medium` and `low` are
# only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
# Defaults to `auto`.
diff --git a/lib/openai/models/responses/response.rb b/lib/openai/models/responses/response.rb
index 38b6465d..0fbf7be3 100644
--- a/lib/openai/models/responses/response.rb
+++ b/lib/openai/models/responses/response.rb
@@ -32,15 +32,14 @@ class Response < OpenAI::Internal::Type::BaseModel
required :incomplete_details, -> { OpenAI::Responses::Response::IncompleteDetails }, nil?: true
# @!attribute instructions
- # Inserts a system (or developer) message as the first item in the model's
- # context.
+ # A system (or developer) message inserted into the model's context.
#
# When using along with `previous_response_id`, the instructions from a previous
# response will not be carried over to the next response. This makes it simple to
# swap out system (or developer) messages in new responses.
#
- # @return [String, nil]
- required :instructions, String, nil?: true
+ # @return [String, Array, nil]
+ required :instructions, union: -> { OpenAI::Responses::Response::Instructions }, nil?: true
# @!attribute metadata
# Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -156,6 +155,13 @@ class Response < OpenAI::Internal::Type::BaseModel
# @return [String, nil]
optional :previous_response_id, String, nil?: true
+ # @!attribute prompt
+ # Reference to a prompt template and its variables.
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+ #
+ # @return [OpenAI::Models::Responses::ResponsePrompt, nil]
+ optional :prompt, -> { OpenAI::Responses::ResponsePrompt }, nil?: true
+
# @!attribute reasoning
# **o-series models only**
#
@@ -231,7 +237,7 @@ class Response < OpenAI::Internal::Type::BaseModel
# @return [String, nil]
optional :user, String
- # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, previous_response_id: nil, reasoning: nil, service_tier: nil, status: nil, text: nil, truncation: nil, usage: nil, user: nil, object: :response)
+ # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, status: nil, text: nil, truncation: nil, usage: nil, user: nil, object: :response)
# Some parameter documentations has been truncated, see
# {OpenAI::Models::Responses::Response} for more details.
#
@@ -243,7 +249,7 @@ class Response < OpenAI::Internal::Type::BaseModel
#
# @param incomplete_details [OpenAI::Models::Responses::Response::IncompleteDetails, nil] Details about why the response is incomplete.
#
- # @param instructions [String, nil] Inserts a system (or developer) message as the first item in the model's context
+ # @param instructions [String, Array, nil] A system (or developer) message inserted into the model's context.
#
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
#
@@ -267,6 +273,8 @@ class Response < OpenAI::Internal::Type::BaseModel
#
# @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
#
+ # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
+ #
# @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
#
# @param service_tier [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
@@ -310,6 +318,32 @@ module Reason
end
end
+ # A system (or developer) message inserted into the model's context.
+ #
+ # When using along with `previous_response_id`, the instructions from a previous
+ # response will not be carried over to the next response. This makes it simple to
+ # swap out system (or developer) messages in new responses.
+ #
+ # @see OpenAI::Models::Responses::Response#instructions
+ module Instructions
+ extend OpenAI::Internal::Type::Union
+
+ # A text input to the model, equivalent to a text input with the
+ # `developer` role.
+ variant String
+
+ # A list of one or many input items to the model, containing
+ # different content types.
+ variant -> { OpenAI::Models::Responses::Response::Instructions::ResponseInputItemArray }
+
+ # @!method self.variants
+ # @return [Array(String, Array)]
+
+ # @type [OpenAI::Internal::Type::Converter]
+ ResponseInputItemArray =
+ OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Responses::ResponseInputItem }]
+ end
+
# How the model should select which tool (or tools) to use when generating a
# response. See the `tools` parameter to see how to specify which tools the model
# can call.
@@ -364,6 +398,7 @@ module ServiceTier
AUTO = :auto
DEFAULT = :default
FLEX = :flex
+ SCALE = :scale
# @!method self.values
# @return [Array]
diff --git a/lib/openai/models/responses/response_create_params.rb b/lib/openai/models/responses/response_create_params.rb
index 7f26b349..19516f28 100644
--- a/lib/openai/models/responses/response_create_params.rb
+++ b/lib/openai/models/responses/response_create_params.rb
@@ -64,8 +64,7 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel
nil?: true
# @!attribute instructions
- # Inserts a system (or developer) message as the first item in the model's
- # context.
+ # A system (or developer) message inserted into the model's context.
#
# When using along with `previous_response_id`, the instructions from a previous
# response will not be carried over to the next response. This makes it simple to
@@ -107,6 +106,13 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel
# @return [String, nil]
optional :previous_response_id, String, nil?: true
+ # @!attribute prompt
+ # Reference to a prompt template and its variables.
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+ #
+ # @return [OpenAI::Models::Responses::ResponsePrompt, nil]
+ optional :prompt, -> { OpenAI::Responses::ResponsePrompt }, nil?: true
+
# @!attribute reasoning
# **o-series models only**
#
@@ -226,7 +232,7 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel
# @return [String, nil]
optional :user, String
- # @!method initialize(input:, model:, background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+ # @!method initialize(input:, model:, background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
# Some parameter documentations has been truncated, see
# {OpenAI::Models::Responses::ResponseCreateParams} for more details.
#
@@ -238,7 +244,7 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel
#
# @param include [Array, nil] Specify additional output data to include in the model response. Currently
#
- # @param instructions [String, nil] Inserts a system (or developer) message as the first item in the model's context
+ # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
#
# @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
#
@@ -248,6 +254,8 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel
#
# @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
#
+ # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
+ #
# @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
#
# @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
@@ -317,6 +325,7 @@ module ServiceTier
AUTO = :auto
DEFAULT = :default
FLEX = :flex
+ SCALE = :scale
# @!method self.values
# @return [Array]
diff --git a/lib/openai/models/responses/response_prompt.rb b/lib/openai/models/responses/response_prompt.rb
new file mode 100644
index 00000000..aa5b7fb0
--- /dev/null
+++ b/lib/openai/models/responses/response_prompt.rb
@@ -0,0 +1,63 @@
+# frozen_string_literal: true
+
+module OpenAI
+ module Models
+ module Responses
+ class ResponsePrompt < OpenAI::Internal::Type::BaseModel
+ # @!attribute id
+ # The unique identifier of the prompt template to use.
+ #
+ # @return [String]
+ required :id, String
+
+ # @!attribute variables
+ # Optional map of values to substitute in for variables in your prompt. The
+ # substitution values can either be strings, or other Response input types like
+ # images or files.
+ #
+ # @return [Hash{Symbol=>String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile}, nil]
+ optional :variables,
+ -> { OpenAI::Internal::Type::HashOf[union: OpenAI::Responses::ResponsePrompt::Variable] },
+ nil?: true
+
+ # @!attribute version
+ # Optional version of the prompt template.
+ #
+ # @return [String, nil]
+ optional :version, String, nil?: true
+
+ # @!method initialize(id:, variables: nil, version: nil)
+ # Some parameter documentations has been truncated, see
+ # {OpenAI::Models::Responses::ResponsePrompt} for more details.
+ #
+ # Reference to a prompt template and its variables.
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+ #
+ # @param id [String] The unique identifier of the prompt template to use.
+ #
+ # @param variables [Hash{Symbol=>String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile}, nil] Optional map of values to substitute in for variables in your
+ #
+ # @param version [String, nil] Optional version of the prompt template.
+
+ # A text input to the model.
+ module Variable
+ extend OpenAI::Internal::Type::Union
+
+ variant String
+
+ # A text input to the model.
+ variant -> { OpenAI::Responses::ResponseInputText }
+
+ # An image input to the model. Learn about [image inputs](https://platform.openai.com/docs/guides/vision).
+ variant -> { OpenAI::Responses::ResponseInputImage }
+
+ # A file input to the model.
+ variant -> { OpenAI::Responses::ResponseInputFile }
+
+ # @!method self.variants
+ # @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile)]
+ end
+ end
+ end
+ end
+end
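The new `ResponsePrompt` model backs the `prompt:` parameter on `responses.create`. A minimal sketch of referencing a saved prompt template (the prompt ID, version, and variable names below are placeholders):

```ruby
client = OpenAI::Client.new

response = client.responses.create(
  model: "gpt-4o-2024-08-06",
  input: "Draft a short welcome email.",
  prompt: {
    id: "pmpt_123", # placeholder prompt template ID
    version: "2",   # optional template version
    variables: {customer_name: "Jane Doe"} # string or input-item values
  }
)
pp(response.output)
```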
diff --git a/lib/openai/resources/fine_tuning/checkpoints/permissions.rb b/lib/openai/resources/fine_tuning/checkpoints/permissions.rb
index f0afec14..4fe26ec9 100644
--- a/lib/openai/resources/fine_tuning/checkpoints/permissions.rb
+++ b/lib/openai/resources/fine_tuning/checkpoints/permissions.rb
@@ -60,7 +60,7 @@ def create(fine_tuned_model_checkpoint, params)
#
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
#
- # @return [OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse]
+ # @return [OpenAI::Internal::CursorPage]
#
# @see OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams
def retrieve(fine_tuned_model_checkpoint, params = {})
@@ -69,6 +69,7 @@ def retrieve(fine_tuned_model_checkpoint, params = {})
method: :get,
path: ["fine_tuning/checkpoints/%1$s/permissions", fine_tuned_model_checkpoint],
query: parsed,
+ page: OpenAI::Internal::CursorPage,
model: OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse,
options: options
)
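With this change, `retrieve` returns a cursor page of individual permission objects rather than a single list-shaped response. A minimal sketch (the checkpoint name is a placeholder; `auto_paging_each` is the SDK's standard page iterator):

```ruby
client = OpenAI::Client.new

page = client.fine_tuning.checkpoints.permissions.retrieve(
  "ft:gpt-4o-mini-2024-07-18:org:custom:id" # placeholder checkpoint
)
page.auto_paging_each do |permission|
  # each item is a PermissionRetrieveResponse (one `checkpoint.permission`)
  puts(permission.id)
end
```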
diff --git a/lib/openai/resources/fine_tuning/jobs.rb b/lib/openai/resources/fine_tuning/jobs.rb
index c978c56c..9db01f0c 100644
--- a/lib/openai/resources/fine_tuning/jobs.rb
+++ b/lib/openai/resources/fine_tuning/jobs.rb
@@ -16,7 +16,7 @@ class Jobs
# Response includes details of the enqueued job including job status and the name
# of the fine-tuned models once complete.
#
- # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
+ # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)
#
# @overload create(model:, training_file:, hyperparameters: nil, integrations: nil, metadata: nil, method_: nil, seed: nil, suffix: nil, validation_file: nil, request_options: {})
#
@@ -59,7 +59,7 @@ def create(params)
#
# Get info about a fine-tuning job.
#
- # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
+ # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)
#
# @overload retrieve(fine_tuning_job_id, request_options: {})
#
diff --git a/lib/openai/resources/images.rb b/lib/openai/resources/images.rb
index 6a521f3b..e1d26736 100644
--- a/lib/openai/resources/images.rb
+++ b/lib/openai/resources/images.rb
@@ -45,7 +45,7 @@ def create_variation(params)
# Creates an edited or extended image given one or more source images and a
# prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`.
#
- # @overload edit(image:, prompt:, background: nil, mask: nil, model: nil, n: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
+ # @overload edit(image:, prompt:, background: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
#
# @param image [Pathname, StringIO, IO, String, OpenAI::FilePart, Array] The image(s) to edit. Must be a supported image file or an array of images.
#
@@ -59,6 +59,10 @@ def create_variation(params)
#
# @param n [Integer, nil] The number of images to generate. Must be between 1 and 10.
#
+ # @param output_compression [Integer, nil] The compression level (0-100%) for the generated images. This parameter
+ #
+ # @param output_format [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is
+ #
# @param quality [Symbol, OpenAI::Models::ImageEditParams::Quality, nil] The quality of the image that will be generated. `high`, `medium` and `low` are
#
# @param response_format [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or `
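The two new parameters compose with the existing edit call. A minimal sketch (the file name and values are illustrative):

```ruby
client = OpenAI::Client.new

edited = client.images.edit(
  image: Pathname("dog.jpg"),
  prompt: "make this image look like a painting",
  model: "gpt-image-1",
  output_format: :jpeg,   # one of :png, :jpeg, :webp (gpt-image-1 only)
  output_compression: 80  # 0-100; applies to jpeg/webp output, defaults to 100
)
```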
diff --git a/lib/openai/resources/responses.rb b/lib/openai/resources/responses.rb
index 64f9f674..21d22a18 100644
--- a/lib/openai/resources/responses.rb
+++ b/lib/openai/resources/responses.rb
@@ -23,7 +23,7 @@ class Responses
# [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
# your own data as input for the model's response.
#
- # @overload create(input:, model:, background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+ # @overload create(input:, model:, background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
#
# @param input [String, Array] Text, image, or file inputs to the model, used to generate a response.
#
@@ -33,7 +33,7 @@ class Responses
#
# @param include [Array, nil] Specify additional output data to include in the model response. Currently
#
- # @param instructions [String, nil] Inserts a system (or developer) message as the first item in the model's context
+ # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
#
# @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
#
@@ -43,6 +43,8 @@ class Responses
#
# @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
#
+ # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
+ #
# @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
#
# @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
@@ -181,7 +183,7 @@ def stream
# [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
# your own data as input for the model's response.
#
- # @overload stream_raw(input:, model:, background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+ # @overload stream_raw(input:, model:, background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
#
# @param input [String, Array] Text, image, or file inputs to the model, used to generate a response.
#
@@ -191,7 +193,7 @@ def stream
#
# @param include [Array, nil] Specify additional output data to include in the model response. Currently
#
- # @param instructions [String, nil] Inserts a system (or developer) message as the first item in the model's context
+ # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
#
# @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
#
@@ -201,6 +203,8 @@ def stream
#
# @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
#
+ # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
+ #
# @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
#
# @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
diff --git a/lib/openai/version.rb b/lib/openai/version.rb
index 3d1c96c8..502435cb 100644
--- a/lib/openai/version.rb
+++ b/lib/openai/version.rb
@@ -1,5 +1,5 @@
# frozen_string_literal: true
module OpenAI
- VERSION = "0.8.0"
+ VERSION = "0.9.0"
end
diff --git a/rbi/openai/models/chat/chat_completion.rbi b/rbi/openai/models/chat/chat_completion.rbi
index 7408d715..6c8172ca 100644
--- a/rbi/openai/models/chat/chat_completion.rbi
+++ b/rbi/openai/models/chat/chat_completion.rbi
@@ -404,6 +404,11 @@ module OpenAI
:flex,
OpenAI::Chat::ChatCompletion::ServiceTier::TaggedSymbol
)
+ SCALE =
+ T.let(
+ :scale,
+ OpenAI::Chat::ChatCompletion::ServiceTier::TaggedSymbol
+ )
sig do
override.returns(
diff --git a/rbi/openai/models/chat/chat_completion_chunk.rbi b/rbi/openai/models/chat/chat_completion_chunk.rbi
index b37b09ef..cd2d322e 100644
--- a/rbi/openai/models/chat/chat_completion_chunk.rbi
+++ b/rbi/openai/models/chat/chat_completion_chunk.rbi
@@ -824,6 +824,11 @@ module OpenAI
:flex,
OpenAI::Chat::ChatCompletionChunk::ServiceTier::TaggedSymbol
)
+ SCALE =
+ T.let(
+ :scale,
+ OpenAI::Chat::ChatCompletionChunk::ServiceTier::TaggedSymbol
+ )
sig do
override.returns(
diff --git a/rbi/openai/models/chat/completion_create_params.rbi b/rbi/openai/models/chat/completion_create_params.rbi
index a64aff1b..9450ae72 100644
--- a/rbi/openai/models/chat/completion_create_params.rbi
+++ b/rbi/openai/models/chat/completion_create_params.rbi
@@ -1049,6 +1049,11 @@ module OpenAI
:flex,
OpenAI::Chat::CompletionCreateParams::ServiceTier::TaggedSymbol
)
+ SCALE =
+ T.let(
+ :scale,
+ OpenAI::Chat::CompletionCreateParams::ServiceTier::TaggedSymbol
+ )
sig do
override.returns(
diff --git a/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi b/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi
index 2501be7b..8690aad8 100644
--- a/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi
+++ b/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi
@@ -13,125 +13,56 @@ module OpenAI
)
end
- sig do
- returns(
- T::Array[
- OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data
- ]
- )
- end
- attr_accessor :data
+ # The permission identifier, which can be referenced in the API endpoints.
+ sig { returns(String) }
+ attr_accessor :id
- sig { returns(T::Boolean) }
- attr_accessor :has_more
+ # The Unix timestamp (in seconds) for when the permission was created.
+ sig { returns(Integer) }
+ attr_accessor :created_at
+ # The object type, which is always "checkpoint.permission".
sig { returns(Symbol) }
attr_accessor :object
- sig { returns(T.nilable(String)) }
- attr_accessor :first_id
-
- sig { returns(T.nilable(String)) }
- attr_accessor :last_id
+ # The project identifier that the permission is for.
+ sig { returns(String) }
+ attr_accessor :project_id
+ # The `checkpoint.permission` object represents a permission for a fine-tuned
+ # model checkpoint.
sig do
params(
- data:
- T::Array[
- OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data::OrHash
- ],
- has_more: T::Boolean,
- first_id: T.nilable(String),
- last_id: T.nilable(String),
+ id: String,
+ created_at: Integer,
+ project_id: String,
object: Symbol
).returns(T.attached_class)
end
def self.new(
- data:,
- has_more:,
- first_id: nil,
- last_id: nil,
- object: :list
+ # The permission identifier, which can be referenced in the API endpoints.
+ id:,
+ # The Unix timestamp (in seconds) for when the permission was created.
+ created_at:,
+ # The project identifier that the permission is for.
+ project_id:,
+ # The object type, which is always "checkpoint.permission".
+ object: :"checkpoint.permission"
)
end
sig do
override.returns(
{
- data:
- T::Array[
- OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data
- ],
- has_more: T::Boolean,
+ id: String,
+ created_at: Integer,
object: Symbol,
- first_id: T.nilable(String),
- last_id: T.nilable(String)
+ project_id: String
}
)
end
def to_hash
end
-
- class Data < OpenAI::Internal::Type::BaseModel
- OrHash =
- T.type_alias do
- T.any(
- OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data,
- OpenAI::Internal::AnyHash
- )
- end
-
- # The permission identifier, which can be referenced in the API endpoints.
- sig { returns(String) }
- attr_accessor :id
-
- # The Unix timestamp (in seconds) for when the permission was created.
- sig { returns(Integer) }
- attr_accessor :created_at
-
- # The object type, which is always "checkpoint.permission".
- sig { returns(Symbol) }
- attr_accessor :object
-
- # The project identifier that the permission is for.
- sig { returns(String) }
- attr_accessor :project_id
-
- # The `checkpoint.permission` object represents a permission for a fine-tuned
- # model checkpoint.
- sig do
- params(
- id: String,
- created_at: Integer,
- project_id: String,
- object: Symbol
- ).returns(T.attached_class)
- end
- def self.new(
- # The permission identifier, which can be referenced in the API endpoints.
- id:,
- # The Unix timestamp (in seconds) for when the permission was created.
- created_at:,
- # The project identifier that the permission is for.
- project_id:,
- # The object type, which is always "checkpoint.permission".
- object: :"checkpoint.permission"
- )
- end
-
- sig do
- override.returns(
- {
- id: String,
- created_at: Integer,
- object: Symbol,
- project_id: String
- }
- )
- end
- def to_hash
- end
- end
end
end
end
diff --git a/rbi/openai/models/fine_tuning/job_create_params.rbi b/rbi/openai/models/fine_tuning/job_create_params.rbi
index 32e49ed1..4ac7cefa 100644
--- a/rbi/openai/models/fine_tuning/job_create_params.rbi
+++ b/rbi/openai/models/fine_tuning/job_create_params.rbi
@@ -39,7 +39,8 @@ module OpenAI
# [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
# format.
#
- # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+ # See the
+ # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
# for more details.
sig { returns(String) }
attr_accessor :training_file
@@ -115,7 +116,8 @@ module OpenAI
# Your dataset must be formatted as a JSONL file. You must upload your file with
# the purpose `fine-tune`.
#
- # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+ # See the
+ # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
# for more details.
sig { returns(T.nilable(String)) }
attr_accessor :validation_file
@@ -163,7 +165,8 @@ module OpenAI
# [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
# format.
#
- # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+ # See the
+ # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
# for more details.
training_file:,
# The hyperparameters used for the fine-tuning job. This value is now deprecated
@@ -200,7 +203,8 @@ module OpenAI
# Your dataset must be formatted as a JSONL file. You must upload your file with
# the purpose `fine-tune`.
#
- # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+ # See the
+ # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
# for more details.
validation_file: nil,
request_options: {}
diff --git a/rbi/openai/models/image_edit_params.rbi b/rbi/openai/models/image_edit_params.rbi
index f3e7df1e..0e0957b1 100644
--- a/rbi/openai/models/image_edit_params.rbi
+++ b/rbi/openai/models/image_edit_params.rbi
@@ -56,6 +56,20 @@ module OpenAI
sig { returns(T.nilable(Integer)) }
attr_accessor :n
+ # The compression level (0-100%) for the generated images. This parameter is only
+ # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ # defaults to 100.
+ sig { returns(T.nilable(Integer)) }
+ attr_accessor :output_compression
+
+ # The format in which the generated images are returned. This parameter is only
+ # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+ # default value is `png`.
+ sig do
+ returns(T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol))
+ end
+ attr_accessor :output_format
+
# The quality of the image that will be generated. `high`, `medium` and `low` are
# only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
# Defaults to `auto`.
@@ -94,6 +108,9 @@ module OpenAI
mask: OpenAI::Internal::FileInput,
model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
n: T.nilable(Integer),
+ output_compression: T.nilable(Integer),
+ output_format:
+ T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol),
quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol),
response_format:
T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol),
@@ -133,6 +150,14 @@ module OpenAI
model: nil,
# The number of images to generate. Must be between 1 and 10.
n: nil,
+ # The compression level (0-100%) for the generated images. This parameter is only
+ # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ # defaults to 100.
+ output_compression: nil,
+ # The format in which the generated images are returned. This parameter is only
+ # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+ # default value is `png`.
+ output_format: nil,
# The quality of the image that will be generated. `high`, `medium` and `low` are
# only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
# Defaults to `auto`.
@@ -164,6 +189,9 @@ module OpenAI
mask: OpenAI::Internal::FileInput,
model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
n: T.nilable(Integer),
+ output_compression: T.nilable(Integer),
+ output_format:
+ T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol),
quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol),
response_format:
T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol),
@@ -246,6 +274,29 @@ module OpenAI
end
end
+ # The format in which the generated images are returned. This parameter is only
+ # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+ # default value is `png`.
+ module OutputFormat
+ extend OpenAI::Internal::Type::Enum
+
+ TaggedSymbol =
+ T.type_alias { T.all(Symbol, OpenAI::ImageEditParams::OutputFormat) }
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+ PNG = T.let(:png, OpenAI::ImageEditParams::OutputFormat::TaggedSymbol)
+ JPEG = T.let(:jpeg, OpenAI::ImageEditParams::OutputFormat::TaggedSymbol)
+ WEBP = T.let(:webp, OpenAI::ImageEditParams::OutputFormat::TaggedSymbol)
+
+ sig do
+ override.returns(
+ T::Array[OpenAI::ImageEditParams::OutputFormat::TaggedSymbol]
+ )
+ end
+ def self.values
+ end
+ end
+
# The quality of the image that will be generated. `high`, `medium` and `low` are
# only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
# Defaults to `auto`.
diff --git a/rbi/openai/models/responses/response.rbi b/rbi/openai/models/responses/response.rbi
index d7a1789e..1ebda937 100644
--- a/rbi/openai/models/responses/response.rbi
+++ b/rbi/openai/models/responses/response.rbi
@@ -42,13 +42,16 @@ module OpenAI
end
attr_writer :incomplete_details
- # Inserts a system (or developer) message as the first item in the model's
- # context.
+ # A system (or developer) message inserted into the model's context.
#
# When using along with `previous_response_id`, the instructions from a previous
# response will not be carried over to the next response. This makes it simple to
# swap out system (or developer) messages in new responses.
- sig { returns(T.nilable(String)) }
+ sig do
+ returns(
+ T.nilable(OpenAI::Responses::Response::Instructions::Variants)
+ )
+ end
attr_accessor :instructions
# Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -143,6 +146,18 @@ module OpenAI
sig { returns(T.nilable(String)) }
attr_accessor :previous_response_id
+ # Reference to a prompt template and its variables.
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+ sig { returns(T.nilable(OpenAI::Responses::ResponsePrompt)) }
+ attr_reader :prompt
+
+ sig do
+ params(
+ prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash)
+ ).void
+ end
+ attr_writer :prompt
+
# **o-series models only**
#
# Configuration options for
@@ -236,7 +251,8 @@ module OpenAI
error: T.nilable(OpenAI::Responses::ResponseError::OrHash),
incomplete_details:
T.nilable(OpenAI::Responses::Response::IncompleteDetails::OrHash),
- instructions: T.nilable(String),
+ instructions:
+ T.nilable(OpenAI::Responses::Response::Instructions::Variants),
metadata: T.nilable(T::Hash[Symbol, String]),
model:
T.any(
@@ -286,6 +302,7 @@ module OpenAI
background: T.nilable(T::Boolean),
max_output_tokens: T.nilable(Integer),
previous_response_id: T.nilable(String),
+ prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
reasoning: T.nilable(OpenAI::Reasoning::OrHash),
service_tier:
T.nilable(OpenAI::Responses::Response::ServiceTier::OrSymbol),
@@ -307,8 +324,7 @@ module OpenAI
error:,
# Details about why the response is incomplete.
incomplete_details:,
- # Inserts a system (or developer) message as the first item in the model's
- # context.
+ # A system (or developer) message inserted into the model's context.
#
# When using along with `previous_response_id`, the instructions from a previous
# response will not be carried over to the next response. This makes it simple to
@@ -378,6 +394,9 @@ module OpenAI
# multi-turn conversations. Learn more about
# [conversation state](https://platform.openai.com/docs/guides/conversation-state).
previous_response_id: nil,
+ # Reference to a prompt template and its variables.
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+ prompt: nil,
# **o-series models only**
#
# Configuration options for
@@ -438,7 +457,8 @@ module OpenAI
error: T.nilable(OpenAI::Responses::ResponseError),
incomplete_details:
T.nilable(OpenAI::Responses::Response::IncompleteDetails),
- instructions: T.nilable(String),
+ instructions:
+ T.nilable(OpenAI::Responses::Response::Instructions::Variants),
metadata: T.nilable(T::Hash[Symbol, String]),
model: OpenAI::ResponsesModel::Variants,
object: Symbol,
@@ -451,6 +471,7 @@ module OpenAI
background: T.nilable(T::Boolean),
max_output_tokens: T.nilable(Integer),
previous_response_id: T.nilable(String),
+ prompt: T.nilable(OpenAI::Responses::ResponsePrompt),
reasoning: T.nilable(OpenAI::Reasoning),
service_tier:
T.nilable(
@@ -557,6 +578,39 @@ module OpenAI
end
end
+ # A system (or developer) message inserted into the model's context.
+ #
+ # When using along with `previous_response_id`, the instructions from a previous
+ # response will not be carried over to the next response. This makes it simple to
+ # swap out system (or developer) messages in new responses.
+ module Instructions
+ extend OpenAI::Internal::Type::Union
+
+ Variants =
+ T.type_alias do
+ T.any(
+ String,
+ T::Array[OpenAI::Responses::ResponseInputItem::Variants]
+ )
+ end
+
+ sig do
+ override.returns(
+ T::Array[OpenAI::Responses::Response::Instructions::Variants]
+ )
+ end
+ def self.variants
+ end
+
+ ResponseInputItemArray =
+ T.let(
+ OpenAI::Internal::Type::ArrayOf[
+ union: OpenAI::Responses::ResponseInputItem
+ ],
+ OpenAI::Internal::Type::Converter
+ )
+ end
+
# How the model should select which tool (or tools) to use when generating a
# response. See the `tools` parameter to see how to specify which tools the model
# can call.
@@ -616,6 +670,11 @@ module OpenAI
)
FLEX =
T.let(:flex, OpenAI::Responses::Response::ServiceTier::TaggedSymbol)
+ SCALE =
+ T.let(
+ :scale,
+ OpenAI::Responses::Response::ServiceTier::TaggedSymbol
+ )
sig do
override.returns(
diff --git a/rbi/openai/models/responses/response_create_params.rbi b/rbi/openai/models/responses/response_create_params.rbi
index 2f868bca..83454ddd 100644
--- a/rbi/openai/models/responses/response_create_params.rbi
+++ b/rbi/openai/models/responses/response_create_params.rbi
@@ -72,8 +72,7 @@ module OpenAI
end
attr_accessor :include
- # Inserts a system (or developer) message as the first item in the model's
- # context.
+ # A system (or developer) message inserted into the model's context.
#
# When using along with `previous_response_id`, the instructions from a previous
# response will not be carried over to the next response. This makes it simple to
@@ -106,6 +105,18 @@ module OpenAI
sig { returns(T.nilable(String)) }
attr_accessor :previous_response_id
+ # Reference to a prompt template and its variables.
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+ sig { returns(T.nilable(OpenAI::Responses::ResponsePrompt)) }
+ attr_reader :prompt
+
+ sig do
+ params(
+ prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash)
+ ).void
+ end
+ attr_writer :prompt
+
# **o-series models only**
#
# Configuration options for
@@ -305,6 +316,7 @@ module OpenAI
metadata: T.nilable(T::Hash[Symbol, String]),
parallel_tool_calls: T.nilable(T::Boolean),
previous_response_id: T.nilable(String),
+ prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
reasoning: T.nilable(OpenAI::Reasoning::OrHash),
service_tier:
T.nilable(
@@ -377,8 +389,7 @@ module OpenAI
# - `code_interpreter_call.outputs`: Includes the outputs of python code execution
# in code interpreter tool call items.
include: nil,
- # Inserts a system (or developer) message as the first item in the model's
- # context.
+ # A system (or developer) message inserted into the model's context.
#
# When using along with `previous_response_id`, the instructions from a previous
# response will not be carried over to the next response. This makes it simple to
@@ -401,6 +412,9 @@ module OpenAI
# multi-turn conversations. Learn more about
# [conversation state](https://platform.openai.com/docs/guides/conversation-state).
previous_response_id: nil,
+ # Reference to a prompt template and its variables.
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+ prompt: nil,
# **o-series models only**
#
# Configuration options for
@@ -498,6 +512,7 @@ module OpenAI
metadata: T.nilable(T::Hash[Symbol, String]),
parallel_tool_calls: T.nilable(T::Boolean),
previous_response_id: T.nilable(String),
+ prompt: T.nilable(OpenAI::Responses::ResponsePrompt),
reasoning: T.nilable(OpenAI::Reasoning),
service_tier:
T.nilable(
@@ -611,6 +626,11 @@ module OpenAI
:flex,
OpenAI::Responses::ResponseCreateParams::ServiceTier::TaggedSymbol
)
+ SCALE =
+ T.let(
+ :scale,
+ OpenAI::Responses::ResponseCreateParams::ServiceTier::TaggedSymbol
+ )
sig do
override.returns(
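
The new `prompt` parameter above takes a `ResponsePrompt` or its `OrHash` form, so a request can reference a stored template instead of inlining instructions. A minimal sketch, assuming a hypothetical template ID and variable name:

    require "openai"

    client = OpenAI::Client.new # assumes OPENAI_API_KEY is set in the environment

    response = client.responses.create(
      model: "gpt-4.1",
      input: "Write the welcome email.",
      prompt: {
        id: "pmpt_abc123",            # hypothetical reusable prompt ID
        variables: { city: "Berlin" } # plain-string substitution values
      }
    )
    puts response.id
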
diff --git a/rbi/openai/models/responses/response_prompt.rbi b/rbi/openai/models/responses/response_prompt.rbi
new file mode 100644
index 00000000..4a90fa5c
--- /dev/null
+++ b/rbi/openai/models/responses/response_prompt.rbi
@@ -0,0 +1,120 @@
+# typed: strong
+
+module OpenAI
+ module Models
+ module Responses
+ class ResponsePrompt < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(OpenAI::Responses::ResponsePrompt, OpenAI::Internal::AnyHash)
+ end
+
+ # The unique identifier of the prompt template to use.
+ sig { returns(String) }
+ attr_accessor :id
+
+ # Optional map of values to substitute in for variables in your prompt. The
+ # substitution values can either be strings, or other Response input types like
+ # images or files.
+ sig do
+ returns(
+ T.nilable(
+ T::Hash[
+ Symbol,
+ T.any(
+ String,
+ OpenAI::Responses::ResponseInputText,
+ OpenAI::Responses::ResponseInputImage,
+ OpenAI::Responses::ResponseInputFile
+ )
+ ]
+ )
+ )
+ end
+ attr_accessor :variables
+
+ # Optional version of the prompt template.
+ sig { returns(T.nilable(String)) }
+ attr_accessor :version
+
+ # Reference to a prompt template and its variables.
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+ sig do
+ params(
+ id: String,
+ variables:
+ T.nilable(
+ T::Hash[
+ Symbol,
+ T.any(
+ String,
+ OpenAI::Responses::ResponseInputText::OrHash,
+ OpenAI::Responses::ResponseInputImage::OrHash,
+ OpenAI::Responses::ResponseInputFile::OrHash
+ )
+ ]
+ ),
+ version: T.nilable(String)
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # The unique identifier of the prompt template to use.
+ id:,
+ # Optional map of values to substitute in for variables in your prompt. The
+ # substitution values can either be strings, or other Response input types like
+ # images or files.
+ variables: nil,
+ # Optional version of the prompt template.
+ version: nil
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ id: String,
+ variables:
+ T.nilable(
+ T::Hash[
+ Symbol,
+ T.any(
+ String,
+ OpenAI::Responses::ResponseInputText,
+ OpenAI::Responses::ResponseInputImage,
+ OpenAI::Responses::ResponseInputFile
+ )
+ ]
+ ),
+ version: T.nilable(String)
+ }
+ )
+ end
+ def to_hash
+ end
+
+        # A prompt variable value: a plain string, or a Response input item such
+        # as text, an image, or a file.
+ module Variable
+ extend OpenAI::Internal::Type::Union
+
+ Variants =
+ T.type_alias do
+ T.any(
+ String,
+ OpenAI::Responses::ResponseInputText,
+ OpenAI::Responses::ResponseInputImage,
+ OpenAI::Responses::ResponseInputFile
+ )
+ end
+
+ sig do
+ override.returns(
+ T::Array[OpenAI::Responses::ResponsePrompt::Variable::Variants]
+ )
+ end
+ def self.variants
+ end
+ end
+ end
+ end
+ end
+end
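
As the signatures above define it, a `ResponsePrompt` carries a template `id`, an optional `variables` map, and an optional `version`. A minimal sketch of constructing one explicitly and pinning a version (IDs and values are placeholders); per the `Variable` union, values may also be `ResponseInputText`, `ResponseInputImage`, or `ResponseInputFile` items rather than plain strings:

    require "openai"

    prompt = OpenAI::Responses::ResponsePrompt.new(
      id: "pmpt_abc123",      # hypothetical template ID
      version: "2",           # pin a specific template version
      variables: {
        customer_name: "Ada", # plain-string substitution
        tone: "friendly"
      }
    )

    client = OpenAI::Client.new # assumes OPENAI_API_KEY is set in the environment
    response = client.responses.create(model: "gpt-4.1", input: "Draft a reply.", prompt: prompt)
    puts response.id
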
diff --git a/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi b/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi
index be76789c..dae4366b 100644
--- a/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi
+++ b/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi
@@ -43,7 +43,9 @@ module OpenAI
project_id: String,
request_options: OpenAI::RequestOptions::OrHash
).returns(
- OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse
+ OpenAI::Internal::CursorPage[
+ OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse
+ ]
)
end
def retrieve(
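
With the return type above, `permissions.retrieve` now hands back a `CursorPage` of single-permission records instead of a one-shot list envelope. A minimal sketch of walking the page, using the same `to_enum` iteration the updated test later in this diff relies on (the checkpoint ID is the placeholder used in that test):

    require "openai"

    client = OpenAI::Client.new # assumes OPENAI_API_KEY is set in the environment
    page = client.fine_tuning.checkpoints.permissions.retrieve("ft-AF1WoRqd3aJAHsqc9NY7iL8F")

    # each element is now a PermissionRetrieveResponse row, not a list wrapper
    page.to_enum.each do |permission|
      puts "#{permission.id} -> project #{permission.project_id}"
    end
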
diff --git a/rbi/openai/resources/fine_tuning/jobs.rbi b/rbi/openai/resources/fine_tuning/jobs.rbi
index e60f057d..4e823288 100644
--- a/rbi/openai/resources/fine_tuning/jobs.rbi
+++ b/rbi/openai/resources/fine_tuning/jobs.rbi
@@ -13,7 +13,7 @@ module OpenAI
# The response includes details of the enqueued job, including job status and
# the name of the fine-tuned model once complete.
#
- # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
+ # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)
sig do
params(
model:
@@ -57,7 +57,8 @@ module OpenAI
# [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
# format.
#
- # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+ # See the
+ # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
# for more details.
training_file:,
# The hyperparameters used for the fine-tuning job. This value is now deprecated
@@ -94,7 +95,8 @@ module OpenAI
# Your dataset must be formatted as a JSONL file. You must upload your file with
# the purpose `fine-tune`.
#
- # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+ # See the
+ # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
# for more details.
validation_file: nil,
request_options: {}
@@ -103,7 +105,7 @@ module OpenAI
# Get info about a fine-tuning job.
#
- # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
+ # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)
sig do
params(
fine_tuning_job_id: String,
diff --git a/rbi/openai/resources/images.rbi b/rbi/openai/resources/images.rbi
index c8440e47..f4f67d10 100644
--- a/rbi/openai/resources/images.rbi
+++ b/rbi/openai/resources/images.rbi
@@ -52,6 +52,9 @@ module OpenAI
mask: OpenAI::Internal::FileInput,
model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
n: T.nilable(Integer),
+ output_compression: T.nilable(Integer),
+ output_format:
+ T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol),
quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol),
response_format:
T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol),
@@ -91,6 +94,14 @@ module OpenAI
model: nil,
# The number of images to generate. Must be between 1 and 10.
n: nil,
+ # The compression level (0-100%) for the generated images. This parameter is only
+ # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ # defaults to 100.
+ output_compression: nil,
+ # The format in which the generated images are returned. This parameter is only
+ # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+ # default value is `png`.
+ output_format: nil,
# The quality of the image that will be generated. `high`, `medium` and `low` are
# only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
# Defaults to `auto`.
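
Both parameters documented above apply only to `gpt-image-1`. A minimal sketch of requesting a compressed WebP edit, assuming a local source image (the file path, prompt, and output name are illustrative):

    require "base64"
    require "openai"
    require "pathname"

    client = OpenAI::Client.new # assumes OPENAI_API_KEY is set in the environment

    result = client.images.edit(
      image: Pathname("otter.png"), # hypothetical local file
      prompt: "Add a red scarf",
      model: "gpt-image-1",
      output_format: :webp,         # :png (the default), :jpeg, or :webp
      output_compression: 80        # 0-100; only honored for webp/jpeg, defaults to 100
    )

    b64 = result.data&.first&.b64_json
    File.binwrite("otter-edited.webp", Base64.decode64(b64)) if b64
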
diff --git a/rbi/openai/resources/responses.rbi b/rbi/openai/resources/responses.rbi
index c4ac4c2d..ec81a7a2 100644
--- a/rbi/openai/resources/responses.rbi
+++ b/rbi/openai/resources/responses.rbi
@@ -38,6 +38,7 @@ module OpenAI
metadata: T.nilable(T::Hash[Symbol, String]),
parallel_tool_calls: T.nilable(T::Boolean),
previous_response_id: T.nilable(String),
+ prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
reasoning: T.nilable(OpenAI::Reasoning::OrHash),
service_tier:
T.nilable(
@@ -115,8 +116,7 @@ module OpenAI
# - `code_interpreter_call.outputs`: Includes the outputs of python code execution
# in code interpreter tool call items.
include: nil,
- # Inserts a system (or developer) message as the first item in the model's
- # context.
+ # A system (or developer) message inserted into the model's context.
#
# When used along with `previous_response_id`, the instructions from a previous
# response will not be carried over to the next response. This makes it simple to
@@ -139,6 +139,9 @@ module OpenAI
# multi-turn conversations. Learn more about
# [conversation state](https://platform.openai.com/docs/guides/conversation-state).
previous_response_id: nil,
+ # Reference to a prompt template and its variables.
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+ prompt: nil,
# **o-series models only**
#
# Configuration options for
@@ -251,6 +254,7 @@ module OpenAI
metadata: T.nilable(T::Hash[Symbol, String]),
parallel_tool_calls: T.nilable(T::Boolean),
previous_response_id: T.nilable(String),
+ prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
reasoning: T.nilable(OpenAI::Reasoning::OrHash),
service_tier:
T.nilable(
@@ -328,8 +332,7 @@ module OpenAI
# - `code_interpreter_call.outputs`: Includes the outputs of python code execution
# in code interpreter tool call items.
include: nil,
- # Inserts a system (or developer) message as the first item in the model's
- # context.
+ # A system (or developer) message inserted into the model's context.
#
# When used along with `previous_response_id`, the instructions from a previous
# response will not be carried over to the next response. This makes it simple to
@@ -352,6 +355,9 @@ module OpenAI
# multi-turn conversations. Learn more about
# [conversation state](https://platform.openai.com/docs/guides/conversation-state).
previous_response_id: nil,
+ # Reference to a prompt template and its variables.
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+ prompt: nil,
# **o-series models only**
#
# Configuration options for
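
As the doc comments in both resource methods above emphasize, instructions are not carried over when chaining with `previous_response_id`. A minimal two-turn sketch that restates its instructions on the second call (model and prompts are illustrative):

    require "openai"

    client = OpenAI::Client.new # assumes OPENAI_API_KEY is set in the environment

    first = client.responses.create(
      model: "gpt-4.1",
      instructions: "Answer in one short sentence.",
      input: "What is a reusable prompt?"
    )

    # Instructions are NOT inherited via previous_response_id; restate them.
    second = client.responses.create(
      model: "gpt-4.1",
      previous_response_id: first.id,
      instructions: "Answer in one short sentence.",
      input: "And how do I pin a version?"
    )
    puts second.id
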
diff --git a/sig/openai/models/chat/chat_completion.rbs b/sig/openai/models/chat/chat_completion.rbs
index e66a399d..9b3eb5f9 100644
--- a/sig/openai/models/chat/chat_completion.rbs
+++ b/sig/openai/models/chat/chat_completion.rbs
@@ -127,7 +127,7 @@ module OpenAI
end
end
- type service_tier = :auto | :default | :flex
+ type service_tier = :auto | :default | :flex | :scale
module ServiceTier
extend OpenAI::Internal::Type::Enum
@@ -135,6 +135,7 @@ module OpenAI
AUTO: :auto
DEFAULT: :default
FLEX: :flex
+ SCALE: :scale
def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletion::service_tier]
end
diff --git a/sig/openai/models/chat/chat_completion_chunk.rbs b/sig/openai/models/chat/chat_completion_chunk.rbs
index 0e7acf36..e68150c9 100644
--- a/sig/openai/models/chat/chat_completion_chunk.rbs
+++ b/sig/openai/models/chat/chat_completion_chunk.rbs
@@ -272,7 +272,7 @@ module OpenAI
end
end
- type service_tier = :auto | :default | :flex
+ type service_tier = :auto | :default | :flex | :scale
module ServiceTier
extend OpenAI::Internal::Type::Enum
@@ -280,6 +280,7 @@ module OpenAI
AUTO: :auto
DEFAULT: :default
FLEX: :flex
+ SCALE: :scale
def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionChunk::service_tier]
end
diff --git a/sig/openai/models/chat/completion_create_params.rbs b/sig/openai/models/chat/completion_create_params.rbs
index 0f3ad11e..e35205f3 100644
--- a/sig/openai/models/chat/completion_create_params.rbs
+++ b/sig/openai/models/chat/completion_create_params.rbs
@@ -280,7 +280,7 @@ module OpenAI
def self?.variants: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::response_format]
end
- type service_tier = :auto | :default | :flex
+ type service_tier = :auto | :default | :flex | :scale
module ServiceTier
extend OpenAI::Internal::Type::Enum
@@ -288,6 +288,7 @@ module OpenAI
AUTO: :auto
DEFAULT: :default
FLEX: :flex
+ SCALE: :scale
def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::service_tier]
end
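
With `:scale` added to the `service_tier` enums above (and mirrored in the chat completion and chunk signatures), it becomes a legal request value. A minimal sketch of opting in on a chat completion; the model choice is illustrative, and tier availability depends on the account:

    require "openai"

    client = OpenAI::Client.new # assumes OPENAI_API_KEY is set in the environment

    completion = client.chat.completions.create(
      model: "gpt-4.1",
      messages: [{ role: :user, content: "ping" }],
      service_tier: :scale # newly accepted alongside :auto, :default, and :flex
    )
    puts completion.service_tier
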
diff --git a/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs b/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs
index 54f9630a..ef545155 100644
--- a/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs
+++ b/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs
@@ -4,71 +4,34 @@ module OpenAI
module Checkpoints
type permission_retrieve_response =
{
- data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data],
- has_more: bool,
- object: :list,
- first_id: String?,
- last_id: String?
+ id: String,
+ created_at: Integer,
+ object: :"checkpoint.permission",
+ project_id: String
}
class PermissionRetrieveResponse < OpenAI::Internal::Type::BaseModel
- attr_accessor data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data]
+ attr_accessor id: String
- attr_accessor has_more: bool
+ attr_accessor created_at: Integer
- attr_accessor object: :list
+ attr_accessor object: :"checkpoint.permission"
- attr_accessor first_id: String?
-
- attr_accessor last_id: String?
+ attr_accessor project_id: String
def initialize: (
- data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data],
- has_more: bool,
- ?first_id: String?,
- ?last_id: String?,
- ?object: :list
+ id: String,
+ created_at: Integer,
+ project_id: String,
+ ?object: :"checkpoint.permission"
) -> void
def to_hash: -> {
- data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data],
- has_more: bool,
- object: :list,
- first_id: String?,
- last_id: String?
+ id: String,
+ created_at: Integer,
+ object: :"checkpoint.permission",
+ project_id: String
}
-
- type data =
- {
- id: String,
- created_at: Integer,
- object: :"checkpoint.permission",
- project_id: String
- }
-
- class Data < OpenAI::Internal::Type::BaseModel
- attr_accessor id: String
-
- attr_accessor created_at: Integer
-
- attr_accessor object: :"checkpoint.permission"
-
- attr_accessor project_id: String
-
- def initialize: (
- id: String,
- created_at: Integer,
- project_id: String,
- ?object: :"checkpoint.permission"
- ) -> void
-
- def to_hash: -> {
- id: String,
- created_at: Integer,
- object: :"checkpoint.permission",
- project_id: String
- }
- end
end
end
end
diff --git a/sig/openai/models/image_edit_params.rbs b/sig/openai/models/image_edit_params.rbs
index 0fe6ec3d..ca2820e9 100644
--- a/sig/openai/models/image_edit_params.rbs
+++ b/sig/openai/models/image_edit_params.rbs
@@ -8,6 +8,8 @@ module OpenAI
mask: OpenAI::Internal::file_input,
model: OpenAI::Models::ImageEditParams::model?,
n: Integer?,
+ output_compression: Integer?,
+ output_format: OpenAI::Models::ImageEditParams::output_format?,
quality: OpenAI::Models::ImageEditParams::quality?,
response_format: OpenAI::Models::ImageEditParams::response_format?,
size: OpenAI::Models::ImageEditParams::size?,
@@ -33,6 +35,10 @@ module OpenAI
attr_accessor n: Integer?
+ attr_accessor output_compression: Integer?
+
+ attr_accessor output_format: OpenAI::Models::ImageEditParams::output_format?
+
attr_accessor quality: OpenAI::Models::ImageEditParams::quality?
attr_accessor response_format: OpenAI::Models::ImageEditParams::response_format?
@@ -50,6 +56,8 @@ module OpenAI
?mask: OpenAI::Internal::file_input,
?model: OpenAI::Models::ImageEditParams::model?,
?n: Integer?,
+ ?output_compression: Integer?,
+ ?output_format: OpenAI::Models::ImageEditParams::output_format?,
?quality: OpenAI::Models::ImageEditParams::quality?,
?response_format: OpenAI::Models::ImageEditParams::response_format?,
?size: OpenAI::Models::ImageEditParams::size?,
@@ -64,6 +72,8 @@ module OpenAI
mask: OpenAI::Internal::file_input,
model: OpenAI::Models::ImageEditParams::model?,
n: Integer?,
+ output_compression: Integer?,
+ output_format: OpenAI::Models::ImageEditParams::output_format?,
quality: OpenAI::Models::ImageEditParams::quality?,
response_format: OpenAI::Models::ImageEditParams::response_format?,
size: OpenAI::Models::ImageEditParams::size?,
@@ -102,6 +112,18 @@ module OpenAI
def self?.variants: -> ::Array[OpenAI::Models::ImageEditParams::model]
end
+ type output_format = :png | :jpeg | :webp
+
+ module OutputFormat
+ extend OpenAI::Internal::Type::Enum
+
+ PNG: :png
+ JPEG: :jpeg
+ WEBP: :webp
+
+ def self?.values: -> ::Array[OpenAI::Models::ImageEditParams::output_format]
+ end
+
type quality = :standard | :low | :medium | :high | :auto
module Quality
diff --git a/sig/openai/models/responses/response.rbs b/sig/openai/models/responses/response.rbs
index d5658b8f..8825885e 100644
--- a/sig/openai/models/responses/response.rbs
+++ b/sig/openai/models/responses/response.rbs
@@ -7,7 +7,7 @@ module OpenAI
created_at: Float,
error: OpenAI::Responses::ResponseError?,
incomplete_details: OpenAI::Responses::Response::IncompleteDetails?,
- instructions: String?,
+ instructions: OpenAI::Models::Responses::Response::instructions?,
metadata: OpenAI::Models::metadata?,
model: OpenAI::Models::responses_model,
object: :response,
@@ -20,6 +20,7 @@ module OpenAI
background: bool?,
max_output_tokens: Integer?,
previous_response_id: String?,
+ prompt: OpenAI::Responses::ResponsePrompt?,
reasoning: OpenAI::Reasoning?,
service_tier: OpenAI::Models::Responses::Response::service_tier?,
status: OpenAI::Models::Responses::response_status,
@@ -38,7 +39,7 @@ module OpenAI
attr_accessor incomplete_details: OpenAI::Responses::Response::IncompleteDetails?
- attr_accessor instructions: String?
+ attr_accessor instructions: OpenAI::Models::Responses::Response::instructions?
attr_accessor metadata: OpenAI::Models::metadata?
@@ -64,6 +65,8 @@ module OpenAI
attr_accessor previous_response_id: String?
+ attr_accessor prompt: OpenAI::Responses::ResponsePrompt?
+
attr_accessor reasoning: OpenAI::Reasoning?
attr_accessor service_tier: OpenAI::Models::Responses::Response::service_tier?
@@ -97,7 +100,7 @@ module OpenAI
created_at: Float,
error: OpenAI::Responses::ResponseError?,
incomplete_details: OpenAI::Responses::Response::IncompleteDetails?,
- instructions: String?,
+ instructions: OpenAI::Models::Responses::Response::instructions?,
metadata: OpenAI::Models::metadata?,
model: OpenAI::Models::responses_model,
output: ::Array[OpenAI::Models::Responses::response_output_item],
@@ -109,6 +112,7 @@ module OpenAI
?background: bool?,
?max_output_tokens: Integer?,
?previous_response_id: String?,
+ ?prompt: OpenAI::Responses::ResponsePrompt?,
?reasoning: OpenAI::Reasoning?,
?service_tier: OpenAI::Models::Responses::Response::service_tier?,
?status: OpenAI::Models::Responses::response_status,
@@ -124,7 +128,7 @@ module OpenAI
created_at: Float,
error: OpenAI::Responses::ResponseError?,
incomplete_details: OpenAI::Responses::Response::IncompleteDetails?,
- instructions: String?,
+ instructions: OpenAI::Models::Responses::Response::instructions?,
metadata: OpenAI::Models::metadata?,
model: OpenAI::Models::responses_model,
object: :response,
@@ -137,6 +141,7 @@ module OpenAI
background: bool?,
max_output_tokens: Integer?,
previous_response_id: String?,
+ prompt: OpenAI::Responses::ResponsePrompt?,
reasoning: OpenAI::Reasoning?,
service_tier: OpenAI::Models::Responses::Response::service_tier?,
status: OpenAI::Models::Responses::response_status,
@@ -178,6 +183,17 @@ module OpenAI
end
end
+ type instructions =
+ String | ::Array[OpenAI::Models::Responses::response_input_item]
+
+ module Instructions
+ extend OpenAI::Internal::Type::Union
+
+ def self?.variants: -> ::Array[OpenAI::Models::Responses::Response::instructions]
+
+ ResponseInputItemArray: OpenAI::Internal::Type::Converter
+ end
+
type tool_choice =
OpenAI::Models::Responses::tool_choice_options
| OpenAI::Responses::ToolChoiceTypes
@@ -189,7 +205,7 @@ module OpenAI
def self?.variants: -> ::Array[OpenAI::Models::Responses::Response::tool_choice]
end
- type service_tier = :auto | :default | :flex
+ type service_tier = :auto | :default | :flex | :scale
module ServiceTier
extend OpenAI::Internal::Type::Enum
@@ -197,6 +213,7 @@ module OpenAI
AUTO: :auto
DEFAULT: :default
FLEX: :flex
+ SCALE: :scale
def self?.values: -> ::Array[OpenAI::Models::Responses::Response::service_tier]
end
diff --git a/sig/openai/models/responses/response_create_params.rbs b/sig/openai/models/responses/response_create_params.rbs
index 3050697d..28a6db94 100644
--- a/sig/openai/models/responses/response_create_params.rbs
+++ b/sig/openai/models/responses/response_create_params.rbs
@@ -12,6 +12,7 @@ module OpenAI
metadata: OpenAI::Models::metadata?,
parallel_tool_calls: bool?,
previous_response_id: String?,
+ prompt: OpenAI::Responses::ResponsePrompt?,
reasoning: OpenAI::Reasoning?,
service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?,
store: bool?,
@@ -47,6 +48,8 @@ module OpenAI
attr_accessor previous_response_id: String?
+ attr_accessor prompt: OpenAI::Responses::ResponsePrompt?
+
attr_accessor reasoning: OpenAI::Reasoning?
attr_accessor service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?
@@ -91,6 +94,7 @@ module OpenAI
?metadata: OpenAI::Models::metadata?,
?parallel_tool_calls: bool?,
?previous_response_id: String?,
+ ?prompt: OpenAI::Responses::ResponsePrompt?,
?reasoning: OpenAI::Reasoning?,
?service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?,
?store: bool?,
@@ -114,6 +118,7 @@ module OpenAI
metadata: OpenAI::Models::metadata?,
parallel_tool_calls: bool?,
previous_response_id: String?,
+ prompt: OpenAI::Responses::ResponsePrompt?,
reasoning: OpenAI::Reasoning?,
service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?,
store: bool?,
@@ -135,7 +140,7 @@ module OpenAI
def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::input]
end
- type service_tier = :auto | :default | :flex
+ type service_tier = :auto | :default | :flex | :scale
module ServiceTier
extend OpenAI::Internal::Type::Enum
@@ -143,6 +148,7 @@ module OpenAI
AUTO: :auto
DEFAULT: :default
FLEX: :flex
+ SCALE: :scale
def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::service_tier]
end
diff --git a/sig/openai/models/responses/response_prompt.rbs b/sig/openai/models/responses/response_prompt.rbs
new file mode 100644
index 00000000..0ccc20f8
--- /dev/null
+++ b/sig/openai/models/responses/response_prompt.rbs
@@ -0,0 +1,44 @@
+module OpenAI
+ module Models
+ module Responses
+ type response_prompt =
+ {
+ id: String,
+ variables: ::Hash[Symbol, OpenAI::Models::Responses::ResponsePrompt::variable]?,
+ version: String?
+ }
+
+ class ResponsePrompt < OpenAI::Internal::Type::BaseModel
+ attr_accessor id: String
+
+ attr_accessor variables: ::Hash[Symbol, OpenAI::Models::Responses::ResponsePrompt::variable]?
+
+ attr_accessor version: String?
+
+ def initialize: (
+ id: String,
+ ?variables: ::Hash[Symbol, OpenAI::Models::Responses::ResponsePrompt::variable]?,
+ ?version: String?
+ ) -> void
+
+ def to_hash: -> {
+ id: String,
+ variables: ::Hash[Symbol, OpenAI::Models::Responses::ResponsePrompt::variable]?,
+ version: String?
+ }
+
+ type variable =
+ String
+ | OpenAI::Responses::ResponseInputText
+ | OpenAI::Responses::ResponseInputImage
+ | OpenAI::Responses::ResponseInputFile
+
+ module Variable
+ extend OpenAI::Internal::Type::Union
+
+ def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponsePrompt::variable]
+ end
+ end
+ end
+ end
+end
diff --git a/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs b/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs
index f36dcbbb..011e1286 100644
--- a/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs
+++ b/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs
@@ -16,7 +16,7 @@ module OpenAI
?order: OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams::order,
?project_id: String,
?request_options: OpenAI::request_opts
- ) -> OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse
+ ) -> OpenAI::Internal::CursorPage[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse]
def delete: (
String permission_id,
diff --git a/sig/openai/resources/images.rbs b/sig/openai/resources/images.rbs
index f441f385..43595baa 100644
--- a/sig/openai/resources/images.rbs
+++ b/sig/openai/resources/images.rbs
@@ -18,6 +18,8 @@ module OpenAI
?mask: OpenAI::Internal::file_input,
?model: OpenAI::Models::ImageEditParams::model?,
?n: Integer?,
+ ?output_compression: Integer?,
+ ?output_format: OpenAI::Models::ImageEditParams::output_format?,
?quality: OpenAI::Models::ImageEditParams::quality?,
?response_format: OpenAI::Models::ImageEditParams::response_format?,
?size: OpenAI::Models::ImageEditParams::size?,
diff --git a/sig/openai/resources/responses.rbs b/sig/openai/resources/responses.rbs
index 7db1e5e4..fdbdf256 100644
--- a/sig/openai/resources/responses.rbs
+++ b/sig/openai/resources/responses.rbs
@@ -13,6 +13,7 @@ module OpenAI
?metadata: OpenAI::Models::metadata?,
?parallel_tool_calls: bool?,
?previous_response_id: String?,
+ ?prompt: OpenAI::Responses::ResponsePrompt?,
?reasoning: OpenAI::Reasoning?,
?service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?,
?store: bool?,
@@ -36,6 +37,7 @@ module OpenAI
?metadata: OpenAI::Models::metadata?,
?parallel_tool_calls: bool?,
?previous_response_id: String?,
+ ?prompt: OpenAI::Responses::ResponsePrompt?,
?reasoning: OpenAI::Reasoning?,
?service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?,
?store: bool?,
diff --git a/test/openai/resources/fine_tuning/checkpoints/permissions_test.rb b/test/openai/resources/fine_tuning/checkpoints/permissions_test.rb
index 0ed0d4c1..60bc8e8f 100644
--- a/test/openai/resources/fine_tuning/checkpoints/permissions_test.rb
+++ b/test/openai/resources/fine_tuning/checkpoints/permissions_test.rb
@@ -35,16 +35,22 @@ def test_retrieve
response = @openai.fine_tuning.checkpoints.permissions.retrieve("ft-AF1WoRqd3aJAHsqc9NY7iL8F")
assert_pattern do
- response => OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse
+ response => OpenAI::Internal::CursorPage
end
+ row = response.to_enum.first
+ return if row.nil?
+
assert_pattern do
- response => {
- data: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data]),
- has_more: OpenAI::Internal::Type::Boolean,
+ row => OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse
+ end
+
+ assert_pattern do
+ row => {
+ id: String,
+ created_at: Integer,
object: Symbol,
- first_id: String | nil,
- last_id: String | nil
+ project_id: String
}
end
end
diff --git a/test/openai/resources/responses_test.rb b/test/openai/resources/responses_test.rb
index fe1e2ad5..105b7ce6 100644
--- a/test/openai/resources/responses_test.rb
+++ b/test/openai/resources/responses_test.rb
@@ -16,7 +16,7 @@ def test_create_required_params
created_at: Float,
error: OpenAI::Responses::ResponseError | nil,
incomplete_details: OpenAI::Responses::Response::IncompleteDetails | nil,
- instructions: String | nil,
+ instructions: OpenAI::Responses::Response::Instructions | nil,
metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
model: OpenAI::ResponsesModel,
object: Symbol,
@@ -29,6 +29,7 @@ def test_create_required_params
background: OpenAI::Internal::Type::Boolean | nil,
max_output_tokens: Integer | nil,
previous_response_id: String | nil,
+ prompt: OpenAI::Responses::ResponsePrompt | nil,
reasoning: OpenAI::Reasoning | nil,
service_tier: OpenAI::Responses::Response::ServiceTier | nil,
status: OpenAI::Responses::ResponseStatus | nil,
@@ -53,7 +54,7 @@ def test_retrieve
created_at: Float,
error: OpenAI::Responses::ResponseError | nil,
incomplete_details: OpenAI::Responses::Response::IncompleteDetails | nil,
- instructions: String | nil,
+ instructions: OpenAI::Responses::Response::Instructions | nil,
metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
model: OpenAI::ResponsesModel,
object: Symbol,
@@ -66,6 +67,7 @@ def test_retrieve
background: OpenAI::Internal::Type::Boolean | nil,
max_output_tokens: Integer | nil,
previous_response_id: String | nil,
+ prompt: OpenAI::Responses::ResponsePrompt | nil,
reasoning: OpenAI::Reasoning | nil,
service_tier: OpenAI::Responses::Response::ServiceTier | nil,
status: OpenAI::Responses::ResponseStatus | nil,
@@ -98,7 +100,7 @@ def test_cancel
created_at: Float,
error: OpenAI::Responses::ResponseError | nil,
incomplete_details: OpenAI::Responses::Response::IncompleteDetails | nil,
- instructions: String | nil,
+ instructions: OpenAI::Responses::Response::Instructions | nil,
metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
model: OpenAI::ResponsesModel,
object: Symbol,
@@ -111,6 +113,7 @@ def test_cancel
background: OpenAI::Internal::Type::Boolean | nil,
max_output_tokens: Integer | nil,
previous_response_id: String | nil,
+ prompt: OpenAI::Responses::ResponsePrompt | nil,
reasoning: OpenAI::Reasoning | nil,
service_tier: OpenAI::Responses::Response::ServiceTier | nil,
status: OpenAI::Responses::ResponseStatus | nil,