Skip to content

Commit 8f521c7

Browse files
feat(api): Add missing image options, scale tier
1 parent 7b34316 commit 8f521c7

File tree

427 files changed

+11472
-25107
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

427 files changed

+11472
-25107
lines changed

.stats.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
configured_endpoints: 109
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-3ae9c18dd7ccfc3ac5206f24394665f563a19015cfa8847b2801a2694d012abc.yml
3-
openapi_spec_hash: 48175b03b58805cd5c80793c66fd54e5
1+
configured_endpoints: 95
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b34433562ecd94088ceb7bb715de382302a61035aece192c0fc61cf965c8a54e.yml
3+
openapi_spec_hash: 2b55bcc8efd5519ae4d995dfec59003a
44
config_hash: 4caff63b74a41f71006987db702f2918

lib/openai.rb

Lines changed: 0 additions & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -60,9 +60,6 @@
6060
require_relative "openai/structured_output"
6161
require_relative "openai/models/reasoning_effort"
6262
require_relative "openai/models/chat/chat_completion_message"
63-
require_relative "openai/models/graders/score_model_grader"
64-
require_relative "openai/models/graders/python_grader"
65-
require_relative "openai/models/graders/text_similarity_grader"
6663
require_relative "openai/models/fine_tuning/fine_tuning_job_wandb_integration_object"
6764
require_relative "openai/models/responses/response_function_tool_call"
6865
require_relative "openai/models/all_models"
@@ -223,21 +220,6 @@
223220
require_relative "openai/models/completion_create_params"
224221
require_relative "openai/models/completion_usage"
225222
require_relative "openai/models/compound_filter"
226-
require_relative "openai/models/container_create_params"
227-
require_relative "openai/models/container_create_response"
228-
require_relative "openai/models/container_delete_params"
229-
require_relative "openai/models/container_list_params"
230-
require_relative "openai/models/container_list_response"
231-
require_relative "openai/models/container_retrieve_params"
232-
require_relative "openai/models/container_retrieve_response"
233-
require_relative "openai/models/containers/file_create_params"
234-
require_relative "openai/models/containers/file_create_response"
235-
require_relative "openai/models/containers/file_delete_params"
236-
require_relative "openai/models/containers/file_list_params"
237-
require_relative "openai/models/containers/file_list_response"
238-
require_relative "openai/models/containers/file_retrieve_params"
239-
require_relative "openai/models/containers/file_retrieve_response"
240-
require_relative "openai/models/containers/files/content_retrieve_params"
241223
require_relative "openai/models/create_embedding_response"
242224
require_relative "openai/models/embedding"
243225
require_relative "openai/models/embedding_create_params"
@@ -283,17 +265,12 @@
283265
require_relative "openai/models/file_object"
284266
require_relative "openai/models/file_purpose"
285267
require_relative "openai/models/file_retrieve_params"
286-
require_relative "openai/models/fine_tuning/alpha/grader_run_params"
287-
require_relative "openai/models/fine_tuning/alpha/grader_run_response"
288-
require_relative "openai/models/fine_tuning/alpha/grader_validate_params"
289-
require_relative "openai/models/fine_tuning/alpha/grader_validate_response"
290268
require_relative "openai/models/fine_tuning/checkpoints/permission_create_params"
291269
require_relative "openai/models/fine_tuning/checkpoints/permission_create_response"
292270
require_relative "openai/models/fine_tuning/checkpoints/permission_delete_params"
293271
require_relative "openai/models/fine_tuning/checkpoints/permission_delete_response"
294272
require_relative "openai/models/fine_tuning/checkpoints/permission_retrieve_params"
295273
require_relative "openai/models/fine_tuning/checkpoints/permission_retrieve_response"
296-
require_relative "openai/models/fine_tuning/dpo_hyperparameters"
297274
require_relative "openai/models/fine_tuning/dpo_method"
298275
require_relative "openai/models/fine_tuning/fine_tuning_job"
299276
require_relative "openai/models/fine_tuning/fine_tuning_job_event"
@@ -303,20 +280,12 @@
303280
require_relative "openai/models/fine_tuning/job_create_params"
304281
require_relative "openai/models/fine_tuning/job_list_events_params"
305282
require_relative "openai/models/fine_tuning/job_list_params"
306-
require_relative "openai/models/fine_tuning/job_pause_params"
307-
require_relative "openai/models/fine_tuning/job_resume_params"
308283
require_relative "openai/models/fine_tuning/job_retrieve_params"
309284
require_relative "openai/models/fine_tuning/jobs/checkpoint_list_params"
310285
require_relative "openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint"
311-
require_relative "openai/models/fine_tuning/reinforcement_hyperparameters"
312-
require_relative "openai/models/fine_tuning/reinforcement_method"
313-
require_relative "openai/models/fine_tuning/supervised_hyperparameters"
314286
require_relative "openai/models/fine_tuning/supervised_method"
315287
require_relative "openai/models/function_definition"
316288
require_relative "openai/models/function_parameters"
317-
require_relative "openai/models/graders/label_model_grader"
318-
require_relative "openai/models/graders/multi_grader"
319-
require_relative "openai/models/graders/string_check_grader"
320289
require_relative "openai/models/image"
321290
require_relative "openai/models/image_create_variation_params"
322291
require_relative "openai/models/image_edit_params"
@@ -351,7 +320,6 @@
351320
require_relative "openai/models/responses/response_audio_done_event"
352321
require_relative "openai/models/responses/response_audio_transcript_delta_event"
353322
require_relative "openai/models/responses/response_audio_transcript_done_event"
354-
require_relative "openai/models/responses/response_cancel_params"
355323
require_relative "openai/models/responses/response_code_interpreter_call_code_delta_event"
356324
require_relative "openai/models/responses/response_code_interpreter_call_code_done_event"
357325
require_relative "openai/models/responses/response_code_interpreter_call_completed_event"
@@ -382,10 +350,6 @@
382350
require_relative "openai/models/responses/response_function_tool_call_item"
383351
require_relative "openai/models/responses/response_function_tool_call_output_item"
384352
require_relative "openai/models/responses/response_function_web_search"
385-
require_relative "openai/models/responses/response_image_gen_call_completed_event"
386-
require_relative "openai/models/responses/response_image_gen_call_generating_event"
387-
require_relative "openai/models/responses/response_image_gen_call_in_progress_event"
388-
require_relative "openai/models/responses/response_image_gen_call_partial_image_event"
389353
require_relative "openai/models/responses/response_includable"
390354
require_relative "openai/models/responses/response_incomplete_event"
391355
require_relative "openai/models/responses/response_in_progress_event"
@@ -400,28 +364,14 @@
400364
require_relative "openai/models/responses/response_input_text"
401365
require_relative "openai/models/responses/response_item"
402366
require_relative "openai/models/responses/response_item_list"
403-
require_relative "openai/models/responses/response_mcp_call_arguments_delta_event"
404-
require_relative "openai/models/responses/response_mcp_call_arguments_done_event"
405-
require_relative "openai/models/responses/response_mcp_call_completed_event"
406-
require_relative "openai/models/responses/response_mcp_call_failed_event"
407-
require_relative "openai/models/responses/response_mcp_call_in_progress_event"
408-
require_relative "openai/models/responses/response_mcp_list_tools_completed_event"
409-
require_relative "openai/models/responses/response_mcp_list_tools_failed_event"
410-
require_relative "openai/models/responses/response_mcp_list_tools_in_progress_event"
411367
require_relative "openai/models/responses/response_output_audio"
412368
require_relative "openai/models/responses/response_output_item"
413369
require_relative "openai/models/responses/response_output_item_added_event"
414370
require_relative "openai/models/responses/response_output_item_done_event"
415371
require_relative "openai/models/responses/response_output_message"
416372
require_relative "openai/models/responses/response_output_refusal"
417373
require_relative "openai/models/responses/response_output_text"
418-
require_relative "openai/models/responses/response_output_text_annotation_added_event"
419-
require_relative "openai/models/responses/response_queued_event"
420-
require_relative "openai/models/responses/response_reasoning_delta_event"
421-
require_relative "openai/models/responses/response_reasoning_done_event"
422374
require_relative "openai/models/responses/response_reasoning_item"
423-
require_relative "openai/models/responses/response_reasoning_summary_delta_event"
424-
require_relative "openai/models/responses/response_reasoning_summary_done_event"
425375
require_relative "openai/models/responses/response_reasoning_summary_part_added_event"
426376
require_relative "openai/models/responses/response_reasoning_summary_part_done_event"
427377
require_relative "openai/models/responses/response_reasoning_summary_text_delta_event"

lib/openai/models.rb

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -81,16 +81,6 @@ module OpenAI
8181

8282
CompoundFilter = OpenAI::Models::CompoundFilter
8383

84-
ContainerCreateParams = OpenAI::Models::ContainerCreateParams
85-
86-
ContainerDeleteParams = OpenAI::Models::ContainerDeleteParams
87-
88-
ContainerListParams = OpenAI::Models::ContainerListParams
89-
90-
ContainerRetrieveParams = OpenAI::Models::ContainerRetrieveParams
91-
92-
Containers = OpenAI::Models::Containers
93-
9484
CreateEmbeddingResponse = OpenAI::Models::CreateEmbeddingResponse
9585

9686
Embedding = OpenAI::Models::Embedding
@@ -146,8 +136,6 @@ module OpenAI
146136
# @type [OpenAI::Internal::Type::Converter]
147137
FunctionParameters = OpenAI::Models::FunctionParameters
148138

149-
Graders = OpenAI::Models::Graders
150-
151139
Image = OpenAI::Models::Image
152140

153141
ImageCreateVariationParams = OpenAI::Models::ImageCreateVariationParams

lib/openai/models/all_models.rb

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,8 +16,6 @@ module ResponsesOnlyModel
1616

1717
O1_PRO = :"o1-pro"
1818
O1_PRO_2025_03_19 = :"o1-pro-2025-03-19"
19-
O3_PRO = :"o3-pro"
20-
O3_PRO_2025_06_10 = :"o3-pro-2025-06-10"
2119
COMPUTER_USE_PREVIEW = :"computer-use-preview"
2220
COMPUTER_USE_PREVIEW_2025_03_11 = :"computer-use-preview-2025-03-11"
2321

lib/openai/models/audio/speech_create_params.rb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ class SpeechCreateParams < OpenAI::Internal::Type::BaseModel
4646

4747
# @!attribute speed
4848
# The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
49-
# the default. Does not work with `gpt-4o-mini-tts`.
49+
# the default.
5050
#
5151
# @return [Float, nil]
5252
optional :speed, Float

lib/openai/models/audio/transcription_create_params.rb

Lines changed: 1 addition & 88 deletions
Original file line numberDiff line numberDiff line change
@@ -25,17 +25,6 @@ class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel
2525
# @return [String, Symbol, OpenAI::Models::AudioModel]
2626
required :model, union: -> { OpenAI::Audio::TranscriptionCreateParams::Model }
2727

28-
# @!attribute chunking_strategy
29-
# Controls how the audio is cut into chunks. When set to `"auto"`, the server
30-
# first normalizes loudness and then uses voice activity detection (VAD) to choose
31-
# boundaries. `server_vad` object can be provided to tweak VAD detection
32-
# parameters manually. If unset, the audio is transcribed as a single block.
33-
#
34-
# @return [Symbol, :auto, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig, nil]
35-
optional :chunking_strategy,
36-
union: -> { OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy },
37-
nil?: true
38-
3928
# @!attribute include
4029
# Additional information to include in the transcription response. `logprobs` will
4130
# return the log probabilities of the tokens in the response to understand the
@@ -94,16 +83,14 @@ class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel
9483
OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Audio::TranscriptionCreateParams::TimestampGranularity]
9584
}
9685

97-
# @!method initialize(file:, model:, chunking_strategy: nil, include: nil, language: nil, prompt: nil, response_format: nil, temperature: nil, timestamp_granularities: nil, request_options: {})
86+
# @!method initialize(file:, model:, include: nil, language: nil, prompt: nil, response_format: nil, temperature: nil, timestamp_granularities: nil, request_options: {})
9887
# Some parameter documentation has been truncated, see
9988
# {OpenAI::Models::Audio::TranscriptionCreateParams} for more details.
10089
#
10190
# @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The audio file object (not file name) to transcribe, in one of these formats: fl
10291
#
10392
# @param model [String, Symbol, OpenAI::Models::AudioModel] ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transc
10493
#
105-
# @param chunking_strategy [Symbol, :auto, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig, nil] Controls how the audio is cut into chunks. When set to `"auto"`, the server firs
106-
#
10794
# @param include [Array<Symbol, OpenAI::Models::Audio::TranscriptionInclude>] Additional information to include in the transcription response.
10895
#
10996
# @param language [String] The language of the input audio. Supplying the input language in [ISO-639-1](htt
@@ -133,80 +120,6 @@ module Model
133120
# @return [Array(String, Symbol, OpenAI::Models::AudioModel)]
134121
end
135122

136-
# Controls how the audio is cut into chunks. When set to `"auto"`, the server
137-
# first normalizes loudness and then uses voice activity detection (VAD) to choose
138-
# boundaries. `server_vad` object can be provided to tweak VAD detection
139-
# parameters manually. If unset, the audio is transcribed as a single block.
140-
module ChunkingStrategy
141-
extend OpenAI::Internal::Type::Union
142-
143-
# Automatically set chunking parameters based on the audio. Must be set to `"auto"`.
144-
variant const: :auto
145-
146-
variant -> { OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig }
147-
148-
class VadConfig < OpenAI::Internal::Type::BaseModel
149-
# @!attribute type
150-
# Must be set to `server_vad` to enable manual chunking using server side VAD.
151-
#
152-
# @return [Symbol, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type]
153-
required :type,
154-
enum: -> {
155-
OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type
156-
}
157-
158-
# @!attribute prefix_padding_ms
159-
# Amount of audio to include before the VAD detected speech (in milliseconds).
160-
#
161-
# @return [Integer, nil]
162-
optional :prefix_padding_ms, Integer
163-
164-
# @!attribute silence_duration_ms
165-
# Duration of silence to detect speech stop (in milliseconds). With shorter values
166-
# the model will respond more quickly, but may jump in on short pauses from the
167-
# user.
168-
#
169-
# @return [Integer, nil]
170-
optional :silence_duration_ms, Integer
171-
172-
# @!attribute threshold
173-
# Sensitivity threshold (0.0 to 1.0) for voice activity detection. A higher
174-
# threshold will require louder audio to activate the model, and thus might
175-
# perform better in noisy environments.
176-
#
177-
# @return [Float, nil]
178-
optional :threshold, Float
179-
180-
# @!method initialize(type:, prefix_padding_ms: nil, silence_duration_ms: nil, threshold: nil)
181-
# Some parameter documentation has been truncated, see
182-
# {OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig}
183-
# for more details.
184-
#
185-
# @param type [Symbol, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type] Must be set to `server_vad` to enable manual chunking using server side VAD.
186-
#
187-
# @param prefix_padding_ms [Integer] Amount of audio to include before the VAD detected speech (in
188-
#
189-
# @param silence_duration_ms [Integer] Duration of silence to detect speech stop (in milliseconds).
190-
#
191-
# @param threshold [Float] Sensitivity threshold (0.0 to 1.0) for voice activity detection. A
192-
193-
# Must be set to `server_vad` to enable manual chunking using server side VAD.
194-
#
195-
# @see OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig#type
196-
module Type
197-
extend OpenAI::Internal::Type::Enum
198-
199-
SERVER_VAD = :server_vad
200-
201-
# @!method self.values
202-
# @return [Array<Symbol>]
203-
end
204-
end
205-
206-
# @!method self.variants
207-
# @return [Array(Symbol, :auto, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig)]
208-
end
209-
210123
module TimestampGranularity
211124
extend OpenAI::Internal::Type::Enum
212125

lib/openai/models/audio/transcription_text_delta_event.rb

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -50,8 +50,8 @@ class Logprob < OpenAI::Internal::Type::BaseModel
5050
# @!attribute bytes
5151
# The bytes that were used to generate the log probability.
5252
#
53-
# @return [Array<Integer>, nil]
54-
optional :bytes, OpenAI::Internal::Type::ArrayOf[Integer]
53+
# @return [Array<Object>, nil]
54+
optional :bytes, OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
5555

5656
# @!attribute logprob
5757
# The log probability of the token.
@@ -65,7 +65,7 @@ class Logprob < OpenAI::Internal::Type::BaseModel
6565
#
6666
# @param token [String] The token that was used to generate the log probability.
6767
#
68-
# @param bytes [Array<Integer>] The bytes that were used to generate the log probability.
68+
# @param bytes [Array<Object>] The bytes that were used to generate the log probability.
6969
#
7070
# @param logprob [Float] The log probability of the token.
7171
end

lib/openai/models/audio/transcription_text_done_event.rb

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -51,8 +51,8 @@ class Logprob < OpenAI::Internal::Type::BaseModel
5151
# @!attribute bytes
5252
# The bytes that were used to generate the log probability.
5353
#
54-
# @return [Array<Integer>, nil]
55-
optional :bytes, OpenAI::Internal::Type::ArrayOf[Integer]
54+
# @return [Array<Object>, nil]
55+
optional :bytes, OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
5656

5757
# @!attribute logprob
5858
# The log probability of the token.
@@ -66,7 +66,7 @@ class Logprob < OpenAI::Internal::Type::BaseModel
6666
#
6767
# @param token [String] The token that was used to generate the log probability.
6868
#
69-
# @param bytes [Array<Integer>] The bytes that were used to generate the log probability.
69+
# @param bytes [Array<Object>] The bytes that were used to generate the log probability.
7070
#
7171
# @param logprob [Float] The log probability of the token.
7272
end

lib/openai/models/chat/chat_completion.rb

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -46,9 +46,9 @@ class ChatCompletion < OpenAI::Internal::Type::BaseModel
4646
# utilize scale tier credits until they are exhausted.
4747
# - If set to 'auto', and the Project is not Scale tier enabled, the request will
4848
# be processed using the default service tier with a lower uptime SLA and no
49-
# latency guarantee.
49+
# latency guarantee.
5050
# - If set to 'default', the request will be processed using the default service
51-
# tier with a lower uptime SLA and no latency guarantee.
51+
# tier with a lower uptime SLA and no latency guarantee.
5252
# - If set to 'flex', the request will be processed with the Flex Processing
5353
# service tier.
5454
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -195,9 +195,9 @@ class Logprobs < OpenAI::Internal::Type::BaseModel
195195
# utilize scale tier credits until they are exhausted.
196196
# - If set to 'auto', and the Project is not Scale tier enabled, the request will
197197
# be processed using the default service tier with a lower uptime SLA and no
198-
# latency guarantee.
198+
# latency guarentee.
199199
# - If set to 'default', the request will be processed using the default service
200-
# tier with a lower uptime SLA and no latency guarantee.
200+
# tier with a lower uptime SLA and no latency guarentee.
201201
# - If set to 'flex', the request will be processed with the Flex Processing
202202
# service tier.
203203
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).

0 commit comments

Comments
 (0)