From 4ba442bcb92845dc9273a4a7dd01c22ac5b59e90 Mon Sep 17 00:00:00 2001
From: Shreemaan Abhishek
Date: Mon, 3 Mar 2025 08:30:01 +0545
Subject: [PATCH] fix(ai-proxy): remove model options' `stream` default value
 (#12013)

---
 apisix/plugins/ai-proxy/schema.lua       | 1 -
 docs/en/latest/plugins/ai-proxy-multi.md | 2 +-
 docs/en/latest/plugins/ai-proxy.md       | 2 +-
 3 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/apisix/plugins/ai-proxy/schema.lua b/apisix/plugins/ai-proxy/schema.lua
index 4f756216ad31..8fe8e2d533c0 100644
--- a/apisix/plugins/ai-proxy/schema.lua
+++ b/apisix/plugins/ai-proxy/schema.lua
@@ -73,7 +73,6 @@ local model_options_schema = {
         stream = {
             description = "Stream response by SSE",
             type = "boolean",
-            default = false,
         }
     }
 }
diff --git a/docs/en/latest/plugins/ai-proxy-multi.md b/docs/en/latest/plugins/ai-proxy-multi.md
index 2359d4b9a7dd..bc449e138b54 100644
--- a/docs/en/latest/plugins/ai-proxy-multi.md
+++ b/docs/en/latest/plugins/ai-proxy-multi.md
@@ -68,7 +68,7 @@ Proxying requests to OpenAI is supported now. Other LLM services will be support
 | provider.options.output_cost | No       | number  | Cost per 1M tokens in the AI-generated output. Minimum is 0.             |         |
 | provider.options.temperature | No       | number  | Defines the model's temperature (0.0 - 5.0) for randomness in responses. |         |
 | provider.options.top_p       | No       | number  | Defines the top-p probability mass (0 - 1) for nucleus sampling.         |         |
-| provider.options.stream      | No       | boolean | Enables streaming responses via SSE.                                     | false   |
+| provider.options.stream      | No       | boolean | Enables streaming responses via SSE.                                     |         |
 | provider.override.endpoint   | No       | string  | Custom host override for the AI provider.                                |         |
 | passthrough                  | No       | boolean | If true, requests are forwarded without processing.                      | false   |
 | timeout                      | No       | integer | Request timeout in milliseconds (1-60000).                               | 3000    |
diff --git a/docs/en/latest/plugins/ai-proxy.md b/docs/en/latest/plugins/ai-proxy.md
index 0194205d94fc..ab910368b8e4 100644
--- a/docs/en/latest/plugins/ai-proxy.md
+++ b/docs/en/latest/plugins/ai-proxy.md
@@ -61,7 +61,7 @@ Proxying requests to OpenAI is supported now. Other LLM services will be support
 | model.options.output_cost | No       | Number  | Cost per 1M tokens in the output of the AI. Minimum: 0                         |
 | model.options.temperature | No       | Number  | Matching temperature for models. Range: 0.0 - 5.0                              |
 | model.options.top_p       | No       | Number  | Top-p probability mass. Range: 0 - 1                                           |
-| model.options.stream      | No       | Boolean | Stream response by SSE. Default: false                                         |
+| model.options.stream      | No       | Boolean | Stream response by SSE.                                                        |
 | override.endpoint         | No       | String  | Override the endpoint of the AI provider                                       |
 | passthrough               | No       | Boolean | If enabled, the response from LLM will be sent to the upstream. Default: false |
 | timeout                   | No       | Integer | Timeout in milliseconds for requests to LLM. Range: 1 - 60000. Default: 3000   |
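
What the change means in practice, as a minimal Lua sketch (not APISIX source; the `collect_options` helper and its field list are hypothetical): with the schema default removed, an unconfigured `stream` stays `nil` after validation instead of being filled in as `false`, so the option is only forwarded when a user sets it explicitly.

```lua
-- Hypothetical sketch of the behavior change; not APISIX code.
-- Previously the schema default caused validation to inject `stream = false`
-- when the field was absent; with the default removed, it stays nil.
local function collect_options(model_options)
    local opts = {}
    -- copy only the fields the user actually configured; assigning nil to a
    -- Lua table key is a no-op, so an unset `stream` never appears in `opts`
    for _, key in ipairs({ "max_tokens", "temperature", "top_p", "stream" }) do
        opts[key] = model_options[key]
    end
    return opts
end

-- stream left unset: remains absent rather than becoming `false`
assert(collect_options({ temperature = 0.7 }).stream == nil)
-- stream set explicitly: passed through unchanged
assert(collect_options({ stream = true }).stream == true)
```

For OpenAI-compatible APIs, omitting `stream` and sending `stream = false` are generally treated the same, so configurations that never set the option should see no behavioral difference; the documented default is simply no longer advertised.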