diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index 97765ed16..dd10aca31 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -4652,11 +4652,11 @@ async def search( of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the - default method. `_shards:,` to run the search only on the specified - shards. You can combine this value with other `preference` values. However, - the `_shards` value must come first. For example: `_shards:2,3|_local`. `` - (any string that does not start with `_`) to route searches with the same - `` to the same shards in the same order. + default method. * `_shards:,` to run the search only on the + specified shards. You can combine this value with other `preference` values. + However, the `_shards` value must come first. For example: `_shards:2,3|_local`. + * `` (any string that does not start with `_`) to route searches + with the same `` to the same shards in the same order. :param profile: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py index bb289c12d..1736c4b35 100644 --- a/elasticsearch/_async/client/cat.py +++ b/elasticsearch/_async/client/cat.py @@ -1767,7 +1767,200 @@ async def nodes( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, full_id: t.Optional[t.Union[bool, str]] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + "query_cache.evictions", + "query_cache.hit_count", + "query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + 
"search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + "query_cache.evictions", + "query_cache.hit_count", + "query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, @@ -1794,16 +1987,17 @@ async def nodes( to `text`, `json`, `cbor`, `yaml`, or `smile`. :param full_id: If `true`, return the full node ID. If `false`, return the shortened node ID. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of columns names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param time: Unit used to display time values. + :param master_timeout: The period to wait for a connection to the master node. + :param s: A comma-separated list of column names or aliases that determines the + sort order. 
Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. + :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index 07e7c58c2..0b5c9fde2 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -656,7 +656,15 @@ async def create( ``_ - :param index: Name of the index you wish to create. + :param index: Name of the index you wish to create. Index names must meet the + following criteria: * Lowercase only * Cannot include `\\`, `/`, `*`, `?`, + `"`, `<`, `>`, `|`, ` ` (space character), `,`, or `#` * Indices prior to + 7.0 could contain a colon (`:`), but that has been deprecated and will not + be supported in later versions * Cannot start with `-`, `_`, or `+` * Cannot + be `.` or `..` * Cannot be longer than 255 bytes (note that it is bytes, + so multi-byte characters will reach the limit faster) * Names starting with + `.` are deprecated, except for hidden indices and internal indices managed + by plugins :param aliases: Aliases for the index. :param mappings: Mapping for fields in the index. If specified, this mapping can include: - Field names - Field data types - Mapping parameters @@ -1246,7 +1254,8 @@ async def delete_template( """ .. raw:: html -

Delete a legacy index template.

+

Delete a legacy index template. + IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

``_ @@ -2880,7 +2889,7 @@ async def get_template( """ .. raw:: html -

Get index templates. +

Get legacy index templates. Get information about one or more index templates.

IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

@@ -3973,7 +3982,7 @@ async def put_template( """ .. raw:: html -

Create or update an index template. +

Create or update a legacy index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name.

IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
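As a concrete illustration of the legacy template lifecycle these docstrings describe, here is a minimal sketch using the async client; the template name, index pattern, and body are assumptions for the example, not values taken from this change.

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    # Legacy templates are applied at index-creation time to any new
    # index whose name matches one of the index patterns.
    await client.indices.put_template(
        name="logs-legacy",  # hypothetical template name
        index_patterns=["logs-*"],
        settings={"number_of_shards": 1},
        mappings={"properties": {"@timestamp": {"type": "date"}}},
    )
    print(await client.indices.get_template(name="logs-legacy"))
    await client.indices.delete_template(name="logs-legacy")
    await client.close()


asyncio.run(main())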

diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index 71318b954..8437a8b89 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -370,12 +370,7 @@ async def put( """ .. raw:: html -

Create an inference endpoint. - When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

+

Create an inference endpoint.

IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
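To make the generic `put` API concrete, a minimal sketch with the async client follows; the endpoint ID and service settings are illustrative assumptions (ELSER is one of the built-in services named above), not values from this diff.

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    # Hypothetical sparse-embedding endpoint backed by the built-in
    # ELSER model; allocation and thread counts depend on your cluster.
    await client.inference.put(
        task_type="sparse_embedding",
        inference_id="my-elser-endpoint",
        inference_config={
            "service": "elser",
            "service_settings": {"num_allocations": 1, "num_threads": 1},
        },
    )
    await client.close()


asyncio.run(main())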

@@ -458,11 +453,6 @@ async def put_alibabacloud(

Create an AlibabaCloud AI Search inference endpoint.

Create an inference endpoint to perform an inference task with the alibabacloud-ai-search service.

-

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

``_ @@ -558,11 +548,6 @@ async def put_amazonbedrock(

info You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.

-

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

``_ @@ -654,11 +639,6 @@ async def put_anthropic(

Create an Anthropic inference endpoint.

Create an inference endpoint to perform an inference task with the anthropic service.

-

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

``_ @@ -751,11 +731,6 @@ async def put_azureaistudio(

Create an Azure AI Studio inference endpoint.

Create an inference endpoint to perform an inference task with the azureaistudio service.

-

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

``_ @@ -853,11 +828,6 @@ async def put_azureopenai(
  • GPT-3.5
  • The list of embeddings models that you can choose from in your deployment can be found in the Azure models documentation.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -951,11 +921,6 @@ async def put_cohere(

    Create a Cohere inference endpoint.

    Create an inference endpoint to perform an inference task with the cohere service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1239,11 +1204,6 @@ async def put_googleaistudio(

Create a Google AI Studio inference endpoint.

    Create an inference endpoint to perform an inference task with the googleaistudio service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1331,11 +1291,6 @@ async def put_googlevertexai(

    Create a Google Vertex AI inference endpoint.

    Create an inference endpoint to perform an inference task with the googlevertexai service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1434,11 +1389,6 @@ async def put_hugging_face(
  • multilingual-e5-base
  • multilingual-e5-small
  • -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1528,11 +1478,6 @@ async def put_jinaai(

    Create an inference endpoint to perform an inference task with the jinaai service.

To review the available rerank models, refer to https://jina.ai/reranker. To review the available text_embedding models, refer to https://jina.ai/embeddings/.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1616,11 +1561,6 @@ async def put_mistral(

    Create a Mistral inference endpoint.

Create an inference endpoint to perform an inference task with the mistral service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1708,12 +1648,7 @@ async def put_openai( .. raw:: html

    Create an OpenAI inference endpoint.

    -

    Create an inference endpoint to perform an inference task with the openai service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    +

Create an inference endpoint to perform an inference task with the openai service or OpenAI-compatible APIs.
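A minimal sketch of such a call with the async client; the endpoint ID, API key placeholder, and model name are assumptions for the example.

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    # Hypothetical text-embedding endpoint backed by OpenAI.
    await client.inference.put_openai(
        task_type="text_embedding",
        openai_inference_id="my-openai-embeddings",
        service="openai",
        service_settings={
            "api_key": "<openai-api-key>",
            "model_id": "text-embedding-3-small",
        },
    )
    await client.close()


asyncio.run(main())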

    ``_ @@ -1890,11 +1825,6 @@ async def put_watsonx(

    Create an inference endpoint to perform an inference task with the watsonxai service. You need an IBM Cloud Databases for Elasticsearch deployment to use the watsonxai inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index 56ecd4f2f..a6230144c 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -4650,11 +4650,11 @@ def search( of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the - default method. `_shards:,` to run the search only on the specified - shards. You can combine this value with other `preference` values. However, - the `_shards` value must come first. For example: `_shards:2,3|_local`. `` - (any string that does not start with `_`) to route searches with the same - `` to the same shards in the same order. + default method. * `_shards:,` to run the search only on the + specified shards. You can combine this value with other `preference` values. + However, the `_shards` value must come first. For example: `_shards:2,3|_local`. + * `` (any string that does not start with `_`) to route searches + with the same `` to the same shards in the same order. :param profile: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. diff --git a/elasticsearch/_sync/client/cat.py b/elasticsearch/_sync/client/cat.py index 2a218a36c..d71571c57 100644 --- a/elasticsearch/_sync/client/cat.py +++ b/elasticsearch/_sync/client/cat.py @@ -1767,7 +1767,200 @@ def nodes( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, full_id: t.Optional[t.Union[bool, str]] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + "query_cache.evictions", + "query_cache.hit_count", + "query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + 
"search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + "query_cache.evictions", + "query_cache.hit_count", + "query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, @@ -1794,16 +1987,17 @@ def nodes( to `text`, `json`, `cbor`, `yaml`, or `smile`. :param full_id: If `true`, return the full node ID. If `false`, return the shortened node ID. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of columns names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param time: Unit used to display time values. + :param master_timeout: The period to wait for a connection to the master node. + :param s: A comma-separated list of column names or aliases that determines the + sort order. 
Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. + :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index 6cb3cb8d6..08913867b 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -656,7 +656,15 @@ def create( ``_ - :param index: Name of the index you wish to create. + :param index: Name of the index you wish to create. Index names must meet the + following criteria: * Lowercase only * Cannot include `\\`, `/`, `*`, `?`, + `"`, `<`, `>`, `|`, ` ` (space character), `,`, or `#` * Indices prior to + 7.0 could contain a colon (`:`), but that has been deprecated and will not + be supported in later versions * Cannot start with `-`, `_`, or `+` * Cannot + be `.` or `..` * Cannot be longer than 255 bytes (note that it is bytes, + so multi-byte characters will reach the limit faster) * Names starting with + `.` are deprecated, except for hidden indices and internal indices managed + by plugins :param aliases: Aliases for the index. :param mappings: Mapping for fields in the index. If specified, this mapping can include: - Field names - Field data types - Mapping parameters @@ -1246,7 +1254,8 @@ def delete_template( """ .. raw:: html -

    Delete a legacy index template.

    +

    Delete a legacy index template. + IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

    ``_ @@ -2880,7 +2889,7 @@ def get_template( """ .. raw:: html -

    Get index templates. +

    Get legacy index templates. Get information about one or more index templates.

    IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

    @@ -3973,7 +3982,7 @@ def put_template( """ .. raw:: html -

    Create or update an index template. +

    Create or update a legacy index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name.

    IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
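Since these legacy APIs are deprecated, a hedged sketch of the composable equivalent may help: with the sync client, the legacy top-level settings and mappings move under a `template` key of `put_index_template`, and `priority` plays the role of the legacy `order`. The names and values below are illustrative assumptions.

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Composable replacement for a hypothetical legacy "logs" template.
client.indices.put_index_template(
    name="logs-composable",
    index_patterns=["logs-*"],
    priority=100,
    template={
        "settings": {"number_of_shards": 1},
        "mappings": {"properties": {"@timestamp": {"type": "date"}}},
    },
)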

    diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 2dfcfd671..fa679f6e3 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -370,12 +370,7 @@ def put( """ .. raw:: html -

    Create an inference endpoint. - When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    +

    Create an inference endpoint.

    IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

    @@ -458,11 +453,6 @@ def put_alibabacloud(

    Create an AlibabaCloud AI Search inference endpoint.

    Create an inference endpoint to perform an inference task with the alibabacloud-ai-search service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -558,11 +548,6 @@ def put_amazonbedrock(

    info You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -654,11 +639,6 @@ def put_anthropic(

    Create an Anthropic inference endpoint.

    Create an inference endpoint to perform an inference task with the anthropic service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -751,11 +731,6 @@ def put_azureaistudio(

Create an Azure AI Studio inference endpoint.

    Create an inference endpoint to perform an inference task with the azureaistudio service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -853,11 +828,6 @@ def put_azureopenai(
  • GPT-3.5
  • The list of embeddings models that you can choose from in your deployment can be found in the Azure models documentation.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -951,11 +921,6 @@ def put_cohere(

    Create a Cohere inference endpoint.

    Create an inference endpoint to perform an inference task with the cohere service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1239,11 +1204,6 @@ def put_googleaistudio(

Create a Google AI Studio inference endpoint.

    Create an inference endpoint to perform an inference task with the googleaistudio service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1331,11 +1291,6 @@ def put_googlevertexai(

    Create a Google Vertex AI inference endpoint.

    Create an inference endpoint to perform an inference task with the googlevertexai service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1434,11 +1389,6 @@ def put_hugging_face(
  • multilingual-e5-base
  • multilingual-e5-small
  • -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1528,11 +1478,6 @@ def put_jinaai(

    Create an inference endpoint to perform an inference task with the jinaai service.

To review the available rerank models, refer to https://jina.ai/reranker. To review the available text_embedding models, refer to https://jina.ai/embeddings/.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1616,11 +1561,6 @@ def put_mistral(

    Create a Mistral inference endpoint.

Create an inference endpoint to perform an inference task with the mistral service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1708,12 +1648,7 @@ def put_openai( .. raw:: html

    Create an OpenAI inference endpoint.

    -

    Create an inference endpoint to perform an inference task with the openai service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    +

Create an inference endpoint to perform an inference task with the openai service or OpenAI-compatible APIs.
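For the OpenAI-compatible case, a minimal sketch with the sync client; the local URL, model name, and the assumption that the `openai` service accepts a `url` override in `service_settings` are all illustrative, so verify them against the service documentation.

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Hypothetical endpoint pointing at a local OpenAI-compatible server.
client.inference.put_openai(
    task_type="completion",
    openai_inference_id="my-local-llm",
    service="openai",
    service_settings={
        "api_key": "unused-but-required",  # a key is required even if the local server ignores it
        "model_id": "my-local-model",
        "url": "http://localhost:8000/v1/chat/completions",
    },
)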

    ``_ @@ -1890,11 +1825,6 @@ def put_watsonx(

    Create an inference endpoint to perform an inference task with the watsonxai service. You need an IBM Cloud Databases for Elasticsearch deployment to use the watsonxai inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

``_ diff --git a/elasticsearch/dsl/query.py b/elasticsearch/dsl/query.py index 06be2f7fb..03f50b951 100644 --- a/elasticsearch/dsl/query.py +++ b/elasticsearch/dsl/query.py @@ -1084,7 +1084,7 @@ class Knn(Query): :arg similarity: The minimum similarity for a vector to be considered a match :arg rescore_vector: Apply oversampling and rescoring to quantized - vectors * + vectors :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases diff --git a/elasticsearch/dsl/types.py b/elasticsearch/dsl/types.py index e6e19e410..1b75e1fa9 100644 --- a/elasticsearch/dsl/types.py +++ b/elasticsearch/dsl/types.py @@ -371,6 +371,9 @@ class DenseVectorIndexOptions(AttrDict[Any]): :arg m: The number of neighbors each node will be connected to in the HNSW graph. Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index types. Defaults to `16` if omitted. + :arg rescore_vector: The rescore vector options. This is only + applicable to `bbq_hnsw`, `int4_hnsw`, `int8_hnsw`, `bbq_flat`, + `int4_flat`, and `int8_flat` index types. """ type: Union[ @@ -389,6 +392,9 @@ class DenseVectorIndexOptions(AttrDict[Any]): confidence_interval: Union[float, DefaultType] ef_construction: Union[int, DefaultType] m: Union[int, DefaultType] + rescore_vector: Union[ + "DenseVectorIndexOptionsRescoreVector", Dict[str, Any], DefaultType + ] def __init__( self, @@ -409,6 +415,9 @@ def __init__( confidence_interval: Union[float, DefaultType] = DEFAULT, ef_construction: Union[int, DefaultType] = DEFAULT, m: Union[int, DefaultType] = DEFAULT, + rescore_vector: Union[ + "DenseVectorIndexOptionsRescoreVector", Dict[str, Any], DefaultType + ] = DEFAULT, **kwargs: Any, ): if type is not DEFAULT: @@ -419,6 +428,29 @@ def __init__( kwargs["ef_construction"] = ef_construction if m is not DEFAULT: kwargs["m"] = m + if rescore_vector is not DEFAULT: + kwargs["rescore_vector"] = rescore_vector + super().__init__(kwargs) + + +class DenseVectorIndexOptionsRescoreVector(AttrDict[Any]): + """ + :arg oversample: (required) The oversampling factor to use when + searching for the nearest neighbor. This is only applicable to the + quantized formats: `bbq_*`, `int4_*`, and `int8_*`. When provided, + `oversample * k` vectors will be gathered and then their scores + will be re-computed with the original vectors. Valid values are + between `1.0` and `10.0` (inclusive), or `0` exactly to disable + oversampling. + """ + + oversample: Union[float, DefaultType] + + def __init__( + self, *, oversample: Union[float, DefaultType] = DEFAULT, **kwargs: Any + ): + if oversample is not DEFAULT: + kwargs["oversample"] = oversample super().__init__(kwargs)
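To show how the new `rescore_vector` option fits together with the existing Knn query support, here is a minimal sketch; the field name, vectors, and oversample factors are illustrative assumptions.

from elasticsearch.dsl import query, types

# Index-time option: gather `oversample * k` candidates and rescore
# them with the original (non-quantized) vectors.
opts = types.DenseVectorIndexOptions(
    type="bbq_hnsw",
    rescore_vector=types.DenseVectorIndexOptionsRescoreVector(oversample=3.0),
)
print(opts.to_dict())

# Query-time counterpart on the Knn query.
knn = query.Knn(
    field="embedding",
    query_vector=[0.1, 0.2, 0.3],
    rescore_vector={"oversample": 2.0},
)
print(knn.to_dict())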